// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

#include <linux/netdevice.h>
#include "en.h"
#include "en/fs.h"
#include "eswitch.h"
#include "ipsec.h"
#include "fs_core.h"
#include "lib/ipsec_fs_roce.h"
#include "lib/fs_chains.h"
#include "esw/ipsec_fs.h"
#include "en_rep.h"

#define NUM_IPSEC_FTE BIT(15)
#define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
#define IPSEC_TUNNEL_DEFAULT_TTL 0x40

#define MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS 16

enum {
	MLX5_IPSEC_ASO_OK,
	MLX5_IPSEC_ASO_BAD_REPLY,

	/* For crypto offload, set by driver */
	MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD = 0xAA,
};

struct mlx5e_ipsec_fc {
	struct mlx5_fc *cnt;
	struct mlx5_fc *drop;
};

struct mlx5e_ipsec_tx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;
	struct mlx5e_ipsec_miss sa;
	struct mlx5e_ipsec_rule status;
	struct mlx5_flow_namespace *ns;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fs_chains *chains;
	u8 allow_tunnel_mode : 1;
};

struct mlx5e_ipsec_status_checks {
	struct mlx5_flow_group *pass_group;
	struct mlx5_flow_handle *packet_offload_pass_rule;
	struct mlx5_flow_handle *crypto_offload_pass_rule;
	struct mlx5_flow_group *drop_all_group;
	struct mlx5e_ipsec_drop all;
};

struct mlx5e_ipsec_rx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;
	struct mlx5e_ipsec_miss sa;
	struct mlx5e_ipsec_miss sa_sel;
	struct mlx5e_ipsec_status_checks status_checks;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fs_chains *chains;
	struct mlx5_flow_table *pol_miss_ft;
	struct mlx5_flow_handle *pol_miss_rule;
	u8 allow_tunnel_mode : 1;
};

/* IPsec RX flow steering */
static enum mlx5_traffic_types family2tt(u32 family)
{
	if (family == AF_INET)
		return MLX5_TT_IPV4_IPSEC_ESP;
	return MLX5_TT_IPV6_IPSEC_ESP;
}

static struct mlx5e_ipsec_rx *ipsec_rx(struct mlx5e_ipsec *ipsec, u32 family, int type)
{
	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
		return ipsec->rx_esw;

	if (family == AF_INET)
		return ipsec->rx_ipv4;

	return ipsec->rx_ipv6;
}

static struct mlx5e_ipsec_tx *ipsec_tx(struct mlx5e_ipsec *ipsec, int type)
{
	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
		return ipsec->tx_esw;

	return ipsec->tx;
}

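/* Wrap an auto-chains instance for policy priorities: chain 0, prio 1,
 * level 0 is taken as the root table so fs_core can connect it to the
 * previous table in the steering pipeline, and traffic that misses all
 * chains falls through to @miss_ft.
 */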
static struct mlx5_fs_chains *
ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft,
		    enum mlx5_flow_namespace_type ns, int base_prio,
		    int base_level, struct mlx5_flow_table **root_ft)
{
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	struct mlx5_flow_table *ft;
	int err;

	attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
		     MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.max_grp_num = 2;
	attr.default_ft = miss_ft;
	attr.ns = ns;
	attr.fs_base_prio = base_prio;
	attr.fs_base_level = base_level;
	chains = mlx5_chains_create(mdev, &attr);
	if (IS_ERR(chains))
		return chains;

	/* Create chain 0, prio 1, level 0 to connect chains to prev in fs_core */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_chains_get;
	}

	*root_ft = ft;
	return chains;

err_chains_get:
	mlx5_chains_destroy(chains);
	return ERR_PTR(err);
}

static void ipsec_chains_destroy(struct mlx5_fs_chains *chains)
{
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_destroy(chains);
}

static struct mlx5_flow_table *
ipsec_chains_get_table(struct mlx5_fs_chains *chains, u32 prio)
{
	return mlx5_chains_get_table(chains, 0, prio + 1, 0);
}

static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
{
	mlx5_chains_put_table(chains, 0, prio + 1, 0);
}

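/* Common helper for all IPsec tables: an auto-grouped table of
 * NUM_IPSEC_FTE (32K) entries; callers only vary level, prio, the
 * reserved-entry count (used for miss/status rules) and group budget.
 */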
static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
					       int level, int prio,
					       int num_reserved_entries,
					       int max_num_groups, u32 flags)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.autogroup.num_reserved_entries = num_reserved_entries;
	ft_attr.autogroup.max_num_groups = max_num_groups;
	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.level = level;
	ft_attr.prio = prio;
	ft_attr.flags = flags;

	return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}

static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx)
{
	mlx5_del_flow_rules(rx->status_checks.all.rule);
	mlx5_fc_destroy(ipsec->mdev, rx->status_checks.all.fc);
	mlx5_destroy_flow_group(rx->status_checks.drop_all_group);
}

static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx)
{
	mlx5_del_flow_rules(rx->status_checks.packet_offload_pass_rule);
	mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
}

static void ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
					struct mlx5e_ipsec_rx *rx,
					struct mlx5_flow_spec *spec)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;

	if (rx == ipsec->rx_esw) {
		mlx5_esw_ipsec_rx_rule_add_match_obj(sa_entry, spec);
	} else {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 misc_parameters_2.metadata_reg_c_2);
		MLX5_SET(fte_match_param, spec->match_value,
			 misc_parameters_2.metadata_reg_c_2,
			 sa_entry->ipsec_obj_id | BIT(31));

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	}
}

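/* Per-SA drop rules in the RX status table: packets arriving with
 * ipsec_syndrome 1 are counted and dropped by the "auth" rule, and
 * those with ipsec_syndrome 2 by the "trailer" rule, before any of
 * the status pass rules can forward them.
 */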
static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
					 struct mlx5e_ipsec_rx *rx)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_fc *flow_counter;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	flow_counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt;
	}
	sa_entry->ipsec_rule.auth.fc = flow_counter;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_act.flags = FLOW_ACT_NO_APPEND;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = flow_counter;
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule;
	}
	sa_entry->ipsec_rule.auth.rule = rule;

	flow_counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt_2;
	}
	sa_entry->ipsec_rule.trailer.fc = flow_counter;

	dest.counter = flow_counter;
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule_2;
	}
	sa_entry->ipsec_rule.trailer.rule = rule;

	kvfree(spec);
	return 0;

err_rule_2:
	mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.trailer.fc);
err_cnt_2:
	mlx5_del_flow_rules(sa_entry->ipsec_rule.auth.rule);
err_rule:
	mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.auth.fc);
err_cnt:
	kvfree(spec);
	return err;
}

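/* Per-SA anti-replay drop rule: the ASO reply writes
 * MLX5_IPSEC_ASO_BAD_REPLY (1) into metadata_reg_c_4, which this rule
 * matches, counts and drops.
 */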
static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_fc *flow_counter;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	flow_counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_act.flags = FLOW_ACT_NO_APPEND;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = flow_counter;
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule;
	}

	sa_entry->ipsec_rule.replay.rule = rule;
	sa_entry->ipsec_rule.replay.fc = flow_counter;

	kvfree(spec);
	return 0;

err_rule:
	mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
	kvfree(spec);
	return err;
}

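/* Catch-all drop: occupies the last FTE of the RX status table in its
 * own group so that any packet matching neither a per-SA drop rule nor
 * a status pass rule is counted and dropped.
 */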
static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
					   struct mlx5e_ipsec_rx *rx)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_fc *flow_counter;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto err_out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop flow group, err=%d\n", err);
		goto err_out;
	}

	flow_counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = flow_counter;
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule;
	}

	rx->status_checks.drop_all_group = g;
	rx->status_checks.all.rule = rule;
	rx->status_checks.all.fc = flow_counter;

	kvfree(flow_group_in);
	kvfree(spec);
	return 0;

err_rule:
	mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
	mlx5_destroy_flow_group(g);
err_out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

static int ipsec_rx_status_pass_group_create(struct mlx5e_ipsec *ipsec,
					     struct mlx5e_ipsec_rx *rx)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_flow_group *fg;
	void *match_criteria;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters_2.ipsec_syndrome);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_4);

	MLX5_SET(create_flow_group_in, flow_group_in,
		 start_flow_index, ft->max_fte - 3);
	MLX5_SET(create_flow_group_in, flow_group_in,
		 end_flow_index, ft->max_fte - 2);

	fg = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(fg)) {
		err = PTR_ERR(fg);
		mlx5_core_warn(ipsec->mdev,
			       "Failed to create rx status pass flow group, err=%d\n",
			       err);
	}
	rx->status_checks.pass_group = fg;

	kvfree(flow_group_in);
	return err;
}

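/* Add a status pass rule matching a clean decrypt (ipsec_syndrome == 0)
 * together with the ASO reply value in metadata_reg_c_4; @aso_ok selects
 * between the packet offload (MLX5_IPSEC_ASO_OK) and driver-marked
 * crypto offload (MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD) flavors.
 */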
static struct mlx5_flow_handle *
ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
			    struct mlx5e_ipsec_rx *rx,
			    struct mlx5_flow_destination *dest,
			    u8 aso_ok)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters_2.ipsec_syndrome);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters_2.metadata_reg_c_4);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.ipsec_syndrome, 0);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_c_4, aso_ok);
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_warn(ipsec->mdev,
			       "Failed to add ipsec rx status pass rule, err=%d\n", err);
		goto err_rule;
	}

	kvfree(spec);
	return rule;

err_rule:
	kvfree(spec);
	return ERR_PTR(err);
}

static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx)
{
	ipsec_rx_status_pass_destroy(ipsec, rx);
	mlx5_destroy_flow_group(rx->status_checks.pass_group);
	ipsec_rx_status_drop_destroy(ipsec, rx);
}

static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
				       struct mlx5e_ipsec_rx *rx,
				       struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_destination pol_dest[2];
	struct mlx5_flow_handle *rule;
	int err;

	err = ipsec_rx_status_drop_all_create(ipsec, rx);
	if (err)
		return err;

	err = ipsec_rx_status_pass_group_create(ipsec, rx);
	if (err)
		goto err_pass_group_create;

	rule = ipsec_rx_status_pass_create(ipsec, rx, dest,
					   MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_crypto_offload_pass_create;
	}
	rx->status_checks.crypto_offload_pass_rule = rule;

	pol_dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	pol_dest[0].ft = rx->ft.pol;
	pol_dest[1] = dest[1];
	rule = ipsec_rx_status_pass_create(ipsec, rx, pol_dest,
					   MLX5_IPSEC_ASO_OK);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_packet_offload_pass_create;
	}
	rx->status_checks.packet_offload_pass_rule = rule;

	return 0;

err_packet_offload_pass_create:
	mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
err_crypto_offload_pass_create:
	mlx5_destroy_flow_group(rx->status_checks.pass_group);
err_pass_group_create:
	ipsec_rx_status_drop_destroy(ipsec, rx);
	return err;
}

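/* Reserve the last FTE of @ft as a catch-all group and point its miss
 * rule at @dest; used as the default action of the SA and policy
 * tables on both the RX and TX paths.
 */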
static int ipsec_miss_create(struct mlx5_core_dev *mdev,
			     struct mlx5_flow_table *ft,
			     struct mlx5e_ipsec_miss *miss,
			     struct mlx5_flow_destination *dest)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Create miss_group */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	miss->group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss->group)) {
		err = PTR_ERR(miss->group);
		mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
			      err);
		goto out;
	}

	/* Create miss rule */
	miss->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(miss->rule)) {
		mlx5_destroy_flow_group(miss->group);
		err = PTR_ERR(miss->rule);
		mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
			      err);
		goto out;
	}
out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

static void ipsec_rx_update_default_dest(struct mlx5e_ipsec_rx *rx,
					 struct mlx5_flow_destination *old_dest,
					 struct mlx5_flow_destination *new_dest)
{
	mlx5_modify_rule_destination(rx->pol_miss_rule, new_dest, old_dest);
	mlx5_modify_rule_destination(rx->status_checks.crypto_offload_pass_rule,
				     new_dest, old_dest);
}

static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
	struct mlx5_flow_destination old_dest, new_dest;

	old_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
					     family2tt(family));

	mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, ns, &old_dest, family,
				     MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, MLX5E_NIC_PRIO);

	new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
	new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);
}

static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
	struct mlx5_flow_destination old_dest, new_dest;

	old_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
	old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
					     family2tt(family));
	ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);

	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev);
}

static void ipsec_mpv_work_handler(struct work_struct *_work)
{
	struct mlx5e_ipsec_mpv_work *work = container_of(_work, struct mlx5e_ipsec_mpv_work, work);
	struct mlx5e_ipsec *ipsec = work->slave_priv->ipsec;

	switch (work->event) {
	case MPV_DEVCOM_IPSEC_MASTER_UP:
		mutex_lock(&ipsec->tx->ft.mutex);
		if (ipsec->tx->ft.refcnt)
			mlx5_ipsec_fs_roce_tx_create(ipsec->mdev, ipsec->roce, ipsec->tx->ft.pol,
						     true);
		mutex_unlock(&ipsec->tx->ft.mutex);

		mutex_lock(&ipsec->rx_ipv4->ft.mutex);
		if (ipsec->rx_ipv4->ft.refcnt)
			handle_ipsec_rx_bringup(ipsec, AF_INET);
		mutex_unlock(&ipsec->rx_ipv4->ft.mutex);

		mutex_lock(&ipsec->rx_ipv6->ft.mutex);
		if (ipsec->rx_ipv6->ft.refcnt)
			handle_ipsec_rx_bringup(ipsec, AF_INET6);
		mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
		break;
	case MPV_DEVCOM_IPSEC_MASTER_DOWN:
		mutex_lock(&ipsec->tx->ft.mutex);
		if (ipsec->tx->ft.refcnt)
			mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce, ipsec->mdev);
		mutex_unlock(&ipsec->tx->ft.mutex);

		mutex_lock(&ipsec->rx_ipv4->ft.mutex);
		if (ipsec->rx_ipv4->ft.refcnt)
			handle_ipsec_rx_cleanup(ipsec, AF_INET);
		mutex_unlock(&ipsec->rx_ipv4->ft.mutex);

		mutex_lock(&ipsec->rx_ipv6->ft.mutex);
		if (ipsec->rx_ipv6->ft.refcnt)
			handle_ipsec_rx_cleanup(ipsec, AF_INET6);
		mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
		break;
	}

	complete(&work->master_priv->ipsec->comp);
}

static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);

	mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
}

static void ipsec_rx_policy_destroy(struct mlx5e_ipsec_rx *rx)
{
	if (rx->chains) {
		ipsec_chains_destroy(rx->chains);
	} else {
		mlx5_del_flow_rules(rx->pol.rule);
		mlx5_destroy_flow_group(rx->pol.group);
		mlx5_destroy_flow_table(rx->ft.pol);
	}

	if (rx->pol_miss_rule) {
		mlx5_del_flow_rules(rx->pol_miss_rule);
		mlx5_destroy_flow_table(rx->pol_miss_ft);
	}
}

static void ipsec_rx_sa_selector_destroy(struct mlx5_core_dev *mdev,
					 struct mlx5e_ipsec_rx *rx)
{
	mlx5_del_flow_rules(rx->sa_sel.rule);
	mlx5_fc_destroy(mdev, rx->sa_sel.fc);
	rx->sa_sel.fc = NULL;
	mlx5_destroy_flow_group(rx->sa_sel.group);
	mlx5_destroy_flow_table(rx->ft.sa_sel);
}

static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		       struct mlx5e_ipsec_rx *rx, u32 family)
{
	/* disconnect */
	if (rx != ipsec->rx_esw)
		ipsec_rx_ft_disconnect(ipsec, family);

	mlx5_del_flow_rules(rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
	mlx5_destroy_flow_table(rx->ft.sa);
	if (rx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
	mlx5_ipsec_rx_status_destroy(ipsec, rx);
	mlx5_destroy_flow_table(rx->ft.status);

	ipsec_rx_sa_selector_destroy(mdev, rx);

	ipsec_rx_policy_destroy(rx);

	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);

#ifdef CONFIG_MLX5_ESWITCH
	if (rx == ipsec->rx_esw)
		mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch),
				      0, 1, 0);
#endif
}

static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
				     struct mlx5e_ipsec_rx *rx,
				     u32 family,
				     struct mlx5e_ipsec_rx_create_attr *attr)
{
	if (rx == ipsec->rx_esw) {
		/* For packet offload in switchdev mode, RX & TX use FDB namespace */
		attr->ns = ipsec->tx_esw->ns;
		mlx5_esw_ipsec_rx_create_attr_set(ipsec, attr);
		return;
	}

	attr->ns = mlx5e_fs_get_ns(ipsec->fs, false);
	attr->ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
	attr->family = family;
	attr->prio = MLX5E_NIC_PRIO;
	attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL;
	attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
	attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
	attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL;
}

static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx,
					 struct mlx5e_ipsec_rx_create_attr *attr,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_table *ft;
	int err;

	if (rx == ipsec->rx_esw)
		return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest);

	*dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family));
	err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest,
					   attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
					   attr->prio);
	if (err)
		return err;

	ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, attr->family);
	if (ft) {
		dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest->ft = ft;
	}

	return 0;
}

static void ipsec_rx_sa_miss_dest_get(struct mlx5e_ipsec *ipsec,
				      struct mlx5e_ipsec_rx *rx,
				      struct mlx5e_ipsec_rx_create_attr *attr,
				      struct mlx5_flow_destination *dest,
				      struct mlx5_flow_destination *miss_dest)
{
	if (rx == ipsec->rx_esw)
		*miss_dest = *dest;
	else
		*miss_dest =
			mlx5_ttc_get_default_dest(attr->ttc,
						  family2tt(attr->family));
}

static void ipsec_rx_default_dest_get(struct mlx5e_ipsec *ipsec,
				      struct mlx5e_ipsec_rx *rx,
				      struct mlx5_flow_destination *dest)
{
	dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest->ft = rx->pol_miss_ft;
}

static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
				struct mlx5e_ipsec_rx *rx,
				struct mlx5e_ipsec_rx_create_attr *attr)
{
	struct mlx5_flow_destination dest = {};

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rx->ft.sa;
	mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
}

static int ipsec_rx_chains_create_miss(struct mlx5e_ipsec *ipsec,
				       struct mlx5e_ipsec_rx *rx,
				       struct mlx5e_ipsec_rx_create_attr *attr,
				       struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_table_attr ft_attr = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	int err;

	if (rx == ipsec->rx_esw) {
		/* No need to create miss table for switchdev mode,
		 * just set it to the root chain table.
		 */
		rx->pol_miss_ft = dest->ft;
		return 0;
	}

	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = attr->pol_level;
	ft_attr.prio = attr->prio;

	ft = mlx5_create_auto_grouped_flow_table(attr->ns, &ft_attr);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	rule = mlx5_add_flow_rules(ft, NULL, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule;
	}

	rx->pol_miss_ft = ft;
	rx->pol_miss_rule = rule;

	return 0;

err_rule:
	mlx5_destroy_flow_table(ft);
	return err;
}

static int ipsec_rx_policy_create(struct mlx5e_ipsec *ipsec,
				  struct mlx5e_ipsec_rx *rx,
				  struct mlx5e_ipsec_rx_create_attr *attr,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_destination default_dest;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_table *ft;
	int err;

	err = ipsec_rx_chains_create_miss(ipsec, rx, attr, dest);
	if (err)
		return err;

	ipsec_rx_default_dest_get(ipsec, rx, &default_dest);

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
		rx->chains = ipsec_chains_create(mdev,
						 default_dest.ft,
						 attr->chains_ns,
						 attr->prio,
						 attr->sa_level,
						 &rx->ft.pol);
		if (IS_ERR(rx->chains))
			err = PTR_ERR(rx->chains);
	} else {
		ft = ipsec_ft_create(attr->ns, attr->pol_level,
				     attr->prio, 1, 2, 0);
		if (IS_ERR(ft)) {
			err = PTR_ERR(ft);
			goto err_out;
		}
		rx->ft.pol = ft;

		err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol,
					&default_dest);
		if (err)
			mlx5_destroy_flow_table(rx->ft.pol);
	}

	if (!err)
		return 0;

err_out:
	if (rx->pol_miss_rule) {
		mlx5_del_flow_rules(rx->pol_miss_rule);
		mlx5_destroy_flow_table(rx->pol_miss_ft);
	}
	return err;
}

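/* Create the RX SA selector table with a counted catch-all drop in its
 * reserved last-FTE group: decrypted packets that require an
 * upper-protocol (UPSPEC) check and match no selector rule are dropped,
 * as shown in the decryption diagram below.
 */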
static int ipsec_rx_sa_selector_create(struct mlx5e_ipsec *ipsec,
				       struct mlx5e_ipsec_rx *rx,
				       struct mlx5e_ipsec_rx_create_attr *attr)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	u32 *flow_group_in;
	struct mlx5_fc *fc;
	int err;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	ft = ipsec_ft_create(attr->ns, attr->status_level, attr->prio, 1,
			     MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Failed to create RX SA selector flow table, err=%d\n",
			      err);
		goto err_ft;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
		 ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft->max_fte - 1);
	fg = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(fg)) {
		err = PTR_ERR(fg);
		mlx5_core_err(mdev, "Failed to create RX SA selector miss group, err=%d\n",
			      err);
		goto err_fg;
	}

	fc = mlx5_fc_create(mdev, false);
	if (IS_ERR(fc)) {
		err = PTR_ERR(fc);
		mlx5_core_err(mdev,
			      "Failed to create ipsec RX SA selector miss rule counter, err=%d\n",
			      err);
		goto err_cnt;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = fc;
	flow_act.action =
		MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_DROP;

	rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to create RX SA selector miss drop rule, err=%d\n",
			      err);
		goto err_rule;
	}

	rx->ft.sa_sel = ft;
	rx->sa_sel.group = fg;
	rx->sa_sel.fc = fc;
	rx->sa_sel.rule = rule;

	kvfree(flow_group_in);

	return 0;

err_rule:
	mlx5_fc_destroy(mdev, fc);
err_cnt:
	mlx5_destroy_flow_group(fg);
err_fg:
	mlx5_destroy_flow_table(ft);
err_ft:
	kvfree(flow_group_in);
	return err;
}

/* The decryption processing is as follows:
 *
 *   +----------+                         +-------------+
 *   |          |                         |             |
 *   |  Kernel  <--------------+----------+ policy miss <------------+
 *   |          |              ^          |             |            ^
 *   +----^-----+              |          +-------------+            |
 *        |                  crypto                                  |
 *      miss                offload ok                         allow/default
 *        ^                    ^                                     ^
 *        |                    |                  packet             |
 *   +----+---------+     +----+-------------+   offload ok   +------+---+
 *   |              |     |                  |   (no UPSPEC)  |          |
 *   | SA (decrypt) +----->      status      +--->------->----+  policy  |
 *   |              |     |                  |                |          |
 *   +--------------+     ++---------+-------+                +-^----+---+
 *                         |         |                          |    |
 *                         v        packet             +-->->---+    v
 *                         |       offload ok        match           |
 *                       fails    (with UPSPEC)        |           block
 *                         |         |   +-------------+-+           |
 *                         v         v   |               |  miss     v
 *                        drop       +--->    SA sel     +--------->drop
 *                                       |               |
 *                                       +---------------+
 */

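/* Assemble the RX pipeline bottom-up: status pass destination, status
 * table, SA selector, SA (decrypt) table with its miss rule, the policy
 * table(s), then the status-check rules; finally the TTC entry for
 * @family is pointed at the SA table.
 */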
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		     struct mlx5e_ipsec_rx *rx, u32 family)
{
	struct mlx5_flow_destination dest[2], miss_dest;
	struct mlx5e_ipsec_rx_create_attr attr;
	struct mlx5_flow_table *ft;
	u32 flags = 0;
	int err;

	ipsec_rx_create_attr_set(ipsec, rx, family, &attr);

	err = ipsec_rx_status_pass_dest_get(ipsec, rx, &attr, &dest[0]);
	if (err)
		return err;

	ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 4, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_fs_ft_status;
	}
	rx->ft.status = ft;

	err = ipsec_rx_sa_selector_create(ipsec, rx, &attr);
	if (err)
		goto err_fs_ft_sa_sel;

	/* Create FT */
	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
		rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
	if (rx->allow_tunnel_mode)
		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 1, 2, flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_fs_ft;
	}
	rx->ft.sa = ft;

	ipsec_rx_sa_miss_dest_get(ipsec, rx, &attr, &dest[0], &miss_dest);
	err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, &miss_dest);
	if (err)
		goto err_fs;

	err = ipsec_rx_policy_create(ipsec, rx, &attr, &dest[0]);
	if (err)
		goto err_policy;

	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter = rx->fc->cnt;
	err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
	if (err)
		goto err_add;

	/* connect */
	if (rx != ipsec->rx_esw)
		ipsec_rx_ft_connect(ipsec, rx, &attr);
	return 0;

err_add:
	ipsec_rx_policy_destroy(rx);
err_policy:
	mlx5_del_flow_rules(rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
err_fs:
	mlx5_destroy_flow_table(rx->ft.sa);
	if (rx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
err_fs_ft:
	ipsec_rx_sa_selector_destroy(mdev, rx);
err_fs_ft_sa_sel:
	mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
	return err;
}

static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		  struct mlx5e_ipsec_rx *rx, u32 family)
{
	int err;

	if (rx->ft.refcnt)
		goto skip;

	err = mlx5_eswitch_block_mode(mdev);
	if (err)
		return err;

	err = rx_create(mdev, ipsec, rx, family);
	if (err) {
		mlx5_eswitch_unblock_mode(mdev);
		return err;
	}

skip:
	rx->ft.refcnt++;
	return 0;
}

static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
		   u32 family)
{
	if (--rx->ft.refcnt)
		return;

	rx_destroy(ipsec->mdev, ipsec, rx, family);
	mlx5_eswitch_unblock_mode(ipsec->mdev);
}

static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
					struct mlx5e_ipsec *ipsec, u32 family,
					int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
	int err;

	mutex_lock(&rx->ft.mutex);
	err = rx_get(mdev, ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
	if (err)
		return ERR_PTR(err);

	return rx;
}

static struct mlx5_flow_table *rx_ft_get_policy(struct mlx5_core_dev *mdev,
						struct mlx5e_ipsec *ipsec,
						u32 family, u32 prio, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
	struct mlx5_flow_table *ft;
	int err;

	mutex_lock(&rx->ft.mutex);
	err = rx_get(mdev, ipsec, rx, family);
	if (err)
		goto err_get;

	ft = rx->chains ? ipsec_chains_get_table(rx->chains, prio) : rx->ft.pol;
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_get_ft;
	}

	mutex_unlock(&rx->ft.mutex);
	return ft;

err_get_ft:
	rx_put(ipsec, rx, family);
err_get:
	mutex_unlock(&rx->ft.mutex);
	return ERR_PTR(err);
}

static void rx_ft_put(struct mlx5e_ipsec *ipsec, u32 family, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);

	mutex_lock(&rx->ft.mutex);
	rx_put(ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
}

static void rx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 family, u32 prio, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);

	mutex_lock(&rx->ft.mutex);
	if (rx->chains)
		ipsec_chains_put_table(rx->chains, prio);

	rx_put(ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
}

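/* TX status rule: allow everything that reached the status table and
 * count it against tx->fc->cnt.
 */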
static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *fte;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = tx->fc->cnt;
	fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "Fail to add ipsec tx counter rule err=%d\n", err);
		goto err_rule;
	}

	kvfree(spec);
	tx->status.rule = fte;
	return 0;

err_rule:
	kvfree(spec);
	return err;
}

/* IPsec TX flow steering */
static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
		       struct mlx5_ipsec_fs *roce)
{
	mlx5_ipsec_fs_roce_tx_destroy(roce, ipsec->mdev);
	if (tx->chains) {
		ipsec_chains_destroy(tx->chains);
	} else {
		mlx5_del_flow_rules(tx->pol.rule);
		mlx5_destroy_flow_group(tx->pol.group);
		mlx5_destroy_flow_table(tx->ft.pol);
	}

	if (tx == ipsec->tx_esw) {
		mlx5_del_flow_rules(tx->sa.rule);
		mlx5_destroy_flow_group(tx->sa.group);
	}
	mlx5_destroy_flow_table(tx->ft.sa);
	if (tx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(ipsec->mdev);
	mlx5_del_flow_rules(tx->status.rule);
	mlx5_destroy_flow_table(tx->ft.status);
}

static void ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
				     struct mlx5e_ipsec_tx *tx,
				     struct mlx5e_ipsec_tx_create_attr *attr)
{
	if (tx == ipsec->tx_esw) {
		mlx5_esw_ipsec_tx_create_attr_set(ipsec, attr);
		return;
	}

	attr->prio = 0;
	attr->pol_level = 0;
	attr->sa_level = 1;
	attr->cnt_level = 2;
	attr->chains_ns = MLX5_FLOW_NAMESPACE_EGRESS_IPSEC;
}

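/* Build the TX pipeline: status (counter) table, SA table (with an
 * uplink-vport miss rule in switchdev mode), then either a chains
 * instance or a plain policy table, and finally the RoCE TX tables.
 */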
static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
		     struct mlx5_ipsec_fs *roce)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_tx_create_attr attr;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_table *ft;
	u32 flags = 0;
	int err;

	ipsec_tx_create_attr_set(ipsec, tx, &attr);
	ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 1, 0);
	if (IS_ERR(ft))
		return PTR_ERR(ft);
	tx->ft.status = ft;

	err = ipsec_counter_rule_tx(mdev, tx);
	if (err)
		goto err_status_rule;

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
		tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
	if (tx->allow_tunnel_mode)
		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 1, 4, flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_sa_ft;
	}
	tx->ft.sa = ft;

	if (tx == ipsec->tx_esw) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest.vport.num = MLX5_VPORT_UPLINK;
		err = ipsec_miss_create(mdev, tx->ft.sa, &tx->sa, &dest);
		if (err)
			goto err_sa_miss;
		memset(&dest, 0, sizeof(dest));
	}

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
		tx->chains = ipsec_chains_create(
			mdev, tx->ft.sa, attr.chains_ns, attr.prio, attr.pol_level,
			&tx->ft.pol);
		if (IS_ERR(tx->chains)) {
			err = PTR_ERR(tx->chains);
			goto err_pol_ft;
		}

		goto connect_roce;
	}

	ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 1, 2, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_pol_ft;
	}
	tx->ft.pol = ft;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = tx->ft.sa;
	err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
	if (err) {
		mlx5_destroy_flow_table(tx->ft.pol);
		goto err_pol_ft;
	}

connect_roce:
	err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol, false);
	if (err)
		goto err_roce;
	return 0;

err_roce:
	if (tx->chains) {
		ipsec_chains_destroy(tx->chains);
	} else {
		mlx5_del_flow_rules(tx->pol.rule);
		mlx5_destroy_flow_group(tx->pol.group);
		mlx5_destroy_flow_table(tx->ft.pol);
	}
err_pol_ft:
	if (tx == ipsec->tx_esw) {
		mlx5_del_flow_rules(tx->sa.rule);
		mlx5_destroy_flow_group(tx->sa.group);
	}
err_sa_miss:
	mlx5_destroy_flow_table(tx->ft.sa);
err_sa_ft:
	if (tx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
	mlx5_del_flow_rules(tx->status.rule);
err_status_rule:
	mlx5_destroy_flow_table(tx->ft.status);
	return err;
}

static void ipsec_esw_tx_ft_policy_set(struct mlx5_core_dev *mdev,
				       struct mlx5_flow_table *ft)
{
#ifdef CONFIG_MLX5_ESWITCH
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5e_priv *priv;

	esw->offloads.ft_ipsec_tx_pol = ft;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	priv = netdev_priv(uplink_rpriv->netdev);
	if (!priv->channels.num)
		return;

	mlx5e_rep_deactivate_channels(priv);
	mlx5e_rep_activate_channels(priv);
#endif
}

static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		  struct mlx5e_ipsec_tx *tx)
{
	int err;

	if (tx->ft.refcnt)
		goto skip;

	err = mlx5_eswitch_block_mode(mdev);
	if (err)
		return err;

	err = tx_create(ipsec, tx, ipsec->roce);
	if (err) {
		mlx5_eswitch_unblock_mode(mdev);
		return err;
	}

	if (tx == ipsec->tx_esw)
		ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);

skip:
	tx->ft.refcnt++;
	return 0;
}

static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
{
	if (--tx->ft.refcnt)
		return;

	if (tx == ipsec->tx_esw) {
		mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev);
		ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
	}

	tx_destroy(ipsec, tx, ipsec->roce);
	mlx5_eswitch_unblock_mode(ipsec->mdev);
}

static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
						struct mlx5e_ipsec *ipsec,
						u32 prio, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
	struct mlx5_flow_table *ft;
	int err;

	mutex_lock(&tx->ft.mutex);
	err = tx_get(mdev, ipsec, tx);
	if (err)
		goto err_get;

	ft = tx->chains ? ipsec_chains_get_table(tx->chains, prio) : tx->ft.pol;
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_get_ft;
	}

	mutex_unlock(&tx->ft.mutex);
	return ft;

err_get_ft:
	tx_put(ipsec, tx);
err_get:
	mutex_unlock(&tx->ft.mutex);
	return ERR_PTR(err);
}

static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
					struct mlx5e_ipsec *ipsec, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
	int err;

	mutex_lock(&tx->ft.mutex);
	err = tx_get(mdev, ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
	if (err)
		return ERR_PTR(err);

	return tx;
}

static void tx_ft_put(struct mlx5e_ipsec *ipsec, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);

	mutex_lock(&tx->ft.mutex);
	tx_put(ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
}

static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);

	mutex_lock(&tx->ft.mutex);
	if (tx->chains)
		ipsec_chains_put_table(tx->chains, prio);

	tx_put(ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
}

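/* setup_fte_* helpers below fill a flow spec from SA/policy attributes.
 * For addresses, an all-zero saddr/daddr means "any" and adds no match;
 * otherwise the address is matched under the caller-provided mask.
 */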
static void setup_fte_addr4(struct mlx5_flow_spec *spec,
			    struct mlx5e_ipsec_addr *addrs)
{
	__be32 *saddr = &addrs->saddr.a4;
	__be32 *smask = &addrs->smask.m4;
	__be32 *daddr = &addrs->daddr.a4;
	__be32 *dmask = &addrs->dmask.m4;

	if (!*saddr && !*daddr)
		return;

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);

	if (*saddr) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), smask, 4);
	}

	if (*daddr) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), dmask, 4);
	}
}

static void setup_fte_addr6(struct mlx5_flow_spec *spec,
			    struct mlx5e_ipsec_addr *addrs)
{
	__be32 *saddr = addrs->saddr.a6;
	__be32 *smask = addrs->smask.m6;
	__be32 *daddr = addrs->daddr.a6;
	__be32 *dmask = addrs->dmask.m6;

	if (addr6_all_zero(saddr) && addr6_all_zero(daddr))
		return;

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);

	if (!addr6_all_zero(saddr)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), smask, 16);
	}

	if (!addr6_all_zero(daddr)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), dmask, 16);
	}
}

static void setup_fte_esp(struct mlx5_flow_spec *spec)
{
	/* ESP header */
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
}

setup_fte_spi(struct mlx5_flow_spec * spec,u32 spi,bool encap)1558 static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
1559 {
1560 	/* SPI number */
1561 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
1562 
1563 	if (encap) {
1564 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1565 				 misc_parameters.inner_esp_spi);
1566 		MLX5_SET(fte_match_param, spec->match_value,
1567 			 misc_parameters.inner_esp_spi, spi);
1568 	} else {
1569 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1570 				 misc_parameters.outer_esp_spi);
1571 		MLX5_SET(fte_match_param, spec->match_value,
1572 			 misc_parameters.outer_esp_spi, spi);
1573 	}
1574 }
1575 
setup_fte_no_frags(struct mlx5_flow_spec * spec)1576 static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
1577 {
1578 	/* Non fragmented */
1579 	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
1580 
1581 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
1582 	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
1583 }
1584 
setup_fte_reg_a(struct mlx5_flow_spec * spec)1585 static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
1586 {
1587 	/* Add IPsec indicator in metadata_reg_a */
1588 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
1589 
1590 	MLX5_SET(fte_match_param, spec->match_criteria,
1591 		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
1592 	MLX5_SET(fte_match_param, spec->match_value,
1593 		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
1594 }
1595 
setup_fte_reg_c4(struct mlx5_flow_spec * spec,u32 reqid)1596 static void setup_fte_reg_c4(struct mlx5_flow_spec *spec, u32 reqid)
1597 {
1598 	/* Pass policy check before choosing this SA */
1599 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
1600 
1601 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1602 			 misc_parameters_2.metadata_reg_c_4);
1603 	MLX5_SET(fte_match_param, spec->match_value,
1604 		 misc_parameters_2.metadata_reg_c_4, reqid);
1605 }
1606 
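/* Narrow the match to the policy's upper-layer selector: TCP/UDP
 * source/destination ports are matched only when set, and the IP
 * protocol match is added for any recognized protocol. Other protocols
 * leave the spec untouched.
 */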
1607 static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
1608 {
1609 	switch (upspec->proto) {
1610 	case IPPROTO_UDP:
1611 		if (upspec->dport) {
1612 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1613 				 udp_dport, upspec->dport_mask);
1614 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1615 				 udp_dport, upspec->dport);
1616 		}
1617 		if (upspec->sport) {
1618 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1619 				 udp_sport, upspec->sport_mask);
1620 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1621 				 udp_sport, upspec->sport);
1622 		}
1623 		break;
1624 	case IPPROTO_TCP:
1625 		if (upspec->dport) {
1626 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1627 				 tcp_dport, upspec->dport_mask);
1628 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1629 				 tcp_dport, upspec->dport);
1630 		}
1631 		if (upspec->sport) {
1632 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1633 				 tcp_sport, upspec->sport_mask);
1634 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1635 				 tcp_sport, upspec->sport);
1636 		}
1637 		break;
1638 	default:
1639 		return;
1640 	}
1641 
1642 	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
1643 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
1644 	MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
1645 }
1646 
1647 static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec,
1648 						     int type, u8 dir)
1649 {
1650 	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
1651 		return MLX5_FLOW_NAMESPACE_FDB;
1652 
1653 	if (dir == XFRM_DEV_OFFLOAD_IN)
1654 		return MLX5_FLOW_NAMESPACE_KERNEL;
1655 
1656 	return MLX5_FLOW_NAMESPACE_EGRESS;
1657 }
1658 
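/* Attach a modify-header action that ties packets back to this SA or
 * policy: on RX the SA handle is written to metadata REG_B and REG_C_2
 * (with a SW-crypto-offload marker in REG_C_4 for crypto mode), on TX
 * the policy's reqid is stamped into REG_C_4 so the SA table can match
 * it via setup_fte_reg_c4().
 */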
1659 static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8 dir,
1660 			       struct mlx5_flow_act *flow_act)
1661 {
1662 	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
1663 	u8 action[3][MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
1664 	struct mlx5_core_dev *mdev = ipsec->mdev;
1665 	struct mlx5_modify_hdr *modify_hdr;
1666 	u8 num_of_actions = 1;
1667 
1668 	MLX5_SET(set_action_in, action[0], action_type, MLX5_ACTION_TYPE_SET);
1669 	switch (dir) {
1670 	case XFRM_DEV_OFFLOAD_IN:
1671 		MLX5_SET(set_action_in, action[0], field,
1672 			 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
1673 
1674 		num_of_actions++;
1675 		MLX5_SET(set_action_in, action[1], action_type, MLX5_ACTION_TYPE_SET);
1676 		MLX5_SET(set_action_in, action[1], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2);
1677 		MLX5_SET(set_action_in, action[1], data, val);
1678 		MLX5_SET(set_action_in, action[1], offset, 0);
1679 		MLX5_SET(set_action_in, action[1], length, 32);
1680 
1681 		if (type == XFRM_DEV_OFFLOAD_CRYPTO) {
1682 			num_of_actions++;
1683 			MLX5_SET(set_action_in, action[2], action_type,
1684 				 MLX5_ACTION_TYPE_SET);
1685 			MLX5_SET(set_action_in, action[2], field,
1686 				 MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
1687 			MLX5_SET(set_action_in, action[2], data,
1688 				 MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
1689 			MLX5_SET(set_action_in, action[2], offset, 0);
1690 			MLX5_SET(set_action_in, action[2], length, 32);
1691 		}
1692 		break;
1693 	case XFRM_DEV_OFFLOAD_OUT:
1694 		MLX5_SET(set_action_in, action[0], field,
1695 			 MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
1696 		break;
1697 	default:
1698 		return -EINVAL;
1699 	}
1700 
1701 	MLX5_SET(set_action_in, action[0], data, val);
1702 	MLX5_SET(set_action_in, action[0], offset, 0);
1703 	MLX5_SET(set_action_in, action[0], length, 32);
1704 
1705 	modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
1706 	if (IS_ERR(modify_hdr)) {
1707 		mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
1708 			      PTR_ERR(modify_hdr));
1709 		return PTR_ERR(modify_hdr);
1710 	}
1711 
1712 	flow_act->modify_hdr = modify_hdr;
1713 	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1714 	return 0;
1715 }
1716 
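/* Build the packet-reformat context for tunnel mode. For TX this is the
 * full L2 + outer IP + ESP header template the device prepends when
 * encapsulating (TTL/hop limit fixed at IPSEC_TUNNEL_DEFAULT_TTL); for
 * RX only an L2 header is needed to rebuild the frame after
 * decapsulation.
 */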
1717 static int
1718 setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
1719 			  struct mlx5_accel_esp_xfrm_attrs *attrs,
1720 			  struct mlx5_pkt_reformat_params *reformat_params)
1721 {
1722 	struct ip_esp_hdr *esp_hdr;
1723 	struct ipv6hdr *ipv6hdr;
1724 	struct ethhdr *eth_hdr;
1725 	struct iphdr *iphdr;
1726 	char *reformatbf;
1727 	size_t bfflen;
1728 	void *hdr;
1729 
1730 	bfflen = sizeof(*eth_hdr);
1731 
1732 	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
1733 		bfflen += sizeof(*esp_hdr) + 8;
1734 
1735 		switch (attrs->addrs.family) {
1736 		case AF_INET:
1737 			bfflen += sizeof(*iphdr);
1738 			break;
1739 		case AF_INET6:
1740 			bfflen += sizeof(*ipv6hdr);
1741 			break;
1742 		default:
1743 			return -EINVAL;
1744 		}
1745 	}
1746 
1747 	reformatbf = kzalloc(bfflen, GFP_KERNEL);
1748 	if (!reformatbf)
1749 		return -ENOMEM;
1750 
1751 	eth_hdr = (struct ethhdr *)reformatbf;
1752 	switch (attrs->addrs.family) {
1753 	case AF_INET:
1754 		eth_hdr->h_proto = htons(ETH_P_IP);
1755 		break;
1756 	case AF_INET6:
1757 		eth_hdr->h_proto = htons(ETH_P_IPV6);
1758 		break;
1759 	default:
1760 		goto free_reformatbf;
1761 	}
1762 
1763 	ether_addr_copy(eth_hdr->h_dest, attrs->dmac);
1764 	ether_addr_copy(eth_hdr->h_source, attrs->smac);
1765 
1766 	switch (attrs->dir) {
1767 	case XFRM_DEV_OFFLOAD_IN:
1768 		reformat_params->type = MLX5_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2;
1769 		break;
1770 	case XFRM_DEV_OFFLOAD_OUT:
1771 		reformat_params->type = MLX5_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL;
1772 		reformat_params->param_0 = attrs->authsize;
1773 
1774 		hdr = reformatbf + sizeof(*eth_hdr);
1775 		switch (attrs->addrs.family) {
1776 		case AF_INET:
1777 			iphdr = (struct iphdr *)hdr;
1778 			memcpy(&iphdr->saddr, &attrs->addrs.saddr.a4, 4);
1779 			memcpy(&iphdr->daddr, &attrs->addrs.daddr.a4, 4);
1780 			iphdr->version = 4;
1781 			iphdr->ihl = 5;
1782 			iphdr->ttl = IPSEC_TUNNEL_DEFAULT_TTL;
1783 			iphdr->protocol = IPPROTO_ESP;
1784 			hdr += sizeof(*iphdr);
1785 			break;
1786 		case AF_INET6:
1787 			ipv6hdr = (struct ipv6hdr *)hdr;
1788 			memcpy(&ipv6hdr->saddr, &attrs->addrs.saddr.a6, 16);
1789 			memcpy(&ipv6hdr->daddr, &attrs->addrs.daddr.a6, 16);
1790 			ipv6hdr->nexthdr = IPPROTO_ESP;
1791 			ipv6hdr->version = 6;
1792 			ipv6hdr->hop_limit = IPSEC_TUNNEL_DEFAULT_TTL;
1793 			hdr += sizeof(*ipv6hdr);
1794 			break;
1795 		default:
1796 			goto free_reformatbf;
1797 		}
1798 
1799 		esp_hdr = (struct ip_esp_hdr *)hdr;
1800 		esp_hdr->spi = htonl(attrs->spi);
1801 		break;
1802 	default:
1803 		goto free_reformatbf;
1804 	}
1805 
1806 	reformat_params->size = bfflen;
1807 	reformat_params->data = reformatbf;
1808 	return 0;
1809 
1810 free_reformatbf:
1811 	kfree(reformatbf);
1812 	return -EINVAL;
1813 }
1814 
1815 static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
1816 {
1817 	switch (attrs->dir) {
1818 	case XFRM_DEV_OFFLOAD_IN:
1819 		if (attrs->encap)
1820 			return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
1821 		return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
1822 	case XFRM_DEV_OFFLOAD_OUT:
1823 		if (attrs->addrs.family == AF_INET) {
1824 			if (attrs->encap)
1825 				return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
1826 			return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
1827 		}
1828 
1829 		if (attrs->encap)
1830 			return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6;
1831 		return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
1832 	default:
1833 		WARN_ON(true);
1834 	}
1835 
1836 	return -EINVAL;
1837 }
1838 
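/* Build the packet-reformat context for transport mode. RX needs no
 * template; for TX the buffer holds an optional NAT-T UDP header
 * followed by the 16-byte ESP header area with only the SPI filled in.
 * The remaining ESP fields are presumably completed by the device at
 * encryption time.
 */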
1839 static int
1840 setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
1841 			     struct mlx5_pkt_reformat_params *reformat_params)
1842 {
1843 	struct udphdr *udphdr;
1844 	char *reformatbf;
1845 	size_t bfflen;
1846 	__be32 spi;
1847 	void *hdr;
1848 
1849 	reformat_params->type = get_reformat_type(attrs);
1850 	if (reformat_params->type < 0)
1851 		return reformat_params->type;
1852 
1853 	switch (attrs->dir) {
1854 	case XFRM_DEV_OFFLOAD_IN:
1855 		break;
1856 	case XFRM_DEV_OFFLOAD_OUT:
1857 		bfflen = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE;
1858 		if (attrs->encap)
1859 			bfflen += sizeof(*udphdr);
1860 
1861 		reformatbf = kzalloc(bfflen, GFP_KERNEL);
1862 		if (!reformatbf)
1863 			return -ENOMEM;
1864 
1865 		hdr = reformatbf;
1866 		if (attrs->encap) {
1867 			udphdr = (struct udphdr *)reformatbf;
1868 			udphdr->source = attrs->sport;
1869 			udphdr->dest = attrs->dport;
1870 			hdr += sizeof(*udphdr);
1871 		}
1872 
1873 		/* convert to network format */
1874 		spi = htonl(attrs->spi);
1875 		memcpy(hdr, &spi, sizeof(spi));
1876 
1877 		reformat_params->param_0 = attrs->authsize;
1878 		reformat_params->size = bfflen;
1879 		reformat_params->data = reformatbf;
1880 		break;
1881 	default:
1882 		return -EINVAL;
1883 	}
1884 
1885 	return 0;
1886 }
1887 
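/* Allocate the firmware reformat object that matches the SA's mode
 * (transport or tunnel) and attach it to the flow action.
 */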
1888 static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
1889 			      struct mlx5_accel_esp_xfrm_attrs *attrs,
1890 			      struct mlx5_flow_act *flow_act)
1891 {
1892 	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->type,
1893 								attrs->dir);
1894 	struct mlx5_pkt_reformat_params reformat_params = {};
1895 	struct mlx5_core_dev *mdev = ipsec->mdev;
1896 	struct mlx5_pkt_reformat *pkt_reformat;
1897 	int ret;
1898 
1899 	switch (attrs->mode) {
1900 	case XFRM_MODE_TRANSPORT:
1901 		ret = setup_pkt_transport_reformat(attrs, &reformat_params);
1902 		break;
1903 	case XFRM_MODE_TUNNEL:
1904 		ret = setup_pkt_tunnel_reformat(mdev, attrs, &reformat_params);
1905 		break;
1906 	default:
1907 		ret = -EINVAL;
1908 	}
1909 
1910 	if (ret)
1911 		return ret;
1912 
1913 	pkt_reformat =
1914 		mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
1915 	kfree(reformat_params.data);
1916 	if (IS_ERR(pkt_reformat))
1917 		return PTR_ERR(pkt_reformat);
1918 
1919 	flow_act->pkt_reformat = pkt_reformat;
1920 	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
1921 	return 0;
1922 }
1923 
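/* Enforce the policy's L4 selector per SA. Two rules are added: a
 * status-table rule steering this SA's successfully decrypted packets
 * (both syndromes zero) to the SA-selector table, and a rule there
 * forwarding only packets matching the upper-protocol spec on to the
 * policy table. Packets failing the selector take the sa_sel miss path,
 * which is what the ipsec_rx_drop_mismatch_sa_sel counter reports.
 */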
1924 static int rx_add_rule_sa_selector(struct mlx5e_ipsec_sa_entry *sa_entry,
1925 				   struct mlx5e_ipsec_rx *rx,
1926 				   struct upspec *upspec)
1927 {
1928 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
1929 	struct mlx5_core_dev *mdev = ipsec->mdev;
1930 	struct mlx5_flow_destination dest[2];
1931 	struct mlx5_flow_act flow_act = {};
1932 	struct mlx5_flow_handle *rule;
1933 	struct mlx5_flow_spec *spec;
1934 	int err = 0;
1935 
1936 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1937 	if (!spec)
1938 		return -ENOMEM;
1939 
1940 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1941 			 misc_parameters_2.ipsec_syndrome);
1942 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1943 			 misc_parameters_2.metadata_reg_c_4);
1944 	MLX5_SET(fte_match_param, spec->match_value,
1945 		 misc_parameters_2.ipsec_syndrome, 0);
1946 	MLX5_SET(fte_match_param, spec->match_value,
1947 		 misc_parameters_2.metadata_reg_c_4, 0);
1948 	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1949 
1950 	ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
1951 
1952 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1953 			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
1954 	flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
1955 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1956 	dest[0].ft = rx->ft.sa_sel;
1957 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1958 	dest[1].counter = rx->fc->cnt;
1959 
1960 	rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
1961 	if (IS_ERR(rule)) {
1962 		err = PTR_ERR(rule);
1963 		mlx5_core_err(mdev,
1964 			      "Failed to add ipsec rx pass rule, err=%d\n",
1965 			      err);
1966 		goto err_add_status_pass_rule;
1967 	}
1968 
1969 	sa_entry->ipsec_rule.status_pass = rule;
1970 
1971 	MLX5_SET(fte_match_param, spec->match_criteria,
1972 		 misc_parameters_2.ipsec_syndrome, 0);
1973 	MLX5_SET(fte_match_param, spec->match_criteria,
1974 		 misc_parameters_2.metadata_reg_c_4, 0);
1975 
1976 	setup_fte_upper_proto_match(spec, upspec);
1977 
1978 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1979 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1980 	dest[0].ft = rx->ft.pol;
1981 
1982 	rule = mlx5_add_flow_rules(rx->ft.sa_sel, spec, &flow_act, &dest[0], 1);
1983 	if (IS_ERR(rule)) {
1984 		err = PTR_ERR(rule);
1985 		mlx5_core_err(mdev,
1986 			      "Failed to add ipsec rx sa selector rule, err=%d\n",
1987 			      err);
1988 		goto err_add_sa_sel_rule;
1989 	}
1990 
1991 	sa_entry->ipsec_rule.sa_sel = rule;
1992 
1993 	kvfree(spec);
1994 	return 0;
1995 
1996 err_add_sa_sel_rule:
1997 	mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
1998 err_add_status_pass_rule:
1999 	kvfree(spec);
2000 	return err;
2001 }
2002 
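/* Install the RX SA rule: match the SA's addresses, SPI and
 * non-fragmented traffic, decrypt inline, count, and forward to the
 * status table, where the syndrome checks (plus the optional SA
 * selector, anti-replay and auth/trailer drop rules added below) decide
 * the packet's fate.
 */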
2003 static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
2004 {
2005 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
2006 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
2007 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
2008 	struct mlx5_flow_destination dest[2];
2009 	struct mlx5_flow_act flow_act = {};
2010 	struct mlx5_flow_handle *rule;
2011 	struct mlx5_flow_spec *spec;
2012 	struct mlx5e_ipsec_rx *rx;
2013 	struct mlx5_fc *counter;
2014 	int err = 0;
2015 
2016 	rx = rx_ft_get(mdev, ipsec, attrs->addrs.family, attrs->type);
2017 	if (IS_ERR(rx))
2018 		return PTR_ERR(rx);
2019 
2020 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2021 	if (!spec) {
2022 		err = -ENOMEM;
2023 		goto err_alloc;
2024 	}
2025 
2026 	if (attrs->addrs.family == AF_INET)
2027 		setup_fte_addr4(spec, &attrs->addrs);
2028 	else
2029 		setup_fte_addr6(spec, &attrs->addrs);
2030 
2031 	setup_fte_spi(spec, attrs->spi, attrs->encap);
2032 	if (!attrs->encap)
2033 		setup_fte_esp(spec);
2034 	setup_fte_no_frags(spec);
2035 
2036 	if (!attrs->drop) {
2037 		if (rx != ipsec->rx_esw)
2038 			err = setup_modify_header(ipsec, attrs->type,
2039 						  sa_entry->ipsec_obj_id | BIT(31),
2040 						  XFRM_DEV_OFFLOAD_IN, &flow_act);
2041 		else
2042 			err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);
2043 
2044 		if (err)
2045 			goto err_mod_header;
2046 	}
2047 
2048 	switch (attrs->type) {
2049 	case XFRM_DEV_OFFLOAD_PACKET:
2050 		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
2051 		if (err)
2052 			goto err_pkt_reformat;
2053 		break;
2054 	default:
2055 		break;
2056 	}
2057 
2058 	counter = mlx5_fc_create(mdev, true);
2059 	if (IS_ERR(counter)) {
2060 		err = PTR_ERR(counter);
2061 		goto err_add_cnt;
2062 	}
2063 	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
2064 	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
2065 	flow_act.flags |= FLOW_ACT_NO_APPEND;
2066 	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
2067 			   MLX5_FLOW_CONTEXT_ACTION_COUNT;
2068 	if (attrs->drop)
2069 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2070 	else
2071 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2072 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2073 	dest[0].ft = rx->ft.status;
2074 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
2075 	dest[1].counter = counter;
2076 	rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
2077 	if (IS_ERR(rule)) {
2078 		err = PTR_ERR(rule);
2079 		mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
2080 		goto err_add_flow;
2081 	}
2082 
2083 	if (attrs->upspec.proto && attrs->type == XFRM_DEV_OFFLOAD_PACKET) {
2084 		err = rx_add_rule_sa_selector(sa_entry, rx, &attrs->upspec);
2085 		if (err)
2086 			goto err_add_sa_sel;
2087 	}
2088 
2089 	if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
2090 		err = rx_add_rule_drop_replay(sa_entry, rx);
2091 	if (err)
2092 		goto err_add_replay;
2093 
2094 	err = rx_add_rule_drop_auth_trailer(sa_entry, rx);
2095 	if (err)
2096 		goto err_drop_reason;
2097 
2098 	kvfree(spec);
2099 
2100 	sa_entry->ipsec_rule.rule = rule;
2101 	sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
2102 	sa_entry->ipsec_rule.fc = counter;
2103 	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
2104 	return 0;
2105 
2106 err_drop_reason:
2107 	if (sa_entry->ipsec_rule.replay.rule) {
2108 		mlx5_del_flow_rules(sa_entry->ipsec_rule.replay.rule);
2109 		mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
2110 	}
2111 err_add_replay:
2112 	if (sa_entry->ipsec_rule.sa_sel) {
2113 		mlx5_del_flow_rules(sa_entry->ipsec_rule.sa_sel);
2114 		mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
2115 	}
2116 err_add_sa_sel:
2117 	mlx5_del_flow_rules(rule);
2118 err_add_flow:
2119 	mlx5_fc_destroy(mdev, counter);
2120 err_add_cnt:
2121 	if (flow_act.pkt_reformat)
2122 		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
2123 err_pkt_reformat:
2124 	if (flow_act.modify_hdr)
2125 		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
2126 err_mod_header:
2127 	kvfree(spec);
2128 err_alloc:
2129 	rx_ft_put(ipsec, attrs->addrs.family, attrs->type);
2130 	return err;
2131 }
2132 
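/* Install the TX SA rule. Crypto mode matches the original flow
 * (addresses, SPI, ESP header and the IPsec marker the driver sets in
 * metadata REG_A); packet mode instead matches the reqid stamped into
 * REG_C_4 by the policy rule and adds the ESP reformat. Matching
 * packets are encrypted, counted and passed to the status table.
 */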
2133 static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
2134 {
2135 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
2136 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
2137 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
2138 	struct mlx5_flow_destination dest[2];
2139 	struct mlx5_flow_act flow_act = {};
2140 	struct mlx5_flow_handle *rule;
2141 	struct mlx5_flow_spec *spec;
2142 	struct mlx5e_ipsec_tx *tx;
2143 	struct mlx5_fc *counter;
2144 	int err;
2145 
2146 	tx = tx_ft_get(mdev, ipsec, attrs->type);
2147 	if (IS_ERR(tx))
2148 		return PTR_ERR(tx);
2149 
2150 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2151 	if (!spec) {
2152 		err = -ENOMEM;
2153 		goto err_alloc;
2154 	}
2155 
2156 	setup_fte_no_frags(spec);
2157 	setup_fte_upper_proto_match(spec, &attrs->upspec);
2158 
2159 	switch (attrs->type) {
2160 	case XFRM_DEV_OFFLOAD_CRYPTO:
2161 		if (attrs->addrs.family == AF_INET)
2162 			setup_fte_addr4(spec, &attrs->addrs);
2163 		else
2164 			setup_fte_addr6(spec, &attrs->addrs);
2165 		setup_fte_spi(spec, attrs->spi, false);
2166 		setup_fte_esp(spec);
2167 		setup_fte_reg_a(spec);
2168 		break;
2169 	case XFRM_DEV_OFFLOAD_PACKET:
2170 		setup_fte_reg_c4(spec, attrs->reqid);
2171 		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
2172 		if (err)
2173 			goto err_pkt_reformat;
2174 		break;
2175 	default:
2176 		break;
2177 	}
2178 
2179 	counter = mlx5_fc_create(mdev, true);
2180 	if (IS_ERR(counter)) {
2181 		err = PTR_ERR(counter);
2182 		goto err_add_cnt;
2183 	}
2184 
2185 	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
2186 	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
2187 	flow_act.flags |= FLOW_ACT_NO_APPEND;
2188 	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
2189 			   MLX5_FLOW_CONTEXT_ACTION_COUNT;
2190 	if (attrs->drop)
2191 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2192 	else
2193 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2194 
2195 	dest[0].ft = tx->ft.status;
2196 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2197 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
2198 	dest[1].counter = counter;
2199 	rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2);
2200 	if (IS_ERR(rule)) {
2201 		err = PTR_ERR(rule);
2202 		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
2203 		goto err_add_flow;
2204 	}
2205 
2206 	kvfree(spec);
2207 	sa_entry->ipsec_rule.rule = rule;
2208 	sa_entry->ipsec_rule.fc = counter;
2209 	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
2210 	return 0;
2211 
2212 err_add_flow:
2213 	mlx5_fc_destroy(mdev, counter);
2214 err_add_cnt:
2215 	if (flow_act.pkt_reformat)
2216 		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
2217 err_pkt_reformat:
2218 	kvfree(spec);
2219 err_alloc:
2220 	tx_ft_put(ipsec, attrs->type);
2221 	return err;
2222 }
2223 
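/* Install a TX policy rule: XFRM_POLICY_ALLOW stamps the reqid into
 * REG_C_4 (when set) and forwards to the SA table; XFRM_POLICY_BLOCK
 * drops and counts.
 */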
2224 static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
2225 {
2226 	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
2227 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
2228 	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
2229 	struct mlx5_flow_destination dest[2] = {};
2230 	struct mlx5_flow_act flow_act = {};
2231 	struct mlx5_flow_handle *rule;
2232 	struct mlx5_flow_spec *spec;
2233 	struct mlx5_flow_table *ft;
2234 	struct mlx5e_ipsec_tx *tx;
2235 	int err, dstn = 0;
2236 
2237 	ft = tx_ft_get_policy(mdev, ipsec, attrs->prio, attrs->type);
2238 	if (IS_ERR(ft))
2239 		return PTR_ERR(ft);
2240 
2241 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2242 	if (!spec) {
2243 		err = -ENOMEM;
2244 		goto err_alloc;
2245 	}
2246 
2247 	tx = ipsec_tx(ipsec, attrs->type);
2248 	if (attrs->addrs.family == AF_INET)
2249 		setup_fte_addr4(spec, &attrs->addrs);
2250 	else
2251 		setup_fte_addr6(spec, &attrs->addrs);
2252 
2253 	setup_fte_no_frags(spec);
2254 	setup_fte_upper_proto_match(spec, &attrs->upspec);
2255 
2256 	switch (attrs->action) {
2257 	case XFRM_POLICY_ALLOW:
2258 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2259 		if (!attrs->reqid)
2260 			break;
2261 
2262 		err = setup_modify_header(ipsec, attrs->type, attrs->reqid,
2263 					  XFRM_DEV_OFFLOAD_OUT, &flow_act);
2264 		if (err)
2265 			goto err_mod_header;
2266 		break;
2267 	case XFRM_POLICY_BLOCK:
2268 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
2269 				   MLX5_FLOW_CONTEXT_ACTION_COUNT;
2270 		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
2271 		dest[dstn].counter = tx->fc->drop;
2272 		dstn++;
2273 		break;
2274 	default:
2275 		WARN_ON(true);
2276 		err = -EINVAL;
2277 		goto err_mod_header;
2278 	}
2279 
2280 	flow_act.flags |= FLOW_ACT_NO_APPEND;
2281 	if (tx == ipsec->tx_esw && tx->chains)
2282 		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
2283 	dest[dstn].ft = tx->ft.sa;
2284 	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2285 	dstn++;
2286 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
2287 	if (IS_ERR(rule)) {
2288 		err = PTR_ERR(rule);
2289 		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
2290 		goto err_action;
2291 	}
2292 
2293 	kvfree(spec);
2294 	pol_entry->ipsec_rule.rule = rule;
2295 	pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
2296 	return 0;
2297 
2298 err_action:
2299 	if (flow_act.modify_hdr)
2300 		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
2301 err_mod_header:
2302 	kvfree(spec);
2303 err_alloc:
2304 	tx_ft_put_policy(ipsec, attrs->prio, attrs->type);
2305 	return err;
2306 }
2307 
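/* Install an RX policy rule: XFRM_POLICY_ALLOW forwards decrypted
 * traffic to the default RX destination, XFRM_POLICY_BLOCK drops and
 * counts.
 */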
2308 static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
2309 {
2310 	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
2311 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
2312 	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
2313 	struct mlx5_flow_destination dest[2];
2314 	struct mlx5_flow_act flow_act = {};
2315 	struct mlx5_flow_handle *rule;
2316 	struct mlx5_flow_spec *spec;
2317 	struct mlx5_flow_table *ft;
2318 	struct mlx5e_ipsec_rx *rx;
2319 	int err, dstn = 0;
2320 
2321 	ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->addrs.family,
2322 			      attrs->prio, attrs->type);
2323 	if (IS_ERR(ft))
2324 		return PTR_ERR(ft);
2325 
2326 	rx = ipsec_rx(pol_entry->ipsec, attrs->addrs.family, attrs->type);
2327 
2328 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2329 	if (!spec) {
2330 		err = -ENOMEM;
2331 		goto err_alloc;
2332 	}
2333 
2334 	if (attrs->addrs.family == AF_INET)
2335 		setup_fte_addr4(spec, &attrs->addrs);
2336 	else
2337 		setup_fte_addr6(spec, &attrs->addrs);
2338 
2339 	setup_fte_no_frags(spec);
2340 	setup_fte_upper_proto_match(spec, &attrs->upspec);
2341 
2342 	switch (attrs->action) {
2343 	case XFRM_POLICY_ALLOW:
2344 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2345 		break;
2346 	case XFRM_POLICY_BLOCK:
2347 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
2348 		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
2349 		dest[dstn].counter = rx->fc->drop;
2350 		dstn++;
2351 		break;
2352 	default:
2353 		WARN_ON(true);
2354 		err = -EINVAL;
2355 		goto err_action;
2356 	}
2357 
2358 	flow_act.flags |= FLOW_ACT_NO_APPEND;
2359 	if (rx == ipsec->rx_esw && rx->chains)
2360 		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
2361 	ipsec_rx_default_dest_get(ipsec, rx, &dest[dstn]);
2362 	dstn++;
2363 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
2364 	if (IS_ERR(rule)) {
2365 		err = PTR_ERR(rule);
2366 		mlx5_core_err(mdev, "Fail to add RX IPsec policy rule err=%d\n", err);
2367 		goto err_action;
2368 	}
2369 
2370 	kvfree(spec);
2371 	pol_entry->ipsec_rule.rule = rule;
2372 	return 0;
2373 
2374 err_action:
2375 	kvfree(spec);
2376 err_alloc:
2377 	rx_ft_put_policy(pol_entry->ipsec, attrs->addrs.family, attrs->prio,
2378 			 attrs->type);
2379 	return err;
2380 }
2381 
2382 static void ipsec_fs_destroy_single_counter(struct mlx5_core_dev *mdev,
2383 					    struct mlx5e_ipsec_fc *fc)
2384 {
2385 	mlx5_fc_destroy(mdev, fc->drop);
2386 	mlx5_fc_destroy(mdev, fc->cnt);
2387 	kfree(fc);
2388 }
2389 
2390 static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
2391 {
2392 	struct mlx5_core_dev *mdev = ipsec->mdev;
2393 
2394 	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
2395 	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
2396 	if (ipsec->is_uplink_rep) {
2397 		ipsec_fs_destroy_single_counter(mdev, ipsec->tx_esw->fc);
2398 		ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
2399 	}
2400 }
2401 
2402 static struct mlx5e_ipsec_fc *ipsec_fs_init_single_counter(struct mlx5_core_dev *mdev)
2403 {
2404 	struct mlx5e_ipsec_fc *fc;
2405 	struct mlx5_fc *counter;
2406 	int err;
2407 
2408 	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
2409 	if (!fc)
2410 		return ERR_PTR(-ENOMEM);
2411 
2412 	counter = mlx5_fc_create(mdev, false);
2413 	if (IS_ERR(counter)) {
2414 		err = PTR_ERR(counter);
2415 		goto err_cnt;
2416 	}
2417 	fc->cnt = counter;
2418 
2419 	counter = mlx5_fc_create(mdev, false);
2420 	if (IS_ERR(counter)) {
2421 		err = PTR_ERR(counter);
2422 		goto err_drop;
2423 	}
2424 	fc->drop = counter;
2425 
2426 	return fc;
2427 
2428 err_drop:
2429 	mlx5_fc_destroy(mdev, fc->cnt);
2430 err_cnt:
2431 	kfree(fc);
2432 	return ERR_PTR(err);
2433 }
2434 
2435 static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
2436 {
2437 	struct mlx5_core_dev *mdev = ipsec->mdev;
2438 	struct mlx5e_ipsec_fc *fc;
2439 	int err;
2440 
2441 	fc = ipsec_fs_init_single_counter(mdev);
2442 	if (IS_ERR(fc)) {
2443 		err = PTR_ERR(fc);
2444 		goto err_rx_cnt;
2445 	}
2446 	ipsec->rx_ipv4->fc = fc;
2447 
2448 	fc = ipsec_fs_init_single_counter(mdev);
2449 	if (IS_ERR(fc)) {
2450 		err = PTR_ERR(fc);
2451 		goto err_tx_cnt;
2452 	}
2453 	ipsec->tx->fc = fc;
2454 
2455 	if (ipsec->is_uplink_rep) {
2456 		fc = ipsec_fs_init_single_counter(mdev);
2457 		if (IS_ERR(fc)) {
2458 			err = PTR_ERR(fc);
2459 			goto err_rx_esw_cnt;
2460 		}
2461 		ipsec->rx_esw->fc = fc;
2462 
2463 		fc = ipsec_fs_init_single_counter(mdev);
2464 		if (IS_ERR(fc)) {
2465 			err = PTR_ERR(fc);
2466 			goto err_tx_esw_cnt;
2467 		}
2468 		ipsec->tx_esw->fc = fc;
2469 	}
2470 
2471 	/* Both IPv4 and IPv6 point to same flow counters struct. */
2472 	ipsec->rx_ipv6->fc = ipsec->rx_ipv4->fc;
2473 	return 0;
2474 
2475 err_tx_esw_cnt:
2476 	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
2477 err_rx_esw_cnt:
2478 	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
2479 err_tx_cnt:
2480 	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
2481 err_rx_cnt:
2482 	return err;
2483 }
2484 
2485 void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
2486 {
2487 	struct mlx5_core_dev *mdev = priv->mdev;
2488 	struct mlx5e_ipsec *ipsec = priv->ipsec;
2489 	struct mlx5e_ipsec_hw_stats *stats;
2490 	struct mlx5e_ipsec_fc *fc;
2491 	u64 packets, bytes;
2492 
2493 	stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;
2494 
2495 	stats->ipsec_rx_pkts = 0;
2496 	stats->ipsec_rx_bytes = 0;
2497 	stats->ipsec_rx_drop_pkts = 0;
2498 	stats->ipsec_rx_drop_bytes = 0;
2499 	stats->ipsec_rx_drop_mismatch_sa_sel = 0;
2500 	stats->ipsec_tx_pkts = 0;
2501 	stats->ipsec_tx_bytes = 0;
2502 	stats->ipsec_tx_drop_pkts = 0;
2503 	stats->ipsec_tx_drop_bytes = 0;
2504 
2505 	fc = ipsec->rx_ipv4->fc;
2506 	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
2507 	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
2508 		      &stats->ipsec_rx_drop_bytes);
2509 	if (ipsec->rx_ipv4->sa_sel.fc)
2510 		mlx5_fc_query(mdev, ipsec->rx_ipv4->sa_sel.fc,
2511 			      &stats->ipsec_rx_drop_mismatch_sa_sel, &bytes);
2512 
2513 	fc = ipsec->tx->fc;
2514 	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
2515 	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
2516 		      &stats->ipsec_tx_drop_bytes);
2517 
2518 	if (ipsec->is_uplink_rep) {
2519 		fc = ipsec->rx_esw->fc;
2520 		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
2521 			stats->ipsec_rx_pkts += packets;
2522 			stats->ipsec_rx_bytes += bytes;
2523 		}
2524 
2525 		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
2526 			stats->ipsec_rx_drop_pkts += packets;
2527 			stats->ipsec_rx_drop_bytes += bytes;
2528 		}
2529 
2530 		fc = ipsec->tx_esw->fc;
2531 		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
2532 			stats->ipsec_tx_pkts += packets;
2533 			stats->ipsec_tx_bytes += bytes;
2534 		}
2535 
2536 		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
2537 			stats->ipsec_tx_drop_pkts += packets;
2538 			stats->ipsec_tx_drop_bytes += bytes;
2539 		}
2540 
2541 		if (ipsec->rx_esw->sa_sel.fc &&
2542 		    !mlx5_fc_query(mdev, ipsec->rx_esw->sa_sel.fc,
2543 				   &packets, &bytes))
2544 			stats->ipsec_rx_drop_mismatch_sa_sel += packets;
2545 	}
2546 }
2547 
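/* IPsec packet offload and TC offload are mutually exclusive. The check
 * of num_block_ipsec and the increment of num_block_tc are done under
 * the eswitch lock (when an eswitch is present) so they cannot race
 * with mode changes.
 */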
2548 #ifdef CONFIG_MLX5_ESWITCH
2549 static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
2550 {
2551 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
2552 	int err = 0;
2553 
2554 	if (esw) {
2555 		err = mlx5_esw_lock(esw);
2556 		if (err)
2557 			return err;
2558 	}
2559 
2560 	if (mdev->num_block_ipsec) {
2561 		err = -EBUSY;
2562 		goto unlock;
2563 	}
2564 
2565 	mdev->num_block_tc++;
2566 
2567 unlock:
2568 	if (esw)
2569 		mlx5_esw_unlock(esw);
2570 
2571 	return err;
2572 }
2573 #else
2574 static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
2575 {
2576 	if (mdev->num_block_ipsec)
2577 		return -EBUSY;
2578 
2579 	mdev->num_block_tc++;
2580 	return 0;
2581 }
2582 #endif
2583 
2584 static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev)
2585 {
2586 	mdev->num_block_tc--;
2587 }
2588 
2589 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
2590 {
2591 	int err;
2592 
2593 	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET) {
2594 		err = mlx5e_ipsec_block_tc_offload(sa_entry->ipsec->mdev);
2595 		if (err)
2596 			return err;
2597 	}
2598 
2599 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
2600 		err = tx_add_rule(sa_entry);
2601 	else
2602 		err = rx_add_rule(sa_entry);
2603 
2604 	if (err)
2605 		goto err_out;
2606 
2607 	return 0;
2608 
2609 err_out:
2610 	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
2611 		mlx5e_ipsec_unblock_tc_offload(sa_entry->ipsec->mdev);
2612 	return err;
2613 }
2614 
2615 void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
2616 {
2617 	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
2618 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
2619 
2620 	mlx5_del_flow_rules(ipsec_rule->rule);
2621 	mlx5_fc_destroy(mdev, ipsec_rule->fc);
2622 	if (ipsec_rule->pkt_reformat)
2623 		mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
2624 
2625 	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
2626 		mlx5e_ipsec_unblock_tc_offload(mdev);
2627 
2628 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
2629 		tx_ft_put(sa_entry->ipsec, sa_entry->attrs.type);
2630 		return;
2631 	}
2632 
2633 	if (ipsec_rule->modify_hdr)
2634 		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
2635 
2636 	mlx5_del_flow_rules(ipsec_rule->trailer.rule);
2637 	mlx5_fc_destroy(mdev, ipsec_rule->trailer.fc);
2638 
2639 	mlx5_del_flow_rules(ipsec_rule->auth.rule);
2640 	mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
2641 
2642 	if (ipsec_rule->sa_sel) {
2643 		mlx5_del_flow_rules(ipsec_rule->sa_sel);
2644 		mlx5_del_flow_rules(ipsec_rule->status_pass);
2645 	}
2646 
2647 	if (ipsec_rule->replay.rule) {
2648 		mlx5_del_flow_rules(ipsec_rule->replay.rule);
2649 		mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
2650 	}
2651 	mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
2652 	rx_ft_put(sa_entry->ipsec, sa_entry->attrs.addrs.family,
2653 		  sa_entry->attrs.type);
2654 }
2655 
2656 int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
2657 {
2658 	int err;
2659 
2660 	err = mlx5e_ipsec_block_tc_offload(pol_entry->ipsec->mdev);
2661 	if (err)
2662 		return err;
2663 
2664 	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
2665 		err = tx_add_policy(pol_entry);
2666 	else
2667 		err = rx_add_policy(pol_entry);
2668 
2669 	if (err)
2670 		goto err_out;
2671 
2672 	return 0;
2673 
2674 err_out:
2675 	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
2676 	return err;
2677 }
2678 
2679 void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
2680 {
2681 	struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
2682 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
2683 
2684 	mlx5_del_flow_rules(ipsec_rule->rule);
2685 
2686 	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
2687 
2688 	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
2689 		rx_ft_put_policy(pol_entry->ipsec,
2690 				 pol_entry->attrs.addrs.family,
2691 				 pol_entry->attrs.prio, pol_entry->attrs.type);
2692 		return;
2693 	}
2694 
2695 	if (ipsec_rule->modify_hdr)
2696 		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
2697 
2698 	tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio, pol_entry->attrs.type);
2699 }
2700 
2701 void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
2702 {
2703 	if (!ipsec->tx)
2704 		return;
2705 
2706 	if (ipsec->roce)
2707 		mlx5_ipsec_fs_roce_cleanup(ipsec->roce);
2708 
2709 	ipsec_fs_destroy_counters(ipsec);
2710 	mutex_destroy(&ipsec->tx->ft.mutex);
2711 	WARN_ON(ipsec->tx->ft.refcnt);
2712 	kfree(ipsec->tx);
2713 
2714 	mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
2715 	WARN_ON(ipsec->rx_ipv4->ft.refcnt);
2716 	kfree(ipsec->rx_ipv4);
2717 
2718 	mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
2719 	WARN_ON(ipsec->rx_ipv6->ft.refcnt);
2720 	kfree(ipsec->rx_ipv6);
2721 
2722 	if (ipsec->is_uplink_rep) {
2723 		xa_destroy(&ipsec->ipsec_obj_id_map);
2724 
2725 		mutex_destroy(&ipsec->tx_esw->ft.mutex);
2726 		WARN_ON(ipsec->tx_esw->ft.refcnt);
2727 		kfree(ipsec->tx_esw);
2728 
2729 		mutex_destroy(&ipsec->rx_esw->ft.mutex);
2730 		WARN_ON(ipsec->rx_esw->ft.refcnt);
2731 		kfree(ipsec->rx_esw);
2732 	}
2733 }
2734 
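/* Allocate the TX/RX steering contexts. The egress-IPsec namespace (and
 * the FDB namespace for the uplink representor) must exist; RoCE
 * steering is initialized only when the device reports
 * MLX5_IPSEC_CAP_ROCE.
 */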
2735 int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec,
2736 			      struct mlx5_devcom_comp_dev **devcom)
2737 {
2738 	struct mlx5_core_dev *mdev = ipsec->mdev;
2739 	struct mlx5_flow_namespace *ns, *ns_esw;
2740 	int err = -ENOMEM;
2741 
2742 	ns = mlx5_get_flow_namespace(ipsec->mdev,
2743 				     MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
2744 	if (!ns)
2745 		return -EOPNOTSUPP;
2746 
2747 	if (ipsec->is_uplink_rep) {
2748 		ns_esw = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_FDB);
2749 		if (!ns_esw)
2750 			return -EOPNOTSUPP;
2751 
2752 		ipsec->tx_esw = kzalloc(sizeof(*ipsec->tx_esw), GFP_KERNEL);
2753 		if (!ipsec->tx_esw)
2754 			return -ENOMEM;
2755 
2756 		ipsec->rx_esw = kzalloc(sizeof(*ipsec->rx_esw), GFP_KERNEL);
2757 		if (!ipsec->rx_esw)
2758 			goto err_rx_esw;
2759 	}
2760 
2761 	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
2762 	if (!ipsec->tx)
2763 		goto err_tx;
2764 
2765 	ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
2766 	if (!ipsec->rx_ipv4)
2767 		goto err_rx_ipv4;
2768 
2769 	ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
2770 	if (!ipsec->rx_ipv6)
2771 		goto err_rx_ipv6;
2772 
2773 	err = ipsec_fs_init_counters(ipsec);
2774 	if (err)
2775 		goto err_counters;
2776 
2777 	mutex_init(&ipsec->tx->ft.mutex);
2778 	mutex_init(&ipsec->rx_ipv4->ft.mutex);
2779 	mutex_init(&ipsec->rx_ipv6->ft.mutex);
2780 	ipsec->tx->ns = ns;
2781 
2782 	if (ipsec->is_uplink_rep) {
2783 		mutex_init(&ipsec->tx_esw->ft.mutex);
2784 		mutex_init(&ipsec->rx_esw->ft.mutex);
2785 		ipsec->tx_esw->ns = ns_esw;
2786 		xa_init_flags(&ipsec->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
2787 	} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
2788 		ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom);
2789 	} else {
2790 		mlx5_core_warn(mdev, "IPsec was initialized without RoCE support\n");
2791 	}
2792 
2793 	return 0;
2794 
2795 err_counters:
2796 	kfree(ipsec->rx_ipv6);
2797 err_rx_ipv6:
2798 	kfree(ipsec->rx_ipv4);
2799 err_rx_ipv4:
2800 	kfree(ipsec->tx);
2801 err_tx:
2802 	kfree(ipsec->rx_esw);
2803 err_rx_esw:
2804 	kfree(ipsec->tx_esw);
2805 	return err;
2806 }
2807 
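/* Update an SA's rules make-before-break: add rules for a shadow copy
 * carrying the new attributes first, then delete the old rules and
 * adopt the shadow on success.
 */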
2808 void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
2809 {
2810 	struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
2811 	int err;
2812 
2813 	memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
2814 	memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));
2815 
2816 	err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
2817 	if (err)
2818 		return;
2819 
2820 	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
2821 	memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
2822 }
2823 
2824 bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
2825 {
2826 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
2827 	struct mlx5e_ipsec_rx *rx;
2828 	struct mlx5e_ipsec_tx *tx;
2829 
2830 	rx = ipsec_rx(sa_entry->ipsec, attrs->addrs.family, attrs->type);
2831 	tx = ipsec_tx(sa_entry->ipsec, attrs->type);
2832 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
2833 		return tx->allow_tunnel_mode;
2834 
2835 	return rx->allow_tunnel_mode;
2836 }
2837 
2838 void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
2839 				  struct mlx5e_priv *master_priv)
2840 {
2841 	struct mlx5e_ipsec_mpv_work *work;
2842 
2843 	reinit_completion(&master_priv->ipsec->comp);
2844 
2845 	if (!slave_priv->ipsec) {
2846 		complete(&master_priv->ipsec->comp);
2847 		return;
2848 	}
2849 
2850 	work = &slave_priv->ipsec->mpv_work;
2851 
2852 	INIT_WORK(&work->work, ipsec_mpv_work_handler);
2853 	work->event = event;
2854 	work->slave_priv = slave_priv;
2855 	work->master_priv = master_priv;
2856 	queue_work(slave_priv->ipsec->wq, &work->work);
2857 }
2858 
2859 void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
2860 {
2861 	if (!priv->ipsec)
2862 		return; /* IPsec not supported */
2863 
2864 	mlx5_devcom_send_event(priv->devcom, event, event, priv);
2865 	wait_for_completion(&priv->ipsec->comp);
2866 }
2867