/* drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c */
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

#include <linux/netdevice.h>
#include "en.h"
#include "en/fs.h"
#include "eswitch.h"
#include "ipsec.h"
#include "fs_core.h"
#include "lib/ipsec_fs_roce.h"
#include "lib/fs_chains.h"
#include "esw/ipsec_fs.h"
#include "en_rep.h"

#define NUM_IPSEC_FTE BIT(15)
#define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
#define IPSEC_TUNNEL_DEFAULT_TTL 0x40

#define MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS 16

enum {
	MLX5_IPSEC_ASO_OK,
	MLX5_IPSEC_ASO_BAD_REPLY,

	/* For crypto offload, set by driver */
	MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD = 0xAA,
};

struct mlx5e_ipsec_fc {
	struct mlx5_fc *cnt;
	struct mlx5_fc *drop;
};

struct mlx5e_ipsec_tx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;
	struct mlx5e_ipsec_miss sa;
	struct mlx5e_ipsec_rule status;
	struct mlx5_flow_namespace *ns;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fs_chains *chains;
	u8 allow_tunnel_mode : 1;
};

struct mlx5e_ipsec_status_checks {
	struct mlx5_flow_group *pass_group;
	struct mlx5_flow_handle *packet_offload_pass_rule;
	struct mlx5_flow_handle *crypto_offload_pass_rule;
	struct mlx5_flow_group *drop_all_group;
	struct mlx5e_ipsec_drop all;
};

struct mlx5e_ipsec_rx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;
	struct mlx5e_ipsec_miss sa;
	struct mlx5e_ipsec_miss sa_sel;
	struct mlx5e_ipsec_status_checks status_checks;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fs_chains *chains;
	struct mlx5_flow_table *pol_miss_ft;
	struct mlx5_flow_handle *pol_miss_rule;
	u8 allow_tunnel_mode : 1;
};

/* IPsec RX flow steering */
static enum mlx5_traffic_types family2tt(u32 family)
{
	if (family == AF_INET)
		return MLX5_TT_IPV4_IPSEC_ESP;
	return MLX5_TT_IPV6_IPSEC_ESP;
}

static struct mlx5e_ipsec_rx *ipsec_rx(struct mlx5e_ipsec *ipsec, u32 family, int type)
{
	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
		return ipsec->rx_esw;

	if (family == AF_INET)
		return ipsec->rx_ipv4;

	return ipsec->rx_ipv6;
}

static struct mlx5e_ipsec_tx *ipsec_tx(struct mlx5e_ipsec *ipsec, int type)
{
	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
		return ipsec->tx_esw;

	return ipsec->tx;
}

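/* Wrap mlx5_fs_chains so that each policy priority gets its own chained
 * flow table. Chain 0, prio 1, level 0 is fetched once and reported back
 * through @root_ft so fs_core can connect the previous stage to the chains;
 * lookups that miss every chain fall through to @miss_ft.
 */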
static struct mlx5_fs_chains *
ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft,
		    enum mlx5_flow_namespace_type ns, int base_prio,
		    int base_level, struct mlx5_flow_table **root_ft)
{
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	struct mlx5_flow_table *ft;
	int err;

	attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
		     MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.max_grp_num = 2;
	attr.default_ft = miss_ft;
	attr.ns = ns;
	attr.fs_base_prio = base_prio;
	attr.fs_base_level = base_level;
	chains = mlx5_chains_create(mdev, &attr);
	if (IS_ERR(chains))
		return chains;

	/* Create chain 0, prio 1, level 0 to connect chains to prev in fs_core */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_chains_get;
	}

	*root_ft = ft;
	return chains;

err_chains_get:
	mlx5_chains_destroy(chains);
	return ERR_PTR(err);
}

static void ipsec_chains_destroy(struct mlx5_fs_chains *chains)
{
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_destroy(chains);
}

static struct mlx5_flow_table *
ipsec_chains_get_table(struct mlx5_fs_chains *chains, u32 prio)
{
	return mlx5_chains_get_table(chains, 0, prio + 1, 0);
}

static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
{
	mlx5_chains_put_table(chains, 0, prio + 1, 0);
}

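/* Common helper for the tables below: an auto-grouped table of NUM_IPSEC_FTE
 * entries whose last @num_reserved_entries FTEs are kept aside for
 * catch-all/miss rules added outside the auto groups.
 */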
static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
					       int level, int prio,
					       int num_reserved_entries,
					       int max_num_groups, u32 flags)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.autogroup.num_reserved_entries = num_reserved_entries;
	ft_attr.autogroup.max_num_groups = max_num_groups;
	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.level = level;
	ft_attr.prio = prio;
	ft_attr.flags = flags;

	return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}

static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx)
{
	mlx5_del_flow_rules(rx->status_checks.all.rule);
	mlx5_fc_destroy(ipsec->mdev, rx->status_checks.all.fc);
	mlx5_destroy_flow_group(rx->status_checks.drop_all_group);
}

static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx)
{
	mlx5_del_flow_rules(rx->status_checks.packet_offload_pass_rule);
	mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
}

static void ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
					struct mlx5e_ipsec_rx *rx,
					struct mlx5_flow_spec *spec)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;

	if (rx == ipsec->rx_esw) {
		mlx5_esw_ipsec_rx_rule_add_match_obj(sa_entry, spec);
	} else {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 misc_parameters_2.metadata_reg_c_2);
		MLX5_SET(fte_match_param, spec->match_value,
			 misc_parameters_2.metadata_reg_c_2,
			 sa_entry->ipsec_obj_id | BIT(31));

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	}
}

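/* Per-SA drop rules in the RX status table: packets whose decrypt completed
 * with ipsec_syndrome 1 (bad authentication) or 2 (bad trailer) are counted
 * and dropped. Both rules reuse one spec; only the syndrome value and the
 * counter destination change between them.
 */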
static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
					 struct mlx5e_ipsec_rx *rx)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_fc *flow_counter;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	flow_counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt;
	}
	sa_entry->ipsec_rule.auth.fc = flow_counter;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_act.flags = FLOW_ACT_NO_APPEND;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = flow_counter;
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule;
	}
	sa_entry->ipsec_rule.auth.rule = rule;

	flow_counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt_2;
	}
	sa_entry->ipsec_rule.trailer.fc = flow_counter;

	dest.counter = flow_counter;
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule_2;
	}
	sa_entry->ipsec_rule.trailer.rule = rule;

	kvfree(spec);
	return 0;

err_rule_2:
	mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.trailer.fc);
err_cnt_2:
	mlx5_del_flow_rules(sa_entry->ipsec_rule.auth.rule);
err_rule:
	mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.auth.fc);
err_cnt:
	kvfree(spec);
	return err;
}

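/* Per-SA replay-drop rule: the ASO reply writes its result into
 * metadata_reg_c_4, and a value of 1 (MLX5_IPSEC_ASO_BAD_REPLY) marks a
 * replayed packet, which is counted and dropped here.
 */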
static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_fc *flow_counter;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	flow_counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_act.flags = FLOW_ACT_NO_APPEND;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = flow_counter;
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule;
	}

	sa_entry->ipsec_rule.replay.rule = rule;
	sa_entry->ipsec_rule.replay.fc = flow_counter;

	kvfree(spec);
	return 0;

err_rule:
	mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
	kvfree(spec);
	return err;
}

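/* Catch-all drop occupying the very last FTE of the RX status table: any
 * packet that matched neither a pass rule nor a per-SA drop rule above is
 * counted and dropped, so nothing leaks past the status checks.
 */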
static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
					   struct mlx5e_ipsec_rx *rx)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_fc *flow_counter;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto err_out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop flow group, err=%d\n", err);
		goto err_out;
	}

	flow_counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = flow_counter;
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule;
	}

	rx->status_checks.drop_all_group = g;
	rx->status_checks.all.rule = rule;
	rx->status_checks.all.fc = flow_counter;

	kvfree(flow_group_in);
	kvfree(spec);
	return 0;

err_rule:
	mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
	mlx5_destroy_flow_group(g);
err_out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

static int ipsec_rx_status_pass_group_create(struct mlx5e_ipsec *ipsec,
					     struct mlx5e_ipsec_rx *rx)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_flow_group *fg;
	void *match_criteria;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters_2.ipsec_syndrome);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_4);

	MLX5_SET(create_flow_group_in, flow_group_in,
		 start_flow_index, ft->max_fte - 3);
	MLX5_SET(create_flow_group_in, flow_group_in,
		 end_flow_index, ft->max_fte - 2);

	fg = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(fg)) {
		err = PTR_ERR(fg);
		mlx5_core_warn(ipsec->mdev,
			       "Failed to create rx status pass flow group, err=%d\n",
			       err);
	}
	rx->status_checks.pass_group = fg;

	kvfree(flow_group_in);
	return err;
}

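/* A "pass" rule in the status table: ipsec_syndrome == 0 together with
 * metadata_reg_c_4 == @aso_ok means decryption succeeded, so the packet is
 * counted and forwarded to @dest (policy table or default destination,
 * depending on the caller).
 */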
static struct mlx5_flow_handle *
ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
			    struct mlx5e_ipsec_rx *rx,
			    struct mlx5_flow_destination *dest,
			    u8 aso_ok)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters_2.ipsec_syndrome);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters_2.metadata_reg_c_4);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.ipsec_syndrome, 0);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_c_4, aso_ok);
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_warn(ipsec->mdev,
			       "Failed to add ipsec rx status pass rule, err=%d\n", err);
		goto err_rule;
	}

	kvfree(spec);
	return rule;

err_rule:
	kvfree(spec);
	return ERR_PTR(err);
}

static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx)
{
	ipsec_rx_status_pass_destroy(ipsec, rx);
	mlx5_destroy_flow_group(rx->status_checks.pass_group);
	ipsec_rx_status_drop_destroy(ipsec, rx);
}

static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
				       struct mlx5e_ipsec_rx *rx,
				       struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_destination pol_dest[2];
	struct mlx5_flow_handle *rule;
	int err;

	err = ipsec_rx_status_drop_all_create(ipsec, rx);
	if (err)
		return err;

	err = ipsec_rx_status_pass_group_create(ipsec, rx);
	if (err)
		goto err_pass_group_create;

	rule = ipsec_rx_status_pass_create(ipsec, rx, dest,
					   MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_crypto_offload_pass_create;
	}
	rx->status_checks.crypto_offload_pass_rule = rule;

	pol_dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	pol_dest[0].ft = rx->ft.pol;
	pol_dest[1] = dest[1];
	rule = ipsec_rx_status_pass_create(ipsec, rx, pol_dest,
					   MLX5_IPSEC_ASO_OK);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_packet_offload_pass_create;
	}
	rx->status_checks.packet_offload_pass_rule = rule;

	return 0;

err_packet_offload_pass_create:
	mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
err_crypto_offload_pass_create:
	mlx5_destroy_flow_group(rx->status_checks.pass_group);
err_pass_group_create:
	ipsec_rx_status_drop_destroy(ipsec, rx);
	return err;
}

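/* Generic miss handling for a table: one flow group holding only the last
 * FTE, plus an unconditional rule in it that forwards anything the rest of
 * the table did not match to @dest.
 */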
static int ipsec_miss_create(struct mlx5_core_dev *mdev,
			     struct mlx5_flow_table *ft,
			     struct mlx5e_ipsec_miss *miss,
			     struct mlx5_flow_destination *dest)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Create miss_group */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	miss->group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss->group)) {
		err = PTR_ERR(miss->group);
		mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
			      err);
		goto out;
	}

	/* Create miss rule */
	miss->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(miss->rule)) {
		mlx5_destroy_flow_group(miss->group);
		err = PTR_ERR(miss->rule);
		mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
			      err);
		goto out;
	}
out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

static void ipsec_rx_update_default_dest(struct mlx5e_ipsec_rx *rx,
					 struct mlx5_flow_destination *old_dest,
					 struct mlx5_flow_destination *new_dest)
{
	mlx5_modify_rule_destination(rx->pol_miss_rule, new_dest, old_dest);
	mlx5_modify_rule_destination(rx->status_checks.crypto_offload_pass_rule,
				     new_dest, old_dest);
}

static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
	struct mlx5_flow_destination old_dest, new_dest;

	old_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
					     family2tt(family));

	mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, ns, &old_dest, family,
				     MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, MLX5E_NIC_PRIO);

	new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
	new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);
}

static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
	struct mlx5_flow_destination old_dest, new_dest;

	old_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
	old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
					     family2tt(family));
	ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);

	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev);
}

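/* Multi-port vhca (MPV) events from devcom: when the master device comes up
 * or goes down, the slave re-creates or tears down its RoCE steering and
 * repoints the RX default destinations. Each ft.mutex is taken so this
 * cannot race with rx_get()/rx_put() refcount changes.
 */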
static void ipsec_mpv_work_handler(struct work_struct *_work)
{
	struct mlx5e_ipsec_mpv_work *work = container_of(_work, struct mlx5e_ipsec_mpv_work, work);
	struct mlx5e_ipsec *ipsec = work->slave_priv->ipsec;

	switch (work->event) {
	case MPV_DEVCOM_IPSEC_MASTER_UP:
		mutex_lock(&ipsec->tx->ft.mutex);
		if (ipsec->tx->ft.refcnt)
			mlx5_ipsec_fs_roce_tx_create(ipsec->mdev, ipsec->roce, ipsec->tx->ft.pol,
						     true);
		mutex_unlock(&ipsec->tx->ft.mutex);

		mutex_lock(&ipsec->rx_ipv4->ft.mutex);
		if (ipsec->rx_ipv4->ft.refcnt)
			handle_ipsec_rx_bringup(ipsec, AF_INET);
		mutex_unlock(&ipsec->rx_ipv4->ft.mutex);

		mutex_lock(&ipsec->rx_ipv6->ft.mutex);
		if (ipsec->rx_ipv6->ft.refcnt)
			handle_ipsec_rx_bringup(ipsec, AF_INET6);
		mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
		break;
	case MPV_DEVCOM_IPSEC_MASTER_DOWN:
		mutex_lock(&ipsec->tx->ft.mutex);
		if (ipsec->tx->ft.refcnt)
			mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce, ipsec->mdev);
		mutex_unlock(&ipsec->tx->ft.mutex);

		mutex_lock(&ipsec->rx_ipv4->ft.mutex);
		if (ipsec->rx_ipv4->ft.refcnt)
			handle_ipsec_rx_cleanup(ipsec, AF_INET);
		mutex_unlock(&ipsec->rx_ipv4->ft.mutex);

		mutex_lock(&ipsec->rx_ipv6->ft.mutex);
		if (ipsec->rx_ipv6->ft.refcnt)
			handle_ipsec_rx_cleanup(ipsec, AF_INET6);
		mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
		break;
	}

	complete(&work->master_priv->ipsec->comp);
}

static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);

	mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
}

static void ipsec_rx_policy_destroy(struct mlx5e_ipsec_rx *rx)
{
	if (rx->chains) {
		ipsec_chains_destroy(rx->chains);
	} else {
		mlx5_del_flow_rules(rx->pol.rule);
		mlx5_destroy_flow_group(rx->pol.group);
		mlx5_destroy_flow_table(rx->ft.pol);
	}

	if (rx->pol_miss_rule) {
		mlx5_del_flow_rules(rx->pol_miss_rule);
		mlx5_destroy_flow_table(rx->pol_miss_ft);
	}
}

static void ipsec_rx_sa_selector_destroy(struct mlx5_core_dev *mdev,
					 struct mlx5e_ipsec_rx *rx)
{
	mlx5_del_flow_rules(rx->sa_sel.rule);
	mlx5_fc_destroy(mdev, rx->sa_sel.fc);
	rx->sa_sel.fc = NULL;
	mlx5_destroy_flow_group(rx->sa_sel.group);
	mlx5_destroy_flow_table(rx->ft.sa_sel);
}

static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		       struct mlx5e_ipsec_rx *rx, u32 family)
{
	/* disconnect */
	if (rx != ipsec->rx_esw)
		ipsec_rx_ft_disconnect(ipsec, family);

	mlx5_del_flow_rules(rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
	mlx5_destroy_flow_table(rx->ft.sa);
	if (rx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
	mlx5_ipsec_rx_status_destroy(ipsec, rx);
	mlx5_destroy_flow_table(rx->ft.status);

	ipsec_rx_sa_selector_destroy(mdev, rx);

	ipsec_rx_policy_destroy(rx);

	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);

#ifdef CONFIG_MLX5_ESWITCH
	if (rx == ipsec->rx_esw)
		mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch),
				      0, 1, 0);
#endif
}

static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
				     struct mlx5e_ipsec_rx *rx,
				     u32 family,
				     struct mlx5e_ipsec_rx_create_attr *attr)
{
	if (rx == ipsec->rx_esw) {
		/* For packet offload in switchdev mode, RX & TX use FDB namespace */
		attr->ns = ipsec->tx_esw->ns;
		mlx5_esw_ipsec_rx_create_attr_set(ipsec, attr);
		return;
	}

	attr->ns = mlx5e_fs_get_ns(ipsec->fs, false);
	attr->ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
	attr->family = family;
	attr->prio = MLX5E_NIC_PRIO;
	attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL;
	attr->pol_miss_level = MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL;
	attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
	attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
	attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL;
}

static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx,
					 struct mlx5e_ipsec_rx_create_attr *attr,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_table *ft;
	int err;

	if (rx == ipsec->rx_esw)
		return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest);

	*dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family));
	err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest,
					   attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
					   attr->prio);
	if (err)
		return err;

	ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, attr->family);
	if (ft) {
		dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest->ft = ft;
	}

	return 0;
}

static void ipsec_rx_sa_miss_dest_get(struct mlx5e_ipsec *ipsec,
				      struct mlx5e_ipsec_rx *rx,
				      struct mlx5e_ipsec_rx_create_attr *attr,
				      struct mlx5_flow_destination *dest,
				      struct mlx5_flow_destination *miss_dest)
{
	if (rx == ipsec->rx_esw)
		*miss_dest = *dest;
	else
		*miss_dest =
			mlx5_ttc_get_default_dest(attr->ttc,
						  family2tt(attr->family));
}

static void ipsec_rx_default_dest_get(struct mlx5e_ipsec *ipsec,
				      struct mlx5e_ipsec_rx *rx,
				      struct mlx5_flow_destination *dest)
{
	dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest->ft = rx->pol_miss_ft;
}

static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
				struct mlx5e_ipsec_rx *rx,
				struct mlx5e_ipsec_rx_create_attr *attr)
{
	struct mlx5_flow_destination dest = {};

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rx->ft.sa;
	mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
}

static int ipsec_rx_chains_create_miss(struct mlx5e_ipsec *ipsec,
				       struct mlx5e_ipsec_rx *rx,
				       struct mlx5e_ipsec_rx_create_attr *attr,
				       struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_table_attr ft_attr = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	int err;

	if (rx == ipsec->rx_esw) {
		/* No need to create miss table for switchdev mode,
		 * just set it to the root chain table.
		 */
		rx->pol_miss_ft = dest->ft;
		return 0;
	}

	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = attr->pol_miss_level;
	ft_attr.prio = attr->prio;

	ft = mlx5_create_auto_grouped_flow_table(attr->ns, &ft_attr);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	rule = mlx5_add_flow_rules(ft, NULL, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule;
	}

	rx->pol_miss_ft = ft;
	rx->pol_miss_rule = rule;

	return 0;

err_rule:
	mlx5_destroy_flow_table(ft);
	return err;
}

static int ipsec_rx_policy_create(struct mlx5e_ipsec *ipsec,
				  struct mlx5e_ipsec_rx *rx,
				  struct mlx5e_ipsec_rx_create_attr *attr,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_destination default_dest;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_table *ft;
	int err;

	err = ipsec_rx_chains_create_miss(ipsec, rx, attr, dest);
	if (err)
		return err;

	ipsec_rx_default_dest_get(ipsec, rx, &default_dest);

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
		rx->chains = ipsec_chains_create(mdev,
						 default_dest.ft,
						 attr->chains_ns,
						 attr->prio,
						 attr->sa_level,
						 &rx->ft.pol);
		if (IS_ERR(rx->chains))
			err = PTR_ERR(rx->chains);
	} else {
		ft = ipsec_ft_create(attr->ns, attr->pol_level,
				     attr->prio, 1, 2, 0);
		if (IS_ERR(ft)) {
			err = PTR_ERR(ft);
			goto err_out;
		}
		rx->ft.pol = ft;

		err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol,
					&default_dest);
		if (err)
			mlx5_destroy_flow_table(rx->ft.pol);
	}

	if (!err)
		return 0;

err_out:
	if (rx->pol_miss_rule) {
		mlx5_del_flow_rules(rx->pol_miss_rule);
		mlx5_destroy_flow_table(rx->pol_miss_ft);
	}
	return err;
}

static int ipsec_rx_sa_selector_create(struct mlx5e_ipsec *ipsec,
				       struct mlx5e_ipsec_rx *rx,
				       struct mlx5e_ipsec_rx_create_attr *attr)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	u32 *flow_group_in;
	struct mlx5_fc *fc;
	int err;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	ft = ipsec_ft_create(attr->ns, attr->status_level, attr->prio, 1,
			     MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Failed to create RX SA selector flow table, err=%d\n",
			      err);
		goto err_ft;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
		 ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft->max_fte - 1);
	fg = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(fg)) {
		err = PTR_ERR(fg);
		mlx5_core_err(mdev, "Failed to create RX SA selector miss group, err=%d\n",
			      err);
		goto err_fg;
	}

	fc = mlx5_fc_create(mdev, false);
	if (IS_ERR(fc)) {
		err = PTR_ERR(fc);
		mlx5_core_err(mdev,
			      "Failed to create ipsec RX SA selector miss rule counter, err=%d\n",
			      err);
		goto err_cnt;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = fc;
	flow_act.action =
		MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_DROP;

	rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to create RX SA selector miss drop rule, err=%d\n",
			      err);
		goto err_rule;
	}

	rx->ft.sa_sel = ft;
	rx->sa_sel.group = fg;
	rx->sa_sel.fc = fc;
	rx->sa_sel.rule = rule;

	kvfree(flow_group_in);

	return 0;

err_rule:
	mlx5_fc_destroy(mdev, fc);
err_cnt:
	mlx5_destroy_flow_group(fg);
err_fg:
	mlx5_destroy_flow_table(ft);
err_ft:
	kvfree(flow_group_in);
	return err;
}

/* The decryption processing is as follows:
 *
 *   +----------+                         +-------------+
 *   |          |                         |             |
 *   |  Kernel  <--------------+----------+ policy miss <------------+
 *   |          |              ^          |             |            ^
 *   +----^-----+              |          +-------------+            |
 *        |                  crypto                                  |
 *      miss                offload ok                         allow/default
 *        ^                    ^                                     ^
 *        |                    |                  packet             |
 *   +----+---------+     +----+-------------+   offload ok   +------+---+
 *   |              |     |                  |   (no UPSPEC)  |          |
 *   | SA (decrypt) +----->      status      +--->------->----+  policy  |
 *   |              |     |                  |                |          |
 *   +--------------+     ++---------+-------+                +-^----+---+
 *                         |         |                          |    |
 *                         v        packet             +-->->---+    v
 *                         |       offload ok        match           |
 *                       fails    (with UPSPEC)        |           block
 *                         |         |   +-------------+-+           |
 *                         v         v   |               |  miss     v
 *                        drop       +--->    SA sel     +--------->drop
 *                                       |               |
 *                                       +---------------+
 */

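/* Build the RX pipeline drawn above, back to front: status-pass
 * destination, status table, SA-selector table, SA (decrypt) table with its
 * miss rule (toward the kernel TTC default in NIC mode), policy table(s),
 * the status check rules, and finally the TTC redirect that steers ESP
 * traffic into rx->ft.sa.
 */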
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		     struct mlx5e_ipsec_rx *rx, u32 family)
{
	struct mlx5_flow_destination dest[2], miss_dest;
	struct mlx5e_ipsec_rx_create_attr attr;
	struct mlx5_flow_table *ft;
	u32 flags = 0;
	int err;

	ipsec_rx_create_attr_set(ipsec, rx, family, &attr);

	err = ipsec_rx_status_pass_dest_get(ipsec, rx, &attr, &dest[0]);
	if (err)
		return err;

	ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 4, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_fs_ft_status;
	}
	rx->ft.status = ft;

	err = ipsec_rx_sa_selector_create(ipsec, rx, &attr);
	if (err)
		goto err_fs_ft_sa_sel;

	/* Create FT */
	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
		rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
	if (rx->allow_tunnel_mode)
		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 1, 2, flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_fs_ft;
	}
	rx->ft.sa = ft;

	ipsec_rx_sa_miss_dest_get(ipsec, rx, &attr, &dest[0], &miss_dest);
	err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, &miss_dest);
	if (err)
		goto err_fs;

	err = ipsec_rx_policy_create(ipsec, rx, &attr, &dest[0]);
	if (err)
		goto err_policy;

	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter = rx->fc->cnt;
	err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
	if (err)
		goto err_add;

	/* connect */
	if (rx != ipsec->rx_esw)
		ipsec_rx_ft_connect(ipsec, rx, &attr);
	return 0;

err_add:
	ipsec_rx_policy_destroy(rx);
err_policy:
	mlx5_del_flow_rules(rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
err_fs:
	mlx5_destroy_flow_table(rx->ft.sa);
	if (rx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
err_fs_ft:
	ipsec_rx_sa_selector_destroy(mdev, rx);
err_fs_ft_sa_sel:
	mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
	return err;
}

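/* rx_get()/rx_put() reference-count the whole RX pipeline under ft.mutex:
 * the tables exist only while at least one SA or policy uses them, and
 * eswitch mode changes stay blocked for the lifetime of the tables.
 */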
static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		  struct mlx5e_ipsec_rx *rx, u32 family)
{
	int err;

	if (rx->ft.refcnt)
		goto skip;

	err = mlx5_eswitch_block_mode(mdev);
	if (err)
		return err;

	err = rx_create(mdev, ipsec, rx, family);
	if (err) {
		mlx5_eswitch_unblock_mode(mdev);
		return err;
	}

skip:
	rx->ft.refcnt++;
	return 0;
}

static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
		   u32 family)
{
	if (--rx->ft.refcnt)
		return;

	rx_destroy(ipsec->mdev, ipsec, rx, family);
	mlx5_eswitch_unblock_mode(ipsec->mdev);
}

static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
					struct mlx5e_ipsec *ipsec, u32 family,
					int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
	int err;

	mutex_lock(&rx->ft.mutex);
	err = rx_get(mdev, ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
	if (err)
		return ERR_PTR(err);

	return rx;
}

static struct mlx5_flow_table *rx_ft_get_policy(struct mlx5_core_dev *mdev,
						struct mlx5e_ipsec *ipsec,
						u32 family, u32 prio, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
	struct mlx5_flow_table *ft;
	int err;

	mutex_lock(&rx->ft.mutex);
	err = rx_get(mdev, ipsec, rx, family);
	if (err)
		goto err_get;

	ft = rx->chains ? ipsec_chains_get_table(rx->chains, prio) : rx->ft.pol;
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_get_ft;
	}

	mutex_unlock(&rx->ft.mutex);
	return ft;

err_get_ft:
	rx_put(ipsec, rx, family);
err_get:
	mutex_unlock(&rx->ft.mutex);
	return ERR_PTR(err);
}

static void rx_ft_put(struct mlx5e_ipsec *ipsec, u32 family, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);

	mutex_lock(&rx->ft.mutex);
	rx_put(ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
}

static void rx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 family, u32 prio, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);

	mutex_lock(&rx->ft.mutex);
	if (rx->chains)
		ipsec_chains_put_table(rx->chains, prio);

	rx_put(ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
}

static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *fte;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = tx->fc->cnt;
	fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "Fail to add ipsec tx counter rule err=%d\n", err);
		goto err_rule;
	}

	kvfree(spec);
	tx->status.rule = fte;
	return 0;

err_rule:
	kvfree(spec);
	return err;
}

/* IPsec TX flow steering */
static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
		       struct mlx5_ipsec_fs *roce)
{
	mlx5_ipsec_fs_roce_tx_destroy(roce, ipsec->mdev);
	if (tx->chains) {
		ipsec_chains_destroy(tx->chains);
	} else {
		mlx5_del_flow_rules(tx->pol.rule);
		mlx5_destroy_flow_group(tx->pol.group);
		mlx5_destroy_flow_table(tx->ft.pol);
	}

	if (tx == ipsec->tx_esw) {
		mlx5_del_flow_rules(tx->sa.rule);
		mlx5_destroy_flow_group(tx->sa.group);
	}
	mlx5_destroy_flow_table(tx->ft.sa);
	if (tx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(ipsec->mdev);
	mlx5_del_flow_rules(tx->status.rule);
	mlx5_destroy_flow_table(tx->ft.status);
}

static void ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
				     struct mlx5e_ipsec_tx *tx,
				     struct mlx5e_ipsec_tx_create_attr *attr)
{
	if (tx == ipsec->tx_esw) {
		mlx5_esw_ipsec_tx_create_attr_set(ipsec, attr);
		return;
	}

	attr->prio = 0;
	attr->pol_level = 0;
	attr->sa_level = 1;
	attr->cnt_level = 2;
	attr->chains_ns = MLX5_FLOW_NAMESPACE_EGRESS_IPSEC;
}

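/* TX is layered pol -> sa -> status: the policy table (or prio chains)
 * feeds the SA/encrypt table, whose traffic ends in a status table holding
 * only the allow+count rule. In switchdev mode the SA table also gets a
 * miss rule toward the uplink vport.
 */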
static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
		     struct mlx5_ipsec_fs *roce)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_tx_create_attr attr;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_table *ft;
	u32 flags = 0;
	int err;

	ipsec_tx_create_attr_set(ipsec, tx, &attr);
	ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 1, 0);
	if (IS_ERR(ft))
		return PTR_ERR(ft);
	tx->ft.status = ft;

	err = ipsec_counter_rule_tx(mdev, tx);
	if (err)
		goto err_status_rule;

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
		tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
	if (tx->allow_tunnel_mode)
		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 1, 4, flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_sa_ft;
	}
	tx->ft.sa = ft;

	if (tx == ipsec->tx_esw) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest.vport.num = MLX5_VPORT_UPLINK;
		err = ipsec_miss_create(mdev, tx->ft.sa, &tx->sa, &dest);
		if (err)
			goto err_sa_miss;
		memset(&dest, 0, sizeof(dest));
	}

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
		tx->chains = ipsec_chains_create(
			mdev, tx->ft.sa, attr.chains_ns, attr.prio, attr.pol_level,
			&tx->ft.pol);
		if (IS_ERR(tx->chains)) {
			err = PTR_ERR(tx->chains);
			goto err_pol_ft;
		}

		goto connect_roce;
	}

	ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 1, 2, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_pol_ft;
	}
	tx->ft.pol = ft;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = tx->ft.sa;
	err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
	if (err) {
		mlx5_destroy_flow_table(tx->ft.pol);
		goto err_pol_ft;
	}

connect_roce:
	err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol, false);
	if (err)
		goto err_roce;
	return 0;

err_roce:
	if (tx->chains) {
		ipsec_chains_destroy(tx->chains);
	} else {
		mlx5_del_flow_rules(tx->pol.rule);
		mlx5_destroy_flow_group(tx->pol.group);
		mlx5_destroy_flow_table(tx->ft.pol);
	}
err_pol_ft:
	if (tx == ipsec->tx_esw) {
		mlx5_del_flow_rules(tx->sa.rule);
		mlx5_destroy_flow_group(tx->sa.group);
	}
err_sa_miss:
	mlx5_destroy_flow_table(tx->ft.sa);
err_sa_ft:
	if (tx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
	mlx5_del_flow_rules(tx->status.rule);
err_status_rule:
	mlx5_destroy_flow_table(tx->ft.status);
	return err;
}

static void ipsec_esw_tx_ft_policy_set(struct mlx5_core_dev *mdev,
				       struct mlx5_flow_table *ft)
{
#ifdef CONFIG_MLX5_ESWITCH
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5e_priv *priv;

	esw->offloads.ft_ipsec_tx_pol = ft;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	priv = netdev_priv(uplink_rpriv->netdev);
	if (!priv->channels.num)
		return;

	mlx5e_rep_deactivate_channels(priv);
	mlx5e_rep_activate_channels(priv);
#endif
}

static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		  struct mlx5e_ipsec_tx *tx)
{
	int err;

	if (tx->ft.refcnt)
		goto skip;

	err = mlx5_eswitch_block_mode(mdev);
	if (err)
		return err;

	err = tx_create(ipsec, tx, ipsec->roce);
	if (err) {
		mlx5_eswitch_unblock_mode(mdev);
		return err;
	}

	if (tx == ipsec->tx_esw)
		ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);

skip:
	tx->ft.refcnt++;
	return 0;
}

static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
{
	if (--tx->ft.refcnt)
		return;

	if (tx == ipsec->tx_esw) {
		mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev);
		ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
	}

	tx_destroy(ipsec, tx, ipsec->roce);
	mlx5_eswitch_unblock_mode(ipsec->mdev);
}

static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
						struct mlx5e_ipsec *ipsec,
						u32 prio, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
	struct mlx5_flow_table *ft;
	int err;

	mutex_lock(&tx->ft.mutex);
	err = tx_get(mdev, ipsec, tx);
	if (err)
		goto err_get;

	ft = tx->chains ? ipsec_chains_get_table(tx->chains, prio) : tx->ft.pol;
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_get_ft;
	}

	mutex_unlock(&tx->ft.mutex);
	return ft;

err_get_ft:
	tx_put(ipsec, tx);
err_get:
	mutex_unlock(&tx->ft.mutex);
	return ERR_PTR(err);
}

static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
					struct mlx5e_ipsec *ipsec, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
	int err;

	mutex_lock(&tx->ft.mutex);
	err = tx_get(mdev, ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
	if (err)
		return ERR_PTR(err);

	return tx;
}

static void tx_ft_put(struct mlx5e_ipsec *ipsec, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);

	mutex_lock(&tx->ft.mutex);
	tx_put(ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
}

static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);

	mutex_lock(&tx->ft.mutex);
	if (tx->chains)
		ipsec_chains_put_table(tx->chains, prio);

	tx_put(ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
}

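/* Address matchers: value and mask are copied into the spec only for
 * addresses that are actually set, so an all-zero (wildcard) selector adds
 * no outer-header address match at all.
 */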
static void setup_fte_addr4(struct mlx5_flow_spec *spec,
			    struct mlx5e_ipsec_addr *addrs)
{
	__be32 *saddr = &addrs->saddr.a4;
	__be32 *smask = &addrs->smask.m4;
	__be32 *daddr = &addrs->daddr.a4;
	__be32 *dmask = &addrs->dmask.m4;

	if (!*saddr && !*daddr)
		return;

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);

	if (*saddr) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), smask, 4);
	}

	if (*daddr) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), dmask, 4);
	}
}

static void setup_fte_addr6(struct mlx5_flow_spec *spec,
			    struct mlx5e_ipsec_addr *addrs)
{
	__be32 *saddr = addrs->saddr.a6;
	__be32 *smask = addrs->smask.m6;
	__be32 *daddr = addrs->daddr.a6;
	__be32 *dmask = addrs->dmask.m6;

	if (addr6_all_zero(saddr) && addr6_all_zero(daddr))
		return;

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);

	if (!addr6_all_zero(saddr)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), smask, 16);
	}

	if (!addr6_all_zero(daddr)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), dmask, 16);
	}
}

static void setup_fte_esp(struct mlx5_flow_spec *spec)
{
	/* ESP header */
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
}

setup_fte_spi(struct mlx5_flow_spec * spec,u32 spi,bool encap)1559 static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
1560 {
1561 	/* SPI number */
1562 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
1563 
1564 	if (encap) {
1565 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1566 				 misc_parameters.inner_esp_spi);
1567 		MLX5_SET(fte_match_param, spec->match_value,
1568 			 misc_parameters.inner_esp_spi, spi);
1569 	} else {
1570 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1571 				 misc_parameters.outer_esp_spi);
1572 		MLX5_SET(fte_match_param, spec->match_value,
1573 			 misc_parameters.outer_esp_spi, spi);
1574 	}
1575 }
1576 
setup_fte_no_frags(struct mlx5_flow_spec * spec)1577 static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
1578 {
1579 	/* Non fragmented */
1580 	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
1581 
1582 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
1583 	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
1584 }
1585 
setup_fte_reg_a(struct mlx5_flow_spec * spec)1586 static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
1587 {
1588 	/* Add IPsec indicator in metadata_reg_a */
1589 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
1590 
1591 	MLX5_SET(fte_match_param, spec->match_criteria,
1592 		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
1593 	MLX5_SET(fte_match_param, spec->match_value,
1594 		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
1595 }
1596 
setup_fte_reg_c4(struct mlx5_flow_spec * spec,u32 reqid)1597 static void setup_fte_reg_c4(struct mlx5_flow_spec *spec, u32 reqid)
1598 {
1599 	/* Pass policy check before choosing this SA */
1600 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
1601 
1602 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1603 			 misc_parameters_2.metadata_reg_c_4);
1604 	MLX5_SET(fte_match_param, spec->match_value,
1605 		 misc_parameters_2.metadata_reg_c_4, reqid);
1606 }
1607 
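/*
 * Usage sketch (illustrative values only): to restrict an SA to TCP
 * traffic destined to port 443, a caller would pass:
 *
 *	struct upspec up = {
 *		.proto = IPPROTO_TCP,
 *		.dport = 443,
 *		.dport_mask = 0xffff,
 *	};
 *	setup_fte_upper_proto_match(spec, &up);
 *
 * A zero sport leaves the source port unmatched.
 */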
1608 static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
1609 {
1610 	switch (upspec->proto) {
1611 	case IPPROTO_UDP:
1612 		if (upspec->dport) {
1613 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1614 				 udp_dport, upspec->dport_mask);
1615 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1616 				 udp_dport, upspec->dport);
1617 		}
1618 		if (upspec->sport) {
1619 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1620 				 udp_sport, upspec->sport_mask);
1621 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1622 				 udp_sport, upspec->sport);
1623 		}
1624 		break;
1625 	case IPPROTO_TCP:
1626 		if (upspec->dport) {
1627 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1628 				 tcp_dport, upspec->dport_mask);
1629 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1630 				 tcp_dport, upspec->dport);
1631 		}
1632 		if (upspec->sport) {
1633 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1634 				 tcp_sport, upspec->sport_mask);
1635 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1636 				 tcp_sport, upspec->sport);
1637 		}
1638 		break;
1639 	default:
1640 		return;
1641 	}
1642 
1643 	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
1644 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
1645 	MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
1646 }
1647 
1648 static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec,
1649 						     int type, u8 dir)
1650 {
1651 	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
1652 		return MLX5_FLOW_NAMESPACE_FDB;
1653 
1654 	if (dir == XFRM_DEV_OFFLOAD_IN)
1655 		return MLX5_FLOW_NAMESPACE_KERNEL;
1656 
1657 	return MLX5_FLOW_NAMESPACE_EGRESS;
1658 }
1659 
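/*
 * Metadata register usage, as encoded by the actions below:
 * RX: reg_b and reg_c_2 carry @val (the SA identifier supplied by the
 *     caller); crypto offload additionally stamps reg_c_4 with
 *     MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD.
 * TX: reg_c_4 carries @val (the policy reqid), later matched by
 *     setup_fte_reg_c4().
 */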
1660 static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8 dir,
1661 			       struct mlx5_flow_act *flow_act)
1662 {
1663 	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
1664 	u8 action[3][MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
1665 	struct mlx5_core_dev *mdev = ipsec->mdev;
1666 	struct mlx5_modify_hdr *modify_hdr;
1667 	u8 num_of_actions = 1;
1668 
1669 	MLX5_SET(set_action_in, action[0], action_type, MLX5_ACTION_TYPE_SET);
1670 	switch (dir) {
1671 	case XFRM_DEV_OFFLOAD_IN:
1672 		MLX5_SET(set_action_in, action[0], field,
1673 			 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
1674 
1675 		num_of_actions++;
1676 		MLX5_SET(set_action_in, action[1], action_type, MLX5_ACTION_TYPE_SET);
1677 		MLX5_SET(set_action_in, action[1], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2);
1678 		MLX5_SET(set_action_in, action[1], data, val);
1679 		MLX5_SET(set_action_in, action[1], offset, 0);
1680 		MLX5_SET(set_action_in, action[1], length, 32);
1681 
1682 		if (type == XFRM_DEV_OFFLOAD_CRYPTO) {
1683 			num_of_actions++;
1684 			MLX5_SET(set_action_in, action[2], action_type,
1685 				 MLX5_ACTION_TYPE_SET);
1686 			MLX5_SET(set_action_in, action[2], field,
1687 				 MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
1688 			MLX5_SET(set_action_in, action[2], data,
1689 				 MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
1690 			MLX5_SET(set_action_in, action[2], offset, 0);
1691 			MLX5_SET(set_action_in, action[2], length, 32);
1692 		}
1693 		break;
1694 	case XFRM_DEV_OFFLOAD_OUT:
1695 		MLX5_SET(set_action_in, action[0], field,
1696 			 MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
1697 		break;
1698 	default:
1699 		return -EINVAL;
1700 	}
1701 
1702 	MLX5_SET(set_action_in, action[0], data, val);
1703 	MLX5_SET(set_action_in, action[0], offset, 0);
1704 	MLX5_SET(set_action_in, action[0], length, 32);
1705 
1706 	modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
1707 	if (IS_ERR(modify_hdr)) {
1708 		mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
1709 			      PTR_ERR(modify_hdr));
1710 		return PTR_ERR(modify_hdr);
1711 	}
1712 
1713 	flow_act->modify_hdr = modify_hdr;
1714 	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1715 	return 0;
1716 }
1717 
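/*
 * Build the tunnel mode reformat buffer. RX only needs the L2 header to
 * restore after decapsulation; TX lays out Ethernet | IPv4/IPv6 | ESP,
 * with 8 extra bytes reserved after the ESP header (room for the IV).
 */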
1718 static int
1719 setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
1720 			  struct mlx5_accel_esp_xfrm_attrs *attrs,
1721 			  struct mlx5_pkt_reformat_params *reformat_params)
1722 {
1723 	struct ip_esp_hdr *esp_hdr;
1724 	struct ipv6hdr *ipv6hdr;
1725 	struct ethhdr *eth_hdr;
1726 	struct iphdr *iphdr;
1727 	char *reformatbf;
1728 	size_t bfflen;
1729 	void *hdr;
1730 
1731 	bfflen = sizeof(*eth_hdr);
1732 
1733 	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
1734 		bfflen += sizeof(*esp_hdr) + 8;
1735 
1736 		switch (attrs->addrs.family) {
1737 		case AF_INET:
1738 			bfflen += sizeof(*iphdr);
1739 			break;
1740 		case AF_INET6:
1741 			bfflen += sizeof(*ipv6hdr);
1742 			break;
1743 		default:
1744 			return -EINVAL;
1745 		}
1746 	}
1747 
1748 	reformatbf = kzalloc(bfflen, GFP_KERNEL);
1749 	if (!reformatbf)
1750 		return -ENOMEM;
1751 
1752 	eth_hdr = (struct ethhdr *)reformatbf;
1753 	switch (attrs->addrs.family) {
1754 	case AF_INET:
1755 		eth_hdr->h_proto = htons(ETH_P_IP);
1756 		break;
1757 	case AF_INET6:
1758 		eth_hdr->h_proto = htons(ETH_P_IPV6);
1759 		break;
1760 	default:
1761 		goto free_reformatbf;
1762 	}
1763 
1764 	ether_addr_copy(eth_hdr->h_dest, attrs->dmac);
1765 	ether_addr_copy(eth_hdr->h_source, attrs->smac);
1766 
1767 	switch (attrs->dir) {
1768 	case XFRM_DEV_OFFLOAD_IN:
1769 		reformat_params->type = MLX5_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2;
1770 		break;
1771 	case XFRM_DEV_OFFLOAD_OUT:
1772 		reformat_params->type = MLX5_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL;
1773 		reformat_params->param_0 = attrs->authsize;
1774 
1775 		hdr = reformatbf + sizeof(*eth_hdr);
1776 		switch (attrs->addrs.family) {
1777 		case AF_INET:
1778 			iphdr = (struct iphdr *)hdr;
1779 			memcpy(&iphdr->saddr, &attrs->addrs.saddr.a4, 4);
1780 			memcpy(&iphdr->daddr, &attrs->addrs.daddr.a4, 4);
1781 			iphdr->version = 4;
1782 			iphdr->ihl = 5;
1783 			iphdr->ttl = IPSEC_TUNNEL_DEFAULT_TTL;
1784 			iphdr->protocol = IPPROTO_ESP;
1785 			hdr += sizeof(*iphdr);
1786 			break;
1787 		case AF_INET6:
1788 			ipv6hdr = (struct ipv6hdr *)hdr;
1789 			memcpy(&ipv6hdr->saddr, &attrs->addrs.saddr.a6, 16);
1790 			memcpy(&ipv6hdr->daddr, &attrs->addrs.daddr.a6, 16);
1791 			ipv6hdr->nexthdr = IPPROTO_ESP;
1792 			ipv6hdr->version = 6;
1793 			ipv6hdr->hop_limit = IPSEC_TUNNEL_DEFAULT_TTL;
1794 			hdr += sizeof(*ipv6hdr);
1795 			break;
1796 		default:
1797 			goto free_reformatbf;
1798 		}
1799 
1800 		esp_hdr = (struct ip_esp_hdr *)hdr;
1801 		esp_hdr->spi = htonl(attrs->spi);
1802 		break;
1803 	default:
1804 		goto free_reformatbf;
1805 	}
1806 
1807 	reformat_params->size = bfflen;
1808 	reformat_params->data = reformatbf;
1809 	return 0;
1810 
1811 free_reformatbf:
1812 	kfree(reformatbf);
1813 	return -EINVAL;
1814 }
1815 
1816 static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
1817 {
1818 	switch (attrs->dir) {
1819 	case XFRM_DEV_OFFLOAD_IN:
1820 		if (attrs->encap)
1821 			return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
1822 		return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
1823 	case XFRM_DEV_OFFLOAD_OUT:
1824 		if (attrs->addrs.family == AF_INET) {
1825 			if (attrs->encap)
1826 				return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
1827 			return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
1828 		}
1829 
1830 		if (attrs->encap)
1831 			return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6;
1832 		return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
1833 	default:
1834 		WARN_ON(true);
1835 	}
1836 
1837 	return -EINVAL;
1838 }
1839 
1840 static int
1841 setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
1842 			     struct mlx5_pkt_reformat_params *reformat_params)
1843 {
1844 	struct udphdr *udphdr;
1845 	char *reformatbf;
1846 	size_t bfflen;
1847 	__be32 spi;
1848 	void *hdr;
1849 
1850 	reformat_params->type = get_reformat_type(attrs);
1851 	if (reformat_params->type < 0)
1852 		return reformat_params->type;
1853 
1854 	switch (attrs->dir) {
1855 	case XFRM_DEV_OFFLOAD_IN:
1856 		break;
1857 	case XFRM_DEV_OFFLOAD_OUT:
1858 		bfflen = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE;
1859 		if (attrs->encap)
1860 			bfflen += sizeof(*udphdr);
1861 
1862 		reformatbf = kzalloc(bfflen, GFP_KERNEL);
1863 		if (!reformatbf)
1864 			return -ENOMEM;
1865 
1866 		hdr = reformatbf;
1867 		if (attrs->encap) {
1868 			udphdr = (struct udphdr *)reformatbf;
1869 			udphdr->source = attrs->sport;
1870 			udphdr->dest = attrs->dport;
1871 			hdr += sizeof(*udphdr);
1872 		}
1873 
1874 		/* convert to network format */
1875 		spi = htonl(attrs->spi);
1876 		memcpy(hdr, &spi, sizeof(spi));
1877 
1878 		reformat_params->param_0 = attrs->authsize;
1879 		reformat_params->size = bfflen;
1880 		reformat_params->data = reformatbf;
1881 		break;
1882 	default:
1883 		return -EINVAL;
1884 	}
1885 
1886 	return 0;
1887 }
1888 
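/*
 * Attach a packet reformat object to @flow_act according to the SA mode:
 * transport mode only adds/removes the ESP (and optional NAT-T UDP)
 * header, while tunnel mode rebuilds the full outer L2/L3 encapsulation.
 */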
1889 static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
1890 			      struct mlx5_accel_esp_xfrm_attrs *attrs,
1891 			      struct mlx5_flow_act *flow_act)
1892 {
1893 	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->type,
1894 								attrs->dir);
1895 	struct mlx5_pkt_reformat_params reformat_params = {};
1896 	struct mlx5_core_dev *mdev = ipsec->mdev;
1897 	struct mlx5_pkt_reformat *pkt_reformat;
1898 	int ret;
1899 
1900 	switch (attrs->mode) {
1901 	case XFRM_MODE_TRANSPORT:
1902 		ret = setup_pkt_transport_reformat(attrs, &reformat_params);
1903 		break;
1904 	case XFRM_MODE_TUNNEL:
1905 		ret = setup_pkt_tunnel_reformat(mdev, attrs, &reformat_params);
1906 		break;
1907 	default:
1908 		ret = -EINVAL;
1909 	}
1910 
1911 	if (ret)
1912 		return ret;
1913 
1914 	pkt_reformat =
1915 		mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
1916 	kfree(reformat_params.data);
1917 	if (IS_ERR(pkt_reformat))
1918 		return PTR_ERR(pkt_reformat);
1919 
1920 	flow_act->pkt_reformat = pkt_reformat;
1921 	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
1922 	return 0;
1923 }
1924 
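/*
 * Install the rule pair implementing a per-SA upper protocol selector:
 * a status table rule that sends clean packets (zero syndrome, zero
 * reg_c_4) of this SA to the sa_sel table, and an sa_sel table rule that
 * forwards only traffic matching @upspec on to the policy table, so
 * mismatching packets hit the table's miss path instead.
 */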
1925 static int rx_add_rule_sa_selector(struct mlx5e_ipsec_sa_entry *sa_entry,
1926 				   struct mlx5e_ipsec_rx *rx,
1927 				   struct upspec *upspec)
1928 {
1929 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
1930 	struct mlx5_core_dev *mdev = ipsec->mdev;
1931 	struct mlx5_flow_destination dest[2];
1932 	struct mlx5_flow_act flow_act = {};
1933 	struct mlx5_flow_handle *rule;
1934 	struct mlx5_flow_spec *spec;
1935 	int err = 0;
1936 
1937 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1938 	if (!spec)
1939 		return -ENOMEM;
1940 
1941 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1942 			 misc_parameters_2.ipsec_syndrome);
1943 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1944 			 misc_parameters_2.metadata_reg_c_4);
1945 	MLX5_SET(fte_match_param, spec->match_value,
1946 		 misc_parameters_2.ipsec_syndrome, 0);
1947 	MLX5_SET(fte_match_param, spec->match_value,
1948 		 misc_parameters_2.metadata_reg_c_4, 0);
1949 	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1950 
1951 	ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
1952 
1953 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1954 			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
1955 	flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
1956 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1957 	dest[0].ft = rx->ft.sa_sel;
1958 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1959 	dest[1].counter = rx->fc->cnt;
1960 
1961 	rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
1962 	if (IS_ERR(rule)) {
1963 		err = PTR_ERR(rule);
1964 		mlx5_core_err(mdev,
1965 			      "Failed to add ipsec rx pass rule, err=%d\n",
1966 			      err);
1967 		goto err_add_status_pass_rule;
1968 	}
1969 
1970 	sa_entry->ipsec_rule.status_pass = rule;
1971 
1972 	MLX5_SET(fte_match_param, spec->match_criteria,
1973 		 misc_parameters_2.ipsec_syndrome, 0);
1974 	MLX5_SET(fte_match_param, spec->match_criteria,
1975 		 misc_parameters_2.metadata_reg_c_4, 0);
1976 
1977 	setup_fte_upper_proto_match(spec, upspec);
1978 
1979 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1980 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1981 	dest[0].ft = rx->ft.pol;
1982 
1983 	rule = mlx5_add_flow_rules(rx->ft.sa_sel, spec, &flow_act, &dest[0], 1);
1984 	if (IS_ERR(rule)) {
1985 		err = PTR_ERR(rule);
1986 		mlx5_core_err(mdev,
1987 			      "Failed to add ipsec rx sa selector rule, err=%d\n",
1988 			      err);
1989 		goto err_add_sa_sel_rule;
1990 	}
1991 
1992 	sa_entry->ipsec_rule.sa_sel = rule;
1993 
1994 	kvfree(spec);
1995 	return 0;
1996 
1997 err_add_sa_sel_rule:
1998 	mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
1999 err_add_status_pass_rule:
2000 	kvfree(spec);
2001 	return err;
2002 }
2003 
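/*
 * Per-SA RX rule: match the SA tuple (addresses, SPI, ESP, no fragments)
 * in the SA table, decrypt, count and forward to the status table. For
 * packet offload this is complemented by modify header metadata, an
 * optional decap reformat, replay/auth-trailer drop rules and, when an
 * upper protocol selector exists, the sa_sel rules above.
 */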
2004 static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
2005 {
2006 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
2007 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
2008 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
2009 	struct mlx5_flow_destination dest[2];
2010 	struct mlx5_flow_act flow_act = {};
2011 	struct mlx5_flow_handle *rule;
2012 	struct mlx5_flow_spec *spec;
2013 	struct mlx5e_ipsec_rx *rx;
2014 	struct mlx5_fc *counter;
2015 	int err = 0;
2016 
2017 	rx = rx_ft_get(mdev, ipsec, attrs->addrs.family, attrs->type);
2018 	if (IS_ERR(rx))
2019 		return PTR_ERR(rx);
2020 
2021 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2022 	if (!spec) {
2023 		err = -ENOMEM;
2024 		goto err_alloc;
2025 	}
2026 
2027 	if (attrs->addrs.family == AF_INET)
2028 		setup_fte_addr4(spec, &attrs->addrs);
2029 	else
2030 		setup_fte_addr6(spec, &attrs->addrs);
2031 
2032 	setup_fte_spi(spec, attrs->spi, attrs->encap);
2033 	if (!attrs->encap)
2034 		setup_fte_esp(spec);
2035 	setup_fte_no_frags(spec);
2036 
2037 	if (!attrs->drop) {
2038 		if (rx != ipsec->rx_esw)
2039 			err = setup_modify_header(ipsec, attrs->type,
2040 						  sa_entry->ipsec_obj_id | BIT(31),
2041 						  XFRM_DEV_OFFLOAD_IN, &flow_act);
2042 		else
2043 			err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);
2044 
2045 		if (err)
2046 			goto err_mod_header;
2047 	}
2048 
2049 	switch (attrs->type) {
2050 	case XFRM_DEV_OFFLOAD_PACKET:
2051 		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
2052 		if (err)
2053 			goto err_pkt_reformat;
2054 		break;
2055 	default:
2056 		break;
2057 	}
2058 
2059 	counter = mlx5_fc_create(mdev, true);
2060 	if (IS_ERR(counter)) {
2061 		err = PTR_ERR(counter);
2062 		goto err_add_cnt;
2063 	}
2064 	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
2065 	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
2066 	flow_act.flags |= FLOW_ACT_NO_APPEND;
2067 	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
2068 			   MLX5_FLOW_CONTEXT_ACTION_COUNT;
2069 	if (attrs->drop)
2070 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2071 	else
2072 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2073 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2074 	dest[0].ft = rx->ft.status;
2075 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
2076 	dest[1].counter = counter;
2077 	rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
2078 	if (IS_ERR(rule)) {
2079 		err = PTR_ERR(rule);
2080 		mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
2081 		goto err_add_flow;
2082 	}
2083 
2084 	if (attrs->upspec.proto && attrs->type == XFRM_DEV_OFFLOAD_PACKET) {
2085 		err = rx_add_rule_sa_selector(sa_entry, rx, &attrs->upspec);
2086 		if (err)
2087 			goto err_add_sa_sel;
2088 	}
2089 
2090 	if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
2091 		err = rx_add_rule_drop_replay(sa_entry, rx);
2092 	if (err)
2093 		goto err_add_replay;
2094 
2095 	err = rx_add_rule_drop_auth_trailer(sa_entry, rx);
2096 	if (err)
2097 		goto err_drop_reason;
2098 
2099 	kvfree(spec);
2100 
2101 	sa_entry->ipsec_rule.rule = rule;
2102 	sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
2103 	sa_entry->ipsec_rule.fc = counter;
2104 	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
2105 	return 0;
2106 
2107 err_drop_reason:
2108 	if (sa_entry->ipsec_rule.replay.rule) {
2109 		mlx5_del_flow_rules(sa_entry->ipsec_rule.replay.rule);
2110 		mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
2111 	}
2112 err_add_replay:
2113 	if (sa_entry->ipsec_rule.sa_sel) {
2114 		mlx5_del_flow_rules(sa_entry->ipsec_rule.sa_sel);
2115 		mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
2116 	}
2117 err_add_sa_sel:
2118 	mlx5_del_flow_rules(rule);
2119 err_add_flow:
2120 	mlx5_fc_destroy(mdev, counter);
2121 err_add_cnt:
2122 	if (flow_act.pkt_reformat)
2123 		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
2124 err_pkt_reformat:
2125 	if (flow_act.modify_hdr)
2126 		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
2127 err_mod_header:
2128 	kvfree(spec);
2129 err_alloc:
2130 	rx_ft_put(ipsec, attrs->addrs.family, attrs->type);
2131 	return err;
2132 }
2133 
2134 static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
2135 {
2136 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
2137 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
2138 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
2139 	struct mlx5_flow_destination dest[2];
2140 	struct mlx5_flow_act flow_act = {};
2141 	struct mlx5_flow_handle *rule;
2142 	struct mlx5_flow_spec *spec;
2143 	struct mlx5e_ipsec_tx *tx;
2144 	struct mlx5_fc *counter;
2145 	int err;
2146 
2147 	tx = tx_ft_get(mdev, ipsec, attrs->type);
2148 	if (IS_ERR(tx))
2149 		return PTR_ERR(tx);
2150 
2151 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2152 	if (!spec) {
2153 		err = -ENOMEM;
2154 		goto err_alloc;
2155 	}
2156 
2157 	setup_fte_no_frags(spec);
2158 	setup_fte_upper_proto_match(spec, &attrs->upspec);
2159 
2160 	switch (attrs->type) {
2161 	case XFRM_DEV_OFFLOAD_CRYPTO:
2162 		if (attrs->addrs.family == AF_INET)
2163 			setup_fte_addr4(spec, &attrs->addrs);
2164 		else
2165 			setup_fte_addr6(spec, &attrs->addrs);
2166 		setup_fte_spi(spec, attrs->spi, false);
2167 		setup_fte_esp(spec);
2168 		setup_fte_reg_a(spec);
2169 		break;
2170 	case XFRM_DEV_OFFLOAD_PACKET:
2171 		setup_fte_reg_c4(spec, attrs->reqid);
2172 		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
2173 		if (err)
2174 			goto err_pkt_reformat;
2175 		break;
2176 	default:
2177 		break;
2178 	}
2179 
2180 	counter = mlx5_fc_create(mdev, true);
2181 	if (IS_ERR(counter)) {
2182 		err = PTR_ERR(counter);
2183 		goto err_add_cnt;
2184 	}
2185 
2186 	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
2187 	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
2188 	flow_act.flags |= FLOW_ACT_NO_APPEND;
2189 	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
2190 			   MLX5_FLOW_CONTEXT_ACTION_COUNT;
2191 	if (attrs->drop)
2192 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2193 	else
2194 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2195 
2196 	dest[0].ft = tx->ft.status;
2197 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2198 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
2199 	dest[1].counter = counter;
2200 	rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2);
2201 	if (IS_ERR(rule)) {
2202 		err = PTR_ERR(rule);
2203 		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
2204 		goto err_add_flow;
2205 	}
2206 
2207 	kvfree(spec);
2208 	sa_entry->ipsec_rule.rule = rule;
2209 	sa_entry->ipsec_rule.fc = counter;
2210 	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
2211 	return 0;
2212 
2213 err_add_flow:
2214 	mlx5_fc_destroy(mdev, counter);
2215 err_add_cnt:
2216 	if (flow_act.pkt_reformat)
2217 		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
2218 err_pkt_reformat:
2219 	kvfree(spec);
2220 err_alloc:
2221 	tx_ft_put(ipsec, attrs->type);
2222 	return err;
2223 }
2224 
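/*
 * TX policies with a reqid stamp it into reg_c_4 via setup_modify_header();
 * the per-SA rule installed by tx_add_rule() matches that register through
 * setup_fte_reg_c4(), so an SA only sees packets that already passed its
 * policy.
 */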
2225 static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
2226 {
2227 	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
2228 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
2229 	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
2230 	struct mlx5_flow_destination dest[2] = {};
2231 	struct mlx5_flow_act flow_act = {};
2232 	struct mlx5_flow_handle *rule;
2233 	struct mlx5_flow_spec *spec;
2234 	struct mlx5_flow_table *ft;
2235 	struct mlx5e_ipsec_tx *tx;
2236 	int err, dstn = 0;
2237 
2238 	ft = tx_ft_get_policy(mdev, ipsec, attrs->prio, attrs->type);
2239 	if (IS_ERR(ft))
2240 		return PTR_ERR(ft);
2241 
2242 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2243 	if (!spec) {
2244 		err = -ENOMEM;
2245 		goto err_alloc;
2246 	}
2247 
2248 	tx = ipsec_tx(ipsec, attrs->type);
2249 	if (attrs->addrs.family == AF_INET)
2250 		setup_fte_addr4(spec, &attrs->addrs);
2251 	else
2252 		setup_fte_addr6(spec, &attrs->addrs);
2253 
2254 	setup_fte_no_frags(spec);
2255 	setup_fte_upper_proto_match(spec, &attrs->upspec);
2256 
2257 	switch (attrs->action) {
2258 	case XFRM_POLICY_ALLOW:
2259 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2260 		if (!attrs->reqid)
2261 			break;
2262 
2263 		err = setup_modify_header(ipsec, attrs->type, attrs->reqid,
2264 					  XFRM_DEV_OFFLOAD_OUT, &flow_act);
2265 		if (err)
2266 			goto err_mod_header;
2267 		break;
2268 	case XFRM_POLICY_BLOCK:
2269 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
2270 				   MLX5_FLOW_CONTEXT_ACTION_COUNT;
2271 		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
2272 		dest[dstn].counter = tx->fc->drop;
2273 		dstn++;
2274 		break;
2275 	default:
2276 		WARN_ON(true);
2277 		err = -EINVAL;
2278 		goto err_mod_header;
2279 	}
2280 
2281 	flow_act.flags |= FLOW_ACT_NO_APPEND;
2282 	if (tx == ipsec->tx_esw && tx->chains)
2283 		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
2284 	dest[dstn].ft = tx->ft.sa;
2285 	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2286 	dstn++;
2287 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
2288 	if (IS_ERR(rule)) {
2289 		err = PTR_ERR(rule);
2290 		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
2291 		goto err_action;
2292 	}
2293 
2294 	kvfree(spec);
2295 	pol_entry->ipsec_rule.rule = rule;
2296 	pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
2297 	return 0;
2298 
2299 err_action:
2300 	if (flow_act.modify_hdr)
2301 		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
2302 err_mod_header:
2303 	kvfree(spec);
2304 err_alloc:
2305 	tx_ft_put_policy(ipsec, attrs->prio, attrs->type);
2306 	return err;
2307 }
2308 
2309 static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
2310 {
2311 	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
2312 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
2313 	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
2314 	struct mlx5_flow_destination dest[2];
2315 	struct mlx5_flow_act flow_act = {};
2316 	struct mlx5_flow_handle *rule;
2317 	struct mlx5_flow_spec *spec;
2318 	struct mlx5_flow_table *ft;
2319 	struct mlx5e_ipsec_rx *rx;
2320 	int err, dstn = 0;
2321 
2322 	ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->addrs.family,
2323 			      attrs->prio, attrs->type);
2324 	if (IS_ERR(ft))
2325 		return PTR_ERR(ft);
2326 
2327 	rx = ipsec_rx(pol_entry->ipsec, attrs->addrs.family, attrs->type);
2328 
2329 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2330 	if (!spec) {
2331 		err = -ENOMEM;
2332 		goto err_alloc;
2333 	}
2334 
2335 	if (attrs->addrs.family == AF_INET)
2336 		setup_fte_addr4(spec, &attrs->addrs);
2337 	else
2338 		setup_fte_addr6(spec, &attrs->addrs);
2339 
2340 	setup_fte_no_frags(spec);
2341 	setup_fte_upper_proto_match(spec, &attrs->upspec);
2342 
2343 	switch (attrs->action) {
2344 	case XFRM_POLICY_ALLOW:
2345 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2346 		break;
2347 	case XFRM_POLICY_BLOCK:
2348 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
2349 		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
2350 		dest[dstn].counter = rx->fc->drop;
2351 		dstn++;
2352 		break;
2353 	default:
2354 		WARN_ON(true);
2355 		err = -EINVAL;
2356 		goto err_action;
2357 	}
2358 
2359 	flow_act.flags |= FLOW_ACT_NO_APPEND;
2360 	if (rx == ipsec->rx_esw && rx->chains)
2361 		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
2362 	ipsec_rx_default_dest_get(ipsec, rx, &dest[dstn]);
2363 	dstn++;
2364 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
2365 	if (IS_ERR(rule)) {
2366 		err = PTR_ERR(rule);
2367 		mlx5_core_err(mdev, "Fail to add RX IPsec policy rule err=%d\n", err);
2368 		goto err_action;
2369 	}
2370 
2371 	kvfree(spec);
2372 	pol_entry->ipsec_rule.rule = rule;
2373 	return 0;
2374 
2375 err_action:
2376 	kvfree(spec);
2377 err_alloc:
2378 	rx_ft_put_policy(pol_entry->ipsec, attrs->addrs.family, attrs->prio,
2379 			 attrs->type);
2380 	return err;
2381 }
2382 
2383 static void ipsec_fs_destroy_single_counter(struct mlx5_core_dev *mdev,
2384 					    struct mlx5e_ipsec_fc *fc)
2385 {
2386 	mlx5_fc_destroy(mdev, fc->drop);
2387 	mlx5_fc_destroy(mdev, fc->cnt);
2388 	kfree(fc);
2389 }
2390 
2391 static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
2392 {
2393 	struct mlx5_core_dev *mdev = ipsec->mdev;
2394 
2395 	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
2396 	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
2397 	if (ipsec->is_uplink_rep) {
2398 		ipsec_fs_destroy_single_counter(mdev, ipsec->tx_esw->fc);
2399 		ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
2400 	}
2401 }
2402 
2403 static struct mlx5e_ipsec_fc *ipsec_fs_init_single_counter(struct mlx5_core_dev *mdev)
2404 {
2405 	struct mlx5e_ipsec_fc *fc;
2406 	struct mlx5_fc *counter;
2407 	int err;
2408 
2409 	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
2410 	if (!fc)
2411 		return ERR_PTR(-ENOMEM);
2412 
2413 	counter = mlx5_fc_create(mdev, false);
2414 	if (IS_ERR(counter)) {
2415 		err = PTR_ERR(counter);
2416 		goto err_cnt;
2417 	}
2418 	fc->cnt = counter;
2419 
2420 	counter = mlx5_fc_create(mdev, false);
2421 	if (IS_ERR(counter)) {
2422 		err = PTR_ERR(counter);
2423 		goto err_drop;
2424 	}
2425 	fc->drop = counter;
2426 
2427 	return fc;
2428 
2429 err_drop:
2430 	mlx5_fc_destroy(mdev, fc->cnt);
2431 err_cnt:
2432 	kfree(fc);
2433 	return ERR_PTR(err);
2434 }
2435 
2436 static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
2437 {
2438 	struct mlx5_core_dev *mdev = ipsec->mdev;
2439 	struct mlx5e_ipsec_fc *fc;
2440 	int err;
2441 
2442 	fc = ipsec_fs_init_single_counter(mdev);
2443 	if (IS_ERR(fc)) {
2444 		err = PTR_ERR(fc);
2445 		goto err_rx_cnt;
2446 	}
2447 	ipsec->rx_ipv4->fc = fc;
2448 
2449 	fc = ipsec_fs_init_single_counter(mdev);
2450 	if (IS_ERR(fc)) {
2451 		err = PTR_ERR(fc);
2452 		goto err_tx_cnt;
2453 	}
2454 	ipsec->tx->fc = fc;
2455 
2456 	if (ipsec->is_uplink_rep) {
2457 		fc = ipsec_fs_init_single_counter(mdev);
2458 		if (IS_ERR(fc)) {
2459 			err = PTR_ERR(fc);
2460 			goto err_rx_esw_cnt;
2461 		}
2462 		ipsec->rx_esw->fc = fc;
2463 
2464 		fc = ipsec_fs_init_single_counter(mdev);
2465 		if (IS_ERR(fc)) {
2466 			err = PTR_ERR(fc);
2467 			goto err_tx_esw_cnt;
2468 		}
2469 		ipsec->tx_esw->fc = fc;
2470 	}
2471 
2472 	/* Both IPv4 and IPv6 point to same flow counters struct. */
2473 	ipsec->rx_ipv6->fc = ipsec->rx_ipv4->fc;
2474 	return 0;
2475 
2476 err_tx_esw_cnt:
2477 	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
2478 err_rx_esw_cnt:
2479 	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
2480 err_tx_cnt:
2481 	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
2482 err_rx_cnt:
2483 	return err;
2484 }
2485 
2486 void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
2487 {
2488 	struct mlx5_core_dev *mdev = priv->mdev;
2489 	struct mlx5e_ipsec *ipsec = priv->ipsec;
2490 	struct mlx5e_ipsec_hw_stats *stats;
2491 	struct mlx5e_ipsec_fc *fc;
2492 	u64 packets, bytes;
2493 
2494 	stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;
2495 
2496 	stats->ipsec_rx_pkts = 0;
2497 	stats->ipsec_rx_bytes = 0;
2498 	stats->ipsec_rx_drop_pkts = 0;
2499 	stats->ipsec_rx_drop_bytes = 0;
2500 	stats->ipsec_rx_drop_mismatch_sa_sel = 0;
2501 	stats->ipsec_tx_pkts = 0;
2502 	stats->ipsec_tx_bytes = 0;
2503 	stats->ipsec_tx_drop_pkts = 0;
2504 	stats->ipsec_tx_drop_bytes = 0;
2505 
2506 	fc = ipsec->rx_ipv4->fc;
2507 	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
2508 	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
2509 		      &stats->ipsec_rx_drop_bytes);
2510 	if (ipsec->rx_ipv4->sa_sel.fc)
2511 		mlx5_fc_query(mdev, ipsec->rx_ipv4->sa_sel.fc,
2512 			      &stats->ipsec_rx_drop_mismatch_sa_sel, &bytes);
2513 
2514 	fc = ipsec->tx->fc;
2515 	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
2516 	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
2517 		      &stats->ipsec_tx_drop_bytes);
2518 
2519 	if (ipsec->is_uplink_rep) {
2520 		fc = ipsec->rx_esw->fc;
2521 		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
2522 			stats->ipsec_rx_pkts += packets;
2523 			stats->ipsec_rx_bytes += bytes;
2524 		}
2525 
2526 		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
2527 			stats->ipsec_rx_drop_pkts += packets;
2528 			stats->ipsec_rx_drop_bytes += bytes;
2529 		}
2530 
2531 		fc = ipsec->tx_esw->fc;
2532 		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
2533 			stats->ipsec_tx_pkts += packets;
2534 			stats->ipsec_tx_bytes += bytes;
2535 		}
2536 
2537 		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
2538 			stats->ipsec_tx_drop_pkts += packets;
2539 			stats->ipsec_tx_drop_bytes += bytes;
2540 		}
2541 
2542 		if (ipsec->rx_esw->sa_sel.fc &&
2543 		    !mlx5_fc_query(mdev, ipsec->rx_esw->sa_sel.fc,
2544 				   &packets, &bytes))
2545 			stats->ipsec_rx_drop_mismatch_sa_sel += packets;
2546 	}
2547 }
2548 
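/*
 * IPsec packet offload and TC offload are mutually exclusive: taking a
 * num_block_tc reference keeps TC offload from being enabled while IPsec
 * rules exist, and a non-zero num_block_ipsec (held by TC) fails this
 * with -EBUSY.
 */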
2549 #ifdef CONFIG_MLX5_ESWITCH
2550 static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
2551 {
2552 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
2553 	int err = 0;
2554 
2555 	if (esw) {
2556 		err = mlx5_esw_lock(esw);
2557 		if (err)
2558 			return err;
2559 	}
2560 
2561 	if (mdev->num_block_ipsec) {
2562 		err = -EBUSY;
2563 		goto unlock;
2564 	}
2565 
2566 	mdev->num_block_tc++;
2567 
2568 unlock:
2569 	if (esw)
2570 		mlx5_esw_unlock(esw);
2571 
2572 	return err;
2573 }
2574 #else
2575 static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
2576 {
2577 	if (mdev->num_block_ipsec)
2578 		return -EBUSY;
2579 
2580 	mdev->num_block_tc++;
2581 	return 0;
2582 }
2583 #endif
2584 
2585 static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev)
2586 {
2587 	mdev->num_block_tc--;
2588 }
2589 
2590 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
2591 {
2592 	int err;
2593 
2594 	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET) {
2595 		err = mlx5e_ipsec_block_tc_offload(sa_entry->ipsec->mdev);
2596 		if (err)
2597 			return err;
2598 	}
2599 
2600 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
2601 		err = tx_add_rule(sa_entry);
2602 	else
2603 		err = rx_add_rule(sa_entry);
2604 
2605 	if (err)
2606 		goto err_out;
2607 
2608 	return 0;
2609 
2610 err_out:
2611 	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
2612 		mlx5e_ipsec_unblock_tc_offload(sa_entry->ipsec->mdev);
2613 	return err;
2614 }
2615 
2616 void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
2617 {
2618 	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
2619 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
2620 
2621 	mlx5_del_flow_rules(ipsec_rule->rule);
2622 	mlx5_fc_destroy(mdev, ipsec_rule->fc);
2623 	if (ipsec_rule->pkt_reformat)
2624 		mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
2625 
2626 	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
2627 		mlx5e_ipsec_unblock_tc_offload(mdev);
2628 
2629 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
2630 		tx_ft_put(sa_entry->ipsec, sa_entry->attrs.type);
2631 		return;
2632 	}
2633 
2634 	if (ipsec_rule->modify_hdr)
2635 		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
2636 
2637 	mlx5_del_flow_rules(ipsec_rule->trailer.rule);
2638 	mlx5_fc_destroy(mdev, ipsec_rule->trailer.fc);
2639 
2640 	mlx5_del_flow_rules(ipsec_rule->auth.rule);
2641 	mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
2642 
2643 	if (ipsec_rule->sa_sel) {
2644 		mlx5_del_flow_rules(ipsec_rule->sa_sel);
2645 		mlx5_del_flow_rules(ipsec_rule->status_pass);
2646 	}
2647 
2648 	if (ipsec_rule->replay.rule) {
2649 		mlx5_del_flow_rules(ipsec_rule->replay.rule);
2650 		mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
2651 	}
2652 	mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
2653 	rx_ft_put(sa_entry->ipsec, sa_entry->attrs.addrs.family,
2654 		  sa_entry->attrs.type);
2655 }
2656 
2657 int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
2658 {
2659 	int err;
2660 
2661 	err = mlx5e_ipsec_block_tc_offload(pol_entry->ipsec->mdev);
2662 	if (err)
2663 		return err;
2664 
2665 	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
2666 		err = tx_add_policy(pol_entry);
2667 	else
2668 		err = rx_add_policy(pol_entry);
2669 
2670 	if (err)
2671 		goto err_out;
2672 
2673 	return 0;
2674 
2675 err_out:
2676 	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
2677 	return err;
2678 }
2679 
2680 void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
2681 {
2682 	struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
2683 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
2684 
2685 	mlx5_del_flow_rules(ipsec_rule->rule);
2686 
2687 	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
2688 
2689 	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
2690 		rx_ft_put_policy(pol_entry->ipsec,
2691 				 pol_entry->attrs.addrs.family,
2692 				 pol_entry->attrs.prio, pol_entry->attrs.type);
2693 		return;
2694 	}
2695 
2696 	if (ipsec_rule->modify_hdr)
2697 		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
2698 
2699 	tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio, pol_entry->attrs.type);
2700 }
2701 
2702 void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
2703 {
2704 	if (!ipsec->tx)
2705 		return;
2706 
2707 	if (ipsec->roce)
2708 		mlx5_ipsec_fs_roce_cleanup(ipsec->roce);
2709 
2710 	ipsec_fs_destroy_counters(ipsec);
2711 	mutex_destroy(&ipsec->tx->ft.mutex);
2712 	WARN_ON(ipsec->tx->ft.refcnt);
2713 	kfree(ipsec->tx);
2714 
2715 	mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
2716 	WARN_ON(ipsec->rx_ipv4->ft.refcnt);
2717 	kfree(ipsec->rx_ipv4);
2718 
2719 	mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
2720 	WARN_ON(ipsec->rx_ipv6->ft.refcnt);
2721 	kfree(ipsec->rx_ipv6);
2722 
2723 	if (ipsec->is_uplink_rep) {
2724 		xa_destroy(&ipsec->ipsec_obj_id_map);
2725 
2726 		mutex_destroy(&ipsec->tx_esw->ft.mutex);
2727 		WARN_ON(ipsec->tx_esw->ft.refcnt);
2728 		kfree(ipsec->tx_esw);
2729 
2730 		mutex_destroy(&ipsec->rx_esw->ft.mutex);
2731 		WARN_ON(ipsec->rx_esw->ft.refcnt);
2732 		kfree(ipsec->rx_esw);
2733 	}
2734 }
2735 
2736 int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec,
2737 			      struct mlx5_devcom_comp_dev **devcom)
2738 {
2739 	struct mlx5_core_dev *mdev = ipsec->mdev;
2740 	struct mlx5_flow_namespace *ns, *ns_esw;
2741 	int err = -ENOMEM;
2742 
2743 	ns = mlx5_get_flow_namespace(ipsec->mdev,
2744 				     MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
2745 	if (!ns)
2746 		return -EOPNOTSUPP;
2747 
2748 	if (ipsec->is_uplink_rep) {
2749 		ns_esw = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_FDB);
2750 		if (!ns_esw)
2751 			return -EOPNOTSUPP;
2752 
2753 		ipsec->tx_esw = kzalloc(sizeof(*ipsec->tx_esw), GFP_KERNEL);
2754 		if (!ipsec->tx_esw)
2755 			return -ENOMEM;
2756 
2757 		ipsec->rx_esw = kzalloc(sizeof(*ipsec->rx_esw), GFP_KERNEL);
2758 		if (!ipsec->rx_esw)
2759 			goto err_rx_esw;
2760 	}
2761 
2762 	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
2763 	if (!ipsec->tx)
2764 		goto err_tx;
2765 
2766 	ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
2767 	if (!ipsec->rx_ipv4)
2768 		goto err_rx_ipv4;
2769 
2770 	ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
2771 	if (!ipsec->rx_ipv6)
2772 		goto err_rx_ipv6;
2773 
2774 	err = ipsec_fs_init_counters(ipsec);
2775 	if (err)
2776 		goto err_counters;
2777 
2778 	mutex_init(&ipsec->tx->ft.mutex);
2779 	mutex_init(&ipsec->rx_ipv4->ft.mutex);
2780 	mutex_init(&ipsec->rx_ipv6->ft.mutex);
2781 	ipsec->tx->ns = ns;
2782 
2783 	if (ipsec->is_uplink_rep) {
2784 		mutex_init(&ipsec->tx_esw->ft.mutex);
2785 		mutex_init(&ipsec->rx_esw->ft.mutex);
2786 		ipsec->tx_esw->ns = ns_esw;
2787 		xa_init_flags(&ipsec->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
2788 	} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
2789 		ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom);
2790 	} else {
2791 		mlx5_core_warn(mdev, "IPsec was initialized without RoCE support\n");
2792 	}
2793 
2794 	return 0;
2795 
2796 err_counters:
2797 	kfree(ipsec->rx_ipv6);
2798 err_rx_ipv6:
2799 	kfree(ipsec->rx_ipv4);
2800 err_rx_ipv4:
2801 	kfree(ipsec->tx);
2802 err_tx:
2803 	kfree(ipsec->rx_esw);
2804 err_rx_esw:
2805 	kfree(ipsec->tx_esw);
2806 	return err;
2807 }
2808 
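/*
 * Make-before-break update of an SA's steering rules: add fresh rules
 * for a shadow copy of the entry first, and only then delete the old
 * ones, so a failure leaves the original rules intact.
 */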
2809 void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
2810 {
2811 	struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
2812 	int err;
2813 
2814 	memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
2815 	memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));
2816 
2817 	err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
2818 	if (err)
2819 		return;
2820 
2821 	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
2822 	memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
2823 }
2824 
2825 bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
2826 {
2827 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
2828 	struct mlx5e_ipsec_rx *rx;
2829 	struct mlx5e_ipsec_tx *tx;
2830 
2831 	rx = ipsec_rx(sa_entry->ipsec, attrs->addrs.family, attrs->type);
2832 	tx = ipsec_tx(sa_entry->ipsec, attrs->type);
2833 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
2834 		return tx->allow_tunnel_mode;
2835 
2836 	return rx->allow_tunnel_mode;
2837 }
2838 
2839 void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
2840 				  struct mlx5e_priv *master_priv)
2841 {
2842 	struct mlx5e_ipsec_mpv_work *work;
2843 
2844 	reinit_completion(&master_priv->ipsec->comp);
2845 
2846 	if (!slave_priv->ipsec) {
2847 		complete(&master_priv->ipsec->comp);
2848 		return;
2849 	}
2850 
2851 	work = &slave_priv->ipsec->mpv_work;
2852 
2853 	INIT_WORK(&work->work, ipsec_mpv_work_handler);
2854 	work->event = event;
2855 	work->slave_priv = slave_priv;
2856 	work->master_priv = master_priv;
2857 	queue_work(slave_priv->ipsec->wq, &work->work);
2858 }
2859 
2860 void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
2861 {
2862 	if (!priv->ipsec)
2863 		return; /* IPsec not supported */
2864 
2865 	mlx5_devcom_send_event(priv->devcom, event, event, priv);
2866 	wait_for_completion(&priv->ipsec->comp);
2867 }
2868