// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

#include <linux/netdevice.h>
#include "en.h"
#include "en/fs.h"
#include "eswitch.h"
#include "ipsec.h"
#include "fs_core.h"
#include "lib/ipsec_fs_roce.h"
#include "lib/fs_chains.h"
#include "esw/ipsec_fs.h"
#include "en_rep.h"

#define NUM_IPSEC_FTE BIT(15)
#define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
#define IPSEC_TUNNEL_DEFAULT_TTL 0x40

#define MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS 16

enum {
	MLX5_IPSEC_ASO_OK,
	MLX5_IPSEC_ASO_BAD_REPLY,

	/* For crypto offload, set by driver */
	MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD = 0xAA,
};

struct mlx5e_ipsec_fc {
	struct mlx5_fc *cnt;
	struct mlx5_fc *drop;
};

struct mlx5e_ipsec_tx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;
	struct mlx5e_ipsec_miss sa;
	struct mlx5e_ipsec_rule status;
	struct mlx5_flow_namespace *ns;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fs_chains *chains;
	u8 allow_tunnel_mode : 1;
};

struct mlx5e_ipsec_status_checks {
	struct mlx5_flow_group *pass_group;
	struct mlx5_flow_handle *packet_offload_pass_rule;
	struct mlx5_flow_handle *crypto_offload_pass_rule;
	struct mlx5_flow_group *drop_all_group;
	struct mlx5e_ipsec_drop all;
};

struct mlx5e_ipsec_rx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;
	struct mlx5e_ipsec_miss sa;
	struct mlx5e_ipsec_miss sa_sel;
	struct mlx5e_ipsec_status_checks status_checks;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fs_chains *chains;
	struct mlx5_flow_table *pol_miss_ft;
	struct mlx5_flow_handle *pol_miss_rule;
	u8 allow_tunnel_mode : 1;
	u8 ttc_rules_added : 1;
};

/* IPsec RX flow steering */
static enum mlx5_traffic_types family2tt(u32 family)
{
	if (family == AF_INET)
		return MLX5_TT_IPV4_IPSEC_ESP;
	return MLX5_TT_IPV6_IPSEC_ESP;
}

static struct mlx5e_ipsec_rx *ipsec_rx(struct mlx5e_ipsec *ipsec, u32 family, int type)
{
	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
		return ipsec->rx_esw;

	if (family == AF_INET)
		return ipsec->rx_ipv4;

	return ipsec->rx_ipv6;
}

static struct mlx5e_ipsec_tx *ipsec_tx(struct mlx5e_ipsec *ipsec, int type)
{
	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
		return ipsec->tx_esw;

	return ipsec->tx;
}

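/*
 * Wrapper around mlx5_chains_create(): builds a prio/chains object whose
 * miss destination is @miss_ft, then takes a reference on chain 0, prio 1,
 * level 0 so the returned *root_ft can be connected to the previous table
 * in fs_core.
 */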
static struct mlx5_fs_chains *
ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft,
		    enum mlx5_flow_namespace_type ns, int base_prio,
		    int base_level, struct mlx5_flow_table **root_ft)
{
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	struct mlx5_flow_table *ft;
	int err;

	attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
		     MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.max_grp_num = 2;
	attr.default_ft = miss_ft;
	attr.ns = ns;
	attr.fs_base_prio = base_prio;
	attr.fs_base_level = base_level;
	chains = mlx5_chains_create(mdev, &attr);
	if (IS_ERR(chains))
		return chains;

	/* Create chain 0, prio 1, level 0 to connect chains to prev in fs_core */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_chains_get;
	}

	*root_ft = ft;
	return chains;

err_chains_get:
	mlx5_chains_destroy(chains);
	return ERR_PTR(err);
}

static void ipsec_chains_destroy(struct mlx5_fs_chains *chains)
{
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_destroy(chains);
}

static struct mlx5_flow_table *
ipsec_chains_get_table(struct mlx5_fs_chains *chains, u32 prio)
{
	return mlx5_chains_get_table(chains, 0, prio + 1, 0);
}

static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
{
	mlx5_chains_put_table(chains, 0, prio + 1, 0);
}

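/*
 * All IPsec flow tables are auto-grouped and sized to NUM_IPSEC_FTE
 * entries; callers use @num_reserved_entries to keep the tail of the table
 * free for miss/status groups that are added with explicit flow indices.
 */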
static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
					       int level, int prio,
					       int num_reserved_entries,
					       int max_num_groups, u32 flags)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.autogroup.num_reserved_entries = num_reserved_entries;
	ft_attr.autogroup.max_num_groups = max_num_groups;
	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.level = level;
	ft_attr.prio = prio;
	ft_attr.flags = flags;

	return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}

static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx)
{
	mlx5_del_flow_rules(rx->status_checks.all.rule);
	mlx5_fc_destroy(ipsec->mdev, rx->status_checks.all.fc);
	mlx5_destroy_flow_group(rx->status_checks.drop_all_group);
}

static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx)
{
	mlx5_del_flow_rules(rx->status_checks.packet_offload_pass_rule);
	mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
}

static void ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
					struct mlx5e_ipsec_rx *rx,
					struct mlx5_flow_spec *spec)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;

	if (rx == ipsec->rx_esw) {
		mlx5_esw_ipsec_rx_rule_add_match_obj(sa_entry, spec);
	} else {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 misc_parameters_2.metadata_reg_c_2);
		MLX5_SET(fte_match_param, spec->match_value,
			 misc_parameters_2.metadata_reg_c_2,
			 sa_entry->ipsec_obj_id | BIT(31));

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	}
}

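/*
 * Per-SA status drop rules: packets whose decryption completed with
 * ipsec_syndrome 1 (authentication failure) or 2 (bad trailer) are counted
 * and dropped, with a separate counter kept per failure type.
 */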
static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
					 struct mlx5e_ipsec_rx *rx)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_fc *flow_counter;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	flow_counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt;
	}
	sa_entry->ipsec_rule.auth.fc = flow_counter;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_act.flags = FLOW_ACT_NO_APPEND;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = flow_counter;
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule;
	}
	sa_entry->ipsec_rule.auth.rule = rule;

	flow_counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt_2;
	}
	sa_entry->ipsec_rule.trailer.fc = flow_counter;

	dest.counter = flow_counter;
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule_2;
	}
	sa_entry->ipsec_rule.trailer.rule = rule;

	kvfree(spec);
	return 0;

err_rule_2:
	mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.trailer.fc);
err_cnt_2:
	mlx5_del_flow_rules(sa_entry->ipsec_rule.auth.rule);
err_rule:
	mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.auth.fc);
err_cnt:
	kvfree(spec);
	return err;
}

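/*
 * Per-SA replay drop rule: packets flagged by the ASO with
 * metadata_reg_c_4 == 1 are counted and dropped.
 */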
static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_fc *flow_counter;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	flow_counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_act.flags = FLOW_ACT_NO_APPEND;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = flow_counter;
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule;
	}

	sa_entry->ipsec_rule.replay.rule = rule;
	sa_entry->ipsec_rule.replay.fc = flow_counter;

	kvfree(spec);
	return 0;

err_rule:
	mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
	kvfree(spec);
	return err;
}

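/*
 * Catch-all: the last FTE of the status table counts and drops everything
 * that did not match one of the pass or per-SA drop rules.
 */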
static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
					   struct mlx5e_ipsec_rx *rx)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_fc *flow_counter;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto err_out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop flow group, err=%d\n", err);
		goto err_out;
	}

	flow_counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = flow_counter;
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule;
	}

	rx->status_checks.drop_all_group = g;
	rx->status_checks.all.rule = rule;
	rx->status_checks.all.fc = flow_counter;

	kvfree(flow_group_in);
	kvfree(spec);
	return 0;

err_rule:
	mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
	mlx5_destroy_flow_group(g);
err_out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

static int ipsec_rx_status_pass_group_create(struct mlx5e_ipsec *ipsec,
					     struct mlx5e_ipsec_rx *rx)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_flow_group *fg;
	void *match_criteria;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters_2.ipsec_syndrome);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_4);

	MLX5_SET(create_flow_group_in, flow_group_in,
		 start_flow_index, ft->max_fte - 3);
	MLX5_SET(create_flow_group_in, flow_group_in,
		 end_flow_index, ft->max_fte - 2);

	fg = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(fg)) {
		err = PTR_ERR(fg);
		mlx5_core_warn(ipsec->mdev,
			       "Failed to create rx status pass flow group, err=%d\n",
			       err);
	}
	rx->status_checks.pass_group = fg;

	kvfree(flow_group_in);
	return err;
}

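/*
 * Pass rule: packets with a clean status (ipsec_syndrome == 0) and an ASO
 * reply equal to @aso_ok are forwarded to @dest. It is installed twice:
 * once for packet offload (MLX5_IPSEC_ASO_OK) and once for crypto offload,
 * which the driver marks with MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD.
 */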
static struct mlx5_flow_handle *
ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
			    struct mlx5e_ipsec_rx *rx,
			    struct mlx5_flow_destination *dest,
			    u8 aso_ok)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters_2.ipsec_syndrome);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters_2.metadata_reg_c_4);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.ipsec_syndrome, 0);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_c_4, aso_ok);
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_warn(ipsec->mdev,
			       "Failed to add ipsec rx status pass rule, err=%d\n", err);
		goto err_rule;
	}

	kvfree(spec);
	return rule;

err_rule:
	kvfree(spec);
	return ERR_PTR(err);
}

static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx)
{
	ipsec_rx_status_pass_destroy(ipsec, rx);
	mlx5_destroy_flow_group(rx->status_checks.pass_group);
	ipsec_rx_status_drop_destroy(ipsec, rx);
}

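/*
 * Build the full set of status checks. Note the asymmetry: packet offload
 * continues to the policy table, while crypto offload skips it and goes
 * straight to the default destination.
 */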
static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
				       struct mlx5e_ipsec_rx *rx,
				       struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_destination pol_dest[2];
	struct mlx5_flow_handle *rule;
	int err;

	err = ipsec_rx_status_drop_all_create(ipsec, rx);
	if (err)
		return err;

	err = ipsec_rx_status_pass_group_create(ipsec, rx);
	if (err)
		goto err_pass_group_create;

	rule = ipsec_rx_status_pass_create(ipsec, rx, dest,
					   MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_crypto_offload_pass_create;
	}
	rx->status_checks.crypto_offload_pass_rule = rule;

	pol_dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	pol_dest[0].ft = rx->ft.pol;
	pol_dest[1] = dest[1];
	rule = ipsec_rx_status_pass_create(ipsec, rx, pol_dest,
					   MLX5_IPSEC_ASO_OK);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_packet_offload_pass_create;
	}
	rx->status_checks.packet_offload_pass_rule = rule;

	return 0;

err_packet_offload_pass_create:
	mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
err_crypto_offload_pass_create:
	mlx5_destroy_flow_group(rx->status_checks.pass_group);
err_pass_group_create:
	ipsec_rx_status_drop_destroy(ipsec, rx);
	return err;
}

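/*
 * Shared miss handling for SA and policy tables: a one-entry group at the
 * very end of @ft with an unconditional rule steering to @dest.
 */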
static int ipsec_miss_create(struct mlx5_core_dev *mdev,
			     struct mlx5_flow_table *ft,
			     struct mlx5e_ipsec_miss *miss,
			     struct mlx5_flow_destination *dest)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Create miss_group */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	miss->group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss->group)) {
		err = PTR_ERR(miss->group);
		mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
			      err);
		goto out;
	}

	/* Create miss rule */
	miss->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(miss->rule)) {
		mlx5_destroy_flow_group(miss->group);
		err = PTR_ERR(miss->rule);
		mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
			      err);
		goto out;
	}
out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

static struct mlx5_flow_destination
ipsec_rx_decrypted_pkt_def_dest(struct mlx5_ttc_table *ttc, u32 family)
{
	struct mlx5_flow_destination dest;

	if (!mlx5_ttc_has_esp_flow_group(ttc))
		return mlx5_ttc_get_default_dest(ttc, family2tt(family));

	dest.ft = mlx5_get_ttc_flow_table(ttc);
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

	return dest;
}

static void ipsec_rx_update_default_dest(struct mlx5e_ipsec_rx *rx,
					 struct mlx5_flow_destination *old_dest,
					 struct mlx5_flow_destination *new_dest)
{
	mlx5_modify_rule_destination(rx->pol_miss_rule, new_dest, old_dest);
	mlx5_modify_rule_destination(rx->status_checks.crypto_offload_pass_rule,
				     new_dest, old_dest);
}

static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
	struct mlx5_flow_destination old_dest, new_dest;

	old_dest = ipsec_rx_decrypted_pkt_def_dest(ttc, family);

	mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, ns, &old_dest, family,
				     MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, MLX5E_NIC_PRIO);

	new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
	new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);
}

static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
	struct mlx5_flow_destination old_dest, new_dest;

	old_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
	old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	new_dest = ipsec_rx_decrypted_pkt_def_dest(ttc, family);
	ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);

	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev);
}

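/*
 * Multi-port vhca (MPV) event handler: mirror the RoCE TX/RX steering on
 * the slave device when the master comes up or goes down, but only touch
 * trees that already exist (nonzero refcount), under the same mutexes as
 * the regular get/put flow.
 */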
static void ipsec_mpv_work_handler(struct work_struct *_work)
{
	struct mlx5e_ipsec_mpv_work *work = container_of(_work, struct mlx5e_ipsec_mpv_work, work);
	struct mlx5e_ipsec *ipsec = work->slave_priv->ipsec;

	switch (work->event) {
	case MPV_DEVCOM_IPSEC_MASTER_UP:
		mutex_lock(&ipsec->tx->ft.mutex);
		if (ipsec->tx->ft.refcnt)
			mlx5_ipsec_fs_roce_tx_create(ipsec->mdev, ipsec->roce, ipsec->tx->ft.pol,
						     true);
		mutex_unlock(&ipsec->tx->ft.mutex);

		mutex_lock(&ipsec->rx_ipv4->ft.mutex);
		if (ipsec->rx_ipv4->ft.refcnt)
			handle_ipsec_rx_bringup(ipsec, AF_INET);
		mutex_unlock(&ipsec->rx_ipv4->ft.mutex);

		mutex_lock(&ipsec->rx_ipv6->ft.mutex);
		if (ipsec->rx_ipv6->ft.refcnt)
			handle_ipsec_rx_bringup(ipsec, AF_INET6);
		mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
		break;
	case MPV_DEVCOM_IPSEC_MASTER_DOWN:
		mutex_lock(&ipsec->tx->ft.mutex);
		if (ipsec->tx->ft.refcnt)
			mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce, ipsec->mdev);
		mutex_unlock(&ipsec->tx->ft.mutex);

		mutex_lock(&ipsec->rx_ipv4->ft.mutex);
		if (ipsec->rx_ipv4->ft.refcnt)
			handle_ipsec_rx_cleanup(ipsec, AF_INET);
		mutex_unlock(&ipsec->rx_ipv4->ft.mutex);

		mutex_lock(&ipsec->rx_ipv6->ft.mutex);
		if (ipsec->rx_ipv6->ft.refcnt)
			handle_ipsec_rx_cleanup(ipsec, AF_INET6);
		mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
		break;
	}

	complete(&work->master_priv->ipsec->comp);
}

static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec,
				   struct mlx5e_ipsec_rx *rx, u32 family)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);

	if (rx->ttc_rules_added)
		mlx5_ttc_destroy_ipsec_rules(ttc);
	mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
}

static void ipsec_rx_policy_destroy(struct mlx5e_ipsec_rx *rx)
{
	if (rx->chains) {
		ipsec_chains_destroy(rx->chains);
	} else {
		mlx5_del_flow_rules(rx->pol.rule);
		mlx5_destroy_flow_group(rx->pol.group);
		mlx5_destroy_flow_table(rx->ft.pol);
	}

	if (rx->pol_miss_rule) {
		mlx5_del_flow_rules(rx->pol_miss_rule);
		mlx5_destroy_flow_table(rx->pol_miss_ft);
	}
}

static void ipsec_rx_sa_selector_destroy(struct mlx5_core_dev *mdev,
					 struct mlx5e_ipsec_rx *rx)
{
	mlx5_del_flow_rules(rx->sa_sel.rule);
	mlx5_fc_destroy(mdev, rx->sa_sel.fc);
	rx->sa_sel.fc = NULL;
	mlx5_destroy_flow_group(rx->sa_sel.group);
	mlx5_destroy_flow_table(rx->ft.sa_sel);
}

static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		       struct mlx5e_ipsec_rx *rx, u32 family)
{
	/* disconnect */
	if (rx != ipsec->rx_esw)
		ipsec_rx_ft_disconnect(ipsec, rx, family);

	mlx5_del_flow_rules(rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
	mlx5_destroy_flow_table(rx->ft.sa);
	if (rx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
	mlx5_ipsec_rx_status_destroy(ipsec, rx);
	mlx5_destroy_flow_table(rx->ft.status);

	ipsec_rx_sa_selector_destroy(mdev, rx);

	ipsec_rx_policy_destroy(rx);

	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);

#ifdef CONFIG_MLX5_ESWITCH
	if (rx == ipsec->rx_esw)
		mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch),
				      0, 1, 0);
#endif
}

static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
				     struct mlx5e_ipsec_rx *rx,
				     u32 family,
				     struct mlx5e_ipsec_rx_create_attr *attr)
{
	if (rx == ipsec->rx_esw) {
		/* For packet offload in switchdev mode, RX & TX use FDB namespace */
		attr->ns = ipsec->tx_esw->ns;
		mlx5_esw_ipsec_rx_create_attr_set(ipsec, attr);
		return;
	}

	attr->ns = mlx5e_fs_get_ns(ipsec->fs, false);
	attr->ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
	attr->family = family;
	attr->prio = MLX5E_NIC_PRIO;
	attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL;
	attr->pol_miss_level = MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL;
	attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
	attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
	attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL;
}

static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx,
					 struct mlx5e_ipsec_rx_create_attr *attr,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_table *ft;
	int err;

	if (rx == ipsec->rx_esw)
		return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest);

	*dest = ipsec_rx_decrypted_pkt_def_dest(attr->ttc, attr->family);
	err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest,
					   attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
					   attr->prio);
	if (err)
		return err;

	ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, attr->family);
	if (ft) {
		dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest->ft = ft;
	}

	return 0;
}

static void ipsec_rx_sa_miss_dest_get(struct mlx5e_ipsec *ipsec,
				      struct mlx5e_ipsec_rx *rx,
				      struct mlx5e_ipsec_rx_create_attr *attr,
				      struct mlx5_flow_destination *dest,
				      struct mlx5_flow_destination *miss_dest)
{
	if (rx == ipsec->rx_esw)
		*miss_dest = *dest;
	else
		*miss_dest =
			mlx5_ttc_get_default_dest(attr->ttc,
						  family2tt(attr->family));
}

static void ipsec_rx_default_dest_get(struct mlx5e_ipsec *ipsec,
				      struct mlx5e_ipsec_rx *rx,
				      struct mlx5_flow_destination *dest)
{
	dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest->ft = rx->pol_miss_ft;
}

static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
				struct mlx5e_ipsec_rx *rx,
				struct mlx5e_ipsec_rx_create_attr *attr)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_ttc_table *ttc, *inner_ttc;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rx->ft.sa;
	if (mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest))
		return;

	ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
	inner_ttc = mlx5e_fs_get_ttc(ipsec->fs, true);
	rx->ttc_rules_added = !mlx5_ttc_create_ipsec_rules(ttc, inner_ttc);
}

static int ipsec_rx_chains_create_miss(struct mlx5e_ipsec *ipsec,
				       struct mlx5e_ipsec_rx *rx,
				       struct mlx5e_ipsec_rx_create_attr *attr,
				       struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_table_attr ft_attr = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	int err;

	if (rx == ipsec->rx_esw) {
		/* No need to create miss table for switchdev mode,
		 * just set it to the root chain table.
		 */
		rx->pol_miss_ft = dest->ft;
		return 0;
	}

	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = attr->pol_miss_level;
	ft_attr.prio = attr->prio;

	ft = mlx5_create_auto_grouped_flow_table(attr->ns, &ft_attr);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	rule = mlx5_add_flow_rules(ft, NULL, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule;
	}

	rx->pol_miss_ft = ft;
	rx->pol_miss_rule = rule;

	return 0;

err_rule:
	mlx5_destroy_flow_table(ft);
	return err;
}

static int ipsec_rx_policy_create(struct mlx5e_ipsec *ipsec,
				  struct mlx5e_ipsec_rx *rx,
				  struct mlx5e_ipsec_rx_create_attr *attr,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_destination default_dest;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_table *ft;
	int err;

	err = ipsec_rx_chains_create_miss(ipsec, rx, attr, dest);
	if (err)
		return err;

	ipsec_rx_default_dest_get(ipsec, rx, &default_dest);

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
		rx->chains = ipsec_chains_create(mdev,
						 default_dest.ft,
						 attr->chains_ns,
						 attr->prio,
						 attr->pol_level,
						 &rx->ft.pol);
		if (IS_ERR(rx->chains))
			err = PTR_ERR(rx->chains);
	} else {
		ft = ipsec_ft_create(attr->ns, attr->pol_level,
				     attr->prio, 1, 2, 0);
		if (IS_ERR(ft)) {
			err = PTR_ERR(ft);
			goto err_out;
		}
		rx->ft.pol = ft;

		err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol,
					&default_dest);
		if (err)
			mlx5_destroy_flow_table(rx->ft.pol);
	}

	if (!err)
		return 0;

err_out:
	if (rx->pol_miss_rule) {
		mlx5_del_flow_rules(rx->pol_miss_rule);
		mlx5_destroy_flow_table(rx->pol_miss_ft);
	}
	return err;
}

static int ipsec_rx_sa_selector_create(struct mlx5e_ipsec *ipsec,
				       struct mlx5e_ipsec_rx *rx,
				       struct mlx5e_ipsec_rx_create_attr *attr)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	u32 *flow_group_in;
	struct mlx5_fc *fc;
	int err;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	ft = ipsec_ft_create(attr->ns, attr->status_level, attr->prio, 1,
			     MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Failed to create RX SA selector flow table, err=%d\n",
			      err);
		goto err_ft;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
		 ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft->max_fte - 1);
	fg = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(fg)) {
		err = PTR_ERR(fg);
		mlx5_core_err(mdev, "Failed to create RX SA selector miss group, err=%d\n",
			      err);
		goto err_fg;
	}

	fc = mlx5_fc_create(mdev, false);
	if (IS_ERR(fc)) {
		err = PTR_ERR(fc);
		mlx5_core_err(mdev,
			      "Failed to create ipsec RX SA selector miss rule counter, err=%d\n",
			      err);
		goto err_cnt;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = fc;
	flow_act.action =
		MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_DROP;

	rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to create RX SA selector miss drop rule, err=%d\n",
			      err);
		goto err_rule;
	}

	rx->ft.sa_sel = ft;
	rx->sa_sel.group = fg;
	rx->sa_sel.fc = fc;
	rx->sa_sel.rule = rule;

	kvfree(flow_group_in);

	return 0;

err_rule:
	mlx5_fc_destroy(mdev, fc);
err_cnt:
	mlx5_destroy_flow_group(fg);
err_fg:
	mlx5_destroy_flow_table(ft);
err_ft:
	kvfree(flow_group_in);
	return err;
}

/* The decryption processing is as follows:
 *
 *   +----------+                         +-------------+
 *   |          |                         |             |
 *   |  Kernel  <--------------+----------+ policy miss <------------+
 *   |          |              ^          |             |            ^
 *   +----^-----+              |          +-------------+            |
 *        |                  crypto                                  |
 *      miss                offload ok                         allow/default
 *        ^                    ^                                     ^
 *        |                    |                  packet             |
 *   +----+---------+     +----+-------------+   offload ok   +------+---+
 *   |              |     |                  |   (no UPSPEC)  |          |
 *   | SA (decrypt) +----->      status      +--->------->----+  policy  |
 *   |              |     |                  |                |          |
 *   +--------------+     ++---------+-------+                +-^----+---+
 *                         |         |                          |    |
 *                         v        packet             +-->->---+    v
 *                         |       offload ok        match           |
 *                       fails    (with UPSPEC)        |           block
 *                         |         |   +-------------+-+           |
 *                         v         v   |               |  miss     v
 *                        drop       +--->    SA sel     +--------->drop
 *                                       |               |
 *                                       +---------------+
 */

static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		     struct mlx5e_ipsec_rx *rx, u32 family)
{
	struct mlx5_flow_destination dest[2], miss_dest;
	struct mlx5e_ipsec_rx_create_attr attr;
	struct mlx5_flow_table *ft;
	u32 flags = 0;
	int err;

	ipsec_rx_create_attr_set(ipsec, rx, family, &attr);

	err = ipsec_rx_status_pass_dest_get(ipsec, rx, &attr, &dest[0]);
	if (err)
		return err;

	ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 4, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_fs_ft_status;
	}
	rx->ft.status = ft;

	err = ipsec_rx_sa_selector_create(ipsec, rx, &attr);
	if (err)
		goto err_fs_ft_sa_sel;

	/* Create FT */
	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
		rx->allow_tunnel_mode =
			mlx5_eswitch_block_encap(mdev, rx == ipsec->rx_esw);

	if (rx->allow_tunnel_mode)
		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 1, 2, flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_fs_ft;
	}
	rx->ft.sa = ft;

	ipsec_rx_sa_miss_dest_get(ipsec, rx, &attr, &dest[0], &miss_dest);
	err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, &miss_dest);
	if (err)
		goto err_fs;

	err = ipsec_rx_policy_create(ipsec, rx, &attr, &dest[0]);
	if (err)
		goto err_policy;

	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter = rx->fc->cnt;
	err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
	if (err)
		goto err_add;

	/* connect */
	if (rx != ipsec->rx_esw)
		ipsec_rx_ft_connect(ipsec, rx, &attr);
	return 0;

err_add:
	ipsec_rx_policy_destroy(rx);
err_policy:
	mlx5_del_flow_rules(rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
err_fs:
	mlx5_destroy_flow_table(rx->ft.sa);
	if (rx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
err_fs_ft:
	ipsec_rx_sa_selector_destroy(mdev, rx);
err_fs_ft_sa_sel:
	mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
	return err;
}

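/*
 * The RX (and, below, TX) steering trees are reference counted under
 * ft.mutex; the first get creates the whole tree and blocks eswitch mode
 * changes, the last put tears it down. Callers elsewhere in the driver
 * pair get/put around rule installation, roughly:
 *
 *	rx = rx_ft_get(mdev, ipsec, AF_INET, XFRM_DEV_OFFLOAD_PACKET);
 *	if (IS_ERR(rx))
 *		return PTR_ERR(rx);
 *	...install SA rules in rx->ft.sa...
 *	rx_ft_put(ipsec, AF_INET, XFRM_DEV_OFFLOAD_PACKET);
 */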
static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		  struct mlx5e_ipsec_rx *rx, u32 family)
{
	int err;

	if (rx->ft.refcnt)
		goto skip;

	err = mlx5_eswitch_block_mode(mdev);
	if (err)
		return err;

	err = rx_create(mdev, ipsec, rx, family);
	if (err) {
		mlx5_eswitch_unblock_mode(mdev);
		return err;
	}

skip:
	rx->ft.refcnt++;
	return 0;
}

static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
		   u32 family)
{
	if (--rx->ft.refcnt)
		return;

	rx_destroy(ipsec->mdev, ipsec, rx, family);
	mlx5_eswitch_unblock_mode(ipsec->mdev);
}

static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
					struct mlx5e_ipsec *ipsec, u32 family,
					int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
	int err;

	mutex_lock(&rx->ft.mutex);
	err = rx_get(mdev, ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
	if (err)
		return ERR_PTR(err);

	return rx;
}

static struct mlx5_flow_table *rx_ft_get_policy(struct mlx5_core_dev *mdev,
						struct mlx5e_ipsec *ipsec,
						u32 family, u32 prio, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
	struct mlx5_flow_table *ft;
	int err;

	mutex_lock(&rx->ft.mutex);
	err = rx_get(mdev, ipsec, rx, family);
	if (err)
		goto err_get;

	ft = rx->chains ? ipsec_chains_get_table(rx->chains, prio) : rx->ft.pol;
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_get_ft;
	}

	mutex_unlock(&rx->ft.mutex);
	return ft;

err_get_ft:
	rx_put(ipsec, rx, family);
err_get:
	mutex_unlock(&rx->ft.mutex);
	return ERR_PTR(err);
}

static void rx_ft_put(struct mlx5e_ipsec *ipsec, u32 family, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);

	mutex_lock(&rx->ft.mutex);
	rx_put(ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
}

static void rx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 family, u32 prio, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);

	mutex_lock(&rx->ft.mutex);
	if (rx->chains)
		ipsec_chains_put_table(rx->chains, prio);

	rx_put(ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
}

static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *fte;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = tx->fc->cnt;
	fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "Fail to add ipsec tx counter rule err=%d\n", err);
		goto err_rule;
	}

	kvfree(spec);
	tx->status.rule = fte;
	return 0;

err_rule:
	kvfree(spec);
	return err;
}

/* IPsec TX flow steering */
static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
		       struct mlx5_ipsec_fs *roce)
{
	mlx5_ipsec_fs_roce_tx_destroy(roce, ipsec->mdev);
	if (tx->chains) {
		ipsec_chains_destroy(tx->chains);
	} else {
		mlx5_del_flow_rules(tx->pol.rule);
		mlx5_destroy_flow_group(tx->pol.group);
		mlx5_destroy_flow_table(tx->ft.pol);
	}

	if (tx == ipsec->tx_esw) {
		mlx5_del_flow_rules(tx->sa.rule);
		mlx5_destroy_flow_group(tx->sa.group);
	}
	mlx5_destroy_flow_table(tx->ft.sa);
	if (tx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(ipsec->mdev);
	mlx5_del_flow_rules(tx->status.rule);
	mlx5_destroy_flow_table(tx->ft.status);
}

static void ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
				     struct mlx5e_ipsec_tx *tx,
				     struct mlx5e_ipsec_tx_create_attr *attr)
{
	if (tx == ipsec->tx_esw) {
		mlx5_esw_ipsec_tx_create_attr_set(ipsec, attr);
		return;
	}

	attr->prio = 0;
	attr->pol_level = 0;
	attr->sa_level = 1;
	attr->cnt_level = 2;
	attr->chains_ns = MLX5_FLOW_NAMESPACE_EGRESS_IPSEC;
}

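/*
 * Build the TX tree bottom-up: counter (status) table, then the SA table,
 * then the policy table or chains on top. In switchdev mode the SA table
 * additionally gets a miss rule forwarding unmatched traffic to the
 * uplink vport.
 */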
static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
		     struct mlx5_ipsec_fs *roce)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_tx_create_attr attr;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_table *ft;
	u32 flags = 0;
	int err;

	ipsec_tx_create_attr_set(ipsec, tx, &attr);
	ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 1, 0);
	if (IS_ERR(ft))
		return PTR_ERR(ft);
	tx->ft.status = ft;

	err = ipsec_counter_rule_tx(mdev, tx);
	if (err)
		goto err_status_rule;

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
		tx->allow_tunnel_mode =
			mlx5_eswitch_block_encap(mdev, tx == ipsec->tx_esw);

	if (tx->allow_tunnel_mode)
		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 1, 4, flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_sa_ft;
	}
	tx->ft.sa = ft;

	if (tx == ipsec->tx_esw) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest.vport.num = MLX5_VPORT_UPLINK;
		err = ipsec_miss_create(mdev, tx->ft.sa, &tx->sa, &dest);
		if (err)
			goto err_sa_miss;
		memset(&dest, 0, sizeof(dest));
	}

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
		tx->chains = ipsec_chains_create(
			mdev, tx->ft.sa, attr.chains_ns, attr.prio, attr.pol_level,
			&tx->ft.pol);
		if (IS_ERR(tx->chains)) {
			err = PTR_ERR(tx->chains);
			goto err_pol_ft;
		}

		goto connect_roce;
	}

	ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 1, 2, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_pol_ft;
	}
	tx->ft.pol = ft;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = tx->ft.sa;
	err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
	if (err) {
		mlx5_destroy_flow_table(tx->ft.pol);
		goto err_pol_ft;
	}

connect_roce:
	err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol, false);
	if (err)
		goto err_roce;
	return 0;

err_roce:
	if (tx->chains) {
		ipsec_chains_destroy(tx->chains);
	} else {
		mlx5_del_flow_rules(tx->pol.rule);
		mlx5_destroy_flow_group(tx->pol.group);
		mlx5_destroy_flow_table(tx->ft.pol);
	}
err_pol_ft:
	if (tx == ipsec->tx_esw) {
		mlx5_del_flow_rules(tx->sa.rule);
		mlx5_destroy_flow_group(tx->sa.group);
	}
err_sa_miss:
	mlx5_destroy_flow_table(tx->ft.sa);
err_sa_ft:
	if (tx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
	mlx5_del_flow_rules(tx->status.rule);
err_status_rule:
	mlx5_destroy_flow_table(tx->ft.status);
	return err;
}

static void ipsec_esw_tx_ft_policy_set(struct mlx5_core_dev *mdev,
				       struct mlx5_flow_table *ft)
{
#ifdef CONFIG_MLX5_ESWITCH
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5e_priv *priv;

	esw->offloads.ft_ipsec_tx_pol = ft;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	priv = netdev_priv(uplink_rpriv->netdev);
	if (!priv->channels.num)
		return;

	mlx5e_rep_deactivate_channels(priv);
	mlx5e_rep_activate_channels(priv);
#endif
}

static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		  struct mlx5e_ipsec_tx *tx)
{
	int err;

	if (tx->ft.refcnt)
		goto skip;

	err = mlx5_eswitch_block_mode(mdev);
	if (err)
		return err;

	err = tx_create(ipsec, tx, ipsec->roce);
	if (err) {
		mlx5_eswitch_unblock_mode(mdev);
		return err;
	}

	if (tx == ipsec->tx_esw)
		ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);

skip:
	tx->ft.refcnt++;
	return 0;
}

static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
{
	if (--tx->ft.refcnt)
		return;

	if (tx == ipsec->tx_esw) {
		mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev);
		ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
	}

	tx_destroy(ipsec, tx, ipsec->roce);
	mlx5_eswitch_unblock_mode(ipsec->mdev);
}

static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
						struct mlx5e_ipsec *ipsec,
						u32 prio, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
	struct mlx5_flow_table *ft;
	int err;

	mutex_lock(&tx->ft.mutex);
	err = tx_get(mdev, ipsec, tx);
	if (err)
		goto err_get;

	ft = tx->chains ? ipsec_chains_get_table(tx->chains, prio) : tx->ft.pol;
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_get_ft;
	}

	mutex_unlock(&tx->ft.mutex);
	return ft;

err_get_ft:
	tx_put(ipsec, tx);
err_get:
	mutex_unlock(&tx->ft.mutex);
	return ERR_PTR(err);
}

static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
					struct mlx5e_ipsec *ipsec, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
	int err;

	mutex_lock(&tx->ft.mutex);
	err = tx_get(mdev, ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
	if (err)
		return ERR_PTR(err);

	return tx;
}

static void tx_ft_put(struct mlx5e_ipsec *ipsec, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);

	mutex_lock(&tx->ft.mutex);
	tx_put(ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
}

static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);

	mutex_lock(&tx->ft.mutex);
	if (tx->chains)
		ipsec_chains_put_table(tx->chains, prio);

	tx_put(ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
}

static void setup_fte_addr4(struct mlx5_flow_spec *spec,
			    struct mlx5e_ipsec_addr *addrs)
{
	__be32 *saddr = &addrs->saddr.a4;
	__be32 *smask = &addrs->smask.m4;
	__be32 *daddr = &addrs->daddr.a4;
	__be32 *dmask = &addrs->dmask.m4;

	if (!*saddr && !*daddr)
		return;

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);

	if (*saddr) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), smask, 4);
	}

	if (*daddr) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), dmask, 4);
	}
}

static void setup_fte_addr6(struct mlx5_flow_spec *spec,
			    struct mlx5e_ipsec_addr *addrs)
{
	__be32 *saddr = addrs->saddr.a6;
	__be32 *smask = addrs->smask.m6;
	__be32 *daddr = addrs->daddr.a6;
	__be32 *dmask = addrs->dmask.m6;

	if (addr6_all_zero(saddr) && addr6_all_zero(daddr))
		return;

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);

	if (!addr6_all_zero(saddr)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), smask, 16);
	}

	if (!addr6_all_zero(daddr)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), dmask, 16);
	}
}

static void setup_fte_esp(struct mlx5_flow_spec *spec)
{
	/* ESP header */
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
}

setup_fte_spi(struct mlx5_flow_spec * spec,u32 spi,bool encap)1587 static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
1588 {
1589 	/* SPI number */
1590 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
1591 
1592 	if (encap) {
1593 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1594 				 misc_parameters.inner_esp_spi);
1595 		MLX5_SET(fte_match_param, spec->match_value,
1596 			 misc_parameters.inner_esp_spi, spi);
1597 	} else {
1598 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1599 				 misc_parameters.outer_esp_spi);
1600 		MLX5_SET(fte_match_param, spec->match_value,
1601 			 misc_parameters.outer_esp_spi, spi);
1602 	}
1603 }
1604 
1605 static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
1606 {
1607 	/* Non fragmented */
1608 	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
1609 
1610 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
1611 	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
1612 }
1613 
1614 static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
1615 {
1616 	/* Add IPsec indicator in metadata_reg_a */
1617 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
1618 
1619 	MLX5_SET(fte_match_param, spec->match_criteria,
1620 		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
1621 	MLX5_SET(fte_match_param, spec->match_value,
1622 		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
1623 }
1624 
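/* TX packet offload only: a matching TX policy (tx_add_policy() below)
 * stamps its reqid into metadata_reg_c_4 through a modify header, so
 * matching reg_c_4 here ensures an SA is only selected for packets that
 * already passed the policy check.
 */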
1625 static void setup_fte_reg_c4(struct mlx5_flow_spec *spec, u32 reqid)
1626 {
1627 	/* Pass policy check before choosing this SA */
1628 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
1629 
1630 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1631 			 misc_parameters_2.metadata_reg_c_4);
1632 	MLX5_SET(fte_match_param, spec->match_value,
1633 		 misc_parameters_2.metadata_reg_c_4, reqid);
1634 }
1635 
1636 static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
1637 {
1638 	switch (upspec->proto) {
1639 	case IPPROTO_UDP:
1640 		if (upspec->dport) {
1641 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1642 				 udp_dport, upspec->dport_mask);
1643 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1644 				 udp_dport, upspec->dport);
1645 		}
1646 		if (upspec->sport) {
1647 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1648 				 udp_sport, upspec->sport_mask);
1649 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1650 				 udp_sport, upspec->sport);
1651 		}
1652 		break;
1653 	case IPPROTO_TCP:
1654 		if (upspec->dport) {
1655 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1656 				 tcp_dport, upspec->dport_mask);
1657 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1658 				 tcp_dport, upspec->dport);
1659 		}
1660 		if (upspec->sport) {
1661 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1662 				 tcp_sport, upspec->sport_mask);
1663 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1664 				 tcp_sport, upspec->sport);
1665 		}
1666 		break;
1667 	default:
1668 		return;
1669 	}
1670 
1671 	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
1672 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
1673 	MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
1674 }
1675 
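/* Namespace selection: packet offload on the uplink representor lives in
 * the FDB; otherwise RX rules go to the kernel NIC RX namespace and TX
 * rules to the egress namespace.
 */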
1676 static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec,
1677 						     int type, u8 dir)
1678 {
1679 	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
1680 		return MLX5_FLOW_NAMESPACE_FDB;
1681 
1682 	if (dir == XFRM_DEV_OFFLOAD_IN)
1683 		return MLX5_FLOW_NAMESPACE_KERNEL;
1684 
1685 	return MLX5_FLOW_NAMESPACE_EGRESS;
1686 }
1687 
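/* Metadata register usage: on RX, val (the SA handle) is written to REG_B
 * and REG_C_2 so later stages can tell which SA decrypted the packet; pure
 * crypto offload additionally tags REG_C_4 with
 * MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD. On TX, val is the policy reqid and is
 * written to REG_C_4 for the SA table match (see setup_fte_reg_c4()).
 */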
1688 static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8 dir,
1689 			       struct mlx5_flow_act *flow_act)
1690 {
1691 	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
1692 	u8 action[3][MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
1693 	struct mlx5_core_dev *mdev = ipsec->mdev;
1694 	struct mlx5_modify_hdr *modify_hdr;
1695 	u8 num_of_actions = 1;
1696 
1697 	MLX5_SET(set_action_in, action[0], action_type, MLX5_ACTION_TYPE_SET);
1698 	switch (dir) {
1699 	case XFRM_DEV_OFFLOAD_IN:
1700 		MLX5_SET(set_action_in, action[0], field,
1701 			 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
1702 
1703 		num_of_actions++;
1704 		MLX5_SET(set_action_in, action[1], action_type, MLX5_ACTION_TYPE_SET);
1705 		MLX5_SET(set_action_in, action[1], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2);
1706 		MLX5_SET(set_action_in, action[1], data, val);
1707 		MLX5_SET(set_action_in, action[1], offset, 0);
1708 		MLX5_SET(set_action_in, action[1], length, 32);
1709 
1710 		if (type == XFRM_DEV_OFFLOAD_CRYPTO) {
1711 			num_of_actions++;
1712 			MLX5_SET(set_action_in, action[2], action_type,
1713 				 MLX5_ACTION_TYPE_SET);
1714 			MLX5_SET(set_action_in, action[2], field,
1715 				 MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
1716 			MLX5_SET(set_action_in, action[2], data,
1717 				 MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
1718 			MLX5_SET(set_action_in, action[2], offset, 0);
1719 			MLX5_SET(set_action_in, action[2], length, 32);
1720 		}
1721 		break;
1722 	case XFRM_DEV_OFFLOAD_OUT:
1723 		MLX5_SET(set_action_in, action[0], field,
1724 			 MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
1725 		break;
1726 	default:
1727 		return -EINVAL;
1728 	}
1729 
1730 	MLX5_SET(set_action_in, action[0], data, val);
1731 	MLX5_SET(set_action_in, action[0], offset, 0);
1732 	MLX5_SET(set_action_in, action[0], length, 32);
1733 
1734 	modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
1735 	if (IS_ERR(modify_hdr)) {
1736 		mlx5_core_err(mdev, "Failed to allocate modify_header %pe\n",
1737 			      modify_hdr);
1738 		return PTR_ERR(modify_hdr);
1739 	}
1740 
1741 	flow_act->modify_hdr = modify_hdr;
1742 	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1743 	return 0;
1744 }
1745 
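/* Tunnel mode reformat: RX only decapsulates back to plain L2, so an
 * Ethernet header template is enough; TX prepends the whole outer header,
 * so the buffer also carries an IPv4/IPv6 header and an ESP header (the
 * extra 8 bytes after the ESP header presumably reserve room for the IV).
 */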
1746 static int
1747 setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
1748 			  struct mlx5_accel_esp_xfrm_attrs *attrs,
1749 			  struct mlx5_pkt_reformat_params *reformat_params)
1750 {
1751 	struct ip_esp_hdr *esp_hdr;
1752 	struct ipv6hdr *ipv6hdr;
1753 	struct ethhdr *eth_hdr;
1754 	struct iphdr *iphdr;
1755 	char *reformatbf;
1756 	size_t bfflen;
1757 	void *hdr;
1758 
1759 	bfflen = sizeof(*eth_hdr);
1760 
1761 	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
1762 		bfflen += sizeof(*esp_hdr) + 8;
1763 
1764 		switch (attrs->addrs.family) {
1765 		case AF_INET:
1766 			bfflen += sizeof(*iphdr);
1767 			break;
1768 		case AF_INET6:
1769 			bfflen += sizeof(*ipv6hdr);
1770 			break;
1771 		default:
1772 			return -EINVAL;
1773 		}
1774 	}
1775 
1776 	reformatbf = kzalloc(bfflen, GFP_KERNEL);
1777 	if (!reformatbf)
1778 		return -ENOMEM;
1779 
1780 	eth_hdr = (struct ethhdr *)reformatbf;
1781 	switch (attrs->addrs.family) {
1782 	case AF_INET:
1783 		eth_hdr->h_proto = htons(ETH_P_IP);
1784 		break;
1785 	case AF_INET6:
1786 		eth_hdr->h_proto = htons(ETH_P_IPV6);
1787 		break;
1788 	default:
1789 		goto free_reformatbf;
1790 	}
1791 
1792 	ether_addr_copy(eth_hdr->h_dest, attrs->dmac);
1793 	ether_addr_copy(eth_hdr->h_source, attrs->smac);
1794 
1795 	switch (attrs->dir) {
1796 	case XFRM_DEV_OFFLOAD_IN:
1797 		reformat_params->type = MLX5_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2;
1798 		break;
1799 	case XFRM_DEV_OFFLOAD_OUT:
1800 		reformat_params->type = MLX5_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL;
1801 		reformat_params->param_0 = attrs->authsize;
1802 
1803 		hdr = reformatbf + sizeof(*eth_hdr);
1804 		switch (attrs->addrs.family) {
1805 		case AF_INET:
1806 			iphdr = (struct iphdr *)hdr;
1807 			memcpy(&iphdr->saddr, &attrs->addrs.saddr.a4, 4);
1808 			memcpy(&iphdr->daddr, &attrs->addrs.daddr.a4, 4);
1809 			iphdr->version = 4;
1810 			iphdr->ihl = 5;
1811 			iphdr->ttl = IPSEC_TUNNEL_DEFAULT_TTL;
1812 			iphdr->protocol = IPPROTO_ESP;
1813 			hdr += sizeof(*iphdr);
1814 			break;
1815 		case AF_INET6:
1816 			ipv6hdr = (struct ipv6hdr *)hdr;
1817 			memcpy(&ipv6hdr->saddr, &attrs->addrs.saddr.a6, 16);
1818 			memcpy(&ipv6hdr->daddr, &attrs->addrs.daddr.a6, 16);
1819 			ipv6hdr->nexthdr = IPPROTO_ESP;
1820 			ipv6hdr->version = 6;
1821 			ipv6hdr->hop_limit = IPSEC_TUNNEL_DEFAULT_TTL;
1822 			hdr += sizeof(*ipv6hdr);
1823 			break;
1824 		default:
1825 			goto free_reformatbf;
1826 		}
1827 
1828 		esp_hdr = (struct ip_esp_hdr *)hdr;
1829 		esp_hdr->spi = htonl(attrs->spi);
1830 		break;
1831 	default:
1832 		goto free_reformatbf;
1833 	}
1834 
1835 	reformat_params->size = bfflen;
1836 	reformat_params->data = reformatbf;
1837 	return 0;
1838 
1839 free_reformatbf:
1840 	kfree(reformatbf);
1841 	return -EINVAL;
1842 }
1843 
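/* Transport mode: choose the ESP add/del reformat type based on direction,
 * address family and UDP encapsulation (NAT-T).
 */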
1844 static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
1845 {
1846 	switch (attrs->dir) {
1847 	case XFRM_DEV_OFFLOAD_IN:
1848 		if (attrs->encap)
1849 			return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
1850 		return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
1851 	case XFRM_DEV_OFFLOAD_OUT:
1852 		if (attrs->addrs.family == AF_INET) {
1853 			if (attrs->encap)
1854 				return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
1855 			return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
1856 		}
1857 
1858 		if (attrs->encap)
1859 			return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6;
1860 		return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
1861 	default:
1862 		WARN_ON(true);
1863 	}
1864 
1865 	return -EINVAL;
1866 }
1867 
1868 static int
1869 setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
1870 			     struct mlx5_pkt_reformat_params *reformat_params)
1871 {
1872 	struct udphdr *udphdr;
1873 	char *reformatbf;
1874 	size_t bfflen;
1875 	__be32 spi;
1876 	void *hdr;
1877 
1878 	reformat_params->type = get_reformat_type(attrs);
1879 	if (reformat_params->type < 0)
1880 		return reformat_params->type;
1881 
1882 	switch (attrs->dir) {
1883 	case XFRM_DEV_OFFLOAD_IN:
1884 		break;
1885 	case XFRM_DEV_OFFLOAD_OUT:
1886 		bfflen = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE;
1887 		if (attrs->encap)
1888 			bfflen += sizeof(*udphdr);
1889 
1890 		reformatbf = kzalloc(bfflen, GFP_KERNEL);
1891 		if (!reformatbf)
1892 			return -ENOMEM;
1893 
1894 		hdr = reformatbf;
1895 		if (attrs->encap) {
1896 			udphdr = (struct udphdr *)reformatbf;
1897 			udphdr->source = attrs->sport;
1898 			udphdr->dest = attrs->dport;
1899 			hdr += sizeof(*udphdr);
1900 		}
1901 
1902 		/* convert to network format */
1903 		spi = htonl(attrs->spi);
1904 		memcpy(hdr, &spi, sizeof(spi));
1905 
1906 		reformat_params->param_0 = attrs->authsize;
1907 		reformat_params->size = bfflen;
1908 		reformat_params->data = reformatbf;
1909 		break;
1910 	default:
1911 		return -EINVAL;
1912 	}
1913 
1914 	return 0;
1915 }
1916 
1917 static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
1918 			      struct mlx5_accel_esp_xfrm_attrs *attrs,
1919 			      struct mlx5_flow_act *flow_act)
1920 {
1921 	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->type,
1922 								attrs->dir);
1923 	struct mlx5_pkt_reformat_params reformat_params = {};
1924 	struct mlx5_core_dev *mdev = ipsec->mdev;
1925 	struct mlx5_pkt_reformat *pkt_reformat;
1926 	int ret;
1927 
1928 	switch (attrs->mode) {
1929 	case XFRM_MODE_TRANSPORT:
1930 		ret = setup_pkt_transport_reformat(attrs, &reformat_params);
1931 		break;
1932 	case XFRM_MODE_TUNNEL:
1933 		ret = setup_pkt_tunnel_reformat(mdev, attrs, &reformat_params);
1934 		break;
1935 	default:
1936 		ret = -EINVAL;
1937 	}
1938 
1939 	if (ret)
1940 		return ret;
1941 
1942 	pkt_reformat =
1943 		mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
1944 	kfree(reformat_params.data);
1945 	if (IS_ERR(pkt_reformat))
1946 		return PTR_ERR(pkt_reformat);
1947 
1948 	flow_act->pkt_reformat = pkt_reformat;
1949 	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
1950 	return 0;
1951 }
1952 
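/* Packet offload SAs with an L4 selector: add a rule in the RX status
 * table that sends successfully decrypted packets of this SA
 * (ipsec_syndrome == 0, reg_c_4 == 0) to the SA selector table, where a
 * second rule matches the upper protocol and forwards to the policy table.
 * Packets missing there are apparently dropped and counted as SA selector
 * mismatches (see mlx5e_accel_ipsec_fs_read_stats()).
 */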
1953 static int rx_add_rule_sa_selector(struct mlx5e_ipsec_sa_entry *sa_entry,
1954 				   struct mlx5e_ipsec_rx *rx,
1955 				   struct upspec *upspec)
1956 {
1957 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
1958 	struct mlx5_core_dev *mdev = ipsec->mdev;
1959 	struct mlx5_flow_destination dest[2];
1960 	struct mlx5_flow_act flow_act = {};
1961 	struct mlx5_flow_handle *rule;
1962 	struct mlx5_flow_spec *spec;
1963 	int err = 0;
1964 
1965 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1966 	if (!spec)
1967 		return -ENOMEM;
1968 
1969 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1970 			 misc_parameters_2.ipsec_syndrome);
1971 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1972 			 misc_parameters_2.metadata_reg_c_4);
1973 	MLX5_SET(fte_match_param, spec->match_value,
1974 		 misc_parameters_2.ipsec_syndrome, 0);
1975 	MLX5_SET(fte_match_param, spec->match_value,
1976 		 misc_parameters_2.metadata_reg_c_4, 0);
1977 	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1978 
1979 	ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
1980 
1981 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1982 			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
1983 	flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
1984 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1985 	dest[0].ft = rx->ft.sa_sel;
1986 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1987 	dest[1].counter = rx->fc->cnt;
1988 
1989 	rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
1990 	if (IS_ERR(rule)) {
1991 		err = PTR_ERR(rule);
1992 		mlx5_core_err(mdev,
1993 			      "Failed to add ipsec rx pass rule, err=%d\n",
1994 			      err);
1995 		goto err_add_status_pass_rule;
1996 	}
1997 
1998 	sa_entry->ipsec_rule.status_pass = rule;
1999 
2000 	MLX5_SET(fte_match_param, spec->match_criteria,
2001 		 misc_parameters_2.ipsec_syndrome, 0);
2002 	MLX5_SET(fte_match_param, spec->match_criteria,
2003 		 misc_parameters_2.metadata_reg_c_4, 0);
2004 
2005 	setup_fte_upper_proto_match(spec, upspec);
2006 
2007 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2008 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2009 	dest[0].ft = rx->ft.pol;
2010 
2011 	rule = mlx5_add_flow_rules(rx->ft.sa_sel, spec, &flow_act, &dest[0], 1);
2012 	if (IS_ERR(rule)) {
2013 		err = PTR_ERR(rule);
2014 		mlx5_core_err(mdev,
2015 			      "Failed to add ipsec rx sa selector rule, err=%d\n",
2016 			      err);
2017 		goto err_add_sa_sel_rule;
2018 	}
2019 
2020 	sa_entry->ipsec_rule.sa_sel = rule;
2021 
2022 	kvfree(spec);
2023 	return 0;
2024 
2025 err_add_sa_sel_rule:
2026 	mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
2027 err_add_status_pass_rule:
2028 	kvfree(spec);
2029 	return err;
2030 }
2031 
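/* RX SA rule: match the SPI (plus addresses, ESP header and non-fragmented
 * bit), decrypt with the SA's IPsec object, count and forward to the
 * status table. Packet offload additionally sets up decap/reformat, the
 * replay and auth-trailer drop rules, and the optional SA selector above.
 */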
2032 static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
2033 {
2034 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
2035 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
2036 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
2037 	struct mlx5_flow_destination dest[2];
2038 	struct mlx5_flow_act flow_act = {};
2039 	struct mlx5_flow_handle *rule;
2040 	struct mlx5_flow_spec *spec;
2041 	struct mlx5e_ipsec_rx *rx;
2042 	struct mlx5_fc *counter;
2043 	int err = 0;
2044 
2045 	rx = rx_ft_get(mdev, ipsec, attrs->addrs.family, attrs->type);
2046 	if (IS_ERR(rx))
2047 		return PTR_ERR(rx);
2048 
2049 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2050 	if (!spec) {
2051 		err = -ENOMEM;
2052 		goto err_alloc;
2053 	}
2054 
2055 	if (attrs->addrs.family == AF_INET)
2056 		setup_fte_addr4(spec, &attrs->addrs);
2057 	else
2058 		setup_fte_addr6(spec, &attrs->addrs);
2059 
2060 	setup_fte_spi(spec, attrs->spi, attrs->encap);
2061 	if (!attrs->encap)
2062 		setup_fte_esp(spec);
2063 	setup_fte_no_frags(spec);
2064 
2065 	if (!attrs->drop) {
2066 		if (rx != ipsec->rx_esw)
2067 			err = setup_modify_header(ipsec, attrs->type,
2068 						  sa_entry->ipsec_obj_id | BIT(31),
2069 						  XFRM_DEV_OFFLOAD_IN, &flow_act);
2070 		else
2071 			err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);
2072 
2073 		if (err)
2074 			goto err_mod_header;
2075 	}
2076 
2077 	switch (attrs->type) {
2078 	case XFRM_DEV_OFFLOAD_PACKET:
2079 		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
2080 		if (err)
2081 			goto err_pkt_reformat;
2082 		break;
2083 	default:
2084 		break;
2085 	}
2086 
2087 	counter = mlx5_fc_create(mdev, true);
2088 	if (IS_ERR(counter)) {
2089 		err = PTR_ERR(counter);
2090 		goto err_add_cnt;
2091 	}
2092 	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
2093 	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
2094 	flow_act.flags |= FLOW_ACT_NO_APPEND;
2095 	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
2096 			   MLX5_FLOW_CONTEXT_ACTION_COUNT;
2097 	if (attrs->drop)
2098 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2099 	else
2100 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2101 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2102 	dest[0].ft = rx->ft.status;
2103 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
2104 	dest[1].counter = counter;
2105 	rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
2106 	if (IS_ERR(rule)) {
2107 		err = PTR_ERR(rule);
2108 		mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
2109 		goto err_add_flow;
2110 	}
2111 
2112 	if (attrs->upspec.proto && attrs->type == XFRM_DEV_OFFLOAD_PACKET) {
2113 		err = rx_add_rule_sa_selector(sa_entry, rx, &attrs->upspec);
2114 		if (err)
2115 			goto err_add_sa_sel;
2116 	}
2117 
2118 	if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
2119 		err = rx_add_rule_drop_replay(sa_entry, rx);
2120 	if (err)
2121 		goto err_add_replay;
2122 
2123 	err = rx_add_rule_drop_auth_trailer(sa_entry, rx);
2124 	if (err)
2125 		goto err_drop_reason;
2126 
2127 	kvfree(spec);
2128 
2129 	sa_entry->ipsec_rule.rule = rule;
2130 	sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
2131 	sa_entry->ipsec_rule.fc = counter;
2132 	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
2133 	return 0;
2134 
2135 err_drop_reason:
2136 	if (sa_entry->ipsec_rule.replay.rule) {
2137 		mlx5_del_flow_rules(sa_entry->ipsec_rule.replay.rule);
2138 		mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
2139 	}
2140 err_add_replay:
2141 	if (sa_entry->ipsec_rule.sa_sel) {
2142 		mlx5_del_flow_rules(sa_entry->ipsec_rule.sa_sel);
2143 		mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
2144 	}
2145 err_add_sa_sel:
2146 	mlx5_del_flow_rules(rule);
2147 err_add_flow:
2148 	mlx5_fc_destroy(mdev, counter);
2149 err_add_cnt:
2150 	if (flow_act.pkt_reformat)
2151 		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
2152 err_pkt_reformat:
2153 	if (flow_act.modify_hdr)
2154 		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
2155 err_mod_header:
2156 	kvfree(spec);
2157 err_alloc:
2158 	rx_ft_put(ipsec, attrs->addrs.family, attrs->type);
2159 	return err;
2160 }
2161 
2162 static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
2163 {
2164 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
2165 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
2166 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
2167 	struct mlx5_flow_destination dest[2];
2168 	struct mlx5_flow_act flow_act = {};
2169 	struct mlx5_flow_handle *rule;
2170 	struct mlx5_flow_spec *spec;
2171 	struct mlx5e_ipsec_tx *tx;
2172 	struct mlx5_fc *counter;
2173 	int err;
2174 
2175 	tx = tx_ft_get(mdev, ipsec, attrs->type);
2176 	if (IS_ERR(tx))
2177 		return PTR_ERR(tx);
2178 
2179 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2180 	if (!spec) {
2181 		err = -ENOMEM;
2182 		goto err_alloc;
2183 	}
2184 
2185 	setup_fte_no_frags(spec);
2186 	setup_fte_upper_proto_match(spec, &attrs->upspec);
2187 
2188 	switch (attrs->type) {
2189 	case XFRM_DEV_OFFLOAD_CRYPTO:
2190 		if (attrs->addrs.family == AF_INET)
2191 			setup_fte_addr4(spec, &attrs->addrs);
2192 		else
2193 			setup_fte_addr6(spec, &attrs->addrs);
2194 		setup_fte_spi(spec, attrs->spi, false);
2195 		setup_fte_esp(spec);
2196 		setup_fte_reg_a(spec);
2197 		break;
2198 	case XFRM_DEV_OFFLOAD_PACKET:
2199 		setup_fte_reg_c4(spec, attrs->reqid);
2200 		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
2201 		if (err)
2202 			goto err_pkt_reformat;
2203 		break;
2204 	default:
2205 		break;
2206 	}
2207 
2208 	counter = mlx5_fc_create(mdev, true);
2209 	if (IS_ERR(counter)) {
2210 		err = PTR_ERR(counter);
2211 		goto err_add_cnt;
2212 	}
2213 
2214 	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
2215 	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
2216 	flow_act.flags |= FLOW_ACT_NO_APPEND;
2217 	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
2218 			   MLX5_FLOW_CONTEXT_ACTION_COUNT;
2219 	if (attrs->drop)
2220 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2221 	else
2222 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2223 
2224 	dest[0].ft = tx->ft.status;
2225 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2226 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
2227 	dest[1].counter = counter;
2228 	rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2);
2229 	if (IS_ERR(rule)) {
2230 		err = PTR_ERR(rule);
2231 		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
2232 		goto err_add_flow;
2233 	}
2234 
2235 	kvfree(spec);
2236 	sa_entry->ipsec_rule.rule = rule;
2237 	sa_entry->ipsec_rule.fc = counter;
2238 	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
2239 	return 0;
2240 
2241 err_add_flow:
2242 	mlx5_fc_destroy(mdev, counter);
2243 err_add_cnt:
2244 	if (flow_act.pkt_reformat)
2245 		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
2246 err_pkt_reformat:
2247 	kvfree(spec);
2248 err_alloc:
2249 	tx_ft_put(ipsec, attrs->type);
2250 	return err;
2251 }
2252 
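/* TX policy rule: ALLOW policies with a reqid stamp reg_c_4 through a
 * modify header and forward to the SA table; BLOCK policies drop and
 * count.
 */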
2253 static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
2254 {
2255 	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
2256 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
2257 	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
2258 	struct mlx5_flow_destination dest[2] = {};
2259 	struct mlx5_flow_act flow_act = {};
2260 	struct mlx5_flow_handle *rule;
2261 	struct mlx5_flow_spec *spec;
2262 	struct mlx5_flow_table *ft;
2263 	struct mlx5e_ipsec_tx *tx;
2264 	int err, dstn = 0;
2265 
2266 	ft = tx_ft_get_policy(mdev, ipsec, attrs->prio, attrs->type);
2267 	if (IS_ERR(ft))
2268 		return PTR_ERR(ft);
2269 
2270 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2271 	if (!spec) {
2272 		err = -ENOMEM;
2273 		goto err_alloc;
2274 	}
2275 
2276 	tx = ipsec_tx(ipsec, attrs->type);
2277 	if (attrs->addrs.family == AF_INET)
2278 		setup_fte_addr4(spec, &attrs->addrs);
2279 	else
2280 		setup_fte_addr6(spec, &attrs->addrs);
2281 
2282 	setup_fte_no_frags(spec);
2283 	setup_fte_upper_proto_match(spec, &attrs->upspec);
2284 
2285 	switch (attrs->action) {
2286 	case XFRM_POLICY_ALLOW:
2287 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2288 		if (!attrs->reqid)
2289 			break;
2290 
2291 		err = setup_modify_header(ipsec, attrs->type, attrs->reqid,
2292 					  XFRM_DEV_OFFLOAD_OUT, &flow_act);
2293 		if (err)
2294 			goto err_mod_header;
2295 		break;
2296 	case XFRM_POLICY_BLOCK:
2297 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
2298 				   MLX5_FLOW_CONTEXT_ACTION_COUNT;
2299 		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
2300 		dest[dstn].counter = tx->fc->drop;
2301 		dstn++;
2302 		break;
2303 	default:
2304 		WARN_ON(true);
2305 		err = -EINVAL;
2306 		goto err_mod_header;
2307 	}
2308 
2309 	flow_act.flags |= FLOW_ACT_NO_APPEND;
2310 	if (tx == ipsec->tx_esw && tx->chains)
2311 		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
2312 	dest[dstn].ft = tx->ft.sa;
2313 	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2314 	dstn++;
2315 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
2316 	if (IS_ERR(rule)) {
2317 		err = PTR_ERR(rule);
2318 		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
2319 		goto err_action;
2320 	}
2321 
2322 	kvfree(spec);
2323 	pol_entry->ipsec_rule.rule = rule;
2324 	pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
2325 	return 0;
2326 
2327 err_action:
2328 	if (flow_act.modify_hdr)
2329 		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
2330 err_mod_header:
2331 	kvfree(spec);
2332 err_alloc:
2333 	tx_ft_put_policy(ipsec, attrs->prio, attrs->type);
2334 	return err;
2335 }
2336 
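/* RX policy rule: ALLOW forwards to the default RX destination, BLOCK
 * drops and counts; both match addresses, the non-fragmented bit and the
 * optional L4 selector.
 */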
2337 static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
2338 {
2339 	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
2340 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
2341 	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
2342 	struct mlx5_flow_destination dest[2];
2343 	struct mlx5_flow_act flow_act = {};
2344 	struct mlx5_flow_handle *rule;
2345 	struct mlx5_flow_spec *spec;
2346 	struct mlx5_flow_table *ft;
2347 	struct mlx5e_ipsec_rx *rx;
2348 	int err, dstn = 0;
2349 
2350 	ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->addrs.family,
2351 			      attrs->prio, attrs->type);
2352 	if (IS_ERR(ft))
2353 		return PTR_ERR(ft);
2354 
2355 	rx = ipsec_rx(pol_entry->ipsec, attrs->addrs.family, attrs->type);
2356 
2357 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2358 	if (!spec) {
2359 		err = -ENOMEM;
2360 		goto err_alloc;
2361 	}
2362 
2363 	if (attrs->addrs.family == AF_INET)
2364 		setup_fte_addr4(spec, &attrs->addrs);
2365 	else
2366 		setup_fte_addr6(spec, &attrs->addrs);
2367 
2368 	setup_fte_no_frags(spec);
2369 	setup_fte_upper_proto_match(spec, &attrs->upspec);
2370 
2371 	switch (attrs->action) {
2372 	case XFRM_POLICY_ALLOW:
2373 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2374 		break;
2375 	case XFRM_POLICY_BLOCK:
2376 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
2377 		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
2378 		dest[dstn].counter = rx->fc->drop;
2379 		dstn++;
2380 		break;
2381 	default:
2382 		WARN_ON(true);
2383 		err = -EINVAL;
2384 		goto err_action;
2385 	}
2386 
2387 	flow_act.flags |= FLOW_ACT_NO_APPEND;
2388 	if (rx == ipsec->rx_esw && rx->chains)
2389 		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
2390 	ipsec_rx_default_dest_get(ipsec, rx, &dest[dstn]);
2391 	dstn++;
2392 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
2393 	if (IS_ERR(rule)) {
2394 		err = PTR_ERR(rule);
2395 		mlx5_core_err(mdev, "Fail to add RX IPsec policy rule err=%d\n", err);
2396 		goto err_action;
2397 	}
2398 
2399 	kvfree(spec);
2400 	pol_entry->ipsec_rule.rule = rule;
2401 	return 0;
2402 
2403 err_action:
2404 	kvfree(spec);
2405 err_alloc:
2406 	rx_ft_put_policy(pol_entry->ipsec, attrs->addrs.family, attrs->prio,
2407 			 attrs->type);
2408 	return err;
2409 }
2410 
2411 static void ipsec_fs_destroy_single_counter(struct mlx5_core_dev *mdev,
2412 					    struct mlx5e_ipsec_fc *fc)
2413 {
2414 	mlx5_fc_destroy(mdev, fc->drop);
2415 	mlx5_fc_destroy(mdev, fc->cnt);
2416 	kfree(fc);
2417 }
2418 
2419 static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
2420 {
2421 	struct mlx5_core_dev *mdev = ipsec->mdev;
2422 
2423 	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
2424 	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
2425 	if (ipsec->is_uplink_rep) {
2426 		ipsec_fs_destroy_single_counter(mdev, ipsec->tx_esw->fc);
2427 		ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
2428 	}
2429 }
2430 
2431 static struct mlx5e_ipsec_fc *ipsec_fs_init_single_counter(struct mlx5_core_dev *mdev)
2432 {
2433 	struct mlx5e_ipsec_fc *fc;
2434 	struct mlx5_fc *counter;
2435 	int err;
2436 
2437 	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
2438 	if (!fc)
2439 		return ERR_PTR(-ENOMEM);
2440 
2441 	counter = mlx5_fc_create(mdev, false);
2442 	if (IS_ERR(counter)) {
2443 		err = PTR_ERR(counter);
2444 		goto err_cnt;
2445 	}
2446 	fc->cnt = counter;
2447 
2448 	counter = mlx5_fc_create(mdev, false);
2449 	if (IS_ERR(counter)) {
2450 		err = PTR_ERR(counter);
2451 		goto err_drop;
2452 	}
2453 	fc->drop = counter;
2454 
2455 	return fc;
2456 
2457 err_drop:
2458 	mlx5_fc_destroy(mdev, fc->cnt);
2459 err_cnt:
2460 	kfree(fc);
2461 	return ERR_PTR(err);
2462 }
2463 
2464 static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
2465 {
2466 	struct mlx5_core_dev *mdev = ipsec->mdev;
2467 	struct mlx5e_ipsec_fc *fc;
2468 	int err;
2469 
2470 	fc = ipsec_fs_init_single_counter(mdev);
2471 	if (IS_ERR(fc)) {
2472 		err = PTR_ERR(fc);
2473 		goto err_rx_cnt;
2474 	}
2475 	ipsec->rx_ipv4->fc = fc;
2476 
2477 	fc = ipsec_fs_init_single_counter(mdev);
2478 	if (IS_ERR(fc)) {
2479 		err = PTR_ERR(fc);
2480 		goto err_tx_cnt;
2481 	}
2482 	ipsec->tx->fc = fc;
2483 
2484 	if (ipsec->is_uplink_rep) {
2485 		fc = ipsec_fs_init_single_counter(mdev);
2486 		if (IS_ERR(fc)) {
2487 			err = PTR_ERR(fc);
2488 			goto err_rx_esw_cnt;
2489 		}
2490 		ipsec->rx_esw->fc = fc;
2491 
2492 		fc = ipsec_fs_init_single_counter(mdev);
2493 		if (IS_ERR(fc)) {
2494 			err = PTR_ERR(fc);
2495 			goto err_tx_esw_cnt;
2496 		}
2497 		ipsec->tx_esw->fc = fc;
2498 	}
2499 
2500 	/* Both IPv4 and IPv6 point to same flow counters struct. */
2501 	ipsec->rx_ipv6->fc = ipsec->rx_ipv4->fc;
2502 	return 0;
2503 
2504 err_tx_esw_cnt:
2505 	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
2506 err_rx_esw_cnt:
2507 	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
2508 err_tx_cnt:
2509 	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
2510 err_rx_cnt:
2511 	return err;
2512 }
2513 
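/* IPv4 and IPv6 RX share one counter pair (see ipsec_fs_init_counters()),
 * so querying rx_ipv4 covers both families; on the uplink representor the
 * eswitch counters are accumulated on top.
 */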
2514 void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
2515 {
2516 	struct mlx5_core_dev *mdev = priv->mdev;
2517 	struct mlx5e_ipsec *ipsec = priv->ipsec;
2518 	struct mlx5e_ipsec_hw_stats *stats;
2519 	struct mlx5e_ipsec_fc *fc;
2520 	u64 packets, bytes;
2521 
2522 	stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;
2523 
2524 	stats->ipsec_rx_pkts = 0;
2525 	stats->ipsec_rx_bytes = 0;
2526 	stats->ipsec_rx_drop_pkts = 0;
2527 	stats->ipsec_rx_drop_bytes = 0;
2528 	stats->ipsec_rx_drop_mismatch_sa_sel = 0;
2529 	stats->ipsec_tx_pkts = 0;
2530 	stats->ipsec_tx_bytes = 0;
2531 	stats->ipsec_tx_drop_pkts = 0;
2532 	stats->ipsec_tx_drop_bytes = 0;
2533 
2534 	fc = ipsec->rx_ipv4->fc;
2535 	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
2536 	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
2537 		      &stats->ipsec_rx_drop_bytes);
2538 	if (ipsec->rx_ipv4->sa_sel.fc)
2539 		mlx5_fc_query(mdev, ipsec->rx_ipv4->sa_sel.fc,
2540 			      &stats->ipsec_rx_drop_mismatch_sa_sel, &bytes);
2541 
2542 	fc = ipsec->tx->fc;
2543 	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
2544 	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
2545 		      &stats->ipsec_tx_drop_bytes);
2546 
2547 	if (ipsec->is_uplink_rep) {
2548 		fc = ipsec->rx_esw->fc;
2549 		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
2550 			stats->ipsec_rx_pkts += packets;
2551 			stats->ipsec_rx_bytes += bytes;
2552 		}
2553 
2554 		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
2555 			stats->ipsec_rx_drop_pkts += packets;
2556 			stats->ipsec_rx_drop_bytes += bytes;
2557 		}
2558 
2559 		fc = ipsec->tx_esw->fc;
2560 		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
2561 			stats->ipsec_tx_pkts += packets;
2562 			stats->ipsec_tx_bytes += bytes;
2563 		}
2564 
2565 		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
2566 			stats->ipsec_tx_drop_pkts += packets;
2567 			stats->ipsec_tx_drop_bytes += bytes;
2568 		}
2569 
2570 		if (ipsec->rx_esw->sa_sel.fc &&
2571 		    !mlx5_fc_query(mdev, ipsec->rx_esw->sa_sel.fc,
2572 				   &packets, &bytes))
2573 			stats->ipsec_rx_drop_mismatch_sa_sel += packets;
2574 	}
2575 }
2576 
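/* IPsec packet offload and TC offload are mutually exclusive:
 * num_block_ipsec (bumped by TC) blocks new IPsec rules, while
 * num_block_tc (bumped here) blocks new TC rules for as long as IPsec
 * rules exist. With an eswitch present, the check is done under the
 * eswitch lock.
 */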
2577 #ifdef CONFIG_MLX5_ESWITCH
2578 static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
2579 {
2580 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
2581 	int err = 0;
2582 
2583 	if (esw) {
2584 		err = mlx5_esw_lock(esw);
2585 		if (err)
2586 			return err;
2587 	}
2588 
2589 	if (mdev->num_block_ipsec) {
2590 		err = -EBUSY;
2591 		goto unlock;
2592 	}
2593 
2594 	mdev->num_block_tc++;
2595 
2596 unlock:
2597 	if (esw)
2598 		mlx5_esw_unlock(esw);
2599 
2600 	return err;
2601 }
2602 #else
2603 static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
2604 {
2605 	if (mdev->num_block_ipsec)
2606 		return -EBUSY;
2607 
2608 	mdev->num_block_tc++;
2609 	return 0;
2610 }
2611 #endif
2612 
2613 static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev)
2614 {
2615 	mdev->num_block_tc--;
2616 }
2617 
2618 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
2619 {
2620 	int err;
2621 
2622 	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET) {
2623 		err = mlx5e_ipsec_block_tc_offload(sa_entry->ipsec->mdev);
2624 		if (err)
2625 			return err;
2626 	}
2627 
2628 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
2629 		err = tx_add_rule(sa_entry);
2630 	else
2631 		err = rx_add_rule(sa_entry);
2632 
2633 	if (err)
2634 		goto err_out;
2635 
2636 	return 0;
2637 
2638 err_out:
2639 	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
2640 		mlx5e_ipsec_unblock_tc_offload(sa_entry->ipsec->mdev);
2641 	return err;
2642 }
2643 
2644 void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
2645 {
2646 	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
2647 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
2648 
2649 	mlx5_del_flow_rules(ipsec_rule->rule);
2650 	mlx5_fc_destroy(mdev, ipsec_rule->fc);
2651 	if (ipsec_rule->pkt_reformat)
2652 		mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
2653 
2654 	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
2655 		mlx5e_ipsec_unblock_tc_offload(mdev);
2656 
2657 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
2658 		tx_ft_put(sa_entry->ipsec, sa_entry->attrs.type);
2659 		return;
2660 	}
2661 
2662 	if (ipsec_rule->modify_hdr)
2663 		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
2664 
2665 	mlx5_del_flow_rules(ipsec_rule->trailer.rule);
2666 	mlx5_fc_destroy(mdev, ipsec_rule->trailer.fc);
2667 
2668 	mlx5_del_flow_rules(ipsec_rule->auth.rule);
2669 	mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
2670 
2671 	if (ipsec_rule->sa_sel) {
2672 		mlx5_del_flow_rules(ipsec_rule->sa_sel);
2673 		mlx5_del_flow_rules(ipsec_rule->status_pass);
2674 	}
2675 
2676 	if (ipsec_rule->replay.rule) {
2677 		mlx5_del_flow_rules(ipsec_rule->replay.rule);
2678 		mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
2679 	}
2680 	mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
2681 	rx_ft_put(sa_entry->ipsec, sa_entry->attrs.addrs.family,
2682 		  sa_entry->attrs.type);
2683 }
2684 
2685 int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
2686 {
2687 	int err;
2688 
2689 	err = mlx5e_ipsec_block_tc_offload(pol_entry->ipsec->mdev);
2690 	if (err)
2691 		return err;
2692 
2693 	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
2694 		err = tx_add_policy(pol_entry);
2695 	else
2696 		err = rx_add_policy(pol_entry);
2697 
2698 	if (err)
2699 		goto err_out;
2700 
2701 	return 0;
2702 
2703 err_out:
2704 	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
2705 	return err;
2706 }
2707 
2708 void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
2709 {
2710 	struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
2711 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
2712 
2713 	mlx5_del_flow_rules(ipsec_rule->rule);
2714 
2715 	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
2716 
2717 	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
2718 		rx_ft_put_policy(pol_entry->ipsec,
2719 				 pol_entry->attrs.addrs.family,
2720 				 pol_entry->attrs.prio, pol_entry->attrs.type);
2721 		return;
2722 	}
2723 
2724 	if (ipsec_rule->modify_hdr)
2725 		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
2726 
2727 	tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio, pol_entry->attrs.type);
2728 }
2729 
2730 void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
2731 {
2732 	if (!ipsec->tx)
2733 		return;
2734 
2735 	if (ipsec->roce)
2736 		mlx5_ipsec_fs_roce_cleanup(ipsec->roce);
2737 
2738 	ipsec_fs_destroy_counters(ipsec);
2739 	mutex_destroy(&ipsec->tx->ft.mutex);
2740 	WARN_ON(ipsec->tx->ft.refcnt);
2741 	kfree(ipsec->tx);
2742 
2743 	mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
2744 	WARN_ON(ipsec->rx_ipv4->ft.refcnt);
2745 	kfree(ipsec->rx_ipv4);
2746 
2747 	mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
2748 	WARN_ON(ipsec->rx_ipv6->ft.refcnt);
2749 	kfree(ipsec->rx_ipv6);
2750 
2751 	if (ipsec->is_uplink_rep) {
2752 		xa_destroy(&ipsec->ipsec_obj_id_map);
2753 
2754 		mutex_destroy(&ipsec->tx_esw->ft.mutex);
2755 		WARN_ON(ipsec->tx_esw->ft.refcnt);
2756 		kfree(ipsec->tx_esw);
2757 
2758 		mutex_destroy(&ipsec->rx_esw->ft.mutex);
2759 		WARN_ON(ipsec->rx_esw->ft.refcnt);
2760 		kfree(ipsec->rx_esw);
2761 	}
2762 }
2763 
2764 int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec,
2765 			      struct mlx5_devcom_comp_dev **devcom)
2766 {
2767 	struct mlx5_core_dev *mdev = ipsec->mdev;
2768 	struct mlx5_flow_namespace *ns, *ns_esw;
2769 	int err = -ENOMEM;
2770 
2771 	ns = mlx5_get_flow_namespace(ipsec->mdev,
2772 				     MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
2773 	if (!ns)
2774 		return -EOPNOTSUPP;
2775 
2776 	if (ipsec->is_uplink_rep) {
2777 		ns_esw = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_FDB);
2778 		if (!ns_esw)
2779 			return -EOPNOTSUPP;
2780 
2781 		ipsec->tx_esw = kzalloc(sizeof(*ipsec->tx_esw), GFP_KERNEL);
2782 		if (!ipsec->tx_esw)
2783 			return -ENOMEM;
2784 
2785 		ipsec->rx_esw = kzalloc(sizeof(*ipsec->rx_esw), GFP_KERNEL);
2786 		if (!ipsec->rx_esw)
2787 			goto err_rx_esw;
2788 	}
2789 
2790 	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
2791 	if (!ipsec->tx)
2792 		goto err_tx;
2793 
2794 	ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
2795 	if (!ipsec->rx_ipv4)
2796 		goto err_rx_ipv4;
2797 
2798 	ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
2799 	if (!ipsec->rx_ipv6)
2800 		goto err_rx_ipv6;
2801 
2802 	err = ipsec_fs_init_counters(ipsec);
2803 	if (err)
2804 		goto err_counters;
2805 
2806 	mutex_init(&ipsec->tx->ft.mutex);
2807 	mutex_init(&ipsec->rx_ipv4->ft.mutex);
2808 	mutex_init(&ipsec->rx_ipv6->ft.mutex);
2809 	ipsec->tx->ns = ns;
2810 
2811 	if (ipsec->is_uplink_rep) {
2812 		mutex_init(&ipsec->tx_esw->ft.mutex);
2813 		mutex_init(&ipsec->rx_esw->ft.mutex);
2814 		ipsec->tx_esw->ns = ns_esw;
2815 		xa_init_flags(&ipsec->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
2816 	} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
2817 		ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom);
2818 	} else {
2819 		mlx5_core_warn(mdev, "IPsec was initialized without RoCE support\n");
2820 	}
2821 
2822 	return 0;
2823 
2824 err_counters:
2825 	kfree(ipsec->rx_ipv6);
2826 err_rx_ipv6:
2827 	kfree(ipsec->rx_ipv4);
2828 err_rx_ipv4:
2829 	kfree(ipsec->tx);
2830 err_tx:
2831 	kfree(ipsec->rx_esw);
2832 err_rx_esw:
2833 	kfree(ipsec->tx_esw);
2834 	return err;
2835 }
2836 
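/* Modify is make-before-break: install new rules through a shadow copy of
 * the SA entry first and delete the old ones only afterwards, so traffic
 * keeps flowing while the SA is updated.
 */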
2837 void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
2838 {
2839 	struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
2840 	int err;
2841 
2842 	memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
2843 	memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));
2844 
2845 	err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
2846 	if (err)
2847 		return;
2848 
2849 	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
2850 	memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
2851 }
2852 
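/* Tunnel mode requires the eswitch encap mode to stay fixed: determine
 * whether this SA lives in the FDB or in the legacy NIC tables and try to
 * block encap mode changes accordingly.
 */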
2853 bool mlx5e_ipsec_fs_tunnel_allowed(struct mlx5e_ipsec_sa_entry *sa_entry)
2854 {
2855 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
2856 	struct xfrm_state *x = sa_entry->x;
2857 	bool from_fdb;
2858 
2859 	if (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) {
2860 		struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, x->xso.type);
2861 
2862 		from_fdb = (tx == ipsec->tx_esw);
2863 	} else {
2864 		struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, x->props.family,
2865 						     x->xso.type);
2866 
2867 		from_fdb = (rx == ipsec->rx_esw);
2868 	}
2869 
2870 	return mlx5_eswitch_block_encap(ipsec->mdev, from_fdb);
2871 }
2872 
2873 void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
2874 				  struct mlx5e_priv *master_priv)
2875 {
2876 	struct mlx5e_ipsec_mpv_work *work;
2877 
2878 	reinit_completion(&master_priv->ipsec->comp);
2879 
2880 	if (!slave_priv->ipsec) {
2881 		complete(&master_priv->ipsec->comp);
2882 		return;
2883 	}
2884 
2885 	work = &slave_priv->ipsec->mpv_work;
2886 
2887 	INIT_WORK(&work->work, ipsec_mpv_work_handler);
2888 	work->event = event;
2889 	work->slave_priv = slave_priv;
2890 	work->master_priv = master_priv;
2891 	queue_work(slave_priv->ipsec->wq, &work->work);
2892 }
2893 
2894 void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
2895 {
2896 	if (!priv->ipsec)
2897 		return; /* IPsec not supported */
2898 
2899 	mlx5_devcom_send_event(priv->devcom, event, event, priv);
2900 	wait_for_completion(&priv->ipsec->comp);
2901 }
2902