xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c (revision 2c1ed907520c50326b8f604907a8478b27881a2e)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
3 
4 #include <linux/netdevice.h>
5 #include "en.h"
6 #include "en/fs.h"
7 #include "eswitch.h"
8 #include "ipsec.h"
9 #include "fs_core.h"
10 #include "lib/ipsec_fs_roce.h"
11 #include "lib/fs_chains.h"
12 #include "esw/ipsec_fs.h"
13 #include "en_rep.h"
14 
15 #define NUM_IPSEC_FTE BIT(15)
16 #define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
17 #define IPSEC_TUNNEL_DEFAULT_TTL 0x40
18 
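/*
 * Pair of flow counters attached to each RX/TX pipeline: @cnt counts
 * packets handled by the IPsec tables, @drop counts packets they dropped
 * (both are read out by the IPsec statistics code).
 */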
19 struct mlx5e_ipsec_fc {
20 	struct mlx5_fc *cnt;
21 	struct mlx5_fc *drop;
22 };
23 
24 struct mlx5e_ipsec_tx {
25 	struct mlx5e_ipsec_ft ft;
26 	struct mlx5e_ipsec_miss pol;
27 	struct mlx5e_ipsec_miss sa;
28 	struct mlx5e_ipsec_rule status;
29 	struct mlx5_flow_namespace *ns;
30 	struct mlx5e_ipsec_fc *fc;
31 	struct mlx5_fs_chains *chains;
32 	u8 allow_tunnel_mode : 1;
33 };
34 
35 struct mlx5e_ipsec_status_checks {
36 	struct mlx5_flow_group *drop_all_group;
37 	struct mlx5e_ipsec_drop all;
38 };
39 
40 struct mlx5e_ipsec_rx {
41 	struct mlx5e_ipsec_ft ft;
42 	struct mlx5e_ipsec_miss pol;
43 	struct mlx5e_ipsec_miss sa;
44 	struct mlx5e_ipsec_rule status;
45 	struct mlx5e_ipsec_status_checks status_drops;
46 	struct mlx5e_ipsec_fc *fc;
47 	struct mlx5_fs_chains *chains;
48 	u8 allow_tunnel_mode : 1;
49 };
50 
51 /* IPsec RX flow steering */
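/*
 * RX pipeline: the TTC table steers ESP traffic to ft.pol (policy lookup),
 * whose miss rule forwards to ft.sa (SA match + decrypt), which in turn
 * forwards to ft.status, where the HW decryption syndrome is checked.
 * Packets that pass every check return to the TTC default destination
 * (or to the RoCE tables when RoCE IPsec is active).
 */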
52 static enum mlx5_traffic_types family2tt(u32 family)
53 {
54 	if (family == AF_INET)
55 		return MLX5_TT_IPV4_IPSEC_ESP;
56 	return MLX5_TT_IPV6_IPSEC_ESP;
57 }
58 
59 static struct mlx5e_ipsec_rx *ipsec_rx(struct mlx5e_ipsec *ipsec, u32 family, int type)
60 {
61 	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
62 		return ipsec->rx_esw;
63 
64 	if (family == AF_INET)
65 		return ipsec->rx_ipv4;
66 
67 	return ipsec->rx_ipv6;
68 }
69 
70 static struct mlx5e_ipsec_tx *ipsec_tx(struct mlx5e_ipsec *ipsec, int type)
71 {
72 	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
73 		return ipsec->tx_esw;
74 
75 	return ipsec->tx;
76 }
77 
78 static struct mlx5_fs_chains *
79 ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft,
80 		    enum mlx5_flow_namespace_type ns, int base_prio,
81 		    int base_level, struct mlx5_flow_table **root_ft)
82 {
83 	struct mlx5_chains_attr attr = {};
84 	struct mlx5_fs_chains *chains;
85 	struct mlx5_flow_table *ft;
86 	int err;
87 
88 	attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
89 		     MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
90 	attr.max_grp_num = 2;
91 	attr.default_ft = miss_ft;
92 	attr.ns = ns;
93 	attr.fs_base_prio = base_prio;
94 	attr.fs_base_level = base_level;
95 	chains = mlx5_chains_create(mdev, &attr);
96 	if (IS_ERR(chains))
97 		return chains;
98 
99 	/* Create chain 0, prio 1, level 0 to connect chains to prev in fs_core */
100 	ft = mlx5_chains_get_table(chains, 0, 1, 0);
101 	if (IS_ERR(ft)) {
102 		err = PTR_ERR(ft);
103 		goto err_chains_get;
104 	}
105 
106 	*root_ft = ft;
107 	return chains;
108 
109 err_chains_get:
110 	mlx5_chains_destroy(chains);
111 	return ERR_PTR(err);
112 }
113 
114 static void ipsec_chains_destroy(struct mlx5_fs_chains *chains)
115 {
116 	mlx5_chains_put_table(chains, 0, 1, 0);
117 	mlx5_chains_destroy(chains);
118 }
119 
120 static struct mlx5_flow_table *
121 ipsec_chains_get_table(struct mlx5_fs_chains *chains, u32 prio)
122 {
123 	return mlx5_chains_get_table(chains, 0, prio + 1, 0);
124 }
125 
126 static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
127 {
128 	mlx5_chains_put_table(chains, 0, prio + 1, 0);
129 }
130 
131 static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
132 					       int level, int prio,
133 					       int max_num_groups, u32 flags)
134 {
135 	struct mlx5_flow_table_attr ft_attr = {};
136 
137 	ft_attr.autogroup.num_reserved_entries = 1;
138 	ft_attr.autogroup.max_num_groups = max_num_groups;
139 	ft_attr.max_fte = NUM_IPSEC_FTE;
140 	ft_attr.level = level;
141 	ft_attr.prio = prio;
142 	ft_attr.flags = flags;
143 
144 	return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
145 }
146 
147 static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
148 					 struct mlx5e_ipsec_rx *rx)
149 {
150 	mlx5_del_flow_rules(rx->status_drops.all.rule);
151 	mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc);
152 	mlx5_destroy_flow_group(rx->status_drops.drop_all_group);
153 }
154 
155 static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
156 					 struct mlx5e_ipsec_rx *rx)
157 {
158 	mlx5_del_flow_rules(rx->status.rule);
159 
160 	if (rx != ipsec->rx_esw)
161 		return;
162 
163 #ifdef CONFIG_MLX5_ESWITCH
164 	mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
165 #endif
166 }
167 
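/*
 * Per-SA drop rules for HW verification failures: syndrome 1 (bad
 * authentication) and syndrome 2 (bad trailer) each get their own rule
 * and counter, with the SA identified by the object id previously
 * written to metadata_reg_c_2.
 */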
168 static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
169 					 struct mlx5e_ipsec_rx *rx)
170 {
171 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
172 	struct mlx5_flow_table *ft = rx->ft.status;
173 	struct mlx5_core_dev *mdev = ipsec->mdev;
174 	struct mlx5_flow_destination dest = {};
175 	struct mlx5_flow_act flow_act = {};
176 	struct mlx5_flow_handle *rule;
177 	struct mlx5_fc *flow_counter;
178 	struct mlx5_flow_spec *spec;
179 	int err;
180 
181 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
182 	if (!spec)
183 		return -ENOMEM;
184 
185 	flow_counter = mlx5_fc_create(mdev, true);
186 	if (IS_ERR(flow_counter)) {
187 		err = PTR_ERR(flow_counter);
188 		mlx5_core_err(mdev,
189 			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
190 		goto err_cnt;
191 	}
192 	sa_entry->ipsec_rule.auth.fc = flow_counter;
193 
194 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
195 	flow_act.flags = FLOW_ACT_NO_APPEND;
196 	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
197 	dest.counter = flow_counter;
198 	if (rx == ipsec->rx_esw)
199 		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
200 
201 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
202 	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
203 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
204 	MLX5_SET(fte_match_param, spec->match_value,
205 		 misc_parameters_2.metadata_reg_c_2,
206 		 sa_entry->ipsec_obj_id | BIT(31));
207 	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
208 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
209 	if (IS_ERR(rule)) {
210 		err = PTR_ERR(rule);
211 		mlx5_core_err(mdev,
212 			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
213 		goto err_rule;
214 	}
215 	sa_entry->ipsec_rule.auth.rule = rule;
216 
217 	flow_counter = mlx5_fc_create(mdev, true);
218 	if (IS_ERR(flow_counter)) {
219 		err = PTR_ERR(flow_counter);
220 		mlx5_core_err(mdev,
221 			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
222 		goto err_cnt_2;
223 	}
224 	sa_entry->ipsec_rule.trailer.fc = flow_counter;
225 
226 	dest.counter = flow_counter;
227 	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
228 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
229 	if (IS_ERR(rule)) {
230 		err = PTR_ERR(rule);
231 		mlx5_core_err(mdev,
232 			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
233 		goto err_rule_2;
234 	}
235 	sa_entry->ipsec_rule.trailer.rule = rule;
236 
237 	kvfree(spec);
238 	return 0;
239 
240 err_rule_2:
241 	mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.trailer.fc);
242 err_cnt_2:
243 	mlx5_del_flow_rules(sa_entry->ipsec_rule.auth.rule);
244 err_rule:
245 	mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.auth.fc);
246 err_cnt:
247 	kvfree(spec);
248 	return err;
249 }
250 
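/*
 * Per-SA drop rule for replay failures: metadata_reg_c_4 == 1 marks a
 * packet that failed the replay-window check, and reg_c_2 carries the
 * SA object id so the drops can be counted per SA.
 */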
251 static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx)
252 {
253 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
254 	struct mlx5_flow_table *ft = rx->ft.status;
255 	struct mlx5_core_dev *mdev = ipsec->mdev;
256 	struct mlx5_flow_destination dest = {};
257 	struct mlx5_flow_act flow_act = {};
258 	struct mlx5_flow_handle *rule;
259 	struct mlx5_fc *flow_counter;
260 	struct mlx5_flow_spec *spec;
261 	int err;
262 
263 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
264 	if (!spec)
265 		return -ENOMEM;
266 
267 	flow_counter = mlx5_fc_create(mdev, true);
268 	if (IS_ERR(flow_counter)) {
269 		err = PTR_ERR(flow_counter);
270 		mlx5_core_err(mdev,
271 			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
272 		goto err_cnt;
273 	}
274 
275 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
276 	flow_act.flags = FLOW_ACT_NO_APPEND;
277 	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
278 	dest.counter = flow_counter;
279 	if (rx == ipsec->rx_esw)
280 		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
281 
282 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
283 	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
284 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
285 	MLX5_SET(fte_match_param, spec->match_value,  misc_parameters_2.metadata_reg_c_2,
286 		 sa_entry->ipsec_obj_id | BIT(31));
287 	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
288 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
289 	if (IS_ERR(rule)) {
290 		err = PTR_ERR(rule);
291 		mlx5_core_err(mdev,
292 			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
293 		goto err_rule;
294 	}
295 
296 	sa_entry->ipsec_rule.replay.rule = rule;
297 	sa_entry->ipsec_rule.replay.fc = flow_counter;
298 
299 	kvfree(spec);
300 	return 0;
301 
302 err_rule:
303 	mlx5_fc_destroy(mdev, flow_counter);
304 err_cnt:
305 	kvfree(spec);
306 	return err;
307 }
308 
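/*
 * Catch-all drop rule in the reserved last entry of the status table:
 * anything that reaches the end of the table without hitting the pass
 * rule or a per-SA drop rule is counted and dropped.
 */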
309 static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
310 					   struct mlx5e_ipsec_rx *rx)
311 {
312 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
313 	struct mlx5_flow_table *ft = rx->ft.status;
314 	struct mlx5_core_dev *mdev = ipsec->mdev;
315 	struct mlx5_flow_destination dest = {};
316 	struct mlx5_flow_act flow_act = {};
317 	struct mlx5_flow_handle *rule;
318 	struct mlx5_fc *flow_counter;
319 	struct mlx5_flow_spec *spec;
320 	struct mlx5_flow_group *g;
321 	u32 *flow_group_in;
322 	int err = 0;
323 
324 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
325 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
326 	if (!flow_group_in || !spec) {
327 		err = -ENOMEM;
328 		goto err_out;
329 	}
330 
331 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
332 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
333 	g = mlx5_create_flow_group(ft, flow_group_in);
334 	if (IS_ERR(g)) {
335 		err = PTR_ERR(g);
336 		mlx5_core_err(mdev,
337 			      "Failed to add ipsec rx status drop flow group, err=%d\n", err);
338 		goto err_out;
339 	}
340 
341 	flow_counter = mlx5_fc_create(mdev, false);
342 	if (IS_ERR(flow_counter)) {
343 		err = PTR_ERR(flow_counter);
344 		mlx5_core_err(mdev,
345 			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
346 		goto err_cnt;
347 	}
348 
349 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
350 	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
351 	dest.counter = flow_counter;
352 	if (rx == ipsec->rx_esw)
353 		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
354 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
355 	if (IS_ERR(rule)) {
356 		err = PTR_ERR(rule);
357 		mlx5_core_err(mdev,
358 			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
359 		goto err_rule;
360 	}
361 
362 	rx->status_drops.drop_all_group = g;
363 	rx->status_drops.all.rule = rule;
364 	rx->status_drops.all.fc = flow_counter;
365 
366 	kvfree(flow_group_in);
367 	kvfree(spec);
368 	return 0;
369 
370 err_rule:
371 	mlx5_fc_destroy(mdev, flow_counter);
372 err_cnt:
373 	mlx5_destroy_flow_group(g);
374 err_out:
375 	kvfree(flow_group_in);
376 	kvfree(spec);
377 	return err;
378 }
379 
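/*
 * Pass rule: packets with ipsec_syndrome == 0 and reg_c_4 == 0 cleared
 * all HW checks and are forwarded to the next destination, bumping the
 * rx counter on the way.
 */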
380 static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
381 				       struct mlx5e_ipsec_rx *rx,
382 				       struct mlx5_flow_destination *dest)
383 {
384 	struct mlx5_flow_act flow_act = {};
385 	struct mlx5_flow_handle *rule;
386 	struct mlx5_flow_spec *spec;
387 	int err;
388 
389 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
390 	if (!spec)
391 		return -ENOMEM;
392 
393 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
394 			 misc_parameters_2.ipsec_syndrome);
395 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
396 			 misc_parameters_2.metadata_reg_c_4);
397 	MLX5_SET(fte_match_param, spec->match_value,
398 		 misc_parameters_2.ipsec_syndrome, 0);
399 	MLX5_SET(fte_match_param, spec->match_value,
400 		 misc_parameters_2.metadata_reg_c_4, 0);
401 	if (rx == ipsec->rx_esw)
402 		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
403 	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
404 	flow_act.flags = FLOW_ACT_NO_APPEND;
405 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
406 			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
407 	rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
408 	if (IS_ERR(rule)) {
409 		err = PTR_ERR(rule);
410 		mlx5_core_warn(ipsec->mdev,
411 			       "Failed to add ipsec rx status pass rule, err=%d\n", err);
412 		goto err_rule;
413 	}
414 
415 	rx->status.rule = rule;
416 	kvfree(spec);
417 	return 0;
418 
419 err_rule:
420 	kvfree(spec);
421 	return err;
422 }
423 
424 static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
425 					 struct mlx5e_ipsec_rx *rx)
426 {
427 	ipsec_rx_status_pass_destroy(ipsec, rx);
428 	ipsec_rx_status_drop_destroy(ipsec, rx);
429 }
430 
431 static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
432 				       struct mlx5e_ipsec_rx *rx,
433 				       struct mlx5_flow_destination *dest)
434 {
435 	int err;
436 
437 	err = ipsec_rx_status_drop_all_create(ipsec, rx);
438 	if (err)
439 		return err;
440 
441 	err = ipsec_rx_status_pass_create(ipsec, rx, dest);
442 	if (err)
443 		goto err_pass_create;
444 
445 	return 0;
446 
447 err_pass_create:
448 	ipsec_rx_status_drop_destroy(ipsec, rx);
449 	return err;
450 }
451 
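/*
 * Install a default (miss) rule in the reserved last entry of @ft: a
 * one-entry flow group plus an empty-match rule that forwards anything
 * the table did not otherwise match to @dest.
 */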
452 static int ipsec_miss_create(struct mlx5_core_dev *mdev,
453 			     struct mlx5_flow_table *ft,
454 			     struct mlx5e_ipsec_miss *miss,
455 			     struct mlx5_flow_destination *dest)
456 {
457 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
458 	MLX5_DECLARE_FLOW_ACT(flow_act);
459 	struct mlx5_flow_spec *spec;
460 	u32 *flow_group_in;
461 	int err = 0;
462 
463 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
464 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
465 	if (!flow_group_in || !spec) {
466 		err = -ENOMEM;
467 		goto out;
468 	}
469 
470 	/* Create miss_group */
471 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
472 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
473 	miss->group = mlx5_create_flow_group(ft, flow_group_in);
474 	if (IS_ERR(miss->group)) {
475 		err = PTR_ERR(miss->group);
476 		mlx5_core_err(mdev, "failed to create IPsec miss_group err=%d\n",
477 			      err);
478 		goto out;
479 	}
480 
481 	/* Create miss rule */
482 	miss->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
483 	if (IS_ERR(miss->rule)) {
484 		mlx5_destroy_flow_group(miss->group);
485 		err = PTR_ERR(miss->rule);
486 		mlx5_core_err(mdev, "failed to create IPsec miss_rule err=%d\n",
487 			      err);
488 		goto out;
489 	}
490 out:
491 	kvfree(flow_group_in);
492 	kvfree(spec);
493 	return err;
494 }
495 
496 static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
497 {
498 	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
499 	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
500 	struct mlx5_flow_destination old_dest, new_dest;
501 
502 	old_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
503 					     family2tt(family));
504 
505 	mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, ns, &old_dest, family,
506 				     MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, MLX5E_NIC_PRIO);
507 
508 	new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
509 	new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
510 	mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
511 	mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
512 }
513 
514 static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
515 {
516 	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
517 	struct mlx5_flow_destination old_dest, new_dest;
518 
519 	old_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
520 	old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
521 	new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
522 					     family2tt(family));
523 	mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
524 	mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
525 
526 	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev);
527 }
528 
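/*
 * Multi-port vHCA devcom event handler: when IPsec on the master device
 * comes up or goes down, mirror the RoCE TX/RX table hookup on the slave
 * device for whichever pipelines are currently in use, then signal
 * completion back to the master.
 */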
529 static void ipsec_mpv_work_handler(struct work_struct *_work)
530 {
531 	struct mlx5e_ipsec_mpv_work *work = container_of(_work, struct mlx5e_ipsec_mpv_work, work);
532 	struct mlx5e_ipsec *ipsec = work->slave_priv->ipsec;
533 
534 	switch (work->event) {
535 	case MPV_DEVCOM_IPSEC_MASTER_UP:
536 		mutex_lock(&ipsec->tx->ft.mutex);
537 		if (ipsec->tx->ft.refcnt)
538 			mlx5_ipsec_fs_roce_tx_create(ipsec->mdev, ipsec->roce, ipsec->tx->ft.pol,
539 						     true);
540 		mutex_unlock(&ipsec->tx->ft.mutex);
541 
542 		mutex_lock(&ipsec->rx_ipv4->ft.mutex);
543 		if (ipsec->rx_ipv4->ft.refcnt)
544 			handle_ipsec_rx_bringup(ipsec, AF_INET);
545 		mutex_unlock(&ipsec->rx_ipv4->ft.mutex);
546 
547 		mutex_lock(&ipsec->rx_ipv6->ft.mutex);
548 		if (ipsec->rx_ipv6->ft.refcnt)
549 			handle_ipsec_rx_bringup(ipsec, AF_INET6);
550 		mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
551 		break;
552 	case MPV_DEVCOM_IPSEC_MASTER_DOWN:
553 		mutex_lock(&ipsec->tx->ft.mutex);
554 		if (ipsec->tx->ft.refcnt)
555 			mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce, ipsec->mdev);
556 		mutex_unlock(&ipsec->tx->ft.mutex);
557 
558 		mutex_lock(&ipsec->rx_ipv4->ft.mutex);
559 		if (ipsec->rx_ipv4->ft.refcnt)
560 			handle_ipsec_rx_cleanup(ipsec, AF_INET);
561 		mutex_unlock(&ipsec->rx_ipv4->ft.mutex);
562 
563 		mutex_lock(&ipsec->rx_ipv6->ft.mutex);
564 		if (ipsec->rx_ipv6->ft.refcnt)
565 			handle_ipsec_rx_cleanup(ipsec, AF_INET6);
566 		mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
567 		break;
568 	}
569 
570 	complete(&work->master_priv->ipsec->comp);
571 }
572 
573 static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
574 {
575 	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
576 
577 	mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
578 }
579 
580 static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
581 		       struct mlx5e_ipsec_rx *rx, u32 family)
582 {
583 	/* disconnect */
584 	if (rx != ipsec->rx_esw)
585 		ipsec_rx_ft_disconnect(ipsec, family);
586 
587 	if (rx->chains) {
588 		ipsec_chains_destroy(rx->chains);
589 	} else {
590 		mlx5_del_flow_rules(rx->pol.rule);
591 		mlx5_destroy_flow_group(rx->pol.group);
592 		mlx5_destroy_flow_table(rx->ft.pol);
593 	}
594 
595 	mlx5_del_flow_rules(rx->sa.rule);
596 	mlx5_destroy_flow_group(rx->sa.group);
597 	mlx5_destroy_flow_table(rx->ft.sa);
598 	if (rx->allow_tunnel_mode)
599 		mlx5_eswitch_unblock_encap(mdev);
600 	mlx5_ipsec_rx_status_destroy(ipsec, rx);
601 	mlx5_destroy_flow_table(rx->ft.status);
602 
603 	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
604 }
605 
606 static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
607 				     struct mlx5e_ipsec_rx *rx,
608 				     u32 family,
609 				     struct mlx5e_ipsec_rx_create_attr *attr)
610 {
611 	if (rx == ipsec->rx_esw) {
612 		/* For packet offload in switchdev mode, RX & TX use FDB namespace */
613 		attr->ns = ipsec->tx_esw->ns;
614 		mlx5_esw_ipsec_rx_create_attr_set(ipsec, attr);
615 		return;
616 	}
617 
618 	attr->ns = mlx5e_fs_get_ns(ipsec->fs, false);
619 	attr->ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
620 	attr->family = family;
621 	attr->prio = MLX5E_NIC_PRIO;
622 	attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL;
623 	attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
624 	attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
625 	attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL;
626 }
627 
628 static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
629 					 struct mlx5e_ipsec_rx *rx,
630 					 struct mlx5e_ipsec_rx_create_attr *attr,
631 					 struct mlx5_flow_destination *dest)
632 {
633 	struct mlx5_flow_table *ft;
634 	int err;
635 
636 	if (rx == ipsec->rx_esw)
637 		return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest);
638 
639 	*dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family));
640 	err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest,
641 					   attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
642 					   attr->prio);
643 	if (err)
644 		return err;
645 
646 	ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, attr->family);
647 	if (ft) {
648 		dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
649 		dest->ft = ft;
650 	}
651 
652 	return 0;
653 }
654 
655 static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
656 				struct mlx5e_ipsec_rx *rx,
657 				struct mlx5e_ipsec_rx_create_attr *attr)
658 {
659 	struct mlx5_flow_destination dest = {};
660 
661 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
662 	dest.ft = rx->ft.pol;
663 	mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
664 }
665 
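/*
 * Build the RX pipeline bottom-up: the status table and its rules first,
 * then the SA table (with reformat support when tunnel mode is allowed)
 * and its miss rule, and finally the policy table, either an fs_chains
 * instance when the device supports policy priorities or a plain flow
 * table. Last, point the TTC entry at ft.pol.
 */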
666 static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
667 		     struct mlx5e_ipsec_rx *rx, u32 family)
668 {
669 	struct mlx5e_ipsec_rx_create_attr attr;
670 	struct mlx5_flow_destination dest[2];
671 	struct mlx5_flow_table *ft;
672 	u32 flags = 0;
673 	int err;
674 
675 	ipsec_rx_create_attr_set(ipsec, rx, family, &attr);
676 
677 	err = ipsec_rx_status_pass_dest_get(ipsec, rx, &attr, &dest[0]);
678 	if (err)
679 		return err;
680 
681 	ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0);
682 	if (IS_ERR(ft)) {
683 		err = PTR_ERR(ft);
684 		goto err_fs_ft_status;
685 	}
686 	rx->ft.status = ft;
687 
688 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
689 	dest[1].counter = rx->fc->cnt;
690 	err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
691 	if (err)
692 		goto err_add;
693 
694 	/* Create FT */
695 	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
696 		rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
697 	if (rx->allow_tunnel_mode)
698 		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
699 	ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
700 	if (IS_ERR(ft)) {
701 		err = PTR_ERR(ft);
702 		goto err_fs_ft;
703 	}
704 	rx->ft.sa = ft;
705 
706 	err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
707 	if (err)
708 		goto err_fs;
709 
710 	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
711 		rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
712 						 attr.chains_ns,
713 						 attr.prio,
714 						 attr.pol_level,
715 						 &rx->ft.pol);
716 		if (IS_ERR(rx->chains)) {
717 			err = PTR_ERR(rx->chains);
718 			goto err_pol_ft;
719 		}
720 
721 		goto connect;
722 	}
723 
724 	ft = ipsec_ft_create(attr.ns, attr.pol_level, attr.prio, 2, 0);
725 	if (IS_ERR(ft)) {
726 		err = PTR_ERR(ft);
727 		goto err_pol_ft;
728 	}
729 	rx->ft.pol = ft;
730 	memset(dest, 0x00, 2 * sizeof(*dest));
731 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
732 	dest[0].ft = rx->ft.sa;
733 	err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
734 	if (err)
735 		goto err_pol_miss;
736 
737 connect:
738 	/* connect */
739 	if (rx != ipsec->rx_esw)
740 		ipsec_rx_ft_connect(ipsec, rx, &attr);
741 	return 0;
742 
743 err_pol_miss:
744 	mlx5_destroy_flow_table(rx->ft.pol);
745 err_pol_ft:
746 	mlx5_del_flow_rules(rx->sa.rule);
747 	mlx5_destroy_flow_group(rx->sa.group);
748 err_fs:
749 	mlx5_destroy_flow_table(rx->ft.sa);
750 err_fs_ft:
751 	if (rx->allow_tunnel_mode)
752 		mlx5_eswitch_unblock_encap(mdev);
753 	mlx5_ipsec_rx_status_destroy(ipsec, rx);
754 err_add:
755 	mlx5_destroy_flow_table(rx->ft.status);
756 err_fs_ft_status:
757 	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
758 	return err;
759 }
760 
761 static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
762 		  struct mlx5e_ipsec_rx *rx, u32 family)
763 {
764 	int err;
765 
766 	if (rx->ft.refcnt)
767 		goto skip;
768 
769 	err = mlx5_eswitch_block_mode(mdev);
770 	if (err)
771 		return err;
772 
773 	err = rx_create(mdev, ipsec, rx, family);
774 	if (err) {
775 		mlx5_eswitch_unblock_mode(mdev);
776 		return err;
777 	}
778 
779 skip:
780 	rx->ft.refcnt++;
781 	return 0;
782 }
783 
784 static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
785 		   u32 family)
786 {
787 	if (--rx->ft.refcnt)
788 		return;
789 
790 	rx_destroy(ipsec->mdev, ipsec, rx, family);
791 	mlx5_eswitch_unblock_mode(ipsec->mdev);
792 }
793 
794 static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
795 					struct mlx5e_ipsec *ipsec, u32 family,
796 					int type)
797 {
798 	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
799 	int err;
800 
801 	mutex_lock(&rx->ft.mutex);
802 	err = rx_get(mdev, ipsec, rx, family);
803 	mutex_unlock(&rx->ft.mutex);
804 	if (err)
805 		return ERR_PTR(err);
806 
807 	return rx;
808 }
809 
810 static struct mlx5_flow_table *rx_ft_get_policy(struct mlx5_core_dev *mdev,
811 						struct mlx5e_ipsec *ipsec,
812 						u32 family, u32 prio, int type)
813 {
814 	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
815 	struct mlx5_flow_table *ft;
816 	int err;
817 
818 	mutex_lock(&rx->ft.mutex);
819 	err = rx_get(mdev, ipsec, rx, family);
820 	if (err)
821 		goto err_get;
822 
823 	ft = rx->chains ? ipsec_chains_get_table(rx->chains, prio) : rx->ft.pol;
824 	if (IS_ERR(ft)) {
825 		err = PTR_ERR(ft);
826 		goto err_get_ft;
827 	}
828 
829 	mutex_unlock(&rx->ft.mutex);
830 	return ft;
831 
832 err_get_ft:
833 	rx_put(ipsec, rx, family);
834 err_get:
835 	mutex_unlock(&rx->ft.mutex);
836 	return ERR_PTR(err);
837 }
838 
839 static void rx_ft_put(struct mlx5e_ipsec *ipsec, u32 family, int type)
840 {
841 	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
842 
843 	mutex_lock(&rx->ft.mutex);
844 	rx_put(ipsec, rx, family);
845 	mutex_unlock(&rx->ft.mutex);
846 }
847 
848 static void rx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 family, u32 prio, int type)
849 {
850 	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
851 
852 	mutex_lock(&rx->ft.mutex);
853 	if (rx->chains)
854 		ipsec_chains_put_table(rx->chains, prio);
855 
856 	rx_put(ipsec, rx, family);
857 	mutex_unlock(&rx->ft.mutex);
858 }
859 
860 static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
861 {
862 	struct mlx5_flow_destination dest = {};
863 	struct mlx5_flow_act flow_act = {};
864 	struct mlx5_flow_handle *fte;
865 	struct mlx5_flow_spec *spec;
866 	int err;
867 
868 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
869 	if (!spec)
870 		return -ENOMEM;
871 
872 	/* create fte */
873 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
874 			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
875 	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
876 	dest.counter = tx->fc->cnt;
877 	fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1);
878 	if (IS_ERR(fte)) {
879 		err = PTR_ERR(fte);
880 		mlx5_core_err(mdev, "Failed to add ipsec tx counter rule err=%d\n", err);
881 		goto err_rule;
882 	}
883 
884 	kvfree(spec);
885 	tx->status.rule = fte;
886 	return 0;
887 
888 err_rule:
889 	kvfree(spec);
890 	return err;
891 }
892 
893 /* IPsec TX flow steering */
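/*
 * TX pipeline: egress packets hit ft.pol (policy lookup) first, then
 * ft.sa (SA match + encrypt), and end in ft.status where they are
 * counted. In switchdev mode the SA table also carries a miss rule that
 * sends unmatched packets straight to the uplink vport.
 */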
894 static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
895 		       struct mlx5_ipsec_fs *roce)
896 {
897 	mlx5_ipsec_fs_roce_tx_destroy(roce, ipsec->mdev);
898 	if (tx->chains) {
899 		ipsec_chains_destroy(tx->chains);
900 	} else {
901 		mlx5_del_flow_rules(tx->pol.rule);
902 		mlx5_destroy_flow_group(tx->pol.group);
903 		mlx5_destroy_flow_table(tx->ft.pol);
904 	}
905 
906 	if (tx == ipsec->tx_esw) {
907 		mlx5_del_flow_rules(tx->sa.rule);
908 		mlx5_destroy_flow_group(tx->sa.group);
909 	}
910 	mlx5_destroy_flow_table(tx->ft.sa);
911 	if (tx->allow_tunnel_mode)
912 		mlx5_eswitch_unblock_encap(ipsec->mdev);
913 	mlx5_del_flow_rules(tx->status.rule);
914 	mlx5_destroy_flow_table(tx->ft.status);
915 }
916 
917 static void ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
918 				     struct mlx5e_ipsec_tx *tx,
919 				     struct mlx5e_ipsec_tx_create_attr *attr)
920 {
921 	if (tx == ipsec->tx_esw) {
922 		mlx5_esw_ipsec_tx_create_attr_set(ipsec, attr);
923 		return;
924 	}
925 
926 	attr->prio = 0;
927 	attr->pol_level = 0;
928 	attr->sa_level = 1;
929 	attr->cnt_level = 2;
930 	attr->chains_ns = MLX5_FLOW_NAMESPACE_EGRESS_IPSEC;
931 }
932 
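/*
 * tx_create() mirrors rx_create(): the status (counter) table comes
 * first, then the SA table, then the policy table (fs_chains or plain),
 * and finally the RoCE TX tables are connected to ft.pol.
 */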
933 static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
934 		     struct mlx5_ipsec_fs *roce)
935 {
936 	struct mlx5_core_dev *mdev = ipsec->mdev;
937 	struct mlx5e_ipsec_tx_create_attr attr;
938 	struct mlx5_flow_destination dest = {};
939 	struct mlx5_flow_table *ft;
940 	u32 flags = 0;
941 	int err;
942 
943 	ipsec_tx_create_attr_set(ipsec, tx, &attr);
944 	ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 0);
945 	if (IS_ERR(ft))
946 		return PTR_ERR(ft);
947 	tx->ft.status = ft;
948 
949 	err = ipsec_counter_rule_tx(mdev, tx);
950 	if (err)
951 		goto err_status_rule;
952 
953 	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
954 		tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
955 	if (tx->allow_tunnel_mode)
956 		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
957 	ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
958 	if (IS_ERR(ft)) {
959 		err = PTR_ERR(ft);
960 		goto err_sa_ft;
961 	}
962 	tx->ft.sa = ft;
963 
964 	if (tx == ipsec->tx_esw) {
965 		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
966 		dest.vport.num = MLX5_VPORT_UPLINK;
967 		err = ipsec_miss_create(mdev, tx->ft.sa, &tx->sa, &dest);
968 		if (err)
969 			goto err_sa_miss;
970 		memset(&dest, 0, sizeof(dest));
971 	}
972 
973 	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
974 		tx->chains = ipsec_chains_create(
975 			mdev, tx->ft.sa, attr.chains_ns, attr.prio, attr.pol_level,
976 			&tx->ft.pol);
977 		if (IS_ERR(tx->chains)) {
978 			err = PTR_ERR(tx->chains);
979 			goto err_pol_ft;
980 		}
981 
982 		goto connect_roce;
983 	}
984 
985 	ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 2, 0);
986 	if (IS_ERR(ft)) {
987 		err = PTR_ERR(ft);
988 		goto err_pol_ft;
989 	}
990 	tx->ft.pol = ft;
991 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
992 	dest.ft = tx->ft.sa;
993 	err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
994 	if (err) {
995 		mlx5_destroy_flow_table(tx->ft.pol);
996 		goto err_pol_ft;
997 	}
998 
999 connect_roce:
1000 	err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol, false);
1001 	if (err)
1002 		goto err_roce;
1003 	return 0;
1004 
1005 err_roce:
1006 	if (tx->chains) {
1007 		ipsec_chains_destroy(tx->chains);
1008 	} else {
1009 		mlx5_del_flow_rules(tx->pol.rule);
1010 		mlx5_destroy_flow_group(tx->pol.group);
1011 		mlx5_destroy_flow_table(tx->ft.pol);
1012 	}
1013 err_pol_ft:
1014 	if (tx == ipsec->tx_esw) {
1015 		mlx5_del_flow_rules(tx->sa.rule);
1016 		mlx5_destroy_flow_group(tx->sa.group);
1017 	}
1018 err_sa_miss:
1019 	mlx5_destroy_flow_table(tx->ft.sa);
1020 err_sa_ft:
1021 	if (tx->allow_tunnel_mode)
1022 		mlx5_eswitch_unblock_encap(mdev);
1023 	mlx5_del_flow_rules(tx->status.rule);
1024 err_status_rule:
1025 	mlx5_destroy_flow_table(tx->ft.status);
1026 	return err;
1027 }
1028 
1029 static void ipsec_esw_tx_ft_policy_set(struct mlx5_core_dev *mdev,
1030 				       struct mlx5_flow_table *ft)
1031 {
1032 #ifdef CONFIG_MLX5_ESWITCH
1033 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
1034 	struct mlx5e_rep_priv *uplink_rpriv;
1035 	struct mlx5e_priv *priv;
1036 
1037 	esw->offloads.ft_ipsec_tx_pol = ft;
1038 	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1039 	priv = netdev_priv(uplink_rpriv->netdev);
1040 	if (!priv->channels.num)
1041 		return;
1042 
1043 	mlx5e_rep_deactivate_channels(priv);
1044 	mlx5e_rep_activate_channels(priv);
1045 #endif
1046 }
1047 
1048 static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
1049 		  struct mlx5e_ipsec_tx *tx)
1050 {
1051 	int err;
1052 
1053 	if (tx->ft.refcnt)
1054 		goto skip;
1055 
1056 	err = mlx5_eswitch_block_mode(mdev);
1057 	if (err)
1058 		return err;
1059 
1060 	err = tx_create(ipsec, tx, ipsec->roce);
1061 	if (err) {
1062 		mlx5_eswitch_unblock_mode(mdev);
1063 		return err;
1064 	}
1065 
1066 	if (tx == ipsec->tx_esw)
1067 		ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);
1068 
1069 skip:
1070 	tx->ft.refcnt++;
1071 	return 0;
1072 }
1073 
1074 static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
1075 {
1076 	if (--tx->ft.refcnt)
1077 		return;
1078 
1079 	if (tx == ipsec->tx_esw) {
1080 		mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev);
1081 		ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
1082 	}
1083 
1084 	tx_destroy(ipsec, tx, ipsec->roce);
1085 	mlx5_eswitch_unblock_mode(ipsec->mdev);
1086 }
1087 
1088 static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
1089 						struct mlx5e_ipsec *ipsec,
1090 						u32 prio, int type)
1091 {
1092 	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
1093 	struct mlx5_flow_table *ft;
1094 	int err;
1095 
1096 	mutex_lock(&tx->ft.mutex);
1097 	err = tx_get(mdev, ipsec, tx);
1098 	if (err)
1099 		goto err_get;
1100 
1101 	ft = tx->chains ? ipsec_chains_get_table(tx->chains, prio) : tx->ft.pol;
1102 	if (IS_ERR(ft)) {
1103 		err = PTR_ERR(ft);
1104 		goto err_get_ft;
1105 	}
1106 
1107 	mutex_unlock(&tx->ft.mutex);
1108 	return ft;
1109 
1110 err_get_ft:
1111 	tx_put(ipsec, tx);
1112 err_get:
1113 	mutex_unlock(&tx->ft.mutex);
1114 	return ERR_PTR(err);
1115 }
1116 
1117 static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
1118 					struct mlx5e_ipsec *ipsec, int type)
1119 {
1120 	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
1121 	int err;
1122 
1123 	mutex_lock(&tx->ft.mutex);
1124 	err = tx_get(mdev, ipsec, tx);
1125 	mutex_unlock(&tx->ft.mutex);
1126 	if (err)
1127 		return ERR_PTR(err);
1128 
1129 	return tx;
1130 }
1131 
1132 static void tx_ft_put(struct mlx5e_ipsec *ipsec, int type)
1133 {
1134 	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
1135 
1136 	mutex_lock(&tx->ft.mutex);
1137 	tx_put(ipsec, tx);
1138 	mutex_unlock(&tx->ft.mutex);
1139 }
1140 
1141 static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
1142 {
1143 	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
1144 
1145 	mutex_lock(&tx->ft.mutex);
1146 	if (tx->chains)
1147 		ipsec_chains_put_table(tx->chains, prio);
1148 
1149 	tx_put(ipsec, tx);
1150 	mutex_unlock(&tx->ft.mutex);
1151 }
1152 
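/*
 * The *_ft_get()/*_ft_put() helpers above are refcounted under ft.mutex.
 * A minimal usage sketch (illustrative only, error handling elided):
 *
 *	ft = tx_ft_get_policy(mdev, ipsec, attrs->prio, attrs->type);
 *	...add the policy rule to ft...
 *	tx_ft_put_policy(ipsec, attrs->prio, attrs->type);
 */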
1153 static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
1154 			    __be32 *daddr)
1155 {
1156 	if (!*saddr && !*daddr)
1157 		return;
1158 
1159 	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
1160 
1161 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
1162 	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);
1163 
1164 	if (*saddr) {
1165 		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
1166 				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
1167 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1168 				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
1169 	}
1170 
1171 	if (*daddr) {
1172 		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
1173 				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
1174 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1175 				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
1176 	}
1177 }
1178 
1179 static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
1180 			    __be32 *daddr)
1181 {
1182 	if (addr6_all_zero(saddr) && addr6_all_zero(daddr))
1183 		return;
1184 
1185 	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
1186 
1187 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
1188 	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);
1189 
1190 	if (!addr6_all_zero(saddr)) {
1191 		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
1192 				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
1193 		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1194 				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
1195 	}
1196 
1197 	if (!addr6_all_zero(daddr)) {
1198 		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
1199 				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
1200 		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1201 				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
1202 	}
1203 }
1204 
1205 static void setup_fte_esp(struct mlx5_flow_spec *spec)
1206 {
1207 	/* ESP header */
1208 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
1209 
1210 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
1211 	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
1212 }
1213 
1214 static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
1215 {
1216 	/* SPI number */
1217 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
1218 
1219 	if (encap) {
1220 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1221 				 misc_parameters.inner_esp_spi);
1222 		MLX5_SET(fte_match_param, spec->match_value,
1223 			 misc_parameters.inner_esp_spi, spi);
1224 	} else {
1225 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1226 				 misc_parameters.outer_esp_spi);
1227 		MLX5_SET(fte_match_param, spec->match_value,
1228 			 misc_parameters.outer_esp_spi, spi);
1229 	}
1230 }
1231 
1232 static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
1233 {
1234 	/* Non fragmented */
1235 	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
1236 
1237 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
1238 	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
1239 }
1240 
1241 static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
1242 {
1243 	/* Add IPsec indicator in metadata_reg_a */
1244 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
1245 
1246 	MLX5_SET(fte_match_param, spec->match_criteria,
1247 		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
1248 	MLX5_SET(fte_match_param, spec->match_value,
1249 		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
1250 }
1251 
1252 static void setup_fte_reg_c4(struct mlx5_flow_spec *spec, u32 reqid)
1253 {
1254 	/* Pass policy check before choosing this SA */
1255 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
1256 
1257 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1258 			 misc_parameters_2.metadata_reg_c_4);
1259 	MLX5_SET(fte_match_param, spec->match_value,
1260 		 misc_parameters_2.metadata_reg_c_4, reqid);
1261 }
1262 
1263 static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
1264 {
1265 	switch (upspec->proto) {
1266 	case IPPROTO_UDP:
1267 		if (upspec->dport) {
1268 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1269 				 udp_dport, upspec->dport_mask);
1270 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1271 				 udp_dport, upspec->dport);
1272 		}
1273 		if (upspec->sport) {
1274 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1275 				 udp_sport, upspec->sport_mask);
1276 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1277 				 udp_sport, upspec->sport);
1278 		}
1279 		break;
1280 	case IPPROTO_TCP:
1281 		if (upspec->dport) {
1282 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1283 				 tcp_dport, upspec->dport_mask);
1284 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1285 				 tcp_dport, upspec->dport);
1286 		}
1287 		if (upspec->sport) {
1288 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1289 				 tcp_sport, upspec->sport_mask);
1290 			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1291 				 tcp_sport, upspec->sport);
1292 		}
1293 		break;
1294 	default:
1295 		return;
1296 	}
1297 
1298 	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
1299 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
1300 	MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
1301 }
1302 
1303 static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec,
1304 						     int type, u8 dir)
1305 {
1306 	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
1307 		return MLX5_FLOW_NAMESPACE_FDB;
1308 
1309 	if (dir == XFRM_DEV_OFFLOAD_IN)
1310 		return MLX5_FLOW_NAMESPACE_KERNEL;
1311 
1312 	return MLX5_FLOW_NAMESPACE_EGRESS;
1313 }
1314 
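/*
 * Stamp packets with the SA handle via a modify-header action. On RX the
 * handle (@val) is written to metadata_reg_b and reg_c_2 so later tables
 * and software can identify the SA (crypto mode additionally clears
 * reg_c_4); on TX, reg_c_4 is set to @val so the SA table can match the
 * reqid written by the policy stage.
 */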
1315 static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8 dir,
1316 			       struct mlx5_flow_act *flow_act)
1317 {
1318 	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
1319 	u8 action[3][MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
1320 	struct mlx5_core_dev *mdev = ipsec->mdev;
1321 	struct mlx5_modify_hdr *modify_hdr;
1322 	u8 num_of_actions = 1;
1323 
1324 	MLX5_SET(set_action_in, action[0], action_type, MLX5_ACTION_TYPE_SET);
1325 	switch (dir) {
1326 	case XFRM_DEV_OFFLOAD_IN:
1327 		MLX5_SET(set_action_in, action[0], field,
1328 			 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
1329 
1330 		num_of_actions++;
1331 		MLX5_SET(set_action_in, action[1], action_type, MLX5_ACTION_TYPE_SET);
1332 		MLX5_SET(set_action_in, action[1], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2);
1333 		MLX5_SET(set_action_in, action[1], data, val);
1334 		MLX5_SET(set_action_in, action[1], offset, 0);
1335 		MLX5_SET(set_action_in, action[1], length, 32);
1336 
1337 		if (type == XFRM_DEV_OFFLOAD_CRYPTO) {
1338 			num_of_actions++;
1339 			MLX5_SET(set_action_in, action[2], action_type,
1340 				 MLX5_ACTION_TYPE_SET);
1341 			MLX5_SET(set_action_in, action[2], field,
1342 				 MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
1343 			MLX5_SET(set_action_in, action[2], data, 0);
1344 			MLX5_SET(set_action_in, action[2], offset, 0);
1345 			MLX5_SET(set_action_in, action[2], length, 32);
1346 		}
1347 		break;
1348 	case XFRM_DEV_OFFLOAD_OUT:
1349 		MLX5_SET(set_action_in, action[0], field,
1350 			 MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
1351 		break;
1352 	default:
1353 		return -EINVAL;
1354 	}
1355 
1356 	MLX5_SET(set_action_in, action[0], data, val);
1357 	MLX5_SET(set_action_in, action[0], offset, 0);
1358 	MLX5_SET(set_action_in, action[0], length, 32);
1359 
1360 	modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
1361 	if (IS_ERR(modify_hdr)) {
1362 		mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
1363 			      PTR_ERR(modify_hdr));
1364 		return PTR_ERR(modify_hdr);
1365 	}
1366 
1367 	flow_act->modify_hdr = modify_hdr;
1368 	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1369 	return 0;
1370 }
1371 
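/*
 * Build the reformat buffer for tunnel mode: an L2 header always, plus,
 * for egress, the outer IPv4/IPv6 header and the ESP header with the SPI
 * filled in.
 */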
1372 static int
1373 setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
1374 			  struct mlx5_accel_esp_xfrm_attrs *attrs,
1375 			  struct mlx5_pkt_reformat_params *reformat_params)
1376 {
1377 	struct ip_esp_hdr *esp_hdr;
1378 	struct ipv6hdr *ipv6hdr;
1379 	struct ethhdr *eth_hdr;
1380 	struct iphdr *iphdr;
1381 	char *reformatbf;
1382 	size_t bfflen;
1383 	void *hdr;
1384 
1385 	bfflen = sizeof(*eth_hdr);
1386 
1387 	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
1388 		bfflen += sizeof(*esp_hdr) + 8;
1389 
1390 		switch (attrs->family) {
1391 		case AF_INET:
1392 			bfflen += sizeof(*iphdr);
1393 			break;
1394 		case AF_INET6:
1395 			bfflen += sizeof(*ipv6hdr);
1396 			break;
1397 		default:
1398 			return -EINVAL;
1399 		}
1400 	}
1401 
1402 	reformatbf = kzalloc(bfflen, GFP_KERNEL);
1403 	if (!reformatbf)
1404 		return -ENOMEM;
1405 
1406 	eth_hdr = (struct ethhdr *)reformatbf;
1407 	switch (attrs->family) {
1408 	case AF_INET:
1409 		eth_hdr->h_proto = htons(ETH_P_IP);
1410 		break;
1411 	case AF_INET6:
1412 		eth_hdr->h_proto = htons(ETH_P_IPV6);
1413 		break;
1414 	default:
1415 		goto free_reformatbf;
1416 	}
1417 
1418 	ether_addr_copy(eth_hdr->h_dest, attrs->dmac);
1419 	ether_addr_copy(eth_hdr->h_source, attrs->smac);
1420 
1421 	switch (attrs->dir) {
1422 	case XFRM_DEV_OFFLOAD_IN:
1423 		reformat_params->type = MLX5_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2;
1424 		break;
1425 	case XFRM_DEV_OFFLOAD_OUT:
1426 		reformat_params->type = MLX5_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL;
1427 		reformat_params->param_0 = attrs->authsize;
1428 
1429 		hdr = reformatbf + sizeof(*eth_hdr);
1430 		switch (attrs->family) {
1431 		case AF_INET:
1432 			iphdr = (struct iphdr *)hdr;
1433 			memcpy(&iphdr->saddr, &attrs->saddr.a4, 4);
1434 			memcpy(&iphdr->daddr, &attrs->daddr.a4, 4);
1435 			iphdr->version = 4;
1436 			iphdr->ihl = 5;
1437 			iphdr->ttl = IPSEC_TUNNEL_DEFAULT_TTL;
1438 			iphdr->protocol = IPPROTO_ESP;
1439 			hdr += sizeof(*iphdr);
1440 			break;
1441 		case AF_INET6:
1442 			ipv6hdr = (struct ipv6hdr *)hdr;
1443 			memcpy(&ipv6hdr->saddr, &attrs->saddr.a6, 16);
1444 			memcpy(&ipv6hdr->daddr, &attrs->daddr.a6, 16);
1445 			ipv6hdr->nexthdr = IPPROTO_ESP;
1446 			ipv6hdr->version = 6;
1447 			ipv6hdr->hop_limit = IPSEC_TUNNEL_DEFAULT_TTL;
1448 			hdr += sizeof(*ipv6hdr);
1449 			break;
1450 		default:
1451 			goto free_reformatbf;
1452 		}
1453 
1454 		esp_hdr = (struct ip_esp_hdr *)hdr;
1455 		esp_hdr->spi = htonl(attrs->spi);
1456 		break;
1457 	default:
1458 		goto free_reformatbf;
1459 	}
1460 
1461 	reformat_params->size = bfflen;
1462 	reformat_params->data = reformatbf;
1463 	return 0;
1464 
1465 free_reformatbf:
1466 	kfree(reformatbf);
1467 	return -EINVAL;
1468 }
1469 
1470 static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
1471 {
1472 	switch (attrs->dir) {
1473 	case XFRM_DEV_OFFLOAD_IN:
1474 		if (attrs->encap)
1475 			return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
1476 		return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
1477 	case XFRM_DEV_OFFLOAD_OUT:
1478 		if (attrs->family == AF_INET) {
1479 			if (attrs->encap)
1480 				return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
1481 			return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
1482 		}
1483 
1484 		if (attrs->encap)
1485 			return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6;
1486 		return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
1487 	default:
1488 		WARN_ON(true);
1489 	}
1490 
1491 	return -EINVAL;
1492 }
1493 
1494 static int
1495 setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
1496 			     struct mlx5_pkt_reformat_params *reformat_params)
1497 {
1498 	struct udphdr *udphdr;
1499 	char *reformatbf;
1500 	size_t bfflen;
1501 	__be32 spi;
1502 	void *hdr;
1503 
1504 	reformat_params->type = get_reformat_type(attrs);
1505 	if (reformat_params->type < 0)
1506 		return reformat_params->type;
1507 
1508 	switch (attrs->dir) {
1509 	case XFRM_DEV_OFFLOAD_IN:
1510 		break;
1511 	case XFRM_DEV_OFFLOAD_OUT:
1512 		bfflen = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE;
1513 		if (attrs->encap)
1514 			bfflen += sizeof(*udphdr);
1515 
1516 		reformatbf = kzalloc(bfflen, GFP_KERNEL);
1517 		if (!reformatbf)
1518 			return -ENOMEM;
1519 
1520 		hdr = reformatbf;
1521 		if (attrs->encap) {
1522 			udphdr = (struct udphdr *)reformatbf;
1523 			udphdr->source = attrs->sport;
1524 			udphdr->dest = attrs->dport;
1525 			hdr += sizeof(*udphdr);
1526 		}
1527 
1528 		/* convert to network format */
1529 		spi = htonl(attrs->spi);
1530 		memcpy(hdr, &spi, sizeof(spi));
1531 
1532 		reformat_params->param_0 = attrs->authsize;
1533 		reformat_params->size = bfflen;
1534 		reformat_params->data = reformatbf;
1535 		break;
1536 	default:
1537 		return -EINVAL;
1538 	}
1539 
1540 	return 0;
1541 }
1542 
1543 static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
1544 			      struct mlx5_accel_esp_xfrm_attrs *attrs,
1545 			      struct mlx5_flow_act *flow_act)
1546 {
1547 	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->type,
1548 								attrs->dir);
1549 	struct mlx5_pkt_reformat_params reformat_params = {};
1550 	struct mlx5_core_dev *mdev = ipsec->mdev;
1551 	struct mlx5_pkt_reformat *pkt_reformat;
1552 	int ret;
1553 
1554 	switch (attrs->mode) {
1555 	case XFRM_MODE_TRANSPORT:
1556 		ret = setup_pkt_transport_reformat(attrs, &reformat_params);
1557 		break;
1558 	case XFRM_MODE_TUNNEL:
1559 		ret = setup_pkt_tunnel_reformat(mdev, attrs, &reformat_params);
1560 		break;
1561 	default:
1562 		ret = -EINVAL;
1563 	}
1564 
1565 	if (ret)
1566 		return ret;
1567 
1568 	pkt_reformat =
1569 		mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
1570 	kfree(reformat_params.data);
1571 	if (IS_ERR(pkt_reformat))
1572 		return PTR_ERR(pkt_reformat);
1573 
1574 	flow_act->pkt_reformat = pkt_reformat;
1575 	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
1576 	return 0;
1577 }
1578 
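/*
 * Per-SA RX rule: match the outer addresses and SPI, decrypt using the
 * SA's crypto object, count, and forward to the status table (or drop
 * for an SA marked to drop). Packet offload additionally installs the
 * per-SA replay and auth/trailer drop rules in the status table.
 */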
1579 static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
1580 {
1581 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
1582 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
1583 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
1584 	struct mlx5_flow_destination dest[2];
1585 	struct mlx5_flow_act flow_act = {};
1586 	struct mlx5_flow_handle *rule;
1587 	struct mlx5_flow_spec *spec;
1588 	struct mlx5e_ipsec_rx *rx;
1589 	struct mlx5_fc *counter;
1590 	int err = 0;
1591 
1592 	rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type);
1593 	if (IS_ERR(rx))
1594 		return PTR_ERR(rx);
1595 
1596 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1597 	if (!spec) {
1598 		err = -ENOMEM;
1599 		goto err_alloc;
1600 	}
1601 
1602 	if (attrs->family == AF_INET)
1603 		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
1604 	else
1605 		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
1606 
1607 	setup_fte_spi(spec, attrs->spi, attrs->encap);
1608 	if (!attrs->encap)
1609 		setup_fte_esp(spec);
1610 	setup_fte_no_frags(spec);
1611 	setup_fte_upper_proto_match(spec, &attrs->upspec);
1612 
1613 	if (!attrs->drop) {
1614 		if (rx != ipsec->rx_esw)
1615 			err = setup_modify_header(ipsec, attrs->type,
1616 						  sa_entry->ipsec_obj_id | BIT(31),
1617 						  XFRM_DEV_OFFLOAD_IN, &flow_act);
1618 		else
1619 			err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);
1620 
1621 		if (err)
1622 			goto err_mod_header;
1623 	}
1624 
1625 	switch (attrs->type) {
1626 	case XFRM_DEV_OFFLOAD_PACKET:
1627 		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
1628 		if (err)
1629 			goto err_pkt_reformat;
1630 		break;
1631 	default:
1632 		break;
1633 	}
1634 
1635 	counter = mlx5_fc_create(mdev, true);
1636 	if (IS_ERR(counter)) {
1637 		err = PTR_ERR(counter);
1638 		goto err_add_cnt;
1639 	}
1640 	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
1641 	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
1642 	flow_act.flags |= FLOW_ACT_NO_APPEND;
1643 	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
1644 			   MLX5_FLOW_CONTEXT_ACTION_COUNT;
1645 	if (attrs->drop)
1646 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
1647 	else
1648 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1649 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1650 	dest[0].ft = rx->ft.status;
1651 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1652 	dest[1].counter = counter;
1653 	rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
1654 	if (IS_ERR(rule)) {
1655 		err = PTR_ERR(rule);
1656 		mlx5_core_err(mdev, "failed to add RX IPsec rule err=%d\n", err);
1657 		goto err_add_flow;
1658 	}
1659 	if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
1660 		err = rx_add_rule_drop_replay(sa_entry, rx);
1661 	if (err)
1662 		goto err_add_replay;
1663 
1664 	err = rx_add_rule_drop_auth_trailer(sa_entry, rx);
1665 	if (err)
1666 		goto err_drop_reason;
1667 
1668 	kvfree(spec);
1669 
1670 	sa_entry->ipsec_rule.rule = rule;
1671 	sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
1672 	sa_entry->ipsec_rule.fc = counter;
1673 	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
1674 	return 0;
1675 
1676 err_drop_reason:
1677 	if (sa_entry->ipsec_rule.replay.rule) {
1678 		mlx5_del_flow_rules(sa_entry->ipsec_rule.replay.rule);
1679 		mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
1680 	}
1681 err_add_replay:
1682 	mlx5_del_flow_rules(rule);
1683 err_add_flow:
1684 	mlx5_fc_destroy(mdev, counter);
1685 err_add_cnt:
1686 	if (flow_act.pkt_reformat)
1687 		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
1688 err_pkt_reformat:
1689 	if (flow_act.modify_hdr)
1690 		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
1691 err_mod_header:
1692 	kvfree(spec);
1693 err_alloc:
1694 	rx_ft_put(ipsec, attrs->family, attrs->type);
1695 	return err;
1696 }
1697 
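/*
 * Install the TX SA rule. Crypto offload matches on the SA addresses,
 * SPI and ESP header; packet offload matches on the policy-set reqid
 * in metadata register C4 and adds the ESP encapsulation. Matching
 * packets are encrypted, counted and forwarded to the status table.
 */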
1698 static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
1699 {
1700 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
1701 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
1702 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
1703 	struct mlx5_flow_destination dest[2];
1704 	struct mlx5_flow_act flow_act = {};
1705 	struct mlx5_flow_handle *rule;
1706 	struct mlx5_flow_spec *spec;
1707 	struct mlx5e_ipsec_tx *tx;
1708 	struct mlx5_fc *counter;
1709 	int err;
1710 
1711 	tx = tx_ft_get(mdev, ipsec, attrs->type);
1712 	if (IS_ERR(tx))
1713 		return PTR_ERR(tx);
1714 
1715 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1716 	if (!spec) {
1717 		err = -ENOMEM;
1718 		goto err_alloc;
1719 	}
1720 
1721 	setup_fte_no_frags(spec);
1722 	setup_fte_upper_proto_match(spec, &attrs->upspec);
1723 
1724 	switch (attrs->type) {
1725 	case XFRM_DEV_OFFLOAD_CRYPTO:
1726 		if (attrs->family == AF_INET)
1727 			setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
1728 		else
1729 			setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
1730 		setup_fte_spi(spec, attrs->spi, false);
1731 		setup_fte_esp(spec);
1732 		setup_fte_reg_a(spec);
1733 		break;
1734 	case XFRM_DEV_OFFLOAD_PACKET:
1735 		setup_fte_reg_c4(spec, attrs->reqid);
1736 		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
1737 		if (err)
1738 			goto err_pkt_reformat;
1739 		break;
1740 	default:
1741 		break;
1742 	}
1743 
1744 	counter = mlx5_fc_create(mdev, true);
1745 	if (IS_ERR(counter)) {
1746 		err = PTR_ERR(counter);
1747 		goto err_add_cnt;
1748 	}
1749 
1750 	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
1751 	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
1752 	flow_act.flags |= FLOW_ACT_NO_APPEND;
1753 	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
1754 			   MLX5_FLOW_CONTEXT_ACTION_COUNT;
1755 	if (attrs->drop)
1756 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
1757 	else
1758 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1759 
1760 	dest[0].ft = tx->ft.status;
1761 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1762 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1763 	dest[1].counter = counter;
1764 	rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2);
1765 	if (IS_ERR(rule)) {
1766 		err = PTR_ERR(rule);
1767 		mlx5_core_err(mdev, "failed to add TX IPsec rule err=%d\n", err);
1768 		goto err_add_flow;
1769 	}
1770 
1771 	kvfree(spec);
1772 	sa_entry->ipsec_rule.rule = rule;
1773 	sa_entry->ipsec_rule.fc = counter;
1774 	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
1775 	return 0;
1776 
1777 err_add_flow:
1778 	mlx5_fc_destroy(mdev, counter);
1779 err_add_cnt:
1780 	if (flow_act.pkt_reformat)
1781 		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
1782 err_pkt_reformat:
1783 	kvfree(spec);
1784 err_alloc:
1785 	tx_ft_put(ipsec, attrs->type);
1786 	return err;
1787 }
1788 
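/*
 * Install a TX policy rule. ALLOW policies forward matching packets to
 * the SA table and, when a reqid is set, tag them with it via a modify
 * header so the SA rule can match. BLOCK policies drop and count.
 */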
1789 static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
1790 {
1791 	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
1792 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
1793 	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
1794 	struct mlx5_flow_destination dest[2] = {};
1795 	struct mlx5_flow_act flow_act = {};
1796 	struct mlx5_flow_handle *rule;
1797 	struct mlx5_flow_spec *spec;
1798 	struct mlx5_flow_table *ft;
1799 	struct mlx5e_ipsec_tx *tx;
1800 	int err, dstn = 0;
1801 
1802 	ft = tx_ft_get_policy(mdev, ipsec, attrs->prio, attrs->type);
1803 	if (IS_ERR(ft))
1804 		return PTR_ERR(ft);
1805 
1806 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1807 	if (!spec) {
1808 		err = -ENOMEM;
1809 		goto err_alloc;
1810 	}
1811 
1812 	tx = ipsec_tx(ipsec, attrs->type);
1813 	if (attrs->family == AF_INET)
1814 		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
1815 	else
1816 		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
1817 
1818 	setup_fte_no_frags(spec);
1819 	setup_fte_upper_proto_match(spec, &attrs->upspec);
1820 
1821 	switch (attrs->action) {
1822 	case XFRM_POLICY_ALLOW:
1823 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1824 		if (!attrs->reqid)
1825 			break;
1826 
1827 		err = setup_modify_header(ipsec, attrs->type, attrs->reqid,
1828 					  XFRM_DEV_OFFLOAD_OUT, &flow_act);
1829 		if (err)
1830 			goto err_mod_header;
1831 		break;
1832 	case XFRM_POLICY_BLOCK:
1833 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
1834 				   MLX5_FLOW_CONTEXT_ACTION_COUNT;
1835 		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1836 		dest[dstn].counter = tx->fc->drop;
1837 		dstn++;
1838 		break;
1839 	default:
1840 		WARN_ON(true);
1841 		err = -EINVAL;
1842 		goto err_mod_header;
1843 	}
1844 
1845 	flow_act.flags |= FLOW_ACT_NO_APPEND;
1846 	if (tx == ipsec->tx_esw && tx->chains)
1847 		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
1848 	dest[dstn].ft = tx->ft.sa;
1849 	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1850 	dstn++;
1851 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
1852 	if (IS_ERR(rule)) {
1853 		err = PTR_ERR(rule);
1854 		mlx5_core_err(mdev, "failed to add TX IPsec policy rule err=%d\n", err);
1855 		goto err_action;
1856 	}
1857 
1858 	kvfree(spec);
1859 	pol_entry->ipsec_rule.rule = rule;
1860 	pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
1861 	return 0;
1862 
1863 err_action:
1864 	if (flow_act.modify_hdr)
1865 		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
1866 err_mod_header:
1867 	kvfree(spec);
1868 err_alloc:
1869 	tx_ft_put_policy(ipsec, attrs->prio, attrs->type);
1870 	return err;
1871 }
1872 
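/*
 * Install an RX policy rule: ALLOW policies forward matching packets
 * to the RX SA table, BLOCK policies drop and count them.
 */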
1873 static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
1874 {
1875 	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
1876 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
1877 	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
1878 	struct mlx5_flow_destination dest[2];
1879 	struct mlx5_flow_act flow_act = {};
1880 	struct mlx5_flow_handle *rule;
1881 	struct mlx5_flow_spec *spec;
1882 	struct mlx5_flow_table *ft;
1883 	struct mlx5e_ipsec_rx *rx;
1884 	int err, dstn = 0;
1885 
1886 	ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio,
1887 			      attrs->type);
1888 	if (IS_ERR(ft))
1889 		return PTR_ERR(ft);
1890 
1891 	rx = ipsec_rx(pol_entry->ipsec, attrs->family, attrs->type);
1892 
1893 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1894 	if (!spec) {
1895 		err = -ENOMEM;
1896 		goto err_alloc;
1897 	}
1898 
1899 	if (attrs->family == AF_INET)
1900 		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
1901 	else
1902 		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
1903 
1904 	setup_fte_no_frags(spec);
1905 	setup_fte_upper_proto_match(spec, &attrs->upspec);
1906 
1907 	switch (attrs->action) {
1908 	case XFRM_POLICY_ALLOW:
1909 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1910 		break;
1911 	case XFRM_POLICY_BLOCK:
1912 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
1913 		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1914 		dest[dstn].counter = rx->fc->drop;
1915 		dstn++;
1916 		break;
1917 	default:
1918 		WARN_ON(true);
1919 		err = -EINVAL;
1920 		goto err_action;
1921 	}
1922 
1923 	flow_act.flags |= FLOW_ACT_NO_APPEND;
1924 	if (rx == ipsec->rx_esw && rx->chains)
1925 		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
1926 	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1927 	dest[dstn].ft = rx->ft.sa;
1928 	dstn++;
1929 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
1930 	if (IS_ERR(rule)) {
1931 		err = PTR_ERR(rule);
1932 		mlx5_core_err(mdev, "failed to add RX IPsec policy rule err=%d\n", err);
1933 		goto err_action;
1934 	}
1935 
1936 	kvfree(spec);
1937 	pol_entry->ipsec_rule.rule = rule;
1938 	return 0;
1939 
1940 err_action:
1941 	kvfree(spec);
1942 err_alloc:
1943 	rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio, attrs->type);
1944 	return err;
1945 }
1946 
1947 static void ipsec_fs_destroy_single_counter(struct mlx5_core_dev *mdev,
1948 					    struct mlx5e_ipsec_fc *fc)
1949 {
1950 	mlx5_fc_destroy(mdev, fc->drop);
1951 	mlx5_fc_destroy(mdev, fc->cnt);
1952 	kfree(fc);
1953 }
1954 
1955 static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
1956 {
1957 	struct mlx5_core_dev *mdev = ipsec->mdev;
1958 
1959 	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
1960 	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
1961 	if (ipsec->is_uplink_rep) {
1962 		ipsec_fs_destroy_single_counter(mdev, ipsec->tx_esw->fc);
1963 		ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
1964 	}
1965 }
1966 
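/* Allocate the pass and drop flow counters for one direction. */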
1967 static struct mlx5e_ipsec_fc *ipsec_fs_init_single_counter(struct mlx5_core_dev *mdev)
1968 {
1969 	struct mlx5e_ipsec_fc *fc;
1970 	struct mlx5_fc *counter;
1971 	int err;
1972 
1973 	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
1974 	if (!fc)
1975 		return ERR_PTR(-ENOMEM);
1976 
1977 	counter = mlx5_fc_create(mdev, false);
1978 	if (IS_ERR(counter)) {
1979 		err = PTR_ERR(counter);
1980 		goto err_cnt;
1981 	}
1982 	fc->cnt = counter;
1983 
1984 	counter = mlx5_fc_create(mdev, false);
1985 	if (IS_ERR(counter)) {
1986 		err = PTR_ERR(counter);
1987 		goto err_drop;
1988 	}
1989 	fc->drop = counter;
1990 
1991 	return fc;
1992 
1993 err_drop:
1994 	mlx5_fc_destroy(mdev, fc->cnt);
1995 err_cnt:
1996 	kfree(fc);
1997 	return ERR_PTR(err);
1998 }
1999 
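/*
 * Create the per-direction counter pairs. RX IPv4 and IPv6 share one
 * pair; uplink representors get separate pairs for the eswitch TX/RX
 * tables.
 */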
2000 static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
2001 {
2002 	struct mlx5_core_dev *mdev = ipsec->mdev;
2003 	struct mlx5e_ipsec_fc *fc;
2004 	int err;
2005 
2006 	fc = ipsec_fs_init_single_counter(mdev);
2007 	if (IS_ERR(fc)) {
2008 		err = PTR_ERR(fc);
2009 		goto err_rx_cnt;
2010 	}
2011 	ipsec->rx_ipv4->fc = fc;
2012 
2013 	fc = ipsec_fs_init_single_counter(mdev);
2014 	if (IS_ERR(fc)) {
2015 		err = PTR_ERR(fc);
2016 		goto err_tx_cnt;
2017 	}
2018 	ipsec->tx->fc = fc;
2019 
2020 	if (ipsec->is_uplink_rep) {
2021 		fc = ipsec_fs_init_single_counter(mdev);
2022 		if (IS_ERR(fc)) {
2023 			err = PTR_ERR(fc);
2024 			goto err_rx_esw_cnt;
2025 		}
2026 		ipsec->rx_esw->fc = fc;
2027 
2028 		fc = ipsec_fs_init_single_counter(mdev);
2029 		if (IS_ERR(fc)) {
2030 			err = PTR_ERR(fc);
2031 			goto err_tx_esw_cnt;
2032 		}
2033 		ipsec->tx_esw->fc = fc;
2034 	}
2035 
2036 	/* Both IPv4 and IPv6 point to the same flow counters struct. */
2037 	ipsec->rx_ipv6->fc = ipsec->rx_ipv4->fc;
2038 	return 0;
2039 
2040 err_tx_esw_cnt:
2041 	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
2042 err_rx_esw_cnt:
2043 	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
2044 err_tx_cnt:
2045 	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
2046 err_rx_cnt:
2047 	return err;
2048 }
2049 
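/*
 * Aggregate the RX/TX packet, byte and drop counters into the hardware
 * stats structure. On uplink representors the eswitch counters are
 * added on top of the legacy ones.
 */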
2050 void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
2051 {
2052 	struct mlx5_core_dev *mdev = priv->mdev;
2053 	struct mlx5e_ipsec *ipsec = priv->ipsec;
2054 	struct mlx5e_ipsec_hw_stats *stats;
2055 	struct mlx5e_ipsec_fc *fc;
2056 	u64 packets, bytes;
2057 
2058 	stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;
2059 
2060 	stats->ipsec_rx_pkts = 0;
2061 	stats->ipsec_rx_bytes = 0;
2062 	stats->ipsec_rx_drop_pkts = 0;
2063 	stats->ipsec_rx_drop_bytes = 0;
2064 	stats->ipsec_tx_pkts = 0;
2065 	stats->ipsec_tx_bytes = 0;
2066 	stats->ipsec_tx_drop_pkts = 0;
2067 	stats->ipsec_tx_drop_bytes = 0;
2068 
2069 	fc = ipsec->rx_ipv4->fc;
2070 	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
2071 	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
2072 		      &stats->ipsec_rx_drop_bytes);
2073 
2074 	fc = ipsec->tx->fc;
2075 	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
2076 	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
2077 		      &stats->ipsec_tx_drop_bytes);
2078 
2079 	if (ipsec->is_uplink_rep) {
2080 		fc = ipsec->rx_esw->fc;
2081 		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
2082 			stats->ipsec_rx_pkts += packets;
2083 			stats->ipsec_rx_bytes += bytes;
2084 		}
2085 
2086 		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
2087 			stats->ipsec_rx_drop_pkts += packets;
2088 			stats->ipsec_rx_drop_bytes += bytes;
2089 		}
2090 
2091 		fc = ipsec->tx_esw->fc;
2092 		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
2093 			stats->ipsec_tx_pkts += packets;
2094 			stats->ipsec_tx_bytes += bytes;
2095 		}
2096 
2097 		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
2098 			stats->ipsec_tx_drop_pkts += packets;
2099 			stats->ipsec_tx_drop_bytes += bytes;
2100 		}
2101 	}
2102 }
2103 
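/*
 * IPsec packet offload and TC offload are mutually exclusive. Fail
 * with -EBUSY if TC flows already block IPsec, otherwise take a
 * reference that blocks TC. With eswitch support the check runs under
 * the eswitch mode lock.
 */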
2104 #ifdef CONFIG_MLX5_ESWITCH
2105 static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
2106 {
2107 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
2108 	int err = 0;
2109 
2110 	if (esw) {
2111 		err = mlx5_esw_lock(esw);
2112 		if (err)
2113 			return err;
2114 	}
2115 
2116 	if (mdev->num_block_ipsec) {
2117 		err = -EBUSY;
2118 		goto unlock;
2119 	}
2120 
2121 	mdev->num_block_tc++;
2122 
2123 unlock:
2124 	if (esw)
2125 		mlx5_esw_unlock(esw);
2126 
2127 	return err;
2128 }
2129 #else
2130 static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
2131 {
2132 	if (mdev->num_block_ipsec)
2133 		return -EBUSY;
2134 
2135 	mdev->num_block_tc++;
2136 	return 0;
2137 }
2138 #endif
2139 
2140 static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev)
2141 {
2142 	mdev->num_block_tc--;
2143 }
2144 
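/*
 * Add the steering rule for an SA entry. Packet offload must first
 * block TC offload, since the two cannot coexist.
 */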
2145 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
2146 {
2147 	int err;
2148 
2149 	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET) {
2150 		err = mlx5e_ipsec_block_tc_offload(sa_entry->ipsec->mdev);
2151 		if (err)
2152 			return err;
2153 	}
2154 
2155 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
2156 		err = tx_add_rule(sa_entry);
2157 	else
2158 		err = rx_add_rule(sa_entry);
2159 
2160 	if (err)
2161 		goto err_out;
2162 
2163 	return 0;
2164 
2165 err_out:
2166 	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
2167 		mlx5e_ipsec_unblock_tc_offload(sa_entry->ipsec->mdev);
2168 	return err;
2169 }
2170 
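/*
 * Tear down an SA entry's rule along with its counter, reformat and
 * modify header contexts. RX entries additionally drop their trailer,
 * auth and replay drop rules and their eswitch ID mapping before the
 * flow table reference is released.
 */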
2171 void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
2172 {
2173 	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
2174 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
2175 
2176 	mlx5_del_flow_rules(ipsec_rule->rule);
2177 	mlx5_fc_destroy(mdev, ipsec_rule->fc);
2178 	if (ipsec_rule->pkt_reformat)
2179 		mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
2180 
2181 	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
2182 		mlx5e_ipsec_unblock_tc_offload(mdev);
2183 
2184 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
2185 		tx_ft_put(sa_entry->ipsec, sa_entry->attrs.type);
2186 		return;
2187 	}
2188 
2189 	if (ipsec_rule->modify_hdr)
2190 		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
2191 
2192 	mlx5_del_flow_rules(ipsec_rule->trailer.rule);
2193 	mlx5_fc_destroy(mdev, ipsec_rule->trailer.fc);
2194 
2195 	mlx5_del_flow_rules(ipsec_rule->auth.rule);
2196 	mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
2197 
2198 	if (ipsec_rule->replay.rule) {
2199 		mlx5_del_flow_rules(ipsec_rule->replay.rule);
2200 		mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
2201 	}
2202 	mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
2203 	rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
2204 }
2205 
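/* Add the steering rule for a policy entry in either direction. */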
2206 int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
2207 {
2208 	int err;
2209 
2210 	err = mlx5e_ipsec_block_tc_offload(pol_entry->ipsec->mdev);
2211 	if (err)
2212 		return err;
2213 
2214 	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
2215 		err = tx_add_policy(pol_entry);
2216 	else
2217 		err = rx_add_policy(pol_entry);
2218 
2219 	if (err)
2220 		goto err_out;
2221 
2222 	return 0;
2223 
2224 err_out:
2225 	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
2226 	return err;
2227 }
2228 
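/* Remove a policy rule, its modify header (TX) and the flow table reference. */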
2229 void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
2230 {
2231 	struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
2232 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
2233 
2234 	mlx5_del_flow_rules(ipsec_rule->rule);
2235 
2236 	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
2237 
2238 	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
2239 		rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family,
2240 				 pol_entry->attrs.prio, pol_entry->attrs.type);
2241 		return;
2242 	}
2243 
2244 	if (ipsec_rule->modify_hdr)
2245 		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
2246 
2247 	tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio, pol_entry->attrs.type);
2248 }
2249 
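/*
 * Release all IPsec steering state, including the RoCE tables when
 * present. The refcount WARNs catch flow tables that are still
 * referenced at teardown time.
 */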
2250 void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
2251 {
2252 	if (!ipsec->tx)
2253 		return;
2254 
2255 	if (ipsec->roce)
2256 		mlx5_ipsec_fs_roce_cleanup(ipsec->roce);
2257 
2258 	ipsec_fs_destroy_counters(ipsec);
2259 	mutex_destroy(&ipsec->tx->ft.mutex);
2260 	WARN_ON(ipsec->tx->ft.refcnt);
2261 	kfree(ipsec->tx);
2262 
2263 	mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
2264 	WARN_ON(ipsec->rx_ipv4->ft.refcnt);
2265 	kfree(ipsec->rx_ipv4);
2266 
2267 	mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
2268 	WARN_ON(ipsec->rx_ipv6->ft.refcnt);
2269 	kfree(ipsec->rx_ipv6);
2270 
2271 	if (ipsec->is_uplink_rep) {
2272 		xa_destroy(&ipsec->ipsec_obj_id_map);
2273 
2274 		mutex_destroy(&ipsec->tx_esw->ft.mutex);
2275 		WARN_ON(ipsec->tx_esw->ft.refcnt);
2276 		kfree(ipsec->tx_esw);
2277 
2278 		mutex_destroy(&ipsec->rx_esw->ft.mutex);
2279 		WARN_ON(ipsec->rx_esw->ft.refcnt);
2280 		kfree(ipsec->rx_esw);
2281 	}
2282 }
2283 
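/*
 * Allocate the per-direction steering contexts and their counters.
 * Uplink representors also get eswitch TX/RX contexts and the object
 * ID map; otherwise the RoCE IPsec tables are initialized when the
 * device supports them.
 */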
2284 int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec,
2285 			      struct mlx5_devcom_comp_dev **devcom)
2286 {
2287 	struct mlx5_core_dev *mdev = ipsec->mdev;
2288 	struct mlx5_flow_namespace *ns, *ns_esw;
2289 	int err = -ENOMEM;
2290 
2291 	ns = mlx5_get_flow_namespace(ipsec->mdev,
2292 				     MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
2293 	if (!ns)
2294 		return -EOPNOTSUPP;
2295 
2296 	if (ipsec->is_uplink_rep) {
2297 		ns_esw = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_FDB);
2298 		if (!ns_esw)
2299 			return -EOPNOTSUPP;
2300 
2301 		ipsec->tx_esw = kzalloc(sizeof(*ipsec->tx_esw), GFP_KERNEL);
2302 		if (!ipsec->tx_esw)
2303 			return -ENOMEM;
2304 
2305 		ipsec->rx_esw = kzalloc(sizeof(*ipsec->rx_esw), GFP_KERNEL);
2306 		if (!ipsec->rx_esw)
2307 			goto err_rx_esw;
2308 	}
2309 
2310 	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
2311 	if (!ipsec->tx)
2312 		goto err_tx;
2313 
2314 	ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
2315 	if (!ipsec->rx_ipv4)
2316 		goto err_rx_ipv4;
2317 
2318 	ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
2319 	if (!ipsec->rx_ipv6)
2320 		goto err_rx_ipv6;
2321 
2322 	err = ipsec_fs_init_counters(ipsec);
2323 	if (err)
2324 		goto err_counters;
2325 
2326 	mutex_init(&ipsec->tx->ft.mutex);
2327 	mutex_init(&ipsec->rx_ipv4->ft.mutex);
2328 	mutex_init(&ipsec->rx_ipv6->ft.mutex);
2329 	ipsec->tx->ns = ns;
2330 
2331 	if (ipsec->is_uplink_rep) {
2332 		mutex_init(&ipsec->tx_esw->ft.mutex);
2333 		mutex_init(&ipsec->rx_esw->ft.mutex);
2334 		ipsec->tx_esw->ns = ns_esw;
2335 		xa_init_flags(&ipsec->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
2336 	} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
2337 		ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom);
2338 	} else {
2339 		mlx5_core_warn(mdev, "IPsec was initialized without RoCE support\n");
2340 	}
2341 
2342 	return 0;
2343 
2344 err_counters:
2345 	kfree(ipsec->rx_ipv6);
2346 err_rx_ipv6:
2347 	kfree(ipsec->rx_ipv4);
2348 err_rx_ipv4:
2349 	kfree(ipsec->tx);
2350 err_tx:
2351 	kfree(ipsec->rx_esw);
2352 err_rx_esw:
2353 	kfree(ipsec->tx_esw);
2354 	return err;
2355 }
2356 
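/*
 * Update an SA entry's rules make-before-break: install rules for a
 * shadow copy first, then delete the old ones and commit the shadow
 * back into the entry.
 */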
2357 void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
2358 {
2359 	struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
2360 	int err;
2361 
2362 	memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
2363 	memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));
2364 
2365 	err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
2366 	if (err)
2367 		return;
2368 
2369 	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
2370 	memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
2371 }
2372 
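/* Report whether tunnel mode is allowed for the SA's direction. */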
2373 bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
2374 {
2375 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
2376 	struct mlx5e_ipsec_rx *rx;
2377 	struct mlx5e_ipsec_tx *tx;
2378 
2379 	rx = ipsec_rx(sa_entry->ipsec, attrs->family, attrs->type);
2380 	tx = ipsec_tx(sa_entry->ipsec, attrs->type);
2381 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
2382 		return tx->allow_tunnel_mode;
2383 
2384 	return rx->allow_tunnel_mode;
2385 }
2386 
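/*
 * Handle a multi-port vHCA event by queueing work on the slave's IPsec
 * workqueue. The master's completion is re-armed here and signalled
 * immediately when the slave has no IPsec support; otherwise the work
 * handler is expected to complete it.
 */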
2387 void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
2388 				  struct mlx5e_priv *master_priv)
2389 {
2390 	struct mlx5e_ipsec_mpv_work *work;
2391 
2392 	reinit_completion(&master_priv->ipsec->comp);
2393 
2394 	if (!slave_priv->ipsec) {
2395 		complete(&master_priv->ipsec->comp);
2396 		return;
2397 	}
2398 
2399 	work = &slave_priv->ipsec->mpv_work;
2400 
2401 	INIT_WORK(&work->work, ipsec_mpv_work_handler);
2402 	work->event = event;
2403 	work->slave_priv = slave_priv;
2404 	work->master_priv = master_priv;
2405 	queue_work(slave_priv->ipsec->wq, &work->work);
2406 }
2407 
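/*
 * Send an IPsec devcom event to the peer device and wait until it has
 * been handled.
 */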
2408 void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
2409 {
2410 	if (!priv->ipsec)
2411 		return; /* IPsec not supported */
2412 
2413 	mlx5_devcom_send_event(priv->devcom, event, event, priv);
2414 	wait_for_completion(&priv->ipsec->comp);
2415 }
2416