1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3
4 #include <net/macsec.h>
5 #include <linux/mlx5/qp.h>
6 #include <linux/if_vlan.h>
7 #include <linux/mlx5/fs_helpers.h>
8 #include <linux/mlx5/macsec.h>
9 #include "fs_core.h"
10 #include "lib/macsec_fs.h"
11 #include "mlx5_core.h"
12
/* MACsec TX flow steering */
#define CRYPTO_NUM_MAXSEC_FTE BIT(15)
#define CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE 1

/* Tx crypto table layout: one group for MKE/EAPOL traps, one large group
 * for the per-SA rules, and a default-rule group at the end.
 */
#define TX_CRYPTO_TABLE_LEVEL 0
#define TX_CRYPTO_TABLE_NUM_GROUPS 3
#define TX_CRYPTO_TABLE_MKE_GROUP_SIZE 1
#define TX_CRYPTO_TABLE_SA_GROUP_SIZE \
	(CRYPTO_NUM_MAXSEC_FTE - (TX_CRYPTO_TABLE_MKE_GROUP_SIZE + \
				  CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE))
#define TX_CHECK_TABLE_LEVEL 1
#define TX_CHECK_TABLE_NUM_FTE 2
#define RX_CRYPTO_TABLE_LEVEL 0
#define RX_CHECK_TABLE_LEVEL 1
#define RX_ROCE_TABLE_LEVEL 2
#define RX_CHECK_TABLE_NUM_FTE 3
#define RX_ROCE_TABLE_NUM_FTE 2
#define RX_CRYPTO_TABLE_NUM_GROUPS 3
/* Rx SA entries are split roughly in half between rules that match the
 * SCI and rules for packets whose SecTAG carries no SCI.
 */
#define RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE \
	((CRYPTO_NUM_MAXSEC_FTE - CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE) / 2)
#define RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE \
	(CRYPTO_NUM_MAXSEC_FTE - RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE)
#define RX_NUM_OF_RULES_PER_SA 2

#define RDMA_RX_ROCE_IP_TABLE_LEVEL 0
#define RDMA_RX_ROCE_MACSEC_OP_TABLE_LEVEL 1

#define MLX5_MACSEC_TAG_LEN 8 /* SecTAG length with ethertype and without the optional SCI */
#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK 0x23
#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET 0x8
#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET 0x5
#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT (0x1 << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET)
#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8
#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN)

/* MACsec fs_id handling for steering */
/* Rx fs_ids are distinguished from Tx fs_ids by setting bit 30. */
#define macsec_fs_set_rx_fs_id(fs_id) ((fs_id) | BIT(30))
50
/* On-the-wire MACsec SecTAG layout (IEEE 802.1AE), used as the packet
 * reformat data the HW prepends on Tx (see
 * macsec_fs_tx_create_sectag_header()).
 */
struct mlx5_sectag_header {
	__be16 ethertype;	/* ETH_P_MACSEC */
	u8 tci_an;		/* TCI flag bits | association number */
	u8 sl;			/* SL (short length) octet */
	u32 pn;			/* packet number field */
	u8 sci[MACSEC_SCI_LEN]; /* optional */
} __packed;
58
/* Per-GID steering rule for RoCE traffic of a Tx SA. */
struct mlx5_roce_macsec_tx_rule {
	u32 fs_id;	/* fs_id of the SA this rule belongs to */
	u16 gid_idx;
	struct list_head entry;
	struct mlx5_flow_handle *rule;
	struct mlx5_modify_hdr *meta_modhdr;
};

/* Steering state of a single Tx SA. */
struct mlx5_macsec_tx_rule {
	struct mlx5_flow_handle *rule;
	struct mlx5_pkt_reformat *pkt_reformat;	/* SecTAG insertion context */
	u32 fs_id;				/* allocated from tx_halloc */
};

/* A flow table together with its explicitly created flow groups. */
struct mlx5_macsec_flow_table {
	int num_groups;
	struct mlx5_flow_table *t;
	struct mlx5_flow_group **g;
};

/* Crypto + check tables shared by all SAs of one direction. */
struct mlx5_macsec_tables {
	struct mlx5_macsec_flow_table ft_crypto;
	struct mlx5_flow_handle *crypto_miss_rule;

	struct mlx5_flow_table *ft_check;
	struct mlx5_flow_group *ft_check_group;
	struct mlx5_fc *check_miss_rule_counter;	/* counts dropped packets */
	struct mlx5_flow_handle *check_miss_rule;
	struct mlx5_fc *check_rule_counter;		/* counts allowed packets */

	u32 refcnt;	/* number of SAs currently using these tables */
};

/* Refcounted fs_id/SCI mapping entry, linked into sci_hash or fs_id_hash. */
struct mlx5_fs_id {
	u32 id;
	refcount_t refcnt;
	sci_t sci;
	struct rhash_head hash;
};

/* Per-netdev bookkeeping of the fs_ids used by its Tx and Rx SAs. */
struct mlx5_macsec_device {
	struct list_head macsec_devices_list_entry;
	void *macdev;
	struct xarray tx_id_xa;
	struct xarray rx_id_xa;
};
105
/* Tx-direction steering state. */
struct mlx5_macsec_tx {
	struct mlx5_flow_handle *crypto_mke_rule; /* allows EAPOL frames to bypass offload */
	struct mlx5_flow_handle *check_rule;

	struct ida tx_halloc;	/* allocator for Tx fs_ids */

	struct mlx5_macsec_tables tables;

	struct mlx5_flow_table *ft_rdma_tx;	/* only when RoCE MACsec is supported */
};

/* Per-GID steering rules for RoCE traffic of an Rx SA. */
struct mlx5_roce_macsec_rx_rule {
	u32 fs_id;
	u16 gid_idx;
	struct mlx5_flow_handle *op;
	struct mlx5_flow_handle *ip;
	struct list_head entry;
};

/* Steering state of a single Rx SA. */
struct mlx5_macsec_rx_rule {
	struct mlx5_flow_handle *rule[RX_NUM_OF_RULES_PER_SA];
	struct mlx5_modify_hdr *meta_modhdr;
};

/* Catch-all miss group + rule occupying the end of a table. */
struct mlx5_macsec_miss {
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
};

struct mlx5_macsec_rx_roce {
	/* Flow table/rules in NIC domain, to check if it's a RoCE packet */
	struct mlx5_flow_group *g;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5_modify_hdr *copy_modify_hdr;
	struct mlx5_macsec_miss nic_miss;

	/* Flow table/rule in RDMA domain, to check dgid */
	struct mlx5_flow_table *ft_ip_check;
	struct mlx5_flow_table *ft_macsec_op_check;
	struct mlx5_macsec_miss miss;
};

/* Rx-direction steering state. */
struct mlx5_macsec_rx {
	/* NOTE(review): two variants — presumably SecTAG with/without SCI;
	 * the Rx rule creation is outside this chunk, confirm there.
	 */
	struct mlx5_flow_handle *check_rule[2];
	struct mlx5_pkt_reformat *check_rule_pkt_reformat[2];

	struct mlx5_macsec_tables tables;
	struct mlx5_macsec_rx_roce roce;
};

/* Handle returned to callers; the direction selects the member. */
union mlx5_macsec_rule {
	struct mlx5_macsec_tx_rule tx_rule;
	struct mlx5_macsec_rx_rule rx_rule;
};
161
/* Hash-table parameters: key a Tx mapping entry by its SCI. */
static const struct rhashtable_params rhash_sci = {
	.key_len = sizeof_field(struct mlx5_fs_id, sci),
	.key_offset = offsetof(struct mlx5_fs_id, sci),
	.head_offset = offsetof(struct mlx5_fs_id, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

/* Hash-table parameters: key an Rx mapping entry by its fs_id. */
static const struct rhashtable_params rhash_fs_id = {
	.key_len = sizeof_field(struct mlx5_fs_id, id),
	.key_offset = offsetof(struct mlx5_fs_id, id),
	.head_offset = offsetof(struct mlx5_fs_id, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

/* Top-level MACsec flow-steering context, one per core device. */
struct mlx5_macsec_fs {
	struct mlx5_core_dev *mdev;
	struct mlx5_macsec_tx *tx_fs;
	struct mlx5_macsec_rx *rx_fs;

	/* Stats manage */
	struct mlx5_macsec_stats stats;

	/* Tx sci -> fs id mapping handling */
	struct rhashtable sci_hash; /* sci -> mlx5_fs_id */

	/* RX fs_id -> mlx5_fs_id mapping handling */
	struct rhashtable fs_id_hash; /* fs_id -> mlx5_fs_id */

	/* TX & RX fs_id lists per macsec device */
	struct list_head macsec_devices_list;
};
195
macsec_fs_destroy_groups(struct mlx5_macsec_flow_table * ft)196 static void macsec_fs_destroy_groups(struct mlx5_macsec_flow_table *ft)
197 {
198 int i;
199
200 for (i = ft->num_groups - 1; i >= 0; i--) {
201 if (!IS_ERR_OR_NULL(ft->g[i]))
202 mlx5_destroy_flow_group(ft->g[i]);
203 ft->g[i] = NULL;
204 }
205 ft->num_groups = 0;
206 }
207
macsec_fs_destroy_flow_table(struct mlx5_macsec_flow_table * ft)208 static void macsec_fs_destroy_flow_table(struct mlx5_macsec_flow_table *ft)
209 {
210 macsec_fs_destroy_groups(ft);
211 kfree(ft->g);
212 mlx5_destroy_flow_table(ft->t);
213 ft->t = NULL;
214 }
215
/* Tear down all Tx steering objects in reverse creation order: the RDMA
 * Tx table, the check table (rule, miss rule, group, table), then the
 * crypto table (MKE rule, miss rule, groups, table).
 */
static void macsec_fs_tx_destroy(struct mlx5_macsec_fs *macsec_fs)
{
	struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
	struct mlx5_macsec_tables *tx_tables;

	/* ft_rdma_tx is only created when RoCE MACsec is supported. */
	if (mlx5_is_macsec_roce_supported(macsec_fs->mdev))
		mlx5_destroy_flow_table(tx_fs->ft_rdma_tx);

	tx_tables = &tx_fs->tables;

	/* Tx check table */
	if (tx_fs->check_rule) {
		mlx5_del_flow_rules(tx_fs->check_rule);
		tx_fs->check_rule = NULL;
	}

	if (tx_tables->check_miss_rule) {
		mlx5_del_flow_rules(tx_tables->check_miss_rule);
		tx_tables->check_miss_rule = NULL;
	}

	if (tx_tables->ft_check_group) {
		mlx5_destroy_flow_group(tx_tables->ft_check_group);
		tx_tables->ft_check_group = NULL;
	}

	if (tx_tables->ft_check) {
		mlx5_destroy_flow_table(tx_tables->ft_check);
		tx_tables->ft_check = NULL;
	}

	/* Tx crypto table */
	if (tx_fs->crypto_mke_rule) {
		mlx5_del_flow_rules(tx_fs->crypto_mke_rule);
		tx_fs->crypto_mke_rule = NULL;
	}

	if (tx_tables->crypto_miss_rule) {
		mlx5_del_flow_rules(tx_tables->crypto_miss_rule);
		tx_tables->crypto_miss_rule = NULL;
	}

	macsec_fs_destroy_flow_table(&tx_tables->ft_crypto);
}
260
/* Create the three explicit flow groups of the Tx crypto table:
 * ethertype match (MKE), per-SA metadata match, and a match-all default
 * group. Returns 0 or a negative errno; on failure, groups created so
 * far remain for macsec_fs_destroy_flow_table() to reclaim.
 */
static int macsec_fs_tx_create_crypto_table_groups(struct mlx5_macsec_flow_table *ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kzalloc_objs(*ft->g, TX_CRYPTO_TABLE_NUM_GROUPS);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);

	if (!in) {
		kfree(ft->g);
		ft->g = NULL;
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Flow Group for MKE match */
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);

	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += TX_CRYPTO_TABLE_MKE_GROUP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Flow Group for SA rules */
	memset(in, 0, inlen);
	memset(mc, 0, mclen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
	MLX5_SET(fte_match_param, mc, misc_parameters_2.metadata_reg_a,
		 MLX5_ETH_WQE_FT_META_MACSEC_MASK);

	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += TX_CRYPTO_TABLE_SA_GROUP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Flow Group for l2 traps (no match criteria - matches everything) */
	memset(in, 0, inlen);
	memset(mc, 0, mclen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	/* Turn the ERR_PTR in the current slot into the return code. */
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}
331
332 static struct mlx5_flow_table
macsec_fs_auto_group_table_create(struct mlx5_flow_namespace * ns,int flags,int level,int max_fte)333 *macsec_fs_auto_group_table_create(struct mlx5_flow_namespace *ns, int flags,
334 int level, int max_fte)
335 {
336 struct mlx5_flow_table_attr ft_attr = {};
337 struct mlx5_flow_table *fdb = NULL;
338
339 /* reserve entry for the match all miss group and rule */
340 ft_attr.autogroup.num_reserved_entries = 1;
341 ft_attr.autogroup.max_num_groups = 1;
342 ft_attr.prio = 0;
343 ft_attr.flags = flags;
344 ft_attr.level = level;
345 ft_attr.max_fte = max_fte;
346
347 fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
348
349 return fdb;
350 }
351
/* Level of the MACsec crypto table within the RDMA_TX domain. */
enum {
	RDMA_TX_MACSEC_LEVEL = 0,
};
355
macsec_fs_tx_roce_create(struct mlx5_macsec_fs * macsec_fs)356 static int macsec_fs_tx_roce_create(struct mlx5_macsec_fs *macsec_fs)
357 {
358 struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
359 struct mlx5_core_dev *mdev = macsec_fs->mdev;
360 struct mlx5_flow_namespace *ns;
361 struct mlx5_flow_table *ft;
362 int err;
363
364 if (!mlx5_is_macsec_roce_supported(mdev)) {
365 mlx5_core_dbg(mdev, "Failed to init RoCE MACsec, capabilities not supported\n");
366 return 0;
367 }
368
369 ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC);
370 if (!ns)
371 return -ENOMEM;
372
373 /* Tx RoCE crypto table */
374 ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_TX_MACSEC_LEVEL, CRYPTO_NUM_MAXSEC_FTE);
375 if (IS_ERR(ft)) {
376 err = PTR_ERR(ft);
377 mlx5_core_err(mdev, "Failed to create MACsec RoCE Tx crypto table err(%d)\n", err);
378 return err;
379 }
380 tx_fs->ft_rdma_tx = ft;
381
382 return 0;
383 }
384
/* Build the whole Tx steering pipeline:
 *   crypto table (MKE trap rule, per-SA group, default-allow miss rule)
 *   -> check table (allow+count rule, default drop+count miss rule)
 *   -> optional RoCE Tx table.
 * On any failure after the crypto table exists, everything created so
 * far is torn down via macsec_fs_tx_destroy().
 */
static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
	struct mlx5_core_dev *mdev = macsec_fs->mdev;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_destination dest = {};
	struct mlx5_macsec_tables *tx_tables;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_macsec_flow_table *ft_crypto;
	struct mlx5_flow_table *flow_table;
	struct mlx5_flow_group *flow_group;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	u32 *flow_group_in;
	int err;

	ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
	if (!ns)
		return -ENOMEM;

	spec = kvzalloc_obj(*spec);
	if (!spec)
		return -ENOMEM;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_spec;
	}

	tx_tables = &tx_fs->tables;
	ft_crypto = &tx_tables->ft_crypto;

	/* Tx crypto table */
	ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	ft_attr.level = TX_CRYPTO_TABLE_LEVEL;
	ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;

	flow_table = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(flow_table)) {
		err = PTR_ERR(flow_table);
		mlx5_core_err(mdev, "Failed to create MACsec Tx crypto table err(%d)\n", err);
		goto out_flow_group;
	}
	ft_crypto->t = flow_table;

	/* Tx crypto table groups */
	err = macsec_fs_tx_create_crypto_table_groups(ft_crypto);
	if (err) {
		mlx5_core_err(mdev,
			      "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
			      err);
		goto err;
	}

	/* Tx crypto table MKE rule - MKE packets shouldn't be offloaded */
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_PAE);
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;

	rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, NULL, 0);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to add MACsec TX MKE rule, err=%d\n", err);
		goto err;
	}
	tx_fs->crypto_mke_rule = rule;

	/* Tx crypto table Default miss rule - allow non-MACsec traffic through */
	memset(&flow_act, 0, sizeof(flow_act));
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to add MACsec Tx table default miss rule %d\n", err);
		goto err;
	}
	tx_tables->crypto_miss_rule = rule;

	/* Tx check table */
	flow_table = macsec_fs_auto_group_table_create(ns, 0, TX_CHECK_TABLE_LEVEL,
						       TX_CHECK_TABLE_NUM_FTE);
	if (IS_ERR(flow_table)) {
		err = PTR_ERR(flow_table);
		mlx5_core_err(mdev, "Fail to create MACsec TX check table, err(%d)\n", err);
		goto err;
	}
	tx_tables->ft_check = flow_table;

	/* Tx check table Default miss group/rule - uses the reserved last entry */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
	flow_group = mlx5_create_flow_group(tx_tables->ft_check, flow_group_in);
	if (IS_ERR(flow_group)) {
		err = PTR_ERR(flow_group);
		mlx5_core_err(mdev,
			      "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
			      err);
		goto err;
	}
	tx_tables->ft_check_group = flow_group;

	/* Tx check table default drop rule - drops and counts failed packets */
	memset(&dest, 0, sizeof(struct mlx5_flow_destination));
	memset(&flow_act, 0, sizeof(flow_act));
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = tx_tables->check_miss_rule_counter;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to added MACsec tx check drop rule, err(%d)\n", err);
		goto err;
	}
	tx_tables->check_miss_rule = rule;

	/* Tx check table rule - allow and count packets with reg_c_4 == 0 */
	memset(spec, 0, sizeof(struct mlx5_flow_spec));
	memset(&dest, 0, sizeof(struct mlx5_flow_destination));
	memset(&flow_act, 0, sizeof(flow_act));

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;

	flow_act.flags = FLOW_ACT_NO_APPEND;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = tx_tables->check_rule_counter;
	rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to add MACsec check rule, err=%d\n", err);
		goto err;
	}
	tx_fs->check_rule = rule;

	err = macsec_fs_tx_roce_create(macsec_fs);
	if (err)
		goto err;

	kvfree(flow_group_in);
	kvfree(spec);
	return 0;

err:
	macsec_fs_tx_destroy(macsec_fs);
out_flow_group:
	kvfree(flow_group_in);
out_spec:
	kvfree(spec);
	return err;
}
543
macsec_fs_tx_ft_get(struct mlx5_macsec_fs * macsec_fs)544 static int macsec_fs_tx_ft_get(struct mlx5_macsec_fs *macsec_fs)
545 {
546 struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
547 struct mlx5_macsec_tables *tx_tables;
548 int err = 0;
549
550 tx_tables = &tx_fs->tables;
551 if (tx_tables->refcnt)
552 goto out;
553
554 err = macsec_fs_tx_create(macsec_fs);
555 if (err)
556 return err;
557
558 out:
559 tx_tables->refcnt++;
560 return err;
561 }
562
macsec_fs_tx_ft_put(struct mlx5_macsec_fs * macsec_fs)563 static void macsec_fs_tx_ft_put(struct mlx5_macsec_fs *macsec_fs)
564 {
565 struct mlx5_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
566
567 if (--tx_tables->refcnt)
568 return;
569
570 macsec_fs_tx_destroy(macsec_fs);
571 }
572
/* Prepare the flow spec and act for a Tx SA rule: allocate an fs_id,
 * match on the per-SA metadata the WQE carries in reg_a, and attach the
 * MACsec crypto object. Returns 0 and sets *fs_id, or a negative errno.
 */
static int macsec_fs_tx_setup_fte(struct mlx5_macsec_fs *macsec_fs,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_act *flow_act,
				  u32 macsec_obj_id,
				  u32 *fs_id)
{
	struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
	int ret;
	u32 id;

	/* fs_id 0 is never handed out - allocation starts at 1. */
	ret = ida_alloc_range(&tx_fs->tx_halloc, 1,
			      MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES,
			      GFP_KERNEL);
	if (ret < 0)
		return ret;
	id = ret;

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

	/* Metadata match */
	MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
		 MLX5_ETH_WQE_FT_META_MACSEC_MASK);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
		 MLX5_MACSEC_TX_METADATA(id));

	*fs_id = id;
	flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
	flow_act->crypto.obj_id = macsec_obj_id;

	mlx5_core_dbg(macsec_fs->mdev, "Tx fte: macsec obj_id %u, fs_id %u\n", macsec_obj_id, id);
	return 0;
}
605
/* Build the SecTAG bytes (IEEE 802.1AE) that the HW reformat context
 * will prepend to each Tx packet, and report how many bytes are valid:
 * MLX5_MACSEC_TAG_LEN, plus MACSEC_SCI_LEN when the SCI is transmitted.
 *
 * Fix: "&sectag" had been mangled into the mojibake "§ag" (a stray
 * HTML &sect; entity), which does not compile - restored.
 */
static void macsec_fs_tx_create_sectag_header(const struct macsec_context *ctx,
					      char *reformatbf,
					      size_t *reformat_size)
{
	const struct macsec_secy *secy = ctx->secy;
	bool sci_present = macsec_send_sci(secy);
	struct mlx5_sectag_header sectag = {};
	const struct macsec_tx_sc *tx_sc;

	tx_sc = &secy->tx_sc;
	sectag.ethertype = htons(ETH_P_MACSEC);

	if (sci_present) {
		sectag.tci_an |= MACSEC_TCI_SC;
		memcpy(&sectag.sci, &secy->sci,
		       sizeof(sectag.sci));
	} else {
		if (tx_sc->end_station)
			sectag.tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			sectag.tci_an |= MACSEC_TCI_SCB;
	}

	/* With GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		sectag.tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		sectag.tci_an |= MACSEC_TCI_C;

	/* Low TCI bits carry the association number of the encoding SA. */
	sectag.tci_an |= tx_sc->encoding_sa;

	*reformat_size = MLX5_MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);

	memcpy(reformatbf, &sectag, *reformat_size);
}
641
macsec_fs_is_macsec_device_empty(struct mlx5_macsec_device * macsec_device)642 static bool macsec_fs_is_macsec_device_empty(struct mlx5_macsec_device *macsec_device)
643 {
644 if (xa_empty(&macsec_device->tx_id_xa) &&
645 xa_empty(&macsec_device->rx_id_xa))
646 return true;
647
648 return false;
649 }
650
/* Drop one reference on the fs_id mapping of @macdev; on the last
 * reference the entry is unhashed (under the xarray lock, so datapath
 * readers never see a stale SA), erased and freed, and the per-netdev
 * node is released if this was its last Tx/Rx id.
 */
static void macsec_fs_id_del(struct list_head *macsec_devices_list, u32 fs_id,
			     void *macdev, struct rhashtable *hash_table, bool is_tx)
{
	const struct rhashtable_params *rhash = (is_tx) ? &rhash_sci : &rhash_fs_id;
	struct mlx5_macsec_device *iter, *macsec_device = NULL;
	struct mlx5_fs_id *fs_id_found;
	struct xarray *fs_id_xa;

	list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) {
		if (iter->macdev == macdev) {
			macsec_device = iter;
			break;
		}
	}
	/* NOTE(review): a NULL macsec_device (and below, fs_id_found) is
	 * still dereferenced after the WARN - callers must guarantee a
	 * successful prior macsec_fs_id_add() for this id.
	 */
	WARN_ON(!macsec_device);

	fs_id_xa = (is_tx) ? &macsec_device->tx_id_xa :
			     &macsec_device->rx_id_xa;
	xa_lock(fs_id_xa);
	fs_id_found = xa_load(fs_id_xa, fs_id);
	WARN_ON(!fs_id_found);

	if (!refcount_dec_and_test(&fs_id_found->refcnt)) {
		xa_unlock(fs_id_xa);
		return;
	}

	if (fs_id_found->id) {
		/* Make sure ongoing datapath readers sees a valid SA */
		rhashtable_remove_fast(hash_table, &fs_id_found->hash, *rhash);
		fs_id_found->id = 0;
	}
	xa_unlock(fs_id_xa);

	xa_erase(fs_id_xa, fs_id);

	kfree(fs_id_found);

	if (macsec_fs_is_macsec_device_empty(macsec_device)) {
		list_del(&macsec_device->macsec_devices_list_entry);
		kfree(macsec_device);
	}
}
694
/* Register an fs_id for @macdev and make it look-up-able: Tx entries are
 * hashed by SCI, Rx entries by fs_id (and refcounted, since several Rx
 * SAs can share one fs_id). Allocates the per-netdev bookkeeping node on
 * first use. Returns 0 on success or a negative errno.
 */
static int macsec_fs_id_add(struct list_head *macsec_devices_list, u32 fs_id,
			    void *macdev, struct rhashtable *hash_table, sci_t sci,
			    bool is_tx)
{
	const struct rhashtable_params *rhash = (is_tx) ? &rhash_sci : &rhash_fs_id;
	struct mlx5_macsec_device *iter, *macsec_device = NULL;
	struct mlx5_fs_id *fs_id_iter;
	struct xarray *fs_id_xa;
	int err;

	if (!is_tx) {
		/* Rx fs_id already known - just take another reference. */
		rcu_read_lock();
		fs_id_iter = rhashtable_lookup(hash_table, &fs_id, rhash_fs_id);
		if (fs_id_iter) {
			refcount_inc(&fs_id_iter->refcnt);
			rcu_read_unlock();
			return 0;
		}
		rcu_read_unlock();
	}

	fs_id_iter = kzalloc_obj(*fs_id_iter);
	if (!fs_id_iter)
		return -ENOMEM;

	list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) {
		if (iter->macdev == macdev) {
			macsec_device = iter;
			break;
		}
	}

	if (!macsec_device) { /* first time adding a SA to that device */
		macsec_device = kzalloc_obj(*macsec_device);
		if (!macsec_device) {
			err = -ENOMEM;
			goto err_alloc_dev;
		}
		macsec_device->macdev = macdev;
		xa_init(&macsec_device->tx_id_xa);
		xa_init(&macsec_device->rx_id_xa);
		list_add(&macsec_device->macsec_devices_list_entry, macsec_devices_list);
	}

	fs_id_xa = (is_tx) ? &macsec_device->tx_id_xa :
			     &macsec_device->rx_id_xa;
	fs_id_iter->id = fs_id;
	refcount_set(&fs_id_iter->refcnt, 1);
	fs_id_iter->sci = sci;
	err = xa_err(xa_store(fs_id_xa, fs_id, fs_id_iter, GFP_KERNEL));
	if (err)
		goto err_store_id;

	err = rhashtable_insert_fast(hash_table, &fs_id_iter->hash, *rhash);
	if (err)
		goto err_hash_insert;

	return 0;

err_hash_insert:
	xa_erase(fs_id_xa, fs_id);
err_store_id:
	/* Drop a device node that was created just for this failed add. */
	if (macsec_fs_is_macsec_device_empty(macsec_device)) {
		list_del(&macsec_device->macsec_devices_list_entry);
		kfree(macsec_device);
	}
err_alloc_dev:
	kfree(fs_id_iter);
	return err;
}
765
/* Release everything macsec_fs_tx_add_rule() created for one Tx SA:
 * the SCI mapping, the steering rule, the SecTAG reformat context, the
 * fs_id, the rule struct itself, and finally a Tx tables reference
 * (which may tear down the whole Tx pipeline if it was the last one).
 */
static void macsec_fs_tx_del_rule(struct mlx5_macsec_fs *macsec_fs,
				  struct mlx5_macsec_tx_rule *tx_rule,
				  void *macdev)
{
	macsec_fs_id_del(&macsec_fs->macsec_devices_list, tx_rule->fs_id, macdev,
			 &macsec_fs->sci_hash, true);

	if (tx_rule->rule) {
		mlx5_del_flow_rules(tx_rule->rule);
		tx_rule->rule = NULL;
	}

	if (tx_rule->pkt_reformat) {
		mlx5_packet_reformat_dealloc(macsec_fs->mdev, tx_rule->pkt_reformat);
		tx_rule->pkt_reformat = NULL;
	}

	if (tx_rule->fs_id) {
		ida_free(&macsec_fs->tx_fs->tx_halloc, tx_rule->fs_id);
		tx_rule->fs_id = 0;
	}

	kfree(tx_rule);

	macsec_fs_tx_ft_put(macsec_fs);
}
792
/* On VLAN upper devices the SecTAG is inserted 4 bytes deeper to skip
 * the VLAN header.
 */
#define MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES 1

/* Install the steering state for one Tx SA: a SecTAG packet-reformat
 * context, a metadata-matching encrypt rule in the crypto table that
 * forwards to the check table, and the fs_id <-> SCI mapping. Takes a
 * Tx tables reference. Returns the rule handle, or NULL on failure.
 */
static union mlx5_macsec_rule *
macsec_fs_tx_add_rule(struct mlx5_macsec_fs *macsec_fs,
		      const struct macsec_context *macsec_ctx,
		      struct mlx5_macsec_rule_attrs *attrs, u32 *fs_id)
{
	char reformatbf[MLX5_MACSEC_TAG_LEN + MACSEC_SCI_LEN];
	struct mlx5_pkt_reformat_params reformat_params = {};
	struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
	struct mlx5_core_dev *mdev = macsec_fs->mdev;
	union mlx5_macsec_rule *macsec_rule = NULL;
	struct mlx5_flow_destination dest = {};
	struct mlx5_macsec_tables *tx_tables;
	struct mlx5_macsec_tx_rule *tx_rule;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	size_t reformat_size;
	int err = 0;

	tx_tables = &tx_fs->tables;

	spec = kvzalloc_obj(*spec);
	if (!spec)
		return NULL;

	err = macsec_fs_tx_ft_get(macsec_fs);
	if (err)
		goto out_spec;

	macsec_rule = kzalloc_obj(*macsec_rule);
	if (!macsec_rule) {
		macsec_fs_tx_ft_put(macsec_fs);
		goto out_spec;
	}

	tx_rule = &macsec_rule->tx_rule;

	/* Tx crypto table crypto rule */
	macsec_fs_tx_create_sectag_header(macsec_ctx, reformatbf, &reformat_size);

	reformat_params.type = MLX5_REFORMAT_TYPE_ADD_MACSEC;
	reformat_params.size = reformat_size;
	reformat_params.data = reformatbf;

	if (is_vlan_dev(macsec_ctx->netdev))
		reformat_params.param_0 = MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES;

	flow_act.pkt_reformat = mlx5_packet_reformat_alloc(mdev,
							   &reformat_params,
							   MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
	if (IS_ERR(flow_act.pkt_reformat)) {
		err = PTR_ERR(flow_act.pkt_reformat);
		mlx5_core_err(mdev, "Failed to allocate MACsec Tx reformat context err=%d\n", err);
		goto err;
	}
	tx_rule->pkt_reformat = flow_act.pkt_reformat;

	err = macsec_fs_tx_setup_fte(macsec_fs, spec, &flow_act, attrs->macsec_obj_id, fs_id);
	if (err) {
		mlx5_core_err(mdev,
			      "Failed to add packet reformat for MACsec TX crypto rule, err=%d\n",
			      err);
		goto err;
	}

	tx_rule->fs_id = *fs_id;

	/* Encrypt, insert the SecTAG, and forward to the check table. */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
			  MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = tx_tables->ft_check;
	rule = mlx5_add_flow_rules(tx_tables->ft_crypto.t, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to add MACsec TX crypto rule, err=%d\n", err);
		goto err;
	}
	tx_rule->rule = rule;

	err = macsec_fs_id_add(&macsec_fs->macsec_devices_list, *fs_id, macsec_ctx->secy->netdev,
			       &macsec_fs->sci_hash, attrs->sci, true);
	if (err) {
		mlx5_core_err(mdev, "Failed to save fs_id, err=%d\n", err);
		goto err;
	}

	goto out_spec;

err:
	/* NOTE(review): macsec_fs_tx_del_rule() begins with
	 * macsec_fs_id_del(), which expects the id to have been added -
	 * confirm this is safe on paths reached before macsec_fs_id_add().
	 */
	macsec_fs_tx_del_rule(macsec_fs, tx_rule, macsec_ctx->secy->netdev);
	macsec_rule = NULL;
out_spec:
	kvfree(spec);

	return macsec_rule;
}
892
/* Free the Tx context allocated by macsec_fs_tx_init(). Refuses to run
 * (and logs) while any SA still holds a reference on the tables.
 */
static void macsec_fs_tx_cleanup(struct mlx5_macsec_fs *macsec_fs)
{
	struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
	struct mlx5_core_dev *mdev = macsec_fs->mdev;
	struct mlx5_macsec_tables *tx_tables;

	if (!tx_fs)
		return;

	tx_tables = &tx_fs->tables;
	if (tx_tables->refcnt) {
		mlx5_core_err(mdev,
			      "Can't destroy MACsec offload tx_fs, refcnt(%u) isn't 0\n",
			      tx_tables->refcnt);
		return;
	}

	ida_destroy(&tx_fs->tx_halloc);

	if (tx_tables->check_miss_rule_counter) {
		mlx5_fc_destroy(mdev, tx_tables->check_miss_rule_counter);
		tx_tables->check_miss_rule_counter = NULL;
	}

	if (tx_tables->check_rule_counter) {
		mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
		tx_tables->check_rule_counter = NULL;
	}

	kfree(tx_fs);
	macsec_fs->tx_fs = NULL;
}
925
macsec_fs_tx_init(struct mlx5_macsec_fs * macsec_fs)926 static int macsec_fs_tx_init(struct mlx5_macsec_fs *macsec_fs)
927 {
928 struct mlx5_core_dev *mdev = macsec_fs->mdev;
929 struct mlx5_macsec_tables *tx_tables;
930 struct mlx5_macsec_tx *tx_fs;
931 struct mlx5_fc *flow_counter;
932 int err;
933
934 tx_fs = kzalloc_obj(*tx_fs);
935 if (!tx_fs)
936 return -ENOMEM;
937
938 tx_tables = &tx_fs->tables;
939
940 flow_counter = mlx5_fc_create(mdev, false);
941 if (IS_ERR(flow_counter)) {
942 err = PTR_ERR(flow_counter);
943 mlx5_core_err(mdev,
944 "Failed to create MACsec Tx encrypt flow counter, err(%d)\n",
945 err);
946 goto err_encrypt_counter;
947 }
948 tx_tables->check_rule_counter = flow_counter;
949
950 flow_counter = mlx5_fc_create(mdev, false);
951 if (IS_ERR(flow_counter)) {
952 err = PTR_ERR(flow_counter);
953 mlx5_core_err(mdev,
954 "Failed to create MACsec Tx drop flow counter, err(%d)\n",
955 err);
956 goto err_drop_counter;
957 }
958 tx_tables->check_miss_rule_counter = flow_counter;
959
960 ida_init(&tx_fs->tx_halloc);
961 INIT_LIST_HEAD(&macsec_fs->macsec_devices_list);
962
963 macsec_fs->tx_fs = tx_fs;
964
965 return 0;
966
967 err_drop_counter:
968 mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
969 tx_tables->check_rule_counter = NULL;
970
971 err_encrypt_counter:
972 kfree(tx_fs);
973 macsec_fs->tx_fs = NULL;
974
975 return err;
976 }
977
/* Destroy a miss rule and then the group that holds it. */
static void macsec_fs_rx_roce_miss_destroy(struct mlx5_macsec_miss *miss)
{
	mlx5_del_flow_rules(miss->rule);
	mlx5_destroy_flow_group(miss->g);
}
983
/* Tear down the RoCE Rx steering objects in the NIC and RDMA domains.
 * No-op when RoCE MACsec isn't supported (nothing was created).
 */
static void macsec_fs_rdma_rx_destroy(struct mlx5_macsec_rx_roce *roce, struct mlx5_core_dev *mdev)
{
	if (!mlx5_is_macsec_roce_supported(mdev))
		return;

	/* NIC-domain objects: rules, modify header, groups, table. */
	mlx5_del_flow_rules(roce->nic_miss.rule);
	mlx5_del_flow_rules(roce->rule);
	mlx5_modify_header_dealloc(mdev, roce->copy_modify_hdr);
	mlx5_destroy_flow_group(roce->nic_miss.g);
	mlx5_destroy_flow_group(roce->g);
	mlx5_destroy_flow_table(roce->ft);

	/* RDMA-domain objects. */
	macsec_fs_rx_roce_miss_destroy(&roce->miss);
	mlx5_destroy_flow_table(roce->ft_macsec_op_check);
	mlx5_destroy_flow_table(roce->ft_ip_check);
}
1000
/* Tear down all Rx steering state: both check rules and their reformat
 * contexts, the check table, the crypto table and the RoCE Rx objects.
 */
static void macsec_fs_rx_destroy(struct mlx5_macsec_fs *macsec_fs)
{
	struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
	struct mlx5_macsec_tables *rx_tables;
	int i;

	/* Rx check table */
	for (i = 1; i >= 0; --i) {
		if (rx_fs->check_rule[i]) {
			mlx5_del_flow_rules(rx_fs->check_rule[i]);
			rx_fs->check_rule[i] = NULL;
		}

		if (rx_fs->check_rule_pkt_reformat[i]) {
			mlx5_packet_reformat_dealloc(macsec_fs->mdev,
						     rx_fs->check_rule_pkt_reformat[i]);
			rx_fs->check_rule_pkt_reformat[i] = NULL;
		}
	}

	rx_tables = &rx_fs->tables;

	if (rx_tables->check_miss_rule) {
		mlx5_del_flow_rules(rx_tables->check_miss_rule);
		rx_tables->check_miss_rule = NULL;
	}

	if (rx_tables->ft_check_group) {
		mlx5_destroy_flow_group(rx_tables->ft_check_group);
		rx_tables->ft_check_group = NULL;
	}

	if (rx_tables->ft_check) {
		mlx5_destroy_flow_table(rx_tables->ft_check);
		rx_tables->ft_check = NULL;
	}

	/* Rx crypto table */
	if (rx_tables->crypto_miss_rule) {
		mlx5_del_flow_rules(rx_tables->crypto_miss_rule);
		rx_tables->crypto_miss_rule = NULL;
	}

	macsec_fs_destroy_flow_table(&rx_tables->ft_crypto);

	macsec_fs_rdma_rx_destroy(&macsec_fs->rx_fs->roce, macsec_fs->mdev);
}
1048
macsec_fs_rx_create_crypto_table_groups(struct mlx5_macsec_flow_table * ft)1049 static int macsec_fs_rx_create_crypto_table_groups(struct mlx5_macsec_flow_table *ft)
1050 {
1051 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1052 int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
1053 int ix = 0;
1054 u32 *in;
1055 int err;
1056 u8 *mc;
1057
1058 ft->g = kzalloc_objs(*ft->g, RX_CRYPTO_TABLE_NUM_GROUPS);
1059 if (!ft->g)
1060 return -ENOMEM;
1061
1062 in = kvzalloc(inlen, GFP_KERNEL);
1063 if (!in) {
1064 kfree(ft->g);
1065 return -ENOMEM;
1066 }
1067
1068 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1069
1070 /* Flow group for SA rule with SCI */
1071 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
1072 MLX5_MATCH_MISC_PARAMETERS_5);
1073 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1074
1075 MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
1076 MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK <<
1077 MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
1078 MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_2);
1079 MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_3);
1080
1081 MLX5_SET_CFG(in, start_flow_index, ix);
1082 ix += RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE;
1083 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1084 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1085 if (IS_ERR(ft->g[ft->num_groups]))
1086 goto err;
1087 ft->num_groups++;
1088
1089 /* Flow group for SA rule without SCI */
1090 memset(in, 0, inlen);
1091 memset(mc, 0, mclen);
1092 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
1093 MLX5_MATCH_MISC_PARAMETERS_5);
1094 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_47_16);
1095 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_15_0);
1096 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1097
1098 MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
1099 MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
1100
1101 MLX5_SET_CFG(in, start_flow_index, ix);
1102 ix += RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE;
1103 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1104 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1105 if (IS_ERR(ft->g[ft->num_groups]))
1106 goto err;
1107 ft->num_groups++;
1108
1109 /* Flow Group for l2 traps */
1110 memset(in, 0, inlen);
1111 memset(mc, 0, mclen);
1112 MLX5_SET_CFG(in, start_flow_index, ix);
1113 ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
1114 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1115 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1116 if (IS_ERR(ft->g[ft->num_groups]))
1117 goto err;
1118 ft->num_groups++;
1119
1120 kvfree(in);
1121 return 0;
1122
1123 err:
1124 err = PTR_ERR(ft->g[ft->num_groups]);
1125 ft->g[ft->num_groups] = NULL;
1126 kvfree(in);
1127
1128 return err;
1129 }
1130
macsec_fs_rx_create_check_decap_rule(struct mlx5_macsec_fs * macsec_fs,struct mlx5_flow_destination * dest,struct mlx5_flow_act * flow_act,struct mlx5_flow_spec * spec,int reformat_param_size)1131 static int macsec_fs_rx_create_check_decap_rule(struct mlx5_macsec_fs *macsec_fs,
1132 struct mlx5_flow_destination *dest,
1133 struct mlx5_flow_act *flow_act,
1134 struct mlx5_flow_spec *spec,
1135 int reformat_param_size)
1136 {
1137 int rule_index = (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) ? 0 : 1;
1138 u8 mlx5_reformat_buf[MLX5_SECTAG_HEADER_SIZE_WITH_SCI];
1139 struct mlx5_pkt_reformat_params reformat_params = {};
1140 struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
1141 struct mlx5_core_dev *mdev = macsec_fs->mdev;
1142 struct mlx5_flow_destination roce_dest[2];
1143 struct mlx5_macsec_tables *rx_tables;
1144 struct mlx5_flow_handle *rule;
1145 int err = 0, dstn = 0;
1146
1147 rx_tables = &rx_fs->tables;
1148
1149 /* Rx check table decap 16B rule */
1150 memset(dest, 0, sizeof(*dest));
1151 memset(flow_act, 0, sizeof(*flow_act));
1152 memset(spec, 0, sizeof(*spec));
1153
1154 reformat_params.type = MLX5_REFORMAT_TYPE_DEL_MACSEC;
1155 reformat_params.size = reformat_param_size;
1156 reformat_params.data = mlx5_reformat_buf;
1157 flow_act->pkt_reformat = mlx5_packet_reformat_alloc(mdev,
1158 &reformat_params,
1159 MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
1160 if (IS_ERR(flow_act->pkt_reformat)) {
1161 err = PTR_ERR(flow_act->pkt_reformat);
1162 mlx5_core_err(mdev, "Failed to allocate MACsec Rx reformat context err=%d\n", err);
1163 return err;
1164 }
1165 rx_fs->check_rule_pkt_reformat[rule_index] = flow_act->pkt_reformat;
1166
1167 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1168 /* MACsec syndrome match */
1169 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.macsec_syndrome);
1170 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.macsec_syndrome, 0);
1171 /* ASO return reg syndrome match */
1172 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
1173 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
1174
1175 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
1176 /* Sectag TCI SC present bit*/
1177 MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
1178 MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
1179
1180 if (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI)
1181 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
1182 MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT <<
1183 MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
1184
1185 flow_act->flags = FLOW_ACT_NO_APPEND;
1186
1187 if (rx_fs->roce.ft) {
1188 flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1189 roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1190 roce_dest[dstn].ft = rx_fs->roce.ft;
1191 dstn++;
1192 } else {
1193 flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
1194 }
1195
1196 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
1197 MLX5_FLOW_CONTEXT_ACTION_COUNT;
1198 roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1199 roce_dest[dstn].counter = rx_tables->check_rule_counter;
1200 rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, roce_dest, dstn + 1);
1201
1202 if (IS_ERR(rule)) {
1203 err = PTR_ERR(rule);
1204 mlx5_core_err(mdev, "Failed to add MACsec Rx check rule, err=%d\n", err);
1205 return err;
1206 }
1207
1208 rx_fs->check_rule[rule_index] = rule;
1209
1210 return 0;
1211 }
1212
macsec_fs_rx_roce_miss_create(struct mlx5_core_dev * mdev,struct mlx5_macsec_rx_roce * roce)1213 static int macsec_fs_rx_roce_miss_create(struct mlx5_core_dev *mdev,
1214 struct mlx5_macsec_rx_roce *roce)
1215 {
1216 struct mlx5_flow_act flow_act = {};
1217 struct mlx5_flow_group *flow_group;
1218 struct mlx5_flow_handle *rule;
1219 u32 *flow_group_in;
1220 int err;
1221
1222 flow_group_in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
1223 if (!flow_group_in)
1224 return -ENOMEM;
1225
1226 /* IP check ft has no miss rule since we use default miss action which is go to next PRIO */
1227 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
1228 roce->ft_macsec_op_check->max_fte - 1);
1229 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1230 roce->ft_macsec_op_check->max_fte - 1);
1231 flow_group = mlx5_create_flow_group(roce->ft_macsec_op_check, flow_group_in);
1232 if (IS_ERR(flow_group)) {
1233 err = PTR_ERR(flow_group);
1234 mlx5_core_err(mdev,
1235 "Failed to create miss flow group for MACsec RoCE operation check table err(%d)\n",
1236 err);
1237 goto err_macsec_op_miss_group;
1238 }
1239 roce->miss.g = flow_group;
1240
1241 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
1242 rule = mlx5_add_flow_rules(roce->ft_macsec_op_check, NULL, &flow_act, NULL, 0);
1243 if (IS_ERR(rule)) {
1244 err = PTR_ERR(rule);
1245 mlx5_core_err(mdev, "Failed to add miss rule to MACsec RoCE operation check table err(%d)\n",
1246 err);
1247 goto err_macsec_op_rule;
1248 }
1249 roce->miss.rule = rule;
1250
1251 kvfree(flow_group_in);
1252 return 0;
1253
1254 err_macsec_op_rule:
1255 mlx5_destroy_flow_group(roce->miss.g);
1256 err_macsec_op_miss_group:
1257 kvfree(flow_group_in);
1258 return err;
1259 }
1260
1261 #define MLX5_RX_ROCE_GROUP_SIZE BIT(0)
1262
macsec_fs_rx_roce_jump_to_rdma_groups_create(struct mlx5_core_dev * mdev,struct mlx5_macsec_rx_roce * roce)1263 static int macsec_fs_rx_roce_jump_to_rdma_groups_create(struct mlx5_core_dev *mdev,
1264 struct mlx5_macsec_rx_roce *roce)
1265 {
1266 struct mlx5_flow_group *g;
1267 void *outer_headers_c;
1268 int ix = 0;
1269 u32 *in;
1270 int err;
1271 u8 *mc;
1272
1273 in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
1274 if (!in)
1275 return -ENOMEM;
1276
1277 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1278 outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
1279 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
1280 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
1281
1282 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1283 MLX5_SET_CFG(in, start_flow_index, ix);
1284 ix += MLX5_RX_ROCE_GROUP_SIZE;
1285 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1286 g = mlx5_create_flow_group(roce->ft, in);
1287 if (IS_ERR(g)) {
1288 err = PTR_ERR(g);
1289 mlx5_core_err(mdev, "Failed to create main flow group for MACsec RoCE NIC UDP table err(%d)\n",
1290 err);
1291 goto err_udp_group;
1292 }
1293 roce->g = g;
1294
1295 memset(in, 0, MLX5_ST_SZ_BYTES(create_flow_group_in));
1296 MLX5_SET_CFG(in, start_flow_index, ix);
1297 ix += MLX5_RX_ROCE_GROUP_SIZE;
1298 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1299 g = mlx5_create_flow_group(roce->ft, in);
1300 if (IS_ERR(g)) {
1301 err = PTR_ERR(g);
1302 mlx5_core_err(mdev, "Failed to create miss flow group for MACsec RoCE NIC UDP table err(%d)\n",
1303 err);
1304 goto err_udp_miss_group;
1305 }
1306 roce->nic_miss.g = g;
1307
1308 kvfree(in);
1309 return 0;
1310
1311 err_udp_miss_group:
1312 mlx5_destroy_flow_group(roce->g);
1313 err_udp_group:
1314 kvfree(in);
1315 return err;
1316 }
1317
/* Add the two rules of the NIC-namespace UDP jump table:
 *  - hit rule: match UDP/ROCE_V2_UDP_DPORT, copy 32 bits of metadata
 *    REG_B (which carries the MACsec fs_id marker written at decrypt
 *    time) into REG_C_5 so the RDMA side can read it, then forward to
 *    the RDMA-namespace IP check table;
 *  - miss rule: criteria-less, forward to the next priority (normal
 *    non-RoCE processing).
 * Returns 0 or a negative errno; unwinds its own allocations on error.
 */
static int macsec_fs_rx_roce_jump_to_rdma_rules_create(struct mlx5_macsec_fs *macsec_fs,
						       struct mlx5_macsec_rx_roce *roce)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_core_dev *mdev = macsec_fs->mdev;
	struct mlx5_flow_destination dst = {};
	struct mlx5_modify_hdr *modify_hdr;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc_obj(*spec);
	if (!spec)
		return -ENOMEM;

	/* Match RoCE v2 traffic: UDP with the RoCE v2 destination port */
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, ROCE_V2_UDP_DPORT);

	/* Copy all 32 bits of REG_B into REG_C_5 for the RDMA-side tables */
	MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(copy_action_in, action, src_offset, 0);
	MLX5_SET(copy_action_in, action, length, 32);
	MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_5);
	MLX5_SET(copy_action_in, action, dst_offset, 0);

	modify_hdr = mlx5_modify_header_alloc(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
					      1, action);

	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		mlx5_core_err(mdev,
			      "Failed to alloc macsec copy modify_header_id err(%d)\n", err);
		goto err_alloc_hdr;
	}

	/* NOTE(review): TABLE_TYPE destination is used here (not FLOW_TABLE)
	 * since ft_ip_check lives in a different (RDMA RX) namespace.
	 */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_act.modify_hdr = modify_hdr;
	dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
	dst.ft = roce->ft_ip_check;
	rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to add rule to MACsec RoCE NIC UDP table err(%d)\n",
			      err);
		goto err_add_rule;
	}
	roce->rule = rule;
	roce->copy_modify_hdr = modify_hdr;

	/* Miss rule: anything non-RoCE continues down the normal NIC path */
	memset(&flow_act, 0, sizeof(flow_act));
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
	rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, NULL, 0);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to add miss rule to MACsec RoCE NIC UDP table err(%d)\n",
			      err);
		goto err_add_rule2;
	}
	roce->nic_miss.rule = rule;

	kvfree(spec);
	return 0;

err_add_rule2:
	mlx5_del_flow_rules(roce->rule);
err_add_rule:
	mlx5_modify_header_dealloc(macsec_fs->mdev, modify_hdr);
err_alloc_hdr:
	kvfree(spec);
	return err;
}
1393
macsec_fs_rx_roce_jump_to_rdma_create(struct mlx5_macsec_fs * macsec_fs,struct mlx5_macsec_rx_roce * roce)1394 static int macsec_fs_rx_roce_jump_to_rdma_create(struct mlx5_macsec_fs *macsec_fs,
1395 struct mlx5_macsec_rx_roce *roce)
1396 {
1397 int err;
1398
1399 err = macsec_fs_rx_roce_jump_to_rdma_groups_create(macsec_fs->mdev, roce);
1400 if (err)
1401 return err;
1402
1403 err = macsec_fs_rx_roce_jump_to_rdma_rules_create(macsec_fs, roce);
1404 if (err)
1405 goto err;
1406
1407 return 0;
1408 err:
1409 mlx5_destroy_flow_group(roce->nic_miss.g);
1410 mlx5_destroy_flow_group(roce->g);
1411 return err;
1412 }
1413
/* Create the RoCE part of the Rx pipeline:
 *  - two auto-grouped tables in the RDMA RX MACsec namespace
 *    (IP check, then MACsec-operation check with its miss drop rule);
 *  - a small jump table in the NIC kernel RX MACsec namespace that
 *    redirects RoCE v2 UDP traffic into the RDMA-side tables.
 * Silently succeeds (returns 0) when the device lacks RoCE MACsec
 * support, in which case macsec_fs_rdma_rx_destroy() is also a no-op.
 */
static int macsec_fs_rx_roce_create(struct mlx5_macsec_fs *macsec_fs)
{
	struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
	struct mlx5_core_dev *mdev = macsec_fs->mdev;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	int err = 0;

	if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev)) {
		mlx5_core_dbg(mdev, "Failed to init RoCE MACsec, capabilities not supported\n");
		return 0;
	}

	ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC);
	if (!ns)
		return -ENOMEM;

	/* RDMA-side IP check table */
	ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_RX_ROCE_IP_TABLE_LEVEL,
					       CRYPTO_NUM_MAXSEC_FTE);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev,
			      "Failed to create MACsec IP check RoCE table err(%d)\n", err);
		return err;
	}
	rx_fs->roce.ft_ip_check = ft;

	/* RDMA-side MACsec-operation check table */
	ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_RX_ROCE_MACSEC_OP_TABLE_LEVEL,
					       CRYPTO_NUM_MAXSEC_FTE);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev,
			      "Failed to create MACsec operation check RoCE table err(%d)\n",
			      err);
		goto err_macsec_op;
	}
	rx_fs->roce.ft_macsec_op_check = ft;

	/* Miss drop entry on the op-check table's last FTE */
	err = macsec_fs_rx_roce_miss_create(mdev, &rx_fs->roce);
	if (err)
		goto err_miss_create;

	/* NIC-side jump table that steers RoCE v2 UDP into the RDMA tables */
	ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
	if (!ns) {
		err = -EOPNOTSUPP;
		goto err_ns;
	}

	ft_attr.level = RX_ROCE_TABLE_LEVEL;
	ft_attr.max_fte = RX_ROCE_TABLE_NUM_FTE;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev,
			      "Failed to create MACsec jump to RX RoCE, NIC table err(%d)\n", err);
		goto err_ns;
	}
	rx_fs->roce.ft = ft;

	err = macsec_fs_rx_roce_jump_to_rdma_create(macsec_fs, &rx_fs->roce);
	if (err)
		goto err_udp_ft;

	return 0;

err_udp_ft:
	mlx5_destroy_flow_table(rx_fs->roce.ft);
err_ns:
	macsec_fs_rx_roce_miss_destroy(&rx_fs->roce.miss);
err_miss_create:
	mlx5_destroy_flow_table(rx_fs->roce.ft_macsec_op_check);
err_macsec_op:
	mlx5_destroy_flow_table(rx_fs->roce.ft_ip_check);
	return err;
}
1490
macsec_fs_rx_create(struct mlx5_macsec_fs * macsec_fs)1491 static int macsec_fs_rx_create(struct mlx5_macsec_fs *macsec_fs)
1492 {
1493 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1494 struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
1495 struct mlx5_core_dev *mdev = macsec_fs->mdev;
1496 struct mlx5_macsec_flow_table *ft_crypto;
1497 struct mlx5_flow_table_attr ft_attr = {};
1498 struct mlx5_flow_destination dest = {};
1499 struct mlx5_macsec_tables *rx_tables;
1500 struct mlx5_flow_table *flow_table;
1501 struct mlx5_flow_group *flow_group;
1502 struct mlx5_flow_act flow_act = {};
1503 struct mlx5_flow_namespace *ns;
1504 struct mlx5_flow_handle *rule;
1505 struct mlx5_flow_spec *spec;
1506 u32 *flow_group_in;
1507 int err;
1508
1509 ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
1510 if (!ns)
1511 return -ENOMEM;
1512
1513 spec = kvzalloc_obj(*spec);
1514 if (!spec)
1515 return -ENOMEM;
1516
1517 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1518 if (!flow_group_in) {
1519 err = -ENOMEM;
1520 goto free_spec;
1521 }
1522
1523 rx_tables = &rx_fs->tables;
1524 ft_crypto = &rx_tables->ft_crypto;
1525
1526 err = macsec_fs_rx_roce_create(macsec_fs);
1527 if (err)
1528 goto out_flow_group;
1529
1530 /* Rx crypto table */
1531 ft_attr.level = RX_CRYPTO_TABLE_LEVEL;
1532 ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
1533
1534 flow_table = mlx5_create_flow_table(ns, &ft_attr);
1535 if (IS_ERR(flow_table)) {
1536 err = PTR_ERR(flow_table);
1537 mlx5_core_err(mdev, "Failed to create MACsec Rx crypto table err(%d)\n", err);
1538 goto err;
1539 }
1540 ft_crypto->t = flow_table;
1541
1542 /* Rx crypto table groups */
1543 err = macsec_fs_rx_create_crypto_table_groups(ft_crypto);
1544 if (err) {
1545 mlx5_core_err(mdev,
1546 "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
1547 err);
1548 goto err;
1549 }
1550
1551 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
1552 rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
1553 if (IS_ERR(rule)) {
1554 err = PTR_ERR(rule);
1555 mlx5_core_err(mdev,
1556 "Failed to add MACsec Rx crypto table default miss rule %d\n",
1557 err);
1558 goto err;
1559 }
1560 rx_tables->crypto_miss_rule = rule;
1561
1562 /* Rx check table */
1563 flow_table = macsec_fs_auto_group_table_create(ns,
1564 MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT,
1565 RX_CHECK_TABLE_LEVEL,
1566 RX_CHECK_TABLE_NUM_FTE);
1567 if (IS_ERR(flow_table)) {
1568 err = PTR_ERR(flow_table);
1569 mlx5_core_err(mdev, "Fail to create MACsec RX check table, err(%d)\n", err);
1570 goto err;
1571 }
1572 rx_tables->ft_check = flow_table;
1573
1574 /* Rx check table Default miss group/rule */
1575 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
1576 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
1577 flow_group = mlx5_create_flow_group(rx_tables->ft_check, flow_group_in);
1578 if (IS_ERR(flow_group)) {
1579 err = PTR_ERR(flow_group);
1580 mlx5_core_err(mdev,
1581 "Failed to create default flow group for MACsec Rx check table err(%d)\n",
1582 err);
1583 goto err;
1584 }
1585 rx_tables->ft_check_group = flow_group;
1586
1587 /* Rx check table default drop rule */
1588 memset(&flow_act, 0, sizeof(flow_act));
1589
1590 dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1591 dest.counter = rx_tables->check_miss_rule_counter;
1592 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
1593 rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1);
1594 if (IS_ERR(rule)) {
1595 err = PTR_ERR(rule);
1596 mlx5_core_err(mdev, "Failed to added MACsec Rx check drop rule, err(%d)\n", err);
1597 goto err;
1598 }
1599 rx_tables->check_miss_rule = rule;
1600
1601 /* Rx check table decap rules */
1602 err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
1603 MLX5_SECTAG_HEADER_SIZE_WITH_SCI);
1604 if (err)
1605 goto err;
1606
1607 err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
1608 MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI);
1609 if (err)
1610 goto err;
1611
1612 goto out_flow_group;
1613
1614 err:
1615 macsec_fs_rx_destroy(macsec_fs);
1616 out_flow_group:
1617 kvfree(flow_group_in);
1618 free_spec:
1619 kvfree(spec);
1620 return err;
1621 }
1622
macsec_fs_rx_ft_get(struct mlx5_macsec_fs * macsec_fs)1623 static int macsec_fs_rx_ft_get(struct mlx5_macsec_fs *macsec_fs)
1624 {
1625 struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
1626 int err = 0;
1627
1628 if (rx_tables->refcnt)
1629 goto out;
1630
1631 err = macsec_fs_rx_create(macsec_fs);
1632 if (err)
1633 return err;
1634
1635 out:
1636 rx_tables->refcnt++;
1637 return err;
1638 }
1639
macsec_fs_rx_ft_put(struct mlx5_macsec_fs * macsec_fs)1640 static void macsec_fs_rx_ft_put(struct mlx5_macsec_fs *macsec_fs)
1641 {
1642 struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
1643
1644 if (--rx_tables->refcnt)
1645 return;
1646
1647 macsec_fs_rx_destroy(macsec_fs);
1648 }
1649
/* Remove an Rx SA: unregister its fs_id, delete both crypto-table
 * rules (with/without SCI), free the metadata modify-header, free the
 * rule container and drop the Rx tables reference taken at add time.
 */
static void macsec_fs_rx_del_rule(struct mlx5_macsec_fs *macsec_fs,
				  struct mlx5_macsec_rx_rule *rx_rule,
				  void *macdev, u32 fs_id)
{
	int idx;

	/* Unregister the fs_id first so lookups stop matching this SA */
	macsec_fs_id_del(&macsec_fs->macsec_devices_list, fs_id, macdev,
			 &macsec_fs->fs_id_hash, false);

	for (idx = 0; idx < RX_NUM_OF_RULES_PER_SA; idx++) {
		if (!rx_rule->rule[idx])
			continue;
		mlx5_del_flow_rules(rx_rule->rule[idx]);
		rx_rule->rule[idx] = NULL;
	}

	if (rx_rule->meta_modhdr) {
		mlx5_modify_header_dealloc(macsec_fs->mdev, rx_rule->meta_modhdr);
		rx_rule->meta_modhdr = NULL;
	}

	kfree(rx_rule);

	macsec_fs_rx_ft_put(macsec_fs);
}
1675
/* Fill @spec and @flow_act->crypto for one Rx SA decrypt rule.
 *
 * Always matches the MACsec ethertype plus the SecTAG TCI byte
 * (SC-present flag | association number) in macsec_tag_0. When
 * @sci_present, the 64-bit SCI is additionally matched as two
 * big-endian 32-bit words in macsec_tag_2/3; otherwise the source MAC
 * is matched instead — valid only when the SCI carries the default
 * MACsec port, which the caller (macsec_fs_rx_add_rule) checks.
 */
static void macsec_fs_rx_setup_fte(struct mlx5_flow_spec *spec,
				   struct mlx5_flow_act *flow_act,
				   struct mlx5_macsec_rule_attrs *attrs,
				   bool sci_present)
{
	/* TCI byte to match: SC-present bit plus the association number */
	u8 tci_an = (sci_present << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) | attrs->assoc_num;
	struct mlx5_flow_act_crypto_params *crypto_params = &flow_act->crypto;
	/* View the 64-bit SCI as two big-endian words / six MAC bytes */
	__be32 *sci_p = (__be32 *)(&attrs->sci);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	/* MACsec ethertype */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_MACSEC);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;

	/* Sectag AN + TCI SC present bit*/
	MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
		 MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
		 tci_an << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);

	if (sci_present) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 misc_parameters_5.macsec_tag_2);
		MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_2,
			 be32_to_cpu(sci_p[0]));

		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 misc_parameters_5.macsec_tag_3);
		MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_3,
			 be32_to_cpu(sci_p[1]));
	} else {
		/* When SCI isn't present in the Sectag, need to match the source */
		/* MAC address only if the SCI contains the default MACsec PORT */
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.smac_47_16),
		       sci_p, ETH_ALEN);
	}

	/* HW decrypt context to attach to the rule */
	crypto_params->type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
	crypto_params->obj_id = attrs->macsec_obj_id;
}
1721
/* Add the steering rules for one Rx SA.
 *
 * Takes a reference on the Rx tables (building them on first use),
 * allocates a modify-header that stamps metadata REG_B with the
 * marked fs_id (bit 30 set, see macsec_fs_set_rx_fs_id), then installs
 * up to two decrypt rules in the Rx crypto table:
 *  - rule[0]: match on explicit SCI in the SecTAG;
 *  - rule[1]: match on source MAC only, installed only when the SCI's
 *    port part equals the default MACSEC_PORT_ES.
 * Both rules decrypt in HW and forward to the Rx check table. Finally
 * the fs_id is registered in the device list / hash.
 *
 * Returns the new rule container, or NULL on failure (all partial
 * state is unwound via macsec_fs_rx_del_rule, including the table
 * reference).
 */
static union mlx5_macsec_rule *
macsec_fs_rx_add_rule(struct mlx5_macsec_fs *macsec_fs,
		      const struct macsec_context *macsec_ctx,
		      struct mlx5_macsec_rule_attrs *attrs,
		      u32 fs_id)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
	struct mlx5_core_dev *mdev = macsec_fs->mdev;
	union mlx5_macsec_rule *macsec_rule = NULL;
	struct mlx5_modify_hdr *modify_hdr = NULL;
	struct mlx5_macsec_flow_table *ft_crypto;
	struct mlx5_flow_destination dest = {};
	struct mlx5_macsec_tables *rx_tables;
	struct mlx5_macsec_rx_rule *rx_rule;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc_obj(*spec);
	if (!spec)
		return NULL;

	err = macsec_fs_rx_ft_get(macsec_fs);
	if (err)
		goto out_spec;

	macsec_rule = kzalloc_obj(*macsec_rule);
	if (!macsec_rule) {
		macsec_fs_rx_ft_put(macsec_fs);
		goto out_spec;
	}

	rx_rule = &macsec_rule->rx_rule;
	rx_tables = &rx_fs->tables;
	ft_crypto = &rx_tables->ft_crypto;

	/* Set bit[31 - 30] macsec marker - 0x01 */
	/* Set bit[15-0] fs id */
	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(set_action_in, action, data, macsec_fs_set_rx_fs_id(fs_id));
	MLX5_SET(set_action_in, action, offset, 0);
	MLX5_SET(set_action_in, action, length, 32);

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
					      1, action);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		mlx5_core_err(mdev, "Fail to alloc MACsec set modify_header_id err=%d\n", err);
		modify_hdr = NULL;	/* don't free an error pointer in unwind */
		goto err;
	}
	rx_rule->meta_modhdr = modify_hdr;

	/* Rx crypto table with SCI rule */
	macsec_fs_rx_setup_fte(spec, &flow_act, attrs, true);

	flow_act.modify_hdr = modify_hdr;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rx_tables->ft_check;
	rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add SA with SCI rule to Rx crypto rule, err=%d\n",
			      err);
		goto err;
	}
	rx_rule->rule[0] = rule;

	/* Rx crypto table without SCI rule */
	if ((cpu_to_be64((__force u64)attrs->sci) & 0xFFFF) == ntohs(MACSEC_PORT_ES)) {
		memset(spec, 0, sizeof(struct mlx5_flow_spec));
		memset(&dest, 0, sizeof(struct mlx5_flow_destination));
		memset(&flow_act, 0, sizeof(flow_act));

		macsec_fs_rx_setup_fte(spec, &flow_act, attrs, false);

		flow_act.modify_hdr = modify_hdr;
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
				  MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
				  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;

		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = rx_tables->ft_check;
		rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_err(mdev,
				      "Failed to add SA without SCI rule to Rx crypto rule, err=%d\n",
				      err);
			goto err;
		}
		rx_rule->rule[1] = rule;
	}

	err = macsec_fs_id_add(&macsec_fs->macsec_devices_list, fs_id, macsec_ctx->secy->netdev,
			       &macsec_fs->fs_id_hash, attrs->sci, false);
	if (err) {
		mlx5_core_err(mdev, "Failed to save fs_id, err=%d\n", err);
		goto err;
	}

	kvfree(spec);
	return macsec_rule;

err:
	/* Frees rx_rule/macsec_rule and drops the tables reference */
	macsec_fs_rx_del_rule(macsec_fs, rx_rule, macsec_ctx->secy->netdev, fs_id);
	macsec_rule = NULL;
out_spec:
	kvfree(spec);
	return macsec_rule;
}
1841
macsec_fs_rx_init(struct mlx5_macsec_fs * macsec_fs)1842 static int macsec_fs_rx_init(struct mlx5_macsec_fs *macsec_fs)
1843 {
1844 struct mlx5_core_dev *mdev = macsec_fs->mdev;
1845 struct mlx5_macsec_tables *rx_tables;
1846 struct mlx5_macsec_rx *rx_fs;
1847 struct mlx5_fc *flow_counter;
1848 int err;
1849
1850 rx_fs = kzalloc_obj(*rx_fs);
1851 if (!rx_fs)
1852 return -ENOMEM;
1853
1854 flow_counter = mlx5_fc_create(mdev, false);
1855 if (IS_ERR(flow_counter)) {
1856 err = PTR_ERR(flow_counter);
1857 mlx5_core_err(mdev,
1858 "Failed to create MACsec Rx encrypt flow counter, err(%d)\n",
1859 err);
1860 goto err_encrypt_counter;
1861 }
1862
1863 rx_tables = &rx_fs->tables;
1864 rx_tables->check_rule_counter = flow_counter;
1865
1866 flow_counter = mlx5_fc_create(mdev, false);
1867 if (IS_ERR(flow_counter)) {
1868 err = PTR_ERR(flow_counter);
1869 mlx5_core_err(mdev,
1870 "Failed to create MACsec Rx drop flow counter, err(%d)\n",
1871 err);
1872 goto err_drop_counter;
1873 }
1874 rx_tables->check_miss_rule_counter = flow_counter;
1875
1876 macsec_fs->rx_fs = rx_fs;
1877
1878 return 0;
1879
1880 err_drop_counter:
1881 mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
1882 rx_tables->check_rule_counter = NULL;
1883
1884 err_encrypt_counter:
1885 kfree(rx_fs);
1886 macsec_fs->rx_fs = NULL;
1887
1888 return err;
1889 }
1890
macsec_fs_rx_cleanup(struct mlx5_macsec_fs * macsec_fs)1891 static void macsec_fs_rx_cleanup(struct mlx5_macsec_fs *macsec_fs)
1892 {
1893 struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
1894 struct mlx5_core_dev *mdev = macsec_fs->mdev;
1895 struct mlx5_macsec_tables *rx_tables;
1896
1897 if (!rx_fs)
1898 return;
1899
1900 rx_tables = &rx_fs->tables;
1901
1902 if (rx_tables->refcnt) {
1903 mlx5_core_err(mdev,
1904 "Can't destroy MACsec offload rx_fs, refcnt(%u) isn't 0\n",
1905 rx_tables->refcnt);
1906 return;
1907 }
1908
1909 if (rx_tables->check_miss_rule_counter) {
1910 mlx5_fc_destroy(mdev, rx_tables->check_miss_rule_counter);
1911 rx_tables->check_miss_rule_counter = NULL;
1912 }
1913
1914 if (rx_tables->check_rule_counter) {
1915 mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
1916 rx_tables->check_rule_counter = NULL;
1917 }
1918
1919 kfree(rx_fs);
1920 macsec_fs->rx_fs = NULL;
1921 }
1922
/* Add an IPv4 source or destination address match to @spec. */
static void set_ipaddr_spec_v4(struct sockaddr_in *in, struct mlx5_flow_spec *spec, bool is_dst_ip)
{
	MLX5_SET(fte_match_param, spec->match_value,
		 outer_headers.ip_version, MLX5_FS_IPV4_VERSION);

	if (!is_dst_ip) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &in->sin_addr.s_addr, 4);
	} else {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &in->sin_addr.s_addr, 4);
	}
}
1942
/* Populate @spec to match an IPv6 address: full-mask the selected
 * (src or dst) address field in the match criteria and copy the
 * address into the match value.
 */
static void set_ipaddr_spec_v6(struct sockaddr_in6 *in6, struct mlx5_flow_spec *spec,
			       bool is_dst_ip)
{
	void *mask, *val;

	MLX5_SET(fte_match_param, spec->match_value,
		 outer_headers.ip_version, MLX5_FS_IPV6_VERSION);

	if (is_dst_ip) {
		mask = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
		val = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				   outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	} else {
		mask = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6);
		val = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				   outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6);
	}
	memset(mask, 0xff, 16);
	memcpy(val, &in6->sin6_addr, 16);
}
1965
/* Dispatch IP-address matching setup by address family.
 * Enables outer-header matching and an exact ip_version match, then
 * delegates to the v4 or v6 helper. Any non-AF_INET family is treated
 * as IPv6, matching the original behavior.
 */
static void set_ipaddr_spec(const struct sockaddr *addr,
			    struct mlx5_flow_spec *spec, bool is_dst_ip)
{
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.ip_version);

	if (addr->sa_family == AF_INET)
		set_ipaddr_spec_v4((struct sockaddr_in *)addr, spec, is_dst_ip);
	else
		set_ipaddr_spec_v6((struct sockaddr_in6 *)addr, spec, is_dst_ip);
}
1985
/* Remove one RoCE MACsec RX rule pair (the op-check rule and the IP
 * rule), unlink it from its owner list and free it.
 */
static void macsec_fs_del_roce_rule_rx(struct mlx5_roce_macsec_rx_rule *rx_rule)
{
	mlx5_del_flow_rules(rx_rule->op);
	mlx5_del_flow_rules(rx_rule->ip);
	list_del(&rx_rule->entry);
	kfree(rx_rule);
}
1993
/* Delete every RoCE MACsec RX rule on @rx_rules_list that belongs to
 * @fs_id. No-op when the device lacks MACsec-over-RoCE support.
 */
static void macsec_fs_del_roce_rules_rx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id,
					struct list_head *rx_rules_list)
{
	struct mlx5_roce_macsec_rx_rule *rule, *tmp;

	if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev))
		return;

	/* _safe: each matching entry is unlinked and freed while iterating. */
	list_for_each_entry_safe(rule, tmp, rx_rules_list, entry)
		if (rule->fs_id == fs_id)
			macsec_fs_del_roce_rule_rx(rule);
}
2007
/* Remove one RoCE MACsec TX rule: delete the flow rule, release its
 * metadata modify-header, unlink it from its owner list and free it.
 */
static void macsec_fs_del_roce_rule_tx(struct mlx5_core_dev *mdev,
				       struct mlx5_roce_macsec_tx_rule *tx_rule)
{
	mlx5_del_flow_rules(tx_rule->rule);
	mlx5_modify_header_dealloc(mdev, tx_rule->meta_modhdr);
	list_del(&tx_rule->entry);
	kfree(tx_rule);
}
2016
/* Delete every RoCE MACsec TX rule on @tx_rules_list that belongs to
 * @fs_id. No-op when the device lacks MACsec-over-RoCE support.
 */
static void macsec_fs_del_roce_rules_tx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id,
					struct list_head *tx_rules_list)
{
	struct mlx5_roce_macsec_tx_rule *rule, *tmp;

	if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev))
		return;

	/* _safe: each matching entry is unlinked and freed while iterating. */
	list_for_each_entry_safe(rule, tmp, tx_rules_list, entry)
		if (rule->fs_id == fs_id)
			macsec_fs_del_roce_rule_tx(macsec_fs->mdev, rule);
}
2030
mlx5_macsec_fs_get_stats_fill(struct mlx5_macsec_fs * macsec_fs,void * macsec_stats)2031 void mlx5_macsec_fs_get_stats_fill(struct mlx5_macsec_fs *macsec_fs, void *macsec_stats)
2032 {
2033 struct mlx5_macsec_stats *stats = (struct mlx5_macsec_stats *)macsec_stats;
2034 struct mlx5_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
2035 struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
2036 struct mlx5_core_dev *mdev = macsec_fs->mdev;
2037
2038 if (tx_tables->check_rule_counter)
2039 mlx5_fc_query(mdev, tx_tables->check_rule_counter,
2040 &stats->macsec_tx_pkts, &stats->macsec_tx_bytes);
2041
2042 if (tx_tables->check_miss_rule_counter)
2043 mlx5_fc_query(mdev, tx_tables->check_miss_rule_counter,
2044 &stats->macsec_tx_pkts_drop, &stats->macsec_tx_bytes_drop);
2045
2046 if (rx_tables->check_rule_counter)
2047 mlx5_fc_query(mdev, rx_tables->check_rule_counter,
2048 &stats->macsec_rx_pkts, &stats->macsec_rx_bytes);
2049
2050 if (rx_tables->check_miss_rule_counter)
2051 mlx5_fc_query(mdev, rx_tables->check_miss_rule_counter,
2052 &stats->macsec_rx_pkts_drop, &stats->macsec_rx_bytes_drop);
2053 }
2054
mlx5_macsec_fs_get_stats(struct mlx5_macsec_fs * macsec_fs)2055 struct mlx5_macsec_stats *mlx5_macsec_fs_get_stats(struct mlx5_macsec_fs *macsec_fs)
2056 {
2057 if (!macsec_fs)
2058 return NULL;
2059
2060 return &macsec_fs->stats;
2061 }
2062
mlx5_macsec_fs_get_fs_id_from_hashtable(struct mlx5_macsec_fs * macsec_fs,sci_t * sci)2063 u32 mlx5_macsec_fs_get_fs_id_from_hashtable(struct mlx5_macsec_fs *macsec_fs, sci_t *sci)
2064 {
2065 struct mlx5_fs_id *mlx5_fs_id;
2066 u32 fs_id = 0;
2067
2068 rcu_read_lock();
2069 mlx5_fs_id = rhashtable_lookup(&macsec_fs->sci_hash, sci, rhash_sci);
2070 if (mlx5_fs_id)
2071 fs_id = mlx5_fs_id->id;
2072 rcu_read_unlock();
2073
2074 return fs_id;
2075 }
2076
2077 union mlx5_macsec_rule *
mlx5_macsec_fs_add_rule(struct mlx5_macsec_fs * macsec_fs,const struct macsec_context * macsec_ctx,struct mlx5_macsec_rule_attrs * attrs,u32 * sa_fs_id)2078 mlx5_macsec_fs_add_rule(struct mlx5_macsec_fs *macsec_fs,
2079 const struct macsec_context *macsec_ctx,
2080 struct mlx5_macsec_rule_attrs *attrs,
2081 u32 *sa_fs_id)
2082 {
2083 struct mlx5_macsec_event_data data = {.macsec_fs = macsec_fs,
2084 .macdev = macsec_ctx->secy->netdev,
2085 .is_tx =
2086 (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT)
2087 };
2088 union mlx5_macsec_rule *macsec_rule;
2089 u32 tx_new_fs_id;
2090
2091 macsec_rule = (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
2092 macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, &tx_new_fs_id) :
2093 macsec_fs_rx_add_rule(macsec_fs, macsec_ctx, attrs, *sa_fs_id);
2094
2095 data.fs_id = (data.is_tx) ? tx_new_fs_id : *sa_fs_id;
2096 if (macsec_rule)
2097 blocking_notifier_call_chain(&macsec_fs->mdev->macsec_nh,
2098 MLX5_DRIVER_EVENT_MACSEC_SA_ADDED,
2099 &data);
2100
2101 return macsec_rule;
2102 }
2103
/* Remove a MACsec SA steering rule and notify listeners first, so RoCE
 * consumers can drop their mirrored rules before the base rule is gone.
 * @action selects the TX (ENCRYPT) or RX path; @sa_fs_id is only
 * meaningful for RX.
 */
void mlx5_macsec_fs_del_rule(struct mlx5_macsec_fs *macsec_fs,
			     union mlx5_macsec_rule *macsec_rule,
			     int action, void *macdev, u32 sa_fs_id)
{
	bool is_tx = (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT);
	struct mlx5_macsec_event_data data = {
		.macsec_fs = macsec_fs,
		.macdev = macdev,
		.is_tx = is_tx,
		.fs_id = is_tx ? macsec_rule->tx_rule.fs_id : sa_fs_id,
	};

	blocking_notifier_call_chain(&macsec_fs->mdev->macsec_nh,
				     MLX5_DRIVER_EVENT_MACSEC_SA_DELETED,
				     &data);

	if (is_tx)
		macsec_fs_tx_del_rule(macsec_fs, &macsec_rule->tx_rule, macdev);
	else
		macsec_fs_rx_del_rule(macsec_fs, &macsec_rule->rx_rule, macdev, sa_fs_id);
}
2122
/* Install the RoCE RX steering pair for one MACsec SA:
 *  1) an IP rule in ft_ip_check matching the destination address that
 *     forwards to the MACsec-operation check table, and
 *  2) an op rule in ft_macsec_op_check that only lets through packets
 *     whose metadata_reg_c_5 carries this SA's RX-tagged fs_id.
 * On success the rule pair is appended to @rx_rules_list; on failure
 * everything created so far is rolled back. Returns 0 or -errno.
 */
static int mlx5_macsec_fs_add_roce_rule_rx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, u16 gid_idx,
					   const struct sockaddr *addr,
					   struct list_head *rx_rules_list)
{
	struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
	struct mlx5_roce_macsec_rx_rule *rx_rule;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *new_rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc_obj(*spec);
	if (!spec)
		return -ENOMEM;

	rx_rule = kzalloc_obj(*rx_rule);
	if (!rx_rule) {
		err = -ENOMEM;
		goto out;
	}

	/* Rule 1: match on the destination IP of the RoCE flow. */
	set_ipaddr_spec(addr, spec, true);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest.ft = rx_fs->roce.ft_macsec_op_check;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	new_rule = mlx5_add_flow_rules(rx_fs->roce.ft_ip_check, spec, &flow_act,
				       &dest, 1);
	if (IS_ERR(new_rule)) {
		err = PTR_ERR(new_rule);
		goto ip_rule_err;
	}
	rx_rule->ip = new_rule;

	/* Reuse spec/flow_act for rule 2; wipe leftovers from rule 1. */
	memset(&flow_act, 0, sizeof(flow_act));
	memset(spec, 0, sizeof(*spec));

	/* Rule 2: allow only packets tagged (via metadata_reg_c_5) with
	 * this SA's RX fs_id by the MACsec decrypt steering.
	 */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_5);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_5,
		 macsec_fs_set_rx_fs_id(fs_id));
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	new_rule = mlx5_add_flow_rules(rx_fs->roce.ft_macsec_op_check, spec, &flow_act,
				       NULL, 0);
	if (IS_ERR(new_rule)) {
		err = PTR_ERR(new_rule);
		goto op_rule_err;
	}
	rx_rule->op = new_rule;
	rx_rule->gid_idx = gid_idx;
	rx_rule->fs_id = fs_id;
	list_add_tail(&rx_rule->entry, rx_rules_list);

	goto out;

op_rule_err:
	mlx5_del_flow_rules(rx_rule->ip);
	rx_rule->ip = NULL;
ip_rule_err:
	kfree(rx_rule);
out:
	kvfree(spec);
	return err;
}
2188
/* Install the RoCE TX steering rule for one MACsec SA: match RoCE
 * traffic by source IP in the RDMA TX table, stamp the SA's TX fs_id
 * into METADATA_REG_A via a modify-header action (so the MACsec crypto
 * table can pick the right SA), and forward to the TX crypto table.
 * On success the rule is appended to @tx_rules_list; on failure
 * everything created so far is rolled back. Returns 0 or -errno.
 */
static int mlx5_macsec_fs_add_roce_rule_tx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, u16 gid_idx,
					   const struct sockaddr *addr,
					   struct list_head *tx_rules_list)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
	struct mlx5_core_dev *mdev = macsec_fs->mdev;
	struct mlx5_modify_hdr *modify_hdr = NULL;
	struct mlx5_roce_macsec_tx_rule *tx_rule;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *new_rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc_obj(*spec);
	if (!spec)
		return -ENOMEM;

	tx_rule = kzalloc_obj(*tx_rule);
	if (!tx_rule) {
		err = -ENOMEM;
		goto out;
	}

	/* Match on the source IP of the RoCE flow. */
	set_ipaddr_spec(addr, spec, false);

	/* Build a SET action writing the TX-tagged fs_id into an 8-bit
	 * slice of METADATA_REG_A at the MACsec shift.
	 */
	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_A);
	MLX5_SET(set_action_in, action, data,
		 mlx5_macsec_fs_set_tx_fs_id(fs_id));
	MLX5_SET(set_action_in, action, offset,
		 MLX5_ETH_WQE_FT_META_MACSEC_SHIFT);
	MLX5_SET(set_action_in, action, length, 8);

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC,
					      1, action);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		mlx5_core_err(mdev, "Fail to alloc ROCE MACsec set modify_header_id err=%d\n",
			      err);
		modify_hdr = NULL;
		goto modify_hdr_err;
	}
	tx_rule->meta_modhdr = modify_hdr;

	flow_act.modify_hdr = modify_hdr;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
	dest.ft = tx_fs->tables.ft_crypto.t;
	new_rule = mlx5_add_flow_rules(tx_fs->ft_rdma_tx, spec, &flow_act, &dest, 1);
	if (IS_ERR(new_rule)) {
		err = PTR_ERR(new_rule);
		mlx5_core_err(mdev, "Failed to add ROCE TX rule, err=%d\n", err);
		goto rule_err;
	}
	tx_rule->rule = new_rule;
	tx_rule->gid_idx = gid_idx;
	tx_rule->fs_id = fs_id;
	list_add_tail(&tx_rule->entry, tx_rules_list);

	goto out;

rule_err:
	mlx5_modify_header_dealloc(mdev, tx_rule->meta_modhdr);
modify_hdr_err:
	kfree(tx_rule);
out:
	kvfree(spec);
	return err;
}
2261
/* Delete every RoCE MACsec TX and RX rule installed for @gid_idx from
 * the given per-GID rule lists.
 */
void mlx5_macsec_del_roce_rule(u16 gid_idx, struct mlx5_macsec_fs *macsec_fs,
			       struct list_head *tx_rules_list, struct list_head *rx_rules_list)
{
	struct mlx5_roce_macsec_tx_rule *tx_rule, *tx_tmp;
	struct mlx5_roce_macsec_rx_rule *rx_rule, *rx_tmp;

	/* _safe: matching entries are unlinked and freed while iterating. */
	list_for_each_entry_safe(tx_rule, tx_tmp, tx_rules_list, entry)
		if (tx_rule->gid_idx == gid_idx)
			macsec_fs_del_roce_rule_tx(macsec_fs->mdev, tx_rule);

	list_for_each_entry_safe(rx_rule, rx_tmp, rx_rules_list, entry)
		if (rx_rule->gid_idx == gid_idx)
			macsec_fs_del_roce_rule_rx(rx_rule);
}
EXPORT_SYMBOL_GPL(mlx5_macsec_del_roce_rule);
2279
/* Mirror all existing MACsec SAs of @macdev into RoCE steering for a
 * newly added GID: one TX rule per TX fs_id and one RX rule per RX
 * fs_id. Returns 0 when @macdev has no MACsec state or on success;
 * on any failure all rules added for @gid_idx are rolled back and a
 * negative errno is returned.
 *
 * Fix: the RX loop's error message was a copy-paste of the TX one
 * ("Failed to add roce TX rule"); it now correctly says RX.
 */
int mlx5_macsec_add_roce_rule(void *macdev, const struct sockaddr *addr, u16 gid_idx,
			      struct list_head *tx_rules_list, struct list_head *rx_rules_list,
			      struct mlx5_macsec_fs *macsec_fs)
{
	struct mlx5_macsec_device *iter, *macsec_device = NULL;
	struct mlx5_core_dev *mdev = macsec_fs->mdev;
	struct mlx5_fs_id *fs_id_iter;
	unsigned long index = 0;
	int err;

	/* Find the MACsec device entry matching this netdev, if any. */
	list_for_each_entry(iter, &macsec_fs->macsec_devices_list, macsec_devices_list_entry) {
		if (iter->macdev == macdev) {
			macsec_device = iter;
			break;
		}
	}

	if (!macsec_device)
		return 0;

	xa_for_each(&macsec_device->tx_id_xa, index, fs_id_iter) {
		err = mlx5_macsec_fs_add_roce_rule_tx(macsec_fs, fs_id_iter->id, gid_idx, addr,
						      tx_rules_list);
		if (err) {
			mlx5_core_err(mdev, "MACsec offload: Failed to add roce TX rule\n");
			goto out;
		}
	}

	index = 0;
	xa_for_each(&macsec_device->rx_id_xa, index, fs_id_iter) {
		err = mlx5_macsec_fs_add_roce_rule_rx(macsec_fs, fs_id_iter->id, gid_idx, addr,
						      rx_rules_list);
		if (err) {
			mlx5_core_err(mdev, "MACsec offload: Failed to add roce RX rule\n");
			goto out;
		}
	}

	return 0;
out:
	/* Roll back everything installed for this GID so far. */
	mlx5_macsec_del_roce_rule(gid_idx, macsec_fs, tx_rules_list, rx_rules_list);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_macsec_add_roce_rule);
2325
/* Mirror one newly added SA into RoCE steering for a single GID,
 * installing either a TX or an RX rule according to @is_tx.
 * NOTE(review): the helpers' int return is ignored here, as in the
 * original — a failed mirror is silently dropped.
 */
void mlx5_macsec_add_roce_sa_rules(u32 fs_id, const struct sockaddr *addr, u16 gid_idx,
				   struct list_head *tx_rules_list,
				   struct list_head *rx_rules_list,
				   struct mlx5_macsec_fs *macsec_fs, bool is_tx)
{
	if (is_tx)
		mlx5_macsec_fs_add_roce_rule_tx(macsec_fs, fs_id, gid_idx, addr,
						tx_rules_list);
	else
		mlx5_macsec_fs_add_roce_rule_rx(macsec_fs, fs_id, gid_idx, addr,
						rx_rules_list);
}
EXPORT_SYMBOL_GPL(mlx5_macsec_add_roce_sa_rules);
2338
/* Remove the RoCE steering rules mirrored for one SA (@fs_id), on the
 * TX or RX side according to @is_tx.
 */
void mlx5_macsec_del_roce_sa_rules(u32 fs_id, struct mlx5_macsec_fs *macsec_fs,
				   struct list_head *tx_rules_list,
				   struct list_head *rx_rules_list, bool is_tx)
{
	if (is_tx)
		macsec_fs_del_roce_rules_tx(macsec_fs, fs_id, tx_rules_list);
	else
		macsec_fs_del_roce_rules_rx(macsec_fs, fs_id, rx_rules_list);
}
EXPORT_SYMBOL_GPL(mlx5_macsec_del_roce_sa_rules);
2348
/* Destroy the whole MACsec flow-steering context: RX then TX steering
 * trees, then both lookup hash tables, then the context itself.
 */
void mlx5_macsec_fs_cleanup(struct mlx5_macsec_fs *macsec_fs)
{
	macsec_fs_rx_cleanup(macsec_fs);
	macsec_fs_tx_cleanup(macsec_fs);
	rhashtable_destroy(&macsec_fs->fs_id_hash);
	rhashtable_destroy(&macsec_fs->sci_hash);
	kfree(macsec_fs);
}
2357
2358 struct mlx5_macsec_fs *
mlx5_macsec_fs_init(struct mlx5_core_dev * mdev)2359 mlx5_macsec_fs_init(struct mlx5_core_dev *mdev)
2360 {
2361 struct mlx5_macsec_fs *macsec_fs;
2362 int err;
2363
2364 macsec_fs = kzalloc_obj(*macsec_fs);
2365 if (!macsec_fs)
2366 return NULL;
2367
2368 macsec_fs->mdev = mdev;
2369
2370 err = rhashtable_init(&macsec_fs->sci_hash, &rhash_sci);
2371 if (err) {
2372 mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",
2373 err);
2374 goto err_hash;
2375 }
2376
2377 err = rhashtable_init(&macsec_fs->fs_id_hash, &rhash_fs_id);
2378 if (err) {
2379 mlx5_core_err(mdev, "MACsec offload: Failed to init FS_ID hash table, err=%d\n",
2380 err);
2381 goto sci_hash_cleanup;
2382 }
2383
2384 err = macsec_fs_tx_init(macsec_fs);
2385 if (err) {
2386 mlx5_core_err(mdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
2387 goto fs_id_hash_cleanup;
2388 }
2389
2390 err = macsec_fs_rx_init(macsec_fs);
2391 if (err) {
2392 mlx5_core_err(mdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
2393 goto tx_cleanup;
2394 }
2395
2396 BLOCKING_INIT_NOTIFIER_HEAD(&mdev->macsec_nh);
2397
2398 return macsec_fs;
2399
2400 tx_cleanup:
2401 macsec_fs_tx_cleanup(macsec_fs);
2402 fs_id_hash_cleanup:
2403 rhashtable_destroy(&macsec_fs->fs_id_hash);
2404 sci_hash_cleanup:
2405 rhashtable_destroy(&macsec_fs->sci_hash);
2406 err_hash:
2407 kfree(macsec_fs);
2408 return NULL;
2409 }
2410