// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include <linux/mlx5/device.h>
#include <net/psp.h>
#include <linux/psp.h>
#include "mlx5_core.h"
#include "psp.h"
#include "lib/crypto.h"
#include "en_accel/psp.h"
#include "fs_core.h"

enum accel_fs_psp_type {
	ACCEL_FS_PSP4,
	ACCEL_FS_PSP6,
	ACCEL_FS_PSP_NUM_TYPES,
};

enum accel_psp_syndrome {
	PSP_OK = 0,
	PSP_ICV_FAIL,
	PSP_BAD_TRAILER,
};

struct mlx5e_psp_tx {
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_handle *rule;
	struct mutex mutex; /* Protect PSP TX steering */
	u32 refcnt;
	struct mlx5_fc *tx_counter;
};

struct mlx5e_psp_rx_err {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_handle *auth_fail_rule;
	struct mlx5_flow_handle *err_rule;
	struct mlx5_flow_handle *bad_rule;
	struct mlx5_modify_hdr *copy_modify_hdr;
};

struct mlx5e_accel_fs_psp_prot {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_handle *miss_rule;
	struct mlx5_modify_hdr *rx_modify_hdr;
	struct mlx5_flow_destination default_dest;
	struct mlx5e_psp_rx_err rx_err;
	u32 refcnt;
	struct mutex prot_mutex; /* protect PSP4/PSP6 protocol */
	struct mlx5_flow_handle *def_rule;
};

struct mlx5e_accel_fs_psp {
	struct mlx5e_accel_fs_psp_prot fs_prot[ACCEL_FS_PSP_NUM_TYPES];
	struct mlx5_fc *rx_counter;
	struct mlx5_fc *rx_auth_fail_counter;
	struct mlx5_fc *rx_err_counter;
	struct mlx5_fc *rx_bad_counter;
};

struct mlx5e_psp_fs {
	struct mlx5_core_dev *mdev;
	struct mlx5e_psp_tx *tx_fs;
	/* Rx manage */
	struct mlx5e_flow_steering *fs;
	struct mlx5e_accel_fs_psp *rx_fs;
};

/* PSP RX flow steering */
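/* Map a PSP flow steering type to the TTC traffic type it hooks into. */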
static enum mlx5_traffic_types fs_psp2tt(enum accel_fs_psp_type i)
{
	if (i == ACCEL_FS_PSP4)
		return MLX5_TT_IPV4_UDP;

	return MLX5_TT_IPV6_UDP;
}

static void accel_psp_fs_rx_err_del_rules(struct mlx5e_psp_fs *fs,
					  struct mlx5e_psp_rx_err *rx_err)
{
	if (rx_err->bad_rule) {
		mlx5_del_flow_rules(rx_err->bad_rule);
		rx_err->bad_rule = NULL;
	}

	if (rx_err->err_rule) {
		mlx5_del_flow_rules(rx_err->err_rule);
		rx_err->err_rule = NULL;
	}

	if (rx_err->auth_fail_rule) {
		mlx5_del_flow_rules(rx_err->auth_fail_rule);
		rx_err->auth_fail_rule = NULL;
	}

	if (rx_err->rule) {
		mlx5_del_flow_rules(rx_err->rule);
		rx_err->rule = NULL;
	}

	if (rx_err->copy_modify_hdr) {
		mlx5_modify_header_dealloc(fs->mdev, rx_err->copy_modify_hdr);
		rx_err->copy_modify_hdr = NULL;
	}
}

static void accel_psp_fs_rx_err_destroy_ft(struct mlx5e_psp_fs *fs,
					   struct mlx5e_psp_rx_err *rx_err)
{
	accel_psp_fs_rx_err_del_rules(fs, rx_err);

	if (rx_err->ft) {
		mlx5_destroy_flow_table(rx_err->ft);
		rx_err->ft = NULL;
	}
}

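/* Match on the PSP syndrome the device reports in misc_parameters_2,
 * so the error FT can branch per syndrome.
 */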
static void accel_psp_setup_syndrome_match(struct mlx5_flow_spec *spec,
					   enum accel_psp_syndrome syndrome)
{
	void *misc_params_2;

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	misc_params_2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
	MLX5_SET_TO_ONES(fte_match_set_misc2, misc_params_2, psp_syndrome);
	misc_params_2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc_params_2, psp_syndrome, syndrome);
}

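/* Populate the RX error flow table: PSP_OK packets get the syndrome
 * copied into metadata register B and are forwarded to the default
 * destination; ICV failures, bad trailers and any remaining syndromes
 * are counted and dropped.
 */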
static int accel_psp_fs_rx_err_add_rule(struct mlx5e_psp_fs *fs,
					struct mlx5e_accel_fs_psp_prot *fs_prot,
					struct mlx5e_psp_rx_err *rx_err)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_core_dev *mdev = fs->mdev;
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_act flow_act = {};
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_flow_handle *fte;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Action to copy 7 bit psp_syndrome to regB[23:29] */
	MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_PSP_SYNDROME);
	MLX5_SET(copy_action_in, action, src_offset, 0);
	MLX5_SET(copy_action_in, action, length, 7);
	MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(copy_action_in, action, dst_offset, 23);

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
					      1, action);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		mlx5_core_err(mdev,
			      "fail to alloc psp copy modify_header_id err=%d\n", err);
		goto out_spec;
	}

	accel_psp_setup_syndrome_match(spec, PSP_OK);
	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_act.modify_hdr = modify_hdr;
	dest[0].type = fs_prot->default_dest.type;
	dest[0].ft = fs_prot->default_dest.ft;
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter = fs->rx_fs->rx_counter;
	fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 2);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "fail to add psp rx err copy rule err=%d\n", err);
		goto out;
	}
	rx_err->rule = fte;

	/* add auth fail drop rule */
	memset(spec, 0, sizeof(*spec));
	memset(&flow_act, 0, sizeof(flow_act));
	accel_psp_setup_syndrome_match(spec, PSP_ICV_FAIL);
	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[0].counter = fs->rx_fs->rx_auth_fail_counter;
	fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "fail to add psp rx auth fail drop rule err=%d\n",
			      err);
		goto out_drop_rule;
	}
	rx_err->auth_fail_rule = fte;

	/* add framing drop rule */
	memset(spec, 0, sizeof(*spec));
	memset(&flow_act, 0, sizeof(flow_act));
	accel_psp_setup_syndrome_match(spec, PSP_BAD_TRAILER);
	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[0].counter = fs->rx_fs->rx_err_counter;
	fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "fail to add psp rx framing err drop rule err=%d\n",
			      err);
		goto out_drop_auth_fail_rule;
	}
	rx_err->err_rule = fte;

	/* add misc. errors drop rule */
	memset(spec, 0, sizeof(*spec));
	memset(&flow_act, 0, sizeof(flow_act));
	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[0].counter = fs->rx_fs->rx_bad_counter;
	fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "fail to add psp rx misc. err drop rule err=%d\n",
			      err);
		goto out_drop_error_rule;
	}
	rx_err->bad_rule = fte;

	rx_err->copy_modify_hdr = modify_hdr;

	goto out_spec;

out_drop_error_rule:
	mlx5_del_flow_rules(rx_err->err_rule);
	rx_err->err_rule = NULL;
out_drop_auth_fail_rule:
	mlx5_del_flow_rules(rx_err->auth_fail_rule);
	rx_err->auth_fail_rule = NULL;
out_drop_rule:
	mlx5_del_flow_rules(rx_err->rule);
	rx_err->rule = NULL;
out:
	mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
	kfree(spec);
	return err;
}

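/* Create the per-protocol RX error flow table and fill it with the
 * per-syndrome rules above. The RX decrypt table forwards decrypted
 * packets here so the syndrome can be checked before they continue.
 */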
static int accel_psp_fs_rx_err_create_ft(struct mlx5e_psp_fs *fs,
					 struct mlx5e_accel_fs_psp_prot *fs_prot,
					 struct mlx5e_psp_rx_err *rx_err)
{
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs->fs, false);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;
	int err;

	ft_attr.max_fte = 2;
	ft_attr.autogroup.max_num_groups = 2;
	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL; /* MLX5E_ACCEL_FS_TCP_FT_LEVEL */
	ft_attr.prio = MLX5E_NIC_PRIO;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(fs->mdev, "fail to create psp rx inline ft err=%d\n", err);
		return err;
	}

	rx_err->ft = ft;
	err = accel_psp_fs_rx_err_add_rule(fs, fs_prot, rx_err);
	if (err)
		goto out_err;

	return 0;

out_err:
	mlx5_destroy_flow_table(ft);
	rx_err->ft = NULL;
	return err;
}

static void accel_psp_fs_rx_fs_destroy(struct mlx5e_psp_fs *fs,
				       struct mlx5e_accel_fs_psp_prot *fs_prot)
{
	if (fs_prot->def_rule) {
		mlx5_del_flow_rules(fs_prot->def_rule);
		fs_prot->def_rule = NULL;
	}

	if (fs_prot->rx_modify_hdr) {
		mlx5_modify_header_dealloc(fs->mdev, fs_prot->rx_modify_hdr);
		fs_prot->rx_modify_hdr = NULL;
	}

	if (fs_prot->miss_rule) {
		mlx5_del_flow_rules(fs_prot->miss_rule);
		fs_prot->miss_rule = NULL;
	}

	if (fs_prot->miss_group) {
		mlx5_destroy_flow_group(fs_prot->miss_group);
		fs_prot->miss_group = NULL;
	}

	if (fs_prot->ft) {
		mlx5_destroy_flow_table(fs_prot->ft);
		fs_prot->ft = NULL;
	}
}

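/* Match UDP packets with the given destination port (the PSP UDP port). */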
static void setup_fte_udp_psp(struct mlx5_flow_spec *spec, u16 udp_port)
{
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport, 0xffff);
	MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, udp_port);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, IPPROTO_UDP);
}

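/* Create the per-protocol RX decrypt flow table: a default rule that
 * decrypts PSP UDP traffic, marks it in metadata register B and sends
 * it to the error FT, plus a miss rule that forwards everything else
 * to the TTC default destination.
 */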
static int accel_psp_fs_rx_create_ft(struct mlx5e_psp_fs *fs,
				     struct mlx5e_accel_fs_psp_prot *fs_prot)
{
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs->fs, false);
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_modify_hdr *modify_hdr = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_destination dest = {};
	struct mlx5_core_dev *mdev = fs->mdev;
	struct mlx5_flow_group *miss_group;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Create FT */
	ft_attr.max_fte = 2;
	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;
	ft_attr.autogroup.num_reserved_entries = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "fail to create psp rx ft err=%d\n", err);
		goto out_err;
	}
	fs_prot->ft = ft;

	/* Create miss_group */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	miss_group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss_group)) {
		err = PTR_ERR(miss_group);
		mlx5_core_err(mdev, "fail to create psp rx miss_group err=%d\n", err);
		goto out_err;
	}
	fs_prot->miss_group = miss_group;

	/* Create miss rule */
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to create psp rx miss_rule err=%d\n", err);
		goto out_err;
	}
	fs_prot->miss_rule = rule;

	/* Add default Rx psp rule */
	setup_fte_udp_psp(spec, PSP_DEFAULT_UDP_PORT);
	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP;
	/* Set the PSP marker in regB bits [31:30]; the psp_syndrome in
	 * bits [29:23] is set later in the error FT.
	 */
#define MLX5E_PSP_MARKER_BIT (BIT(30) | BIT(31))
	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(set_action_in, action, data, MLX5E_PSP_MARKER_BIT);
	MLX5_SET(set_action_in, action, offset, 0);
	MLX5_SET(set_action_in, action, length, 32);

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL, 1, action);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		mlx5_core_err(mdev, "fail to alloc psp set modify_header_id err=%d\n", err);
		modify_hdr = NULL;
		goto out_err;
	}
	fs_prot->rx_modify_hdr = modify_hdr;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = modify_hdr;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = fs_prot->rx_err.ft;
	rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "fail to add psp rule Rx decryption, err=%d, flow_act.action = %#04X\n",
			      err, flow_act.action);
		goto out_err;
	}

	fs_prot->def_rule = rule;
	goto out;

out_err:
	accel_psp_fs_rx_fs_destroy(fs, fs_prot);
out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

static int accel_psp_fs_rx_destroy(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
{
	struct mlx5e_accel_fs_psp_prot *fs_prot;
	struct mlx5e_accel_fs_psp *accel_psp;

	accel_psp = fs->rx_fs;

	/* The netdev unreg already happened, so all offloaded rules are already removed */
	fs_prot = &accel_psp->fs_prot[type];

	accel_psp_fs_rx_fs_destroy(fs, fs_prot);

	accel_psp_fs_rx_err_destroy_ft(fs, &fs_prot->rx_err);

	return 0;
}

static int accel_psp_fs_rx_create(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs->fs, false);
	struct mlx5e_accel_fs_psp_prot *fs_prot;
	struct mlx5e_accel_fs_psp *accel_psp;
	int err;

	accel_psp = fs->rx_fs;
	fs_prot = &accel_psp->fs_prot[type];

	fs_prot->default_dest = mlx5_ttc_get_default_dest(ttc, fs_psp2tt(type));

	err = accel_psp_fs_rx_err_create_ft(fs, fs_prot, &fs_prot->rx_err);
	if (err)
		return err;

	err = accel_psp_fs_rx_create_ft(fs, fs_prot);
	if (err)
		accel_psp_fs_rx_err_destroy_ft(fs, &fs_prot->rx_err);

	return err;
}

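/* Reference-counted getter for the per-protocol RX tables: create them
 * on first use and point the matching TTC traffic type at them.
 */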
static int accel_psp_fs_rx_ft_get(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
{
	struct mlx5e_accel_fs_psp_prot *fs_prot;
	struct mlx5_flow_destination dest = {};
	struct mlx5e_accel_fs_psp *accel_psp;
	struct mlx5_ttc_table *ttc;
	int err = 0;

	if (!fs || !fs->rx_fs)
		return -EINVAL;

	ttc = mlx5e_fs_get_ttc(fs->fs, false);
	accel_psp = fs->rx_fs;
	fs_prot = &accel_psp->fs_prot[type];
	mutex_lock(&fs_prot->prot_mutex);
	if (fs_prot->refcnt++)
		goto out;

	/* create FT */
	err = accel_psp_fs_rx_create(fs, type);
	if (err) {
		fs_prot->refcnt--;
		goto out;
	}

	/* connect */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = fs_prot->ft;
	mlx5_ttc_fwd_dest(ttc, fs_psp2tt(type), &dest);

out:
	mutex_unlock(&fs_prot->prot_mutex);
	return err;
}

static void accel_psp_fs_rx_ft_put(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs->fs, false);
	struct mlx5e_accel_fs_psp_prot *fs_prot;
	struct mlx5e_accel_fs_psp *accel_psp;

	accel_psp = fs->rx_fs;
	fs_prot = &accel_psp->fs_prot[type];
	mutex_lock(&fs_prot->prot_mutex);
	if (--fs_prot->refcnt)
		goto out;

	/* disconnect */
	mlx5_ttc_fwd_default_dest(ttc, fs_psp2tt(type));

	/* remove FT */
	accel_psp_fs_rx_destroy(fs, type);

out:
	mutex_unlock(&fs_prot->prot_mutex);
}

static void accel_psp_fs_cleanup_rx(struct mlx5e_psp_fs *fs)
{
	struct mlx5e_accel_fs_psp_prot *fs_prot;
	struct mlx5e_accel_fs_psp *accel_psp;
	enum accel_fs_psp_type i;

	if (!fs->rx_fs)
		return;

	accel_psp = fs->rx_fs;
	mlx5_fc_destroy(fs->mdev, accel_psp->rx_bad_counter);
	mlx5_fc_destroy(fs->mdev, accel_psp->rx_err_counter);
	mlx5_fc_destroy(fs->mdev, accel_psp->rx_auth_fail_counter);
	mlx5_fc_destroy(fs->mdev, accel_psp->rx_counter);
	for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
		fs_prot = &accel_psp->fs_prot[i];
		mutex_destroy(&fs_prot->prot_mutex);
		WARN_ON(fs_prot->refcnt);
	}
	kfree(fs->rx_fs);
	fs->rx_fs = NULL;
}

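/* Allocate the RX bookkeeping structure, init the per-protocol mutexes
 * and create the four RX flow counters (decrypted, auth-fail,
 * framing-error and catch-all drops).
 */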
static int accel_psp_fs_init_rx(struct mlx5e_psp_fs *fs)
{
	struct mlx5e_accel_fs_psp_prot *fs_prot;
	struct mlx5e_accel_fs_psp *accel_psp;
	struct mlx5_core_dev *mdev = fs->mdev;
	struct mlx5_fc *flow_counter;
	enum accel_fs_psp_type i;
	int err;

	accel_psp = kzalloc(sizeof(*accel_psp), GFP_KERNEL);
	if (!accel_psp)
		return -ENOMEM;

	for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
		fs_prot = &accel_psp->fs_prot[i];
		mutex_init(&fs_prot->prot_mutex);
	}

	flow_counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(flow_counter)) {
		mlx5_core_warn(mdev,
			       "fail to create psp rx flow counter err=%pe\n",
			       flow_counter);
		err = PTR_ERR(flow_counter);
		goto out_err;
	}
	accel_psp->rx_counter = flow_counter;

	flow_counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(flow_counter)) {
		mlx5_core_warn(mdev,
			       "fail to create psp rx auth fail flow counter err=%pe\n",
			       flow_counter);
		err = PTR_ERR(flow_counter);
		goto out_counter_err;
	}
	accel_psp->rx_auth_fail_counter = flow_counter;

	flow_counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(flow_counter)) {
		mlx5_core_warn(mdev,
			       "fail to create psp rx error flow counter err=%pe\n",
			       flow_counter);
		err = PTR_ERR(flow_counter);
		goto out_auth_fail_counter_err;
	}
	accel_psp->rx_err_counter = flow_counter;

	flow_counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(flow_counter)) {
		mlx5_core_warn(mdev,
			       "fail to create psp rx bad flow counter err=%pe\n",
			       flow_counter);
		err = PTR_ERR(flow_counter);
		goto out_err_counter_err;
	}
	accel_psp->rx_bad_counter = flow_counter;

	fs->rx_fs = accel_psp;

	return 0;

out_err_counter_err:
	mlx5_fc_destroy(mdev, accel_psp->rx_err_counter);
	accel_psp->rx_err_counter = NULL;
out_auth_fail_counter_err:
	mlx5_fc_destroy(mdev, accel_psp->rx_auth_fail_counter);
	accel_psp->rx_auth_fail_counter = NULL;
out_counter_err:
	mlx5_fc_destroy(mdev, accel_psp->rx_counter);
	accel_psp->rx_counter = NULL;
out_err:
	for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
		fs_prot = &accel_psp->fs_prot[i];
		mutex_destroy(&fs_prot->prot_mutex);
	}
	kfree(accel_psp);
	fs->rx_fs = NULL;

	return err;
}

void mlx5_accel_psp_fs_cleanup_rx_tables(struct mlx5e_priv *priv)
{
	int i;

	if (!priv->psp)
		return;

	for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++)
		accel_psp_fs_rx_ft_put(priv->psp->fs, i);
}

int mlx5_accel_psp_fs_init_rx_tables(struct mlx5e_priv *priv)
{
	struct mlx5e_psp_fs *fs;
	int err, i;

	if (!priv->psp)
		return 0;

	fs = priv->psp->fs;
	for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
		err = accel_psp_fs_rx_ft_get(fs, i);
		if (err)
			goto out_err;
	}

	return 0;

out_err:
	i--;
	while (i >= 0) {
		accel_psp_fs_rx_ft_put(fs, i);
		--i;
	}

	return err;
}

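/* Create the TX flow table in the egress namespace with a single rule
 * that encrypts and counts PSP UDP traffic.
 */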
static int accel_psp_fs_tx_create_ft_table(struct mlx5e_psp_fs *fs)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_destination dest = {};
	struct mlx5_core_dev *mdev = fs->mdev;
	struct mlx5_flow_act flow_act = {};
	u32 *in, *mc, *outer_headers_c;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_psp_tx *tx_fs;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!spec || !in) {
		err = -ENOMEM;
		goto out;
	}

	ft_attr.max_fte = 1;
#define MLX5E_PSP_PRIO 0
	ft_attr.prio = MLX5E_PSP_PRIO;
#define MLX5E_PSP_LEVEL 0
	ft_attr.level = MLX5E_PSP_LEVEL;
	ft_attr.autogroup.max_num_groups = 1;

	tx_fs = fs->tx_fs;
	ft = mlx5_create_flow_table(tx_fs->ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "PSP: fail to add psp tx flow table, err = %d\n", err);
		goto out;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	fg = mlx5_create_flow_group(ft, in);
	if (IS_ERR(fg)) {
		err = PTR_ERR(fg);
		mlx5_core_err(mdev, "PSP: fail to add psp tx flow group, err = %d\n", err);
		goto err_create_fg;
	}

	setup_fte_udp_psp(spec, PSP_DEFAULT_UDP_PORT);
	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = tx_fs->tx_counter;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "PSP: fail to add psp tx flow rule, err = %d\n", err);
		goto err_add_flow_rule;
	}

	tx_fs->ft = ft;
	tx_fs->fg = fg;
	tx_fs->rule = rule;
	goto out;

err_add_flow_rule:
	mlx5_destroy_flow_group(fg);
err_create_fg:
	mlx5_destroy_flow_table(ft);
out:
	kvfree(in);
	kvfree(spec);
	return err;
}

static void accel_psp_fs_tx_destroy(struct mlx5e_psp_tx *tx_fs)
{
	if (!tx_fs->ft)
		return;

	mlx5_del_flow_rules(tx_fs->rule);
	mlx5_destroy_flow_group(tx_fs->fg);
	mlx5_destroy_flow_table(tx_fs->ft);
}

static int accel_psp_fs_tx_ft_get(struct mlx5e_psp_fs *fs)
{
	struct mlx5e_psp_tx *tx_fs = fs->tx_fs;
	int err = 0;

	mutex_lock(&tx_fs->mutex);
	if (tx_fs->refcnt++)
		goto out;

	err = accel_psp_fs_tx_create_ft_table(fs);
	if (err)
		tx_fs->refcnt--;
out:
	mutex_unlock(&tx_fs->mutex);
	return err;
}

static void accel_psp_fs_tx_ft_put(struct mlx5e_psp_fs *fs)
{
	struct mlx5e_psp_tx *tx_fs = fs->tx_fs;

	mutex_lock(&tx_fs->mutex);
	if (--tx_fs->refcnt)
		goto out;

	accel_psp_fs_tx_destroy(tx_fs);
out:
	mutex_unlock(&tx_fs->mutex);
}

static void accel_psp_fs_cleanup_tx(struct mlx5e_psp_fs *fs)
{
	struct mlx5e_psp_tx *tx_fs = fs->tx_fs;

	if (!tx_fs)
		return;

	mlx5_fc_destroy(fs->mdev, tx_fs->tx_counter);
	mutex_destroy(&tx_fs->mutex);
	WARN_ON(tx_fs->refcnt);
	kfree(tx_fs);
	fs->tx_fs = NULL;
}

static int accel_psp_fs_init_tx(struct mlx5e_psp_fs *fs)
{
	struct mlx5_core_dev *mdev = fs->mdev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_fc *flow_counter;
	struct mlx5e_psp_tx *tx_fs;

	ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
	if (!ns)
		return -EOPNOTSUPP;

	tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL);
	if (!tx_fs)
		return -ENOMEM;

	flow_counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(flow_counter)) {
		mlx5_core_warn(mdev,
			       "fail to create psp tx flow counter err=%pe\n",
			       flow_counter);
		kfree(tx_fs);
		return PTR_ERR(flow_counter);
	}
	tx_fs->tx_counter = flow_counter;
	mutex_init(&tx_fs->mutex);
	tx_fs->ns = ns;
	fs->tx_fs = tx_fs;
	return 0;
}

static void
mlx5e_accel_psp_fs_get_stats_fill(struct mlx5e_priv *priv,
				  struct mlx5e_psp_stats *stats)
{
	struct mlx5e_psp_tx *tx_fs = priv->psp->fs->tx_fs;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_accel_fs_psp *accel_psp;

	accel_psp = (struct mlx5e_accel_fs_psp *)priv->psp->fs->rx_fs;

	if (tx_fs->tx_counter)
		mlx5_fc_query(mdev, tx_fs->tx_counter, &stats->psp_tx_pkts,
			      &stats->psp_tx_bytes);

	if (accel_psp->rx_counter)
		mlx5_fc_query(mdev, accel_psp->rx_counter, &stats->psp_rx_pkts,
			      &stats->psp_rx_bytes);

	if (accel_psp->rx_auth_fail_counter)
		mlx5_fc_query(mdev, accel_psp->rx_auth_fail_counter,
			      &stats->psp_rx_pkts_auth_fail,
			      &stats->psp_rx_bytes_auth_fail);

	if (accel_psp->rx_err_counter)
		mlx5_fc_query(mdev, accel_psp->rx_err_counter,
			      &stats->psp_rx_pkts_frame_err,
			      &stats->psp_rx_bytes_frame_err);

	if (accel_psp->rx_bad_counter)
		mlx5_fc_query(mdev, accel_psp->rx_bad_counter,
			      &stats->psp_rx_pkts_drop,
			      &stats->psp_rx_bytes_drop);
}

void mlx5_accel_psp_fs_cleanup_tx_tables(struct mlx5e_priv *priv)
{
	if (!priv->psp)
		return;

	accel_psp_fs_tx_ft_put(priv->psp->fs);
}

int mlx5_accel_psp_fs_init_tx_tables(struct mlx5e_priv *priv)
{
	if (!priv->psp)
		return 0;

	return accel_psp_fs_tx_ft_get(priv->psp->fs);
}

static void mlx5e_accel_psp_fs_cleanup(struct mlx5e_psp_fs *fs)
{
	accel_psp_fs_cleanup_rx(fs);
	accel_psp_fs_cleanup_tx(fs);
	kfree(fs);
}

static struct mlx5e_psp_fs *mlx5e_accel_psp_fs_init(struct mlx5e_priv *priv)
{
	struct mlx5e_psp_fs *fs;
	int err = 0;

	fs = kzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		return ERR_PTR(-ENOMEM);

	fs->mdev = priv->mdev;
	err = accel_psp_fs_init_tx(fs);
	if (err)
		goto err_tx;

	fs->fs = priv->fs;
	err = accel_psp_fs_init_rx(fs);
	if (err)
		goto err_rx;

	return fs;

err_rx:
	accel_psp_fs_cleanup_tx(fs);
err_tx:
	kfree(fs);
	return ERR_PTR(err);
}

static int
mlx5e_psp_set_config(struct psp_dev *psd, struct psp_dev_config *conf,
		     struct netlink_ext_ack *extack)
{
	return 0; /* TODO: this should actually do things to the device */
}

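/* Ask the device for a fresh SPI and the corresponding key via the
 * PSP_GEN_SPI command. The key is returned in the tail of the 32-byte
 * key field, so only the last @keysz_bytes bytes are copied out.
 */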
static int
mlx5e_psp_generate_key_spi(struct mlx5_core_dev *mdev,
			   enum mlx5_psp_gen_spi_in_key_size keysz,
			   unsigned int keysz_bytes,
			   struct psp_key_parsed *key)
{
	u32 out[MLX5_ST_SZ_DW(psp_gen_spi_out) + MLX5_ST_SZ_DW(key_spi)] = {};
	u32 in[MLX5_ST_SZ_DW(psp_gen_spi_in)] = {};
	void *outkey;
	int err;

	WARN_ON_ONCE(keysz_bytes > PSP_MAX_KEY);

	MLX5_SET(psp_gen_spi_in, in, opcode, MLX5_CMD_OP_PSP_GEN_SPI);
	MLX5_SET(psp_gen_spi_in, in, key_size, keysz);
	MLX5_SET(psp_gen_spi_in, in, num_of_spi, 1);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	outkey = MLX5_ADDR_OF(psp_gen_spi_out, out, key_spi);
	key->spi = cpu_to_be32(MLX5_GET(key_spi, outkey, spi));
	memcpy(key->key, MLX5_ADDR_OF(key_spi, outkey, key) + 32 - keysz_bytes,
	       keysz_bytes);

	return 0;
}

static int
mlx5e_psp_rx_spi_alloc(struct psp_dev *psd, u32 version,
		       struct psp_key_parsed *assoc,
		       struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
	enum mlx5_psp_gen_spi_in_key_size keysz;
	u8 keysz_bytes;

	switch (version) {
	case PSP_VERSION_HDR0_AES_GCM_128:
		keysz = MLX5_PSP_GEN_SPI_IN_KEY_SIZE_128;
		keysz_bytes = 16;
		break;
	case PSP_VERSION_HDR0_AES_GCM_256:
		keysz = MLX5_PSP_GEN_SPI_IN_KEY_SIZE_256;
		keysz_bytes = 32;
		break;
	default:
		return -EINVAL;
	}

	return mlx5e_psp_generate_key_spi(priv->mdev, keysz, keysz_bytes, assoc);
}

struct psp_key {
	u32 id;
};

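/* Add the TX key of a new association: create a device encryption key
 * object for it and store the object id in the association's driver
 * data.
 */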
static int mlx5e_psp_assoc_add(struct psp_dev *psd, struct psp_assoc *pas,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct psp_key_parsed *tx = &pas->tx;
	struct mlx5e_psp *psp = priv->psp;
	struct psp_key *nkey;
	int err;

	mdev = priv->mdev;
	nkey = (struct psp_key *)pas->drv_data;

	err = mlx5_create_encryption_key(mdev, tx->key,
					 psp_key_size(pas->version),
					 MLX5_ACCEL_OBJ_PSP_KEY,
					 &nkey->id);
	if (err) {
		mlx5_core_err(mdev, "Failed to create encryption key (err = %d)\n", err);
		return err;
	}

	atomic_inc(&psp->tx_key_cnt);
	return 0;
}

static void mlx5e_psp_assoc_del(struct psp_dev *psd, struct psp_assoc *pas)
{
	struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
	struct mlx5e_psp *psp = priv->psp;
	struct psp_key *nkey;

	nkey = (struct psp_key *)pas->drv_data;
	mlx5_destroy_encryption_key(priv->mdev, nkey->id);
	atomic_dec(&psp->tx_key_cnt);
}

static int mlx5e_psp_rotate_key(struct mlx5_core_dev *mdev)
{
	u32 in[MLX5_ST_SZ_DW(psp_rotate_key_in)] = {};
	u32 out[MLX5_ST_SZ_DW(psp_rotate_key_out)];

	MLX5_SET(psp_rotate_key_in, in, opcode,
		 MLX5_CMD_OP_PSP_ROTATE_KEY);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

static int
mlx5e_psp_key_rotate(struct psp_dev *psd, struct netlink_ext_ack *exack)
{
	struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);

	/* no support for protecting against external rotations */
	psd->generation = 0;

	return mlx5e_psp_rotate_key(priv->mdev);
}

static void
mlx5e_psp_get_stats(struct psp_dev *psd, struct psp_dev_stats *stats)
{
	struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
	struct mlx5e_psp_stats nstats;

	mlx5e_accel_psp_fs_get_stats_fill(priv, &nstats);
	stats->rx_packets = nstats.psp_rx_pkts;
	stats->rx_bytes = nstats.psp_rx_bytes;
	stats->rx_auth_fail = nstats.psp_rx_pkts_auth_fail;
	stats->rx_error = nstats.psp_rx_pkts_frame_err;
	stats->rx_bad = nstats.psp_rx_pkts_drop;
	stats->tx_packets = nstats.psp_tx_pkts;
	stats->tx_bytes = nstats.psp_tx_bytes;
	stats->tx_error = atomic_read(&priv->psp->tx_drop);
}

static struct psp_dev_ops mlx5_psp_ops = {
	.set_config = mlx5e_psp_set_config,
	.rx_spi_alloc = mlx5e_psp_rx_spi_alloc,
	.tx_key_add = mlx5e_psp_assoc_add,
	.tx_key_del = mlx5e_psp_assoc_del,
	.key_rotate = mlx5e_psp_key_rotate,
	.get_stats = mlx5e_psp_get_stats,
};

void mlx5e_psp_unregister(struct mlx5e_priv *priv)
{
	if (!priv->psp || !priv->psp->psp)
		return;

	psp_dev_unregister(priv->psp->psp);
}

void mlx5e_psp_register(struct mlx5e_priv *priv)
{
	/* FW Caps missing */
	if (!priv->psp)
		return;

	priv->psp->caps.assoc_drv_spc = sizeof(u32);
	priv->psp->caps.versions = 1 << PSP_VERSION_HDR0_AES_GCM_128;
	if (MLX5_CAP_PSP(priv->mdev, psp_crypto_esp_aes_gcm_256_encrypt) &&
	    MLX5_CAP_PSP(priv->mdev, psp_crypto_esp_aes_gcm_256_decrypt))
		priv->psp->caps.versions |= 1 << PSP_VERSION_HDR0_AES_GCM_256;

	priv->psp->psp = psp_dev_create(priv->netdev, &mlx5_psp_ops,
					&priv->psp->caps, NULL);
	if (IS_ERR(priv->psp->psp))
		mlx5_core_err(priv->mdev, "PSP failed to register due to %pe\n",
			      priv->psp->psp);
}

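/* Probe-time setup: bail out quietly when the device lacks PSP or the
 * SWP capabilities the datapath needs, otherwise allocate the PSP
 * context and its flow-steering state.
 */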
int mlx5e_psp_init(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_psp_fs *fs;
	struct mlx5e_psp *psp;
	int err;

	if (!mlx5_is_psp_device(mdev)) {
		mlx5_core_dbg(mdev, "PSP offload not supported\n");
		return 0;
	}

	if (!MLX5_CAP_ETH(mdev, swp)) {
		mlx5_core_dbg(mdev, "SWP not supported\n");
		return 0;
	}

	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
		mlx5_core_dbg(mdev, "SWP checksum not supported\n");
		return 0;
	}

	if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial)) {
		mlx5_core_dbg(mdev, "SWP L4 partial checksum not supported\n");
		return 0;
	}

	if (!MLX5_CAP_ETH(mdev, swp_lso)) {
		mlx5_core_dbg(mdev, "PSP LSO not supported\n");
		return 0;
	}

	psp = kzalloc(sizeof(*psp), GFP_KERNEL);
	if (!psp)
		return -ENOMEM;

	priv->psp = psp;
	fs = mlx5e_accel_psp_fs_init(priv);
	if (IS_ERR(fs)) {
		err = PTR_ERR(fs);
		goto out_err;
	}

	psp->fs = fs;

	mlx5_core_dbg(priv->mdev, "PSP attached to netdevice\n");
	return 0;

out_err:
	priv->psp = NULL;
	kfree(psp);
	return err;
}

void mlx5e_psp_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_psp *psp = priv->psp;

	if (!psp)
		return;

	WARN_ON(atomic_read(&psp->tx_key_cnt));
	mlx5e_accel_psp_fs_cleanup(psp->fs);
	priv->psp = NULL;
	kfree(psp);
}