xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3 
4 #include <linux/mlx5/device.h>
5 #include <linux/mlx5/mlx5_ifc.h>
6 #include <linux/xarray.h>
7 #include <linux/if_vlan.h>
8 #include <linux/iopoll.h>
9 
10 #include "en.h"
11 #include "lib/aso.h"
12 #include "lib/crypto.h"
13 #include "en_accel/macsec.h"
14 
/* Midpoint of the 32-bit lower-PN space; used in update_macsec_epn() to
 * decide whether the next PN lies in the upper half (overlap flag).
 */
#define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
/* Byte size of the hardware MACsec ASO context. */
#define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)

/* Event-arm bits reported for a MACsec ASO object. */
enum mlx5_macsec_aso_event_arm {
	MLX5E_ASO_EPN_ARM = BIT(0),	/* EPN event is armed */
};

enum {
	/* NOTE(review): presumably an offset into the ASO returned data used
	 * when removing a flow; its use is outside this chunk — confirm.
	 */
	MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
};

/* Identifies one hardware MACsec object within a mlx5e_macsec instance. */
struct mlx5e_macsec_handle {
	struct mlx5e_macsec *macsec;	/* owning MACsec context */
	u32 obj_id;			/* hardware MACsec object id */
	u8 idx;				/* index within the owner (use outside this chunk) */
};

/* ASO query modes (mlx5e_macsec_aso_in.mode). */
enum {
	MLX5_MACSEC_EPN,
};

/* Result of an ASO query. */
struct mlx5e_macsec_aso_out {
	u8 event_arm;		/* enum mlx5_macsec_aso_event_arm bits */
	u32 mode_param;		/* mode-dependent parameter returned by HW */
};

/* Parameters of an ASO query. */
struct mlx5e_macsec_aso_in {
	u8 mode;		/* e.g. MLX5_MACSEC_EPN */
	u32 obj_id;		/* hardware MACsec object to query */
};

/* Software-tracked extended packet number (XPN) state of an SA. */
struct mlx5e_macsec_epn_state {
	u32 epn_msb;		/* upper 32 bits of the next packet number */
	u8 epn_enabled;		/* non-zero when the SA uses XPN */
	u8 overlap;		/* set when the lower PN is in the upper half of its range */
};

/* Deferred (workqueue) handling context for an ASO event on obj_id. */
struct mlx5e_macsec_async_work {
	struct mlx5e_macsec *macsec;
	struct mlx5_core_dev *mdev;
	struct work_struct work;
	u32 obj_id;		/* hardware MACsec object the event refers to */
};
58 
/* Driver state for one MACsec security association (Tx or Rx). */
struct mlx5e_macsec_sa {
	bool active;		/* SA currently offloaded (rule + object exist) */
	u8  assoc_num;		/* association number within the SC */
	u32 macsec_obj_id;	/* hardware MACsec object id */
	u32 enc_key_id;		/* hardware encryption key object id */
	u32 next_pn;		/* lower 32 bits of the next packet number */
	sci_t sci;
	ssci_t ssci;		/* short SCI (XPN mode only) */
	salt_t salt;		/* key salt (XPN mode only) */

	union mlx5_macsec_rule *macsec_rule;	/* steering rule, NULL when not installed */
	struct rcu_head rcu_head;
	struct mlx5e_macsec_epn_state epn_state;
};

struct mlx5e_macsec_rx_sc;
/* Entry stored in mlx5e_macsec.sc_xarray: maps a fs_id to its Rx SC. */
struct mlx5e_macsec_rx_sc_xarray_element {
	u32 fs_id;		/* xarray index, also used by steering to tag traffic */
	struct mlx5e_macsec_rx_sc *rx_sc;
};

/* Driver state for one MACsec Rx secure channel. */
struct mlx5e_macsec_rx_sc {
	bool active;
	sci_t sci;
	struct mlx5e_macsec_sa *rx_sa[MACSEC_NUM_AN];	/* per-AN Rx SAs, NULL if absent */
	struct list_head rx_sc_list_element;	/* link in macsec_device rx_sc list (RCU) */
	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
	struct metadata_dst *md_dst;	/* carries the SCI to the stack on Rx */
	struct rcu_head rcu_head;
};

/* DMA-mapped UMR buffer holding a macsec_aso context for ASO queries. */
struct mlx5e_macsec_umr {
	u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
	dma_addr_t dma_addr;	/* DMA address of ctx */
	u32 mkey;		/* memory key registered over ctx */
};
95 
/* ASO (advanced steering operation) machinery used to query/update
 * hardware MACsec objects.
 */
struct mlx5e_macsec_aso {
	/* ASO */
	struct mlx5_aso *maso;
	/* Protects macsec ASO */
	struct mutex aso_lock;
	/* UMR */
	struct mlx5e_macsec_umr *umr;

	u32 pdn;	/* protection domain used for the UMR mkey */
};

/* Per-netdev MACsec offload state (one per offloaded MACsec device). */
struct mlx5e_macsec_device {
	const struct net_device *netdev;	/* the macsec netdev this entry represents */
	struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN];	/* per-AN Tx SAs, NULL if absent */
	struct list_head macsec_rx_sc_list_head;	/* Rx SCs of this device (RCU list) */
	unsigned char *dev_addr;	/* copy of the device MAC address (managed elsewhere) */
	struct list_head macsec_device_list_element;	/* link in mlx5e_macsec device list */
};

/* Top-level MACsec offload context attached to the mlx5e private data. */
struct mlx5e_macsec {
	struct list_head macsec_device_list_head;
	int num_of_devices;
	struct mutex lock; /* Protects mlx5e_macsec internal contexts */

	/* Rx fs_id -> rx_sc mapping */
	struct xarray sc_xarray;

	struct mlx5_core_dev *mdev;

	/* ASO */
	struct mlx5e_macsec_aso aso;

	struct notifier_block nb;	/* async event notifier (registered elsewhere) */
	struct workqueue_struct *wq;	/* queue for deferred event handling */
};

/* Attributes used to create a hardware MACsec object. */
struct mlx5_macsec_obj_attrs {
	u32 aso_pdn;		/* PD the object uses for ASO access */
	u32 next_pn;		/* initial lower 32 bits of the packet number */
	__be64 sci;
	u32 enc_key_id;
	bool encrypt;		/* confidentiality (encryption) enabled */
	struct mlx5e_macsec_epn_state epn_state;
	salt_t salt;		/* XPN only */
	__be32 ssci;		/* XPN only */
	bool replay_protect;
	u32 replay_window;	/* must be 32/64/128/256 (see macsec_set_replay_protection) */
};

/* Parameters for building an ASO control segment. */
struct mlx5_aso_ctrl_param {
	u8   data_mask_mode;
	u8   condition_0_operand;
	u8   condition_1_operand;
	u8   condition_0_offset;
	u8   condition_1_offset;
	u8   data_offset;
	u8   condition_operand;
	u32  condition_0_data;
	u32  condition_0_mask;
	u32  condition_1_data;
	u32  condition_1_mask;
	u64  bitwise_data;
	u64  data_mask;
};
160 
mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev * mdev,struct mlx5e_macsec_aso * aso)161 static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
162 {
163 	struct mlx5e_macsec_umr *umr;
164 	struct device *dma_device;
165 	dma_addr_t dma_addr;
166 	int err;
167 
168 	umr = kzalloc_obj(*umr);
169 	if (!umr) {
170 		err = -ENOMEM;
171 		return err;
172 	}
173 
174 	dma_device = mlx5_core_dma_dev(mdev);
175 	dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
176 	err = dma_mapping_error(dma_device, dma_addr);
177 	if (err) {
178 		mlx5_core_err(mdev, "Can't map dma device, err=%d\n", err);
179 		goto out_dma;
180 	}
181 
182 	err = mlx5e_create_mkey(mdev, aso->pdn, &umr->mkey);
183 	if (err) {
184 		mlx5_core_err(mdev, "Can't create mkey, err=%d\n", err);
185 		goto out_mkey;
186 	}
187 
188 	umr->dma_addr = dma_addr;
189 
190 	aso->umr = umr;
191 
192 	return 0;
193 
194 out_mkey:
195 	dma_unmap_single(dma_device, dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
196 out_dma:
197 	kfree(umr);
198 	return err;
199 }
200 
mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev * mdev,struct mlx5e_macsec_aso * aso)201 static void mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
202 {
203 	struct mlx5e_macsec_umr *umr = aso->umr;
204 
205 	mlx5_core_destroy_mkey(mdev, umr->mkey);
206 	dma_unmap_single(&mdev->pdev->dev, umr->dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
207 	kfree(umr);
208 }
209 
macsec_set_replay_protection(struct mlx5_macsec_obj_attrs * attrs,void * aso_ctx)210 static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, void *aso_ctx)
211 {
212 	u8 window_sz;
213 
214 	if (!attrs->replay_protect)
215 		return 0;
216 
217 	switch (attrs->replay_window) {
218 	case 256:
219 		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_256BIT;
220 		break;
221 	case 128:
222 		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_128BIT;
223 		break;
224 	case 64:
225 		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_64BIT;
226 		break;
227 	case 32:
228 		window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_32BIT;
229 		break;
230 	default:
231 		return -EINVAL;
232 	}
233 	MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz);
234 	MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);
235 
236 	return 0;
237 }
238 
/* Create a hardware MACsec offload object from @attrs.
 * @is_tx selects Tx (SN-increment ASO mode) vs Rx (optional replay
 * protection) semantics.  On success writes the new object id to
 * @macsec_obj_id and returns 0; returns a negative errno otherwise.
 */
static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev,
				      struct mlx5_macsec_obj_attrs *attrs,
				      bool is_tx,
				      u32 *macsec_obj_id)
{
	u32 in[MLX5_ST_SZ_DW(create_macsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	void *aso_ctx;
	void *obj;
	int err;

	obj = MLX5_ADDR_OF(create_macsec_obj_in, in, macsec_object);
	aso_ctx = MLX5_ADDR_OF(macsec_offload_obj, obj, macsec_aso);

	MLX5_SET(macsec_offload_obj, obj, confidentiality_en, attrs->encrypt);
	MLX5_SET(macsec_offload_obj, obj, dekn, attrs->enc_key_id);
	MLX5_SET(macsec_offload_obj, obj, aso_return_reg, MLX5_MACSEC_ASO_REG_C_4_5);
	MLX5_SET(macsec_offload_obj, obj, macsec_aso_access_pd, attrs->aso_pdn);
	MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn);

	/* Epn */
	if (attrs->epn_state.epn_enabled) {
		void *salt_p;
		int i;

		MLX5_SET(macsec_aso, aso_ctx, epn_event_arm, 1);
		MLX5_SET(macsec_offload_obj, obj, epn_en, 1);
		MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
		MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
		/* In XPN mode the short SCI takes the sci field's place. */
		MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)attrs->ssci);
		salt_p = MLX5_ADDR_OF(macsec_offload_obj, obj, salt);
		/* Copy the 12-byte salt one 32-bit word at a time with the
		 * word order reversed relative to the key's salt layout.
		 */
		for (i = 0; i < 3 ; i++)
			memcpy((u32 *)salt_p + i, &attrs->salt.bytes[4 * (2 - i)], 4);
	} else {
		MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci));
	}

	MLX5_SET(macsec_aso, aso_ctx, valid, 0x1);
	if (is_tx) {
		MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_INC_SN);
	} else {
		/* Rx side: optionally enable replay protection instead. */
		err = macsec_set_replay_protection(attrs, aso_ctx);
		if (err)
			return err;
	}

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev,
			      "MACsec offload: Failed to create MACsec object (err = %d)\n",
			      err);
		return err;
	}

	*macsec_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}
301 
/* Destroy a hardware MACsec object previously created by
 * mlx5e_macsec_create_object().  The command status is intentionally
 * ignored — this is teardown with nothing useful to do on failure.
 */
static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_obj_id)
{
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_obj_id);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
313 
/* Remove the steering rule of @sa (if installed) and clear the pointer.
 * Safe to call when no rule is installed; @fs_id is only meaningful on Rx.
 */
static void mlx5e_macsec_cleanup_sa_fs(struct mlx5e_macsec *macsec,
				       struct mlx5e_macsec_sa *sa, bool is_tx,
				       struct net_device *netdev, u32 fs_id)
{
	int action;

	if (!sa->macsec_rule)
		return;

	action = is_tx ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
			 MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
	mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev,
				fs_id);
	sa->macsec_rule = NULL;
}
328 
/* Full SA teardown: remove the steering rule first, then destroy the
 * hardware MACsec object (order matters — the rule references the object).
 */
static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
				    struct mlx5e_macsec_sa *sa, bool is_tx,
				    struct net_device *netdev, u32 fs_id)
{
	mlx5e_macsec_cleanup_sa_fs(macsec, sa, is_tx, netdev, fs_id);
	mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
}
336 
/* Install the steering rule for @sa and remember it in sa->macsec_rule.
 * On Tx, only the encoding SA gets a rule (others return 0 untouched).
 * For Rx, *fs_id receives/carries the flow-steering id.  Returns 0 or
 * -ENOMEM when rule creation fails.
 */
static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
				   struct mlx5e_macsec_sa *sa, bool encrypt,
				   bool is_tx, u32 *fs_id)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
	struct mlx5_macsec_rule_attrs attrs;
	union mlx5_macsec_rule *rule;

	/* Only the encoding SA carries Tx traffic. */
	if (is_tx && ctx->secy->tx_sc.encoding_sa != sa->assoc_num)
		return 0;

	attrs.macsec_obj_id = sa->macsec_obj_id;
	attrs.sci = sa->sci;
	attrs.assoc_num = sa->assoc_num;
	attrs.action = is_tx ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
			       MLX5_ACCEL_MACSEC_ACTION_DECRYPT;

	rule = mlx5_macsec_fs_add_rule(macsec_fs, ctx, &attrs, fs_id);
	if (!rule)
		return -ENOMEM;

	sa->macsec_rule = rule;

	return 0;
}
364 
/* Create the hardware MACsec object for @sa and, when the SA is active,
 * also install its steering rule.  On rule failure the freshly created
 * object is destroyed so the function leaves no partial state behind.
 */
static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
				struct mlx5e_macsec_sa *sa,
				bool encrypt, bool is_tx, u32 *fs_id)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_macsec_obj_attrs attrs;
	int err;

	attrs.next_pn = sa->next_pn;
	attrs.sci = cpu_to_be64((__force u64)sa->sci);
	attrs.enc_key_id = sa->enc_key_id;
	attrs.encrypt = encrypt;
	attrs.aso_pdn = priv->macsec->aso.pdn;
	attrs.epn_state = sa->epn_state;
	attrs.replay_window = ctx->secy->replay_window;
	attrs.replay_protect = ctx->secy->replay_protect;

	if (sa->epn_state.epn_enabled) {
		attrs.ssci = cpu_to_be32((__force u32)sa->ssci);
		memcpy(&attrs.salt, &sa->salt, sizeof(sa->salt));
	}

	err = mlx5e_macsec_create_object(mdev, &attrs, is_tx, &sa->macsec_obj_id);
	if (err)
		return err;

	if (!sa->active)
		return 0;

	err = mlx5e_macsec_init_sa_fs(ctx, sa, encrypt, is_tx, fs_id);
	if (err) {
		/* Roll back: don't leak the hardware object. */
		mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id);
		return err;
	}

	return 0;
}
407 
408 static struct mlx5e_macsec_rx_sc *
mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head * list,sci_t sci)409 mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
410 {
411 	struct mlx5e_macsec_rx_sc *iter;
412 
413 	list_for_each_entry_rcu(iter, list, rx_sc_list_element) {
414 		if (iter->sci == sci)
415 			return iter;
416 	}
417 
418 	return NULL;
419 }
420 
/* Toggle an Rx SA's offload state to @active.
 * Activating creates the HW object + rule (state is rolled back on
 * failure); deactivating tears both down.  A no-op when the state
 * already matches.  Returns 0 or a negative errno.
 */
static int macsec_rx_sa_active_update(struct macsec_context *ctx,
				      struct mlx5e_macsec_sa *rx_sa,
				      bool active, u32 *fs_id)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec *macsec = priv->macsec;
	int err;

	if (rx_sa->active == active)
		return 0;

	if (active) {
		rx_sa->active = true;
		err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, fs_id);
		if (err)
			rx_sa->active = false;
		return err;
	}

	rx_sa->active = false;
	mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, *fs_id);
	return 0;
}
444 
mlx5e_macsec_secy_features_validate(struct macsec_context * ctx)445 static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
446 {
447 	const struct net_device *netdev = ctx->netdev;
448 	const struct macsec_secy *secy = ctx->secy;
449 
450 	if (secy->validate_frames != MACSEC_VALIDATE_STRICT) {
451 		netdev_err(netdev,
452 			   "MACsec offload is supported only when validate_frame is in strict mode\n");
453 		return false;
454 	}
455 
456 	if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) {
457 		netdev_err(netdev, "MACsec offload is supported only when icv_len is %d\n",
458 			   MACSEC_DEFAULT_ICV_LEN);
459 		return false;
460 	}
461 
462 	if (!secy->protect_frames) {
463 		netdev_err(netdev,
464 			   "MACsec offload is supported only when protect_frames is set\n");
465 		return false;
466 	}
467 
468 	if (!ctx->secy->tx_sc.encrypt) {
469 		netdev_err(netdev, "MACsec offload: encrypt off isn't supported\n");
470 		return false;
471 	}
472 
473 	return true;
474 }
475 
476 static struct mlx5e_macsec_device *
mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec * macsec,const struct macsec_context * ctx)477 mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
478 				       const struct macsec_context *ctx)
479 {
480 	struct mlx5e_macsec_device *iter;
481 	const struct list_head *list;
482 
483 	list = &macsec->macsec_device_list_head;
484 	list_for_each_entry_rcu(iter, list, macsec_device_list_element) {
485 		if (iter->netdev == ctx->secy->netdev)
486 			return iter;
487 	}
488 
489 	return NULL;
490 }
491 
/* Record XPN (extended packet number) parameters on @sa: SSCI, salt,
 * the upper PN half, and whether the lower half already crossed the
 * midpoint of its range (overlap).
 */
static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
			      const pn_t *next_pn_halves, ssci_t ssci)
{
	struct mlx5e_macsec_epn_state *epn = &sa->epn_state;

	sa->ssci = ssci;
	sa->salt = key->salt;
	epn->epn_enabled = 1;
	epn->epn_msb = next_pn_halves->upper;
	epn->overlap = next_pn_halves->lower >= MLX5_MACSEC_EPN_SCOPE_MID;
}
503 
/* .mdo_add_txsa: allocate and offload a new Tx SA.
 * Creates the encryption key object always; the HW MACsec object and
 * steering rule are only created when the SecY is operational.
 * Returns 0 or a negative errno.
 */
static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
	const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
	const struct macsec_secy *secy = ctx->secy;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_sa *tx_sa;
	struct mlx5e_macsec *macsec;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EEXIST;
		goto out;
	}

	/* An AN slot can hold at most one SA. */
	if (macsec_device->tx_sa[assoc_num]) {
		netdev_err(ctx->netdev, "MACsec offload tx_sa: %d already exist\n", assoc_num);
		err = -EEXIST;
		goto out;
	}

	tx_sa = kzalloc_obj(*tx_sa);
	if (!tx_sa) {
		err = -ENOMEM;
		goto out;
	}

	tx_sa->active = ctx_tx_sa->active;
	tx_sa->next_pn = ctx_tx_sa->next_pn_halves.lower;
	tx_sa->sci = secy->sci;
	tx_sa->assoc_num = assoc_num;

	/* XPN mode carries extra state (SSCI, salt, upper PN half). */
	if (secy->xpn)
		update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves,
				  ctx_tx_sa->ssci);

	err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
					 MLX5_ACCEL_OBJ_MACSEC_KEY,
					 &tx_sa->enc_key_id);
	if (err)
		goto destroy_sa;

	macsec_device->tx_sa[assoc_num] = tx_sa;
	/* Not operational yet: keep the SA registered, defer HW setup. */
	if (!secy->operational)
		goto out;

	err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
	if (err)
		goto destroy_encryption_key;

	mutex_unlock(&macsec->lock);

	return 0;

destroy_encryption_key:
	macsec_device->tx_sa[assoc_num] = NULL;
	mlx5_destroy_encryption_key(mdev, tx_sa->enc_key_id);
destroy_sa:
	kfree(tx_sa);
out:
	mutex_unlock(&macsec->lock);

	return err;
}
576 
/* .mdo_upd_txsa: update an existing Tx SA.
 * Only an active-state toggle is supported (PN updates are rejected).
 * Rules are (de)installed only when this SA is the encoding SA.
 */
static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
	const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
	struct mlx5e_macsec_device *macsec_device;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_sa *tx_sa;
	struct mlx5e_macsec *macsec;
	struct net_device *netdev;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	netdev = ctx->netdev;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	tx_sa = macsec_device->tx_sa[assoc_num];
	if (!tx_sa) {
		netdev_err(netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
		err = -EEXIST;
		goto out;
	}

	/* Hardware owns the PN once offloaded; rewriting it isn't supported. */
	if (ctx->sa.update_pn) {
		netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
			   assoc_num);
		err = -EINVAL;
		goto out;
	}

	if (tx_sa->active == ctx_tx_sa->active)
		goto out;

	tx_sa->active = ctx_tx_sa->active;
	/* Only the encoding SA has a steering rule to add/remove. */
	if (tx_sa->assoc_num != tx_sc->encoding_sa)
		goto out;

	if (ctx_tx_sa->active) {
		err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
		if (err)
			goto out;
	} else {
		if (!tx_sa->macsec_rule) {
			err = -EINVAL;
			goto out;
		}

		mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
	}
out:
	mutex_unlock(&macsec->lock);

	return err;
}
638 
/* .mdo_del_txsa: tear down and free a Tx SA.
 * Removes the rule + HW object, destroys the key object, then frees the
 * SA after an RCU grace period (datapath may still be reading it).
 */
static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec_device *macsec_device;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_sa *tx_sa;
	struct mlx5e_macsec *macsec;
	int err = 0;

	mutex_lock(&priv->macsec->lock);
	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	tx_sa = macsec_device->tx_sa[assoc_num];
	if (!tx_sa) {
		netdev_err(ctx->netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
		err = -EEXIST;
		goto out;
	}

	mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
	mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
	/* Deferred free: readers may hold an RCU reference to the SA. */
	kfree_rcu_mightsleep(tx_sa);
	macsec_device->tx_sa[assoc_num] = NULL;

out:
	mutex_unlock(&macsec->lock);

	return err;
}
674 
/* .mdo_add_rxsc: register a new Rx secure channel.
 * Allocates the SC, reserves a flow-steering id (fs_id) in the xarray,
 * allocates the metadata dst used to tag Rx skbs with the SCI, and
 * links the SC on the device's RCU list.  Unwinds fully on any failure.
 */
static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
{
	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_rx_sc *rx_sc;
	struct list_head *rx_sc_list;
	struct mlx5e_macsec *macsec;
	int err = 0;

	mutex_lock(&priv->macsec->lock);
	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	rx_sc_list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(rx_sc_list, ctx_rx_sc->sci);
	if (rx_sc) {
		netdev_err(ctx->netdev, "MACsec offload: rx_sc (sci %lld) already exists\n",
			   ctx_rx_sc->sci);
		err = -EEXIST;
		goto out;
	}

	rx_sc = kzalloc_obj(*rx_sc);
	if (!rx_sc) {
		err = -ENOMEM;
		goto out;
	}

	sc_xarray_element = kzalloc_obj(*sc_xarray_element);
	if (!sc_xarray_element) {
		err = -ENOMEM;
		goto destroy_rx_sc;
	}

	sc_xarray_element->rx_sc = rx_sc;
	/* fs_id 0 is reserved; ids are bounded by what steering can encode. */
	err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
		       XA_LIMIT(1, MLX5_MACEC_RX_FS_ID_MAX), GFP_KERNEL);
	if (err) {
		if (err == -EBUSY)
			netdev_err(ctx->netdev,
				   "MACsec offload: unable to create entry for RX SC (%d Rx SCs already allocated)\n",
				   MLX5_MACEC_RX_FS_ID_MAX);
		goto destroy_sc_xarray_elemenet;
	}

	rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
	if (!rx_sc->md_dst) {
		err = -ENOMEM;
		goto erase_xa_alloc;
	}

	rx_sc->sci = ctx_rx_sc->sci;
	rx_sc->active = ctx_rx_sc->active;
	/* Publish to the RCU list; readers can see the SC from here on. */
	list_add_rcu(&rx_sc->rx_sc_list_element, rx_sc_list);

	rx_sc->sc_xarray_element = sc_xarray_element;
	rx_sc->md_dst->u.macsec_info.sci = rx_sc->sci;
	mutex_unlock(&macsec->lock);

	return 0;

erase_xa_alloc:
	xa_erase(&macsec->sc_xarray, sc_xarray_element->fs_id);
destroy_sc_xarray_elemenet:
	kfree(sc_xarray_element);
destroy_rx_sc:
	kfree(rx_sc);

out:
	mutex_unlock(&macsec->lock);

	return err;
}
755 
/* .mdo_upd_rxsc: update an Rx SC's active state.
 * Toggling the SC re-evaluates every SA under it: an SA is offloaded
 * only when both the SA and the SC are active.
 */
static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_rx_sc *rx_sc;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int i;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx_rx_sc->sci);
	if (!rx_sc) {
		err = -EINVAL;
		goto out;
	}

	if (rx_sc->active == ctx_rx_sc->active)
		goto out;

	rx_sc->active = ctx_rx_sc->active;
	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		rx_sa = rx_sc->rx_sa[i];
		if (!rx_sa)
			continue;

		/* Effective state = SA active AND SC active. */
		err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active,
						 &rx_sc->sc_xarray_element->fs_id);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&macsec->lock);

	return err;
}
805 
/* Tear down one Rx SC and everything under it: each SA (rule, HW object,
 * key), then the SC's list linkage, xarray entry, metadata dst and
 * finally the SC itself (RCU-deferred).  Caller holds macsec->lock.
 */
static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc,
				struct net_device *netdev)
{
	struct mlx5e_macsec_sa *rx_sa;
	int i;

	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		rx_sa = rx_sc->rx_sa[i];
		if (!rx_sa)
			continue;

		mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, netdev,
					rx_sc->sc_xarray_element->fs_id);
		mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);

		kfree(rx_sa);
		rx_sc->rx_sa[i] = NULL;
	}

	/* At this point the relevant MACsec offload Rx rule already removed at
	 * mlx5e_macsec_cleanup_sa need to wait for datapath to finish current
	 * Rx related data propagating using xa_erase which uses rcu to sync,
	 * once fs_id is erased then this rx_sc is hidden from datapath.
	 */
	list_del_rcu(&rx_sc->rx_sc_list_element);
	xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
	metadata_dst_free(rx_sc->md_dst);
	kfree(rx_sc->sc_xarray_element);
	kfree_rcu_mightsleep(rx_sc);
}
836 
mlx5e_macsec_del_rxsc(struct macsec_context * ctx)837 static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
838 {
839 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
840 	struct mlx5e_macsec_device *macsec_device;
841 	struct mlx5e_macsec_rx_sc *rx_sc;
842 	struct mlx5e_macsec *macsec;
843 	struct list_head *list;
844 	int err = 0;
845 
846 	mutex_lock(&priv->macsec->lock);
847 
848 	macsec = priv->macsec;
849 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
850 	if (!macsec_device) {
851 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
852 		err = -EINVAL;
853 		goto out;
854 	}
855 
856 	list = &macsec_device->macsec_rx_sc_list_head;
857 	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx->rx_sc->sci);
858 	if (!rx_sc) {
859 		netdev_err(ctx->netdev,
860 			   "MACsec offload rx_sc sci %lld doesn't exist\n",
861 			   ctx->sa.rx_sa->sc->sci);
862 		err = -EINVAL;
863 		goto out;
864 	}
865 
866 	macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev);
867 out:
868 	mutex_unlock(&macsec->lock);
869 
870 	return err;
871 }
872 
/* .mdo_add_rxsa: allocate and offload a new Rx SA under an existing SC.
 * Creates the key object always; the HW object and steering rule are
 * created only when the SA is active.  Unwinds on any failure.
 */
static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_rx_sc *rx_sc;
	sci_t sci = ctx_rx_sa->sc->sci;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	if (rx_sc->rx_sa[assoc_num]) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld rx_sa %d already exist\n",
			   sci, assoc_num);
		err = -EEXIST;
		goto out;
	}

	rx_sa = kzalloc_obj(*rx_sa);
	if (!rx_sa) {
		err = -ENOMEM;
		goto out;
	}

	rx_sa->active = ctx_rx_sa->active;
	rx_sa->next_pn = ctx_rx_sa->next_pn;
	rx_sa->sci = sci;
	rx_sa->assoc_num = assoc_num;

	/* XPN mode carries extra state (SSCI, salt, upper PN half). */
	if (ctx->secy->xpn)
		update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves,
				  ctx_rx_sa->ssci);

	err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
					 MLX5_ACCEL_OBJ_MACSEC_KEY,
					 &rx_sa->enc_key_id);
	if (err)
		goto destroy_sa;

	rx_sc->rx_sa[assoc_num] = rx_sa;
	/* Inactive SA: defer HW object/rule creation to a later update. */
	if (!rx_sa->active)
		goto out;

	//TODO - add support for both authentication and encryption flows
	err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, &rx_sc->sc_xarray_element->fs_id);
	if (err)
		goto destroy_encryption_key;

	goto out;

destroy_encryption_key:
	rx_sc->rx_sa[assoc_num] = NULL;
	mlx5_destroy_encryption_key(mdev, rx_sa->enc_key_id);
destroy_sa:
	kfree(rx_sa);
out:
	mutex_unlock(&macsec->lock);

	return err;
}
957 
/* .mdo_upd_rxsa: update an existing Rx SA.
 * Only active-state toggles are supported; PN updates are rejected
 * because the hardware owns the packet number once offloaded.
 */
static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
	struct mlx5e_macsec_device *macsec_device;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_rx_sc *rx_sc;
	sci_t sci = ctx_rx_sa->sc->sci;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	rx_sa = rx_sc->rx_sa[assoc_num];
	if (!rx_sa) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
			   sci, assoc_num);
		err = -EINVAL;
		goto out;
	}

	if (ctx->sa.update_pn) {
		netdev_err(ctx->netdev,
			   "MACsec offload update RX sa %d PN isn't supported\n",
			   assoc_num);
		err = -EINVAL;
		goto out;
	}

	err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active,
					 &rx_sc->sc_xarray_element->fs_id);
out:
	mutex_unlock(&macsec->lock);

	return err;
}
1015 
/* .mdo_del_rxsa: tear down and free an Rx SA.
 * HW object/rule are only cleaned up when the SA is active (inactive SAs
 * never had them); the key object always exists and is destroyed.
 */
static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec_device *macsec_device;
	sci_t sci = ctx->sa.rx_sa->sc->sci;
	struct mlx5e_macsec_rx_sc *rx_sc;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	rx_sa = rx_sc->rx_sa[assoc_num];
	if (!rx_sa) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
			   sci, assoc_num);
		err = -EINVAL;
		goto out;
	}

	if (rx_sa->active)
		mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
					rx_sc->sc_xarray_element->fs_id);
	mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
	kfree(rx_sa);
	rx_sc->rx_sa[assoc_num] = NULL;

out:
	mutex_unlock(&macsec->lock);

	return err;
}
1069 
mlx5e_macsec_add_secy(struct macsec_context * ctx)1070 static int mlx5e_macsec_add_secy(struct macsec_context *ctx)
1071 {
1072 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
1073 	const struct net_device *dev = ctx->secy->netdev;
1074 	const struct net_device *netdev = ctx->netdev;
1075 	struct mlx5e_macsec_device *macsec_device;
1076 	struct mlx5e_macsec *macsec;
1077 	int err = 0;
1078 
1079 	if (!mlx5e_macsec_secy_features_validate(ctx))
1080 		return -EINVAL;
1081 
1082 	mutex_lock(&priv->macsec->lock);
1083 	macsec = priv->macsec;
1084 	if (mlx5e_macsec_get_macsec_device_context(macsec, ctx)) {
1085 		netdev_err(netdev, "MACsec offload: MACsec net_device already exist\n");
1086 		goto out;
1087 	}
1088 
1089 	if (macsec->num_of_devices >= MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES) {
1090 		netdev_err(netdev, "Currently, only %d MACsec offload devices can be set\n",
1091 			   MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES);
1092 		err = -EBUSY;
1093 		goto out;
1094 	}
1095 
1096 	macsec_device = kzalloc_obj(*macsec_device);
1097 	if (!macsec_device) {
1098 		err = -ENOMEM;
1099 		goto out;
1100 	}
1101 
1102 	macsec_device->dev_addr = kmemdup(dev->dev_addr, dev->addr_len, GFP_KERNEL);
1103 	if (!macsec_device->dev_addr) {
1104 		kfree(macsec_device);
1105 		err = -ENOMEM;
1106 		goto out;
1107 	}
1108 
1109 	macsec_device->netdev = dev;
1110 
1111 	INIT_LIST_HEAD_RCU(&macsec_device->macsec_rx_sc_list_head);
1112 	list_add_rcu(&macsec_device->macsec_device_list_element, &macsec->macsec_device_list_head);
1113 
1114 	++macsec->num_of_devices;
1115 out:
1116 	mutex_unlock(&macsec->lock);
1117 
1118 	return err;
1119 }
1120 
/* Tear down and re-create all Rx SA steering rules for this device, then
 * cache the SecY netdev's current MAC address in @macsec_device->dev_addr.
 *
 * Two passes over the Rx SC list: first every installed rule is removed,
 * then rules are re-installed for the SAs that are still active.  The
 * ordering matters — all old rules must be gone before new ones are added.
 * Caller must hold macsec->lock.
 *
 * Returns 0 on success or a negative errno from mlx5e_macsec_init_sa_fs();
 * on failure the dev_addr cache is left unchanged.
 */
static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
				      struct mlx5e_macsec_device *macsec_device)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct net_device *dev = ctx->secy->netdev;
	struct mlx5e_macsec *macsec = priv->macsec;
	struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
	struct mlx5e_macsec_sa *rx_sa;
	struct list_head *list;
	int i, err = 0;


	list = &macsec_device->macsec_rx_sc_list_head;
	/* Pass 1: remove every currently installed Rx steering rule. */
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
		for (i = 0; i < MACSEC_NUM_AN; ++i) {
			rx_sa = rx_sc->rx_sa[i];
			if (!rx_sa || !rx_sa->macsec_rule)
				continue;

			mlx5e_macsec_cleanup_sa_fs(macsec, rx_sa, false, ctx->secy->netdev,
						   rx_sc->sc_xarray_element->fs_id);
		}
	}

	/* Pass 2: re-install rules for the SAs that remain active. */
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
		for (i = 0; i < MACSEC_NUM_AN; ++i) {
			rx_sa = rx_sc->rx_sa[i];
			if (!rx_sa)
				continue;

			if (rx_sa->active) {
				err = mlx5e_macsec_init_sa_fs(ctx, rx_sa, true, false,
							      &rx_sc->sc_xarray_element->fs_id);
				if (err)
					goto out;
			}
		}
	}

	memcpy(macsec_device->dev_addr, dev->dev_addr, dev->addr_len);
out:
	return err;
}
1164 
/* This function is called from two MACsec ops callbacks:
 *  macsec_set_mac_address - the MAC address was changed, so the Tx contexts
 *  (MACsec object + steering) must be destroyed and re-created.
 *  macsec_changelink - the Tx SC or SecY may have changed, so the Tx and Rx
 *  contexts (MACsec object + steering) must be destroyed and re-created.
 */
mlx5e_macsec_upd_secy(struct macsec_context * ctx)1171 static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
1172 {
1173 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
1174 	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
1175 	const struct net_device *dev = ctx->secy->netdev;
1176 	struct mlx5e_macsec_device *macsec_device;
1177 	struct mlx5e_macsec_sa *tx_sa;
1178 	struct mlx5e_macsec *macsec;
1179 	int i, err = 0;
1180 
1181 	if (!mlx5e_macsec_secy_features_validate(ctx))
1182 		return -EINVAL;
1183 
1184 	mutex_lock(&priv->macsec->lock);
1185 
1186 	macsec = priv->macsec;
1187 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1188 	if (!macsec_device) {
1189 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1190 		err = -EINVAL;
1191 		goto out;
1192 	}
1193 
1194 	/* if the dev_addr hasn't change, it mean the callback is from macsec_changelink */
1195 	if (!memcmp(macsec_device->dev_addr, dev->dev_addr, dev->addr_len)) {
1196 		err = macsec_upd_secy_hw_address(ctx, macsec_device);
1197 		if (err)
1198 			goto out;
1199 	}
1200 
1201 	for (i = 0; i < MACSEC_NUM_AN; ++i) {
1202 		tx_sa = macsec_device->tx_sa[i];
1203 		if (!tx_sa)
1204 			continue;
1205 
1206 		mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
1207 	}
1208 
1209 	for (i = 0; i < MACSEC_NUM_AN; ++i) {
1210 		tx_sa = macsec_device->tx_sa[i];
1211 		if (!tx_sa)
1212 			continue;
1213 
1214 		if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
1215 			err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
1216 			if (err)
1217 				goto out;
1218 		}
1219 	}
1220 
1221 out:
1222 	mutex_unlock(&macsec->lock);
1223 
1224 	return err;
1225 }
1226 
mlx5e_macsec_del_secy(struct macsec_context * ctx)1227 static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
1228 {
1229 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
1230 	struct mlx5e_macsec_device *macsec_device;
1231 	struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
1232 	struct mlx5e_macsec_sa *tx_sa;
1233 	struct mlx5e_macsec *macsec;
1234 	struct list_head *list;
1235 	int err = 0;
1236 	int i;
1237 
1238 	mutex_lock(&priv->macsec->lock);
1239 	macsec = priv->macsec;
1240 	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1241 	if (!macsec_device) {
1242 		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1243 		err = -EINVAL;
1244 
1245 		goto out;
1246 	}
1247 
1248 	for (i = 0; i < MACSEC_NUM_AN; ++i) {
1249 		tx_sa = macsec_device->tx_sa[i];
1250 		if (!tx_sa)
1251 			continue;
1252 
1253 		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
1254 		mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
1255 		kfree(tx_sa);
1256 		macsec_device->tx_sa[i] = NULL;
1257 	}
1258 
1259 	list = &macsec_device->macsec_rx_sc_list_head;
1260 	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element)
1261 		macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev);
1262 
1263 	kfree(macsec_device->dev_addr);
1264 	macsec_device->dev_addr = NULL;
1265 
1266 	list_del_rcu(&macsec_device->macsec_device_list_element);
1267 	--macsec->num_of_devices;
1268 	kfree(macsec_device);
1269 
1270 out:
1271 	mutex_unlock(&macsec->lock);
1272 
1273 	return err;
1274 }
1275 
macsec_build_accel_attrs(struct mlx5e_macsec_sa * sa,struct mlx5_macsec_obj_attrs * attrs)1276 static void macsec_build_accel_attrs(struct mlx5e_macsec_sa *sa,
1277 				     struct mlx5_macsec_obj_attrs *attrs)
1278 {
1279 	attrs->epn_state.epn_msb = sa->epn_state.epn_msb;
1280 	attrs->epn_state.overlap = sa->epn_state.overlap;
1281 }
1282 
/* Fill an ASO WQE ctrl segment pointing at the UMR buffer used to DMA the
 * MACsec ASO context back to the driver.
 *
 * With @param == NULL the segment only requests a context read.  Otherwise
 * the condition/bitwise-data parameters are packed into their hardware bit
 * positions — pairs of 4-bit sub-fields share a byte, hence the << 4 / << 6
 * shifts below (layout per device spec; do not reorder).
 */
static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
					  struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
					  struct mlx5_aso_ctrl_param *param)
{
	struct mlx5e_macsec_umr *umr = macsec_aso->umr;

	memset(aso_ctrl, 0, sizeof(*aso_ctrl));
	/* DMA address of the UMR context buffer; low bits carry the
	 * read-enable flag.
	 */
	aso_ctrl->va_l = cpu_to_be32(umr->dma_addr | ASO_CTRL_READ_EN);
	aso_ctrl->va_h = cpu_to_be32((u64)umr->dma_addr >> 32);
	aso_ctrl->l_key = cpu_to_be32(umr->mkey);

	if (!param)
		return;

	aso_ctrl->data_mask_mode = param->data_mask_mode << 6;
	aso_ctrl->condition_1_0_operand = param->condition_1_operand |
						param->condition_0_operand << 4;
	aso_ctrl->condition_1_0_offset = param->condition_1_offset |
						param->condition_0_offset << 4;
	aso_ctrl->data_offset_condition_operand = param->data_offset |
						param->condition_operand << 6;
	aso_ctrl->condition_0_data = cpu_to_be32(param->condition_0_data);
	aso_ctrl->condition_0_mask = cpu_to_be32(param->condition_0_mask);
	aso_ctrl->condition_1_data = cpu_to_be32(param->condition_1_data);
	aso_ctrl->condition_1_mask = cpu_to_be32(param->condition_1_mask);
	aso_ctrl->bitwise_data = cpu_to_be64(param->bitwise_data);
	aso_ctrl->data_mask = cpu_to_be64(param->data_mask);
}
1311 
/* Update the EPN fields (epn_msb / epn_overlap) of an existing FW MACsec
 * object.
 *
 * The object is first queried to read modify_field_select; if FW does not
 * report both EPN fields as modifiable the function returns -EOPNOTSUPP.
 * The query's input header (obj_type / obj_id) is reused for the modify
 * command — only the opcode and the object payload are rewritten.
 *
 * Returns 0 on success or a negative errno from mlx5_cmd_exec().
 */
static int mlx5e_macsec_modify_obj(struct mlx5_core_dev *mdev, struct mlx5_macsec_obj_attrs *attrs,
				   u32 macsec_id)
{
	u32 in[MLX5_ST_SZ_DW(modify_macsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_macsec_obj_out)];
	u64 modify_field_select = 0;
	void *obj;
	int err;

	/* General object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_id);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev, "Query MACsec object failed (Object id %d), err = %d\n",
			      macsec_id, err);
		return err;
	}

	obj = MLX5_ADDR_OF(query_macsec_obj_out, out, macsec_object);
	modify_field_select = MLX5_GET64(macsec_offload_obj, obj, modify_field_select);

	/* EPN */
	/* Both EPN fields must be modifiable for the update to make sense. */
	if (!(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP) ||
	    !(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB)) {
		mlx5_core_dbg(mdev, "MACsec object field is not modifiable (Object id %d)\n",
			      macsec_id);
		return -EOPNOTSUPP;
	}

	obj = MLX5_ADDR_OF(modify_macsec_obj_in, in, macsec_object);
	MLX5_SET64(macsec_offload_obj, obj, modify_field_select,
		   MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP | MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB);
	MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
	MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);

	/* General object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
1354 
macsec_aso_build_ctrl(struct mlx5e_macsec_aso * aso,struct mlx5_wqe_aso_ctrl_seg * aso_ctrl,struct mlx5e_macsec_aso_in * in)1355 static void macsec_aso_build_ctrl(struct mlx5e_macsec_aso *aso,
1356 				  struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
1357 				  struct mlx5e_macsec_aso_in *in)
1358 {
1359 	struct mlx5_aso_ctrl_param param = {};
1360 
1361 	param.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT;
1362 	param.condition_0_operand = MLX5_ASO_ALWAYS_TRUE;
1363 	param.condition_1_operand = MLX5_ASO_ALWAYS_TRUE;
1364 	if (in->mode == MLX5_MACSEC_EPN) {
1365 		param.data_offset = MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
1366 		param.bitwise_data = BIT_ULL(54);
1367 		param.data_mask = param.bitwise_data;
1368 	}
1369 	macsec_aso_build_wqe_ctrl_seg(aso, aso_ctrl, &param);
1370 }
1371 
/* Post an ASO WQE that re-arms the event for MACsec object @in->obj_id.
 *
 * Serialized by aso->aso_lock.  The completion is polled (10us period, up
 * to 10ms); @err ends up holding the last mlx5_aso_poll_cq() result, so 0
 * means the WQE completed in time.
 */
static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
				    struct mlx5e_macsec_aso_in *in)
{
	struct mlx5e_macsec_aso *aso;
	struct mlx5_aso_wqe *aso_wqe;
	struct mlx5_aso *maso;
	int err;

	aso = &macsec->aso;
	maso = aso->maso;

	mutex_lock(&aso->aso_lock);
	aso_wqe = mlx5_aso_get_wqe(maso);
	mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
	macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
	/* Wait for the WQE completion on the ASO CQ. */
	read_poll_timeout(mlx5_aso_poll_cq, err, !err, 10, 10 * USEC_PER_MSEC,
			  false, maso, false);
	mutex_unlock(&aso->aso_lock);

	return err;
}
1395 
/* Read the ASO context of MACsec object @in->obj_id into @out.
 *
 * Posts a read-only ASO WQE (NULL ctrl params => no data modification),
 * waits for its completion, then parses the context DMA'd into the UMR
 * buffer: out->event_arm reflects the EPN arm bit and out->mode_param the
 * current mode parameter.  Serialized by aso->aso_lock.
 *
 * Returns 0 on success or the last mlx5_aso_poll_cq() error.
 */
static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
			    struct mlx5e_macsec_aso_in *in, struct mlx5e_macsec_aso_out *out)
{
	struct mlx5e_macsec_aso *aso;
	struct mlx5_aso_wqe *aso_wqe;
	struct mlx5_aso *maso;
	int err;

	aso = &macsec->aso;
	maso = aso->maso;

	mutex_lock(&aso->aso_lock);

	aso_wqe = mlx5_aso_get_wqe(maso);
	mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
	macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);

	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
	read_poll_timeout(mlx5_aso_poll_cq, err, !err, 10, 10 * USEC_PER_MSEC,
			  false, maso, false);

	if (err)
		goto err_out;

	if (MLX5_GET(macsec_aso, aso->umr->ctx, epn_event_arm))
		out->event_arm |= MLX5E_ASO_EPN_ARM;

	out->mode_param = MLX5_GET(macsec_aso, aso->umr->ctx, mode_parameter);

/* Reached on success too (err == 0); only drops the lock. */
err_out:
	mutex_unlock(&aso->aso_lock);
	return err;
}
1430 
get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec * macsec,const u32 obj_id)1431 static struct mlx5e_macsec_sa *get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1432 							    const u32 obj_id)
1433 {
1434 	const struct list_head *device_list;
1435 	struct mlx5e_macsec_sa *macsec_sa;
1436 	struct mlx5e_macsec_device *iter;
1437 	int i;
1438 
1439 	device_list = &macsec->macsec_device_list_head;
1440 
1441 	list_for_each_entry(iter, device_list, macsec_device_list_element) {
1442 		for (i = 0; i < MACSEC_NUM_AN; ++i) {
1443 			macsec_sa = iter->tx_sa[i];
1444 			if (!macsec_sa || !macsec_sa->active)
1445 				continue;
1446 			if (macsec_sa->macsec_obj_id == obj_id)
1447 				return macsec_sa;
1448 		}
1449 	}
1450 
1451 	return NULL;
1452 }
1453 
get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec * macsec,const u32 obj_id)1454 static struct mlx5e_macsec_sa *get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1455 							    const u32 obj_id)
1456 {
1457 	const struct list_head *device_list, *sc_list;
1458 	struct mlx5e_macsec_rx_sc *mlx5e_rx_sc;
1459 	struct mlx5e_macsec_sa *macsec_sa;
1460 	struct mlx5e_macsec_device *iter;
1461 	int i;
1462 
1463 	device_list = &macsec->macsec_device_list_head;
1464 
1465 	list_for_each_entry(iter, device_list, macsec_device_list_element) {
1466 		sc_list = &iter->macsec_rx_sc_list_head;
1467 		list_for_each_entry(mlx5e_rx_sc, sc_list, rx_sc_list_element) {
1468 			for (i = 0; i < MACSEC_NUM_AN; ++i) {
1469 				macsec_sa = mlx5e_rx_sc->rx_sa[i];
1470 				if (!macsec_sa || !macsec_sa->active)
1471 					continue;
1472 				if (macsec_sa->macsec_obj_id == obj_id)
1473 					return macsec_sa;
1474 			}
1475 		}
1476 	}
1477 
1478 	return NULL;
1479 }
1480 
/* Advance the SA's EPN state after the HW signalled a replay-window
 * crossing, push it to the FW object, and re-arm the EPN event.
 * Caller must hold macsec->lock.
 */
static void macsec_epn_update(struct mlx5e_macsec *macsec, struct mlx5_core_dev *mdev,
			      struct mlx5e_macsec_sa *sa, u32 obj_id, u32 mode_param)
{
	struct mlx5_macsec_obj_attrs attrs = {};
	struct mlx5e_macsec_aso_in in = {};

	/* When the bottom of the replay protection window (mode_param) crosses 2^31 (half sequence
	 * number wraparound) hence mode_param > MLX5_MACSEC_EPN_SCOPE_MID the SW should update the
	 * esn_overlap to OLD (1).
	 * When the bottom of the replay protection window (mode_param) crosses 2^32 (full sequence
	 * number wraparound) hence mode_param < MLX5_MACSEC_EPN_SCOPE_MID since it did a
	 * wraparound, the SW should update the esn_overlap to NEW (0), and increment the esn_msb.
	 */

	if (mode_param < MLX5_MACSEC_EPN_SCOPE_MID) {
		sa->epn_state.epn_msb++;
		sa->epn_state.overlap = 0;
	} else {
		sa->epn_state.overlap = 1;
	}

	macsec_build_accel_attrs(sa, &attrs);
	/* NOTE(review): errors from the modify and re-arm commands below are
	 * ignored — handling from the async event path is best-effort.
	 */
	mlx5e_macsec_modify_obj(mdev, &attrs, obj_id);

	/* Re-set EPN arm event */
	in.obj_id = obj_id;
	in.mode = MLX5_MACSEC_EPN;
	macsec_aso_set_arm_event(mdev, macsec, &in);
}
1510 
/* Deferred (workqueue) handler for a FW MACsec object-change event.
 *
 * Resolves the SA owning the reported object id (Tx first, then Rx),
 * queries its ASO context and, when the SA uses EPN and the arm bit was
 * consumed by HW, advances the EPN state and re-arms the event.
 */
static void macsec_async_event(struct work_struct *work)
{
	struct mlx5e_macsec_async_work *async_work;
	struct mlx5e_macsec_aso_out out = {};
	struct mlx5e_macsec_aso_in in = {};
	struct mlx5e_macsec_sa *macsec_sa;
	struct mlx5e_macsec *macsec;
	struct mlx5_core_dev *mdev;
	u32 obj_id;

	async_work = container_of(work, struct mlx5e_macsec_async_work, work);
	macsec = async_work->macsec;
	mutex_lock(&macsec->lock);

	mdev = async_work->mdev;
	obj_id = async_work->obj_id;
	macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
	if (!macsec_sa) {
		macsec_sa = get_macsec_rx_sa_from_obj_id(macsec, obj_id);
		if (!macsec_sa) {
			/* The SA may have been deleted before this work ran. */
			mlx5_core_dbg(mdev, "MACsec SA is not found (SA object id %d)\n", obj_id);
			goto out_async_work;
		}
	}

	/* Query MACsec ASO context */
	/* NOTE(review): the query's return value is ignored; on failure @out
	 * stays zeroed, which reads as "arm bit cleared" below.
	 */
	in.obj_id = obj_id;
	macsec_aso_query(mdev, macsec, &in, &out);

	/* EPN case */
	if (macsec_sa->epn_state.epn_enabled && !(out.event_arm & MLX5E_ASO_EPN_ARM))
		macsec_epn_update(macsec, mdev, macsec_sa, obj_id, out.mode_param);

out_async_work:
	kfree(async_work);
	mutex_unlock(&macsec->lock);
}
1548 
macsec_obj_change_event(struct notifier_block * nb,unsigned long event,void * data)1549 static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
1550 {
1551 	struct mlx5e_macsec *macsec = container_of(nb, struct mlx5e_macsec, nb);
1552 	struct mlx5e_macsec_async_work *async_work;
1553 	struct mlx5_eqe_obj_change *obj_change;
1554 	struct mlx5_eqe *eqe = data;
1555 	u16 obj_type;
1556 	u32 obj_id;
1557 
1558 	if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
1559 		return NOTIFY_DONE;
1560 
1561 	obj_change = &eqe->data.obj_change;
1562 	obj_type = be16_to_cpu(obj_change->obj_type);
1563 	obj_id = be32_to_cpu(obj_change->obj_id);
1564 
1565 	if (obj_type != MLX5_GENERAL_OBJECT_TYPES_MACSEC)
1566 		return NOTIFY_DONE;
1567 
1568 	async_work = kzalloc_obj(*async_work, GFP_ATOMIC);
1569 	if (!async_work)
1570 		return NOTIFY_DONE;
1571 
1572 	async_work->macsec = macsec;
1573 	async_work->mdev = macsec->mdev;
1574 	async_work->obj_id = obj_id;
1575 
1576 	INIT_WORK(&async_work->work, macsec_async_event);
1577 
1578 	WARN_ON(!queue_work(macsec->wq, &async_work->work));
1579 
1580 	return NOTIFY_OK;
1581 }
1582 
mlx5e_macsec_aso_init(struct mlx5e_macsec_aso * aso,struct mlx5_core_dev * mdev)1583 static int mlx5e_macsec_aso_init(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
1584 {
1585 	struct mlx5_aso *maso;
1586 	int err;
1587 
1588 	err = mlx5_core_alloc_pd(mdev, &aso->pdn);
1589 	if (err) {
1590 		mlx5_core_err(mdev,
1591 			      "MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n",
1592 			      err);
1593 		return err;
1594 	}
1595 
1596 	maso = mlx5_aso_create(mdev, aso->pdn);
1597 	if (IS_ERR(maso)) {
1598 		err = PTR_ERR(maso);
1599 		goto err_aso;
1600 	}
1601 
1602 	err = mlx5e_macsec_aso_reg_mr(mdev, aso);
1603 	if (err)
1604 		goto err_aso_reg;
1605 
1606 	mutex_init(&aso->aso_lock);
1607 
1608 	aso->maso = maso;
1609 
1610 	return 0;
1611 
1612 err_aso_reg:
1613 	mlx5_aso_destroy(maso);
1614 err_aso:
1615 	mlx5_core_dealloc_pd(mdev, aso->pdn);
1616 	return err;
1617 }
1618 
mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso * aso,struct mlx5_core_dev * mdev)1619 static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
1620 {
1621 	if (!aso)
1622 		return;
1623 
1624 	mlx5e_macsec_aso_dereg_mr(mdev, aso);
1625 
1626 	mlx5_aso_destroy(aso->maso);
1627 
1628 	mlx5_core_dealloc_pd(mdev, aso->pdn);
1629 }
1630 
/* MACsec offload callbacks registered on the netdev.  rx_uses_md_dst: the
 * Rx datapath attaches the Rx SC's metadata dst to offloaded skbs (see
 * mlx5e_macsec_offload_handle_rx_skb()).
 */
static const struct macsec_ops macsec_offload_ops = {
	.mdo_add_txsa = mlx5e_macsec_add_txsa,
	.mdo_upd_txsa = mlx5e_macsec_upd_txsa,
	.mdo_del_txsa = mlx5e_macsec_del_txsa,
	.mdo_add_rxsc = mlx5e_macsec_add_rxsc,
	.mdo_upd_rxsc = mlx5e_macsec_upd_rxsc,
	.mdo_del_rxsc = mlx5e_macsec_del_rxsc,
	.mdo_add_rxsa = mlx5e_macsec_add_rxsa,
	.mdo_upd_rxsa = mlx5e_macsec_upd_rxsa,
	.mdo_del_rxsa = mlx5e_macsec_del_rxsa,
	.mdo_add_secy = mlx5e_macsec_add_secy,
	.mdo_upd_secy = mlx5e_macsec_upd_secy,
	.mdo_del_secy = mlx5e_macsec_del_secy,
	.rx_uses_md_dst = true,
};
1646 
mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec * macsec,struct sk_buff * skb)1647 bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
1648 {
1649 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
1650 	u32 fs_id;
1651 
1652 	fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs,
1653 							&md_dst->u.macsec_info.sci);
1654 	if (!fs_id)
1655 		goto err_out;
1656 
1657 	return true;
1658 
1659 err_out:
1660 	dev_kfree_skb_any(skb);
1661 	return false;
1662 }
1663 
mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec * macsec,struct sk_buff * skb,struct mlx5_wqe_eth_seg * eseg)1664 void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
1665 				struct sk_buff *skb,
1666 				struct mlx5_wqe_eth_seg *eseg)
1667 {
1668 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
1669 	u32 fs_id;
1670 
1671 	fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs,
1672 							&md_dst->u.macsec_info.sci);
1673 	if (!fs_id)
1674 		return;
1675 
1676 	eseg->flow_table_metadata = cpu_to_be32(MLX5_MACSEC_TX_METADATA(fs_id));
1677 }
1678 
mlx5e_macsec_offload_handle_rx_skb(struct net_device * netdev,struct sk_buff * skb,struct mlx5_cqe64 * cqe)1679 void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
1680 					struct sk_buff *skb,
1681 					struct mlx5_cqe64 *cqe)
1682 {
1683 	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
1684 	u32 macsec_meta_data = be32_to_cpu(cqe->ft_metadata);
1685 	struct mlx5e_priv *priv = macsec_netdev_priv(netdev);
1686 	struct mlx5e_macsec_rx_sc *rx_sc;
1687 	struct mlx5e_macsec *macsec;
1688 	u32  fs_id;
1689 
1690 	macsec = priv->macsec;
1691 	if (!macsec)
1692 		return;
1693 
1694 	fs_id = MLX5_MACSEC_RX_METADAT_HANDLE(macsec_meta_data);
1695 
1696 	rcu_read_lock();
1697 	sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
1698 	rx_sc = sc_xarray_element->rx_sc;
1699 	if (rx_sc) {
1700 		dst_hold(&rx_sc->md_dst->dst);
1701 		skb_dst_set(skb, &rx_sc->md_dst->dst);
1702 	}
1703 
1704 	rcu_read_unlock();
1705 }
1706 
mlx5e_macsec_build_netdev(struct mlx5e_priv * priv)1707 void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv)
1708 {
1709 	struct net_device *netdev = priv->netdev;
1710 
1711 	if (!mlx5e_is_macsec_device(priv->mdev))
1712 		return;
1713 
1714 	/* Enable MACsec */
1715 	mlx5_core_dbg(priv->mdev, "mlx5e: MACsec acceleration enabled\n");
1716 	netdev->macsec_ops = &macsec_offload_ops;
1717 	netdev->features |= NETIF_F_HW_MACSEC;
1718 	netif_keep_dst(netdev);
1719 }
1720 
/* Allocate and wire up all per-netdev MACsec offload state.
 *
 * Sets up, in order: the device list and lock, the ASO channel used to
 * track EPN state, an ordered workqueue for deferred object-change events,
 * the fs_id xarray, the steering tables (macsec_fs) and finally the FW
 * event notifier.  On failure everything already set up is unwound and
 * priv->macsec is left NULL.
 *
 * Returns 0 on success (including when the device has no MACsec support)
 * or a negative errno.
 */
int mlx5e_macsec_init(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_macsec *macsec = NULL;
	struct mlx5_macsec_fs *macsec_fs;
	int err;

	if (!mlx5e_is_macsec_device(priv->mdev)) {
		mlx5_core_dbg(mdev, "Not a MACsec offload device\n");
		return 0;
	}

	macsec = kzalloc_obj(*macsec);
	if (!macsec)
		return -ENOMEM;

	INIT_LIST_HEAD(&macsec->macsec_device_list_head);
	mutex_init(&macsec->lock);

	err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
	if (err) {
		mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);
		goto err_aso;
	}

	/* Ordered workqueue: serializes deferred object-change handling. */
	macsec->wq = alloc_ordered_workqueue("mlx5e_macsec_%s", 0, priv->netdev->name);
	if (!macsec->wq) {
		err = -ENOMEM;
		goto err_wq;
	}

	/* XA_FLAGS_ALLOC1: ids start at 1, so fs_id 0 means "no entry". */
	xa_init_flags(&macsec->sc_xarray, XA_FLAGS_ALLOC1);

	priv->macsec = macsec;

	macsec->mdev = mdev;

	macsec_fs = mlx5_macsec_fs_init(mdev);
	if (!macsec_fs) {
		err = -ENOMEM;
		goto err_out;
	}

	mdev->macsec_fs = macsec_fs;

	macsec->nb.notifier_call = macsec_obj_change_event;
	mlx5_notifier_register(mdev, &macsec->nb);

	mlx5_core_dbg(mdev, "MACsec attached to netdevice\n");

	return 0;

err_out:
	destroy_workqueue(macsec->wq);
err_wq:
	mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);
err_aso:
	kfree(macsec);
	priv->macsec = NULL;
	return err;
}
1782 
mlx5e_macsec_cleanup(struct mlx5e_priv * priv)1783 void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
1784 {
1785 	struct mlx5e_macsec *macsec = priv->macsec;
1786 	struct mlx5_core_dev *mdev = priv->mdev;
1787 
1788 	if (!macsec)
1789 		return;
1790 
1791 	mlx5_notifier_unregister(mdev, &macsec->nb);
1792 	mlx5_macsec_fs_cleanup(mdev->macsec_fs);
1793 	destroy_workqueue(macsec->wq);
1794 	mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
1795 	mutex_destroy(&macsec->lock);
1796 	kfree(macsec);
1797 }
1798