// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */

#include <linux/iopoll.h>

#include "mlx5_core.h"
#include "en.h"
#include "ipsec.h"
#include "lib/crypto.h"
#include "lib/ipsec_fs_roce.h"
#include "fs_core.h"
#include "eswitch.h"

enum {
	MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
	MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET,
};

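/* Report which IPsec offload capabilities the device supports. The result
 * is a bitmask of MLX5_IPSEC_CAP_* flags; a return value of 0 means IPsec
 * offload cannot be used at all. Crypto offload, packet (full) offload,
 * tunnel mode, ESP-in-UDP and RoCE support are each gated on their own
 * set of HCA capability bits.
 */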
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
	u32 caps = 0;

	if (!MLX5_CAP_GEN(mdev, ipsec_offload))
		return 0;

	if (!MLX5_CAP_GEN(mdev, log_max_dek))
		return 0;

	if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
	    MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return 0;

	if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
	    !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
		return 0;

	if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) ||
	    !MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
	    MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
		caps |= MLX5_IPSEC_CAP_CRYPTO;

	if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
	    (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS ||
	     is_mdev_legacy_mode(mdev))) {
		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
					      reformat_add_esp_trasport) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					      reformat_del_esp_trasport) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
			caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;

		if (IS_ENABLED(CONFIG_MLX5_CLS_ACT) &&
		    ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
		      MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
		     MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)))
			caps |= MLX5_IPSEC_CAP_PRIO;

		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
					      reformat_l2_to_l3_esp_tunnel) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					      reformat_l3_esp_tunnel_to_l2))
			caps |= MLX5_IPSEC_CAP_TUNNEL;

		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
					      reformat_add_esp_transport_over_udp) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					      reformat_del_esp_transport_over_udp))
			caps |= MLX5_IPSEC_CAP_ESPINUDP;
	}

	if (mlx5_get_roce_state(mdev) && mlx5_ipsec_fs_is_mpv_roce_supported(mdev) &&
	    MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
	    MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
		caps |= MLX5_IPSEC_CAP_ROCE;

	if (!caps)
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
		caps |= MLX5_IPSEC_CAP_ESN;

	/* We can accommodate up to 2^24 different IPsec objects
	 * because we use up to 24 bits of flow table metadata
	 * to hold the IPsec object's unique handle.
	 */
	WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
	return caps;
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);

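/* Fill the IPsec object's ASO context for packet offload: replay window
 * and ESN event arming, sequence number tracking on TX, and the soft/hard
 * packet lifetime counters that let firmware raise events and remove the
 * flow once the limits are reached.
 */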
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
				     struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	void *aso_ctx;

	aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
	if (attrs->replay_esn.trigger) {
		MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);

		if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
			MLX5_SET(ipsec_aso, aso_ctx, window_sz,
				 attrs->replay_esn.replay_window);
			MLX5_SET(ipsec_aso, aso_ctx, mode,
				 MLX5_IPSEC_ASO_REPLAY_PROTECTION);
		}
		MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
			 attrs->replay_esn.esn);
	}

	/* ASO context */
	MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
	MLX5_SET(ipsec_obj, obj, full_offload, 1);
	MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
	/* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register that is used
	 * in flow steering to perform matching against. Please be
	 * aware that this register was chosen arbitrarily and can't
	 * be used in other places as long as IPsec packet offload
	 * is active.
	 */
	MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
		MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
		if (!attrs->replay_esn.trigger)
			MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
				 sa_entry->esn_state.esn);
	}

	if (attrs->lft.hard_packet_limit != XFRM_INF) {
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
			 attrs->lft.hard_packet_limit);
		MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
	}

	if (attrs->lft.soft_packet_limit != XFRM_INF) {
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
			 attrs->lft.soft_packet_limit);

		MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
	}
}

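/* Create the IPSEC general object in firmware. The object carries the
 * salt, implicit IV and DEK reference of the SA, plus the ESN and ASO
 * state for packet offload. On success the returned object id is stored
 * in the SA entry and is later used for ASO queries and as the flow
 * table metadata handle.
 */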
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
	void *obj, *salt_p, *salt_iv_p;
	struct mlx5e_hw_objs *res;
	int err;

	obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);

	/* salt and seq_iv */
	salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
	memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));

	MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B);
	salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
	memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
	/* esn */
	if (attrs->replay_esn.trigger) {
		MLX5_SET(ipsec_obj, obj, esn_en, 1);
		MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
		MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);
	}

	MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);

	res = &mdev->mlx5e_res.hw_objs;
	if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_packet_setup(obj, res->pdn, sa_entry);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (!err)
		sa_entry->ipsec_obj_id =
			MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}

static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

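/* Create the full HW context of an SA: first load the key material as a
 * DEK (encryption key object), then create the IPSEC object that
 * references it. The key is destroyed again if object creation fails.
 */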
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	int err;

	/* key */
	err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
					 aes_gcm->key_len / BITS_PER_BYTE,
					 MLX5_ACCEL_OBJ_IPSEC_KEY,
					 &sa_entry->enc_key_id);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
		return err;
	}

	err = mlx5_create_ipsec_obj(sa_entry);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
		goto err_enc_key;
	}

	return 0;

err_enc_key:
	mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
	return err;
}

void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_destroy_ipsec_obj(sa_entry);
	mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
}

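/* Update the ESN fields (esn_msb, esn_overlap) of an existing IPSEC
 * object. The object is queried first because firmware only permits
 * modifying the fields it advertises in modify_field_select.
 */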
static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
				 const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
	u64 modify_field_select = 0;
	u64 general_obj_types;
	void *obj;
	int err;

	general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
	if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return -EINVAL;

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
			      sa_entry->ipsec_obj_id, err);
		return err;
	}

	obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
	modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);

	/* esn */
	if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
	    !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
		return -EOPNOTSUPP;

	obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
	MLX5_SET64(ipsec_obj, obj, modify_field_select,
		   MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
			   MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
	MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
	MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
				const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	int err;

	err = mlx5_modify_ipsec_obj(sa_entry, attrs);
	if (err)
		return;

	memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}

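/* Write back data to the ASO context: the bitwise 64-bit data mask mode
 * together with always-true conditions turns the query WQE into an
 * unconditional read-modify-write of the selected context bits.
 */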
static void mlx5e_ipsec_aso_update(struct mlx5e_ipsec_sa_entry *sa_entry,
				   struct mlx5_wqe_aso_ctrl_seg *data)
{
	data->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
	data->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
				      MLX5_ASO_ALWAYS_TRUE << 4;

	mlx5e_ipsec_aso_query(sa_entry, data);
}

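/* Handle an ESN event: the sequence number crossed into the other half of
 * the 32-bit window. If it wrapped into the lower half, advance esn_msb
 * and clear the overlap flag; otherwise only set overlap. The bitwise ASO
 * write of bit 54 in the first 64-bit word (which maps to esn_event_arm
 * in the big-endian view of the ipsec_aso flags) re-arms the event.
 */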
static void
mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
			     u32 mode_param,
			     struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_wqe_aso_ctrl_seg data = {};

	if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
		sa_entry->esn_state.esn_msb++;
		sa_entry->esn_state.overlap = 0;
	} else {
		sa_entry->esn_state.overlap = 1;
	}

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, attrs);

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
	data.bitwise_data = cpu_to_be64(BIT_ULL(54));
	data.data_mask = data.bitwise_data;

	mlx5e_ipsec_aso_update(sa_entry, &data);
}

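/* Re-arm the hard lifetime event for the next round: a single bitwise
 * write sets the arm bit in the ASO flags (bit 57 of the first 64-bit
 * word) and restarts the packet counter at 2^31 (bit 31).
 */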
static void mlx5e_ipsec_aso_update_hard(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_wqe_aso_ctrl_seg data = {};

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
	data.bitwise_data = cpu_to_be64(BIT_ULL(57) + BIT_ULL(31));
	data.data_mask = data.bitwise_data;
	mlx5e_ipsec_aso_update(sa_entry, &data);
}

static void mlx5e_ipsec_aso_update_soft(struct mlx5e_ipsec_sa_entry *sa_entry,
					u32 val)
{
	struct mlx5_wqe_aso_ctrl_seg data = {};

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET;
	data.bitwise_data = cpu_to_be64(val);
	data.data_mask = cpu_to_be64(U32_MAX);
	mlx5e_ipsec_aso_update(sa_entry, &data);
}

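/* Lifetime events are handled in rounds of 2^31 packets, since the HW
 * packet counter is 32 bits wide while the XFRM limits are 64 bits.
 * Each hard limit event ends one round: the counter is re-armed until
 * numb_rounds_hard rounds have elapsed, and the soft limit value is
 * programmed into the round in which it actually falls. Both soft and
 * hard expiration are reported to the XFRM stack via
 * xfrm_state_check_expire().
 */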
static void mlx5e_ipsec_handle_limits(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	bool soft_arm, hard_arm;
	u64 hard_cnt;

	lockdep_assert_held(&sa_entry->x->lock);

	soft_arm = !MLX5_GET(ipsec_aso, sa_entry->ctx, soft_lft_arm);
	hard_arm = !MLX5_GET(ipsec_aso, sa_entry->ctx, hard_lft_arm);
	if (!soft_arm && !hard_arm)
		/* It is not a lifetime event */
		return;

	hard_cnt = MLX5_GET(ipsec_aso, sa_entry->ctx, remove_flow_pkt_cnt);
	if (!hard_cnt || hard_arm) {
		/* It is possible to see the packet counter equal to zero
		 * without the hard limit event armed. Such a situation can
		 * occur if the counter decreased while we were handling the
		 * soft limit event.
		 *
		 * However, it would be a HW/FW bug if the hard limit event
		 * is raised while the packet counter is not zero.
		 */
		WARN_ON_ONCE(hard_arm && hard_cnt);

		/* Notify about hard limit */
		xfrm_state_check_expire(sa_entry->x);
		return;
	}

	/* We are in a soft limit event. */
	if (!sa_entry->limits.soft_limit_hit &&
	    sa_entry->limits.round == attrs->lft.numb_rounds_soft) {
		sa_entry->limits.soft_limit_hit = true;
		/* Notify about soft limit */
		xfrm_state_check_expire(sa_entry->x);

		if (sa_entry->limits.round == attrs->lft.numb_rounds_hard)
			goto hard;

		if (attrs->lft.soft_packet_limit > BIT_ULL(31)) {
			/* We cannot avoid a soft_value that might have the high
			 * bit set. For instance soft_value=2^31+1 cannot be
			 * adjusted to the low bit clear version of soft_value=1
			 * because it is too close to 0.
			 *
			 * Thus we have this corner case where we can hit the
			 * soft_limit with the high bit set, but cannot adjust
			 * the counter. Thus we set a temporary interrupt_value
			 * at least 2^30 away from here and do the adjustment
			 * then.
			 */
			mlx5e_ipsec_aso_update_soft(sa_entry,
						    BIT_ULL(31) - BIT_ULL(30));
			sa_entry->limits.fix_limit = true;
			return;
		}

		sa_entry->limits.fix_limit = true;
	}

hard:
	if (sa_entry->limits.round == attrs->lft.numb_rounds_hard) {
		mlx5e_ipsec_aso_update_soft(sa_entry, 0);
		attrs->lft.soft_packet_limit = XFRM_INF;
		return;
	}

	mlx5e_ipsec_aso_update_hard(sa_entry);
	sa_entry->limits.round++;
	if (sa_entry->limits.round == attrs->lft.numb_rounds_soft)
		mlx5e_ipsec_aso_update_soft(sa_entry,
					    attrs->lft.soft_packet_limit);
	if (sa_entry->limits.fix_limit) {
		sa_entry->limits.fix_limit = false;
		mlx5e_ipsec_aso_update_soft(sa_entry, BIT_ULL(31) - 1);
	}
}

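/* Work handler for IPSEC object change events. Reads the current ASO
 * context under the xfrm state lock, then dispatches to lifetime handling
 * and/or ESN window advancement. The xfrm modify is issued outside the
 * lock because it executes a (sleeping) firmware command.
 */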
static void mlx5e_ipsec_handle_event(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = work->data;
	struct mlx5_accel_esp_xfrm_attrs tmp = {};
	struct mlx5_accel_esp_xfrm_attrs *attrs;
	bool need_modify = false;
	int ret;

	attrs = &sa_entry->attrs;

	spin_lock_bh(&sa_entry->x->lock);
	ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
	if (ret)
		goto unlock;

	if (attrs->lft.soft_packet_limit != XFRM_INF)
		mlx5e_ipsec_handle_limits(sa_entry);

	if (attrs->replay_esn.trigger &&
	    !MLX5_GET(ipsec_aso, sa_entry->ctx, esn_event_arm)) {
		u32 mode_param = MLX5_GET(ipsec_aso, sa_entry->ctx,
					  mode_parameter);

		mlx5e_ipsec_update_esn_state(sa_entry, mode_param, &tmp);
		need_modify = true;
	}

unlock:
	spin_unlock_bh(&sa_entry->x->lock);
	if (need_modify)
		mlx5_accel_esp_modify_xfrm(sa_entry, &tmp);
	kfree(work);
}

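/* Notifier callback, invoked from EQ (atomic) context. Filter for object
 * change events on IPSEC objects, look up the SA by object id and defer
 * the actual handling to the IPsec workqueue.
 */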
static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5_eqe_obj_change *object;
	struct mlx5e_ipsec_work *work;
	struct mlx5_eqe *eqe = data;
	u16 type;

	if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
		return NOTIFY_DONE;

	object = &eqe->data.obj_change;
	type = be16_to_cpu(object->obj_type);

	if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
		return NOTIFY_DONE;

	sa_entry = xa_load(&ipsec->sadb, be32_to_cpu(object->obj_id));
	if (!sa_entry)
		return NOTIFY_DONE;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
	work->data = sa_entry;

	queue_work(ipsec->wq, &work->work);
	return NOTIFY_OK;
}

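/* Set up the ASO infrastructure for this IPsec instance: a DMA-mapped
 * scratch buffer for ASO reads, the ASO queue pair and the object change
 * notifier.
 */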
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct mlx5e_hw_objs *res;
	struct device *pdev;
	int err;

	aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
	if (!aso)
		return -ENOMEM;

	res = &mdev->mlx5e_res.hw_objs;

	pdev = mlx5_core_dma_dev(mdev);
	aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx),
				       DMA_BIDIRECTIONAL);
	err = dma_mapping_error(pdev, aso->dma_addr);
	if (err)
		goto err_dma;

	aso->aso = mlx5_aso_create(mdev, res->pdn);
	if (IS_ERR(aso->aso)) {
		err = PTR_ERR(aso->aso);
		goto err_aso_create;
	}

	spin_lock_init(&aso->lock);
	ipsec->nb.notifier_call = mlx5e_ipsec_event;
	mlx5_notifier_register(mdev, &ipsec->nb);

	ipsec->aso = aso;
	return 0;

err_aso_create:
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
			 DMA_BIDIRECTIONAL);
err_dma:
	kfree(aso);
	return err;
}

void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct device *pdev;

	aso = ipsec->aso;
	pdev = mlx5_core_dma_dev(mdev);

	mlx5_notifier_unregister(mdev, &ipsec->nb);
	mlx5_aso_destroy(aso->aso);
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
			 DMA_BIDIRECTIONAL);
	kfree(aso);
	ipsec->aso = NULL;
}

static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
				 struct mlx5_wqe_aso_ctrl_seg *data)
{
	if (!data)
		return;

	ctrl->data_mask_mode = data->data_mask_mode;
	ctrl->condition_1_0_operand = data->condition_1_0_operand;
	ctrl->condition_1_0_offset = data->condition_1_0_offset;
	ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
	ctrl->condition_0_data = data->condition_0_data;
	ctrl->condition_0_mask = data->condition_0_mask;
	ctrl->condition_1_data = data->condition_1_data;
	ctrl->condition_1_mask = data->condition_1_mask;
	ctrl->bitwise_data = data->bitwise_data;
	ctrl->data_mask = data->data_mask;
}

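/* Post a single ASO WQE for the SA's IPSEC object and busy-poll for its
 * completion (the caller holds the xfrm state spinlock, so sleeping is
 * not an option). The context read back by the device lands in aso->ctx
 * and is copied to sa_entry->ctx on success. An optional @data segment
 * turns the query into a read-modify-write.
 */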
int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
			  struct mlx5_wqe_aso_ctrl_seg *data)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_aso *aso = ipsec->aso;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_wqe_aso_ctrl_seg *ctrl;
	struct mlx5e_hw_objs *res;
	struct mlx5_aso_wqe *wqe;
	u8 ds_cnt;
	int ret;

	lockdep_assert_held(&sa_entry->x->lock);
	res = &mdev->mlx5e_res.hw_objs;

	spin_lock_bh(&aso->lock);
	memset(aso->ctx, 0, sizeof(aso->ctx));
	wqe = mlx5_aso_get_wqe(aso->aso);
	ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_IPSEC);

	ctrl = &wqe->aso_ctrl;
	ctrl->va_l =
		cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
	ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
	ctrl->l_key = cpu_to_be32(res->mkey);
	mlx5e_ipsec_aso_copy(ctrl, data);

	mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
	read_poll_timeout_atomic(mlx5_aso_poll_cq, ret, !ret, 10,
				 10 * USEC_PER_MSEC, false, aso->aso, false);
	if (!ret)
		memcpy(sa_entry->ctx, aso->ctx, MLX5_ST_SZ_BYTES(ipsec_aso));
	spin_unlock_bh(&aso->lock);
	return ret;
}