// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */

#include "mlx5_core.h"
#include "en.h"
#include "ipsec.h"
#include "lib/crypto.h"
#include "lib/ipsec_fs_roce.h"
#include "fs_core.h"
#include "eswitch.h"

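/* Offsets into the IPsec ASO context, passed by the update helpers below in
 * the WQE data_offset_condition_operand field to target the
 * remove_flow_pkt_cnt and remove_flow_soft_lft fields respectively.
 */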
enum {
	MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
	MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET,
};

u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
	u32 caps = 0;

	if (!MLX5_CAP_GEN(mdev, ipsec_offload))
		return 0;

	if (!MLX5_CAP_GEN(mdev, log_max_dek))
		return 0;

	if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
	    MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return 0;

	if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
	    !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
		return 0;

	if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) ||
	    !MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
	    MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
		caps |= MLX5_IPSEC_CAP_CRYPTO;

	if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
	    (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS ||
	     (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS &&
	     is_mdev_legacy_mode(mdev)))) {
		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
					      reformat_add_esp_trasport) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					      reformat_del_esp_trasport) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
			caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;

		if (IS_ENABLED(CONFIG_MLX5_CLS_ACT) &&
		    ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
		      MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
		     MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)))
			caps |= MLX5_IPSEC_CAP_PRIO;

		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
					      reformat_l2_to_l3_esp_tunnel) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					      reformat_l3_esp_tunnel_to_l2))
			caps |= MLX5_IPSEC_CAP_TUNNEL;

		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
					      reformat_add_esp_transport_over_udp) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					      reformat_del_esp_transport_over_udp))
			caps |= MLX5_IPSEC_CAP_ESPINUDP;
	}

	if (mlx5_get_roce_state(mdev) && mlx5_ipsec_fs_is_mpv_roce_supported(mdev) &&
	    MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
	    MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
		caps |= MLX5_IPSEC_CAP_ROCE;

	if (!caps)
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
		caps |= MLX5_IPSEC_CAP_ESN;

	/* We can accommodate up to 2^24 different IPsec objects
	 * because we use up to 24 bits of flow table metadata
	 * to hold the IPsec object's unique handle.
	 */
	WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
	return caps;
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);

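/* Example (illustrative only, not part of the driver): callers are expected
 * to test individual bits of the returned mask before enabling a given
 * offload mode, e.g.
 *
 *	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
 *		configure_packet_offload();	// hypothetical caller helper
 */

/* Fill the ASO section of an IPsec object for packet offload: arm the ESN
 * event, enable replay-window protection on RX and SN increment on TX, and
 * program the hard/soft packet lifetime counters.
 */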
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
				     struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	void *aso_ctx;

	aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
	if (attrs->replay_esn.trigger) {
		MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);

		if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
			MLX5_SET(ipsec_aso, aso_ctx, window_sz,
				 attrs->replay_esn.replay_window);
			MLX5_SET(ipsec_aso, aso_ctx, mode,
				 MLX5_IPSEC_ASO_REPLAY_PROTECTION);
		}
		MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
			 attrs->replay_esn.esn);
	}

	/* ASO context */
	MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
	MLX5_SET(ipsec_obj, obj, full_offload, 1);
	MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
	/* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register that flow
	 * steering performs matching against. Please be aware that
	 * this register was chosen arbitrarily and can't be used in
	 * other places as long as IPsec packet offload is active.
	 */
	MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
		MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);

	if (attrs->lft.hard_packet_limit != XFRM_INF) {
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
			 attrs->lft.hard_packet_limit);
		MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
	}

	if (attrs->lft.soft_packet_limit != XFRM_INF) {
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
			 attrs->lft.soft_packet_limit);

		MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
	}
}

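/* Build and execute CREATE_GENERAL_OBJECT for an IPsec object: copy the
 * salt and implicit IV, set the ICV length, the ESN state and the DEK
 * number, and, for packet offload, fill the ASO section as well. On success
 * the firmware-assigned object id is stored in the SA entry.
 */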
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
	void *obj, *salt_p, *salt_iv_p;
	struct mlx5e_hw_objs *res;
	int err;

	obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);

	/* salt and seq_iv */
	salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
	memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));

	MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B);
	salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
	memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
	/* esn */
	if (attrs->replay_esn.trigger) {
		MLX5_SET(ipsec_obj, obj, esn_en, 1);
		MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
		MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);
	}

	MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);

	res = &mdev->mlx5e_res.hw_objs;
	if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (!err)
		sa_entry->ipsec_obj_id =
			MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}

static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

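/* Create the HW context for an SA: the DEK (encryption key) must be created
 * first, since the IPsec object references it by number (dekn).
 * mlx5_ipsec_free_sa_ctx() below tears the two down in reverse order.
 */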
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	int err;

	/* key */
	err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
					 aes_gcm->key_len / BITS_PER_BYTE,
					 MLX5_ACCEL_OBJ_IPSEC_KEY,
					 &sa_entry->enc_key_id);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
		return err;
	}

	err = mlx5_create_ipsec_obj(sa_entry);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
		goto err_enc_key;
	}

	return 0;

err_enc_key:
	mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
	return err;
}

void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_destroy_ipsec_obj(sa_entry);
	mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
}

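/* ESN update path: query the IPsec object first to confirm that the ESN MSB
 * and overlap fields are modifiable (modify_field_select), then issue
 * MODIFY_GENERAL_OBJECT with the new values. Note that out[] is sized for
 * the larger query response and reused for the modify call.
 */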
static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
				 const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
	u64 modify_field_select = 0;
	u64 general_obj_types;
	void *obj;
	int err;

	general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
	if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return -EINVAL;

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
			      sa_entry->ipsec_obj_id, err);
		return err;
	}

	obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
	modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);

	/* esn */
	if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
	    !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
		return -EOPNOTSUPP;

	obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
	MLX5_SET64(ipsec_obj, obj, modify_field_select,
		   MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
			   MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
	MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
	MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
				const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	int err;

	err = mlx5_modify_ipsec_obj(sa_entry, attrs);
	if (err)
		return;

	memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}

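/* Common setup for an ASO update: select the 64-bit bitwise data/mask mode
 * and always-true condition operands so that the write carried in @data is
 * applied unconditionally, then reuse mlx5e_ipsec_aso_query() to post the
 * WQE.
 */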
static void mlx5e_ipsec_aso_update(struct mlx5e_ipsec_sa_entry *sa_entry,
				   struct mlx5_wqe_aso_ctrl_seg *data)
{
	data->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
	data->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
				      MLX5_ASO_ALWAYS_TRUE << 4;

	mlx5e_ipsec_aso_query(sa_entry, data);
}

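/* An ESN event fired: if the HW sequence number (mode_parameter) dropped
 * below the middle of the ESN scope, the 32-bit SN wrapped, so advance
 * esn_msb and clear the overlap flag; otherwise just record the overlap.
 * The new state is pushed to FW, then the event is re-armed with an ASO
 * update; BIT_ULL(54) is understood to re-arm the event bit within the
 * targeted ASO context qword (the exact bit layout is a HW detail).
 */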
static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
					 u32 mode_param)
{
	struct mlx5_accel_esp_xfrm_attrs attrs = {};
	struct mlx5_wqe_aso_ctrl_seg data = {};

	if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
		sa_entry->esn_state.esn_msb++;
		sa_entry->esn_state.overlap = 0;
	} else {
		sa_entry->esn_state.overlap = 1;
	}

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);

	/* It is safe to execute the modify below unlocked since the only
	 * flows that could affect this HW object are create, destroy and
	 * this work.
	 *
	 * The creation flow can't co-exist with this modify work, the
	 * destruction flow would cancel this work, and this work is a
	 * single entity that can't conflict with itself.
	 */
	spin_unlock_bh(&sa_entry->x->lock);
	mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
	spin_lock_bh(&sa_entry->x->lock);

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
	data.bitwise_data = cpu_to_be64(BIT_ULL(54));
	data.data_mask = data.bitwise_data;

	mlx5e_ipsec_aso_update(sa_entry, &data);
}

static void mlx5e_ipsec_aso_update_hard(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_wqe_aso_ctrl_seg data = {};

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
	data.bitwise_data = cpu_to_be64(BIT_ULL(57) + BIT_ULL(31));
	data.data_mask = data.bitwise_data;
	mlx5e_ipsec_aso_update(sa_entry, &data);
}

static void mlx5e_ipsec_aso_update_soft(struct mlx5e_ipsec_sa_entry *sa_entry,
					u32 val)
{
	struct mlx5_wqe_aso_ctrl_seg data = {};

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET;
	data.bitwise_data = cpu_to_be64(val);
	data.data_mask = cpu_to_be64(U32_MAX);
	mlx5e_ipsec_aso_update(sa_entry, &data);
}

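/* Handle a soft/hard packet lifetime event. The HW counter is effectively
 * 32 bits wide (note the BIT_ULL(31) arithmetic), so 64-bit xfrm lifetimes
 * are consumed in rounds: limits.round counts re-arms of the hard counter,
 * while numb_rounds_soft/numb_rounds_hard (precomputed elsewhere in the
 * driver) mark the rounds in which the soft and hard limits expire.
 */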
static void mlx5e_ipsec_handle_limits(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_aso *aso = ipsec->aso;
	bool soft_arm, hard_arm;
	u64 hard_cnt;

	lockdep_assert_held(&sa_entry->x->lock);

	soft_arm = !MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm);
	hard_arm = !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm);
	if (!soft_arm && !hard_arm)
		/* It is not a lifetime event */
		return;

	hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
	if (!hard_cnt || hard_arm) {
		/* It is possible to see the packet counter equal to zero
		 * without the hard limit event armed. Such a situation can
		 * occur if the counter decreased while we handled a soft
		 * limit event.
		 *
		 * However, it would be a HW/FW bug if the hard limit event
		 * is raised and the packet counter is not zero.
		 */
		WARN_ON_ONCE(hard_arm && hard_cnt);

		/* Notify about hard limit */
		xfrm_state_check_expire(sa_entry->x);
		return;
	}

	/* We are in a soft limit event. */
	if (!sa_entry->limits.soft_limit_hit &&
	    sa_entry->limits.round == attrs->lft.numb_rounds_soft) {
		sa_entry->limits.soft_limit_hit = true;
		/* Notify about soft limit */
		xfrm_state_check_expire(sa_entry->x);

		if (sa_entry->limits.round == attrs->lft.numb_rounds_hard)
			goto hard;

		if (attrs->lft.soft_packet_limit > BIT_ULL(31)) {
			/* We cannot avoid a soft_value that might have the
			 * high bit set. For instance, soft_value = 2^31 + 1
			 * cannot be adjusted to the low-bit-clear version of
			 * soft_value = 1 because it is too close to 0.
			 *
			 * Thus we have this corner case where we can hit the
			 * soft limit with the high bit set, but cannot adjust
			 * the counter. Thus we set a temporary interrupt_value
			 * at least 2^30 away from here and do the adjustment
			 * then.
			 */
			mlx5e_ipsec_aso_update_soft(sa_entry,
						    BIT_ULL(31) - BIT_ULL(30));
			sa_entry->limits.fix_limit = true;
			return;
		}

		sa_entry->limits.fix_limit = true;
	}

hard:
	if (sa_entry->limits.round == attrs->lft.numb_rounds_hard) {
		mlx5e_ipsec_aso_update_soft(sa_entry, 0);
		attrs->lft.soft_packet_limit = XFRM_INF;
		return;
	}

	mlx5e_ipsec_aso_update_hard(sa_entry);
	sa_entry->limits.round++;
	if (sa_entry->limits.round == attrs->lft.numb_rounds_soft)
		mlx5e_ipsec_aso_update_soft(sa_entry,
					    attrs->lft.soft_packet_limit);
	if (sa_entry->limits.fix_limit) {
		sa_entry->limits.fix_limit = false;
		mlx5e_ipsec_aso_update_soft(sa_entry, BIT_ULL(31) - 1);
	}
}

static void mlx5e_ipsec_handle_event(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = work->data;
	struct mlx5_accel_esp_xfrm_attrs *attrs;
	struct mlx5e_ipsec_aso *aso;
	int ret;

	aso = sa_entry->ipsec->aso;
	attrs = &sa_entry->attrs;

	spin_lock_bh(&sa_entry->x->lock);
	ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
	if (ret)
		goto unlock;

	if (attrs->replay_esn.trigger &&
	    !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
		u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);

		mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
	}

	if (attrs->lft.soft_packet_limit != XFRM_INF)
		mlx5e_ipsec_handle_limits(sa_entry);

unlock:
	spin_unlock_bh(&sa_entry->x->lock);
	kfree(work);
}

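/* Notifier callback for OBJECT_CHANGE events, called from EQ (atomic)
 * context. Handling requires FW commands and may sleep, so look up the SA
 * entry by object id and defer to the IPsec workqueue; hence the GFP_ATOMIC
 * allocation.
 */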
static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5_eqe_obj_change *object;
	struct mlx5e_ipsec_work *work;
	struct mlx5_eqe *eqe = data;
	u16 type;

	if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
		return NOTIFY_DONE;

	object = &eqe->data.obj_change;
	type = be16_to_cpu(object->obj_type);

	if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
		return NOTIFY_DONE;

	sa_entry = xa_load(&ipsec->sadb, be32_to_cpu(object->obj_id));
	if (!sa_entry)
		return NOTIFY_DONE;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
	work->data = sa_entry;

	queue_work(ipsec->wq, &work->work);
	return NOTIFY_OK;
}

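/* Allocate the single, shared ASO context: DMA-map the scratch buffer that
 * ASO WQEs read and write, create the ASO SQ and register the OBJECT_CHANGE
 * notifier. Concurrent ASO users are serialized by aso->lock.
 */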
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct mlx5e_hw_objs *res;
	struct device *pdev;
	int err;

	aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
	if (!aso)
		return -ENOMEM;

	res = &mdev->mlx5e_res.hw_objs;

	pdev = mlx5_core_dma_dev(mdev);
	aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx),
				       DMA_BIDIRECTIONAL);
	err = dma_mapping_error(pdev, aso->dma_addr);
	if (err)
		goto err_dma;

	aso->aso = mlx5_aso_create(mdev, res->pdn);
	if (IS_ERR(aso->aso)) {
		err = PTR_ERR(aso->aso);
		goto err_aso_create;
	}

	spin_lock_init(&aso->lock);
	ipsec->nb.notifier_call = mlx5e_ipsec_event;
	mlx5_notifier_register(mdev, &ipsec->nb);

	ipsec->aso = aso;
	return 0;

err_aso_create:
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
			 DMA_BIDIRECTIONAL);
err_dma:
	kfree(aso);
	return err;
}

void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct device *pdev;

	aso = ipsec->aso;
	pdev = mlx5_core_dma_dev(mdev);

	mlx5_notifier_unregister(mdev, &ipsec->nb);
	mlx5_aso_destroy(aso->aso);
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
			 DMA_BIDIRECTIONAL);
	kfree(aso);
	ipsec->aso = NULL;
}

static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
				 struct mlx5_wqe_aso_ctrl_seg *data)
{
	if (!data)
		return;

	ctrl->data_mask_mode = data->data_mask_mode;
	ctrl->condition_1_0_operand = data->condition_1_0_operand;
	ctrl->condition_1_0_offset = data->condition_1_0_offset;
	ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
	ctrl->condition_0_data = data->condition_0_data;
	ctrl->condition_0_mask = data->condition_0_mask;
	ctrl->condition_1_data = data->condition_1_data;
	ctrl->condition_1_mask = data->condition_1_mask;
	ctrl->bitwise_data = data->bitwise_data;
	ctrl->data_mask = data->data_mask;
}

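/* Post an ASO WQE for the SA's IPsec object and busy-poll its CQ for up to
 * roughly 10ms. The WQE reads the current ASO context back into aso->ctx
 * through the DMA buffer; the optional @data control segment lets callers
 * piggyback an update (see mlx5e_ipsec_aso_update()).
 */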
int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
			  struct mlx5_wqe_aso_ctrl_seg *data)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_aso *aso = ipsec->aso;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_wqe_aso_ctrl_seg *ctrl;
	struct mlx5e_hw_objs *res;
	struct mlx5_aso_wqe *wqe;
	unsigned long expires;
	u8 ds_cnt;
	int ret;

	lockdep_assert_held(&sa_entry->x->lock);
	res = &mdev->mlx5e_res.hw_objs;

	spin_lock_bh(&aso->lock);
	memset(aso->ctx, 0, sizeof(aso->ctx));
	wqe = mlx5_aso_get_wqe(aso->aso);
	ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_IPSEC);

	ctrl = &wqe->aso_ctrl;
	ctrl->va_l =
		cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
	ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
	ctrl->l_key = cpu_to_be32(res->mkey);
	mlx5e_ipsec_aso_copy(ctrl, data);

	mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
	expires = jiffies + msecs_to_jiffies(10);
	do {
		ret = mlx5_aso_poll_cq(aso->aso, false);
		if (ret)
			/* We are in atomic context */
			udelay(10);
	} while (ret && time_is_after_jiffies(expires));
	spin_unlock_bh(&aso->lock);
	return ret;
}