xref: /freebsd/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c (revision e23731db48ef9c6568d4768b1f87d48514339faa)
/*-
 * Copyright (c) 2023 NVIDIA corporation & affiliates.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/types.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <net/pfkeyv2.h>
#include <netipsec/ipsec.h>
#include <dev/mlx5/mlx5_en/en.h>
#include <dev/mlx5/crypto.h>
#include <dev/mlx5/mlx5_accel/ipsec.h>

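/*
 * Report which IPsec offload features the device supports.  Returns a
 * bitmask of MLX5_IPSEC_CAP_* flags, or 0 if any capability required for
 * basic crypto offload (the IPSEC general object, DEKs, and AES-GCM-128
 * encrypt/decrypt in the NIC flow tables) is missing.
 */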
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
	u32 caps = 0;

	if (!MLX5_CAP_GEN(mdev, ipsec_offload))
		return 0;

	if (!MLX5_CAP_GEN(mdev, log_max_dek))
		return 0;

	if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
	    MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return 0;

	if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
	    !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
		return 0;

	if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) ||
	    !MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload)) {
		/*
		 * Note: "trasport" (sic) is the field's actual spelling
		 * in the mlx5 interface headers, not a typo here.
		 */
		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
		    reformat_add_esp_trasport) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
		    reformat_del_esp_trasport) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
			caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;

		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level))
			caps |= MLX5_IPSEC_CAP_PRIO;

		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
		    reformat_add_esp_transport_over_udp) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
		    reformat_del_esp_transport_over_udp))
			caps |= MLX5_IPSEC_CAP_ESPINUDP;
	}

	if (!caps)
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
		caps |= MLX5_IPSEC_CAP_ESN;

	return caps;
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);

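/*
 * Fill in the ASO (Advanced Steering Operation) context of an IPsec
 * object for packet (full) offload: bind the context to the protection
 * domain, select the return register used by flow steering, and, when
 * ESN is in use, arm the ESN event and program the replay window and
 * mode appropriate for the SA's direction.
 */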
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
				     struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	void *aso_ctx;

	aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
	/* ASO context */
	MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
	MLX5_SET(ipsec_obj, obj, full_offload, 1);
	MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
	/* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register that flow
	 * steering matches against.  Be aware that this register was
	 * chosen arbitrarily and must not be used elsewhere while
	 * IPsec packet offload is active.
	 */
	MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
	if (attrs->replay_esn.trigger) {
		MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);

		if (attrs->dir == IPSEC_DIR_INBOUND) {
			MLX5_SET(ipsec_aso, aso_ctx, window_sz,
				 attrs->replay_esn.replay_window);
			if (attrs->replay_esn.replay_window != 0)
				MLX5_SET(ipsec_aso, aso_ctx, mode,
				    MLX5_IPSEC_ASO_REPLAY_PROTECTION);
			else
				MLX5_SET(ipsec_aso, aso_ctx, mode,
				    MLX5_IPSEC_ASO_MODE);
		}
		MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
			 attrs->replay_esn.esn);
	}

	switch (attrs->dir) {
	case IPSEC_DIR_OUTBOUND:
		if (attrs->replay_esn.replay_window != 0)
			MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
		else
			MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_MODE);
		break;
	default:
		break;
	}
}

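/*
 * Create the hardware IPSEC general object for an SA: program the salt,
 * implicit IV, ICV length, ESN state, and DEK index, then execute
 * CREATE_GENERAL_OBJECT.  On success the returned object id is stored
 * in the SA entry.
 */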
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
	void *obj, *salt_p, *salt_iv_p;
	int err;

	obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);

	/* salt and seq_iv */
	salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
	memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));

	MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B);
	salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
	memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));

	/* esn */
	if (attrs->replay_esn.trigger) {
		MLX5_SET(ipsec_obj, obj, esn_en, 1);
		MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
		MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);
	}

	/* enc./dec. key */
	MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJ);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);

	mlx5e_ipsec_packet_setup(obj, sa_entry->ipsec->pdn, attrs);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (!err)
		sa_entry->ipsec_obj_id =
			MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}

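/* Destroy the SA's hardware IPSEC object via DESTROY_GENERAL_OBJECT. */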
static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJ);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

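/*
 * Set up the hardware context for an SA: create the DEK (encryption
 * key) object first, then the IPSEC object that references it.  The
 * key is destroyed again if object creation fails.
 */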
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	int err;

	/* key */
	err = mlx5_encryption_key_create(mdev, sa_entry->ipsec->pdn,
					 MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC,
					 aes_gcm->aes_key,
					 aes_gcm->key_len,
					 &sa_entry->enc_key_id);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
		return err;
	}

	err = mlx5_create_ipsec_obj(sa_entry);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
		goto err_enc_key;
	}

	return 0;

err_enc_key:
	mlx5_encryption_key_destroy(mdev, sa_entry->enc_key_id);
	return err;
}

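/* Tear down the SA's hardware context in reverse creation order. */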
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_destroy_ipsec_obj(sa_entry);
	mlx5_encryption_key_destroy(mdev, sa_entry->enc_key_id);
}

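/*
 * Copy the caller-supplied ASO control fields (data mask, conditions,
 * bitwise data) into the WQE control segment.  A NULL @data leaves the
 * segment untouched, so the WQE performs a read-only query.
 */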
static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
				 struct mlx5_wqe_aso_ctrl_seg *data)
{
	if (!data)
		return;

	ctrl->data_mask_mode = data->data_mask_mode;
	ctrl->condition_1_0_operand = data->condition_1_0_operand;
	ctrl->condition_1_0_offset = data->condition_1_0_offset;
	ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
	ctrl->condition_0_data = data->condition_0_data;
	ctrl->condition_0_mask = data->condition_0_mask;
	ctrl->condition_1_data = data->condition_1_data;
	ctrl->condition_1_mask = data->condition_1_mask;
	ctrl->bitwise_data = data->bitwise_data;
	ctrl->data_mask = data->data_mask;
}

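/*
 * Post an ASO WQE that reads the IPsec object's ASO context into the
 * DMA-mapped buffer, optionally applying the modifications described
 * by @data, and busy-poll the CQ for up to 10ms for its completion.
 * The ASO spinlock is held for the duration of the operation.
 */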
static int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
				 struct mlx5_wqe_aso_ctrl_seg *data)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_aso *aso = ipsec->aso;
	struct mlx5_wqe_aso_ctrl_seg *ctrl;
	struct mlx5_aso_wqe *wqe;
	unsigned long expires;
	u8 ds_cnt;
	int ret;

	spin_lock_bh(&aso->lock);
	memset(aso->ctx, 0, sizeof(aso->ctx));
	wqe = mlx5_aso_get_wqe(aso->aso);
	ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_IPSEC);

	ctrl = &wqe->aso_ctrl;
	ctrl->va_l = cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
	ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
	ctrl->l_key = cpu_to_be32(ipsec->mkey);
	mlx5e_ipsec_aso_copy(ctrl, data);

	mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
	expires = jiffies + msecs_to_jiffies(10);
	do {
		ret = mlx5_aso_poll_cq(aso->aso, false);
		if (ret)
			/* We are in atomic context */
			udelay(10);
	} while (ret && time_is_after_jiffies(expires));
	spin_unlock_bh(&aso->lock);

	return ret;
}

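/* Midpoint of the 32-bit sequence number space, used below to decide
 * whether the low 32 bits of the ESN have wrapped into a new window.
 */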
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L

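/*
 * Update the ESN fields (esn_msb, esn_overlap) of an existing IPSEC
 * object: query the object first to verify that both fields are
 * modifiable, then issue MODIFY_GENERAL_OBJECT with the new values.
 */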
static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
				 const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
	u64 modify_field_select = 0;
	u64 general_obj_types;
	void *obj;
	int err;

	general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
	if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return -EINVAL;

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJ);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
			      sa_entry->ipsec_obj_id, err);
		return err;
	}

	obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
	modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);

	/* esn */
	if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
	    !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
		return -EOPNOTSUPP;

	obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
	MLX5_SET64(ipsec_obj, obj, modify_field_select,
		   MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
		   MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
	MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
	MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJ);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

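/*
 * Push new xfrm attributes to hardware and, only on success, make them
 * the SA's current attributes.
 */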
static void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
				       const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	int err;

	err = mlx5_modify_ipsec_obj(sa_entry, attrs);
	if (err)
		return;

	memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}

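/*
 * Issue an ASO write: select 64-bit bitwise data/mask mode, make both
 * conditions always true so the write is unconditional, and post the
 * WQE through mlx5e_ipsec_aso_query().
 */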
static void mlx5e_ipsec_aso_update(struct mlx5e_ipsec_sa_entry *sa_entry,
				   struct mlx5_wqe_aso_ctrl_seg *data)
{
	data->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
	data->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE | MLX5_ASO_ALWAYS_TRUE << 4;

	mlx5e_ipsec_aso_query(sa_entry, data);
}

#define MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET 0

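/*
 * Advance the software ESN state after hardware signalled an ESN
 * event: a mode_parameter in the lower half of the sequence space
 * means the low 32 bits wrapped, so bump esn_msb and clear overlap;
 * otherwise mark the overlap window.  The new state is pushed to the
 * IPsec object, and an unconditional bitwise ASO write of bit 54
 * (which lines up with esn_event_arm in the ASO context layout)
 * re-arms the event.
 */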
static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
					 u32 mode_param)
{
	struct mlx5_accel_esp_xfrm_attrs attrs = {};
	struct mlx5_wqe_aso_ctrl_seg data = {};

	if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
		sa_entry->esn_state.esn_msb++;
		sa_entry->esn_state.overlap = 0;
	} else {
		sa_entry->esn_state.overlap = 1;
	}

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs, sa_entry->attrs.dir);

	mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);

	data.data_offset_condition_operand = MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
	data.bitwise_data = cpu_to_be64(BIT_ULL(54));
	data.data_mask = data.bitwise_data;

	mlx5e_ipsec_aso_update(sa_entry, &data);
}

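/*
 * Deferred handler for an IPSEC object change event: re-read the ASO
 * context and, if the ESN event has fired (esn_event_arm consumed by
 * hardware), advance the ESN state machine.
 */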
static void mlx5e_ipsec_handle_event(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = work->data;
	struct mlx5_accel_esp_xfrm_attrs *attrs;
	struct mlx5e_ipsec_aso *aso;
	int ret;

	aso = sa_entry->ipsec->aso;
	attrs = &sa_entry->attrs;
	/* TODO: Kostia, this event should be locked/protected
	 * from concurrent SA delete.
	 */
	ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
	if (ret)
		goto unlock;

	if (attrs->replay_esn.trigger &&
	    !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
		u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);

		mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
	}

unlock:
	kfree(work);
}

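/*
 * EQ notifier for hardware object change events.  Runs in atomic
 * context: filter for IPSEC objects, look the SA up by object id, and
 * defer the actual handling to the IPsec workqueue.
 */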
void mlx5_object_change_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5_eqe_obj_change *object;
	struct mlx5e_ipsec_work *work;
	u16 type;

	object = &eqe->data.obj_change;
	type = be16_to_cpu(object->obj_type);

	if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
		return;

	sa_entry = xa_load(&dev->ipsec_sadb, be32_to_cpu(object->obj_id));
	if (!sa_entry)
		return;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
	work->data = sa_entry;

	queue_work(sa_entry->ipsec->wq, &work->work);
}

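/*
 * Allocate the per-device ASO state: the cached ASO context buffer is
 * DMA-mapped so hardware can read and write it, and an ASO send queue
 * is created for posting query/update WQEs.
 */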
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct device *pdev;
	int err;

	aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
	if (!aso)
		return -ENOMEM;

	pdev = &mdev->pdev->dev;
	aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx), DMA_BIDIRECTIONAL);
	err = dma_mapping_error(pdev, aso->dma_addr);
	if (err)
		goto err_dma;

	aso->aso = mlx5_aso_create(mdev, ipsec->pdn);
	if (IS_ERR(aso->aso)) {
		err = PTR_ERR(aso->aso);
		goto err_aso_create;
	}

	spin_lock_init(&aso->lock);
	ipsec->aso = aso;
	return 0;

err_aso_create:
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx), DMA_BIDIRECTIONAL);
err_dma:
	kfree(aso);
	return err;
}

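/* Release the ASO state created by mlx5e_ipsec_aso_init(). */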
void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct device *pdev;

	aso = ipsec->aso;
	pdev = &mdev->pdev->dev;

	mlx5_aso_destroy(aso->aso);
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx), DMA_BIDIRECTIONAL);
	kfree(aso);
}