// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/mlx5/eswitch.h>
#include <linux/err.h>
#include "dr_types.h"

#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
	((dmn)->info.caps.dmn_type##_sw_owner || \
	 ((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
	  (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))

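/* Modify-header patterns and arguments can be used only when the device
 * works with the ConnectX-6 Dx STE format (or newer) and exposes the
 * modify-argument capability.
 */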
bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn)
{
	return dmn->info.caps.sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX &&
	       dmn->info.caps.support_modify_argument;
}

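/* Create the managers backing modify-header actions: a pattern manager for
 * the rewrite patterns and an argument manager for the per-action argument
 * objects. Nothing is created when the device lacks support.
 */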
static int dr_domain_init_modify_header_resources(struct mlx5dr_domain *dmn)
{
	if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
		return 0;

	dmn->ptrn_mgr = mlx5dr_ptrn_mgr_create(dmn);
	if (!dmn->ptrn_mgr) {
		mlx5dr_err(dmn, "Couldn't create ptrn_mgr\n");
		return -ENOMEM;
	}

	/* create argument pool */
	dmn->arg_mgr = mlx5dr_arg_mgr_create(dmn);
	if (!dmn->arg_mgr) {
		mlx5dr_err(dmn, "Couldn't create arg_mgr\n");
		goto free_modify_header_pattern;
	}

	return 0;

free_modify_header_pattern:
	mlx5dr_ptrn_mgr_destroy(dmn->ptrn_mgr);
	return -ENOMEM;
}

static void dr_domain_destroy_modify_header_resources(struct mlx5dr_domain *dmn)
{
	if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
		return;

	mlx5dr_arg_mgr_destroy(dmn->arg_mgr);
	mlx5dr_ptrn_mgr_destroy(dmn->ptrn_mgr);
}

static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
	/* Per vport cached FW FT for checksum recalculation, this
	 * recalculation is needed due to a HW bug in STEv0.
	 */
	xa_init(&dmn->csum_fts_xa);
}

static void dr_domain_uninit_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
	unsigned long i;

	xa_for_each(&dmn->csum_fts_xa, i, recalc_cs_ft) {
		if (recalc_cs_ft)
			mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft);
	}

	xa_destroy(&dmn->csum_fts_xa);
}

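/* Return the RX ICM address of the checksum-recalculation flow table for the
 * given vport. The table is created on first use and cached in csum_fts_xa so
 * later lookups avoid the FW round trip.
 */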
int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
					u16 vport_num,
					u64 *rx_icm_addr)
{
	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
	int ret;

	recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num);
	if (!recalc_cs_ft) {
		/* Table hasn't been created yet */
		recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
		if (!recalc_cs_ft)
			return -EINVAL;

		ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num,
				      recalc_cs_ft, GFP_KERNEL));
		if (ret)
			return ret;
	}

	*rx_icm_addr = recalc_cs_ft->rx_icm_addr;

	return 0;
}

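/* Set up the domain's memory resources: kmem caches for ICM chunks and STE
 * hash tables, the STE and modify-action ICM pools, and the pool of send-info
 * descriptors used when posting writes on the send ring.
 */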
static int dr_domain_init_mem_resources(struct mlx5dr_domain *dmn)
{
	int ret;

	dmn->chunks_kmem_cache = kmem_cache_create("mlx5_dr_chunks",
						   sizeof(struct mlx5dr_icm_chunk), 0,
						   SLAB_HWCACHE_ALIGN, NULL);
	if (!dmn->chunks_kmem_cache) {
		mlx5dr_err(dmn, "Couldn't create chunks kmem_cache\n");
		return -ENOMEM;
	}

	dmn->htbls_kmem_cache = kmem_cache_create("mlx5_dr_htbls",
						  sizeof(struct mlx5dr_ste_htbl), 0,
						  SLAB_HWCACHE_ALIGN, NULL);
	if (!dmn->htbls_kmem_cache) {
		mlx5dr_err(dmn, "Couldn't create hash tables kmem_cache\n");
		ret = -ENOMEM;
		goto free_chunks_kmem_cache;
	}

	dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
	if (!dmn->ste_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get icm memory\n");
		ret = -ENOMEM;
		goto free_htbls_kmem_cache;
	}

	dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
	if (!dmn->action_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get action icm memory\n");
		ret = -ENOMEM;
		goto free_ste_icm_pool;
	}

	ret = mlx5dr_send_info_pool_create(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create send info pool\n");
		goto free_action_icm_pool;
	}

	return 0;

free_action_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
free_ste_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
free_htbls_kmem_cache:
	kmem_cache_destroy(dmn->htbls_kmem_cache);
free_chunks_kmem_cache:
	kmem_cache_destroy(dmn->chunks_kmem_cache);

	return ret;
}

static void dr_domain_uninit_mem_resources(struct mlx5dr_domain *dmn)
{
	mlx5dr_send_info_pool_destroy(dmn);
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
	kmem_cache_destroy(dmn->htbls_kmem_cache);
	kmem_cache_destroy(dmn->chunks_kmem_cache);
}

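/* Allocate everything the domain needs in order to program steering entries:
 * the STE context matching the device's STE format, a PD and UAR page, the
 * memory resources above, the modify-header managers, and finally the send
 * ring used to write STEs to ICM.
 */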
static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
	int ret;

	dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
	if (!dmn->ste_ctx) {
		mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
		return -EOPNOTSUPP;
	}

	ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
		return ret;
	}

	dmn->uar = mlx5_get_uars_page(dmn->mdev);
	if (IS_ERR(dmn->uar)) {
		mlx5dr_err(dmn, "Couldn't allocate UAR\n");
		ret = PTR_ERR(dmn->uar);
		goto clean_pd;
	}

	ret = dr_domain_init_mem_resources(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create domain memory resources\n");
		goto clean_uar;
	}

	ret = dr_domain_init_modify_header_resources(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create modify-header-resources\n");
		goto clean_mem_resources;
	}

	ret = mlx5dr_send_ring_alloc(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create send-ring\n");
		goto clean_modify_hdr;
	}

	return 0;

clean_modify_hdr:
	dr_domain_destroy_modify_header_resources(dmn);
clean_mem_resources:
	dr_domain_uninit_mem_resources(dmn);
clean_uar:
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
clean_pd:
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);

	return ret;
}

static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
	mlx5dr_send_ring_free(dmn, dmn->send_ring);
	dr_domain_destroy_modify_header_resources(dmn);
	dr_domain_uninit_mem_resources(dmn);
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}

static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn,
				       struct mlx5dr_cmd_vport_cap *uplink_vport)
{
	struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;

	uplink_vport->num = MLX5_VPORT_UPLINK;
	uplink_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
	uplink_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
	uplink_vport->vport_gvmi = 0;
	uplink_vport->vhca_gvmi = dmn->info.caps.gvmi;
}

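/* Query the RX/TX ICM addresses and the GVMI of a single vport from FW.
 * other_vport is set when vport_number refers to a vport other than the
 * caller's own.
 */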
static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
				 u16 vport_number,
				 bool other_vport,
				 struct mlx5dr_cmd_vport_cap *vport_caps)
{
	int ret;

	ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
						 other_vport,
						 vport_number,
						 &vport_caps->icm_address_rx,
						 &vport_caps->icm_address_tx);
	if (ret)
		return ret;

	ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
				    other_vport,
				    vport_number,
				    &vport_caps->vport_gvmi);
	if (ret)
		return ret;

	vport_caps->num = vport_number;
	vport_caps->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

static int dr_domain_query_esw_mgr(struct mlx5dr_domain *dmn)
{
	return dr_domain_query_vport(dmn, 0, false,
				     &dmn->info.caps.vports.esw_manager_caps);
}

static void dr_domain_query_uplink(struct mlx5dr_domain *dmn)
{
	dr_domain_fill_uplink_caps(dmn, &dmn->info.caps.vports.uplink_caps);
}

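/* Query the caps of a vport that is not yet cached and insert them into the
 * vports xarray. Returns ERR_PTR(-EBUSY) when another thread stored the caps
 * first, so the caller can simply reload them.
 */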
static struct mlx5dr_cmd_vport_cap *
dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
{
	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
	struct mlx5dr_cmd_vport_cap *vport_caps;
	int ret;

	vport_caps = kvzalloc(sizeof(*vport_caps), GFP_KERNEL);
	if (!vport_caps)
		return NULL;

	ret = dr_domain_query_vport(dmn, vport, true, vport_caps);
	if (ret) {
		kvfree(vport_caps);
		return NULL;
	}

	ret = xa_insert(&caps->vports.vports_caps_xa, vport,
			vport_caps, GFP_KERNEL);
	if (ret) {
		mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret);
		kvfree(vport_caps);
		if (ret == -EBUSY)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	return vport_caps;
}

static bool dr_domain_is_esw_mgr_vport(struct mlx5dr_domain *dmn, u16 vport)
{
	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;

	return (caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
	       (!caps->is_ecpf && vport == 0);
}

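/* Get the caps of a vport. The eswitch manager and uplink caps are queried at
 * domain creation; any other vport is queried lazily here and cached, with the
 * lookup retried if a concurrent thread won the insertion race.
 */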
struct mlx5dr_cmd_vport_cap *
mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
{
	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
	struct mlx5dr_cmd_vport_cap *vport_caps;

	if (dr_domain_is_esw_mgr_vport(dmn, vport))
		return &caps->vports.esw_manager_caps;

	if (vport == MLX5_VPORT_UPLINK)
		return &caps->vports.uplink_caps;

vport_load:
	vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);
	if (vport_caps)
		return vport_caps;

	vport_caps = dr_domain_add_vport_cap(dmn, vport);
	if (PTR_ERR(vport_caps) == -EBUSY)
		/* caps were already stored by another thread */
		goto vport_load;

	return vport_caps;
}

static void dr_domain_clear_vports(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_vport_cap *vport_caps;
	unsigned long i;

	xa_for_each(&dmn->info.caps.vports.vports_caps_xa, i, vport_caps) {
		vport_caps = xa_erase(&dmn->info.caps.vports.vports_caps_xa, i);
		kvfree(vport_caps);
	}
}

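/* Query the eswitch (FDB) caps: SW ownership of the FDB, the eswitch drop ICM
 * addresses, and the caps of the eswitch manager and uplink vports.
 */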
static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
				    struct mlx5dr_domain *dmn)
{
	int ret;

	if (!dmn->info.caps.eswitch_manager)
		return -EOPNOTSUPP;

	ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
	if (ret)
		return ret;

	dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
	dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
	dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
	dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;

	xa_init(&dmn->info.caps.vports.vports_caps_xa);

	/* Query eswitch manager and uplink vports only. Rest of the
	 * vports (vport 0, VFs and SFs) will be queried dynamically.
	 */

	ret = dr_domain_query_esw_mgr(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to query eswitch manager vport caps (err: %d)", ret);
		goto free_vports_caps_xa;
	}

	dr_domain_query_uplink(dmn);

	return 0;

free_vports_caps_xa:
	xa_destroy(&dmn->info.caps.vports.vports_caps_xa);

	return ret;
}

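/* Query device and eswitch caps and, per domain type, record whether SW
 * steering is supported along with the default and drop ICM addresses used on
 * the RX and TX sides.
 */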
static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
			       struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_vport_cap *vport_cap;
	int ret;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
		mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
		return -EOPNOTSUPP;
	}

	ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
	if (ret)
		return ret;

	ret = dr_domain_query_fdb_caps(mdev, dmn);
	if (ret)
		return ret;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
			return -ENOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
		dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
			return -ENOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
		dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		if (!dmn->info.caps.eswitch_manager)
			return -ENOTSUPP;

		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
			return -ENOTSUPP;

		dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
		dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
		vport_cap = &dmn->info.caps.vports.esw_manager_caps;

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
		dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
		break;
	default:
		mlx5dr_err(dmn, "Invalid domain\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
{
	dr_domain_clear_vports(dmn);
	xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
}

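/* Create a SW steering domain of the requested type: query caps, size the ICM
 * pools from the reported limits, allocate the steering resources and
 * initialize the per-vport checksum-recalculation table cache.
 */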
struct mlx5dr_domain *
mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
{
	struct mlx5dr_domain *dmn;
	int ret;

	if (type > MLX5DR_DOMAIN_TYPE_FDB)
		return NULL;

	dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
	if (!dmn)
		return NULL;

	dmn->mdev = mdev;
	dmn->type = type;
	refcount_set(&dmn->refcount, 1);
	mutex_init(&dmn->info.rx.mutex);
	mutex_init(&dmn->info.tx.mutex);
	xa_init(&dmn->definers_xa);
	xa_init(&dmn->peer_dmn_xa);

	if (dr_domain_caps_init(mdev, dmn)) {
		mlx5dr_err(dmn, "Failed init domain, no caps\n");
		goto def_xa_destroy;
	}

	dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
	dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
					    dmn->info.caps.log_icm_size);
	dmn->info.max_log_modify_hdr_pattern_icm_sz =
		min_t(u32, DR_CHUNK_SIZE_4K,
		      dmn->info.caps.log_modify_pattern_icm_size);

	if (!dmn->info.supp_sw_steering) {
		mlx5dr_err(dmn, "SW steering is not supported\n");
		goto uninit_caps;
	}

	/* Allocate resources */
	ret = dr_domain_init_resources(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed init domain resources\n");
		goto uninit_caps;
	}

	dr_domain_init_csum_recalc_fts(dmn);
	mlx5dr_dbg_init_dump(dmn);
	return dmn;

uninit_caps:
	dr_domain_caps_uninit(dmn);
def_xa_destroy:
	xa_destroy(&dmn->peer_dmn_xa);
	xa_destroy(&dmn->definers_xa);
	kfree(dmn);
	return NULL;
}

/* Assure synchronization of the device steering tables with updates made by SW
 * insertion.
 */
int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
{
	int ret = 0;

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
		mlx5dr_domain_lock(dmn);
		ret = mlx5dr_send_ring_force_drain(dmn);
		mlx5dr_domain_unlock(dmn);
		if (ret) {
			mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
				   flags, ret);
			return ret;
		}
	}

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
		ret = mlx5dr_cmd_sync_steering(dmn->mdev);

	return ret;
}

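/* Destroy a domain. The caller must hold the last reference; destruction first
 * syncs HW steering so the device no longer uses the resources about to be
 * freed.
 */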
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
	if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1))
		return -EBUSY;

	/* make sure resources are not used by the hardware */
	mlx5dr_cmd_sync_steering(dmn->mdev);
	mlx5dr_dbg_uninit_dump(dmn);
	dr_domain_uninit_csum_recalc_fts(dmn);
	dr_domain_uninit_resources(dmn);
	dr_domain_caps_uninit(dmn);
	xa_destroy(&dmn->peer_dmn_xa);
	xa_destroy(&dmn->definers_xa);
	mutex_destroy(&dmn->info.tx.mutex);
	mutex_destroy(&dmn->info.rx.mutex);
	kfree(dmn);
	return 0;
}

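/* Record peer_dmn as the peer domain for peer_vhca_id, dropping the reference
 * on any previously stored peer and taking one on the new peer. Passing a NULL
 * peer_dmn clears the entry.
 */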
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
			    struct mlx5dr_domain *peer_dmn,
			    u16 peer_vhca_id)
{
	struct mlx5dr_domain *peer;

	mlx5dr_domain_lock(dmn);

	peer = xa_load(&dmn->peer_dmn_xa, peer_vhca_id);
	if (peer)
		refcount_dec(&peer->refcount);

	WARN_ON(xa_err(xa_store(&dmn->peer_dmn_xa, peer_vhca_id, peer_dmn, GFP_KERNEL)));

	peer = xa_load(&dmn->peer_dmn_xa, peer_vhca_id);
	if (peer)
		refcount_inc(&peer->refcount);

	mlx5dr_domain_unlock(dmn);
}