--- main.c (3daee2e4b3568f0ed88b0598df96547fcf21cb9b)
+++ main.c (6910e3660d86c1a5654f742a40181d2c9154f26f)
@@ -1 +1 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /*
  * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
  * Copyright (c) 2020, Intel Corporation. All rights reserved.
  */

 #include <linux/debugfs.h>
 #include <linux/highmem.h>

[... 34 unchanged lines hidden ...]

@@ -43 +43 @@
 #include "restrack.h"
 #include "counters.h"
 #include "umr.h"
 #include <rdma/uverbs_std_types.h>
 #include <rdma/uverbs_ioctl.h>
 #include <rdma/mlx5_user_ioctl_verbs.h>
 #include <rdma/mlx5_user_ioctl_cmds.h>
 #include "macsec.h"
+#include "data_direct.h"

 #define UVERBS_MODULE_NAME mlx5_ib
 #include <rdma/uverbs_named_ioctl.h>

 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
 MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) IB driver");
 MODULE_LICENSE("Dual BSD/GPL");

[... 218 unchanged lines hidden ...]

@@ -277 +278 @@
                 u32 *native_port_num)
 {
     enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
                                                       ib_port_num);
     struct mlx5_core_dev *mdev = NULL;
     struct mlx5_ib_multiport_info *mpi;
     struct mlx5_ib_port *port;

+    if (ibdev->ib_dev.type == RDMA_DEVICE_TYPE_SMI) {
+        if (native_port_num)
+            *native_port_num = smi_to_native_portnum(ibdev,
+                                                     ib_port_num);
+        return ibdev->mdev;
+
+    }
+
     if (!mlx5_core_mp_enabled(ibdev->mdev) ||
         ll != IB_LINK_LAYER_ETHERNET) {
         if (native_port_num)
             *native_port_num = ib_port_num;
         return ibdev->mdev;
     }

     if (native_port_num)
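
Review note: for the new SMI (plane) sub-devices, this early-out folds a plane port number back to the parent's native port before any shared state is touched. smi_to_native_portnum() is defined outside this file and not shown in the diff; the standalone sketch below assumes a simple fan-out of num_plane plane ports per native port, and both the helper name and the formula are hypothetical.

    #include <stdio.h>

    /* Hypothetical sketch only: fold a 1-based plane-port number back to
     * its 1-based native port, assuming num_plane plane ports per native
     * port. The real smi_to_native_portnum() is not part of this hunk. */
    static unsigned int plane_to_native_port(unsigned int plane_port,
                                             unsigned int num_plane)
    {
        return (plane_port - 1) / num_plane + 1;
    }

    int main(void)
    {
        unsigned int p;

        /* with 2 planes per native port: plane ports 1,2 -> 1 and 3,4 -> 2 */
        for (p = 1; p <= 4; p++)
            printf("plane port %u -> native port %u\n",
                   p, plane_to_native_port(p, 2));
        return 0;
    }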

[... 205 unchanged lines hidden ...]

@@ -498 +507 @@
     }

     /* Possible bad flows are checked before filling out props so in case
      * of an error it will still be zeroed out.
      * Use native port in case of reps
      */
     if (dev->is_rep)
         err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
-                                   1);
+                                   1, 0);
     else
         err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
-                                   mdev_port_num);
+                                   mdev_port_num, 0);
     if (err)
         goto out;
     ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
     eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);

     props->active_width = IB_WIDTH_4X;
     props->active_speed = IB_SPEED_QDR;


[... 810 unchanged lines hidden ...]

@@ -1328 +1337 @@
 }

 static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port,
                                struct ib_port_attr *props)
 {
     struct mlx5_ib_dev *dev = to_mdev(ibdev);
     struct mlx5_core_dev *mdev = dev->mdev;
     struct mlx5_hca_vport_context *rep;
+    u8 vl_hw_cap, plane_index = 0;
     u16 max_mtu;
     u16 oper_mtu;
     int err;
     u16 ib_link_width_oper;
-    u8 vl_hw_cap;

     rep = kzalloc(sizeof(*rep), GFP_KERNEL);
     if (!rep) {
         err = -ENOMEM;
         goto out;
     }

     /* props being zeroed by the caller, avoid zeroing it here */

+    if (ibdev->type == RDMA_DEVICE_TYPE_SMI) {
+        plane_index = port;
+        port = smi_to_native_portnum(dev, port);
+    }
+
     err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
     if (err)
         goto out;

     props->lid = rep->lid;
     props->lmc = rep->lmc;
     props->sm_lid = rep->sm_lid;
     props->sm_sl = rep->sm_sl;
     props->state = rep->vport_state;
     props->phys_state = rep->port_physical_state;
+
     props->port_cap_flags = rep->cap_mask1;
+    if (dev->num_plane) {
+        props->port_cap_flags |= IB_PORT_SM_DISABLED;
+        props->port_cap_flags &= ~IB_PORT_SM;
+    } else if (ibdev->type == RDMA_DEVICE_TYPE_SMI)
+        props->port_cap_flags &= ~IB_PORT_CM_SUP;
+
     props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
     props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
     props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
     props->bad_pkey_cntr = rep->pkey_violation_counter;
     props->qkey_viol_cntr = rep->qkey_violation_counter;
     props->subnet_timeout = rep->subnet_timeout;
     props->init_type_reply = rep->init_type_reply;

     if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
         props->port_cap_flags2 = rep->cap_mask2;

     err = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper,
-                                  &props->active_speed, port);
+                                  &props->active_speed, port, plane_index);
     if (err)
         goto out;

     translate_active_width(ibdev, ib_link_width_oper, &props->active_width);

     mlx5_query_port_max_mtu(mdev, &max_mtu, port);

     props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
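
Review note: the query paths become plane-aware in two steps: mlx5_query_port_ptys() (hunk above) and mlx5_query_ib_port_oper() (this hunk) each gain a trailing plane-index argument, with 0 preserving the old native-port behavior, and the port capability mask is adjusted for the new device kinds. The standalone helper below merely restates the cap-mask logic of this hunk; the flag values are placeholders, not the real IB_PORT_* encodings.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder bit values; the real IB_PORT_* flags live in rdma headers. */
    #define PORT_SM          (UINT32_C(1) << 1)
    #define PORT_SM_DISABLED (UINT32_C(1) << 10)
    #define PORT_CM_SUP      (UINT32_C(1) << 16)

    /* Restates the hunk: a multi-plane parent advertises SM-disabled and
     * no SM; a dedicated SMI plane device drops CM support. */
    static uint32_t adjust_port_caps(uint32_t caps, bool num_plane, bool is_smi)
    {
        if (num_plane) {
            caps |= PORT_SM_DISABLED;
            caps &= ~PORT_SM;
        } else if (is_smi) {
            caps &= ~PORT_CM_SUP;
        }
        return caps;
    }

    int main(void)
    {
        uint32_t caps = PORT_SM | PORT_CM_SUP;

        printf("plane parent: %#x\n", (unsigned)adjust_port_caps(caps, true, false));
        printf("SMI device:   %#x\n", (unsigned)adjust_port_caps(caps, false, true));
        return 0;
    }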

[... 423 unchanged lines hidden ...]

@@ -1805 +1826 @@

     if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
         resp->dump_fill_mkey = dev->mkeys.dump_fill_mkey;
         resp->comp_mask |=
             MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
     }

     resp->qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
-    if (dev->wc_support)
+    if (mlx5_wc_support_get(dev->mdev))
         resp->bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev,
                                               log_bf_reg_size);
     resp->cache_line_size = cache_line_size();
     resp->max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
     resp->max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
     resp->max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
     resp->max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
     resp->max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

[... 510 unchanged lines hidden ...]

@@ -2332 +2353 @@
     struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
     unsigned long command;
     phys_addr_t pfn;

     command = get_command(vma->vm_pgoff);
     switch (command) {
     case MLX5_IB_MMAP_WC_PAGE:
     case MLX5_IB_MMAP_ALLOC_WC:
-        if (!dev->wc_support)
+        if (!mlx5_wc_support_get(dev->mdev))
             return -EPERM;
         fallthrough;
     case MLX5_IB_MMAP_NC_PAGE:
     case MLX5_IB_MMAP_REGULAR_PAGE:
         return uar_mmap(dev, command, vma, context);

     case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
         return -ENOSYS;
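
Review note: the cached dev->wc_support flag is replaced everywhere by a direct mlx5_wc_support_get(dev->mdev) call; accordingly, the enable_driver()/mlx5_ib_test_wc() probe at registration time is deleted further down. A common way to keep such a getter cheap is to run the probe once on first use; the sketch below shows that pattern in standalone form and is not the actual mlx5 core implementation.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_once_t wc_once = PTHREAD_ONCE_INIT;
    static bool wc_supported;

    /* Stand-in for the expensive write-combining test. */
    static void wc_probe(void)
    {
        wc_supported = true; /* pretend the probe succeeded */
    }

    /* Sketch of a first-use getter; the real mlx5_wc_support_get() lives
     * in the mlx5 core and its internals are not shown in this diff. */
    static bool wc_support_get(void)
    {
        pthread_once(&wc_once, wc_probe);
        return wc_supported;
    }

    int main(void)
    {
        printf("WC %s\n", wc_support_get() ? "supported" : "not supported");
        return 0;
    }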

[... 422 unchanged lines hidden ...]

@@ -2771 +2792 @@
     work->is_slave = true;
     work->param = param;
     work->event = event;
     queue_work(mlx5_ib_event_wq, &work->work);

     return NOTIFY_OK;
 }

+static int mlx5_ib_get_plane_num(struct mlx5_core_dev *mdev, u8 *num_plane)
+{
+    struct mlx5_hca_vport_context vport_ctx;
+    int err;
+
+    *num_plane = 0;
+    if (!MLX5_CAP_GEN(mdev, ib_virt))
+        return 0;
+
+    err = mlx5_query_hca_vport_context(mdev, 0, 1, 0, &vport_ctx);
+    if (err)
+        return err;
+
+    *num_plane = vport_ctx.num_plane;
+    return 0;
+}
+
 static int set_has_smi_cap(struct mlx5_ib_dev *dev)
 {
     struct mlx5_hca_vport_context vport_ctx;
     int err;
     int port;

     if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
         return 0;

     for (port = 1; port <= dev->num_ports; port++) {
-        if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
+        if (dev->num_plane) {
+            dev->port_caps[port - 1].has_smi = false;
+            continue;
+        } else if (!MLX5_CAP_GEN(dev->mdev, ib_virt) ||
+                   dev->ib_dev.type == RDMA_DEVICE_TYPE_SMI) {
             dev->port_caps[port - 1].has_smi = true;
             continue;
         }
+
         err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
                                            &vport_ctx);
         if (err) {
             mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
                         port, err);
             return err;
         }
         dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
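
Review note: SMI capability is now a three-way decision per port. The standalone helper below restates the reworked loop body, nothing more.

    #include <stdbool.h>

    /* Same decision as the reworked set_has_smi_cap() loop body:
     *  - any port of a multi-plane parent exposes no SMI;
     *  - without ib_virt, or on the dedicated SMI plane device, SMI is
     *    assumed present;
     *  - otherwise the answer comes from the queried vport context.
     */
    enum smi_source { SMI_FALSE, SMI_TRUE, SMI_FROM_VPORT_CTX };

    static enum smi_source classify_has_smi(bool num_plane, bool ib_virt,
                                            bool is_smi_dev)
    {
        if (num_plane)
            return SMI_FALSE;
        if (!ib_virt || is_smi_dev)
            return SMI_TRUE;
        return SMI_FROM_VPORT_CTX;
    }

    int main(void)
    {
        return classify_has_smi(true, true, false) == SMI_FALSE ? 0 : 1;
    }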

[... 17 unchanged lines hidden ...]

@@ -2818 +2861 @@
         return MLX5_FENCE_MODE_NONE;
     case MLX5_CAP_UMR_FENCE_SMALL:
         return MLX5_FENCE_MODE_INITIATOR_SMALL;
     default:
         return MLX5_FENCE_MODE_STRONG_ORDERING;
     }
 }

-static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
+int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev)
 {
     struct mlx5_ib_resources *devr = &dev->devr;
-    struct ib_srq_init_attr attr;
-    struct ib_device *ibdev;
     struct ib_cq_init_attr cq_attr = {.cqe = 1};
-    int port;
+    struct ib_device *ibdev;
+    struct ib_pd *pd;
+    struct ib_cq *cq;
     int ret = 0;

-    ibdev = &dev->ib_dev;
-
-    if (!MLX5_CAP_GEN(dev->mdev, xrc))
-        return -EOPNOTSUPP;
-
-    devr->p0 = ib_alloc_pd(ibdev, 0);
-    if (IS_ERR(devr->p0))
-        return PTR_ERR(devr->p0);
-
-    devr->c0 = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
-    if (IS_ERR(devr->c0)) {
-        ret = PTR_ERR(devr->c0);
-        goto error1;
+    /*
+     * devr->c0 is set once, never changed until device unload.
+     * Avoid taking the mutex if initialization is already done.
+     */
+    if (devr->c0)
+        return 0;
+
+    mutex_lock(&devr->cq_lock);
+    if (devr->c0)
+        goto unlock;
+
+    ibdev = &dev->ib_dev;
+    pd = ib_alloc_pd(ibdev, 0);
+    if (IS_ERR(pd)) {
+        ret = PTR_ERR(pd);
+        mlx5_ib_err(dev, "Couldn't allocate PD for res init, err=%d\n", ret);
+        goto unlock;
     }

-    ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
-    if (ret)
-        goto error2;
+    cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
+    if (IS_ERR(cq)) {
+        ret = PTR_ERR(cq);
+        mlx5_ib_err(dev, "Couldn't create CQ for res init, err=%d\n", ret);
+        ib_dealloc_pd(pd);
+        goto unlock;
+    }

-    ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
+    devr->p0 = pd;
+    devr->c0 = cq;
+
+unlock:
+    mutex_unlock(&devr->cq_lock);
+    return ret;
+}
+
+int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev)
+{
+    struct mlx5_ib_resources *devr = &dev->devr;
+    struct ib_srq_init_attr attr;
+    struct ib_srq *s0, *s1;
+    int ret = 0;
+
+    /*
+     * devr->s1 is set once, never changed until device unload.
+     * Avoid taking the mutex if initialization is already done.
+     */
+    if (devr->s1)
+        return 0;
+
+    mutex_lock(&devr->srq_lock);
+    if (devr->s1)
+        goto unlock;
+
+    ret = mlx5_ib_dev_res_cq_init(dev);
     if (ret)
-        goto error3;
+        goto unlock;

     memset(&attr, 0, sizeof(attr));
     attr.attr.max_sge = 1;
     attr.attr.max_wr = 1;
     attr.srq_type = IB_SRQT_XRC;
     attr.ext.cq = devr->c0;

-    devr->s0 = ib_create_srq(devr->p0, &attr);
-    if (IS_ERR(devr->s0)) {
-        ret = PTR_ERR(devr->s0);
-        goto err_create;
+    s0 = ib_create_srq(devr->p0, &attr);
+    if (IS_ERR(s0)) {
+        ret = PTR_ERR(s0);
+        mlx5_ib_err(dev, "Couldn't create SRQ 0 for res init, err=%d\n", ret);
+        goto unlock;
     }

     memset(&attr, 0, sizeof(attr));
     attr.attr.max_sge = 1;
     attr.attr.max_wr = 1;
     attr.srq_type = IB_SRQT_BASIC;

-    devr->s1 = ib_create_srq(devr->p0, &attr);
-    if (IS_ERR(devr->s1)) {
-        ret = PTR_ERR(devr->s1);
-        goto error6;
+    s1 = ib_create_srq(devr->p0, &attr);
+    if (IS_ERR(s1)) {
+        ret = PTR_ERR(s1);
+        mlx5_ib_err(dev, "Couldn't create SRQ 1 for res init, err=%d\n", ret);
+        ib_destroy_srq(s0);
     }

+    devr->s0 = s0;
+    devr->s1 = s1;
+
+unlock:
+    mutex_unlock(&devr->srq_lock);
+    return ret;
+}
+
+static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
+{
+    struct mlx5_ib_resources *devr = &dev->devr;
+    int port;
+    int ret;
+
+    if (!MLX5_CAP_GEN(dev->mdev, xrc))
+        return -EOPNOTSUPP;
+
+    ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
+    if (ret)
+        return ret;
+
+    ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
+    if (ret) {
+        mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
+        return ret;
+    }
+
     for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
         INIT_WORK(&devr->ports[port].pkey_change_work,
                   pkey_change_handler);

-    return 0;
+    mutex_init(&devr->cq_lock);
+    mutex_init(&devr->srq_lock);

-error6:
-    ib_destroy_srq(devr->s0);
-err_create:
-    mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
-error3:
-    mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
-error2:
-    ib_destroy_cq(devr->c0);
-error1:
-    ib_dealloc_pd(devr->p0);
-    return ret;
+    return 0;
 }

 static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
 {
     struct mlx5_ib_resources *devr = &dev->devr;
     int port;

     /*
      * Make sure no change P_Key work items are still executing.
      *
      * At this stage, the mlx5_ib_event should be unregistered
      * and it ensures that no new works are added.
      */
     for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
         cancel_work_sync(&devr->ports[port].pkey_change_work);

-    ib_destroy_srq(devr->s1);
-    ib_destroy_srq(devr->s0);
+    /* After s0/s1 init, they are not unset during the device lifetime. */
+    if (devr->s1) {
+        ib_destroy_srq(devr->s1);
+        ib_destroy_srq(devr->s0);
+    }
     mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
     mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
-    ib_destroy_cq(devr->c0);
-    ib_dealloc_pd(devr->p0);
+    /* After p0/c0 init, they are not unset during the device lifetime. */
+    if (devr->c0) {
+        ib_destroy_cq(devr->c0);
+        ib_dealloc_pd(devr->p0);
+    }
+    mutex_destroy(&devr->cq_lock);
+    mutex_destroy(&devr->srq_lock);
 }

 static u32 get_core_cap_flags(struct ib_device *ibdev,
                               struct mlx5_hca_vport_context *rep)
 {
     struct mlx5_ib_dev *dev = to_mdev(ibdev);
     enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
     u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
     u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
     bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
     u32 ret = 0;

     if (rep->grh_required)
         ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;

+    if (dev->num_plane)
+        return ret | RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_IB_MAD |
+            RDMA_CORE_CAP_IB_CM | RDMA_CORE_CAP_IB_SA |
+            RDMA_CORE_CAP_AF_IB;
+    else if (ibdev->type == RDMA_DEVICE_TYPE_SMI)
+        return ret | RDMA_CORE_CAP_IB_MAD | RDMA_CORE_CAP_IB_SMI;
+
     if (ll == IB_LINK_LAYER_INFINIBAND)
         return ret | RDMA_CORE_PORT_IBA_IB;

     if (raw_support)
         ret |= RDMA_CORE_PORT_RAW_PACKET;

     if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
         return ret;
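
Review note: the resource split above turns eager, probe-time creation of p0/c0 and s0/s1 into on-demand creation guarded by double-checked locking: a lock-free fast path when the pointer is already published, then a recheck under the mutex; only the XRCDs stay eager in mlx5_ib_dev_res_init(). The same locking shape, reduced to a runnable standalone example (a fully portable userspace version would use an atomic load for the unlocked read; the kernel code relies on its own memory model):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t res_lock = PTHREAD_MUTEX_INITIALIZER;
    static int *resource; /* stands in for devr->p0/c0/s0/s1 */

    /* Double-checked lazy creation, same shape as mlx5_ib_dev_res_cq_init():
     * cheap early-out when already created, recheck under the mutex, and
     * publish only a fully constructed object. */
    static int resource_get(void)
    {
        int *r;
        int ret = 0;

        if (resource)
            return 0;

        pthread_mutex_lock(&res_lock);
        if (resource)
            goto unlock;

        r = malloc(sizeof(*r));
        if (!r) {
            ret = -1;
            goto unlock;
        }
        *r = 42;
        resource = r;
    unlock:
        pthread_mutex_unlock(&res_lock);
        return ret;
    }

    int main(void)
    {
        if (resource_get() || resource_get())
            return 1;
        printf("resource = %d\n", *resource);
        free(resource);
        return 0;
    }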

[... 19 unchanged lines hidden ...]

@@ -2962 +3075 @@
     struct mlx5_hca_vport_context rep = {0};
     int err;

     err = ib_query_port(ibdev, port_num, &attr);
     if (err)
         return err;

     if (ll == IB_LINK_LAYER_INFINIBAND) {
+        if (ibdev->type == RDMA_DEVICE_TYPE_SMI)
+            port_num = smi_to_native_portnum(dev, port_num);
+
         err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
                                            &rep);
         if (err)
             return err;
     }

     immutable->pkey_tbl_len = attr.pkey_tbl_len;
     immutable->gid_tbl_len = attr.gid_tbl_len;

[... 629 unchanged lines hidden ...]

@@ -3607 +3723 @@
                                MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE);
     if (err)
         return err;

     if (alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF &&
         alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC)
         return -EOPNOTSUPP;

-    if (!to_mdev(c->ibucontext.device)->wc_support &&
+    if (!mlx5_wc_support_get(to_mdev(c->ibucontext.device)->mdev) &&
         alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
         return -EOPNOTSUPP;

     entry = alloc_uar_entry(c, alloc_type);
     if (IS_ERR(entry))
         return PTR_ERR(entry);

     mmap_offset = mlx5_entry_to_mmap_offset(entry);

[... 58 unchanged lines hidden ...]

@@ -3682 +3798 @@
                 UA_MANDATORY));

 static const struct uapi_definition mlx5_ib_defs[] = {
     UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
     UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
     UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
     UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
     UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
+    UAPI_DEF_CHAIN(mlx5_ib_create_cq_defs),

     UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
     UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
                                   UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
     UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR),
     {}
 };


[... 47 unchanged lines hidden ...]

@@ -3745 +3862 @@
         goto err_mp;

     if (mlx5_use_mad_ifc(dev))
         get_ext_port_caps(dev);

     dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_max(mdev);

     mutex_init(&dev->cap_mask_mutex);
+    mutex_init(&dev->data_direct_lock);
     INIT_LIST_HEAD(&dev->qp_list);
     spin_lock_init(&dev->reset_flow_resource_lock);
     xa_init(&dev->odp_mkeys);
     xa_init(&dev->sig_mrs);
     atomic_set(&dev->mkey_var, 0);

     spin_lock_init(&dev->dm.lock);
     dev->dm.dev = mdev;
     return 0;
 err_mp:
     mlx5_ib_cleanup_multiport_master(dev);
 err:
     mlx5r_macsec_dealloc_gids(dev);
     return err;
 }

-static int mlx5_ib_enable_driver(struct ib_device *dev)
-{
-    struct mlx5_ib_dev *mdev = to_mdev(dev);
-    int ret;
-
-    ret = mlx5_ib_test_wc(mdev);
-    mlx5_ib_dbg(mdev, "Write-Combining %s",
-                mdev->wc_support ? "supported" : "not supported");
-
-    return ret;
-}
+static struct ib_device *mlx5_ib_add_sub_dev(struct ib_device *parent,
+                                             enum rdma_nl_dev_type type,
+                                             const char *name);
+static void mlx5_ib_del_sub_dev(struct ib_device *sub_dev);

 static const struct ib_device_ops mlx5_ib_dev_ops = {
     .owner = THIS_MODULE,
     .driver_id = RDMA_DRIVER_MLX5,
     .uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION,

     .add_gid = mlx5_ib_add_gid,
+    .add_sub_dev = mlx5_ib_add_sub_dev,
     .alloc_mr = mlx5_ib_alloc_mr,
     .alloc_mr_integrity = mlx5_ib_alloc_mr_integrity,
     .alloc_pd = mlx5_ib_alloc_pd,
     .alloc_ucontext = mlx5_ib_alloc_ucontext,
     .attach_mcast = mlx5_ib_mcg_attach,
     .check_mr_status = mlx5_ib_check_mr_status,
     .create_ah = mlx5_ib_create_ah,
     .create_cq = mlx5_ib_create_cq,
     .create_qp = mlx5_ib_create_qp,
     .create_srq = mlx5_ib_create_srq,
     .create_user_ah = mlx5_ib_create_ah,
     .dealloc_pd = mlx5_ib_dealloc_pd,
     .dealloc_ucontext = mlx5_ib_dealloc_ucontext,
     .del_gid = mlx5_ib_del_gid,
+    .del_sub_dev = mlx5_ib_del_sub_dev,
     .dereg_mr = mlx5_ib_dereg_mr,
     .destroy_ah = mlx5_ib_destroy_ah,
     .destroy_cq = mlx5_ib_destroy_cq,
     .destroy_qp = mlx5_ib_destroy_qp,
     .destroy_srq = mlx5_ib_destroy_srq,
     .detach_mcast = mlx5_ib_mcg_detach,
     .disassociate_ucontext = mlx5_ib_disassociate_ucontext,
     .drain_rq = mlx5_ib_drain_rq,
     .drain_sq = mlx5_ib_drain_sq,
     .device_group = &mlx5_attr_group,
-    .enable_driver = mlx5_ib_enable_driver,
     .get_dev_fw_str = get_dev_fw_str,
     .get_dma_mr = mlx5_ib_get_dma_mr,
     .get_link_layer = mlx5_ib_port_link_layer,
     .map_mr_sg = mlx5_ib_map_mr_sg,
     .map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
     .mmap = mlx5_ib_mmap,
     .mmap_free = mlx5_ib_mmap_free,
     .modify_cq = mlx5_ib_modify_cq,

[... 263 unchanged lines hidden ...]

@@ -4083 +4195 @@
     mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
     mlx5_free_bfreg(dev->mdev, &dev->bfreg);
 }

 static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
 {
     const char *name;

-    if (!mlx5_lag_is_active(dev->mdev))
+    if (dev->sub_dev_name) {
+        name = dev->sub_dev_name;
+        ib_mark_name_assigned_by_user(&dev->ib_dev);
+    } else if (!mlx5_lag_is_active(dev->mdev))
         name = "mlx5_%d";
     else
         name = "mlx5_bond_%d";
     return ib_register_device(&dev->ib_dev, name, &dev->mdev->pdev->dev);
 }

 static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
 {
     mlx5_mkey_cache_cleanup(dev);
     mlx5r_umr_resource_cleanup(dev);
+    mlx5r_umr_cleanup(dev);
 }

 static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
 {
     ib_unregister_device(&dev->ib_dev);
 }

 static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 {
     int ret;

-    ret = mlx5r_umr_resource_init(dev);
+    ret = mlx5r_umr_init(dev);
     if (ret)
         return ret;

     ret = mlx5_mkey_cache_init(dev);
     if (ret)
         mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
     return ret;
 }
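
Review note: registration now has three naming paths: a user-supplied sub-device name, marked via ib_mark_name_assigned_by_user() so the core knows it was not auto-generated, or one of the existing "mlx5_%d"/"mlx5_bond_%d" templates whose index the registration core fills in. The snippet below only illustrates template expansion; the trivial index allocator is a stand-in, not the IB core's.

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in illustration: expand a "mlx5_%d"-style template the way a
     * registration core might, with a trivial global index allocator. */
    static int next_index;

    static void expand_devname(char *buf, size_t len, const char *tmpl)
    {
        snprintf(buf, len, tmpl, next_index++);
    }

    int main(void)
    {
        char name[32];

        expand_devname(name, sizeof(name), "mlx5_%d");
        printf("%s\n", name);  /* mlx5_0 */
        expand_devname(name, sizeof(name), "mlx5_bond_%d");
        printf("%s\n", name);  /* mlx5_bond_1 */
        return 0;
    }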

[... 52 unchanged lines hidden ...]

@@ -4174 +4290 @@
 }

 static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
 {
     mlx5r_macsec_event_unregister(dev);
     mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
 }

+void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev,
+                              struct mlx5_data_direct_dev *dev)
+{
+    mutex_lock(&ibdev->data_direct_lock);
+    ibdev->data_direct_dev = dev;
+    mutex_unlock(&ibdev->data_direct_lock);
+}
+
+void mlx5_ib_data_direct_unbind(struct mlx5_ib_dev *ibdev)
+{
+    mutex_lock(&ibdev->data_direct_lock);
+    ibdev->data_direct_dev = NULL;
+    mutex_unlock(&ibdev->data_direct_lock);
+}
+
 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
                       const struct mlx5_ib_profile *profile,
                       int stage)
 {
     dev->ib_active = false;

     /* Number of stages to cleanup */
     while (stage) {
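
Review note: the new data-direct hooks publish and clear a device pointer under data_direct_lock (initialized in the INIT stage above), so a reader holding the same mutex sees either a fully bound device or NULL. The contract in standalone form:

    #include <pthread.h>
    #include <stdio.h>

    struct dd_dev { int id; };

    static pthread_mutex_t data_direct_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct dd_dev *data_direct_dev;

    /* Mirror of mlx5_ib_data_direct_bind()/unbind(): writers swap the
     * pointer under the lock; readers must hold the same lock. */
    static void dd_bind(struct dd_dev *dev)
    {
        pthread_mutex_lock(&data_direct_lock);
        data_direct_dev = dev;
        pthread_mutex_unlock(&data_direct_lock);
    }

    static void dd_unbind(void)
    {
        dd_bind(NULL);
    }

    int main(void)
    {
        struct dd_dev d = { .id = 1 };

        dd_bind(&d);
        pthread_mutex_lock(&data_direct_lock);
        printf("bound: %d\n", data_direct_dev ? data_direct_dev->id : 0);
        pthread_mutex_unlock(&data_direct_lock);
        dd_unbind();
        return 0;
    }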

[... 153 unchanged lines hidden ...]

@@ -4343 +4474 @@
     STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
                  mlx5_ib_stage_delay_drop_init,
                  mlx5_ib_stage_delay_drop_cleanup),
     STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
                  mlx5_ib_restrack_init,
                  NULL),
 };

+static const struct mlx5_ib_profile plane_profile = {
+    STAGE_CREATE(MLX5_IB_STAGE_INIT,
+                 mlx5_ib_stage_init_init,
+                 mlx5_ib_stage_init_cleanup),
+    STAGE_CREATE(MLX5_IB_STAGE_CAPS,
+                 mlx5_ib_stage_caps_init,
+                 mlx5_ib_stage_caps_cleanup),
+    STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
+                 mlx5_ib_stage_non_default_cb,
+                 NULL),
+    STAGE_CREATE(MLX5_IB_STAGE_QP,
+                 mlx5_init_qp_table,
+                 mlx5_cleanup_qp_table),
+    STAGE_CREATE(MLX5_IB_STAGE_SRQ,
+                 mlx5_init_srq_table,
+                 mlx5_cleanup_srq_table),
+    STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
+                 mlx5_ib_dev_res_init,
+                 mlx5_ib_dev_res_cleanup),
+    STAGE_CREATE(MLX5_IB_STAGE_BFREG,
+                 mlx5_ib_stage_bfrag_init,
+                 mlx5_ib_stage_bfrag_cleanup),
+    STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
+                 mlx5_ib_stage_ib_reg_init,
+                 mlx5_ib_stage_ib_reg_cleanup),
+};
+
+static struct ib_device *mlx5_ib_add_sub_dev(struct ib_device *parent,
+                                             enum rdma_nl_dev_type type,
+                                             const char *name)
+{
+    struct mlx5_ib_dev *mparent = to_mdev(parent), *mplane;
+    enum rdma_link_layer ll;
+    int ret;
+
+    if (mparent->smi_dev)
+        return ERR_PTR(-EEXIST);
+
+    ll = mlx5_port_type_cap_to_rdma_ll(MLX5_CAP_GEN(mparent->mdev,
+                                                    port_type));
+    if (type != RDMA_DEVICE_TYPE_SMI || !mparent->num_plane ||
+        ll != IB_LINK_LAYER_INFINIBAND ||
+        !MLX5_CAP_GEN_2(mparent->mdev, multiplane_qp_ud))
+        return ERR_PTR(-EOPNOTSUPP);
+
+    mplane = ib_alloc_device(mlx5_ib_dev, ib_dev);
+    if (!mplane)
+        return ERR_PTR(-ENOMEM);
+
+    mplane->port = kcalloc(mparent->num_plane * mparent->num_ports,
+                           sizeof(*mplane->port), GFP_KERNEL);
+    if (!mplane->port) {
+        ret = -ENOMEM;
+        goto fail_kcalloc;
+    }
+
+    mplane->ib_dev.type = type;
+    mplane->mdev = mparent->mdev;
+    mplane->num_ports = mparent->num_plane;
+    mplane->sub_dev_name = name;
+
+    ret = __mlx5_ib_add(mplane, &plane_profile);
+    if (ret)
+        goto fail_ib_add;
+
+    mparent->smi_dev = mplane;
+    return &mplane->ib_dev;
+
+fail_ib_add:
+    kfree(mplane->port);
+fail_kcalloc:
+    ib_dealloc_device(&mplane->ib_dev);
+    return ERR_PTR(ret);
+}
+
+static void mlx5_ib_del_sub_dev(struct ib_device *sub_dev)
+{
+    struct mlx5_ib_dev *mdev = to_mdev(sub_dev);
+
+    to_mdev(sub_dev->parent)->smi_dev = NULL;
+    __mlx5_ib_remove(mdev, mdev->profile, MLX5_IB_STAGE_MAX);
+}
+
 static int mlx5r_mp_probe(struct auxiliary_device *adev,
                           const struct auxiliary_device_id *id)
 {
     struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
     struct mlx5_core_dev *mdev = idev->mdev;
     struct mlx5_ib_multiport_info *mpi;
     struct mlx5_ib_dev *dev;
     bool bound = false;
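
Review note: a parent device carries at most one SMI sub-device; a second add fails with -EEXIST, and deletion clears the parent's slot before unwinding the sub-device through the same staged-profile machinery (plane_profile is a deliberate subset of pf_profile, with no ODP, counters, delay-drop, and similar stages). The slot semantics in miniature:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct parent { struct sub *smi_dev; };
    struct sub { struct parent *parent; };

    /* Miniature of mlx5_ib_add_sub_dev()/del_sub_dev(): one slot per
     * parent, EEXIST when occupied, slot cleared on deletion. */
    static struct sub *add_sub_dev(struct parent *p)
    {
        struct sub *s;

        if (p->smi_dev) {
            errno = EEXIST;
            return NULL;
        }
        s = calloc(1, sizeof(*s));
        if (!s) {
            errno = ENOMEM;
            return NULL;
        }
        s->parent = p;
        p->smi_dev = s;
        return s;
    }

    static void del_sub_dev(struct sub *s)
    {
        s->parent->smi_dev = NULL;
        free(s);
    }

    int main(void)
    {
        struct parent p = { 0 };
        struct sub *s = add_sub_dev(&p);

        printf("first add: %s\n", s ? "ok" : "failed");
        printf("second add: %s\n", add_sub_dev(&p) ? "ok" : "EEXIST");
        del_sub_dev(s);
        return 0;
    }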

[... 61 unchanged lines hidden ...]

@@ -4420 +4634 @@
     port_type_cap = MLX5_CAP_GEN(mdev, port_type);
     ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

     num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
                     MLX5_CAP_GEN(mdev, num_vhca_ports));
     dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
     if (!dev)
         return -ENOMEM;

+    if (ll == IB_LINK_LAYER_INFINIBAND) {
+        ret = mlx5_ib_get_plane_num(mdev, &dev->num_plane);
+        if (ret)
+            goto fail;
+    }
+
     dev->port = kcalloc(num_ports, sizeof(*dev->port),
                         GFP_KERNEL);
     if (!dev->port) {
-        ib_dealloc_device(&dev->ib_dev);
-        return -ENOMEM;
+        ret = -ENOMEM;
+        goto fail;
     }

     dev->mdev = mdev;
     dev->num_ports = num_ports;

     if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
         profile = &raw_eth_profile;
     else
         profile = &pf_profile;

     ret = __mlx5_ib_add(dev, profile);
-    if (ret) {
-        kfree(dev->port);
-        ib_dealloc_device(&dev->ib_dev);
-        return ret;
-    }
+    if (ret)
+        goto fail_ib_add;

     auxiliary_set_drvdata(adev, dev);
     return 0;
+
+fail_ib_add:
+    kfree(dev->port);
+fail:
+    ib_dealloc_device(&dev->ib_dev);
+    return ret;
 }

 static void mlx5r_remove(struct auxiliary_device *adev)
 {
     struct mlx5_ib_dev *dev;

     dev = auxiliary_get_drvdata(adev);
     __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
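
Review note: the probe path switches from per-branch cleanup to the kernel's usual goto ladder, which the new mlx5_ib_get_plane_num() step made worthwhile: each failure jumps to the label that undoes exactly the steps completed so far. The shape in standalone form:

    #include <stdio.h>
    #include <stdlib.h>

    /* The error-ladder shape used by the reworked mlx5r_probe(): later
     * failures jump to labels that unwind earlier successes in reverse
     * order, giving a single exit path per resource. */
    static int probe(void)
    {
        int ret;
        void *dev = malloc(16);          /* ib_alloc_device() */
        if (!dev)
            return -1;

        void *port = malloc(16);         /* kcalloc(num_ports, ...) */
        if (!port) {
            ret = -1;
            goto fail;
        }

        ret = 0;                         /* __mlx5_ib_add() succeeding */
        if (ret)
            goto fail_ib_add;

        free(port);                      /* keep this sketch leak-free; */
        free(dev);                       /* the real driver keeps both  */
        return 0;

    fail_ib_add:
        free(port);
    fail:
        free(dev);
        return ret;
    }

    int main(void)
    {
        return probe();
    }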

[... 43 unchanged lines hidden ...]

@@ -4503 +4727 @@
     ret = mlx5_ib_qp_event_init();
     if (ret)
         goto qp_event_err;

     mlx5_ib_odp_init();
     ret = mlx5r_rep_init();
     if (ret)
         goto rep_err;
+    ret = mlx5_data_direct_driver_register();
+    if (ret)
+        goto dd_err;
     ret = auxiliary_driver_register(&mlx5r_mp_driver);
     if (ret)
         goto mp_err;
     ret = auxiliary_driver_register(&mlx5r_driver);
     if (ret)
         goto drv_err;
+
     return 0;

 drv_err:
     auxiliary_driver_unregister(&mlx5r_mp_driver);
 mp_err:
+    mlx5_data_direct_driver_unregister();
+dd_err:
     mlx5r_rep_cleanup();
 rep_err:
     mlx5_ib_qp_event_cleanup();
 qp_event_err:
     destroy_workqueue(mlx5_ib_event_wq);
     free_page((unsigned long)xlt_emergency_page);
     return ret;
 }

 static void __exit mlx5_ib_cleanup(void)
 {
+    mlx5_data_direct_driver_unregister();
     auxiliary_driver_unregister(&mlx5r_driver);
     auxiliary_driver_unregister(&mlx5r_mp_driver);
     mlx5r_rep_cleanup();

     mlx5_ib_qp_event_cleanup();
     destroy_workqueue(mlx5_ib_event_wq);
     free_page((unsigned long)xlt_emergency_page);
 }

 module_init(mlx5_ib_init);
 module_exit(mlx5_ib_cleanup);
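
Review note: module init now performs four registrations, and both the error ladder and mlx5_ib_cleanup() release them in strict reverse order, which is why mlx5_data_direct_driver_unregister() becomes the first call on the exit path. One way to keep setup and teardown mirror images by construction is to drive both from a single table (sketch):

    #include <stdio.h>

    /* Sketch: drive paired init/cleanup steps from one table so teardown
     * is always the exact reverse of setup, as in mlx5_ib_init/cleanup. */
    struct step {
        const char *name;
        int (*init)(void);
        void (*cleanup)(void);
    };

    static int ok(void) { return 0; }
    static void noop(void) { }

    static struct step steps[] = {
        { "rep",         ok, noop },
        { "data_direct", ok, noop },
        { "mp_driver",   ok, noop },
        { "driver",      ok, noop },
    };

    int main(void)
    {
        int n = sizeof(steps) / sizeof(steps[0]);
        int i;

        for (i = 0; i < n; i++)
            if (steps[i].init()) {
                while (--i >= 0)    /* unwind only what succeeded */
                    steps[i].cleanup();
                return 1;
            }
        for (i = n - 1; i >= 0; i--) {  /* module exit: reverse order */
            printf("cleanup %s\n", steps[i].name);
            steps[i].cleanup();
        }
        return 0;
    }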