Lines Matching +full:sub +full:- +full:mailboxes (matches are from drivers/infiniband/hw/mlx5/devx.c; each entry shows the file line number, the matched source line, and the enclosing function)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
57 struct list_head list; /* headed in ev_file->event_list */
74 struct list_head file_list; /* headed in ev_file->
77 struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
78 * devx_obj_event->obj_sub_list
81 struct list_head event_list; /* headed in ev_file->event_list or in
129 if (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RDMA_CTRL) in set_uctx_ucaps()
132 return -EOPNOTSUPP; in set_uctx_ucaps()
136 if (MLX5_CAP_GEN(dev->mdev, uctx_cap) & in set_uctx_ucaps()
140 return -EOPNOTSUPP; in set_uctx_ucaps()
156 if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx)) in mlx5_ib_devx_create()
157 return -EINVAL; in mlx5_ib_devx_create()
161 (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX) && in mlx5_ib_devx_create()
162 rdma_dev_has_raw_cap(&dev->ib_dev)) in mlx5_ib_devx_create()
165 (MLX5_CAP_GEN(dev->mdev, uctx_cap) & in mlx5_ib_devx_create()
179 err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out)); in mlx5_ib_devx_create()
195 mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out)); in mlx5_ib_devx_destroy()
253 opcode = (obj->obj_id >> 32) & 0xffff; in get_dec_obj_type()
260 return (obj->obj_id >> 48); in get_dec_obj_type()
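From the two decode lines above (253, 260), together with the encode call matched further down at line 1609 (get_enc_obj_id(opcode | obj_type << 16, obj_id)), the 64-bit obj_id appears to pack the hardware object id in the low 32 bits, the destroy opcode in bits 32-47 and the object type in bits 48-63. A minimal stand-alone sketch of that packing, with arbitrary example values and helper names that are not from devx.c:

#include <assert.h>
#include <stdint.h>

/* Illustrative pack/unpack of the obj_id layout inferred from the lines above. */
static uint64_t enc_obj_id(uint16_t opcode, uint16_t obj_type, uint32_t hw_id)
{
	return ((uint64_t)((uint32_t)opcode | ((uint32_t)obj_type << 16)) << 32) | hw_id;
}

static uint16_t dec_opcode(uint64_t obj_id)   { return (obj_id >> 32) & 0xffff; }
static uint16_t dec_obj_type(uint64_t obj_id) { return obj_id >> 48; }
static uint32_t dec_hw_id(uint64_t obj_id)    { return obj_id & 0xffffffff; }

int main(void)
{
	/* Arbitrary example values, not real mlx5 opcodes or types. */
	uint64_t id = enc_obj_id(0x0a03, 0x13, 0x1234);

	assert(dec_opcode(id) == 0x0a03);
	assert(dec_obj_type(id) == 0x13);
	assert(dec_hw_id(id) == 0x1234);
	return 0;
}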
307 return eqe->data.qp_srq.type; in get_event_obj_type()
315 return MLX5_GET(affiliated_event_header, &eqe->data, obj_type); in get_event_obj_type()
637 struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata); in devx_is_valid_obj_id()
646 to_mcq(uobj->object)->mcq.cqn) == in devx_is_valid_obj_id()
651 struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq); in devx_is_valid_obj_id()
654 switch (srq->common.res) { in devx_is_valid_obj_id()
662 if (!dev->mdev->issi) in devx_is_valid_obj_id()
669 to_msrq(uobj->object)->msrq.srqn) == in devx_is_valid_obj_id()
675 struct mlx5_ib_qp *qp = to_mqp(uobj->object); in devx_is_valid_obj_id()
677 if (qp->type == IB_QPT_RAW_PACKET || in devx_is_valid_obj_id()
678 (qp->flags & IB_QP_CREATE_SOURCE_QPN)) { in devx_is_valid_obj_id()
680 &qp->raw_packet_qp; in devx_is_valid_obj_id()
681 struct mlx5_ib_rq *rq = &raw_packet_qp->rq; in devx_is_valid_obj_id()
682 struct mlx5_ib_sq *sq = &raw_packet_qp->sq; in devx_is_valid_obj_id()
685 rq->base.mqp.qpn) == obj_id || in devx_is_valid_obj_id()
687 sq->base.mqp.qpn) == obj_id || in devx_is_valid_obj_id()
689 rq->tirn) == obj_id || in devx_is_valid_obj_id()
691 sq->tisn) == obj_id); in devx_is_valid_obj_id()
694 if (qp->type == MLX5_IB_QPT_DCT) in devx_is_valid_obj_id()
696 qp->dct.mdct.mqp.qpn) == obj_id; in devx_is_valid_obj_id()
698 qp->ibqp.qp_num) == obj_id; in devx_is_valid_obj_id()
703 to_mrwq(uobj->object)->core_qp.qpn) == in devx_is_valid_obj_id()
708 to_mrwq_ind_table(uobj->object)->rqtn) == in devx_is_valid_obj_id()
714 struct devx_obj *devx_uobj = uobj->object; in devx_is_valid_obj_id()
717 devx_uobj->flow_counter_bulk_size) { in devx_is_valid_obj_id()
720 end = devx_uobj->obj_id + in devx_is_valid_obj_id()
721 devx_uobj->flow_counter_bulk_size; in devx_is_valid_obj_id()
722 return devx_uobj->obj_id <= obj_id && end > obj_id; in devx_is_valid_obj_id()
725 return devx_uobj->obj_id == obj_id; in devx_is_valid_obj_id()
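Lines 714-725 above suggest that a flow counter created in bulk is validated by range rather than by exact id: any obj_id in [base, base + bulk_size) passes, and only when no bulk size was recorded does the check fall back to strict equality. A small sketch of that check, under those assumptions:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Range check mirroring the flow-counter bulk case at lines 714-725. */
static bool flow_counter_id_valid(uint64_t base_id, uint32_t bulk_size, uint64_t obj_id)
{
	if (bulk_size)
		return base_id <= obj_id && obj_id < base_id + bulk_size;
	return base_id == obj_id;
}

int main(void)
{
	assert(flow_counter_id_valid(0x100, 4, 0x103));   /* inside the bulk */
	assert(!flow_counter_id_valid(0x100, 4, 0x104));  /* one past the end */
	assert(flow_counter_id_valid(0x100, 0, 0x100));   /* exact match only */
	return 0;
}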
979 if (c->devx_uid) in devx_get_uid()
980 return c->devx_uid; in devx_get_uid()
982 dev = to_mdev(c->ibucontext.device); in devx_get_uid()
983 if (dev->devx_whitelist_uid) in devx_get_uid()
984 return dev->devx_whitelist_uid; in devx_get_uid()
986 return -EOPNOTSUPP; in devx_get_uid()
989 if (!c->devx_uid) in devx_get_uid()
990 return -EINVAL; in devx_get_uid()
992 return c->devx_uid; in devx_get_uid()
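Lines 979-992 show two uid-selection policies: one falls back from the context's devx_uid to the device-wide whitelist uid and reports -EOPNOTSUPP when neither exists, while the other (989-992) requires a real devx_uid. A condensed user-space sketch of the fallback variant; the function and parameter names are illustrative, not the driver's:

#include <assert.h>
#include <errno.h>

/* Prefer the per-context devx uid, fall back to the device whitelist uid,
 * otherwise report "not supported". A negative return doubles as the error
 * code, as in the lines above.
 */
static int pick_cmd_uid(int ctx_devx_uid, int dev_whitelist_uid)
{
	if (ctx_devx_uid)
		return ctx_devx_uid;
	if (dev_whitelist_uid)
		return dev_whitelist_uid;
	return -EOPNOTSUPP;
}

int main(void)
{
	assert(pick_cmd_uid(7, 1) == 7);            /* context uid wins */
	assert(pick_cmd_uid(0, 1) == 1);            /* whitelist fallback */
	assert(pick_cmd_uid(0, 0) == -EOPNOTSUPP);  /* neither available */
	return 0;
}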
1000 if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) && in devx_is_general_cmd()
1041 return -EFAULT; in UVERBS_HANDLER()
1046 dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1048 err = mlx5_comp_eqn_get(dev->mdev, user_vector, &dev_eqn); in UVERBS_HANDLER()
1054 return -EFAULT; in UVERBS_HANDLER()
1070 * mailboxes (except tagging them with UID), we expose to the user its UAR
1090 dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1094 return -EFAULT; in UVERBS_HANDLER()
1096 dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true); in UVERBS_HANDLER()
1102 return -EFAULT; in UVERBS_HANDLER()
1123 dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1131 return -EINVAL; in UVERBS_HANDLER()
1138 err = mlx5_cmd_do(dev->mdev, cmd_in, in UVERBS_HANDLER()
1141 if (err && err != -EREMOTEIO) in UVERBS_HANDLER()
1356 struct mlx5_ib_mkey *mkey = &obj->mkey; in devx_handle_mkey_indirect()
1362 mkey->key = mlx5_idx_to_mkey( in devx_handle_mkey_indirect()
1364 mkey->type = MLX5_MKEY_INDIRECT_DEVX; in devx_handle_mkey_indirect()
1365 mkey->ndescs = MLX5_GET(mkc, mkc, translations_octword_size); in devx_handle_mkey_indirect()
1366 init_waitqueue_head(&mkey->wait); in devx_handle_mkey_indirect()
1382 return -EINVAL; in devx_handle_mkey_create()
1392 obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY; in devx_handle_mkey_create()
1405 struct devx_event_subscription *sub) in devx_cleanup_subscription() argument
1410 if (sub->is_cleaned) in devx_cleanup_subscription()
1413 sub->is_cleaned = 1; in devx_cleanup_subscription()
1414 list_del_rcu(&sub->xa_list); in devx_cleanup_subscription()
1416 if (list_empty(&sub->obj_list)) in devx_cleanup_subscription()
1419 list_del_rcu(&sub->obj_list); in devx_cleanup_subscription()
1421 event = xa_load(&dev->devx_event_table.event_xa, in devx_cleanup_subscription()
1422 sub->xa_key_level1); in devx_cleanup_subscription()
1425 xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2); in devx_cleanup_subscription()
1426 if (list_empty(&xa_val_level2->obj_sub_list)) { in devx_cleanup_subscription()
1427 xa_erase(&event->object_ids, in devx_cleanup_subscription()
1428 sub->xa_key_level2); in devx_cleanup_subscription()
1439 struct devx_obj *obj = uobject->object; in devx_obj_cleanup()
1444 dev = mlx5_udata_to_mdev(&attrs->driver_udata); in devx_obj_cleanup()
1445 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY && in devx_obj_cleanup()
1446 xa_erase(&obj->ib_dev->odp_mkeys, in devx_obj_cleanup()
1447 mlx5_base_mkey(obj->mkey.key))) in devx_obj_cleanup()
1453 mlx5r_deref_wait_odp_mkey(&obj->mkey); in devx_obj_cleanup()
1455 if (obj->flags & DEVX_OBJ_FLAGS_HW_FREED) in devx_obj_cleanup()
1457 else if (obj->flags & DEVX_OBJ_FLAGS_DCT) in devx_obj_cleanup()
1458 ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct); in devx_obj_cleanup()
1459 else if (obj->flags & DEVX_OBJ_FLAGS_CQ) in devx_obj_cleanup()
1460 ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq); in devx_obj_cleanup()
1462 ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, in devx_obj_cleanup()
1463 obj->dinlen, out, sizeof(out)); in devx_obj_cleanup()
1467 devx_event_table = &dev->devx_event_table; in devx_obj_cleanup()
1469 mutex_lock(&devx_event_table->event_xa_lock); in devx_obj_cleanup()
1470 list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list) in devx_obj_cleanup()
1472 mutex_unlock(&devx_event_table->event_xa_lock); in devx_obj_cleanup()
1484 u32 obj_id = mcq->cqn; in devx_cq_comp()
1486 table = &obj->ib_dev->devx_event_table; in devx_cq_comp()
1488 event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP); in devx_cq_comp()
1492 obj_event = xa_load(&event->object_ids, obj_id); in devx_cq_comp()
1496 dispatch_event_fd(&obj_event->obj_sub_list, eqe); in devx_cq_comp()
1503 if (!MLX5_CAP_GEN(dev->mdev, apu) || in is_apu_cq()
1522 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1523 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1533 return -EINVAL; in UVERBS_HANDLER()
1540 return -EINVAL; in UVERBS_HANDLER()
1548 return -ENOMEM; in UVERBS_HANDLER()
1560 obj->flags |= DEVX_OBJ_FLAGS_DCT; in UVERBS_HANDLER()
1561 err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in, in UVERBS_HANDLER()
1565 obj->flags |= DEVX_OBJ_FLAGS_CQ; in UVERBS_HANDLER()
1566 obj->core_cq.comp = devx_cq_comp; in UVERBS_HANDLER()
1567 err = mlx5_create_cq(dev->mdev, &obj->core_cq, in UVERBS_HANDLER()
1571 err = mlx5_cmd_do(dev->mdev, cmd_in, cmd_in_len, in UVERBS_HANDLER()
1575 if (err == -EREMOTEIO) in UVERBS_HANDLER()
1593 obj->flow_counter_bulk_size = bulk; in UVERBS_HANDLER()
1596 uobj->object = obj; in UVERBS_HANDLER()
1597 INIT_LIST_HEAD(&obj->event_sub); in UVERBS_HANDLER()
1598 obj->ib_dev = dev; in UVERBS_HANDLER()
1599 devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, in UVERBS_HANDLER()
1601 WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32)); in UVERBS_HANDLER()
1609 obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id); in UVERBS_HANDLER()
1611 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) { in UVERBS_HANDLER()
1619 if (obj->flags & DEVX_OBJ_FLAGS_DCT) in UVERBS_HANDLER()
1620 mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct); in UVERBS_HANDLER()
1621 else if (obj->flags & DEVX_OBJ_FLAGS_CQ) in UVERBS_HANDLER()
1622 mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq); in UVERBS_HANDLER()
1624 mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out, in UVERBS_HANDLER()
1640 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1641 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1647 return -EINVAL; in UVERBS_HANDLER()
1654 return -EINVAL; in UVERBS_HANDLER()
1657 return -EINVAL; in UVERBS_HANDLER()
1666 err = mlx5_cmd_do(mdev->mdev, cmd_in, in UVERBS_HANDLER()
1669 if (err && err != -EREMOTEIO) in UVERBS_HANDLER()
1687 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1691 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1694 return -EINVAL; in UVERBS_HANDLER()
1701 return -EINVAL; in UVERBS_HANDLER()
1704 return -EINVAL; in UVERBS_HANDLER()
1711 err = mlx5_cmd_do(mdev->mdev, cmd_in, in UVERBS_HANDLER()
1714 if (err && err != -EREMOTEIO) in UVERBS_HANDLER()
1739 spin_lock_init(&ev_queue->lock); in devx_init_event_queue()
1740 INIT_LIST_HEAD(&ev_queue->event_list); in devx_init_event_queue()
1741 init_waitqueue_head(&ev_queue->poll_wait); in devx_init_event_queue()
1742 atomic_set(&ev_queue->bytes_in_use, 0); in devx_init_event_queue()
1743 ev_queue->is_destroyed = 0; in devx_init_event_queue()
1753 struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata); in UVERBS_HANDLER()
1757 devx_init_event_queue(&ev_file->ev_queue); in UVERBS_HANDLER()
1758 mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx); in UVERBS_HANDLER()
1769 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1770 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1783 spin_lock_init(&ev_file->lock); in UVERBS_HANDLER()
1784 INIT_LIST_HEAD(&ev_file->event_list); in UVERBS_HANDLER()
1785 init_waitqueue_head(&ev_file->poll_wait); in UVERBS_HANDLER()
1787 ev_file->omit_data = 1; in UVERBS_HANDLER()
1788 INIT_LIST_HEAD(&ev_file->subscribed_events_list); in UVERBS_HANDLER()
1789 ev_file->dev = dev; in UVERBS_HANDLER()
1790 get_device(&dev->ib_dev.dev); in UVERBS_HANDLER()
1798 struct devx_async_cmd_event_file *ev_file = async_data->ev_file; in devx_query_callback()
1799 struct devx_async_event_queue *ev_queue = &ev_file->ev_queue; in devx_query_callback()
1807 spin_lock_irqsave(&ev_queue->lock, flags); in devx_query_callback()
1808 list_add_tail(&async_data->list, &ev_queue->event_list); in devx_query_callback()
1809 spin_unlock_irqrestore(&ev_queue->lock, flags); in devx_query_callback()
1811 wake_up_interruptible(&ev_queue->poll_wait); in devx_query_callback()
1826 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1830 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1835 return -EINVAL; in UVERBS_HANDLER()
1842 return -EINVAL; in UVERBS_HANDLER()
1850 return -EINVAL; in UVERBS_HANDLER()
1860 if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) > in UVERBS_HANDLER()
1862 atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use); in UVERBS_HANDLER()
1863 return -EAGAIN; in UVERBS_HANDLER()
1869 err = -ENOMEM; in UVERBS_HANDLER()
1873 err = uverbs_copy_from(&async_data->hdr.wr_id, attrs, in UVERBS_HANDLER()
1878 async_data->cmd_out_len = cmd_out_len; in UVERBS_HANDLER()
1879 async_data->mdev = mdev; in UVERBS_HANDLER()
1880 async_data->ev_file = ev_file; in UVERBS_HANDLER()
1883 err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in, in UVERBS_HANDLER()
1886 async_data->hdr.out_data, in UVERBS_HANDLER()
1887 async_data->cmd_out_len, in UVERBS_HANDLER()
1888 devx_query_callback, &async_data->cb_work); in UVERBS_HANDLER()
1898 atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use); in UVERBS_HANDLER()
1915 event = xa_load(&devx_event_table->event_xa, key_level1); in subscribe_event_xa_dealloc()
1918 xa_val_level2 = xa_load(&event->object_ids, in subscribe_event_xa_dealloc()
1920 if (list_empty(&xa_val_level2->obj_sub_list)) { in subscribe_event_xa_dealloc()
1921 xa_erase(&event->object_ids, in subscribe_event_xa_dealloc()
1937 event = xa_load(&devx_event_table->event_xa, key_level1); in subscribe_event_xa_alloc()
1941 return -ENOMEM; in subscribe_event_xa_alloc()
1943 INIT_LIST_HEAD(&event->unaffiliated_list); in subscribe_event_xa_alloc()
1944 xa_init(&event->object_ids); in subscribe_event_xa_alloc()
1946 err = xa_insert(&devx_event_table->event_xa, in subscribe_event_xa_alloc()
1959 obj_event = xa_load(&event->object_ids, key_level2); in subscribe_event_xa_alloc()
1964 return -ENOMEM; in subscribe_event_xa_alloc()
1966 INIT_LIST_HEAD(&obj_event->obj_sub_list); in subscribe_event_xa_alloc()
1967 err = xa_insert(&event->object_ids, in subscribe_event_xa_alloc()
2054 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
2055 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
2059 struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table; in UVERBS_HANDLER()
2072 if (!c->devx_uid) in UVERBS_HANDLER()
2073 return -EINVAL; in UVERBS_HANDLER()
2076 obj = (struct devx_obj *)devx_uobj->object; in UVERBS_HANDLER()
2078 obj_id = get_dec_obj_id(obj->obj_id); in UVERBS_HANDLER()
2102 return -EINVAL; in UVERBS_HANDLER()
2118 return -EINVAL; in UVERBS_HANDLER()
2123 if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj)) in UVERBS_HANDLER()
2124 return -EINVAL; in UVERBS_HANDLER()
2131 mutex_lock(&devx_event_table->event_xa_lock); in UVERBS_HANDLER()
2149 err = -ENOMEM; in UVERBS_HANDLER()
2153 list_add_tail(&event_sub->event_list, &sub_list); in UVERBS_HANDLER()
2154 uverbs_uobject_get(&ev_file->uobj); in UVERBS_HANDLER()
2156 event_sub->eventfd = in UVERBS_HANDLER()
2159 if (IS_ERR(event_sub->eventfd)) { in UVERBS_HANDLER()
2160 err = PTR_ERR(event_sub->eventfd); in UVERBS_HANDLER()
2161 event_sub->eventfd = NULL; in UVERBS_HANDLER()
2166 event_sub->cookie = cookie; in UVERBS_HANDLER()
2167 event_sub->ev_file = ev_file; in UVERBS_HANDLER()
2169 event_sub->xa_key_level1 = key_level1; in UVERBS_HANDLER()
2170 event_sub->xa_key_level2 = obj_id; in UVERBS_HANDLER()
2171 INIT_LIST_HEAD(&event_sub->obj_list); in UVERBS_HANDLER()
2182 list_del_init(&event_sub->event_list); in UVERBS_HANDLER()
2184 spin_lock_irq(&ev_file->lock); in UVERBS_HANDLER()
2185 list_add_tail_rcu(&event_sub->file_list, in UVERBS_HANDLER()
2186 &ev_file->subscribed_events_list); in UVERBS_HANDLER()
2187 spin_unlock_irq(&ev_file->lock); in UVERBS_HANDLER()
2189 event = xa_load(&devx_event_table->event_xa, in UVERBS_HANDLER()
2190 event_sub->xa_key_level1); in UVERBS_HANDLER()
2194 list_add_tail_rcu(&event_sub->xa_list, in UVERBS_HANDLER()
2195 &event->unaffiliated_list); in UVERBS_HANDLER()
2199 obj_event = xa_load(&event->object_ids, obj_id); in UVERBS_HANDLER()
2201 list_add_tail_rcu(&event_sub->xa_list, in UVERBS_HANDLER()
2202 &obj_event->obj_sub_list); in UVERBS_HANDLER()
2203 list_add_tail_rcu(&event_sub->obj_list, in UVERBS_HANDLER()
2204 &obj->event_sub); in UVERBS_HANDLER()
2207 mutex_unlock(&devx_event_table->event_xa_lock); in UVERBS_HANDLER()
2212 list_del(&event_sub->event_list); in UVERBS_HANDLER()
2215 event_sub->xa_key_level1, in UVERBS_HANDLER()
2219 if (event_sub->eventfd) in UVERBS_HANDLER()
2220 eventfd_ctx_put(event_sub->eventfd); in UVERBS_HANDLER()
2221 uverbs_uobject_put(&event_sub->ev_file->uobj); in UVERBS_HANDLER()
2225 mutex_unlock(&devx_event_table->event_xa_lock); in UVERBS_HANDLER()
2239 return -EFAULT; in devx_umem_get()
2241 err = ib_check_mr_access(&dev->ib_dev, access_flags); in devx_umem_get()
2252 return -EFAULT; in devx_umem_get()
2255 &dev->ib_dev, addr, size, dmabuf_fd, access_flags); in devx_umem_get()
2258 obj->umem = &umem_dmabuf->umem; in devx_umem_get()
2260 obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access_flags); in devx_umem_get()
2261 if (IS_ERR(obj->umem)) in devx_umem_get()
2262 return PTR_ERR(obj->umem); in devx_umem_get()
2275 pgsz_bitmap &= GENMASK_ULL(max_t(u64, order_base_2(umem->length), in devx_umem_find_best_pgsize()
2292 (umem->length % page_size) != 0) && in devx_umem_find_best_pgsize()
2330 page_size = devx_umem_find_best_pgsize(obj->umem, pgsz_bitmap); in devx_umem_reg_cmd_alloc()
2332 return -EINVAL; in devx_umem_reg_cmd_alloc()
2334 cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) + in devx_umem_reg_cmd_alloc()
2336 ib_umem_num_dma_blocks(obj->umem, page_size)); in devx_umem_reg_cmd_alloc()
2337 cmd->in = uverbs_zalloc(attrs, cmd->inlen); in devx_umem_reg_cmd_alloc()
2338 if (IS_ERR(cmd->in)) in devx_umem_reg_cmd_alloc()
2339 return PTR_ERR(cmd->in); in devx_umem_reg_cmd_alloc()
2341 umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem); in devx_umem_reg_cmd_alloc()
2344 MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM); in devx_umem_reg_cmd_alloc()
2346 ib_umem_num_dma_blocks(obj->umem, page_size)); in devx_umem_reg_cmd_alloc()
2348 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT); in devx_umem_reg_cmd_alloc()
2350 ib_umem_dma_offset(obj->umem, page_size)); in devx_umem_reg_cmd_alloc()
2352 if (mlx5_umem_needs_ats(dev, obj->umem, access)) in devx_umem_reg_cmd_alloc()
2355 mlx5_ib_populate_pas(obj->umem, page_size, mtt, in devx_umem_reg_cmd_alloc()
2356 (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) | in devx_umem_reg_cmd_alloc()
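Line 2348 derives the device's log_page_size by taking log2 of the chosen DMA block size and subtracting MLX5_ADAPTER_PAGE_SHIFT. Assuming the usual 4 KiB adapter page (a shift of 12), the arithmetic works out as in this small check; the helper below stands in for order_base_2() on power-of-two sizes:

#include <assert.h>
#include <stdint.h>

#define ADAPTER_PAGE_SHIFT 12 /* assumed: device pages are 4 KiB */

/* log2(page_size) - ADAPTER_PAGE_SHIFT for power-of-two sizes >= 4 KiB. */
static unsigned int log_page_size(uint64_t page_size)
{
	unsigned int log2 = 0;

	while ((1ULL << (log2 + 1)) <= page_size)
		log2++;
	return log2 - ADAPTER_PAGE_SHIFT;
}

int main(void)
{
	assert(log_page_size(4096) == 0);     /* 4 KiB -> 0 */
	assert(log_page_size(2 << 20) == 9);  /* 2 MiB -> 9 */
	return 0;
}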
2370 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
2371 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
2375 if (!c->devx_uid) in UVERBS_HANDLER()
2376 return -EINVAL; in UVERBS_HANDLER()
2389 return -ENOMEM; in UVERBS_HANDLER()
2391 err = devx_umem_get(dev, &c->ibucontext, attrs, obj, access_flags); in UVERBS_HANDLER()
2399 MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid); in UVERBS_HANDLER()
2400 err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out, in UVERBS_HANDLER()
2405 obj->mdev = dev->mdev; in UVERBS_HANDLER()
2406 uobj->object = obj; in UVERBS_HANDLER()
2407 devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id); in UVERBS_HANDLER()
2415 ib_umem_release(obj->umem); in UVERBS_HANDLER()
2425 struct devx_umem *obj = uobject->object; in devx_umem_cleanup()
2429 err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out)); in devx_umem_cleanup()
2433 ib_umem_release(obj->umem); in devx_umem_cleanup()
2477 obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; in devx_get_obj_id_from_event()
2480 obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff; in devx_get_obj_id_from_event()
2484 obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff; in devx_get_obj_id_from_event()
2487 obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; in devx_get_obj_id_from_event()
2490 obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id); in devx_get_obj_id_from_event()
2504 ev_file = event_sub->ev_file; in deliver_event()
2506 if (ev_file->omit_data) { in deliver_event()
2507 spin_lock_irqsave(&ev_file->lock, flags); in deliver_event()
2508 if (!list_empty(&event_sub->event_list) || in deliver_event()
2509 ev_file->is_destroyed) { in deliver_event()
2510 spin_unlock_irqrestore(&ev_file->lock, flags); in deliver_event()
2514 list_add_tail(&event_sub->event_list, &ev_file->event_list); in deliver_event()
2515 spin_unlock_irqrestore(&ev_file->lock, flags); in deliver_event()
2516 wake_up_interruptible(&ev_file->poll_wait); in deliver_event()
2523 spin_lock_irqsave(&ev_file->lock, flags); in deliver_event()
2524 ev_file->is_overflow_err = 1; in deliver_event()
2525 spin_unlock_irqrestore(&ev_file->lock, flags); in deliver_event()
2526 return -ENOMEM; in deliver_event()
2529 event_data->hdr.cookie = event_sub->cookie; in deliver_event()
2530 memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe)); in deliver_event()
2532 spin_lock_irqsave(&ev_file->lock, flags); in deliver_event()
2533 if (!ev_file->is_destroyed) in deliver_event()
2534 list_add_tail(&event_data->list, &ev_file->event_list); in deliver_event()
2537 spin_unlock_irqrestore(&ev_file->lock, flags); in deliver_event()
2538 wake_up_interruptible(&ev_file->poll_wait); in deliver_event()
2549 if (item->eventfd) in dispatch_event_fd()
2550 eventfd_signal(item->eventfd); in dispatch_event_fd()
2574 is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type); in devx_event_notifier()
2580 event = xa_load(&table->event_xa, event_type | (obj_type << 16)); in devx_event_notifier()
2587 dispatch_event_fd(&event->unaffiliated_list, data); in devx_event_notifier()
2593 obj_event = xa_load(&event->object_ids, obj_id); in devx_event_notifier()
2599 dispatch_event_fd(&obj_event->obj_sub_list, data); in devx_event_notifier()
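The lookups at lines 2580 and 2593 (and the subscription keys at 2169-2170) point to a two-level table: a first xarray keyed by event_type | (obj_type << 16), whose entries hold an unaffiliated subscriber list plus a second xarray keyed by object id. A compact sketch of how that level-1 key appears to be composed; the helper name is illustrative:

#include <assert.h>
#include <stdint.h>

/* Level-1 key for the devx event xarray, as composed at line 2580: the low
 * 16 bits carry the EQE event type, the next 16 bits the object type of
 * affiliated events. Level-2 lookups (line 2593) then use the object id.
 */
static uint32_t devx_event_key_level1(uint8_t event_type, uint16_t obj_type)
{
	return (uint32_t)event_type | ((uint32_t)obj_type << 16);
}

int main(void)
{
	/* An unaffiliated event keeps only the event type in the key;
	 * affiliated events get the object type folded into the high bits. */
	assert(devx_event_key_level1(0x1c, 0) == 0x1c);
	assert(devx_event_key_level1(0x1c, 0x2) == 0x0002001c);
	return 0;
}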
2607 struct mlx5_devx_event_table *table = &dev->devx_event_table; in mlx5_ib_devx_init()
2612 dev->devx_whitelist_uid = uid; in mlx5_ib_devx_init()
2613 xa_init(&table->event_xa); in mlx5_ib_devx_init()
2614 mutex_init(&table->event_xa_lock); in mlx5_ib_devx_init()
2615 MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY); in mlx5_ib_devx_init()
2616 mlx5_eq_notifier_register(dev->mdev, &table->devx_nb); in mlx5_ib_devx_init()
2624 struct mlx5_devx_event_table *table = &dev->devx_event_table; in mlx5_ib_devx_cleanup()
2625 struct devx_event_subscription *sub, *tmp; in mlx5_ib_devx_cleanup() local
2630 if (dev->devx_whitelist_uid) { in mlx5_ib_devx_cleanup()
2631 mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb); in mlx5_ib_devx_cleanup()
2632 mutex_lock(&dev->devx_event_table.event_xa_lock); in mlx5_ib_devx_cleanup()
2633 xa_for_each(&table->event_xa, id, entry) { in mlx5_ib_devx_cleanup()
2636 sub, tmp, &event->unaffiliated_list, xa_list) in mlx5_ib_devx_cleanup()
2637 devx_cleanup_subscription(dev, sub); in mlx5_ib_devx_cleanup()
2640 mutex_unlock(&dev->devx_event_table.event_xa_lock); in mlx5_ib_devx_cleanup()
2641 xa_destroy(&table->event_xa); in mlx5_ib_devx_cleanup()
2643 mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid); in mlx5_ib_devx_cleanup()
2651 struct devx_obj *obj = devx_out->uobject->object; in devx_async_destroy_cb()
2654 obj->flags |= DEVX_OBJ_FLAGS_HW_FREED; in devx_async_destroy_cb()
2656 complete(&devx_out->comp); in devx_async_destroy_cb()
2662 init_completion(&cmd->comp); in devx_async_destroy()
2663 cmd->err = mlx5_cmd_exec_cb(&dev->async_ctx, cmd->in, cmd->in_size, in devx_async_destroy()
2664 &cmd->out, sizeof(cmd->out), in devx_async_destroy()
2665 devx_async_destroy_cb, &cmd->cb_work); in devx_async_destroy()
2670 if (!cmd->err) in devx_wait_async_destroy()
2671 wait_for_completion(&cmd->comp); in devx_wait_async_destroy()
2672 atomic_set(&cmd->uobject->usecnt, 0); in devx_wait_async_destroy()
2678 struct ib_ucontext *ucontext = ufile->ucontext; in mlx5_ib_ufile_hw_cleanup()
2679 struct ib_device *device = ucontext->device; in mlx5_ib_ufile_hw_cleanup()
2690 list_for_each_entry(uobject, &ufile->uobjects, list) { in mlx5_ib_ufile_hw_cleanup()
2700 (get_dec_obj_type(uobject->object, MLX5_EVENT_TYPE_MAX) != in mlx5_ib_ufile_hw_cleanup()
2702 atomic_set(&uobject->usecnt, 0); in mlx5_ib_ufile_hw_cleanup()
2706 obj = uobject->object; in mlx5_ib_ufile_hw_cleanup()
2708 async_cmd[tail % MAX_ASYNC_CMDS].in = obj->dinbox; in mlx5_ib_ufile_hw_cleanup()
2709 async_cmd[tail % MAX_ASYNC_CMDS].in_size = obj->dinlen; in mlx5_ib_ufile_hw_cleanup()
2715 if (tail - head == MAX_ASYNC_CMDS) { in mlx5_ib_ufile_hw_cleanup()
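Lines 2662-2672 and 2706-2715 sketch a bounded destroy pipeline: destroy commands are posted asynchronously into a ring of MAX_ASYNC_CMDS slots, and the issuer only waits for the oldest completion once tail - head reaches the ring size. A minimal user-space analogue of that head/tail back-pressure, with the firmware command replaced by stubs and the ring depth chosen arbitrarily:

#include <stdio.h>

#define MAX_ASYNC_CMDS 8 /* assumed ring depth for illustration only */

struct destroy_cmd { int id; int issued; };

/* Stub standing in for the asynchronous destroy post (mlx5_cmd_exec_cb). */
static void issue_async_destroy(struct destroy_cmd *cmd, int id)
{
	cmd->id = id;
	cmd->issued = 1;
}

/* Stub standing in for waiting on the oldest command's completion. */
static void wait_async_destroy(struct destroy_cmd *cmd)
{
	printf("reaped destroy of object %d\n", cmd->id);
	cmd->issued = 0;
}

int main(void)
{
	struct destroy_cmd ring[MAX_ASYNC_CMDS] = { 0 };
	unsigned long head = 0, tail = 0;

	for (int obj = 0; obj < 20; obj++) {
		issue_async_destroy(&ring[tail % MAX_ASYNC_CMDS], obj);
		tail++;

		/* Same back-pressure rule as line 2715: once the ring is
		 * full, reap the oldest in-flight command before issuing
		 * the next one. */
		if (tail - head == MAX_ASYNC_CMDS) {
			wait_async_destroy(&ring[head % MAX_ASYNC_CMDS]);
			head++;
		}
	}
	while (head != tail) {
		wait_async_destroy(&ring[head % MAX_ASYNC_CMDS]);
		head++;
	}
	return 0;
}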
2732 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data; in devx_async_cmd_event_read()
2733 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue; in devx_async_cmd_event_read()
2738 spin_lock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2740 while (list_empty(&ev_queue->event_list)) { in devx_async_cmd_event_read()
2741 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2743 if (filp->f_flags & O_NONBLOCK) in devx_async_cmd_event_read()
2744 return -EAGAIN; in devx_async_cmd_event_read()
2747 ev_queue->poll_wait, in devx_async_cmd_event_read()
2748 (!list_empty(&ev_queue->event_list) || in devx_async_cmd_event_read()
2749 ev_queue->is_destroyed))) { in devx_async_cmd_event_read()
2750 return -ERESTARTSYS; in devx_async_cmd_event_read()
2753 spin_lock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2754 if (ev_queue->is_destroyed) { in devx_async_cmd_event_read()
2755 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2756 return -EIO; in devx_async_cmd_event_read()
2760 event = list_entry(ev_queue->event_list.next, in devx_async_cmd_event_read()
2762 eventsz = event->cmd_out_len + in devx_async_cmd_event_read()
2766 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2767 return -ENOSPC; in devx_async_cmd_event_read()
2770 list_del(ev_queue->event_list.next); in devx_async_cmd_event_read()
2771 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2773 if (copy_to_user(buf, &event->hdr, eventsz)) in devx_async_cmd_event_read()
2774 ret = -EFAULT; in devx_async_cmd_event_read()
2778 atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use); in devx_async_cmd_event_read()
2786 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data; in devx_async_cmd_event_poll()
2787 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue; in devx_async_cmd_event_poll()
2790 poll_wait(filp, &ev_queue->poll_wait, wait); in devx_async_cmd_event_poll()
2792 spin_lock_irq(&ev_queue->lock); in devx_async_cmd_event_poll()
2793 if (ev_queue->is_destroyed) in devx_async_cmd_event_poll()
2795 else if (!list_empty(&ev_queue->event_list)) in devx_async_cmd_event_poll()
2797 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_poll()
2812 struct devx_async_event_file *ev_file = filp->private_data; in devx_async_event_read()
2820 omit_data = ev_file->omit_data; in devx_async_event_read()
2822 spin_lock_irq(&ev_file->lock); in devx_async_event_read()
2824 if (ev_file->is_overflow_err) { in devx_async_event_read()
2825 ev_file->is_overflow_err = 0; in devx_async_event_read()
2826 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2827 return -EOVERFLOW; in devx_async_event_read()
2831 while (list_empty(&ev_file->event_list)) { in devx_async_event_read()
2832 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2834 if (filp->f_flags & O_NONBLOCK) in devx_async_event_read()
2835 return -EAGAIN; in devx_async_event_read()
2837 if (wait_event_interruptible(ev_file->poll_wait, in devx_async_event_read()
2838 (!list_empty(&ev_file->event_list) || in devx_async_event_read()
2839 ev_file->is_destroyed))) { in devx_async_event_read()
2840 return -ERESTARTSYS; in devx_async_event_read()
2843 spin_lock_irq(&ev_file->lock); in devx_async_event_read()
2844 if (ev_file->is_destroyed) { in devx_async_event_read()
2845 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2846 return -EIO; in devx_async_event_read()
2851 event_sub = list_first_entry(&ev_file->event_list, in devx_async_event_read()
2854 eventsz = sizeof(event_sub->cookie); in devx_async_event_read()
2855 event_data = &event_sub->cookie; in devx_async_event_read()
2857 event = list_first_entry(&ev_file->event_list, in devx_async_event_read()
2861 event_data = &event->hdr; in devx_async_event_read()
2865 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2866 return -EINVAL; in devx_async_event_read()
2870 list_del_init(&event_sub->event_list); in devx_async_event_read()
2872 list_del(&event->list); in devx_async_event_read()
2874 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2878 ret = -EFAULT; in devx_async_event_read()
2890 struct devx_async_event_file *ev_file = filp->private_data; in devx_async_event_poll()
2893 poll_wait(filp, &ev_file->poll_wait, wait); in devx_async_event_poll()
2895 spin_lock_irq(&ev_file->lock); in devx_async_event_poll()
2896 if (ev_file->is_destroyed) in devx_async_event_poll()
2898 else if (!list_empty(&ev_file->event_list)) in devx_async_event_poll()
2900 spin_unlock_irq(&ev_file->lock); in devx_async_event_poll()
2910 if (event_sub->eventfd) in devx_free_subscription()
2911 eventfd_ctx_put(event_sub->eventfd); in devx_free_subscription()
2912 uverbs_uobject_put(&event_sub->ev_file->uobj); in devx_free_subscription()
2929 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue; in devx_async_cmd_event_destroy_uobj()
2932 spin_lock_irq(&ev_queue->lock); in devx_async_cmd_event_destroy_uobj()
2933 ev_queue->is_destroyed = 1; in devx_async_cmd_event_destroy_uobj()
2934 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_destroy_uobj()
2935 wake_up_interruptible(&ev_queue->poll_wait); in devx_async_cmd_event_destroy_uobj()
2937 mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx); in devx_async_cmd_event_destroy_uobj()
2939 spin_lock_irq(&comp_ev_file->ev_queue.lock); in devx_async_cmd_event_destroy_uobj()
2941 &comp_ev_file->ev_queue.event_list, list) { in devx_async_cmd_event_destroy_uobj()
2942 list_del(&entry->list); in devx_async_cmd_event_destroy_uobj()
2945 spin_unlock_irq(&comp_ev_file->ev_queue.lock); in devx_async_cmd_event_destroy_uobj()
2955 struct mlx5_ib_dev *dev = ev_file->dev; in devx_async_event_destroy_uobj()
2957 spin_lock_irq(&ev_file->lock); in devx_async_event_destroy_uobj()
2958 ev_file->is_destroyed = 1; in devx_async_event_destroy_uobj()
2961 if (ev_file->omit_data) { in devx_async_event_destroy_uobj()
2964 list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list, in devx_async_event_destroy_uobj()
2966 list_del_init(&event_sub->event_list); in devx_async_event_destroy_uobj()
2971 list_for_each_entry_safe(entry, tmp, &ev_file->event_list, in devx_async_event_destroy_uobj()
2973 list_del(&entry->list); in devx_async_event_destroy_uobj()
2978 spin_unlock_irq(&ev_file->lock); in devx_async_event_destroy_uobj()
2979 wake_up_interruptible(&ev_file->poll_wait); in devx_async_event_destroy_uobj()
2981 mutex_lock(&dev->devx_event_table.event_xa_lock); in devx_async_event_destroy_uobj()
2984 &ev_file->subscribed_events_list, file_list) { in devx_async_event_destroy_uobj()
2986 list_del_rcu(&event_sub->file_list); in devx_async_event_destroy_uobj()
2988 call_rcu(&event_sub->rcu, devx_free_subscription); in devx_async_event_destroy_uobj()
2990 mutex_unlock(&dev->devx_event_table.event_xa_lock); in devx_async_event_destroy_uobj()
2992 put_device(&dev->ib_dev.dev); in devx_async_event_destroy_uobj()
3208 return MLX5_CAP_GEN(dev->mdev, log_max_uctx); in devx_is_supported()