/linux/drivers/iommu/iommufd/

ioas.c
    38  int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd)   in iommufd_ioas_alloc_ioctl() argument
    40  struct iommu_ioas_alloc *cmd = ucmd->cmd;   in iommufd_ioas_alloc_ioctl()
    47  ioas = iommufd_ioas_alloc(ucmd->ictx);   in iommufd_ioas_alloc_ioctl()
    52  rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));   in iommufd_ioas_alloc_ioctl()
    56  down_read(&ucmd->ictx->ioas_creation_lock);   in iommufd_ioas_alloc_ioctl()
    57  iommufd_object_finalize(ucmd->ictx, &ioas->obj);   in iommufd_ioas_alloc_ioctl()
    58  up_read(&ucmd->ictx->ioas_creation_lock);   in iommufd_ioas_alloc_ioctl()
    62  iommufd_object_abort_and_destroy(ucmd->ictx, &ioas->obj);   in iommufd_ioas_alloc_ioctl()
    66  int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd)   in iommufd_ioas_iova_ranges() argument
    69  struct iommu_ioas_iova_ranges *cmd = ucmd->cmd;   in iommufd_ioas_iova_ranges()
    [all …]

iommufd_private.h
    121  static inline int iommufd_ucmd_respond(struct iommufd_ucmd *ucmd,   in iommufd_ucmd_respond() argument
    124  if (copy_to_user(ucmd->ubuffer, ucmd->cmd,   in iommufd_ucmd_respond()
    125  min_t(size_t, ucmd->user_size, cmd_len)))   in iommufd_ucmd_respond()
    252  int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd);
    254  int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd);
    255  int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd);
    256  int iommufd_ioas_map(struct iommufd_ucmd *ucmd);
    257  int iommufd_ioas_map_file(struct iommufd_ucmd *ucmd);
    258  int iommufd_ioas_change_process(struct iommufd_ucmd *ucmd);
    259  int iommufd_ioas_copy(struct iommufd_ucmd *ucmd);
    [all …]

main.c
    200  static int iommufd_destroy(struct iommufd_ucmd *ucmd)   in iommufd_destroy() argument
    202  struct iommu_destroy *cmd = ucmd->cmd;   in iommufd_destroy()
    204  return iommufd_object_remove(ucmd->ictx, NULL, cmd->id, 0);   in iommufd_destroy()
    269  static int iommufd_option(struct iommufd_ucmd *ucmd)   in iommufd_option() argument
    271  struct iommu_option *cmd = ucmd->cmd;   in iommufd_option()
    279  rc = iommufd_option_rlimit_mode(cmd, ucmd->ictx);   in iommufd_option()
    282  rc = iommufd_ioas_option(ucmd);   in iommufd_option()
    289  if (copy_to_user(&((struct iommu_option __user *)ucmd->ubuffer)->val64,   in iommufd_option()
    322  int (*execute)(struct iommufd_ucmd *ucmd);
    382  struct iommufd_ucmd ucmd = {};   in iommufd_fops_ioctl() local
    [all …]

vfio_compat.c
    123  int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd)   in iommufd_vfio_ioas() argument
    125  struct iommu_vfio_ioas *cmd = ucmd->cmd;   in iommufd_vfio_ioas()
    132  ioas = get_compat_ioas(ucmd->ictx);   in iommufd_vfio_ioas()
    136  iommufd_put_object(ucmd->ictx, &ioas->obj);   in iommufd_vfio_ioas()
    137  return iommufd_ucmd_respond(ucmd, sizeof(*cmd));   in iommufd_vfio_ioas()
    140  ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);   in iommufd_vfio_ioas()
    143  xa_lock(&ucmd->ictx->objects);   in iommufd_vfio_ioas()
    144  ucmd->ictx->vfio_ioas = ioas;   in iommufd_vfio_ioas()
    145  xa_unlock(&ucmd->ictx->objects);   in iommufd_vfio_ioas()
    146  iommufd_put_object(ucmd->ictx, &ioas->obj);   in iommufd_vfio_ioas()
    [all …]

fault.c
    370  int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)   in iommufd_fault_alloc() argument
    372  struct iommu_fault_alloc *cmd = ucmd->cmd;   in iommufd_fault_alloc()
    381  fault = iommufd_object_alloc(ucmd->ictx, fault, IOMMUFD_OBJ_FAULT);   in iommufd_fault_alloc()
    385  fault->ictx = ucmd->ictx;   in iommufd_fault_alloc()
    411  rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));   in iommufd_fault_alloc()
    414  iommufd_object_finalize(ucmd->ictx, &fault->obj);   in iommufd_fault_alloc()
    424  iommufd_object_abort_and_destroy(ucmd->ictx, &fault->obj);   in iommufd_fault_alloc()

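All of the iommufd hits above follow one dispatch pattern: iommufd_fops_ioctl() builds a struct iommufd_ucmd holding a kernel copy of the user's command, an op-specific execute() callback (iommufd_destroy(), iommufd_ioas_alloc_ioctl(), iommufd_fault_alloc(), ...) interprets ucmd->cmd, and iommufd_ucmd_respond() copies the result back to the user buffer, truncated to whatever size userspace passed in. Below is a minimal sketch of that flow; the struct layouts and the example handler are simplified stand-ins, not the real iommufd_private.h definitions.

#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/minmax.h>
#include <linux/errno.h>

/* Simplified stand-in for struct iommufd_ucmd: the user's buffer pointer,
 * how much userspace provided, and a kernel copy of the command. */
struct ucmd_sketch {
	void __user *ubuffer;
	u32 user_size;
	void *cmd;
};

/* Mirrors iommufd_ucmd_respond() from iommufd_private.h:121 above:
 * copy back no more than userspace asked for and no more than the
 * kernel structure holds. */
static int ucmd_respond_sketch(struct ucmd_sketch *ucmd, size_t cmd_len)
{
	if (copy_to_user(ucmd->ubuffer, ucmd->cmd,
			 min_t(size_t, ucmd->user_size, cmd_len)))
		return -EFAULT;
	return 0;
}

/* Hypothetical command and handler in the style of the execute()
 * callbacks listed at main.c:322; out_id is filled in by the kernel. */
struct example_alloc_cmd {
	u32 size;
	u32 flags;
	u32 out_id;
};

static int example_alloc_ioctl_sketch(struct ucmd_sketch *ucmd)
{
	struct example_alloc_cmd *cmd = ucmd->cmd;

	if (cmd->flags)
		return -EOPNOTSUPP;
	cmd->out_id = 1;	/* placeholder for a real object ID */
	return ucmd_respond_sketch(ucmd, sizeof(*cmd));
}

The real handlers additionally finalize or abort the new object against ucmd->ictx, which is why the ioas.c and fault.c hits pair iommufd_ucmd_respond() with iommufd_object_finalize() and iommufd_object_abort_and_destroy().
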
/linux/drivers/infiniband/hw/mana/

wq.c
    14  struct mana_ib_create_wq ucmd = {};   in mana_ib_create_wq() local
    18  if (udata->inlen < sizeof(ucmd))   in mana_ib_create_wq()
    21  err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));   in mana_ib_create_wq()
    32  ibdev_dbg(&mdev->ib_dev, "ucmd wq_buf_addr 0x%llx\n", ucmd.wq_buf_addr);   in mana_ib_create_wq()
    34  err = mana_ib_create_queue(mdev, ucmd.wq_buf_addr, ucmd.wq_buf_size, &wq->queue);   in mana_ib_create_wq()
    42  wq->wq_buf_size = ucmd.wq_buf_size;   in mana_ib_create_wq()

qp.c
    100  struct mana_ib_create_qp_rss ucmd = {};   in mana_ib_create_qp_rss() local
    114  if (!udata || udata->inlen < sizeof(ucmd))   in mana_ib_create_qp_rss()
    117  ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));   in mana_ib_create_qp_rss()
    147  if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {   in mana_ib_create_qp_rss()
    150  ucmd.rx_hash_function);   in mana_ib_create_qp_rss()
    155  port = ucmd.port;   in mana_ib_create_qp_rss()
    165  ucmd.rx_hash_function, port);   in mana_ib_create_qp_rss()
    229  ucmd.rx_hash_key_len,   in mana_ib_create_qp_rss()
    230  ucmd.rx_hash_key);   in mana_ib_create_qp_rss()
    276  struct mana_ib_create_qp ucmd = {};   in mana_ib_create_qp_raw() local
    [all …]

cq.c
    16  struct mana_ib_create_cq ucmd = {};   in mana_ib_create_cq() local
    30  err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));   in mana_ib_create_cq()
    37  is_rnic_cq = !!(ucmd.flags & MANA_IB_CREATE_RNIC_CQ);   in mana_ib_create_cq()
    45  err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);   in mana_ib_create_cq()

/linux/drivers/infiniband/hw/mthca/

mthca_provider.c
    395  struct mthca_create_srq ucmd;   in mthca_create_srq() local
    405  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))   in mthca_create_srq()
    409  context->db_tab, ucmd.db_index,   in mthca_create_srq()
    410  ucmd.db_page);   in mthca_create_srq()
    415  srq->mr.ibmr.lkey = ucmd.lkey;   in mthca_create_srq()
    416  srq->db_index = ucmd.db_index;   in mthca_create_srq()
    424  context->db_tab, ucmd.db_index);   in mthca_create_srq()
    460  struct mthca_create_qp ucmd;   in mthca_create_qp() local
    474  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))   in mthca_create_qp()
    479  ucmd.sq_db_index,   in mthca_create_qp()
    [all …]

/linux/drivers/infiniband/hw/mlx5/

cq.c
    723  struct mlx5_ib_create_cq ucmd = {};   in create_cq_user() local
    734  ucmdlen = min(udata->inlen, sizeof(ucmd));   in create_cq_user()
    738  if (ib_copy_from_udata(&ucmd, udata, ucmdlen))   in create_cq_user()
    741  if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD |   in create_cq_user()
    746  if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||   in create_cq_user()
    747  ucmd.reserved0 || ucmd.reserved1)   in create_cq_user()
    750  *cqe_size = ucmd.cqe_size;   in create_cq_user()
    753  ib_umem_get(&dev->ib_dev, ucmd.buf_addr,   in create_cq_user()
    754  entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);   in create_cq_user()
    768  err = mlx5_ib_db_map_user(context, ucmd.db_addr, &cq->db);   in create_cq_user()
    [all …]

srq.c
    48  struct mlx5_ib_create_srq ucmd = {};   in create_srq_user() local
    55  ucmdlen = min(udata->inlen, sizeof(ucmd));   in create_srq_user()
    57  if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {   in create_srq_user()
    62  if (ucmd.reserved0 || ucmd.reserved1)   in create_srq_user()
    65  if (udata->inlen > sizeof(ucmd) &&   in create_srq_user()
    66  !ib_is_udata_cleared(udata, sizeof(ucmd),   in create_srq_user()
    67  udata->inlen - sizeof(ucmd)))   in create_srq_user()
    71  err = get_srq_user_index(ucontext, &ucmd, udata->inlen, &uidx);   in create_srq_user()
    76  srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);   in create_srq_user()
    78  srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0);   in create_srq_user()
    [all …]

counters.h
    15  struct mlx5_ib_create_flow *ucmd);

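The mana and mlx5 entries above share one defensive copy of the user command: copy no more than min(udata->inlen, sizeof(ucmd)), reject set reserved fields, and (in mlx5's srq.c) reject any trailing bytes this kernel does not understand unless they are zero. A sketch of that idiom follows; struct example_create_cmd is illustrative, not the real mlx5_ib_create_srq or mana_ib_create_cq layout, and the caller is assumed to pass a zero-initialized command (the drivers above declare ucmd = {} so a short copy leaves the tail cleared).

#include <linux/types.h>
#include <linux/minmax.h>
#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* Illustrative user command; real drivers use their own uAPI structs. */
struct example_create_cmd {
	__aligned_u64 buf_addr;
	__u32 flags;
	__u32 reserved0;
};

static int copy_create_cmd_sketch(struct ib_udata *udata,
				  struct example_create_cmd *ucmd)
{
	/* Require at least the original (v0) part of the command. */
	if (udata->inlen < offsetofend(struct example_create_cmd, flags))
		return -EINVAL;

	/* Never copy more than the kernel structure can hold. */
	if (ib_copy_from_udata(ucmd, udata,
			       min(sizeof(*ucmd), udata->inlen)))
		return -EFAULT;

	/* Reserved fields must be zero so they can gain meaning later. */
	if (ucmd->reserved0)
		return -EINVAL;

	/* Newer userspace may pass a longer struct; accept it only if the
	 * part this kernel does not understand is all zeroes. */
	if (udata->inlen > sizeof(*ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(*ucmd),
				 udata->inlen - sizeof(*ucmd)))
		return -EOPNOTSUPP;

	return 0;
}
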
/linux/drivers/infiniband/hw/mlx4/

qp.c
    448  struct mlx4_ib_create_qp *ucmd)   in set_user_sq_size() argument
    453  if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||   in set_user_sq_size()
    456  if (ucmd->log_sq_stride >   in set_user_sq_size()
    458  ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)   in set_user_sq_size()
    461  qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;   in set_user_sq_size()
    462  qp->sq.wqe_shift = ucmd->log_sq_stride;   in set_user_sq_size()
    554  struct mlx4_ib_create_qp_rss *ucmd)   in set_qp_rss() argument
    559  if ((ucmd->rx_hash_function == MLX4_IB_RX_HASH_FUNC_TOEPLITZ) &&   in set_qp_rss()
    561  memcpy(rss_ctx->rss_key, ucmd->rx_hash_key,   in set_qp_rss()
    568  if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4 |   in set_qp_rss()
    [all …]

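set_user_sq_size() above treats the user-provided log2 sizes as untrusted: the shift goes through check_shl_overflow() so a hostile log_sq_bb_count cannot wrap the element count, and the stride is range-checked before either value is used. Here is a standalone sketch of the same validation; the limits are placeholders, not mlx4's real MLX4_IB_MIN_SQ_STRIDE and friends.

#include <linux/types.h>
#include <linux/overflow.h>
#include <linux/errno.h>

#define EXAMPLE_MIN_LOG_STRIDE	6		/* placeholder minimum */
#define EXAMPLE_MAX_LOG_STRIDE	10		/* placeholder maximum */
#define EXAMPLE_MAX_WQES	(64 * 1024)	/* placeholder queue limit */

/* Validate user-supplied log2 queue parameters before shifting by them. */
static int set_sq_size_sketch(u8 log_wqe_cnt, u8 log_stride,
			      u32 *wqe_cnt, u32 *wqe_shift)
{
	u32 cnt;

	/* check_shl_overflow() is true if (1 << log_wqe_cnt) overflows cnt. */
	if (check_shl_overflow(1, log_wqe_cnt, &cnt) || cnt > EXAMPLE_MAX_WQES)
		return -EINVAL;

	if (log_stride < EXAMPLE_MIN_LOG_STRIDE ||
	    log_stride > EXAMPLE_MAX_LOG_STRIDE)
		return -EINVAL;

	*wqe_cnt = cnt;
	*wqe_shift = log_stride;
	return 0;
}
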
srq.c
    112  struct mlx4_ib_create_srq ucmd;   in mlx4_ib_create_srq() local
    114  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))   in mlx4_ib_create_srq()
    118  ib_umem_get(ib_srq->device, ucmd.buf_addr, buf_size, 0);   in mlx4_ib_create_srq()
    132  err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db);   in mlx4_ib_create_srq()

cq.c
    206  struct mlx4_ib_create_cq ucmd;   in mlx4_ib_create_cq() local
    208  if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {   in mlx4_ib_create_cq()
    213  buf_addr = (void *)(unsigned long)ucmd.buf_addr;   in mlx4_ib_create_cq()
    215  ucmd.buf_addr, entries);   in mlx4_ib_create_cq()
    219  err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db);   in mlx4_ib_create_cq()
    318  struct mlx4_ib_resize_cq ucmd;   in mlx4_alloc_resize_umem() local
    324  if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))   in mlx4_alloc_resize_umem()
    332  ucmd.buf_addr, entries);   in mlx4_alloc_resize_umem()

/linux/drivers/dma-buf/

dma-heap.c
    125  static long dma_heap_ioctl(struct file *file, unsigned int ucmd,   in dma_heap_ioctl() argument
    132  int nr = _IOC_NR(ucmd);   in dma_heap_ioctl()
    144  out_size = _IOC_SIZE(ucmd);   in dma_heap_ioctl()
    146  if ((ucmd & kcmd & IOC_IN) == 0)   in dma_heap_ioctl()
    148  if ((ucmd & kcmd & IOC_OUT) == 0)   in dma_heap_ioctl()

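dma_heap_ioctl() above treats the user-supplied ioctl number itself as untrusted input: _IOC_NR() selects the command from a kernel-owned table, _IOC_SIZE() gives the size userspace encoded, and the direction bits are intersected with the kernel's canonical command so data only moves in directions both sides agree on. A sketch of that decode follows; the command table and structure here are hypothetical, not dma-heap's real DMA_HEAP_IOCTL_ALLOC.

#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical uAPI command, standing in for dma-heap's allocation ioctl. */
struct example_alloc_data {
	__u64 len;
	__u32 fd;
	__u32 flags;
};
#define EXAMPLE_IOCTL_ALLOC	_IOWR('E', 0x0, struct example_alloc_data)

/* Kernel-owned list of valid commands, indexed by _IOC_NR(). */
static const unsigned int example_ioctl_cmds[] = {
	EXAMPLE_IOCTL_ALLOC,
};

static int example_ioctl_sizes_sketch(unsigned int ucmd,
				      unsigned int *in_size,
				      unsigned int *out_size)
{
	unsigned int nr = _IOC_NR(ucmd);
	unsigned int kcmd;

	if (nr >= ARRAY_SIZE(example_ioctl_cmds))
		return -EINVAL;
	kcmd = example_ioctl_cmds[nr];

	/* Size as encoded by userspace; may differ if the uAPI ever grows. */
	*in_size = _IOC_SIZE(ucmd);
	*out_size = _IOC_SIZE(ucmd);

	/* Only copy in/out when both the user's and the kernel's encodings
	 * carry that direction bit, as dma_heap_ioctl() does above. */
	if ((ucmd & kcmd & IOC_IN) == 0)
		*in_size = 0;
	if ((ucmd & kcmd & IOC_OUT) == 0)
		*out_size = 0;
	return 0;
}
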
/linux/drivers/infiniband/hw/vmw_pvrdma/

pvrdma_srq.c
    109  struct pvrdma_create_srq ucmd;   in pvrdma_create_srq() local
    144  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {   in pvrdma_create_srq()
    149  srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0);   in pvrdma_create_srq()

pvrdma_cq.c
    117  struct pvrdma_create_cq ucmd;   in pvrdma_create_cq() local
    137  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {   in pvrdma_create_cq()
    142  cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, ucmd.buf_size,   in pvrdma_create_cq()

pvrdma_qp.c
    201  struct pvrdma_create_qp ucmd;   in pvrdma_create_qp() local
    255  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {   in pvrdma_create_qp()
    272  ucmd.rbuf_addr,   in pvrdma_create_qp()
    273  ucmd.rbuf_size, 0);   in pvrdma_create_qp()
    284  qp->sumem = ib_umem_get(ibqp->device, ucmd.sbuf_addr,   in pvrdma_create_qp()
    285  ucmd.sbuf_size, 0);   in pvrdma_create_qp()

/linux/arch/mips/cavium-octeon/

octeon-platform.c
    36  u32 ucmd;   in octeon2_usb_reset() local
    43  ucmd = cvmx_read64_uint32(CVMX_UAHCX_EHCI_USBCMD);   in octeon2_usb_reset()
    44  ucmd &= ~CMD_RUN;   in octeon2_usb_reset()
    45  cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);   in octeon2_usb_reset()
    47  ucmd |= CMD_RESET;   in octeon2_usb_reset()
    48  cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);   in octeon2_usb_reset()
    49  ucmd = cvmx_read64_uint32(CVMX_UAHCX_OHCI_USBCMD);   in octeon2_usb_reset()
    50  ucmd |= CMD_RUN;   in octeon2_usb_reset()
    51  cvmx_write64_uint32(CVMX_UAHCX_OHCI_USBCMD, ucmd);   in octeon2_usb_reset()

/linux/drivers/iommu/arm/arm-smmu-v3/

arm-smmu-v3-iommufd.c
    244  struct iommu_viommu_arm_smmuv3_invalidate ucmd;   member
    259  cmd->cmd[0] = le64_to_cpu(cmd->ucmd.cmd[0]);   in arm_vsmmu_convert_user_cmd()
    260  cmd->cmd[1] = le64_to_cpu(cmd->ucmd.cmd[1]);   in arm_vsmmu_convert_user_cmd()

/linux/drivers/infiniband/hw/qib/

qib_file_ops.c
    2035  const struct qib_cmd __user *ucmd;   in qib_write() local
    2054  ucmd = (const struct qib_cmd __user *) data;   in qib_write()
    2056  if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {   in qib_write()
    2068  src = &ucmd->cmd.user_info;   in qib_write()
    2074  src = &ucmd->cmd.recv_ctrl;   in qib_write()
    2080  src = &ucmd->cmd.ctxt_info;   in qib_write()
    2087  src = &ucmd->cmd.tid_info;   in qib_write()
    2093  src = &ucmd->cmd.part_key;   in qib_write()
    2106  src = &ucmd->cmd.poll_type;   in qib_write()
    2112  src = &ucmd->cmd.armlaunch_ctrl;   in qib_write()
    [all …]

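qib_write() above reads the command in two steps: it first copies only the discriminant (cmd.type) from the user pointer, then copies just the union member that command type uses, so the kernel never reads past what the variant needs. Below is a generic sketch of that pattern; struct example_cmd and its type values are illustrative, not the real struct qib_cmd.

#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

#define EXAMPLE_CMD_USER_INFO	1
#define EXAMPLE_CMD_PART_KEY	2

/* Illustrative discriminated command in the spirit of struct qib_cmd. */
struct example_cmd {
	__u32 type;
	union {
		__u64 user_info_addr;
		__u16 part_key;
	} cmd;
};

static ssize_t example_write_sketch(const char __user *data, size_t count)
{
	const struct example_cmd __user *ucmd =
		(const struct example_cmd __user *)data;
	struct example_cmd cmd;
	const void __user *src;
	size_t copy_len;

	if (count < sizeof(cmd.type))
		return -EINVAL;

	/* Step 1: read only the discriminant. */
	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type)))
		return -EFAULT;

	/* Step 2: copy only the variant this command type actually uses. */
	switch (cmd.type) {
	case EXAMPLE_CMD_USER_INFO:
		src = &ucmd->cmd.user_info_addr;
		copy_len = sizeof(cmd.cmd.user_info_addr);
		break;
	case EXAMPLE_CMD_PART_KEY:
		src = &ucmd->cmd.part_key;
		copy_len = sizeof(cmd.cmd.part_key);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(&cmd.cmd, src, copy_len))
		return -EFAULT;

	return count;	/* the real driver dispatches on cmd.type here */
}
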
/linux/drivers/infiniband/sw/rxe/

rxe_srq.c
    153  struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)   in rxe_srq_from_attr() argument
    165  mi = u64_to_user_ptr(ucmd->mmap_info_addr);   in rxe_srq_from_attr()

/linux/drivers/nvme/host/

ioctl.c
    296  struct nvme_passthru_cmd __user *ucmd, unsigned int flags,   in nvme_user_cmd() argument
    305  if (copy_from_user(&cmd, ucmd, sizeof(cmd)))   in nvme_user_cmd()
    336  if (put_user(result, &ucmd->result))   in nvme_user_cmd()
    344  struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,   in nvme_user_cmd64() argument
    352  if (copy_from_user(&cmd, ucmd, sizeof(cmd)))   in nvme_user_cmd64()
    383  if (put_user(cmd.result, &ucmd->result))   in nvme_user_cmd64()

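nvme_user_cmd() above is the fixed-size variant of the copy-in/copy-out handshake: the whole passthrough structure comes in with one copy_from_user(), and on completion only the result field is written back with put_user(), leaving the rest of the user structure untouched. A reduced sketch follows; the structure and the execution step are placeholders for the real struct nvme_passthru_cmd handling.

#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

/* Cut-down stand-in for struct nvme_passthru_cmd. */
struct example_passthru_cmd {
	__u8  opcode;
	__u8  flags;
	__u32 nsid;
	__u64 addr;
	__u32 data_len;
	__u64 result;	/* written back by the kernel on completion */
};

static int example_user_cmd_sketch(struct example_passthru_cmd __user *ucmd)
{
	struct example_passthru_cmd cmd;
	u64 result = 0;

	/* One fixed-size copy; there is no variable-length tail to probe. */
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;		/* unknown flag bits are rejected */

	/* ... submit cmd to the device and wait; result is a placeholder ... */

	/* Only the result field travels back to userspace. */
	if (put_user(result, &ucmd->result))
		return -EFAULT;
	return 0;
}
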
/linux/drivers/scsi/sym53c8xx_2/

sym_glue.c
    129  struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);   in sym_xpt_done() local
    131  if (ucmd->eh_done)   in sym_xpt_done()
    132  complete(ucmd->eh_done);   in sym_xpt_done()
    572  struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);   in sym53c8xx_eh_abort_handler() local
    607  ucmd->eh_done = &eh_done;   in sym53c8xx_eh_abort_handler()
    610  ucmd->eh_done = NULL;   in sym53c8xx_eh_abort_handler()
    649  struct sym_ucmd *ucmd;   in sym53c8xx_eh_target_reset_handler() local
    655  ucmd = SYM_UCMD_PTR(cmd);   in sym53c8xx_eh_target_reset_handler()
    657  ucmd->eh_done = &eh_done;   in sym53c8xx_eh_target_reset_handler()
    660  ucmd->eh_done = NULL;   in sym53c8xx_eh_target_reset_handler()

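The sym53c8xx hits show a small handshake between the SCSI error handlers and the normal completion path: the EH thread publishes an on-stack completion through ucmd->eh_done, waits with a timeout, and sym_xpt_done() signals it if it is still set when the command finishes. Here is a self-contained sketch of that handshake; struct example_ucmd is a stand-in for struct sym_ucmd, and the host locking the real driver relies on is omitted.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Simplified per-command state, standing in for struct sym_ucmd. */
struct example_ucmd {
	struct completion *eh_done;	/* non-NULL only while an EH thread waits */
};

/* Normal completion path, in the spirit of sym_xpt_done(): wake a
 * waiting error handler if there is one. */
static void example_cmd_done_sketch(struct example_ucmd *ucmd)
{
	if (ucmd->eh_done)
		complete(ucmd->eh_done);
}

/* Error-handler side, in the spirit of sym53c8xx_eh_abort_handler():
 * publish an on-stack completion, wait a bounded time, then detach. */
static int example_eh_abort_sketch(struct example_ucmd *ucmd)
{
	DECLARE_COMPLETION_ONSTACK(eh_done);
	unsigned long left;

	ucmd->eh_done = &eh_done;
	/* ... tell the hardware to abort the command here ... */
	left = wait_for_completion_timeout(&eh_done, 5 * HZ);
	ucmd->eh_done = NULL;

	return left ? 0 : -ETIMEDOUT;
}
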