/linux/drivers/iommu/iommufd/

ioas.c
    38   int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd)   in iommufd_ioas_alloc_ioctl()  [argument]
    40   struct iommu_ioas_alloc *cmd = ucmd->cmd;   in iommufd_ioas_alloc_ioctl()
    47   ioas = iommufd_ioas_alloc(ucmd->ictx);   in iommufd_ioas_alloc_ioctl()
    52   rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));   in iommufd_ioas_alloc_ioctl()
    56   down_read(&ucmd->ictx->ioas_creation_lock);   in iommufd_ioas_alloc_ioctl()
    57   iommufd_object_finalize(ucmd->ictx, &ioas->obj);   in iommufd_ioas_alloc_ioctl()
    58   up_read(&ucmd->ictx->ioas_creation_lock);   in iommufd_ioas_alloc_ioctl()
    62   iommufd_object_abort_and_destroy(ucmd->ictx, &ioas->obj);   in iommufd_ioas_alloc_ioctl()
    66   int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd)   in iommufd_ioas_iova_ranges()  [argument]
    69   struct iommu_ioas_iova_ranges *cmd = ucmd->cmd;   in iommufd_ioas_iova_ranges()
    [all …]
|
iommufd_private.h
    161  static inline int iommufd_ucmd_respond(struct iommufd_ucmd *ucmd,   in iommufd_ucmd_respond()  [argument]
    164  if (copy_to_user(ucmd->ubuffer, ucmd->cmd,   in iommufd_ucmd_respond()
    165  min_t(size_t, ucmd->user_size, cmd_len)))   in iommufd_ucmd_respond()
    297  _iommufd_object_alloc_ucmd(struct iommufd_ucmd *ucmd, size_t size,
    300  #define __iommufd_object_alloc_ucmd(ucmd, ptr, type, obj) \   [argument]
    302  ucmd, \
    309  #define iommufd_object_alloc_ucmd(ucmd, ptr, type) \   [argument]
    310  __iommufd_object_alloc_ucmd(ucmd, ptr, type, obj)
    341  int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd);
    343  int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd);
    [all …]
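Note: the respond helper at lines 161-165 copies the kernel's command struct back to userspace, truncated to whatever size the caller advertised. A minimal sketch reconstructed from the fragment above; the struct layout and the -EFAULT return are assumptions, not copied from the header (needs <linux/uaccess.h> and <linux/minmax.h>):

    /* Assumed layout of the per-ioctl command tracking struct. */
    struct iommufd_ucmd {
        struct iommufd_ctx *ictx;
        void __user *ubuffer;       /* user's ioctl argument pointer */
        u32 user_size;              /* size userspace claims its struct has */
        void *cmd;                  /* kernel copy of the command struct */
        struct iommufd_object *new_obj;
    };

    static inline int iommufd_ucmd_respond(struct iommufd_ucmd *ucmd,
                                           size_t cmd_len)
    {
        /* Never write past the buffer size userspace advertised. */
        if (copy_to_user(ucmd->ubuffer, ucmd->cmd,
                         min_t(size_t, ucmd->user_size, cmd_len)))
            return -EFAULT;         /* assumed error code */
        return 0;
    }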
|
viommu.c
    17   int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)   in iommufd_viommu_alloc_ioctl()  [argument]
    19   struct iommu_viommu_alloc *cmd = ucmd->cmd;   in iommufd_viommu_alloc_ioctl()
    35   idev = iommufd_get_device(ucmd, cmd->dev_id);   in iommufd_viommu_alloc_ioctl()
    60   hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);   in iommufd_viommu_alloc_ioctl()
    72   ucmd, viommu_size, IOMMUFD_OBJ_VIOMMU);   in iommufd_viommu_alloc_ioctl()
    80   viommu->ictx = ucmd->ictx;   in iommufd_viommu_alloc_ioctl()
    104  rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));   in iommufd_viommu_alloc_ioctl()
    107  iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);   in iommufd_viommu_alloc_ioctl()
    109  iommufd_put_object(ucmd->ictx, &idev->obj);   in iommufd_viommu_alloc_ioctl()
    143  int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd)   in iommufd_vdevice_alloc_ioctl()  [argument]
    [all …]
|
main.c
    66   struct iommufd_object *_iommufd_object_alloc_ucmd(struct iommufd_ucmd *ucmd,   in _iommufd_object_alloc_ucmd()  [argument]
    73   if (WARN_ON(ucmd->new_obj))   in _iommufd_object_alloc_ucmd()
    85   new_obj = _iommufd_object_alloc(ucmd->ictx, size, type);   in _iommufd_object_alloc_ucmd()
    89   ucmd->new_obj = new_obj;   in _iommufd_object_alloc_ucmd()
    288  static int iommufd_destroy(struct iommufd_ucmd *ucmd)   in iommufd_destroy()  [argument]
    290  struct iommu_destroy *cmd = ucmd->cmd;   in iommufd_destroy()
    292  return iommufd_object_remove(ucmd->ictx, NULL, cmd->id, 0);   in iommufd_destroy()
    389  static int iommufd_option(struct iommufd_ucmd *ucmd)   in iommufd_option()  [argument]
    391  struct iommu_option *cmd = ucmd->cmd;   in iommufd_option()
    399  rc = iommufd_option_rlimit_mode(cmd, ucmd->ictx);   in iommufd_option()
    [all …]
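Note: lines 66-89 show the ucmd-tracked allocation pattern: an object created on behalf of an ioctl is remembered on the ucmd so the dispatcher can finalize it on success or abort it on failure. A hedged sketch of that flow; the error codes and the single-object restriction comment are assumptions beyond what the fragment shows:

    /* Sketch only: allocate one object per ioctl and park it on the ucmd. */
    struct iommufd_object *_iommufd_object_alloc_ucmd(struct iommufd_ucmd *ucmd,
                                                      size_t size,
                                                      enum iommufd_object_type type)
    {
        struct iommufd_object *new_obj;

        /* Only one new object per command is supported. */
        if (WARN_ON(ucmd->new_obj))
            return ERR_PTR(-EBUSY);     /* assumed error code */

        new_obj = _iommufd_object_alloc(ucmd->ictx, size, type);
        if (IS_ERR(new_obj))
            return new_obj;

        ucmd->new_obj = new_obj;
        return new_obj;
    }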
|
vfio_compat.c
    123  int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd)   in iommufd_vfio_ioas()  [argument]
    125  struct iommu_vfio_ioas *cmd = ucmd->cmd;   in iommufd_vfio_ioas()
    132  ioas = get_compat_ioas(ucmd->ictx);   in iommufd_vfio_ioas()
    136  iommufd_put_object(ucmd->ictx, &ioas->obj);   in iommufd_vfio_ioas()
    137  return iommufd_ucmd_respond(ucmd, sizeof(*cmd));   in iommufd_vfio_ioas()
    140  ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);   in iommufd_vfio_ioas()
    143  xa_lock(&ucmd->ictx->objects);   in iommufd_vfio_ioas()
    144  ucmd->ictx->vfio_ioas = ioas;   in iommufd_vfio_ioas()
    145  xa_unlock(&ucmd->ictx->objects);   in iommufd_vfio_ioas()
    146  iommufd_put_object(ucmd->ictx, &ioas->obj);   in iommufd_vfio_ioas()
    [all …]
|
eventq.c
    417  int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)   in iommufd_fault_alloc()  [argument]
    419  struct iommu_fault_alloc *cmd = ucmd->cmd;   in iommufd_fault_alloc()
    427  fault = __iommufd_object_alloc_ucmd(ucmd, fault, IOMMUFD_OBJ_FAULT,   in iommufd_fault_alloc()
    436  ucmd->ictx, &iommufd_fault_fops);   in iommufd_fault_alloc()
    443  rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));   in iommufd_fault_alloc()
    475  int iommufd_veventq_alloc(struct iommufd_ucmd *ucmd)   in iommufd_veventq_alloc()  [argument]
    477  struct iommu_veventq_alloc *cmd = ucmd->cmd;   in iommufd_veventq_alloc()
    489  viommu = iommufd_get_viommu(ucmd, cmd->viommu_id);   in iommufd_veventq_alloc()
    500  veventq = __iommufd_object_alloc(ucmd->ictx, veventq,   in iommufd_veventq_alloc()
    516  ucmd->ictx, &iommufd_veventq_fops);   in iommufd_veventq_alloc()
    [all …]
|
/linux/drivers/fwctl/

main.c
    33   static int ucmd_respond(struct fwctl_ucmd *ucmd, size_t cmd_len)   in ucmd_respond()  [argument]
    35   if (copy_to_user(ucmd->ubuffer, ucmd->cmd,   in ucmd_respond()
    36   min_t(size_t, ucmd->user_size, cmd_len)))   in ucmd_respond()
    56   static int fwctl_cmd_info(struct fwctl_ucmd *ucmd)   in fwctl_cmd_info()  [argument]
    58   struct fwctl_device *fwctl = ucmd->uctx->fwctl;   in fwctl_cmd_info()
    59   struct fwctl_info *cmd = ucmd->cmd;   in fwctl_cmd_info()
    71   fwctl->ops->info(ucmd->uctx, &driver_info_len);   in fwctl_cmd_info()
    83   return ucmd_respond(ucmd, sizeof(*cmd));   in fwctl_cmd_info()
    86   static int fwctl_cmd_rpc(struct fwctl_ucmd *ucmd)   in fwctl_cmd_rpc()  [argument]
    88   struct fwctl_device *fwctl = ucmd->uctx->fwctl;   in fwctl_cmd_rpc()
    [all …]
|
/linux/kernel/liveupdate/

luo_core.c
    276  static int luo_ioctl_create_session(struct luo_ucmd *ucmd)   in luo_ioctl_create_session()  [argument]
    278  struct liveupdate_ioctl_create_session *argp = ucmd->cmd;   in luo_ioctl_create_session()
    290  err = luo_ucmd_respond(ucmd, sizeof(*argp));   in luo_ioctl_create_session()
    306  static int luo_ioctl_retrieve_session(struct luo_ucmd *ucmd)   in luo_ioctl_retrieve_session()  [argument]
    308  struct liveupdate_ioctl_retrieve_session *argp = ucmd->cmd;   in luo_ioctl_retrieve_session()
    320  err = luo_ucmd_respond(ucmd, sizeof(*argp));   in luo_ioctl_retrieve_session()
    373  int (*execute)(struct luo_ucmd *ucmd);
    396  struct luo_ucmd ucmd = {};   in luo_ioctl()  [local]
    405  ucmd.ubuffer = (void __user *)arg;   in luo_ioctl()
    406  err = get_user(ucmd.user_size, (u32 __user *)ucmd.ubuffer);   in luo_ioctl()
    [all …]
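Note: luo_ioctl() at lines 396-406 wraps the raw ioctl argument in a luo_ucmd, and the get_user() call suggests the first u32 of every command struct is its size. A heavily hedged sketch of that entry path; the on-stack buffer, the sanity bounds, and calling one handler directly instead of the real execute() dispatch table are all assumptions:

    /* Sketch: build a luo_ucmd from the raw ioctl argument (dispatch elided). */
    static long luo_ioctl_sketch(struct file *filp, unsigned int op, unsigned long arg)
    {
        struct luo_ucmd ucmd = {};
        struct liveupdate_ioctl_create_session cmd_buf = {};    /* one command, for the sketch */
        int err;

        ucmd.ubuffer = (void __user *)arg;
        /* The fragment suggests every command struct begins with a u32 size header. */
        err = get_user(ucmd.user_size, (u32 __user *)ucmd.ubuffer);
        if (err)
            return err;
        if (ucmd.user_size < sizeof(u32) || ucmd.user_size > PAGE_SIZE)
            return -EINVAL;         /* assumed sanity bounds */

        ucmd.cmd = &cmd_buf;
        if (copy_from_user(ucmd.cmd, ucmd.ubuffer,
                           min_t(u32, ucmd.user_size, sizeof(cmd_buf))))
            return -EFAULT;

        /* The real dispatcher looks up a per-op handler with signature
         * int (*execute)(struct luo_ucmd *ucmd); here one is called directly. */
        return luo_ioctl_create_session(&ucmd);
    }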
|
luo_session.c
    231  struct luo_ucmd *ucmd)   in luo_session_preserve_fd()  [argument]
    233  struct liveupdate_session_preserve_fd *argp = ucmd->cmd;   in luo_session_preserve_fd()
    241  err = luo_ucmd_respond(ucmd, sizeof(*argp));   in luo_session_preserve_fd()
    249  struct luo_ucmd *ucmd)   in luo_session_retrieve_fd()  [argument]
    251  struct liveupdate_session_retrieve_fd *argp = ucmd->cmd;   in luo_session_retrieve_fd()
    264  err = luo_ucmd_respond(ucmd, sizeof(*argp));   in luo_session_retrieve_fd()
    281  struct luo_ucmd *ucmd)   in luo_session_finish()  [argument]
    283  struct liveupdate_session_finish *argp = ucmd->cmd;   in luo_session_finish()
    289  return luo_ucmd_respond(ucmd, sizeof(*argp));   in luo_session_finish()
    302  int (*execute)(struct luo_session *session, struct luo_ucmd *ucmd);
    [all …]
|
luo_internal.h
    20   static inline int luo_ucmd_respond(struct luo_ucmd *ucmd,   in luo_ucmd_respond()  [argument]
    27   if (copy_to_user(ucmd->ubuffer, ucmd->cmd,   in luo_ucmd_respond()
    28   min_t(size_t, ucmd->user_size, kernel_cmd_size))) {   in luo_ucmd_respond()
|
/linux/drivers/infiniband/hw/mana/

wq.c
    14   struct mana_ib_create_wq ucmd = {};   in mana_ib_create_wq()  [local]
    18   if (udata->inlen < sizeof(ucmd))   in mana_ib_create_wq()
    21   err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));   in mana_ib_create_wq()
    32   ibdev_dbg(&mdev->ib_dev, "ucmd wq_buf_addr 0x%llx\n", ucmd.wq_buf_addr);   in mana_ib_create_wq()
    34   err = mana_ib_create_queue(mdev, ucmd.wq_buf_addr, ucmd.wq_buf_size, &wq->queue);   in mana_ib_create_wq()
    42   wq->wq_buf_size = ucmd.wq_buf_size;   in mana_ib_create_wq()
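Note: the mana create-WQ path shows the standard way an RDMA provider pulls its vendor-private ucmd out of ib_udata: reject a buffer smaller than the known struct, then copy no more than what userspace actually passed. A minimal hedged sketch of just that pattern; the helper name and error code are illustrative, not from the driver:

    /* Sketch: copy the provider-private create-WQ command out of udata. */
    static int mana_ib_copy_create_wq_ucmd(struct ib_udata *udata,
                                           struct mana_ib_create_wq *ucmd)
    {
        if (udata->inlen < sizeof(*ucmd))
            return -EINVAL;         /* userspace struct too old/small */

        /* Never read past what userspace supplied. */
        return ib_copy_from_udata(ucmd, udata,
                                  min(sizeof(*ucmd), udata->inlen));
    }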
|
/linux/drivers/infiniband/hw/mlx5/

qp.c
    434  int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)   in set_rq_size()  [argument]
    452  if (ucmd) {   in set_rq_size()
    453  qp->rq.wqe_cnt = ucmd->rq_wqe_count;   in set_rq_size()
    454  if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))   in set_rq_size()
    456  qp->rq.wqe_shift = ucmd->rq_wqe_shift;   in set_rq_size()
    638  struct mlx5_ib_create_qp *ucmd,   in set_user_buf_size()  [argument]
    650  if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) {   in set_user_buf_size()
    652  ucmd->sq_wqe_count);   in set_user_buf_size()
    656  qp->sq.wqe_cnt = ucmd->sq_wqe_count;   in set_user_buf_size()
    882  struct mlx5_ib_create_wq *ucmd)   in create_user_rq()  [argument]
    [all …]
|
cq.c
    723  struct mlx5_ib_create_cq ucmd = {};   in create_cq_user()  [local]
    734  ucmdlen = min(udata->inlen, sizeof(ucmd));   in create_cq_user()
    738  if (ib_copy_from_udata(&ucmd, udata, ucmdlen))   in create_cq_user()
    741  if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD |   in create_cq_user()
    746  if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||   in create_cq_user()
    747  ucmd.reserved0 || ucmd.reserved1)   in create_cq_user()
    750  *cqe_size = ucmd.cqe_size;   in create_cq_user()
    753  ib_umem_get(&dev->ib_dev, ucmd.buf_addr,   in create_cq_user()
    754  entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);   in create_cq_user()
    768  err = mlx5_ib_db_map_user(context, ucmd.db_addr, &cq->db);   in create_cq_user()
    [all …]
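Note: create_cq_user() validates the user-supplied CQE size and reserved fields before pinning the CQ buffer with ib_umem_get(). A condensed hedged sketch of that sequence; the function name, the output parameters, and the simplified error paths are assumptions, and the flag-mask check is omitted:

    /* Sketch: validate the user CQ command, then pin the user buffer. */
    static int create_cq_user_sketch(struct mlx5_ib_dev *dev, struct ib_udata *udata,
                                     int entries, int *cqe_size,
                                     struct ib_umem **umem_out)
    {
        struct mlx5_ib_create_cq ucmd = {};
        size_t ucmdlen = min(udata->inlen, sizeof(ucmd));
        struct ib_umem *umem;

        if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
            return -EFAULT;

        /* Only 64- and 128-byte CQEs are accepted; reserved fields must be zero. */
        if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||
            ucmd.reserved0 || ucmd.reserved1)
            return -EINVAL;

        *cqe_size = ucmd.cqe_size;

        /* Pin the user-space CQ buffer described by the command. */
        umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
                           entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(umem))
            return PTR_ERR(umem);

        *umem_out = umem;
        return 0;
    }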
|
srq.c
    48   struct mlx5_ib_create_srq ucmd = {};   in create_srq_user()  [local]
    55   ucmdlen = min(udata->inlen, sizeof(ucmd));   in create_srq_user()
    57   if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {   in create_srq_user()
    62   if (ucmd.reserved0 || ucmd.reserved1)   in create_srq_user()
    65   if (udata->inlen > sizeof(ucmd) &&   in create_srq_user()
    66   !ib_is_udata_cleared(udata, sizeof(ucmd),   in create_srq_user()
    67   udata->inlen - sizeof(ucmd)))   in create_srq_user()
    71   err = get_srq_user_index(ucontext, &ucmd, udata->inlen, &uidx);   in create_srq_user()
    76   srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);   in create_srq_user()
    78   srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0);   in create_srq_user()
    [all …]
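Note: the SRQ path adds the usual forward-compatibility rule: if userspace passed a struct larger than the kernel knows about, the unknown tail must be all zeroes (ib_is_udata_cleared), otherwise the request is rejected. A hedged sketch of just that check; the helper name and error code are illustrative:

    /* Sketch: accept a longer-than-known ucmd only if the extra tail is zeroed. */
    static int check_srq_ucmd_tail(struct ib_udata *udata,
                                   const struct mlx5_ib_create_srq *ucmd)
    {
        if (udata->inlen > sizeof(*ucmd) &&
            !ib_is_udata_cleared(udata, sizeof(*ucmd),
                                 udata->inlen - sizeof(*ucmd)))
            return -EINVAL;     /* unknown trailing bytes were non-zero */
        return 0;
    }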
|
/linux/drivers/infiniband/hw/hns/

hns_roce_cq.c
    351  struct hns_roce_ib_create_cq *ucmd)   in get_cq_ucmd()  [argument]
    356  ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd)));   in get_cq_ucmd()
    366  struct hns_roce_ib_create_cq *ucmd)   in set_cq_param()  [argument]
    382  struct hns_roce_ib_create_cq *ucmd)   in set_cqe_size()  [argument]
    391  if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) {   in set_cqe_size()
    392  if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&   in set_cqe_size()
    393  ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) {   in set_cqe_size()
    395  "invalid cqe size %u.\n", ucmd->cqe_size);   in set_cqe_size()
    399  hr_cq->cqe_size = ucmd->cqe_size;   in set_cqe_size()
    415  struct hns_roce_ib_create_cq ucmd = {};   in hns_roce_create_cq()  [local]
    [all …]
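Note: set_cqe_size() only trusts ucmd->cqe_size when udata->inlen proves userspace actually provided that member, which is the standard offsetofend() test for optional trailing fields. A small hedged sketch; the fallback default and the error handling are assumptions:

    /* Sketch: honour an optional trailing ucmd field only if userspace sent it. */
    static int set_cqe_size_sketch(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
                                   const struct hns_roce_ib_create_cq *ucmd)
    {
        if (!udata || udata->inlen < offsetofend(typeof(*ucmd), cqe_size)) {
            hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;     /* assumed default */
            return 0;
        }

        if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&
            ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE)
            return -EINVAL;

        hr_cq->cqe_size = ucmd->cqe_size;
        return 0;
    }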
|
hns_roce_srq.c
    345  struct hns_roce_ib_create_srq ucmd = {};   in alloc_srq_buf()  [local]
    349  ret = ib_copy_from_udata(&ucmd, udata,   in alloc_srq_buf()
    350  min(udata->inlen, sizeof(ucmd)));   in alloc_srq_buf()
    359  ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);   in alloc_srq_buf()
    363  ret = alloc_srq_wqe_buf(hr_dev, srq, udata, ucmd.buf_addr);   in alloc_srq_buf()
    391  struct hns_roce_ib_create_srq *ucmd)   in get_srq_ucmd()  [argument]
    396  ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd)));   in get_srq_ucmd()
    428  struct hns_roce_ib_create_srq ucmd = {};   in alloc_srq_db()  [local]
    433  ret = get_srq_ucmd(srq, udata, &ucmd);   in alloc_srq_db()
    438  (ucmd.req_cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB)) {   in alloc_srq_db()
    [all …]
|
hns_roce_qp.c
    620  struct hns_roce_ib_create_qp *ucmd)   in check_sq_size_with_integrity()  [argument]
    626  if (ucmd->log_sq_stride > max_sq_stride ||   in check_sq_size_with_integrity()
    627  ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {   in check_sq_size_with_integrity()
    643  struct hns_roce_ib_create_qp *ucmd)   in set_user_sq_size()  [argument]
    649  if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||   in set_user_sq_size()
    653  ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);   in set_user_sq_size()
    662  hr_qp->sq.wqe_shift = ucmd->log_sq_stride;   in set_user_sq_size()
    807  struct hns_roce_ib_create_qp *ucmd)   in user_qp_has_sdb()  [argument]
    812  udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));   in user_qp_has_sdb()
    865  struct hns_roce_ib_create_qp *ucmd,   in alloc_user_qp_db()  [argument]
    [all …]
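Note: set_user_sq_size() turns the user's log_sq_bb_count into a WQE count with check_shl_overflow(), so a hostile value cannot wrap the shift. A hedged sketch of that validation; the limit parameter and the wqe_cnt field assignment are assumptions beyond the fragment:

    /* Sketch: derive the SQ WQE count from user-controlled log values, overflow-safely. */
    static int set_user_sq_size_sketch(struct hns_roce_qp *hr_qp,
                                       const struct hns_roce_ib_create_qp *ucmd,
                                       u32 max_wqe_cnt)
    {
        u32 cnt;

        if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
            cnt > max_wqe_cnt)
            return -EINVAL;

        hr_qp->sq.wqe_cnt = cnt;                    /* assumed field */
        hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
        return 0;
    }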
|
/linux/drivers/infiniband/hw/vmw_pvrdma/

pvrdma_srq.c
    109  struct pvrdma_create_srq ucmd;   in pvrdma_create_srq()  [local]
    144  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {   in pvrdma_create_srq()
    149  srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0);   in pvrdma_create_srq()
|
pvrdma_cq.c
    117  struct pvrdma_create_cq ucmd;   in pvrdma_create_cq()  [local]
    137  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {   in pvrdma_create_cq()
    142  cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, ucmd.buf_size,   in pvrdma_create_cq()
|
pvrdma_qp.c
    201  struct pvrdma_create_qp ucmd;   in pvrdma_create_qp()  [local]
    255  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {   in pvrdma_create_qp()
    272  ucmd.rbuf_addr,   in pvrdma_create_qp()
    273  ucmd.rbuf_size, 0);   in pvrdma_create_qp()
    284  qp->sumem = ib_umem_get(ibqp->device, ucmd.sbuf_addr,   in pvrdma_create_qp()
    285  ucmd.sbuf_size, 0);   in pvrdma_create_qp()
|
/linux/drivers/infiniband/hw/mlx4/

srq.c
    112  struct mlx4_ib_create_srq ucmd;   in mlx4_ib_create_srq()  [local]
    114  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))   in mlx4_ib_create_srq()
    118  ib_umem_get(ib_srq->device, ucmd.buf_addr, buf_size, 0);   in mlx4_ib_create_srq()
    132  err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db);   in mlx4_ib_create_srq()
|
/linux/arch/mips/cavium-octeon/

octeon-platform.c
    37   u32 ucmd;   in octeon2_usb_reset()  [local]
    44   ucmd = cvmx_read64_uint32(CVMX_UAHCX_EHCI_USBCMD);   in octeon2_usb_reset()
    45   ucmd &= ~CMD_RUN;   in octeon2_usb_reset()
    46   cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);   in octeon2_usb_reset()
    48   ucmd |= CMD_RESET;   in octeon2_usb_reset()
    49   cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);   in octeon2_usb_reset()
    50   ucmd = cvmx_read64_uint32(CVMX_UAHCX_OHCI_USBCMD);   in octeon2_usb_reset()
    51   ucmd |= CMD_RUN;   in octeon2_usb_reset()
    52   cvmx_write64_uint32(CVMX_UAHCX_OHCI_USBCMD, ucmd);   in octeon2_usb_reset()
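Note: here ucmd is not a user command struct at all but the USBCMD register value; octeon2_usb_reset() does a read-modify-write of the EHCI controller (clear RUN, then request RESET) before starting the companion OHCI controller. A hedged reconstruction of that sequence; the settle delay between steps is an assumption, the accessors and register names are as shown above:

    /* Sketch: halt and reset the EHCI controller, then restart OHCI. */
    static void octeon2_usb_reset_sketch(void)
    {
        u32 ucmd;

        ucmd = cvmx_read64_uint32(CVMX_UAHCX_EHCI_USBCMD);
        ucmd &= ~CMD_RUN;                       /* halt the host controller */
        cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);
        mdelay(2);                              /* assumed settle delay */
        ucmd |= CMD_RESET;                      /* request controller reset */
        cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);

        ucmd = cvmx_read64_uint32(CVMX_UAHCX_OHCI_USBCMD);
        ucmd |= CMD_RUN;                        /* bring the OHCI side up */
        cvmx_write64_uint32(CVMX_UAHCX_OHCI_USBCMD, ucmd);
    }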
|
/linux/drivers/infiniband/sw/rxe/

rxe_srq.c
    153  struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)   in rxe_srq_from_attr()  [argument]
    165  mi = u64_to_user_ptr(ucmd->mmap_info_addr);   in rxe_srq_from_attr()
|
/linux/drivers/nvme/host/

ioctl.c
    282  struct nvme_passthru_cmd __user *ucmd, unsigned int flags,   in nvme_user_cmd()  [argument]
    291  if (copy_from_user(&cmd, ucmd, sizeof(cmd)))   in nvme_user_cmd()
    322  if (put_user(result, &ucmd->result))   in nvme_user_cmd()
    330  struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,   in nvme_user_cmd64()  [argument]
    338  if (copy_from_user(&cmd, ucmd, sizeof(cmd)))   in nvme_user_cmd64()
    369  if (put_user(cmd.result, &ucmd->result))   in nvme_user_cmd64()
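Note: nvme_user_cmd() copies the whole nvme_passthru_cmd in one copy_from_user(), executes it, and writes only the completion result back with put_user() into the caller's struct. A minimal hedged sketch of that in/out handling; the flags check mirrors the real driver's rejection of reserved flags, but the submission step is a placeholder and the function name is illustrative:

    /* Sketch: passthrough ioctl argument handling, submission elided. */
    static int nvme_user_cmd_sketch(struct nvme_ctrl *ctrl,
                                    struct nvme_passthru_cmd __user *ucmd)
    {
        struct nvme_passthru_cmd cmd;
        u32 result = 0;
        int status;

        if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
            return -EFAULT;
        if (cmd.flags)                  /* reserved, must be zero */
            return -EINVAL;

        status = 0;     /* placeholder for building and submitting the NVMe command */

        /* Only the completion result travels back to userspace. */
        if (put_user(result, &ucmd->result))
            return -EFAULT;
        return status;
    }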
|
/linux/drivers/iommu/arm/arm-smmu-v3/

arm-smmu-v3-iommufd.c
    302  struct iommu_viommu_arm_smmuv3_invalidate ucmd;   [member]
    317  cmd->cmd[0] = le64_to_cpu(cmd->ucmd.cmd[0]);   in arm_vsmmu_convert_user_cmd()
    318  cmd->cmd[1] = le64_to_cpu(cmd->ucmd.cmd[1]);   in arm_vsmmu_convert_user_cmd()
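Note: arm_vsmmu_convert_user_cmd() shows why the user-visible invalidation entry is kept as a separate ucmd member: the guest writes raw little-endian SMMU CMDQ words, and the kernel converts them to CPU endianness before acting on them. A hedged sketch based on lines 302-318; the wrapper struct with a separate CPU-endian copy (rather than the driver's actual layout) is an assumption:

    /* Sketch: user-provided CMDQ entry is little-endian; convert before use. */
    struct arm_vsmmu_invalidation {
        struct iommu_viommu_arm_smmuv3_invalidate ucmd; /* as written by the guest */
        u64 cmd[2];                                     /* CPU-endian working copy */
    };

    static void arm_vsmmu_convert_user_cmd_sketch(struct arm_vsmmu_invalidation *cmd)
    {
        cmd->cmd[0] = le64_to_cpu(cmd->ucmd.cmd[0]);
        cmd->cmd[1] = le64_to_cpu(cmd->ucmd.cmd[1]);
    }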
|