Lines Matching +full:blk +full:- +full:ctrl
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2011-2014, Intel Corporation.
4 * Copyright (c) 2017-2021 Christoph Hellwig.
6 #include <linux/blk-integrity.h>
33 if (c->common.opcode >= nvme_cmd_vendor_start || in nvme_cmd_allowed()
34 c->common.opcode == nvme_fabrics_command) in nvme_cmd_allowed()
44 if (c->common.opcode == nvme_admin_identify) { in nvme_cmd_allowed()
45 switch (c->identify.cns) { in nvme_cmd_allowed()
62 effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode); in nvme_cmd_allowed()
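The nvme_cmd_allowed() hits above are the unprivileged-passthrough policy: vendor-specific and fabrics opcodes always require privilege, a few Identify CNS values are explicitly allowed, and everything else is decided from nvme_command_effects(). A much-simplified, hypothetical sketch of just the opcode part of that gate (not the kernel's full logic):

	/*
	 * Hypothetical helper, assuming the nvme_cmd_vendor_start and
	 * nvme_fabrics_command opcode values from <linux/nvme.h>; the real
	 * nvme_cmd_allowed() additionally whitelists Identify CNS values
	 * and consults nvme_command_effects().
	 */
	static bool nvme_opcode_needs_privilege(u8 opcode)
	{
		return opcode >= nvme_cmd_vendor_start ||
		       opcode == nvme_fabrics_command;
	}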
91 * ignoring the upper bits in the compat case to match behaviour of 32-bit
111 nvme_req(req)->flags |= NVME_REQ_USERCMD; in nvme_alloc_user_request()
119 struct request_queue *q = req->q; in nvme_map_user_request()
120 struct nvme_ns *ns = q->queuedata; in nvme_map_user_request()
121 struct block_device *bdev = ns ? ns->disk->part0 : NULL; in nvme_map_user_request()
122 bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk); in nvme_map_user_request()
123 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; in nvme_map_user_request() local
128 if (!nvme_ctrl_sgl_supported(ctrl)) in nvme_map_user_request()
129 dev_warn_once(ctrl->device, "using unchecked data buffer\n"); in nvme_map_user_request()
132 return -EINVAL; in nvme_map_user_request()
134 if (!nvme_ctrl_meta_sgl_supported(ctrl)) in nvme_map_user_request()
135 dev_warn_once(ctrl->device, in nvme_map_user_request()
167 struct nvme_ns *ns = q->queuedata; in nvme_submit_user_cmd()
168 struct nvme_ctrl *ctrl; in nvme_submit_user_cmd() local
178 req->timeout = timeout; in nvme_submit_user_cmd()
186 bio = req->bio; in nvme_submit_user_cmd()
187 ctrl = nvme_req(req)->ctrl; in nvme_submit_user_cmd()
189 effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode); in nvme_submit_user_cmd()
192 *result = le64_to_cpu(nvme_req(req)->result.u64); in nvme_submit_user_cmd()
198 nvme_passthru_end(ctrl, ns, effects, cmd, ret); in nvme_submit_user_cmd()
214 return -EFAULT; in nvme_submit_io()
216 return -EINVAL; in nvme_submit_io()
224 return -EINVAL; in nvme_submit_io()
227 length = (io.nblocks + 1) << ns->head->lba_shift; in nvme_submit_io()
230 (ns->head->ms == ns->head->pi_size)) { in nvme_submit_io()
236 return -EINVAL; in nvme_submit_io()
240 meta_len = (io.nblocks + 1) * ns->head->ms; in nvme_submit_io()
244 if (ns->head->features & NVME_NS_EXT_LBAS) { in nvme_submit_io()
249 return -EINVAL; in nvme_submit_io()
255 c.rw.nsid = cpu_to_le32(ns->head->ns_id); in nvme_submit_io()
264 return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata, in nvme_submit_io()
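The nvme_submit_io() hits above are the legacy NVME_IOCTL_SUBMIT_IO path: the 0-based io.nblocks is turned into a byte count with (io.nblocks + 1) << lba_shift, so with a 512-byte LBA format (lba_shift = 9) nblocks = 7 means 8 blocks, i.e. 4096 bytes. A minimal userspace sketch of that ioctl, assuming /dev/nvme0n1 and a 512-byte LBA format:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/nvme_ioctl.h>

	int main(void)
	{
		unsigned char buf[4096];		/* (7 + 1) << 9 bytes */
		struct nvme_user_io io = { 0 };
		int fd = open("/dev/nvme0n1", O_RDONLY);	/* assumed device path */

		if (fd < 0)
			return 1;
		io.opcode = 0x02;		/* NVMe Read */
		io.slba = 0;
		io.nblocks = 7;			/* 0-based: 8 LBAs */
		io.addr = (uintptr_t)buf;
		if (ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io) < 0)
			perror("NVME_IOCTL_SUBMIT_IO");
		close(fd);
		return 0;
	}

Metadata, if any, would be passed through io.metadata; the surrounding hits show the extended-LBA and separate-metadata-buffer cases being validated.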
268 static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl, in nvme_validate_passthru_nsid() argument
271 if (ns && nsid != ns->head->ns_id) { in nvme_validate_passthru_nsid()
272 dev_err(ctrl->device, in nvme_validate_passthru_nsid()
274 current->comm, nsid, ns->head->ns_id); in nvme_validate_passthru_nsid()
281 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, in nvme_user_cmd() argument
292 return -EFAULT; in nvme_user_cmd()
294 return -EINVAL; in nvme_user_cmd()
295 if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid)) in nvme_user_cmd()
296 return -EINVAL; in nvme_user_cmd()
312 return -EACCES; in nvme_user_cmd()
317 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, in nvme_user_cmd()
322 if (put_user(result, &ucmd->result)) in nvme_user_cmd()
323 return -EFAULT; in nvme_user_cmd()
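nvme_user_cmd() backs the NVME_IOCTL_ADMIN_CMD and NVME_IOCTL_IO_CMD passthrough ioctls: the struct nvme_passthru_cmd fields are copied into an NVMe SQE, the command is screened for permission (the -EACCES hit above), and the 32-bit completion result is copied back with put_user(). A minimal userspace sketch issuing Identify Controller through the admin interface (device path is an assumption):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/nvme_ioctl.h>

	int main(void)
	{
		unsigned char id[4096];			/* Identify data buffer */
		struct nvme_admin_cmd cmd = { 0 };
		int fd = open("/dev/nvme0", O_RDWR);	/* assumed controller node */

		if (fd < 0)
			return 1;
		cmd.opcode = 0x06;		/* Identify */
		cmd.nsid = 0;
		cmd.addr = (uintptr_t)id;
		cmd.data_len = sizeof(id);
		cmd.cdw10 = 1;			/* CNS 1: Identify Controller */
		if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0)
			perror("NVME_IOCTL_ADMIN_CMD");
		else
			printf("completion dword 0: 0x%x\n", cmd.result);
		close(fd);
		return 0;
	}

The nvme_user_cmd64() hits that follow are the same flow for the 64-bit variants (NVME_IOCTL_ADMIN64_CMD/NVME_IOCTL_IO64_CMD with struct nvme_passthru_cmd64), which hand back a full 64-bit result instead of the 32-bit one.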
329 static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, in nvme_user_cmd64() argument
339 return -EFAULT; in nvme_user_cmd64()
341 return -EINVAL; in nvme_user_cmd64()
342 if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid)) in nvme_user_cmd64()
343 return -EINVAL; in nvme_user_cmd64()
359 return -EACCES; in nvme_user_cmd64()
364 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, in nvme_user_cmd64()
369 if (put_user(cmd.result, &ucmd->result)) in nvme_user_cmd64()
370 return -EFAULT; in nvme_user_cmd64()
406 if (pdu->bio) in nvme_uring_task_cb()
407 blk_rq_unmap_user(pdu->bio); in nvme_uring_task_cb()
408 io_uring_cmd_done32(ioucmd, pdu->status, pdu->result, in nvme_uring_task_cb()
415 struct io_uring_cmd *ioucmd = req->end_io_data; in nvme_uring_cmd_end_io()
418 if (nvme_req(req)->flags & NVME_REQ_CANCELLED) { in nvme_uring_cmd_end_io()
419 pdu->status = -EINTR; in nvme_uring_cmd_end_io()
421 pdu->status = nvme_req(req)->status; in nvme_uring_cmd_end_io()
422 if (!pdu->status) in nvme_uring_cmd_end_io()
423 pdu->status = blk_status_to_errno(err); in nvme_uring_cmd_end_io()
425 pdu->result = le64_to_cpu(nvme_req(req)->result.u64); in nvme_uring_cmd_end_io()
439 static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, in nvme_uring_cmd_io() argument
443 const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe); in nvme_uring_cmd_io()
444 struct request_queue *q = ns ? ns->queue : ctrl->admin_q; in nvme_uring_cmd_io()
454 c.common.opcode = READ_ONCE(cmd->opcode); in nvme_uring_cmd_io()
455 c.common.flags = READ_ONCE(cmd->flags); in nvme_uring_cmd_io()
457 return -EINVAL; in nvme_uring_cmd_io()
460 c.common.nsid = cpu_to_le32(cmd->nsid); in nvme_uring_cmd_io()
461 if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid))) in nvme_uring_cmd_io()
462 return -EINVAL; in nvme_uring_cmd_io()
464 c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2)); in nvme_uring_cmd_io()
465 c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3)); in nvme_uring_cmd_io()
468 c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10)); in nvme_uring_cmd_io()
469 c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11)); in nvme_uring_cmd_io()
470 c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12)); in nvme_uring_cmd_io()
471 c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13)); in nvme_uring_cmd_io()
472 c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14)); in nvme_uring_cmd_io()
473 c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15)); in nvme_uring_cmd_io()
475 if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode & FMODE_WRITE)) in nvme_uring_cmd_io()
476 return -EACCES; in nvme_uring_cmd_io()
478 d.metadata = READ_ONCE(cmd->metadata); in nvme_uring_cmd_io()
479 d.addr = READ_ONCE(cmd->addr); in nvme_uring_cmd_io()
480 d.data_len = READ_ONCE(cmd->data_len); in nvme_uring_cmd_io()
481 d.metadata_len = READ_ONCE(cmd->metadata_len); in nvme_uring_cmd_io()
482 d.timeout_ms = READ_ONCE(cmd->timeout_ms); in nvme_uring_cmd_io()
484 if (d.data_len && (ioucmd->flags & IORING_URING_CMD_FIXED)) { in nvme_uring_cmd_io()
510 req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0; in nvme_uring_cmd_io()
520 /* to free bio on completion, as req->bio will be null at that time */ in nvme_uring_cmd_io()
521 pdu->bio = req->bio; in nvme_uring_cmd_io()
522 pdu->req = req; in nvme_uring_cmd_io()
523 req->end_io_data = ioucmd; in nvme_uring_cmd_io()
524 req->end_io = nvme_uring_cmd_end_io; in nvme_uring_cmd_io()
526 return -EIOCBQUEUED; in nvme_uring_cmd_io()
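nvme_uring_cmd_io() is the io_uring passthrough path: the NVMe SQE is built field by field from the struct nvme_uring_cmd payload carried in the 128-byte SQE, the request completes through nvme_uring_cmd_end_io(), and nvme_uring_task_cb() posts status and the 64-bit result into the 32-byte CQE. A minimal liburing sketch of the userspace side, assuming /dev/ng0n1 with nsid 1 and a 512-byte LBA format (error handling omitted):

	#include <fcntl.h>
	#include <liburing.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <linux/nvme_ioctl.h>

	int main(void)
	{
		unsigned char buf[4096];
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		struct nvme_uring_cmd *cmd;
		int fd = open("/dev/ng0n1", O_RDONLY);	/* assumed ns char device */

		if (fd < 0)
			return 1;
		/* passthrough needs 128-byte SQEs and 32-byte CQEs */
		io_uring_queue_init(8, &ring, IORING_SETUP_SQE128 | IORING_SETUP_CQE32);

		sqe = io_uring_get_sqe(&ring);
		memset(sqe, 0, 2 * sizeof(*sqe));	/* clear the full 128-byte SQE */
		sqe->opcode = IORING_OP_URING_CMD;
		sqe->fd = fd;
		sqe->cmd_op = NVME_URING_CMD_IO;

		cmd = (struct nvme_uring_cmd *)sqe->cmd;	/* payload in the big SQE */
		cmd->opcode = 0x02;			/* NVMe Read */
		cmd->nsid = 1;				/* assumption: nsid of ng0n1 */
		cmd->addr = (uintptr_t)buf;
		cmd->data_len = sizeof(buf);
		cmd->cdw10 = 0;				/* SLBA, low 32 bits */
		cmd->cdw11 = 0;				/* SLBA, high 32 bits */
		cmd->cdw12 = 7;				/* NLB, 0-based: 8 LBAs */

		io_uring_submit(&ring);
		io_uring_wait_cqe(&ring, &cqe);
		printf("status %d, result 0x%llx\n", cqe->res,
		       (unsigned long long)cqe->big_cqe[0]);
		io_uring_cqe_seen(&ring, cqe);
		io_uring_queue_exit(&ring);
		close(fd);
		return 0;
	}

The vectored (NVME_URING_CMD_IO_VEC) and admin-queue variants dispatched in the nvme_ns_uring_cmd()/nvme_dev_uring_cmd() hits below follow the same shape.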
542 static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd, in nvme_ctrl_ioctl() argument
547 return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write); in nvme_ctrl_ioctl()
549 return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write); in nvme_ctrl_ioctl()
551 return sed_ioctl(ctrl->opal_dev, cmd, argp); in nvme_ctrl_ioctl()
579 return ns->head->ns_id; in nvme_ns_ioctl()
581 return nvme_user_cmd(ns->ctrl, ns, argp, flags, open_for_write); in nvme_ns_ioctl()
583 * struct nvme_user_io can have different padding on some 32-bit ABIs. in nvme_ns_ioctl()
596 return nvme_user_cmd64(ns->ctrl, ns, argp, flags, in nvme_ns_ioctl()
599 return -ENOTTY; in nvme_ns_ioctl()
606 struct nvme_ns *ns = bdev->bd_disk->private_data; in nvme_ioctl()
615 return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write); in nvme_ioctl()
622 container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev); in nvme_ns_chr_ioctl()
623 bool open_for_write = file->f_mode & FMODE_WRITE; in nvme_ns_chr_ioctl()
627 return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write); in nvme_ns_chr_ioctl()
637 return -EOPNOTSUPP; in nvme_uring_cmd_checks()
644 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_ns_uring_cmd() local
651 switch (ioucmd->cmd_op) { in nvme_ns_uring_cmd()
653 ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false); in nvme_ns_uring_cmd()
656 ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true); in nvme_ns_uring_cmd()
659 ret = -ENOTTY; in nvme_ns_uring_cmd()
667 struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev, in nvme_ns_chr_uring_cmd()
678 struct request *req = pdu->req; in nvme_ns_chr_uring_cmd_iopoll()
688 __releases(&head->srcu) in nvme_ns_head_ctrl_ioctl()
690 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_ns_head_ctrl_ioctl() local
693 nvme_get_ctrl(ns->ctrl); in nvme_ns_head_ctrl_ioctl()
694 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_ns_head_ctrl_ioctl()
695 ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write); in nvme_ns_head_ctrl_ioctl()
697 nvme_put_ctrl(ctrl); in nvme_ns_head_ctrl_ioctl()
704 struct nvme_ns_head *head = bdev->bd_disk->private_data; in nvme_ns_head_ioctl()
708 int srcu_idx, ret = -EWOULDBLOCK; in nvme_ns_head_ioctl()
714 srcu_idx = srcu_read_lock(&head->srcu); in nvme_ns_head_ioctl()
730 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_ns_head_ioctl()
737 bool open_for_write = file->f_mode & FMODE_WRITE; in nvme_ns_head_chr_ioctl()
738 struct cdev *cdev = file_inode(file)->i_cdev; in nvme_ns_head_chr_ioctl()
743 int srcu_idx, ret = -EWOULDBLOCK; in nvme_ns_head_chr_ioctl()
745 srcu_idx = srcu_read_lock(&head->srcu); in nvme_ns_head_chr_ioctl()
756 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_ns_head_chr_ioctl()
763 struct cdev *cdev = file_inode(ioucmd->file)->i_cdev; in nvme_ns_head_chr_uring_cmd()
765 int srcu_idx = srcu_read_lock(&head->srcu); in nvme_ns_head_chr_uring_cmd()
767 int ret = -EINVAL; in nvme_ns_head_chr_uring_cmd()
771 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_ns_head_chr_uring_cmd()
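The nvme_ns_head_*() entry points are the multipath versions: the active path is looked up under head->srcu, and nvme_ns_head_ctrl_ioctl() instead takes a controller reference and drops the SRCU lock early (hence the __releases(&head->srcu) annotation) before forwarding to nvme_ctrl_ioctl(). A simplified sketch of the shared lookup pattern, not the kernel's exact code (nvme_find_path() and the zero flags argument are assumptions taken from the surrounding driver):

	static int nvme_ns_head_ioctl_sketch(struct nvme_ns_head *head,
			unsigned int cmd, void __user *argp, bool open_for_write)
	{
		int srcu_idx, ret = -EWOULDBLOCK;
		struct nvme_ns *ns;

		srcu_idx = srcu_read_lock(&head->srcu);
		ns = nvme_find_path(head);	/* current I/O path for this head */
		if (ns)
			ret = nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
		srcu_read_unlock(&head->srcu, srcu_idx);
		return ret;
	}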
778 struct nvme_ctrl *ctrl = ioucmd->file->private_data; in nvme_dev_uring_cmd() local
783 return -EOPNOTSUPP; in nvme_dev_uring_cmd()
789 switch (ioucmd->cmd_op) { in nvme_dev_uring_cmd()
791 ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false); in nvme_dev_uring_cmd()
794 ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true); in nvme_dev_uring_cmd()
797 ret = -ENOTTY; in nvme_dev_uring_cmd()
803 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp, in nvme_dev_user_cmd() argument
809 srcu_idx = srcu_read_lock(&ctrl->srcu); in nvme_dev_user_cmd()
810 if (list_empty(&ctrl->namespaces)) { in nvme_dev_user_cmd()
811 ret = -ENOTTY; in nvme_dev_user_cmd()
815 ns = list_first_or_null_rcu(&ctrl->namespaces, struct nvme_ns, list); in nvme_dev_user_cmd()
816 if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) { in nvme_dev_user_cmd()
817 dev_warn(ctrl->device, in nvme_dev_user_cmd()
819 ret = -EINVAL; in nvme_dev_user_cmd()
823 dev_warn(ctrl->device, in nvme_dev_user_cmd()
826 ret = -ENXIO; in nvme_dev_user_cmd()
829 srcu_read_unlock(&ctrl->srcu, srcu_idx); in nvme_dev_user_cmd()
831 ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write); in nvme_dev_user_cmd()
836 srcu_read_unlock(&ctrl->srcu, srcu_idx); in nvme_dev_user_cmd()
843 bool open_for_write = file->f_mode & FMODE_WRITE; in nvme_dev_ioctl()
844 struct nvme_ctrl *ctrl = file->private_data; in nvme_dev_ioctl() local
849 return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write); in nvme_dev_ioctl()
851 return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write); in nvme_dev_ioctl()
853 return nvme_dev_user_cmd(ctrl, argp, open_for_write); in nvme_dev_ioctl()
856 return -EACCES; in nvme_dev_ioctl()
857 dev_warn(ctrl->device, "resetting controller\n"); in nvme_dev_ioctl()
858 return nvme_reset_ctrl_sync(ctrl); in nvme_dev_ioctl()
861 return -EACCES; in nvme_dev_ioctl()
862 return nvme_reset_subsystem(ctrl); in nvme_dev_ioctl()
865 return -EACCES; in nvme_dev_ioctl()
866 nvme_queue_scan(ctrl); in nvme_dev_ioctl()
869 return -ENOTTY; in nvme_dev_ioctl()
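nvme_dev_ioctl() is the ioctl entry point for the controller character device: admin and I/O passthrough plus the privileged management ioctls (reset, subsystem reset, rescan), each guarded by an -EACCES permission check. A minimal userspace sketch triggering a controller reset, assuming /dev/nvme0 and sufficient privilege:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/nvme_ioctl.h>

	int main(void)
	{
		int fd = open("/dev/nvme0", O_RDWR);	/* assumed controller node */

		if (fd < 0)
			return 1;
		if (ioctl(fd, NVME_IOCTL_RESET) < 0)	/* privileged */
			perror("NVME_IOCTL_RESET");
		close(fd);
		return 0;
	}

NVME_IOCTL_RESCAN follows the same pattern and just queues a namespace rescan (nvme_queue_scan() in the hits above); NVME_IOCTL_SUBSYS_RESET maps to nvme_reset_subsystem().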