Lines Matching "cmd-timeout-ms"

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2011-2014, Intel Corporation.
4 * Copyright (c) 2017-2021 Christoph Hellwig.
6 #include <linux/blk-integrity.h>
9 #include <linux/io_uring/cmd.h>
33 if (c->common.opcode >= nvme_cmd_vendor_start || in nvme_cmd_allowed()
34 c->common.opcode == nvme_fabrics_command) in nvme_cmd_allowed()
44 if (c->common.opcode == nvme_admin_identify) { in nvme_cmd_allowed()
45 switch (c->identify.cns) { in nvme_cmd_allowed()
62 effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode); in nvme_cmd_allowed()
91 * ignoring the upper bits in the compat case to match behaviour of 32-bit
102 struct nvme_command *cmd, blk_opf_t rq_flags, in nvme_alloc_user_request() argument
107 req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags); in nvme_alloc_user_request()
110 nvme_init_request(req, cmd); in nvme_alloc_user_request()
111 nvme_req(req)->flags |= NVME_REQ_USERCMD; in nvme_alloc_user_request()
119 struct request_queue *q = req->q; in nvme_map_user_request()
120 struct nvme_ns *ns = q->queuedata; in nvme_map_user_request()
121 struct block_device *bdev = ns ? ns->disk->part0 : NULL; in nvme_map_user_request()
122 bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk); in nvme_map_user_request()
123 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; in nvme_map_user_request()
129 dev_warn_once(ctrl->device, "using unchecked data buffer\n"); in nvme_map_user_request()
132 ret = -EINVAL; in nvme_map_user_request()
136 dev_warn_once(ctrl->device, in nvme_map_user_request()
140 if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) { in nvme_map_user_request()
143 /* fixedbufs is only for non-vectored io */ in nvme_map_user_request()
145 ret = -EINVAL; in nvme_map_user_request()
162 bio = req->bio; in nvme_map_user_request()
183 struct nvme_command *cmd, u64 ubuffer, unsigned bufflen, in nvme_submit_user_cmd() argument
185 u64 *result, unsigned timeout, unsigned int flags) in nvme_submit_user_cmd() argument
187 struct nvme_ns *ns = q->queuedata; in nvme_submit_user_cmd()
194 req = nvme_alloc_user_request(q, cmd, 0, 0); in nvme_submit_user_cmd()
198 req->timeout = timeout; in nvme_submit_user_cmd()
206 bio = req->bio; in nvme_submit_user_cmd()
207 ctrl = nvme_req(req)->ctrl; in nvme_submit_user_cmd()
209 effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode); in nvme_submit_user_cmd()
212 *result = le64_to_cpu(nvme_req(req)->result.u64); in nvme_submit_user_cmd()
218 nvme_passthru_end(ctrl, ns, effects, cmd, ret); in nvme_submit_user_cmd()
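
The fragments above show the common path for user-supplied timeouts: nvme_submit_user_cmd() receives a timeout already expressed in jiffies and stores it in req->timeout before the request is issued; when the caller passes 0, the queue's default request timeout applies instead. The entry points below (nvme_user_cmd(), nvme_user_cmd64() and nvme_uring_cmd_io()) derive that value from the user-visible timeout_ms field via msecs_to_jiffies().
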
231 return -EFAULT; in nvme_submit_io()
233 return -EINVAL; in nvme_submit_io()
241 return -EINVAL; in nvme_submit_io()
244 length = (io.nblocks + 1) << ns->head->lba_shift; in nvme_submit_io()
247 (ns->head->ms == ns->head->pi_size)) { in nvme_submit_io()
253 return -EINVAL; in nvme_submit_io()
257 meta_len = (io.nblocks + 1) * ns->head->ms; in nvme_submit_io()
261 if (ns->head->features & NVME_NS_EXT_LBAS) { in nvme_submit_io()
266 return -EINVAL; in nvme_submit_io()
272 c.rw.nsid = cpu_to_le32(ns->head->ns_id); in nvme_submit_io()
281 return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata, in nvme_submit_io()
288 if (ns && nsid != ns->head->ns_id) { in nvme_validate_passthru_nsid()
289 dev_err(ctrl->device, in nvme_validate_passthru_nsid()
290 "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n", in nvme_validate_passthru_nsid()
291 current->comm, nsid, ns->head->ns_id); in nvme_validate_passthru_nsid()
302 struct nvme_passthru_cmd cmd; in nvme_user_cmd() local
304 unsigned timeout = 0; in nvme_user_cmd() local
308 if (copy_from_user(&cmd, ucmd, sizeof(cmd))) in nvme_user_cmd()
309 return -EFAULT; in nvme_user_cmd()
310 if (cmd.flags) in nvme_user_cmd()
311 return -EINVAL; in nvme_user_cmd()
312 if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid)) in nvme_user_cmd()
313 return -EINVAL; in nvme_user_cmd()
316 c.common.opcode = cmd.opcode; in nvme_user_cmd()
317 c.common.flags = cmd.flags; in nvme_user_cmd()
318 c.common.nsid = cpu_to_le32(cmd.nsid); in nvme_user_cmd()
319 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); in nvme_user_cmd()
320 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); in nvme_user_cmd()
321 c.common.cdw10 = cpu_to_le32(cmd.cdw10); in nvme_user_cmd()
322 c.common.cdw11 = cpu_to_le32(cmd.cdw11); in nvme_user_cmd()
323 c.common.cdw12 = cpu_to_le32(cmd.cdw12); in nvme_user_cmd()
324 c.common.cdw13 = cpu_to_le32(cmd.cdw13); in nvme_user_cmd()
325 c.common.cdw14 = cpu_to_le32(cmd.cdw14); in nvme_user_cmd()
326 c.common.cdw15 = cpu_to_le32(cmd.cdw15); in nvme_user_cmd()
329 return -EACCES; in nvme_user_cmd()
331 if (cmd.timeout_ms) in nvme_user_cmd()
332 timeout = msecs_to_jiffies(cmd.timeout_ms); in nvme_user_cmd()
334 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, in nvme_user_cmd()
335 cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata), in nvme_user_cmd()
336 cmd.metadata_len, &result, timeout, 0); in nvme_user_cmd()
339 if (put_user(result, &ucmd->result)) in nvme_user_cmd()
340 return -EFAULT; in nvme_user_cmd()
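
For context, here is a minimal userspace sketch (not part of this file) of how timeout_ms reaches nvme_user_cmd(): it issues an Identify Controller admin command through the NVME_IOCTL_ADMIN_CMD ioctl on the controller character device. The device path /dev/nvme0 and the 2000 ms timeout are illustrative assumptions; the field names come from struct nvme_passthru_cmd in <linux/nvme_ioctl.h>.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	static unsigned char buf[4096];
	struct nvme_passthru_cmd cmd;
	int fd, ret;

	fd = open("/dev/nvme0", O_RDONLY);	/* controller char device (assumed path) */
	if (fd < 0)
		return 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x06;			/* Identify (admin command set) */
	cmd.addr = (__u64)(uintptr_t)buf;
	cmd.data_len = sizeof(buf);
	cmd.cdw10 = 1;				/* CNS 1: Identify Controller */
	cmd.timeout_ms = 2000;			/* nvme_user_cmd() turns this into jiffies */

	ret = ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
	printf("status %d, result 0x%x\n", ret, cmd.result);

	close(fd);
	return ret < 0;
}

Note that cmd.flags must stay 0: the checks above reject any other value with -EINVAL.
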
350 struct nvme_passthru_cmd64 cmd; in nvme_user_cmd64() local
352 unsigned timeout = 0; in nvme_user_cmd64() local
355 if (copy_from_user(&cmd, ucmd, sizeof(cmd))) in nvme_user_cmd64()
356 return -EFAULT; in nvme_user_cmd64()
357 if (cmd.flags) in nvme_user_cmd64()
358 return -EINVAL; in nvme_user_cmd64()
359 if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid)) in nvme_user_cmd64()
360 return -EINVAL; in nvme_user_cmd64()
363 c.common.opcode = cmd.opcode; in nvme_user_cmd64()
364 c.common.flags = cmd.flags; in nvme_user_cmd64()
365 c.common.nsid = cpu_to_le32(cmd.nsid); in nvme_user_cmd64()
366 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); in nvme_user_cmd64()
367 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); in nvme_user_cmd64()
368 c.common.cdw10 = cpu_to_le32(cmd.cdw10); in nvme_user_cmd64()
369 c.common.cdw11 = cpu_to_le32(cmd.cdw11); in nvme_user_cmd64()
370 c.common.cdw12 = cpu_to_le32(cmd.cdw12); in nvme_user_cmd64()
371 c.common.cdw13 = cpu_to_le32(cmd.cdw13); in nvme_user_cmd64()
372 c.common.cdw14 = cpu_to_le32(cmd.cdw14); in nvme_user_cmd64()
373 c.common.cdw15 = cpu_to_le32(cmd.cdw15); in nvme_user_cmd64()
376 return -EACCES; in nvme_user_cmd64()
378 if (cmd.timeout_ms) in nvme_user_cmd64()
379 timeout = msecs_to_jiffies(cmd.timeout_ms); in nvme_user_cmd64()
381 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, in nvme_user_cmd64()
382 cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata), in nvme_user_cmd64()
383 cmd.metadata_len, &cmd.result, timeout, flags); in nvme_user_cmd64()
386 if (put_user(cmd.result, &ucmd->result)) in nvme_user_cmd64()
387 return -EFAULT; in nvme_user_cmd64()
423 if (pdu->bio) in nvme_uring_task_cb()
424 blk_rq_unmap_user(pdu->bio); in nvme_uring_task_cb()
425 io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags); in nvme_uring_task_cb()
431 struct io_uring_cmd *ioucmd = req->end_io_data; in nvme_uring_cmd_end_io()
434 if (nvme_req(req)->flags & NVME_REQ_CANCELLED) { in nvme_uring_cmd_end_io()
435 pdu->status = -EINTR; in nvme_uring_cmd_end_io()
437 pdu->status = nvme_req(req)->status; in nvme_uring_cmd_end_io()
438 if (!pdu->status) in nvme_uring_cmd_end_io()
439 pdu->status = blk_status_to_errno(err); in nvme_uring_cmd_end_io()
441 pdu->result = le64_to_cpu(nvme_req(req)->result.u64); in nvme_uring_cmd_end_io()
452 if (pdu->bio) in nvme_uring_cmd_end_io()
453 blk_rq_unmap_user(pdu->bio); in nvme_uring_cmd_end_io()
454 io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status); in nvme_uring_cmd_end_io()
466 const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe); in nvme_uring_cmd_io() local
467 struct request_queue *q = ns ? ns->queue : ctrl->admin_q; in nvme_uring_cmd_io()
475 c.common.opcode = READ_ONCE(cmd->opcode); in nvme_uring_cmd_io()
476 c.common.flags = READ_ONCE(cmd->flags); in nvme_uring_cmd_io()
478 return -EINVAL; in nvme_uring_cmd_io()
481 c.common.nsid = cpu_to_le32(cmd->nsid); in nvme_uring_cmd_io()
483 return -EINVAL; in nvme_uring_cmd_io()
485 c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2)); in nvme_uring_cmd_io()
486 c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3)); in nvme_uring_cmd_io()
489 c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10)); in nvme_uring_cmd_io()
490 c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11)); in nvme_uring_cmd_io()
491 c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12)); in nvme_uring_cmd_io()
492 c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13)); in nvme_uring_cmd_io()
493 c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14)); in nvme_uring_cmd_io()
494 c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15)); in nvme_uring_cmd_io()
496 if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode & FMODE_WRITE)) in nvme_uring_cmd_io()
497 return -EACCES; in nvme_uring_cmd_io()
499 d.metadata = READ_ONCE(cmd->metadata); in nvme_uring_cmd_io()
500 d.addr = READ_ONCE(cmd->addr); in nvme_uring_cmd_io()
501 d.data_len = READ_ONCE(cmd->data_len); in nvme_uring_cmd_io()
502 d.metadata_len = READ_ONCE(cmd->metadata_len); in nvme_uring_cmd_io()
503 d.timeout_ms = READ_ONCE(cmd->timeout_ms); in nvme_uring_cmd_io()
515 req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0; in nvme_uring_cmd_io()
525 /* to free bio on completion, as req->bio will be null at that time */ in nvme_uring_cmd_io()
526 pdu->bio = req->bio; in nvme_uring_cmd_io()
527 pdu->req = req; in nvme_uring_cmd_io()
528 req->end_io_data = ioucmd; in nvme_uring_cmd_io()
529 req->end_io = nvme_uring_cmd_end_io; in nvme_uring_cmd_io()
531 return -EIOCBQUEUED; in nvme_uring_cmd_io()
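
The io_uring passthrough path handled by nvme_uring_cmd_io() above can be driven from userspace roughly as follows. This is an illustrative sketch, not code from this file: it assumes liburing, a namespace character device at /dev/ng0n1 with NSID 1 and a 512-byte LBA format, and a ring created with IORING_SETUP_SQE128 | IORING_SETUP_CQE32 (the 128-byte SQE is what carries struct nvme_uring_cmd). The timeout_ms set here is what the READ_ONCE(cmd->timeout_ms) line above picks up.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <liburing.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	static unsigned char buf[4096] __attribute__((aligned(4096)));
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct nvme_uring_cmd *cmd;
	int fd, ret;

	fd = open("/dev/ng0n1", O_RDONLY);	/* namespace char device (assumed path) */
	if (fd < 0)
		return 1;

	/* Big SQEs/CQEs are required for NVMe uring_cmd passthrough. */
	ret = io_uring_queue_init(8, &ring, IORING_SETUP_SQE128 | IORING_SETUP_CQE32);
	if (ret)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, 2 * sizeof(*sqe));	/* one SQE128 slot is 128 bytes */
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fd;
	sqe->cmd_op = NVME_URING_CMD_IO;

	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	cmd->opcode = 0x02;			/* NVMe Read */
	cmd->nsid = 1;				/* assumed NSID; must match the device */
	cmd->addr = (__u64)(uintptr_t)buf;
	cmd->data_len = sizeof(buf);
	cmd->cdw12 = (sizeof(buf) / 512) - 1;	/* NLB, 0-based; assumes 512B LBAs */
	cmd->timeout_ms = 1000;			/* becomes req->timeout via msecs_to_jiffies() */

	io_uring_submit(&ring);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		printf("cqe res %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
}

With CQE32, cqe->res carries the status or errno (pdu->status above), while the NVMe command's 64-bit result (pdu->result) is returned in the extended CQE payload.
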
534 static bool is_ctrl_ioctl(unsigned int cmd) in is_ctrl_ioctl() argument
536 if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD) in is_ctrl_ioctl()
538 if (is_sed_ioctl(cmd)) in is_ctrl_ioctl()
543 static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd, in nvme_ctrl_ioctl() argument
546 switch (cmd) { in nvme_ctrl_ioctl()
552 return sed_ioctl(ctrl->opal_dev, cmd, argp); in nvme_ctrl_ioctl()
574 static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd, in nvme_ns_ioctl() argument
577 switch (cmd) { in nvme_ns_ioctl()
580 return ns->head->ns_id; in nvme_ns_ioctl()
582 return nvme_user_cmd(ns->ctrl, ns, argp, flags, open_for_write); in nvme_ns_ioctl()
584 * struct nvme_user_io can have different padding on some 32-bit ABIs. in nvme_ns_ioctl()
597 return nvme_user_cmd64(ns->ctrl, ns, argp, flags, in nvme_ns_ioctl()
600 return -ENOTTY; in nvme_ns_ioctl()
605 unsigned int cmd, unsigned long arg) in nvme_ioctl() argument
607 struct nvme_ns *ns = bdev->bd_disk->private_data; in nvme_ioctl()
615 if (is_ctrl_ioctl(cmd)) in nvme_ioctl()
616 return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write); in nvme_ioctl()
617 return nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write); in nvme_ioctl()
620 long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) in nvme_ns_chr_ioctl() argument
623 container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev); in nvme_ns_chr_ioctl()
624 bool open_for_write = file->f_mode & FMODE_WRITE; in nvme_ns_chr_ioctl()
627 if (is_ctrl_ioctl(cmd)) in nvme_ns_chr_ioctl()
628 return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write); in nvme_ns_chr_ioctl()
629 return nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write); in nvme_ns_chr_ioctl()
638 return -EOPNOTSUPP; in nvme_uring_cmd_checks()
645 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_ns_uring_cmd()
652 switch (ioucmd->cmd_op) { in nvme_ns_uring_cmd()
660 ret = -ENOTTY; in nvme_ns_uring_cmd()
668 struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev, in nvme_ns_chr_uring_cmd()
679 struct request *req = pdu->req; in nvme_ns_chr_uring_cmd_iopoll()
686 static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd, in nvme_ns_head_ctrl_ioctl() argument
689 __releases(&head->srcu) in nvme_ns_head_ctrl_ioctl()
691 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_ns_head_ctrl_ioctl()
694 nvme_get_ctrl(ns->ctrl); in nvme_ns_head_ctrl_ioctl()
695 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_ns_head_ctrl_ioctl()
696 ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write); in nvme_ns_head_ctrl_ioctl()
703 unsigned int cmd, unsigned long arg) in nvme_ns_head_ioctl() argument
705 struct nvme_ns_head *head = bdev->bd_disk->private_data; in nvme_ns_head_ioctl()
709 int srcu_idx, ret = -EWOULDBLOCK; in nvme_ns_head_ioctl()
715 srcu_idx = srcu_read_lock(&head->srcu); in nvme_ns_head_ioctl()
725 if (is_ctrl_ioctl(cmd)) in nvme_ns_head_ioctl()
726 return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx, in nvme_ns_head_ioctl()
729 ret = nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write); in nvme_ns_head_ioctl()
731 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_ns_head_ioctl()
735 long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd, in nvme_ns_head_chr_ioctl() argument
738 bool open_for_write = file->f_mode & FMODE_WRITE; in nvme_ns_head_chr_ioctl()
739 struct cdev *cdev = file_inode(file)->i_cdev; in nvme_ns_head_chr_ioctl()
744 int srcu_idx, ret = -EWOULDBLOCK; in nvme_ns_head_chr_ioctl()
746 srcu_idx = srcu_read_lock(&head->srcu); in nvme_ns_head_chr_ioctl()
751 if (is_ctrl_ioctl(cmd)) in nvme_ns_head_chr_ioctl()
752 return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx, in nvme_ns_head_chr_ioctl()
755 ret = nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write); in nvme_ns_head_chr_ioctl()
757 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_ns_head_chr_ioctl()
764 struct cdev *cdev = file_inode(ioucmd->file)->i_cdev; in nvme_ns_head_chr_uring_cmd()
766 int srcu_idx = srcu_read_lock(&head->srcu); in nvme_ns_head_chr_uring_cmd()
768 int ret = -EINVAL; in nvme_ns_head_chr_uring_cmd()
772 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_ns_head_chr_uring_cmd()
779 struct nvme_ctrl *ctrl = ioucmd->file->private_data; in nvme_dev_uring_cmd()
784 return -EOPNOTSUPP; in nvme_dev_uring_cmd()
790 switch (ioucmd->cmd_op) { in nvme_dev_uring_cmd()
798 ret = -ENOTTY; in nvme_dev_uring_cmd()
810 srcu_idx = srcu_read_lock(&ctrl->srcu); in nvme_dev_user_cmd()
811 if (list_empty(&ctrl->namespaces)) { in nvme_dev_user_cmd()
812 ret = -ENOTTY; in nvme_dev_user_cmd()
816 ns = list_first_or_null_rcu(&ctrl->namespaces, struct nvme_ns, list); in nvme_dev_user_cmd()
817 if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) { in nvme_dev_user_cmd()
818 dev_warn(ctrl->device, in nvme_dev_user_cmd()
820 ret = -EINVAL; in nvme_dev_user_cmd()
824 dev_warn(ctrl->device, in nvme_dev_user_cmd()
827 ret = -ENXIO; in nvme_dev_user_cmd()
830 srcu_read_unlock(&ctrl->srcu, srcu_idx); in nvme_dev_user_cmd()
837 srcu_read_unlock(&ctrl->srcu, srcu_idx); in nvme_dev_user_cmd()
841 long nvme_dev_ioctl(struct file *file, unsigned int cmd, in nvme_dev_ioctl() argument
844 bool open_for_write = file->f_mode & FMODE_WRITE; in nvme_dev_ioctl()
845 struct nvme_ctrl *ctrl = file->private_data; in nvme_dev_ioctl()
848 switch (cmd) { in nvme_dev_ioctl()
857 return -EACCES; in nvme_dev_ioctl()
858 dev_warn(ctrl->device, "resetting controller\n"); in nvme_dev_ioctl()
862 return -EACCES; in nvme_dev_ioctl()
866 return -EACCES; in nvme_dev_ioctl()
870 return -ENOTTY; in nvme_dev_ioctl()