// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 * Copyright (c) 2017-2021 Christoph Hellwig.
 */
#include <linux/blk-integrity.h>
#include <linux/ptrace.h>	/* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include <linux/io_uring/cmd.h>
#include "nvme.h"

enum {
	NVME_IOCTL_VEC		= (1 << 0),
	NVME_IOCTL_PARTITION	= (1 << 1),
};

static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
		unsigned int flags, bool open_for_write)
{
	u32 effects;

	/*
	 * Do not allow unprivileged passthrough on partitions, as that allows an
	 * escape from the containment of the partition.
	 */
	if (flags & NVME_IOCTL_PARTITION)
		goto admin;

	/*
	 * Do not allow unprivileged processes to send vendor specific or fabrics
	 * commands as we can't be sure about their effects.
	 */
	if (c->common.opcode >= nvme_cmd_vendor_start ||
	    c->common.opcode == nvme_fabrics_command)
		goto admin;

	/*
	 * Do not allow unprivileged passthrough of admin commands except
	 * for a subset of identify commands that contain information required
	 * to form proper I/O commands in userspace and do not expose any
	 * potentially sensitive information.
	 */
	if (!ns) {
		if (c->common.opcode == nvme_admin_identify) {
			switch (c->identify.cns) {
			case NVME_ID_CNS_NS:
			case NVME_ID_CNS_CS_NS:
			case NVME_ID_CNS_NS_CS_INDEP:
			case NVME_ID_CNS_CS_CTRL:
			case NVME_ID_CNS_CTRL:
				return true;
			}
		}
		goto admin;
	}

	/*
	 * Check if the controller provides a Commands Supported and Effects log
	 * and marks this command as supported.  If not, reject unprivileged
	 * passthrough.
	 */
	effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
	if (!(effects & NVME_CMD_EFFECTS_CSUPP))
		goto admin;

	/*
	 * Don't allow passthrough for commands that have intrusive (or unknown)
	 * effects.
	 */
	if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
			NVME_CMD_EFFECTS_UUID_SEL |
			NVME_CMD_EFFECTS_SCOPE_MASK))
		goto admin;

	/*
	 * Only allow I/O commands that transfer data to the controller or that
	 * change the logical block contents if the file descriptor is open for
	 * writing.
	 */
	if ((nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC)) &&
	    !open_for_write)
		goto admin;

	return true;
admin:
	return capable(CAP_SYS_ADMIN);
}

/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
	if (in_compat_syscall())
		ptrval = (compat_uptr_t)ptrval;
	return (void __user *)ptrval;
}

static struct request *nvme_alloc_user_request(struct request_queue *q,
		struct nvme_command *cmd, blk_opf_t rq_flags,
		blk_mq_req_flags_t blk_flags)
{
	struct request *req;

	req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
	if (IS_ERR(req))
		return req;
	nvme_init_request(req, cmd);
	nvme_req(req)->flags |= NVME_REQ_USERCMD;
	return req;
}

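/*
 * Map the user data buffer (or a caller-provided iov_iter) and the optional
 * metadata buffer onto @req.  Metadata is only accepted when the namespace's
 * block device advertises an integrity profile.
 */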
static int nvme_map_user_request(struct request *req, u64 ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		struct iov_iter *iter, unsigned int flags)
{
	struct request_queue *q = req->q;
	struct nvme_ns *ns = q->queuedata;
	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
	bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk);
	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
	bool has_metadata = meta_buffer && meta_len;
	struct bio *bio = NULL;
	int ret;

	if (!nvme_ctrl_sgl_supported(ctrl))
		dev_warn_once(ctrl->device, "using unchecked data buffer\n");
	if (has_metadata) {
		if (!supports_metadata)
			return -EINVAL;

		if (!nvme_ctrl_meta_sgl_supported(ctrl))
			dev_warn_once(ctrl->device,
				      "using unchecked metadata buffer\n");
	}

	if (iter)
		ret = blk_rq_map_user_iov(q, req, NULL, iter, GFP_KERNEL);
	else
		ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
				bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
				0, rq_data_dir(req));
	if (ret)
		return ret;

	/* remember the mapped bio so the error path below can unmap it */
	bio = req->bio;

	if (has_metadata) {
		ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len);
		if (ret)
			goto out_unmap;
	}

	return ret;

out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
	return ret;
}

static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len,
		u64 *result, unsigned timeout, unsigned int flags)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl;
	struct request *req;
	struct bio *bio;
	u32 effects;
	int ret;

	req = nvme_alloc_user_request(q, cmd, 0, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout;
	if (ubuffer && bufflen) {
		ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
				meta_len, NULL, flags);
		if (ret)
			goto out_free_req;
	}

	bio = req->bio;
	ctrl = nvme_req(req)->ctrl;

	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
	ret = nvme_execute_rq(req, false);
	if (result)
		*result = le64_to_cpu(nvme_req(req)->result.u64);
	if (bio)
		blk_rq_unmap_user(bio);
	blk_mq_free_request(req);

	if (effects)
		nvme_passthru_end(ctrl, ns, effects, cmd, ret);
	return ret;

out_free_req:
	blk_mq_free_request(req);
	return ret;
}

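/*
 * Handler for the legacy NVME_IOCTL_SUBMIT_IO ioctl: translate the fixed
 * read/write/compare layout of struct nvme_user_io into an NVMe command and
 * submit it synchronously.
 *
 * Illustrative userspace sketch (not part of the driver); the fd name and
 * field values are example assumptions only, and the buffer must cover at
 * least one logical block:
 *
 *	struct nvme_user_io io = {
 *		.opcode  = 0x02,			// NVMe Read
 *		.addr    = (__u64)(uintptr_t)buf,
 *		.slba    = 0,
 *		.nblocks = 0,				// zero-based: one block
 *	};
 *	ioctl(nsfd, NVME_IOCTL_SUBMIT_IO, &io);
 */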
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->head->lba_shift;

	if ((io.control & NVME_RW_PRINFO_PRACT) &&
	    (ns->head->ms == ns->head->pi_size)) {
		/*
		 * Protection information is stripped/inserted by the
		 * controller.
		 */
		if (nvme_to_user_ptr(io.metadata))
			return -EINVAL;
		meta_len = 0;
		metadata = NULL;
	} else {
		meta_len = (io.nblocks + 1) * ns->head->ms;
		metadata = nvme_to_user_ptr(io.metadata);
	}

	if (ns->head->features & NVME_NS_EXT_LBAS) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.lbat = cpu_to_le16(io.apptag);
	c.rw.lbatm = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
			meta_len, NULL, 0, 0);
}

static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
					struct nvme_ns *ns, __u32 nsid)
{
	if (ns && nsid != ns->head->ns_id) {
		dev_err(ctrl->device,
			"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
			current->comm, nsid, ns->head->ns_id);
		return false;
	}

	return true;
}

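/*
 * Handler for the NVME_IOCTL_ADMIN_CMD and NVME_IOCTL_IO_CMD passthrough
 * ioctls, which use the struct nvme_passthru_cmd layout with a 32-bit result.
 *
 * Illustrative userspace sketch (not part of the driver); the fd name,
 * opcode, CNS value and buffer size below are example assumptions for an
 * Identify Controller command:
 *
 *	struct nvme_passthru_cmd cmd = {
 *		.opcode   = 0x06,			// Identify
 *		.addr     = (__u64)(uintptr_t)buf,	// 4096-byte buffer
 *		.data_len = 4096,
 *		.cdw10    = 1,				// CNS 01h: controller
 *	};
 *	ioctl(ctrlfd, NVME_IOCTL_ADMIN_CMD, &cmd);
 */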
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct nvme_passthru_cmd __user *ucmd, unsigned int flags,
		bool open_for_write)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u64 result;
	int status;

	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (!nvme_cmd_allowed(ns, &c, 0, open_for_write))
		return -EACCES;

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
			cmd.metadata_len, &result, timeout, 0);

	if (status >= 0) {
		if (put_user(result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,
		bool open_for_write)
{
	struct nvme_passthru_cmd64 cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (!nvme_cmd_allowed(ns, &c, flags, open_for_write))
		return -EACCES;

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
			cmd.metadata_len, &cmd.result, timeout, flags);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

struct nvme_uring_data {
	__u64	metadata;
	__u64	addr;
	__u32	data_len;
	__u32	metadata_len;
	__u32	timeout_ms;
};

/*
 * This overlays struct io_uring_cmd pdu.
 * Expect build errors if this grows larger than that.
 */
struct nvme_uring_cmd_pdu {
	struct request *req;
	struct bio *bio;
	u64 result;
	int status;
};

static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
		struct io_uring_cmd *ioucmd)
{
	return io_uring_cmd_to_pdu(ioucmd, struct nvme_uring_cmd_pdu);
}

static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
		unsigned issue_flags)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

	if (pdu->bio)
		blk_rq_unmap_user(pdu->bio);
	io_uring_cmd_done32(ioucmd, pdu->status, pdu->result, issue_flags);
}

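/*
 * blk-mq end_io handler for uring_cmd passthrough requests: stash status and
 * result in the pdu and defer posting the final CQE to task context via
 * nvme_uring_task_cb().
 */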
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
						blk_status_t err)
{
	struct io_uring_cmd *ioucmd = req->end_io_data;
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

	if (nvme_req(req)->flags & NVME_REQ_CANCELLED) {
		pdu->status = -EINTR;
	} else {
		pdu->status = nvme_req(req)->status;
		if (!pdu->status)
			pdu->status = blk_status_to_errno(err);
	}
	pdu->result = le64_to_cpu(nvme_req(req)->result.u64);

	/*
	 * IOPOLL could potentially complete this request directly, but
	 * if multiple rings are polling on the same queue, then it's possible
	 * for one ring to find completions for another ring. Punting the
	 * completion via task_work will always direct it to the right
	 * location, rather than potentially complete requests for ringA
	 * under iopoll invocations from ringB.
	 */
	io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
	return RQ_END_IO_FREE;
}

static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
	struct nvme_uring_data d;
	struct nvme_command c;
	struct iov_iter iter;
	struct iov_iter *map_iter = NULL;
	struct request *req;
	blk_opf_t rq_flags = REQ_ALLOC_CACHE;
	blk_mq_req_flags_t blk_flags = 0;
	int ret;

	c.common.opcode = READ_ONCE(cmd->opcode);
	c.common.flags = READ_ONCE(cmd->flags);
	if (c.common.flags)
		return -EINVAL;

	c.common.command_id = 0;
	c.common.nsid = cpu_to_le32(cmd->nsid);
	if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
		return -EINVAL;

	c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
	c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
	c.common.metadata = 0;
	c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
	c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
	c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
	c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
	c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
	c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
	c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));

	if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode & FMODE_WRITE))
		return -EACCES;

	d.metadata = READ_ONCE(cmd->metadata);
	d.addr = READ_ONCE(cmd->addr);
	d.data_len = READ_ONCE(cmd->data_len);
	d.metadata_len = READ_ONCE(cmd->metadata_len);
	d.timeout_ms = READ_ONCE(cmd->timeout_ms);

	if (d.data_len && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
		int ddir = nvme_is_write(&c) ? WRITE : READ;

		if (vec)
			ret = io_uring_cmd_import_fixed_vec(ioucmd,
					u64_to_user_ptr(d.addr), d.data_len,
					ddir, &iter, issue_flags);
		else
			ret = io_uring_cmd_import_fixed(d.addr, d.data_len,
					ddir, &iter, ioucmd, issue_flags);
		if (ret < 0)
			return ret;

		map_iter = &iter;
	}

	if (issue_flags & IO_URING_F_NONBLOCK) {
		rq_flags |= REQ_NOWAIT;
		blk_flags = BLK_MQ_REQ_NOWAIT;
	}
	if (issue_flags & IO_URING_F_IOPOLL)
		rq_flags |= REQ_POLLED;

	req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;

	if (d.data_len) {
		ret = nvme_map_user_request(req, d.addr, d.data_len,
				nvme_to_user_ptr(d.metadata), d.metadata_len,
				map_iter, vec ? NVME_IOCTL_VEC : 0);
		if (ret)
			goto out_free_req;
	}

	/* to free the bio on completion, as req->bio will be NULL at that time */
	pdu->bio = req->bio;
	pdu->req = req;
	req->end_io_data = ioucmd;
	req->end_io = nvme_uring_cmd_end_io;
	blk_execute_rq_nowait(req, false);
	return -EIOCBQUEUED;

out_free_req:
	blk_mq_free_request(req);
	return ret;
}

static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
		return true;
	if (is_sed_ioctl(cmd))
		return true;
	return false;
}

static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
		void __user *argp, bool open_for_write)
{
	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
	default:
		return sed_ioctl(ctrl->opal_dev, cmd, argp);
	}
}

#ifdef COMPAT_FOR_U64_ALIGNMENT
struct nvme_user_io32 {
	__u8	opcode;
	__u8	flags;
	__u16	control;
	__u16	nblocks;
	__u16	rsvd;
	__u64	metadata;
	__u64	addr;
	__u64	slba;
	__u32	dsmgmt;
	__u32	reftag;
	__u16	apptag;
	__u16	appmask;
} __attribute__((__packed__));
#define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)
#endif /* COMPAT_FOR_U64_ALIGNMENT */

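/*
 * Dispatch ioctls issued against a namespace (block or char device).  @flags
 * carries NVME_IOCTL_PARTITION for partitions so that nvme_cmd_allowed() can
 * restrict unprivileged passthrough, and NVME_IOCTL_VEC for vectored I/O.
 */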
static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp, unsigned int flags, bool open_for_write)
{
	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->head->ns_id;
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, argp, flags, open_for_write);
	/*
	 * struct nvme_user_io can have different padding on some 32-bit ABIs.
	 * Just accept the compat version as all fields that are used are the
	 * same size and at the same offset.
	 */
#ifdef COMPAT_FOR_U64_ALIGNMENT
	case NVME_IOCTL_SUBMIT_IO32:
#endif
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, argp);
	case NVME_IOCTL_IO64_CMD_VEC:
		flags |= NVME_IOCTL_VEC;
		fallthrough;
	case NVME_IOCTL_IO64_CMD:
		return nvme_user_cmd64(ns->ctrl, ns, argp, flags,
				open_for_write);
	default:
		return -ENOTTY;
	}
}

int nvme_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	bool open_for_write = mode & BLK_OPEN_WRITE;
	void __user *argp = (void __user *)arg;
	unsigned int flags = 0;

	if (bdev_is_partition(bdev))
		flags |= NVME_IOCTL_PARTITION;

	if (is_ctrl_ioctl(cmd))
		return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
	return nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
}

long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns =
		container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
	bool open_for_write = file->f_mode & FMODE_WRITE;
	void __user *argp = (void __user *)arg;

	if (is_ctrl_ioctl(cmd))
		return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
	return nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
}

static int nvme_uring_cmd_checks(unsigned int issue_flags)
{
	/* NVMe passthrough requires big SQE/CQE support */
	if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
	    (IO_URING_F_SQE128|IO_URING_F_CQE32))
		return -EOPNOTSUPP;
	return 0;
}

static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
		unsigned int issue_flags)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	ret = nvme_uring_cmd_checks(issue_flags);
	if (ret)
		return ret;

	switch (ioucmd->cmd_op) {
	case NVME_URING_CMD_IO:
		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false);
		break;
	case NVME_URING_CMD_IO_VEC:
		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
			struct nvme_ns, cdev);

	return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
}

int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
				 struct io_comp_batch *iob,
				 unsigned int poll_flags)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	struct request *req = pdu->req;

	if (req && blk_rq_is_poll(req))
		return blk_rq_poll(req, iob, poll_flags);
	return 0;
}
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp, struct nvme_ns_head *head, int srcu_idx,
		bool open_for_write)
	__releases(&head->srcu)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	nvme_get_ctrl(ns->ctrl);
	srcu_read_unlock(&head->srcu, srcu_idx);
	ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);

	nvme_put_ctrl(ctrl);
	return ret;
}

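/*
 * Multipath (ns_head) block device ioctl entry point: pick a live path under
 * the head's SRCU read lock and forward the ioctl to that namespace.
 */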
int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	bool open_for_write = mode & BLK_OPEN_WRITE;
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;
	unsigned int flags = 0;

	if (bdev_is_partition(bdev))
		flags |= NVME_IOCTL_PARTITION;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * separately and drop the ns SRCU reference early.  This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
				open_for_write);

	ret = nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	bool open_for_write = file->f_mode & FMODE_WRITE;
	struct cdev *cdev = file_inode(file)->i_cdev;
	struct nvme_ns_head *head =
		container_of(cdev, struct nvme_ns_head, cdev);
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
				open_for_write);

	ret = nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags)
{
	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
	struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EINVAL;

	if (ns)
		ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_ctrl *ctrl = ioucmd->file->private_data;
	int ret;

	/* IOPOLL not supported yet */
	if (issue_flags & IO_URING_F_IOPOLL)
		return -EOPNOTSUPP;

	ret = nvme_uring_cmd_checks(issue_flags);
	if (ret)
		return ret;

	switch (ioucmd->cmd_op) {
	case NVME_URING_CMD_ADMIN:
		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false);
		break;
	case NVME_URING_CMD_ADMIN_VEC:
		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

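/*
 * Handle the deprecated NVME_IOCTL_IO_CMD on the controller character device:
 * only valid when the controller exposes exactly one namespace, which is then
 * used as the target of the passthrough command.
 */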
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
		bool open_for_write)
{
	struct nvme_ns *ns;
	int ret, srcu_idx;

	srcu_idx = srcu_read_lock(&ctrl->srcu);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_or_null_rcu(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	if (!nvme_get_ns(ns)) {
		ret = -ENXIO;
		goto out_unlock;
	}
	srcu_read_unlock(&ctrl->srcu, srcu_idx);

	ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	srcu_read_unlock(&ctrl->srcu, srcu_idx);
	return ret;
}

long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	bool open_for_write = file->f_mode & FMODE_WRITE;
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp, open_for_write);
	case NVME_IOCTL_RESET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		dev_warn(ctrl->device, "resetting controller\n");
		return nvme_reset_ctrl_sync(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}