// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 * Copyright (c) 2017-2021 Christoph Hellwig.
 */
#include <linux/ptrace.h>	/* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include "nvme.h"

/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
        if (in_compat_syscall())
                ptrval = (compat_uptr_t)ptrval;
        return (void __user *)ptrval;
}

static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
                unsigned len, u32 seed, bool write)
{
        struct bio_integrity_payload *bip;
        int ret = -ENOMEM;
        void *buf;

        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                goto out;

        ret = -EFAULT;
        if (write && copy_from_user(buf, ubuf, len))
                goto out_free_meta;

        bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
        if (IS_ERR(bip)) {
                ret = PTR_ERR(bip);
                goto out_free_meta;
        }

        bip->bip_iter.bi_size = len;
        bip->bip_iter.bi_sector = seed;
        ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
                        offset_in_page(buf));
        if (ret == len)
                return buf;
        ret = -ENOMEM;
out_free_meta:
        kfree(buf);
out:
        return ERR_PTR(ret);
}

static int nvme_submit_user_cmd(struct request_queue *q,
                struct nvme_command *cmd, void __user *ubuffer,
                unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
                u32 meta_seed, u64 *result, unsigned timeout, bool vec)
{
        bool write = nvme_is_write(cmd);
        struct nvme_ns *ns = q->queuedata;
        struct block_device *bdev = ns ? ns->disk->part0 : NULL;
        struct request *req;
        struct bio *bio = NULL;
        void *meta = NULL;
        int ret;

        req = nvme_alloc_request(q, cmd, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        if (timeout)
                req->timeout = timeout;
        nvme_req(req)->flags |= NVME_REQ_USERCMD;

        if (ubuffer && bufflen) {
                if (!vec)
                        ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
                                GFP_KERNEL);
                else {
                        struct iovec fast_iov[UIO_FASTIOV];
                        struct iovec *iov = fast_iov;
                        struct iov_iter iter;

                        ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
                                        UIO_FASTIOV, &iov, &iter);
                        if (ret < 0)
                                goto out;
                        ret = blk_rq_map_user_iov(q, req, NULL, &iter,
                                        GFP_KERNEL);
                        kfree(iov);
                }
                if (ret)
                        goto out;
                bio = req->bio;
                if (bdev)
                        bio_set_dev(bio, bdev);
                if (bdev && meta_buffer && meta_len) {
                        meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
                                        meta_seed, write);
                        if (IS_ERR(meta)) {
                                ret = PTR_ERR(meta);
                                goto out_unmap;
                        }
                        req->cmd_flags |= REQ_INTEGRITY;
                }
        }

        ret = nvme_execute_passthru_rq(req);
        if (result)
                *result = le64_to_cpu(nvme_req(req)->result.u64);
        if (meta && !ret && !write) {
                if (copy_to_user(meta_buffer, meta, meta_len))
                        ret = -EFAULT;
        }
        kfree(meta);
out_unmap:
        if (bio)
                blk_rq_unmap_user(bio);
out:
        blk_mq_free_request(req);
        return ret;
}
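
/*
 * Translate the legacy struct nvme_user_io from NVME_IOCTL_SUBMIT_IO into an
 * NVMe read/write/compare command and submit it on the namespace queue.
 *
 * Illustrative sketch (hypothetical userspace code, not part of this file):
 * a caller would typically fill struct nvme_user_io and issue the ioctl on
 * the namespace device node, roughly like
 *
 *	struct nvme_user_io io = { 0 };
 *
 *	io.opcode  = 0x02;			(read, i.e. nvme_cmd_read)
 *	io.nblocks = nr_blocks - 1;		(0's based count)
 *	io.slba    = start_lba;
 *	io.addr    = (__u64)(uintptr_t)data_buf;
 *	ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io);
 *
 * where fd, nr_blocks, start_lba and data_buf are placeholders supplied by
 * the caller.
 */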
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
        struct nvme_user_io io;
        struct nvme_command c;
        unsigned length, meta_len;
        void __user *metadata;

        if (copy_from_user(&io, uio, sizeof(io)))
                return -EFAULT;
        if (io.flags)
                return -EINVAL;

        switch (io.opcode) {
        case nvme_cmd_write:
        case nvme_cmd_read:
        case nvme_cmd_compare:
                break;
        default:
                return -EINVAL;
        }

        length = (io.nblocks + 1) << ns->lba_shift;

        if ((io.control & NVME_RW_PRINFO_PRACT) &&
            ns->ms == sizeof(struct t10_pi_tuple)) {
                /*
                 * Protection information is stripped/inserted by the
                 * controller.
                 */
                if (nvme_to_user_ptr(io.metadata))
                        return -EINVAL;
                meta_len = 0;
                metadata = NULL;
        } else {
                meta_len = (io.nblocks + 1) * ns->ms;
                metadata = nvme_to_user_ptr(io.metadata);
        }

        if (ns->features & NVME_NS_EXT_LBAS) {
                length += meta_len;
                meta_len = 0;
        } else if (meta_len) {
                if ((io.metadata & 3) || !io.metadata)
                        return -EINVAL;
        }

        memset(&c, 0, sizeof(c));
        c.rw.opcode = io.opcode;
        c.rw.flags = io.flags;
        c.rw.nsid = cpu_to_le32(ns->head->ns_id);
        c.rw.slba = cpu_to_le64(io.slba);
        c.rw.length = cpu_to_le16(io.nblocks);
        c.rw.control = cpu_to_le16(io.control);
        c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
        c.rw.reftag = cpu_to_le32(io.reftag);
        c.rw.apptag = cpu_to_le16(io.apptag);
        c.rw.appmask = cpu_to_le16(io.appmask);

        return nvme_submit_user_cmd(ns->queue, &c,
                        nvme_to_user_ptr(io.addr), length,
                        metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
                        false);
}

static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
                struct nvme_ns *ns, __u32 nsid)
{
        if (ns && nsid != ns->head->ns_id) {
                dev_err(ctrl->device,
                        "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
                        current->comm, nsid, ns->head->ns_id);
                return false;
        }

        return true;
}
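
/*
 * NVME_IOCTL_ADMIN_CMD / NVME_IOCTL_IO_CMD passthrough: copy in a
 * struct nvme_passthru_cmd, build the corresponding NVMe command and run it
 * on the admin queue (ns == NULL) or on the namespace I/O queue.
 * CAP_SYS_ADMIN is required since arbitrary commands can be issued.
 *
 * Illustrative sketch (hypothetical userspace code, not part of this file):
 * an Identify Controller admin command could look roughly like
 *
 *	struct nvme_passthru_cmd cmd = { 0 };
 *
 *	cmd.opcode   = 0x06;		(Identify)
 *	cmd.addr     = (__u64)(uintptr_t)id_buf;
 *	cmd.data_len = 4096;
 *	cmd.cdw10    = 1;		(CNS 01h: Identify Controller)
 *	ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
 */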
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                struct nvme_passthru_cmd __user *ucmd)
{
        struct nvme_passthru_cmd cmd;
        struct nvme_command c;
        unsigned timeout = 0;
        u64 result;
        int status;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
                return -EFAULT;
        if (cmd.flags)
                return -EINVAL;
        if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = cmd.opcode;
        c.common.flags = cmd.flags;
        c.common.nsid = cpu_to_le32(cmd.nsid);
        c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
        c.common.cdw10 = cpu_to_le32(cmd.cdw10);
        c.common.cdw11 = cpu_to_le32(cmd.cdw11);
        c.common.cdw12 = cpu_to_le32(cmd.cdw12);
        c.common.cdw13 = cpu_to_le32(cmd.cdw13);
        c.common.cdw14 = cpu_to_le32(cmd.cdw14);
        c.common.cdw15 = cpu_to_le32(cmd.cdw15);

        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);

        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                        nvme_to_user_ptr(cmd.addr), cmd.data_len,
                        nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
                        0, &result, timeout, false);

        if (status >= 0) {
                if (put_user(result, &ucmd->result))
                        return -EFAULT;
        }

        return status;
}

static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                struct nvme_passthru_cmd64 __user *ucmd, bool vec)
{
        struct nvme_passthru_cmd64 cmd;
        struct nvme_command c;
        unsigned timeout = 0;
        int status;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
                return -EFAULT;
        if (cmd.flags)
                return -EINVAL;
        if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = cmd.opcode;
        c.common.flags = cmd.flags;
        c.common.nsid = cpu_to_le32(cmd.nsid);
        c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
        c.common.cdw10 = cpu_to_le32(cmd.cdw10);
        c.common.cdw11 = cpu_to_le32(cmd.cdw11);
        c.common.cdw12 = cpu_to_le32(cmd.cdw12);
        c.common.cdw13 = cpu_to_le32(cmd.cdw13);
        c.common.cdw14 = cpu_to_le32(cmd.cdw14);
        c.common.cdw15 = cpu_to_le32(cmd.cdw15);

        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);

        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                        nvme_to_user_ptr(cmd.addr), cmd.data_len,
                        nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
                        0, &cmd.result, timeout, vec);

        if (status >= 0) {
                if (put_user(cmd.result, &ucmd->result))
                        return -EFAULT;
        }

        return status;
}
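
/*
 * Admin passthrough and SED/Opal ioctls operate on the controller rather
 * than on a single namespace, so they are dispatched separately from the
 * per-namespace ioctls handled by nvme_ns_ioctl() below.
 */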
static bool is_ctrl_ioctl(unsigned int cmd)
{
        if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
                return true;
        if (is_sed_ioctl(cmd))
                return true;
        return false;
}

static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
                void __user *argp)
{
        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
                return nvme_user_cmd(ctrl, NULL, argp);
        case NVME_IOCTL_ADMIN64_CMD:
                return nvme_user_cmd64(ctrl, NULL, argp, false);
        default:
                return sed_ioctl(ctrl->opal_dev, cmd, argp);
        }
}

#ifdef COMPAT_FOR_U64_ALIGNMENT
struct nvme_user_io32 {
        __u8	opcode;
        __u8	flags;
        __u16	control;
        __u16	nblocks;
        __u16	rsvd;
        __u64	metadata;
        __u64	addr;
        __u64	slba;
        __u32	dsmgmt;
        __u32	reftag;
        __u16	apptag;
        __u16	appmask;
} __attribute__((__packed__));
#define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)
#endif /* COMPAT_FOR_U64_ALIGNMENT */

static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
                void __user *argp)
{
        switch (cmd) {
        case NVME_IOCTL_ID:
                force_successful_syscall_return();
                return ns->head->ns_id;
        case NVME_IOCTL_IO_CMD:
                return nvme_user_cmd(ns->ctrl, ns, argp);
        /*
         * struct nvme_user_io can have different padding on some 32-bit ABIs.
         * Just accept the compat version as all fields that are used are the
         * same size and at the same offset.
         */
#ifdef COMPAT_FOR_U64_ALIGNMENT
        case NVME_IOCTL_SUBMIT_IO32:
#endif
        case NVME_IOCTL_SUBMIT_IO:
                return nvme_submit_io(ns, argp);
        case NVME_IOCTL_IO64_CMD:
                return nvme_user_cmd64(ns->ctrl, ns, argp, false);
        case NVME_IOCTL_IO64_CMD_VEC:
                return nvme_user_cmd64(ns->ctrl, ns, argp, true);
        default:
                return -ENOTTY;
        }
}

static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg)
{
        if (is_ctrl_ioctl(cmd))
                return nvme_ctrl_ioctl(ns->ctrl, cmd, arg);
        return nvme_ns_ioctl(ns, cmd, arg);
}

int nvme_ioctl(struct block_device *bdev, fmode_t mode,
                unsigned int cmd, unsigned long arg)
{
        struct nvme_ns *ns = bdev->bd_disk->private_data;

        return __nvme_ioctl(ns, cmd, (void __user *)arg);
}

long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct nvme_ns *ns =
                container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);

        return __nvme_ioctl(ns, cmd, (void __user *)arg);
}
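
/*
 * For multipath (CONFIG_NVME_MULTIPATH) the block and character devices are
 * exposed per namespace head; a usable path is looked up under the head's
 * SRCU read lock and the ioctl is redirected to that path's namespace or,
 * for controller ioctls, to its controller.
 */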
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
                void __user *argp, struct nvme_ns_head *head, int srcu_idx)
        __releases(&head->srcu)
{
        struct nvme_ctrl *ctrl = ns->ctrl;
        int ret;

        nvme_get_ctrl(ns->ctrl);
        srcu_read_unlock(&head->srcu, srcu_idx);
        ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp);

        nvme_put_ctrl(ctrl);
        return ret;
}

int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
                unsigned int cmd, unsigned long arg)
{
        struct nvme_ns_head *head = bdev->bd_disk->private_data;
        void __user *argp = (void __user *)arg;
        struct nvme_ns *ns;
        int srcu_idx, ret = -EWOULDBLOCK;

        srcu_idx = srcu_read_lock(&head->srcu);
        ns = nvme_find_path(head);
        if (!ns)
                goto out_unlock;

        /*
         * Handle ioctls that apply to the controller instead of the namespace
         * separately and drop the ns SRCU reference early.  This avoids a
         * deadlock when deleting namespaces using the passthrough interface.
         */
        if (is_ctrl_ioctl(cmd))
                return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

        ret = nvme_ns_ioctl(ns, cmd, argp);
out_unlock:
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
}

long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
                unsigned long arg)
{
        struct cdev *cdev = file_inode(file)->i_cdev;
        struct nvme_ns_head *head =
                container_of(cdev, struct nvme_ns_head, cdev);
        void __user *argp = (void __user *)arg;
        struct nvme_ns *ns;
        int srcu_idx, ret = -EWOULDBLOCK;

        srcu_idx = srcu_read_lock(&head->srcu);
        ns = nvme_find_path(head);
        if (!ns)
                goto out_unlock;

        if (is_ctrl_ioctl(cmd))
                return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

        ret = nvme_ns_ioctl(ns, cmd, argp);
out_unlock:
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */

static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
        struct nvme_ns *ns;
        int ret;

        down_read(&ctrl->namespaces_rwsem);
        if (list_empty(&ctrl->namespaces)) {
                ret = -ENOTTY;
                goto out_unlock;
        }

        ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
        if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
                dev_warn(ctrl->device,
                        "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
                ret = -EINVAL;
                goto out_unlock;
        }

        dev_warn(ctrl->device,
                "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
        kref_get(&ns->kref);
        up_read(&ctrl->namespaces_rwsem);

        ret = nvme_user_cmd(ctrl, ns, argp);
        nvme_put_ns(ns);
        return ret;

out_unlock:
        up_read(&ctrl->namespaces_rwsem);
        return ret;
}

long nvme_dev_ioctl(struct file *file, unsigned int cmd,
                unsigned long arg)
{
        struct nvme_ctrl *ctrl = file->private_data;
        void __user *argp = (void __user *)arg;

        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
                return nvme_user_cmd(ctrl, NULL, argp);
        case NVME_IOCTL_ADMIN64_CMD:
                return nvme_user_cmd64(ctrl, NULL, argp, false);
        case NVME_IOCTL_IO_CMD:
                return nvme_dev_user_cmd(ctrl, argp);
        case NVME_IOCTL_RESET:
                dev_warn(ctrl->device, "resetting controller\n");
                return nvme_reset_ctrl_sync(ctrl);
        case NVME_IOCTL_SUBSYS_RESET:
                return nvme_reset_subsystem(ctrl);
        case NVME_IOCTL_RESCAN:
                nvme_queue_scan(ctrl);
                return 0;
        default:
                return -ENOTTY;
        }
}