/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <scsi/sg.h>
#include <asm/unaligned.h>

#include "nvme.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned char admin_timeout = 60;
module_param(admin_timeout, byte, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned char nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static int nvme_major;
module_param(nvme_major, int, 0);

static int nvme_char_major;
module_param(nvme_char_major, int, 0);

static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);

static struct class *nvme_class;

bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state = ctrl->state;
	bool changed = false;

	spin_lock_irq(&ctrl->lock);
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_RESETTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}
	spin_unlock_irq(&ctrl->lock);

	if (changed)
		ctrl->state = new_state;

	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->type == NVME_NS_LIGHTNVM)
		nvme_nvm_unregister(ns->queue, ns->disk->disk_name);

	spin_lock(&dev_list_lock);
	ns->disk->private_data = NULL;
	spin_unlock(&dev_list_lock);

	put_disk(ns->disk);
	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
{
	struct nvme_ns *ns;

	spin_lock(&dev_list_lock);
	ns = disk->private_data;
	if (ns) {
		if (!kref_get_unless_zero(&ns->kref))
			goto fail;
		if (!try_module_get(ns->ctrl->ops->module))
			goto fail_put_ns;
	}
	spin_unlock(&dev_list_lock);

	return ns;

fail_put_ns:
	kref_put(&ns->kref, nvme_free_ns);
fail:
	spin_unlock(&dev_list_lock);
	return NULL;
}

void nvme_requeue_req(struct request *req)
{
	unsigned long flags;

	blk_mq_requeue_request(req);
	spin_lock_irqsave(req->q->queue_lock, flags);
	if (!blk_queue_stopped(req->q))
		blk_mq_kick_requeue_list(req->q);
	spin_unlock_irqrestore(req->q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(nvme_requeue_req);

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags)
{
	bool write = cmd->common.opcode & 1;
	struct request *req;

	req = blk_mq_alloc_request(q, write, flags);
	if (IS_ERR(req))
		return req;

	req->cmd_type = REQ_TYPE_DRV_PRIV;
	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	req->__data_len = 0;
	req->__sector = (sector_t) -1;
	req->bio = req->biotail = NULL;

	req->cmd = (unsigned char *)cmd;
	req->cmd_len = sizeof(struct nvme_command);

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}

static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_dsm_range *range;
	struct page *page;
	int offset;
	unsigned int nr_bytes = blk_rq_bytes(req);

	range = kmalloc(sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_MQ_RQ_QUEUE_BUSY;

	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift);
	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.nr = 0;
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->completion_data = range;
	page = virt_to_page(range);
	offset = offset_in_page(range);
	blk_add_request_payload(req, page, offset, sizeof(*range));

	/*
	 * we set __data_len back to the size of the area to be discarded
	 * on disk. This allows us to report completion on the full amount
	 * of blocks described by the request.
	 */
	req->__data_len = nr_bytes;

	return 0;
}

static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.command_id = req->tag;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}

int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	int ret = 0;

	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		memcpy(cmd, req->cmd, sizeof(*cmd));
	else if (req->cmd_flags & REQ_FLUSH)
		nvme_setup_flush(ns, cmd);
	else if (req->cmd_flags & REQ_DISCARD)
		ret = nvme_setup_discard(ns, req, cmd);
	else
		nvme_setup_rw(ns, req, cmd);

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
		unsigned timeout)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	req->special = cqe;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, 0);
	ret = req->errors;
 out:
	blk_mq_free_request(req);
	return ret;
}

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
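
/*
 * Usage sketch (illustrative only, not part of the driver): a caller that
 * wants to issue a synchronous admin command builds a struct nvme_command
 * and hands it to nvme_submit_sync_cmd(); nvme_identify_ctrl() below is the
 * canonical in-tree example, and the CNS value here simply mirrors it.
 *
 *	struct nvme_command c = { };
 *	void *buf = kmalloc(0x1000, GFP_KERNEL);
 *	int ret = -ENOMEM;
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = cpu_to_le32(1);
 *	if (buf)
 *		ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, buf, 0x1000);
 *	// on ret == 0, buf now holds the Identify Controller data
 *	kfree(buf);
 */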

int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout)
{
	bool write = cmd->common.opcode & 1;
	struct nvme_completion cqe;
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	req->special = &cqe;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;

		if (!disk)
			goto submit;
		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto out_unmap;
		}

		if (meta_buffer && meta_len) {
			struct bio_integrity_payload *bip;

			meta = kmalloc(meta_len, GFP_KERNEL);
			if (!meta) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			if (write) {
				if (copy_from_user(meta, meta_buffer,
						meta_len)) {
					ret = -EFAULT;
					goto out_free_meta;
				}
			}

			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
			if (IS_ERR(bip)) {
				ret = PTR_ERR(bip);
				goto out_free_meta;
			}

			bip->bip_iter.bi_size = meta_len;
			bip->bip_iter.bi_sector = meta_seed;

			ret = bio_integrity_add_page(bio, virt_to_page(meta),
					meta_len, offset_in_page(meta));
			if (ret != meta_len) {
				ret = -ENOMEM;
				goto out_free_meta;
			}
		}
	}
 submit:
	blk_execute_rq(req->q, disk, req, 0);
	ret = req->errors;
	if (result)
		*result = le32_to_cpu(cqe.result);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
 out_free_meta:
	kfree(meta);
 out_unmap:
	if (bio) {
		if (disk && bio->bi_bdev)
			bdput(bio->bi_bdev);
		blk_rq_unmap_user(bio);
	}
 out:
	blk_mq_free_request(req);
	return ret;
}

int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout)
{
	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
			result, timeout);
}

int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(1);

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(2);
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}

int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify,
	c.identify.nsid = cpu_to_le32(nsid),

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
	if (error)
		kfree(*id);
	return error;
}

int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	struct nvme_completion cqe;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
	if (ret >= 0)
		*result = le32_to_cpu(cqe.result);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	struct nvme_completion cqe;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
	if (ret >= 0)
		*result = le32_to_cpu(cqe.result);
	return ret;
}

int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{
	struct nvme_command c = { };
	int error;

	c.common.opcode = nvme_admin_get_log_page,
	c.common.nsid = cpu_to_le32(0xFFFFFFFF),
	c.common.cdw10[0] = cpu_to_le32(
			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
			 NVME_LOG_SMART),

	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
	if (!*log)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
			sizeof(struct nvme_smart_log));
	if (error)
		kfree(*log);
	return error;
}

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
			&result);
	if (status)
		return status;

	nr_io_queues = min(result & 0xffff, result >> 16) + 1;
	*count = min(*count, nr_io_queues);
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
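
/*
 * Illustrative sketch (assuming a PCIe-style transport that wants one queue
 * per possible CPU; the surrounding code is hypothetical): this is how a
 * transport driver is expected to negotiate its I/O queue count with the
 * helper above.  The controller may grant fewer queues than requested, and
 * the count is clamped accordingly.
 *
 *	int nr_io_queues = num_possible_cpus();
 *	int ret = nvme_set_queue_count(ctrl, &nr_io_queues);
 *
 *	if (ret)
 *		return ret;		// Set Features failed
 *	// nr_io_queues now holds the usable queue count (may be smaller)
 */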

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return __nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			&cmd.result, timeout);
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
#ifdef CONFIG_BLK_DEV_NVME_SCSI
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
#endif
	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case SG_IO:
		return -ENOIOCTLCMD;
	}
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity;

	/* start from a zeroed profile before OR'ing in the capability flags */
	memset(&integrity, 0, sizeof(integrity));
	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ns->ms;
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
#else
static void nvme_init_integrity(struct nvme_ns *ns)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

static void nvme_config_discard(struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
		ns->queue->limits.discard_zeroes_data = 1;
	else
		ns->queue->limits.discard_zeroes_data = 0;

	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}

static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_id_ns *id;
	u8 lbaf, pi_type;
	u16 old_ms;
	unsigned short bs;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}
	if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
		dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
				__func__);
		return -ENODEV;
	}
	if (id->ncap == 0) {
		kfree(id);
		return -ENODEV;
	}

	if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
		if (nvme_nvm_register(ns->queue, disk->disk_name)) {
			dev_warn(disk_to_dev(ns->disk),
				"%s: LightNVM init failure\n", __func__);
			kfree(id);
			return -ENODEV;
		}
		ns->type = NVME_NS_LIGHTNVM;
	}

	if (ns->ctrl->vs >= NVME_VS(1, 1))
		memcpy(ns->eui, id->eui64, sizeof(ns->eui));
	if (ns->ctrl->vs >= NVME_VS(1, 2))
		memcpy(ns->uuid, id->nguid, sizeof(ns->uuid));

	old_ms = ns->ms;
	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->lba_shift = id->lbaf[lbaf].ds;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	/*
	 * If identify namespace failed, use default 512 byte block size so
	 * block layer can use before failing read/write for 0 capacity.
	 */
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	bs = 1 << ns->lba_shift;
	/* XXX: PI implementation requires metadata equal t10 pi tuple size */
	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
					id->dps & NVME_NS_DPS_PI_MASK : 0;

	blk_mq_freeze_queue(disk->queue);
	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
				ns->ms != old_ms ||
				bs != queue_logical_block_size(disk->queue) ||
				(ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;
	blk_queue_logical_block_size(ns->queue, bs);

	if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
		nvme_init_integrity(ns);
	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
		set_capacity(disk, 0);
	else
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
	blk_mq_unfreeze_queue(disk->queue);

	kfree(id);
	return 0;
}

static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
};

static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	struct nvme_command c;
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
}

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = (nvme_pr_type(type) << 8) | (abort ? 2 : 1);
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = (nvme_pr_type(type) << 8) | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_compat_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk= nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};

static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);

int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
	int ret;

	if (page_shift < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	ctrl->page_size = 1 << page_shift;

	ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);

int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies;
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);

static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = false;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;

		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	if (ctrl->stripe_size)
		blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
	blk_queue_virt_boundary(q, ctrl->page_size - 1);
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);
}

/*
 * Initialize the cached copies of the Identify data and various controller
 * registers in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u64 cap;
	int ret, page_shift;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	page_shift = NVME_CAP_MPSMIN(cap) + 12;

	if (ctrl->vs >= NVME_VS(1, 1))
		ctrl->subsystem = NVME_CAP_NSSRC(cap);

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	ctrl->vid = le16_to_cpu(id->vid);
	ctrl->oncs = le16_to_cpup(&id->oncs);
	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	ctrl->cntlid = le16_to_cpup(&id->cntlid);
	memcpy(ctrl->serial, id->sn, sizeof(id->sn));
	memcpy(ctrl->model, id->mn, sizeof(id->mn));
	memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
	if (id->mdts)
		ctrl->max_hw_sectors = 1 << (id->mdts + page_shift - 9);
	else
		ctrl->max_hw_sectors = UINT_MAX;

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
		unsigned int max_hw_sectors;

		ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
		max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
		if (ctrl->max_hw_sectors) {
			ctrl->max_hw_sectors = min(max_hw_sectors,
							ctrl->max_hw_sectors);
		} else {
			ctrl->max_hw_sectors = max_hw_sectors;
		}
	}

	nvme_set_queue_limits(ctrl, ctrl->admin_q);

	kfree(id);
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);

static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl;
	int instance = iminor(inode);
	int ret = -ENODEV;

	spin_lock(&dev_list_lock);
	list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
		if (ctrl->instance != instance)
			continue;

		if (!ctrl->admin_q) {
			ret = -EWOULDBLOCK;
			break;
		}
		if (!kref_get_unless_zero(&ctrl->kref))
			break;
		file->private_data = ctrl;
		ret = 0;
		break;
	}
	spin_unlock(&dev_list_lock);

	return ret;
}

static int nvme_dev_release(struct inode *inode, struct file *file)
{
	nvme_put_ctrl(file->private_data);
	return 0;
}

static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	mutex_lock(&ctrl->namespaces_mutex);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	mutex_unlock(&ctrl->namespaces_mutex);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}
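
/*
 * Userspace view (illustrative sketch, not part of the driver): the char
 * device ioctls handled below are typically driven like this, here issuing
 * an Identify Controller (opcode 0x06, CNS=1) through the controller node.
 * The device path and the 4096-byte buffer are assumptions, and error
 * handling is omitted.
 *
 *	struct nvme_admin_cmd cmd = {
 *		.opcode   = 0x06,			// Identify
 *		.addr     = (__u64)(uintptr_t)buf,	// 4096-byte buffer
 *		.data_len = 4096,
 *		.cdw10    = 1,				// CNS=1: controller
 *	};
 *	int fd = open("/dev/nvme0", O_RDWR);
 *
 *	ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
 */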

static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return ctrl->ops->reset_ctrl(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
};

static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = ctrl->ops->reset_ctrl(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	struct nvme_ctrl *ctrl = ns->ctrl;
	int serial_len = sizeof(ctrl->serial);
	int model_len = sizeof(ctrl->model);

	if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
		return sprintf(buf, "eui.%16phN\n", ns->uuid);

	if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
		return sprintf(buf, "eui.%8phN\n", ns->eui);

	while (ctrl->serial[serial_len - 1] == ' ')
		serial_len--;
	while (ctrl->model[model_len - 1] == ' ')
		model_len--;

	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
		serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
}
static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%pU\n", ns->uuid);
}
static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%8phd\n", ns->eui);
}
static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%d\n", ns->ns_id);
}
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);

static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
	NULL,
};

static umode_t nvme_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;

	if (a == &dev_attr_uuid.attr) {
		if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
			return 0;
	}
	return a->mode;
}

static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_attrs_are_visible,
};

#define nvme_show_str_function(field)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);			\
	return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field); \
}									\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

#define nvme_show_int_function(field)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);			\
	return sprintf(buf, "%d\n", ctrl->field);			\
}									\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);
nvme_show_int_function(cntlid);

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	NULL
};

static struct attribute_group nvme_dev_attrs_group = {
	.attrs = nvme_dev_attrs,
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

	return nsa->ns_id - nsb->ns_id;
}

static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns, *ret = NULL;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->ns_id == nsid) {
			kref_get(&ns->kref);
			ret = ns;
			break;
		}
		if (ns->ns_id > nsid)
			break;
	}
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}

static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int node = dev_to_node(ctrl->dev);

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
	if (ns->instance < 0)
		goto out_free_ns;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue))
		goto out_release_instance;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	disk = alloc_disk_node(0, node);
	if (!disk)
		goto out_free_queue;

	kref_init(&ns->kref);
	ns->ns_id = nsid;
	ns->disk = disk;
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	nvme_set_queue_limits(ctrl, ns->queue);

	disk->major = nvme_major;
	disk->first_minor = 0;
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = ctrl->device;
	disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);

	if (nvme_revalidate_disk(ns->disk))
		goto out_free_disk;

	mutex_lock(&ctrl->namespaces_mutex);
	list_add_tail(&ns->list, &ctrl->namespaces);
	mutex_unlock(&ctrl->namespaces_mutex);

	kref_get(&ctrl->kref);
	if (ns->type == NVME_NS_LIGHTNVM)
		return;

	add_disk(ns->disk);
	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group))
		pr_warn("%s: failed to create sysfs group for identification\n",
			ns->disk->disk_name);
	return;
 out_free_disk:
	kfree(disk);
 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_release_instance:
	ida_simple_remove(&ctrl->ns_ida, ns->instance);
 out_free_ns:
	kfree(ns);
}

static void nvme_ns_remove(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	if (ns->disk->flags & GENHD_FL_UP) {
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group);
		del_gendisk(ns->disk);
		blk_mq_abort_requeue_list(ns->queue);
		blk_cleanup_queue(ns->queue);
	}

	mutex_lock(&ns->ctrl->namespaces_mutex);
	list_del_init(&ns->list);
	mutex_unlock(&ns->ctrl->namespaces_mutex);

	nvme_put_ns(ns);
}

static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	ns = nvme_find_get_ns(ctrl, nsid);
	if (ns) {
		if (revalidate_disk(ns->disk))
			nvme_ns_remove(ns);
		nvme_put_ns(ns);
	} else
		nvme_alloc_ns(ctrl, nsid);
}

static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns;
	__le32 *ns_list;
	unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
	int ret = 0;

	ns_list = kzalloc(0x1000, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	for (i = 0; i < num_lists; i++) {
		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
		if (ret)
			goto out;

		for (j = 0; j < min(nn, 1024U); j++) {
			nsid = le32_to_cpu(ns_list[j]);
			if (!nsid)
				goto out;

			nvme_validate_ns(ctrl, nsid);

			while (++prev < nsid) {
				ns = nvme_find_get_ns(ctrl, prev);
				if (ns) {
					nvme_ns_remove(ns);
					nvme_put_ns(ns);
				}
			}
		}
		nn -= j;
	}
 out:
	kfree(ns_list);
	return ret;
}

static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns, *next;
	unsigned i;

	for (i = 1; i <= nn; i++)
		nvme_validate_ns(ctrl, i);

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->ns_id > nn)
			nvme_ns_remove(ns);
	}
}

static void nvme_scan_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
	struct nvme_id_ctrl *id;
	unsigned nn;

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	if (nvme_identify_ctrl(ctrl, &id))
		return;

	nn = le32_to_cpu(id->nn);
	if (ctrl->vs >= NVME_VS(1, 1) &&
	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		if (!nvme_scan_ns_list(ctrl, nn))
			goto done;
	}
	nvme_scan_ns_sequential(ctrl, nn);
 done:
	mutex_lock(&ctrl->namespaces_mutex);
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
	mutex_unlock(&ctrl->namespaces_mutex);
	kfree(id);

	if (ctrl->ops->post_scan)
		ctrl->ops->post_scan(ctrl);
}

void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Do not queue new scan work when a controller is reset during
	 * removal.
	 */
	if (ctrl->state == NVME_CTRL_LIVE)
		schedule_work(&ctrl->scan_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_scan);

/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;

	/*
	 * The dead state indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);

static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	spin_lock_irq(&ctrl->lock);
	while (ctrl->event_limit > 0) {
		int aer_idx = --ctrl->event_limit;

		spin_unlock_irq(&ctrl->lock);
		ctrl->ops->submit_async_event(ctrl, aer_idx);
		spin_lock_irq(&ctrl->lock);
	}
	spin_unlock_irq(&ctrl->lock);
}

void nvme_complete_async_event(struct nvme_ctrl *ctrl,
		struct nvme_completion *cqe)
{
	u16 status = le16_to_cpu(cqe->status) >> 1;
	u32 result = le32_to_cpu(cqe->result);

	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
		++ctrl->event_limit;
		schedule_work(&ctrl->async_event_work);
	}

	if (status != NVME_SC_SUCCESS)
		return;

	switch (result & 0xff07) {
	case NVME_AER_NOTICE_NS_CHANGED:
		dev_info(ctrl->device, "rescanning\n");
		nvme_queue_scan(ctrl);
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);

void nvme_queue_async_events(struct nvme_ctrl *ctrl)
{
	ctrl->event_limit = NVME_NR_AERS;
	schedule_work(&ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_async_events);

static DEFINE_IDA(nvme_instance_ida);

static int nvme_set_instance(struct nvme_ctrl *ctrl)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	ctrl->instance = instance;
	return 0;
}

static void nvme_release_instance(struct nvme_ctrl *ctrl)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, ctrl->instance);
	spin_unlock(&dev_list_lock);
}

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	flush_work(&ctrl->async_event_work);
	flush_work(&ctrl->scan_work);
	nvme_remove_namespaces(ctrl);

	device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));

	spin_lock(&dev_list_lock);
	list_del(&ctrl->node);
	spin_unlock(&dev_list_lock);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

static void nvme_free_ctrl(struct kref *kref)
{
	struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);

	put_device(ctrl->device);
	nvme_release_instance(ctrl);
	ida_destroy(&ctrl->ns_ida);

	ctrl->ops->free_ctrl(ctrl);
}

void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	kref_put(&ctrl->kref, nvme_free_ctrl);
}
EXPORT_SYMBOL_GPL(nvme_put_ctrl);

/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * earliest initialization so that we have the initialized structure around
 * during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	spin_lock_init(&ctrl->lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	mutex_init(&ctrl->namespaces_mutex);
	kref_init(&ctrl->kref);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);

	ret = nvme_set_instance(ctrl);
	if (ret)
		goto out;

	ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
				MKDEV(nvme_char_major, ctrl->instance),
				ctrl, nvme_dev_attr_groups,
				"nvme%d", ctrl->instance);
	if (IS_ERR(ctrl->device)) {
		ret = PTR_ERR(ctrl->device);
		goto out_release_instance;
	}
	get_device(ctrl->device);
	ida_init(&ctrl->ns_ida);

	spin_lock(&dev_list_lock);
	list_add_tail(&ctrl->node, &nvme_ctrl_list);
	spin_unlock(&dev_list_lock);

	return 0;
out_release_instance:
	nvme_release_instance(ctrl);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
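
/*
 * Transport usage sketch (illustrative, loosely modelled on how the PCIe
 * transport uses these helpers; the surrounding probe function, "dev" and
 * "my_ctrl_ops" are placeholders, not code from this file): register the
 * controller early, bring up the admin queue, then read the Identify data
 * and kick off namespace scanning.
 *
 *	ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &my_ctrl_ops, quirks);
 *	if (ret)
 *		return ret;
 *	// ... set up the admin queue, then:
 *	ret = nvme_init_identify(&dev->ctrl);
 *	if (ret)
 *		goto out;
 *	nvme_queue_scan(&dev->ctrl);
 */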

/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		/*
		 * Revalidating a dead namespace sets capacity to 0. This will
		 * end buffered writers dirtying pages that can't be synced.
		 */
		if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
			revalidate_disk(ns->disk);

		blk_set_queue_dying(ns->queue);
		blk_mq_abort_requeue_list(ns->queue);
		blk_mq_start_stopped_hw_queues(ns->queue, true);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);

void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		spin_lock_irq(ns->queue->queue_lock);
		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
		spin_unlock_irq(ns->queue->queue_lock);

		blk_mq_cancel_requeue_work(ns->queue);
		blk_mq_stop_hw_queues(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
		blk_mq_start_stopped_hw_queues(ns->queue, true);
		blk_mq_kick_requeue_list(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);

int __init nvme_core_init(void)
{
	int result;

	result = register_blkdev(nvme_major, "nvme");
	if (result < 0)
		return result;
	else if (result > 0)
		nvme_major = result;

	result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
							&nvme_dev_fops);
	if (result < 0)
		goto unregister_blkdev;
	else if (result > 0)
		nvme_char_major = result;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}

	return 0;

 unregister_chrdev:
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
 unregister_blkdev:
	unregister_blkdev(nvme_major, "nvme");
	return result;
}

void nvme_core_exit(void)
{
	class_destroy(nvme_class);
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
	unregister_blkdev(nvme_major, "nvme");
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);