/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <scsi/sg.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned char admin_timeout = 60;
module_param(admin_timeout, byte, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned char nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

unsigned int nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, uint, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
EXPORT_SYMBOL_GPL(nvme_max_retries);

static int nvme_char_major;
module_param(nvme_char_major, int, 0);

static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);

static struct class *nvme_class;

void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	int status;

	if (!blk_mq_request_started(req))
		return;

	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	status = NVME_SC_ABORT_REQ;
	if (blk_queue_dying(req->q))
		status |= NVME_SC_DNR;
	blk_mq_complete_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state = ctrl->state;
	bool changed = false;

	spin_lock_irq(&ctrl->lock);
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RECONNECTING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}
	spin_unlock_irq(&ctrl->lock);

	if (changed)
		ctrl->state = new_state;

	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->type == NVME_NS_LIGHTNVM)
		nvme_nvm_unregister(ns->queue, ns->disk->disk_name);

	spin_lock(&dev_list_lock);
	ns->disk->private_data = NULL;
	spin_unlock(&dev_list_lock);

	put_disk(ns->disk);
	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
{
	struct nvme_ns *ns;

	spin_lock(&dev_list_lock);
	ns = disk->private_data;
	if (ns) {
		if (!kref_get_unless_zero(&ns->kref))
			goto fail;
		if (!try_module_get(ns->ctrl->ops->module))
			goto fail_put_ns;
	}
	spin_unlock(&dev_list_lock);

	return ns;

fail_put_ns:
	kref_put(&ns->kref, nvme_free_ns);
fail:
	spin_unlock(&dev_list_lock);
	return NULL;
}

void nvme_requeue_req(struct request *req)
{
	unsigned long flags;

	blk_mq_requeue_request(req);
	spin_lock_irqsave(req->q->queue_lock, flags);
	if (!blk_queue_stopped(req->q))
		blk_mq_kick_requeue_list(req->q);
	spin_unlock_irqrestore(req->q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(nvme_requeue_req);
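
/*
 * Allocate a driver-private request for the given NVMe command.  With
 * qid == NVME_QID_ANY blk-mq may use any hardware context; otherwise the
 * request is taken from the hardware context backing that queue (qid 1
 * maps to hctx 0).
 */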
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid)
{
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_type = REQ_TYPE_DRV_PRIV;
	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	req->cmd = (unsigned char *)cmd;
	req->cmd_len = sizeof(struct nvme_command);

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}

static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_dsm_range *range;
	struct page *page;
	int offset;
	unsigned int nr_bytes = blk_rq_bytes(req);

	range = kmalloc(sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_MQ_RQ_QUEUE_BUSY;

	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift);
	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.nr = 0;
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->completion_data = range;
	page = virt_to_page(range);
	offset = offset_in_page(range);
	blk_add_request_payload(req, page, offset, sizeof(*range));

	/*
	 * we set __data_len back to the size of the area to be discarded
	 * on disk. This allows us to report completion on the full amount
	 * of blocks described by the request.
	 */
	req->__data_len = nr_bytes;

	return 0;
}

static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.command_id = req->tag;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}

int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	int ret = 0;

	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		memcpy(cmd, req->cmd, sizeof(*cmd));
	else if (req_op(req) == REQ_OP_FLUSH)
		nvme_setup_flush(ns, cmd);
	else if (req_op(req) == REQ_OP_DISCARD)
		ret = nvme_setup_discard(ns, req, cmd);
	else
		nvme_setup_rw(ns, req, cmd);

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	req->special = cqe;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, at_head);
	ret = req->errors;
out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_completion cqe;
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	req->special = &cqe;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;

		if (!disk)
			goto submit;
		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto out_unmap;
		}

		if (meta_buffer && meta_len) {
			struct bio_integrity_payload *bip;

			meta = kmalloc(meta_len, GFP_KERNEL);
			if (!meta) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			if (write) {
				if (copy_from_user(meta, meta_buffer,
						meta_len)) {
					ret = -EFAULT;
					goto out_free_meta;
				}
			}

			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
			if (IS_ERR(bip)) {
				ret = PTR_ERR(bip);
				goto out_free_meta;
			}

			bip->bip_iter.bi_size = meta_len;
			bip->bip_iter.bi_sector = meta_seed;

			ret = bio_integrity_add_page(bio, virt_to_page(meta),
					meta_len, offset_in_page(meta));
			if (ret != meta_len) {
				ret = -ENOMEM;
				goto out_free_meta;
			}
		}
	}
submit:
	blk_execute_rq(req->q, disk, req, 0);
	ret = req->errors;
	if (result)
		*result = le32_to_cpu(cqe.result);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
out_free_meta:
	kfree(meta);
out_unmap:
	if (bio) {
		if (disk && bio->bi_bdev)
			bdput(bio->bi_bdev);
		blk_rq_unmap_user(bio);
	}
out:
	blk_mq_free_request(req);
	return ret;
}

int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout)
{
	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
			result, timeout);
}
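
/*
 * Keep-alive handling: when the controller reports a non-zero Keep Alive
 * Timeout (ctrl->kato, in seconds), a reserved Keep Alive admin command is
 * sent from delayed work and re-armed from its completion handler.  A
 * completion error stops the re-arming; a submission failure resets the
 * controller.
 */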
static void nvme_keep_alive_end_io(struct request *rq, int error)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;

	blk_mq_free_request(rq);

	if (error) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n", error);
		return;
	}

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct nvme_command c;
	struct request *rq;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_keep_alive;

	rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		ctrl->ops->reset_ctrl(ctrl);
		return;
	}
}

void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
EXPORT_SYMBOL_GPL(nvme_start_keep_alive);

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(1);

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(2);
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}

int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
	if (error)
		kfree(*id);
	return error;
}

int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	struct nvme_completion cqe;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.dptr.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
			NVME_QID_ANY, 0, 0);
	if (ret >= 0)
		*result = le32_to_cpu(cqe.result);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	struct nvme_completion cqe;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.dptr.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
			NVME_QID_ANY, 0, 0);
	if (ret >= 0)
		*result = le32_to_cpu(cqe.result);
	return ret;
}

int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{
	struct nvme_command c = { };
	int error;

	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.cdw10[0] = cpu_to_le32(
			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
			NVME_LOG_SMART);

	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
	if (!*log)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
			sizeof(struct nvme_smart_log));
	if (error)
		kfree(*log);
	return error;
}

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count. We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix them up.
673 */ 674 if (status > 0) { 675 dev_err(ctrl->dev, "Could not set queue count (%d)\n", status); 676 *count = 0; 677 } else { 678 nr_io_queues = min(result & 0xffff, result >> 16) + 1; 679 *count = min(*count, nr_io_queues); 680 } 681 682 return 0; 683 } 684 EXPORT_SYMBOL_GPL(nvme_set_queue_count); 685 686 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) 687 { 688 struct nvme_user_io io; 689 struct nvme_command c; 690 unsigned length, meta_len; 691 void __user *metadata; 692 693 if (copy_from_user(&io, uio, sizeof(io))) 694 return -EFAULT; 695 if (io.flags) 696 return -EINVAL; 697 698 switch (io.opcode) { 699 case nvme_cmd_write: 700 case nvme_cmd_read: 701 case nvme_cmd_compare: 702 break; 703 default: 704 return -EINVAL; 705 } 706 707 length = (io.nblocks + 1) << ns->lba_shift; 708 meta_len = (io.nblocks + 1) * ns->ms; 709 metadata = (void __user *)(uintptr_t)io.metadata; 710 711 if (ns->ext) { 712 length += meta_len; 713 meta_len = 0; 714 } else if (meta_len) { 715 if ((io.metadata & 3) || !io.metadata) 716 return -EINVAL; 717 } 718 719 memset(&c, 0, sizeof(c)); 720 c.rw.opcode = io.opcode; 721 c.rw.flags = io.flags; 722 c.rw.nsid = cpu_to_le32(ns->ns_id); 723 c.rw.slba = cpu_to_le64(io.slba); 724 c.rw.length = cpu_to_le16(io.nblocks); 725 c.rw.control = cpu_to_le16(io.control); 726 c.rw.dsmgmt = cpu_to_le32(io.dsmgmt); 727 c.rw.reftag = cpu_to_le32(io.reftag); 728 c.rw.apptag = cpu_to_le16(io.apptag); 729 c.rw.appmask = cpu_to_le16(io.appmask); 730 731 return __nvme_submit_user_cmd(ns->queue, &c, 732 (void __user *)(uintptr_t)io.addr, length, 733 metadata, meta_len, io.slba, NULL, 0); 734 } 735 736 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, 737 struct nvme_passthru_cmd __user *ucmd) 738 { 739 struct nvme_passthru_cmd cmd; 740 struct nvme_command c; 741 unsigned timeout = 0; 742 int status; 743 744 if (!capable(CAP_SYS_ADMIN)) 745 return -EACCES; 746 if (copy_from_user(&cmd, ucmd, sizeof(cmd))) 747 return -EFAULT; 748 if (cmd.flags) 749 return -EINVAL; 750 751 memset(&c, 0, sizeof(c)); 752 c.common.opcode = cmd.opcode; 753 c.common.flags = cmd.flags; 754 c.common.nsid = cpu_to_le32(cmd.nsid); 755 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); 756 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); 757 c.common.cdw10[0] = cpu_to_le32(cmd.cdw10); 758 c.common.cdw10[1] = cpu_to_le32(cmd.cdw11); 759 c.common.cdw10[2] = cpu_to_le32(cmd.cdw12); 760 c.common.cdw10[3] = cpu_to_le32(cmd.cdw13); 761 c.common.cdw10[4] = cpu_to_le32(cmd.cdw14); 762 c.common.cdw10[5] = cpu_to_le32(cmd.cdw15); 763 764 if (cmd.timeout_ms) 765 timeout = msecs_to_jiffies(cmd.timeout_ms); 766 767 status = nvme_submit_user_cmd(ns ? 

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			&cmd.result, timeout);
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
#ifdef CONFIG_BLK_DEV_NVME_SCSI
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
#endif
	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case SG_IO:
		return -ENOIOCTLCMD;
	}
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ns->ms;
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
#else
static void nvme_init_integrity(struct nvme_ns *ns)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

static void nvme_config_discard(struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
		ns->queue->limits.discard_zeroes_data = 1;
	else
		ns->queue->limits.discard_zeroes_data = 0;

	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}

static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_id_ns *id;
	u8 lbaf, pi_type;
	u16 old_ms;
	unsigned short bs;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}
	if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
		dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
				__func__);
		return -ENODEV;
	}
	if (id->ncap == 0) {
		kfree(id);
		return -ENODEV;
	}

	if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
		if (nvme_nvm_register(ns->queue, disk->disk_name)) {
			dev_warn(disk_to_dev(ns->disk),
				"%s: LightNVM init failure\n", __func__);
			kfree(id);
			return -ENODEV;
		}
		ns->type = NVME_NS_LIGHTNVM;
	}

	if (ns->ctrl->vs >= NVME_VS(1, 1))
		memcpy(ns->eui, id->eui64, sizeof(ns->eui));
	if (ns->ctrl->vs >= NVME_VS(1, 2))
		memcpy(ns->uuid, id->nguid, sizeof(ns->uuid));

	old_ms = ns->ms;
	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->lba_shift = id->lbaf[lbaf].ds;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	/*
	 * If identify namespace failed, use the default 512 byte block size so
	 * the block layer can use it before failing read/write for 0 capacity.
	 */
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	bs = 1 << ns->lba_shift;
	/* XXX: PI implementation requires metadata equal t10 pi tuple size */
	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
					id->dps & NVME_NS_DPS_PI_MASK : 0;

	blk_mq_freeze_queue(disk->queue);
	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
				ns->ms != old_ms ||
				bs != queue_logical_block_size(disk->queue) ||
				(ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;
	blk_queue_logical_block_size(ns->queue, bs);

	if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
		nvme_init_integrity(ns);
	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
		set_capacity(disk, 0);
	else
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
	blk_mq_unfreeze_queue(disk->queue);

	kfree(id);
	return 0;
}

static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}

static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	struct nvme_command c;
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
}
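
/*
 * The pr_ops handlers below translate the block layer persistent
 * reservation API into NVMe reservation commands: each handler packs its
 * action and reservation type into cdw10 and passes the keys to
 * nvme_pr_command(), which issues the corresponding Reservation
 * Register/Acquire/Release command against the namespace.
 */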
static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = (nvme_pr_type(type) << 8) | (abort ? 2 : 1);
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = (nvme_pr_type(type) << 8) | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_compat_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk= nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};

static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	/* Checking for ctrl->tagset is a trick to avoid sleeping on module
	 * load, since we only need the quirk on reset_controller. Notice
	 * that the HGST device needs this delay only in firmware activation
	 * procedure; unfortunately we have no (easy) way to verify this.
1116 */ 1117 if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset) 1118 msleep(NVME_QUIRK_DELAY_AMOUNT); 1119 1120 return nvme_wait_ready(ctrl, cap, false); 1121 } 1122 EXPORT_SYMBOL_GPL(nvme_disable_ctrl); 1123 1124 int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap) 1125 { 1126 /* 1127 * Default to a 4K page size, with the intention to update this 1128 * path in the future to accomodate architectures with differing 1129 * kernel and IO page sizes. 1130 */ 1131 unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12; 1132 int ret; 1133 1134 if (page_shift < dev_page_min) { 1135 dev_err(ctrl->device, 1136 "Minimum device page size %u too large for host (%u)\n", 1137 1 << dev_page_min, 1 << page_shift); 1138 return -ENODEV; 1139 } 1140 1141 ctrl->page_size = 1 << page_shift; 1142 1143 ctrl->ctrl_config = NVME_CC_CSS_NVM; 1144 ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT; 1145 ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE; 1146 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; 1147 ctrl->ctrl_config |= NVME_CC_ENABLE; 1148 1149 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 1150 if (ret) 1151 return ret; 1152 return nvme_wait_ready(ctrl, cap, true); 1153 } 1154 EXPORT_SYMBOL_GPL(nvme_enable_ctrl); 1155 1156 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl) 1157 { 1158 unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies; 1159 u32 csts; 1160 int ret; 1161 1162 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 1163 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; 1164 1165 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 1166 if (ret) 1167 return ret; 1168 1169 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 1170 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT) 1171 break; 1172 1173 msleep(100); 1174 if (fatal_signal_pending(current)) 1175 return -EINTR; 1176 if (time_after(jiffies, timeout)) { 1177 dev_err(ctrl->device, 1178 "Device shutdown incomplete; abort shutdown\n"); 1179 return -ENODEV; 1180 } 1181 } 1182 1183 return ret; 1184 } 1185 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl); 1186 1187 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, 1188 struct request_queue *q) 1189 { 1190 bool vwc = false; 1191 1192 if (ctrl->max_hw_sectors) { 1193 u32 max_segments = 1194 (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1; 1195 1196 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); 1197 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); 1198 } 1199 if (ctrl->stripe_size) 1200 blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9); 1201 blk_queue_virt_boundary(q, ctrl->page_size - 1); 1202 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) 1203 vwc = true; 1204 blk_queue_write_cache(q, vwc, vwc); 1205 } 1206 1207 /* 1208 * Initialize the cached copies of the Identify data and various controller 1209 * register in our nvme_ctrl structure. This should be called as soon as 1210 * the admin queue is fully up and running. 
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u64 cap;
	int ret, page_shift;
	u32 max_hw_sectors;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	page_shift = NVME_CAP_MPSMIN(cap) + 12;

	if (ctrl->vs >= NVME_VS(1, 1))
		ctrl->subsystem = NVME_CAP_NSSRC(cap);

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	ctrl->vid = le16_to_cpu(id->vid);
	ctrl->oncs = le16_to_cpup(&id->oncs);
	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	ctrl->cntlid = le16_to_cpup(&id->cntlid);
	memcpy(ctrl->serial, id->sn, sizeof(id->sn));
	memcpy(ctrl->model, id->mn, sizeof(id->mn));
	memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
	if (id->mdts)
		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
	else
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
		unsigned int max_hw_sectors;

		ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
		max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
		if (ctrl->max_hw_sectors) {
			ctrl->max_hw_sectors = min(max_hw_sectors,
						ctrl->max_hw_sectors);
		} else {
			ctrl->max_hw_sectors = max_hw_sectors;
		}
	}

	nvme_set_queue_limits(ctrl, ctrl->admin_q);
	ctrl->sgls = le32_to_cpu(id->sgls);
	ctrl->kas = le16_to_cpu(id->kas);

	if (ctrl->ops->is_fabrics) {
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/*
		 * In fabrics we need to verify the cntlid matches the
		 * admin connect
		 */
		if (ctrl->cntlid != le16_to_cpu(id->cntlid))
			ret = -EINVAL;

		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
			dev_err(ctrl->dev,
				"keep-alive support is mandatory for fabrics\n");
			ret = -EINVAL;
		}
	} else {
		ctrl->cntlid = le16_to_cpu(id->cntlid);
	}

	kfree(id);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);

static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl;
	int instance = iminor(inode);
	int ret = -ENODEV;

	spin_lock(&dev_list_lock);
	list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
		if (ctrl->instance != instance)
			continue;

		if (!ctrl->admin_q) {
			ret = -EWOULDBLOCK;
			break;
		}
		if (!kref_get_unless_zero(&ctrl->kref))
			break;
		file->private_data = ctrl;
		ret = 0;
		break;
	}
	spin_unlock(&dev_list_lock);

	return ret;
}

static int nvme_dev_release(struct inode *inode, struct file *file)
{
	nvme_put_ctrl(file->private_data);
	return 0;
}

static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	mutex_lock(&ctrl->namespaces_mutex);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	mutex_unlock(&ctrl->namespaces_mutex);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}

static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return ctrl->ops->reset_ctrl(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
};

static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = ctrl->ops->reset_ctrl(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	struct nvme_ctrl *ctrl = ns->ctrl;
	int serial_len = sizeof(ctrl->serial);
	int model_len = sizeof(ctrl->model);

	if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
		return sprintf(buf, "eui.%16phN\n", ns->uuid);

	if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
		return sprintf(buf, "eui.%8phN\n", ns->eui);

	while (ctrl->serial[serial_len - 1] == ' ')
		serial_len--;
	while (ctrl->model[model_len - 1] == ' ')
		model_len--;

	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
		serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
}
static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%pU\n", ns->uuid);
}
static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%8phd\n", ns->eui);
}
static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
								char *buf)
{
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
	return sprintf(buf, "%d\n", ns->ns_id);
}
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);

static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
	NULL,
};

static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns *ns = dev_to_disk(dev)->private_data;

	if (a == &dev_attr_uuid.attr) {
		if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
			return 0;
	}
	return a->mode;
}

static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_ns_attrs_are_visible,
};

#define nvme_show_str_function(field)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);			\
	return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field); \
}									\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

#define nvme_show_int_function(field)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);			\
	return sprintf(buf, "%d\n", ctrl->field);			\
}									\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);
nvme_show_int_function(cntlid);

static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
		ctrl->ops->delete_ctrl(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			ctrl->ops->get_subsysnqn(ctrl));
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
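
/*
 * Attributes of the controller class device (/sys/class/nvme/nvmeX);
 * transport-specific entries (delete_controller, subsysnqn, address) are
 * hidden by nvme_dev_attrs_are_visible() when the corresponding ops
 * callbacks are not provided.
 */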
static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	NULL
};

#define CHECK_ATTR(ctrl, a, name)		\
	if ((a) == &dev_attr_##name.attr &&	\
	    !(ctrl)->ops->get_##name)		\
		return 0

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr) {
		if (!ctrl->ops->delete_ctrl)
			return 0;
	}

	CHECK_ATTR(ctrl, a, subsysnqn);
	CHECK_ATTR(ctrl, a, address);

	return a->mode;
}

static struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

	return nsa->ns_id - nsb->ns_id;
}

static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns, *ret = NULL;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->ns_id == nsid) {
			kref_get(&ns->kref);
			ret = ns;
			break;
		}
		if (ns->ns_id > nsid)
			break;
	}
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}

static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int node = dev_to_node(ctrl->dev);

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
	if (ns->instance < 0)
		goto out_free_ns;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue))
		goto out_release_instance;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	disk = alloc_disk_node(0, node);
	if (!disk)
		goto out_free_queue;

	kref_init(&ns->kref);
	ns->ns_id = nsid;
	ns->disk = disk;
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	nvme_set_queue_limits(ctrl, ns->queue);

	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);

	if (nvme_revalidate_disk(ns->disk))
		goto out_free_disk;

	mutex_lock(&ctrl->namespaces_mutex);
	list_add_tail(&ns->list, &ctrl->namespaces);
	mutex_unlock(&ctrl->namespaces_mutex);

	kref_get(&ctrl->kref);
	if (ns->type == NVME_NS_LIGHTNVM)
		return;

	device_add_disk(ctrl->device, ns->disk);
	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group))
		pr_warn("%s: failed to create sysfs group for identification\n",
			ns->disk->disk_name);
	return;
out_free_disk:
	kfree(disk);
out_free_queue:
	blk_cleanup_queue(ns->queue);
out_release_instance:
	ida_simple_remove(&ctrl->ns_ida, ns->instance);
out_free_ns:
	kfree(ns);
}

static void nvme_ns_remove(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	if (ns->disk->flags & GENHD_FL_UP) {
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group);
		del_gendisk(ns->disk);
		blk_mq_abort_requeue_list(ns->queue);
		blk_cleanup_queue(ns->queue);
	}

	mutex_lock(&ns->ctrl->namespaces_mutex);
	list_del_init(&ns->list);
	mutex_unlock(&ns->ctrl->namespaces_mutex);

	nvme_put_ns(ns);
}

static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	ns = nvme_find_get_ns(ctrl, nsid);
	if (ns) {
		if (revalidate_disk(ns->disk))
			nvme_ns_remove(ns);
		nvme_put_ns(ns);
	} else
		nvme_alloc_ns(ctrl, nsid);
}

static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->ns_id > nsid)
			nvme_ns_remove(ns);
	}
}

static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns;
	__le32 *ns_list;
	unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
	int ret = 0;

	ns_list = kzalloc(0x1000, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	for (i = 0; i < num_lists; i++) {
		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
		if (ret)
			goto free;

		for (j = 0; j < min(nn, 1024U); j++) {
			nsid = le32_to_cpu(ns_list[j]);
			if (!nsid)
				goto out;

			nvme_validate_ns(ctrl, nsid);

			while (++prev < nsid) {
				ns = nvme_find_get_ns(ctrl, prev);
				if (ns) {
					nvme_ns_remove(ns);
					nvme_put_ns(ns);
				}
			}
		}
		nn -= j;
	}
out:
	nvme_remove_invalid_namespaces(ctrl, prev);
free:
	kfree(ns_list);
	return ret;
}

static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
	unsigned i;

	for (i = 1; i <= nn; i++)
		nvme_validate_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
}

static void nvme_scan_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
	struct nvme_id_ctrl *id;
	unsigned nn;

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	if (nvme_identify_ctrl(ctrl, &id))
		return;

	nn = le32_to_cpu(id->nn);
	if (ctrl->vs >= NVME_VS(1, 1) &&
	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		if (!nvme_scan_ns_list(ctrl, nn))
			goto done;
	}
	nvme_scan_ns_sequential(ctrl, nn);
done:
	mutex_lock(&ctrl->namespaces_mutex);
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
	mutex_unlock(&ctrl->namespaces_mutex);
	kfree(id);

	if (ctrl->ops->post_scan)
		ctrl->ops->post_scan(ctrl);
}

void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Do not queue new scan work when a controller is reset during
	 * removal.
	 */
	if (ctrl->state == NVME_CTRL_LIVE)
		schedule_work(&ctrl->scan_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_scan);

/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;

	/*
	 * The dead state indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);

static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	spin_lock_irq(&ctrl->lock);
	while (ctrl->event_limit > 0) {
		int aer_idx = --ctrl->event_limit;

		spin_unlock_irq(&ctrl->lock);
		ctrl->ops->submit_async_event(ctrl, aer_idx);
		spin_lock_irq(&ctrl->lock);
	}
	spin_unlock_irq(&ctrl->lock);
}

void nvme_complete_async_event(struct nvme_ctrl *ctrl,
		struct nvme_completion *cqe)
{
	u16 status = le16_to_cpu(cqe->status) >> 1;
	u32 result = le32_to_cpu(cqe->result);

	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
		++ctrl->event_limit;
		schedule_work(&ctrl->async_event_work);
	}

	if (status != NVME_SC_SUCCESS)
		return;

	switch (result & 0xff07) {
	case NVME_AER_NOTICE_NS_CHANGED:
		dev_info(ctrl->device, "rescanning\n");
		nvme_queue_scan(ctrl);
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);

void nvme_queue_async_events(struct nvme_ctrl *ctrl)
{
	ctrl->event_limit = NVME_NR_AERS;
	schedule_work(&ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_async_events);

static DEFINE_IDA(nvme_instance_ida);

static int nvme_set_instance(struct nvme_ctrl *ctrl)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	ctrl->instance = instance;
	return 0;
}

static void nvme_release_instance(struct nvme_ctrl *ctrl)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, ctrl->instance);
	spin_unlock(&dev_list_lock);
}

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	flush_work(&ctrl->async_event_work);
	flush_work(&ctrl->scan_work);
	nvme_remove_namespaces(ctrl);

	device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));

	spin_lock(&dev_list_lock);
	list_del(&ctrl->node);
	spin_unlock(&dev_list_lock);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
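
/*
 * nvme_free_ctrl() runs once the last reference taken against the
 * controller is dropped via nvme_put_ctrl() and hands the structure back
 * to the transport driver through ops->free_ctrl().
 */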
static void nvme_free_ctrl(struct kref *kref)
{
	struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);

	put_device(ctrl->device);
	nvme_release_instance(ctrl);
	ida_destroy(&ctrl->ns_ida);

	ctrl->ops->free_ctrl(ctrl);
}

void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	kref_put(&ctrl->kref, nvme_free_ctrl);
}
EXPORT_SYMBOL_GPL(nvme_put_ctrl);

/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * earliest initialization so that we have the initialized structure around
 * during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	spin_lock_init(&ctrl->lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	mutex_init(&ctrl->namespaces_mutex);
	kref_init(&ctrl->kref);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);

	ret = nvme_set_instance(ctrl);
	if (ret)
		goto out;

	ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
				MKDEV(nvme_char_major, ctrl->instance),
				ctrl, nvme_dev_attr_groups,
				"nvme%d", ctrl->instance);
	if (IS_ERR(ctrl->device)) {
		ret = PTR_ERR(ctrl->device);
		goto out_release_instance;
	}
	get_device(ctrl->device);
	ida_init(&ctrl->ns_ida);

	spin_lock(&dev_list_lock);
	list_add_tail(&ctrl->node, &nvme_ctrl_list);
	spin_unlock(&dev_list_lock);

	return 0;
out_release_instance:
	nvme_release_instance(ctrl);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);

/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		/*
		 * Revalidating a dead namespace sets capacity to 0. This will
		 * end buffered writers dirtying pages that can't be synced.
		 */
		if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
			revalidate_disk(ns->disk);

		blk_set_queue_dying(ns->queue);
		blk_mq_abort_requeue_list(ns->queue);
		blk_mq_start_stopped_hw_queues(ns->queue, true);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);

void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		spin_lock_irq(ns->queue->queue_lock);
		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
		spin_unlock_irq(ns->queue->queue_lock);

		blk_mq_cancel_requeue_work(ns->queue);
		blk_mq_stop_hw_queues(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
		blk_mq_start_stopped_hw_queues(ns->queue, true);
		blk_mq_kick_requeue_list(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);

int __init nvme_core_init(void)
{
	int result;

	result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
							&nvme_dev_fops);
	if (result < 0)
		return result;
	else if (result > 0)
		nvme_char_major = result;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}

	return 0;

unregister_chrdev:
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
	return result;
}

void nvme_core_exit(void)
{
	class_destroy(nvme_class);
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);