1 /* 2 * NVM Express device driver 3 * Copyright (c) 2011-2014, Intel Corporation. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 */ 14 15 #include <linux/blkdev.h> 16 #include <linux/blk-mq.h> 17 #include <linux/delay.h> 18 #include <linux/errno.h> 19 #include <linux/hdreg.h> 20 #include <linux/kernel.h> 21 #include <linux/module.h> 22 #include <linux/list_sort.h> 23 #include <linux/slab.h> 24 #include <linux/types.h> 25 #include <linux/pr.h> 26 #include <linux/ptrace.h> 27 #include <linux/nvme_ioctl.h> 28 #include <linux/t10-pi.h> 29 #include <linux/pm_qos.h> 30 #include <asm/unaligned.h> 31 32 #include "nvme.h" 33 #include "fabrics.h" 34 35 #define NVME_MINORS (1U << MINORBITS) 36 37 unsigned int admin_timeout = 60; 38 module_param(admin_timeout, uint, 0644); 39 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands"); 40 EXPORT_SYMBOL_GPL(admin_timeout); 41 42 unsigned int nvme_io_timeout = 30; 43 module_param_named(io_timeout, nvme_io_timeout, uint, 0644); 44 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O"); 45 EXPORT_SYMBOL_GPL(nvme_io_timeout); 46 47 static unsigned char shutdown_timeout = 5; 48 module_param(shutdown_timeout, byte, 0644); 49 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown"); 50 51 static u8 nvme_max_retries = 5; 52 module_param_named(max_retries, nvme_max_retries, byte, 0644); 53 MODULE_PARM_DESC(max_retries, "max number of retries a command may have"); 54 55 static unsigned long default_ps_max_latency_us = 100000; 56 module_param(default_ps_max_latency_us, ulong, 0644); 57 MODULE_PARM_DESC(default_ps_max_latency_us, 58 "max power saving latency for new devices; use PM QOS to change per device"); 59 60 static bool force_apst; 61 module_param(force_apst, bool, 0644); 62 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off"); 63 64 static bool streams; 65 module_param(streams, bool, 0644); 66 MODULE_PARM_DESC(streams, "turn on support for Streams write directives"); 67 68 struct workqueue_struct *nvme_wq; 69 EXPORT_SYMBOL_GPL(nvme_wq); 70 71 static DEFINE_IDA(nvme_subsystems_ida); 72 static LIST_HEAD(nvme_subsystems); 73 static DEFINE_MUTEX(nvme_subsystems_lock); 74 75 static DEFINE_IDA(nvme_instance_ida); 76 static dev_t nvme_chr_devt; 77 static struct class *nvme_class; 78 static struct class *nvme_subsys_class; 79 80 static void nvme_ns_remove(struct nvme_ns *ns); 81 static int nvme_revalidate_disk(struct gendisk *disk); 82 83 static __le32 nvme_get_log_dw10(u8 lid, size_t size) 84 { 85 return cpu_to_le32((((size / 4) - 1) << 16) | lid); 86 } 87 88 int nvme_reset_ctrl(struct nvme_ctrl *ctrl) 89 { 90 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) 91 return -EBUSY; 92 if (!queue_work(nvme_wq, &ctrl->reset_work)) 93 return -EBUSY; 94 return 0; 95 } 96 EXPORT_SYMBOL_GPL(nvme_reset_ctrl); 97 98 static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) 99 { 100 int ret; 101 102 ret = nvme_reset_ctrl(ctrl); 103 if (!ret) 104 flush_work(&ctrl->reset_work); 105 return ret; 106 } 107 108 static void nvme_delete_ctrl_work(struct work_struct *work) 109 { 110 struct 
nvme_ctrl *ctrl = 111 container_of(work, struct nvme_ctrl, delete_work); 112 113 flush_work(&ctrl->reset_work); 114 nvme_stop_ctrl(ctrl); 115 nvme_remove_namespaces(ctrl); 116 ctrl->ops->delete_ctrl(ctrl); 117 nvme_uninit_ctrl(ctrl); 118 nvme_put_ctrl(ctrl); 119 } 120 121 int nvme_delete_ctrl(struct nvme_ctrl *ctrl) 122 { 123 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) 124 return -EBUSY; 125 if (!queue_work(nvme_wq, &ctrl->delete_work)) 126 return -EBUSY; 127 return 0; 128 } 129 EXPORT_SYMBOL_GPL(nvme_delete_ctrl); 130 131 int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) 132 { 133 int ret = 0; 134 135 /* 136 * Keep a reference until the work is flushed since ->delete_ctrl 137 * can free the controller. 138 */ 139 nvme_get_ctrl(ctrl); 140 ret = nvme_delete_ctrl(ctrl); 141 if (!ret) 142 flush_work(&ctrl->delete_work); 143 nvme_put_ctrl(ctrl); 144 return ret; 145 } 146 EXPORT_SYMBOL_GPL(nvme_delete_ctrl_sync); 147 148 static inline bool nvme_ns_has_pi(struct nvme_ns *ns) 149 { 150 return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple); 151 } 152 153 static blk_status_t nvme_error_status(struct request *req) 154 { 155 switch (nvme_req(req)->status & 0x7ff) { 156 case NVME_SC_SUCCESS: 157 return BLK_STS_OK; 158 case NVME_SC_CAP_EXCEEDED: 159 return BLK_STS_NOSPC; 160 case NVME_SC_ONCS_NOT_SUPPORTED: 161 return BLK_STS_NOTSUPP; 162 case NVME_SC_WRITE_FAULT: 163 case NVME_SC_READ_ERROR: 164 case NVME_SC_UNWRITTEN_BLOCK: 165 case NVME_SC_ACCESS_DENIED: 166 case NVME_SC_READ_ONLY: 167 return BLK_STS_MEDIUM; 168 case NVME_SC_GUARD_CHECK: 169 case NVME_SC_APPTAG_CHECK: 170 case NVME_SC_REFTAG_CHECK: 171 case NVME_SC_INVALID_PI: 172 return BLK_STS_PROTECTION; 173 case NVME_SC_RESERVATION_CONFLICT: 174 return BLK_STS_NEXUS; 175 default: 176 return BLK_STS_IOERR; 177 } 178 } 179 180 static inline bool nvme_req_needs_retry(struct request *req) 181 { 182 if (blk_noretry_request(req)) 183 return false; 184 if (nvme_req(req)->status & NVME_SC_DNR) 185 return false; 186 if (nvme_req(req)->retries >= nvme_max_retries) 187 return false; 188 return true; 189 } 190 191 void nvme_complete_rq(struct request *req) 192 { 193 if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) { 194 if (nvme_req_needs_failover(req)) { 195 nvme_failover_req(req); 196 return; 197 } 198 199 if (!blk_queue_dying(req->q)) { 200 nvme_req(req)->retries++; 201 blk_mq_requeue_request(req, true); 202 return; 203 } 204 } 205 206 blk_mq_end_request(req, nvme_error_status(req)); 207 } 208 EXPORT_SYMBOL_GPL(nvme_complete_rq); 209 210 void nvme_cancel_request(struct request *req, void *data, bool reserved) 211 { 212 if (!blk_mq_request_started(req)) 213 return; 214 215 dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device, 216 "Cancelling I/O %d", req->tag); 217 218 nvme_req(req)->status = NVME_SC_ABORT_REQ; 219 blk_mq_complete_request(req); 220 221 } 222 EXPORT_SYMBOL_GPL(nvme_cancel_request); 223 224 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, 225 enum nvme_ctrl_state new_state) 226 { 227 enum nvme_ctrl_state old_state; 228 unsigned long flags; 229 bool changed = false; 230 231 spin_lock_irqsave(&ctrl->lock, flags); 232 233 old_state = ctrl->state; 234 switch (new_state) { 235 case NVME_CTRL_LIVE: 236 switch (old_state) { 237 case NVME_CTRL_NEW: 238 case NVME_CTRL_RESETTING: 239 case NVME_CTRL_RECONNECTING: 240 changed = true; 241 /* FALLTHRU */ 242 default: 243 break; 244 } 245 break; 246 case NVME_CTRL_RESETTING: 247 switch (old_state) { 248 case NVME_CTRL_NEW: 249 case NVME_CTRL_LIVE: 250 changed = 
true; 251 /* FALLTHRU */ 252 default: 253 break; 254 } 255 break; 256 case NVME_CTRL_RECONNECTING: 257 switch (old_state) { 258 case NVME_CTRL_LIVE: 259 case NVME_CTRL_RESETTING: 260 changed = true; 261 /* FALLTHRU */ 262 default: 263 break; 264 } 265 break; 266 case NVME_CTRL_DELETING: 267 switch (old_state) { 268 case NVME_CTRL_LIVE: 269 case NVME_CTRL_RESETTING: 270 case NVME_CTRL_RECONNECTING: 271 changed = true; 272 /* FALLTHRU */ 273 default: 274 break; 275 } 276 break; 277 case NVME_CTRL_DEAD: 278 switch (old_state) { 279 case NVME_CTRL_DELETING: 280 changed = true; 281 /* FALLTHRU */ 282 default: 283 break; 284 } 285 break; 286 default: 287 break; 288 } 289 290 if (changed) 291 ctrl->state = new_state; 292 293 spin_unlock_irqrestore(&ctrl->lock, flags); 294 if (changed && ctrl->state == NVME_CTRL_LIVE) 295 nvme_kick_requeue_lists(ctrl); 296 return changed; 297 } 298 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); 299 300 static void nvme_free_ns_head(struct kref *ref) 301 { 302 struct nvme_ns_head *head = 303 container_of(ref, struct nvme_ns_head, ref); 304 305 nvme_mpath_remove_disk(head); 306 ida_simple_remove(&head->subsys->ns_ida, head->instance); 307 list_del_init(&head->entry); 308 cleanup_srcu_struct(&head->srcu); 309 kfree(head); 310 } 311 312 static void nvme_put_ns_head(struct nvme_ns_head *head) 313 { 314 kref_put(&head->ref, nvme_free_ns_head); 315 } 316 317 static void nvme_free_ns(struct kref *kref) 318 { 319 struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref); 320 321 if (ns->ndev) 322 nvme_nvm_unregister(ns); 323 324 put_disk(ns->disk); 325 nvme_put_ns_head(ns->head); 326 nvme_put_ctrl(ns->ctrl); 327 kfree(ns); 328 } 329 330 static void nvme_put_ns(struct nvme_ns *ns) 331 { 332 kref_put(&ns->kref, nvme_free_ns); 333 } 334 335 struct request *nvme_alloc_request(struct request_queue *q, 336 struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid) 337 { 338 unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN; 339 struct request *req; 340 341 if (qid == NVME_QID_ANY) { 342 req = blk_mq_alloc_request(q, op, flags); 343 } else { 344 req = blk_mq_alloc_request_hctx(q, op, flags, 345 qid ? qid - 1 : 0); 346 } 347 if (IS_ERR(req)) 348 return req; 349 350 req->cmd_flags |= REQ_FAILFAST_DRIVER; 351 nvme_req(req)->cmd = cmd; 352 353 return req; 354 } 355 EXPORT_SYMBOL_GPL(nvme_alloc_request); 356 357 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable) 358 { 359 struct nvme_command c; 360 361 memset(&c, 0, sizeof(c)); 362 363 c.directive.opcode = nvme_admin_directive_send; 364 c.directive.nsid = cpu_to_le32(NVME_NSID_ALL); 365 c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE; 366 c.directive.dtype = NVME_DIR_IDENTIFY; 367 c.directive.tdtype = NVME_DIR_STREAMS; 368 c.directive.endir = enable ? 
NVME_DIR_ENDIR : 0; 369 370 return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0); 371 } 372 373 static int nvme_disable_streams(struct nvme_ctrl *ctrl) 374 { 375 return nvme_toggle_streams(ctrl, false); 376 } 377 378 static int nvme_enable_streams(struct nvme_ctrl *ctrl) 379 { 380 return nvme_toggle_streams(ctrl, true); 381 } 382 383 static int nvme_get_stream_params(struct nvme_ctrl *ctrl, 384 struct streams_directive_params *s, u32 nsid) 385 { 386 struct nvme_command c; 387 388 memset(&c, 0, sizeof(c)); 389 memset(s, 0, sizeof(*s)); 390 391 c.directive.opcode = nvme_admin_directive_recv; 392 c.directive.nsid = cpu_to_le32(nsid); 393 c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1); 394 c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM; 395 c.directive.dtype = NVME_DIR_STREAMS; 396 397 return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s)); 398 } 399 400 static int nvme_configure_directives(struct nvme_ctrl *ctrl) 401 { 402 struct streams_directive_params s; 403 int ret; 404 405 if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES)) 406 return 0; 407 if (!streams) 408 return 0; 409 410 ret = nvme_enable_streams(ctrl); 411 if (ret) 412 return ret; 413 414 ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL); 415 if (ret) 416 return ret; 417 418 ctrl->nssa = le16_to_cpu(s.nssa); 419 if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) { 420 dev_info(ctrl->device, "too few streams (%u) available\n", 421 ctrl->nssa); 422 nvme_disable_streams(ctrl); 423 return 0; 424 } 425 426 ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1); 427 dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams); 428 return 0; 429 } 430 431 /* 432 * Check if 'req' has a write hint associated with it. If it does, assign 433 * a valid namespace stream to the write. 
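 *
 * For example, a bio tagged with WRITE_LIFE_SHORT (hint value 2) maps to
 * stream ID 1 below: the hint is decremented by one, NVME_RW_DTYPE_STREAMS is
 * set in the control field, and the stream ID is placed in the upper 16 bits
 * of the dsmgmt dword (the DSPEC field).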
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}

static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_STS_RESOURCE;

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		range[n].cattr = cpu_to_le32(0);
		range[n].nlb = cpu_to_le32(nlb);
		range[n].slba = cpu_to_le64(slba);
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		kfree(range);
		return BLK_STS_IOERR;
	}

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

	if (ns->ms) {
		/*
		 * If formatted with metadata, the block layer always provides a
		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
		 * we enable the PRACT bit for protection information or set the
		 * namespace capacity to zero to prevent any I/O.
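		 *
		 * For example, a namespace formatted with 8 bytes of separate
		 * per-block metadata holding T10 PI can have PRACT set here,
		 * in which case the controller generates and strips the
		 * protection field itself and no host metadata buffer is
		 * transferred.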
534 */ 535 if (!blk_integrity_rq(req)) { 536 if (WARN_ON_ONCE(!nvme_ns_has_pi(ns))) 537 return BLK_STS_NOTSUPP; 538 control |= NVME_RW_PRINFO_PRACT; 539 } 540 541 switch (ns->pi_type) { 542 case NVME_NS_DPS_PI_TYPE3: 543 control |= NVME_RW_PRINFO_PRCHK_GUARD; 544 break; 545 case NVME_NS_DPS_PI_TYPE1: 546 case NVME_NS_DPS_PI_TYPE2: 547 control |= NVME_RW_PRINFO_PRCHK_GUARD | 548 NVME_RW_PRINFO_PRCHK_REF; 549 cmnd->rw.reftag = cpu_to_le32( 550 nvme_block_nr(ns, blk_rq_pos(req))); 551 break; 552 } 553 } 554 555 cmnd->rw.control = cpu_to_le16(control); 556 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); 557 return 0; 558 } 559 560 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req, 561 struct nvme_command *cmd) 562 { 563 blk_status_t ret = BLK_STS_OK; 564 565 if (!(req->rq_flags & RQF_DONTPREP)) { 566 nvme_req(req)->retries = 0; 567 nvme_req(req)->flags = 0; 568 req->rq_flags |= RQF_DONTPREP; 569 } 570 571 switch (req_op(req)) { 572 case REQ_OP_DRV_IN: 573 case REQ_OP_DRV_OUT: 574 memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd)); 575 break; 576 case REQ_OP_FLUSH: 577 nvme_setup_flush(ns, cmd); 578 break; 579 case REQ_OP_WRITE_ZEROES: 580 /* currently only aliased to deallocate for a few ctrls: */ 581 case REQ_OP_DISCARD: 582 ret = nvme_setup_discard(ns, req, cmd); 583 break; 584 case REQ_OP_READ: 585 case REQ_OP_WRITE: 586 ret = nvme_setup_rw(ns, req, cmd); 587 break; 588 default: 589 WARN_ON_ONCE(1); 590 return BLK_STS_IOERR; 591 } 592 593 cmd->common.command_id = req->tag; 594 return ret; 595 } 596 EXPORT_SYMBOL_GPL(nvme_setup_cmd); 597 598 /* 599 * Returns 0 on success. If the result is negative, it's a Linux error code; 600 * if the result is positive, it's an NVM Express status code 601 */ 602 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, 603 union nvme_result *result, void *buffer, unsigned bufflen, 604 unsigned timeout, int qid, int at_head, 605 blk_mq_req_flags_t flags) 606 { 607 struct request *req; 608 int ret; 609 610 req = nvme_alloc_request(q, cmd, flags, qid); 611 if (IS_ERR(req)) 612 return PTR_ERR(req); 613 614 req->timeout = timeout ? 
timeout : ADMIN_TIMEOUT; 615 616 if (buffer && bufflen) { 617 ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL); 618 if (ret) 619 goto out; 620 } 621 622 blk_execute_rq(req->q, NULL, req, at_head); 623 if (result) 624 *result = nvme_req(req)->result; 625 if (nvme_req(req)->flags & NVME_REQ_CANCELLED) 626 ret = -EINTR; 627 else 628 ret = nvme_req(req)->status; 629 out: 630 blk_mq_free_request(req); 631 return ret; 632 } 633 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd); 634 635 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, 636 void *buffer, unsigned bufflen) 637 { 638 return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0, 639 NVME_QID_ANY, 0, 0); 640 } 641 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd); 642 643 static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf, 644 unsigned len, u32 seed, bool write) 645 { 646 struct bio_integrity_payload *bip; 647 int ret = -ENOMEM; 648 void *buf; 649 650 buf = kmalloc(len, GFP_KERNEL); 651 if (!buf) 652 goto out; 653 654 ret = -EFAULT; 655 if (write && copy_from_user(buf, ubuf, len)) 656 goto out_free_meta; 657 658 bip = bio_integrity_alloc(bio, GFP_KERNEL, 1); 659 if (IS_ERR(bip)) { 660 ret = PTR_ERR(bip); 661 goto out_free_meta; 662 } 663 664 bip->bip_iter.bi_size = len; 665 bip->bip_iter.bi_sector = seed; 666 ret = bio_integrity_add_page(bio, virt_to_page(buf), len, 667 offset_in_page(buf)); 668 if (ret == len) 669 return buf; 670 ret = -ENOMEM; 671 out_free_meta: 672 kfree(buf); 673 out: 674 return ERR_PTR(ret); 675 } 676 677 static int nvme_submit_user_cmd(struct request_queue *q, 678 struct nvme_command *cmd, void __user *ubuffer, 679 unsigned bufflen, void __user *meta_buffer, unsigned meta_len, 680 u32 meta_seed, u32 *result, unsigned timeout) 681 { 682 bool write = nvme_is_write(cmd); 683 struct nvme_ns *ns = q->queuedata; 684 struct gendisk *disk = ns ? ns->disk : NULL; 685 struct request *req; 686 struct bio *bio = NULL; 687 void *meta = NULL; 688 int ret; 689 690 req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY); 691 if (IS_ERR(req)) 692 return PTR_ERR(req); 693 694 req->timeout = timeout ? 
timeout : ADMIN_TIMEOUT; 695 696 if (ubuffer && bufflen) { 697 ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, 698 GFP_KERNEL); 699 if (ret) 700 goto out; 701 bio = req->bio; 702 bio->bi_disk = disk; 703 if (disk && meta_buffer && meta_len) { 704 meta = nvme_add_user_metadata(bio, meta_buffer, meta_len, 705 meta_seed, write); 706 if (IS_ERR(meta)) { 707 ret = PTR_ERR(meta); 708 goto out_unmap; 709 } 710 } 711 } 712 713 blk_execute_rq(req->q, disk, req, 0); 714 if (nvme_req(req)->flags & NVME_REQ_CANCELLED) 715 ret = -EINTR; 716 else 717 ret = nvme_req(req)->status; 718 if (result) 719 *result = le32_to_cpu(nvme_req(req)->result.u32); 720 if (meta && !ret && !write) { 721 if (copy_to_user(meta_buffer, meta, meta_len)) 722 ret = -EFAULT; 723 } 724 kfree(meta); 725 out_unmap: 726 if (bio) 727 blk_rq_unmap_user(bio); 728 out: 729 blk_mq_free_request(req); 730 return ret; 731 } 732 733 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) 734 { 735 struct nvme_ctrl *ctrl = rq->end_io_data; 736 737 blk_mq_free_request(rq); 738 739 if (status) { 740 dev_err(ctrl->device, 741 "failed nvme_keep_alive_end_io error=%d\n", 742 status); 743 return; 744 } 745 746 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); 747 } 748 749 static int nvme_keep_alive(struct nvme_ctrl *ctrl) 750 { 751 struct nvme_command c; 752 struct request *rq; 753 754 memset(&c, 0, sizeof(c)); 755 c.common.opcode = nvme_admin_keep_alive; 756 757 rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED, 758 NVME_QID_ANY); 759 if (IS_ERR(rq)) 760 return PTR_ERR(rq); 761 762 rq->timeout = ctrl->kato * HZ; 763 rq->end_io_data = ctrl; 764 765 blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io); 766 767 return 0; 768 } 769 770 static void nvme_keep_alive_work(struct work_struct *work) 771 { 772 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), 773 struct nvme_ctrl, ka_work); 774 775 if (nvme_keep_alive(ctrl)) { 776 /* allocation failure, reset the controller */ 777 dev_err(ctrl->device, "keep-alive failed\n"); 778 nvme_reset_ctrl(ctrl); 779 return; 780 } 781 } 782 783 void nvme_start_keep_alive(struct nvme_ctrl *ctrl) 784 { 785 if (unlikely(ctrl->kato == 0)) 786 return; 787 788 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); 789 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); 790 } 791 EXPORT_SYMBOL_GPL(nvme_start_keep_alive); 792 793 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) 794 { 795 if (unlikely(ctrl->kato == 0)) 796 return; 797 798 cancel_delayed_work_sync(&ctrl->ka_work); 799 } 800 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive); 801 802 static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) 803 { 804 struct nvme_command c = { }; 805 int error; 806 807 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ 808 c.identify.opcode = nvme_admin_identify; 809 c.identify.cns = NVME_ID_CNS_CTRL; 810 811 *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL); 812 if (!*id) 813 return -ENOMEM; 814 815 error = nvme_submit_sync_cmd(dev->admin_q, &c, *id, 816 sizeof(struct nvme_id_ctrl)); 817 if (error) 818 kfree(*id); 819 return error; 820 } 821 822 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, 823 struct nvme_ns_ids *ids) 824 { 825 struct nvme_command c = { }; 826 int status; 827 void *data; 828 int pos; 829 int len; 830 831 c.identify.opcode = nvme_admin_identify; 832 c.identify.nsid = cpu_to_le32(nsid); 833 c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; 834 835 data = 
kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status)
		goto free_data;

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		switch (cur->nidt) {
		case NVME_NIDT_EUI64:
			if (cur->nidl != NVME_NIDT_EUI64_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_EUI64_LEN;
			memcpy(ids->eui64, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_NGUID:
			if (cur->nidl != NVME_NIDT_NGUID_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_NGUID_LEN;
			memcpy(ids->nguid, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_UUID:
			if (cur->nidl != NVME_NIDT_UUID_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_UUID_LEN;
			uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
			break;
		default:
			/* Skip unknown types */
			len = cur->nidl;
			break;
		}

		len += sizeof(*cur);
	}
free_data:
	kfree(data);
	return status;
}

static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}

static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl,
		unsigned nsid)
{
	struct nvme_id_ns *id;
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	id = kmalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NULL;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed\n");
		kfree(id);
		return NULL;
	}

	return id;
}

static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		void *buffer, size_t buflen, u32 *result)
{
	struct nvme_command c;
	union nvme_result res;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}
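/*
 * Example of the Number of Queues feature encoding used below: both queue
 * counts are passed zero-based in dword 11, so requesting 8 submission and
 * 8 completion queues means q_count = 0x00070007, and the controller returns
 * the allocated counts the same way in the command result.
 */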
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count. We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}

static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
		return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
		return NVME_CMD_EFFECTS_CSE_MASK;
	default:
		break;
	}
	return 0;
}

static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
								u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ctrl->effects)
			effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
		if (effects & ~NVME_CMD_EFFECTS_CSUPP)
			dev_warn(ctrl->device,
				 "IO command:%02x has unhandled effects:%08x\n",
				 opcode, effects);
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
	else
		effects = nvme_known_admin_effects(opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}

static void nvme_update_formats(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->disk && nvme_revalidate_disk(ns->disk))
			nvme_ns_remove(ns);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
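/*
 * Example of the passthrough bracketing above and below: a user-issued
 * Format NVM command carries LBCC and CSE effects (per the controller's
 * effects log or nvme_known_admin_effects()), so nvme_passthru_start()
 * freezes all I/O queues first and nvme_passthru_end() revalidates the
 * namespaces, picking up any new LBA format, before unfreezing them.
 */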
static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
{
	/*
	 * Revalidate LBA changes prior to unfreezing. This is necessary to
	 * prevent memory corruption if a logical block size was changed by
	 * this command.
	 */
	if (effects & NVME_CMD_EFFECTS_LBCC)
		nvme_update_formats(ctrl);
	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK))
		nvme_unfreeze(ctrl);
	if (effects & NVME_CMD_EFFECTS_CCC)
		nvme_init_identify(ctrl);
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
		nvme_queue_scan(ctrl);
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u32 effects;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
			0, &cmd.result, timeout);
	nvme_passthru_end(ctrl, effects);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

/*
 * Issue ioctl requests on the first available path. Note that unlike normal
 * block layer requests we will not retry failed requests on another controller.
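 *
 * For example, an NVME_IOCTL_IO_CMD issued against the multipath node simply
 * uses whatever path nvme_find_path() returns at that moment; if that path
 * fails, the error is passed straight back to user space.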
1151 */ 1152 static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk, 1153 struct nvme_ns_head **head, int *srcu_idx) 1154 { 1155 #ifdef CONFIG_NVME_MULTIPATH 1156 if (disk->fops == &nvme_ns_head_ops) { 1157 *head = disk->private_data; 1158 *srcu_idx = srcu_read_lock(&(*head)->srcu); 1159 return nvme_find_path(*head); 1160 } 1161 #endif 1162 *head = NULL; 1163 *srcu_idx = -1; 1164 return disk->private_data; 1165 } 1166 1167 static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx) 1168 { 1169 if (head) 1170 srcu_read_unlock(&head->srcu, idx); 1171 } 1172 1173 static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned cmd, unsigned long arg) 1174 { 1175 switch (cmd) { 1176 case NVME_IOCTL_ID: 1177 force_successful_syscall_return(); 1178 return ns->head->ns_id; 1179 case NVME_IOCTL_ADMIN_CMD: 1180 return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg); 1181 case NVME_IOCTL_IO_CMD: 1182 return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg); 1183 case NVME_IOCTL_SUBMIT_IO: 1184 return nvme_submit_io(ns, (void __user *)arg); 1185 default: 1186 #ifdef CONFIG_NVM 1187 if (ns->ndev) 1188 return nvme_nvm_ioctl(ns, cmd, arg); 1189 #endif 1190 if (is_sed_ioctl(cmd)) 1191 return sed_ioctl(ns->ctrl->opal_dev, cmd, 1192 (void __user *) arg); 1193 return -ENOTTY; 1194 } 1195 } 1196 1197 static int nvme_ioctl(struct block_device *bdev, fmode_t mode, 1198 unsigned int cmd, unsigned long arg) 1199 { 1200 struct nvme_ns_head *head = NULL; 1201 struct nvme_ns *ns; 1202 int srcu_idx, ret; 1203 1204 ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); 1205 if (unlikely(!ns)) 1206 ret = -EWOULDBLOCK; 1207 else 1208 ret = nvme_ns_ioctl(ns, cmd, arg); 1209 nvme_put_ns_from_disk(head, srcu_idx); 1210 return ret; 1211 } 1212 1213 static int nvme_open(struct block_device *bdev, fmode_t mode) 1214 { 1215 struct nvme_ns *ns = bdev->bd_disk->private_data; 1216 1217 #ifdef CONFIG_NVME_MULTIPATH 1218 /* should never be called due to GENHD_FL_HIDDEN */ 1219 if (WARN_ON_ONCE(ns->head->disk)) 1220 return -ENXIO; 1221 #endif 1222 if (!kref_get_unless_zero(&ns->kref)) 1223 return -ENXIO; 1224 return 0; 1225 } 1226 1227 static void nvme_release(struct gendisk *disk, fmode_t mode) 1228 { 1229 nvme_put_ns(disk->private_data); 1230 } 1231 1232 static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) 1233 { 1234 /* some standard values */ 1235 geo->heads = 1 << 6; 1236 geo->sectors = 1 << 5; 1237 geo->cylinders = get_capacity(bdev->bd_disk) >> 11; 1238 return 0; 1239 } 1240 1241 #ifdef CONFIG_BLK_DEV_INTEGRITY 1242 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type) 1243 { 1244 struct blk_integrity integrity; 1245 1246 memset(&integrity, 0, sizeof(integrity)); 1247 switch (pi_type) { 1248 case NVME_NS_DPS_PI_TYPE3: 1249 integrity.profile = &t10_pi_type3_crc; 1250 integrity.tag_size = sizeof(u16) + sizeof(u32); 1251 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1252 break; 1253 case NVME_NS_DPS_PI_TYPE1: 1254 case NVME_NS_DPS_PI_TYPE2: 1255 integrity.profile = &t10_pi_type1_crc; 1256 integrity.tag_size = sizeof(u16); 1257 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1258 break; 1259 default: 1260 integrity.profile = NULL; 1261 break; 1262 } 1263 integrity.tuple_size = ms; 1264 blk_integrity_register(disk, &integrity); 1265 blk_queue_max_integrity_segments(disk->queue, 1); 1266 } 1267 #else 1268 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type) 1269 { 1270 } 1271 #endif /* CONFIG_BLK_DEV_INTEGRITY */ 1272 1273 static void 
nvme_set_chunk_size(struct nvme_ns *ns) 1274 { 1275 u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9)); 1276 blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size)); 1277 } 1278 1279 static void nvme_config_discard(struct nvme_ctrl *ctrl, 1280 unsigned stream_alignment, struct request_queue *queue) 1281 { 1282 u32 size = queue_logical_block_size(queue); 1283 1284 if (stream_alignment) 1285 size *= stream_alignment; 1286 1287 BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) < 1288 NVME_DSM_MAX_RANGES); 1289 1290 queue->limits.discard_alignment = 0; 1291 queue->limits.discard_granularity = size; 1292 1293 blk_queue_max_discard_sectors(queue, UINT_MAX); 1294 blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES); 1295 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue); 1296 1297 if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) 1298 blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); 1299 } 1300 1301 static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, 1302 struct nvme_id_ns *id, struct nvme_ns_ids *ids) 1303 { 1304 memset(ids, 0, sizeof(*ids)); 1305 1306 if (ctrl->vs >= NVME_VS(1, 1, 0)) 1307 memcpy(ids->eui64, id->eui64, sizeof(id->eui64)); 1308 if (ctrl->vs >= NVME_VS(1, 2, 0)) 1309 memcpy(ids->nguid, id->nguid, sizeof(id->nguid)); 1310 if (ctrl->vs >= NVME_VS(1, 3, 0)) { 1311 /* Don't treat error as fatal we potentially 1312 * already have a NGUID or EUI-64 1313 */ 1314 if (nvme_identify_ns_descs(ctrl, nsid, ids)) 1315 dev_warn(ctrl->device, 1316 "%s: Identify Descriptors failed\n", __func__); 1317 } 1318 } 1319 1320 static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids) 1321 { 1322 return !uuid_is_null(&ids->uuid) || 1323 memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) || 1324 memchr_inv(ids->eui64, 0, sizeof(ids->eui64)); 1325 } 1326 1327 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) 1328 { 1329 return uuid_equal(&a->uuid, &b->uuid) && 1330 memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 && 1331 memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0; 1332 } 1333 1334 static void nvme_update_disk_info(struct gendisk *disk, 1335 struct nvme_ns *ns, struct nvme_id_ns *id) 1336 { 1337 sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9); 1338 unsigned short bs = 1 << ns->lba_shift; 1339 unsigned stream_alignment = 0; 1340 1341 if (ns->ctrl->nr_streams && ns->sws && ns->sgs) 1342 stream_alignment = ns->sws * ns->sgs; 1343 1344 blk_mq_freeze_queue(disk->queue); 1345 blk_integrity_unregister(disk); 1346 1347 blk_queue_logical_block_size(disk->queue, bs); 1348 blk_queue_physical_block_size(disk->queue, bs); 1349 blk_queue_io_min(disk->queue, bs); 1350 1351 if (ns->ms && !ns->ext && 1352 (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) 1353 nvme_init_integrity(disk, ns->ms, ns->pi_type); 1354 if (ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) 1355 capacity = 0; 1356 set_capacity(disk, capacity); 1357 1358 if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM) 1359 nvme_config_discard(ns->ctrl, stream_alignment, disk->queue); 1360 blk_mq_unfreeze_queue(disk->queue); 1361 } 1362 1363 static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) 1364 { 1365 struct nvme_ns *ns = disk->private_data; 1366 1367 /* 1368 * If identify namespace failed, use default 512 byte block size so 1369 * block layer can use before failing read/write for 0 capacity. 
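	 * (id->lbaf[].ds is the block size as a power of two, so ds == 9 means
	 * 512-byte and ds == 12 means 4096-byte logical blocks; a value of 0
	 * below is treated as "not reported" and falls back to 512.)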
1370 */ 1371 ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds; 1372 if (ns->lba_shift == 0) 1373 ns->lba_shift = 9; 1374 ns->noiob = le16_to_cpu(id->noiob); 1375 ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); 1376 ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); 1377 /* the PI implementation requires metadata equal t10 pi tuple size */ 1378 if (ns->ms == sizeof(struct t10_pi_tuple)) 1379 ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; 1380 else 1381 ns->pi_type = 0; 1382 1383 if (ns->noiob) 1384 nvme_set_chunk_size(ns); 1385 nvme_update_disk_info(disk, ns, id); 1386 #ifdef CONFIG_NVME_MULTIPATH 1387 if (ns->head->disk) 1388 nvme_update_disk_info(ns->head->disk, ns, id); 1389 #endif 1390 } 1391 1392 static int nvme_revalidate_disk(struct gendisk *disk) 1393 { 1394 struct nvme_ns *ns = disk->private_data; 1395 struct nvme_ctrl *ctrl = ns->ctrl; 1396 struct nvme_id_ns *id; 1397 struct nvme_ns_ids ids; 1398 int ret = 0; 1399 1400 if (test_bit(NVME_NS_DEAD, &ns->flags)) { 1401 set_capacity(disk, 0); 1402 return -ENODEV; 1403 } 1404 1405 id = nvme_identify_ns(ctrl, ns->head->ns_id); 1406 if (!id) 1407 return -ENODEV; 1408 1409 if (id->ncap == 0) { 1410 ret = -ENODEV; 1411 goto out; 1412 } 1413 1414 __nvme_revalidate_disk(disk, id); 1415 nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids); 1416 if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) { 1417 dev_err(ctrl->device, 1418 "identifiers changed for nsid %d\n", ns->head->ns_id); 1419 ret = -ENODEV; 1420 } 1421 1422 out: 1423 kfree(id); 1424 return ret; 1425 } 1426 1427 static char nvme_pr_type(enum pr_type type) 1428 { 1429 switch (type) { 1430 case PR_WRITE_EXCLUSIVE: 1431 return 1; 1432 case PR_EXCLUSIVE_ACCESS: 1433 return 2; 1434 case PR_WRITE_EXCLUSIVE_REG_ONLY: 1435 return 3; 1436 case PR_EXCLUSIVE_ACCESS_REG_ONLY: 1437 return 4; 1438 case PR_WRITE_EXCLUSIVE_ALL_REGS: 1439 return 5; 1440 case PR_EXCLUSIVE_ACCESS_ALL_REGS: 1441 return 6; 1442 default: 1443 return 0; 1444 } 1445 }; 1446 1447 static int nvme_pr_command(struct block_device *bdev, u32 cdw10, 1448 u64 key, u64 sa_key, u8 op) 1449 { 1450 struct nvme_ns_head *head = NULL; 1451 struct nvme_ns *ns; 1452 struct nvme_command c; 1453 int srcu_idx, ret; 1454 u8 data[16] = { 0, }; 1455 1456 ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); 1457 if (unlikely(!ns)) 1458 return -EWOULDBLOCK; 1459 1460 put_unaligned_le64(key, &data[0]); 1461 put_unaligned_le64(sa_key, &data[8]); 1462 1463 memset(&c, 0, sizeof(c)); 1464 c.common.opcode = op; 1465 c.common.nsid = cpu_to_le32(ns->head->ns_id); 1466 c.common.cdw10[0] = cpu_to_le32(cdw10); 1467 1468 ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16); 1469 nvme_put_ns_from_disk(head, srcu_idx); 1470 return ret; 1471 } 1472 1473 static int nvme_pr_register(struct block_device *bdev, u64 old, 1474 u64 new, unsigned flags) 1475 { 1476 u32 cdw10; 1477 1478 if (flags & ~PR_FL_IGNORE_KEY) 1479 return -EOPNOTSUPP; 1480 1481 cdw10 = old ? 2 : 0; 1482 cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0; 1483 cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ 1484 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); 1485 } 1486 1487 static int nvme_pr_reserve(struct block_device *bdev, u64 key, 1488 enum pr_type type, unsigned flags) 1489 { 1490 u32 cdw10; 1491 1492 if (flags & ~PR_FL_IGNORE_KEY) 1493 return -EOPNOTSUPP; 1494 1495 cdw10 = nvme_pr_type(type) << 8; 1496 cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 
1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
{
	struct nvme_ctrl *ctrl = data;
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
	cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw10[1] = cpu_to_le32(len);

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk= nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};

#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;

	if (!kref_get_unless_zero(&head->ref))
		return -ENXIO;
	return 0;
}

static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
{
	nvme_put_ns_head(disk->private_data);
}

const struct block_device_operations nvme_ns_head_ops = {
	.owner		= THIS_MODULE,
	.open		= nvme_ns_head_open,
	.release	= nvme_ns_head_release,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
	.getgeo		= nvme_getgeo,
	.pr_ops		= &nvme_pr_ops,
};
#endif /* CONFIG_NVME_MULTIPATH */
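/*
 * CAP.TO is expressed in 500 ms units, so the polling loop below waits at
 * most (CAP.TO + 1) / 2 seconds for CSTS.RDY to reach the expected value;
 * a controller reporting TO == 0xf, for instance, gets up to 8 seconds.
 */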
1605 "initialisation" : "reset"); 1606 return -ENODEV; 1607 } 1608 } 1609 1610 return ret; 1611 } 1612 1613 /* 1614 * If the device has been passed off to us in an enabled state, just clear 1615 * the enabled bit. The spec says we should set the 'shutdown notification 1616 * bits', but doing so may cause the device to complete commands to the 1617 * admin queue ... and we don't know what memory that might be pointing at! 1618 */ 1619 int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap) 1620 { 1621 int ret; 1622 1623 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 1624 ctrl->ctrl_config &= ~NVME_CC_ENABLE; 1625 1626 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 1627 if (ret) 1628 return ret; 1629 1630 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) 1631 msleep(NVME_QUIRK_DELAY_AMOUNT); 1632 1633 return nvme_wait_ready(ctrl, cap, false); 1634 } 1635 EXPORT_SYMBOL_GPL(nvme_disable_ctrl); 1636 1637 int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap) 1638 { 1639 /* 1640 * Default to a 4K page size, with the intention to update this 1641 * path in the future to accomodate architectures with differing 1642 * kernel and IO page sizes. 1643 */ 1644 unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12; 1645 int ret; 1646 1647 if (page_shift < dev_page_min) { 1648 dev_err(ctrl->device, 1649 "Minimum device page size %u too large for host (%u)\n", 1650 1 << dev_page_min, 1 << page_shift); 1651 return -ENODEV; 1652 } 1653 1654 ctrl->page_size = 1 << page_shift; 1655 1656 ctrl->ctrl_config = NVME_CC_CSS_NVM; 1657 ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT; 1658 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; 1659 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; 1660 ctrl->ctrl_config |= NVME_CC_ENABLE; 1661 1662 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 1663 if (ret) 1664 return ret; 1665 return nvme_wait_ready(ctrl, cap, true); 1666 } 1667 EXPORT_SYMBOL_GPL(nvme_enable_ctrl); 1668 1669 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl) 1670 { 1671 unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ); 1672 u32 csts; 1673 int ret; 1674 1675 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 1676 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; 1677 1678 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 1679 if (ret) 1680 return ret; 1681 1682 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 1683 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT) 1684 break; 1685 1686 msleep(100); 1687 if (fatal_signal_pending(current)) 1688 return -EINTR; 1689 if (time_after(jiffies, timeout)) { 1690 dev_err(ctrl->device, 1691 "Device shutdown incomplete; abort shutdown\n"); 1692 return -ENODEV; 1693 } 1694 } 1695 1696 return ret; 1697 } 1698 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl); 1699 1700 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, 1701 struct request_queue *q) 1702 { 1703 bool vwc = false; 1704 1705 if (ctrl->max_hw_sectors) { 1706 u32 max_segments = 1707 (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1; 1708 1709 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); 1710 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); 1711 } 1712 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && 1713 is_power_of_2(ctrl->max_hw_sectors)) 1714 blk_queue_chunk_sectors(q, ctrl->max_hw_sectors); 1715 blk_queue_virt_boundary(q, ctrl->page_size - 1); 1716 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) 1717 vwc = true; 1718 blk_queue_write_cache(q, vwc, vwc); 1719 } 1720 1721 static int 
nvme_configure_timestamp(struct nvme_ctrl *ctrl) 1722 { 1723 __le64 ts; 1724 int ret; 1725 1726 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) 1727 return 0; 1728 1729 ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); 1730 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), 1731 NULL); 1732 if (ret) 1733 dev_warn_once(ctrl->device, 1734 "could not set timestamp (%d)\n", ret); 1735 return ret; 1736 } 1737 1738 static int nvme_configure_apst(struct nvme_ctrl *ctrl) 1739 { 1740 /* 1741 * APST (Autonomous Power State Transition) lets us program a 1742 * table of power state transitions that the controller will 1743 * perform automatically. We configure it with a simple 1744 * heuristic: we are willing to spend at most 2% of the time 1745 * transitioning between power states. Therefore, when running 1746 * in any given state, we will enter the next lower-power 1747 * non-operational state after waiting 50 * (enlat + exlat) 1748 * microseconds, as long as that state's exit latency is under 1749 * the requested maximum latency. 1750 * 1751 * We will not autonomously enter any non-operational state for 1752 * which the total latency exceeds ps_max_latency_us. Users 1753 * can set ps_max_latency_us to zero to turn off APST. 1754 */ 1755 1756 unsigned apste; 1757 struct nvme_feat_auto_pst *table; 1758 u64 max_lat_us = 0; 1759 int max_ps = -1; 1760 int ret; 1761 1762 /* 1763 * If APST isn't supported or if we haven't been initialized yet, 1764 * then don't do anything. 1765 */ 1766 if (!ctrl->apsta) 1767 return 0; 1768 1769 if (ctrl->npss > 31) { 1770 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); 1771 return 0; 1772 } 1773 1774 table = kzalloc(sizeof(*table), GFP_KERNEL); 1775 if (!table) 1776 return 0; 1777 1778 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { 1779 /* Turn off APST. */ 1780 apste = 0; 1781 dev_dbg(ctrl->device, "APST disabled\n"); 1782 } else { 1783 __le64 target = cpu_to_le64(0); 1784 int state; 1785 1786 /* 1787 * Walk through all states from lowest- to highest-power. 1788 * According to the spec, lower-numbered states use more 1789 * power. NPSS, despite the name, is the index of the 1790 * lowest-power state, not the number of states. 1791 */ 1792 for (state = (int)ctrl->npss; state >= 0; state--) { 1793 u64 total_latency_us, exit_latency_us, transition_ms; 1794 1795 if (target) 1796 table->entries[state] = target; 1797 1798 /* 1799 * Don't allow transitions to the deepest state 1800 * if it's quirked off. 1801 */ 1802 if (state == ctrl->npss && 1803 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) 1804 continue; 1805 1806 /* 1807 * Is this state a useful non-operational state for 1808 * higher-power states to autonomously transition to? 1809 */ 1810 if (!(ctrl->psd[state].flags & 1811 NVME_PS_FLAGS_NON_OP_STATE)) 1812 continue; 1813 1814 exit_latency_us = 1815 (u64)le32_to_cpu(ctrl->psd[state].exit_lat); 1816 if (exit_latency_us > ctrl->ps_max_latency_us) 1817 continue; 1818 1819 total_latency_us = 1820 exit_latency_us + 1821 le32_to_cpu(ctrl->psd[state].entry_lat); 1822 1823 /* 1824 * This state is good. Use it as the APST idle 1825 * target for higher power states. 
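			 *
			 * For example, a state with enlat + exlat of 10,000 us
			 * gets an idle timer of roughly 50 * 10 ms = 500 ms:
			 * total_latency_us + 19, divided by 20, interpreted as
			 * milliseconds in the table entry built below.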
1826 */ 1827 transition_ms = total_latency_us + 19; 1828 do_div(transition_ms, 20); 1829 if (transition_ms > (1 << 24) - 1) 1830 transition_ms = (1 << 24) - 1; 1831 1832 target = cpu_to_le64((state << 3) | 1833 (transition_ms << 8)); 1834 1835 if (max_ps == -1) 1836 max_ps = state; 1837 1838 if (total_latency_us > max_lat_us) 1839 max_lat_us = total_latency_us; 1840 } 1841 1842 apste = 1; 1843 1844 if (max_ps == -1) { 1845 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); 1846 } else { 1847 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", 1848 max_ps, max_lat_us, (int)sizeof(*table), table); 1849 } 1850 } 1851 1852 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, 1853 table, sizeof(*table), NULL); 1854 if (ret) 1855 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); 1856 1857 kfree(table); 1858 return ret; 1859 } 1860 1861 static void nvme_set_latency_tolerance(struct device *dev, s32 val) 1862 { 1863 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 1864 u64 latency; 1865 1866 switch (val) { 1867 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: 1868 case PM_QOS_LATENCY_ANY: 1869 latency = U64_MAX; 1870 break; 1871 1872 default: 1873 latency = val; 1874 } 1875 1876 if (ctrl->ps_max_latency_us != latency) { 1877 ctrl->ps_max_latency_us = latency; 1878 nvme_configure_apst(ctrl); 1879 } 1880 } 1881 1882 struct nvme_core_quirk_entry { 1883 /* 1884 * NVMe model and firmware strings are padded with spaces. For 1885 * simplicity, strings in the quirk table are padded with NULLs 1886 * instead. 1887 */ 1888 u16 vid; 1889 const char *mn; 1890 const char *fr; 1891 unsigned long quirks; 1892 }; 1893 1894 static const struct nvme_core_quirk_entry core_quirks[] = { 1895 { 1896 /* 1897 * This Toshiba device seems to die using any APST states. See: 1898 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 1899 */ 1900 .vid = 0x1179, 1901 .mn = "THNSF5256GPUK TOSHIBA", 1902 .quirks = NVME_QUIRK_NO_APST, 1903 } 1904 }; 1905 1906 /* match is null-terminated but idstr is space-padded. 
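 * For example, id->mn is a 40-byte field padded with trailing spaces, so the
 * quirk entry "THNSF5256GPUK TOSHIBA" has to match byte-for-byte and then see
 * nothing but spaces up to the end of the field.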
*/ 1907 static bool string_matches(const char *idstr, const char *match, size_t len) 1908 { 1909 size_t matchlen; 1910 1911 if (!match) 1912 return true; 1913 1914 matchlen = strlen(match); 1915 WARN_ON_ONCE(matchlen > len); 1916 1917 if (memcmp(idstr, match, matchlen)) 1918 return false; 1919 1920 for (; matchlen < len; matchlen++) 1921 if (idstr[matchlen] != ' ') 1922 return false; 1923 1924 return true; 1925 } 1926 1927 static bool quirk_matches(const struct nvme_id_ctrl *id, 1928 const struct nvme_core_quirk_entry *q) 1929 { 1930 return q->vid == le16_to_cpu(id->vid) && 1931 string_matches(id->mn, q->mn, sizeof(id->mn)) && 1932 string_matches(id->fr, q->fr, sizeof(id->fr)); 1933 } 1934 1935 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, 1936 struct nvme_id_ctrl *id) 1937 { 1938 size_t nqnlen; 1939 int off; 1940 1941 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 1942 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 1943 strncpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 1944 return; 1945 } 1946 1947 if (ctrl->vs >= NVME_VS(1, 2, 1)) 1948 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 1949 1950 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */ 1951 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 1952 "nqn.2014.08.org.nvmexpress:%4x%4x", 1953 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 1954 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 1955 off += sizeof(id->sn); 1956 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); 1957 off += sizeof(id->mn); 1958 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); 1959 } 1960 1961 static void __nvme_release_subsystem(struct nvme_subsystem *subsys) 1962 { 1963 ida_simple_remove(&nvme_subsystems_ida, subsys->instance); 1964 kfree(subsys); 1965 } 1966 1967 static void nvme_release_subsystem(struct device *dev) 1968 { 1969 __nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev)); 1970 } 1971 1972 static void nvme_destroy_subsystem(struct kref *ref) 1973 { 1974 struct nvme_subsystem *subsys = 1975 container_of(ref, struct nvme_subsystem, ref); 1976 1977 mutex_lock(&nvme_subsystems_lock); 1978 list_del(&subsys->entry); 1979 mutex_unlock(&nvme_subsystems_lock); 1980 1981 ida_destroy(&subsys->ns_ida); 1982 device_del(&subsys->dev); 1983 put_device(&subsys->dev); 1984 } 1985 1986 static void nvme_put_subsystem(struct nvme_subsystem *subsys) 1987 { 1988 kref_put(&subsys->ref, nvme_destroy_subsystem); 1989 } 1990 1991 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) 1992 { 1993 struct nvme_subsystem *subsys; 1994 1995 lockdep_assert_held(&nvme_subsystems_lock); 1996 1997 list_for_each_entry(subsys, &nvme_subsystems, entry) { 1998 if (strcmp(subsys->subnqn, subsysnqn)) 1999 continue; 2000 if (!kref_get_unless_zero(&subsys->ref)) 2001 continue; 2002 return subsys; 2003 } 2004 2005 return NULL; 2006 } 2007 2008 #define SUBSYS_ATTR_RO(_name, _mode, _show) \ 2009 struct device_attribute subsys_attr_##_name = \ 2010 __ATTR(_name, _mode, _show, NULL) 2011 2012 static ssize_t nvme_subsys_show_nqn(struct device *dev, 2013 struct device_attribute *attr, 2014 char *buf) 2015 { 2016 struct nvme_subsystem *subsys = 2017 container_of(dev, struct nvme_subsystem, dev); 2018 2019 return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn); 2020 } 2021 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn); 2022 2023 #define nvme_subsys_show_str_function(field) \ 2024 static ssize_t subsys_##field##_show(struct device *dev, \ 2025 struct 
device_attribute *attr, char *buf) \ 2026 { \ 2027 struct nvme_subsystem *subsys = \ 2028 container_of(dev, struct nvme_subsystem, dev); \ 2029 return sprintf(buf, "%.*s\n", \ 2030 (int)sizeof(subsys->field), subsys->field); \ 2031 } \ 2032 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show); 2033 2034 nvme_subsys_show_str_function(model); 2035 nvme_subsys_show_str_function(serial); 2036 nvme_subsys_show_str_function(firmware_rev); 2037 2038 static struct attribute *nvme_subsys_attrs[] = { 2039 &subsys_attr_model.attr, 2040 &subsys_attr_serial.attr, 2041 &subsys_attr_firmware_rev.attr, 2042 &subsys_attr_subsysnqn.attr, 2043 NULL, 2044 }; 2045 2046 static struct attribute_group nvme_subsys_attrs_group = { 2047 .attrs = nvme_subsys_attrs, 2048 }; 2049 2050 static const struct attribute_group *nvme_subsys_attrs_groups[] = { 2051 &nvme_subsys_attrs_group, 2052 NULL, 2053 }; 2054 2055 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2056 { 2057 struct nvme_subsystem *subsys, *found; 2058 int ret; 2059 2060 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); 2061 if (!subsys) 2062 return -ENOMEM; 2063 ret = ida_simple_get(&nvme_subsystems_ida, 0, 0, GFP_KERNEL); 2064 if (ret < 0) { 2065 kfree(subsys); 2066 return ret; 2067 } 2068 subsys->instance = ret; 2069 mutex_init(&subsys->lock); 2070 kref_init(&subsys->ref); 2071 INIT_LIST_HEAD(&subsys->ctrls); 2072 INIT_LIST_HEAD(&subsys->nsheads); 2073 nvme_init_subnqn(subsys, ctrl, id); 2074 memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); 2075 memcpy(subsys->model, id->mn, sizeof(subsys->model)); 2076 memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev)); 2077 subsys->vendor_id = le16_to_cpu(id->vid); 2078 subsys->cmic = id->cmic; 2079 2080 subsys->dev.class = nvme_subsys_class; 2081 subsys->dev.release = nvme_release_subsystem; 2082 subsys->dev.groups = nvme_subsys_attrs_groups; 2083 dev_set_name(&subsys->dev, "nvme-subsys%d", subsys->instance); 2084 device_initialize(&subsys->dev); 2085 2086 mutex_lock(&nvme_subsystems_lock); 2087 found = __nvme_find_get_subsystem(subsys->subnqn); 2088 if (found) { 2089 /* 2090 * Verify that the subsystem actually supports multiple 2091 * controllers, else bail out. 
2092 */ 2093 if (!(id->cmic & (1 << 1))) { 2094 dev_err(ctrl->device, 2095 "ignoring ctrl due to duplicate subnqn (%s).\n", 2096 found->subnqn); 2097 nvme_put_subsystem(found); 2098 ret = -EINVAL; 2099 goto out_unlock; 2100 } 2101 2102 __nvme_release_subsystem(subsys); 2103 subsys = found; 2104 } else { 2105 ret = device_add(&subsys->dev); 2106 if (ret) { 2107 dev_err(ctrl->device, 2108 "failed to register subsystem device.\n"); 2109 goto out_unlock; 2110 } 2111 ida_init(&subsys->ns_ida); 2112 list_add_tail(&subsys->entry, &nvme_subsystems); 2113 } 2114 2115 ctrl->subsys = subsys; 2116 mutex_unlock(&nvme_subsystems_lock); 2117 2118 if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, 2119 dev_name(ctrl->device))) { 2120 dev_err(ctrl->device, 2121 "failed to create sysfs link from subsystem.\n"); 2122 /* the transport driver will eventually put the subsystem */ 2123 return -EINVAL; 2124 } 2125 2126 mutex_lock(&subsys->lock); 2127 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); 2128 mutex_unlock(&subsys->lock); 2129 2130 return 0; 2131 2132 out_unlock: 2133 mutex_unlock(&nvme_subsystems_lock); 2134 put_device(&subsys->dev); 2135 return ret; 2136 } 2137 2138 static int nvme_get_log(struct nvme_ctrl *ctrl, u8 log_page, void *log, 2139 size_t size) 2140 { 2141 struct nvme_command c = { }; 2142 2143 c.common.opcode = nvme_admin_get_log_page; 2144 c.common.nsid = cpu_to_le32(NVME_NSID_ALL); 2145 c.common.cdw10[0] = nvme_get_log_dw10(log_page, size); 2146 2147 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); 2148 } 2149 2150 static int nvme_get_effects_log(struct nvme_ctrl *ctrl) 2151 { 2152 int ret; 2153 2154 if (!ctrl->effects) 2155 ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL); 2156 2157 if (!ctrl->effects) 2158 return 0; 2159 2160 ret = nvme_get_log(ctrl, NVME_LOG_CMD_EFFECTS, ctrl->effects, 2161 sizeof(*ctrl->effects)); 2162 if (ret) { 2163 kfree(ctrl->effects); 2164 ctrl->effects = NULL; 2165 } 2166 return ret; 2167 } 2168 2169 /* 2170 * Initialize the cached copies of the Identify data and various controller 2171 * registers in our nvme_ctrl structure. This should be called as soon as 2172 * the admin queue is fully up and running. 2173 */ 2174 int nvme_init_identify(struct nvme_ctrl *ctrl) 2175 { 2176 struct nvme_id_ctrl *id; 2177 u64 cap; 2178 int ret, page_shift; 2179 u32 max_hw_sectors; 2180 bool prev_apst_enabled; 2181 2182 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); 2183 if (ret) { 2184 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); 2185 return ret; 2186 } 2187 2188 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap); 2189 if (ret) { 2190 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); 2191 return ret; 2192 } 2193 page_shift = NVME_CAP_MPSMIN(cap) + 12; 2194 2195 if (ctrl->vs >= NVME_VS(1, 1, 0)) 2196 ctrl->subsystem = NVME_CAP_NSSRC(cap); 2197 2198 ret = nvme_identify_ctrl(ctrl, &id); 2199 if (ret) { 2200 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); 2201 return -EIO; 2202 } 2203 2204 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { 2205 ret = nvme_get_effects_log(ctrl); 2206 if (ret < 0) 2207 goto out_free; 2208 } 2209 2210 if (!ctrl->identified) { 2211 int i; 2212 2213 ret = nvme_init_subsystem(ctrl, id); 2214 if (ret) 2215 goto out_free; 2216 2217 /* 2218 * Check for quirks. Quirks can depend on the firmware version, 2219 * so, in principle, the set of quirks present can change 2220 * across a reset.
As a possible future enhancement, we 2221 * could re-scan for quirks every time we reinitialize 2222 * the device, but we'd have to make sure that the driver 2223 * behaves intelligently if the quirks change. 2224 */ 2225 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { 2226 if (quirk_matches(id, &core_quirks[i])) 2227 ctrl->quirks |= core_quirks[i].quirks; 2228 } 2229 } 2230 2231 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { 2232 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); 2233 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; 2234 } 2235 2236 ctrl->oacs = le16_to_cpu(id->oacs); 2237 ctrl->oncs = le16_to_cpup(&id->oncs); 2238 atomic_set(&ctrl->abort_limit, id->acl + 1); 2239 ctrl->vwc = id->vwc; 2240 ctrl->cntlid = le16_to_cpup(&id->cntlid); 2241 if (id->mdts) 2242 max_hw_sectors = 1 << (id->mdts + page_shift - 9); 2243 else 2244 max_hw_sectors = UINT_MAX; 2245 ctrl->max_hw_sectors = 2246 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); 2247 2248 nvme_set_queue_limits(ctrl, ctrl->admin_q); 2249 ctrl->sgls = le32_to_cpu(id->sgls); 2250 ctrl->kas = le16_to_cpu(id->kas); 2251 2252 if (id->rtd3e) { 2253 /* us -> s */ 2254 u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000; 2255 2256 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, 2257 shutdown_timeout, 60); 2258 2259 if (ctrl->shutdown_timeout != shutdown_timeout) 2260 dev_warn(ctrl->device, 2261 "Shutdown timeout set to %u seconds\n", 2262 ctrl->shutdown_timeout); 2263 } else 2264 ctrl->shutdown_timeout = shutdown_timeout; 2265 2266 ctrl->npss = id->npss; 2267 ctrl->apsta = id->apsta; 2268 prev_apst_enabled = ctrl->apst_enabled; 2269 if (ctrl->quirks & NVME_QUIRK_NO_APST) { 2270 if (force_apst && id->apsta) { 2271 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); 2272 ctrl->apst_enabled = true; 2273 } else { 2274 ctrl->apst_enabled = false; 2275 } 2276 } else { 2277 ctrl->apst_enabled = id->apsta; 2278 } 2279 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); 2280 2281 if (ctrl->ops->flags & NVME_F_FABRICS) { 2282 ctrl->icdoff = le16_to_cpu(id->icdoff); 2283 ctrl->ioccsz = le32_to_cpu(id->ioccsz); 2284 ctrl->iorcsz = le32_to_cpu(id->iorcsz); 2285 ctrl->maxcmd = le16_to_cpu(id->maxcmd); 2286 2287 /* 2288 * In fabrics we need to verify the cntlid matches the 2289 * admin connect 2290 */ 2291 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { 2292 ret = -EINVAL; 2293 goto out_free; 2294 } 2295 2296 if (!ctrl->opts->discovery_nqn && !ctrl->kas) { 2297 dev_err(ctrl->device, 2298 "keep-alive support is mandatory for fabrics\n"); 2299 ret = -EINVAL; 2300 goto out_free; 2301 } 2302 } else { 2303 ctrl->cntlid = le16_to_cpu(id->cntlid); 2304 ctrl->hmpre = le32_to_cpu(id->hmpre); 2305 ctrl->hmmin = le32_to_cpu(id->hmmin); 2306 ctrl->hmminds = le32_to_cpu(id->hmminds); 2307 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); 2308 } 2309 2310 kfree(id); 2311 2312 if (ctrl->apst_enabled && !prev_apst_enabled) 2313 dev_pm_qos_expose_latency_tolerance(ctrl->device); 2314 else if (!ctrl->apst_enabled && prev_apst_enabled) 2315 dev_pm_qos_hide_latency_tolerance(ctrl->device); 2316 2317 ret = nvme_configure_apst(ctrl); 2318 if (ret < 0) 2319 return ret; 2320 2321 ret = nvme_configure_timestamp(ctrl); 2322 if (ret < 0) 2323 return ret; 2324 2325 ret = nvme_configure_directives(ctrl); 2326 if (ret < 0) 2327 return ret; 2328 2329 ctrl->identified = true; 2330 2331 return 0; 2332 2333 out_free: 2334 kfree(id); 2335 return ret; 
2336 } 2337 EXPORT_SYMBOL_GPL(nvme_init_identify); 2338 2339 static int nvme_dev_open(struct inode *inode, struct file *file) 2340 { 2341 struct nvme_ctrl *ctrl = 2342 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 2343 2344 if (ctrl->state != NVME_CTRL_LIVE) 2345 return -EWOULDBLOCK; 2346 file->private_data = ctrl; 2347 return 0; 2348 } 2349 2350 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) 2351 { 2352 struct nvme_ns *ns; 2353 int ret; 2354 2355 mutex_lock(&ctrl->namespaces_mutex); 2356 if (list_empty(&ctrl->namespaces)) { 2357 ret = -ENOTTY; 2358 goto out_unlock; 2359 } 2360 2361 ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list); 2362 if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) { 2363 dev_warn(ctrl->device, 2364 "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n"); 2365 ret = -EINVAL; 2366 goto out_unlock; 2367 } 2368 2369 dev_warn(ctrl->device, 2370 "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n"); 2371 kref_get(&ns->kref); 2372 mutex_unlock(&ctrl->namespaces_mutex); 2373 2374 ret = nvme_user_cmd(ctrl, ns, argp); 2375 nvme_put_ns(ns); 2376 return ret; 2377 2378 out_unlock: 2379 mutex_unlock(&ctrl->namespaces_mutex); 2380 return ret; 2381 } 2382 2383 static long nvme_dev_ioctl(struct file *file, unsigned int cmd, 2384 unsigned long arg) 2385 { 2386 struct nvme_ctrl *ctrl = file->private_data; 2387 void __user *argp = (void __user *)arg; 2388 2389 switch (cmd) { 2390 case NVME_IOCTL_ADMIN_CMD: 2391 return nvme_user_cmd(ctrl, NULL, argp); 2392 case NVME_IOCTL_IO_CMD: 2393 return nvme_dev_user_cmd(ctrl, argp); 2394 case NVME_IOCTL_RESET: 2395 dev_warn(ctrl->device, "resetting controller\n"); 2396 return nvme_reset_ctrl_sync(ctrl); 2397 case NVME_IOCTL_SUBSYS_RESET: 2398 return nvme_reset_subsystem(ctrl); 2399 case NVME_IOCTL_RESCAN: 2400 nvme_queue_scan(ctrl); 2401 return 0; 2402 default: 2403 return -ENOTTY; 2404 } 2405 } 2406 2407 static const struct file_operations nvme_dev_fops = { 2408 .owner = THIS_MODULE, 2409 .open = nvme_dev_open, 2410 .unlocked_ioctl = nvme_dev_ioctl, 2411 .compat_ioctl = nvme_dev_ioctl, 2412 }; 2413 2414 static ssize_t nvme_sysfs_reset(struct device *dev, 2415 struct device_attribute *attr, const char *buf, 2416 size_t count) 2417 { 2418 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2419 int ret; 2420 2421 ret = nvme_reset_ctrl_sync(ctrl); 2422 if (ret < 0) 2423 return ret; 2424 return count; 2425 } 2426 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); 2427 2428 static ssize_t nvme_sysfs_rescan(struct device *dev, 2429 struct device_attribute *attr, const char *buf, 2430 size_t count) 2431 { 2432 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2433 2434 nvme_queue_scan(ctrl); 2435 return count; 2436 } 2437 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); 2438 2439 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) 2440 { 2441 struct gendisk *disk = dev_to_disk(dev); 2442 2443 if (disk->fops == &nvme_fops) 2444 return nvme_get_ns_from_dev(dev)->head; 2445 else 2446 return disk->private_data; 2447 } 2448 2449 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, 2450 char *buf) 2451 { 2452 struct nvme_ns_head *head = dev_to_ns_head(dev); 2453 struct nvme_ns_ids *ids = &head->ids; 2454 struct nvme_subsystem *subsys = head->subsys; 2455 int serial_len = sizeof(subsys->serial); 2456 int model_len = sizeof(subsys->model); 2457 2458 if (!uuid_is_null(&ids->uuid)) 2459 return 
sprintf(buf, "uuid.%pU\n", &ids->uuid); 2460 2461 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 2462 return sprintf(buf, "eui.%16phN\n", ids->nguid); 2463 2464 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 2465 return sprintf(buf, "eui.%8phN\n", ids->eui64); 2466 2467 while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' || 2468 subsys->serial[serial_len - 1] == '\0')) 2469 serial_len--; 2470 while (model_len > 0 && (subsys->model[model_len - 1] == ' ' || 2471 subsys->model[model_len - 1] == '\0')) 2472 model_len--; 2473 2474 return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id, 2475 serial_len, subsys->serial, model_len, subsys->model, 2476 head->ns_id); 2477 } 2478 static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL); 2479 2480 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr, 2481 char *buf) 2482 { 2483 return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid); 2484 } 2485 static DEVICE_ATTR(nguid, S_IRUGO, nguid_show, NULL); 2486 2487 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, 2488 char *buf) 2489 { 2490 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 2491 2492 /* For backward compatibility expose the NGUID to userspace if 2493 * we have no UUID set 2494 */ 2495 if (uuid_is_null(&ids->uuid)) { 2496 printk_ratelimited(KERN_WARNING 2497 "No UUID available providing old NGUID\n"); 2498 return sprintf(buf, "%pU\n", ids->nguid); 2499 } 2500 return sprintf(buf, "%pU\n", &ids->uuid); 2501 } 2502 static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL); 2503 2504 static ssize_t eui_show(struct device *dev, struct device_attribute *attr, 2505 char *buf) 2506 { 2507 return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64); 2508 } 2509 static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL); 2510 2511 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, 2512 char *buf) 2513 { 2514 return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id); 2515 } 2516 static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL); 2517 2518 static struct attribute *nvme_ns_id_attrs[] = { 2519 &dev_attr_wwid.attr, 2520 &dev_attr_uuid.attr, 2521 &dev_attr_nguid.attr, 2522 &dev_attr_eui.attr, 2523 &dev_attr_nsid.attr, 2524 NULL, 2525 }; 2526 2527 static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, 2528 struct attribute *a, int n) 2529 { 2530 struct device *dev = container_of(kobj, struct device, kobj); 2531 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 2532 2533 if (a == &dev_attr_uuid.attr) { 2534 if (uuid_is_null(&ids->uuid) && 2535 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 2536 return 0; 2537 } 2538 if (a == &dev_attr_nguid.attr) { 2539 if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 2540 return 0; 2541 } 2542 if (a == &dev_attr_eui.attr) { 2543 if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 2544 return 0; 2545 } 2546 return a->mode; 2547 } 2548 2549 const struct attribute_group nvme_ns_id_attr_group = { 2550 .attrs = nvme_ns_id_attrs, 2551 .is_visible = nvme_ns_id_attrs_are_visible, 2552 }; 2553 2554 #define nvme_show_str_function(field) \ 2555 static ssize_t field##_show(struct device *dev, \ 2556 struct device_attribute *attr, char *buf) \ 2557 { \ 2558 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 2559 return sprintf(buf, "%.*s\n", \ 2560 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \ 2561 } \ 2562 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 2563 2564 nvme_show_str_function(model); 2565 nvme_show_str_function(serial); 2566 
nvme_show_str_function(firmware_rev); 2567 2568 #define nvme_show_int_function(field) \ 2569 static ssize_t field##_show(struct device *dev, \ 2570 struct device_attribute *attr, char *buf) \ 2571 { \ 2572 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 2573 return sprintf(buf, "%d\n", ctrl->field); \ 2574 } \ 2575 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 2576 2577 nvme_show_int_function(cntlid); 2578 2579 static ssize_t nvme_sysfs_delete(struct device *dev, 2580 struct device_attribute *attr, const char *buf, 2581 size_t count) 2582 { 2583 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2584 2585 if (device_remove_file_self(dev, attr)) 2586 nvme_delete_ctrl_sync(ctrl); 2587 return count; 2588 } 2589 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete); 2590 2591 static ssize_t nvme_sysfs_show_transport(struct device *dev, 2592 struct device_attribute *attr, 2593 char *buf) 2594 { 2595 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2596 2597 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name); 2598 } 2599 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); 2600 2601 static ssize_t nvme_sysfs_show_state(struct device *dev, 2602 struct device_attribute *attr, 2603 char *buf) 2604 { 2605 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2606 static const char *const state_name[] = { 2607 [NVME_CTRL_NEW] = "new", 2608 [NVME_CTRL_LIVE] = "live", 2609 [NVME_CTRL_RESETTING] = "resetting", 2610 [NVME_CTRL_RECONNECTING]= "reconnecting", 2611 [NVME_CTRL_DELETING] = "deleting", 2612 [NVME_CTRL_DEAD] = "dead", 2613 }; 2614 2615 if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && 2616 state_name[ctrl->state]) 2617 return sprintf(buf, "%s\n", state_name[ctrl->state]); 2618 2619 return sprintf(buf, "unknown state\n"); 2620 } 2621 2622 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); 2623 2624 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, 2625 struct device_attribute *attr, 2626 char *buf) 2627 { 2628 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2629 2630 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn); 2631 } 2632 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); 2633 2634 static ssize_t nvme_sysfs_show_address(struct device *dev, 2635 struct device_attribute *attr, 2636 char *buf) 2637 { 2638 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2639 2640 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); 2641 } 2642 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); 2643 2644 static struct attribute *nvme_dev_attrs[] = { 2645 &dev_attr_reset_controller.attr, 2646 &dev_attr_rescan_controller.attr, 2647 &dev_attr_model.attr, 2648 &dev_attr_serial.attr, 2649 &dev_attr_firmware_rev.attr, 2650 &dev_attr_cntlid.attr, 2651 &dev_attr_delete_controller.attr, 2652 &dev_attr_transport.attr, 2653 &dev_attr_subsysnqn.attr, 2654 &dev_attr_address.attr, 2655 &dev_attr_state.attr, 2656 NULL 2657 }; 2658 2659 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, 2660 struct attribute *a, int n) 2661 { 2662 struct device *dev = container_of(kobj, struct device, kobj); 2663 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2664 2665 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl) 2666 return 0; 2667 if (a == &dev_attr_address.attr && !ctrl->ops->get_address) 2668 return 0; 2669 2670 return a->mode; 2671 } 2672 2673 static struct attribute_group nvme_dev_attrs_group = { 2674 .attrs = nvme_dev_attrs, 2675 .is_visible = nvme_dev_attrs_are_visible, 2676 }; 
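/*
 * Illustrative sketch, not part of the driver: how nvme_configure_apst()
 * above packs one Autonomous Power State Transition table entry.  Each
 * 64-bit entry (stored little-endian by the driver) carries the Idle
 * Transition Power State in bits 7:3 and the Idle Time Prior to Transition,
 * in milliseconds, in bits 31:8.  The "+ 19; do_div(..., 20)" arithmetic
 * rounds a microsecond entry+exit latency up to a millisecond idle time that
 * is 50 times that latency.  The power state and latencies below are made
 * up for the example; this is standalone userspace code.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t apst_entry(unsigned int state, uint64_t total_latency_us)
{
	/* 50 * total latency, converted from microseconds to milliseconds */
	uint64_t transition_ms = (total_latency_us + 19) / 20;

	if (transition_ms > (1 << 24) - 1)	/* ITPT is a 24-bit field */
		transition_ms = (1 << 24) - 1;

	return ((uint64_t)state << 3) | (transition_ms << 8);
}

int main(void)
{
	/* hypothetical PS4 with 2200us entry latency and 3000us exit latency */
	printf("APST entry: %#llx\n",
	       (unsigned long long)apst_entry(4, 2200 + 3000));
	return 0;
}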
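/*
 * Illustrative sketch, not part of the driver: the quirk-table comparison in
 * string_matches() above accepts a NUL-terminated needle against a
 * fixed-width, space-padded Identify string -- the needle must match the
 * prefix and everything after it must be space padding.  This is a
 * standalone userspace rendition with an invented helper name and data.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static bool matches_padded(const char *idstr, const char *match, size_t len)
{
	size_t matchlen;

	if (!match)
		return true;			/* no constraint on this field */

	matchlen = strlen(match);
	if (matchlen > len || memcmp(idstr, match, matchlen))
		return false;

	for (; matchlen < len; matchlen++)	/* the rest must be spaces */
		if (idstr[matchlen] != ' ')
			return false;
	return true;
}

int main(void)
{
	char mn[40];

	memset(mn, ' ', sizeof(mn));
	memcpy(mn, "THNSF5256GPUK TOSHIBA", 21);

	/* full model matches, a bare prefix does not */
	printf("%d %d\n",
	       matches_padded(mn, "THNSF5256GPUK TOSHIBA", sizeof(mn)),
	       matches_padded(mn, "THNSF5256GPUK", sizeof(mn)));
	return 0;
}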
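/*
 * Illustrative sketch, not part of the driver: driving the controller
 * character device handled by nvme_dev_ioctl() above from userspace.
 * nvme_dev_open() only admits a LIVE controller, and NVME_IOCTL_RESCAN
 * simply queues scan_work.  The device path is an assumption for the
 * example and error handling is minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	int fd = open("/dev/nvme0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/nvme0");	/* EWOULDBLOCK if not live */
		return 1;
	}
	if (ioctl(fd, NVME_IOCTL_RESCAN) < 0)	/* request a namespace rescan */
		perror("NVME_IOCTL_RESCAN");
	close(fd);
	return 0;
}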
2677 2678 static const struct attribute_group *nvme_dev_attr_groups[] = { 2679 &nvme_dev_attrs_group, 2680 NULL, 2681 }; 2682 2683 static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys, 2684 unsigned nsid) 2685 { 2686 struct nvme_ns_head *h; 2687 2688 lockdep_assert_held(&subsys->lock); 2689 2690 list_for_each_entry(h, &subsys->nsheads, entry) { 2691 if (h->ns_id == nsid && kref_get_unless_zero(&h->ref)) 2692 return h; 2693 } 2694 2695 return NULL; 2696 } 2697 2698 static int __nvme_check_ids(struct nvme_subsystem *subsys, 2699 struct nvme_ns_head *new) 2700 { 2701 struct nvme_ns_head *h; 2702 2703 lockdep_assert_held(&subsys->lock); 2704 2705 list_for_each_entry(h, &subsys->nsheads, entry) { 2706 if (nvme_ns_ids_valid(&new->ids) && 2707 nvme_ns_ids_equal(&new->ids, &h->ids)) 2708 return -EINVAL; 2709 } 2710 2711 return 0; 2712 } 2713 2714 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, 2715 unsigned nsid, struct nvme_id_ns *id) 2716 { 2717 struct nvme_ns_head *head; 2718 int ret = -ENOMEM; 2719 2720 head = kzalloc(sizeof(*head), GFP_KERNEL); 2721 if (!head) 2722 goto out; 2723 ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL); 2724 if (ret < 0) 2725 goto out_free_head; 2726 head->instance = ret; 2727 INIT_LIST_HEAD(&head->list); 2728 init_srcu_struct(&head->srcu); 2729 head->subsys = ctrl->subsys; 2730 head->ns_id = nsid; 2731 kref_init(&head->ref); 2732 2733 nvme_report_ns_ids(ctrl, nsid, id, &head->ids); 2734 2735 ret = __nvme_check_ids(ctrl->subsys, head); 2736 if (ret) { 2737 dev_err(ctrl->device, 2738 "duplicate IDs for nsid %d\n", nsid); 2739 goto out_cleanup_srcu; 2740 } 2741 2742 ret = nvme_mpath_alloc_disk(ctrl, head); 2743 if (ret) 2744 goto out_cleanup_srcu; 2745 2746 list_add_tail(&head->entry, &ctrl->subsys->nsheads); 2747 return head; 2748 out_cleanup_srcu: 2749 cleanup_srcu_struct(&head->srcu); 2750 ida_simple_remove(&ctrl->subsys->ns_ida, head->instance); 2751 out_free_head: 2752 kfree(head); 2753 out: 2754 return ERR_PTR(ret); 2755 } 2756 2757 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, 2758 struct nvme_id_ns *id, bool *new) 2759 { 2760 struct nvme_ctrl *ctrl = ns->ctrl; 2761 bool is_shared = id->nmic & (1 << 0); 2762 struct nvme_ns_head *head = NULL; 2763 int ret = 0; 2764 2765 mutex_lock(&ctrl->subsys->lock); 2766 if (is_shared) 2767 head = __nvme_find_ns_head(ctrl->subsys, nsid); 2768 if (!head) { 2769 head = nvme_alloc_ns_head(ctrl, nsid, id); 2770 if (IS_ERR(head)) { 2771 ret = PTR_ERR(head); 2772 goto out_unlock; 2773 } 2774 2775 *new = true; 2776 } else { 2777 struct nvme_ns_ids ids; 2778 2779 nvme_report_ns_ids(ctrl, nsid, id, &ids); 2780 if (!nvme_ns_ids_equal(&head->ids, &ids)) { 2781 dev_err(ctrl->device, 2782 "IDs don't match for shared namespace %d\n", 2783 nsid); 2784 ret = -EINVAL; 2785 goto out_unlock; 2786 } 2787 2788 *new = false; 2789 } 2790 2791 list_add_tail(&ns->siblings, &head->list); 2792 ns->head = head; 2793 2794 out_unlock: 2795 mutex_unlock(&ctrl->subsys->lock); 2796 return ret; 2797 } 2798 2799 static int ns_cmp(void *priv, struct list_head *a, struct list_head *b) 2800 { 2801 struct nvme_ns *nsa = container_of(a, struct nvme_ns, list); 2802 struct nvme_ns *nsb = container_of(b, struct nvme_ns, list); 2803 2804 return nsa->head->ns_id - nsb->head->ns_id; 2805 } 2806 2807 static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) 2808 { 2809 struct nvme_ns *ns, *ret = NULL; 2810 2811 mutex_lock(&ctrl->namespaces_mutex); 2812 
list_for_each_entry(ns, &ctrl->namespaces, list) { 2813 if (ns->head->ns_id == nsid) { 2814 if (!kref_get_unless_zero(&ns->kref)) 2815 continue; 2816 ret = ns; 2817 break; 2818 } 2819 if (ns->head->ns_id > nsid) 2820 break; 2821 } 2822 mutex_unlock(&ctrl->namespaces_mutex); 2823 return ret; 2824 } 2825 2826 static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns) 2827 { 2828 struct streams_directive_params s; 2829 int ret; 2830 2831 if (!ctrl->nr_streams) 2832 return 0; 2833 2834 ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id); 2835 if (ret) 2836 return ret; 2837 2838 ns->sws = le32_to_cpu(s.sws); 2839 ns->sgs = le16_to_cpu(s.sgs); 2840 2841 if (ns->sws) { 2842 unsigned int bs = 1 << ns->lba_shift; 2843 2844 blk_queue_io_min(ns->queue, bs * ns->sws); 2845 if (ns->sgs) 2846 blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs); 2847 } 2848 2849 return 0; 2850 } 2851 2852 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) 2853 { 2854 struct nvme_ns *ns; 2855 struct gendisk *disk; 2856 struct nvme_id_ns *id; 2857 char disk_name[DISK_NAME_LEN]; 2858 int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT; 2859 bool new = true; 2860 2861 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); 2862 if (!ns) 2863 return; 2864 2865 ns->queue = blk_mq_init_queue(ctrl->tagset); 2866 if (IS_ERR(ns->queue)) 2867 goto out_free_ns; 2868 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); 2869 ns->queue->queuedata = ns; 2870 ns->ctrl = ctrl; 2871 2872 kref_init(&ns->kref); 2873 ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */ 2874 2875 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); 2876 nvme_set_queue_limits(ctrl, ns->queue); 2877 2878 id = nvme_identify_ns(ctrl, nsid); 2879 if (!id) 2880 goto out_free_queue; 2881 2882 if (id->ncap == 0) 2883 goto out_free_id; 2884 2885 if (nvme_init_ns_head(ns, nsid, id, &new)) 2886 goto out_free_id; 2887 nvme_setup_streams_ns(ctrl, ns); 2888 2889 #ifdef CONFIG_NVME_MULTIPATH 2890 /* 2891 * If multipathing is enabled we need to always use the subsystem 2892 * instance number for numbering our devices to avoid conflicts 2893 * between subsystems that have multiple controllers and thus use 2894 * the multipath-aware subsystem node and those that have a single 2895 * controller and use the controller node directly. 2896 */ 2897 if (ns->head->disk) { 2898 sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, 2899 ctrl->cntlid, ns->head->instance); 2900 flags = GENHD_FL_HIDDEN; 2901 } else { 2902 sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance, 2903 ns->head->instance); 2904 } 2905 #else 2906 /* 2907 * But without the multipath code enabled, multiple controller per 2908 * subsystems are visible as devices and thus we cannot use the 2909 * subsystem instance. 
2910 */ 2911 sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); 2912 #endif 2913 2914 if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) { 2915 if (nvme_nvm_register(ns, disk_name, node)) { 2916 dev_warn(ctrl->device, "LightNVM init failure\n"); 2917 goto out_unlink_ns; 2918 } 2919 } 2920 2921 disk = alloc_disk_node(0, node); 2922 if (!disk) 2923 goto out_unlink_ns; 2924 2925 disk->fops = &nvme_fops; 2926 disk->private_data = ns; 2927 disk->queue = ns->queue; 2928 disk->flags = flags; 2929 memcpy(disk->disk_name, disk_name, DISK_NAME_LEN); 2930 ns->disk = disk; 2931 2932 __nvme_revalidate_disk(disk, id); 2933 2934 mutex_lock(&ctrl->namespaces_mutex); 2935 list_add_tail(&ns->list, &ctrl->namespaces); 2936 mutex_unlock(&ctrl->namespaces_mutex); 2937 2938 nvme_get_ctrl(ctrl); 2939 2940 kfree(id); 2941 2942 device_add_disk(ctrl->device, ns->disk); 2943 if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj, 2944 &nvme_ns_id_attr_group)) 2945 pr_warn("%s: failed to create sysfs group for identification\n", 2946 ns->disk->disk_name); 2947 if (ns->ndev && nvme_nvm_register_sysfs(ns)) 2948 pr_warn("%s: failed to register lightnvm sysfs group for identification\n", 2949 ns->disk->disk_name); 2950 2951 if (new) 2952 nvme_mpath_add_disk(ns->head); 2953 nvme_mpath_add_disk_links(ns); 2954 return; 2955 out_unlink_ns: 2956 mutex_lock(&ctrl->subsys->lock); 2957 list_del_rcu(&ns->siblings); 2958 mutex_unlock(&ctrl->subsys->lock); 2959 out_free_id: 2960 kfree(id); 2961 out_free_queue: 2962 blk_cleanup_queue(ns->queue); 2963 out_free_ns: 2964 kfree(ns); 2965 } 2966 2967 static void nvme_ns_remove(struct nvme_ns *ns) 2968 { 2969 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) 2970 return; 2971 2972 if (ns->disk && ns->disk->flags & GENHD_FL_UP) { 2973 nvme_mpath_remove_disk_links(ns); 2974 sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, 2975 &nvme_ns_id_attr_group); 2976 if (ns->ndev) 2977 nvme_nvm_unregister_sysfs(ns); 2978 del_gendisk(ns->disk); 2979 blk_cleanup_queue(ns->queue); 2980 if (blk_get_integrity(ns->disk)) 2981 blk_integrity_unregister(ns->disk); 2982 } 2983 2984 mutex_lock(&ns->ctrl->subsys->lock); 2985 nvme_mpath_clear_current_path(ns); 2986 list_del_rcu(&ns->siblings); 2987 mutex_unlock(&ns->ctrl->subsys->lock); 2988 2989 mutex_lock(&ns->ctrl->namespaces_mutex); 2990 list_del_init(&ns->list); 2991 mutex_unlock(&ns->ctrl->namespaces_mutex); 2992 2993 synchronize_srcu(&ns->head->srcu); 2994 nvme_mpath_check_last_path(ns); 2995 nvme_put_ns(ns); 2996 } 2997 2998 static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid) 2999 { 3000 struct nvme_ns *ns; 3001 3002 ns = nvme_find_get_ns(ctrl, nsid); 3003 if (ns) { 3004 if (ns->disk && revalidate_disk(ns->disk)) 3005 nvme_ns_remove(ns); 3006 nvme_put_ns(ns); 3007 } else 3008 nvme_alloc_ns(ctrl, nsid); 3009 } 3010 3011 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 3012 unsigned nsid) 3013 { 3014 struct nvme_ns *ns, *next; 3015 3016 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { 3017 if (ns->head->ns_id > nsid) 3018 nvme_ns_remove(ns); 3019 } 3020 } 3021 3022 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn) 3023 { 3024 struct nvme_ns *ns; 3025 __le32 *ns_list; 3026 unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024); 3027 int ret = 0; 3028 3029 ns_list = kzalloc(0x1000, GFP_KERNEL); 3030 if (!ns_list) 3031 return -ENOMEM; 3032 3033 for (i = 0; i < num_lists; i++) { 3034 ret = nvme_identify_ns_list(ctrl, prev, ns_list); 3035 if (ret) 3036 goto free; 
3037 3038 for (j = 0; j < min(nn, 1024U); j++) { 3039 nsid = le32_to_cpu(ns_list[j]); 3040 if (!nsid) 3041 goto out; 3042 3043 nvme_validate_ns(ctrl, nsid); 3044 3045 while (++prev < nsid) { 3046 ns = nvme_find_get_ns(ctrl, prev); 3047 if (ns) { 3048 nvme_ns_remove(ns); 3049 nvme_put_ns(ns); 3050 } 3051 } 3052 } 3053 nn -= j; 3054 } 3055 out: 3056 nvme_remove_invalid_namespaces(ctrl, prev); 3057 free: 3058 kfree(ns_list); 3059 return ret; 3060 } 3061 3062 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn) 3063 { 3064 unsigned i; 3065 3066 for (i = 1; i <= nn; i++) 3067 nvme_validate_ns(ctrl, i); 3068 3069 nvme_remove_invalid_namespaces(ctrl, nn); 3070 } 3071 3072 static void nvme_scan_work(struct work_struct *work) 3073 { 3074 struct nvme_ctrl *ctrl = 3075 container_of(work, struct nvme_ctrl, scan_work); 3076 struct nvme_id_ctrl *id; 3077 unsigned nn; 3078 3079 if (ctrl->state != NVME_CTRL_LIVE) 3080 return; 3081 3082 if (nvme_identify_ctrl(ctrl, &id)) 3083 return; 3084 3085 nn = le32_to_cpu(id->nn); 3086 if (ctrl->vs >= NVME_VS(1, 1, 0) && 3087 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { 3088 if (!nvme_scan_ns_list(ctrl, nn)) 3089 goto done; 3090 } 3091 nvme_scan_ns_sequential(ctrl, nn); 3092 done: 3093 mutex_lock(&ctrl->namespaces_mutex); 3094 list_sort(NULL, &ctrl->namespaces, ns_cmp); 3095 mutex_unlock(&ctrl->namespaces_mutex); 3096 kfree(id); 3097 } 3098 3099 void nvme_queue_scan(struct nvme_ctrl *ctrl) 3100 { 3101 /* 3102 * Do not queue new scan work when a controller is reset during 3103 * removal. 3104 */ 3105 if (ctrl->state == NVME_CTRL_LIVE) 3106 queue_work(nvme_wq, &ctrl->scan_work); 3107 } 3108 EXPORT_SYMBOL_GPL(nvme_queue_scan); 3109 3110 /* 3111 * This function iterates the namespace list unlocked to allow recovery from 3112 * controller failure. It is up to the caller to ensure the namespace list is 3113 * not modified by scan work while this function is executing. 3114 */ 3115 void nvme_remove_namespaces(struct nvme_ctrl *ctrl) 3116 { 3117 struct nvme_ns *ns, *next; 3118 3119 /* 3120 * The dead state indicates the controller was not gracefully 3121 * disconnected. In that case, we won't be able to flush any data while 3122 * removing the namespaces' disks; fail all the queues now to avoid 3123 * potentially having to clean up the failed sync later.
3124 */ 3125 if (ctrl->state == NVME_CTRL_DEAD) 3126 nvme_kill_queues(ctrl); 3127 3128 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) 3129 nvme_ns_remove(ns); 3130 } 3131 EXPORT_SYMBOL_GPL(nvme_remove_namespaces); 3132 3133 static void nvme_aen_uevent(struct nvme_ctrl *ctrl) 3134 { 3135 char *envp[2] = { NULL, NULL }; 3136 u32 aen_result = ctrl->aen_result; 3137 3138 ctrl->aen_result = 0; 3139 if (!aen_result) 3140 return; 3141 3142 envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result); 3143 if (!envp[0]) 3144 return; 3145 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); 3146 kfree(envp[0]); 3147 } 3148 3149 static void nvme_async_event_work(struct work_struct *work) 3150 { 3151 struct nvme_ctrl *ctrl = 3152 container_of(work, struct nvme_ctrl, async_event_work); 3153 3154 nvme_aen_uevent(ctrl); 3155 ctrl->ops->submit_async_event(ctrl); 3156 } 3157 3158 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) 3159 { 3160 3161 u32 csts; 3162 3163 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) 3164 return false; 3165 3166 if (csts == ~0) 3167 return false; 3168 3169 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); 3170 } 3171 3172 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) 3173 { 3174 struct nvme_fw_slot_info_log *log; 3175 3176 log = kmalloc(sizeof(*log), GFP_KERNEL); 3177 if (!log) 3178 return; 3179 3180 if (nvme_get_log(ctrl, NVME_LOG_FW_SLOT, log, sizeof(*log))) 3181 dev_warn(ctrl->device, 3182 "Get FW SLOT INFO log error\n"); 3183 kfree(log); 3184 } 3185 3186 static void nvme_fw_act_work(struct work_struct *work) 3187 { 3188 struct nvme_ctrl *ctrl = container_of(work, 3189 struct nvme_ctrl, fw_act_work); 3190 unsigned long fw_act_timeout; 3191 3192 if (ctrl->mtfa) 3193 fw_act_timeout = jiffies + 3194 msecs_to_jiffies(ctrl->mtfa * 100); 3195 else 3196 fw_act_timeout = jiffies + 3197 msecs_to_jiffies(admin_timeout * 1000); 3198 3199 nvme_stop_queues(ctrl); 3200 while (nvme_ctrl_pp_status(ctrl)) { 3201 if (time_after(jiffies, fw_act_timeout)) { 3202 dev_warn(ctrl->device, 3203 "Fw activation timeout, reset controller\n"); 3204 nvme_reset_ctrl(ctrl); 3205 break; 3206 } 3207 msleep(100); 3208 } 3209 3210 if (ctrl->state != NVME_CTRL_LIVE) 3211 return; 3212 3213 nvme_start_queues(ctrl); 3214 /* read FW slot information to clear the AER */ 3215 nvme_get_fw_slot_info(ctrl); 3216 } 3217 3218 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, 3219 union nvme_result *res) 3220 { 3221 u32 result = le32_to_cpu(res->u32); 3222 3223 if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) 3224 return; 3225 3226 switch (result & 0x7) { 3227 case NVME_AER_ERROR: 3228 case NVME_AER_SMART: 3229 case NVME_AER_CSS: 3230 case NVME_AER_VS: 3231 ctrl->aen_result = result; 3232 break; 3233 default: 3234 break; 3235 } 3236 3237 switch (result & 0xff07) { 3238 case NVME_AER_NOTICE_NS_CHANGED: 3239 dev_info(ctrl->device, "rescanning\n"); 3240 nvme_queue_scan(ctrl); 3241 break; 3242 case NVME_AER_NOTICE_FW_ACT_STARTING: 3243 queue_work(nvme_wq, &ctrl->fw_act_work); 3244 break; 3245 default: 3246 dev_warn(ctrl->device, "async event result %08x\n", result); 3247 } 3248 queue_work(nvme_wq, &ctrl->async_event_work); 3249 } 3250 EXPORT_SYMBOL_GPL(nvme_complete_async_event); 3251 3252 void nvme_stop_ctrl(struct nvme_ctrl *ctrl) 3253 { 3254 nvme_stop_keep_alive(ctrl); 3255 flush_work(&ctrl->async_event_work); 3256 flush_work(&ctrl->scan_work); 3257 cancel_work_sync(&ctrl->fw_act_work); 3258 } 3259 EXPORT_SYMBOL_GPL(nvme_stop_ctrl); 
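/*
 * Illustrative sketch, not part of the driver: the shape of the data that
 * nvme_scan_ns_list() above walks.  An Identify Namespace List returns up
 * to 1024 little-endian NSIDs per 4KiB buffer, in ascending order, with a
 * zero entry terminating the list; any NSID missing between consecutive
 * entries is a namespace that went away and must be removed.  The buffer
 * below is fabricated and already in CPU byte order (the driver converts
 * each entry with le32_to_cpu()).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ns_list[1024] = { 1, 2, 5, 0 };	/* rest is zero */
	uint32_t prev = 0;

	for (unsigned int j = 0; j < 1024; j++) {
		uint32_t nsid = ns_list[j];

		if (!nsid)
			break;				/* end of list */
		printf("validate nsid %u\n", nsid);
		while (++prev < nsid)			/* gaps disappeared */
			printf("  nsid %u is gone, remove it\n", prev);
	}
	return 0;
}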
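/*
 * Illustrative sketch, not part of the driver: decoding the AER completion
 * dword that nvme_complete_async_event() above stores in ctrl->aen_result
 * and hands to udev as "NVME_AEN=0x...".  Per the NVMe specification the
 * low three bits carry the event type, bits 15:8 the event information and
 * bits 23:16 the associated log page; the 0xff07 mask above matches type
 * and information together (notice type 2 with information 0x00 for a
 * namespace change, 0x01 for firmware activation).  The sample value is
 * invented.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t aen = 0x00030102;	/* e.g. firmware activation starting */

	printf("type=%u info=%u log_page=0x%02x\n",
	       aen & 0x7, (aen >> 8) & 0xff, (aen >> 16) & 0xff);

	if ((aen & 0xff07) == 0x0002)
		printf("namespace attributes changed -> rescan\n");
	else if ((aen & 0xff07) == 0x0102)
		printf("firmware activation starting\n");
	return 0;
}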
3260 3261 void nvme_start_ctrl(struct nvme_ctrl *ctrl) 3262 { 3263 if (ctrl->kato) 3264 nvme_start_keep_alive(ctrl); 3265 3266 if (ctrl->queue_count > 1) { 3267 nvme_queue_scan(ctrl); 3268 queue_work(nvme_wq, &ctrl->async_event_work); 3269 nvme_start_queues(ctrl); 3270 } 3271 } 3272 EXPORT_SYMBOL_GPL(nvme_start_ctrl); 3273 3274 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) 3275 { 3276 cdev_device_del(&ctrl->cdev, ctrl->device); 3277 } 3278 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); 3279 3280 static void nvme_free_ctrl(struct device *dev) 3281 { 3282 struct nvme_ctrl *ctrl = 3283 container_of(dev, struct nvme_ctrl, ctrl_device); 3284 struct nvme_subsystem *subsys = ctrl->subsys; 3285 3286 ida_simple_remove(&nvme_instance_ida, ctrl->instance); 3287 kfree(ctrl->effects); 3288 3289 if (subsys) { 3290 mutex_lock(&subsys->lock); 3291 list_del(&ctrl->subsys_entry); 3292 mutex_unlock(&subsys->lock); 3293 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); 3294 } 3295 3296 ctrl->ops->free_ctrl(ctrl); 3297 3298 if (subsys) 3299 nvme_put_subsystem(subsys); 3300 } 3301 3302 /* 3303 * Initialize an NVMe controller structure. This needs to be called during 3304 * the earliest initialization so that we have the initialized structure around 3305 * during probing. 3306 */ 3307 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, 3308 const struct nvme_ctrl_ops *ops, unsigned long quirks) 3309 { 3310 int ret; 3311 3312 ctrl->state = NVME_CTRL_NEW; 3313 spin_lock_init(&ctrl->lock); 3314 INIT_LIST_HEAD(&ctrl->namespaces); 3315 mutex_init(&ctrl->namespaces_mutex); 3316 ctrl->dev = dev; 3317 ctrl->ops = ops; 3318 ctrl->quirks = quirks; 3319 INIT_WORK(&ctrl->scan_work, nvme_scan_work); 3320 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); 3321 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); 3322 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); 3323 3324 ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL); 3325 if (ret < 0) 3326 goto out; 3327 ctrl->instance = ret; 3328 3329 device_initialize(&ctrl->ctrl_device); 3330 ctrl->device = &ctrl->ctrl_device; 3331 ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance); 3332 ctrl->device->class = nvme_class; 3333 ctrl->device->parent = ctrl->dev; 3334 ctrl->device->groups = nvme_dev_attr_groups; 3335 ctrl->device->release = nvme_free_ctrl; 3336 dev_set_drvdata(ctrl->device, ctrl); 3337 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); 3338 if (ret) 3339 goto out_release_instance; 3340 3341 cdev_init(&ctrl->cdev, &nvme_dev_fops); 3342 ctrl->cdev.owner = ops->module; 3343 ret = cdev_device_add(&ctrl->cdev, ctrl->device); 3344 if (ret) 3345 goto out_free_name; 3346 3347 /* 3348 * Initialize latency tolerance controls. The sysfs files won't 3349 * be visible to userspace unless the device actually supports APST. 3350 */ 3351 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; 3352 dev_pm_qos_update_user_latency_tolerance(ctrl->device, 3353 min(default_ps_max_latency_us, (unsigned long)S32_MAX)); 3354 3355 return 0; 3356 out_free_name: 3357 kfree_const(ctrl->device->kobj.name); 3358 out_release_instance: 3359 ida_simple_remove(&nvme_instance_ida, ctrl->instance); 3360 out: 3361 return ret; 3362 } 3363 EXPORT_SYMBOL_GPL(nvme_init_ctrl); 3364 3365 /** 3366 * nvme_kill_queues(): Ends all namespace queues 3367 * @ctrl: the dead controller that needs to end 3368 * 3369 * Call this function when the driver determines it is unable to get the 3370 * controller in a state capable of servicing IO.
3371 */ 3372 void nvme_kill_queues(struct nvme_ctrl *ctrl) 3373 { 3374 struct nvme_ns *ns; 3375 3376 mutex_lock(&ctrl->namespaces_mutex); 3377 3378 /* Forcibly unquiesce queues to avoid blocking dispatch */ 3379 if (ctrl->admin_q) 3380 blk_mq_unquiesce_queue(ctrl->admin_q); 3381 3382 list_for_each_entry(ns, &ctrl->namespaces, list) { 3383 /* 3384 * Revalidating a dead namespace sets capacity to 0. This will 3385 * end buffered writers dirtying pages that can't be synced. 3386 */ 3387 if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags)) 3388 continue; 3389 revalidate_disk(ns->disk); 3390 blk_set_queue_dying(ns->queue); 3391 3392 /* Forcibly unquiesce queues to avoid blocking dispatch */ 3393 blk_mq_unquiesce_queue(ns->queue); 3394 } 3395 mutex_unlock(&ctrl->namespaces_mutex); 3396 } 3397 EXPORT_SYMBOL_GPL(nvme_kill_queues); 3398 3399 void nvme_unfreeze(struct nvme_ctrl *ctrl) 3400 { 3401 struct nvme_ns *ns; 3402 3403 mutex_lock(&ctrl->namespaces_mutex); 3404 list_for_each_entry(ns, &ctrl->namespaces, list) 3405 blk_mq_unfreeze_queue(ns->queue); 3406 mutex_unlock(&ctrl->namespaces_mutex); 3407 } 3408 EXPORT_SYMBOL_GPL(nvme_unfreeze); 3409 3410 void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) 3411 { 3412 struct nvme_ns *ns; 3413 3414 mutex_lock(&ctrl->namespaces_mutex); 3415 list_for_each_entry(ns, &ctrl->namespaces, list) { 3416 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); 3417 if (timeout <= 0) 3418 break; 3419 } 3420 mutex_unlock(&ctrl->namespaces_mutex); 3421 } 3422 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); 3423 3424 void nvme_wait_freeze(struct nvme_ctrl *ctrl) 3425 { 3426 struct nvme_ns *ns; 3427 3428 mutex_lock(&ctrl->namespaces_mutex); 3429 list_for_each_entry(ns, &ctrl->namespaces, list) 3430 blk_mq_freeze_queue_wait(ns->queue); 3431 mutex_unlock(&ctrl->namespaces_mutex); 3432 } 3433 EXPORT_SYMBOL_GPL(nvme_wait_freeze); 3434 3435 void nvme_start_freeze(struct nvme_ctrl *ctrl) 3436 { 3437 struct nvme_ns *ns; 3438 3439 mutex_lock(&ctrl->namespaces_mutex); 3440 list_for_each_entry(ns, &ctrl->namespaces, list) 3441 blk_freeze_queue_start(ns->queue); 3442 mutex_unlock(&ctrl->namespaces_mutex); 3443 } 3444 EXPORT_SYMBOL_GPL(nvme_start_freeze); 3445 3446 void nvme_stop_queues(struct nvme_ctrl *ctrl) 3447 { 3448 struct nvme_ns *ns; 3449 3450 mutex_lock(&ctrl->namespaces_mutex); 3451 list_for_each_entry(ns, &ctrl->namespaces, list) 3452 blk_mq_quiesce_queue(ns->queue); 3453 mutex_unlock(&ctrl->namespaces_mutex); 3454 } 3455 EXPORT_SYMBOL_GPL(nvme_stop_queues); 3456 3457 void nvme_start_queues(struct nvme_ctrl *ctrl) 3458 { 3459 struct nvme_ns *ns; 3460 3461 mutex_lock(&ctrl->namespaces_mutex); 3462 list_for_each_entry(ns, &ctrl->namespaces, list) 3463 blk_mq_unquiesce_queue(ns->queue); 3464 mutex_unlock(&ctrl->namespaces_mutex); 3465 } 3466 EXPORT_SYMBOL_GPL(nvme_start_queues); 3467 3468 int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set) 3469 { 3470 if (!ctrl->ops->reinit_request) 3471 return 0; 3472 3473 return blk_mq_tagset_iter(set, set->driver_data, 3474 ctrl->ops->reinit_request); 3475 } 3476 EXPORT_SYMBOL_GPL(nvme_reinit_tagset); 3477 3478 int __init nvme_core_init(void) 3479 { 3480 int result; 3481 3482 nvme_wq = alloc_workqueue("nvme-wq", 3483 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 3484 if (!nvme_wq) 3485 return -ENOMEM; 3486 3487 result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme"); 3488 if (result < 0) 3489 goto destroy_wq; 3490 3491 nvme_class = class_create(THIS_MODULE, "nvme"); 3492 if 
(IS_ERR(nvme_class)) { 3493 result = PTR_ERR(nvme_class); 3494 goto unregister_chrdev; 3495 } 3496 3497 nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem"); 3498 if (IS_ERR(nvme_subsys_class)) { 3499 result = PTR_ERR(nvme_subsys_class); 3500 goto destroy_class; 3501 } 3502 return 0; 3503 3504 destroy_class: 3505 class_destroy(nvme_class); 3506 unregister_chrdev: 3507 unregister_chrdev_region(nvme_chr_devt, NVME_MINORS); 3508 destroy_wq: 3509 destroy_workqueue(nvme_wq); 3510 return result; 3511 } 3512 3513 void nvme_core_exit(void) 3514 { 3515 ida_destroy(&nvme_subsystems_ida); 3516 class_destroy(nvme_subsys_class); 3517 class_destroy(nvme_class); 3518 unregister_chrdev_region(nvme_chr_devt, NVME_MINORS); 3519 destroy_workqueue(nvme_wq); 3520 } 3521 3522 MODULE_LICENSE("GPL"); 3523 MODULE_VERSION("1.0"); 3524 module_init(nvme_core_init); 3525 module_exit(nvme_core_exit); 3526
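/*
 * Illustrative sketch, not part of the driver: the per-controller knob
 * behind nvme_set_latency_tolerance() above.  When APST is supported the
 * driver exposes a PM QoS latency tolerance attribute for the controller
 * device; writing a value in microseconds re-runs nvme_configure_apst()
 * with the new budget, and the special strings "auto" and "any" appear to
 * map to the NO_CONSTRAINT/ANY cases handled above.  The sysfs path below
 * is an assumption for the example (the usual layout under the controller's
 * power/ directory); standalone userspace code.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/class/nvme/nvme0/power/pm_qos_latency_tolerance_us";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "25000\n");	/* allow at most 25ms of APST wakeup latency */
	fclose(f);
	return 0;
}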
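/*
 * Illustrative sketch, not from any transport driver: the intended pairing
 * of the freeze/quiesce helpers exported above.  A transport that needs to
 * tear down and rebuild its I/O queues typically freezes request allocation
 * first, lets in-flight I/O drain, quiesces dispatch, reprograms the
 * hardware queues, then restarts dispatch and releases the freeze.  The
 * function name and timeout below are invented for the sketch.
 */
static void example_reset_io_queues(struct nvme_ctrl *ctrl)
{
	nvme_start_freeze(ctrl);		/* stop new requests entering */
	nvme_wait_freeze_timeout(ctrl, 5 * HZ);	/* let in-flight I/O drain */
	nvme_stop_queues(ctrl);			/* quiesce dispatch */

	/* ... tear down and re-create the transport's I/O queues here ... */

	nvme_start_queues(ctrl);		/* resume dispatch */
	nvme_unfreeze(ctrl);			/* allow new requests again */
}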