// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define NVME_MINORS		(1U << MINORBITS)

struct nvme_ns_info {
	struct nvme_ns_ids ids;
	u32 nsid;
	__le32 anagrpid;
	bool is_shared;
	bool is_readonly;
	bool is_ready;
	bool is_removed;
};

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static unsigned long apst_primary_timeout_ms = 100;
module_param(apst_primary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_primary_timeout_ms,
	"primary APST timeout in ms");

static unsigned long apst_secondary_timeout_ms = 2000;
module_param(apst_secondary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_timeout_ms,
	"secondary APST timeout in ms");

static unsigned long apst_primary_latency_tol_us = 15000;
module_param(apst_primary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_primary_latency_tol_us,
	"primary APST latency tolerance in us");

static unsigned long apst_secondary_latency_tol_us = 100000;
module_param(apst_secondary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_latency_tol_us,
	"secondary APST latency tolerance in us");

/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive, periodic reconnects etc. nvme_reset_wq
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq hosts controller deletion
 * works which flush reset works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_ctrl_base_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static DEFINE_IDA(nvme_ns_chr_minor_ida);
static dev_t nvme_ns_chr_devt;
static struct class *nvme_ns_chr_class;

static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd);

void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only queue new scan work when admin and IO queues are both alive
	 */
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
}

/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);

static void nvme_failfast_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, failfast_work);

	if (ctrl->state != NVME_CTRL_CONNECTING)
		return;

	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	dev_info(ctrl->device, "failfast expired\n");
	nvme_kick_requeue_lists(ctrl);
}

static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
		return;

	schedule_delayed_work(&ctrl->failfast_work,
			      ctrl->opts->fast_io_fail_tmo * HZ);
}

static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts)
		return;

	cancel_delayed_work_sync(&ctrl->failfast_work);
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
}


int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE)
			ret = -ENETRESET;
	}

	return ret;
}

static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
}

static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
			container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	/*
	 * Keep a reference until nvme_do_delete_ctrl() completes,
	 * since ->delete_ctrl can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}

static blk_status_t nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_RESV_CONFLICT;
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case NVME_SC_ZONE_TOO_MANY_OPEN:
		return BLK_STS_ZONE_OPEN_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}

static void nvme_retry_req(struct request *req)
{
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
	if (crd)
		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}

static void nvme_log_error(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	struct nvme_request *nr = nvme_req(req);

	if (ns) {
		pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
		       ns->disk ? ns->disk->disk_name : "?",
		       nvme_get_opcode_str(nr->cmd->common.opcode),
		       nr->cmd->common.opcode,
		       (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)),
		       (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
		       nvme_get_error_status_str(nr->status),
		       nr->status >> 8 & 7,	/* Status Code Type */
		       nr->status & 0xff,	/* Status Code */
		       nr->status & NVME_SC_MORE ? "MORE " : "",
		       nr->status & NVME_SC_DNR ? "DNR " : "");
		return;
	}

	pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n",
			   dev_name(nr->ctrl->device),
			   nvme_get_admin_opcode_str(nr->cmd->common.opcode),
			   nr->cmd->common.opcode,
			   nvme_get_error_status_str(nr->status),
			   nr->status >> 8 & 7,	/* Status Code Type */
			   nr->status & 0xff,	/* Status Code */
			   nr->status & NVME_SC_MORE ? "MORE " : "",
			   nr->status & NVME_SC_DNR ? "DNR " : "");
"DNR " : ""); 338 } 339 340 enum nvme_disposition { 341 COMPLETE, 342 RETRY, 343 FAILOVER, 344 AUTHENTICATE, 345 }; 346 347 static inline enum nvme_disposition nvme_decide_disposition(struct request *req) 348 { 349 if (likely(nvme_req(req)->status == 0)) 350 return COMPLETE; 351 352 if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED) 353 return AUTHENTICATE; 354 355 if (blk_noretry_request(req) || 356 (nvme_req(req)->status & NVME_SC_DNR) || 357 nvme_req(req)->retries >= nvme_max_retries) 358 return COMPLETE; 359 360 if (req->cmd_flags & REQ_NVME_MPATH) { 361 if (nvme_is_path_error(nvme_req(req)->status) || 362 blk_queue_dying(req->q)) 363 return FAILOVER; 364 } else { 365 if (blk_queue_dying(req->q)) 366 return COMPLETE; 367 } 368 369 return RETRY; 370 } 371 372 static inline void nvme_end_req_zoned(struct request *req) 373 { 374 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && 375 req_op(req) == REQ_OP_ZONE_APPEND) 376 req->__sector = nvme_lba_to_sect(req->q->queuedata, 377 le64_to_cpu(nvme_req(req)->result.u64)); 378 } 379 380 static inline void nvme_end_req(struct request *req) 381 { 382 blk_status_t status = nvme_error_status(nvme_req(req)->status); 383 384 if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) 385 nvme_log_error(req); 386 nvme_end_req_zoned(req); 387 nvme_trace_bio_complete(req); 388 if (req->cmd_flags & REQ_NVME_MPATH) 389 nvme_mpath_end_request(req); 390 blk_mq_end_request(req, status); 391 } 392 393 void nvme_complete_rq(struct request *req) 394 { 395 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; 396 397 trace_nvme_complete_rq(req); 398 nvme_cleanup_cmd(req); 399 400 /* 401 * Completions of long-running commands should not be able to 402 * defer sending of periodic keep alives, since the controller 403 * may have completed processing such commands a long time ago 404 * (arbitrarily close to command submission time). 405 * req->deadline - req->timeout is the command submission time 406 * in jiffies. 407 */ 408 if (ctrl->kas && 409 req->deadline - req->timeout >= ctrl->ka_last_check_time) 410 ctrl->comp_seen = true; 411 412 switch (nvme_decide_disposition(req)) { 413 case COMPLETE: 414 nvme_end_req(req); 415 return; 416 case RETRY: 417 nvme_retry_req(req); 418 return; 419 case FAILOVER: 420 nvme_failover_req(req); 421 return; 422 case AUTHENTICATE: 423 #ifdef CONFIG_NVME_AUTH 424 queue_work(nvme_wq, &ctrl->dhchap_auth_work); 425 nvme_retry_req(req); 426 #else 427 nvme_end_req(req); 428 #endif 429 return; 430 } 431 } 432 EXPORT_SYMBOL_GPL(nvme_complete_rq); 433 434 void nvme_complete_batch_req(struct request *req) 435 { 436 trace_nvme_complete_rq(req); 437 nvme_cleanup_cmd(req); 438 nvme_end_req_zoned(req); 439 } 440 EXPORT_SYMBOL_GPL(nvme_complete_batch_req); 441 442 /* 443 * Called to unwind from ->queue_rq on a failed command submission so that the 444 * multipathing code gets called to potentially failover to another path. 445 * The caller needs to unwind all transport specific resource allocations and 446 * must return propagate the return value. 
 */
blk_status_t nvme_host_path_error(struct request *req)
{
	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
	blk_mq_set_request_complete(req);
	nvme_complete_rq(req);
	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvme_host_path_error);

bool nvme_cancel_request(struct request *req, void *data)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	/* don't abort one completed or idle request */
	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT)
		return true;

	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
	blk_mq_complete_request(req);
	return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->tagset) {
		blk_mq_tagset_busy_iter(ctrl->tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_tagset);

void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->admin_tagset) {
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);

bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING_NOIO:
		switch (old_state) {
		case NVME_CTRL_DELETING:
		case NVME_CTRL_DEAD:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed) {
		ctrl->state = new_state;
		wake_up_all(&ctrl->state_wq);
	}

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (!changed)
		return false;

	if (ctrl->state == NVME_CTRL_LIVE) {
		if (old_state == NVME_CTRL_CONNECTING)
			nvme_stop_failfast_work(ctrl);
		nvme_kick_requeue_lists(ctrl);
	} else if (ctrl->state == NVME_CTRL_CONNECTING &&
		old_state == NVME_CTRL_RESETTING) {
		nvme_start_failfast_work(ctrl);
	}
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

/*
 * Returns true for sink states that can't ever transition back to live.
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);

static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_free(&head->subsys->ns_ida, head->instance);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

bool nvme_tryget_ns_head(struct nvme_ns_head *head)
{
	return kref_get_unless_zero(&head->ref);
}

void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static inline bool nvme_get_ns(struct nvme_ns *ns)
{
	return kref_get_unless_zero(&ns->kref);
}

void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);

static inline void nvme_clear_nvme_request(struct request *req)
{
	nvme_req(req)->status = 0;
	nvme_req(req)->retries = 0;
	nvme_req(req)->flags = 0;
	req->rq_flags |= RQF_DONTPREP;
}

/* initialize a passthrough request */
void nvme_init_request(struct request *req, struct nvme_command *cmd)
{
	if (req->q->queuedata)
		req->timeout = NVME_IO_TIMEOUT;
	else /* no queuedata implies admin queue */
		req->timeout = NVME_ADMIN_TIMEOUT;

	/* passthru commands should let the driver set the SGL flags */
	cmd->common.flags &= ~NVME_CMD_SGL_ALL;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	if (req->mq_hctx->type == HCTX_TYPE_POLL)
		req->cmd_flags |= REQ_POLLED;
	nvme_clear_nvme_request(req);
	req->rq_flags |= RQF_QUIET;
	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}
EXPORT_SYMBOL_GPL(nvme_init_request);

/*
 * For something we're not in a state to send to the device the default action
 * is to busy it and retry it after the controller state is recovered. However,
 * if the controller is deleting or if anything is marked for failfast or
 * nvme multipath it is immediately failed.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
	    ctrl->state != NVME_CTRL_DELETING &&
	    ctrl->state != NVME_CTRL_DEAD &&
	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;
	return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);

bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	struct nvme_request *req = nvme_req(rq);

	/*
	 * Currently we have a problem sending passthru commands
	 * on the admin_q if the controller is not LIVE because we can't
	 * make sure that they are going out after the admin connect,
	 * controller enable and/or other commands in the initialization
	 * sequence. Until the controller is LIVE, fail with
	 * BLK_STS_RESOURCE so that they will be rescheduled.
	 */
	if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
		return false;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * Only allow commands on a live queue, except for the connect
		 * command, which is required to set the queue live in the
		 * appropriate states.
		 */
		switch (ctrl->state) {
		case NVME_CTRL_CONNECTING:
			if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
			    (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
			     req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
			     req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
				return true;
			break;
		default:
			break;
		case NVME_CTRL_DEAD:
			return false;
		}
	}

	return queue_live;
}
EXPORT_SYMBOL_GPL(__nvme_check_ready);

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}

static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for maximum
	 * number of segments to prevent device reading beyond end of buffer.
	 */
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
	if (!range) {
		/*
		 * If we fail to allocate our range, fall back to the
		 * controller discard page. If that's also busy, it's safe to
		 * return busy, as we know we can make progress once that's
		 * freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}

	if (queue_max_discard_segments(req->q) == 1) {
		u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
		u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);

		range[0].cattr = cpu_to_le32(0);
		range[0].nlb = cpu_to_le32(nlb);
		range[0].slba = cpu_to_le64(slba);
		n = 1;
	} else {
		__rq_for_each_bio(bio, req) {
			u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
			u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

			if (n < segments) {
				range[n].cattr = cpu_to_le32(0);
				range[n].nlb = cpu_to_le32(nlb);
				range[n].slba = cpu_to_le64(slba);
			}
			n++;
		}
	}

	if (WARN_ON_ONCE(n != segments)) {
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
		return BLK_STS_IOERR;
	}

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	bvec_set_virt(&req->special_vec, range, alloc_size);
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}

static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
			     struct request *req)
{
	u32 upper, lower;
	u64 ref48;

	/* both rw and write zeroes share the same reftag format */
	switch (ns->guard_type) {
	case NVME_NVM_NS_16B_GUARD:
		cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
		break;
	case NVME_NVM_NS_64B_GUARD:
		ref48 = ext_pi_ref_tag(req);
		lower = lower_32_bits(ref48);
		upper = upper_32_bits(ref48);

		cmnd->rw.reftag = cpu_to_le32(lower);
		cmnd->rw.cdw3 = cpu_to_le32(upper);
		break;
	default:
		break;
	}
}

static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));

	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (!(req->cmd_flags & REQ_NOUNMAP) && (ns->features & NVME_NS_DEAC))
		cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC);

	if (nvme_ns_has_pi(ns)) {
		cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			nvme_set_ref_tag(ns, cmnd, req);
			break;
		}
	}

	return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_opcode op)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd->rw.opcode = op;
	cmnd->rw.flags = 0;
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.cdw2 = 0;
	cmnd->rw.cdw3 = 0;
	cmnd->rw.metadata = 0;
	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
	cmnd->rw.reftag = 0;
	cmnd->rw.apptag = 0;
	cmnd->rw.appmask = 0;

	if (ns->ms) {
		/*
		 * If formatted with metadata, the block layer always provides a
		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
		 * we enable the PRACT bit for protection information or set the
		 * namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			if (op == nvme_cmd_zone_append)
				control |= NVME_RW_APPEND_PIREMAP;
			nvme_set_ref_tag(ns, cmnd, req);
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return 0;
}

void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;

		if (req->special_vec.bv_page == ctrl->discard_page)
			clear_bit_unlock(0, &ctrl->discard_page_busy);
		else
			kfree(bvec_virt(&req->special_vec));
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
{
	struct nvme_command *cmd = nvme_req(req)->cmd;
	blk_status_t ret = BLK_STS_OK;

	if (!(req->rq_flags & RQF_DONTPREP))
		nvme_clear_nvme_request(req);

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		/* these are setup prior to execution in nvme_init_request() */
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_RESET:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
		break;
	case REQ_OP_WRITE_ZEROES:
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
		break;
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
		break;
	case REQ_OP_ZONE_APPEND:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = nvme_cid(req);
	trace_nvme_setup_cmd(req, cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

/*
 * Return values:
 *   0: success
 *  >0: nvme controller's cqe status response
 *  <0: kernel error in lieu of controller response
 */
int nvme_execute_rq(struct request *rq, bool at_head)
{
	blk_status_t status;

	status = blk_execute_rq(rq, at_head);
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		return -EINTR;
	if (nvme_req(rq)->status)
		return nvme_req(rq)->status;
	return blk_status_to_errno(status);
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		int qid, int at_head, blk_mq_req_flags_t flags)
{
	struct request *req;
	int ret;

	if (qid == NVME_QID_ANY)
		req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
	else
		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
						qid - 1);

	if (IS_ERR(req))
		return PTR_ERR(req);
	nvme_init_request(req, cmd);

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	ret = nvme_execute_rq(req, at_head);
	if (result && ret >= 0)
		*result = nvme_req(req)->result;
out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn_once(ctrl->device,
				"IO command:%02x has unusual effects:%08x\n",
				opcode, effects);

		/*
		 * NVME_CMD_EFFECTS_CSE_MASK causes a freeze of all I/O queues,
		 * which would deadlock when done on an I/O command.  Note that
		 * we already warn about an unusual effect above.
		 */
		effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
	} else {
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	}

	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);

u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = nvme_command_effects(ctrl, ns, opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU);

void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
		       struct nvme_command *cmd, int status)
{
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC) {
		if (!test_and_set_bit(NVME_CTRL_DIRTY_CAPABILITY,
				      &ctrl->flags)) {
			dev_info(ctrl->device,
				 "controller capabilities changed, reset may be required to take effect.\n");
		}
	}
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
		nvme_queue_scan(ctrl);
		flush_work(&ctrl->scan_work);
	}
	if (ns)
		return;

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
		case NVME_FEAT_KATO:
			/*
			 * The keep alive command interval on the host should
			 * be updated when KATO is modified by Set Features
			 * commands.
			 */
			if (!status)
				nvme_update_keep_alive(ctrl, cmd);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);

/*
 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
 *
 * The host should send Keep Alive commands at half of the Keep Alive Timeout
 * accounting for transport roundtrip times [..].
 */
static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
{
	unsigned long delay = ctrl->kato * HZ / 2;

	/*
	 * When using Traffic Based Keep Alive, we need to run
	 * nvme_keep_alive_work at twice the normal frequency, as one
	 * command completion can postpone sending a keep alive command
	 * by up to twice the delay between runs.
	 */
	if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
		delay /= 2;
	return delay;
}

static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{
	queue_delayed_work(nvme_wq, &ctrl->ka_work,
			   nvme_keep_alive_work_period(ctrl));
}

static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
						 blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
	unsigned long flags;
	bool startka = false;
	unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
	unsigned long delay = nvme_keep_alive_work_period(ctrl);

	/*
	 * Subtract off the keepalive RTT so nvme_keep_alive_work runs
	 * at the desired frequency.
	 */
	if (rtt <= delay) {
		delay -= rtt;
	} else {
		dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
			 jiffies_to_msecs(rtt));
		delay = 0;
	}

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
			status);
		return RQ_END_IO_NONE;
	}

	ctrl->ka_last_check_time = jiffies;
	ctrl->comp_seen = false;
	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
		queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
	return RQ_END_IO_NONE;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
	bool comp_seen = ctrl->comp_seen;
	struct request *rq;

	ctrl->ka_last_check_time = jiffies;

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
		nvme_queue_keep_alive_work(ctrl);
		return;
	}

	rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
				  BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
		nvme_reset_ctrl(ctrl);
		return;
	}
	nvme_init_request(rq, &ctrl->ka_cmd);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io = nvme_keep_alive_end_io;
	rq->end_io_data = ctrl;
	blk_execute_rq_nowait(rq, false);
}

static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	nvme_queue_keep_alive_work(ctrl);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd)
{
	unsigned int new_kato =
		DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);

	dev_info(ctrl->device,
		 "keep alive interval updated from %u ms to %u ms\n",
		 ctrl->kato * 1000 / 2, new_kato * 1000 / 2);

	nvme_stop_keep_alive(ctrl);
	ctrl->kato = new_kato;
	nvme_start_keep_alive(ctrl);
}

/*
 * In NVMe 1.0 the CNS field was just a binary controller or namespace
 * flag, thus sending any new CNS opcodes has a big chance of not working.
 * Qemu unfortunately had that bug after reporting a 1.1 version compliance
 * (but not for any later version).
 */
static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
{
	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
		return ctrl->vs < NVME_VS(1, 2, 0);
	return ctrl->vs < NVME_VS(1, 1, 0);
}

static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
		struct nvme_ns_id_desc *cur, bool *csi_seen)
{
	const char *warn_str = "ctrl returned bogus length:";
	void *data = cur;

	switch (cur->nidt) {
	case NVME_NIDT_EUI64:
		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_EUI64_LEN;
		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
		return NVME_NIDT_EUI64_LEN;
	case NVME_NIDT_NGUID:
		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_NGUID_LEN;
		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
		return NVME_NIDT_NGUID_LEN;
	case NVME_NIDT_UUID:
		if (cur->nidl != NVME_NIDT_UUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_UUID_LEN;
		uuid_copy(&ids->uuid, data + sizeof(*cur));
		return NVME_NIDT_UUID_LEN;
	case NVME_NIDT_CSI:
		if (cur->nidl != NVME_NIDT_CSI_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
		*csi_seen = true;
		return NVME_NIDT_CSI_LEN;
	default:
		/* Skip unknown types */
		return cur->nidl;
	}
}

static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{
	struct nvme_command c = { };
	bool csi_seen = false;
	int status, pos, len;
	void *data;

	if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
		return 0;
	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
		return 0;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(info->nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status) {
		dev_warn(ctrl->device,
			"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
			info->nsid, status);
		goto free_data;
	}

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
		if (len < 0)
			break;

		len += sizeof(*cur);
	}

	if (nvme_multi_css(ctrl) && !csi_seen) {
		dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
			 info->nsid);
		status = -EINVAL;
	}

free_data:
	kfree(data);
	return status;
}

static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
			struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	*id = kmalloc(sizeof(**id), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
		kfree(*id);
	}
	return error;
}

static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{
	struct nvme_ns_ids *ids = &info->ids;
	struct nvme_id_ns *id;
	int ret;

	ret = nvme_identify_ns(ctrl, info->nsid, &id);
	if (ret)
		return ret;

	if (id->ncap == 0) {
		/* namespace not allocated or attached */
		info->is_removed = true;
		return -ENODEV;
	}

	info->anagrpid = id->anagrpid;
	info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
	info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
	info->is_ready = true;
	if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
		dev_info(ctrl->device,
			 "Ignoring bogus Namespace Identifiers\n");
	} else {
		if (ctrl->vs >= NVME_VS(1, 1, 0) &&
		    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			memcpy(ids->eui64, id->eui64, sizeof(ids->eui64));
		if (ctrl->vs >= NVME_VS(1, 2, 0) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
	}
	kfree(id);
	return 0;
}

static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{
	struct nvme_id_ns_cs_indep *id;
	struct nvme_command c = {
		.identify.opcode	= nvme_admin_identify,
		.identify.nsid		= cpu_to_le32(info->nsid),
		.identify.cns		= NVME_ID_CNS_NS_CS_INDEP,
	};
	int ret;

	id = kmalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
	if (!ret) {
		info->anagrpid = id->anagrpid;
		info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
		info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
		info->is_ready = id->nstat & NVME_NSTAT_NRDY;
	}
	kfree(id);
	return ret;
}

static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
	union nvme_result res = { 0 };
	struct nvme_command c = { };
	int ret;

	c.features.opcode = op;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_set_features);

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_get_features);

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

#define NVME_AEN_SUPPORTED \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
	int status;

	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
			 supported_aens);

	queue_work(nvme_wq, &ctrl->async_event_work);
}

static int nvme_ns_open(struct nvme_ns *ns)
{

	/* should never be called due to GENHD_FL_HIDDEN */
	if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
		goto fail;
	if (!nvme_get_ns(ns))
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

	return 0;

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
}

static void nvme_ns_release(struct nvme_ns *ns)
{

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_open(struct gendisk *disk, blk_mode_t mode)
{
	return nvme_ns_open(disk->private_data);
}

static void nvme_release(struct gendisk *disk)
{
	nvme_ns_release(disk->private_data);
}

int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
				u32 max_integrity_segments)
{
	struct blk_integrity integrity = { };

	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		switch (ns->guard_type) {
		case NVME_NVM_NS_16B_GUARD:
			integrity.profile = &t10_pi_type3_crc;
			integrity.tag_size = sizeof(u16) + sizeof(u32);
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		case NVME_NVM_NS_64B_GUARD:
			integrity.profile = &ext_pi_type3_crc64;
			integrity.tag_size = sizeof(u16) + 6;
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		default:
			integrity.profile = NULL;
			break;
		}
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		switch (ns->guard_type) {
		case NVME_NVM_NS_16B_GUARD:
			integrity.profile = &t10_pi_type1_crc;
			integrity.tag_size = sizeof(u16);
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		case NVME_NVM_NS_64B_GUARD:
			integrity.profile = &ext_pi_type1_crc64;
			integrity.tag_size = sizeof(u16);
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		default:
			integrity.profile = NULL;
			break;
		}
		break;
	default:
		integrity.profile = NULL;
		break;
	}

	integrity.tuple_size = ns->ms;
	blk_integrity_register(disk, &integrity);
	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
}
#else
static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
				u32 max_integrity_segments)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct request_queue *queue = disk->queue;
	u32 size = queue_logical_block_size(queue);

	if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
		ctrl->max_discard_sectors =
			nvme_lba_to_sect(ns, ctrl->dmrsl);

	if (ctrl->max_discard_sectors == 0) {
		blk_queue_max_discard_sectors(queue, 0);
		return;
	}

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	queue->limits.discard_granularity = size;

	/* If discard is already enabled, don't reset queue limits */
	if (queue->limits.max_discard_sectors)
		return;

	blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
	blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}

static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
		a->csi == b->csi;
}

static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	bool first = id->dps & NVME_NS_DPS_PI_FIRST;
	unsigned lbaf = nvme_lbaf_index(id->flbas);
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_command c = { };
	struct nvme_id_ns_nvm *nvm;
	int ret = 0;
	u32 elbaf;

	ns->pi_size = 0;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
		ns->pi_size = sizeof(struct t10_pi_tuple);
		ns->guard_type = NVME_NVM_NS_16B_GUARD;
		goto set_pi;
	}

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(ns->head->ns_id);
	c.identify.cns = NVME_ID_CNS_CS_NS;
	c.identify.csi = NVME_CSI_NVM;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm));
	if (ret)
		goto free_data;

	elbaf = le32_to_cpu(nvm->elbaf[lbaf]);

	/* no support for storage tag formats right now */
	if (nvme_elbaf_sts(elbaf))
		goto free_data;

	ns->guard_type = nvme_elbaf_guard_type(elbaf);
	switch (ns->guard_type) {
	case NVME_NVM_NS_64B_GUARD:
		ns->pi_size = sizeof(struct crc64_pi_tuple);
		break;
	case NVME_NVM_NS_16B_GUARD:
		ns->pi_size = sizeof(struct t10_pi_tuple);
		break;
	default:
		break;
	}

free_data:
	kfree(nvm);
set_pi:
	if (ns->pi_size && (first || ns->ms == ns->pi_size))
		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
	else
		ns->pi_type = 0;

	return ret;
}

static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;

	if (nvme_init_ms(ns, id))
		return;

	ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
	if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
		return;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * The NVMe over Fabrics specification only supports metadata as
		 * part of the extended data LBA.  We rely on HCA/HBA support to
		 * remap the separate metadata buffer from the block layer.
		 */
		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
			return;

		ns->features |= NVME_NS_EXT_LBAS;

		/*
		 * The current fabrics transport drivers support namespace
		 * metadata formats only if nvme_ns_has_pi() returns true.
		 * Suppress support for all other formats so the namespace will
		 * have a 0 capacity and not be usable through the block stack.
		 *
		 * Note, this check will need to be modified if any drivers
		 * gain the ability to use other metadata formats.
		 */
		if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
			ns->features |= NVME_NS_METADATA_SUPPORTED;
	} else {
		/*
		 * For PCIe controllers, we can't easily remap the separate
		 * metadata buffer from the block layer and thus require a
		 * separate metadata buffer for block layer metadata/PI support.
		 * We allow extended LBAs for the passthrough interface, though.
		 */
		if (id->flbas & NVME_NS_FLBAS_META_EXT)
			ns->features |= NVME_NS_EXT_LBAS;
		else
			ns->features |= NVME_NS_METADATA_SUPPORTED;
	}
}

static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;

		max_segments = min_not_zero(max_segments, ctrl->max_segments);
		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
	blk_queue_dma_alignment(q, 3);
	blk_queue_write_cache(q, vwc, vwc);
}

static void nvme_update_disk_info(struct gendisk *disk,
		struct nvme_ns *ns, struct nvme_id_ns *id)
{
	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
	u32 bs = 1U << ns->lba_shift;
	u32 atomic_bs, phys_bs, io_opt = 0;

	/*
	 * The block layer can't support LBA sizes larger than the page size
	 * yet, so catch this early and don't allow block I/O.
	 */
	if (ns->lba_shift > PAGE_SHIFT) {
		capacity = 0;
		bs = (1 << 9);
	}

	blk_integrity_unregister(disk);

	atomic_bs = phys_bs = bs;
	if (id->nabo == 0) {
		/*
		 * Bit 1 indicates whether NAWUPF is defined for this namespace
		 * and whether it should be used instead of AWUPF. If NAWUPF ==
		 * 0 then AWUPF must be used instead.
		 */
		if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
		else
			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
	}

	if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
		/* NPWG = Namespace Preferred Write Granularity */
		phys_bs = bs * (1 + le16_to_cpu(id->npwg));
		/* NOWS = Namespace Optimal Write Size */
		io_opt = bs * (1 + le16_to_cpu(id->nows));
	}

	blk_queue_logical_block_size(disk->queue, bs);
	/*
	 * Linux filesystems assume writing a single physical block is
	 * an atomic operation. Hence limit the physical block size to the
	 * value of the Atomic Write Unit Power Fail parameter.
	 */
	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
	blk_queue_io_min(disk->queue, phys_bs);
	blk_queue_io_opt(disk->queue, io_opt);

	/*
	 * Register a metadata profile for PI, or the plain non-integrity NVMe
	 * metadata masquerading as Type 0 if supported, otherwise reject block
	 * I/O to namespaces with metadata except when the namespace supports
	 * PI, as it can strip/insert in that case.
	 */
	if (ns->ms) {
		if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
		    (ns->features & NVME_NS_METADATA_SUPPORTED))
			nvme_init_integrity(disk, ns,
					    ns->ctrl->max_integrity_segments);
		else if (!nvme_ns_has_pi(ns))
			capacity = 0;
	}

	set_capacity_and_notify(disk, capacity);

	nvme_config_discard(disk, ns);
	blk_queue_max_write_zeroes_sectors(disk->queue,
					   ns->ctrl->max_zeroes_sectors);
}

static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
{
	return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags);
}

static inline bool nvme_first_scan(struct gendisk *disk)
{
	/* nvme_alloc_ns() scans the disk prior to adding it */
	return !disk_live(disk);
}

static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 iob;

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
	    is_power_of_2(ctrl->max_hw_sectors))
		iob = ctrl->max_hw_sectors;
	else
		iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));

	if (!iob)
		return;

	if (!is_power_of_2(iob)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring unaligned IO boundary:%u\n",
				ns->disk->disk_name, iob);
		return;
	}

	if (blk_queue_is_zoned(ns->disk->queue)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring zoned namespace IO boundary\n",
				ns->disk->disk_name);
		return;
	}

	blk_queue_chunk_sectors(ns->queue, iob);
}

static int nvme_update_ns_info_generic(struct nvme_ns *ns,
		struct nvme_ns_info *info)
{
	blk_mq_freeze_queue(ns->disk->queue);
	nvme_set_queue_limits(ns->ctrl, ns->queue);
	set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
	blk_mq_unfreeze_queue(ns->disk->queue);

	if (nvme_ns_head_multipath(ns->head)) {
blk_mq_freeze_queue(ns->head->disk->queue); 2003 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); 2004 nvme_mpath_revalidate_paths(ns); 2005 blk_stack_limits(&ns->head->disk->queue->limits, 2006 &ns->queue->limits, 0); 2007 ns->head->disk->flags |= GENHD_FL_HIDDEN; 2008 blk_mq_unfreeze_queue(ns->head->disk->queue); 2009 } 2010 2011 /* Hide the block-interface for these devices */ 2012 ns->disk->flags |= GENHD_FL_HIDDEN; 2013 set_bit(NVME_NS_READY, &ns->flags); 2014 2015 return 0; 2016 } 2017 2018 static int nvme_update_ns_info_block(struct nvme_ns *ns, 2019 struct nvme_ns_info *info) 2020 { 2021 struct nvme_id_ns *id; 2022 unsigned lbaf; 2023 int ret; 2024 2025 ret = nvme_identify_ns(ns->ctrl, info->nsid, &id); 2026 if (ret) 2027 return ret; 2028 2029 blk_mq_freeze_queue(ns->disk->queue); 2030 lbaf = nvme_lbaf_index(id->flbas); 2031 ns->lba_shift = id->lbaf[lbaf].ds; 2032 nvme_set_queue_limits(ns->ctrl, ns->queue); 2033 2034 nvme_configure_metadata(ns, id); 2035 nvme_set_chunk_sectors(ns, id); 2036 nvme_update_disk_info(ns->disk, ns, id); 2037 2038 if (ns->head->ids.csi == NVME_CSI_ZNS) { 2039 ret = nvme_update_zone_info(ns, lbaf); 2040 if (ret) { 2041 blk_mq_unfreeze_queue(ns->disk->queue); 2042 goto out; 2043 } 2044 } 2045 2046 /* 2047 * Only set the DEAC bit if the device guarantees that reads from 2048 * deallocated data return zeroes. While the DEAC bit does not 2049 * require that, it must be a no-op if reads from deallocated data 2050 * do not return zeroes. 2051 */ 2052 if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) 2053 ns->features |= NVME_NS_DEAC; 2054 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); 2055 set_bit(NVME_NS_READY, &ns->flags); 2056 blk_mq_unfreeze_queue(ns->disk->queue); 2057 2058 if (blk_queue_is_zoned(ns->queue)) { 2059 ret = nvme_revalidate_zones(ns); 2060 if (ret && !nvme_first_scan(ns->disk)) 2061 goto out; 2062 } 2063 2064 if (nvme_ns_head_multipath(ns->head)) { 2065 blk_mq_freeze_queue(ns->head->disk->queue); 2066 nvme_update_disk_info(ns->head->disk, ns, id); 2067 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); 2068 nvme_mpath_revalidate_paths(ns); 2069 blk_stack_limits(&ns->head->disk->queue->limits, 2070 &ns->queue->limits, 0); 2071 disk_update_readahead(ns->head->disk); 2072 blk_mq_unfreeze_queue(ns->head->disk->queue); 2073 } 2074 2075 ret = 0; 2076 out: 2077 /* 2078 * If probing fails due an unsupported feature, hide the block device, 2079 * but still allow other access. 
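* The hidden gendisk is not exposed as a block device node, but the
* namespace character device (/dev/ngXnY) and the passthrough ioctls keep
* working, so management tools can still reach the namespace.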
2080 */ 2081 if (ret == -ENODEV) { 2082 ns->disk->flags |= GENHD_FL_HIDDEN; 2083 set_bit(NVME_NS_READY, &ns->flags); 2084 ret = 0; 2085 } 2086 kfree(id); 2087 return ret; 2088 } 2089 2090 static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) 2091 { 2092 switch (info->ids.csi) { 2093 case NVME_CSI_ZNS: 2094 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { 2095 dev_info(ns->ctrl->device, 2096 "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n", 2097 info->nsid); 2098 return nvme_update_ns_info_generic(ns, info); 2099 } 2100 return nvme_update_ns_info_block(ns, info); 2101 case NVME_CSI_NVM: 2102 return nvme_update_ns_info_block(ns, info); 2103 default: 2104 dev_info(ns->ctrl->device, 2105 "block device for nsid %u not supported (csi %u)\n", 2106 info->nsid, info->ids.csi); 2107 return nvme_update_ns_info_generic(ns, info); 2108 } 2109 } 2110 2111 #ifdef CONFIG_BLK_SED_OPAL 2112 static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, 2113 bool send) 2114 { 2115 struct nvme_ctrl *ctrl = data; 2116 struct nvme_command cmd = { }; 2117 2118 if (send) 2119 cmd.common.opcode = nvme_admin_security_send; 2120 else 2121 cmd.common.opcode = nvme_admin_security_recv; 2122 cmd.common.nsid = 0; 2123 cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); 2124 cmd.common.cdw11 = cpu_to_le32(len); 2125 2126 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 2127 NVME_QID_ANY, 1, 0); 2128 } 2129 2130 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) 2131 { 2132 if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) { 2133 if (!ctrl->opal_dev) 2134 ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit); 2135 else if (was_suspended) 2136 opal_unlock_from_suspend(ctrl->opal_dev); 2137 } else { 2138 free_opal_dev(ctrl->opal_dev); 2139 ctrl->opal_dev = NULL; 2140 } 2141 } 2142 #else 2143 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) 2144 { 2145 } 2146 #endif /* CONFIG_BLK_SED_OPAL */ 2147 2148 #ifdef CONFIG_BLK_DEV_ZONED 2149 static int nvme_report_zones(struct gendisk *disk, sector_t sector, 2150 unsigned int nr_zones, report_zones_cb cb, void *data) 2151 { 2152 return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb, 2153 data); 2154 } 2155 #else 2156 #define nvme_report_zones NULL 2157 #endif /* CONFIG_BLK_DEV_ZONED */ 2158 2159 const struct block_device_operations nvme_bdev_ops = { 2160 .owner = THIS_MODULE, 2161 .ioctl = nvme_ioctl, 2162 .compat_ioctl = blkdev_compat_ptr_ioctl, 2163 .open = nvme_open, 2164 .release = nvme_release, 2165 .getgeo = nvme_getgeo, 2166 .report_zones = nvme_report_zones, 2167 .pr_ops = &nvme_pr_ops, 2168 }; 2169 2170 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val, 2171 u32 timeout, const char *op) 2172 { 2173 unsigned long timeout_jiffies = jiffies + timeout * HZ; 2174 u32 csts; 2175 int ret; 2176 2177 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 2178 if (csts == ~0) 2179 return -ENODEV; 2180 if ((csts & mask) == val) 2181 break; 2182 2183 usleep_range(1000, 2000); 2184 if (fatal_signal_pending(current)) 2185 return -EINTR; 2186 if (time_after(jiffies, timeout_jiffies)) { 2187 dev_err(ctrl->device, 2188 "Device not ready; aborting %s, CSTS=0x%x\n", 2189 op, csts); 2190 return -ENODEV; 2191 } 2192 } 2193 2194 return ret; 2195 } 2196 2197 int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown) 2198 { 2199 int ret; 2200 2201 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 2202 if 
(shutdown) 2203 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; 2204 else 2205 ctrl->ctrl_config &= ~NVME_CC_ENABLE; 2206 2207 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2208 if (ret) 2209 return ret; 2210 2211 if (shutdown) { 2212 return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK, 2213 NVME_CSTS_SHST_CMPLT, 2214 ctrl->shutdown_timeout, "shutdown"); 2215 } 2216 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) 2217 msleep(NVME_QUIRK_DELAY_AMOUNT); 2218 return nvme_wait_ready(ctrl, NVME_CSTS_RDY, 0, 2219 (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset"); 2220 } 2221 EXPORT_SYMBOL_GPL(nvme_disable_ctrl); 2222 2223 int nvme_enable_ctrl(struct nvme_ctrl *ctrl) 2224 { 2225 unsigned dev_page_min; 2226 u32 timeout; 2227 int ret; 2228 2229 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); 2230 if (ret) { 2231 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); 2232 return ret; 2233 } 2234 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12; 2235 2236 if (NVME_CTRL_PAGE_SHIFT < dev_page_min) { 2237 dev_err(ctrl->device, 2238 "Minimum device page size %u too large for host (%u)\n", 2239 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT); 2240 return -ENODEV; 2241 } 2242 2243 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI) 2244 ctrl->ctrl_config = NVME_CC_CSS_CSI; 2245 else 2246 ctrl->ctrl_config = NVME_CC_CSS_NVM; 2247 2248 if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS) 2249 ctrl->ctrl_config |= NVME_CC_CRIME; 2250 2251 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; 2252 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; 2253 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; 2254 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2255 if (ret) 2256 return ret; 2257 2258 /* Flush write to device (required if transport is PCI) */ 2259 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config); 2260 if (ret) 2261 return ret; 2262 2263 /* CAP value may change after initial CC write */ 2264 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); 2265 if (ret) 2266 return ret; 2267 2268 timeout = NVME_CAP_TIMEOUT(ctrl->cap); 2269 if (ctrl->cap & NVME_CAP_CRMS_CRWMS) { 2270 u32 crto, ready_timeout; 2271 2272 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto); 2273 if (ret) { 2274 dev_err(ctrl->device, "Reading CRTO failed (%d)\n", 2275 ret); 2276 return ret; 2277 } 2278 2279 /* 2280 * CRTO should always be greater or equal to CAP.TO, but some 2281 * devices are known to get this wrong. Use the larger of the 2282 * two values. 
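* Worked example with hypothetical register values: CAP.TO = 30 (units of
* 500 ms, i.e. 15 s) and CRTO.CRWMT = 20 trips the warning below and the
* larger CAP.TO value is kept; CRTO.CRWMT = 120 instead extends the ready
* wait to 60 seconds.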
2283 */ 2284 if (ctrl->ctrl_config & NVME_CC_CRIME) 2285 ready_timeout = NVME_CRTO_CRIMT(crto); 2286 else 2287 ready_timeout = NVME_CRTO_CRWMT(crto); 2288 2289 if (ready_timeout < timeout) 2290 dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n", 2291 crto, ctrl->cap); 2292 else 2293 timeout = ready_timeout; 2294 } 2295 2296 ctrl->ctrl_config |= NVME_CC_ENABLE; 2297 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2298 if (ret) 2299 return ret; 2300 return nvme_wait_ready(ctrl, NVME_CSTS_RDY, NVME_CSTS_RDY, 2301 (timeout + 1) / 2, "initialisation"); 2302 } 2303 EXPORT_SYMBOL_GPL(nvme_enable_ctrl); 2304 2305 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) 2306 { 2307 __le64 ts; 2308 int ret; 2309 2310 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) 2311 return 0; 2312 2313 ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); 2314 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), 2315 NULL); 2316 if (ret) 2317 dev_warn_once(ctrl->device, 2318 "could not set timestamp (%d)\n", ret); 2319 return ret; 2320 } 2321 2322 static int nvme_configure_host_options(struct nvme_ctrl *ctrl) 2323 { 2324 struct nvme_feat_host_behavior *host; 2325 u8 acre = 0, lbafee = 0; 2326 int ret; 2327 2328 /* Don't bother enabling the feature if retry delay is not reported */ 2329 if (ctrl->crdt[0]) 2330 acre = NVME_ENABLE_ACRE; 2331 if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) 2332 lbafee = NVME_ENABLE_LBAFEE; 2333 2334 if (!acre && !lbafee) 2335 return 0; 2336 2337 host = kzalloc(sizeof(*host), GFP_KERNEL); 2338 if (!host) 2339 return 0; 2340 2341 host->acre = acre; 2342 host->lbafee = lbafee; 2343 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, 2344 host, sizeof(*host), NULL); 2345 kfree(host); 2346 return ret; 2347 } 2348 2349 /* 2350 * The function checks whether the given total (exlat + enlat) latency of 2351 * a power state allows the latter to be used as an APST transition target. 2352 * It does so by comparing the latency to the primary and secondary latency 2353 * tolerances defined by module params. If there's a match, the corresponding 2354 * timeout value is returned and the matching tolerance index (1 or 2) is 2355 * reported. 2356 */ 2357 static bool nvme_apst_get_transition_time(u64 total_latency, 2358 u64 *transition_time, unsigned *last_index) 2359 { 2360 if (total_latency <= apst_primary_latency_tol_us) { 2361 if (*last_index == 1) 2362 return false; 2363 *last_index = 1; 2364 *transition_time = apst_primary_timeout_ms; 2365 return true; 2366 } 2367 if (apst_secondary_timeout_ms && 2368 total_latency <= apst_secondary_latency_tol_us) { 2369 if (*last_index <= 2) 2370 return false; 2371 *last_index = 2; 2372 *transition_time = apst_secondary_timeout_ms; 2373 return true; 2374 } 2375 return false; 2376 } 2377 2378 /* 2379 * APST (Autonomous Power State Transition) lets us program a table of power 2380 * state transitions that the controller will perform automatically. 2381 * 2382 * Depending on module params, one of the two supported techniques will be used: 2383 * 2384 * - If the parameters provide explicit timeouts and tolerances, they will be 2385 * used to build a table with up to 2 non-operational states to transition to. 2386 * The default parameter values were selected based on the values used by 2387 * Microsoft's and Intel's NVMe drivers. 
Yet, since we don't implement dynamic 2388 * regeneration of the APST table in the event of switching between external 2389 * and battery power, the timeouts and tolerances reflect a compromise 2390 * between values used by Microsoft for AC and battery scenarios. 2391 * - If not, we'll configure the table with a simple heuristic: we are willing 2392 * to spend at most 2% of the time transitioning between power states. 2393 * Therefore, when running in any given state, we will enter the next 2394 * lower-power non-operational state after waiting 50 * (enlat + exlat) 2395 * microseconds, as long as that state's exit latency is under the requested 2396 * maximum latency. 2397 * 2398 * We will not autonomously enter any non-operational state for which the total 2399 * latency exceeds ps_max_latency_us. 2400 * 2401 * Users can set ps_max_latency_us to zero to turn off APST. 2402 */ 2403 static int nvme_configure_apst(struct nvme_ctrl *ctrl) 2404 { 2405 struct nvme_feat_auto_pst *table; 2406 unsigned apste = 0; 2407 u64 max_lat_us = 0; 2408 __le64 target = 0; 2409 int max_ps = -1; 2410 int state; 2411 int ret; 2412 unsigned last_lt_index = UINT_MAX; 2413 2414 /* 2415 * If APST isn't supported or if we haven't been initialized yet, 2416 * then don't do anything. 2417 */ 2418 if (!ctrl->apsta) 2419 return 0; 2420 2421 if (ctrl->npss > 31) { 2422 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); 2423 return 0; 2424 } 2425 2426 table = kzalloc(sizeof(*table), GFP_KERNEL); 2427 if (!table) 2428 return 0; 2429 2430 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { 2431 /* Turn off APST. */ 2432 dev_dbg(ctrl->device, "APST disabled\n"); 2433 goto done; 2434 } 2435 2436 /* 2437 * Walk through all states from lowest- to highest-power. 2438 * According to the spec, lower-numbered states use more power. NPSS, 2439 * despite the name, is the index of the lowest-power state, not the 2440 * number of states. 2441 */ 2442 for (state = (int)ctrl->npss; state >= 0; state--) { 2443 u64 total_latency_us, exit_latency_us, transition_ms; 2444 2445 if (target) 2446 table->entries[state] = target; 2447 2448 /* 2449 * Don't allow transitions to the deepest state if it's quirked 2450 * off. 2451 */ 2452 if (state == ctrl->npss && 2453 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) 2454 continue; 2455 2456 /* 2457 * Is this state a useful non-operational state for higher-power 2458 * states to autonomously transition to? 2459 */ 2460 if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE)) 2461 continue; 2462 2463 exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat); 2464 if (exit_latency_us > ctrl->ps_max_latency_us) 2465 continue; 2466 2467 total_latency_us = exit_latency_us + 2468 le32_to_cpu(ctrl->psd[state].entry_lat); 2469 2470 /* 2471 * This state is good. It can be used as the APST idle target 2472 * for higher power states. 
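* Each 64-bit APST table entry encodes the Idle Transition Power State in
* bits 7:3 and the Idle Time Prior to Transition, in milliseconds, in bits
* 31:8, which is what (state << 3) | (transition_ms << 8) below builds. As a
* hypothetical example, a transition to PS4 after 550 ms is encoded as
* (4 << 3) | (550 << 8).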
2473 */ 2474 if (apst_primary_timeout_ms && apst_primary_latency_tol_us) { 2475 if (!nvme_apst_get_transition_time(total_latency_us, 2476 &transition_ms, &last_lt_index)) 2477 continue; 2478 } else { 2479 transition_ms = total_latency_us + 19; 2480 do_div(transition_ms, 20); 2481 if (transition_ms > (1 << 24) - 1) 2482 transition_ms = (1 << 24) - 1; 2483 } 2484 2485 target = cpu_to_le64((state << 3) | (transition_ms << 8)); 2486 if (max_ps == -1) 2487 max_ps = state; 2488 if (total_latency_us > max_lat_us) 2489 max_lat_us = total_latency_us; 2490 } 2491 2492 if (max_ps == -1) 2493 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); 2494 else 2495 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", 2496 max_ps, max_lat_us, (int)sizeof(*table), table); 2497 apste = 1; 2498 2499 done: 2500 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, 2501 table, sizeof(*table), NULL); 2502 if (ret) 2503 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); 2504 kfree(table); 2505 return ret; 2506 } 2507 2508 static void nvme_set_latency_tolerance(struct device *dev, s32 val) 2509 { 2510 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2511 u64 latency; 2512 2513 switch (val) { 2514 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: 2515 case PM_QOS_LATENCY_ANY: 2516 latency = U64_MAX; 2517 break; 2518 2519 default: 2520 latency = val; 2521 } 2522 2523 if (ctrl->ps_max_latency_us != latency) { 2524 ctrl->ps_max_latency_us = latency; 2525 if (ctrl->state == NVME_CTRL_LIVE) 2526 nvme_configure_apst(ctrl); 2527 } 2528 } 2529 2530 struct nvme_core_quirk_entry { 2531 /* 2532 * NVMe model and firmware strings are padded with spaces. For 2533 * simplicity, strings in the quirk table are padded with NULLs 2534 * instead. 2535 */ 2536 u16 vid; 2537 const char *mn; 2538 const char *fr; 2539 unsigned long quirks; 2540 }; 2541 2542 static const struct nvme_core_quirk_entry core_quirks[] = { 2543 { 2544 /* 2545 * This Toshiba device seems to die using any APST states. See: 2546 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 2547 */ 2548 .vid = 0x1179, 2549 .mn = "THNSF5256GPUK TOSHIBA", 2550 .quirks = NVME_QUIRK_NO_APST, 2551 }, 2552 { 2553 /* 2554 * This LiteON CL1-3D*-Q11 firmware version has a race 2555 * condition associated with actions related to suspend to idle 2556 * LiteON has resolved the problem in future firmware 2557 */ 2558 .vid = 0x14a4, 2559 .fr = "22301111", 2560 .quirks = NVME_QUIRK_SIMPLE_SUSPEND, 2561 }, 2562 { 2563 /* 2564 * This Kioxia CD6-V Series / HPE PE8030 device times out and 2565 * aborts I/O during any load, but more easily reproducible 2566 * with discards (fstrim). 2567 * 2568 * The device is left in a state where it is also not possible 2569 * to use "nvme set-feature" to disable APST, but booting with 2570 * nvme_core.default_ps_max_latency=0 works. 2571 */ 2572 .vid = 0x1e0f, 2573 .mn = "KCD6XVUL6T40", 2574 .quirks = NVME_QUIRK_NO_APST, 2575 }, 2576 { 2577 /* 2578 * The external Samsung X5 SSD fails initialization without a 2579 * delay before checking if it is ready and has a whole set of 2580 * other problems. To make this even more interesting, it 2581 * shares the PCI ID with internal Samsung 970 Evo Plus that 2582 * does not need or want these quirks. 
2583 */ 2584 .vid = 0x144d, 2585 .mn = "Samsung Portable SSD X5", 2586 .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 2587 NVME_QUIRK_NO_DEEPEST_PS | 2588 NVME_QUIRK_IGNORE_DEV_SUBNQN, 2589 } 2590 }; 2591 2592 /* match is null-terminated but idstr is space-padded. */ 2593 static bool string_matches(const char *idstr, const char *match, size_t len) 2594 { 2595 size_t matchlen; 2596 2597 if (!match) 2598 return true; 2599 2600 matchlen = strlen(match); 2601 WARN_ON_ONCE(matchlen > len); 2602 2603 if (memcmp(idstr, match, matchlen)) 2604 return false; 2605 2606 for (; matchlen < len; matchlen++) 2607 if (idstr[matchlen] != ' ') 2608 return false; 2609 2610 return true; 2611 } 2612 2613 static bool quirk_matches(const struct nvme_id_ctrl *id, 2614 const struct nvme_core_quirk_entry *q) 2615 { 2616 return q->vid == le16_to_cpu(id->vid) && 2617 string_matches(id->mn, q->mn, sizeof(id->mn)) && 2618 string_matches(id->fr, q->fr, sizeof(id->fr)); 2619 } 2620 2621 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, 2622 struct nvme_id_ctrl *id) 2623 { 2624 size_t nqnlen; 2625 int off; 2626 2627 if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { 2628 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 2629 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 2630 strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 2631 return; 2632 } 2633 2634 if (ctrl->vs >= NVME_VS(1, 2, 1)) 2635 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 2636 } 2637 2638 /* 2639 * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe 2640 * Base Specification 2.0. It is slightly different from the format 2641 * specified there due to historic reasons, and we can't change it now. 2642 */ 2643 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 2644 "nqn.2014.08.org.nvmexpress:%04x%04x", 2645 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 2646 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 2647 off += sizeof(id->sn); 2648 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); 2649 off += sizeof(id->mn); 2650 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); 2651 } 2652 2653 static void nvme_release_subsystem(struct device *dev) 2654 { 2655 struct nvme_subsystem *subsys = 2656 container_of(dev, struct nvme_subsystem, dev); 2657 2658 if (subsys->instance >= 0) 2659 ida_free(&nvme_instance_ida, subsys->instance); 2660 kfree(subsys); 2661 } 2662 2663 static void nvme_destroy_subsystem(struct kref *ref) 2664 { 2665 struct nvme_subsystem *subsys = 2666 container_of(ref, struct nvme_subsystem, ref); 2667 2668 mutex_lock(&nvme_subsystems_lock); 2669 list_del(&subsys->entry); 2670 mutex_unlock(&nvme_subsystems_lock); 2671 2672 ida_destroy(&subsys->ns_ida); 2673 device_del(&subsys->dev); 2674 put_device(&subsys->dev); 2675 } 2676 2677 static void nvme_put_subsystem(struct nvme_subsystem *subsys) 2678 { 2679 kref_put(&subsys->ref, nvme_destroy_subsystem); 2680 } 2681 2682 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) 2683 { 2684 struct nvme_subsystem *subsys; 2685 2686 lockdep_assert_held(&nvme_subsystems_lock); 2687 2688 /* 2689 * Fail matches for discovery subsystems. This results 2690 * in each discovery controller bound to a unique subsystem. 2691 * This avoids issues with validating controller values 2692 * that can only be true when there is a single unique subsystem. 2693 * There may be multiple and completely independent entities 2694 * that provide discovery controllers. 
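* NVME_DISC_SUBSYS_NAME is the well-known discovery NQN
* ("nqn.2014-08.org.nvmexpress.discovery"), so every discovery controller
* ends up bound to its own private nvme_subsystem instance here.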
2695 */ 2696 if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME)) 2697 return NULL; 2698 2699 list_for_each_entry(subsys, &nvme_subsystems, entry) { 2700 if (strcmp(subsys->subnqn, subsysnqn)) 2701 continue; 2702 if (!kref_get_unless_zero(&subsys->ref)) 2703 continue; 2704 return subsys; 2705 } 2706 2707 return NULL; 2708 } 2709 2710 static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl) 2711 { 2712 return ctrl->opts && ctrl->opts->discovery_nqn; 2713 } 2714 2715 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, 2716 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2717 { 2718 struct nvme_ctrl *tmp; 2719 2720 lockdep_assert_held(&nvme_subsystems_lock); 2721 2722 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) { 2723 if (nvme_state_terminal(tmp)) 2724 continue; 2725 2726 if (tmp->cntlid == ctrl->cntlid) { 2727 dev_err(ctrl->device, 2728 "Duplicate cntlid %u with %s, subsys %s, rejecting\n", 2729 ctrl->cntlid, dev_name(tmp->device), 2730 subsys->subnqn); 2731 return false; 2732 } 2733 2734 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || 2735 nvme_discovery_ctrl(ctrl)) 2736 continue; 2737 2738 dev_err(ctrl->device, 2739 "Subsystem does not support multiple controllers\n"); 2740 return false; 2741 } 2742 2743 return true; 2744 } 2745 2746 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2747 { 2748 struct nvme_subsystem *subsys, *found; 2749 int ret; 2750 2751 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); 2752 if (!subsys) 2753 return -ENOMEM; 2754 2755 subsys->instance = -1; 2756 mutex_init(&subsys->lock); 2757 kref_init(&subsys->ref); 2758 INIT_LIST_HEAD(&subsys->ctrls); 2759 INIT_LIST_HEAD(&subsys->nsheads); 2760 nvme_init_subnqn(subsys, ctrl, id); 2761 memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); 2762 memcpy(subsys->model, id->mn, sizeof(subsys->model)); 2763 subsys->vendor_id = le16_to_cpu(id->vid); 2764 subsys->cmic = id->cmic; 2765 2766 /* Versions prior to 1.4 don't necessarily report a valid type */ 2767 if (id->cntrltype == NVME_CTRL_DISC || 2768 !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME)) 2769 subsys->subtype = NVME_NQN_DISC; 2770 else 2771 subsys->subtype = NVME_NQN_NVME; 2772 2773 if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) { 2774 dev_err(ctrl->device, 2775 "Subsystem %s is not a discovery controller", 2776 subsys->subnqn); 2777 kfree(subsys); 2778 return -EINVAL; 2779 } 2780 subsys->awupf = le16_to_cpu(id->awupf); 2781 nvme_mpath_default_iopolicy(subsys); 2782 2783 subsys->dev.class = nvme_subsys_class; 2784 subsys->dev.release = nvme_release_subsystem; 2785 subsys->dev.groups = nvme_subsys_attrs_groups; 2786 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); 2787 device_initialize(&subsys->dev); 2788 2789 mutex_lock(&nvme_subsystems_lock); 2790 found = __nvme_find_get_subsystem(subsys->subnqn); 2791 if (found) { 2792 put_device(&subsys->dev); 2793 subsys = found; 2794 2795 if (!nvme_validate_cntlid(subsys, ctrl, id)) { 2796 ret = -EINVAL; 2797 goto out_put_subsystem; 2798 } 2799 } else { 2800 ret = device_add(&subsys->dev); 2801 if (ret) { 2802 dev_err(ctrl->device, 2803 "failed to register subsystem device.\n"); 2804 put_device(&subsys->dev); 2805 goto out_unlock; 2806 } 2807 ida_init(&subsys->ns_ida); 2808 list_add_tail(&subsys->entry, &nvme_subsystems); 2809 } 2810 2811 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, 2812 dev_name(ctrl->device)); 2813 if (ret) { 2814 dev_err(ctrl->device, 2815 "failed to create sysfs link from subsystem.\n"); 2816 goto 
out_put_subsystem; 2817 } 2818 2819 if (!found) 2820 subsys->instance = ctrl->instance; 2821 ctrl->subsys = subsys; 2822 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); 2823 mutex_unlock(&nvme_subsystems_lock); 2824 return 0; 2825 2826 out_put_subsystem: 2827 nvme_put_subsystem(subsys); 2828 out_unlock: 2829 mutex_unlock(&nvme_subsystems_lock); 2830 return ret; 2831 } 2832 2833 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, 2834 void *log, size_t size, u64 offset) 2835 { 2836 struct nvme_command c = { }; 2837 u32 dwlen = nvme_bytes_to_numd(size); 2838 2839 c.get_log_page.opcode = nvme_admin_get_log_page; 2840 c.get_log_page.nsid = cpu_to_le32(nsid); 2841 c.get_log_page.lid = log_page; 2842 c.get_log_page.lsp = lsp; 2843 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); 2844 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); 2845 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); 2846 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); 2847 c.get_log_page.csi = csi; 2848 2849 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); 2850 } 2851 2852 static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, 2853 struct nvme_effects_log **log) 2854 { 2855 struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi); 2856 int ret; 2857 2858 if (cel) 2859 goto out; 2860 2861 cel = kzalloc(sizeof(*cel), GFP_KERNEL); 2862 if (!cel) 2863 return -ENOMEM; 2864 2865 ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi, 2866 cel, sizeof(*cel), 0); 2867 if (ret) { 2868 kfree(cel); 2869 return ret; 2870 } 2871 2872 xa_store(&ctrl->cels, csi, cel, GFP_KERNEL); 2873 out: 2874 *log = cel; 2875 return 0; 2876 } 2877 2878 static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units) 2879 { 2880 u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val; 2881 2882 if (check_shl_overflow(1U, units + page_shift - 9, &val)) 2883 return UINT_MAX; 2884 return val; 2885 } 2886 2887 static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) 2888 { 2889 struct nvme_command c = { }; 2890 struct nvme_id_ctrl_nvm *id; 2891 int ret; 2892 2893 if (ctrl->oncs & NVME_CTRL_ONCS_DSM) { 2894 ctrl->max_discard_sectors = UINT_MAX; 2895 ctrl->max_discard_segments = NVME_DSM_MAX_RANGES; 2896 } else { 2897 ctrl->max_discard_sectors = 0; 2898 ctrl->max_discard_segments = 0; 2899 } 2900 2901 /* 2902 * Even though NVMe spec explicitly states that MDTS is not applicable 2903 * to the write-zeroes, we are cautious and limit the size to the 2904 * controllers max_hw_sectors value, which is based on the MDTS field 2905 * and possibly other limiting factors. 
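* If the controller reports WZSL in the NVM command set specific Identify
* Controller data parsed further down, that value replaces this conservative
* default.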
2906 */ 2907 if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) && 2908 !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) 2909 ctrl->max_zeroes_sectors = ctrl->max_hw_sectors; 2910 else 2911 ctrl->max_zeroes_sectors = 0; 2912 2913 if (ctrl->subsys->subtype != NVME_NQN_NVME || 2914 nvme_ctrl_limited_cns(ctrl) || 2915 test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags)) 2916 return 0; 2917 2918 id = kzalloc(sizeof(*id), GFP_KERNEL); 2919 if (!id) 2920 return -ENOMEM; 2921 2922 c.identify.opcode = nvme_admin_identify; 2923 c.identify.cns = NVME_ID_CNS_CS_CTRL; 2924 c.identify.csi = NVME_CSI_NVM; 2925 2926 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); 2927 if (ret) 2928 goto free_data; 2929 2930 if (id->dmrl) 2931 ctrl->max_discard_segments = id->dmrl; 2932 ctrl->dmrsl = le32_to_cpu(id->dmrsl); 2933 if (id->wzsl) 2934 ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl); 2935 2936 free_data: 2937 if (ret > 0) 2938 set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags); 2939 kfree(id); 2940 return ret; 2941 } 2942 2943 static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl) 2944 { 2945 struct nvme_effects_log *log = ctrl->effects; 2946 2947 log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC | 2948 NVME_CMD_EFFECTS_NCC | 2949 NVME_CMD_EFFECTS_CSE_MASK); 2950 log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC | 2951 NVME_CMD_EFFECTS_CSE_MASK); 2952 2953 /* 2954 * The spec says the result of a security receive command depends on 2955 * the previous security send command. As such, many vendors log this 2956 * command as one to be submitted only when no other commands to the same 2957 * namespace are outstanding. The intention is to tell the host to 2958 * prevent mixing security send and receive. 2959 * 2960 * This driver can only enforce such exclusive access against IO 2961 * queues, though. We are not readily able to enforce such a rule for 2962 * two commands to the admin queue, which is the only queue that 2963 * matters for this command. 2964 * 2965 * Rather than blindly freezing the IO queues for this effect that 2966 * doesn't even apply to IO, mask it off.
2967 */ 2968 log->acs[nvme_admin_security_recv] &= cpu_to_le32(~NVME_CMD_EFFECTS_CSE_MASK); 2969 2970 log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); 2971 log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); 2972 log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); 2973 } 2974 2975 static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2976 { 2977 int ret = 0; 2978 2979 if (ctrl->effects) 2980 return 0; 2981 2982 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { 2983 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects); 2984 if (ret < 0) 2985 return ret; 2986 } 2987 2988 if (!ctrl->effects) { 2989 ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL); 2990 if (!ctrl->effects) 2991 return -ENOMEM; 2992 xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL); 2993 } 2994 2995 nvme_init_known_nvm_effects(ctrl); 2996 return 0; 2997 } 2998 2999 static int nvme_init_identify(struct nvme_ctrl *ctrl) 3000 { 3001 struct nvme_id_ctrl *id; 3002 u32 max_hw_sectors; 3003 bool prev_apst_enabled; 3004 int ret; 3005 3006 ret = nvme_identify_ctrl(ctrl, &id); 3007 if (ret) { 3008 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); 3009 return -EIO; 3010 } 3011 3012 if (!(ctrl->ops->flags & NVME_F_FABRICS)) 3013 ctrl->cntlid = le16_to_cpu(id->cntlid); 3014 3015 if (!ctrl->identified) { 3016 unsigned int i; 3017 3018 /* 3019 * Check for quirks. Quirk can depend on firmware version, 3020 * so, in principle, the set of quirks present can change 3021 * across a reset. As a possible future enhancement, we 3022 * could re-scan for quirks every time we reinitialize 3023 * the device, but we'd have to make sure that the driver 3024 * behaves intelligently if the quirks change. 
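* Note that string_matches() treats a NULL .mn or .fr in the quirk table as
* a wildcard, so a core_quirks[] entry may match on the PCI vendor ID alone,
* or on vendor ID plus model, without pinning an exact firmware revision.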
3025 */ 3026 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { 3027 if (quirk_matches(id, &core_quirks[i])) 3028 ctrl->quirks |= core_quirks[i].quirks; 3029 } 3030 3031 ret = nvme_init_subsystem(ctrl, id); 3032 if (ret) 3033 goto out_free; 3034 3035 ret = nvme_init_effects(ctrl, id); 3036 if (ret) 3037 goto out_free; 3038 } 3039 memcpy(ctrl->subsys->firmware_rev, id->fr, 3040 sizeof(ctrl->subsys->firmware_rev)); 3041 3042 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { 3043 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); 3044 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; 3045 } 3046 3047 ctrl->crdt[0] = le16_to_cpu(id->crdt1); 3048 ctrl->crdt[1] = le16_to_cpu(id->crdt2); 3049 ctrl->crdt[2] = le16_to_cpu(id->crdt3); 3050 3051 ctrl->oacs = le16_to_cpu(id->oacs); 3052 ctrl->oncs = le16_to_cpu(id->oncs); 3053 ctrl->mtfa = le16_to_cpu(id->mtfa); 3054 ctrl->oaes = le32_to_cpu(id->oaes); 3055 ctrl->wctemp = le16_to_cpu(id->wctemp); 3056 ctrl->cctemp = le16_to_cpu(id->cctemp); 3057 3058 atomic_set(&ctrl->abort_limit, id->acl + 1); 3059 ctrl->vwc = id->vwc; 3060 if (id->mdts) 3061 max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts); 3062 else 3063 max_hw_sectors = UINT_MAX; 3064 ctrl->max_hw_sectors = 3065 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); 3066 3067 nvme_set_queue_limits(ctrl, ctrl->admin_q); 3068 ctrl->sgls = le32_to_cpu(id->sgls); 3069 ctrl->kas = le16_to_cpu(id->kas); 3070 ctrl->max_namespaces = le32_to_cpu(id->mnan); 3071 ctrl->ctratt = le32_to_cpu(id->ctratt); 3072 3073 ctrl->cntrltype = id->cntrltype; 3074 ctrl->dctype = id->dctype; 3075 3076 if (id->rtd3e) { 3077 /* us -> s */ 3078 u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC; 3079 3080 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, 3081 shutdown_timeout, 60); 3082 3083 if (ctrl->shutdown_timeout != shutdown_timeout) 3084 dev_info(ctrl->device, 3085 "Shutdown timeout set to %u seconds\n", 3086 ctrl->shutdown_timeout); 3087 } else 3088 ctrl->shutdown_timeout = shutdown_timeout; 3089 3090 ctrl->npss = id->npss; 3091 ctrl->apsta = id->apsta; 3092 prev_apst_enabled = ctrl->apst_enabled; 3093 if (ctrl->quirks & NVME_QUIRK_NO_APST) { 3094 if (force_apst && id->apsta) { 3095 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); 3096 ctrl->apst_enabled = true; 3097 } else { 3098 ctrl->apst_enabled = false; 3099 } 3100 } else { 3101 ctrl->apst_enabled = id->apsta; 3102 } 3103 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); 3104 3105 if (ctrl->ops->flags & NVME_F_FABRICS) { 3106 ctrl->icdoff = le16_to_cpu(id->icdoff); 3107 ctrl->ioccsz = le32_to_cpu(id->ioccsz); 3108 ctrl->iorcsz = le32_to_cpu(id->iorcsz); 3109 ctrl->maxcmd = le16_to_cpu(id->maxcmd); 3110 3111 /* 3112 * In fabrics we need to verify the cntlid matches the 3113 * admin connect 3114 */ 3115 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { 3116 dev_err(ctrl->device, 3117 "Mismatching cntlid: Connect %u vs Identify " 3118 "%u, rejecting\n", 3119 ctrl->cntlid, le16_to_cpu(id->cntlid)); 3120 ret = -EINVAL; 3121 goto out_free; 3122 } 3123 3124 if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) { 3125 dev_err(ctrl->device, 3126 "keep-alive support is mandatory for fabrics\n"); 3127 ret = -EINVAL; 3128 goto out_free; 3129 } 3130 } else { 3131 ctrl->hmpre = le32_to_cpu(id->hmpre); 3132 ctrl->hmmin = le32_to_cpu(id->hmmin); 3133 ctrl->hmminds = le32_to_cpu(id->hmminds); 3134 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); 3135 } 3136 
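/*
 * The hmpre/hmmin/hmminds/hmmaxd values cached above are the controller's
 * Host Memory Buffer hints; only the PCIe transport acts on them when it
 * sets up the HMB during reset.
 */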
3137 ret = nvme_mpath_init_identify(ctrl, id); 3138 if (ret < 0) 3139 goto out_free; 3140 3141 if (ctrl->apst_enabled && !prev_apst_enabled) 3142 dev_pm_qos_expose_latency_tolerance(ctrl->device); 3143 else if (!ctrl->apst_enabled && prev_apst_enabled) 3144 dev_pm_qos_hide_latency_tolerance(ctrl->device); 3145 3146 out_free: 3147 kfree(id); 3148 return ret; 3149 } 3150 3151 /* 3152 * Initialize the cached copies of the Identify data and various controller 3153 * register in our nvme_ctrl structure. This should be called as soon as 3154 * the admin queue is fully up and running. 3155 */ 3156 int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended) 3157 { 3158 int ret; 3159 3160 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); 3161 if (ret) { 3162 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); 3163 return ret; 3164 } 3165 3166 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); 3167 3168 if (ctrl->vs >= NVME_VS(1, 1, 0)) 3169 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap); 3170 3171 ret = nvme_init_identify(ctrl); 3172 if (ret) 3173 return ret; 3174 3175 ret = nvme_configure_apst(ctrl); 3176 if (ret < 0) 3177 return ret; 3178 3179 ret = nvme_configure_timestamp(ctrl); 3180 if (ret < 0) 3181 return ret; 3182 3183 ret = nvme_configure_host_options(ctrl); 3184 if (ret < 0) 3185 return ret; 3186 3187 nvme_configure_opal(ctrl, was_suspended); 3188 3189 if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { 3190 /* 3191 * Do not return errors unless we are in a controller reset, 3192 * the controller works perfectly fine without hwmon. 3193 */ 3194 ret = nvme_hwmon_init(ctrl); 3195 if (ret == -EINTR) 3196 return ret; 3197 } 3198 3199 clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags); 3200 ctrl->identified = true; 3201 3202 return 0; 3203 } 3204 EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish); 3205 3206 static int nvme_dev_open(struct inode *inode, struct file *file) 3207 { 3208 struct nvme_ctrl *ctrl = 3209 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 3210 3211 switch (ctrl->state) { 3212 case NVME_CTRL_LIVE: 3213 break; 3214 default: 3215 return -EWOULDBLOCK; 3216 } 3217 3218 nvme_get_ctrl(ctrl); 3219 if (!try_module_get(ctrl->ops->module)) { 3220 nvme_put_ctrl(ctrl); 3221 return -EINVAL; 3222 } 3223 3224 file->private_data = ctrl; 3225 return 0; 3226 } 3227 3228 static int nvme_dev_release(struct inode *inode, struct file *file) 3229 { 3230 struct nvme_ctrl *ctrl = 3231 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 3232 3233 module_put(ctrl->ops->module); 3234 nvme_put_ctrl(ctrl); 3235 return 0; 3236 } 3237 3238 static const struct file_operations nvme_dev_fops = { 3239 .owner = THIS_MODULE, 3240 .open = nvme_dev_open, 3241 .release = nvme_dev_release, 3242 .unlocked_ioctl = nvme_dev_ioctl, 3243 .compat_ioctl = compat_ptr_ioctl, 3244 .uring_cmd = nvme_dev_uring_cmd, 3245 }; 3246 3247 static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl, 3248 unsigned nsid) 3249 { 3250 struct nvme_ns_head *h; 3251 3252 lockdep_assert_held(&ctrl->subsys->lock); 3253 3254 list_for_each_entry(h, &ctrl->subsys->nsheads, entry) { 3255 /* 3256 * Private namespaces can share NSIDs under some conditions. 3257 * In that case we can't use the same ns_head for namespaces 3258 * with the same NSID. 
3259 */ 3260 if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h)) 3261 continue; 3262 if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) 3263 return h; 3264 } 3265 3266 return NULL; 3267 } 3268 3269 static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys, 3270 struct nvme_ns_ids *ids) 3271 { 3272 bool has_uuid = !uuid_is_null(&ids->uuid); 3273 bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid)); 3274 bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64)); 3275 struct nvme_ns_head *h; 3276 3277 lockdep_assert_held(&subsys->lock); 3278 3279 list_for_each_entry(h, &subsys->nsheads, entry) { 3280 if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid)) 3281 return -EINVAL; 3282 if (has_nguid && 3283 memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0) 3284 return -EINVAL; 3285 if (has_eui64 && 3286 memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0) 3287 return -EINVAL; 3288 } 3289 3290 return 0; 3291 } 3292 3293 static void nvme_cdev_rel(struct device *dev) 3294 { 3295 ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt)); 3296 } 3297 3298 void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device) 3299 { 3300 cdev_device_del(cdev, cdev_device); 3301 put_device(cdev_device); 3302 } 3303 3304 int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, 3305 const struct file_operations *fops, struct module *owner) 3306 { 3307 int minor, ret; 3308 3309 minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL); 3310 if (minor < 0) 3311 return minor; 3312 cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor); 3313 cdev_device->class = nvme_ns_chr_class; 3314 cdev_device->release = nvme_cdev_rel; 3315 device_initialize(cdev_device); 3316 cdev_init(cdev, fops); 3317 cdev->owner = owner; 3318 ret = cdev_device_add(cdev, cdev_device); 3319 if (ret) 3320 put_device(cdev_device); 3321 3322 return ret; 3323 } 3324 3325 static int nvme_ns_chr_open(struct inode *inode, struct file *file) 3326 { 3327 return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev)); 3328 } 3329 3330 static int nvme_ns_chr_release(struct inode *inode, struct file *file) 3331 { 3332 nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev)); 3333 return 0; 3334 } 3335 3336 static const struct file_operations nvme_ns_chr_fops = { 3337 .owner = THIS_MODULE, 3338 .open = nvme_ns_chr_open, 3339 .release = nvme_ns_chr_release, 3340 .unlocked_ioctl = nvme_ns_chr_ioctl, 3341 .compat_ioctl = compat_ptr_ioctl, 3342 .uring_cmd = nvme_ns_chr_uring_cmd, 3343 .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll, 3344 }; 3345 3346 static int nvme_add_ns_cdev(struct nvme_ns *ns) 3347 { 3348 int ret; 3349 3350 ns->cdev_device.parent = ns->ctrl->device; 3351 ret = dev_set_name(&ns->cdev_device, "ng%dn%d", 3352 ns->ctrl->instance, ns->head->instance); 3353 if (ret) 3354 return ret; 3355 3356 return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops, 3357 ns->ctrl->ops->module); 3358 } 3359 3360 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, 3361 struct nvme_ns_info *info) 3362 { 3363 struct nvme_ns_head *head; 3364 size_t size = sizeof(*head); 3365 int ret = -ENOMEM; 3366 3367 #ifdef CONFIG_NVME_MULTIPATH 3368 size += num_possible_nodes() * sizeof(struct nvme_ns *); 3369 #endif 3370 3371 head = kzalloc(size, GFP_KERNEL); 3372 if (!head) 3373 goto out; 3374 ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL); 3375 if (ret < 0) 3376 goto out_free_head; 3377 head->instance = ret; 3378 INIT_LIST_HEAD(&head->list); 3379 ret = 
init_srcu_struct(&head->srcu); 3380 if (ret) 3381 goto out_ida_remove; 3382 head->subsys = ctrl->subsys; 3383 head->ns_id = info->nsid; 3384 head->ids = info->ids; 3385 head->shared = info->is_shared; 3386 kref_init(&head->ref); 3387 3388 if (head->ids.csi) { 3389 ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects); 3390 if (ret) 3391 goto out_cleanup_srcu; 3392 } else 3393 head->effects = ctrl->effects; 3394 3395 ret = nvme_mpath_alloc_disk(ctrl, head); 3396 if (ret) 3397 goto out_cleanup_srcu; 3398 3399 list_add_tail(&head->entry, &ctrl->subsys->nsheads); 3400 3401 kref_get(&ctrl->subsys->ref); 3402 3403 return head; 3404 out_cleanup_srcu: 3405 cleanup_srcu_struct(&head->srcu); 3406 out_ida_remove: 3407 ida_free(&ctrl->subsys->ns_ida, head->instance); 3408 out_free_head: 3409 kfree(head); 3410 out: 3411 if (ret > 0) 3412 ret = blk_status_to_errno(nvme_error_status(ret)); 3413 return ERR_PTR(ret); 3414 } 3415 3416 static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this, 3417 struct nvme_ns_ids *ids) 3418 { 3419 struct nvme_subsystem *s; 3420 int ret = 0; 3421 3422 /* 3423 * Note that this check is racy as we try to avoid holding the global 3424 * lock over the whole ns_head creation. But it is only intended as 3425 * a sanity check anyway. 3426 */ 3427 mutex_lock(&nvme_subsystems_lock); 3428 list_for_each_entry(s, &nvme_subsystems, entry) { 3429 if (s == this) 3430 continue; 3431 mutex_lock(&s->lock); 3432 ret = nvme_subsys_check_duplicate_ids(s, ids); 3433 mutex_unlock(&s->lock); 3434 if (ret) 3435 break; 3436 } 3437 mutex_unlock(&nvme_subsystems_lock); 3438 3439 return ret; 3440 } 3441 3442 static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) 3443 { 3444 struct nvme_ctrl *ctrl = ns->ctrl; 3445 struct nvme_ns_head *head = NULL; 3446 int ret; 3447 3448 ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids); 3449 if (ret) { 3450 /* 3451 * We've found two different namespaces on two different 3452 * subsystems that report the same ID. This is pretty nasty 3453 * for anything that actually requires unique device 3454 * identification. In the kernel we need this for multipathing, 3455 * and in user space the /dev/disk/by-id/ links rely on it. 3456 * 3457 * If the device also claims to be multi-path capable, back off 3458 * here now and refuse to probe the second device, as this is a 3459 * recipe for data corruption. If not, this is probably a 3460 * cheap consumer device on the PCIe bus, so let the user 3461 * proceed and use the shiny toy, but warn that with a changing 3462 * probing order (which due to our async probing could just be 3463 * the device taking longer to start up) the other device could show 3464 * up at any time.
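* Clearing the IDs below and flagging the controller with
* NVME_QUIRK_BOGUS_NID means the reported identifiers are no longer trusted
* for this controller, so the /dev/disk/by-id/ links for its namespaces stop
* being unique handles.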
3465 */ 3466 nvme_print_device_info(ctrl); 3467 if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */ 3468 ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) && 3469 info->is_shared)) { 3470 dev_err(ctrl->device, 3471 "ignoring nsid %d because of duplicate IDs\n", 3472 info->nsid); 3473 return ret; 3474 } 3475 3476 dev_err(ctrl->device, 3477 "clearing duplicate IDs for nsid %d\n", info->nsid); 3478 dev_err(ctrl->device, 3479 "use of /dev/disk/by-id/ may cause data corruption\n"); 3480 memset(&info->ids.nguid, 0, sizeof(info->ids.nguid)); 3481 memset(&info->ids.uuid, 0, sizeof(info->ids.uuid)); 3482 memset(&info->ids.eui64, 0, sizeof(info->ids.eui64)); 3483 ctrl->quirks |= NVME_QUIRK_BOGUS_NID; 3484 } 3485 3486 mutex_lock(&ctrl->subsys->lock); 3487 head = nvme_find_ns_head(ctrl, info->nsid); 3488 if (!head) { 3489 ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids); 3490 if (ret) { 3491 dev_err(ctrl->device, 3492 "duplicate IDs in subsystem for nsid %d\n", 3493 info->nsid); 3494 goto out_unlock; 3495 } 3496 head = nvme_alloc_ns_head(ctrl, info); 3497 if (IS_ERR(head)) { 3498 ret = PTR_ERR(head); 3499 goto out_unlock; 3500 } 3501 } else { 3502 ret = -EINVAL; 3503 if (!info->is_shared || !head->shared) { 3504 dev_err(ctrl->device, 3505 "Duplicate unshared namespace %d\n", 3506 info->nsid); 3507 goto out_put_ns_head; 3508 } 3509 if (!nvme_ns_ids_equal(&head->ids, &info->ids)) { 3510 dev_err(ctrl->device, 3511 "IDs don't match for shared namespace %d\n", 3512 info->nsid); 3513 goto out_put_ns_head; 3514 } 3515 3516 if (!multipath) { 3517 dev_warn(ctrl->device, 3518 "Found shared namespace %d, but multipathing not supported.\n", 3519 info->nsid); 3520 dev_warn_once(ctrl->device, 3521 "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n."); 3522 } 3523 } 3524 3525 list_add_tail_rcu(&ns->siblings, &head->list); 3526 ns->head = head; 3527 mutex_unlock(&ctrl->subsys->lock); 3528 return 0; 3529 3530 out_put_ns_head: 3531 nvme_put_ns_head(head); 3532 out_unlock: 3533 mutex_unlock(&ctrl->subsys->lock); 3534 return ret; 3535 } 3536 3537 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3538 { 3539 struct nvme_ns *ns, *ret = NULL; 3540 3541 down_read(&ctrl->namespaces_rwsem); 3542 list_for_each_entry(ns, &ctrl->namespaces, list) { 3543 if (ns->head->ns_id == nsid) { 3544 if (!nvme_get_ns(ns)) 3545 continue; 3546 ret = ns; 3547 break; 3548 } 3549 if (ns->head->ns_id > nsid) 3550 break; 3551 } 3552 up_read(&ctrl->namespaces_rwsem); 3553 return ret; 3554 } 3555 EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU); 3556 3557 /* 3558 * Add the namespace to the controller list while keeping the list ordered. 
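* The list is kept sorted by NSID; the insertion below walks backwards from
* the tail because a freshly scanned namespace usually has the largest NSID
* seen so far, which keeps the common case cheap.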
3559 */ 3560 static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) 3561 { 3562 struct nvme_ns *tmp; 3563 3564 list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) { 3565 if (tmp->head->ns_id < ns->head->ns_id) { 3566 list_add(&ns->list, &tmp->list); 3567 return; 3568 } 3569 } 3570 list_add(&ns->list, &ns->ctrl->namespaces); 3571 } 3572 3573 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) 3574 { 3575 struct nvme_ns *ns; 3576 struct gendisk *disk; 3577 int node = ctrl->numa_node; 3578 3579 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); 3580 if (!ns) 3581 return; 3582 3583 disk = blk_mq_alloc_disk(ctrl->tagset, ns); 3584 if (IS_ERR(disk)) 3585 goto out_free_ns; 3586 disk->fops = &nvme_bdev_ops; 3587 disk->private_data = ns; 3588 3589 ns->disk = disk; 3590 ns->queue = disk->queue; 3591 3592 if (ctrl->opts && ctrl->opts->data_digest) 3593 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue); 3594 3595 blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); 3596 if (ctrl->ops->supports_pci_p2pdma && 3597 ctrl->ops->supports_pci_p2pdma(ctrl)) 3598 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); 3599 3600 ns->ctrl = ctrl; 3601 kref_init(&ns->kref); 3602 3603 if (nvme_init_ns_head(ns, info)) 3604 goto out_cleanup_disk; 3605 3606 /* 3607 * If multipathing is enabled, the device name for all disks and not 3608 * just those that represent shared namespaces needs to be based on the 3609 * subsystem instance. Using the controller instance for private 3610 * namespaces could lead to naming collisions between shared and private 3611 * namespaces if they don't use a common numbering scheme. 3612 * 3613 * If multipathing is not enabled, disk names must use the controller 3614 * instance as shared namespaces will show up as multiple block 3615 * devices. 
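* Hypothetical example: with multipathing enabled, a path to a shared
* namespace might appear as the hidden per-path node nvme0c1n2 behind the
* nvme0n2 head device; with multipathing disabled the same shared namespace
* instead shows up once per controller, e.g. as nvme1n2 and nvme2n2.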
3616 */ 3617 if (nvme_ns_head_multipath(ns->head)) { 3618 sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, 3619 ctrl->instance, ns->head->instance); 3620 disk->flags |= GENHD_FL_HIDDEN; 3621 } else if (multipath) { 3622 sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, 3623 ns->head->instance); 3624 } else { 3625 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, 3626 ns->head->instance); 3627 } 3628 3629 if (nvme_update_ns_info(ns, info)) 3630 goto out_unlink_ns; 3631 3632 down_write(&ctrl->namespaces_rwsem); 3633 nvme_ns_add_to_ctrl_list(ns); 3634 up_write(&ctrl->namespaces_rwsem); 3635 nvme_get_ctrl(ctrl); 3636 3637 if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups)) 3638 goto out_cleanup_ns_from_list; 3639 3640 if (!nvme_ns_head_multipath(ns->head)) 3641 nvme_add_ns_cdev(ns); 3642 3643 nvme_mpath_add_disk(ns, info->anagrpid); 3644 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); 3645 3646 return; 3647 3648 out_cleanup_ns_from_list: 3649 nvme_put_ctrl(ctrl); 3650 down_write(&ctrl->namespaces_rwsem); 3651 list_del_init(&ns->list); 3652 up_write(&ctrl->namespaces_rwsem); 3653 out_unlink_ns: 3654 mutex_lock(&ctrl->subsys->lock); 3655 list_del_rcu(&ns->siblings); 3656 if (list_empty(&ns->head->list)) 3657 list_del_init(&ns->head->entry); 3658 mutex_unlock(&ctrl->subsys->lock); 3659 nvme_put_ns_head(ns->head); 3660 out_cleanup_disk: 3661 put_disk(disk); 3662 out_free_ns: 3663 kfree(ns); 3664 } 3665 3666 static void nvme_ns_remove(struct nvme_ns *ns) 3667 { 3668 bool last_path = false; 3669 3670 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) 3671 return; 3672 3673 clear_bit(NVME_NS_READY, &ns->flags); 3674 set_capacity(ns->disk, 0); 3675 nvme_fault_inject_fini(&ns->fault_inject); 3676 3677 /* 3678 * Ensure that !NVME_NS_READY is seen by other threads to prevent 3679 * this ns going back into current_path. 3680 */ 3681 synchronize_srcu(&ns->head->srcu); 3682 3683 /* wait for concurrent submissions */ 3684 if (nvme_mpath_clear_current_path(ns)) 3685 synchronize_srcu(&ns->head->srcu); 3686 3687 mutex_lock(&ns->ctrl->subsys->lock); 3688 list_del_rcu(&ns->siblings); 3689 if (list_empty(&ns->head->list)) { 3690 list_del_init(&ns->head->entry); 3691 last_path = true; 3692 } 3693 mutex_unlock(&ns->ctrl->subsys->lock); 3694 3695 /* guarantee not available in head->list */ 3696 synchronize_srcu(&ns->head->srcu); 3697 3698 if (!nvme_ns_head_multipath(ns->head)) 3699 nvme_cdev_del(&ns->cdev, &ns->cdev_device); 3700 del_gendisk(ns->disk); 3701 3702 down_write(&ns->ctrl->namespaces_rwsem); 3703 list_del_init(&ns->list); 3704 up_write(&ns->ctrl->namespaces_rwsem); 3705 3706 if (last_path) 3707 nvme_mpath_shutdown_disk(ns->head); 3708 nvme_put_ns(ns); 3709 } 3710 3711 static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) 3712 { 3713 struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid); 3714 3715 if (ns) { 3716 nvme_ns_remove(ns); 3717 nvme_put_ns(ns); 3718 } 3719 } 3720 3721 static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info) 3722 { 3723 int ret = NVME_SC_INVALID_NS | NVME_SC_DNR; 3724 3725 if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) { 3726 dev_err(ns->ctrl->device, 3727 "identifiers changed for nsid %d\n", ns->head->ns_id); 3728 goto out; 3729 } 3730 3731 ret = nvme_update_ns_info(ns, info); 3732 out: 3733 /* 3734 * Only remove the namespace if we got a fatal error back from the 3735 * device, otherwise ignore the error and just move on. 
3736 * 3737 * TODO: we should probably schedule a delayed retry here. 3738 */ 3739 if (ret > 0 && (ret & NVME_SC_DNR)) 3740 nvme_ns_remove(ns); 3741 } 3742 3743 static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3744 { 3745 struct nvme_ns_info info = { .nsid = nsid }; 3746 struct nvme_ns *ns; 3747 int ret; 3748 3749 if (nvme_identify_ns_descs(ctrl, &info)) 3750 return; 3751 3752 if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) { 3753 dev_warn(ctrl->device, 3754 "command set not reported for nsid: %d\n", nsid); 3755 return; 3756 } 3757 3758 /* 3759 * If available try to use the Command Set Idependent Identify Namespace 3760 * data structure to find all the generic information that is needed to 3761 * set up a namespace. If not fall back to the legacy version. 3762 */ 3763 if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) || 3764 (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) 3765 ret = nvme_ns_info_from_id_cs_indep(ctrl, &info); 3766 else 3767 ret = nvme_ns_info_from_identify(ctrl, &info); 3768 3769 if (info.is_removed) 3770 nvme_ns_remove_by_nsid(ctrl, nsid); 3771 3772 /* 3773 * Ignore the namespace if it is not ready. We will get an AEN once it 3774 * becomes ready and restart the scan. 3775 */ 3776 if (ret || !info.is_ready) 3777 return; 3778 3779 ns = nvme_find_get_ns(ctrl, nsid); 3780 if (ns) { 3781 nvme_validate_ns(ns, &info); 3782 nvme_put_ns(ns); 3783 } else { 3784 nvme_alloc_ns(ctrl, &info); 3785 } 3786 } 3787 3788 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 3789 unsigned nsid) 3790 { 3791 struct nvme_ns *ns, *next; 3792 LIST_HEAD(rm_list); 3793 3794 down_write(&ctrl->namespaces_rwsem); 3795 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { 3796 if (ns->head->ns_id > nsid) 3797 list_move_tail(&ns->list, &rm_list); 3798 } 3799 up_write(&ctrl->namespaces_rwsem); 3800 3801 list_for_each_entry_safe(ns, next, &rm_list, list) 3802 nvme_ns_remove(ns); 3803 3804 } 3805 3806 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) 3807 { 3808 const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32); 3809 __le32 *ns_list; 3810 u32 prev = 0; 3811 int ret = 0, i; 3812 3813 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 3814 if (!ns_list) 3815 return -ENOMEM; 3816 3817 for (;;) { 3818 struct nvme_command cmd = { 3819 .identify.opcode = nvme_admin_identify, 3820 .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST, 3821 .identify.nsid = cpu_to_le32(prev), 3822 }; 3823 3824 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list, 3825 NVME_IDENTIFY_DATA_SIZE); 3826 if (ret) { 3827 dev_warn(ctrl->device, 3828 "Identify NS List failed (status=0x%x)\n", ret); 3829 goto free; 3830 } 3831 3832 for (i = 0; i < nr_entries; i++) { 3833 u32 nsid = le32_to_cpu(ns_list[i]); 3834 3835 if (!nsid) /* end of the list? 
*/ 3836 goto out; 3837 nvme_scan_ns(ctrl, nsid); 3838 while (++prev < nsid) 3839 nvme_ns_remove_by_nsid(ctrl, prev); 3840 } 3841 } 3842 out: 3843 nvme_remove_invalid_namespaces(ctrl, prev); 3844 free: 3845 kfree(ns_list); 3846 return ret; 3847 } 3848 3849 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl) 3850 { 3851 struct nvme_id_ctrl *id; 3852 u32 nn, i; 3853 3854 if (nvme_identify_ctrl(ctrl, &id)) 3855 return; 3856 nn = le32_to_cpu(id->nn); 3857 kfree(id); 3858 3859 for (i = 1; i <= nn; i++) 3860 nvme_scan_ns(ctrl, i); 3861 3862 nvme_remove_invalid_namespaces(ctrl, nn); 3863 } 3864 3865 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) 3866 { 3867 size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32); 3868 __le32 *log; 3869 int error; 3870 3871 log = kzalloc(log_size, GFP_KERNEL); 3872 if (!log) 3873 return; 3874 3875 /* 3876 * We need to read the log to clear the AEN, but we don't want to rely 3877 * on it for the changed namespace information as userspace could have 3878 * raced with us in reading the log page, which could cause us to miss 3879 * updates. 3880 */ 3881 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, 3882 NVME_CSI_NVM, log, log_size, 0); 3883 if (error) 3884 dev_warn(ctrl->device, 3885 "reading changed ns log failed: %d\n", error); 3886 3887 kfree(log); 3888 } 3889 3890 static void nvme_scan_work(struct work_struct *work) 3891 { 3892 struct nvme_ctrl *ctrl = 3893 container_of(work, struct nvme_ctrl, scan_work); 3894 int ret; 3895 3896 /* No tagset on a live ctrl means IO queues could not be created */ 3897 if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset) 3898 return; 3899 3900 /* 3901 * Identify controller limits can change at controller reset due to 3902 * a new firmware download; even though that is not common, we cannot 3903 * ignore such a scenario. The controller's non-MDTS limits are reported 3904 * in units of logical blocks, whose size depends on the format of the 3905 * attached namespace. Hence re-read the limits at the time of ns allocation. 3906 */ 3907 ret = nvme_init_non_mdts_limits(ctrl); 3908 if (ret < 0) { 3909 dev_warn(ctrl->device, 3910 "reading non-mdts-limits failed: %d\n", ret); 3911 return; 3912 } 3913 3914 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { 3915 dev_info(ctrl->device, "rescanning namespaces.\n"); 3916 nvme_clear_changed_ns_log(ctrl); 3917 } 3918 3919 mutex_lock(&ctrl->scan_lock); 3920 if (nvme_ctrl_limited_cns(ctrl)) { 3921 nvme_scan_ns_sequential(ctrl); 3922 } else { 3923 /* 3924 * Fall back to sequential scan if DNR is set to handle broken 3925 * devices which should support Identify NS List (as per the VS 3926 * they report) but don't actually support it. 3927 */ 3928 ret = nvme_scan_ns_list(ctrl); 3929 if (ret > 0 && ret & NVME_SC_DNR) 3930 nvme_scan_ns_sequential(ctrl); 3931 } 3932 mutex_unlock(&ctrl->scan_lock); 3933 } 3934 3935 /* 3936 * This function iterates the namespace list unlocked to allow recovery from 3937 * controller failure. It is up to the caller to ensure the namespace list is 3938 * not modified by scan work while this function is executing.
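* nvme_remove_namespaces() below arranges for that by flushing scan_work
* before splicing the namespaces off the controller list.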
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;
	LIST_HEAD(ns_list);

	/*
	 * Make sure to requeue I/O to all namespaces as these might result
	 * from the scan itself and must complete for the scan_work to make
	 * progress.
	 */
	nvme_mpath_clear_ctrl_paths(ctrl);

	/*
	 * Unquiesce I/O queues so any pending I/O won't hang, especially
	 * those submitted from scan work.
	 */
	nvme_unquiesce_io_queues(ctrl);

	/* prevent racing with ns scanning */
	flush_work(&ctrl->scan_work);

	/*
	 * The dead state indicates that the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_mark_namespaces_dead(ctrl);

	/* this is a no-op when called from the controller reset handler */
	nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);

	down_write(&ctrl->namespaces_rwsem);
	list_splice_init(&ctrl->namespaces, &ns_list);
	up_write(&ctrl->namespaces_rwsem);

	list_for_each_entry_safe(ns, next, &ns_list, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);

static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
	if (ret)
		return ret;

	if (opts) {
		ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_TRSVCID=%s",
				opts->trsvcid ?: "none");
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
				opts->host_traddr ?: "none");
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_HOST_IFACE=%s",
				opts->host_iface ?: "none");
	}
	return ret;
}

static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata)
{
	char *envp[2] = { envdata, NULL };

	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
}

static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{
	char *envp[2] = { NULL, NULL };
	u32 aen_result = ctrl->aen_result;

	ctrl->aen_result = 0;
	if (!aen_result)
		return;

	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
	if (!envp[0])
		return;
	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
	kfree(envp[0]);
}

static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	nvme_aen_uevent(ctrl);

	/*
	 * The transport drivers must guarantee AER submission here is safe by
	 * flushing ctrl async_event_work after changing the controller state
	 * from LIVE and before freeing the admin queue.
4048 */ 4049 if (ctrl->state == NVME_CTRL_LIVE) 4050 ctrl->ops->submit_async_event(ctrl); 4051 } 4052 4053 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) 4054 { 4055 4056 u32 csts; 4057 4058 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) 4059 return false; 4060 4061 if (csts == ~0) 4062 return false; 4063 4064 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); 4065 } 4066 4067 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) 4068 { 4069 struct nvme_fw_slot_info_log *log; 4070 4071 log = kmalloc(sizeof(*log), GFP_KERNEL); 4072 if (!log) 4073 return; 4074 4075 if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM, 4076 log, sizeof(*log), 0)) 4077 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); 4078 kfree(log); 4079 } 4080 4081 static void nvme_fw_act_work(struct work_struct *work) 4082 { 4083 struct nvme_ctrl *ctrl = container_of(work, 4084 struct nvme_ctrl, fw_act_work); 4085 unsigned long fw_act_timeout; 4086 4087 if (ctrl->mtfa) 4088 fw_act_timeout = jiffies + 4089 msecs_to_jiffies(ctrl->mtfa * 100); 4090 else 4091 fw_act_timeout = jiffies + 4092 msecs_to_jiffies(admin_timeout * 1000); 4093 4094 nvme_quiesce_io_queues(ctrl); 4095 while (nvme_ctrl_pp_status(ctrl)) { 4096 if (time_after(jiffies, fw_act_timeout)) { 4097 dev_warn(ctrl->device, 4098 "Fw activation timeout, reset controller\n"); 4099 nvme_try_sched_reset(ctrl); 4100 return; 4101 } 4102 msleep(100); 4103 } 4104 4105 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) 4106 return; 4107 4108 nvme_unquiesce_io_queues(ctrl); 4109 /* read FW slot information to clear the AER */ 4110 nvme_get_fw_slot_info(ctrl); 4111 4112 queue_work(nvme_wq, &ctrl->async_event_work); 4113 } 4114 4115 static u32 nvme_aer_type(u32 result) 4116 { 4117 return result & 0x7; 4118 } 4119 4120 static u32 nvme_aer_subtype(u32 result) 4121 { 4122 return (result & 0xff00) >> 8; 4123 } 4124 4125 static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) 4126 { 4127 u32 aer_notice_type = nvme_aer_subtype(result); 4128 bool requeue = true; 4129 4130 switch (aer_notice_type) { 4131 case NVME_AER_NOTICE_NS_CHANGED: 4132 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); 4133 nvme_queue_scan(ctrl); 4134 break; 4135 case NVME_AER_NOTICE_FW_ACT_STARTING: 4136 /* 4137 * We are (ab)using the RESETTING state to prevent subsequent 4138 * recovery actions from interfering with the controller's 4139 * firmware activation. 
4140 */ 4141 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { 4142 nvme_auth_stop(ctrl); 4143 requeue = false; 4144 queue_work(nvme_wq, &ctrl->fw_act_work); 4145 } 4146 break; 4147 #ifdef CONFIG_NVME_MULTIPATH 4148 case NVME_AER_NOTICE_ANA: 4149 if (!ctrl->ana_log_buf) 4150 break; 4151 queue_work(nvme_wq, &ctrl->ana_work); 4152 break; 4153 #endif 4154 case NVME_AER_NOTICE_DISC_CHANGED: 4155 ctrl->aen_result = result; 4156 break; 4157 default: 4158 dev_warn(ctrl->device, "async event result %08x\n", result); 4159 } 4160 return requeue; 4161 } 4162 4163 static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl) 4164 { 4165 dev_warn(ctrl->device, "resetting controller due to AER\n"); 4166 nvme_reset_ctrl(ctrl); 4167 } 4168 4169 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, 4170 volatile union nvme_result *res) 4171 { 4172 u32 result = le32_to_cpu(res->u32); 4173 u32 aer_type = nvme_aer_type(result); 4174 u32 aer_subtype = nvme_aer_subtype(result); 4175 bool requeue = true; 4176 4177 if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) 4178 return; 4179 4180 trace_nvme_async_event(ctrl, result); 4181 switch (aer_type) { 4182 case NVME_AER_NOTICE: 4183 requeue = nvme_handle_aen_notice(ctrl, result); 4184 break; 4185 case NVME_AER_ERROR: 4186 /* 4187 * For a persistent internal error, don't run async_event_work 4188 * to submit a new AER. The controller reset will do it. 4189 */ 4190 if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) { 4191 nvme_handle_aer_persistent_error(ctrl); 4192 return; 4193 } 4194 fallthrough; 4195 case NVME_AER_SMART: 4196 case NVME_AER_CSS: 4197 case NVME_AER_VS: 4198 ctrl->aen_result = result; 4199 break; 4200 default: 4201 break; 4202 } 4203 4204 if (requeue) 4205 queue_work(nvme_wq, &ctrl->async_event_work); 4206 } 4207 EXPORT_SYMBOL_GPL(nvme_complete_async_event); 4208 4209 int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, 4210 const struct blk_mq_ops *ops, unsigned int cmd_size) 4211 { 4212 int ret; 4213 4214 memset(set, 0, sizeof(*set)); 4215 set->ops = ops; 4216 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; 4217 if (ctrl->ops->flags & NVME_F_FABRICS) 4218 set->reserved_tags = NVMF_RESERVED_TAGS; 4219 set->numa_node = ctrl->numa_node; 4220 set->flags = BLK_MQ_F_NO_SCHED; 4221 if (ctrl->ops->flags & NVME_F_BLOCKING) 4222 set->flags |= BLK_MQ_F_BLOCKING; 4223 set->cmd_size = cmd_size; 4224 set->driver_data = ctrl; 4225 set->nr_hw_queues = 1; 4226 set->timeout = NVME_ADMIN_TIMEOUT; 4227 ret = blk_mq_alloc_tag_set(set); 4228 if (ret) 4229 return ret; 4230 4231 ctrl->admin_q = blk_mq_init_queue(set); 4232 if (IS_ERR(ctrl->admin_q)) { 4233 ret = PTR_ERR(ctrl->admin_q); 4234 goto out_free_tagset; 4235 } 4236 4237 if (ctrl->ops->flags & NVME_F_FABRICS) { 4238 ctrl->fabrics_q = blk_mq_init_queue(set); 4239 if (IS_ERR(ctrl->fabrics_q)) { 4240 ret = PTR_ERR(ctrl->fabrics_q); 4241 goto out_cleanup_admin_q; 4242 } 4243 } 4244 4245 ctrl->admin_tagset = set; 4246 return 0; 4247 4248 out_cleanup_admin_q: 4249 blk_mq_destroy_queue(ctrl->admin_q); 4250 blk_put_queue(ctrl->admin_q); 4251 out_free_tagset: 4252 blk_mq_free_tag_set(set); 4253 ctrl->admin_q = NULL; 4254 ctrl->fabrics_q = NULL; 4255 return ret; 4256 } 4257 EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set); 4258 4259 void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl) 4260 { 4261 blk_mq_destroy_queue(ctrl->admin_q); 4262 blk_put_queue(ctrl->admin_q); 4263 if (ctrl->ops->flags & NVME_F_FABRICS) { 4264 blk_mq_destroy_queue(ctrl->fabrics_q); 4265 
		blk_put_queue(ctrl->fabrics_q);
	}
	blk_mq_free_tag_set(ctrl->admin_tagset);
}
EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);

int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int nr_maps,
		unsigned int cmd_size)
{
	int ret;

	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1);
	/*
	 * Some Apple controllers require tags to be unique across admin and
	 * the (only) I/O queue, so reserve the first 32 tags of the I/O queue.
	 */
	if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
		set->reserved_tags = NVME_AQ_DEPTH;
	else if (ctrl->ops->flags & NVME_F_FABRICS)
		set->reserved_tags = NVMF_RESERVED_TAGS;
	set->numa_node = ctrl->numa_node;
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	if (ctrl->ops->flags & NVME_F_BLOCKING)
		set->flags |= BLK_MQ_F_BLOCKING;
	set->cmd_size = cmd_size;
	set->driver_data = ctrl;
	set->nr_hw_queues = ctrl->queue_count - 1;
	set->timeout = NVME_IO_TIMEOUT;
	set->nr_maps = nr_maps;
	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ret;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		ctrl->connect_q = blk_mq_init_queue(set);
		if (IS_ERR(ctrl->connect_q)) {
			ret = PTR_ERR(ctrl->connect_q);
			goto out_free_tag_set;
		}
		blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE,
				   ctrl->connect_q);
	}

	ctrl->tagset = set;
	return 0;

out_free_tag_set:
	blk_mq_free_tag_set(set);
	ctrl->connect_q = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);

void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
{
	if (ctrl->ops->flags & NVME_F_FABRICS) {
		blk_mq_destroy_queue(ctrl->connect_q);
		blk_put_queue(ctrl->connect_q);
	}
	blk_mq_free_tag_set(ctrl->tagset);
}
EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set);

void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_mpath_stop(ctrl);
	nvme_auth_stop(ctrl);
	nvme_stop_keep_alive(ctrl);
	nvme_stop_failfast_work(ctrl);
	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fw_act_work);
	if (ctrl->ops->stop_ctrl)
		ctrl->ops->stop_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);

void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_start_keep_alive(ctrl);

	nvme_enable_aen(ctrl);

	/*
	 * Persistent discovery controllers need to send an indication to
	 * userspace to re-read the discovery log page to learn about possible
	 * changes that were missed. We identify persistent discovery
	 * controllers by checking that they started once before, hence they
	 * are reconnecting back.
4355 */ 4356 if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && 4357 nvme_discovery_ctrl(ctrl)) 4358 nvme_change_uevent(ctrl, "NVME_EVENT=rediscover"); 4359 4360 if (ctrl->queue_count > 1) { 4361 nvme_queue_scan(ctrl); 4362 nvme_unquiesce_io_queues(ctrl); 4363 nvme_mpath_update(ctrl); 4364 } 4365 4366 nvme_change_uevent(ctrl, "NVME_EVENT=connected"); 4367 set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags); 4368 } 4369 EXPORT_SYMBOL_GPL(nvme_start_ctrl); 4370 4371 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) 4372 { 4373 nvme_hwmon_exit(ctrl); 4374 nvme_fault_inject_fini(&ctrl->fault_inject); 4375 dev_pm_qos_hide_latency_tolerance(ctrl->device); 4376 cdev_device_del(&ctrl->cdev, ctrl->device); 4377 nvme_put_ctrl(ctrl); 4378 } 4379 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); 4380 4381 static void nvme_free_cels(struct nvme_ctrl *ctrl) 4382 { 4383 struct nvme_effects_log *cel; 4384 unsigned long i; 4385 4386 xa_for_each(&ctrl->cels, i, cel) { 4387 xa_erase(&ctrl->cels, i); 4388 kfree(cel); 4389 } 4390 4391 xa_destroy(&ctrl->cels); 4392 } 4393 4394 static void nvme_free_ctrl(struct device *dev) 4395 { 4396 struct nvme_ctrl *ctrl = 4397 container_of(dev, struct nvme_ctrl, ctrl_device); 4398 struct nvme_subsystem *subsys = ctrl->subsys; 4399 4400 if (!subsys || ctrl->instance != subsys->instance) 4401 ida_free(&nvme_instance_ida, ctrl->instance); 4402 4403 nvme_free_cels(ctrl); 4404 nvme_mpath_uninit(ctrl); 4405 nvme_auth_stop(ctrl); 4406 nvme_auth_free(ctrl); 4407 __free_page(ctrl->discard_page); 4408 free_opal_dev(ctrl->opal_dev); 4409 4410 if (subsys) { 4411 mutex_lock(&nvme_subsystems_lock); 4412 list_del(&ctrl->subsys_entry); 4413 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); 4414 mutex_unlock(&nvme_subsystems_lock); 4415 } 4416 4417 ctrl->ops->free_ctrl(ctrl); 4418 4419 if (subsys) 4420 nvme_put_subsystem(subsys); 4421 } 4422 4423 /* 4424 * Initialize a NVMe controller structures. This needs to be called during 4425 * earliest initialization so that we have the initialized structured around 4426 * during probing. 
4427 */ 4428 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, 4429 const struct nvme_ctrl_ops *ops, unsigned long quirks) 4430 { 4431 int ret; 4432 4433 ctrl->state = NVME_CTRL_NEW; 4434 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); 4435 spin_lock_init(&ctrl->lock); 4436 mutex_init(&ctrl->scan_lock); 4437 INIT_LIST_HEAD(&ctrl->namespaces); 4438 xa_init(&ctrl->cels); 4439 init_rwsem(&ctrl->namespaces_rwsem); 4440 ctrl->dev = dev; 4441 ctrl->ops = ops; 4442 ctrl->quirks = quirks; 4443 ctrl->numa_node = NUMA_NO_NODE; 4444 INIT_WORK(&ctrl->scan_work, nvme_scan_work); 4445 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); 4446 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); 4447 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); 4448 init_waitqueue_head(&ctrl->state_wq); 4449 4450 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); 4451 INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work); 4452 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); 4453 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; 4454 4455 BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) > 4456 PAGE_SIZE); 4457 ctrl->discard_page = alloc_page(GFP_KERNEL); 4458 if (!ctrl->discard_page) { 4459 ret = -ENOMEM; 4460 goto out; 4461 } 4462 4463 ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL); 4464 if (ret < 0) 4465 goto out; 4466 ctrl->instance = ret; 4467 4468 device_initialize(&ctrl->ctrl_device); 4469 ctrl->device = &ctrl->ctrl_device; 4470 ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt), 4471 ctrl->instance); 4472 ctrl->device->class = nvme_class; 4473 ctrl->device->parent = ctrl->dev; 4474 if (ops->dev_attr_groups) 4475 ctrl->device->groups = ops->dev_attr_groups; 4476 else 4477 ctrl->device->groups = nvme_dev_attr_groups; 4478 ctrl->device->release = nvme_free_ctrl; 4479 dev_set_drvdata(ctrl->device, ctrl); 4480 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); 4481 if (ret) 4482 goto out_release_instance; 4483 4484 nvme_get_ctrl(ctrl); 4485 cdev_init(&ctrl->cdev, &nvme_dev_fops); 4486 ctrl->cdev.owner = ops->module; 4487 ret = cdev_device_add(&ctrl->cdev, ctrl->device); 4488 if (ret) 4489 goto out_free_name; 4490 4491 /* 4492 * Initialize latency tolerance controls. The sysfs files won't 4493 * be visible to userspace unless the device actually supports APST. 
4494 */ 4495 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; 4496 dev_pm_qos_update_user_latency_tolerance(ctrl->device, 4497 min(default_ps_max_latency_us, (unsigned long)S32_MAX)); 4498 4499 nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); 4500 nvme_mpath_init_ctrl(ctrl); 4501 ret = nvme_auth_init_ctrl(ctrl); 4502 if (ret) 4503 goto out_free_cdev; 4504 4505 return 0; 4506 out_free_cdev: 4507 nvme_fault_inject_fini(&ctrl->fault_inject); 4508 dev_pm_qos_hide_latency_tolerance(ctrl->device); 4509 cdev_device_del(&ctrl->cdev, ctrl->device); 4510 out_free_name: 4511 nvme_put_ctrl(ctrl); 4512 kfree_const(ctrl->device->kobj.name); 4513 out_release_instance: 4514 ida_free(&nvme_instance_ida, ctrl->instance); 4515 out: 4516 if (ctrl->discard_page) 4517 __free_page(ctrl->discard_page); 4518 return ret; 4519 } 4520 EXPORT_SYMBOL_GPL(nvme_init_ctrl); 4521 4522 /* let I/O to all namespaces fail in preparation for surprise removal */ 4523 void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl) 4524 { 4525 struct nvme_ns *ns; 4526 4527 down_read(&ctrl->namespaces_rwsem); 4528 list_for_each_entry(ns, &ctrl->namespaces, list) 4529 blk_mark_disk_dead(ns->disk); 4530 up_read(&ctrl->namespaces_rwsem); 4531 } 4532 EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead); 4533 4534 void nvme_unfreeze(struct nvme_ctrl *ctrl) 4535 { 4536 struct nvme_ns *ns; 4537 4538 down_read(&ctrl->namespaces_rwsem); 4539 list_for_each_entry(ns, &ctrl->namespaces, list) 4540 blk_mq_unfreeze_queue(ns->queue); 4541 up_read(&ctrl->namespaces_rwsem); 4542 } 4543 EXPORT_SYMBOL_GPL(nvme_unfreeze); 4544 4545 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) 4546 { 4547 struct nvme_ns *ns; 4548 4549 down_read(&ctrl->namespaces_rwsem); 4550 list_for_each_entry(ns, &ctrl->namespaces, list) { 4551 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); 4552 if (timeout <= 0) 4553 break; 4554 } 4555 up_read(&ctrl->namespaces_rwsem); 4556 return timeout; 4557 } 4558 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); 4559 4560 void nvme_wait_freeze(struct nvme_ctrl *ctrl) 4561 { 4562 struct nvme_ns *ns; 4563 4564 down_read(&ctrl->namespaces_rwsem); 4565 list_for_each_entry(ns, &ctrl->namespaces, list) 4566 blk_mq_freeze_queue_wait(ns->queue); 4567 up_read(&ctrl->namespaces_rwsem); 4568 } 4569 EXPORT_SYMBOL_GPL(nvme_wait_freeze); 4570 4571 void nvme_start_freeze(struct nvme_ctrl *ctrl) 4572 { 4573 struct nvme_ns *ns; 4574 4575 down_read(&ctrl->namespaces_rwsem); 4576 list_for_each_entry(ns, &ctrl->namespaces, list) 4577 blk_freeze_queue_start(ns->queue); 4578 up_read(&ctrl->namespaces_rwsem); 4579 } 4580 EXPORT_SYMBOL_GPL(nvme_start_freeze); 4581 4582 void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl) 4583 { 4584 if (!ctrl->tagset) 4585 return; 4586 if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags)) 4587 blk_mq_quiesce_tagset(ctrl->tagset); 4588 else 4589 blk_mq_wait_quiesce_done(ctrl->tagset); 4590 } 4591 EXPORT_SYMBOL_GPL(nvme_quiesce_io_queues); 4592 4593 void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl) 4594 { 4595 if (!ctrl->tagset) 4596 return; 4597 if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags)) 4598 blk_mq_unquiesce_tagset(ctrl->tagset); 4599 } 4600 EXPORT_SYMBOL_GPL(nvme_unquiesce_io_queues); 4601 4602 void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl) 4603 { 4604 if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) 4605 blk_mq_quiesce_queue(ctrl->admin_q); 4606 else 4607 blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set); 4608 } 4609 
EXPORT_SYMBOL_GPL(nvme_quiesce_admin_queue); 4610 4611 void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl) 4612 { 4613 if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) 4614 blk_mq_unquiesce_queue(ctrl->admin_q); 4615 } 4616 EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue); 4617 4618 void nvme_sync_io_queues(struct nvme_ctrl *ctrl) 4619 { 4620 struct nvme_ns *ns; 4621 4622 down_read(&ctrl->namespaces_rwsem); 4623 list_for_each_entry(ns, &ctrl->namespaces, list) 4624 blk_sync_queue(ns->queue); 4625 up_read(&ctrl->namespaces_rwsem); 4626 } 4627 EXPORT_SYMBOL_GPL(nvme_sync_io_queues); 4628 4629 void nvme_sync_queues(struct nvme_ctrl *ctrl) 4630 { 4631 nvme_sync_io_queues(ctrl); 4632 if (ctrl->admin_q) 4633 blk_sync_queue(ctrl->admin_q); 4634 } 4635 EXPORT_SYMBOL_GPL(nvme_sync_queues); 4636 4637 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file) 4638 { 4639 if (file->f_op != &nvme_dev_fops) 4640 return NULL; 4641 return file->private_data; 4642 } 4643 EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU); 4644 4645 /* 4646 * Check we didn't inadvertently grow the command structure sizes: 4647 */ 4648 static inline void _nvme_check_size(void) 4649 { 4650 BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64); 4651 BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); 4652 BUILD_BUG_ON(sizeof(struct nvme_identify) != 64); 4653 BUILD_BUG_ON(sizeof(struct nvme_features) != 64); 4654 BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64); 4655 BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); 4656 BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64); 4657 BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64); 4658 BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64); 4659 BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64); 4660 BUILD_BUG_ON(sizeof(struct nvme_command) != 64); 4661 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE); 4662 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE); 4663 BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) != 4664 NVME_IDENTIFY_DATA_SIZE); 4665 BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE); 4666 BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE); 4667 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE); 4668 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE); 4669 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); 4670 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); 4671 BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); 4672 BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64); 4673 BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512); 4674 } 4675 4676 4677 static int __init nvme_core_init(void) 4678 { 4679 int result = -ENOMEM; 4680 4681 _nvme_check_size(); 4682 4683 nvme_wq = alloc_workqueue("nvme-wq", 4684 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 4685 if (!nvme_wq) 4686 goto out; 4687 4688 nvme_reset_wq = alloc_workqueue("nvme-reset-wq", 4689 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 4690 if (!nvme_reset_wq) 4691 goto destroy_wq; 4692 4693 nvme_delete_wq = alloc_workqueue("nvme-delete-wq", 4694 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 4695 if (!nvme_delete_wq) 4696 goto destroy_reset_wq; 4697 4698 result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0, 4699 NVME_MINORS, "nvme"); 4700 if (result < 0) 4701 goto destroy_delete_wq; 4702 4703 nvme_class = class_create("nvme"); 4704 if (IS_ERR(nvme_class)) { 4705 result = PTR_ERR(nvme_class); 4706 goto unregister_chrdev; 4707 } 4708 
nvme_class->dev_uevent = nvme_class_uevent; 4709 4710 nvme_subsys_class = class_create("nvme-subsystem"); 4711 if (IS_ERR(nvme_subsys_class)) { 4712 result = PTR_ERR(nvme_subsys_class); 4713 goto destroy_class; 4714 } 4715 4716 result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS, 4717 "nvme-generic"); 4718 if (result < 0) 4719 goto destroy_subsys_class; 4720 4721 nvme_ns_chr_class = class_create("nvme-generic"); 4722 if (IS_ERR(nvme_ns_chr_class)) { 4723 result = PTR_ERR(nvme_ns_chr_class); 4724 goto unregister_generic_ns; 4725 } 4726 4727 result = nvme_init_auth(); 4728 if (result) 4729 goto destroy_ns_chr; 4730 return 0; 4731 4732 destroy_ns_chr: 4733 class_destroy(nvme_ns_chr_class); 4734 unregister_generic_ns: 4735 unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); 4736 destroy_subsys_class: 4737 class_destroy(nvme_subsys_class); 4738 destroy_class: 4739 class_destroy(nvme_class); 4740 unregister_chrdev: 4741 unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); 4742 destroy_delete_wq: 4743 destroy_workqueue(nvme_delete_wq); 4744 destroy_reset_wq: 4745 destroy_workqueue(nvme_reset_wq); 4746 destroy_wq: 4747 destroy_workqueue(nvme_wq); 4748 out: 4749 return result; 4750 } 4751 4752 static void __exit nvme_core_exit(void) 4753 { 4754 nvme_exit_auth(); 4755 class_destroy(nvme_ns_chr_class); 4756 class_destroy(nvme_subsys_class); 4757 class_destroy(nvme_class); 4758 unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); 4759 unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); 4760 destroy_workqueue(nvme_delete_wq); 4761 destroy_workqueue(nvme_reset_wq); 4762 destroy_workqueue(nvme_wq); 4763 ida_destroy(&nvme_ns_chr_minor_ida); 4764 ida_destroy(&nvme_instance_ida); 4765 } 4766 4767 MODULE_LICENSE("GPL"); 4768 MODULE_VERSION("1.0"); 4769 module_init(nvme_core_init); 4770 module_exit(nvme_core_exit); 4771
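
/*
 * Illustrative sketch (not part of the driver): the rough order in which a
 * hypothetical transport would use the controller and tag set helpers defined
 * above (nvme_init_ctrl, nvme_alloc_admin_tag_set, nvme_alloc_io_tag_set,
 * nvme_start_ctrl and their teardown counterparts).  All "example_*" names
 * are placeholders invented for this sketch; a real transport supplies its
 * own nvme_ctrl_ops, blk_mq_ops and per-command data, and interleaves
 * transport specific setup between these calls.  Error unwinding is trimmed
 * to what is needed to show the ordering.
 */
#if 0	/* example only, never compiled */
extern const struct nvme_ctrl_ops example_ctrl_ops;	/* provided by transport */
extern const struct blk_mq_ops example_admin_mq_ops;	/* provided by transport */
extern const struct blk_mq_ops example_io_mq_ops;	/* provided by transport */

struct example_request {
	struct nvme_request	req;	/* must come first for nvme_req() */
	/* transport specific per-command state follows */
};

struct example_ctrl {
	struct nvme_ctrl	ctrl;
	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;
};

static int example_probe(struct example_ctrl *ectrl, struct device *dev)
{
	int ret;

	/* Register the controller with the core (allocates /dev/nvmeX). */
	ret = nvme_init_ctrl(&ectrl->ctrl, dev, &example_ctrl_ops, 0);
	if (ret)
		return ret;

	/* Admin queue first; it is needed to identify the controller. */
	ret = nvme_alloc_admin_tag_set(&ectrl->ctrl, &ectrl->admin_tag_set,
				       &example_admin_mq_ops,
				       sizeof(struct example_request));
	if (ret)
		goto out_uninit_ctrl;

	/* ... transport specific: enable the controller, then e.g. nvme_init_ctrl_finish() ... */

	ret = nvme_alloc_io_tag_set(&ectrl->ctrl, &ectrl->tag_set,
				    &example_io_mq_ops, 1,
				    sizeof(struct example_request));
	if (ret)
		goto out_remove_admin;

	/* ... transport specific: create and connect the I/O queues ... */

	/* Kick off keep-alive, AEN handling and namespace scanning. */
	nvme_start_ctrl(&ectrl->ctrl);
	return 0;

out_remove_admin:
	nvme_remove_admin_tag_set(&ectrl->ctrl);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ectrl->ctrl);
	nvme_put_ctrl(&ectrl->ctrl);
	return ret;
}

static void example_remove(struct example_ctrl *ectrl)
{
	/* Teardown mirrors setup: stop work, drop namespaces, then queues. */
	nvme_stop_ctrl(&ectrl->ctrl);
	nvme_remove_namespaces(&ectrl->ctrl);
	nvme_remove_io_tag_set(&ectrl->ctrl);
	nvme_remove_admin_tag_set(&ectrl->ctrl);
	nvme_uninit_ctrl(&ectrl->ctrl);
	nvme_put_ctrl(&ectrl->ctrl);
}
#endif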
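
/*
 * Illustrative sketch (not part of the driver): how the freeze/quiesce
 * helpers above are typically paired around a controller reset.  The
 * "example_" names are placeholders; real transports run this from their
 * reset_work.  The point is the ordering: freeze and quiesce before tearing
 * the queues down, then unquiesce, wait for the freeze to complete and
 * unfreeze once the controller is back.
 */
#if 0	/* example only, never compiled */
static void example_reset(struct nvme_ctrl *ctrl)
{
	/* Stop new I/O and let the block layer account in-flight requests. */
	nvme_start_freeze(ctrl);
	nvme_quiesce_io_queues(ctrl);

	/* ... transport specific: tear down and re-enable the hardware ... */

	/* Let queued I/O flow again and wait until all queues are frozen. */
	nvme_unquiesce_io_queues(ctrl);
	if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
		/*
		 * The queues never drained; transport specific recovery
		 * (e.g. tearing the controller down) goes here.
		 */
		return;
	}
	nvme_unfreeze(ctrl);
}
#endif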