// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define NVME_MINORS		(1U << MINORBITS)

struct nvme_ns_info {
	struct nvme_ns_ids ids;
	u32 nsid;
	__le32 anagrpid;
	bool is_shared;
	bool is_readonly;
	bool is_ready;
	bool is_removed;
};

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static unsigned long apst_primary_timeout_ms = 100;
module_param(apst_primary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_primary_timeout_ms,
	"primary APST timeout in ms");

static unsigned long apst_secondary_timeout_ms = 2000;
module_param(apst_secondary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_timeout_ms,
	"secondary APST timeout in ms");

static unsigned long apst_primary_latency_tol_us = 15000;
module_param(apst_primary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_primary_latency_tol_us,
	"primary APST latency tolerance in us");

static unsigned long apst_secondary_latency_tol_us = 100000;
module_param(apst_secondary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_latency_tol_us,
	"secondary APST latency tolerance in us");

/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive, periodic reconnects etc. nvme_reset_wq
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq hosts controller deletion
 * works which flush reset works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_ctrl_base_chr_devt;
static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env);
static const struct class nvme_class = {
	.name = "nvme",
	.dev_uevent = nvme_class_uevent,
};

static const struct class nvme_subsys_class = {
	.name = "nvme-subsystem",
};

static DEFINE_IDA(nvme_ns_chr_minor_ida);
static dev_t nvme_ns_chr_devt;
static const struct class nvme_ns_chr_class = {
	.name = "nvme-generic",
};

static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd);

void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only queue new scan work when admin and IO queues are both alive
	 */
	if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
}

/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
	if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);

static void nvme_failfast_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, failfast_work);

	if (nvme_ctrl_state(ctrl) != NVME_CTRL_CONNECTING)
		return;

	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	dev_info(ctrl->device, "failfast expired\n");
	nvme_kick_requeue_lists(ctrl);
}

static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
		return;

	schedule_delayed_work(&ctrl->failfast_work,
			      ctrl->opts->fast_io_fail_tmo * HZ);
}

static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts)
		return;

	cancel_delayed_work_sync(&ctrl->failfast_work);
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
}


int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
			ret = -ENETRESET;
	}

	return ret;
}

static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
}

static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	/*
	 * Keep a reference until nvme_do_delete_ctrl() completes,
	 * since ->delete_ctrl can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}

static blk_status_t nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_RESV_CONFLICT;
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case NVME_SC_ZONE_TOO_MANY_OPEN:
		return BLK_STS_ZONE_OPEN_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}

static void nvme_retry_req(struct request *req)
{
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
	if (crd)
		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}

static void nvme_log_error(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	struct nvme_request *nr = nvme_req(req);

	if (ns) {
		pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %u blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
			ns->disk ? ns->disk->disk_name : "?",
			nvme_get_opcode_str(nr->cmd->common.opcode),
			nr->cmd->common.opcode,
			nvme_sect_to_lba(ns->head, blk_rq_pos(req)),
			blk_rq_bytes(req) >> ns->head->lba_shift,
			nvme_get_error_status_str(nr->status),
			nr->status >> 8 & 7,	/* Status Code Type */
			nr->status & 0xff,	/* Status Code */
			nr->status & NVME_SC_MORE ? "MORE " : "",
"DNR " : ""); 336 return; 337 } 338 339 pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n", 340 dev_name(nr->ctrl->device), 341 nvme_get_admin_opcode_str(nr->cmd->common.opcode), 342 nr->cmd->common.opcode, 343 nvme_get_error_status_str(nr->status), 344 nr->status >> 8 & 7, /* Status Code Type */ 345 nr->status & 0xff, /* Status Code */ 346 nr->status & NVME_SC_MORE ? "MORE " : "", 347 nr->status & NVME_SC_DNR ? "DNR " : ""); 348 } 349 350 static void nvme_log_err_passthru(struct request *req) 351 { 352 struct nvme_ns *ns = req->q->queuedata; 353 struct nvme_request *nr = nvme_req(req); 354 355 pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s" 356 "cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw13=0x%x cdw14=0x%x cdw15=0x%x\n", 357 ns ? ns->disk->disk_name : dev_name(nr->ctrl->device), 358 ns ? nvme_get_opcode_str(nr->cmd->common.opcode) : 359 nvme_get_admin_opcode_str(nr->cmd->common.opcode), 360 nr->cmd->common.opcode, 361 nvme_get_error_status_str(nr->status), 362 nr->status >> 8 & 7, /* Status Code Type */ 363 nr->status & 0xff, /* Status Code */ 364 nr->status & NVME_SC_MORE ? "MORE " : "", 365 nr->status & NVME_SC_DNR ? "DNR " : "", 366 nr->cmd->common.cdw10, 367 nr->cmd->common.cdw11, 368 nr->cmd->common.cdw12, 369 nr->cmd->common.cdw13, 370 nr->cmd->common.cdw14, 371 nr->cmd->common.cdw14); 372 } 373 374 enum nvme_disposition { 375 COMPLETE, 376 RETRY, 377 FAILOVER, 378 AUTHENTICATE, 379 }; 380 381 static inline enum nvme_disposition nvme_decide_disposition(struct request *req) 382 { 383 if (likely(nvme_req(req)->status == 0)) 384 return COMPLETE; 385 386 if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED) 387 return AUTHENTICATE; 388 389 if (blk_noretry_request(req) || 390 (nvme_req(req)->status & NVME_SC_DNR) || 391 nvme_req(req)->retries >= nvme_max_retries) 392 return COMPLETE; 393 394 if (req->cmd_flags & REQ_NVME_MPATH) { 395 if (nvme_is_path_error(nvme_req(req)->status) || 396 blk_queue_dying(req->q)) 397 return FAILOVER; 398 } else { 399 if (blk_queue_dying(req->q)) 400 return COMPLETE; 401 } 402 403 return RETRY; 404 } 405 406 static inline void nvme_end_req_zoned(struct request *req) 407 { 408 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && 409 req_op(req) == REQ_OP_ZONE_APPEND) { 410 struct nvme_ns *ns = req->q->queuedata; 411 412 req->__sector = nvme_lba_to_sect(ns->head, 413 le64_to_cpu(nvme_req(req)->result.u64)); 414 } 415 } 416 417 static inline void nvme_end_req(struct request *req) 418 { 419 blk_status_t status = nvme_error_status(nvme_req(req)->status); 420 421 if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) { 422 if (blk_rq_is_passthrough(req)) 423 nvme_log_err_passthru(req); 424 else 425 nvme_log_error(req); 426 } 427 nvme_end_req_zoned(req); 428 nvme_trace_bio_complete(req); 429 if (req->cmd_flags & REQ_NVME_MPATH) 430 nvme_mpath_end_request(req); 431 blk_mq_end_request(req, status); 432 } 433 434 void nvme_complete_rq(struct request *req) 435 { 436 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; 437 438 trace_nvme_complete_rq(req); 439 nvme_cleanup_cmd(req); 440 441 /* 442 * Completions of long-running commands should not be able to 443 * defer sending of periodic keep alives, since the controller 444 * may have completed processing such commands a long time ago 445 * (arbitrarily close to command submission time). 446 * req->deadline - req->timeout is the command submission time 447 * in jiffies. 
	 */
	if (ctrl->kas &&
	    req->deadline - req->timeout >= ctrl->ka_last_check_time)
		ctrl->comp_seen = true;

	switch (nvme_decide_disposition(req)) {
	case COMPLETE:
		nvme_end_req(req);
		return;
	case RETRY:
		nvme_retry_req(req);
		return;
	case FAILOVER:
		nvme_failover_req(req);
		return;
	case AUTHENTICATE:
#ifdef CONFIG_NVME_HOST_AUTH
		queue_work(nvme_wq, &ctrl->dhchap_auth_work);
		nvme_retry_req(req);
#else
		nvme_end_req(req);
#endif
		return;
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

void nvme_complete_batch_req(struct request *req)
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);
	nvme_end_req_zoned(req);
}
EXPORT_SYMBOL_GPL(nvme_complete_batch_req);

/*
 * Called to unwind from ->queue_rq on a failed command submission so that the
 * multipathing code gets called to potentially failover to another path.
 * The caller needs to unwind all transport specific resource allocations and
 * must propagate the return value.
 */
blk_status_t nvme_host_path_error(struct request *req)
{
	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
	blk_mq_set_request_complete(req);
	nvme_complete_rq(req);
	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvme_host_path_error);

bool nvme_cancel_request(struct request *req, void *data)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	/* don't abort one completed or idle request */
	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT)
		return true;

	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
	blk_mq_complete_request(req);
	return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->tagset) {
		blk_mq_tagset_busy_iter(ctrl->tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_tagset);

void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->admin_tagset) {
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);

bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = nvme_ctrl_state(ctrl);
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING_NOIO:
		switch (old_state) {
		case NVME_CTRL_DELETING:
		case NVME_CTRL_DEAD:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed) {
		WRITE_ONCE(ctrl->state, new_state);
		wake_up_all(&ctrl->state_wq);
	}

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (!changed)
		return false;

	if (new_state == NVME_CTRL_LIVE) {
		if (old_state == NVME_CTRL_CONNECTING)
			nvme_stop_failfast_work(ctrl);
		nvme_kick_requeue_lists(ctrl);
	} else if (new_state == NVME_CTRL_CONNECTING &&
		old_state == NVME_CTRL_RESETTING) {
		nvme_start_failfast_work(ctrl);
	}
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

/*
 * Returns true for sink states that can't ever transition back to live.
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (nvme_ctrl_state(ctrl)) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return nvme_ctrl_state(ctrl) == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);

static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_free(&head->subsys->ns_ida, head->instance);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

bool nvme_tryget_ns_head(struct nvme_ns_head *head)
{
	return kref_get_unless_zero(&head->ref);
}

void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static inline bool nvme_get_ns(struct nvme_ns *ns)
{
	return kref_get_unless_zero(&ns->kref);
}

void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);

static inline void nvme_clear_nvme_request(struct request *req)
{
	nvme_req(req)->status = 0;
	nvme_req(req)->retries = 0;
	nvme_req(req)->flags = 0;
	req->rq_flags |= RQF_DONTPREP;
}

/* initialize a passthrough request */
void nvme_init_request(struct request *req, struct nvme_command *cmd)
{
	struct nvme_request *nr = nvme_req(req);
	bool logging_enabled;

	if (req->q->queuedata) {
		struct nvme_ns *ns = req->q->disk->private_data;

		logging_enabled = ns->head->passthru_err_log_enabled;
		req->timeout = NVME_IO_TIMEOUT;
	} else { /* no queuedata implies admin queue */
		logging_enabled = nr->ctrl->passthru_err_log_enabled;
		req->timeout = NVME_ADMIN_TIMEOUT;
	}

	if (!logging_enabled)
		req->rq_flags |= RQF_QUIET;

	/* passthru commands should let the driver set the SGL flags */
	cmd->common.flags &= ~NVME_CMD_SGL_ALL;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	if (req->mq_hctx->type == HCTX_TYPE_POLL)
		req->cmd_flags |= REQ_POLLED;
	nvme_clear_nvme_request(req);
	memcpy(nr->cmd, cmd, sizeof(*cmd));
}
EXPORT_SYMBOL_GPL(nvme_init_request);

/*
 * For something we're not in a state to send to the device the default action
 * is to busy it and retry it after the controller state is recovered.  However,
 * if the controller is deleting or if anything is marked for failfast or
 * nvme multipath it is immediately failed.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);

	if (state != NVME_CTRL_DELETING_NOIO &&
	    state != NVME_CTRL_DELETING &&
	    state != NVME_CTRL_DEAD &&
	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;
	return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);

bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live, enum nvme_ctrl_state state)
{
	struct nvme_request *req = nvme_req(rq);

	/*
	 * currently we have a problem sending passthru commands
	 * on the admin_q if the controller is not LIVE because we can't
	 * make sure that they are going out after the admin connect,
	 * controller enable and/or other commands in the initialization
	 * sequence. until the controller is LIVE, fail with
	 * BLK_STS_RESOURCE so that they will be rescheduled.
	 */
	if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
		return false;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * Only allow commands on a live queue, except for the connect
		 * command, which is required to set the queue live in the
		 * appropriate states.
		 */
		switch (state) {
		case NVME_CTRL_CONNECTING:
			if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
			    (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
			     req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
			     req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
				return true;
			break;
		default:
			break;
		case NVME_CTRL_DEAD:
			return false;
		}
	}

	return queue_live;
}
EXPORT_SYMBOL_GPL(__nvme_check_ready);

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}

static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for the
	 * maximum number of segments to prevent the device reading beyond
	 * the end of the buffer.
	 */
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
	if (!range) {
		/*
		 * If we fail to allocate our range, fall back to the controller
		 * discard page. If that's also busy, it's safe to return
		 * busy, as we know we can make progress once that's freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}

	if (queue_max_discard_segments(req->q) == 1) {
		u64 slba = nvme_sect_to_lba(ns->head, blk_rq_pos(req));
		u32 nlb = blk_rq_sectors(req) >> (ns->head->lba_shift - 9);

		range[0].cattr = cpu_to_le32(0);
		range[0].nlb = cpu_to_le32(nlb);
		range[0].slba = cpu_to_le64(slba);
		n = 1;
	} else {
		__rq_for_each_bio(bio, req) {
			u64 slba = nvme_sect_to_lba(ns->head,
						    bio->bi_iter.bi_sector);
			u32 nlb = bio->bi_iter.bi_size >> ns->head->lba_shift;

			if (n < segments) {
				range[n].cattr = cpu_to_le32(0);
				range[n].nlb = cpu_to_le32(nlb);
				range[n].slba = cpu_to_le64(slba);
			}
			n++;
		}
	}

	if (WARN_ON_ONCE(n != segments)) {
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
		return BLK_STS_IOERR;
	}

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	bvec_set_virt(&req->special_vec, range, alloc_size);
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}

static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
			     struct request *req)
{
	u32 upper, lower;
	u64 ref48;

	/* both rw and write zeroes share the same reftag format */
	switch (ns->head->guard_type) {
	case NVME_NVM_NS_16B_GUARD:
		cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
		break;
	case NVME_NVM_NS_64B_GUARD:
		ref48 = ext_pi_ref_tag(req);
		lower = lower_32_bits(ref48);
		upper = upper_32_bits(ref48);

		cmnd->rw.reftag = cpu_to_le32(lower);
		cmnd->rw.cdw3 = cpu_to_le32(upper);
		break;
	default:
		break;
	}
}

static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));

	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
		cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);

	if (!(req->cmd_flags & REQ_NOUNMAP) &&
	    (ns->head->features & NVME_NS_DEAC))
		cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC);

	if (nvme_ns_has_pi(ns->head)) {
		cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);

		switch (ns->head->pi_type) {
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			nvme_set_ref_tag(ns, cmnd, req);
			break;
		}
	}

	return BLK_STS_OK;
}

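/*
 * Translate a block layer read/write (or zone append) request into an NVMe
 * command: convert the request's position and byte count into the
 * namespace's LBA units, and set the FUA, limited-retry, prefetch hint and
 * protection information fields based on the request flags and the
 * namespace metadata format.
 */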
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_opcode op)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd->rw.opcode = op;
	cmnd->rw.flags = 0;
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.cdw2 = 0;
	cmnd->rw.cdw3 = 0;
	cmnd->rw.metadata = 0;
	cmnd->rw.slba =
		cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
	cmnd->rw.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
	cmnd->rw.reftag = 0;
	cmnd->rw.apptag = 0;
	cmnd->rw.appmask = 0;

	if (ns->head->ms) {
		/*
		 * If formatted with metadata, the block layer always provides a
		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
		 * we enable the PRACT bit for protection information or set the
		 * namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}

		switch (ns->head->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			if (op == nvme_cmd_zone_append)
				control |= NVME_RW_APPEND_PIREMAP;
			nvme_set_ref_tag(ns, cmnd, req);
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return 0;
}

void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;

		if (req->special_vec.bv_page == ctrl->discard_page)
			clear_bit_unlock(0, &ctrl->discard_page_busy);
		else
			kfree(bvec_virt(&req->special_vec));
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
{
	struct nvme_command *cmd = nvme_req(req)->cmd;
	blk_status_t ret = BLK_STS_OK;

	if (!(req->rq_flags & RQF_DONTPREP))
		nvme_clear_nvme_request(req);

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		/* these are setup prior to execution in nvme_init_request() */
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_RESET:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
		break;
	case REQ_OP_WRITE_ZEROES:
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
		break;
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
		break;
	case REQ_OP_ZONE_APPEND:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = nvme_cid(req);
	trace_nvme_setup_cmd(req, cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

/*
 * Return values:
 * 0:  success
 * >0: nvme controller's cqe status response
 * <0: kernel error in lieu of controller response
 */
int nvme_execute_rq(struct request *rq, bool at_head)
{
	blk_status_t status;

	status = blk_execute_rq(rq, at_head);
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		return -EINTR;
	if (nvme_req(rq)->status)
		return nvme_req(rq)->status;
	return blk_status_to_errno(status);
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		int qid, nvme_submit_flags_t flags)
{
	struct request *req;
	int ret;
	blk_mq_req_flags_t blk_flags = 0;

	if (flags & NVME_SUBMIT_NOWAIT)
		blk_flags |= BLK_MQ_REQ_NOWAIT;
	if (flags & NVME_SUBMIT_RESERVED)
		blk_flags |= BLK_MQ_REQ_RESERVED;
	if (qid == NVME_QID_ANY)
		req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags);
	else
		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags,
						qid - 1);

	if (IS_ERR(req))
		return PTR_ERR(req);
	nvme_init_request(req, cmd);
	if (flags & NVME_SUBMIT_RETRY)
		req->cmd_flags &= ~REQ_FAILFAST_DRIVER;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	ret = nvme_execute_rq(req, flags & NVME_SUBMIT_AT_HEAD);
	if (result && ret >= 0)
		*result = nvme_req(req)->result;
out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
			NVME_QID_ANY, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn_once(ctrl->device,
				"IO command:%02x has unusual effects:%08x\n",
				opcode, effects);

		/*
		 * NVME_CMD_EFFECTS_CSE_MASK causes a freeze of all I/O queues,
		 * which would deadlock when done on an I/O command.  Note that
		 * we already warn about an unusual effect above.
		 */
		effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
	} else {
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);

		/* Ignore execution restrictions if any relaxation bits are set */
		if (effects & NVME_CMD_EFFECTS_CSER_MASK)
			effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
	}

	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);

u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = nvme_command_effects(ctrl, ns, opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU);

void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
		       struct nvme_command *cmd, int status)
{
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC) {
		if (!test_and_set_bit(NVME_CTRL_DIRTY_CAPABILITY,
				      &ctrl->flags)) {
			dev_info(ctrl->device,
				 "controller capabilities changed, reset may be required to take effect.\n");
		}
	}
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
		nvme_queue_scan(ctrl);
		flush_work(&ctrl->scan_work);
	}
	if (ns)
		return;

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
		case NVME_FEAT_KATO:
			/*
			 * The keep alive command interval on the host should
			 * be updated when KATO is modified by a Set Features
			 * command.
			 */
			if (!status)
				nvme_update_keep_alive(ctrl, cmd);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);

/*
 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
 *
 *   The host should send Keep Alive commands at half of the Keep Alive Timeout
 *   accounting for transport roundtrip times [..].
 */
static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
{
	unsigned long delay = ctrl->kato * HZ / 2;

	/*
	 * When using Traffic Based Keep Alive, we need to run
	 * nvme_keep_alive_work at twice the normal frequency, as one
	 * command completion can postpone sending a keep alive command
	 * by up to twice the delay between runs.
	 */
	if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
		delay /= 2;
	return delay;
}

static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{
	unsigned long now = jiffies;
	unsigned long delay = nvme_keep_alive_work_period(ctrl);
	unsigned long ka_next_check_tm = ctrl->ka_last_check_time + delay;

	if (time_after(now, ka_next_check_tm))
		delay = 0;
	else
		delay = ka_next_check_tm - now;

	queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
}

static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
						 blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
	unsigned long flags;
	bool startka = false;
	unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
	unsigned long delay = nvme_keep_alive_work_period(ctrl);

	/*
	 * Subtract off the keepalive RTT so nvme_keep_alive_work runs
	 * at the desired frequency.
	 */
	if (rtt <= delay) {
		delay -= rtt;
	} else {
		dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
			 jiffies_to_msecs(rtt));
		delay = 0;
	}

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
			status);
		return RQ_END_IO_NONE;
	}

	ctrl->ka_last_check_time = jiffies;
	ctrl->comp_seen = false;
	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
		queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
	return RQ_END_IO_NONE;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
	bool comp_seen = ctrl->comp_seen;
	struct request *rq;

	ctrl->ka_last_check_time = jiffies;

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
		nvme_queue_keep_alive_work(ctrl);
		return;
	}

	rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
				  BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
		nvme_reset_ctrl(ctrl);
		return;
	}
	nvme_init_request(rq, &ctrl->ka_cmd);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io = nvme_keep_alive_end_io;
	rq->end_io_data = ctrl;
	blk_execute_rq_nowait(rq, false);
}

static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	nvme_queue_keep_alive_work(ctrl);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd)
{
	unsigned int new_kato =
		DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);

	dev_info(ctrl->device,
		 "keep alive interval updated from %u ms to %u ms\n",
		 ctrl->kato * 1000 / 2, new_kato * 1000 / 2);

	nvme_stop_keep_alive(ctrl);
	ctrl->kato = new_kato;
	nvme_start_keep_alive(ctrl);
}

/*
 * In NVMe 1.0 the CNS field was just a binary controller or namespace
 * flag, thus sending any new CNS opcodes has a big chance of not working.
 * Qemu unfortunately had that bug after reporting a 1.1 version compliance
 * (but not for any later version).
 */
static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
{
	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
		return ctrl->vs < NVME_VS(1, 2, 0);
	return ctrl->vs < NVME_VS(1, 1, 0);
}

static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error) {
		kfree(*id);
		*id = NULL;
	}
	return error;
}

static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
		struct nvme_ns_id_desc *cur, bool *csi_seen)
{
	const char *warn_str = "ctrl returned bogus length:";
	void *data = cur;

	switch (cur->nidt) {
	case NVME_NIDT_EUI64:
		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_EUI64_LEN;
		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
		return NVME_NIDT_EUI64_LEN;
	case NVME_NIDT_NGUID:
		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_NGUID_LEN;
		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
		return NVME_NIDT_NGUID_LEN;
	case NVME_NIDT_UUID:
		if (cur->nidl != NVME_NIDT_UUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_UUID_LEN;
		uuid_copy(&ids->uuid, data + sizeof(*cur));
		return NVME_NIDT_UUID_LEN;
	case NVME_NIDT_CSI:
		if (cur->nidl != NVME_NIDT_CSI_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
		*csi_seen = true;
		return NVME_NIDT_CSI_LEN;
	default:
		/* Skip unknown types */
		return cur->nidl;
	}
}

static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{
	struct nvme_command c = { };
	bool csi_seen = false;
	int status, pos, len;
	void *data;

	if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
		return 0;
	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
		return 0;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(info->nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status) {
		dev_warn(ctrl->device,
			"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
			info->nsid, status);
		goto free_data;
	}

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
		if (len < 0)
			break;

		len += sizeof(*cur);
	}

	if (nvme_multi_css(ctrl) && !csi_seen) {
		dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
			 info->nsid);
		status = -EINVAL;
	}

free_data:
	kfree(data);
	return status;
}

int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
			struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	*id = kmalloc(sizeof(**id), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
		kfree(*id);
		*id = NULL;
	}
	return error;
}

static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{
	struct nvme_ns_ids *ids = &info->ids;
	struct nvme_id_ns *id;
	int ret;

	ret = nvme_identify_ns(ctrl, info->nsid, &id);
	if (ret)
		return ret;

	if (id->ncap == 0) {
		/* namespace not allocated or attached */
		info->is_removed = true;
		ret = -ENODEV;
		goto error;
	}

	info->anagrpid = id->anagrpid;
	info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
	info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
	info->is_ready = true;
	if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
		dev_info(ctrl->device,
			 "Ignoring bogus Namespace Identifiers\n");
	} else {
		if (ctrl->vs >= NVME_VS(1, 1, 0) &&
		    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			memcpy(ids->eui64, id->eui64, sizeof(ids->eui64));
		if (ctrl->vs >= NVME_VS(1, 2, 0) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
	}

error:
	kfree(id);
	return ret;
}

static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{
	struct nvme_id_ns_cs_indep *id;
	struct nvme_command c = {
		.identify.opcode	= nvme_admin_identify,
		.identify.nsid		= cpu_to_le32(info->nsid),
		.identify.cns		= NVME_ID_CNS_NS_CS_INDEP,
	};
	int ret;

	id = kmalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
	if (!ret) {
		info->anagrpid = id->anagrpid;
		info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
		info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
		info->is_ready = id->nstat & NVME_NSTAT_NRDY;
	}
	kfree(id);
	return ret;
}

static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
	union nvme_result res = { 0 };
	struct nvme_command c = { };
	int ret;

	c.features.opcode = op;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, NVME_QID_ANY, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_set_features);

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_get_features);

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count. We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

#define NVME_AEN_SUPPORTED \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
	int status;

	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
			 supported_aens);

	queue_work(nvme_wq, &ctrl->async_event_work);
}

static int nvme_ns_open(struct nvme_ns *ns)
{

	/* should never be called due to GENHD_FL_HIDDEN */
	if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
		goto fail;
	if (!nvme_get_ns(ns))
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

	return 0;

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
}

static void nvme_ns_release(struct nvme_ns *ns)
{

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_open(struct gendisk *disk, blk_mode_t mode)
{
	return nvme_ns_open(disk->private_data);
}

static void nvme_release(struct gendisk *disk)
{
	nvme_ns_release(disk->private_data);
}

int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head)
{
	struct blk_integrity integrity = { };

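	/*
	 * Drop any previously registered integrity profile; it is
	 * re-registered below if the current metadata format supports it.
	 */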
	blk_integrity_unregister(disk);

	if (!head->ms)
		return true;

	/*
	 * PI can always be supported as we can ask the controller to simply
	 * insert/strip it, which is not possible for other kinds of metadata.
	 */
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) ||
	    !(head->features & NVME_NS_METADATA_SUPPORTED))
		return nvme_ns_has_pi(head);

	switch (head->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		switch (head->guard_type) {
		case NVME_NVM_NS_16B_GUARD:
			integrity.profile = &t10_pi_type3_crc;
			integrity.tag_size = sizeof(u16) + sizeof(u32);
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		case NVME_NVM_NS_64B_GUARD:
			integrity.profile = &ext_pi_type3_crc64;
			integrity.tag_size = sizeof(u16) + 6;
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		default:
			integrity.profile = NULL;
			break;
		}
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		switch (head->guard_type) {
		case NVME_NVM_NS_16B_GUARD:
			integrity.profile = &t10_pi_type1_crc;
			integrity.tag_size = sizeof(u16);
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		case NVME_NVM_NS_64B_GUARD:
			integrity.profile = &ext_pi_type1_crc64;
			integrity.tag_size = sizeof(u16);
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		default:
			integrity.profile = NULL;
			break;
		}
		break;
	default:
		integrity.profile = NULL;
		break;
	}

	integrity.tuple_size = head->ms;
	integrity.pi_offset = head->pi_offset;
	blk_integrity_register(disk, &integrity);
	return true;
}

static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
{
	struct nvme_ctrl *ctrl = ns->ctrl;

	if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
		lim->max_hw_discard_sectors =
			nvme_lba_to_sect(ns->head, ctrl->dmrsl);
	else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
		lim->max_hw_discard_sectors = UINT_MAX;
	else
		lim->max_hw_discard_sectors = 0;

	lim->discard_granularity = lim->logical_block_size;

	if (ctrl->dmrl)
		lim->max_discard_segments = ctrl->dmrl;
	else
		lim->max_discard_segments = NVME_DSM_MAX_RANGES;
}

static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
		a->csi == b->csi;
}

static int nvme_identify_ns_nvm(struct nvme_ctrl *ctrl, unsigned int nsid,
		struct nvme_id_ns_nvm **nvmp)
{
	struct nvme_command c = {
		.identify.opcode	= nvme_admin_identify,
		.identify.nsid		= cpu_to_le32(nsid),
		.identify.cns		= NVME_ID_CNS_CS_NS,
		.identify.csi		= NVME_CSI_NVM,
	};
	struct nvme_id_ns_nvm *nvm;
	int ret;

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, nvm, sizeof(*nvm));
	if (ret)
		kfree(nvm);
	else
		*nvmp = nvm;
	return ret;
}

static void nvme_configure_pi_elbas(struct nvme_ns_head *head,
		struct nvme_id_ns *id, struct nvme_id_ns_nvm *nvm)
{
	u32 elbaf = le32_to_cpu(nvm->elbaf[nvme_lbaf_index(id->flbas)]);

	/* no support for storage tag formats right now */
	if (nvme_elbaf_sts(elbaf))
		return;

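	/* The guard type from ELBAF determines the protection information tuple size. */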
	head->guard_type = nvme_elbaf_guard_type(elbaf);
	switch (head->guard_type) {
	case NVME_NVM_NS_64B_GUARD:
		head->pi_size = sizeof(struct crc64_pi_tuple);
		break;
	case NVME_NVM_NS_16B_GUARD:
		head->pi_size = sizeof(struct t10_pi_tuple);
		break;
	default:
		break;
	}
}

static void nvme_configure_metadata(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head, struct nvme_id_ns *id,
		struct nvme_id_ns_nvm *nvm)
{
	head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
	head->pi_type = 0;
	head->pi_size = 0;
	head->pi_offset = 0;
	head->ms = le16_to_cpu(id->lbaf[nvme_lbaf_index(id->flbas)].ms);
	if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
		return;

	if (nvm && (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
		nvme_configure_pi_elbas(head, id, nvm);
	} else {
		head->pi_size = sizeof(struct t10_pi_tuple);
		head->guard_type = NVME_NVM_NS_16B_GUARD;
	}

	if (head->pi_size && head->ms >= head->pi_size)
		head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
	if (!(id->dps & NVME_NS_DPS_PI_FIRST))
		head->pi_offset = head->ms - head->pi_size;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * The NVMe over Fabrics specification only supports metadata as
		 * part of the extended data LBA.  We rely on HCA/HBA support to
		 * remap the separate metadata buffer from the block layer.
		 */
		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
			return;

		head->features |= NVME_NS_EXT_LBAS;

		/*
		 * The current fabrics transport drivers support namespace
		 * metadata formats only if nvme_ns_has_pi() returns true.
		 * Suppress support for all other formats so the namespace will
		 * have a 0 capacity and not be usable through the block stack.
		 *
		 * Note, this check will need to be modified if any drivers
		 * gain the ability to use other metadata formats.
		 */
		if (ctrl->max_integrity_segments && nvme_ns_has_pi(head))
			head->features |= NVME_NS_METADATA_SUPPORTED;
	} else {
		/*
		 * For PCIe controllers, we can't easily remap the separate
		 * metadata buffer from the block layer and thus require a
		 * separate metadata buffer for block layer metadata/PI support.
		 * We allow extended LBAs for the passthrough interface, though.
1932 */ 1933 if (id->flbas & NVME_NS_FLBAS_META_EXT) 1934 head->features |= NVME_NS_EXT_LBAS; 1935 else 1936 head->features |= NVME_NS_METADATA_SUPPORTED; 1937 } 1938 } 1939 1940 static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl) 1941 { 1942 return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1; 1943 } 1944 1945 static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl, 1946 struct queue_limits *lim) 1947 { 1948 lim->max_hw_sectors = ctrl->max_hw_sectors; 1949 lim->max_segments = min_t(u32, USHRT_MAX, 1950 min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments)); 1951 lim->max_integrity_segments = ctrl->max_integrity_segments; 1952 lim->virt_boundary_mask = NVME_CTRL_PAGE_SIZE - 1; 1953 lim->max_segment_size = UINT_MAX; 1954 lim->dma_alignment = 3; 1955 } 1956 1957 static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id, 1958 struct queue_limits *lim) 1959 { 1960 struct nvme_ns_head *head = ns->head; 1961 u32 bs = 1U << head->lba_shift; 1962 u32 atomic_bs, phys_bs, io_opt = 0; 1963 bool valid = true; 1964 1965 /* 1966 * The block layer can't support LBA sizes larger than the page size 1967 * or smaller than a sector size yet, so catch this early and don't 1968 * allow block I/O. 1969 */ 1970 if (head->lba_shift > PAGE_SHIFT || head->lba_shift < SECTOR_SHIFT) { 1971 bs = (1 << 9); 1972 valid = false; 1973 } 1974 1975 atomic_bs = phys_bs = bs; 1976 if (id->nabo == 0) { 1977 /* 1978 * Bit 1 indicates whether NAWUPF is defined for this namespace 1979 * and whether it should be used instead of AWUPF. If NAWUPF == 1980 * 0 then AWUPF must be used instead. 1981 */ 1982 if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) 1983 atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; 1984 else 1985 atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; 1986 } 1987 1988 if (id->nsfeat & NVME_NS_FEAT_IO_OPT) { 1989 /* NPWG = Namespace Preferred Write Granularity */ 1990 phys_bs = bs * (1 + le16_to_cpu(id->npwg)); 1991 /* NOWS = Namespace Optimal Write Size */ 1992 io_opt = bs * (1 + le16_to_cpu(id->nows)); 1993 } 1994 1995 /* 1996 * Linux filesystems assume writing a single physical block is 1997 * an atomic operation. Hence limit the physical block size to the 1998 * value of the Atomic Write Unit Power Fail parameter. 
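	 * atomic_bs above is derived as (1 + NAWUPF) * logical block size
	 * (or AWUPF when NAWUPF is not defined); e.g. NAWUPF = 7 on a
	 * 4096-byte LBA format yields a 32 KiB atomic unit, and the reported
	 * physical block size is capped to that value below.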
1999 */ 2000 lim->logical_block_size = bs; 2001 lim->physical_block_size = min(phys_bs, atomic_bs); 2002 lim->io_min = phys_bs; 2003 lim->io_opt = io_opt; 2004 if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) 2005 lim->max_write_zeroes_sectors = UINT_MAX; 2006 else 2007 lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors; 2008 return valid; 2009 } 2010 2011 static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info) 2012 { 2013 return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags); 2014 } 2015 2016 static inline bool nvme_first_scan(struct gendisk *disk) 2017 { 2018 /* nvme_alloc_ns() scans the disk prior to adding it */ 2019 return !disk_live(disk); 2020 } 2021 2022 static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id, 2023 struct queue_limits *lim) 2024 { 2025 struct nvme_ctrl *ctrl = ns->ctrl; 2026 u32 iob; 2027 2028 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && 2029 is_power_of_2(ctrl->max_hw_sectors)) 2030 iob = ctrl->max_hw_sectors; 2031 else 2032 iob = nvme_lba_to_sect(ns->head, le16_to_cpu(id->noiob)); 2033 2034 if (!iob) 2035 return; 2036 2037 if (!is_power_of_2(iob)) { 2038 if (nvme_first_scan(ns->disk)) 2039 pr_warn("%s: ignoring unaligned IO boundary:%u\n", 2040 ns->disk->disk_name, iob); 2041 return; 2042 } 2043 2044 if (blk_queue_is_zoned(ns->disk->queue)) { 2045 if (nvme_first_scan(ns->disk)) 2046 pr_warn("%s: ignoring zoned namespace IO boundary\n", 2047 ns->disk->disk_name); 2048 return; 2049 } 2050 2051 lim->chunk_sectors = iob; 2052 } 2053 2054 static int nvme_update_ns_info_generic(struct nvme_ns *ns, 2055 struct nvme_ns_info *info) 2056 { 2057 struct queue_limits lim; 2058 int ret; 2059 2060 blk_mq_freeze_queue(ns->disk->queue); 2061 lim = queue_limits_start_update(ns->disk->queue); 2062 nvme_set_ctrl_limits(ns->ctrl, &lim); 2063 ret = queue_limits_commit_update(ns->disk->queue, &lim); 2064 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); 2065 blk_mq_unfreeze_queue(ns->disk->queue); 2066 2067 /* Hide the block-interface for these devices */ 2068 if (!ret) 2069 ret = -ENODEV; 2070 return ret; 2071 } 2072 2073 static int nvme_update_ns_info_block(struct nvme_ns *ns, 2074 struct nvme_ns_info *info) 2075 { 2076 bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT; 2077 struct queue_limits lim; 2078 struct nvme_id_ns_nvm *nvm = NULL; 2079 struct nvme_zone_info zi = {}; 2080 struct nvme_id_ns *id; 2081 sector_t capacity; 2082 unsigned lbaf; 2083 int ret; 2084 2085 ret = nvme_identify_ns(ns->ctrl, info->nsid, &id); 2086 if (ret) 2087 return ret; 2088 2089 if (id->ncap == 0) { 2090 /* namespace not allocated or attached */ 2091 info->is_removed = true; 2092 ret = -ENXIO; 2093 goto out; 2094 } 2095 lbaf = nvme_lbaf_index(id->flbas); 2096 2097 if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) { 2098 ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm); 2099 if (ret < 0) 2100 goto out; 2101 } 2102 2103 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && 2104 ns->head->ids.csi == NVME_CSI_ZNS) { 2105 ret = nvme_query_zone_info(ns, lbaf, &zi); 2106 if (ret < 0) 2107 goto out; 2108 } 2109 2110 blk_mq_freeze_queue(ns->disk->queue); 2111 ns->head->lba_shift = id->lbaf[lbaf].ds; 2112 ns->head->nuse = le64_to_cpu(id->nuse); 2113 capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze)); 2114 2115 lim = queue_limits_start_update(ns->disk->queue); 2116 nvme_set_ctrl_limits(ns->ctrl, &lim); 2117 nvme_configure_metadata(ns->ctrl, ns->head, id, nvm); 2118 nvme_set_chunk_sectors(ns, id, &lim); 2119 if (!nvme_update_disk_info(ns, id, 
&lim)) 2120 capacity = 0; 2121 nvme_config_discard(ns, &lim); 2122 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && 2123 ns->head->ids.csi == NVME_CSI_ZNS) 2124 nvme_update_zone_info(ns, &lim, &zi); 2125 ret = queue_limits_commit_update(ns->disk->queue, &lim); 2126 if (ret) { 2127 blk_mq_unfreeze_queue(ns->disk->queue); 2128 goto out; 2129 } 2130 2131 /* 2132 * Register a metadata profile for PI, or the plain non-integrity NVMe 2133 * metadata masquerading as Type 0 if supported, otherwise reject block 2134 * I/O to namespaces with metadata except when the namespace supports 2135 * PI, as it can strip/insert in that case. 2136 */ 2137 if (!nvme_init_integrity(ns->disk, ns->head)) 2138 capacity = 0; 2139 2140 set_capacity_and_notify(ns->disk, capacity); 2141 2142 /* 2143 * Only set the DEAC bit if the device guarantees that reads from 2144 * deallocated data return zeroes. While the DEAC bit does not 2145 * require that, it must be a no-op if reads from deallocated data 2146 * do not return zeroes. 2147 */ 2148 if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) 2149 ns->head->features |= NVME_NS_DEAC; 2150 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); 2151 blk_queue_write_cache(ns->disk->queue, vwc, vwc); 2152 set_bit(NVME_NS_READY, &ns->flags); 2153 blk_mq_unfreeze_queue(ns->disk->queue); 2154 2155 if (blk_queue_is_zoned(ns->queue)) { 2156 ret = blk_revalidate_disk_zones(ns->disk, NULL); 2157 if (ret && !nvme_first_scan(ns->disk)) 2158 goto out; 2159 } 2160 2161 ret = 0; 2162 out: 2163 kfree(nvm); 2164 kfree(id); 2165 return ret; 2166 } 2167 2168 static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) 2169 { 2170 bool unsupported = false; 2171 int ret; 2172 2173 switch (info->ids.csi) { 2174 case NVME_CSI_ZNS: 2175 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { 2176 dev_info(ns->ctrl->device, 2177 "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n", 2178 info->nsid); 2179 ret = nvme_update_ns_info_generic(ns, info); 2180 break; 2181 } 2182 ret = nvme_update_ns_info_block(ns, info); 2183 break; 2184 case NVME_CSI_NVM: 2185 ret = nvme_update_ns_info_block(ns, info); 2186 break; 2187 default: 2188 dev_info(ns->ctrl->device, 2189 "block device for nsid %u not supported (csi %u)\n", 2190 info->nsid, info->ids.csi); 2191 ret = nvme_update_ns_info_generic(ns, info); 2192 break; 2193 } 2194 2195 /* 2196 * If probing fails due an unsupported feature, hide the block device, 2197 * but still allow other access. 2198 */ 2199 if (ret == -ENODEV) { 2200 ns->disk->flags |= GENHD_FL_HIDDEN; 2201 set_bit(NVME_NS_READY, &ns->flags); 2202 unsupported = true; 2203 ret = 0; 2204 } 2205 2206 if (!ret && nvme_ns_head_multipath(ns->head)) { 2207 struct queue_limits *ns_lim = &ns->disk->queue->limits; 2208 struct queue_limits lim; 2209 2210 blk_mq_freeze_queue(ns->head->disk->queue); 2211 if (unsupported) 2212 ns->head->disk->flags |= GENHD_FL_HIDDEN; 2213 else 2214 nvme_init_integrity(ns->head->disk, ns->head); 2215 set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk)); 2216 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); 2217 nvme_mpath_revalidate_paths(ns); 2218 2219 /* 2220 * queue_limits mixes values that are the hardware limitations 2221 * for bio splitting with what is the device configuration. 2222 * 2223 * For NVMe the device configuration can change after e.g. a 2224 * Format command, and we really want to pick up the new format 2225 * value here. 
But we must still stack the queue limits to the 2226 * least common denominator for multipathing to split the bios 2227 * properly. 2228 * 2229 * To work around this, we explicitly set the device 2230 * configuration to those that we just queried, but only stack 2231 * the splitting limits in to make sure we still obey possibly 2232 * lower limitations of other controllers. 2233 */ 2234 lim = queue_limits_start_update(ns->head->disk->queue); 2235 lim.logical_block_size = ns_lim->logical_block_size; 2236 lim.physical_block_size = ns_lim->physical_block_size; 2237 lim.io_min = ns_lim->io_min; 2238 lim.io_opt = ns_lim->io_opt; 2239 queue_limits_stack_bdev(&lim, ns->disk->part0, 0, 2240 ns->head->disk->disk_name); 2241 ret = queue_limits_commit_update(ns->head->disk->queue, &lim); 2242 blk_mq_unfreeze_queue(ns->head->disk->queue); 2243 } 2244 2245 return ret; 2246 } 2247 2248 #ifdef CONFIG_BLK_SED_OPAL 2249 static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, 2250 bool send) 2251 { 2252 struct nvme_ctrl *ctrl = data; 2253 struct nvme_command cmd = { }; 2254 2255 if (send) 2256 cmd.common.opcode = nvme_admin_security_send; 2257 else 2258 cmd.common.opcode = nvme_admin_security_recv; 2259 cmd.common.nsid = 0; 2260 cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); 2261 cmd.common.cdw11 = cpu_to_le32(len); 2262 2263 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 2264 NVME_QID_ANY, NVME_SUBMIT_AT_HEAD); 2265 } 2266 2267 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) 2268 { 2269 if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) { 2270 if (!ctrl->opal_dev) 2271 ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit); 2272 else if (was_suspended) 2273 opal_unlock_from_suspend(ctrl->opal_dev); 2274 } else { 2275 free_opal_dev(ctrl->opal_dev); 2276 ctrl->opal_dev = NULL; 2277 } 2278 } 2279 #else 2280 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) 2281 { 2282 } 2283 #endif /* CONFIG_BLK_SED_OPAL */ 2284 2285 #ifdef CONFIG_BLK_DEV_ZONED 2286 static int nvme_report_zones(struct gendisk *disk, sector_t sector, 2287 unsigned int nr_zones, report_zones_cb cb, void *data) 2288 { 2289 return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb, 2290 data); 2291 } 2292 #else 2293 #define nvme_report_zones NULL 2294 #endif /* CONFIG_BLK_DEV_ZONED */ 2295 2296 const struct block_device_operations nvme_bdev_ops = { 2297 .owner = THIS_MODULE, 2298 .ioctl = nvme_ioctl, 2299 .compat_ioctl = blkdev_compat_ptr_ioctl, 2300 .open = nvme_open, 2301 .release = nvme_release, 2302 .getgeo = nvme_getgeo, 2303 .report_zones = nvme_report_zones, 2304 .pr_ops = &nvme_pr_ops, 2305 }; 2306 2307 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val, 2308 u32 timeout, const char *op) 2309 { 2310 unsigned long timeout_jiffies = jiffies + timeout * HZ; 2311 u32 csts; 2312 int ret; 2313 2314 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 2315 if (csts == ~0) 2316 return -ENODEV; 2317 if ((csts & mask) == val) 2318 break; 2319 2320 usleep_range(1000, 2000); 2321 if (fatal_signal_pending(current)) 2322 return -EINTR; 2323 if (time_after(jiffies, timeout_jiffies)) { 2324 dev_err(ctrl->device, 2325 "Device not ready; aborting %s, CSTS=0x%x\n", 2326 op, csts); 2327 return -ENODEV; 2328 } 2329 } 2330 2331 return ret; 2332 } 2333 2334 int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown) 2335 { 2336 int ret; 2337 2338 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 2339 if 
(shutdown) 2340 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; 2341 else 2342 ctrl->ctrl_config &= ~NVME_CC_ENABLE; 2343 2344 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2345 if (ret) 2346 return ret; 2347 2348 if (shutdown) { 2349 return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK, 2350 NVME_CSTS_SHST_CMPLT, 2351 ctrl->shutdown_timeout, "shutdown"); 2352 } 2353 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) 2354 msleep(NVME_QUIRK_DELAY_AMOUNT); 2355 return nvme_wait_ready(ctrl, NVME_CSTS_RDY, 0, 2356 (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset"); 2357 } 2358 EXPORT_SYMBOL_GPL(nvme_disable_ctrl); 2359 2360 int nvme_enable_ctrl(struct nvme_ctrl *ctrl) 2361 { 2362 unsigned dev_page_min; 2363 u32 timeout; 2364 int ret; 2365 2366 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); 2367 if (ret) { 2368 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); 2369 return ret; 2370 } 2371 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12; 2372 2373 if (NVME_CTRL_PAGE_SHIFT < dev_page_min) { 2374 dev_err(ctrl->device, 2375 "Minimum device page size %u too large for host (%u)\n", 2376 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT); 2377 return -ENODEV; 2378 } 2379 2380 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI) 2381 ctrl->ctrl_config = NVME_CC_CSS_CSI; 2382 else 2383 ctrl->ctrl_config = NVME_CC_CSS_NVM; 2384 2385 if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS) 2386 ctrl->ctrl_config |= NVME_CC_CRIME; 2387 2388 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; 2389 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; 2390 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; 2391 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2392 if (ret) 2393 return ret; 2394 2395 /* Flush write to device (required if transport is PCI) */ 2396 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config); 2397 if (ret) 2398 return ret; 2399 2400 /* CAP value may change after initial CC write */ 2401 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); 2402 if (ret) 2403 return ret; 2404 2405 timeout = NVME_CAP_TIMEOUT(ctrl->cap); 2406 if (ctrl->cap & NVME_CAP_CRMS_CRWMS) { 2407 u32 crto, ready_timeout; 2408 2409 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto); 2410 if (ret) { 2411 dev_err(ctrl->device, "Reading CRTO failed (%d)\n", 2412 ret); 2413 return ret; 2414 } 2415 2416 /* 2417 * CRTO should always be greater or equal to CAP.TO, but some 2418 * devices are known to get this wrong. Use the larger of the 2419 * two values. 
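		 * Depending on CC.CRIME we compare either CRTO.CRIMT or
		 * CRTO.CRWMT against CAP.TO below and keep whichever timeout
		 * is larger.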
2420 */ 2421 if (ctrl->ctrl_config & NVME_CC_CRIME) 2422 ready_timeout = NVME_CRTO_CRIMT(crto); 2423 else 2424 ready_timeout = NVME_CRTO_CRWMT(crto); 2425 2426 if (ready_timeout < timeout) 2427 dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n", 2428 crto, ctrl->cap); 2429 else 2430 timeout = ready_timeout; 2431 } 2432 2433 ctrl->ctrl_config |= NVME_CC_ENABLE; 2434 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2435 if (ret) 2436 return ret; 2437 return nvme_wait_ready(ctrl, NVME_CSTS_RDY, NVME_CSTS_RDY, 2438 (timeout + 1) / 2, "initialisation"); 2439 } 2440 EXPORT_SYMBOL_GPL(nvme_enable_ctrl); 2441 2442 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) 2443 { 2444 __le64 ts; 2445 int ret; 2446 2447 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) 2448 return 0; 2449 2450 ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); 2451 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), 2452 NULL); 2453 if (ret) 2454 dev_warn_once(ctrl->device, 2455 "could not set timestamp (%d)\n", ret); 2456 return ret; 2457 } 2458 2459 static int nvme_configure_host_options(struct nvme_ctrl *ctrl) 2460 { 2461 struct nvme_feat_host_behavior *host; 2462 u8 acre = 0, lbafee = 0; 2463 int ret; 2464 2465 /* Don't bother enabling the feature if retry delay is not reported */ 2466 if (ctrl->crdt[0]) 2467 acre = NVME_ENABLE_ACRE; 2468 if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) 2469 lbafee = NVME_ENABLE_LBAFEE; 2470 2471 if (!acre && !lbafee) 2472 return 0; 2473 2474 host = kzalloc(sizeof(*host), GFP_KERNEL); 2475 if (!host) 2476 return 0; 2477 2478 host->acre = acre; 2479 host->lbafee = lbafee; 2480 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, 2481 host, sizeof(*host), NULL); 2482 kfree(host); 2483 return ret; 2484 } 2485 2486 /* 2487 * The function checks whether the given total (exlat + enlat) latency of 2488 * a power state allows the latter to be used as an APST transition target. 2489 * It does so by comparing the latency to the primary and secondary latency 2490 * tolerances defined by module params. If there's a match, the corresponding 2491 * timeout value is returned and the matching tolerance index (1 or 2) is 2492 * reported. 2493 */ 2494 static bool nvme_apst_get_transition_time(u64 total_latency, 2495 u64 *transition_time, unsigned *last_index) 2496 { 2497 if (total_latency <= apst_primary_latency_tol_us) { 2498 if (*last_index == 1) 2499 return false; 2500 *last_index = 1; 2501 *transition_time = apst_primary_timeout_ms; 2502 return true; 2503 } 2504 if (apst_secondary_timeout_ms && 2505 total_latency <= apst_secondary_latency_tol_us) { 2506 if (*last_index <= 2) 2507 return false; 2508 *last_index = 2; 2509 *transition_time = apst_secondary_timeout_ms; 2510 return true; 2511 } 2512 return false; 2513 } 2514 2515 /* 2516 * APST (Autonomous Power State Transition) lets us program a table of power 2517 * state transitions that the controller will perform automatically. 2518 * 2519 * Depending on module params, one of the two supported techniques will be used: 2520 * 2521 * - If the parameters provide explicit timeouts and tolerances, they will be 2522 * used to build a table with up to 2 non-operational states to transition to. 2523 * The default parameter values were selected based on the values used by 2524 * Microsoft's and Intel's NVMe drivers. 
Yet, since we don't implement dynamic 2525 * regeneration of the APST table in the event of switching between external 2526 * and battery power, the timeouts and tolerances reflect a compromise 2527 * between values used by Microsoft for AC and battery scenarios. 2528 * - If not, we'll configure the table with a simple heuristic: we are willing 2529 * to spend at most 2% of the time transitioning between power states. 2530 * Therefore, when running in any given state, we will enter the next 2531 * lower-power non-operational state after waiting 50 * (enlat + exlat) 2532 * microseconds, as long as that state's exit latency is under the requested 2533 * maximum latency. 2534 * 2535 * We will not autonomously enter any non-operational state for which the total 2536 * latency exceeds ps_max_latency_us. 2537 * 2538 * Users can set ps_max_latency_us to zero to turn off APST. 2539 */ 2540 static int nvme_configure_apst(struct nvme_ctrl *ctrl) 2541 { 2542 struct nvme_feat_auto_pst *table; 2543 unsigned apste = 0; 2544 u64 max_lat_us = 0; 2545 __le64 target = 0; 2546 int max_ps = -1; 2547 int state; 2548 int ret; 2549 unsigned last_lt_index = UINT_MAX; 2550 2551 /* 2552 * If APST isn't supported or if we haven't been initialized yet, 2553 * then don't do anything. 2554 */ 2555 if (!ctrl->apsta) 2556 return 0; 2557 2558 if (ctrl->npss > 31) { 2559 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); 2560 return 0; 2561 } 2562 2563 table = kzalloc(sizeof(*table), GFP_KERNEL); 2564 if (!table) 2565 return 0; 2566 2567 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { 2568 /* Turn off APST. */ 2569 dev_dbg(ctrl->device, "APST disabled\n"); 2570 goto done; 2571 } 2572 2573 /* 2574 * Walk through all states from lowest- to highest-power. 2575 * According to the spec, lower-numbered states use more power. NPSS, 2576 * despite the name, is the index of the lowest-power state, not the 2577 * number of states. 2578 */ 2579 for (state = (int)ctrl->npss; state >= 0; state--) { 2580 u64 total_latency_us, exit_latency_us, transition_ms; 2581 2582 if (target) 2583 table->entries[state] = target; 2584 2585 /* 2586 * Don't allow transitions to the deepest state if it's quirked 2587 * off. 2588 */ 2589 if (state == ctrl->npss && 2590 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) 2591 continue; 2592 2593 /* 2594 * Is this state a useful non-operational state for higher-power 2595 * states to autonomously transition to? 2596 */ 2597 if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE)) 2598 continue; 2599 2600 exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat); 2601 if (exit_latency_us > ctrl->ps_max_latency_us) 2602 continue; 2603 2604 total_latency_us = exit_latency_us + 2605 le32_to_cpu(ctrl->psd[state].entry_lat); 2606 2607 /* 2608 * This state is good. It can be used as the APST idle target 2609 * for higher power states. 
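		 * As a worked example of the default heuristic below: a state
		 * with a total latency of 10000 us gets a transition time of
		 * 500 ms ((10000 + 19) / 20), which is the 50 * (enlat + exlat)
		 * rule described in the comment above nvme_configure_apst().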
2610 */ 2611 if (apst_primary_timeout_ms && apst_primary_latency_tol_us) { 2612 if (!nvme_apst_get_transition_time(total_latency_us, 2613 &transition_ms, &last_lt_index)) 2614 continue; 2615 } else { 2616 transition_ms = total_latency_us + 19; 2617 do_div(transition_ms, 20); 2618 if (transition_ms > (1 << 24) - 1) 2619 transition_ms = (1 << 24) - 1; 2620 } 2621 2622 target = cpu_to_le64((state << 3) | (transition_ms << 8)); 2623 if (max_ps == -1) 2624 max_ps = state; 2625 if (total_latency_us > max_lat_us) 2626 max_lat_us = total_latency_us; 2627 } 2628 2629 if (max_ps == -1) 2630 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); 2631 else 2632 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", 2633 max_ps, max_lat_us, (int)sizeof(*table), table); 2634 apste = 1; 2635 2636 done: 2637 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, 2638 table, sizeof(*table), NULL); 2639 if (ret) 2640 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); 2641 kfree(table); 2642 return ret; 2643 } 2644 2645 static void nvme_set_latency_tolerance(struct device *dev, s32 val) 2646 { 2647 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2648 u64 latency; 2649 2650 switch (val) { 2651 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: 2652 case PM_QOS_LATENCY_ANY: 2653 latency = U64_MAX; 2654 break; 2655 2656 default: 2657 latency = val; 2658 } 2659 2660 if (ctrl->ps_max_latency_us != latency) { 2661 ctrl->ps_max_latency_us = latency; 2662 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE) 2663 nvme_configure_apst(ctrl); 2664 } 2665 } 2666 2667 struct nvme_core_quirk_entry { 2668 /* 2669 * NVMe model and firmware strings are padded with spaces. For 2670 * simplicity, strings in the quirk table are padded with NULLs 2671 * instead. 2672 */ 2673 u16 vid; 2674 const char *mn; 2675 const char *fr; 2676 unsigned long quirks; 2677 }; 2678 2679 static const struct nvme_core_quirk_entry core_quirks[] = { 2680 { 2681 /* 2682 * This Toshiba device seems to die using any APST states. See: 2683 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 2684 */ 2685 .vid = 0x1179, 2686 .mn = "THNSF5256GPUK TOSHIBA", 2687 .quirks = NVME_QUIRK_NO_APST, 2688 }, 2689 { 2690 /* 2691 * This LiteON CL1-3D*-Q11 firmware version has a race 2692 * condition associated with actions related to suspend to idle 2693 * LiteON has resolved the problem in future firmware 2694 */ 2695 .vid = 0x14a4, 2696 .fr = "22301111", 2697 .quirks = NVME_QUIRK_SIMPLE_SUSPEND, 2698 }, 2699 { 2700 /* 2701 * This Kioxia CD6-V Series / HPE PE8030 device times out and 2702 * aborts I/O during any load, but more easily reproducible 2703 * with discards (fstrim). 2704 * 2705 * The device is left in a state where it is also not possible 2706 * to use "nvme set-feature" to disable APST, but booting with 2707 * nvme_core.default_ps_max_latency=0 works. 2708 */ 2709 .vid = 0x1e0f, 2710 .mn = "KCD6XVUL6T40", 2711 .quirks = NVME_QUIRK_NO_APST, 2712 }, 2713 { 2714 /* 2715 * The external Samsung X5 SSD fails initialization without a 2716 * delay before checking if it is ready and has a whole set of 2717 * other problems. To make this even more interesting, it 2718 * shares the PCI ID with internal Samsung 970 Evo Plus that 2719 * does not need or want these quirks. 
2720 */ 2721 .vid = 0x144d, 2722 .mn = "Samsung Portable SSD X5", 2723 .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 2724 NVME_QUIRK_NO_DEEPEST_PS | 2725 NVME_QUIRK_IGNORE_DEV_SUBNQN, 2726 } 2727 }; 2728 2729 /* match is null-terminated but idstr is space-padded. */ 2730 static bool string_matches(const char *idstr, const char *match, size_t len) 2731 { 2732 size_t matchlen; 2733 2734 if (!match) 2735 return true; 2736 2737 matchlen = strlen(match); 2738 WARN_ON_ONCE(matchlen > len); 2739 2740 if (memcmp(idstr, match, matchlen)) 2741 return false; 2742 2743 for (; matchlen < len; matchlen++) 2744 if (idstr[matchlen] != ' ') 2745 return false; 2746 2747 return true; 2748 } 2749 2750 static bool quirk_matches(const struct nvme_id_ctrl *id, 2751 const struct nvme_core_quirk_entry *q) 2752 { 2753 return q->vid == le16_to_cpu(id->vid) && 2754 string_matches(id->mn, q->mn, sizeof(id->mn)) && 2755 string_matches(id->fr, q->fr, sizeof(id->fr)); 2756 } 2757 2758 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, 2759 struct nvme_id_ctrl *id) 2760 { 2761 size_t nqnlen; 2762 int off; 2763 2764 if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { 2765 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 2766 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 2767 strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 2768 return; 2769 } 2770 2771 if (ctrl->vs >= NVME_VS(1, 2, 1)) 2772 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 2773 } 2774 2775 /* 2776 * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe 2777 * Base Specification 2.0. It is slightly different from the format 2778 * specified there due to historic reasons, and we can't change it now. 2779 */ 2780 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 2781 "nqn.2014.08.org.nvmexpress:%04x%04x", 2782 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 2783 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 2784 off += sizeof(id->sn); 2785 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); 2786 off += sizeof(id->mn); 2787 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); 2788 } 2789 2790 static void nvme_release_subsystem(struct device *dev) 2791 { 2792 struct nvme_subsystem *subsys = 2793 container_of(dev, struct nvme_subsystem, dev); 2794 2795 if (subsys->instance >= 0) 2796 ida_free(&nvme_instance_ida, subsys->instance); 2797 kfree(subsys); 2798 } 2799 2800 static void nvme_destroy_subsystem(struct kref *ref) 2801 { 2802 struct nvme_subsystem *subsys = 2803 container_of(ref, struct nvme_subsystem, ref); 2804 2805 mutex_lock(&nvme_subsystems_lock); 2806 list_del(&subsys->entry); 2807 mutex_unlock(&nvme_subsystems_lock); 2808 2809 ida_destroy(&subsys->ns_ida); 2810 device_del(&subsys->dev); 2811 put_device(&subsys->dev); 2812 } 2813 2814 static void nvme_put_subsystem(struct nvme_subsystem *subsys) 2815 { 2816 kref_put(&subsys->ref, nvme_destroy_subsystem); 2817 } 2818 2819 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) 2820 { 2821 struct nvme_subsystem *subsys; 2822 2823 lockdep_assert_held(&nvme_subsystems_lock); 2824 2825 /* 2826 * Fail matches for discovery subsystems. This results 2827 * in each discovery controller bound to a unique subsystem. 2828 * This avoids issues with validating controller values 2829 * that can only be true when there is a single unique subsystem. 2830 * There may be multiple and completely independent entities 2831 * that provide discovery controllers. 
2832 */ 2833 if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME)) 2834 return NULL; 2835 2836 list_for_each_entry(subsys, &nvme_subsystems, entry) { 2837 if (strcmp(subsys->subnqn, subsysnqn)) 2838 continue; 2839 if (!kref_get_unless_zero(&subsys->ref)) 2840 continue; 2841 return subsys; 2842 } 2843 2844 return NULL; 2845 } 2846 2847 static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl) 2848 { 2849 return ctrl->opts && ctrl->opts->discovery_nqn; 2850 } 2851 2852 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, 2853 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2854 { 2855 struct nvme_ctrl *tmp; 2856 2857 lockdep_assert_held(&nvme_subsystems_lock); 2858 2859 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) { 2860 if (nvme_state_terminal(tmp)) 2861 continue; 2862 2863 if (tmp->cntlid == ctrl->cntlid) { 2864 dev_err(ctrl->device, 2865 "Duplicate cntlid %u with %s, subsys %s, rejecting\n", 2866 ctrl->cntlid, dev_name(tmp->device), 2867 subsys->subnqn); 2868 return false; 2869 } 2870 2871 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || 2872 nvme_discovery_ctrl(ctrl)) 2873 continue; 2874 2875 dev_err(ctrl->device, 2876 "Subsystem does not support multiple controllers\n"); 2877 return false; 2878 } 2879 2880 return true; 2881 } 2882 2883 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2884 { 2885 struct nvme_subsystem *subsys, *found; 2886 int ret; 2887 2888 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); 2889 if (!subsys) 2890 return -ENOMEM; 2891 2892 subsys->instance = -1; 2893 mutex_init(&subsys->lock); 2894 kref_init(&subsys->ref); 2895 INIT_LIST_HEAD(&subsys->ctrls); 2896 INIT_LIST_HEAD(&subsys->nsheads); 2897 nvme_init_subnqn(subsys, ctrl, id); 2898 memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); 2899 memcpy(subsys->model, id->mn, sizeof(subsys->model)); 2900 subsys->vendor_id = le16_to_cpu(id->vid); 2901 subsys->cmic = id->cmic; 2902 2903 /* Versions prior to 1.4 don't necessarily report a valid type */ 2904 if (id->cntrltype == NVME_CTRL_DISC || 2905 !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME)) 2906 subsys->subtype = NVME_NQN_DISC; 2907 else 2908 subsys->subtype = NVME_NQN_NVME; 2909 2910 if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) { 2911 dev_err(ctrl->device, 2912 "Subsystem %s is not a discovery controller", 2913 subsys->subnqn); 2914 kfree(subsys); 2915 return -EINVAL; 2916 } 2917 subsys->awupf = le16_to_cpu(id->awupf); 2918 nvme_mpath_default_iopolicy(subsys); 2919 2920 subsys->dev.class = &nvme_subsys_class; 2921 subsys->dev.release = nvme_release_subsystem; 2922 subsys->dev.groups = nvme_subsys_attrs_groups; 2923 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); 2924 device_initialize(&subsys->dev); 2925 2926 mutex_lock(&nvme_subsystems_lock); 2927 found = __nvme_find_get_subsystem(subsys->subnqn); 2928 if (found) { 2929 put_device(&subsys->dev); 2930 subsys = found; 2931 2932 if (!nvme_validate_cntlid(subsys, ctrl, id)) { 2933 ret = -EINVAL; 2934 goto out_put_subsystem; 2935 } 2936 } else { 2937 ret = device_add(&subsys->dev); 2938 if (ret) { 2939 dev_err(ctrl->device, 2940 "failed to register subsystem device.\n"); 2941 put_device(&subsys->dev); 2942 goto out_unlock; 2943 } 2944 ida_init(&subsys->ns_ida); 2945 list_add_tail(&subsys->entry, &nvme_subsystems); 2946 } 2947 2948 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, 2949 dev_name(ctrl->device)); 2950 if (ret) { 2951 dev_err(ctrl->device, 2952 "failed to create sysfs link from subsystem.\n"); 2953 goto 
out_put_subsystem; 2954 } 2955 2956 if (!found) 2957 subsys->instance = ctrl->instance; 2958 ctrl->subsys = subsys; 2959 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); 2960 mutex_unlock(&nvme_subsystems_lock); 2961 return 0; 2962 2963 out_put_subsystem: 2964 nvme_put_subsystem(subsys); 2965 out_unlock: 2966 mutex_unlock(&nvme_subsystems_lock); 2967 return ret; 2968 } 2969 2970 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, 2971 void *log, size_t size, u64 offset) 2972 { 2973 struct nvme_command c = { }; 2974 u32 dwlen = nvme_bytes_to_numd(size); 2975 2976 c.get_log_page.opcode = nvme_admin_get_log_page; 2977 c.get_log_page.nsid = cpu_to_le32(nsid); 2978 c.get_log_page.lid = log_page; 2979 c.get_log_page.lsp = lsp; 2980 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); 2981 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); 2982 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); 2983 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); 2984 c.get_log_page.csi = csi; 2985 2986 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); 2987 } 2988 2989 static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, 2990 struct nvme_effects_log **log) 2991 { 2992 struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi); 2993 int ret; 2994 2995 if (cel) 2996 goto out; 2997 2998 cel = kzalloc(sizeof(*cel), GFP_KERNEL); 2999 if (!cel) 3000 return -ENOMEM; 3001 3002 ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi, 3003 cel, sizeof(*cel), 0); 3004 if (ret) { 3005 kfree(cel); 3006 return ret; 3007 } 3008 3009 xa_store(&ctrl->cels, csi, cel, GFP_KERNEL); 3010 out: 3011 *log = cel; 3012 return 0; 3013 } 3014 3015 static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units) 3016 { 3017 u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val; 3018 3019 if (check_shl_overflow(1U, units + page_shift - 9, &val)) 3020 return UINT_MAX; 3021 return val; 3022 } 3023 3024 static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) 3025 { 3026 struct nvme_command c = { }; 3027 struct nvme_id_ctrl_nvm *id; 3028 int ret; 3029 3030 /* 3031 * Even though NVMe spec explicitly states that MDTS is not applicable 3032 * to the write-zeroes, we are cautious and limit the size to the 3033 * controllers max_hw_sectors value, which is based on the MDTS field 3034 * and possibly other limiting factors. 
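	 * If the controller reports WZSL in the NVM command set specific
	 * Identify Controller data below, that value replaces this
	 * conservative default.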
 */
	if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
	    !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
		ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
	else
		ctrl->max_zeroes_sectors = 0;

	if (ctrl->subsys->subtype != NVME_NQN_NVME ||
	    nvme_ctrl_limited_cns(ctrl) ||
	    test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags))
		return 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CS_CTRL;
	c.identify.csi = NVME_CSI_NVM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
	if (ret)
		goto free_data;

	ctrl->dmrl = id->dmrl;
	ctrl->dmrsl = le32_to_cpu(id->dmrsl);
	if (id->wzsl)
		ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);

free_data:
	if (ret > 0)
		set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags);
	kfree(id);
	return ret;
}

static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
{
	struct nvme_effects_log *log = ctrl->effects;

	log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
						NVME_CMD_EFFECTS_NCC |
						NVME_CMD_EFFECTS_CSE_MASK);
	log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
						NVME_CMD_EFFECTS_CSE_MASK);

	/*
	 * The spec says the result of a security receive command depends on
	 * the previous security send command. As such, many vendors log this
	 * command as one to be submitted only when no other commands to the
	 * same namespace are outstanding. The intention is to tell the host
	 * to prevent mixing security send and receive.
	 *
	 * This driver can only enforce such exclusive access against IO
	 * queues, though. We are not readily able to enforce such a rule for
	 * two commands to the admin queue, which is the only queue that
	 * matters for this command.
	 *
	 * Rather than blindly freezing the IO queues for this effect that
	 * doesn't even apply to IO, mask it off.
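	 * Hence only the CSE bits for Security Receive are cleared below,
	 * while the LBCC hint is still added for the write commands.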
3095 */ 3096 log->acs[nvme_admin_security_recv] &= cpu_to_le32(~NVME_CMD_EFFECTS_CSE_MASK); 3097 3098 log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); 3099 log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); 3100 log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); 3101 } 3102 3103 static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 3104 { 3105 int ret = 0; 3106 3107 if (ctrl->effects) 3108 return 0; 3109 3110 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { 3111 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects); 3112 if (ret < 0) 3113 return ret; 3114 } 3115 3116 if (!ctrl->effects) { 3117 ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL); 3118 if (!ctrl->effects) 3119 return -ENOMEM; 3120 xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL); 3121 } 3122 3123 nvme_init_known_nvm_effects(ctrl); 3124 return 0; 3125 } 3126 3127 static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 3128 { 3129 /* 3130 * In fabrics we need to verify the cntlid matches the 3131 * admin connect 3132 */ 3133 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { 3134 dev_err(ctrl->device, 3135 "Mismatching cntlid: Connect %u vs Identify %u, rejecting\n", 3136 ctrl->cntlid, le16_to_cpu(id->cntlid)); 3137 return -EINVAL; 3138 } 3139 3140 if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) { 3141 dev_err(ctrl->device, 3142 "keep-alive support is mandatory for fabrics\n"); 3143 return -EINVAL; 3144 } 3145 3146 if (!nvme_discovery_ctrl(ctrl) && ctrl->ioccsz < 4) { 3147 dev_err(ctrl->device, 3148 "I/O queue command capsule supported size %d < 4\n", 3149 ctrl->ioccsz); 3150 return -EINVAL; 3151 } 3152 3153 if (!nvme_discovery_ctrl(ctrl) && ctrl->iorcsz < 1) { 3154 dev_err(ctrl->device, 3155 "I/O queue response capsule supported size %d < 1\n", 3156 ctrl->iorcsz); 3157 return -EINVAL; 3158 } 3159 3160 if (!ctrl->maxcmd) { 3161 dev_err(ctrl->device, "Maximum outstanding commands is 0\n"); 3162 return -EINVAL; 3163 } 3164 3165 return 0; 3166 } 3167 3168 static int nvme_init_identify(struct nvme_ctrl *ctrl) 3169 { 3170 struct queue_limits lim; 3171 struct nvme_id_ctrl *id; 3172 u32 max_hw_sectors; 3173 bool prev_apst_enabled; 3174 int ret; 3175 3176 ret = nvme_identify_ctrl(ctrl, &id); 3177 if (ret) { 3178 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); 3179 return -EIO; 3180 } 3181 3182 if (!(ctrl->ops->flags & NVME_F_FABRICS)) 3183 ctrl->cntlid = le16_to_cpu(id->cntlid); 3184 3185 if (!ctrl->identified) { 3186 unsigned int i; 3187 3188 /* 3189 * Check for quirks. Quirk can depend on firmware version, 3190 * so, in principle, the set of quirks present can change 3191 * across a reset. As a possible future enhancement, we 3192 * could re-scan for quirks every time we reinitialize 3193 * the device, but we'd have to make sure that the driver 3194 * behaves intelligently if the quirks change. 
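		 * For now the quirk table is only consulted on the first
		 * initialization of the controller (see the !ctrl->identified
		 * check above).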
3195 */ 3196 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { 3197 if (quirk_matches(id, &core_quirks[i])) 3198 ctrl->quirks |= core_quirks[i].quirks; 3199 } 3200 3201 ret = nvme_init_subsystem(ctrl, id); 3202 if (ret) 3203 goto out_free; 3204 3205 ret = nvme_init_effects(ctrl, id); 3206 if (ret) 3207 goto out_free; 3208 } 3209 memcpy(ctrl->subsys->firmware_rev, id->fr, 3210 sizeof(ctrl->subsys->firmware_rev)); 3211 3212 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { 3213 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); 3214 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; 3215 } 3216 3217 ctrl->crdt[0] = le16_to_cpu(id->crdt1); 3218 ctrl->crdt[1] = le16_to_cpu(id->crdt2); 3219 ctrl->crdt[2] = le16_to_cpu(id->crdt3); 3220 3221 ctrl->oacs = le16_to_cpu(id->oacs); 3222 ctrl->oncs = le16_to_cpu(id->oncs); 3223 ctrl->mtfa = le16_to_cpu(id->mtfa); 3224 ctrl->oaes = le32_to_cpu(id->oaes); 3225 ctrl->wctemp = le16_to_cpu(id->wctemp); 3226 ctrl->cctemp = le16_to_cpu(id->cctemp); 3227 3228 atomic_set(&ctrl->abort_limit, id->acl + 1); 3229 ctrl->vwc = id->vwc; 3230 if (id->mdts) 3231 max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts); 3232 else 3233 max_hw_sectors = UINT_MAX; 3234 ctrl->max_hw_sectors = 3235 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); 3236 3237 lim = queue_limits_start_update(ctrl->admin_q); 3238 nvme_set_ctrl_limits(ctrl, &lim); 3239 ret = queue_limits_commit_update(ctrl->admin_q, &lim); 3240 if (ret) 3241 goto out_free; 3242 3243 ctrl->sgls = le32_to_cpu(id->sgls); 3244 ctrl->kas = le16_to_cpu(id->kas); 3245 ctrl->max_namespaces = le32_to_cpu(id->mnan); 3246 ctrl->ctratt = le32_to_cpu(id->ctratt); 3247 3248 ctrl->cntrltype = id->cntrltype; 3249 ctrl->dctype = id->dctype; 3250 3251 if (id->rtd3e) { 3252 /* us -> s */ 3253 u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC; 3254 3255 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, 3256 shutdown_timeout, 60); 3257 3258 if (ctrl->shutdown_timeout != shutdown_timeout) 3259 dev_info(ctrl->device, 3260 "D3 entry latency set to %u seconds\n", 3261 ctrl->shutdown_timeout); 3262 } else 3263 ctrl->shutdown_timeout = shutdown_timeout; 3264 3265 ctrl->npss = id->npss; 3266 ctrl->apsta = id->apsta; 3267 prev_apst_enabled = ctrl->apst_enabled; 3268 if (ctrl->quirks & NVME_QUIRK_NO_APST) { 3269 if (force_apst && id->apsta) { 3270 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); 3271 ctrl->apst_enabled = true; 3272 } else { 3273 ctrl->apst_enabled = false; 3274 } 3275 } else { 3276 ctrl->apst_enabled = id->apsta; 3277 } 3278 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); 3279 3280 if (ctrl->ops->flags & NVME_F_FABRICS) { 3281 ctrl->icdoff = le16_to_cpu(id->icdoff); 3282 ctrl->ioccsz = le32_to_cpu(id->ioccsz); 3283 ctrl->iorcsz = le32_to_cpu(id->iorcsz); 3284 ctrl->maxcmd = le16_to_cpu(id->maxcmd); 3285 3286 ret = nvme_check_ctrl_fabric_info(ctrl, id); 3287 if (ret) 3288 goto out_free; 3289 } else { 3290 ctrl->hmpre = le32_to_cpu(id->hmpre); 3291 ctrl->hmmin = le32_to_cpu(id->hmmin); 3292 ctrl->hmminds = le32_to_cpu(id->hmminds); 3293 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); 3294 } 3295 3296 ret = nvme_mpath_init_identify(ctrl, id); 3297 if (ret < 0) 3298 goto out_free; 3299 3300 if (ctrl->apst_enabled && !prev_apst_enabled) 3301 dev_pm_qos_expose_latency_tolerance(ctrl->device); 3302 else if (!ctrl->apst_enabled && prev_apst_enabled) 3303 
dev_pm_qos_hide_latency_tolerance(ctrl->device); 3304 3305 out_free: 3306 kfree(id); 3307 return ret; 3308 } 3309 3310 /* 3311 * Initialize the cached copies of the Identify data and various controller 3312 * register in our nvme_ctrl structure. This should be called as soon as 3313 * the admin queue is fully up and running. 3314 */ 3315 int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended) 3316 { 3317 int ret; 3318 3319 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); 3320 if (ret) { 3321 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); 3322 return ret; 3323 } 3324 3325 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); 3326 3327 if (ctrl->vs >= NVME_VS(1, 1, 0)) 3328 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap); 3329 3330 ret = nvme_init_identify(ctrl); 3331 if (ret) 3332 return ret; 3333 3334 ret = nvme_configure_apst(ctrl); 3335 if (ret < 0) 3336 return ret; 3337 3338 ret = nvme_configure_timestamp(ctrl); 3339 if (ret < 0) 3340 return ret; 3341 3342 ret = nvme_configure_host_options(ctrl); 3343 if (ret < 0) 3344 return ret; 3345 3346 nvme_configure_opal(ctrl, was_suspended); 3347 3348 if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { 3349 /* 3350 * Do not return errors unless we are in a controller reset, 3351 * the controller works perfectly fine without hwmon. 3352 */ 3353 ret = nvme_hwmon_init(ctrl); 3354 if (ret == -EINTR) 3355 return ret; 3356 } 3357 3358 clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags); 3359 ctrl->identified = true; 3360 3361 nvme_start_keep_alive(ctrl); 3362 3363 return 0; 3364 } 3365 EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish); 3366 3367 static int nvme_dev_open(struct inode *inode, struct file *file) 3368 { 3369 struct nvme_ctrl *ctrl = 3370 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 3371 3372 switch (nvme_ctrl_state(ctrl)) { 3373 case NVME_CTRL_LIVE: 3374 break; 3375 default: 3376 return -EWOULDBLOCK; 3377 } 3378 3379 nvme_get_ctrl(ctrl); 3380 if (!try_module_get(ctrl->ops->module)) { 3381 nvme_put_ctrl(ctrl); 3382 return -EINVAL; 3383 } 3384 3385 file->private_data = ctrl; 3386 return 0; 3387 } 3388 3389 static int nvme_dev_release(struct inode *inode, struct file *file) 3390 { 3391 struct nvme_ctrl *ctrl = 3392 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 3393 3394 module_put(ctrl->ops->module); 3395 nvme_put_ctrl(ctrl); 3396 return 0; 3397 } 3398 3399 static const struct file_operations nvme_dev_fops = { 3400 .owner = THIS_MODULE, 3401 .open = nvme_dev_open, 3402 .release = nvme_dev_release, 3403 .unlocked_ioctl = nvme_dev_ioctl, 3404 .compat_ioctl = compat_ptr_ioctl, 3405 .uring_cmd = nvme_dev_uring_cmd, 3406 }; 3407 3408 static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl, 3409 unsigned nsid) 3410 { 3411 struct nvme_ns_head *h; 3412 3413 lockdep_assert_held(&ctrl->subsys->lock); 3414 3415 list_for_each_entry(h, &ctrl->subsys->nsheads, entry) { 3416 /* 3417 * Private namespaces can share NSIDs under some conditions. 3418 * In that case we can't use the same ns_head for namespaces 3419 * with the same NSID. 
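		 * Hence an existing ns_head is only reused when
		 * nvme_is_unique_nsid() says the NSID is guaranteed to be
		 * unique.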
3420 */ 3421 if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h)) 3422 continue; 3423 if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) 3424 return h; 3425 } 3426 3427 return NULL; 3428 } 3429 3430 static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys, 3431 struct nvme_ns_ids *ids) 3432 { 3433 bool has_uuid = !uuid_is_null(&ids->uuid); 3434 bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid)); 3435 bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64)); 3436 struct nvme_ns_head *h; 3437 3438 lockdep_assert_held(&subsys->lock); 3439 3440 list_for_each_entry(h, &subsys->nsheads, entry) { 3441 if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid)) 3442 return -EINVAL; 3443 if (has_nguid && 3444 memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0) 3445 return -EINVAL; 3446 if (has_eui64 && 3447 memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0) 3448 return -EINVAL; 3449 } 3450 3451 return 0; 3452 } 3453 3454 static void nvme_cdev_rel(struct device *dev) 3455 { 3456 ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt)); 3457 } 3458 3459 void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device) 3460 { 3461 cdev_device_del(cdev, cdev_device); 3462 put_device(cdev_device); 3463 } 3464 3465 int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, 3466 const struct file_operations *fops, struct module *owner) 3467 { 3468 int minor, ret; 3469 3470 minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL); 3471 if (minor < 0) 3472 return minor; 3473 cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor); 3474 cdev_device->class = &nvme_ns_chr_class; 3475 cdev_device->release = nvme_cdev_rel; 3476 device_initialize(cdev_device); 3477 cdev_init(cdev, fops); 3478 cdev->owner = owner; 3479 ret = cdev_device_add(cdev, cdev_device); 3480 if (ret) 3481 put_device(cdev_device); 3482 3483 return ret; 3484 } 3485 3486 static int nvme_ns_chr_open(struct inode *inode, struct file *file) 3487 { 3488 return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev)); 3489 } 3490 3491 static int nvme_ns_chr_release(struct inode *inode, struct file *file) 3492 { 3493 nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev)); 3494 return 0; 3495 } 3496 3497 static const struct file_operations nvme_ns_chr_fops = { 3498 .owner = THIS_MODULE, 3499 .open = nvme_ns_chr_open, 3500 .release = nvme_ns_chr_release, 3501 .unlocked_ioctl = nvme_ns_chr_ioctl, 3502 .compat_ioctl = compat_ptr_ioctl, 3503 .uring_cmd = nvme_ns_chr_uring_cmd, 3504 .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll, 3505 }; 3506 3507 static int nvme_add_ns_cdev(struct nvme_ns *ns) 3508 { 3509 int ret; 3510 3511 ns->cdev_device.parent = ns->ctrl->device; 3512 ret = dev_set_name(&ns->cdev_device, "ng%dn%d", 3513 ns->ctrl->instance, ns->head->instance); 3514 if (ret) 3515 return ret; 3516 3517 return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops, 3518 ns->ctrl->ops->module); 3519 } 3520 3521 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, 3522 struct nvme_ns_info *info) 3523 { 3524 struct nvme_ns_head *head; 3525 size_t size = sizeof(*head); 3526 int ret = -ENOMEM; 3527 3528 #ifdef CONFIG_NVME_MULTIPATH 3529 size += num_possible_nodes() * sizeof(struct nvme_ns *); 3530 #endif 3531 3532 head = kzalloc(size, GFP_KERNEL); 3533 if (!head) 3534 goto out; 3535 ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL); 3536 if (ret < 0) 3537 goto out_free_head; 3538 head->instance = ret; 3539 INIT_LIST_HEAD(&head->list); 3540 ret = 
init_srcu_struct(&head->srcu);
	if (ret)
		goto out_ida_remove;
	head->subsys = ctrl->subsys;
	head->ns_id = info->nsid;
	head->ids = info->ids;
	head->shared = info->is_shared;
	ratelimit_state_init(&head->rs_nuse, 5 * HZ, 1);
	ratelimit_set_flags(&head->rs_nuse, RATELIMIT_MSG_ON_RELEASE);
	kref_init(&head->ref);

	if (head->ids.csi) {
		ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
		if (ret)
			goto out_cleanup_srcu;
	} else
		head->effects = ctrl->effects;

	ret = nvme_mpath_alloc_disk(ctrl, head);
	if (ret)
		goto out_cleanup_srcu;

	list_add_tail(&head->entry, &ctrl->subsys->nsheads);

	kref_get(&ctrl->subsys->ref);

	return head;
out_cleanup_srcu:
	cleanup_srcu_struct(&head->srcu);
out_ida_remove:
	ida_free(&ctrl->subsys->ns_ida, head->instance);
out_free_head:
	kfree(head);
out:
	if (ret > 0)
		ret = blk_status_to_errno(nvme_error_status(ret));
	return ERR_PTR(ret);
}

static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
		struct nvme_ns_ids *ids)
{
	struct nvme_subsystem *s;
	int ret = 0;

	/*
	 * Note that this check is racy as we try to avoid holding the global
	 * lock over the whole ns_head creation. But it is only intended as
	 * a sanity check anyway.
	 */
	mutex_lock(&nvme_subsystems_lock);
	list_for_each_entry(s, &nvme_subsystems, entry) {
		if (s == this)
			continue;
		mutex_lock(&s->lock);
		ret = nvme_subsys_check_duplicate_ids(s, ids);
		mutex_unlock(&s->lock);
		if (ret)
			break;
	}
	mutex_unlock(&nvme_subsystems_lock);

	return ret;
}

static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_ns_head *head = NULL;
	int ret;

	ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
	if (ret) {
		/*
		 * We've found two different namespaces on two different
		 * subsystems that report the same ID. This is pretty nasty
		 * for anything that actually requires unique device
		 * identification. In the kernel we need this for multipathing,
		 * and in user space the /dev/disk/by-id/ links rely on it.
		 *
		 * If the device also claims to be multi-path capable, back off
		 * here now and refuse to probe the second device, as this is
		 * a recipe for data corruption. If not, this is probably a
		 * cheap consumer device on the PCIe bus, so let the user
		 * proceed and use the shiny toy, but warn that with a changing
		 * probing order (which due to our async probing could just be
		 * a device taking longer to start up) the other device could
		 * show up at any time.
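		 * When we do proceed, the reported IDs are cleared below and
		 * NVME_QUIRK_BOGUS_NID is set so they stay ignored from now on.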
		 */
		nvme_print_device_info(ctrl);
		if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */
		    ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) &&
		     info->is_shared)) {
			dev_err(ctrl->device,
				"ignoring nsid %d because of duplicate IDs\n",
				info->nsid);
			return ret;
		}

		dev_err(ctrl->device,
			"clearing duplicate IDs for nsid %d\n", info->nsid);
		dev_err(ctrl->device,
			"use of /dev/disk/by-id/ may cause data corruption\n");
		memset(&info->ids.nguid, 0, sizeof(info->ids.nguid));
		memset(&info->ids.uuid, 0, sizeof(info->ids.uuid));
		memset(&info->ids.eui64, 0, sizeof(info->ids.eui64));
		ctrl->quirks |= NVME_QUIRK_BOGUS_NID;
	}

	mutex_lock(&ctrl->subsys->lock);
	head = nvme_find_ns_head(ctrl, info->nsid);
	if (!head) {
		ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids);
		if (ret) {
			dev_err(ctrl->device,
				"duplicate IDs in subsystem for nsid %d\n",
				info->nsid);
			goto out_unlock;
		}
		head = nvme_alloc_ns_head(ctrl, info);
		if (IS_ERR(head)) {
			ret = PTR_ERR(head);
			goto out_unlock;
		}
	} else {
		ret = -EINVAL;
		if (!info->is_shared || !head->shared) {
			dev_err(ctrl->device,
				"Duplicate unshared namespace %d\n",
				info->nsid);
			goto out_put_ns_head;
		}
		if (!nvme_ns_ids_equal(&head->ids, &info->ids)) {
			dev_err(ctrl->device,
				"IDs don't match for shared namespace %d\n",
				info->nsid);
			goto out_put_ns_head;
		}

		if (!multipath) {
			dev_warn(ctrl->device,
				"Found shared namespace %d, but multipathing not supported.\n",
				info->nsid);
			dev_warn_once(ctrl->device,
				"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n");
		}
	}

	list_add_tail_rcu(&ns->siblings, &head->list);
	ns->head = head;
	mutex_unlock(&ctrl->subsys->lock);
	return 0;

out_put_ns_head:
	nvme_put_ns_head(head);
out_unlock:
	mutex_unlock(&ctrl->subsys->lock);
	return ret;
}

struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns, *ret = NULL;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->ns_id == nsid) {
			if (!nvme_get_ns(ns))
				continue;
			ret = ns;
			break;
		}
		if (ns->head->ns_id > nsid)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);

/*
 * Add the namespace to the controller list while keeping the list ordered.
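 * The list is walked backwards because the new namespace typically has the
 * highest NSID seen so far (scans proceed in ascending NSID order), so the
 * insertion point is usually found immediately.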
3722 */ 3723 static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) 3724 { 3725 struct nvme_ns *tmp; 3726 3727 list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) { 3728 if (tmp->head->ns_id < ns->head->ns_id) { 3729 list_add(&ns->list, &tmp->list); 3730 return; 3731 } 3732 } 3733 list_add(&ns->list, &ns->ctrl->namespaces); 3734 } 3735 3736 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) 3737 { 3738 struct nvme_ns *ns; 3739 struct gendisk *disk; 3740 int node = ctrl->numa_node; 3741 3742 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); 3743 if (!ns) 3744 return; 3745 3746 disk = blk_mq_alloc_disk(ctrl->tagset, NULL, ns); 3747 if (IS_ERR(disk)) 3748 goto out_free_ns; 3749 disk->fops = &nvme_bdev_ops; 3750 disk->private_data = ns; 3751 3752 ns->disk = disk; 3753 ns->queue = disk->queue; 3754 3755 if (ctrl->opts && ctrl->opts->data_digest) 3756 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue); 3757 3758 blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); 3759 if (ctrl->ops->supports_pci_p2pdma && 3760 ctrl->ops->supports_pci_p2pdma(ctrl)) 3761 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); 3762 3763 ns->ctrl = ctrl; 3764 kref_init(&ns->kref); 3765 3766 if (nvme_init_ns_head(ns, info)) 3767 goto out_cleanup_disk; 3768 3769 /* 3770 * If multipathing is enabled, the device name for all disks and not 3771 * just those that represent shared namespaces needs to be based on the 3772 * subsystem instance. Using the controller instance for private 3773 * namespaces could lead to naming collisions between shared and private 3774 * namespaces if they don't use a common numbering scheme. 3775 * 3776 * If multipathing is not enabled, disk names must use the controller 3777 * instance as shared namespaces will show up as multiple block 3778 * devices. 3779 */ 3780 if (nvme_ns_head_multipath(ns->head)) { 3781 sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, 3782 ctrl->instance, ns->head->instance); 3783 disk->flags |= GENHD_FL_HIDDEN; 3784 } else if (multipath) { 3785 sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, 3786 ns->head->instance); 3787 } else { 3788 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, 3789 ns->head->instance); 3790 } 3791 3792 if (nvme_update_ns_info(ns, info)) 3793 goto out_unlink_ns; 3794 3795 down_write(&ctrl->namespaces_rwsem); 3796 /* 3797 * Ensure that no namespaces are added to the ctrl list after the queues 3798 * are frozen, thereby avoiding a deadlock between scan and reset. 3799 */ 3800 if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) { 3801 up_write(&ctrl->namespaces_rwsem); 3802 goto out_unlink_ns; 3803 } 3804 nvme_ns_add_to_ctrl_list(ns); 3805 up_write(&ctrl->namespaces_rwsem); 3806 nvme_get_ctrl(ctrl); 3807 3808 if (device_add_disk(ctrl->device, ns->disk, nvme_ns_attr_groups)) 3809 goto out_cleanup_ns_from_list; 3810 3811 if (!nvme_ns_head_multipath(ns->head)) 3812 nvme_add_ns_cdev(ns); 3813 3814 nvme_mpath_add_disk(ns, info->anagrpid); 3815 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); 3816 3817 /* 3818 * Set ns->disk->device->driver_data to ns so we can access 3819 * ns->head->passthru_err_log_enabled in 3820 * nvme_io_passthru_err_log_enabled_[store | show](). 
3821 */ 3822 dev_set_drvdata(disk_to_dev(ns->disk), ns); 3823 3824 return; 3825 3826 out_cleanup_ns_from_list: 3827 nvme_put_ctrl(ctrl); 3828 down_write(&ctrl->namespaces_rwsem); 3829 list_del_init(&ns->list); 3830 up_write(&ctrl->namespaces_rwsem); 3831 out_unlink_ns: 3832 mutex_lock(&ctrl->subsys->lock); 3833 list_del_rcu(&ns->siblings); 3834 if (list_empty(&ns->head->list)) 3835 list_del_init(&ns->head->entry); 3836 mutex_unlock(&ctrl->subsys->lock); 3837 nvme_put_ns_head(ns->head); 3838 out_cleanup_disk: 3839 put_disk(disk); 3840 out_free_ns: 3841 kfree(ns); 3842 } 3843 3844 static void nvme_ns_remove(struct nvme_ns *ns) 3845 { 3846 bool last_path = false; 3847 3848 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) 3849 return; 3850 3851 clear_bit(NVME_NS_READY, &ns->flags); 3852 set_capacity(ns->disk, 0); 3853 nvme_fault_inject_fini(&ns->fault_inject); 3854 3855 /* 3856 * Ensure that !NVME_NS_READY is seen by other threads to prevent 3857 * this ns going back into current_path. 3858 */ 3859 synchronize_srcu(&ns->head->srcu); 3860 3861 /* wait for concurrent submissions */ 3862 if (nvme_mpath_clear_current_path(ns)) 3863 synchronize_srcu(&ns->head->srcu); 3864 3865 mutex_lock(&ns->ctrl->subsys->lock); 3866 list_del_rcu(&ns->siblings); 3867 if (list_empty(&ns->head->list)) { 3868 list_del_init(&ns->head->entry); 3869 last_path = true; 3870 } 3871 mutex_unlock(&ns->ctrl->subsys->lock); 3872 3873 /* guarantee not available in head->list */ 3874 synchronize_srcu(&ns->head->srcu); 3875 3876 if (!nvme_ns_head_multipath(ns->head)) 3877 nvme_cdev_del(&ns->cdev, &ns->cdev_device); 3878 del_gendisk(ns->disk); 3879 3880 down_write(&ns->ctrl->namespaces_rwsem); 3881 list_del_init(&ns->list); 3882 up_write(&ns->ctrl->namespaces_rwsem); 3883 3884 if (last_path) 3885 nvme_mpath_shutdown_disk(ns->head); 3886 nvme_put_ns(ns); 3887 } 3888 3889 static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) 3890 { 3891 struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid); 3892 3893 if (ns) { 3894 nvme_ns_remove(ns); 3895 nvme_put_ns(ns); 3896 } 3897 } 3898 3899 static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info) 3900 { 3901 int ret = NVME_SC_INVALID_NS | NVME_SC_DNR; 3902 3903 if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) { 3904 dev_err(ns->ctrl->device, 3905 "identifiers changed for nsid %d\n", ns->head->ns_id); 3906 goto out; 3907 } 3908 3909 ret = nvme_update_ns_info(ns, info); 3910 out: 3911 /* 3912 * Only remove the namespace if we got a fatal error back from the 3913 * device, otherwise ignore the error and just move on. 3914 * 3915 * TODO: we should probably schedule a delayed retry here. 3916 */ 3917 if (ret > 0 && (ret & NVME_SC_DNR)) 3918 nvme_ns_remove(ns); 3919 } 3920 3921 static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3922 { 3923 struct nvme_ns_info info = { .nsid = nsid }; 3924 struct nvme_ns *ns; 3925 int ret; 3926 3927 if (nvme_identify_ns_descs(ctrl, &info)) 3928 return; 3929 3930 if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) { 3931 dev_warn(ctrl->device, 3932 "command set not reported for nsid: %d\n", nsid); 3933 return; 3934 } 3935 3936 /* 3937 * If available, try to use the Command Set Independent Identify Namespace 3938 * data structure to find all the generic information that is needed to 3939 * set up a namespace. If not, fall back to the legacy version.
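* There is no dedicated capability bit for this data structure; the CRIMS bit in CAP is used here as a hint that the controller implements NVMe 2.0 features, and for command sets other than NVM and ZNS the legacy NVM Identify Namespace data is not applicable anyway.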
3940 */ 3941 if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) || 3942 (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) 3943 ret = nvme_ns_info_from_id_cs_indep(ctrl, &info); 3944 else 3945 ret = nvme_ns_info_from_identify(ctrl, &info); 3946 3947 if (info.is_removed) 3948 nvme_ns_remove_by_nsid(ctrl, nsid); 3949 3950 /* 3951 * Ignore the namespace if it is not ready. We will get an AEN once it 3952 * becomes ready and restart the scan. 3953 */ 3954 if (ret || !info.is_ready) 3955 return; 3956 3957 ns = nvme_find_get_ns(ctrl, nsid); 3958 if (ns) { 3959 nvme_validate_ns(ns, &info); 3960 nvme_put_ns(ns); 3961 } else { 3962 nvme_alloc_ns(ctrl, &info); 3963 } 3964 } 3965 3966 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 3967 unsigned nsid) 3968 { 3969 struct nvme_ns *ns, *next; 3970 LIST_HEAD(rm_list); 3971 3972 down_write(&ctrl->namespaces_rwsem); 3973 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { 3974 if (ns->head->ns_id > nsid) 3975 list_move_tail(&ns->list, &rm_list); 3976 } 3977 up_write(&ctrl->namespaces_rwsem); 3978 3979 list_for_each_entry_safe(ns, next, &rm_list, list) 3980 nvme_ns_remove(ns); 3981 3982 } 3983 3984 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) 3985 { 3986 const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32); 3987 __le32 *ns_list; 3988 u32 prev = 0; 3989 int ret = 0, i; 3990 3991 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 3992 if (!ns_list) 3993 return -ENOMEM; 3994 3995 for (;;) { 3996 struct nvme_command cmd = { 3997 .identify.opcode = nvme_admin_identify, 3998 .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST, 3999 .identify.nsid = cpu_to_le32(prev), 4000 }; 4001 4002 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list, 4003 NVME_IDENTIFY_DATA_SIZE); 4004 if (ret) { 4005 dev_warn(ctrl->device, 4006 "Identify NS List failed (status=0x%x)\n", ret); 4007 goto free; 4008 } 4009 4010 for (i = 0; i < nr_entries; i++) { 4011 u32 nsid = le32_to_cpu(ns_list[i]); 4012 4013 if (!nsid) /* end of the list? */ 4014 goto out; 4015 nvme_scan_ns(ctrl, nsid); 4016 while (++prev < nsid) 4017 nvme_ns_remove_by_nsid(ctrl, prev); 4018 } 4019 } 4020 out: 4021 nvme_remove_invalid_namespaces(ctrl, prev); 4022 free: 4023 kfree(ns_list); 4024 return ret; 4025 } 4026 4027 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl) 4028 { 4029 struct nvme_id_ctrl *id; 4030 u32 nn, i; 4031 4032 if (nvme_identify_ctrl(ctrl, &id)) 4033 return; 4034 nn = le32_to_cpu(id->nn); 4035 kfree(id); 4036 4037 for (i = 1; i <= nn; i++) 4038 nvme_scan_ns(ctrl, i); 4039 4040 nvme_remove_invalid_namespaces(ctrl, nn); 4041 } 4042 4043 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) 4044 { 4045 size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32); 4046 __le32 *log; 4047 int error; 4048 4049 log = kzalloc(log_size, GFP_KERNEL); 4050 if (!log) 4051 return; 4052 4053 /* 4054 * We need to read the log to clear the AEN, but we don't want to rely 4055 * on it for the changed namespace information as userspace could have 4056 * raced with us in reading the log page, which could cause us to miss 4057 * updates. 
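* The log read is only an acknowledgement; the scan below re-enumerates every namespace rather than acting on the log contents.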
4058 */ 4059 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, 4060 NVME_CSI_NVM, log, log_size, 0); 4061 if (error) 4062 dev_warn(ctrl->device, 4063 "reading changed ns log failed: %d\n", error); 4064 4065 kfree(log); 4066 } 4067 4068 static void nvme_scan_work(struct work_struct *work) 4069 { 4070 struct nvme_ctrl *ctrl = 4071 container_of(work, struct nvme_ctrl, scan_work); 4072 int ret; 4073 4074 /* No tagset on a live ctrl means IO queues could not be created */ 4075 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset) 4076 return; 4077 4078 /* 4079 * Identify controller limits can change at controller reset due to a 4080 * new firmware download, and even though this is not common we cannot 4081 * ignore such a scenario. The controller's non-MDTS limits are reported 4082 * in units of logical blocks, which depend on the format of the attached 4083 * namespace. Hence re-read the limits at the time of ns allocation. 4084 */ 4085 ret = nvme_init_non_mdts_limits(ctrl); 4086 if (ret < 0) { 4087 dev_warn(ctrl->device, 4088 "reading non-mdts-limits failed: %d\n", ret); 4089 return; 4090 } 4091 4092 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { 4093 dev_info(ctrl->device, "rescanning namespaces.\n"); 4094 nvme_clear_changed_ns_log(ctrl); 4095 } 4096 4097 mutex_lock(&ctrl->scan_lock); 4098 if (nvme_ctrl_limited_cns(ctrl)) { 4099 nvme_scan_ns_sequential(ctrl); 4100 } else { 4101 /* 4102 * Fall back to sequential scan if DNR is set to handle broken 4103 * devices which should support Identify NS List (as per the VS 4104 * they report) but don't actually support it. 4105 */ 4106 ret = nvme_scan_ns_list(ctrl); 4107 if (ret > 0 && ret & NVME_SC_DNR) 4108 nvme_scan_ns_sequential(ctrl); 4109 } 4110 mutex_unlock(&ctrl->scan_lock); 4111 } 4112 4113 /* 4114 * This function iterates the namespace list unlocked to allow recovery from 4115 * controller failure. It is up to the caller to ensure the namespace list is 4116 * not modified by scan work while this function is executing. 4117 */ 4118 void nvme_remove_namespaces(struct nvme_ctrl *ctrl) 4119 { 4120 struct nvme_ns *ns, *next; 4121 LIST_HEAD(ns_list); 4122 4123 /* 4124 * make sure to requeue I/O to all namespaces as these 4125 * might result from the scan itself and must complete 4126 * for the scan_work to make progress 4127 */ 4128 nvme_mpath_clear_ctrl_paths(ctrl); 4129 4130 /* 4131 * Unquiesce io queues so any pending IO won't hang, especially 4132 * those submitted from scan work 4133 */ 4134 nvme_unquiesce_io_queues(ctrl); 4135 4136 /* prevent racing with ns scanning */ 4137 flush_work(&ctrl->scan_work); 4138 4139 /* 4140 * The dead state indicates the controller was not gracefully 4141 * disconnected. In that case, we won't be able to flush any data while 4142 * removing the namespaces' disks; fail all the queues now to avoid 4143 * potentially having to clean up the failed sync later.
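* (The DEAD state is typically entered after a surprise removal or when the controller could not be disabled cleanly.)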
4144 */ 4145 if (nvme_ctrl_state(ctrl) == NVME_CTRL_DEAD) 4146 nvme_mark_namespaces_dead(ctrl); 4147 4148 /* this is a no-op when called from the controller reset handler */ 4149 nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO); 4150 4151 down_write(&ctrl->namespaces_rwsem); 4152 list_splice_init(&ctrl->namespaces, &ns_list); 4153 up_write(&ctrl->namespaces_rwsem); 4154 4155 list_for_each_entry_safe(ns, next, &ns_list, list) 4156 nvme_ns_remove(ns); 4157 } 4158 EXPORT_SYMBOL_GPL(nvme_remove_namespaces); 4159 4160 static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env) 4161 { 4162 const struct nvme_ctrl *ctrl = 4163 container_of(dev, struct nvme_ctrl, ctrl_device); 4164 struct nvmf_ctrl_options *opts = ctrl->opts; 4165 int ret; 4166 4167 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name); 4168 if (ret) 4169 return ret; 4170 4171 if (opts) { 4172 ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr); 4173 if (ret) 4174 return ret; 4175 4176 ret = add_uevent_var(env, "NVME_TRSVCID=%s", 4177 opts->trsvcid ?: "none"); 4178 if (ret) 4179 return ret; 4180 4181 ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s", 4182 opts->host_traddr ?: "none"); 4183 if (ret) 4184 return ret; 4185 4186 ret = add_uevent_var(env, "NVME_HOST_IFACE=%s", 4187 opts->host_iface ?: "none"); 4188 } 4189 return ret; 4190 } 4191 4192 static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata) 4193 { 4194 char *envp[2] = { envdata, NULL }; 4195 4196 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); 4197 } 4198 4199 static void nvme_aen_uevent(struct nvme_ctrl *ctrl) 4200 { 4201 char *envp[2] = { NULL, NULL }; 4202 u32 aen_result = ctrl->aen_result; 4203 4204 ctrl->aen_result = 0; 4205 if (!aen_result) 4206 return; 4207 4208 envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result); 4209 if (!envp[0]) 4210 return; 4211 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); 4212 kfree(envp[0]); 4213 } 4214 4215 static void nvme_async_event_work(struct work_struct *work) 4216 { 4217 struct nvme_ctrl *ctrl = 4218 container_of(work, struct nvme_ctrl, async_event_work); 4219 4220 nvme_aen_uevent(ctrl); 4221 4222 /* 4223 * The transport drivers must guarantee AER submission here is safe by 4224 * flushing ctrl async_event_work after changing the controller state 4225 * from LIVE and before freeing the admin queue. 
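* For example, a teardown sequence that moves the state away from LIVE, flushes async_event_work and only then frees the admin queue makes the state check below sufficient.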
4226 */ 4227 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE) 4228 ctrl->ops->submit_async_event(ctrl); 4229 } 4230 4231 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) 4232 { 4233 4234 u32 csts; 4235 4236 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) 4237 return false; 4238 4239 if (csts == ~0) 4240 return false; 4241 4242 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); 4243 } 4244 4245 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) 4246 { 4247 struct nvme_fw_slot_info_log *log; 4248 u8 next_fw_slot, cur_fw_slot; 4249 4250 log = kmalloc(sizeof(*log), GFP_KERNEL); 4251 if (!log) 4252 return; 4253 4254 if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM, 4255 log, sizeof(*log), 0)) { 4256 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); 4257 goto out_free_log; 4258 } 4259 4260 cur_fw_slot = log->afi & 0x7; 4261 next_fw_slot = (log->afi & 0x70) >> 4; 4262 if (!cur_fw_slot || (next_fw_slot && (cur_fw_slot != next_fw_slot))) { 4263 dev_info(ctrl->device, 4264 "Firmware is activated after next Controller Level Reset\n"); 4265 goto out_free_log; 4266 } 4267 4268 memcpy(ctrl->subsys->firmware_rev, &log->frs[cur_fw_slot - 1], 4269 sizeof(ctrl->subsys->firmware_rev)); 4270 4271 out_free_log: 4272 kfree(log); 4273 } 4274 4275 static void nvme_fw_act_work(struct work_struct *work) 4276 { 4277 struct nvme_ctrl *ctrl = container_of(work, 4278 struct nvme_ctrl, fw_act_work); 4279 unsigned long fw_act_timeout; 4280 4281 nvme_auth_stop(ctrl); 4282 4283 if (ctrl->mtfa) 4284 fw_act_timeout = jiffies + 4285 msecs_to_jiffies(ctrl->mtfa * 100); 4286 else 4287 fw_act_timeout = jiffies + 4288 msecs_to_jiffies(admin_timeout * 1000); 4289 4290 nvme_quiesce_io_queues(ctrl); 4291 while (nvme_ctrl_pp_status(ctrl)) { 4292 if (time_after(jiffies, fw_act_timeout)) { 4293 dev_warn(ctrl->device, 4294 "Fw activation timeout, reset controller\n"); 4295 nvme_try_sched_reset(ctrl); 4296 return; 4297 } 4298 msleep(100); 4299 } 4300 4301 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) 4302 return; 4303 4304 nvme_unquiesce_io_queues(ctrl); 4305 /* read FW slot information to clear the AER */ 4306 nvme_get_fw_slot_info(ctrl); 4307 4308 queue_work(nvme_wq, &ctrl->async_event_work); 4309 } 4310 4311 static u32 nvme_aer_type(u32 result) 4312 { 4313 return result & 0x7; 4314 } 4315 4316 static u32 nvme_aer_subtype(u32 result) 4317 { 4318 return (result & 0xff00) >> 8; 4319 } 4320 4321 static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) 4322 { 4323 u32 aer_notice_type = nvme_aer_subtype(result); 4324 bool requeue = true; 4325 4326 switch (aer_notice_type) { 4327 case NVME_AER_NOTICE_NS_CHANGED: 4328 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); 4329 nvme_queue_scan(ctrl); 4330 break; 4331 case NVME_AER_NOTICE_FW_ACT_STARTING: 4332 /* 4333 * We are (ab)using the RESETTING state to prevent subsequent 4334 * recovery actions from interfering with the controller's 4335 * firmware activation. 
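* While the controller is RESETTING no further reset can be scheduled, so the activation wait in nvme_fw_act_work() runs undisturbed and moves the state back to LIVE once CSTS.PP clears.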
4336 */ 4337 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { 4338 requeue = false; 4339 queue_work(nvme_wq, &ctrl->fw_act_work); 4340 } 4341 break; 4342 #ifdef CONFIG_NVME_MULTIPATH 4343 case NVME_AER_NOTICE_ANA: 4344 if (!ctrl->ana_log_buf) 4345 break; 4346 queue_work(nvme_wq, &ctrl->ana_work); 4347 break; 4348 #endif 4349 case NVME_AER_NOTICE_DISC_CHANGED: 4350 ctrl->aen_result = result; 4351 break; 4352 default: 4353 dev_warn(ctrl->device, "async event result %08x\n", result); 4354 } 4355 return requeue; 4356 } 4357 4358 static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl) 4359 { 4360 dev_warn(ctrl->device, "resetting controller due to AER\n"); 4361 nvme_reset_ctrl(ctrl); 4362 } 4363 4364 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, 4365 volatile union nvme_result *res) 4366 { 4367 u32 result = le32_to_cpu(res->u32); 4368 u32 aer_type = nvme_aer_type(result); 4369 u32 aer_subtype = nvme_aer_subtype(result); 4370 bool requeue = true; 4371 4372 if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) 4373 return; 4374 4375 trace_nvme_async_event(ctrl, result); 4376 switch (aer_type) { 4377 case NVME_AER_NOTICE: 4378 requeue = nvme_handle_aen_notice(ctrl, result); 4379 break; 4380 case NVME_AER_ERROR: 4381 /* 4382 * For a persistent internal error, don't run async_event_work 4383 * to submit a new AER. The controller reset will do it. 4384 */ 4385 if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) { 4386 nvme_handle_aer_persistent_error(ctrl); 4387 return; 4388 } 4389 fallthrough; 4390 case NVME_AER_SMART: 4391 case NVME_AER_CSS: 4392 case NVME_AER_VS: 4393 ctrl->aen_result = result; 4394 break; 4395 default: 4396 break; 4397 } 4398 4399 if (requeue) 4400 queue_work(nvme_wq, &ctrl->async_event_work); 4401 } 4402 EXPORT_SYMBOL_GPL(nvme_complete_async_event); 4403 4404 int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, 4405 const struct blk_mq_ops *ops, unsigned int cmd_size) 4406 { 4407 struct queue_limits lim = {}; 4408 int ret; 4409 4410 memset(set, 0, sizeof(*set)); 4411 set->ops = ops; 4412 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; 4413 if (ctrl->ops->flags & NVME_F_FABRICS) 4414 /* Reserved for fabric connect and keep alive */ 4415 set->reserved_tags = 2; 4416 set->numa_node = ctrl->numa_node; 4417 set->flags = BLK_MQ_F_NO_SCHED; 4418 if (ctrl->ops->flags & NVME_F_BLOCKING) 4419 set->flags |= BLK_MQ_F_BLOCKING; 4420 set->cmd_size = cmd_size; 4421 set->driver_data = ctrl; 4422 set->nr_hw_queues = 1; 4423 set->timeout = NVME_ADMIN_TIMEOUT; 4424 ret = blk_mq_alloc_tag_set(set); 4425 if (ret) 4426 return ret; 4427 4428 ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL); 4429 if (IS_ERR(ctrl->admin_q)) { 4430 ret = PTR_ERR(ctrl->admin_q); 4431 goto out_free_tagset; 4432 } 4433 4434 if (ctrl->ops->flags & NVME_F_FABRICS) { 4435 ctrl->fabrics_q = blk_mq_alloc_queue(set, NULL, NULL); 4436 if (IS_ERR(ctrl->fabrics_q)) { 4437 ret = PTR_ERR(ctrl->fabrics_q); 4438 goto out_cleanup_admin_q; 4439 } 4440 } 4441 4442 ctrl->admin_tagset = set; 4443 return 0; 4444 4445 out_cleanup_admin_q: 4446 blk_mq_destroy_queue(ctrl->admin_q); 4447 blk_put_queue(ctrl->admin_q); 4448 out_free_tagset: 4449 blk_mq_free_tag_set(set); 4450 ctrl->admin_q = NULL; 4451 ctrl->fabrics_q = NULL; 4452 return ret; 4453 } 4454 EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set); 4455 4456 void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl) 4457 { 4458 blk_mq_destroy_queue(ctrl->admin_q); 4459 blk_put_queue(ctrl->admin_q); 4460 if (ctrl->ops->flags & 
NVME_F_FABRICS) { 4461 blk_mq_destroy_queue(ctrl->fabrics_q); 4462 blk_put_queue(ctrl->fabrics_q); 4463 } 4464 blk_mq_free_tag_set(ctrl->admin_tagset); 4465 } 4466 EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set); 4467 4468 int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, 4469 const struct blk_mq_ops *ops, unsigned int nr_maps, 4470 unsigned int cmd_size) 4471 { 4472 int ret; 4473 4474 memset(set, 0, sizeof(*set)); 4475 set->ops = ops; 4476 set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1); 4477 /* 4478 * Some Apple controllers require tags to be unique across admin and 4479 * the (only) I/O queue, so reserve the first 32 tags of the I/O queue. 4480 */ 4481 if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS) 4482 set->reserved_tags = NVME_AQ_DEPTH; 4483 else if (ctrl->ops->flags & NVME_F_FABRICS) 4484 /* Reserved for fabric connect */ 4485 set->reserved_tags = 1; 4486 set->numa_node = ctrl->numa_node; 4487 set->flags = BLK_MQ_F_SHOULD_MERGE; 4488 if (ctrl->ops->flags & NVME_F_BLOCKING) 4489 set->flags |= BLK_MQ_F_BLOCKING; 4490 set->cmd_size = cmd_size; 4491 set->driver_data = ctrl; 4492 set->nr_hw_queues = ctrl->queue_count - 1; 4493 set->timeout = NVME_IO_TIMEOUT; 4494 set->nr_maps = nr_maps; 4495 ret = blk_mq_alloc_tag_set(set); 4496 if (ret) 4497 return ret; 4498 4499 if (ctrl->ops->flags & NVME_F_FABRICS) { 4500 ctrl->connect_q = blk_mq_alloc_queue(set, NULL, NULL); 4501 if (IS_ERR(ctrl->connect_q)) { 4502 ret = PTR_ERR(ctrl->connect_q); 4503 goto out_free_tag_set; 4504 } 4505 blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, 4506 ctrl->connect_q); 4507 } 4508 4509 ctrl->tagset = set; 4510 return 0; 4511 4512 out_free_tag_set: 4513 blk_mq_free_tag_set(set); 4514 ctrl->connect_q = NULL; 4515 return ret; 4516 } 4517 EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set); 4518 4519 void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl) 4520 { 4521 if (ctrl->ops->flags & NVME_F_FABRICS) { 4522 blk_mq_destroy_queue(ctrl->connect_q); 4523 blk_put_queue(ctrl->connect_q); 4524 } 4525 blk_mq_free_tag_set(ctrl->tagset); 4526 } 4527 EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set); 4528 4529 void nvme_stop_ctrl(struct nvme_ctrl *ctrl) 4530 { 4531 nvme_mpath_stop(ctrl); 4532 nvme_auth_stop(ctrl); 4533 nvme_stop_keep_alive(ctrl); 4534 nvme_stop_failfast_work(ctrl); 4535 flush_work(&ctrl->async_event_work); 4536 cancel_work_sync(&ctrl->fw_act_work); 4537 if (ctrl->ops->stop_ctrl) 4538 ctrl->ops->stop_ctrl(ctrl); 4539 } 4540 EXPORT_SYMBOL_GPL(nvme_stop_ctrl); 4541 4542 void nvme_start_ctrl(struct nvme_ctrl *ctrl) 4543 { 4544 nvme_enable_aen(ctrl); 4545 4546 /* 4547 * Persistent discovery controllers need to send an indication to userspace 4548 * to re-read the discovery log page to learn about possible changes 4549 * that were missed. We identify persistent discovery controllers by 4550 * checking that they have started once before, and hence are reconnecting.
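* Userspace (e.g. the udev rules shipped with nvme-cli) may react to the "NVME_EVENT=rediscover" change uevent sent below by re-reading the discovery log page.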
4551 */ 4552 if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && 4553 nvme_discovery_ctrl(ctrl)) 4554 nvme_change_uevent(ctrl, "NVME_EVENT=rediscover"); 4555 4556 if (ctrl->queue_count > 1) { 4557 nvme_queue_scan(ctrl); 4558 nvme_unquiesce_io_queues(ctrl); 4559 nvme_mpath_update(ctrl); 4560 } 4561 4562 nvme_change_uevent(ctrl, "NVME_EVENT=connected"); 4563 set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags); 4564 } 4565 EXPORT_SYMBOL_GPL(nvme_start_ctrl); 4566 4567 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) 4568 { 4569 nvme_hwmon_exit(ctrl); 4570 nvme_fault_inject_fini(&ctrl->fault_inject); 4571 dev_pm_qos_hide_latency_tolerance(ctrl->device); 4572 cdev_device_del(&ctrl->cdev, ctrl->device); 4573 nvme_put_ctrl(ctrl); 4574 } 4575 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); 4576 4577 static void nvme_free_cels(struct nvme_ctrl *ctrl) 4578 { 4579 struct nvme_effects_log *cel; 4580 unsigned long i; 4581 4582 xa_for_each(&ctrl->cels, i, cel) { 4583 xa_erase(&ctrl->cels, i); 4584 kfree(cel); 4585 } 4586 4587 xa_destroy(&ctrl->cels); 4588 } 4589 4590 static void nvme_free_ctrl(struct device *dev) 4591 { 4592 struct nvme_ctrl *ctrl = 4593 container_of(dev, struct nvme_ctrl, ctrl_device); 4594 struct nvme_subsystem *subsys = ctrl->subsys; 4595 4596 if (!subsys || ctrl->instance != subsys->instance) 4597 ida_free(&nvme_instance_ida, ctrl->instance); 4598 key_put(ctrl->tls_key); 4599 nvme_free_cels(ctrl); 4600 nvme_mpath_uninit(ctrl); 4601 nvme_auth_stop(ctrl); 4602 nvme_auth_free(ctrl); 4603 __free_page(ctrl->discard_page); 4604 free_opal_dev(ctrl->opal_dev); 4605 4606 if (subsys) { 4607 mutex_lock(&nvme_subsystems_lock); 4608 list_del(&ctrl->subsys_entry); 4609 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); 4610 mutex_unlock(&nvme_subsystems_lock); 4611 } 4612 4613 ctrl->ops->free_ctrl(ctrl); 4614 4615 if (subsys) 4616 nvme_put_subsystem(subsys); 4617 } 4618 4619 /* 4620 * Initialize an NVMe controller structure. This needs to be called during 4621 * the earliest initialization so that we have the initialized structure 4622 * around during probing.
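* Only host software state is set up here (locks, work items, the character device and the instance number); the device itself is not touched until the transport driver later enables it and calls nvme_start_ctrl().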
4623 */ 4624 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, 4625 const struct nvme_ctrl_ops *ops, unsigned long quirks) 4626 { 4627 int ret; 4628 4629 WRITE_ONCE(ctrl->state, NVME_CTRL_NEW); 4630 ctrl->passthru_err_log_enabled = false; 4631 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); 4632 spin_lock_init(&ctrl->lock); 4633 mutex_init(&ctrl->scan_lock); 4634 INIT_LIST_HEAD(&ctrl->namespaces); 4635 xa_init(&ctrl->cels); 4636 init_rwsem(&ctrl->namespaces_rwsem); 4637 ctrl->dev = dev; 4638 ctrl->ops = ops; 4639 ctrl->quirks = quirks; 4640 ctrl->numa_node = NUMA_NO_NODE; 4641 INIT_WORK(&ctrl->scan_work, nvme_scan_work); 4642 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); 4643 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); 4644 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); 4645 init_waitqueue_head(&ctrl->state_wq); 4646 4647 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); 4648 INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work); 4649 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); 4650 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; 4651 ctrl->ka_last_check_time = jiffies; 4652 4653 BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) > 4654 PAGE_SIZE); 4655 ctrl->discard_page = alloc_page(GFP_KERNEL); 4656 if (!ctrl->discard_page) { 4657 ret = -ENOMEM; 4658 goto out; 4659 } 4660 4661 ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL); 4662 if (ret < 0) 4663 goto out; 4664 ctrl->instance = ret; 4665 4666 device_initialize(&ctrl->ctrl_device); 4667 ctrl->device = &ctrl->ctrl_device; 4668 ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt), 4669 ctrl->instance); 4670 ctrl->device->class = &nvme_class; 4671 ctrl->device->parent = ctrl->dev; 4672 if (ops->dev_attr_groups) 4673 ctrl->device->groups = ops->dev_attr_groups; 4674 else 4675 ctrl->device->groups = nvme_dev_attr_groups; 4676 ctrl->device->release = nvme_free_ctrl; 4677 dev_set_drvdata(ctrl->device, ctrl); 4678 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); 4679 if (ret) 4680 goto out_release_instance; 4681 4682 nvme_get_ctrl(ctrl); 4683 cdev_init(&ctrl->cdev, &nvme_dev_fops); 4684 ctrl->cdev.owner = ops->module; 4685 ret = cdev_device_add(&ctrl->cdev, ctrl->device); 4686 if (ret) 4687 goto out_free_name; 4688 4689 /* 4690 * Initialize latency tolerance controls. The sysfs files won't 4691 * be visible to userspace unless the device actually supports APST. 
4692 */ 4693 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; 4694 dev_pm_qos_update_user_latency_tolerance(ctrl->device, 4695 min(default_ps_max_latency_us, (unsigned long)S32_MAX)); 4696 4697 nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); 4698 nvme_mpath_init_ctrl(ctrl); 4699 ret = nvme_auth_init_ctrl(ctrl); 4700 if (ret) 4701 goto out_free_cdev; 4702 4703 return 0; 4704 out_free_cdev: 4705 nvme_fault_inject_fini(&ctrl->fault_inject); 4706 dev_pm_qos_hide_latency_tolerance(ctrl->device); 4707 cdev_device_del(&ctrl->cdev, ctrl->device); 4708 out_free_name: 4709 nvme_put_ctrl(ctrl); 4710 kfree_const(ctrl->device->kobj.name); 4711 out_release_instance: 4712 ida_free(&nvme_instance_ida, ctrl->instance); 4713 out: 4714 if (ctrl->discard_page) 4715 __free_page(ctrl->discard_page); 4716 return ret; 4717 } 4718 EXPORT_SYMBOL_GPL(nvme_init_ctrl); 4719 4720 /* let I/O to all namespaces fail in preparation for surprise removal */ 4721 void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl) 4722 { 4723 struct nvme_ns *ns; 4724 4725 down_read(&ctrl->namespaces_rwsem); 4726 list_for_each_entry(ns, &ctrl->namespaces, list) 4727 blk_mark_disk_dead(ns->disk); 4728 up_read(&ctrl->namespaces_rwsem); 4729 } 4730 EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead); 4731 4732 void nvme_unfreeze(struct nvme_ctrl *ctrl) 4733 { 4734 struct nvme_ns *ns; 4735 4736 down_read(&ctrl->namespaces_rwsem); 4737 list_for_each_entry(ns, &ctrl->namespaces, list) 4738 blk_mq_unfreeze_queue(ns->queue); 4739 up_read(&ctrl->namespaces_rwsem); 4740 clear_bit(NVME_CTRL_FROZEN, &ctrl->flags); 4741 } 4742 EXPORT_SYMBOL_GPL(nvme_unfreeze); 4743 4744 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) 4745 { 4746 struct nvme_ns *ns; 4747 4748 down_read(&ctrl->namespaces_rwsem); 4749 list_for_each_entry(ns, &ctrl->namespaces, list) { 4750 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); 4751 if (timeout <= 0) 4752 break; 4753 } 4754 up_read(&ctrl->namespaces_rwsem); 4755 return timeout; 4756 } 4757 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); 4758 4759 void nvme_wait_freeze(struct nvme_ctrl *ctrl) 4760 { 4761 struct nvme_ns *ns; 4762 4763 down_read(&ctrl->namespaces_rwsem); 4764 list_for_each_entry(ns, &ctrl->namespaces, list) 4765 blk_mq_freeze_queue_wait(ns->queue); 4766 up_read(&ctrl->namespaces_rwsem); 4767 } 4768 EXPORT_SYMBOL_GPL(nvme_wait_freeze); 4769 4770 void nvme_start_freeze(struct nvme_ctrl *ctrl) 4771 { 4772 struct nvme_ns *ns; 4773 4774 set_bit(NVME_CTRL_FROZEN, &ctrl->flags); 4775 down_read(&ctrl->namespaces_rwsem); 4776 list_for_each_entry(ns, &ctrl->namespaces, list) 4777 blk_freeze_queue_start(ns->queue); 4778 up_read(&ctrl->namespaces_rwsem); 4779 } 4780 EXPORT_SYMBOL_GPL(nvme_start_freeze); 4781 4782 void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl) 4783 { 4784 if (!ctrl->tagset) 4785 return; 4786 if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags)) 4787 blk_mq_quiesce_tagset(ctrl->tagset); 4788 else 4789 blk_mq_wait_quiesce_done(ctrl->tagset); 4790 } 4791 EXPORT_SYMBOL_GPL(nvme_quiesce_io_queues); 4792 4793 void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl) 4794 { 4795 if (!ctrl->tagset) 4796 return; 4797 if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags)) 4798 blk_mq_unquiesce_tagset(ctrl->tagset); 4799 } 4800 EXPORT_SYMBOL_GPL(nvme_unquiesce_io_queues); 4801 4802 void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl) 4803 { 4804 if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) 4805 blk_mq_quiesce_queue(ctrl->admin_q); 
4806 else 4807 blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set); 4808 } 4809 EXPORT_SYMBOL_GPL(nvme_quiesce_admin_queue); 4810 4811 void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl) 4812 { 4813 if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) 4814 blk_mq_unquiesce_queue(ctrl->admin_q); 4815 } 4816 EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue); 4817 4818 void nvme_sync_io_queues(struct nvme_ctrl *ctrl) 4819 { 4820 struct nvme_ns *ns; 4821 4822 down_read(&ctrl->namespaces_rwsem); 4823 list_for_each_entry(ns, &ctrl->namespaces, list) 4824 blk_sync_queue(ns->queue); 4825 up_read(&ctrl->namespaces_rwsem); 4826 } 4827 EXPORT_SYMBOL_GPL(nvme_sync_io_queues); 4828 4829 void nvme_sync_queues(struct nvme_ctrl *ctrl) 4830 { 4831 nvme_sync_io_queues(ctrl); 4832 if (ctrl->admin_q) 4833 blk_sync_queue(ctrl->admin_q); 4834 } 4835 EXPORT_SYMBOL_GPL(nvme_sync_queues); 4836 4837 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file) 4838 { 4839 if (file->f_op != &nvme_dev_fops) 4840 return NULL; 4841 return file->private_data; 4842 } 4843 EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU); 4844 4845 /* 4846 * Check we didn't inadvertently grow the command structure sizes: 4847 */ 4848 static inline void _nvme_check_size(void) 4849 { 4850 BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64); 4851 BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); 4852 BUILD_BUG_ON(sizeof(struct nvme_identify) != 64); 4853 BUILD_BUG_ON(sizeof(struct nvme_features) != 64); 4854 BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64); 4855 BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); 4856 BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64); 4857 BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64); 4858 BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64); 4859 BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64); 4860 BUILD_BUG_ON(sizeof(struct nvme_command) != 64); 4861 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE); 4862 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE); 4863 BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) != 4864 NVME_IDENTIFY_DATA_SIZE); 4865 BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE); 4866 BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE); 4867 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE); 4868 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE); 4869 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); 4870 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); 4871 BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); 4872 BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64); 4873 BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512); 4874 } 4875 4876 4877 static int __init nvme_core_init(void) 4878 { 4879 int result = -ENOMEM; 4880 4881 _nvme_check_size(); 4882 4883 nvme_wq = alloc_workqueue("nvme-wq", 4884 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 4885 if (!nvme_wq) 4886 goto out; 4887 4888 nvme_reset_wq = alloc_workqueue("nvme-reset-wq", 4889 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 4890 if (!nvme_reset_wq) 4891 goto destroy_wq; 4892 4893 nvme_delete_wq = alloc_workqueue("nvme-delete-wq", 4894 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 4895 if (!nvme_delete_wq) 4896 goto destroy_reset_wq; 4897 4898 result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0, 4899 NVME_MINORS, "nvme"); 4900 if (result < 0) 4901 goto destroy_delete_wq; 4902 4903 result = class_register(&nvme_class); 4904 if (result) 4905 goto 
unregister_chrdev; 4906 4907 result = class_register(&nvme_subsys_class); 4908 if (result) 4909 goto destroy_class; 4910 4911 result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS, 4912 "nvme-generic"); 4913 if (result < 0) 4914 goto destroy_subsys_class; 4915 4916 result = class_register(&nvme_ns_chr_class); 4917 if (result) 4918 goto unregister_generic_ns; 4919 4920 result = nvme_init_auth(); 4921 if (result) 4922 goto destroy_ns_chr; 4923 return 0; 4924 4925 destroy_ns_chr: 4926 class_unregister(&nvme_ns_chr_class); 4927 unregister_generic_ns: 4928 unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); 4929 destroy_subsys_class: 4930 class_unregister(&nvme_subsys_class); 4931 destroy_class: 4932 class_unregister(&nvme_class); 4933 unregister_chrdev: 4934 unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); 4935 destroy_delete_wq: 4936 destroy_workqueue(nvme_delete_wq); 4937 destroy_reset_wq: 4938 destroy_workqueue(nvme_reset_wq); 4939 destroy_wq: 4940 destroy_workqueue(nvme_wq); 4941 out: 4942 return result; 4943 } 4944 4945 static void __exit nvme_core_exit(void) 4946 { 4947 nvme_exit_auth(); 4948 class_unregister(&nvme_ns_chr_class); 4949 class_unregister(&nvme_subsys_class); 4950 class_unregister(&nvme_class); 4951 unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); 4952 unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); 4953 destroy_workqueue(nvme_delete_wq); 4954 destroy_workqueue(nvme_reset_wq); 4955 destroy_workqueue(nvme_wq); 4956 ida_destroy(&nvme_ns_chr_minor_ida); 4957 ida_destroy(&nvme_instance_ida); 4958 } 4959 4960 MODULE_LICENSE("GPL"); 4961 MODULE_VERSION("1.0"); 4962 MODULE_DESCRIPTION("NVMe host core framework"); 4963 module_init(nvme_core_init); 4964 module_exit(nvme_core_exit); 4965