// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define NVME_MINORS	(1U << MINORBITS)

struct nvme_ns_info {
	struct nvme_ns_ids ids;
	u32 nsid;
	__le32 anagrpid;
	bool is_shared;
	bool is_readonly;
	bool is_ready;
	bool is_removed;
};

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static unsigned long apst_primary_timeout_ms = 100;
module_param(apst_primary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_primary_timeout_ms,
	"primary APST timeout in ms");

static unsigned long apst_secondary_timeout_ms = 2000;
module_param(apst_secondary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_timeout_ms,
	"secondary APST timeout in ms");

static unsigned long apst_primary_latency_tol_us = 15000;
module_param(apst_primary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_primary_latency_tol_us,
	"primary APST latency tolerance in us");

static unsigned long apst_secondary_latency_tol_us = 100000;
module_param(apst_secondary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_latency_tol_us,
	"secondary APST latency tolerance in us");

/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive, periodic reconnects etc. nvme_reset_wq
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq hosts controller deletion
 * works which flush reset works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_ctrl_base_chr_devt;
static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env);
static const struct class nvme_class = {
	.name = "nvme",
	.dev_uevent = nvme_class_uevent,
};

static const struct class nvme_subsys_class = {
	.name = "nvme-subsystem",
};

static DEFINE_IDA(nvme_ns_chr_minor_ida);
static dev_t nvme_ns_chr_devt;
static const struct class nvme_ns_chr_class = {
	.name = "nvme-generic",
};

static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd);

void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only queue new scan work when admin and IO queues are both alive
	 */
	if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
}

/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
	if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);

static void nvme_failfast_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, failfast_work);

	if (nvme_ctrl_state(ctrl) != NVME_CTRL_CONNECTING)
		return;

	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	dev_info(ctrl->device, "failfast expired\n");
	nvme_kick_requeue_lists(ctrl);
}

static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
		return;

	schedule_delayed_work(&ctrl->failfast_work,
			      ctrl->opts->fast_io_fail_tmo * HZ);
}

static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts)
		return;

	cancel_delayed_work_sync(&ctrl->failfast_work);
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
}


int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
			ret = -ENETRESET;
	}

	return ret;
}

static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));

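	/* Flush a pending reset so it cannot race with the teardown below. */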
	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
}

static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	/*
	 * Keep a reference until nvme_do_delete_ctrl() completes, since
	 * ->delete_ctrl can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}

static blk_status_t nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_RESV_CONFLICT;
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case NVME_SC_ZONE_TOO_MANY_OPEN:
		return BLK_STS_ZONE_OPEN_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}

static void nvme_retry_req(struct request *req)
{
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
	if (crd)
		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}

static void nvme_log_error(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	struct nvme_request *nr = nvme_req(req);

	if (ns) {
		pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %u blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
		       ns->disk ? ns->disk->disk_name : "?",
		       nvme_get_opcode_str(nr->cmd->common.opcode),
		       nr->cmd->common.opcode,
		       nvme_sect_to_lba(ns->head, blk_rq_pos(req)),
		       blk_rq_bytes(req) >> ns->head->lba_shift,
		       nvme_get_error_status_str(nr->status),
		       nr->status >> 8 & 7,	/* Status Code Type */
		       nr->status & 0xff,	/* Status Code */
		       nr->status & NVME_SC_MORE ? "MORE " : "",
"DNR " : ""); 336 return; 337 } 338 339 pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n", 340 dev_name(nr->ctrl->device), 341 nvme_get_admin_opcode_str(nr->cmd->common.opcode), 342 nr->cmd->common.opcode, 343 nvme_get_error_status_str(nr->status), 344 nr->status >> 8 & 7, /* Status Code Type */ 345 nr->status & 0xff, /* Status Code */ 346 nr->status & NVME_SC_MORE ? "MORE " : "", 347 nr->status & NVME_SC_DNR ? "DNR " : ""); 348 } 349 350 static void nvme_log_err_passthru(struct request *req) 351 { 352 struct nvme_ns *ns = req->q->queuedata; 353 struct nvme_request *nr = nvme_req(req); 354 355 pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s" 356 "cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw13=0x%x cdw14=0x%x cdw15=0x%x\n", 357 ns ? ns->disk->disk_name : dev_name(nr->ctrl->device), 358 ns ? nvme_get_opcode_str(nr->cmd->common.opcode) : 359 nvme_get_admin_opcode_str(nr->cmd->common.opcode), 360 nr->cmd->common.opcode, 361 nvme_get_error_status_str(nr->status), 362 nr->status >> 8 & 7, /* Status Code Type */ 363 nr->status & 0xff, /* Status Code */ 364 nr->status & NVME_SC_MORE ? "MORE " : "", 365 nr->status & NVME_SC_DNR ? "DNR " : "", 366 nr->cmd->common.cdw10, 367 nr->cmd->common.cdw11, 368 nr->cmd->common.cdw12, 369 nr->cmd->common.cdw13, 370 nr->cmd->common.cdw14, 371 nr->cmd->common.cdw14); 372 } 373 374 enum nvme_disposition { 375 COMPLETE, 376 RETRY, 377 FAILOVER, 378 AUTHENTICATE, 379 }; 380 381 static inline enum nvme_disposition nvme_decide_disposition(struct request *req) 382 { 383 if (likely(nvme_req(req)->status == 0)) 384 return COMPLETE; 385 386 if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED) 387 return AUTHENTICATE; 388 389 if (blk_noretry_request(req) || 390 (nvme_req(req)->status & NVME_SC_DNR) || 391 nvme_req(req)->retries >= nvme_max_retries) 392 return COMPLETE; 393 394 if (req->cmd_flags & REQ_NVME_MPATH) { 395 if (nvme_is_path_error(nvme_req(req)->status) || 396 blk_queue_dying(req->q)) 397 return FAILOVER; 398 } else { 399 if (blk_queue_dying(req->q)) 400 return COMPLETE; 401 } 402 403 return RETRY; 404 } 405 406 static inline void nvme_end_req_zoned(struct request *req) 407 { 408 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && 409 req_op(req) == REQ_OP_ZONE_APPEND) { 410 struct nvme_ns *ns = req->q->queuedata; 411 412 req->__sector = nvme_lba_to_sect(ns->head, 413 le64_to_cpu(nvme_req(req)->result.u64)); 414 } 415 } 416 417 static inline void nvme_end_req(struct request *req) 418 { 419 blk_status_t status = nvme_error_status(nvme_req(req)->status); 420 421 if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) { 422 if (blk_rq_is_passthrough(req)) 423 nvme_log_err_passthru(req); 424 else 425 nvme_log_error(req); 426 } 427 nvme_end_req_zoned(req); 428 nvme_trace_bio_complete(req); 429 if (req->cmd_flags & REQ_NVME_MPATH) 430 nvme_mpath_end_request(req); 431 blk_mq_end_request(req, status); 432 } 433 434 void nvme_complete_rq(struct request *req) 435 { 436 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; 437 438 trace_nvme_complete_rq(req); 439 nvme_cleanup_cmd(req); 440 441 /* 442 * Completions of long-running commands should not be able to 443 * defer sending of periodic keep alives, since the controller 444 * may have completed processing such commands a long time ago 445 * (arbitrarily close to command submission time). 446 * req->deadline - req->timeout is the command submission time 447 * in jiffies. 
	 */
	if (ctrl->kas &&
	    req->deadline - req->timeout >= ctrl->ka_last_check_time)
		ctrl->comp_seen = true;

	switch (nvme_decide_disposition(req)) {
	case COMPLETE:
		nvme_end_req(req);
		return;
	case RETRY:
		nvme_retry_req(req);
		return;
	case FAILOVER:
		nvme_failover_req(req);
		return;
	case AUTHENTICATE:
#ifdef CONFIG_NVME_HOST_AUTH
		queue_work(nvme_wq, &ctrl->dhchap_auth_work);
		nvme_retry_req(req);
#else
		nvme_end_req(req);
#endif
		return;
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

void nvme_complete_batch_req(struct request *req)
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);
	nvme_end_req_zoned(req);
}
EXPORT_SYMBOL_GPL(nvme_complete_batch_req);

/*
 * Called to unwind from ->queue_rq on a failed command submission so that the
 * multipathing code gets called to potentially failover to another path.
 * The caller needs to unwind all transport specific resource allocations and
 * must propagate the return value.
 */
blk_status_t nvme_host_path_error(struct request *req)
{
	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
	blk_mq_set_request_complete(req);
	nvme_complete_rq(req);
	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvme_host_path_error);

bool nvme_cancel_request(struct request *req, void *data)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	/* don't abort one completed or idle request */
	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT)
		return true;

	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
	blk_mq_complete_request(req);
	return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->tagset) {
		blk_mq_tagset_busy_iter(ctrl->tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_tagset);

void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->admin_tagset) {
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);

bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = nvme_ctrl_state(ctrl);
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING_NOIO:
		switch (old_state) {
		case NVME_CTRL_DELETING:
		case NVME_CTRL_DEAD:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed) {
		WRITE_ONCE(ctrl->state, new_state);
		wake_up_all(&ctrl->state_wq);
	}

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (!changed)
		return false;

	if (new_state == NVME_CTRL_LIVE) {
		if (old_state == NVME_CTRL_CONNECTING)
			nvme_stop_failfast_work(ctrl);
		nvme_kick_requeue_lists(ctrl);
	} else if (new_state == NVME_CTRL_CONNECTING &&
		old_state == NVME_CTRL_RESETTING) {
		nvme_start_failfast_work(ctrl);
	}
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

/*
 * Returns true for sink states that can't ever transition back to live.
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (nvme_ctrl_state(ctrl)) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return nvme_ctrl_state(ctrl) == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);

static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_free(&head->subsys->ns_ida, head->instance);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

bool nvme_tryget_ns_head(struct nvme_ns_head *head)
{
	return kref_get_unless_zero(&head->ref);
}

void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static inline bool nvme_get_ns(struct nvme_ns *ns)
{
	return kref_get_unless_zero(&ns->kref);
}

void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);

static inline void nvme_clear_nvme_request(struct request *req)
{
	nvme_req(req)->status = 0;
	nvme_req(req)->retries = 0;
	nvme_req(req)->flags = 0;
	req->rq_flags |= RQF_DONTPREP;
}

/* initialize a passthrough request */
void nvme_init_request(struct request *req, struct nvme_command *cmd)
{
	struct nvme_request *nr = nvme_req(req);
	bool logging_enabled;

	if (req->q->queuedata) {
		struct nvme_ns *ns = req->q->disk->private_data;

		logging_enabled = ns->head->passthru_err_log_enabled;
		req->timeout = NVME_IO_TIMEOUT;
	} else { /* no queuedata implies admin queue */
		logging_enabled = nr->ctrl->passthru_err_log_enabled;
		req->timeout = NVME_ADMIN_TIMEOUT;
	}

	if (!logging_enabled)
		req->rq_flags |= RQF_QUIET;

	/* passthru commands should let the driver set the SGL flags */
	cmd->common.flags &= ~NVME_CMD_SGL_ALL;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	if (req->mq_hctx->type == HCTX_TYPE_POLL)
		req->cmd_flags |= REQ_POLLED;
	nvme_clear_nvme_request(req);
	memcpy(nr->cmd, cmd, sizeof(*cmd));
}
EXPORT_SYMBOL_GPL(nvme_init_request);

/*
 * For something we're not in a state to send to the device the default action
 * is to busy it and retry it after the controller state is recovered. However,
 * if the controller is deleting or if anything is marked for failfast or
 * nvme multipath it is immediately failed.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);

	if (state != NVME_CTRL_DELETING_NOIO &&
	    state != NVME_CTRL_DELETING &&
	    state != NVME_CTRL_DEAD &&
	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;
	return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);

bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live, enum nvme_ctrl_state state)
{
	struct nvme_request *req = nvme_req(rq);

	/*
	 * Currently we have a problem sending passthru commands
	 * on the admin_q if the controller is not LIVE because we can't
	 * make sure that they are going out after the admin connect,
	 * controller enable and/or other commands in the initialization
	 * sequence. Until the controller is LIVE, fail with
	 * BLK_STS_RESOURCE so that they will be rescheduled.
	 */
	if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
		return false;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * Only allow commands on a live queue, except for the connect
		 * command, which is required to set the queue live in the
		 * appropriate states.
		 */
		switch (state) {
		case NVME_CTRL_CONNECTING:
			if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
			    (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
			     req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
			     req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
				return true;
			break;
		default:
			break;
		case NVME_CTRL_DEAD:
			return false;
		}
	}

	return queue_live;
}
EXPORT_SYMBOL_GPL(__nvme_check_ready);

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}

static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for maximum
Always allocate memory for maximum 829 * number of segments to prevent device reading beyond end of buffer. 830 */ 831 static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES; 832 833 range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN); 834 if (!range) { 835 /* 836 * If we fail allocation our range, fallback to the controller 837 * discard page. If that's also busy, it's safe to return 838 * busy, as we know we can make progress once that's freed. 839 */ 840 if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy)) 841 return BLK_STS_RESOURCE; 842 843 range = page_address(ns->ctrl->discard_page); 844 } 845 846 if (queue_max_discard_segments(req->q) == 1) { 847 u64 slba = nvme_sect_to_lba(ns->head, blk_rq_pos(req)); 848 u32 nlb = blk_rq_sectors(req) >> (ns->head->lba_shift - 9); 849 850 range[0].cattr = cpu_to_le32(0); 851 range[0].nlb = cpu_to_le32(nlb); 852 range[0].slba = cpu_to_le64(slba); 853 n = 1; 854 } else { 855 __rq_for_each_bio(bio, req) { 856 u64 slba = nvme_sect_to_lba(ns->head, 857 bio->bi_iter.bi_sector); 858 u32 nlb = bio->bi_iter.bi_size >> ns->head->lba_shift; 859 860 if (n < segments) { 861 range[n].cattr = cpu_to_le32(0); 862 range[n].nlb = cpu_to_le32(nlb); 863 range[n].slba = cpu_to_le64(slba); 864 } 865 n++; 866 } 867 } 868 869 if (WARN_ON_ONCE(n != segments)) { 870 if (virt_to_page(range) == ns->ctrl->discard_page) 871 clear_bit_unlock(0, &ns->ctrl->discard_page_busy); 872 else 873 kfree(range); 874 return BLK_STS_IOERR; 875 } 876 877 memset(cmnd, 0, sizeof(*cmnd)); 878 cmnd->dsm.opcode = nvme_cmd_dsm; 879 cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id); 880 cmnd->dsm.nr = cpu_to_le32(segments - 1); 881 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); 882 883 bvec_set_virt(&req->special_vec, range, alloc_size); 884 req->rq_flags |= RQF_SPECIAL_PAYLOAD; 885 886 return BLK_STS_OK; 887 } 888 889 static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd, 890 struct request *req) 891 { 892 u32 upper, lower; 893 u64 ref48; 894 895 /* both rw and write zeroes share the same reftag format */ 896 switch (ns->head->guard_type) { 897 case NVME_NVM_NS_16B_GUARD: 898 cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req)); 899 break; 900 case NVME_NVM_NS_64B_GUARD: 901 ref48 = ext_pi_ref_tag(req); 902 lower = lower_32_bits(ref48); 903 upper = upper_32_bits(ref48); 904 905 cmnd->rw.reftag = cpu_to_le32(lower); 906 cmnd->rw.cdw3 = cpu_to_le32(upper); 907 break; 908 default: 909 break; 910 } 911 } 912 913 static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, 914 struct request *req, struct nvme_command *cmnd) 915 { 916 memset(cmnd, 0, sizeof(*cmnd)); 917 918 if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) 919 return nvme_setup_discard(ns, req, cmnd); 920 921 cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes; 922 cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id); 923 cmnd->write_zeroes.slba = 924 cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req))); 925 cmnd->write_zeroes.length = 926 cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1); 927 928 if (!(req->cmd_flags & REQ_NOUNMAP) && 929 (ns->head->features & NVME_NS_DEAC)) 930 cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC); 931 932 if (nvme_ns_has_pi(ns->head)) { 933 cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT); 934 935 switch (ns->head->pi_type) { 936 case NVME_NS_DPS_PI_TYPE1: 937 case NVME_NS_DPS_PI_TYPE2: 938 nvme_set_ref_tag(ns, cmnd, req); 939 break; 940 } 941 } 942 943 return BLK_STS_OK; 944 } 945 946 static inline blk_status_t 
		struct request *req, struct nvme_command *cmnd,
		enum nvme_opcode op)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd->rw.opcode = op;
	cmnd->rw.flags = 0;
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.cdw2 = 0;
	cmnd->rw.cdw3 = 0;
	cmnd->rw.metadata = 0;
	cmnd->rw.slba =
		cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
	cmnd->rw.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
	cmnd->rw.reftag = 0;
	cmnd->rw.apptag = 0;
	cmnd->rw.appmask = 0;

	if (ns->head->ms) {
		/*
		 * If formatted with metadata, the block layer always provides a
		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
		 * we enable the PRACT bit for protection information or set the
		 * namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}

		switch (ns->head->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			if (op == nvme_cmd_zone_append)
				control |= NVME_RW_APPEND_PIREMAP;
			nvme_set_ref_tag(ns, cmnd, req);
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return 0;
}

void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;

		if (req->special_vec.bv_page == ctrl->discard_page)
			clear_bit_unlock(0, &ctrl->discard_page_busy);
		else
			kfree(bvec_virt(&req->special_vec));
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
{
	struct nvme_command *cmd = nvme_req(req)->cmd;
	blk_status_t ret = BLK_STS_OK;

	if (!(req->rq_flags & RQF_DONTPREP))
		nvme_clear_nvme_request(req);

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		/* these are setup prior to execution in nvme_init_request() */
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_RESET:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
		break;
	case REQ_OP_WRITE_ZEROES:
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
		break;
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
		break;
	case REQ_OP_ZONE_APPEND:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = nvme_cid(req);
	trace_nvme_setup_cmd(req, cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

/*
 * Return values:
 * 0: success
 * >0: nvme controller's cqe status response
 * <0: kernel error in lieu of controller response
 */
int nvme_execute_rq(struct request *rq, bool at_head)
{
	blk_status_t status;

	status = blk_execute_rq(rq, at_head);
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		return -EINTR;
	if (nvme_req(rq)->status)
		return nvme_req(rq)->status;
	return blk_status_to_errno(status);
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		int qid, nvme_submit_flags_t flags)
{
	struct request *req;
	int ret;
	blk_mq_req_flags_t blk_flags = 0;

	if (flags & NVME_SUBMIT_NOWAIT)
		blk_flags |= BLK_MQ_REQ_NOWAIT;
	if (flags & NVME_SUBMIT_RESERVED)
		blk_flags |= BLK_MQ_REQ_RESERVED;
	if (qid == NVME_QID_ANY)
		req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags);
	else
		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags,
						qid - 1);

	if (IS_ERR(req))
		return PTR_ERR(req);
	nvme_init_request(req, cmd);
	if (flags & NVME_SUBMIT_RETRY)
		req->cmd_flags &= ~REQ_FAILFAST_DRIVER;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	ret = nvme_execute_rq(req, flags & NVME_SUBMIT_AT_HEAD);
	if (result && ret >= 0)
		*result = nvme_req(req)->result;
out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
			NVME_QID_ANY, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn_once(ctrl->device,
				"IO command:%02x has unusual effects:%08x\n",
				opcode, effects);

		/*
		 * NVME_CMD_EFFECTS_CSE_MASK causes a freeze of all I/O queues,
		 * which would deadlock when done on an I/O command.  Note that
		 * we already warn about an unusual effect above.
		 */
		effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
	} else {
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);

		/* Ignore execution restrictions if any relaxation bits are set */
		if (effects & NVME_CMD_EFFECTS_CSER_MASK)
			effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
	}

	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);

u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = nvme_command_effects(ctrl, ns, opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU);

void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
		       struct nvme_command *cmd, int status)
{
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC) {
		if (!test_and_set_bit(NVME_CTRL_DIRTY_CAPABILITY,
				      &ctrl->flags)) {
			dev_info(ctrl->device,
				 "controller capabilities changed, reset may be required to take effect.\n");
		}
	}
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
		nvme_queue_scan(ctrl);
		flush_work(&ctrl->scan_work);
	}
	if (ns)
		return;

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
		case NVME_FEAT_KATO:
			/*
			 * The keep alive command interval on the host should
			 * be updated when KATO is modified by Set Features
			 * commands.
			 */
			if (!status)
				nvme_update_keep_alive(ctrl, cmd);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);

/*
 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
 *
 * The host should send Keep Alive commands at half of the Keep Alive Timeout
 * accounting for transport roundtrip times [..].
 */
static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
{
	unsigned long delay = ctrl->kato * HZ / 2;

	/*
	 * When using Traffic Based Keep Alive, we need to run
	 * nvme_keep_alive_work at twice the normal frequency, as one
	 * command completion can postpone sending a keep alive command
	 * by up to twice the delay between runs.
	 */
	if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
		delay /= 2;
	return delay;
}

static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{
	unsigned long now = jiffies;
	unsigned long delay = nvme_keep_alive_work_period(ctrl);
	unsigned long ka_next_check_tm = ctrl->ka_last_check_time + delay;

	if (time_after(now, ka_next_check_tm))
		delay = 0;
	else
		delay = ka_next_check_tm - now;

	queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
}

static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
						 blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
	unsigned long flags;
	bool startka = false;
	unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
	unsigned long delay = nvme_keep_alive_work_period(ctrl);

	/*
	 * Subtract off the keepalive RTT so nvme_keep_alive_work runs
	 * at the desired frequency.
	 */
	if (rtt <= delay) {
		delay -= rtt;
	} else {
		dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
			 jiffies_to_msecs(rtt));
		delay = 0;
	}

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
			status);
		return RQ_END_IO_NONE;
	}

	ctrl->ka_last_check_time = jiffies;
	ctrl->comp_seen = false;
	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
		queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
	return RQ_END_IO_NONE;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
	bool comp_seen = ctrl->comp_seen;
	struct request *rq;

	ctrl->ka_last_check_time = jiffies;

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
		nvme_queue_keep_alive_work(ctrl);
		return;
	}

	rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
				  BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
		nvme_reset_ctrl(ctrl);
		return;
	}
	nvme_init_request(rq, &ctrl->ka_cmd);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io = nvme_keep_alive_end_io;
	rq->end_io_data = ctrl;
	blk_execute_rq_nowait(rq, false);
}

static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	nvme_queue_keep_alive_work(ctrl);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd)
{
	unsigned int new_kato =
		DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);

	dev_info(ctrl->device,
		 "keep alive interval updated from %u ms to %u ms\n",
		 ctrl->kato * 1000 / 2, new_kato * 1000 / 2);

	nvme_stop_keep_alive(ctrl);
	ctrl->kato = new_kato;
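	/* Re-arm the keep-alive timer using the updated interval. */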
	nvme_start_keep_alive(ctrl);
}

/*
 * In NVMe 1.0 the CNS field was just a binary controller or namespace
 * flag, thus sending any new CNS opcodes has a big chance of not working.
 * Qemu unfortunately had that bug after reporting a 1.1 version compliance
 * (but not for any later version).
 */
static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
{
	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
		return ctrl->vs < NVME_VS(1, 2, 0);
	return ctrl->vs < NVME_VS(1, 1, 0);
}

static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error) {
		kfree(*id);
		*id = NULL;
	}
	return error;
}

static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
		struct nvme_ns_id_desc *cur, bool *csi_seen)
{
	const char *warn_str = "ctrl returned bogus length:";
	void *data = cur;

	switch (cur->nidt) {
	case NVME_NIDT_EUI64:
		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_EUI64_LEN;
		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
		return NVME_NIDT_EUI64_LEN;
	case NVME_NIDT_NGUID:
		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_NGUID_LEN;
		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
		return NVME_NIDT_NGUID_LEN;
	case NVME_NIDT_UUID:
		if (cur->nidl != NVME_NIDT_UUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_UUID_LEN;
		uuid_copy(&ids->uuid, data + sizeof(*cur));
		return NVME_NIDT_UUID_LEN;
	case NVME_NIDT_CSI:
		if (cur->nidl != NVME_NIDT_CSI_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
		*csi_seen = true;
		return NVME_NIDT_CSI_LEN;
	default:
		/* Skip unknown types */
		return cur->nidl;
	}
}

static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{
	struct nvme_command c = { };
	bool csi_seen = false;
	int status, pos, len;
	void *data;

	if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
		return 0;
	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
		return 0;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(info->nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status) {
		dev_warn(ctrl->device,
			"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
			info->nsid, status);
		goto free_data;
	}

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
		if (len < 0)
			break;

		len += sizeof(*cur);
	}

	if (nvme_multi_css(ctrl) && !csi_seen) {
		dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
			 info->nsid);
		status = -EINVAL;
	}

free_data:
	kfree(data);
	return status;
}

int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
			struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	*id = kmalloc(sizeof(**id), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
		kfree(*id);
		*id = NULL;
	}
	return error;
}

static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{
	struct nvme_ns_ids *ids = &info->ids;
	struct nvme_id_ns *id;
	int ret;

	ret = nvme_identify_ns(ctrl, info->nsid, &id);
	if (ret)
		return ret;

	if (id->ncap == 0) {
		/* namespace not allocated or attached */
		info->is_removed = true;
		ret = -ENODEV;
		goto error;
	}

	info->anagrpid = id->anagrpid;
	info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
	info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
	info->is_ready = true;
	if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
		dev_info(ctrl->device,
			 "Ignoring bogus Namespace Identifiers\n");
	} else {
		if (ctrl->vs >= NVME_VS(1, 1, 0) &&
		    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			memcpy(ids->eui64, id->eui64, sizeof(ids->eui64));
		if (ctrl->vs >= NVME_VS(1, 2, 0) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
	}

error:
	kfree(id);
	return ret;
}

static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{
	struct nvme_id_ns_cs_indep *id;
	struct nvme_command c = {
		.identify.opcode = nvme_admin_identify,
		.identify.nsid = cpu_to_le32(info->nsid),
		.identify.cns = NVME_ID_CNS_NS_CS_INDEP,
	};
	int ret;

	id = kmalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
	if (!ret) {
		info->anagrpid = id->anagrpid;
		info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
		info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
		info->is_ready = id->nstat & NVME_NSTAT_NRDY;
	}
	kfree(id);
	return ret;
}

static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
	union nvme_result res = { 0 };
	struct nvme_command c = { };
	int ret;

	c.features.opcode = op;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, NVME_QID_ANY, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_set_features);

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_get_features);

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count. We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

#define NVME_AEN_SUPPORTED \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
	int status;

	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
			 supported_aens);

	queue_work(nvme_wq, &ctrl->async_event_work);
}

static int nvme_ns_open(struct nvme_ns *ns)
{

	/* should never be called due to GENHD_FL_HIDDEN */
	if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
		goto fail;
	if (!nvme_get_ns(ns))
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

	return 0;

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
}

static void nvme_ns_release(struct nvme_ns *ns)
{

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_open(struct gendisk *disk, blk_mode_t mode)
{
	return nvme_ns_open(disk->private_data);
}

static void nvme_release(struct gendisk *disk)
{
	nvme_ns_release(disk->private_data);
}

int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head)
{
	struct blk_integrity integrity = { };

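	/* Drop any stale integrity profile before possibly registering a new one. */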
	blk_integrity_unregister(disk);

	if (!head->ms)
		return true;

	/*
	 * PI can always be supported as we can ask the controller to simply
	 * insert/strip it, which is not possible for other kinds of metadata.
	 */
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) ||
	    !(head->features & NVME_NS_METADATA_SUPPORTED))
		return nvme_ns_has_pi(head);

	switch (head->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		switch (head->guard_type) {
		case NVME_NVM_NS_16B_GUARD:
			integrity.profile = &t10_pi_type3_crc;
			integrity.tag_size = sizeof(u16) + sizeof(u32);
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		case NVME_NVM_NS_64B_GUARD:
			integrity.profile = &ext_pi_type3_crc64;
			integrity.tag_size = sizeof(u16) + 6;
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		default:
			integrity.profile = NULL;
			break;
		}
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		switch (head->guard_type) {
		case NVME_NVM_NS_16B_GUARD:
			integrity.profile = &t10_pi_type1_crc;
			integrity.tag_size = sizeof(u16);
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		case NVME_NVM_NS_64B_GUARD:
			integrity.profile = &ext_pi_type1_crc64;
			integrity.tag_size = sizeof(u16);
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		default:
			integrity.profile = NULL;
			break;
		}
		break;
	default:
		integrity.profile = NULL;
		break;
	}

	integrity.tuple_size = head->ms;
	integrity.pi_offset = head->pi_offset;
	blk_integrity_register(disk, &integrity);
	return true;
}

static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
{
	struct nvme_ctrl *ctrl = ns->ctrl;

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
		lim->max_hw_discard_sectors =
			nvme_lba_to_sect(ns->head, ctrl->dmrsl);
	else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
		lim->max_hw_discard_sectors = UINT_MAX;
	else
		lim->max_hw_discard_sectors = 0;

	lim->discard_granularity = lim->logical_block_size;

	if (ctrl->dmrl)
		lim->max_discard_segments = ctrl->dmrl;
	else
		lim->max_discard_segments = NVME_DSM_MAX_RANGES;
}

static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
		a->csi == b->csi;
}

static int nvme_identify_ns_nvm(struct nvme_ctrl *ctrl, unsigned int nsid,
				struct nvme_id_ns_nvm **nvmp)
{
	struct nvme_command c = {
		.identify.opcode = nvme_admin_identify,
		.identify.nsid = cpu_to_le32(nsid),
		.identify.cns = NVME_ID_CNS_CS_NS,
		.identify.csi = NVME_CSI_NVM,
	};
	struct nvme_id_ns_nvm *nvm;
	int ret;

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, nvm, sizeof(*nvm));
	if (ret)
		kfree(nvm);
	else
		*nvmp = nvm;
	return ret;
}

static void nvme_configure_pi_elbas(struct nvme_ns_head *head,
		struct nvme_id_ns *id, struct nvme_id_ns_nvm *nvm)
{
	u32 elbaf = le32_to_cpu(nvm->elbaf[nvme_lbaf_index(id->flbas)]);

	/* no support for storage tag formats right now */
	if (nvme_elbaf_sts(elbaf))
		return;

	head->guard_type = nvme_elbaf_guard_type(elbaf);
	switch (head->guard_type) {
	case NVME_NVM_NS_64B_GUARD:
		head->pi_size = sizeof(struct crc64_pi_tuple);
		break;
	case NVME_NVM_NS_16B_GUARD:
		head->pi_size = sizeof(struct t10_pi_tuple);
		break;
	default:
		break;
	}
}

static void nvme_configure_metadata(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head, struct nvme_id_ns *id,
		struct nvme_id_ns_nvm *nvm)
{
	head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
	head->pi_type = 0;
	head->pi_size = 0;
	head->pi_offset = 0;
	head->ms = le16_to_cpu(id->lbaf[nvme_lbaf_index(id->flbas)].ms);
	if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
		return;

	if (nvm && (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
		nvme_configure_pi_elbas(head, id, nvm);
	} else {
		head->pi_size = sizeof(struct t10_pi_tuple);
		head->guard_type = NVME_NVM_NS_16B_GUARD;
	}

	if (head->pi_size && head->ms >= head->pi_size)
		head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
	if (!(id->dps & NVME_NS_DPS_PI_FIRST))
		head->pi_offset = head->ms - head->pi_size;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * The NVMe over Fabrics specification only supports metadata as
		 * part of the extended data LBA.  We rely on HCA/HBA support to
		 * remap the separate metadata buffer from the block layer.
		 */
		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
			return;

		head->features |= NVME_NS_EXT_LBAS;

		/*
		 * The current fabrics transport drivers support namespace
		 * metadata formats only if nvme_ns_has_pi() returns true.
		 * Suppress support for all other formats so the namespace will
		 * have a 0 capacity and not be usable through the block stack.
		 *
		 * Note, this check will need to be modified if any drivers
		 * gain the ability to use other metadata formats.
		 */
		if (ctrl->max_integrity_segments && nvme_ns_has_pi(head))
			head->features |= NVME_NS_METADATA_SUPPORTED;
	} else {
		/*
		 * For PCIe controllers, we can't easily remap the separate
		 * metadata buffer from the block layer and thus require a
		 * separate metadata buffer for block layer metadata/PI support.
		 * We allow extended LBAs for the passthrough interface, though.
1935 */ 1936 if (id->flbas & NVME_NS_FLBAS_META_EXT) 1937 head->features |= NVME_NS_EXT_LBAS; 1938 else 1939 head->features |= NVME_NS_METADATA_SUPPORTED; 1940 } 1941 } 1942 1943 static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl) 1944 { 1945 return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1; 1946 } 1947 1948 static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl, 1949 struct queue_limits *lim) 1950 { 1951 lim->max_hw_sectors = ctrl->max_hw_sectors; 1952 lim->max_segments = min_t(u32, USHRT_MAX, 1953 min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments)); 1954 lim->max_integrity_segments = ctrl->max_integrity_segments; 1955 lim->virt_boundary_mask = NVME_CTRL_PAGE_SIZE - 1; 1956 lim->max_segment_size = UINT_MAX; 1957 lim->dma_alignment = 3; 1958 } 1959 1960 static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id, 1961 struct queue_limits *lim) 1962 { 1963 struct nvme_ns_head *head = ns->head; 1964 u32 bs = 1U << head->lba_shift; 1965 u32 atomic_bs, phys_bs, io_opt = 0; 1966 bool valid = true; 1967 1968 /* 1969 * The block layer can't support LBA sizes larger than the page size 1970 * or smaller than a sector size yet, so catch this early and don't 1971 * allow block I/O. 1972 */ 1973 if (head->lba_shift > PAGE_SHIFT || head->lba_shift < SECTOR_SHIFT) { 1974 bs = (1 << 9); 1975 valid = false; 1976 } 1977 1978 atomic_bs = phys_bs = bs; 1979 if (id->nabo == 0) { 1980 /* 1981 * Bit 1 indicates whether NAWUPF is defined for this namespace 1982 * and whether it should be used instead of AWUPF. If NAWUPF == 1983 * 0 then AWUPF must be used instead. 1984 */ 1985 if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) 1986 atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; 1987 else 1988 atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; 1989 } 1990 1991 if (id->nsfeat & NVME_NS_FEAT_IO_OPT) { 1992 /* NPWG = Namespace Preferred Write Granularity */ 1993 phys_bs = bs * (1 + le16_to_cpu(id->npwg)); 1994 /* NOWS = Namespace Optimal Write Size */ 1995 io_opt = bs * (1 + le16_to_cpu(id->nows)); 1996 } 1997 1998 /* 1999 * Linux filesystems assume writing a single physical block is 2000 * an atomic operation. Hence limit the physical block size to the 2001 * value of the Atomic Write Unit Power Fail parameter. 
2002 */ 2003 lim->logical_block_size = bs; 2004 lim->physical_block_size = min(phys_bs, atomic_bs); 2005 lim->io_min = phys_bs; 2006 lim->io_opt = io_opt; 2007 if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) 2008 lim->max_write_zeroes_sectors = UINT_MAX; 2009 else 2010 lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors; 2011 return valid; 2012 } 2013 2014 static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info) 2015 { 2016 return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags); 2017 } 2018 2019 static inline bool nvme_first_scan(struct gendisk *disk) 2020 { 2021 /* nvme_alloc_ns() scans the disk prior to adding it */ 2022 return !disk_live(disk); 2023 } 2024 2025 static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id, 2026 struct queue_limits *lim) 2027 { 2028 struct nvme_ctrl *ctrl = ns->ctrl; 2029 u32 iob; 2030 2031 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && 2032 is_power_of_2(ctrl->max_hw_sectors)) 2033 iob = ctrl->max_hw_sectors; 2034 else 2035 iob = nvme_lba_to_sect(ns->head, le16_to_cpu(id->noiob)); 2036 2037 if (!iob) 2038 return; 2039 2040 if (!is_power_of_2(iob)) { 2041 if (nvme_first_scan(ns->disk)) 2042 pr_warn("%s: ignoring unaligned IO boundary:%u\n", 2043 ns->disk->disk_name, iob); 2044 return; 2045 } 2046 2047 if (blk_queue_is_zoned(ns->disk->queue)) { 2048 if (nvme_first_scan(ns->disk)) 2049 pr_warn("%s: ignoring zoned namespace IO boundary\n", 2050 ns->disk->disk_name); 2051 return; 2052 } 2053 2054 lim->chunk_sectors = iob; 2055 } 2056 2057 static int nvme_update_ns_info_generic(struct nvme_ns *ns, 2058 struct nvme_ns_info *info) 2059 { 2060 struct queue_limits lim; 2061 int ret; 2062 2063 blk_mq_freeze_queue(ns->disk->queue); 2064 lim = queue_limits_start_update(ns->disk->queue); 2065 nvme_set_ctrl_limits(ns->ctrl, &lim); 2066 ret = queue_limits_commit_update(ns->disk->queue, &lim); 2067 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); 2068 blk_mq_unfreeze_queue(ns->disk->queue); 2069 2070 /* Hide the block-interface for these devices */ 2071 if (!ret) 2072 ret = -ENODEV; 2073 return ret; 2074 } 2075 2076 static int nvme_update_ns_info_block(struct nvme_ns *ns, 2077 struct nvme_ns_info *info) 2078 { 2079 bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT; 2080 struct queue_limits lim; 2081 struct nvme_id_ns_nvm *nvm = NULL; 2082 struct nvme_id_ns *id; 2083 sector_t capacity; 2084 unsigned lbaf; 2085 int ret; 2086 2087 ret = nvme_identify_ns(ns->ctrl, info->nsid, &id); 2088 if (ret) 2089 return ret; 2090 2091 if (id->ncap == 0) { 2092 /* namespace not allocated or attached */ 2093 info->is_removed = true; 2094 ret = -ENODEV; 2095 goto out; 2096 } 2097 2098 if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) { 2099 ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm); 2100 if (ret < 0) 2101 goto out; 2102 } 2103 2104 blk_mq_freeze_queue(ns->disk->queue); 2105 lbaf = nvme_lbaf_index(id->flbas); 2106 ns->head->lba_shift = id->lbaf[lbaf].ds; 2107 ns->head->nuse = le64_to_cpu(id->nuse); 2108 capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze)); 2109 2110 lim = queue_limits_start_update(ns->disk->queue); 2111 nvme_set_ctrl_limits(ns->ctrl, &lim); 2112 nvme_configure_metadata(ns->ctrl, ns->head, id, nvm); 2113 nvme_set_chunk_sectors(ns, id, &lim); 2114 if (!nvme_update_disk_info(ns, id, &lim)) 2115 capacity = 0; 2116 nvme_config_discard(ns, &lim); 2117 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && 2118 ns->head->ids.csi == NVME_CSI_ZNS) { 2119 ret = nvme_update_zone_info(ns, lbaf, &lim); 2120 if (ret) { 
2121 blk_mq_unfreeze_queue(ns->disk->queue); 2122 goto out; 2123 } 2124 } 2125 ret = queue_limits_commit_update(ns->disk->queue, &lim); 2126 if (ret) { 2127 blk_mq_unfreeze_queue(ns->disk->queue); 2128 goto out; 2129 } 2130 2131 /* 2132 * Register a metadata profile for PI, or the plain non-integrity NVMe 2133 * metadata masquerading as Type 0 if supported, otherwise reject block 2134 * I/O to namespaces with metadata except when the namespace supports 2135 * PI, as it can strip/insert in that case. 2136 */ 2137 if (!nvme_init_integrity(ns->disk, ns->head)) 2138 capacity = 0; 2139 2140 set_capacity_and_notify(ns->disk, capacity); 2141 2142 /* 2143 * Only set the DEAC bit if the device guarantees that reads from 2144 * deallocated data return zeroes. While the DEAC bit does not 2145 * require that, it must be a no-op if reads from deallocated data 2146 * do not return zeroes. 2147 */ 2148 if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) 2149 ns->head->features |= NVME_NS_DEAC; 2150 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); 2151 blk_queue_write_cache(ns->disk->queue, vwc, vwc); 2152 set_bit(NVME_NS_READY, &ns->flags); 2153 blk_mq_unfreeze_queue(ns->disk->queue); 2154 2155 if (blk_queue_is_zoned(ns->queue)) { 2156 ret = blk_revalidate_disk_zones(ns->disk, NULL); 2157 if (ret && !nvme_first_scan(ns->disk)) 2158 goto out; 2159 } 2160 2161 ret = 0; 2162 out: 2163 kfree(nvm); 2164 kfree(id); 2165 return ret; 2166 } 2167 2168 static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) 2169 { 2170 bool unsupported = false; 2171 int ret; 2172 2173 switch (info->ids.csi) { 2174 case NVME_CSI_ZNS: 2175 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { 2176 dev_info(ns->ctrl->device, 2177 "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n", 2178 info->nsid); 2179 ret = nvme_update_ns_info_generic(ns, info); 2180 break; 2181 } 2182 ret = nvme_update_ns_info_block(ns, info); 2183 break; 2184 case NVME_CSI_NVM: 2185 ret = nvme_update_ns_info_block(ns, info); 2186 break; 2187 default: 2188 dev_info(ns->ctrl->device, 2189 "block device for nsid %u not supported (csi %u)\n", 2190 info->nsid, info->ids.csi); 2191 ret = nvme_update_ns_info_generic(ns, info); 2192 break; 2193 } 2194 2195 /* 2196 * If probing fails due to an unsupported feature, hide the block device, 2197 * but still allow other access.
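 * The hidden gendisk keeps the request queue alive, so the namespace can still be reached through the passthrough ioctls and the nvme generic character device.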
2198 */ 2199 if (ret == -ENODEV) { 2200 ns->disk->flags |= GENHD_FL_HIDDEN; 2201 set_bit(NVME_NS_READY, &ns->flags); 2202 unsupported = true; 2203 ret = 0; 2204 } 2205 2206 if (!ret && nvme_ns_head_multipath(ns->head)) { 2207 struct queue_limits lim; 2208 2209 blk_mq_freeze_queue(ns->head->disk->queue); 2210 if (unsupported) 2211 ns->head->disk->flags |= GENHD_FL_HIDDEN; 2212 else 2213 nvme_init_integrity(ns->head->disk, ns->head); 2214 set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk)); 2215 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); 2216 nvme_mpath_revalidate_paths(ns); 2217 2218 lim = queue_limits_start_update(ns->head->disk->queue); 2219 queue_limits_stack_bdev(&lim, ns->disk->part0, 0, 2220 ns->head->disk->disk_name); 2221 ret = queue_limits_commit_update(ns->head->disk->queue, &lim); 2222 blk_mq_unfreeze_queue(ns->head->disk->queue); 2223 } 2224 2225 return ret; 2226 } 2227 2228 #ifdef CONFIG_BLK_SED_OPAL 2229 static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, 2230 bool send) 2231 { 2232 struct nvme_ctrl *ctrl = data; 2233 struct nvme_command cmd = { }; 2234 2235 if (send) 2236 cmd.common.opcode = nvme_admin_security_send; 2237 else 2238 cmd.common.opcode = nvme_admin_security_recv; 2239 cmd.common.nsid = 0; 2240 cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); 2241 cmd.common.cdw11 = cpu_to_le32(len); 2242 2243 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 2244 NVME_QID_ANY, NVME_SUBMIT_AT_HEAD); 2245 } 2246 2247 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) 2248 { 2249 if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) { 2250 if (!ctrl->opal_dev) 2251 ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit); 2252 else if (was_suspended) 2253 opal_unlock_from_suspend(ctrl->opal_dev); 2254 } else { 2255 free_opal_dev(ctrl->opal_dev); 2256 ctrl->opal_dev = NULL; 2257 } 2258 } 2259 #else 2260 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) 2261 { 2262 } 2263 #endif /* CONFIG_BLK_SED_OPAL */ 2264 2265 #ifdef CONFIG_BLK_DEV_ZONED 2266 static int nvme_report_zones(struct gendisk *disk, sector_t sector, 2267 unsigned int nr_zones, report_zones_cb cb, void *data) 2268 { 2269 return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb, 2270 data); 2271 } 2272 #else 2273 #define nvme_report_zones NULL 2274 #endif /* CONFIG_BLK_DEV_ZONED */ 2275 2276 const struct block_device_operations nvme_bdev_ops = { 2277 .owner = THIS_MODULE, 2278 .ioctl = nvme_ioctl, 2279 .compat_ioctl = blkdev_compat_ptr_ioctl, 2280 .open = nvme_open, 2281 .release = nvme_release, 2282 .getgeo = nvme_getgeo, 2283 .report_zones = nvme_report_zones, 2284 .pr_ops = &nvme_pr_ops, 2285 }; 2286 2287 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val, 2288 u32 timeout, const char *op) 2289 { 2290 unsigned long timeout_jiffies = jiffies + timeout * HZ; 2291 u32 csts; 2292 int ret; 2293 2294 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 2295 if (csts == ~0) 2296 return -ENODEV; 2297 if ((csts & mask) == val) 2298 break; 2299 2300 usleep_range(1000, 2000); 2301 if (fatal_signal_pending(current)) 2302 return -EINTR; 2303 if (time_after(jiffies, timeout_jiffies)) { 2304 dev_err(ctrl->device, 2305 "Device not ready; aborting %s, CSTS=0x%x\n", 2306 op, csts); 2307 return -ENODEV; 2308 } 2309 } 2310 2311 return ret; 2312 } 2313 2314 int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown) 2315 { 2316 int ret; 2317 2318 
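/* Clear any shutdown notification left over from a previous enable, then either request a normal shutdown (CC.SHN) or clear CC.EN, depending on whether the caller asked for a shutdown or a plain disable. */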
ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 2319 if (shutdown) 2320 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; 2321 else 2322 ctrl->ctrl_config &= ~NVME_CC_ENABLE; 2323 2324 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2325 if (ret) 2326 return ret; 2327 2328 if (shutdown) { 2329 return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK, 2330 NVME_CSTS_SHST_CMPLT, 2331 ctrl->shutdown_timeout, "shutdown"); 2332 } 2333 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) 2334 msleep(NVME_QUIRK_DELAY_AMOUNT); 2335 return nvme_wait_ready(ctrl, NVME_CSTS_RDY, 0, 2336 (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset"); 2337 } 2338 EXPORT_SYMBOL_GPL(nvme_disable_ctrl); 2339 2340 int nvme_enable_ctrl(struct nvme_ctrl *ctrl) 2341 { 2342 unsigned dev_page_min; 2343 u32 timeout; 2344 int ret; 2345 2346 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); 2347 if (ret) { 2348 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); 2349 return ret; 2350 } 2351 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12; 2352 2353 if (NVME_CTRL_PAGE_SHIFT < dev_page_min) { 2354 dev_err(ctrl->device, 2355 "Minimum device page size %u too large for host (%u)\n", 2356 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT); 2357 return -ENODEV; 2358 } 2359 2360 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI) 2361 ctrl->ctrl_config = NVME_CC_CSS_CSI; 2362 else 2363 ctrl->ctrl_config = NVME_CC_CSS_NVM; 2364 2365 if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS) 2366 ctrl->ctrl_config |= NVME_CC_CRIME; 2367 2368 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; 2369 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; 2370 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; 2371 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2372 if (ret) 2373 return ret; 2374 2375 /* Flush write to device (required if transport is PCI) */ 2376 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config); 2377 if (ret) 2378 return ret; 2379 2380 /* CAP value may change after initial CC write */ 2381 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); 2382 if (ret) 2383 return ret; 2384 2385 timeout = NVME_CAP_TIMEOUT(ctrl->cap); 2386 if (ctrl->cap & NVME_CAP_CRMS_CRWMS) { 2387 u32 crto, ready_timeout; 2388 2389 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto); 2390 if (ret) { 2391 dev_err(ctrl->device, "Reading CRTO failed (%d)\n", 2392 ret); 2393 return ret; 2394 } 2395 2396 /* 2397 * CRTO should always be greater or equal to CAP.TO, but some 2398 * devices are known to get this wrong. Use the larger of the 2399 * two values. 
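 * Both CAP.TO and the CRTO timeouts are expressed in 500 millisecond units, which is why the chosen value is halved (rounding up) before it is passed to nvme_wait_ready() in seconds.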
2400 */ 2401 if (ctrl->ctrl_config & NVME_CC_CRIME) 2402 ready_timeout = NVME_CRTO_CRIMT(crto); 2403 else 2404 ready_timeout = NVME_CRTO_CRWMT(crto); 2405 2406 if (ready_timeout < timeout) 2407 dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n", 2408 crto, ctrl->cap); 2409 else 2410 timeout = ready_timeout; 2411 } 2412 2413 ctrl->ctrl_config |= NVME_CC_ENABLE; 2414 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2415 if (ret) 2416 return ret; 2417 return nvme_wait_ready(ctrl, NVME_CSTS_RDY, NVME_CSTS_RDY, 2418 (timeout + 1) / 2, "initialisation"); 2419 } 2420 EXPORT_SYMBOL_GPL(nvme_enable_ctrl); 2421 2422 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) 2423 { 2424 __le64 ts; 2425 int ret; 2426 2427 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) 2428 return 0; 2429 2430 ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); 2431 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), 2432 NULL); 2433 if (ret) 2434 dev_warn_once(ctrl->device, 2435 "could not set timestamp (%d)\n", ret); 2436 return ret; 2437 } 2438 2439 static int nvme_configure_host_options(struct nvme_ctrl *ctrl) 2440 { 2441 struct nvme_feat_host_behavior *host; 2442 u8 acre = 0, lbafee = 0; 2443 int ret; 2444 2445 /* Don't bother enabling the feature if retry delay is not reported */ 2446 if (ctrl->crdt[0]) 2447 acre = NVME_ENABLE_ACRE; 2448 if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) 2449 lbafee = NVME_ENABLE_LBAFEE; 2450 2451 if (!acre && !lbafee) 2452 return 0; 2453 2454 host = kzalloc(sizeof(*host), GFP_KERNEL); 2455 if (!host) 2456 return 0; 2457 2458 host->acre = acre; 2459 host->lbafee = lbafee; 2460 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, 2461 host, sizeof(*host), NULL); 2462 kfree(host); 2463 return ret; 2464 } 2465 2466 /* 2467 * The function checks whether the given total (exlat + enlat) latency of 2468 * a power state allows the latter to be used as an APST transition target. 2469 * It does so by comparing the latency to the primary and secondary latency 2470 * tolerances defined by module params. If there's a match, the corresponding 2471 * timeout value is returned and the matching tolerance index (1 or 2) is 2472 * reported. 2473 */ 2474 static bool nvme_apst_get_transition_time(u64 total_latency, 2475 u64 *transition_time, unsigned *last_index) 2476 { 2477 if (total_latency <= apst_primary_latency_tol_us) { 2478 if (*last_index == 1) 2479 return false; 2480 *last_index = 1; 2481 *transition_time = apst_primary_timeout_ms; 2482 return true; 2483 } 2484 if (apst_secondary_timeout_ms && 2485 total_latency <= apst_secondary_latency_tol_us) { 2486 if (*last_index <= 2) 2487 return false; 2488 *last_index = 2; 2489 *transition_time = apst_secondary_timeout_ms; 2490 return true; 2491 } 2492 return false; 2493 } 2494 2495 /* 2496 * APST (Autonomous Power State Transition) lets us program a table of power 2497 * state transitions that the controller will perform automatically. 2498 * 2499 * Depending on module params, one of the two supported techniques will be used: 2500 * 2501 * - If the parameters provide explicit timeouts and tolerances, they will be 2502 * used to build a table with up to 2 non-operational states to transition to. 2503 * The default parameter values were selected based on the values used by 2504 * Microsoft's and Intel's NVMe drivers. 
Yet, since we don't implement dynamic 2505 * regeneration of the APST table in the event of switching between external 2506 * and battery power, the timeouts and tolerances reflect a compromise 2507 * between values used by Microsoft for AC and battery scenarios. 2508 * - If not, we'll configure the table with a simple heuristic: we are willing 2509 * to spend at most 2% of the time transitioning between power states. 2510 * Therefore, when running in any given state, we will enter the next 2511 * lower-power non-operational state after waiting 50 * (enlat + exlat) 2512 * microseconds, as long as that state's exit latency is under the requested 2513 * maximum latency. 2514 * 2515 * We will not autonomously enter any non-operational state for which the total 2516 * latency exceeds ps_max_latency_us. 2517 * 2518 * Users can set ps_max_latency_us to zero to turn off APST. 2519 */ 2520 static int nvme_configure_apst(struct nvme_ctrl *ctrl) 2521 { 2522 struct nvme_feat_auto_pst *table; 2523 unsigned apste = 0; 2524 u64 max_lat_us = 0; 2525 __le64 target = 0; 2526 int max_ps = -1; 2527 int state; 2528 int ret; 2529 unsigned last_lt_index = UINT_MAX; 2530 2531 /* 2532 * If APST isn't supported or if we haven't been initialized yet, 2533 * then don't do anything. 2534 */ 2535 if (!ctrl->apsta) 2536 return 0; 2537 2538 if (ctrl->npss > 31) { 2539 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); 2540 return 0; 2541 } 2542 2543 table = kzalloc(sizeof(*table), GFP_KERNEL); 2544 if (!table) 2545 return 0; 2546 2547 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { 2548 /* Turn off APST. */ 2549 dev_dbg(ctrl->device, "APST disabled\n"); 2550 goto done; 2551 } 2552 2553 /* 2554 * Walk through all states from lowest- to highest-power. 2555 * According to the spec, lower-numbered states use more power. NPSS, 2556 * despite the name, is the index of the lowest-power state, not the 2557 * number of states. 2558 */ 2559 for (state = (int)ctrl->npss; state >= 0; state--) { 2560 u64 total_latency_us, exit_latency_us, transition_ms; 2561 2562 if (target) 2563 table->entries[state] = target; 2564 2565 /* 2566 * Don't allow transitions to the deepest state if it's quirked 2567 * off. 2568 */ 2569 if (state == ctrl->npss && 2570 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) 2571 continue; 2572 2573 /* 2574 * Is this state a useful non-operational state for higher-power 2575 * states to autonomously transition to? 2576 */ 2577 if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE)) 2578 continue; 2579 2580 exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat); 2581 if (exit_latency_us > ctrl->ps_max_latency_us) 2582 continue; 2583 2584 total_latency_us = exit_latency_us + 2585 le32_to_cpu(ctrl->psd[state].entry_lat); 2586 2587 /* 2588 * This state is good. It can be used as the APST idle target 2589 * for higher power states. 
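 * With the default 2% heuristic below, a state whose entry plus exit latency adds up to 10000us, for instance, is given an idle transition time of (10000 + 19) / 20 = 500ms, capped at the 24-bit maximum that a table entry can hold.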
2590 */ 2591 if (apst_primary_timeout_ms && apst_primary_latency_tol_us) { 2592 if (!nvme_apst_get_transition_time(total_latency_us, 2593 &transition_ms, &last_lt_index)) 2594 continue; 2595 } else { 2596 transition_ms = total_latency_us + 19; 2597 do_div(transition_ms, 20); 2598 if (transition_ms > (1 << 24) - 1) 2599 transition_ms = (1 << 24) - 1; 2600 } 2601 2602 target = cpu_to_le64((state << 3) | (transition_ms << 8)); 2603 if (max_ps == -1) 2604 max_ps = state; 2605 if (total_latency_us > max_lat_us) 2606 max_lat_us = total_latency_us; 2607 } 2608 2609 if (max_ps == -1) 2610 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); 2611 else 2612 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", 2613 max_ps, max_lat_us, (int)sizeof(*table), table); 2614 apste = 1; 2615 2616 done: 2617 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, 2618 table, sizeof(*table), NULL); 2619 if (ret) 2620 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); 2621 kfree(table); 2622 return ret; 2623 } 2624 2625 static void nvme_set_latency_tolerance(struct device *dev, s32 val) 2626 { 2627 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2628 u64 latency; 2629 2630 switch (val) { 2631 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: 2632 case PM_QOS_LATENCY_ANY: 2633 latency = U64_MAX; 2634 break; 2635 2636 default: 2637 latency = val; 2638 } 2639 2640 if (ctrl->ps_max_latency_us != latency) { 2641 ctrl->ps_max_latency_us = latency; 2642 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE) 2643 nvme_configure_apst(ctrl); 2644 } 2645 } 2646 2647 struct nvme_core_quirk_entry { 2648 /* 2649 * NVMe model and firmware strings are padded with spaces. For 2650 * simplicity, strings in the quirk table are padded with NULLs 2651 * instead. 2652 */ 2653 u16 vid; 2654 const char *mn; 2655 const char *fr; 2656 unsigned long quirks; 2657 }; 2658 2659 static const struct nvme_core_quirk_entry core_quirks[] = { 2660 { 2661 /* 2662 * This Toshiba device seems to die using any APST states. See: 2663 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 2664 */ 2665 .vid = 0x1179, 2666 .mn = "THNSF5256GPUK TOSHIBA", 2667 .quirks = NVME_QUIRK_NO_APST, 2668 }, 2669 { 2670 /* 2671 * This LiteON CL1-3D*-Q11 firmware version has a race 2672 * condition associated with actions related to suspend to idle 2673 * LiteON has resolved the problem in future firmware 2674 */ 2675 .vid = 0x14a4, 2676 .fr = "22301111", 2677 .quirks = NVME_QUIRK_SIMPLE_SUSPEND, 2678 }, 2679 { 2680 /* 2681 * This Kioxia CD6-V Series / HPE PE8030 device times out and 2682 * aborts I/O during any load, but more easily reproducible 2683 * with discards (fstrim). 2684 * 2685 * The device is left in a state where it is also not possible 2686 * to use "nvme set-feature" to disable APST, but booting with 2687 * nvme_core.default_ps_max_latency=0 works. 2688 */ 2689 .vid = 0x1e0f, 2690 .mn = "KCD6XVUL6T40", 2691 .quirks = NVME_QUIRK_NO_APST, 2692 }, 2693 { 2694 /* 2695 * The external Samsung X5 SSD fails initialization without a 2696 * delay before checking if it is ready and has a whole set of 2697 * other problems. To make this even more interesting, it 2698 * shares the PCI ID with internal Samsung 970 Evo Plus that 2699 * does not need or want these quirks. 
2700 */ 2701 .vid = 0x144d, 2702 .mn = "Samsung Portable SSD X5", 2703 .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 2704 NVME_QUIRK_NO_DEEPEST_PS | 2705 NVME_QUIRK_IGNORE_DEV_SUBNQN, 2706 } 2707 }; 2708 2709 /* match is null-terminated but idstr is space-padded. */ 2710 static bool string_matches(const char *idstr, const char *match, size_t len) 2711 { 2712 size_t matchlen; 2713 2714 if (!match) 2715 return true; 2716 2717 matchlen = strlen(match); 2718 WARN_ON_ONCE(matchlen > len); 2719 2720 if (memcmp(idstr, match, matchlen)) 2721 return false; 2722 2723 for (; matchlen < len; matchlen++) 2724 if (idstr[matchlen] != ' ') 2725 return false; 2726 2727 return true; 2728 } 2729 2730 static bool quirk_matches(const struct nvme_id_ctrl *id, 2731 const struct nvme_core_quirk_entry *q) 2732 { 2733 return q->vid == le16_to_cpu(id->vid) && 2734 string_matches(id->mn, q->mn, sizeof(id->mn)) && 2735 string_matches(id->fr, q->fr, sizeof(id->fr)); 2736 } 2737 2738 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, 2739 struct nvme_id_ctrl *id) 2740 { 2741 size_t nqnlen; 2742 int off; 2743 2744 if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { 2745 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 2746 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 2747 strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 2748 return; 2749 } 2750 2751 if (ctrl->vs >= NVME_VS(1, 2, 1)) 2752 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 2753 } 2754 2755 /* 2756 * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe 2757 * Base Specification 2.0. It is slightly different from the format 2758 * specified there due to historic reasons, and we can't change it now. 2759 */ 2760 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 2761 "nqn.2014.08.org.nvmexpress:%04x%04x", 2762 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 2763 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 2764 off += sizeof(id->sn); 2765 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); 2766 off += sizeof(id->mn); 2767 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); 2768 } 2769 2770 static void nvme_release_subsystem(struct device *dev) 2771 { 2772 struct nvme_subsystem *subsys = 2773 container_of(dev, struct nvme_subsystem, dev); 2774 2775 if (subsys->instance >= 0) 2776 ida_free(&nvme_instance_ida, subsys->instance); 2777 kfree(subsys); 2778 } 2779 2780 static void nvme_destroy_subsystem(struct kref *ref) 2781 { 2782 struct nvme_subsystem *subsys = 2783 container_of(ref, struct nvme_subsystem, ref); 2784 2785 mutex_lock(&nvme_subsystems_lock); 2786 list_del(&subsys->entry); 2787 mutex_unlock(&nvme_subsystems_lock); 2788 2789 ida_destroy(&subsys->ns_ida); 2790 device_del(&subsys->dev); 2791 put_device(&subsys->dev); 2792 } 2793 2794 static void nvme_put_subsystem(struct nvme_subsystem *subsys) 2795 { 2796 kref_put(&subsys->ref, nvme_destroy_subsystem); 2797 } 2798 2799 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) 2800 { 2801 struct nvme_subsystem *subsys; 2802 2803 lockdep_assert_held(&nvme_subsystems_lock); 2804 2805 /* 2806 * Fail matches for discovery subsystems. This results 2807 * in each discovery controller bound to a unique subsystem. 2808 * This avoids issues with validating controller values 2809 * that can only be true when there is a single unique subsystem. 2810 * There may be multiple and completely independent entities 2811 * that provide discovery controllers. 
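 * Returning NULL here makes nvme_init_subsystem() register a fresh subsystem for each discovery controller instead of linking it to an existing one.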
2812 */ 2813 if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME)) 2814 return NULL; 2815 2816 list_for_each_entry(subsys, &nvme_subsystems, entry) { 2817 if (strcmp(subsys->subnqn, subsysnqn)) 2818 continue; 2819 if (!kref_get_unless_zero(&subsys->ref)) 2820 continue; 2821 return subsys; 2822 } 2823 2824 return NULL; 2825 } 2826 2827 static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl) 2828 { 2829 return ctrl->opts && ctrl->opts->discovery_nqn; 2830 } 2831 2832 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, 2833 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2834 { 2835 struct nvme_ctrl *tmp; 2836 2837 lockdep_assert_held(&nvme_subsystems_lock); 2838 2839 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) { 2840 if (nvme_state_terminal(tmp)) 2841 continue; 2842 2843 if (tmp->cntlid == ctrl->cntlid) { 2844 dev_err(ctrl->device, 2845 "Duplicate cntlid %u with %s, subsys %s, rejecting\n", 2846 ctrl->cntlid, dev_name(tmp->device), 2847 subsys->subnqn); 2848 return false; 2849 } 2850 2851 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || 2852 nvme_discovery_ctrl(ctrl)) 2853 continue; 2854 2855 dev_err(ctrl->device, 2856 "Subsystem does not support multiple controllers\n"); 2857 return false; 2858 } 2859 2860 return true; 2861 } 2862 2863 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2864 { 2865 struct nvme_subsystem *subsys, *found; 2866 int ret; 2867 2868 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); 2869 if (!subsys) 2870 return -ENOMEM; 2871 2872 subsys->instance = -1; 2873 mutex_init(&subsys->lock); 2874 kref_init(&subsys->ref); 2875 INIT_LIST_HEAD(&subsys->ctrls); 2876 INIT_LIST_HEAD(&subsys->nsheads); 2877 nvme_init_subnqn(subsys, ctrl, id); 2878 memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); 2879 memcpy(subsys->model, id->mn, sizeof(subsys->model)); 2880 subsys->vendor_id = le16_to_cpu(id->vid); 2881 subsys->cmic = id->cmic; 2882 2883 /* Versions prior to 1.4 don't necessarily report a valid type */ 2884 if (id->cntrltype == NVME_CTRL_DISC || 2885 !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME)) 2886 subsys->subtype = NVME_NQN_DISC; 2887 else 2888 subsys->subtype = NVME_NQN_NVME; 2889 2890 if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) { 2891 dev_err(ctrl->device, 2892 "Subsystem %s is not a discovery controller", 2893 subsys->subnqn); 2894 kfree(subsys); 2895 return -EINVAL; 2896 } 2897 subsys->awupf = le16_to_cpu(id->awupf); 2898 nvme_mpath_default_iopolicy(subsys); 2899 2900 subsys->dev.class = &nvme_subsys_class; 2901 subsys->dev.release = nvme_release_subsystem; 2902 subsys->dev.groups = nvme_subsys_attrs_groups; 2903 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); 2904 device_initialize(&subsys->dev); 2905 2906 mutex_lock(&nvme_subsystems_lock); 2907 found = __nvme_find_get_subsystem(subsys->subnqn); 2908 if (found) { 2909 put_device(&subsys->dev); 2910 subsys = found; 2911 2912 if (!nvme_validate_cntlid(subsys, ctrl, id)) { 2913 ret = -EINVAL; 2914 goto out_put_subsystem; 2915 } 2916 } else { 2917 ret = device_add(&subsys->dev); 2918 if (ret) { 2919 dev_err(ctrl->device, 2920 "failed to register subsystem device.\n"); 2921 put_device(&subsys->dev); 2922 goto out_unlock; 2923 } 2924 ida_init(&subsys->ns_ida); 2925 list_add_tail(&subsys->entry, &nvme_subsystems); 2926 } 2927 2928 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, 2929 dev_name(ctrl->device)); 2930 if (ret) { 2931 dev_err(ctrl->device, 2932 "failed to create sysfs link from subsystem.\n"); 2933 goto 
out_put_subsystem; 2934 } 2935 2936 if (!found) 2937 subsys->instance = ctrl->instance; 2938 ctrl->subsys = subsys; 2939 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); 2940 mutex_unlock(&nvme_subsystems_lock); 2941 return 0; 2942 2943 out_put_subsystem: 2944 nvme_put_subsystem(subsys); 2945 out_unlock: 2946 mutex_unlock(&nvme_subsystems_lock); 2947 return ret; 2948 } 2949 2950 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, 2951 void *log, size_t size, u64 offset) 2952 { 2953 struct nvme_command c = { }; 2954 u32 dwlen = nvme_bytes_to_numd(size); 2955 2956 c.get_log_page.opcode = nvme_admin_get_log_page; 2957 c.get_log_page.nsid = cpu_to_le32(nsid); 2958 c.get_log_page.lid = log_page; 2959 c.get_log_page.lsp = lsp; 2960 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); 2961 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); 2962 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); 2963 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); 2964 c.get_log_page.csi = csi; 2965 2966 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); 2967 } 2968 2969 static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, 2970 struct nvme_effects_log **log) 2971 { 2972 struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi); 2973 int ret; 2974 2975 if (cel) 2976 goto out; 2977 2978 cel = kzalloc(sizeof(*cel), GFP_KERNEL); 2979 if (!cel) 2980 return -ENOMEM; 2981 2982 ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi, 2983 cel, sizeof(*cel), 0); 2984 if (ret) { 2985 kfree(cel); 2986 return ret; 2987 } 2988 2989 xa_store(&ctrl->cels, csi, cel, GFP_KERNEL); 2990 out: 2991 *log = cel; 2992 return 0; 2993 } 2994 2995 static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units) 2996 { 2997 u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val; 2998 2999 if (check_shl_overflow(1U, units + page_shift - 9, &val)) 3000 return UINT_MAX; 3001 return val; 3002 } 3003 3004 static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) 3005 { 3006 struct nvme_command c = { }; 3007 struct nvme_id_ctrl_nvm *id; 3008 int ret; 3009 3010 /* 3011 * Even though NVMe spec explicitly states that MDTS is not applicable 3012 * to the write-zeroes, we are cautious and limit the size to the 3013 * controllers max_hw_sectors value, which is based on the MDTS field 3014 * and possibly other limiting factors. 
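 * If the controller reports WZSL in the NVM command set specific Identify Controller data below, that value overrides this conservative default; e.g. a WZSL of 5 with a 4KiB MPSMIN page yields 1 << (5 + 12 - 9) = 256 sectors (128KiB).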
3015 */ 3016 if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) && 3017 !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) 3018 ctrl->max_zeroes_sectors = ctrl->max_hw_sectors; 3019 else 3020 ctrl->max_zeroes_sectors = 0; 3021 3022 if (ctrl->subsys->subtype != NVME_NQN_NVME || 3023 nvme_ctrl_limited_cns(ctrl) || 3024 test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags)) 3025 return 0; 3026 3027 id = kzalloc(sizeof(*id), GFP_KERNEL); 3028 if (!id) 3029 return -ENOMEM; 3030 3031 c.identify.opcode = nvme_admin_identify; 3032 c.identify.cns = NVME_ID_CNS_CS_CTRL; 3033 c.identify.csi = NVME_CSI_NVM; 3034 3035 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); 3036 if (ret) 3037 goto free_data; 3038 3039 ctrl->dmrl = id->dmrl; 3040 ctrl->dmrsl = le32_to_cpu(id->dmrsl); 3041 if (id->wzsl) 3042 ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl); 3043 3044 free_data: 3045 if (ret > 0) 3046 set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags); 3047 kfree(id); 3048 return ret; 3049 } 3050 3051 static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl) 3052 { 3053 struct nvme_effects_log *log = ctrl->effects; 3054 3055 log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC | 3056 NVME_CMD_EFFECTS_NCC | 3057 NVME_CMD_EFFECTS_CSE_MASK); 3058 log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC | 3059 NVME_CMD_EFFECTS_CSE_MASK); 3060 3061 /* 3062 * The spec says the result of a security receive command depends on 3063 * the previous security send command. As such, many vendors log this 3064 * command as one to be submitted only when no other commands to the same 3065 * namespace are outstanding. The intention is to tell the host not to 3066 * mix security send and receive commands. 3067 * 3068 * This driver can only enforce such exclusive access against IO 3069 * queues, though. We are not readily able to enforce such a rule for 3070 * two commands to the admin queue, which is the only queue that 3071 * matters for this command. 3072 * 3073 * Rather than blindly freezing the IO queues for this effect that 3074 * doesn't even apply to IO, mask it off.
3075 */ 3076 log->acs[nvme_admin_security_recv] &= cpu_to_le32(~NVME_CMD_EFFECTS_CSE_MASK); 3077 3078 log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); 3079 log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); 3080 log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); 3081 } 3082 3083 static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 3084 { 3085 int ret = 0; 3086 3087 if (ctrl->effects) 3088 return 0; 3089 3090 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { 3091 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects); 3092 if (ret < 0) 3093 return ret; 3094 } 3095 3096 if (!ctrl->effects) { 3097 ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL); 3098 if (!ctrl->effects) 3099 return -ENOMEM; 3100 xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL); 3101 } 3102 3103 nvme_init_known_nvm_effects(ctrl); 3104 return 0; 3105 } 3106 3107 static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 3108 { 3109 /* 3110 * In fabrics we need to verify the cntlid matches the 3111 * admin connect 3112 */ 3113 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { 3114 dev_err(ctrl->device, 3115 "Mismatching cntlid: Connect %u vs Identify %u, rejecting\n", 3116 ctrl->cntlid, le16_to_cpu(id->cntlid)); 3117 return -EINVAL; 3118 } 3119 3120 if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) { 3121 dev_err(ctrl->device, 3122 "keep-alive support is mandatory for fabrics\n"); 3123 return -EINVAL; 3124 } 3125 3126 if (!nvme_discovery_ctrl(ctrl) && ctrl->ioccsz < 4) { 3127 dev_err(ctrl->device, 3128 "I/O queue command capsule supported size %d < 4\n", 3129 ctrl->ioccsz); 3130 return -EINVAL; 3131 } 3132 3133 if (!nvme_discovery_ctrl(ctrl) && ctrl->iorcsz < 1) { 3134 dev_err(ctrl->device, 3135 "I/O queue response capsule supported size %d < 1\n", 3136 ctrl->iorcsz); 3137 return -EINVAL; 3138 } 3139 3140 if (!ctrl->maxcmd) { 3141 dev_err(ctrl->device, "Maximum outstanding commands is 0\n"); 3142 return -EINVAL; 3143 } 3144 3145 return 0; 3146 } 3147 3148 static int nvme_init_identify(struct nvme_ctrl *ctrl) 3149 { 3150 struct queue_limits lim; 3151 struct nvme_id_ctrl *id; 3152 u32 max_hw_sectors; 3153 bool prev_apst_enabled; 3154 int ret; 3155 3156 ret = nvme_identify_ctrl(ctrl, &id); 3157 if (ret) { 3158 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); 3159 return -EIO; 3160 } 3161 3162 if (!(ctrl->ops->flags & NVME_F_FABRICS)) 3163 ctrl->cntlid = le16_to_cpu(id->cntlid); 3164 3165 if (!ctrl->identified) { 3166 unsigned int i; 3167 3168 /* 3169 * Check for quirks. Quirk can depend on firmware version, 3170 * so, in principle, the set of quirks present can change 3171 * across a reset. As a possible future enhancement, we 3172 * could re-scan for quirks every time we reinitialize 3173 * the device, but we'd have to make sure that the driver 3174 * behaves intelligently if the quirks change. 
3175 */ 3176 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { 3177 if (quirk_matches(id, &core_quirks[i])) 3178 ctrl->quirks |= core_quirks[i].quirks; 3179 } 3180 3181 ret = nvme_init_subsystem(ctrl, id); 3182 if (ret) 3183 goto out_free; 3184 3185 ret = nvme_init_effects(ctrl, id); 3186 if (ret) 3187 goto out_free; 3188 } 3189 memcpy(ctrl->subsys->firmware_rev, id->fr, 3190 sizeof(ctrl->subsys->firmware_rev)); 3191 3192 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { 3193 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); 3194 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; 3195 } 3196 3197 ctrl->crdt[0] = le16_to_cpu(id->crdt1); 3198 ctrl->crdt[1] = le16_to_cpu(id->crdt2); 3199 ctrl->crdt[2] = le16_to_cpu(id->crdt3); 3200 3201 ctrl->oacs = le16_to_cpu(id->oacs); 3202 ctrl->oncs = le16_to_cpu(id->oncs); 3203 ctrl->mtfa = le16_to_cpu(id->mtfa); 3204 ctrl->oaes = le32_to_cpu(id->oaes); 3205 ctrl->wctemp = le16_to_cpu(id->wctemp); 3206 ctrl->cctemp = le16_to_cpu(id->cctemp); 3207 3208 atomic_set(&ctrl->abort_limit, id->acl + 1); 3209 ctrl->vwc = id->vwc; 3210 if (id->mdts) 3211 max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts); 3212 else 3213 max_hw_sectors = UINT_MAX; 3214 ctrl->max_hw_sectors = 3215 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); 3216 3217 lim = queue_limits_start_update(ctrl->admin_q); 3218 nvme_set_ctrl_limits(ctrl, &lim); 3219 ret = queue_limits_commit_update(ctrl->admin_q, &lim); 3220 if (ret) 3221 goto out_free; 3222 3223 ctrl->sgls = le32_to_cpu(id->sgls); 3224 ctrl->kas = le16_to_cpu(id->kas); 3225 ctrl->max_namespaces = le32_to_cpu(id->mnan); 3226 ctrl->ctratt = le32_to_cpu(id->ctratt); 3227 3228 ctrl->cntrltype = id->cntrltype; 3229 ctrl->dctype = id->dctype; 3230 3231 if (id->rtd3e) { 3232 /* us -> s */ 3233 u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC; 3234 3235 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, 3236 shutdown_timeout, 60); 3237 3238 if (ctrl->shutdown_timeout != shutdown_timeout) 3239 dev_info(ctrl->device, 3240 "Shutdown timeout set to %u seconds\n", 3241 ctrl->shutdown_timeout); 3242 } else 3243 ctrl->shutdown_timeout = shutdown_timeout; 3244 3245 ctrl->npss = id->npss; 3246 ctrl->apsta = id->apsta; 3247 prev_apst_enabled = ctrl->apst_enabled; 3248 if (ctrl->quirks & NVME_QUIRK_NO_APST) { 3249 if (force_apst && id->apsta) { 3250 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); 3251 ctrl->apst_enabled = true; 3252 } else { 3253 ctrl->apst_enabled = false; 3254 } 3255 } else { 3256 ctrl->apst_enabled = id->apsta; 3257 } 3258 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); 3259 3260 if (ctrl->ops->flags & NVME_F_FABRICS) { 3261 ctrl->icdoff = le16_to_cpu(id->icdoff); 3262 ctrl->ioccsz = le32_to_cpu(id->ioccsz); 3263 ctrl->iorcsz = le32_to_cpu(id->iorcsz); 3264 ctrl->maxcmd = le16_to_cpu(id->maxcmd); 3265 3266 ret = nvme_check_ctrl_fabric_info(ctrl, id); 3267 if (ret) 3268 goto out_free; 3269 } else { 3270 ctrl->hmpre = le32_to_cpu(id->hmpre); 3271 ctrl->hmmin = le32_to_cpu(id->hmmin); 3272 ctrl->hmminds = le32_to_cpu(id->hmminds); 3273 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); 3274 } 3275 3276 ret = nvme_mpath_init_identify(ctrl, id); 3277 if (ret < 0) 3278 goto out_free; 3279 3280 if (ctrl->apst_enabled && !prev_apst_enabled) 3281 dev_pm_qos_expose_latency_tolerance(ctrl->device); 3282 else if (!ctrl->apst_enabled && prev_apst_enabled) 3283 
dev_pm_qos_hide_latency_tolerance(ctrl->device); 3284 3285 out_free: 3286 kfree(id); 3287 return ret; 3288 } 3289 3290 /* 3291 * Initialize the cached copies of the Identify data and various controller 3292 * register in our nvme_ctrl structure. This should be called as soon as 3293 * the admin queue is fully up and running. 3294 */ 3295 int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended) 3296 { 3297 int ret; 3298 3299 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); 3300 if (ret) { 3301 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); 3302 return ret; 3303 } 3304 3305 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); 3306 3307 if (ctrl->vs >= NVME_VS(1, 1, 0)) 3308 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap); 3309 3310 ret = nvme_init_identify(ctrl); 3311 if (ret) 3312 return ret; 3313 3314 ret = nvme_configure_apst(ctrl); 3315 if (ret < 0) 3316 return ret; 3317 3318 ret = nvme_configure_timestamp(ctrl); 3319 if (ret < 0) 3320 return ret; 3321 3322 ret = nvme_configure_host_options(ctrl); 3323 if (ret < 0) 3324 return ret; 3325 3326 nvme_configure_opal(ctrl, was_suspended); 3327 3328 if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { 3329 /* 3330 * Do not return errors unless we are in a controller reset, 3331 * the controller works perfectly fine without hwmon. 3332 */ 3333 ret = nvme_hwmon_init(ctrl); 3334 if (ret == -EINTR) 3335 return ret; 3336 } 3337 3338 clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags); 3339 ctrl->identified = true; 3340 3341 nvme_start_keep_alive(ctrl); 3342 3343 return 0; 3344 } 3345 EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish); 3346 3347 static int nvme_dev_open(struct inode *inode, struct file *file) 3348 { 3349 struct nvme_ctrl *ctrl = 3350 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 3351 3352 switch (nvme_ctrl_state(ctrl)) { 3353 case NVME_CTRL_LIVE: 3354 break; 3355 default: 3356 return -EWOULDBLOCK; 3357 } 3358 3359 nvme_get_ctrl(ctrl); 3360 if (!try_module_get(ctrl->ops->module)) { 3361 nvme_put_ctrl(ctrl); 3362 return -EINVAL; 3363 } 3364 3365 file->private_data = ctrl; 3366 return 0; 3367 } 3368 3369 static int nvme_dev_release(struct inode *inode, struct file *file) 3370 { 3371 struct nvme_ctrl *ctrl = 3372 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 3373 3374 module_put(ctrl->ops->module); 3375 nvme_put_ctrl(ctrl); 3376 return 0; 3377 } 3378 3379 static const struct file_operations nvme_dev_fops = { 3380 .owner = THIS_MODULE, 3381 .open = nvme_dev_open, 3382 .release = nvme_dev_release, 3383 .unlocked_ioctl = nvme_dev_ioctl, 3384 .compat_ioctl = compat_ptr_ioctl, 3385 .uring_cmd = nvme_dev_uring_cmd, 3386 }; 3387 3388 static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl, 3389 unsigned nsid) 3390 { 3391 struct nvme_ns_head *h; 3392 3393 lockdep_assert_held(&ctrl->subsys->lock); 3394 3395 list_for_each_entry(h, &ctrl->subsys->nsheads, entry) { 3396 /* 3397 * Private namespaces can share NSIDs under some conditions. 3398 * In that case we can't use the same ns_head for namespaces 3399 * with the same NSID. 
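 * nvme_is_unique_nsid() is what decides whether the NSID can be trusted to refer to the same namespace on every controller in the subsystem.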
3400 */ 3401 if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h)) 3402 continue; 3403 if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) 3404 return h; 3405 } 3406 3407 return NULL; 3408 } 3409 3410 static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys, 3411 struct nvme_ns_ids *ids) 3412 { 3413 bool has_uuid = !uuid_is_null(&ids->uuid); 3414 bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid)); 3415 bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64)); 3416 struct nvme_ns_head *h; 3417 3418 lockdep_assert_held(&subsys->lock); 3419 3420 list_for_each_entry(h, &subsys->nsheads, entry) { 3421 if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid)) 3422 return -EINVAL; 3423 if (has_nguid && 3424 memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0) 3425 return -EINVAL; 3426 if (has_eui64 && 3427 memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0) 3428 return -EINVAL; 3429 } 3430 3431 return 0; 3432 } 3433 3434 static void nvme_cdev_rel(struct device *dev) 3435 { 3436 ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt)); 3437 } 3438 3439 void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device) 3440 { 3441 cdev_device_del(cdev, cdev_device); 3442 put_device(cdev_device); 3443 } 3444 3445 int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, 3446 const struct file_operations *fops, struct module *owner) 3447 { 3448 int minor, ret; 3449 3450 minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL); 3451 if (minor < 0) 3452 return minor; 3453 cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor); 3454 cdev_device->class = &nvme_ns_chr_class; 3455 cdev_device->release = nvme_cdev_rel; 3456 device_initialize(cdev_device); 3457 cdev_init(cdev, fops); 3458 cdev->owner = owner; 3459 ret = cdev_device_add(cdev, cdev_device); 3460 if (ret) 3461 put_device(cdev_device); 3462 3463 return ret; 3464 } 3465 3466 static int nvme_ns_chr_open(struct inode *inode, struct file *file) 3467 { 3468 return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev)); 3469 } 3470 3471 static int nvme_ns_chr_release(struct inode *inode, struct file *file) 3472 { 3473 nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev)); 3474 return 0; 3475 } 3476 3477 static const struct file_operations nvme_ns_chr_fops = { 3478 .owner = THIS_MODULE, 3479 .open = nvme_ns_chr_open, 3480 .release = nvme_ns_chr_release, 3481 .unlocked_ioctl = nvme_ns_chr_ioctl, 3482 .compat_ioctl = compat_ptr_ioctl, 3483 .uring_cmd = nvme_ns_chr_uring_cmd, 3484 .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll, 3485 }; 3486 3487 static int nvme_add_ns_cdev(struct nvme_ns *ns) 3488 { 3489 int ret; 3490 3491 ns->cdev_device.parent = ns->ctrl->device; 3492 ret = dev_set_name(&ns->cdev_device, "ng%dn%d", 3493 ns->ctrl->instance, ns->head->instance); 3494 if (ret) 3495 return ret; 3496 3497 return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops, 3498 ns->ctrl->ops->module); 3499 } 3500 3501 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, 3502 struct nvme_ns_info *info) 3503 { 3504 struct nvme_ns_head *head; 3505 size_t size = sizeof(*head); 3506 int ret = -ENOMEM; 3507 3508 #ifdef CONFIG_NVME_MULTIPATH 3509 size += num_possible_nodes() * sizeof(struct nvme_ns *); 3510 #endif 3511 3512 head = kzalloc(size, GFP_KERNEL); 3513 if (!head) 3514 goto out; 3515 ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL); 3516 if (ret < 0) 3517 goto out_free_head; 3518 head->instance = ret; 3519 INIT_LIST_HEAD(&head->list); 3520 ret = 
init_srcu_struct(&head->srcu); 3521 if (ret) 3522 goto out_ida_remove; 3523 head->subsys = ctrl->subsys; 3524 head->ns_id = info->nsid; 3525 head->ids = info->ids; 3526 head->shared = info->is_shared; 3527 ratelimit_state_init(&head->rs_nuse, 5 * HZ, 1); 3528 ratelimit_set_flags(&head->rs_nuse, RATELIMIT_MSG_ON_RELEASE); 3529 kref_init(&head->ref); 3530 3531 if (head->ids.csi) { 3532 ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects); 3533 if (ret) 3534 goto out_cleanup_srcu; 3535 } else 3536 head->effects = ctrl->effects; 3537 3538 ret = nvme_mpath_alloc_disk(ctrl, head); 3539 if (ret) 3540 goto out_cleanup_srcu; 3541 3542 list_add_tail(&head->entry, &ctrl->subsys->nsheads); 3543 3544 kref_get(&ctrl->subsys->ref); 3545 3546 return head; 3547 out_cleanup_srcu: 3548 cleanup_srcu_struct(&head->srcu); 3549 out_ida_remove: 3550 ida_free(&ctrl->subsys->ns_ida, head->instance); 3551 out_free_head: 3552 kfree(head); 3553 out: 3554 if (ret > 0) 3555 ret = blk_status_to_errno(nvme_error_status(ret)); 3556 return ERR_PTR(ret); 3557 } 3558 3559 static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this, 3560 struct nvme_ns_ids *ids) 3561 { 3562 struct nvme_subsystem *s; 3563 int ret = 0; 3564 3565 /* 3566 * Note that this check is racy as we try to avoid holding the global 3567 * lock over the whole ns_head creation. But it is only intended as 3568 * a sanity check anyway. 3569 */ 3570 mutex_lock(&nvme_subsystems_lock); 3571 list_for_each_entry(s, &nvme_subsystems, entry) { 3572 if (s == this) 3573 continue; 3574 mutex_lock(&s->lock); 3575 ret = nvme_subsys_check_duplicate_ids(s, ids); 3576 mutex_unlock(&s->lock); 3577 if (ret) 3578 break; 3579 } 3580 mutex_unlock(&nvme_subsystems_lock); 3581 3582 return ret; 3583 } 3584 3585 static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) 3586 { 3587 struct nvme_ctrl *ctrl = ns->ctrl; 3588 struct nvme_ns_head *head = NULL; 3589 int ret; 3590 3591 ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids); 3592 if (ret) { 3593 /* 3594 * We've found two different namespaces on two different 3595 * subsystems that report the same ID. This is pretty nasty 3596 * for anything that actually requires unique device 3597 * identification. In the kernel we need this for multipathing, 3598 * and in user space the /dev/disk/by-id/ links rely on it. 3599 * 3600 * If the device also claims to be multi-path capable, back off 3601 * here now and refuse to probe the second device, as this is a 3602 * recipe for data corruption. If not, this is probably a 3603 * cheap consumer device if on the PCIe bus, so let the user 3604 * proceed and use the shiny toy, but warn that with a changing 3605 * probing order (which, due to our async probing, could just be 3606 * a device taking longer to start up) the other device could show 3607 * up at any time.
3608 */ 3609 nvme_print_device_info(ctrl); 3610 if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */ 3611 ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) && 3612 info->is_shared)) { 3613 dev_err(ctrl->device, 3614 "ignoring nsid %d because of duplicate IDs\n", 3615 info->nsid); 3616 return ret; 3617 } 3618 3619 dev_err(ctrl->device, 3620 "clearing duplicate IDs for nsid %d\n", info->nsid); 3621 dev_err(ctrl->device, 3622 "use of /dev/disk/by-id/ may cause data corruption\n"); 3623 memset(&info->ids.nguid, 0, sizeof(info->ids.nguid)); 3624 memset(&info->ids.uuid, 0, sizeof(info->ids.uuid)); 3625 memset(&info->ids.eui64, 0, sizeof(info->ids.eui64)); 3626 ctrl->quirks |= NVME_QUIRK_BOGUS_NID; 3627 } 3628 3629 mutex_lock(&ctrl->subsys->lock); 3630 head = nvme_find_ns_head(ctrl, info->nsid); 3631 if (!head) { 3632 ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids); 3633 if (ret) { 3634 dev_err(ctrl->device, 3635 "duplicate IDs in subsystem for nsid %d\n", 3636 info->nsid); 3637 goto out_unlock; 3638 } 3639 head = nvme_alloc_ns_head(ctrl, info); 3640 if (IS_ERR(head)) { 3641 ret = PTR_ERR(head); 3642 goto out_unlock; 3643 } 3644 } else { 3645 ret = -EINVAL; 3646 if (!info->is_shared || !head->shared) { 3647 dev_err(ctrl->device, 3648 "Duplicate unshared namespace %d\n", 3649 info->nsid); 3650 goto out_put_ns_head; 3651 } 3652 if (!nvme_ns_ids_equal(&head->ids, &info->ids)) { 3653 dev_err(ctrl->device, 3654 "IDs don't match for shared namespace %d\n", 3655 info->nsid); 3656 goto out_put_ns_head; 3657 } 3658 3659 if (!multipath) { 3660 dev_warn(ctrl->device, 3661 "Found shared namespace %d, but multipathing not supported.\n", 3662 info->nsid); 3663 dev_warn_once(ctrl->device, 3664 "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n."); 3665 } 3666 } 3667 3668 list_add_tail_rcu(&ns->siblings, &head->list); 3669 ns->head = head; 3670 mutex_unlock(&ctrl->subsys->lock); 3671 return 0; 3672 3673 out_put_ns_head: 3674 nvme_put_ns_head(head); 3675 out_unlock: 3676 mutex_unlock(&ctrl->subsys->lock); 3677 return ret; 3678 } 3679 3680 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3681 { 3682 struct nvme_ns *ns, *ret = NULL; 3683 3684 down_read(&ctrl->namespaces_rwsem); 3685 list_for_each_entry(ns, &ctrl->namespaces, list) { 3686 if (ns->head->ns_id == nsid) { 3687 if (!nvme_get_ns(ns)) 3688 continue; 3689 ret = ns; 3690 break; 3691 } 3692 if (ns->head->ns_id > nsid) 3693 break; 3694 } 3695 up_read(&ctrl->namespaces_rwsem); 3696 return ret; 3697 } 3698 EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU); 3699 3700 /* 3701 * Add the namespace to the controller list while keeping the list ordered. 
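 * The list is kept sorted by namespace ID: walk it backwards and insert the new entry behind the first namespace with a smaller NSID.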
3702 */ 3703 static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) 3704 { 3705 struct nvme_ns *tmp; 3706 3707 list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) { 3708 if (tmp->head->ns_id < ns->head->ns_id) { 3709 list_add(&ns->list, &tmp->list); 3710 return; 3711 } 3712 } 3713 list_add(&ns->list, &ns->ctrl->namespaces); 3714 } 3715 3716 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) 3717 { 3718 struct nvme_ns *ns; 3719 struct gendisk *disk; 3720 int node = ctrl->numa_node; 3721 3722 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); 3723 if (!ns) 3724 return; 3725 3726 disk = blk_mq_alloc_disk(ctrl->tagset, NULL, ns); 3727 if (IS_ERR(disk)) 3728 goto out_free_ns; 3729 disk->fops = &nvme_bdev_ops; 3730 disk->private_data = ns; 3731 3732 ns->disk = disk; 3733 ns->queue = disk->queue; 3734 3735 if (ctrl->opts && ctrl->opts->data_digest) 3736 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue); 3737 3738 blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); 3739 if (ctrl->ops->supports_pci_p2pdma && 3740 ctrl->ops->supports_pci_p2pdma(ctrl)) 3741 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); 3742 3743 ns->ctrl = ctrl; 3744 kref_init(&ns->kref); 3745 3746 if (nvme_init_ns_head(ns, info)) 3747 goto out_cleanup_disk; 3748 3749 /* 3750 * If multipathing is enabled, the device name for all disks and not 3751 * just those that represent shared namespaces needs to be based on the 3752 * subsystem instance. Using the controller instance for private 3753 * namespaces could lead to naming collisions between shared and private 3754 * namespaces if they don't use a common numbering scheme. 3755 * 3756 * If multipathing is not enabled, disk names must use the controller 3757 * instance as shared namespaces will show up as multiple block 3758 * devices. 3759 */ 3760 if (nvme_ns_head_multipath(ns->head)) { 3761 sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, 3762 ctrl->instance, ns->head->instance); 3763 disk->flags |= GENHD_FL_HIDDEN; 3764 } else if (multipath) { 3765 sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, 3766 ns->head->instance); 3767 } else { 3768 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, 3769 ns->head->instance); 3770 } 3771 3772 if (nvme_update_ns_info(ns, info)) 3773 goto out_unlink_ns; 3774 3775 down_write(&ctrl->namespaces_rwsem); 3776 /* 3777 * Ensure that no namespaces are added to the ctrl list after the queues 3778 * are frozen, thereby avoiding a deadlock between scan and reset. 3779 */ 3780 if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) { 3781 up_write(&ctrl->namespaces_rwsem); 3782 goto out_unlink_ns; 3783 } 3784 nvme_ns_add_to_ctrl_list(ns); 3785 up_write(&ctrl->namespaces_rwsem); 3786 nvme_get_ctrl(ctrl); 3787 3788 if (device_add_disk(ctrl->device, ns->disk, nvme_ns_attr_groups)) 3789 goto out_cleanup_ns_from_list; 3790 3791 if (!nvme_ns_head_multipath(ns->head)) 3792 nvme_add_ns_cdev(ns); 3793 3794 nvme_mpath_add_disk(ns, info->anagrpid); 3795 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); 3796 3797 /* 3798 * Set ns->disk->device->driver_data to ns so we can access 3799 * ns->head->passthru_err_log_enabled in 3800 * nvme_io_passthru_err_log_enabled_[store | show](). 
*/ 3802 dev_set_drvdata(disk_to_dev(ns->disk), ns); 3803 3804 return; 3805 3806 out_cleanup_ns_from_list: 3807 nvme_put_ctrl(ctrl); 3808 down_write(&ctrl->namespaces_rwsem); 3809 list_del_init(&ns->list); 3810 up_write(&ctrl->namespaces_rwsem); 3811 out_unlink_ns: 3812 mutex_lock(&ctrl->subsys->lock); 3813 list_del_rcu(&ns->siblings); 3814 if (list_empty(&ns->head->list)) 3815 list_del_init(&ns->head->entry); 3816 mutex_unlock(&ctrl->subsys->lock); 3817 nvme_put_ns_head(ns->head); 3818 out_cleanup_disk: 3819 put_disk(disk); 3820 out_free_ns: 3821 kfree(ns); 3822 } 3823 3824 static void nvme_ns_remove(struct nvme_ns *ns) 3825 { 3826 bool last_path = false; 3827 3828 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) 3829 return; 3830 3831 clear_bit(NVME_NS_READY, &ns->flags); 3832 set_capacity(ns->disk, 0); 3833 nvme_fault_inject_fini(&ns->fault_inject); 3834 3835 /* 3836 * Ensure that !NVME_NS_READY is seen by other threads to prevent 3837 * this ns from going back into current_path. 3838 */ 3839 synchronize_srcu(&ns->head->srcu); 3840 3841 /* wait for concurrent submissions */ 3842 if (nvme_mpath_clear_current_path(ns)) 3843 synchronize_srcu(&ns->head->srcu); 3844 3845 mutex_lock(&ns->ctrl->subsys->lock); 3846 list_del_rcu(&ns->siblings); 3847 if (list_empty(&ns->head->list)) { 3848 list_del_init(&ns->head->entry); 3849 last_path = true; 3850 } 3851 mutex_unlock(&ns->ctrl->subsys->lock); 3852 3853 /* guarantee not available in head->list */ 3854 synchronize_srcu(&ns->head->srcu); 3855 3856 if (!nvme_ns_head_multipath(ns->head)) 3857 nvme_cdev_del(&ns->cdev, &ns->cdev_device); 3858 del_gendisk(ns->disk); 3859 3860 down_write(&ns->ctrl->namespaces_rwsem); 3861 list_del_init(&ns->list); 3862 up_write(&ns->ctrl->namespaces_rwsem); 3863 3864 if (last_path) 3865 nvme_mpath_shutdown_disk(ns->head); 3866 nvme_put_ns(ns); 3867 } 3868 3869 static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) 3870 { 3871 struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid); 3872 3873 if (ns) { 3874 nvme_ns_remove(ns); 3875 nvme_put_ns(ns); 3876 } 3877 } 3878 3879 static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info) 3880 { 3881 int ret = NVME_SC_INVALID_NS | NVME_SC_DNR; 3882 3883 if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) { 3884 dev_err(ns->ctrl->device, 3885 "identifiers changed for nsid %d\n", ns->head->ns_id); 3886 goto out; 3887 } 3888 3889 ret = nvme_update_ns_info(ns, info); 3890 out: 3891 /* 3892 * Only remove the namespace if we got a fatal error back from the 3893 * device, otherwise ignore the error and just move on. 3894 * 3895 * TODO: we should probably schedule a delayed retry here. 3896 */ 3897 if (ret > 0 && (ret & NVME_SC_DNR)) 3898 nvme_ns_remove(ns); 3899 } 3900 3901 static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3902 { 3903 struct nvme_ns_info info = { .nsid = nsid }; 3904 struct nvme_ns *ns; 3905 int ret; 3906 3907 if (nvme_identify_ns_descs(ctrl, &info)) 3908 return; 3909 3910 if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) { 3911 dev_warn(ctrl->device, 3912 "command set not reported for nsid: %d\n", nsid); 3913 return; 3914 } 3915 3916 /* 3917 * If available try to use the Command Set Independent Identify Namespace 3918 * data structure to find all the generic information that is needed to 3919 * set up a namespace. If not, fall back to the legacy version.
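 *
 * The CS independent path is taken when the controller reports CRMS.CRIMS
 * support, and also for namespaces whose command set is neither NVM nor ZNS,
 * since those cannot be described by the legacy Identify Namespace data.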
3920 */ 3921 if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) || 3922 (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) 3923 ret = nvme_ns_info_from_id_cs_indep(ctrl, &info); 3924 else 3925 ret = nvme_ns_info_from_identify(ctrl, &info); 3926 3927 if (info.is_removed) 3928 nvme_ns_remove_by_nsid(ctrl, nsid); 3929 3930 /* 3931 * Ignore the namespace if it is not ready. We will get an AEN once it 3932 * becomes ready and restart the scan. 3933 */ 3934 if (ret || !info.is_ready) 3935 return; 3936 3937 ns = nvme_find_get_ns(ctrl, nsid); 3938 if (ns) { 3939 nvme_validate_ns(ns, &info); 3940 nvme_put_ns(ns); 3941 } else { 3942 nvme_alloc_ns(ctrl, &info); 3943 } 3944 } 3945 3946 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 3947 unsigned nsid) 3948 { 3949 struct nvme_ns *ns, *next; 3950 LIST_HEAD(rm_list); 3951 3952 down_write(&ctrl->namespaces_rwsem); 3953 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { 3954 if (ns->head->ns_id > nsid) 3955 list_move_tail(&ns->list, &rm_list); 3956 } 3957 up_write(&ctrl->namespaces_rwsem); 3958 3959 list_for_each_entry_safe(ns, next, &rm_list, list) 3960 nvme_ns_remove(ns); 3961 3962 } 3963 3964 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) 3965 { 3966 const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32); 3967 __le32 *ns_list; 3968 u32 prev = 0; 3969 int ret = 0, i; 3970 3971 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 3972 if (!ns_list) 3973 return -ENOMEM; 3974 3975 for (;;) { 3976 struct nvme_command cmd = { 3977 .identify.opcode = nvme_admin_identify, 3978 .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST, 3979 .identify.nsid = cpu_to_le32(prev), 3980 }; 3981 3982 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list, 3983 NVME_IDENTIFY_DATA_SIZE); 3984 if (ret) { 3985 dev_warn(ctrl->device, 3986 "Identify NS List failed (status=0x%x)\n", ret); 3987 goto free; 3988 } 3989 3990 for (i = 0; i < nr_entries; i++) { 3991 u32 nsid = le32_to_cpu(ns_list[i]); 3992 3993 if (!nsid) /* end of the list? */ 3994 goto out; 3995 nvme_scan_ns(ctrl, nsid); 3996 while (++prev < nsid) 3997 nvme_ns_remove_by_nsid(ctrl, prev); 3998 } 3999 } 4000 out: 4001 nvme_remove_invalid_namespaces(ctrl, prev); 4002 free: 4003 kfree(ns_list); 4004 return ret; 4005 } 4006 4007 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl) 4008 { 4009 struct nvme_id_ctrl *id; 4010 u32 nn, i; 4011 4012 if (nvme_identify_ctrl(ctrl, &id)) 4013 return; 4014 nn = le32_to_cpu(id->nn); 4015 kfree(id); 4016 4017 for (i = 1; i <= nn; i++) 4018 nvme_scan_ns(ctrl, i); 4019 4020 nvme_remove_invalid_namespaces(ctrl, nn); 4021 } 4022 4023 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) 4024 { 4025 size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32); 4026 __le32 *log; 4027 int error; 4028 4029 log = kzalloc(log_size, GFP_KERNEL); 4030 if (!log) 4031 return; 4032 4033 /* 4034 * We need to read the log to clear the AEN, but we don't want to rely 4035 * on it for the changed namespace information as userspace could have 4036 * raced with us in reading the log page, which could cause us to miss 4037 * updates. 
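 *
 * The full namespace rescan kicked off from nvme_scan_work() picks up
 * whatever the log page would have told us anyway.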
*/ 4039 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, 4040 NVME_CSI_NVM, log, log_size, 0); 4041 if (error) 4042 dev_warn(ctrl->device, 4043 "reading changed ns log failed: %d\n", error); 4044 4045 kfree(log); 4046 } 4047 4048 static void nvme_scan_work(struct work_struct *work) 4049 { 4050 struct nvme_ctrl *ctrl = 4051 container_of(work, struct nvme_ctrl, scan_work); 4052 int ret; 4053 4054 /* No tagset on a live ctrl means IO queues could not be created */ 4055 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset) 4056 return; 4057 4058 /* 4059 * Identify Controller limits can change across a controller reset due to 4060 * a new firmware download; even though this is not common, we cannot 4061 * ignore such a scenario. The controller's non-mdts limits are reported 4062 * in units of logical blocks, which depend on the format of the attached 4063 * namespace. Hence re-read the limits at the time of ns allocation. 4064 */ 4065 ret = nvme_init_non_mdts_limits(ctrl); 4066 if (ret < 0) { 4067 dev_warn(ctrl->device, 4068 "reading non-mdts-limits failed: %d\n", ret); 4069 return; 4070 } 4071 4072 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { 4073 dev_info(ctrl->device, "rescanning namespaces.\n"); 4074 nvme_clear_changed_ns_log(ctrl); 4075 } 4076 4077 mutex_lock(&ctrl->scan_lock); 4078 if (nvme_ctrl_limited_cns(ctrl)) { 4079 nvme_scan_ns_sequential(ctrl); 4080 } else { 4081 /* 4082 * Fall back to sequential scan if DNR is set to handle broken 4083 * devices which should support Identify NS List (as per the VS 4084 * they report) but don't actually support it. 4085 */ 4086 ret = nvme_scan_ns_list(ctrl); 4087 if (ret > 0 && ret & NVME_SC_DNR) 4088 nvme_scan_ns_sequential(ctrl); 4089 } 4090 mutex_unlock(&ctrl->scan_lock); 4091 } 4092 4093 /* 4094 * This function iterates the namespace list unlocked to allow recovery from 4095 * controller failure. It is up to the caller to ensure the namespace list is 4096 * not modified by scan work while this function is executing. 4097 */ 4098 void nvme_remove_namespaces(struct nvme_ctrl *ctrl) 4099 { 4100 struct nvme_ns *ns, *next; 4101 LIST_HEAD(ns_list); 4102 4103 /* 4104 * make sure to requeue I/O to all namespaces as these 4105 * might result from the scan itself and must complete 4106 * for the scan_work to make progress 4107 */ 4108 nvme_mpath_clear_ctrl_paths(ctrl); 4109 4110 /* 4111 * Unquiesce io queues so any pending IO won't hang, especially 4112 * those submitted from scan work 4113 */ 4114 nvme_unquiesce_io_queues(ctrl); 4115 4116 /* prevent racing with ns scanning */ 4117 flush_work(&ctrl->scan_work); 4118 4119 /* 4120 * The dead state indicates the controller was not gracefully 4121 * disconnected. In that case, we won't be able to flush any data while 4122 * removing the namespaces' disks; fail all the queues now to avoid 4123 * potentially having to clean up the failed sync later.
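 *
 * nvme_mark_namespaces_dead() below marks every gendisk dead so that both
 * outstanding and newly submitted I/O fails immediately.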
4124 */ 4125 if (nvme_ctrl_state(ctrl) == NVME_CTRL_DEAD) 4126 nvme_mark_namespaces_dead(ctrl); 4127 4128 /* this is a no-op when called from the controller reset handler */ 4129 nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO); 4130 4131 down_write(&ctrl->namespaces_rwsem); 4132 list_splice_init(&ctrl->namespaces, &ns_list); 4133 up_write(&ctrl->namespaces_rwsem); 4134 4135 list_for_each_entry_safe(ns, next, &ns_list, list) 4136 nvme_ns_remove(ns); 4137 } 4138 EXPORT_SYMBOL_GPL(nvme_remove_namespaces); 4139 4140 static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env) 4141 { 4142 const struct nvme_ctrl *ctrl = 4143 container_of(dev, struct nvme_ctrl, ctrl_device); 4144 struct nvmf_ctrl_options *opts = ctrl->opts; 4145 int ret; 4146 4147 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name); 4148 if (ret) 4149 return ret; 4150 4151 if (opts) { 4152 ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr); 4153 if (ret) 4154 return ret; 4155 4156 ret = add_uevent_var(env, "NVME_TRSVCID=%s", 4157 opts->trsvcid ?: "none"); 4158 if (ret) 4159 return ret; 4160 4161 ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s", 4162 opts->host_traddr ?: "none"); 4163 if (ret) 4164 return ret; 4165 4166 ret = add_uevent_var(env, "NVME_HOST_IFACE=%s", 4167 opts->host_iface ?: "none"); 4168 } 4169 return ret; 4170 } 4171 4172 static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata) 4173 { 4174 char *envp[2] = { envdata, NULL }; 4175 4176 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); 4177 } 4178 4179 static void nvme_aen_uevent(struct nvme_ctrl *ctrl) 4180 { 4181 char *envp[2] = { NULL, NULL }; 4182 u32 aen_result = ctrl->aen_result; 4183 4184 ctrl->aen_result = 0; 4185 if (!aen_result) 4186 return; 4187 4188 envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result); 4189 if (!envp[0]) 4190 return; 4191 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); 4192 kfree(envp[0]); 4193 } 4194 4195 static void nvme_async_event_work(struct work_struct *work) 4196 { 4197 struct nvme_ctrl *ctrl = 4198 container_of(work, struct nvme_ctrl, async_event_work); 4199 4200 nvme_aen_uevent(ctrl); 4201 4202 /* 4203 * The transport drivers must guarantee AER submission here is safe by 4204 * flushing ctrl async_event_work after changing the controller state 4205 * from LIVE and before freeing the admin queue. 
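 *
 * See the flush_work(&ctrl->async_event_work) call in nvme_stop_ctrl(),
 * which provides this guarantee for transports that use it.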
4206 */ 4207 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE) 4208 ctrl->ops->submit_async_event(ctrl); 4209 } 4210 4211 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) 4212 { 4213 4214 u32 csts; 4215 4216 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) 4217 return false; 4218 4219 if (csts == ~0) 4220 return false; 4221 4222 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); 4223 } 4224 4225 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) 4226 { 4227 struct nvme_fw_slot_info_log *log; 4228 u8 next_fw_slot, cur_fw_slot; 4229 4230 log = kmalloc(sizeof(*log), GFP_KERNEL); 4231 if (!log) 4232 return; 4233 4234 if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM, 4235 log, sizeof(*log), 0)) { 4236 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); 4237 goto out_free_log; 4238 } 4239 4240 cur_fw_slot = log->afi & 0x7; 4241 next_fw_slot = (log->afi & 0x70) >> 4; 4242 if (!cur_fw_slot || (next_fw_slot && (cur_fw_slot != next_fw_slot))) { 4243 dev_info(ctrl->device, 4244 "Firmware is activated after next Controller Level Reset\n"); 4245 goto out_free_log; 4246 } 4247 4248 memcpy(ctrl->subsys->firmware_rev, &log->frs[cur_fw_slot - 1], 4249 sizeof(ctrl->subsys->firmware_rev)); 4250 4251 out_free_log: 4252 kfree(log); 4253 } 4254 4255 static void nvme_fw_act_work(struct work_struct *work) 4256 { 4257 struct nvme_ctrl *ctrl = container_of(work, 4258 struct nvme_ctrl, fw_act_work); 4259 unsigned long fw_act_timeout; 4260 4261 nvme_auth_stop(ctrl); 4262 4263 if (ctrl->mtfa) 4264 fw_act_timeout = jiffies + 4265 msecs_to_jiffies(ctrl->mtfa * 100); 4266 else 4267 fw_act_timeout = jiffies + 4268 msecs_to_jiffies(admin_timeout * 1000); 4269 4270 nvme_quiesce_io_queues(ctrl); 4271 while (nvme_ctrl_pp_status(ctrl)) { 4272 if (time_after(jiffies, fw_act_timeout)) { 4273 dev_warn(ctrl->device, 4274 "Fw activation timeout, reset controller\n"); 4275 nvme_try_sched_reset(ctrl); 4276 return; 4277 } 4278 msleep(100); 4279 } 4280 4281 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) 4282 return; 4283 4284 nvme_unquiesce_io_queues(ctrl); 4285 /* read FW slot information to clear the AER */ 4286 nvme_get_fw_slot_info(ctrl); 4287 4288 queue_work(nvme_wq, &ctrl->async_event_work); 4289 } 4290 4291 static u32 nvme_aer_type(u32 result) 4292 { 4293 return result & 0x7; 4294 } 4295 4296 static u32 nvme_aer_subtype(u32 result) 4297 { 4298 return (result & 0xff00) >> 8; 4299 } 4300 4301 static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) 4302 { 4303 u32 aer_notice_type = nvme_aer_subtype(result); 4304 bool requeue = true; 4305 4306 switch (aer_notice_type) { 4307 case NVME_AER_NOTICE_NS_CHANGED: 4308 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); 4309 nvme_queue_scan(ctrl); 4310 break; 4311 case NVME_AER_NOTICE_FW_ACT_STARTING: 4312 /* 4313 * We are (ab)using the RESETTING state to prevent subsequent 4314 * recovery actions from interfering with the controller's 4315 * firmware activation. 
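 *
 * nvme_fw_act_work() returns the controller to LIVE once CSTS.PP clears,
 * or schedules a proper reset if the activation times out.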
4316 */ 4317 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { 4318 requeue = false; 4319 queue_work(nvme_wq, &ctrl->fw_act_work); 4320 } 4321 break; 4322 #ifdef CONFIG_NVME_MULTIPATH 4323 case NVME_AER_NOTICE_ANA: 4324 if (!ctrl->ana_log_buf) 4325 break; 4326 queue_work(nvme_wq, &ctrl->ana_work); 4327 break; 4328 #endif 4329 case NVME_AER_NOTICE_DISC_CHANGED: 4330 ctrl->aen_result = result; 4331 break; 4332 default: 4333 dev_warn(ctrl->device, "async event result %08x\n", result); 4334 } 4335 return requeue; 4336 } 4337 4338 static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl) 4339 { 4340 dev_warn(ctrl->device, "resetting controller due to AER\n"); 4341 nvme_reset_ctrl(ctrl); 4342 } 4343 4344 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, 4345 volatile union nvme_result *res) 4346 { 4347 u32 result = le32_to_cpu(res->u32); 4348 u32 aer_type = nvme_aer_type(result); 4349 u32 aer_subtype = nvme_aer_subtype(result); 4350 bool requeue = true; 4351 4352 if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) 4353 return; 4354 4355 trace_nvme_async_event(ctrl, result); 4356 switch (aer_type) { 4357 case NVME_AER_NOTICE: 4358 requeue = nvme_handle_aen_notice(ctrl, result); 4359 break; 4360 case NVME_AER_ERROR: 4361 /* 4362 * For a persistent internal error, don't run async_event_work 4363 * to submit a new AER. The controller reset will do it. 4364 */ 4365 if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) { 4366 nvme_handle_aer_persistent_error(ctrl); 4367 return; 4368 } 4369 fallthrough; 4370 case NVME_AER_SMART: 4371 case NVME_AER_CSS: 4372 case NVME_AER_VS: 4373 ctrl->aen_result = result; 4374 break; 4375 default: 4376 break; 4377 } 4378 4379 if (requeue) 4380 queue_work(nvme_wq, &ctrl->async_event_work); 4381 } 4382 EXPORT_SYMBOL_GPL(nvme_complete_async_event); 4383 4384 int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, 4385 const struct blk_mq_ops *ops, unsigned int cmd_size) 4386 { 4387 struct queue_limits lim = {}; 4388 int ret; 4389 4390 memset(set, 0, sizeof(*set)); 4391 set->ops = ops; 4392 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; 4393 if (ctrl->ops->flags & NVME_F_FABRICS) 4394 set->reserved_tags = NVMF_RESERVED_TAGS; 4395 set->numa_node = ctrl->numa_node; 4396 set->flags = BLK_MQ_F_NO_SCHED; 4397 if (ctrl->ops->flags & NVME_F_BLOCKING) 4398 set->flags |= BLK_MQ_F_BLOCKING; 4399 set->cmd_size = cmd_size; 4400 set->driver_data = ctrl; 4401 set->nr_hw_queues = 1; 4402 set->timeout = NVME_ADMIN_TIMEOUT; 4403 ret = blk_mq_alloc_tag_set(set); 4404 if (ret) 4405 return ret; 4406 4407 ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL); 4408 if (IS_ERR(ctrl->admin_q)) { 4409 ret = PTR_ERR(ctrl->admin_q); 4410 goto out_free_tagset; 4411 } 4412 4413 if (ctrl->ops->flags & NVME_F_FABRICS) { 4414 ctrl->fabrics_q = blk_mq_alloc_queue(set, NULL, NULL); 4415 if (IS_ERR(ctrl->fabrics_q)) { 4416 ret = PTR_ERR(ctrl->fabrics_q); 4417 goto out_cleanup_admin_q; 4418 } 4419 } 4420 4421 ctrl->admin_tagset = set; 4422 return 0; 4423 4424 out_cleanup_admin_q: 4425 blk_mq_destroy_queue(ctrl->admin_q); 4426 blk_put_queue(ctrl->admin_q); 4427 out_free_tagset: 4428 blk_mq_free_tag_set(set); 4429 ctrl->admin_q = NULL; 4430 ctrl->fabrics_q = NULL; 4431 return ret; 4432 } 4433 EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set); 4434 4435 void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl) 4436 { 4437 blk_mq_destroy_queue(ctrl->admin_q); 4438 blk_put_queue(ctrl->admin_q); 4439 if (ctrl->ops->flags & NVME_F_FABRICS) { 4440 
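		/* fabrics transports also allocated a fabrics_q in nvme_alloc_admin_tag_set() */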
blk_mq_destroy_queue(ctrl->fabrics_q); 4441 blk_put_queue(ctrl->fabrics_q); 4442 } 4443 blk_mq_free_tag_set(ctrl->admin_tagset); 4444 } 4445 EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set); 4446 4447 int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, 4448 const struct blk_mq_ops *ops, unsigned int nr_maps, 4449 unsigned int cmd_size) 4450 { 4451 int ret; 4452 4453 memset(set, 0, sizeof(*set)); 4454 set->ops = ops; 4455 set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1); 4456 /* 4457 * Some Apple controllers require tags to be unique across admin and 4458 * the (only) I/O queue, so reserve the first 32 tags of the I/O queue. 4459 */ 4460 if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS) 4461 set->reserved_tags = NVME_AQ_DEPTH; 4462 else if (ctrl->ops->flags & NVME_F_FABRICS) 4463 set->reserved_tags = NVMF_RESERVED_TAGS; 4464 set->numa_node = ctrl->numa_node; 4465 set->flags = BLK_MQ_F_SHOULD_MERGE; 4466 if (ctrl->ops->flags & NVME_F_BLOCKING) 4467 set->flags |= BLK_MQ_F_BLOCKING; 4468 set->cmd_size = cmd_size; 4469 set->driver_data = ctrl; 4470 set->nr_hw_queues = ctrl->queue_count - 1; 4471 set->timeout = NVME_IO_TIMEOUT; 4472 set->nr_maps = nr_maps; 4473 ret = blk_mq_alloc_tag_set(set); 4474 if (ret) 4475 return ret; 4476 4477 if (ctrl->ops->flags & NVME_F_FABRICS) { 4478 ctrl->connect_q = blk_mq_alloc_queue(set, NULL, NULL); 4479 if (IS_ERR(ctrl->connect_q)) { 4480 ret = PTR_ERR(ctrl->connect_q); 4481 goto out_free_tag_set; 4482 } 4483 blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, 4484 ctrl->connect_q); 4485 } 4486 4487 ctrl->tagset = set; 4488 return 0; 4489 4490 out_free_tag_set: 4491 blk_mq_free_tag_set(set); 4492 ctrl->connect_q = NULL; 4493 return ret; 4494 } 4495 EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set); 4496 4497 void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl) 4498 { 4499 if (ctrl->ops->flags & NVME_F_FABRICS) { 4500 blk_mq_destroy_queue(ctrl->connect_q); 4501 blk_put_queue(ctrl->connect_q); 4502 } 4503 blk_mq_free_tag_set(ctrl->tagset); 4504 } 4505 EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set); 4506 4507 void nvme_stop_ctrl(struct nvme_ctrl *ctrl) 4508 { 4509 nvme_mpath_stop(ctrl); 4510 nvme_auth_stop(ctrl); 4511 nvme_stop_keep_alive(ctrl); 4512 nvme_stop_failfast_work(ctrl); 4513 flush_work(&ctrl->async_event_work); 4514 cancel_work_sync(&ctrl->fw_act_work); 4515 if (ctrl->ops->stop_ctrl) 4516 ctrl->ops->stop_ctrl(ctrl); 4517 } 4518 EXPORT_SYMBOL_GPL(nvme_stop_ctrl); 4519 4520 void nvme_start_ctrl(struct nvme_ctrl *ctrl) 4521 { 4522 nvme_enable_aen(ctrl); 4523 4524 /* 4525 * Persistent discovery controllers need to send an indication to userspace 4526 * to re-read the discovery log page to learn about possible changes 4527 * that were missed. We identify persistent discovery controllers by 4528 * checking that they started once before, hence are reconnecting.
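 *
 * The "NVME_EVENT=rediscover" change uevent below is that indication.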
*/ 4530 if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && 4531 nvme_discovery_ctrl(ctrl)) 4532 nvme_change_uevent(ctrl, "NVME_EVENT=rediscover"); 4533 4534 if (ctrl->queue_count > 1) { 4535 nvme_queue_scan(ctrl); 4536 nvme_unquiesce_io_queues(ctrl); 4537 nvme_mpath_update(ctrl); 4538 } 4539 4540 nvme_change_uevent(ctrl, "NVME_EVENT=connected"); 4541 set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags); 4542 } 4543 EXPORT_SYMBOL_GPL(nvme_start_ctrl); 4544 4545 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) 4546 { 4547 nvme_hwmon_exit(ctrl); 4548 nvme_fault_inject_fini(&ctrl->fault_inject); 4549 dev_pm_qos_hide_latency_tolerance(ctrl->device); 4550 cdev_device_del(&ctrl->cdev, ctrl->device); 4551 nvme_put_ctrl(ctrl); 4552 } 4553 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); 4554 4555 static void nvme_free_cels(struct nvme_ctrl *ctrl) 4556 { 4557 struct nvme_effects_log *cel; 4558 unsigned long i; 4559 4560 xa_for_each(&ctrl->cels, i, cel) { 4561 xa_erase(&ctrl->cels, i); 4562 kfree(cel); 4563 } 4564 4565 xa_destroy(&ctrl->cels); 4566 } 4567 4568 static void nvme_free_ctrl(struct device *dev) 4569 { 4570 struct nvme_ctrl *ctrl = 4571 container_of(dev, struct nvme_ctrl, ctrl_device); 4572 struct nvme_subsystem *subsys = ctrl->subsys; 4573 4574 if (!subsys || ctrl->instance != subsys->instance) 4575 ida_free(&nvme_instance_ida, ctrl->instance); 4576 key_put(ctrl->tls_key); 4577 nvme_free_cels(ctrl); 4578 nvme_mpath_uninit(ctrl); 4579 nvme_auth_stop(ctrl); 4580 nvme_auth_free(ctrl); 4581 __free_page(ctrl->discard_page); 4582 free_opal_dev(ctrl->opal_dev); 4583 4584 if (subsys) { 4585 mutex_lock(&nvme_subsystems_lock); 4586 list_del(&ctrl->subsys_entry); 4587 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); 4588 mutex_unlock(&nvme_subsystems_lock); 4589 } 4590 4591 ctrl->ops->free_ctrl(ctrl); 4592 4593 if (subsys) 4594 nvme_put_subsystem(subsys); 4595 } 4596 4597 /* 4598 * Initialize an NVMe controller structure. This needs to be called during 4599 * earliest initialization so that we have the initialized structure around 4600 * during probing.
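 *
 * The matching teardown happens in nvme_uninit_ctrl() and, once the final
 * reference is dropped, in nvme_free_ctrl().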
4601 */ 4602 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, 4603 const struct nvme_ctrl_ops *ops, unsigned long quirks) 4604 { 4605 int ret; 4606 4607 WRITE_ONCE(ctrl->state, NVME_CTRL_NEW); 4608 ctrl->passthru_err_log_enabled = false; 4609 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); 4610 spin_lock_init(&ctrl->lock); 4611 mutex_init(&ctrl->scan_lock); 4612 INIT_LIST_HEAD(&ctrl->namespaces); 4613 xa_init(&ctrl->cels); 4614 init_rwsem(&ctrl->namespaces_rwsem); 4615 ctrl->dev = dev; 4616 ctrl->ops = ops; 4617 ctrl->quirks = quirks; 4618 ctrl->numa_node = NUMA_NO_NODE; 4619 INIT_WORK(&ctrl->scan_work, nvme_scan_work); 4620 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); 4621 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); 4622 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); 4623 init_waitqueue_head(&ctrl->state_wq); 4624 4625 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); 4626 INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work); 4627 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); 4628 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; 4629 ctrl->ka_last_check_time = jiffies; 4630 4631 BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) > 4632 PAGE_SIZE); 4633 ctrl->discard_page = alloc_page(GFP_KERNEL); 4634 if (!ctrl->discard_page) { 4635 ret = -ENOMEM; 4636 goto out; 4637 } 4638 4639 ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL); 4640 if (ret < 0) 4641 goto out; 4642 ctrl->instance = ret; 4643 4644 device_initialize(&ctrl->ctrl_device); 4645 ctrl->device = &ctrl->ctrl_device; 4646 ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt), 4647 ctrl->instance); 4648 ctrl->device->class = &nvme_class; 4649 ctrl->device->parent = ctrl->dev; 4650 if (ops->dev_attr_groups) 4651 ctrl->device->groups = ops->dev_attr_groups; 4652 else 4653 ctrl->device->groups = nvme_dev_attr_groups; 4654 ctrl->device->release = nvme_free_ctrl; 4655 dev_set_drvdata(ctrl->device, ctrl); 4656 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); 4657 if (ret) 4658 goto out_release_instance; 4659 4660 nvme_get_ctrl(ctrl); 4661 cdev_init(&ctrl->cdev, &nvme_dev_fops); 4662 ctrl->cdev.owner = ops->module; 4663 ret = cdev_device_add(&ctrl->cdev, ctrl->device); 4664 if (ret) 4665 goto out_free_name; 4666 4667 /* 4668 * Initialize latency tolerance controls. The sysfs files won't 4669 * be visible to userspace unless the device actually supports APST. 
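 *
 * The initial tolerance is taken from the default_ps_max_latency_us module
 * parameter, clamped to S32_MAX.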
4670 */ 4671 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; 4672 dev_pm_qos_update_user_latency_tolerance(ctrl->device, 4673 min(default_ps_max_latency_us, (unsigned long)S32_MAX)); 4674 4675 nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); 4676 nvme_mpath_init_ctrl(ctrl); 4677 ret = nvme_auth_init_ctrl(ctrl); 4678 if (ret) 4679 goto out_free_cdev; 4680 4681 return 0; 4682 out_free_cdev: 4683 nvme_fault_inject_fini(&ctrl->fault_inject); 4684 dev_pm_qos_hide_latency_tolerance(ctrl->device); 4685 cdev_device_del(&ctrl->cdev, ctrl->device); 4686 out_free_name: 4687 nvme_put_ctrl(ctrl); 4688 kfree_const(ctrl->device->kobj.name); 4689 out_release_instance: 4690 ida_free(&nvme_instance_ida, ctrl->instance); 4691 out: 4692 if (ctrl->discard_page) 4693 __free_page(ctrl->discard_page); 4694 return ret; 4695 } 4696 EXPORT_SYMBOL_GPL(nvme_init_ctrl); 4697 4698 /* let I/O to all namespaces fail in preparation for surprise removal */ 4699 void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl) 4700 { 4701 struct nvme_ns *ns; 4702 4703 down_read(&ctrl->namespaces_rwsem); 4704 list_for_each_entry(ns, &ctrl->namespaces, list) 4705 blk_mark_disk_dead(ns->disk); 4706 up_read(&ctrl->namespaces_rwsem); 4707 } 4708 EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead); 4709 4710 void nvme_unfreeze(struct nvme_ctrl *ctrl) 4711 { 4712 struct nvme_ns *ns; 4713 4714 down_read(&ctrl->namespaces_rwsem); 4715 list_for_each_entry(ns, &ctrl->namespaces, list) 4716 blk_mq_unfreeze_queue(ns->queue); 4717 up_read(&ctrl->namespaces_rwsem); 4718 clear_bit(NVME_CTRL_FROZEN, &ctrl->flags); 4719 } 4720 EXPORT_SYMBOL_GPL(nvme_unfreeze); 4721 4722 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) 4723 { 4724 struct nvme_ns *ns; 4725 4726 down_read(&ctrl->namespaces_rwsem); 4727 list_for_each_entry(ns, &ctrl->namespaces, list) { 4728 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); 4729 if (timeout <= 0) 4730 break; 4731 } 4732 up_read(&ctrl->namespaces_rwsem); 4733 return timeout; 4734 } 4735 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); 4736 4737 void nvme_wait_freeze(struct nvme_ctrl *ctrl) 4738 { 4739 struct nvme_ns *ns; 4740 4741 down_read(&ctrl->namespaces_rwsem); 4742 list_for_each_entry(ns, &ctrl->namespaces, list) 4743 blk_mq_freeze_queue_wait(ns->queue); 4744 up_read(&ctrl->namespaces_rwsem); 4745 } 4746 EXPORT_SYMBOL_GPL(nvme_wait_freeze); 4747 4748 void nvme_start_freeze(struct nvme_ctrl *ctrl) 4749 { 4750 struct nvme_ns *ns; 4751 4752 set_bit(NVME_CTRL_FROZEN, &ctrl->flags); 4753 down_read(&ctrl->namespaces_rwsem); 4754 list_for_each_entry(ns, &ctrl->namespaces, list) 4755 blk_freeze_queue_start(ns->queue); 4756 up_read(&ctrl->namespaces_rwsem); 4757 } 4758 EXPORT_SYMBOL_GPL(nvme_start_freeze); 4759 4760 void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl) 4761 { 4762 if (!ctrl->tagset) 4763 return; 4764 if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags)) 4765 blk_mq_quiesce_tagset(ctrl->tagset); 4766 else 4767 blk_mq_wait_quiesce_done(ctrl->tagset); 4768 } 4769 EXPORT_SYMBOL_GPL(nvme_quiesce_io_queues); 4770 4771 void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl) 4772 { 4773 if (!ctrl->tagset) 4774 return; 4775 if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags)) 4776 blk_mq_unquiesce_tagset(ctrl->tagset); 4777 } 4778 EXPORT_SYMBOL_GPL(nvme_unquiesce_io_queues); 4779 4780 void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl) 4781 { 4782 if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) 4783 blk_mq_quiesce_queue(ctrl->admin_q); 
4784 else 4785 blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set); 4786 } 4787 EXPORT_SYMBOL_GPL(nvme_quiesce_admin_queue); 4788 4789 void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl) 4790 { 4791 if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) 4792 blk_mq_unquiesce_queue(ctrl->admin_q); 4793 } 4794 EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue); 4795 4796 void nvme_sync_io_queues(struct nvme_ctrl *ctrl) 4797 { 4798 struct nvme_ns *ns; 4799 4800 down_read(&ctrl->namespaces_rwsem); 4801 list_for_each_entry(ns, &ctrl->namespaces, list) 4802 blk_sync_queue(ns->queue); 4803 up_read(&ctrl->namespaces_rwsem); 4804 } 4805 EXPORT_SYMBOL_GPL(nvme_sync_io_queues); 4806 4807 void nvme_sync_queues(struct nvme_ctrl *ctrl) 4808 { 4809 nvme_sync_io_queues(ctrl); 4810 if (ctrl->admin_q) 4811 blk_sync_queue(ctrl->admin_q); 4812 } 4813 EXPORT_SYMBOL_GPL(nvme_sync_queues); 4814 4815 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file) 4816 { 4817 if (file->f_op != &nvme_dev_fops) 4818 return NULL; 4819 return file->private_data; 4820 } 4821 EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU); 4822 4823 /* 4824 * Check we didn't inadvertently grow the command structure sizes: 4825 */ 4826 static inline void _nvme_check_size(void) 4827 { 4828 BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64); 4829 BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); 4830 BUILD_BUG_ON(sizeof(struct nvme_identify) != 64); 4831 BUILD_BUG_ON(sizeof(struct nvme_features) != 64); 4832 BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64); 4833 BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); 4834 BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64); 4835 BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64); 4836 BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64); 4837 BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64); 4838 BUILD_BUG_ON(sizeof(struct nvme_command) != 64); 4839 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE); 4840 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE); 4841 BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) != 4842 NVME_IDENTIFY_DATA_SIZE); 4843 BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE); 4844 BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE); 4845 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE); 4846 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE); 4847 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); 4848 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); 4849 BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); 4850 BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64); 4851 BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512); 4852 } 4853 4854 4855 static int __init nvme_core_init(void) 4856 { 4857 int result = -ENOMEM; 4858 4859 _nvme_check_size(); 4860 4861 nvme_wq = alloc_workqueue("nvme-wq", 4862 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 4863 if (!nvme_wq) 4864 goto out; 4865 4866 nvme_reset_wq = alloc_workqueue("nvme-reset-wq", 4867 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 4868 if (!nvme_reset_wq) 4869 goto destroy_wq; 4870 4871 nvme_delete_wq = alloc_workqueue("nvme-delete-wq", 4872 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 4873 if (!nvme_delete_wq) 4874 goto destroy_reset_wq; 4875 4876 result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0, 4877 NVME_MINORS, "nvme"); 4878 if (result < 0) 4879 goto destroy_delete_wq; 4880 4881 result = class_register(&nvme_class); 4882 if (result) 4883 goto 
unregister_chrdev; 4884 4885 result = class_register(&nvme_subsys_class); 4886 if (result) 4887 goto destroy_class; 4888 4889 result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS, 4890 "nvme-generic"); 4891 if (result < 0) 4892 goto destroy_subsys_class; 4893 4894 result = class_register(&nvme_ns_chr_class); 4895 if (result) 4896 goto unregister_generic_ns; 4897 4898 result = nvme_init_auth(); 4899 if (result) 4900 goto destroy_ns_chr; 4901 return 0; 4902 4903 destroy_ns_chr: 4904 class_unregister(&nvme_ns_chr_class); 4905 unregister_generic_ns: 4906 unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); 4907 destroy_subsys_class: 4908 class_unregister(&nvme_subsys_class); 4909 destroy_class: 4910 class_unregister(&nvme_class); 4911 unregister_chrdev: 4912 unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); 4913 destroy_delete_wq: 4914 destroy_workqueue(nvme_delete_wq); 4915 destroy_reset_wq: 4916 destroy_workqueue(nvme_reset_wq); 4917 destroy_wq: 4918 destroy_workqueue(nvme_wq); 4919 out: 4920 return result; 4921 } 4922 4923 static void __exit nvme_core_exit(void) 4924 { 4925 nvme_exit_auth(); 4926 class_unregister(&nvme_ns_chr_class); 4927 class_unregister(&nvme_subsys_class); 4928 class_unregister(&nvme_class); 4929 unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); 4930 unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); 4931 destroy_workqueue(nvme_delete_wq); 4932 destroy_workqueue(nvme_reset_wq); 4933 destroy_workqueue(nvme_wq); 4934 ida_destroy(&nvme_ns_chr_minor_ida); 4935 ida_destroy(&nvme_instance_ida); 4936 } 4937 4938 MODULE_LICENSE("GPL"); 4939 MODULE_VERSION("1.0"); 4940 MODULE_DESCRIPTION("NVMe host core framework"); 4941 module_init(nvme_core_init); 4942 module_exit(nvme_core_exit); 4943