1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * NVM Express device driver 4 * Copyright (c) 2011-2014, Intel Corporation. 5 */ 6 7 #include <linux/async.h> 8 #include <linux/blkdev.h> 9 #include <linux/blk-mq.h> 10 #include <linux/blk-integrity.h> 11 #include <linux/compat.h> 12 #include <linux/delay.h> 13 #include <linux/errno.h> 14 #include <linux/hdreg.h> 15 #include <linux/kernel.h> 16 #include <linux/module.h> 17 #include <linux/backing-dev.h> 18 #include <linux/slab.h> 19 #include <linux/types.h> 20 #include <linux/pr.h> 21 #include <linux/ptrace.h> 22 #include <linux/nvme_ioctl.h> 23 #include <linux/pm_qos.h> 24 #include <linux/ratelimit.h> 25 #include <linux/unaligned.h> 26 27 #include "nvme.h" 28 #include "fabrics.h" 29 #include <linux/nvme-auth.h> 30 31 #define CREATE_TRACE_POINTS 32 #include "trace.h" 33 34 #define NVME_MINORS (1U << MINORBITS) 35 36 struct nvme_ns_info { 37 struct nvme_ns_ids ids; 38 u32 nsid; 39 __le32 anagrpid; 40 u8 pi_offset; 41 bool is_shared; 42 bool is_readonly; 43 bool is_ready; 44 bool is_removed; 45 bool is_rotational; 46 bool no_vwc; 47 }; 48 49 unsigned int admin_timeout = 60; 50 module_param(admin_timeout, uint, 0644); 51 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands"); 52 EXPORT_SYMBOL_GPL(admin_timeout); 53 54 unsigned int nvme_io_timeout = 30; 55 module_param_named(io_timeout, nvme_io_timeout, uint, 0644); 56 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O"); 57 EXPORT_SYMBOL_GPL(nvme_io_timeout); 58 59 static unsigned char shutdown_timeout = 5; 60 module_param(shutdown_timeout, byte, 0644); 61 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown"); 62 63 static u8 nvme_max_retries = 5; 64 module_param_named(max_retries, nvme_max_retries, byte, 0644); 65 MODULE_PARM_DESC(max_retries, "max number of retries a command may have"); 66 67 static unsigned long default_ps_max_latency_us = 100000; 68 module_param(default_ps_max_latency_us, ulong, 0644); 69 MODULE_PARM_DESC(default_ps_max_latency_us, 70 "max power saving latency for new devices; use PM QOS to change per device"); 71 72 static bool force_apst; 73 module_param(force_apst, bool, 0644); 74 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off"); 75 76 static unsigned long apst_primary_timeout_ms = 100; 77 module_param(apst_primary_timeout_ms, ulong, 0644); 78 MODULE_PARM_DESC(apst_primary_timeout_ms, 79 "primary APST timeout in ms"); 80 81 static unsigned long apst_secondary_timeout_ms = 2000; 82 module_param(apst_secondary_timeout_ms, ulong, 0644); 83 MODULE_PARM_DESC(apst_secondary_timeout_ms, 84 "secondary APST timeout in ms"); 85 86 static unsigned long apst_primary_latency_tol_us = 15000; 87 module_param(apst_primary_latency_tol_us, ulong, 0644); 88 MODULE_PARM_DESC(apst_primary_latency_tol_us, 89 "primary APST latency tolerance in us"); 90 91 static unsigned long apst_secondary_latency_tol_us = 100000; 92 module_param(apst_secondary_latency_tol_us, ulong, 0644); 93 MODULE_PARM_DESC(apst_secondary_latency_tol_us, 94 "secondary APST latency tolerance in us"); 95 96 /* 97 * Older kernels didn't enable protection information if it was at an offset. 98 * Newer kernels do, so it breaks reads on the upgrade if such formats were 99 * used in prior kernels since the metadata written did not contain a valid 100 * checksum. 101 */ 102 static bool disable_pi_offsets = false; 103 module_param(disable_pi_offsets, bool, 0444); 104 MODULE_PARM_DESC(disable_pi_offsets, 105 "disable protection information if it has an offset"); 106 107 /* 108 * nvme_wq - hosts nvme related works that are not reset or delete 109 * nvme_reset_wq - hosts nvme reset works 110 * nvme_delete_wq - hosts nvme delete works 111 * 112 * nvme_wq will host works such as scan, aen handling, fw activation, 113 * keep-alive, periodic reconnects etc. nvme_reset_wq 114 * runs reset works which also flush works hosted on nvme_wq for 115 * serialization purposes. nvme_delete_wq host controller deletion 116 * works which flush reset works for serialization. 117 */ 118 struct workqueue_struct *nvme_wq; 119 EXPORT_SYMBOL_GPL(nvme_wq); 120 121 struct workqueue_struct *nvme_reset_wq; 122 EXPORT_SYMBOL_GPL(nvme_reset_wq); 123 124 struct workqueue_struct *nvme_delete_wq; 125 EXPORT_SYMBOL_GPL(nvme_delete_wq); 126 127 static LIST_HEAD(nvme_subsystems); 128 DEFINE_MUTEX(nvme_subsystems_lock); 129 130 static DEFINE_IDA(nvme_instance_ida); 131 static dev_t nvme_ctrl_base_chr_devt; 132 static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env); 133 static const struct class nvme_class = { 134 .name = "nvme", 135 .dev_uevent = nvme_class_uevent, 136 }; 137 138 static const struct class nvme_subsys_class = { 139 .name = "nvme-subsystem", 140 }; 141 142 static DEFINE_IDA(nvme_ns_chr_minor_ida); 143 static dev_t nvme_ns_chr_devt; 144 static const struct class nvme_ns_chr_class = { 145 .name = "nvme-generic", 146 }; 147 148 static void nvme_put_subsystem(struct nvme_subsystem *subsys); 149 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 150 unsigned nsid); 151 static void nvme_update_keep_alive(struct nvme_ctrl *ctrl, 152 struct nvme_command *cmd); 153 154 void nvme_queue_scan(struct nvme_ctrl *ctrl) 155 { 156 /* 157 * Only new queue scan work when admin and IO queues are both alive 158 */ 159 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset) 160 queue_work(nvme_wq, &ctrl->scan_work); 161 } 162 163 /* 164 * Use this function to proceed with scheduling reset_work for a controller 165 * that had previously been set to the resetting state. This is intended for 166 * code paths that can't be interrupted by other reset attempts. A hot removal 167 * may prevent this from succeeding. 168 */ 169 int nvme_try_sched_reset(struct nvme_ctrl *ctrl) 170 { 171 if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING) 172 return -EBUSY; 173 if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) 174 return -EBUSY; 175 return 0; 176 } 177 EXPORT_SYMBOL_GPL(nvme_try_sched_reset); 178 179 static void nvme_failfast_work(struct work_struct *work) 180 { 181 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), 182 struct nvme_ctrl, failfast_work); 183 184 if (nvme_ctrl_state(ctrl) != NVME_CTRL_CONNECTING) 185 return; 186 187 set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); 188 dev_info(ctrl->device, "failfast expired\n"); 189 nvme_kick_requeue_lists(ctrl); 190 } 191 192 static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl) 193 { 194 if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1) 195 return; 196 197 schedule_delayed_work(&ctrl->failfast_work, 198 ctrl->opts->fast_io_fail_tmo * HZ); 199 } 200 201 static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl) 202 { 203 if (!ctrl->opts) 204 return; 205 206 cancel_delayed_work_sync(&ctrl->failfast_work); 207 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); 208 } 209 210 211 int nvme_reset_ctrl(struct nvme_ctrl *ctrl) 212 { 213 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) 214 return -EBUSY; 215 if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) 216 return -EBUSY; 217 return 0; 218 } 219 EXPORT_SYMBOL_GPL(nvme_reset_ctrl); 220 221 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) 222 { 223 int ret; 224 225 ret = nvme_reset_ctrl(ctrl); 226 if (!ret) { 227 flush_work(&ctrl->reset_work); 228 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) 229 ret = -ENETRESET; 230 } 231 232 return ret; 233 } 234 235 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl) 236 { 237 dev_info(ctrl->device, 238 "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl)); 239 240 flush_work(&ctrl->reset_work); 241 nvme_stop_ctrl(ctrl); 242 nvme_remove_namespaces(ctrl); 243 ctrl->ops->delete_ctrl(ctrl); 244 nvme_uninit_ctrl(ctrl); 245 } 246 247 static void nvme_delete_ctrl_work(struct work_struct *work) 248 { 249 struct nvme_ctrl *ctrl = 250 container_of(work, struct nvme_ctrl, delete_work); 251 252 nvme_do_delete_ctrl(ctrl); 253 } 254 255 int nvme_delete_ctrl(struct nvme_ctrl *ctrl) 256 { 257 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) 258 return -EBUSY; 259 if (!queue_work(nvme_delete_wq, &ctrl->delete_work)) 260 return -EBUSY; 261 return 0; 262 } 263 EXPORT_SYMBOL_GPL(nvme_delete_ctrl); 264 265 void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) 266 { 267 /* 268 * Keep a reference until nvme_do_delete_ctrl() complete, 269 * since ->delete_ctrl can free the controller. 270 */ 271 nvme_get_ctrl(ctrl); 272 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) 273 nvme_do_delete_ctrl(ctrl); 274 nvme_put_ctrl(ctrl); 275 } 276 277 static blk_status_t nvme_error_status(u16 status) 278 { 279 switch (status & NVME_SCT_SC_MASK) { 280 case NVME_SC_SUCCESS: 281 return BLK_STS_OK; 282 case NVME_SC_CAP_EXCEEDED: 283 return BLK_STS_NOSPC; 284 case NVME_SC_LBA_RANGE: 285 case NVME_SC_CMD_INTERRUPTED: 286 case NVME_SC_NS_NOT_READY: 287 return BLK_STS_TARGET; 288 case NVME_SC_BAD_ATTRIBUTES: 289 case NVME_SC_ONCS_NOT_SUPPORTED: 290 case NVME_SC_INVALID_OPCODE: 291 case NVME_SC_INVALID_FIELD: 292 case NVME_SC_INVALID_NS: 293 return BLK_STS_NOTSUPP; 294 case NVME_SC_WRITE_FAULT: 295 case NVME_SC_READ_ERROR: 296 case NVME_SC_UNWRITTEN_BLOCK: 297 case NVME_SC_ACCESS_DENIED: 298 case NVME_SC_READ_ONLY: 299 case NVME_SC_COMPARE_FAILED: 300 return BLK_STS_MEDIUM; 301 case NVME_SC_GUARD_CHECK: 302 case NVME_SC_APPTAG_CHECK: 303 case NVME_SC_REFTAG_CHECK: 304 case NVME_SC_INVALID_PI: 305 return BLK_STS_PROTECTION; 306 case NVME_SC_RESERVATION_CONFLICT: 307 return BLK_STS_RESV_CONFLICT; 308 case NVME_SC_HOST_PATH_ERROR: 309 return BLK_STS_TRANSPORT; 310 case NVME_SC_ZONE_TOO_MANY_ACTIVE: 311 return BLK_STS_ZONE_ACTIVE_RESOURCE; 312 case NVME_SC_ZONE_TOO_MANY_OPEN: 313 return BLK_STS_ZONE_OPEN_RESOURCE; 314 default: 315 return BLK_STS_IOERR; 316 } 317 } 318 319 static void nvme_retry_req(struct request *req) 320 { 321 unsigned long delay = 0; 322 u16 crd; 323 324 /* The mask and shift result must be <= 3 */ 325 crd = (nvme_req(req)->status & NVME_STATUS_CRD) >> 11; 326 if (crd) 327 delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100; 328 329 nvme_req(req)->retries++; 330 blk_mq_requeue_request(req, false); 331 blk_mq_delay_kick_requeue_list(req->q, delay); 332 } 333 334 static void nvme_log_error(struct request *req) 335 { 336 struct nvme_ns *ns = req->q->queuedata; 337 struct nvme_request *nr = nvme_req(req); 338 339 if (ns) { 340 pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %u blocks, %s (sct 0x%x / sc 0x%x) %s%s\n", 341 ns->disk ? ns->disk->disk_name : "?", 342 nvme_get_opcode_str(nr->cmd->common.opcode), 343 nr->cmd->common.opcode, 344 nvme_sect_to_lba(ns->head, blk_rq_pos(req)), 345 blk_rq_bytes(req) >> ns->head->lba_shift, 346 nvme_get_error_status_str(nr->status), 347 NVME_SCT(nr->status), /* Status Code Type */ 348 nr->status & NVME_SC_MASK, /* Status Code */ 349 nr->status & NVME_STATUS_MORE ? "MORE " : "", 350 nr->status & NVME_STATUS_DNR ? "DNR " : ""); 351 return; 352 } 353 354 pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n", 355 dev_name(nr->ctrl->device), 356 nvme_get_admin_opcode_str(nr->cmd->common.opcode), 357 nr->cmd->common.opcode, 358 nvme_get_error_status_str(nr->status), 359 NVME_SCT(nr->status), /* Status Code Type */ 360 nr->status & NVME_SC_MASK, /* Status Code */ 361 nr->status & NVME_STATUS_MORE ? "MORE " : "", 362 nr->status & NVME_STATUS_DNR ? "DNR " : ""); 363 } 364 365 static void nvme_log_err_passthru(struct request *req) 366 { 367 struct nvme_ns *ns = req->q->queuedata; 368 struct nvme_request *nr = nvme_req(req); 369 370 pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s" 371 "cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw13=0x%x cdw14=0x%x cdw15=0x%x\n", 372 ns ? ns->disk->disk_name : dev_name(nr->ctrl->device), 373 ns ? nvme_get_opcode_str(nr->cmd->common.opcode) : 374 nvme_get_admin_opcode_str(nr->cmd->common.opcode), 375 nr->cmd->common.opcode, 376 nvme_get_error_status_str(nr->status), 377 NVME_SCT(nr->status), /* Status Code Type */ 378 nr->status & NVME_SC_MASK, /* Status Code */ 379 nr->status & NVME_STATUS_MORE ? "MORE " : "", 380 nr->status & NVME_STATUS_DNR ? "DNR " : "", 381 nr->cmd->common.cdw10, 382 nr->cmd->common.cdw11, 383 nr->cmd->common.cdw12, 384 nr->cmd->common.cdw13, 385 nr->cmd->common.cdw14, 386 nr->cmd->common.cdw14); 387 } 388 389 enum nvme_disposition { 390 COMPLETE, 391 RETRY, 392 FAILOVER, 393 AUTHENTICATE, 394 }; 395 396 static inline enum nvme_disposition nvme_decide_disposition(struct request *req) 397 { 398 if (likely(nvme_req(req)->status == 0)) 399 return COMPLETE; 400 401 if (blk_noretry_request(req) || 402 (nvme_req(req)->status & NVME_STATUS_DNR) || 403 nvme_req(req)->retries >= nvme_max_retries) 404 return COMPLETE; 405 406 if ((nvme_req(req)->status & NVME_SCT_SC_MASK) == NVME_SC_AUTH_REQUIRED) 407 return AUTHENTICATE; 408 409 if (req->cmd_flags & REQ_NVME_MPATH) { 410 if (nvme_is_path_error(nvme_req(req)->status) || 411 blk_queue_dying(req->q)) 412 return FAILOVER; 413 } else { 414 if (blk_queue_dying(req->q)) 415 return COMPLETE; 416 } 417 418 return RETRY; 419 } 420 421 static inline void nvme_end_req_zoned(struct request *req) 422 { 423 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && 424 req_op(req) == REQ_OP_ZONE_APPEND) { 425 struct nvme_ns *ns = req->q->queuedata; 426 427 req->__sector = nvme_lba_to_sect(ns->head, 428 le64_to_cpu(nvme_req(req)->result.u64)); 429 } 430 } 431 432 static inline void __nvme_end_req(struct request *req) 433 { 434 nvme_end_req_zoned(req); 435 nvme_trace_bio_complete(req); 436 if (req->cmd_flags & REQ_NVME_MPATH) 437 nvme_mpath_end_request(req); 438 } 439 440 void nvme_end_req(struct request *req) 441 { 442 blk_status_t status = nvme_error_status(nvme_req(req)->status); 443 444 if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) { 445 if (blk_rq_is_passthrough(req)) 446 nvme_log_err_passthru(req); 447 else 448 nvme_log_error(req); 449 } 450 __nvme_end_req(req); 451 blk_mq_end_request(req, status); 452 } 453 454 void nvme_complete_rq(struct request *req) 455 { 456 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; 457 458 trace_nvme_complete_rq(req); 459 nvme_cleanup_cmd(req); 460 461 /* 462 * Completions of long-running commands should not be able to 463 * defer sending of periodic keep alives, since the controller 464 * may have completed processing such commands a long time ago 465 * (arbitrarily close to command submission time). 466 * req->deadline - req->timeout is the command submission time 467 * in jiffies. 468 */ 469 if (ctrl->kas && 470 req->deadline - req->timeout >= ctrl->ka_last_check_time) 471 ctrl->comp_seen = true; 472 473 switch (nvme_decide_disposition(req)) { 474 case COMPLETE: 475 nvme_end_req(req); 476 return; 477 case RETRY: 478 nvme_retry_req(req); 479 return; 480 case FAILOVER: 481 nvme_failover_req(req); 482 return; 483 case AUTHENTICATE: 484 #ifdef CONFIG_NVME_HOST_AUTH 485 queue_work(nvme_wq, &ctrl->dhchap_auth_work); 486 nvme_retry_req(req); 487 #else 488 nvme_end_req(req); 489 #endif 490 return; 491 } 492 } 493 EXPORT_SYMBOL_GPL(nvme_complete_rq); 494 495 void nvme_complete_batch_req(struct request *req) 496 { 497 trace_nvme_complete_rq(req); 498 nvme_cleanup_cmd(req); 499 __nvme_end_req(req); 500 } 501 EXPORT_SYMBOL_GPL(nvme_complete_batch_req); 502 503 /* 504 * Called to unwind from ->queue_rq on a failed command submission so that the 505 * multipathing code gets called to potentially failover to another path. 506 * The caller needs to unwind all transport specific resource allocations and 507 * must return propagate the return value. 508 */ 509 blk_status_t nvme_host_path_error(struct request *req) 510 { 511 nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR; 512 blk_mq_set_request_complete(req); 513 nvme_complete_rq(req); 514 return BLK_STS_OK; 515 } 516 EXPORT_SYMBOL_GPL(nvme_host_path_error); 517 518 bool nvme_cancel_request(struct request *req, void *data) 519 { 520 dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device, 521 "Cancelling I/O %d", req->tag); 522 523 /* don't abort one completed or idle request */ 524 if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) 525 return true; 526 527 nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; 528 nvme_req(req)->flags |= NVME_REQ_CANCELLED; 529 blk_mq_complete_request(req); 530 return true; 531 } 532 EXPORT_SYMBOL_GPL(nvme_cancel_request); 533 534 void nvme_cancel_tagset(struct nvme_ctrl *ctrl) 535 { 536 if (ctrl->tagset) { 537 blk_mq_tagset_busy_iter(ctrl->tagset, 538 nvme_cancel_request, ctrl); 539 blk_mq_tagset_wait_completed_request(ctrl->tagset); 540 } 541 } 542 EXPORT_SYMBOL_GPL(nvme_cancel_tagset); 543 544 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl) 545 { 546 if (ctrl->admin_tagset) { 547 blk_mq_tagset_busy_iter(ctrl->admin_tagset, 548 nvme_cancel_request, ctrl); 549 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset); 550 } 551 } 552 EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset); 553 554 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, 555 enum nvme_ctrl_state new_state) 556 { 557 enum nvme_ctrl_state old_state; 558 unsigned long flags; 559 bool changed = false; 560 561 spin_lock_irqsave(&ctrl->lock, flags); 562 563 old_state = nvme_ctrl_state(ctrl); 564 switch (new_state) { 565 case NVME_CTRL_LIVE: 566 switch (old_state) { 567 case NVME_CTRL_NEW: 568 case NVME_CTRL_RESETTING: 569 case NVME_CTRL_CONNECTING: 570 changed = true; 571 fallthrough; 572 default: 573 break; 574 } 575 break; 576 case NVME_CTRL_RESETTING: 577 switch (old_state) { 578 case NVME_CTRL_NEW: 579 case NVME_CTRL_LIVE: 580 changed = true; 581 fallthrough; 582 default: 583 break; 584 } 585 break; 586 case NVME_CTRL_CONNECTING: 587 switch (old_state) { 588 case NVME_CTRL_NEW: 589 case NVME_CTRL_RESETTING: 590 changed = true; 591 fallthrough; 592 default: 593 break; 594 } 595 break; 596 case NVME_CTRL_DELETING: 597 switch (old_state) { 598 case NVME_CTRL_LIVE: 599 case NVME_CTRL_RESETTING: 600 case NVME_CTRL_CONNECTING: 601 changed = true; 602 fallthrough; 603 default: 604 break; 605 } 606 break; 607 case NVME_CTRL_DELETING_NOIO: 608 switch (old_state) { 609 case NVME_CTRL_DELETING: 610 case NVME_CTRL_DEAD: 611 changed = true; 612 fallthrough; 613 default: 614 break; 615 } 616 break; 617 case NVME_CTRL_DEAD: 618 switch (old_state) { 619 case NVME_CTRL_DELETING: 620 changed = true; 621 fallthrough; 622 default: 623 break; 624 } 625 break; 626 default: 627 break; 628 } 629 630 if (changed) { 631 WRITE_ONCE(ctrl->state, new_state); 632 wake_up_all(&ctrl->state_wq); 633 } 634 635 spin_unlock_irqrestore(&ctrl->lock, flags); 636 if (!changed) 637 return false; 638 639 if (new_state == NVME_CTRL_LIVE) { 640 if (old_state == NVME_CTRL_CONNECTING) 641 nvme_stop_failfast_work(ctrl); 642 nvme_kick_requeue_lists(ctrl); 643 } else if (new_state == NVME_CTRL_CONNECTING && 644 old_state == NVME_CTRL_RESETTING) { 645 nvme_start_failfast_work(ctrl); 646 } 647 return changed; 648 } 649 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); 650 651 /* 652 * Waits for the controller state to be resetting, or returns false if it is 653 * not possible to ever transition to that state. 654 */ 655 bool nvme_wait_reset(struct nvme_ctrl *ctrl) 656 { 657 wait_event(ctrl->state_wq, 658 nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) || 659 nvme_state_terminal(ctrl)); 660 return nvme_ctrl_state(ctrl) == NVME_CTRL_RESETTING; 661 } 662 EXPORT_SYMBOL_GPL(nvme_wait_reset); 663 664 static void nvme_free_ns_head(struct kref *ref) 665 { 666 struct nvme_ns_head *head = 667 container_of(ref, struct nvme_ns_head, ref); 668 669 nvme_mpath_remove_disk(head); 670 ida_free(&head->subsys->ns_ida, head->instance); 671 cleanup_srcu_struct(&head->srcu); 672 nvme_put_subsystem(head->subsys); 673 kfree(head); 674 } 675 676 bool nvme_tryget_ns_head(struct nvme_ns_head *head) 677 { 678 return kref_get_unless_zero(&head->ref); 679 } 680 681 void nvme_put_ns_head(struct nvme_ns_head *head) 682 { 683 kref_put(&head->ref, nvme_free_ns_head); 684 } 685 686 static void nvme_free_ns(struct kref *kref) 687 { 688 struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref); 689 690 put_disk(ns->disk); 691 nvme_put_ns_head(ns->head); 692 nvme_put_ctrl(ns->ctrl); 693 kfree(ns); 694 } 695 696 bool nvme_get_ns(struct nvme_ns *ns) 697 { 698 return kref_get_unless_zero(&ns->kref); 699 } 700 701 void nvme_put_ns(struct nvme_ns *ns) 702 { 703 kref_put(&ns->kref, nvme_free_ns); 704 } 705 EXPORT_SYMBOL_NS_GPL(nvme_put_ns, "NVME_TARGET_PASSTHRU"); 706 707 static inline void nvme_clear_nvme_request(struct request *req) 708 { 709 nvme_req(req)->status = 0; 710 nvme_req(req)->retries = 0; 711 nvme_req(req)->flags = 0; 712 req->rq_flags |= RQF_DONTPREP; 713 } 714 715 /* initialize a passthrough request */ 716 void nvme_init_request(struct request *req, struct nvme_command *cmd) 717 { 718 struct nvme_request *nr = nvme_req(req); 719 bool logging_enabled; 720 721 if (req->q->queuedata) { 722 struct nvme_ns *ns = req->q->disk->private_data; 723 724 logging_enabled = ns->head->passthru_err_log_enabled; 725 req->timeout = NVME_IO_TIMEOUT; 726 } else { /* no queuedata implies admin queue */ 727 logging_enabled = nr->ctrl->passthru_err_log_enabled; 728 req->timeout = NVME_ADMIN_TIMEOUT; 729 } 730 731 if (!logging_enabled) 732 req->rq_flags |= RQF_QUIET; 733 734 /* passthru commands should let the driver set the SGL flags */ 735 cmd->common.flags &= ~NVME_CMD_SGL_ALL; 736 737 req->cmd_flags |= REQ_FAILFAST_DRIVER; 738 if (req->mq_hctx->type == HCTX_TYPE_POLL) 739 req->cmd_flags |= REQ_POLLED; 740 nvme_clear_nvme_request(req); 741 memcpy(nr->cmd, cmd, sizeof(*cmd)); 742 } 743 EXPORT_SYMBOL_GPL(nvme_init_request); 744 745 /* 746 * For something we're not in a state to send to the device the default action 747 * is to busy it and retry it after the controller state is recovered. However, 748 * if the controller is deleting or if anything is marked for failfast or 749 * nvme multipath it is immediately failed. 750 * 751 * Note: commands used to initialize the controller will be marked for failfast. 752 * Note: nvme cli/ioctl commands are marked for failfast. 753 */ 754 blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl, 755 struct request *rq) 756 { 757 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); 758 759 if (state != NVME_CTRL_DELETING_NOIO && 760 state != NVME_CTRL_DELETING && 761 state != NVME_CTRL_DEAD && 762 !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) && 763 !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) 764 return BLK_STS_RESOURCE; 765 return nvme_host_path_error(rq); 766 } 767 EXPORT_SYMBOL_GPL(nvme_fail_nonready_command); 768 769 bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, 770 bool queue_live, enum nvme_ctrl_state state) 771 { 772 struct nvme_request *req = nvme_req(rq); 773 774 /* 775 * currently we have a problem sending passthru commands 776 * on the admin_q if the controller is not LIVE because we can't 777 * make sure that they are going out after the admin connect, 778 * controller enable and/or other commands in the initialization 779 * sequence. until the controller will be LIVE, fail with 780 * BLK_STS_RESOURCE so that they will be rescheduled. 781 */ 782 if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD)) 783 return false; 784 785 if (ctrl->ops->flags & NVME_F_FABRICS) { 786 /* 787 * Only allow commands on a live queue, except for the connect 788 * command, which is require to set the queue live in the 789 * appropinquate states. 790 */ 791 switch (state) { 792 case NVME_CTRL_CONNECTING: 793 if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) && 794 (req->cmd->fabrics.fctype == nvme_fabrics_type_connect || 795 req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send || 796 req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive)) 797 return true; 798 break; 799 default: 800 break; 801 case NVME_CTRL_DEAD: 802 return false; 803 } 804 } 805 806 return queue_live; 807 } 808 EXPORT_SYMBOL_GPL(__nvme_check_ready); 809 810 static inline void nvme_setup_flush(struct nvme_ns *ns, 811 struct nvme_command *cmnd) 812 { 813 memset(cmnd, 0, sizeof(*cmnd)); 814 cmnd->common.opcode = nvme_cmd_flush; 815 cmnd->common.nsid = cpu_to_le32(ns->head->ns_id); 816 } 817 818 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, 819 struct nvme_command *cmnd) 820 { 821 unsigned short segments = blk_rq_nr_discard_segments(req), n = 0; 822 struct nvme_dsm_range *range; 823 struct bio *bio; 824 825 /* 826 * Some devices do not consider the DSM 'Number of Ranges' field when 827 * determining how much data to DMA. Always allocate memory for maximum 828 * number of segments to prevent device reading beyond end of buffer. 829 */ 830 static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES; 831 832 range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN); 833 if (!range) { 834 /* 835 * If we fail allocation our range, fallback to the controller 836 * discard page. If that's also busy, it's safe to return 837 * busy, as we know we can make progress once that's freed. 838 */ 839 if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy)) 840 return BLK_STS_RESOURCE; 841 842 range = page_address(ns->ctrl->discard_page); 843 } 844 845 if (queue_max_discard_segments(req->q) == 1) { 846 u64 slba = nvme_sect_to_lba(ns->head, blk_rq_pos(req)); 847 u32 nlb = blk_rq_sectors(req) >> (ns->head->lba_shift - 9); 848 849 range[0].cattr = cpu_to_le32(0); 850 range[0].nlb = cpu_to_le32(nlb); 851 range[0].slba = cpu_to_le64(slba); 852 n = 1; 853 } else { 854 __rq_for_each_bio(bio, req) { 855 u64 slba = nvme_sect_to_lba(ns->head, 856 bio->bi_iter.bi_sector); 857 u32 nlb = bio->bi_iter.bi_size >> ns->head->lba_shift; 858 859 if (n < segments) { 860 range[n].cattr = cpu_to_le32(0); 861 range[n].nlb = cpu_to_le32(nlb); 862 range[n].slba = cpu_to_le64(slba); 863 } 864 n++; 865 } 866 } 867 868 if (WARN_ON_ONCE(n != segments)) { 869 if (virt_to_page(range) == ns->ctrl->discard_page) 870 clear_bit_unlock(0, &ns->ctrl->discard_page_busy); 871 else 872 kfree(range); 873 return BLK_STS_IOERR; 874 } 875 876 memset(cmnd, 0, sizeof(*cmnd)); 877 cmnd->dsm.opcode = nvme_cmd_dsm; 878 cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id); 879 cmnd->dsm.nr = cpu_to_le32(segments - 1); 880 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); 881 882 bvec_set_virt(&req->special_vec, range, alloc_size); 883 req->rq_flags |= RQF_SPECIAL_PAYLOAD; 884 885 return BLK_STS_OK; 886 } 887 888 static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd, 889 struct request *req) 890 { 891 u32 upper, lower; 892 u64 ref48; 893 894 /* both rw and write zeroes share the same reftag format */ 895 switch (ns->head->guard_type) { 896 case NVME_NVM_NS_16B_GUARD: 897 cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req)); 898 break; 899 case NVME_NVM_NS_64B_GUARD: 900 ref48 = ext_pi_ref_tag(req); 901 lower = lower_32_bits(ref48); 902 upper = upper_32_bits(ref48); 903 904 cmnd->rw.reftag = cpu_to_le32(lower); 905 cmnd->rw.cdw3 = cpu_to_le32(upper); 906 break; 907 default: 908 break; 909 } 910 } 911 912 static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, 913 struct request *req, struct nvme_command *cmnd) 914 { 915 memset(cmnd, 0, sizeof(*cmnd)); 916 917 if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) 918 return nvme_setup_discard(ns, req, cmnd); 919 920 cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes; 921 cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id); 922 cmnd->write_zeroes.slba = 923 cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req))); 924 cmnd->write_zeroes.length = 925 cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1); 926 927 if (!(req->cmd_flags & REQ_NOUNMAP) && 928 (ns->head->features & NVME_NS_DEAC)) 929 cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC); 930 931 if (nvme_ns_has_pi(ns->head)) { 932 cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT); 933 934 switch (ns->head->pi_type) { 935 case NVME_NS_DPS_PI_TYPE1: 936 case NVME_NS_DPS_PI_TYPE2: 937 nvme_set_ref_tag(ns, cmnd, req); 938 break; 939 } 940 } 941 942 return BLK_STS_OK; 943 } 944 945 /* 946 * NVMe does not support a dedicated command to issue an atomic write. A write 947 * which does adhere to the device atomic limits will silently be executed 948 * non-atomically. The request issuer should ensure that the write is within 949 * the queue atomic writes limits, but just validate this in case it is not. 950 */ 951 static bool nvme_valid_atomic_write(struct request *req) 952 { 953 struct request_queue *q = req->q; 954 u32 boundary_bytes = queue_atomic_write_boundary_bytes(q); 955 956 if (blk_rq_bytes(req) > queue_atomic_write_unit_max_bytes(q)) 957 return false; 958 959 if (boundary_bytes) { 960 u64 mask = boundary_bytes - 1, imask = ~mask; 961 u64 start = blk_rq_pos(req) << SECTOR_SHIFT; 962 u64 end = start + blk_rq_bytes(req) - 1; 963 964 /* If greater then must be crossing a boundary */ 965 if (blk_rq_bytes(req) > boundary_bytes) 966 return false; 967 968 if ((start & imask) != (end & imask)) 969 return false; 970 } 971 972 return true; 973 } 974 975 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, 976 struct request *req, struct nvme_command *cmnd, 977 enum nvme_opcode op) 978 { 979 u16 control = 0; 980 u32 dsmgmt = 0; 981 982 if (req->cmd_flags & REQ_FUA) 983 control |= NVME_RW_FUA; 984 if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD)) 985 control |= NVME_RW_LR; 986 987 if (req->cmd_flags & REQ_RAHEAD) 988 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; 989 990 if (req->cmd_flags & REQ_ATOMIC && !nvme_valid_atomic_write(req)) 991 return BLK_STS_INVAL; 992 993 cmnd->rw.opcode = op; 994 cmnd->rw.flags = 0; 995 cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); 996 cmnd->rw.cdw2 = 0; 997 cmnd->rw.cdw3 = 0; 998 cmnd->rw.metadata = 0; 999 cmnd->rw.slba = 1000 cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req))); 1001 cmnd->rw.length = 1002 cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1); 1003 cmnd->rw.reftag = 0; 1004 cmnd->rw.lbat = 0; 1005 cmnd->rw.lbatm = 0; 1006 1007 if (ns->head->ms) { 1008 /* 1009 * If formated with metadata, the block layer always provides a 1010 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else 1011 * we enable the PRACT bit for protection information or set the 1012 * namespace capacity to zero to prevent any I/O. 1013 */ 1014 if (!blk_integrity_rq(req)) { 1015 if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head))) 1016 return BLK_STS_NOTSUPP; 1017 control |= NVME_RW_PRINFO_PRACT; 1018 } 1019 1020 switch (ns->head->pi_type) { 1021 case NVME_NS_DPS_PI_TYPE3: 1022 control |= NVME_RW_PRINFO_PRCHK_GUARD; 1023 break; 1024 case NVME_NS_DPS_PI_TYPE1: 1025 case NVME_NS_DPS_PI_TYPE2: 1026 control |= NVME_RW_PRINFO_PRCHK_GUARD | 1027 NVME_RW_PRINFO_PRCHK_REF; 1028 if (op == nvme_cmd_zone_append) 1029 control |= NVME_RW_APPEND_PIREMAP; 1030 nvme_set_ref_tag(ns, cmnd, req); 1031 break; 1032 } 1033 } 1034 1035 cmnd->rw.control = cpu_to_le16(control); 1036 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); 1037 return 0; 1038 } 1039 1040 void nvme_cleanup_cmd(struct request *req) 1041 { 1042 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { 1043 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; 1044 1045 if (req->special_vec.bv_page == ctrl->discard_page) 1046 clear_bit_unlock(0, &ctrl->discard_page_busy); 1047 else 1048 kfree(bvec_virt(&req->special_vec)); 1049 req->rq_flags &= ~RQF_SPECIAL_PAYLOAD; 1050 } 1051 } 1052 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd); 1053 1054 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req) 1055 { 1056 struct nvme_command *cmd = nvme_req(req)->cmd; 1057 blk_status_t ret = BLK_STS_OK; 1058 1059 if (!(req->rq_flags & RQF_DONTPREP)) 1060 nvme_clear_nvme_request(req); 1061 1062 switch (req_op(req)) { 1063 case REQ_OP_DRV_IN: 1064 case REQ_OP_DRV_OUT: 1065 /* these are setup prior to execution in nvme_init_request() */ 1066 break; 1067 case REQ_OP_FLUSH: 1068 nvme_setup_flush(ns, cmd); 1069 break; 1070 case REQ_OP_ZONE_RESET_ALL: 1071 case REQ_OP_ZONE_RESET: 1072 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET); 1073 break; 1074 case REQ_OP_ZONE_OPEN: 1075 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN); 1076 break; 1077 case REQ_OP_ZONE_CLOSE: 1078 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE); 1079 break; 1080 case REQ_OP_ZONE_FINISH: 1081 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH); 1082 break; 1083 case REQ_OP_WRITE_ZEROES: 1084 ret = nvme_setup_write_zeroes(ns, req, cmd); 1085 break; 1086 case REQ_OP_DISCARD: 1087 ret = nvme_setup_discard(ns, req, cmd); 1088 break; 1089 case REQ_OP_READ: 1090 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read); 1091 break; 1092 case REQ_OP_WRITE: 1093 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write); 1094 break; 1095 case REQ_OP_ZONE_APPEND: 1096 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append); 1097 break; 1098 default: 1099 WARN_ON_ONCE(1); 1100 return BLK_STS_IOERR; 1101 } 1102 1103 cmd->common.command_id = nvme_cid(req); 1104 trace_nvme_setup_cmd(req, cmd); 1105 return ret; 1106 } 1107 EXPORT_SYMBOL_GPL(nvme_setup_cmd); 1108 1109 /* 1110 * Return values: 1111 * 0: success 1112 * >0: nvme controller's cqe status response 1113 * <0: kernel error in lieu of controller response 1114 */ 1115 int nvme_execute_rq(struct request *rq, bool at_head) 1116 { 1117 blk_status_t status; 1118 1119 status = blk_execute_rq(rq, at_head); 1120 if (nvme_req(rq)->flags & NVME_REQ_CANCELLED) 1121 return -EINTR; 1122 if (nvme_req(rq)->status) 1123 return nvme_req(rq)->status; 1124 return blk_status_to_errno(status); 1125 } 1126 EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, "NVME_TARGET_PASSTHRU"); 1127 1128 /* 1129 * Returns 0 on success. If the result is negative, it's a Linux error code; 1130 * if the result is positive, it's an NVM Express status code 1131 */ 1132 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, 1133 union nvme_result *result, void *buffer, unsigned bufflen, 1134 int qid, nvme_submit_flags_t flags) 1135 { 1136 struct request *req; 1137 int ret; 1138 blk_mq_req_flags_t blk_flags = 0; 1139 1140 if (flags & NVME_SUBMIT_NOWAIT) 1141 blk_flags |= BLK_MQ_REQ_NOWAIT; 1142 if (flags & NVME_SUBMIT_RESERVED) 1143 blk_flags |= BLK_MQ_REQ_RESERVED; 1144 if (qid == NVME_QID_ANY) 1145 req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags); 1146 else 1147 req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags, 1148 qid - 1); 1149 1150 if (IS_ERR(req)) 1151 return PTR_ERR(req); 1152 nvme_init_request(req, cmd); 1153 if (flags & NVME_SUBMIT_RETRY) 1154 req->cmd_flags &= ~REQ_FAILFAST_DRIVER; 1155 1156 if (buffer && bufflen) { 1157 ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL); 1158 if (ret) 1159 goto out; 1160 } 1161 1162 ret = nvme_execute_rq(req, flags & NVME_SUBMIT_AT_HEAD); 1163 if (result && ret >= 0) 1164 *result = nvme_req(req)->result; 1165 out: 1166 blk_mq_free_request(req); 1167 return ret; 1168 } 1169 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd); 1170 1171 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, 1172 void *buffer, unsigned bufflen) 1173 { 1174 return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 1175 NVME_QID_ANY, 0); 1176 } 1177 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd); 1178 1179 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) 1180 { 1181 u32 effects = 0; 1182 1183 if (ns) { 1184 effects = le32_to_cpu(ns->head->effects->iocs[opcode]); 1185 if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC)) 1186 dev_warn_once(ctrl->device, 1187 "IO command:%02x has unusual effects:%08x\n", 1188 opcode, effects); 1189 1190 /* 1191 * NVME_CMD_EFFECTS_CSE_MASK causes a freeze all I/O queues, 1192 * which would deadlock when done on an I/O command. Note that 1193 * We already warn about an unusual effect above. 1194 */ 1195 effects &= ~NVME_CMD_EFFECTS_CSE_MASK; 1196 } else { 1197 effects = le32_to_cpu(ctrl->effects->acs[opcode]); 1198 1199 /* Ignore execution restrictions if any relaxation bits are set */ 1200 if (effects & NVME_CMD_EFFECTS_CSER_MASK) 1201 effects &= ~NVME_CMD_EFFECTS_CSE_MASK; 1202 } 1203 1204 return effects; 1205 } 1206 EXPORT_SYMBOL_NS_GPL(nvme_command_effects, "NVME_TARGET_PASSTHRU"); 1207 1208 u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) 1209 { 1210 u32 effects = nvme_command_effects(ctrl, ns, opcode); 1211 1212 /* 1213 * For simplicity, IO to all namespaces is quiesced even if the command 1214 * effects say only one namespace is affected. 1215 */ 1216 if (effects & NVME_CMD_EFFECTS_CSE_MASK) { 1217 mutex_lock(&ctrl->scan_lock); 1218 mutex_lock(&ctrl->subsys->lock); 1219 nvme_mpath_start_freeze(ctrl->subsys); 1220 nvme_mpath_wait_freeze(ctrl->subsys); 1221 nvme_start_freeze(ctrl); 1222 nvme_wait_freeze(ctrl); 1223 } 1224 return effects; 1225 } 1226 EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, "NVME_TARGET_PASSTHRU"); 1227 1228 void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects, 1229 struct nvme_command *cmd, int status) 1230 { 1231 if (effects & NVME_CMD_EFFECTS_CSE_MASK) { 1232 nvme_unfreeze(ctrl); 1233 nvme_mpath_unfreeze(ctrl->subsys); 1234 mutex_unlock(&ctrl->subsys->lock); 1235 mutex_unlock(&ctrl->scan_lock); 1236 } 1237 if (effects & NVME_CMD_EFFECTS_CCC) { 1238 if (!test_and_set_bit(NVME_CTRL_DIRTY_CAPABILITY, 1239 &ctrl->flags)) { 1240 dev_info(ctrl->device, 1241 "controller capabilities changed, reset may be required to take effect.\n"); 1242 } 1243 } 1244 if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) { 1245 nvme_queue_scan(ctrl); 1246 flush_work(&ctrl->scan_work); 1247 } 1248 if (ns) 1249 return; 1250 1251 switch (cmd->common.opcode) { 1252 case nvme_admin_set_features: 1253 switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) { 1254 case NVME_FEAT_KATO: 1255 /* 1256 * Keep alive commands interval on the host should be 1257 * updated when KATO is modified by Set Features 1258 * commands. 1259 */ 1260 if (!status) 1261 nvme_update_keep_alive(ctrl, cmd); 1262 break; 1263 default: 1264 break; 1265 } 1266 break; 1267 default: 1268 break; 1269 } 1270 } 1271 EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, "NVME_TARGET_PASSTHRU"); 1272 1273 /* 1274 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1: 1275 * 1276 * The host should send Keep Alive commands at half of the Keep Alive Timeout 1277 * accounting for transport roundtrip times [..]. 1278 */ 1279 static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl) 1280 { 1281 unsigned long delay = ctrl->kato * HZ / 2; 1282 1283 /* 1284 * When using Traffic Based Keep Alive, we need to run 1285 * nvme_keep_alive_work at twice the normal frequency, as one 1286 * command completion can postpone sending a keep alive command 1287 * by up to twice the delay between runs. 1288 */ 1289 if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) 1290 delay /= 2; 1291 return delay; 1292 } 1293 1294 static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl) 1295 { 1296 unsigned long now = jiffies; 1297 unsigned long delay = nvme_keep_alive_work_period(ctrl); 1298 unsigned long ka_next_check_tm = ctrl->ka_last_check_time + delay; 1299 1300 if (time_after(now, ka_next_check_tm)) 1301 delay = 0; 1302 else 1303 delay = ka_next_check_tm - now; 1304 1305 queue_delayed_work(nvme_wq, &ctrl->ka_work, delay); 1306 } 1307 1308 static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, 1309 blk_status_t status) 1310 { 1311 struct nvme_ctrl *ctrl = rq->end_io_data; 1312 unsigned long rtt = jiffies - (rq->deadline - rq->timeout); 1313 unsigned long delay = nvme_keep_alive_work_period(ctrl); 1314 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); 1315 1316 /* 1317 * Subtract off the keepalive RTT so nvme_keep_alive_work runs 1318 * at the desired frequency. 1319 */ 1320 if (rtt <= delay) { 1321 delay -= rtt; 1322 } else { 1323 dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n", 1324 jiffies_to_msecs(rtt)); 1325 delay = 0; 1326 } 1327 1328 blk_mq_free_request(rq); 1329 1330 if (status) { 1331 dev_err(ctrl->device, 1332 "failed nvme_keep_alive_end_io error=%d\n", 1333 status); 1334 return RQ_END_IO_NONE; 1335 } 1336 1337 ctrl->ka_last_check_time = jiffies; 1338 ctrl->comp_seen = false; 1339 if (state == NVME_CTRL_LIVE || state == NVME_CTRL_CONNECTING) 1340 queue_delayed_work(nvme_wq, &ctrl->ka_work, delay); 1341 return RQ_END_IO_NONE; 1342 } 1343 1344 static void nvme_keep_alive_work(struct work_struct *work) 1345 { 1346 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), 1347 struct nvme_ctrl, ka_work); 1348 bool comp_seen = ctrl->comp_seen; 1349 struct request *rq; 1350 1351 ctrl->ka_last_check_time = jiffies; 1352 1353 if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { 1354 dev_dbg(ctrl->device, 1355 "reschedule traffic based keep-alive timer\n"); 1356 ctrl->comp_seen = false; 1357 nvme_queue_keep_alive_work(ctrl); 1358 return; 1359 } 1360 1361 rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd), 1362 BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); 1363 if (IS_ERR(rq)) { 1364 /* allocation failure, reset the controller */ 1365 dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq)); 1366 nvme_reset_ctrl(ctrl); 1367 return; 1368 } 1369 nvme_init_request(rq, &ctrl->ka_cmd); 1370 1371 rq->timeout = ctrl->kato * HZ; 1372 rq->end_io = nvme_keep_alive_end_io; 1373 rq->end_io_data = ctrl; 1374 blk_execute_rq_nowait(rq, false); 1375 } 1376 1377 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) 1378 { 1379 if (unlikely(ctrl->kato == 0)) 1380 return; 1381 1382 nvme_queue_keep_alive_work(ctrl); 1383 } 1384 1385 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) 1386 { 1387 if (unlikely(ctrl->kato == 0)) 1388 return; 1389 1390 cancel_delayed_work_sync(&ctrl->ka_work); 1391 } 1392 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive); 1393 1394 static void nvme_update_keep_alive(struct nvme_ctrl *ctrl, 1395 struct nvme_command *cmd) 1396 { 1397 unsigned int new_kato = 1398 DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000); 1399 1400 dev_info(ctrl->device, 1401 "keep alive interval updated from %u ms to %u ms\n", 1402 ctrl->kato * 1000 / 2, new_kato * 1000 / 2); 1403 1404 nvme_stop_keep_alive(ctrl); 1405 ctrl->kato = new_kato; 1406 nvme_start_keep_alive(ctrl); 1407 } 1408 1409 static bool nvme_id_cns_ok(struct nvme_ctrl *ctrl, u8 cns) 1410 { 1411 /* 1412 * The CNS field occupies a full byte starting with NVMe 1.2 1413 */ 1414 if (ctrl->vs >= NVME_VS(1, 2, 0)) 1415 return true; 1416 1417 /* 1418 * NVMe 1.1 expanded the CNS value to two bits, which means values 1419 * larger than that could get truncated and treated as an incorrect 1420 * value. 1421 * 1422 * Qemu implemented 1.0 behavior for controllers claiming 1.1 1423 * compliance, so they need to be quirked here. 1424 */ 1425 if (ctrl->vs >= NVME_VS(1, 1, 0) && 1426 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) 1427 return cns <= 3; 1428 1429 /* 1430 * NVMe 1.0 used a single bit for the CNS value. 1431 */ 1432 return cns <= 1; 1433 } 1434 1435 static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) 1436 { 1437 struct nvme_command c = { }; 1438 int error; 1439 1440 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ 1441 c.identify.opcode = nvme_admin_identify; 1442 c.identify.cns = NVME_ID_CNS_CTRL; 1443 1444 *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL); 1445 if (!*id) 1446 return -ENOMEM; 1447 1448 error = nvme_submit_sync_cmd(dev->admin_q, &c, *id, 1449 sizeof(struct nvme_id_ctrl)); 1450 if (error) { 1451 kfree(*id); 1452 *id = NULL; 1453 } 1454 return error; 1455 } 1456 1457 static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, 1458 struct nvme_ns_id_desc *cur, bool *csi_seen) 1459 { 1460 const char *warn_str = "ctrl returned bogus length:"; 1461 void *data = cur; 1462 1463 switch (cur->nidt) { 1464 case NVME_NIDT_EUI64: 1465 if (cur->nidl != NVME_NIDT_EUI64_LEN) { 1466 dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n", 1467 warn_str, cur->nidl); 1468 return -1; 1469 } 1470 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) 1471 return NVME_NIDT_EUI64_LEN; 1472 memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN); 1473 return NVME_NIDT_EUI64_LEN; 1474 case NVME_NIDT_NGUID: 1475 if (cur->nidl != NVME_NIDT_NGUID_LEN) { 1476 dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n", 1477 warn_str, cur->nidl); 1478 return -1; 1479 } 1480 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) 1481 return NVME_NIDT_NGUID_LEN; 1482 memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN); 1483 return NVME_NIDT_NGUID_LEN; 1484 case NVME_NIDT_UUID: 1485 if (cur->nidl != NVME_NIDT_UUID_LEN) { 1486 dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n", 1487 warn_str, cur->nidl); 1488 return -1; 1489 } 1490 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) 1491 return NVME_NIDT_UUID_LEN; 1492 uuid_copy(&ids->uuid, data + sizeof(*cur)); 1493 return NVME_NIDT_UUID_LEN; 1494 case NVME_NIDT_CSI: 1495 if (cur->nidl != NVME_NIDT_CSI_LEN) { 1496 dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n", 1497 warn_str, cur->nidl); 1498 return -1; 1499 } 1500 memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN); 1501 *csi_seen = true; 1502 return NVME_NIDT_CSI_LEN; 1503 default: 1504 /* Skip unknown types */ 1505 return cur->nidl; 1506 } 1507 } 1508 1509 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, 1510 struct nvme_ns_info *info) 1511 { 1512 struct nvme_command c = { }; 1513 bool csi_seen = false; 1514 int status, pos, len; 1515 void *data; 1516 1517 if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl)) 1518 return 0; 1519 if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST) 1520 return 0; 1521 1522 c.identify.opcode = nvme_admin_identify; 1523 c.identify.nsid = cpu_to_le32(info->nsid); 1524 c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; 1525 1526 data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 1527 if (!data) 1528 return -ENOMEM; 1529 1530 status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, 1531 NVME_IDENTIFY_DATA_SIZE); 1532 if (status) { 1533 dev_warn(ctrl->device, 1534 "Identify Descriptors failed (nsid=%u, status=0x%x)\n", 1535 info->nsid, status); 1536 goto free_data; 1537 } 1538 1539 for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { 1540 struct nvme_ns_id_desc *cur = data + pos; 1541 1542 if (cur->nidl == 0) 1543 break; 1544 1545 len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen); 1546 if (len < 0) 1547 break; 1548 1549 len += sizeof(*cur); 1550 } 1551 1552 if (nvme_multi_css(ctrl) && !csi_seen) { 1553 dev_warn(ctrl->device, "Command set not reported for nsid:%d\n", 1554 info->nsid); 1555 status = -EINVAL; 1556 } 1557 1558 free_data: 1559 kfree(data); 1560 return status; 1561 } 1562 1563 int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, 1564 struct nvme_id_ns **id) 1565 { 1566 struct nvme_command c = { }; 1567 int error; 1568 1569 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ 1570 c.identify.opcode = nvme_admin_identify; 1571 c.identify.nsid = cpu_to_le32(nsid); 1572 c.identify.cns = NVME_ID_CNS_NS; 1573 1574 *id = kmalloc(sizeof(**id), GFP_KERNEL); 1575 if (!*id) 1576 return -ENOMEM; 1577 1578 error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id)); 1579 if (error) { 1580 dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error); 1581 kfree(*id); 1582 *id = NULL; 1583 } 1584 return error; 1585 } 1586 1587 static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl, 1588 struct nvme_ns_info *info) 1589 { 1590 struct nvme_ns_ids *ids = &info->ids; 1591 struct nvme_id_ns *id; 1592 int ret; 1593 1594 ret = nvme_identify_ns(ctrl, info->nsid, &id); 1595 if (ret) 1596 return ret; 1597 1598 if (id->ncap == 0) { 1599 /* namespace not allocated or attached */ 1600 info->is_removed = true; 1601 ret = -ENODEV; 1602 goto error; 1603 } 1604 1605 info->anagrpid = id->anagrpid; 1606 info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; 1607 info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; 1608 info->is_ready = true; 1609 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) { 1610 dev_info(ctrl->device, 1611 "Ignoring bogus Namespace Identifiers\n"); 1612 } else { 1613 if (ctrl->vs >= NVME_VS(1, 1, 0) && 1614 !memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 1615 memcpy(ids->eui64, id->eui64, sizeof(ids->eui64)); 1616 if (ctrl->vs >= NVME_VS(1, 2, 0) && 1617 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 1618 memcpy(ids->nguid, id->nguid, sizeof(ids->nguid)); 1619 } 1620 1621 error: 1622 kfree(id); 1623 return ret; 1624 } 1625 1626 static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl, 1627 struct nvme_ns_info *info) 1628 { 1629 struct nvme_id_ns_cs_indep *id; 1630 struct nvme_command c = { 1631 .identify.opcode = nvme_admin_identify, 1632 .identify.nsid = cpu_to_le32(info->nsid), 1633 .identify.cns = NVME_ID_CNS_NS_CS_INDEP, 1634 }; 1635 int ret; 1636 1637 id = kmalloc(sizeof(*id), GFP_KERNEL); 1638 if (!id) 1639 return -ENOMEM; 1640 1641 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); 1642 if (!ret) { 1643 info->anagrpid = id->anagrpid; 1644 info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; 1645 info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; 1646 info->is_ready = id->nstat & NVME_NSTAT_NRDY; 1647 info->is_rotational = id->nsfeat & NVME_NS_ROTATIONAL; 1648 info->no_vwc = id->nsfeat & NVME_NS_VWC_NOT_PRESENT; 1649 } 1650 kfree(id); 1651 return ret; 1652 } 1653 1654 static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, 1655 unsigned int dword11, void *buffer, size_t buflen, u32 *result) 1656 { 1657 union nvme_result res = { 0 }; 1658 struct nvme_command c = { }; 1659 int ret; 1660 1661 c.features.opcode = op; 1662 c.features.fid = cpu_to_le32(fid); 1663 c.features.dword11 = cpu_to_le32(dword11); 1664 1665 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, 1666 buffer, buflen, NVME_QID_ANY, 0); 1667 if (ret >= 0 && result) 1668 *result = le32_to_cpu(res.u32); 1669 return ret; 1670 } 1671 1672 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, 1673 unsigned int dword11, void *buffer, size_t buflen, 1674 u32 *result) 1675 { 1676 return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer, 1677 buflen, result); 1678 } 1679 EXPORT_SYMBOL_GPL(nvme_set_features); 1680 1681 int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid, 1682 unsigned int dword11, void *buffer, size_t buflen, 1683 u32 *result) 1684 { 1685 return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer, 1686 buflen, result); 1687 } 1688 EXPORT_SYMBOL_GPL(nvme_get_features); 1689 1690 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count) 1691 { 1692 u32 q_count = (*count - 1) | ((*count - 1) << 16); 1693 u32 result; 1694 int status, nr_io_queues; 1695 1696 status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0, 1697 &result); 1698 if (status < 0) 1699 return status; 1700 1701 /* 1702 * Degraded controllers might return an error when setting the queue 1703 * count. We still want to be able to bring them online and offer 1704 * access to the admin queue, as that might be only way to fix them up. 1705 */ 1706 if (status > 0) { 1707 dev_err(ctrl->device, "Could not set queue count (%d)\n", status); 1708 *count = 0; 1709 } else { 1710 nr_io_queues = min(result & 0xffff, result >> 16) + 1; 1711 *count = min(*count, nr_io_queues); 1712 } 1713 1714 return 0; 1715 } 1716 EXPORT_SYMBOL_GPL(nvme_set_queue_count); 1717 1718 #define NVME_AEN_SUPPORTED \ 1719 (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \ 1720 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE) 1721 1722 static void nvme_enable_aen(struct nvme_ctrl *ctrl) 1723 { 1724 u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED; 1725 int status; 1726 1727 if (!supported_aens) 1728 return; 1729 1730 status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens, 1731 NULL, 0, &result); 1732 if (status) 1733 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n", 1734 supported_aens); 1735 1736 queue_work(nvme_wq, &ctrl->async_event_work); 1737 } 1738 1739 static int nvme_ns_open(struct nvme_ns *ns) 1740 { 1741 1742 /* should never be called due to GENHD_FL_HIDDEN */ 1743 if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head))) 1744 goto fail; 1745 if (!nvme_get_ns(ns)) 1746 goto fail; 1747 if (!try_module_get(ns->ctrl->ops->module)) 1748 goto fail_put_ns; 1749 1750 return 0; 1751 1752 fail_put_ns: 1753 nvme_put_ns(ns); 1754 fail: 1755 return -ENXIO; 1756 } 1757 1758 static void nvme_ns_release(struct nvme_ns *ns) 1759 { 1760 1761 module_put(ns->ctrl->ops->module); 1762 nvme_put_ns(ns); 1763 } 1764 1765 static int nvme_open(struct gendisk *disk, blk_mode_t mode) 1766 { 1767 return nvme_ns_open(disk->private_data); 1768 } 1769 1770 static void nvme_release(struct gendisk *disk) 1771 { 1772 nvme_ns_release(disk->private_data); 1773 } 1774 1775 int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) 1776 { 1777 /* some standard values */ 1778 geo->heads = 1 << 6; 1779 geo->sectors = 1 << 5; 1780 geo->cylinders = get_capacity(bdev->bd_disk) >> 11; 1781 return 0; 1782 } 1783 1784 static bool nvme_init_integrity(struct nvme_ns_head *head, 1785 struct queue_limits *lim, struct nvme_ns_info *info) 1786 { 1787 struct blk_integrity *bi = &lim->integrity; 1788 1789 memset(bi, 0, sizeof(*bi)); 1790 1791 if (!head->ms) 1792 return true; 1793 1794 /* 1795 * PI can always be supported as we can ask the controller to simply 1796 * insert/strip it, which is not possible for other kinds of metadata. 1797 */ 1798 if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) || 1799 !(head->features & NVME_NS_METADATA_SUPPORTED)) 1800 return nvme_ns_has_pi(head); 1801 1802 switch (head->pi_type) { 1803 case NVME_NS_DPS_PI_TYPE3: 1804 switch (head->guard_type) { 1805 case NVME_NVM_NS_16B_GUARD: 1806 bi->csum_type = BLK_INTEGRITY_CSUM_CRC; 1807 bi->tag_size = sizeof(u16) + sizeof(u32); 1808 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1809 break; 1810 case NVME_NVM_NS_64B_GUARD: 1811 bi->csum_type = BLK_INTEGRITY_CSUM_CRC64; 1812 bi->tag_size = sizeof(u16) + 6; 1813 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; 1814 break; 1815 default: 1816 break; 1817 } 1818 break; 1819 case NVME_NS_DPS_PI_TYPE1: 1820 case NVME_NS_DPS_PI_TYPE2: 1821 switch (head->guard_type) { 1822 case NVME_NVM_NS_16B_GUARD: 1823 bi->csum_type = BLK_INTEGRITY_CSUM_CRC; 1824 bi->tag_size = sizeof(u16); 1825 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE | 1826 BLK_INTEGRITY_REF_TAG; 1827 break; 1828 case NVME_NVM_NS_64B_GUARD: 1829 bi->csum_type = BLK_INTEGRITY_CSUM_CRC64; 1830 bi->tag_size = sizeof(u16); 1831 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE | 1832 BLK_INTEGRITY_REF_TAG; 1833 break; 1834 default: 1835 break; 1836 } 1837 break; 1838 default: 1839 break; 1840 } 1841 1842 bi->tuple_size = head->ms; 1843 bi->pi_offset = info->pi_offset; 1844 return true; 1845 } 1846 1847 static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim) 1848 { 1849 struct nvme_ctrl *ctrl = ns->ctrl; 1850 1851 if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX)) 1852 lim->max_hw_discard_sectors = 1853 nvme_lba_to_sect(ns->head, ctrl->dmrsl); 1854 else if (ctrl->oncs & NVME_CTRL_ONCS_DSM) 1855 lim->max_hw_discard_sectors = UINT_MAX; 1856 else 1857 lim->max_hw_discard_sectors = 0; 1858 1859 lim->discard_granularity = lim->logical_block_size; 1860 1861 if (ctrl->dmrl) 1862 lim->max_discard_segments = ctrl->dmrl; 1863 else 1864 lim->max_discard_segments = NVME_DSM_MAX_RANGES; 1865 } 1866 1867 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) 1868 { 1869 return uuid_equal(&a->uuid, &b->uuid) && 1870 memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 && 1871 memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 && 1872 a->csi == b->csi; 1873 } 1874 1875 static int nvme_identify_ns_nvm(struct nvme_ctrl *ctrl, unsigned int nsid, 1876 struct nvme_id_ns_nvm **nvmp) 1877 { 1878 struct nvme_command c = { 1879 .identify.opcode = nvme_admin_identify, 1880 .identify.nsid = cpu_to_le32(nsid), 1881 .identify.cns = NVME_ID_CNS_CS_NS, 1882 .identify.csi = NVME_CSI_NVM, 1883 }; 1884 struct nvme_id_ns_nvm *nvm; 1885 int ret; 1886 1887 nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); 1888 if (!nvm) 1889 return -ENOMEM; 1890 1891 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, nvm, sizeof(*nvm)); 1892 if (ret) 1893 kfree(nvm); 1894 else 1895 *nvmp = nvm; 1896 return ret; 1897 } 1898 1899 static void nvme_configure_pi_elbas(struct nvme_ns_head *head, 1900 struct nvme_id_ns *id, struct nvme_id_ns_nvm *nvm) 1901 { 1902 u32 elbaf = le32_to_cpu(nvm->elbaf[nvme_lbaf_index(id->flbas)]); 1903 u8 guard_type; 1904 1905 /* no support for storage tag formats right now */ 1906 if (nvme_elbaf_sts(elbaf)) 1907 return; 1908 1909 guard_type = nvme_elbaf_guard_type(elbaf); 1910 if ((nvm->pic & NVME_ID_NS_NVM_QPIFS) && 1911 guard_type == NVME_NVM_NS_QTYPE_GUARD) 1912 guard_type = nvme_elbaf_qualified_guard_type(elbaf); 1913 1914 head->guard_type = guard_type; 1915 switch (head->guard_type) { 1916 case NVME_NVM_NS_64B_GUARD: 1917 head->pi_size = sizeof(struct crc64_pi_tuple); 1918 break; 1919 case NVME_NVM_NS_16B_GUARD: 1920 head->pi_size = sizeof(struct t10_pi_tuple); 1921 break; 1922 default: 1923 break; 1924 } 1925 } 1926 1927 static void nvme_configure_metadata(struct nvme_ctrl *ctrl, 1928 struct nvme_ns_head *head, struct nvme_id_ns *id, 1929 struct nvme_id_ns_nvm *nvm, struct nvme_ns_info *info) 1930 { 1931 head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); 1932 head->pi_type = 0; 1933 head->pi_size = 0; 1934 head->ms = le16_to_cpu(id->lbaf[nvme_lbaf_index(id->flbas)].ms); 1935 if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) 1936 return; 1937 1938 if (nvm && (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) { 1939 nvme_configure_pi_elbas(head, id, nvm); 1940 } else { 1941 head->pi_size = sizeof(struct t10_pi_tuple); 1942 head->guard_type = NVME_NVM_NS_16B_GUARD; 1943 } 1944 1945 if (head->pi_size && head->ms >= head->pi_size) 1946 head->pi_type = id->dps & NVME_NS_DPS_PI_MASK; 1947 if (!(id->dps & NVME_NS_DPS_PI_FIRST)) { 1948 if (disable_pi_offsets) 1949 head->pi_type = 0; 1950 else 1951 info->pi_offset = head->ms - head->pi_size; 1952 } 1953 1954 if (ctrl->ops->flags & NVME_F_FABRICS) { 1955 /* 1956 * The NVMe over Fabrics specification only supports metadata as 1957 * part of the extended data LBA. We rely on HCA/HBA support to 1958 * remap the separate metadata buffer from the block layer. 1959 */ 1960 if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) 1961 return; 1962 1963 head->features |= NVME_NS_EXT_LBAS; 1964 1965 /* 1966 * The current fabrics transport drivers support namespace 1967 * metadata formats only if nvme_ns_has_pi() returns true. 1968 * Suppress support for all other formats so the namespace will 1969 * have a 0 capacity and not be usable through the block stack. 1970 * 1971 * Note, this check will need to be modified if any drivers 1972 * gain the ability to use other metadata formats. 1973 */ 1974 if (ctrl->max_integrity_segments && nvme_ns_has_pi(head)) 1975 head->features |= NVME_NS_METADATA_SUPPORTED; 1976 } else { 1977 /* 1978 * For PCIe controllers, we can't easily remap the separate 1979 * metadata buffer from the block layer and thus require a 1980 * separate metadata buffer for block layer metadata/PI support. 1981 * We allow extended LBAs for the passthrough interface, though. 1982 */ 1983 if (id->flbas & NVME_NS_FLBAS_META_EXT) 1984 head->features |= NVME_NS_EXT_LBAS; 1985 else 1986 head->features |= NVME_NS_METADATA_SUPPORTED; 1987 } 1988 } 1989 1990 1991 static void nvme_update_atomic_write_disk_info(struct nvme_ns *ns, 1992 struct nvme_id_ns *id, struct queue_limits *lim, 1993 u32 bs, u32 atomic_bs) 1994 { 1995 unsigned int boundary = 0; 1996 1997 if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) { 1998 if (le16_to_cpu(id->nabspf)) 1999 boundary = (le16_to_cpu(id->nabspf) + 1) * bs; 2000 } 2001 lim->atomic_write_hw_max = atomic_bs; 2002 lim->atomic_write_hw_boundary = boundary; 2003 lim->atomic_write_hw_unit_min = bs; 2004 lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs); 2005 lim->features |= BLK_FEAT_ATOMIC_WRITES; 2006 } 2007 2008 static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl) 2009 { 2010 return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1; 2011 } 2012 2013 static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl, 2014 struct queue_limits *lim) 2015 { 2016 lim->max_hw_sectors = ctrl->max_hw_sectors; 2017 lim->max_segments = min_t(u32, USHRT_MAX, 2018 min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments)); 2019 lim->max_integrity_segments = ctrl->max_integrity_segments; 2020 lim->virt_boundary_mask = NVME_CTRL_PAGE_SIZE - 1; 2021 lim->max_segment_size = UINT_MAX; 2022 lim->dma_alignment = 3; 2023 } 2024 2025 static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id, 2026 struct queue_limits *lim) 2027 { 2028 struct nvme_ns_head *head = ns->head; 2029 u32 bs = 1U << head->lba_shift; 2030 u32 atomic_bs, phys_bs, io_opt = 0; 2031 bool valid = true; 2032 2033 /* 2034 * The block layer can't support LBA sizes larger than the page size 2035 * or smaller than a sector size yet, so catch this early and don't 2036 * allow block I/O. 2037 */ 2038 if (blk_validate_block_size(bs)) { 2039 bs = (1 << 9); 2040 valid = false; 2041 } 2042 2043 atomic_bs = phys_bs = bs; 2044 if (id->nabo == 0) { 2045 /* 2046 * Bit 1 indicates whether NAWUPF is defined for this namespace 2047 * and whether it should be used instead of AWUPF. If NAWUPF == 2048 * 0 then AWUPF must be used instead. 2049 */ 2050 if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) 2051 atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; 2052 else 2053 atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; 2054 2055 nvme_update_atomic_write_disk_info(ns, id, lim, bs, atomic_bs); 2056 } 2057 2058 if (id->nsfeat & NVME_NS_FEAT_IO_OPT) { 2059 /* NPWG = Namespace Preferred Write Granularity */ 2060 phys_bs = bs * (1 + le16_to_cpu(id->npwg)); 2061 /* NOWS = Namespace Optimal Write Size */ 2062 if (id->nows) 2063 io_opt = bs * (1 + le16_to_cpu(id->nows)); 2064 } 2065 2066 /* 2067 * Linux filesystems assume writing a single physical block is 2068 * an atomic operation. Hence limit the physical block size to the 2069 * value of the Atomic Write Unit Power Fail parameter. 2070 */ 2071 lim->logical_block_size = bs; 2072 lim->physical_block_size = min(phys_bs, atomic_bs); 2073 lim->io_min = phys_bs; 2074 lim->io_opt = io_opt; 2075 if ((ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) && 2076 (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)) 2077 lim->max_write_zeroes_sectors = UINT_MAX; 2078 else 2079 lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors; 2080 return valid; 2081 } 2082 2083 static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info) 2084 { 2085 return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags); 2086 } 2087 2088 static inline bool nvme_first_scan(struct gendisk *disk) 2089 { 2090 /* nvme_alloc_ns() scans the disk prior to adding it */ 2091 return !disk_live(disk); 2092 } 2093 2094 static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id, 2095 struct queue_limits *lim) 2096 { 2097 struct nvme_ctrl *ctrl = ns->ctrl; 2098 u32 iob; 2099 2100 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && 2101 is_power_of_2(ctrl->max_hw_sectors)) 2102 iob = ctrl->max_hw_sectors; 2103 else 2104 iob = nvme_lba_to_sect(ns->head, le16_to_cpu(id->noiob)); 2105 2106 if (!iob) 2107 return; 2108 2109 if (!is_power_of_2(iob)) { 2110 if (nvme_first_scan(ns->disk)) 2111 pr_warn("%s: ignoring unaligned IO boundary:%u\n", 2112 ns->disk->disk_name, iob); 2113 return; 2114 } 2115 2116 if (blk_queue_is_zoned(ns->disk->queue)) { 2117 if (nvme_first_scan(ns->disk)) 2118 pr_warn("%s: ignoring zoned namespace IO boundary\n", 2119 ns->disk->disk_name); 2120 return; 2121 } 2122 2123 lim->chunk_sectors = iob; 2124 } 2125 2126 static int nvme_update_ns_info_generic(struct nvme_ns *ns, 2127 struct nvme_ns_info *info) 2128 { 2129 struct queue_limits lim; 2130 int ret; 2131 2132 lim = queue_limits_start_update(ns->disk->queue); 2133 nvme_set_ctrl_limits(ns->ctrl, &lim); 2134 2135 blk_mq_freeze_queue(ns->disk->queue); 2136 ret = queue_limits_commit_update(ns->disk->queue, &lim); 2137 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); 2138 blk_mq_unfreeze_queue(ns->disk->queue); 2139 2140 /* Hide the block-interface for these devices */ 2141 if (!ret) 2142 ret = -ENODEV; 2143 return ret; 2144 } 2145 2146 static int nvme_update_ns_info_block(struct nvme_ns *ns, 2147 struct nvme_ns_info *info) 2148 { 2149 struct queue_limits lim; 2150 struct nvme_id_ns_nvm *nvm = NULL; 2151 struct nvme_zone_info zi = {}; 2152 struct nvme_id_ns *id; 2153 sector_t capacity; 2154 unsigned lbaf; 2155 int ret; 2156 2157 ret = nvme_identify_ns(ns->ctrl, info->nsid, &id); 2158 if (ret) 2159 return ret; 2160 2161 if (id->ncap == 0) { 2162 /* namespace not allocated or attached */ 2163 info->is_removed = true; 2164 ret = -ENXIO; 2165 goto out; 2166 } 2167 lbaf = nvme_lbaf_index(id->flbas); 2168 2169 if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) { 2170 ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm); 2171 if (ret < 0) 2172 goto out; 2173 } 2174 2175 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && 2176 ns->head->ids.csi == NVME_CSI_ZNS) { 2177 ret = nvme_query_zone_info(ns, lbaf, &zi); 2178 if (ret < 0) 2179 goto out; 2180 } 2181 2182 lim = queue_limits_start_update(ns->disk->queue); 2183 2184 blk_mq_freeze_queue(ns->disk->queue); 2185 ns->head->lba_shift = id->lbaf[lbaf].ds; 2186 ns->head->nuse = le64_to_cpu(id->nuse); 2187 capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze)); 2188 nvme_set_ctrl_limits(ns->ctrl, &lim); 2189 nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info); 2190 nvme_set_chunk_sectors(ns, id, &lim); 2191 if (!nvme_update_disk_info(ns, id, &lim)) 2192 capacity = 0; 2193 nvme_config_discard(ns, &lim); 2194 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && 2195 ns->head->ids.csi == NVME_CSI_ZNS) 2196 nvme_update_zone_info(ns, &lim, &zi); 2197 2198 if ((ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT) && !info->no_vwc) 2199 lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA; 2200 else 2201 lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA); 2202 2203 if (info->is_rotational) 2204 lim.features |= BLK_FEAT_ROTATIONAL; 2205 2206 /* 2207 * Register a metadata profile for PI, or the plain non-integrity NVMe 2208 * metadata masquerading as Type 0 if supported, otherwise reject block 2209 * I/O to namespaces with metadata except when the namespace supports 2210 * PI, as it can strip/insert in that case. 2211 */ 2212 if (!nvme_init_integrity(ns->head, &lim, info)) 2213 capacity = 0; 2214 2215 ret = queue_limits_commit_update(ns->disk->queue, &lim); 2216 if (ret) { 2217 blk_mq_unfreeze_queue(ns->disk->queue); 2218 goto out; 2219 } 2220 2221 set_capacity_and_notify(ns->disk, capacity); 2222 2223 /* 2224 * Only set the DEAC bit if the device guarantees that reads from 2225 * deallocated data return zeroes. While the DEAC bit does not 2226 * require that, it must be a no-op if reads from deallocated data 2227 * do not return zeroes. 2228 */ 2229 if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) 2230 ns->head->features |= NVME_NS_DEAC; 2231 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); 2232 set_bit(NVME_NS_READY, &ns->flags); 2233 blk_mq_unfreeze_queue(ns->disk->queue); 2234 2235 if (blk_queue_is_zoned(ns->queue)) { 2236 ret = blk_revalidate_disk_zones(ns->disk); 2237 if (ret && !nvme_first_scan(ns->disk)) 2238 goto out; 2239 } 2240 2241 ret = 0; 2242 out: 2243 kfree(nvm); 2244 kfree(id); 2245 return ret; 2246 } 2247 2248 static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) 2249 { 2250 bool unsupported = false; 2251 int ret; 2252 2253 switch (info->ids.csi) { 2254 case NVME_CSI_ZNS: 2255 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { 2256 dev_info(ns->ctrl->device, 2257 "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n", 2258 info->nsid); 2259 ret = nvme_update_ns_info_generic(ns, info); 2260 break; 2261 } 2262 ret = nvme_update_ns_info_block(ns, info); 2263 break; 2264 case NVME_CSI_NVM: 2265 ret = nvme_update_ns_info_block(ns, info); 2266 break; 2267 default: 2268 dev_info(ns->ctrl->device, 2269 "block device for nsid %u not supported (csi %u)\n", 2270 info->nsid, info->ids.csi); 2271 ret = nvme_update_ns_info_generic(ns, info); 2272 break; 2273 } 2274 2275 /* 2276 * If probing fails due an unsupported feature, hide the block device, 2277 * but still allow other access. 2278 */ 2279 if (ret == -ENODEV) { 2280 ns->disk->flags |= GENHD_FL_HIDDEN; 2281 set_bit(NVME_NS_READY, &ns->flags); 2282 unsupported = true; 2283 ret = 0; 2284 } 2285 2286 if (!ret && nvme_ns_head_multipath(ns->head)) { 2287 struct queue_limits *ns_lim = &ns->disk->queue->limits; 2288 struct queue_limits lim; 2289 2290 lim = queue_limits_start_update(ns->head->disk->queue); 2291 blk_mq_freeze_queue(ns->head->disk->queue); 2292 /* 2293 * queue_limits mixes values that are the hardware limitations 2294 * for bio splitting with what is the device configuration. 2295 * 2296 * For NVMe the device configuration can change after e.g. a 2297 * Format command, and we really want to pick up the new format 2298 * value here. But we must still stack the queue limits to the 2299 * least common denominator for multipathing to split the bios 2300 * properly. 2301 * 2302 * To work around this, we explicitly set the device 2303 * configuration to those that we just queried, but only stack 2304 * the splitting limits in to make sure we still obey possibly 2305 * lower limitations of other controllers. 2306 */ 2307 lim.logical_block_size = ns_lim->logical_block_size; 2308 lim.physical_block_size = ns_lim->physical_block_size; 2309 lim.io_min = ns_lim->io_min; 2310 lim.io_opt = ns_lim->io_opt; 2311 queue_limits_stack_bdev(&lim, ns->disk->part0, 0, 2312 ns->head->disk->disk_name); 2313 if (unsupported) 2314 ns->head->disk->flags |= GENHD_FL_HIDDEN; 2315 else 2316 nvme_init_integrity(ns->head, &lim, info); 2317 ret = queue_limits_commit_update(ns->head->disk->queue, &lim); 2318 2319 set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk)); 2320 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); 2321 nvme_mpath_revalidate_paths(ns); 2322 2323 blk_mq_unfreeze_queue(ns->head->disk->queue); 2324 } 2325 2326 return ret; 2327 } 2328 2329 int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16], 2330 enum blk_unique_id type) 2331 { 2332 struct nvme_ns_ids *ids = &ns->head->ids; 2333 2334 if (type != BLK_UID_EUI64) 2335 return -EINVAL; 2336 2337 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) { 2338 memcpy(id, &ids->nguid, sizeof(ids->nguid)); 2339 return sizeof(ids->nguid); 2340 } 2341 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) { 2342 memcpy(id, &ids->eui64, sizeof(ids->eui64)); 2343 return sizeof(ids->eui64); 2344 } 2345 2346 return -EINVAL; 2347 } 2348 2349 static int nvme_get_unique_id(struct gendisk *disk, u8 id[16], 2350 enum blk_unique_id type) 2351 { 2352 return nvme_ns_get_unique_id(disk->private_data, id, type); 2353 } 2354 2355 #ifdef CONFIG_BLK_SED_OPAL 2356 static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, 2357 bool send) 2358 { 2359 struct nvme_ctrl *ctrl = data; 2360 struct nvme_command cmd = { }; 2361 2362 if (send) 2363 cmd.common.opcode = nvme_admin_security_send; 2364 else 2365 cmd.common.opcode = nvme_admin_security_recv; 2366 cmd.common.nsid = 0; 2367 cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); 2368 cmd.common.cdw11 = cpu_to_le32(len); 2369 2370 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 2371 NVME_QID_ANY, NVME_SUBMIT_AT_HEAD); 2372 } 2373 2374 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) 2375 { 2376 if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) { 2377 if (!ctrl->opal_dev) 2378 ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit); 2379 else if (was_suspended) 2380 opal_unlock_from_suspend(ctrl->opal_dev); 2381 } else { 2382 free_opal_dev(ctrl->opal_dev); 2383 ctrl->opal_dev = NULL; 2384 } 2385 } 2386 #else 2387 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) 2388 { 2389 } 2390 #endif /* CONFIG_BLK_SED_OPAL */ 2391 2392 #ifdef CONFIG_BLK_DEV_ZONED 2393 static int nvme_report_zones(struct gendisk *disk, sector_t sector, 2394 unsigned int nr_zones, report_zones_cb cb, void *data) 2395 { 2396 return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb, 2397 data); 2398 } 2399 #else 2400 #define nvme_report_zones NULL 2401 #endif /* CONFIG_BLK_DEV_ZONED */ 2402 2403 const struct block_device_operations nvme_bdev_ops = { 2404 .owner = THIS_MODULE, 2405 .ioctl = nvme_ioctl, 2406 .compat_ioctl = blkdev_compat_ptr_ioctl, 2407 .open = nvme_open, 2408 .release = nvme_release, 2409 .getgeo = nvme_getgeo, 2410 .get_unique_id = nvme_get_unique_id, 2411 .report_zones = nvme_report_zones, 2412 .pr_ops = &nvme_pr_ops, 2413 }; 2414 2415 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val, 2416 u32 timeout, const char *op) 2417 { 2418 unsigned long timeout_jiffies = jiffies + timeout * HZ; 2419 u32 csts; 2420 int ret; 2421 2422 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 2423 if (csts == ~0) 2424 return -ENODEV; 2425 if ((csts & mask) == val) 2426 break; 2427 2428 usleep_range(1000, 2000); 2429 if (fatal_signal_pending(current)) 2430 return -EINTR; 2431 if (time_after(jiffies, timeout_jiffies)) { 2432 dev_err(ctrl->device, 2433 "Device not ready; aborting %s, CSTS=0x%x\n", 2434 op, csts); 2435 return -ENODEV; 2436 } 2437 } 2438 2439 return ret; 2440 } 2441 2442 int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown) 2443 { 2444 int ret; 2445 2446 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 2447 if (shutdown) 2448 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; 2449 else 2450 ctrl->ctrl_config &= ~NVME_CC_ENABLE; 2451 2452 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2453 if (ret) 2454 return ret; 2455 2456 if (shutdown) { 2457 return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK, 2458 NVME_CSTS_SHST_CMPLT, 2459 ctrl->shutdown_timeout, "shutdown"); 2460 } 2461 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) 2462 msleep(NVME_QUIRK_DELAY_AMOUNT); 2463 return nvme_wait_ready(ctrl, NVME_CSTS_RDY, 0, 2464 (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset"); 2465 } 2466 EXPORT_SYMBOL_GPL(nvme_disable_ctrl); 2467 2468 int nvme_enable_ctrl(struct nvme_ctrl *ctrl) 2469 { 2470 unsigned dev_page_min; 2471 u32 timeout; 2472 int ret; 2473 2474 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); 2475 if (ret) { 2476 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); 2477 return ret; 2478 } 2479 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12; 2480 2481 if (NVME_CTRL_PAGE_SHIFT < dev_page_min) { 2482 dev_err(ctrl->device, 2483 "Minimum device page size %u too large for host (%u)\n", 2484 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT); 2485 return -ENODEV; 2486 } 2487 2488 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI) 2489 ctrl->ctrl_config = NVME_CC_CSS_CSI; 2490 else 2491 ctrl->ctrl_config = NVME_CC_CSS_NVM; 2492 2493 /* 2494 * Setting CRIME results in CSTS.RDY before the media is ready. This 2495 * makes it possible for media related commands to return the error 2496 * NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY. Until the driver is 2497 * restructured to handle retries, disable CC.CRIME. 2498 */ 2499 ctrl->ctrl_config &= ~NVME_CC_CRIME; 2500 2501 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; 2502 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; 2503 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; 2504 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2505 if (ret) 2506 return ret; 2507 2508 /* CAP value may change after initial CC write */ 2509 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); 2510 if (ret) 2511 return ret; 2512 2513 timeout = NVME_CAP_TIMEOUT(ctrl->cap); 2514 if (ctrl->cap & NVME_CAP_CRMS_CRWMS) { 2515 u32 crto, ready_timeout; 2516 2517 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto); 2518 if (ret) { 2519 dev_err(ctrl->device, "Reading CRTO failed (%d)\n", 2520 ret); 2521 return ret; 2522 } 2523 2524 /* 2525 * CRTO should always be greater or equal to CAP.TO, but some 2526 * devices are known to get this wrong. Use the larger of the 2527 * two values. 2528 */ 2529 ready_timeout = NVME_CRTO_CRWMT(crto); 2530 2531 if (ready_timeout < timeout) 2532 dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n", 2533 crto, ctrl->cap); 2534 else 2535 timeout = ready_timeout; 2536 } 2537 2538 ctrl->ctrl_config |= NVME_CC_ENABLE; 2539 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2540 if (ret) 2541 return ret; 2542 return nvme_wait_ready(ctrl, NVME_CSTS_RDY, NVME_CSTS_RDY, 2543 (timeout + 1) / 2, "initialisation"); 2544 } 2545 EXPORT_SYMBOL_GPL(nvme_enable_ctrl); 2546 2547 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) 2548 { 2549 __le64 ts; 2550 int ret; 2551 2552 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) 2553 return 0; 2554 2555 ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); 2556 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), 2557 NULL); 2558 if (ret) 2559 dev_warn_once(ctrl->device, 2560 "could not set timestamp (%d)\n", ret); 2561 return ret; 2562 } 2563 2564 static int nvme_configure_host_options(struct nvme_ctrl *ctrl) 2565 { 2566 struct nvme_feat_host_behavior *host; 2567 u8 acre = 0, lbafee = 0; 2568 int ret; 2569 2570 /* Don't bother enabling the feature if retry delay is not reported */ 2571 if (ctrl->crdt[0]) 2572 acre = NVME_ENABLE_ACRE; 2573 if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) 2574 lbafee = NVME_ENABLE_LBAFEE; 2575 2576 if (!acre && !lbafee) 2577 return 0; 2578 2579 host = kzalloc(sizeof(*host), GFP_KERNEL); 2580 if (!host) 2581 return 0; 2582 2583 host->acre = acre; 2584 host->lbafee = lbafee; 2585 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, 2586 host, sizeof(*host), NULL); 2587 kfree(host); 2588 return ret; 2589 } 2590 2591 /* 2592 * The function checks whether the given total (exlat + enlat) latency of 2593 * a power state allows the latter to be used as an APST transition target. 2594 * It does so by comparing the latency to the primary and secondary latency 2595 * tolerances defined by module params. If there's a match, the corresponding 2596 * timeout value is returned and the matching tolerance index (1 or 2) is 2597 * reported. 2598 */ 2599 static bool nvme_apst_get_transition_time(u64 total_latency, 2600 u64 *transition_time, unsigned *last_index) 2601 { 2602 if (total_latency <= apst_primary_latency_tol_us) { 2603 if (*last_index == 1) 2604 return false; 2605 *last_index = 1; 2606 *transition_time = apst_primary_timeout_ms; 2607 return true; 2608 } 2609 if (apst_secondary_timeout_ms && 2610 total_latency <= apst_secondary_latency_tol_us) { 2611 if (*last_index <= 2) 2612 return false; 2613 *last_index = 2; 2614 *transition_time = apst_secondary_timeout_ms; 2615 return true; 2616 } 2617 return false; 2618 } 2619 2620 /* 2621 * APST (Autonomous Power State Transition) lets us program a table of power 2622 * state transitions that the controller will perform automatically. 2623 * 2624 * Depending on module params, one of the two supported techniques will be used: 2625 * 2626 * - If the parameters provide explicit timeouts and tolerances, they will be 2627 * used to build a table with up to 2 non-operational states to transition to. 2628 * The default parameter values were selected based on the values used by 2629 * Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic 2630 * regeneration of the APST table in the event of switching between external 2631 * and battery power, the timeouts and tolerances reflect a compromise 2632 * between values used by Microsoft for AC and battery scenarios. 2633 * - If not, we'll configure the table with a simple heuristic: we are willing 2634 * to spend at most 2% of the time transitioning between power states. 2635 * Therefore, when running in any given state, we will enter the next 2636 * lower-power non-operational state after waiting 50 * (enlat + exlat) 2637 * microseconds, as long as that state's exit latency is under the requested 2638 * maximum latency. 2639 * 2640 * We will not autonomously enter any non-operational state for which the total 2641 * latency exceeds ps_max_latency_us. 2642 * 2643 * Users can set ps_max_latency_us to zero to turn off APST. 2644 */ 2645 static int nvme_configure_apst(struct nvme_ctrl *ctrl) 2646 { 2647 struct nvme_feat_auto_pst *table; 2648 unsigned apste = 0; 2649 u64 max_lat_us = 0; 2650 __le64 target = 0; 2651 int max_ps = -1; 2652 int state; 2653 int ret; 2654 unsigned last_lt_index = UINT_MAX; 2655 2656 /* 2657 * If APST isn't supported or if we haven't been initialized yet, 2658 * then don't do anything. 2659 */ 2660 if (!ctrl->apsta) 2661 return 0; 2662 2663 if (ctrl->npss > 31) { 2664 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); 2665 return 0; 2666 } 2667 2668 table = kzalloc(sizeof(*table), GFP_KERNEL); 2669 if (!table) 2670 return 0; 2671 2672 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { 2673 /* Turn off APST. */ 2674 dev_dbg(ctrl->device, "APST disabled\n"); 2675 goto done; 2676 } 2677 2678 /* 2679 * Walk through all states from lowest- to highest-power. 2680 * According to the spec, lower-numbered states use more power. NPSS, 2681 * despite the name, is the index of the lowest-power state, not the 2682 * number of states. 2683 */ 2684 for (state = (int)ctrl->npss; state >= 0; state--) { 2685 u64 total_latency_us, exit_latency_us, transition_ms; 2686 2687 if (target) 2688 table->entries[state] = target; 2689 2690 /* 2691 * Don't allow transitions to the deepest state if it's quirked 2692 * off. 2693 */ 2694 if (state == ctrl->npss && 2695 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) 2696 continue; 2697 2698 /* 2699 * Is this state a useful non-operational state for higher-power 2700 * states to autonomously transition to? 2701 */ 2702 if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE)) 2703 continue; 2704 2705 exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat); 2706 if (exit_latency_us > ctrl->ps_max_latency_us) 2707 continue; 2708 2709 total_latency_us = exit_latency_us + 2710 le32_to_cpu(ctrl->psd[state].entry_lat); 2711 2712 /* 2713 * This state is good. It can be used as the APST idle target 2714 * for higher power states. 2715 */ 2716 if (apst_primary_timeout_ms && apst_primary_latency_tol_us) { 2717 if (!nvme_apst_get_transition_time(total_latency_us, 2718 &transition_ms, &last_lt_index)) 2719 continue; 2720 } else { 2721 transition_ms = total_latency_us + 19; 2722 do_div(transition_ms, 20); 2723 if (transition_ms > (1 << 24) - 1) 2724 transition_ms = (1 << 24) - 1; 2725 } 2726 2727 target = cpu_to_le64((state << 3) | (transition_ms << 8)); 2728 if (max_ps == -1) 2729 max_ps = state; 2730 if (total_latency_us > max_lat_us) 2731 max_lat_us = total_latency_us; 2732 } 2733 2734 if (max_ps == -1) 2735 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); 2736 else 2737 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", 2738 max_ps, max_lat_us, (int)sizeof(*table), table); 2739 apste = 1; 2740 2741 done: 2742 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, 2743 table, sizeof(*table), NULL); 2744 if (ret) 2745 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); 2746 kfree(table); 2747 return ret; 2748 } 2749 2750 static void nvme_set_latency_tolerance(struct device *dev, s32 val) 2751 { 2752 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2753 u64 latency; 2754 2755 switch (val) { 2756 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: 2757 case PM_QOS_LATENCY_ANY: 2758 latency = U64_MAX; 2759 break; 2760 2761 default: 2762 latency = val; 2763 } 2764 2765 if (ctrl->ps_max_latency_us != latency) { 2766 ctrl->ps_max_latency_us = latency; 2767 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE) 2768 nvme_configure_apst(ctrl); 2769 } 2770 } 2771 2772 struct nvme_core_quirk_entry { 2773 /* 2774 * NVMe model and firmware strings are padded with spaces. For 2775 * simplicity, strings in the quirk table are padded with NULLs 2776 * instead. 2777 */ 2778 u16 vid; 2779 const char *mn; 2780 const char *fr; 2781 unsigned long quirks; 2782 }; 2783 2784 static const struct nvme_core_quirk_entry core_quirks[] = { 2785 { 2786 /* 2787 * This Toshiba device seems to die using any APST states. See: 2788 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 2789 */ 2790 .vid = 0x1179, 2791 .mn = "THNSF5256GPUK TOSHIBA", 2792 .quirks = NVME_QUIRK_NO_APST, 2793 }, 2794 { 2795 /* 2796 * This LiteON CL1-3D*-Q11 firmware version has a race 2797 * condition associated with actions related to suspend to idle 2798 * LiteON has resolved the problem in future firmware 2799 */ 2800 .vid = 0x14a4, 2801 .fr = "22301111", 2802 .quirks = NVME_QUIRK_SIMPLE_SUSPEND, 2803 }, 2804 { 2805 /* 2806 * This Kioxia CD6-V Series / HPE PE8030 device times out and 2807 * aborts I/O during any load, but more easily reproducible 2808 * with discards (fstrim). 2809 * 2810 * The device is left in a state where it is also not possible 2811 * to use "nvme set-feature" to disable APST, but booting with 2812 * nvme_core.default_ps_max_latency=0 works. 2813 */ 2814 .vid = 0x1e0f, 2815 .mn = "KCD6XVUL6T40", 2816 .quirks = NVME_QUIRK_NO_APST, 2817 }, 2818 { 2819 /* 2820 * The external Samsung X5 SSD fails initialization without a 2821 * delay before checking if it is ready and has a whole set of 2822 * other problems. To make this even more interesting, it 2823 * shares the PCI ID with internal Samsung 970 Evo Plus that 2824 * does not need or want these quirks. 2825 */ 2826 .vid = 0x144d, 2827 .mn = "Samsung Portable SSD X5", 2828 .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 2829 NVME_QUIRK_NO_DEEPEST_PS | 2830 NVME_QUIRK_IGNORE_DEV_SUBNQN, 2831 } 2832 }; 2833 2834 /* match is null-terminated but idstr is space-padded. */ 2835 static bool string_matches(const char *idstr, const char *match, size_t len) 2836 { 2837 size_t matchlen; 2838 2839 if (!match) 2840 return true; 2841 2842 matchlen = strlen(match); 2843 WARN_ON_ONCE(matchlen > len); 2844 2845 if (memcmp(idstr, match, matchlen)) 2846 return false; 2847 2848 for (; matchlen < len; matchlen++) 2849 if (idstr[matchlen] != ' ') 2850 return false; 2851 2852 return true; 2853 } 2854 2855 static bool quirk_matches(const struct nvme_id_ctrl *id, 2856 const struct nvme_core_quirk_entry *q) 2857 { 2858 return q->vid == le16_to_cpu(id->vid) && 2859 string_matches(id->mn, q->mn, sizeof(id->mn)) && 2860 string_matches(id->fr, q->fr, sizeof(id->fr)); 2861 } 2862 2863 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, 2864 struct nvme_id_ctrl *id) 2865 { 2866 size_t nqnlen; 2867 int off; 2868 2869 if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { 2870 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 2871 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 2872 strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 2873 return; 2874 } 2875 2876 if (ctrl->vs >= NVME_VS(1, 2, 1)) 2877 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 2878 } 2879 2880 /* 2881 * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe 2882 * Base Specification 2.0. It is slightly different from the format 2883 * specified there due to historic reasons, and we can't change it now. 2884 */ 2885 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 2886 "nqn.2014.08.org.nvmexpress:%04x%04x", 2887 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 2888 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 2889 off += sizeof(id->sn); 2890 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); 2891 off += sizeof(id->mn); 2892 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); 2893 } 2894 2895 static void nvme_release_subsystem(struct device *dev) 2896 { 2897 struct nvme_subsystem *subsys = 2898 container_of(dev, struct nvme_subsystem, dev); 2899 2900 if (subsys->instance >= 0) 2901 ida_free(&nvme_instance_ida, subsys->instance); 2902 kfree(subsys); 2903 } 2904 2905 static void nvme_destroy_subsystem(struct kref *ref) 2906 { 2907 struct nvme_subsystem *subsys = 2908 container_of(ref, struct nvme_subsystem, ref); 2909 2910 mutex_lock(&nvme_subsystems_lock); 2911 list_del(&subsys->entry); 2912 mutex_unlock(&nvme_subsystems_lock); 2913 2914 ida_destroy(&subsys->ns_ida); 2915 device_del(&subsys->dev); 2916 put_device(&subsys->dev); 2917 } 2918 2919 static void nvme_put_subsystem(struct nvme_subsystem *subsys) 2920 { 2921 kref_put(&subsys->ref, nvme_destroy_subsystem); 2922 } 2923 2924 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) 2925 { 2926 struct nvme_subsystem *subsys; 2927 2928 lockdep_assert_held(&nvme_subsystems_lock); 2929 2930 /* 2931 * Fail matches for discovery subsystems. This results 2932 * in each discovery controller bound to a unique subsystem. 2933 * This avoids issues with validating controller values 2934 * that can only be true when there is a single unique subsystem. 2935 * There may be multiple and completely independent entities 2936 * that provide discovery controllers. 2937 */ 2938 if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME)) 2939 return NULL; 2940 2941 list_for_each_entry(subsys, &nvme_subsystems, entry) { 2942 if (strcmp(subsys->subnqn, subsysnqn)) 2943 continue; 2944 if (!kref_get_unless_zero(&subsys->ref)) 2945 continue; 2946 return subsys; 2947 } 2948 2949 return NULL; 2950 } 2951 2952 static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl) 2953 { 2954 return ctrl->opts && ctrl->opts->discovery_nqn; 2955 } 2956 2957 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, 2958 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2959 { 2960 struct nvme_ctrl *tmp; 2961 2962 lockdep_assert_held(&nvme_subsystems_lock); 2963 2964 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) { 2965 if (nvme_state_terminal(tmp)) 2966 continue; 2967 2968 if (tmp->cntlid == ctrl->cntlid) { 2969 dev_err(ctrl->device, 2970 "Duplicate cntlid %u with %s, subsys %s, rejecting\n", 2971 ctrl->cntlid, dev_name(tmp->device), 2972 subsys->subnqn); 2973 return false; 2974 } 2975 2976 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || 2977 nvme_discovery_ctrl(ctrl)) 2978 continue; 2979 2980 dev_err(ctrl->device, 2981 "Subsystem does not support multiple controllers\n"); 2982 return false; 2983 } 2984 2985 return true; 2986 } 2987 2988 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2989 { 2990 struct nvme_subsystem *subsys, *found; 2991 int ret; 2992 2993 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); 2994 if (!subsys) 2995 return -ENOMEM; 2996 2997 subsys->instance = -1; 2998 mutex_init(&subsys->lock); 2999 kref_init(&subsys->ref); 3000 INIT_LIST_HEAD(&subsys->ctrls); 3001 INIT_LIST_HEAD(&subsys->nsheads); 3002 nvme_init_subnqn(subsys, ctrl, id); 3003 memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); 3004 memcpy(subsys->model, id->mn, sizeof(subsys->model)); 3005 subsys->vendor_id = le16_to_cpu(id->vid); 3006 subsys->cmic = id->cmic; 3007 3008 /* Versions prior to 1.4 don't necessarily report a valid type */ 3009 if (id->cntrltype == NVME_CTRL_DISC || 3010 !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME)) 3011 subsys->subtype = NVME_NQN_DISC; 3012 else 3013 subsys->subtype = NVME_NQN_NVME; 3014 3015 if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) { 3016 dev_err(ctrl->device, 3017 "Subsystem %s is not a discovery controller", 3018 subsys->subnqn); 3019 kfree(subsys); 3020 return -EINVAL; 3021 } 3022 subsys->awupf = le16_to_cpu(id->awupf); 3023 nvme_mpath_default_iopolicy(subsys); 3024 3025 subsys->dev.class = &nvme_subsys_class; 3026 subsys->dev.release = nvme_release_subsystem; 3027 subsys->dev.groups = nvme_subsys_attrs_groups; 3028 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); 3029 device_initialize(&subsys->dev); 3030 3031 mutex_lock(&nvme_subsystems_lock); 3032 found = __nvme_find_get_subsystem(subsys->subnqn); 3033 if (found) { 3034 put_device(&subsys->dev); 3035 subsys = found; 3036 3037 if (!nvme_validate_cntlid(subsys, ctrl, id)) { 3038 ret = -EINVAL; 3039 goto out_put_subsystem; 3040 } 3041 } else { 3042 ret = device_add(&subsys->dev); 3043 if (ret) { 3044 dev_err(ctrl->device, 3045 "failed to register subsystem device.\n"); 3046 put_device(&subsys->dev); 3047 goto out_unlock; 3048 } 3049 ida_init(&subsys->ns_ida); 3050 list_add_tail(&subsys->entry, &nvme_subsystems); 3051 } 3052 3053 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, 3054 dev_name(ctrl->device)); 3055 if (ret) { 3056 dev_err(ctrl->device, 3057 "failed to create sysfs link from subsystem.\n"); 3058 goto out_put_subsystem; 3059 } 3060 3061 if (!found) 3062 subsys->instance = ctrl->instance; 3063 ctrl->subsys = subsys; 3064 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); 3065 mutex_unlock(&nvme_subsystems_lock); 3066 return 0; 3067 3068 out_put_subsystem: 3069 nvme_put_subsystem(subsys); 3070 out_unlock: 3071 mutex_unlock(&nvme_subsystems_lock); 3072 return ret; 3073 } 3074 3075 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, 3076 void *log, size_t size, u64 offset) 3077 { 3078 struct nvme_command c = { }; 3079 u32 dwlen = nvme_bytes_to_numd(size); 3080 3081 c.get_log_page.opcode = nvme_admin_get_log_page; 3082 c.get_log_page.nsid = cpu_to_le32(nsid); 3083 c.get_log_page.lid = log_page; 3084 c.get_log_page.lsp = lsp; 3085 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); 3086 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); 3087 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); 3088 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); 3089 c.get_log_page.csi = csi; 3090 3091 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); 3092 } 3093 3094 static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, 3095 struct nvme_effects_log **log) 3096 { 3097 struct nvme_effects_log *old, *cel = xa_load(&ctrl->cels, csi); 3098 int ret; 3099 3100 if (cel) 3101 goto out; 3102 3103 cel = kzalloc(sizeof(*cel), GFP_KERNEL); 3104 if (!cel) 3105 return -ENOMEM; 3106 3107 ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi, 3108 cel, sizeof(*cel), 0); 3109 if (ret) { 3110 kfree(cel); 3111 return ret; 3112 } 3113 3114 old = xa_store(&ctrl->cels, csi, cel, GFP_KERNEL); 3115 if (xa_is_err(old)) { 3116 kfree(cel); 3117 return xa_err(old); 3118 } 3119 out: 3120 *log = cel; 3121 return 0; 3122 } 3123 3124 static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units) 3125 { 3126 u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val; 3127 3128 if (check_shl_overflow(1U, units + page_shift - 9, &val)) 3129 return UINT_MAX; 3130 return val; 3131 } 3132 3133 static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) 3134 { 3135 struct nvme_command c = { }; 3136 struct nvme_id_ctrl_nvm *id; 3137 int ret; 3138 3139 /* 3140 * Even though NVMe spec explicitly states that MDTS is not applicable 3141 * to the write-zeroes, we are cautious and limit the size to the 3142 * controllers max_hw_sectors value, which is based on the MDTS field 3143 * and possibly other limiting factors. 3144 */ 3145 if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) && 3146 !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) 3147 ctrl->max_zeroes_sectors = ctrl->max_hw_sectors; 3148 else 3149 ctrl->max_zeroes_sectors = 0; 3150 3151 if (ctrl->subsys->subtype != NVME_NQN_NVME || 3152 !nvme_id_cns_ok(ctrl, NVME_ID_CNS_CS_CTRL) || 3153 test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags)) 3154 return 0; 3155 3156 id = kzalloc(sizeof(*id), GFP_KERNEL); 3157 if (!id) 3158 return -ENOMEM; 3159 3160 c.identify.opcode = nvme_admin_identify; 3161 c.identify.cns = NVME_ID_CNS_CS_CTRL; 3162 c.identify.csi = NVME_CSI_NVM; 3163 3164 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); 3165 if (ret) 3166 goto free_data; 3167 3168 ctrl->dmrl = id->dmrl; 3169 ctrl->dmrsl = le32_to_cpu(id->dmrsl); 3170 if (id->wzsl) 3171 ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl); 3172 3173 free_data: 3174 if (ret > 0) 3175 set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags); 3176 kfree(id); 3177 return ret; 3178 } 3179 3180 static int nvme_init_effects_log(struct nvme_ctrl *ctrl, 3181 u8 csi, struct nvme_effects_log **log) 3182 { 3183 struct nvme_effects_log *effects, *old; 3184 3185 effects = kzalloc(sizeof(*effects), GFP_KERNEL); 3186 if (!effects) 3187 return -ENOMEM; 3188 3189 old = xa_store(&ctrl->cels, csi, effects, GFP_KERNEL); 3190 if (xa_is_err(old)) { 3191 kfree(effects); 3192 return xa_err(old); 3193 } 3194 3195 *log = effects; 3196 return 0; 3197 } 3198 3199 static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl) 3200 { 3201 struct nvme_effects_log *log = ctrl->effects; 3202 3203 log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC | 3204 NVME_CMD_EFFECTS_NCC | 3205 NVME_CMD_EFFECTS_CSE_MASK); 3206 log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC | 3207 NVME_CMD_EFFECTS_CSE_MASK); 3208 3209 /* 3210 * The spec says the result of a security receive command depends on 3211 * the previous security send command. As such, many vendors log this 3212 * command as one to submitted only when no other commands to the same 3213 * namespace are outstanding. The intention is to tell the host to 3214 * prevent mixing security send and receive. 3215 * 3216 * This driver can only enforce such exclusive access against IO 3217 * queues, though. We are not readily able to enforce such a rule for 3218 * two commands to the admin queue, which is the only queue that 3219 * matters for this command. 3220 * 3221 * Rather than blindly freezing the IO queues for this effect that 3222 * doesn't even apply to IO, mask it off. 3223 */ 3224 log->acs[nvme_admin_security_recv] &= cpu_to_le32(~NVME_CMD_EFFECTS_CSE_MASK); 3225 3226 log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); 3227 log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); 3228 log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); 3229 } 3230 3231 static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 3232 { 3233 int ret = 0; 3234 3235 if (ctrl->effects) 3236 return 0; 3237 3238 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { 3239 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects); 3240 if (ret < 0) 3241 return ret; 3242 } 3243 3244 if (!ctrl->effects) { 3245 ret = nvme_init_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects); 3246 if (ret < 0) 3247 return ret; 3248 } 3249 3250 nvme_init_known_nvm_effects(ctrl); 3251 return 0; 3252 } 3253 3254 static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 3255 { 3256 /* 3257 * In fabrics we need to verify the cntlid matches the 3258 * admin connect 3259 */ 3260 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { 3261 dev_err(ctrl->device, 3262 "Mismatching cntlid: Connect %u vs Identify %u, rejecting\n", 3263 ctrl->cntlid, le16_to_cpu(id->cntlid)); 3264 return -EINVAL; 3265 } 3266 3267 if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) { 3268 dev_err(ctrl->device, 3269 "keep-alive support is mandatory for fabrics\n"); 3270 return -EINVAL; 3271 } 3272 3273 if (!nvme_discovery_ctrl(ctrl) && ctrl->ioccsz < 4) { 3274 dev_err(ctrl->device, 3275 "I/O queue command capsule supported size %d < 4\n", 3276 ctrl->ioccsz); 3277 return -EINVAL; 3278 } 3279 3280 if (!nvme_discovery_ctrl(ctrl) && ctrl->iorcsz < 1) { 3281 dev_err(ctrl->device, 3282 "I/O queue response capsule supported size %d < 1\n", 3283 ctrl->iorcsz); 3284 return -EINVAL; 3285 } 3286 3287 if (!ctrl->maxcmd) { 3288 dev_warn(ctrl->device, 3289 "Firmware bug: maximum outstanding commands is 0\n"); 3290 ctrl->maxcmd = ctrl->sqsize + 1; 3291 } 3292 3293 return 0; 3294 } 3295 3296 static int nvme_init_identify(struct nvme_ctrl *ctrl) 3297 { 3298 struct queue_limits lim; 3299 struct nvme_id_ctrl *id; 3300 u32 max_hw_sectors; 3301 bool prev_apst_enabled; 3302 int ret; 3303 3304 ret = nvme_identify_ctrl(ctrl, &id); 3305 if (ret) { 3306 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); 3307 return -EIO; 3308 } 3309 3310 if (!(ctrl->ops->flags & NVME_F_FABRICS)) 3311 ctrl->cntlid = le16_to_cpu(id->cntlid); 3312 3313 if (!ctrl->identified) { 3314 unsigned int i; 3315 3316 /* 3317 * Check for quirks. Quirk can depend on firmware version, 3318 * so, in principle, the set of quirks present can change 3319 * across a reset. As a possible future enhancement, we 3320 * could re-scan for quirks every time we reinitialize 3321 * the device, but we'd have to make sure that the driver 3322 * behaves intelligently if the quirks change. 3323 */ 3324 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { 3325 if (quirk_matches(id, &core_quirks[i])) 3326 ctrl->quirks |= core_quirks[i].quirks; 3327 } 3328 3329 ret = nvme_init_subsystem(ctrl, id); 3330 if (ret) 3331 goto out_free; 3332 3333 ret = nvme_init_effects(ctrl, id); 3334 if (ret) 3335 goto out_free; 3336 } 3337 memcpy(ctrl->subsys->firmware_rev, id->fr, 3338 sizeof(ctrl->subsys->firmware_rev)); 3339 3340 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { 3341 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); 3342 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; 3343 } 3344 3345 ctrl->crdt[0] = le16_to_cpu(id->crdt1); 3346 ctrl->crdt[1] = le16_to_cpu(id->crdt2); 3347 ctrl->crdt[2] = le16_to_cpu(id->crdt3); 3348 3349 ctrl->oacs = le16_to_cpu(id->oacs); 3350 ctrl->oncs = le16_to_cpu(id->oncs); 3351 ctrl->mtfa = le16_to_cpu(id->mtfa); 3352 ctrl->oaes = le32_to_cpu(id->oaes); 3353 ctrl->wctemp = le16_to_cpu(id->wctemp); 3354 ctrl->cctemp = le16_to_cpu(id->cctemp); 3355 3356 atomic_set(&ctrl->abort_limit, id->acl + 1); 3357 ctrl->vwc = id->vwc; 3358 if (id->mdts) 3359 max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts); 3360 else 3361 max_hw_sectors = UINT_MAX; 3362 ctrl->max_hw_sectors = 3363 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); 3364 3365 lim = queue_limits_start_update(ctrl->admin_q); 3366 nvme_set_ctrl_limits(ctrl, &lim); 3367 ret = queue_limits_commit_update(ctrl->admin_q, &lim); 3368 if (ret) 3369 goto out_free; 3370 3371 ctrl->sgls = le32_to_cpu(id->sgls); 3372 ctrl->kas = le16_to_cpu(id->kas); 3373 ctrl->max_namespaces = le32_to_cpu(id->mnan); 3374 ctrl->ctratt = le32_to_cpu(id->ctratt); 3375 3376 ctrl->cntrltype = id->cntrltype; 3377 ctrl->dctype = id->dctype; 3378 3379 if (id->rtd3e) { 3380 /* us -> s */ 3381 u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC; 3382 3383 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, 3384 shutdown_timeout, 60); 3385 3386 if (ctrl->shutdown_timeout != shutdown_timeout) 3387 dev_info(ctrl->device, 3388 "D3 entry latency set to %u seconds\n", 3389 ctrl->shutdown_timeout); 3390 } else 3391 ctrl->shutdown_timeout = shutdown_timeout; 3392 3393 ctrl->npss = id->npss; 3394 ctrl->apsta = id->apsta; 3395 prev_apst_enabled = ctrl->apst_enabled; 3396 if (ctrl->quirks & NVME_QUIRK_NO_APST) { 3397 if (force_apst && id->apsta) { 3398 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); 3399 ctrl->apst_enabled = true; 3400 } else { 3401 ctrl->apst_enabled = false; 3402 } 3403 } else { 3404 ctrl->apst_enabled = id->apsta; 3405 } 3406 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); 3407 3408 if (ctrl->ops->flags & NVME_F_FABRICS) { 3409 ctrl->icdoff = le16_to_cpu(id->icdoff); 3410 ctrl->ioccsz = le32_to_cpu(id->ioccsz); 3411 ctrl->iorcsz = le32_to_cpu(id->iorcsz); 3412 ctrl->maxcmd = le16_to_cpu(id->maxcmd); 3413 3414 ret = nvme_check_ctrl_fabric_info(ctrl, id); 3415 if (ret) 3416 goto out_free; 3417 } else { 3418 ctrl->hmpre = le32_to_cpu(id->hmpre); 3419 ctrl->hmmin = le32_to_cpu(id->hmmin); 3420 ctrl->hmminds = le32_to_cpu(id->hmminds); 3421 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); 3422 } 3423 3424 ret = nvme_mpath_init_identify(ctrl, id); 3425 if (ret < 0) 3426 goto out_free; 3427 3428 if (ctrl->apst_enabled && !prev_apst_enabled) 3429 dev_pm_qos_expose_latency_tolerance(ctrl->device); 3430 else if (!ctrl->apst_enabled && prev_apst_enabled) 3431 dev_pm_qos_hide_latency_tolerance(ctrl->device); 3432 3433 out_free: 3434 kfree(id); 3435 return ret; 3436 } 3437 3438 /* 3439 * Initialize the cached copies of the Identify data and various controller 3440 * register in our nvme_ctrl structure. This should be called as soon as 3441 * the admin queue is fully up and running. 3442 */ 3443 int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended) 3444 { 3445 int ret; 3446 3447 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); 3448 if (ret) { 3449 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); 3450 return ret; 3451 } 3452 3453 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); 3454 3455 if (ctrl->vs >= NVME_VS(1, 1, 0)) 3456 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap); 3457 3458 ret = nvme_init_identify(ctrl); 3459 if (ret) 3460 return ret; 3461 3462 ret = nvme_configure_apst(ctrl); 3463 if (ret < 0) 3464 return ret; 3465 3466 ret = nvme_configure_timestamp(ctrl); 3467 if (ret < 0) 3468 return ret; 3469 3470 ret = nvme_configure_host_options(ctrl); 3471 if (ret < 0) 3472 return ret; 3473 3474 nvme_configure_opal(ctrl, was_suspended); 3475 3476 if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { 3477 /* 3478 * Do not return errors unless we are in a controller reset, 3479 * the controller works perfectly fine without hwmon. 3480 */ 3481 ret = nvme_hwmon_init(ctrl); 3482 if (ret == -EINTR) 3483 return ret; 3484 } 3485 3486 clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags); 3487 ctrl->identified = true; 3488 3489 nvme_start_keep_alive(ctrl); 3490 3491 return 0; 3492 } 3493 EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish); 3494 3495 static int nvme_dev_open(struct inode *inode, struct file *file) 3496 { 3497 struct nvme_ctrl *ctrl = 3498 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 3499 3500 switch (nvme_ctrl_state(ctrl)) { 3501 case NVME_CTRL_LIVE: 3502 break; 3503 default: 3504 return -EWOULDBLOCK; 3505 } 3506 3507 nvme_get_ctrl(ctrl); 3508 if (!try_module_get(ctrl->ops->module)) { 3509 nvme_put_ctrl(ctrl); 3510 return -EINVAL; 3511 } 3512 3513 file->private_data = ctrl; 3514 return 0; 3515 } 3516 3517 static int nvme_dev_release(struct inode *inode, struct file *file) 3518 { 3519 struct nvme_ctrl *ctrl = 3520 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 3521 3522 module_put(ctrl->ops->module); 3523 nvme_put_ctrl(ctrl); 3524 return 0; 3525 } 3526 3527 static const struct file_operations nvme_dev_fops = { 3528 .owner = THIS_MODULE, 3529 .open = nvme_dev_open, 3530 .release = nvme_dev_release, 3531 .unlocked_ioctl = nvme_dev_ioctl, 3532 .compat_ioctl = compat_ptr_ioctl, 3533 .uring_cmd = nvme_dev_uring_cmd, 3534 }; 3535 3536 static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl, 3537 unsigned nsid) 3538 { 3539 struct nvme_ns_head *h; 3540 3541 lockdep_assert_held(&ctrl->subsys->lock); 3542 3543 list_for_each_entry(h, &ctrl->subsys->nsheads, entry) { 3544 /* 3545 * Private namespaces can share NSIDs under some conditions. 3546 * In that case we can't use the same ns_head for namespaces 3547 * with the same NSID. 3548 */ 3549 if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h)) 3550 continue; 3551 if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) 3552 return h; 3553 } 3554 3555 return NULL; 3556 } 3557 3558 static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys, 3559 struct nvme_ns_ids *ids) 3560 { 3561 bool has_uuid = !uuid_is_null(&ids->uuid); 3562 bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid)); 3563 bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64)); 3564 struct nvme_ns_head *h; 3565 3566 lockdep_assert_held(&subsys->lock); 3567 3568 list_for_each_entry(h, &subsys->nsheads, entry) { 3569 if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid)) 3570 return -EINVAL; 3571 if (has_nguid && 3572 memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0) 3573 return -EINVAL; 3574 if (has_eui64 && 3575 memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0) 3576 return -EINVAL; 3577 } 3578 3579 return 0; 3580 } 3581 3582 static void nvme_cdev_rel(struct device *dev) 3583 { 3584 ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt)); 3585 } 3586 3587 void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device) 3588 { 3589 cdev_device_del(cdev, cdev_device); 3590 put_device(cdev_device); 3591 } 3592 3593 int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, 3594 const struct file_operations *fops, struct module *owner) 3595 { 3596 int minor, ret; 3597 3598 minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL); 3599 if (minor < 0) 3600 return minor; 3601 cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor); 3602 cdev_device->class = &nvme_ns_chr_class; 3603 cdev_device->release = nvme_cdev_rel; 3604 device_initialize(cdev_device); 3605 cdev_init(cdev, fops); 3606 cdev->owner = owner; 3607 ret = cdev_device_add(cdev, cdev_device); 3608 if (ret) 3609 put_device(cdev_device); 3610 3611 return ret; 3612 } 3613 3614 static int nvme_ns_chr_open(struct inode *inode, struct file *file) 3615 { 3616 return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev)); 3617 } 3618 3619 static int nvme_ns_chr_release(struct inode *inode, struct file *file) 3620 { 3621 nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev)); 3622 return 0; 3623 } 3624 3625 static const struct file_operations nvme_ns_chr_fops = { 3626 .owner = THIS_MODULE, 3627 .open = nvme_ns_chr_open, 3628 .release = nvme_ns_chr_release, 3629 .unlocked_ioctl = nvme_ns_chr_ioctl, 3630 .compat_ioctl = compat_ptr_ioctl, 3631 .uring_cmd = nvme_ns_chr_uring_cmd, 3632 .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll, 3633 }; 3634 3635 static int nvme_add_ns_cdev(struct nvme_ns *ns) 3636 { 3637 int ret; 3638 3639 ns->cdev_device.parent = ns->ctrl->device; 3640 ret = dev_set_name(&ns->cdev_device, "ng%dn%d", 3641 ns->ctrl->instance, ns->head->instance); 3642 if (ret) 3643 return ret; 3644 3645 return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops, 3646 ns->ctrl->ops->module); 3647 } 3648 3649 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, 3650 struct nvme_ns_info *info) 3651 { 3652 struct nvme_ns_head *head; 3653 size_t size = sizeof(*head); 3654 int ret = -ENOMEM; 3655 3656 #ifdef CONFIG_NVME_MULTIPATH 3657 size += num_possible_nodes() * sizeof(struct nvme_ns *); 3658 #endif 3659 3660 head = kzalloc(size, GFP_KERNEL); 3661 if (!head) 3662 goto out; 3663 ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL); 3664 if (ret < 0) 3665 goto out_free_head; 3666 head->instance = ret; 3667 INIT_LIST_HEAD(&head->list); 3668 ret = init_srcu_struct(&head->srcu); 3669 if (ret) 3670 goto out_ida_remove; 3671 head->subsys = ctrl->subsys; 3672 head->ns_id = info->nsid; 3673 head->ids = info->ids; 3674 head->shared = info->is_shared; 3675 head->rotational = info->is_rotational; 3676 ratelimit_state_init(&head->rs_nuse, 5 * HZ, 1); 3677 ratelimit_set_flags(&head->rs_nuse, RATELIMIT_MSG_ON_RELEASE); 3678 kref_init(&head->ref); 3679 3680 if (head->ids.csi) { 3681 ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects); 3682 if (ret) 3683 goto out_cleanup_srcu; 3684 } else 3685 head->effects = ctrl->effects; 3686 3687 ret = nvme_mpath_alloc_disk(ctrl, head); 3688 if (ret) 3689 goto out_cleanup_srcu; 3690 3691 list_add_tail(&head->entry, &ctrl->subsys->nsheads); 3692 3693 kref_get(&ctrl->subsys->ref); 3694 3695 return head; 3696 out_cleanup_srcu: 3697 cleanup_srcu_struct(&head->srcu); 3698 out_ida_remove: 3699 ida_free(&ctrl->subsys->ns_ida, head->instance); 3700 out_free_head: 3701 kfree(head); 3702 out: 3703 if (ret > 0) 3704 ret = blk_status_to_errno(nvme_error_status(ret)); 3705 return ERR_PTR(ret); 3706 } 3707 3708 static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this, 3709 struct nvme_ns_ids *ids) 3710 { 3711 struct nvme_subsystem *s; 3712 int ret = 0; 3713 3714 /* 3715 * Note that this check is racy as we try to avoid holding the global 3716 * lock over the whole ns_head creation. But it is only intended as 3717 * a sanity check anyway. 3718 */ 3719 mutex_lock(&nvme_subsystems_lock); 3720 list_for_each_entry(s, &nvme_subsystems, entry) { 3721 if (s == this) 3722 continue; 3723 mutex_lock(&s->lock); 3724 ret = nvme_subsys_check_duplicate_ids(s, ids); 3725 mutex_unlock(&s->lock); 3726 if (ret) 3727 break; 3728 } 3729 mutex_unlock(&nvme_subsystems_lock); 3730 3731 return ret; 3732 } 3733 3734 static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) 3735 { 3736 struct nvme_ctrl *ctrl = ns->ctrl; 3737 struct nvme_ns_head *head = NULL; 3738 int ret; 3739 3740 ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids); 3741 if (ret) { 3742 /* 3743 * We've found two different namespaces on two different 3744 * subsystems that report the same ID. This is pretty nasty 3745 * for anything that actually requires unique device 3746 * identification. In the kernel we need this for multipathing, 3747 * and in user space the /dev/disk/by-id/ links rely on it. 3748 * 3749 * If the device also claims to be multi-path capable back off 3750 * here now and refuse the probe the second device as this is a 3751 * recipe for data corruption. If not this is probably a 3752 * cheap consumer device if on the PCIe bus, so let the user 3753 * proceed and use the shiny toy, but warn that with changing 3754 * probing order (which due to our async probing could just be 3755 * device taking longer to startup) the other device could show 3756 * up at any time. 3757 */ 3758 nvme_print_device_info(ctrl); 3759 if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */ 3760 ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) && 3761 info->is_shared)) { 3762 dev_err(ctrl->device, 3763 "ignoring nsid %d because of duplicate IDs\n", 3764 info->nsid); 3765 return ret; 3766 } 3767 3768 dev_err(ctrl->device, 3769 "clearing duplicate IDs for nsid %d\n", info->nsid); 3770 dev_err(ctrl->device, 3771 "use of /dev/disk/by-id/ may cause data corruption\n"); 3772 memset(&info->ids.nguid, 0, sizeof(info->ids.nguid)); 3773 memset(&info->ids.uuid, 0, sizeof(info->ids.uuid)); 3774 memset(&info->ids.eui64, 0, sizeof(info->ids.eui64)); 3775 ctrl->quirks |= NVME_QUIRK_BOGUS_NID; 3776 } 3777 3778 mutex_lock(&ctrl->subsys->lock); 3779 head = nvme_find_ns_head(ctrl, info->nsid); 3780 if (!head) { 3781 ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids); 3782 if (ret) { 3783 dev_err(ctrl->device, 3784 "duplicate IDs in subsystem for nsid %d\n", 3785 info->nsid); 3786 goto out_unlock; 3787 } 3788 head = nvme_alloc_ns_head(ctrl, info); 3789 if (IS_ERR(head)) { 3790 ret = PTR_ERR(head); 3791 goto out_unlock; 3792 } 3793 } else { 3794 ret = -EINVAL; 3795 if (!info->is_shared || !head->shared) { 3796 dev_err(ctrl->device, 3797 "Duplicate unshared namespace %d\n", 3798 info->nsid); 3799 goto out_put_ns_head; 3800 } 3801 if (!nvme_ns_ids_equal(&head->ids, &info->ids)) { 3802 dev_err(ctrl->device, 3803 "IDs don't match for shared namespace %d\n", 3804 info->nsid); 3805 goto out_put_ns_head; 3806 } 3807 3808 if (!multipath) { 3809 dev_warn(ctrl->device, 3810 "Found shared namespace %d, but multipathing not supported.\n", 3811 info->nsid); 3812 dev_warn_once(ctrl->device, 3813 "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n"); 3814 } 3815 } 3816 3817 list_add_tail_rcu(&ns->siblings, &head->list); 3818 ns->head = head; 3819 mutex_unlock(&ctrl->subsys->lock); 3820 return 0; 3821 3822 out_put_ns_head: 3823 nvme_put_ns_head(head); 3824 out_unlock: 3825 mutex_unlock(&ctrl->subsys->lock); 3826 return ret; 3827 } 3828 3829 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3830 { 3831 struct nvme_ns *ns, *ret = NULL; 3832 int srcu_idx; 3833 3834 srcu_idx = srcu_read_lock(&ctrl->srcu); 3835 list_for_each_entry_srcu(ns, &ctrl->namespaces, list, 3836 srcu_read_lock_held(&ctrl->srcu)) { 3837 if (ns->head->ns_id == nsid) { 3838 if (!nvme_get_ns(ns)) 3839 continue; 3840 ret = ns; 3841 break; 3842 } 3843 if (ns->head->ns_id > nsid) 3844 break; 3845 } 3846 srcu_read_unlock(&ctrl->srcu, srcu_idx); 3847 return ret; 3848 } 3849 EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, "NVME_TARGET_PASSTHRU"); 3850 3851 /* 3852 * Add the namespace to the controller list while keeping the list ordered. 3853 */ 3854 static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) 3855 { 3856 struct nvme_ns *tmp; 3857 3858 list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) { 3859 if (tmp->head->ns_id < ns->head->ns_id) { 3860 list_add_rcu(&ns->list, &tmp->list); 3861 return; 3862 } 3863 } 3864 list_add(&ns->list, &ns->ctrl->namespaces); 3865 } 3866 3867 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) 3868 { 3869 struct queue_limits lim = { }; 3870 struct nvme_ns *ns; 3871 struct gendisk *disk; 3872 int node = ctrl->numa_node; 3873 3874 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); 3875 if (!ns) 3876 return; 3877 3878 if (ctrl->opts && ctrl->opts->data_digest) 3879 lim.features |= BLK_FEAT_STABLE_WRITES; 3880 if (ctrl->ops->supports_pci_p2pdma && 3881 ctrl->ops->supports_pci_p2pdma(ctrl)) 3882 lim.features |= BLK_FEAT_PCI_P2PDMA; 3883 3884 disk = blk_mq_alloc_disk(ctrl->tagset, &lim, ns); 3885 if (IS_ERR(disk)) 3886 goto out_free_ns; 3887 disk->fops = &nvme_bdev_ops; 3888 disk->private_data = ns; 3889 3890 ns->disk = disk; 3891 ns->queue = disk->queue; 3892 ns->ctrl = ctrl; 3893 kref_init(&ns->kref); 3894 3895 if (nvme_init_ns_head(ns, info)) 3896 goto out_cleanup_disk; 3897 3898 /* 3899 * If multipathing is enabled, the device name for all disks and not 3900 * just those that represent shared namespaces needs to be based on the 3901 * subsystem instance. Using the controller instance for private 3902 * namespaces could lead to naming collisions between shared and private 3903 * namespaces if they don't use a common numbering scheme. 3904 * 3905 * If multipathing is not enabled, disk names must use the controller 3906 * instance as shared namespaces will show up as multiple block 3907 * devices. 3908 */ 3909 if (nvme_ns_head_multipath(ns->head)) { 3910 sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, 3911 ctrl->instance, ns->head->instance); 3912 disk->flags |= GENHD_FL_HIDDEN; 3913 } else if (multipath) { 3914 sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, 3915 ns->head->instance); 3916 } else { 3917 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, 3918 ns->head->instance); 3919 } 3920 3921 if (nvme_update_ns_info(ns, info)) 3922 goto out_unlink_ns; 3923 3924 mutex_lock(&ctrl->namespaces_lock); 3925 /* 3926 * Ensure that no namespaces are added to the ctrl list after the queues 3927 * are frozen, thereby avoiding a deadlock between scan and reset. 3928 */ 3929 if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) { 3930 mutex_unlock(&ctrl->namespaces_lock); 3931 goto out_unlink_ns; 3932 } 3933 nvme_ns_add_to_ctrl_list(ns); 3934 mutex_unlock(&ctrl->namespaces_lock); 3935 synchronize_srcu(&ctrl->srcu); 3936 nvme_get_ctrl(ctrl); 3937 3938 if (device_add_disk(ctrl->device, ns->disk, nvme_ns_attr_groups)) 3939 goto out_cleanup_ns_from_list; 3940 3941 if (!nvme_ns_head_multipath(ns->head)) 3942 nvme_add_ns_cdev(ns); 3943 3944 nvme_mpath_add_disk(ns, info->anagrpid); 3945 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); 3946 3947 /* 3948 * Set ns->disk->device->driver_data to ns so we can access 3949 * ns->head->passthru_err_log_enabled in 3950 * nvme_io_passthru_err_log_enabled_[store | show](). 3951 */ 3952 dev_set_drvdata(disk_to_dev(ns->disk), ns); 3953 3954 return; 3955 3956 out_cleanup_ns_from_list: 3957 nvme_put_ctrl(ctrl); 3958 mutex_lock(&ctrl->namespaces_lock); 3959 list_del_rcu(&ns->list); 3960 mutex_unlock(&ctrl->namespaces_lock); 3961 synchronize_srcu(&ctrl->srcu); 3962 out_unlink_ns: 3963 mutex_lock(&ctrl->subsys->lock); 3964 list_del_rcu(&ns->siblings); 3965 if (list_empty(&ns->head->list)) 3966 list_del_init(&ns->head->entry); 3967 mutex_unlock(&ctrl->subsys->lock); 3968 nvme_put_ns_head(ns->head); 3969 out_cleanup_disk: 3970 put_disk(disk); 3971 out_free_ns: 3972 kfree(ns); 3973 } 3974 3975 static void nvme_ns_remove(struct nvme_ns *ns) 3976 { 3977 bool last_path = false; 3978 3979 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) 3980 return; 3981 3982 clear_bit(NVME_NS_READY, &ns->flags); 3983 set_capacity(ns->disk, 0); 3984 nvme_fault_inject_fini(&ns->fault_inject); 3985 3986 /* 3987 * Ensure that !NVME_NS_READY is seen by other threads to prevent 3988 * this ns going back into current_path. 3989 */ 3990 synchronize_srcu(&ns->head->srcu); 3991 3992 /* wait for concurrent submissions */ 3993 if (nvme_mpath_clear_current_path(ns)) 3994 synchronize_srcu(&ns->head->srcu); 3995 3996 mutex_lock(&ns->ctrl->subsys->lock); 3997 list_del_rcu(&ns->siblings); 3998 if (list_empty(&ns->head->list)) { 3999 list_del_init(&ns->head->entry); 4000 last_path = true; 4001 } 4002 mutex_unlock(&ns->ctrl->subsys->lock); 4003 4004 /* guarantee not available in head->list */ 4005 synchronize_srcu(&ns->head->srcu); 4006 4007 if (!nvme_ns_head_multipath(ns->head)) 4008 nvme_cdev_del(&ns->cdev, &ns->cdev_device); 4009 del_gendisk(ns->disk); 4010 4011 mutex_lock(&ns->ctrl->namespaces_lock); 4012 list_del_rcu(&ns->list); 4013 mutex_unlock(&ns->ctrl->namespaces_lock); 4014 synchronize_srcu(&ns->ctrl->srcu); 4015 4016 if (last_path) 4017 nvme_mpath_shutdown_disk(ns->head); 4018 nvme_put_ns(ns); 4019 } 4020 4021 static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) 4022 { 4023 struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid); 4024 4025 if (ns) { 4026 nvme_ns_remove(ns); 4027 nvme_put_ns(ns); 4028 } 4029 } 4030 4031 static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info) 4032 { 4033 int ret = NVME_SC_INVALID_NS | NVME_STATUS_DNR; 4034 4035 if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) { 4036 dev_err(ns->ctrl->device, 4037 "identifiers changed for nsid %d\n", ns->head->ns_id); 4038 goto out; 4039 } 4040 4041 ret = nvme_update_ns_info(ns, info); 4042 out: 4043 /* 4044 * Only remove the namespace if we got a fatal error back from the 4045 * device, otherwise ignore the error and just move on. 4046 * 4047 * TODO: we should probably schedule a delayed retry here. 4048 */ 4049 if (ret > 0 && (ret & NVME_STATUS_DNR)) 4050 nvme_ns_remove(ns); 4051 } 4052 4053 static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid) 4054 { 4055 struct nvme_ns_info info = { .nsid = nsid }; 4056 struct nvme_ns *ns; 4057 int ret = 1; 4058 4059 if (nvme_identify_ns_descs(ctrl, &info)) 4060 return; 4061 4062 if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) { 4063 dev_warn(ctrl->device, 4064 "command set not reported for nsid: %d\n", nsid); 4065 return; 4066 } 4067 4068 /* 4069 * If available try to use the Command Set Idependent Identify Namespace 4070 * data structure to find all the generic information that is needed to 4071 * set up a namespace. If not fall back to the legacy version. 4072 */ 4073 if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) || 4074 (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS) || 4075 ctrl->vs >= NVME_VS(2, 0, 0)) 4076 ret = nvme_ns_info_from_id_cs_indep(ctrl, &info); 4077 if (ret > 0) 4078 ret = nvme_ns_info_from_identify(ctrl, &info); 4079 4080 if (info.is_removed) 4081 nvme_ns_remove_by_nsid(ctrl, nsid); 4082 4083 /* 4084 * Ignore the namespace if it is not ready. We will get an AEN once it 4085 * becomes ready and restart the scan. 4086 */ 4087 if (ret || !info.is_ready) 4088 return; 4089 4090 ns = nvme_find_get_ns(ctrl, nsid); 4091 if (ns) { 4092 nvme_validate_ns(ns, &info); 4093 nvme_put_ns(ns); 4094 } else { 4095 nvme_alloc_ns(ctrl, &info); 4096 } 4097 } 4098 4099 /** 4100 * struct async_scan_info - keeps track of controller & NSIDs to scan 4101 * @ctrl: Controller on which namespaces are being scanned 4102 * @next_nsid: Index of next NSID to scan in ns_list 4103 * @ns_list: Pointer to list of NSIDs to scan 4104 * 4105 * Note: There is a single async_scan_info structure shared by all instances 4106 * of nvme_scan_ns_async() scanning a given controller, so the atomic 4107 * operations on next_nsid are critical to ensure each instance scans a unique 4108 * NSID. 4109 */ 4110 struct async_scan_info { 4111 struct nvme_ctrl *ctrl; 4112 atomic_t next_nsid; 4113 __le32 *ns_list; 4114 }; 4115 4116 static void nvme_scan_ns_async(void *data, async_cookie_t cookie) 4117 { 4118 struct async_scan_info *scan_info = data; 4119 int idx; 4120 u32 nsid; 4121 4122 idx = (u32)atomic_fetch_inc(&scan_info->next_nsid); 4123 nsid = le32_to_cpu(scan_info->ns_list[idx]); 4124 4125 nvme_scan_ns(scan_info->ctrl, nsid); 4126 } 4127 4128 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 4129 unsigned nsid) 4130 { 4131 struct nvme_ns *ns, *next; 4132 LIST_HEAD(rm_list); 4133 4134 mutex_lock(&ctrl->namespaces_lock); 4135 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { 4136 if (ns->head->ns_id > nsid) { 4137 list_del_rcu(&ns->list); 4138 synchronize_srcu(&ctrl->srcu); 4139 list_add_tail_rcu(&ns->list, &rm_list); 4140 } 4141 } 4142 mutex_unlock(&ctrl->namespaces_lock); 4143 4144 list_for_each_entry_safe(ns, next, &rm_list, list) 4145 nvme_ns_remove(ns); 4146 } 4147 4148 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) 4149 { 4150 const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32); 4151 __le32 *ns_list; 4152 u32 prev = 0; 4153 int ret = 0, i; 4154 ASYNC_DOMAIN(domain); 4155 struct async_scan_info scan_info; 4156 4157 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 4158 if (!ns_list) 4159 return -ENOMEM; 4160 4161 scan_info.ctrl = ctrl; 4162 scan_info.ns_list = ns_list; 4163 for (;;) { 4164 struct nvme_command cmd = { 4165 .identify.opcode = nvme_admin_identify, 4166 .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST, 4167 .identify.nsid = cpu_to_le32(prev), 4168 }; 4169 4170 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list, 4171 NVME_IDENTIFY_DATA_SIZE); 4172 if (ret) { 4173 dev_warn(ctrl->device, 4174 "Identify NS List failed (status=0x%x)\n", ret); 4175 goto free; 4176 } 4177 4178 atomic_set(&scan_info.next_nsid, 0); 4179 for (i = 0; i < nr_entries; i++) { 4180 u32 nsid = le32_to_cpu(ns_list[i]); 4181 4182 if (!nsid) /* end of the list? */ 4183 goto out; 4184 async_schedule_domain(nvme_scan_ns_async, &scan_info, 4185 &domain); 4186 while (++prev < nsid) 4187 nvme_ns_remove_by_nsid(ctrl, prev); 4188 } 4189 async_synchronize_full_domain(&domain); 4190 } 4191 out: 4192 nvme_remove_invalid_namespaces(ctrl, prev); 4193 free: 4194 async_synchronize_full_domain(&domain); 4195 kfree(ns_list); 4196 return ret; 4197 } 4198 4199 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl) 4200 { 4201 struct nvme_id_ctrl *id; 4202 u32 nn, i; 4203 4204 if (nvme_identify_ctrl(ctrl, &id)) 4205 return; 4206 nn = le32_to_cpu(id->nn); 4207 kfree(id); 4208 4209 for (i = 1; i <= nn; i++) 4210 nvme_scan_ns(ctrl, i); 4211 4212 nvme_remove_invalid_namespaces(ctrl, nn); 4213 } 4214 4215 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) 4216 { 4217 size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32); 4218 __le32 *log; 4219 int error; 4220 4221 log = kzalloc(log_size, GFP_KERNEL); 4222 if (!log) 4223 return; 4224 4225 /* 4226 * We need to read the log to clear the AEN, but we don't want to rely 4227 * on it for the changed namespace information as userspace could have 4228 * raced with us in reading the log page, which could cause us to miss 4229 * updates. 4230 */ 4231 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, 4232 NVME_CSI_NVM, log, log_size, 0); 4233 if (error) 4234 dev_warn(ctrl->device, 4235 "reading changed ns log failed: %d\n", error); 4236 4237 kfree(log); 4238 } 4239 4240 static void nvme_scan_work(struct work_struct *work) 4241 { 4242 struct nvme_ctrl *ctrl = 4243 container_of(work, struct nvme_ctrl, scan_work); 4244 int ret; 4245 4246 /* No tagset on a live ctrl means IO queues could not created */ 4247 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset) 4248 return; 4249 4250 /* 4251 * Identify controller limits can change at controller reset due to 4252 * new firmware download, even though it is not common we cannot ignore 4253 * such scenario. Controller's non-mdts limits are reported in the unit 4254 * of logical blocks that is dependent on the format of attached 4255 * namespace. Hence re-read the limits at the time of ns allocation. 4256 */ 4257 ret = nvme_init_non_mdts_limits(ctrl); 4258 if (ret < 0) { 4259 dev_warn(ctrl->device, 4260 "reading non-mdts-limits failed: %d\n", ret); 4261 return; 4262 } 4263 4264 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { 4265 dev_info(ctrl->device, "rescanning namespaces.\n"); 4266 nvme_clear_changed_ns_log(ctrl); 4267 } 4268 4269 mutex_lock(&ctrl->scan_lock); 4270 if (!nvme_id_cns_ok(ctrl, NVME_ID_CNS_NS_ACTIVE_LIST)) { 4271 nvme_scan_ns_sequential(ctrl); 4272 } else { 4273 /* 4274 * Fall back to sequential scan if DNR is set to handle broken 4275 * devices which should support Identify NS List (as per the VS 4276 * they report) but don't actually support it. 4277 */ 4278 ret = nvme_scan_ns_list(ctrl); 4279 if (ret > 0 && ret & NVME_STATUS_DNR) 4280 nvme_scan_ns_sequential(ctrl); 4281 } 4282 mutex_unlock(&ctrl->scan_lock); 4283 } 4284 4285 /* 4286 * This function iterates the namespace list unlocked to allow recovery from 4287 * controller failure. It is up to the caller to ensure the namespace list is 4288 * not modified by scan work while this function is executing. 4289 */ 4290 void nvme_remove_namespaces(struct nvme_ctrl *ctrl) 4291 { 4292 struct nvme_ns *ns, *next; 4293 LIST_HEAD(ns_list); 4294 4295 /* 4296 * make sure to requeue I/O to all namespaces as these 4297 * might result from the scan itself and must complete 4298 * for the scan_work to make progress 4299 */ 4300 nvme_mpath_clear_ctrl_paths(ctrl); 4301 4302 /* 4303 * Unquiesce io queues so any pending IO won't hang, especially 4304 * those submitted from scan work 4305 */ 4306 nvme_unquiesce_io_queues(ctrl); 4307 4308 /* prevent racing with ns scanning */ 4309 flush_work(&ctrl->scan_work); 4310 4311 /* 4312 * The dead states indicates the controller was not gracefully 4313 * disconnected. In that case, we won't be able to flush any data while 4314 * removing the namespaces' disks; fail all the queues now to avoid 4315 * potentially having to clean up the failed sync later. 4316 */ 4317 if (nvme_ctrl_state(ctrl) == NVME_CTRL_DEAD) 4318 nvme_mark_namespaces_dead(ctrl); 4319 4320 /* this is a no-op when called from the controller reset handler */ 4321 nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO); 4322 4323 mutex_lock(&ctrl->namespaces_lock); 4324 list_splice_init_rcu(&ctrl->namespaces, &ns_list, synchronize_rcu); 4325 mutex_unlock(&ctrl->namespaces_lock); 4326 synchronize_srcu(&ctrl->srcu); 4327 4328 list_for_each_entry_safe(ns, next, &ns_list, list) 4329 nvme_ns_remove(ns); 4330 } 4331 EXPORT_SYMBOL_GPL(nvme_remove_namespaces); 4332 4333 static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env) 4334 { 4335 const struct nvme_ctrl *ctrl = 4336 container_of(dev, struct nvme_ctrl, ctrl_device); 4337 struct nvmf_ctrl_options *opts = ctrl->opts; 4338 int ret; 4339 4340 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name); 4341 if (ret) 4342 return ret; 4343 4344 if (opts) { 4345 ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr); 4346 if (ret) 4347 return ret; 4348 4349 ret = add_uevent_var(env, "NVME_TRSVCID=%s", 4350 opts->trsvcid ?: "none"); 4351 if (ret) 4352 return ret; 4353 4354 ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s", 4355 opts->host_traddr ?: "none"); 4356 if (ret) 4357 return ret; 4358 4359 ret = add_uevent_var(env, "NVME_HOST_IFACE=%s", 4360 opts->host_iface ?: "none"); 4361 } 4362 return ret; 4363 } 4364 4365 static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata) 4366 { 4367 char *envp[2] = { envdata, NULL }; 4368 4369 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); 4370 } 4371 4372 static void nvme_aen_uevent(struct nvme_ctrl *ctrl) 4373 { 4374 char *envp[2] = { NULL, NULL }; 4375 u32 aen_result = ctrl->aen_result; 4376 4377 ctrl->aen_result = 0; 4378 if (!aen_result) 4379 return; 4380 4381 envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result); 4382 if (!envp[0]) 4383 return; 4384 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); 4385 kfree(envp[0]); 4386 } 4387 4388 static void nvme_async_event_work(struct work_struct *work) 4389 { 4390 struct nvme_ctrl *ctrl = 4391 container_of(work, struct nvme_ctrl, async_event_work); 4392 4393 nvme_aen_uevent(ctrl); 4394 4395 /* 4396 * The transport drivers must guarantee AER submission here is safe by 4397 * flushing ctrl async_event_work after changing the controller state 4398 * from LIVE and before freeing the admin queue. 4399 */ 4400 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE) 4401 ctrl->ops->submit_async_event(ctrl); 4402 } 4403 4404 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) 4405 { 4406 4407 u32 csts; 4408 4409 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) 4410 return false; 4411 4412 if (csts == ~0) 4413 return false; 4414 4415 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); 4416 } 4417 4418 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) 4419 { 4420 struct nvme_fw_slot_info_log *log; 4421 u8 next_fw_slot, cur_fw_slot; 4422 4423 log = kmalloc(sizeof(*log), GFP_KERNEL); 4424 if (!log) 4425 return; 4426 4427 if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM, 4428 log, sizeof(*log), 0)) { 4429 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); 4430 goto out_free_log; 4431 } 4432 4433 cur_fw_slot = log->afi & 0x7; 4434 next_fw_slot = (log->afi & 0x70) >> 4; 4435 if (!cur_fw_slot || (next_fw_slot && (cur_fw_slot != next_fw_slot))) { 4436 dev_info(ctrl->device, 4437 "Firmware is activated after next Controller Level Reset\n"); 4438 goto out_free_log; 4439 } 4440 4441 memcpy(ctrl->subsys->firmware_rev, &log->frs[cur_fw_slot - 1], 4442 sizeof(ctrl->subsys->firmware_rev)); 4443 4444 out_free_log: 4445 kfree(log); 4446 } 4447 4448 static void nvme_fw_act_work(struct work_struct *work) 4449 { 4450 struct nvme_ctrl *ctrl = container_of(work, 4451 struct nvme_ctrl, fw_act_work); 4452 unsigned long fw_act_timeout; 4453 4454 nvme_auth_stop(ctrl); 4455 4456 if (ctrl->mtfa) 4457 fw_act_timeout = jiffies + 4458 msecs_to_jiffies(ctrl->mtfa * 100); 4459 else 4460 fw_act_timeout = jiffies + 4461 msecs_to_jiffies(admin_timeout * 1000); 4462 4463 nvme_quiesce_io_queues(ctrl); 4464 while (nvme_ctrl_pp_status(ctrl)) { 4465 if (time_after(jiffies, fw_act_timeout)) { 4466 dev_warn(ctrl->device, 4467 "Fw activation timeout, reset controller\n"); 4468 nvme_try_sched_reset(ctrl); 4469 return; 4470 } 4471 msleep(100); 4472 } 4473 4474 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) 4475 return; 4476 4477 nvme_unquiesce_io_queues(ctrl); 4478 /* read FW slot information to clear the AER */ 4479 nvme_get_fw_slot_info(ctrl); 4480 4481 queue_work(nvme_wq, &ctrl->async_event_work); 4482 } 4483 4484 static u32 nvme_aer_type(u32 result) 4485 { 4486 return result & 0x7; 4487 } 4488 4489 static u32 nvme_aer_subtype(u32 result) 4490 { 4491 return (result & 0xff00) >> 8; 4492 } 4493 4494 static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) 4495 { 4496 u32 aer_notice_type = nvme_aer_subtype(result); 4497 bool requeue = true; 4498 4499 switch (aer_notice_type) { 4500 case NVME_AER_NOTICE_NS_CHANGED: 4501 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); 4502 nvme_queue_scan(ctrl); 4503 break; 4504 case NVME_AER_NOTICE_FW_ACT_STARTING: 4505 /* 4506 * We are (ab)using the RESETTING state to prevent subsequent 4507 * recovery actions from interfering with the controller's 4508 * firmware activation. 4509 */ 4510 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { 4511 requeue = false; 4512 queue_work(nvme_wq, &ctrl->fw_act_work); 4513 } 4514 break; 4515 #ifdef CONFIG_NVME_MULTIPATH 4516 case NVME_AER_NOTICE_ANA: 4517 if (!ctrl->ana_log_buf) 4518 break; 4519 queue_work(nvme_wq, &ctrl->ana_work); 4520 break; 4521 #endif 4522 case NVME_AER_NOTICE_DISC_CHANGED: 4523 ctrl->aen_result = result; 4524 break; 4525 default: 4526 dev_warn(ctrl->device, "async event result %08x\n", result); 4527 } 4528 return requeue; 4529 } 4530 4531 static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl) 4532 { 4533 dev_warn(ctrl->device, 4534 "resetting controller due to persistent internal error\n"); 4535 nvme_reset_ctrl(ctrl); 4536 } 4537 4538 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, 4539 volatile union nvme_result *res) 4540 { 4541 u32 result = le32_to_cpu(res->u32); 4542 u32 aer_type = nvme_aer_type(result); 4543 u32 aer_subtype = nvme_aer_subtype(result); 4544 bool requeue = true; 4545 4546 if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) 4547 return; 4548 4549 trace_nvme_async_event(ctrl, result); 4550 switch (aer_type) { 4551 case NVME_AER_NOTICE: 4552 requeue = nvme_handle_aen_notice(ctrl, result); 4553 break; 4554 case NVME_AER_ERROR: 4555 /* 4556 * For a persistent internal error, don't run async_event_work 4557 * to submit a new AER. The controller reset will do it. 4558 */ 4559 if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) { 4560 nvme_handle_aer_persistent_error(ctrl); 4561 return; 4562 } 4563 fallthrough; 4564 case NVME_AER_SMART: 4565 case NVME_AER_CSS: 4566 case NVME_AER_VS: 4567 ctrl->aen_result = result; 4568 break; 4569 default: 4570 break; 4571 } 4572 4573 if (requeue) 4574 queue_work(nvme_wq, &ctrl->async_event_work); 4575 } 4576 EXPORT_SYMBOL_GPL(nvme_complete_async_event); 4577 4578 int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, 4579 const struct blk_mq_ops *ops, unsigned int cmd_size) 4580 { 4581 struct queue_limits lim = {}; 4582 int ret; 4583 4584 memset(set, 0, sizeof(*set)); 4585 set->ops = ops; 4586 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; 4587 if (ctrl->ops->flags & NVME_F_FABRICS) 4588 /* Reserved for fabric connect and keep alive */ 4589 set->reserved_tags = 2; 4590 set->numa_node = ctrl->numa_node; 4591 if (ctrl->ops->flags & NVME_F_BLOCKING) 4592 set->flags |= BLK_MQ_F_BLOCKING; 4593 set->cmd_size = cmd_size; 4594 set->driver_data = ctrl; 4595 set->nr_hw_queues = 1; 4596 set->timeout = NVME_ADMIN_TIMEOUT; 4597 ret = blk_mq_alloc_tag_set(set); 4598 if (ret) 4599 return ret; 4600 4601 ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL); 4602 if (IS_ERR(ctrl->admin_q)) { 4603 ret = PTR_ERR(ctrl->admin_q); 4604 goto out_free_tagset; 4605 } 4606 4607 if (ctrl->ops->flags & NVME_F_FABRICS) { 4608 ctrl->fabrics_q = blk_mq_alloc_queue(set, NULL, NULL); 4609 if (IS_ERR(ctrl->fabrics_q)) { 4610 ret = PTR_ERR(ctrl->fabrics_q); 4611 goto out_cleanup_admin_q; 4612 } 4613 } 4614 4615 ctrl->admin_tagset = set; 4616 return 0; 4617 4618 out_cleanup_admin_q: 4619 blk_mq_destroy_queue(ctrl->admin_q); 4620 blk_put_queue(ctrl->admin_q); 4621 out_free_tagset: 4622 blk_mq_free_tag_set(set); 4623 ctrl->admin_q = NULL; 4624 ctrl->fabrics_q = NULL; 4625 return ret; 4626 } 4627 EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set); 4628 4629 void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl) 4630 { 4631 /* 4632 * As we're about to destroy the queue and free tagset 4633 * we can not have keep-alive work running. 4634 */ 4635 nvme_stop_keep_alive(ctrl); 4636 blk_mq_destroy_queue(ctrl->admin_q); 4637 blk_put_queue(ctrl->admin_q); 4638 if (ctrl->ops->flags & NVME_F_FABRICS) { 4639 blk_mq_destroy_queue(ctrl->fabrics_q); 4640 blk_put_queue(ctrl->fabrics_q); 4641 } 4642 blk_mq_free_tag_set(ctrl->admin_tagset); 4643 } 4644 EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set); 4645 4646 int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, 4647 const struct blk_mq_ops *ops, unsigned int nr_maps, 4648 unsigned int cmd_size) 4649 { 4650 int ret; 4651 4652 memset(set, 0, sizeof(*set)); 4653 set->ops = ops; 4654 set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1); 4655 /* 4656 * Some Apple controllers requires tags to be unique across admin and 4657 * the (only) I/O queue, so reserve the first 32 tags of the I/O queue. 4658 */ 4659 if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS) 4660 set->reserved_tags = NVME_AQ_DEPTH; 4661 else if (ctrl->ops->flags & NVME_F_FABRICS) 4662 /* Reserved for fabric connect */ 4663 set->reserved_tags = 1; 4664 set->numa_node = ctrl->numa_node; 4665 if (ctrl->ops->flags & NVME_F_BLOCKING) 4666 set->flags |= BLK_MQ_F_BLOCKING; 4667 set->cmd_size = cmd_size; 4668 set->driver_data = ctrl; 4669 set->nr_hw_queues = ctrl->queue_count - 1; 4670 set->timeout = NVME_IO_TIMEOUT; 4671 set->nr_maps = nr_maps; 4672 ret = blk_mq_alloc_tag_set(set); 4673 if (ret) 4674 return ret; 4675 4676 if (ctrl->ops->flags & NVME_F_FABRICS) { 4677 struct queue_limits lim = { 4678 .features = BLK_FEAT_SKIP_TAGSET_QUIESCE, 4679 }; 4680 4681 ctrl->connect_q = blk_mq_alloc_queue(set, &lim, NULL); 4682 if (IS_ERR(ctrl->connect_q)) { 4683 ret = PTR_ERR(ctrl->connect_q); 4684 goto out_free_tag_set; 4685 } 4686 } 4687 4688 ctrl->tagset = set; 4689 return 0; 4690 4691 out_free_tag_set: 4692 blk_mq_free_tag_set(set); 4693 ctrl->connect_q = NULL; 4694 return ret; 4695 } 4696 EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set); 4697 4698 void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl) 4699 { 4700 if (ctrl->ops->flags & NVME_F_FABRICS) { 4701 blk_mq_destroy_queue(ctrl->connect_q); 4702 blk_put_queue(ctrl->connect_q); 4703 } 4704 blk_mq_free_tag_set(ctrl->tagset); 4705 } 4706 EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set); 4707 4708 void nvme_stop_ctrl(struct nvme_ctrl *ctrl) 4709 { 4710 nvme_mpath_stop(ctrl); 4711 nvme_auth_stop(ctrl); 4712 nvme_stop_failfast_work(ctrl); 4713 flush_work(&ctrl->async_event_work); 4714 cancel_work_sync(&ctrl->fw_act_work); 4715 if (ctrl->ops->stop_ctrl) 4716 ctrl->ops->stop_ctrl(ctrl); 4717 } 4718 EXPORT_SYMBOL_GPL(nvme_stop_ctrl); 4719 4720 void nvme_start_ctrl(struct nvme_ctrl *ctrl) 4721 { 4722 nvme_enable_aen(ctrl); 4723 4724 /* 4725 * persistent discovery controllers need to send indication to userspace 4726 * to re-read the discovery log page to learn about possible changes 4727 * that were missed. We identify persistent discovery controllers by 4728 * checking that they started once before, hence are reconnecting back. 4729 */ 4730 if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && 4731 nvme_discovery_ctrl(ctrl)) 4732 nvme_change_uevent(ctrl, "NVME_EVENT=rediscover"); 4733 4734 if (ctrl->queue_count > 1) { 4735 nvme_queue_scan(ctrl); 4736 nvme_unquiesce_io_queues(ctrl); 4737 nvme_mpath_update(ctrl); 4738 } 4739 4740 nvme_change_uevent(ctrl, "NVME_EVENT=connected"); 4741 set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags); 4742 } 4743 EXPORT_SYMBOL_GPL(nvme_start_ctrl); 4744 4745 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) 4746 { 4747 nvme_stop_keep_alive(ctrl); 4748 nvme_hwmon_exit(ctrl); 4749 nvme_fault_inject_fini(&ctrl->fault_inject); 4750 dev_pm_qos_hide_latency_tolerance(ctrl->device); 4751 cdev_device_del(&ctrl->cdev, ctrl->device); 4752 nvme_put_ctrl(ctrl); 4753 } 4754 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); 4755 4756 static void nvme_free_cels(struct nvme_ctrl *ctrl) 4757 { 4758 struct nvme_effects_log *cel; 4759 unsigned long i; 4760 4761 xa_for_each(&ctrl->cels, i, cel) { 4762 xa_erase(&ctrl->cels, i); 4763 kfree(cel); 4764 } 4765 4766 xa_destroy(&ctrl->cels); 4767 } 4768 4769 static void nvme_free_ctrl(struct device *dev) 4770 { 4771 struct nvme_ctrl *ctrl = 4772 container_of(dev, struct nvme_ctrl, ctrl_device); 4773 struct nvme_subsystem *subsys = ctrl->subsys; 4774 4775 if (!subsys || ctrl->instance != subsys->instance) 4776 ida_free(&nvme_instance_ida, ctrl->instance); 4777 nvme_free_cels(ctrl); 4778 nvme_mpath_uninit(ctrl); 4779 cleanup_srcu_struct(&ctrl->srcu); 4780 nvme_auth_stop(ctrl); 4781 nvme_auth_free(ctrl); 4782 __free_page(ctrl->discard_page); 4783 free_opal_dev(ctrl->opal_dev); 4784 4785 if (subsys) { 4786 mutex_lock(&nvme_subsystems_lock); 4787 list_del(&ctrl->subsys_entry); 4788 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); 4789 mutex_unlock(&nvme_subsystems_lock); 4790 } 4791 4792 ctrl->ops->free_ctrl(ctrl); 4793 4794 if (subsys) 4795 nvme_put_subsystem(subsys); 4796 } 4797 4798 /* 4799 * Initialize a NVMe controller structures. This needs to be called during 4800 * earliest initialization so that we have the initialized structured around 4801 * during probing. 4802 * 4803 * On success, the caller must use the nvme_put_ctrl() to release this when 4804 * needed, which also invokes the ops->free_ctrl() callback. 4805 */ 4806 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, 4807 const struct nvme_ctrl_ops *ops, unsigned long quirks) 4808 { 4809 int ret; 4810 4811 WRITE_ONCE(ctrl->state, NVME_CTRL_NEW); 4812 ctrl->passthru_err_log_enabled = false; 4813 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); 4814 spin_lock_init(&ctrl->lock); 4815 mutex_init(&ctrl->namespaces_lock); 4816 4817 ret = init_srcu_struct(&ctrl->srcu); 4818 if (ret) 4819 return ret; 4820 4821 mutex_init(&ctrl->scan_lock); 4822 INIT_LIST_HEAD(&ctrl->namespaces); 4823 xa_init(&ctrl->cels); 4824 ctrl->dev = dev; 4825 ctrl->ops = ops; 4826 ctrl->quirks = quirks; 4827 ctrl->numa_node = NUMA_NO_NODE; 4828 INIT_WORK(&ctrl->scan_work, nvme_scan_work); 4829 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); 4830 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); 4831 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); 4832 init_waitqueue_head(&ctrl->state_wq); 4833 4834 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); 4835 INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work); 4836 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); 4837 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; 4838 ctrl->ka_last_check_time = jiffies; 4839 4840 BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) > 4841 PAGE_SIZE); 4842 ctrl->discard_page = alloc_page(GFP_KERNEL); 4843 if (!ctrl->discard_page) { 4844 ret = -ENOMEM; 4845 goto out; 4846 } 4847 4848 ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL); 4849 if (ret < 0) 4850 goto out; 4851 ctrl->instance = ret; 4852 4853 ret = nvme_auth_init_ctrl(ctrl); 4854 if (ret) 4855 goto out_release_instance; 4856 4857 nvme_mpath_init_ctrl(ctrl); 4858 4859 device_initialize(&ctrl->ctrl_device); 4860 ctrl->device = &ctrl->ctrl_device; 4861 ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt), 4862 ctrl->instance); 4863 ctrl->device->class = &nvme_class; 4864 ctrl->device->parent = ctrl->dev; 4865 if (ops->dev_attr_groups) 4866 ctrl->device->groups = ops->dev_attr_groups; 4867 else 4868 ctrl->device->groups = nvme_dev_attr_groups; 4869 ctrl->device->release = nvme_free_ctrl; 4870 dev_set_drvdata(ctrl->device, ctrl); 4871 4872 return ret; 4873 4874 out_release_instance: 4875 ida_free(&nvme_instance_ida, ctrl->instance); 4876 out: 4877 if (ctrl->discard_page) 4878 __free_page(ctrl->discard_page); 4879 cleanup_srcu_struct(&ctrl->srcu); 4880 return ret; 4881 } 4882 EXPORT_SYMBOL_GPL(nvme_init_ctrl); 4883 4884 /* 4885 * On success, returns with an elevated controller reference and caller must 4886 * use nvme_uninit_ctrl() to properly free resources associated with the ctrl. 4887 */ 4888 int nvme_add_ctrl(struct nvme_ctrl *ctrl) 4889 { 4890 int ret; 4891 4892 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); 4893 if (ret) 4894 return ret; 4895 4896 cdev_init(&ctrl->cdev, &nvme_dev_fops); 4897 ctrl->cdev.owner = ctrl->ops->module; 4898 ret = cdev_device_add(&ctrl->cdev, ctrl->device); 4899 if (ret) 4900 return ret; 4901 4902 /* 4903 * Initialize latency tolerance controls. The sysfs files won't 4904 * be visible to userspace unless the device actually supports APST. 4905 */ 4906 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; 4907 dev_pm_qos_update_user_latency_tolerance(ctrl->device, 4908 min(default_ps_max_latency_us, (unsigned long)S32_MAX)); 4909 4910 nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); 4911 nvme_get_ctrl(ctrl); 4912 4913 return 0; 4914 } 4915 EXPORT_SYMBOL_GPL(nvme_add_ctrl); 4916 4917 /* let I/O to all namespaces fail in preparation for surprise removal */ 4918 void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl) 4919 { 4920 struct nvme_ns *ns; 4921 int srcu_idx; 4922 4923 srcu_idx = srcu_read_lock(&ctrl->srcu); 4924 list_for_each_entry_srcu(ns, &ctrl->namespaces, list, 4925 srcu_read_lock_held(&ctrl->srcu)) 4926 blk_mark_disk_dead(ns->disk); 4927 srcu_read_unlock(&ctrl->srcu, srcu_idx); 4928 } 4929 EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead); 4930 4931 void nvme_unfreeze(struct nvme_ctrl *ctrl) 4932 { 4933 struct nvme_ns *ns; 4934 int srcu_idx; 4935 4936 srcu_idx = srcu_read_lock(&ctrl->srcu); 4937 list_for_each_entry_srcu(ns, &ctrl->namespaces, list, 4938 srcu_read_lock_held(&ctrl->srcu)) 4939 blk_mq_unfreeze_queue_non_owner(ns->queue); 4940 srcu_read_unlock(&ctrl->srcu, srcu_idx); 4941 clear_bit(NVME_CTRL_FROZEN, &ctrl->flags); 4942 } 4943 EXPORT_SYMBOL_GPL(nvme_unfreeze); 4944 4945 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) 4946 { 4947 struct nvme_ns *ns; 4948 int srcu_idx; 4949 4950 srcu_idx = srcu_read_lock(&ctrl->srcu); 4951 list_for_each_entry_srcu(ns, &ctrl->namespaces, list, 4952 srcu_read_lock_held(&ctrl->srcu)) { 4953 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); 4954 if (timeout <= 0) 4955 break; 4956 } 4957 srcu_read_unlock(&ctrl->srcu, srcu_idx); 4958 return timeout; 4959 } 4960 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); 4961 4962 void nvme_wait_freeze(struct nvme_ctrl *ctrl) 4963 { 4964 struct nvme_ns *ns; 4965 int srcu_idx; 4966 4967 srcu_idx = srcu_read_lock(&ctrl->srcu); 4968 list_for_each_entry_srcu(ns, &ctrl->namespaces, list, 4969 srcu_read_lock_held(&ctrl->srcu)) 4970 blk_mq_freeze_queue_wait(ns->queue); 4971 srcu_read_unlock(&ctrl->srcu, srcu_idx); 4972 } 4973 EXPORT_SYMBOL_GPL(nvme_wait_freeze); 4974 4975 void nvme_start_freeze(struct nvme_ctrl *ctrl) 4976 { 4977 struct nvme_ns *ns; 4978 int srcu_idx; 4979 4980 set_bit(NVME_CTRL_FROZEN, &ctrl->flags); 4981 srcu_idx = srcu_read_lock(&ctrl->srcu); 4982 list_for_each_entry_srcu(ns, &ctrl->namespaces, list, 4983 srcu_read_lock_held(&ctrl->srcu)) 4984 /* 4985 * Typical non_owner use case is from pci driver, in which 4986 * start_freeze is called from timeout work function, but 4987 * unfreeze is done in reset work context 4988 */ 4989 blk_freeze_queue_start_non_owner(ns->queue); 4990 srcu_read_unlock(&ctrl->srcu, srcu_idx); 4991 } 4992 EXPORT_SYMBOL_GPL(nvme_start_freeze); 4993 4994 void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl) 4995 { 4996 if (!ctrl->tagset) 4997 return; 4998 if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags)) 4999 blk_mq_quiesce_tagset(ctrl->tagset); 5000 else 5001 blk_mq_wait_quiesce_done(ctrl->tagset); 5002 } 5003 EXPORT_SYMBOL_GPL(nvme_quiesce_io_queues); 5004 5005 void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl) 5006 { 5007 if (!ctrl->tagset) 5008 return; 5009 if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags)) 5010 blk_mq_unquiesce_tagset(ctrl->tagset); 5011 } 5012 EXPORT_SYMBOL_GPL(nvme_unquiesce_io_queues); 5013 5014 void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl) 5015 { 5016 if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) 5017 blk_mq_quiesce_queue(ctrl->admin_q); 5018 else 5019 blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set); 5020 } 5021 EXPORT_SYMBOL_GPL(nvme_quiesce_admin_queue); 5022 5023 void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl) 5024 { 5025 if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) 5026 blk_mq_unquiesce_queue(ctrl->admin_q); 5027 } 5028 EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue); 5029 5030 void nvme_sync_io_queues(struct nvme_ctrl *ctrl) 5031 { 5032 struct nvme_ns *ns; 5033 int srcu_idx; 5034 5035 srcu_idx = srcu_read_lock(&ctrl->srcu); 5036 list_for_each_entry_srcu(ns, &ctrl->namespaces, list, 5037 srcu_read_lock_held(&ctrl->srcu)) 5038 blk_sync_queue(ns->queue); 5039 srcu_read_unlock(&ctrl->srcu, srcu_idx); 5040 } 5041 EXPORT_SYMBOL_GPL(nvme_sync_io_queues); 5042 5043 void nvme_sync_queues(struct nvme_ctrl *ctrl) 5044 { 5045 nvme_sync_io_queues(ctrl); 5046 if (ctrl->admin_q) 5047 blk_sync_queue(ctrl->admin_q); 5048 } 5049 EXPORT_SYMBOL_GPL(nvme_sync_queues); 5050 5051 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file) 5052 { 5053 if (file->f_op != &nvme_dev_fops) 5054 return NULL; 5055 return file->private_data; 5056 } 5057 EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, "NVME_TARGET_PASSTHRU"); 5058 5059 /* 5060 * Check we didn't inadvertently grow the command structure sizes: 5061 */ 5062 static inline void _nvme_check_size(void) 5063 { 5064 BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64); 5065 BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); 5066 BUILD_BUG_ON(sizeof(struct nvme_identify) != 64); 5067 BUILD_BUG_ON(sizeof(struct nvme_features) != 64); 5068 BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64); 5069 BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); 5070 BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64); 5071 BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64); 5072 BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64); 5073 BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64); 5074 BUILD_BUG_ON(sizeof(struct nvme_command) != 64); 5075 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE); 5076 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE); 5077 BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) != 5078 NVME_IDENTIFY_DATA_SIZE); 5079 BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE); 5080 BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE); 5081 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE); 5082 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE); 5083 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); 5084 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); 5085 BUILD_BUG_ON(sizeof(struct nvme_endurance_group_log) != 512); 5086 BUILD_BUG_ON(sizeof(struct nvme_rotational_media_log) != 512); 5087 BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); 5088 BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64); 5089 BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512); 5090 } 5091 5092 5093 static int __init nvme_core_init(void) 5094 { 5095 unsigned int wq_flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS; 5096 int result = -ENOMEM; 5097 5098 _nvme_check_size(); 5099 5100 nvme_wq = alloc_workqueue("nvme-wq", wq_flags, 0); 5101 if (!nvme_wq) 5102 goto out; 5103 5104 nvme_reset_wq = alloc_workqueue("nvme-reset-wq", wq_flags, 0); 5105 if (!nvme_reset_wq) 5106 goto destroy_wq; 5107 5108 nvme_delete_wq = alloc_workqueue("nvme-delete-wq", wq_flags, 0); 5109 if (!nvme_delete_wq) 5110 goto destroy_reset_wq; 5111 5112 result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0, 5113 NVME_MINORS, "nvme"); 5114 if (result < 0) 5115 goto destroy_delete_wq; 5116 5117 result = class_register(&nvme_class); 5118 if (result) 5119 goto unregister_chrdev; 5120 5121 result = class_register(&nvme_subsys_class); 5122 if (result) 5123 goto destroy_class; 5124 5125 result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS, 5126 "nvme-generic"); 5127 if (result < 0) 5128 goto destroy_subsys_class; 5129 5130 result = class_register(&nvme_ns_chr_class); 5131 if (result) 5132 goto unregister_generic_ns; 5133 5134 result = nvme_init_auth(); 5135 if (result) 5136 goto destroy_ns_chr; 5137 return 0; 5138 5139 destroy_ns_chr: 5140 class_unregister(&nvme_ns_chr_class); 5141 unregister_generic_ns: 5142 unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); 5143 destroy_subsys_class: 5144 class_unregister(&nvme_subsys_class); 5145 destroy_class: 5146 class_unregister(&nvme_class); 5147 unregister_chrdev: 5148 unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); 5149 destroy_delete_wq: 5150 destroy_workqueue(nvme_delete_wq); 5151 destroy_reset_wq: 5152 destroy_workqueue(nvme_reset_wq); 5153 destroy_wq: 5154 destroy_workqueue(nvme_wq); 5155 out: 5156 return result; 5157 } 5158 5159 static void __exit nvme_core_exit(void) 5160 { 5161 nvme_exit_auth(); 5162 class_unregister(&nvme_ns_chr_class); 5163 class_unregister(&nvme_subsys_class); 5164 class_unregister(&nvme_class); 5165 unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); 5166 unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); 5167 destroy_workqueue(nvme_delete_wq); 5168 destroy_workqueue(nvme_reset_wq); 5169 destroy_workqueue(nvme_wq); 5170 ida_destroy(&nvme_ns_chr_minor_ida); 5171 ida_destroy(&nvme_instance_ida); 5172 } 5173 5174 MODULE_LICENSE("GPL"); 5175 MODULE_VERSION("1.0"); 5176 MODULE_DESCRIPTION("NVMe host core framework"); 5177 module_init(nvme_core_init); 5178 module_exit(nvme_core_exit); 5179