1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (C) 2012-2016 Intel Corporation 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include "opt_nvme.h" 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/buf.h> 34 #include <sys/bus.h> 35 #include <sys/conf.h> 36 #include <sys/ioccom.h> 37 #include <sys/proc.h> 38 #include <sys/smp.h> 39 #include <sys/uio.h> 40 #include <sys/sbuf.h> 41 #include <sys/endian.h> 42 #include <machine/stdarg.h> 43 #include <vm/vm.h> 44 45 #include "nvme_private.h" 46 47 #define B4_CHK_RDY_DELAY_MS 2300 /* work around controller bug */ 48 49 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr, 50 struct nvme_async_event_request *aer); 51 52 static void 53 nvme_ctrlr_barrier(struct nvme_controller *ctrlr, int flags) 54 { 55 bus_barrier(ctrlr->resource, 0, rman_get_size(ctrlr->resource), flags); 56 } 57 58 static void 59 nvme_ctrlr_devctl_log(struct nvme_controller *ctrlr, const char *type, const char *msg, ...) 60 { 61 struct sbuf sb; 62 va_list ap; 63 int error; 64 65 if (sbuf_new(&sb, NULL, 0, SBUF_AUTOEXTEND | SBUF_NOWAIT) == NULL) 66 return; 67 sbuf_printf(&sb, "%s: ", device_get_nameunit(ctrlr->dev)); 68 va_start(ap, msg); 69 sbuf_vprintf(&sb, msg, ap); 70 va_end(ap); 71 error = sbuf_finish(&sb); 72 if (error == 0) 73 printf("%s\n", sbuf_data(&sb)); 74 75 sbuf_clear(&sb); 76 sbuf_printf(&sb, "name=\"%s\" reason=\"", device_get_nameunit(ctrlr->dev)); 77 va_start(ap, msg); 78 sbuf_vprintf(&sb, msg, ap); 79 va_end(ap); 80 sbuf_printf(&sb, "\""); 81 error = sbuf_finish(&sb); 82 if (error == 0) 83 devctl_notify("nvme", "controller", type, sbuf_data(&sb)); 84 sbuf_delete(&sb); 85 } 86 87 static int 88 nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr) 89 { 90 struct nvme_qpair *qpair; 91 uint32_t num_entries; 92 int error; 93 94 qpair = &ctrlr->adminq; 95 qpair->id = 0; 96 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1; 97 qpair->domain = ctrlr->domain; 98 99 num_entries = NVME_ADMIN_ENTRIES; 100 TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries); 101 /* 102 * If admin_entries was overridden to an invalid value, revert it 103 * back to our default value. 104 */ 105 if (num_entries < NVME_MIN_ADMIN_ENTRIES || 106 num_entries > NVME_MAX_ADMIN_ENTRIES) { 107 nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d " 108 "specified\n", num_entries); 109 num_entries = NVME_ADMIN_ENTRIES; 110 } 111 112 /* 113 * The admin queue's max xfer size is treated differently than the 114 * max I/O xfer size. 16KB is sufficient here - maybe even less? 115 */ 116 error = nvme_qpair_construct(qpair, num_entries, NVME_ADMIN_TRACKERS, 117 ctrlr); 118 return (error); 119 } 120 121 #define QP(ctrlr, c) ((c) * (ctrlr)->num_io_queues / mp_ncpus) 122 123 static int 124 nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr) 125 { 126 struct nvme_qpair *qpair; 127 uint32_t cap_lo; 128 uint16_t mqes; 129 int c, error, i, n; 130 int num_entries, num_trackers, max_entries; 131 132 /* 133 * NVMe spec sets a hard limit of 64K max entries, but devices may 134 * specify a smaller limit, so we need to check the MQES field in the 135 * capabilities register. We have to cap the number of entries to the 136 * current stride allows for in BAR 0/1, otherwise the remainder entries 137 * are inaccessible. MQES should reflect this, and this is just a 138 * fail-safe. 139 */ 140 max_entries = 141 (rman_get_size(ctrlr->resource) - nvme_mmio_offsetof(doorbell[0])) / 142 (1 << (ctrlr->dstrd + 1)); 143 num_entries = NVME_IO_ENTRIES; 144 TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries); 145 cap_lo = nvme_mmio_read_4(ctrlr, cap_lo); 146 mqes = NVME_CAP_LO_MQES(cap_lo); 147 num_entries = min(num_entries, mqes + 1); 148 num_entries = min(num_entries, max_entries); 149 150 num_trackers = NVME_IO_TRACKERS; 151 TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers); 152 153 num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS); 154 num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS); 155 /* 156 * No need to have more trackers than entries in the submit queue. Note 157 * also that for a queue size of N, we can only have (N-1) commands 158 * outstanding, hence the "-1" here. 159 */ 160 num_trackers = min(num_trackers, (num_entries-1)); 161 162 /* 163 * Our best estimate for the maximum number of I/Os that we should 164 * normally have in flight at one time. This should be viewed as a hint, 165 * not a hard limit and will need to be revisited when the upper layers 166 * of the storage system grows multi-queue support. 167 */ 168 ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4; 169 170 ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair), 171 M_NVME, M_ZERO | M_WAITOK); 172 173 for (i = c = n = 0; i < ctrlr->num_io_queues; i++, c += n) { 174 qpair = &ctrlr->ioq[i]; 175 176 /* 177 * Admin queue has ID=0. IO queues start at ID=1 - 178 * hence the 'i+1' here. 179 */ 180 qpair->id = i + 1; 181 if (ctrlr->num_io_queues > 1) { 182 /* Find number of CPUs served by this queue. */ 183 for (n = 1; QP(ctrlr, c + n) == i; n++) 184 ; 185 /* Shuffle multiple NVMe devices between CPUs. */ 186 qpair->cpu = c + (device_get_unit(ctrlr->dev)+n/2) % n; 187 qpair->domain = pcpu_find(qpair->cpu)->pc_domain; 188 } else { 189 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1; 190 qpair->domain = ctrlr->domain; 191 } 192 193 /* 194 * For I/O queues, use the controller-wide max_xfer_size 195 * calculated in nvme_attach(). 196 */ 197 error = nvme_qpair_construct(qpair, num_entries, num_trackers, 198 ctrlr); 199 if (error) 200 return (error); 201 202 /* 203 * Do not bother binding interrupts if we only have one I/O 204 * interrupt thread for this controller. 205 */ 206 if (ctrlr->num_io_queues > 1) 207 bus_bind_intr(ctrlr->dev, qpair->res, qpair->cpu); 208 } 209 210 return (0); 211 } 212 213 static void 214 nvme_ctrlr_fail(struct nvme_controller *ctrlr) 215 { 216 int i; 217 218 /* 219 * No need to disable queues before failing them. Failing is a superet 220 * of disabling (though pedantically we'd abort the AERs silently with 221 * a different error, though when we fail, that hardly matters). 222 */ 223 ctrlr->is_failed = true; 224 nvme_qpair_fail(&ctrlr->adminq); 225 if (ctrlr->ioq != NULL) { 226 for (i = 0; i < ctrlr->num_io_queues; i++) { 227 nvme_qpair_fail(&ctrlr->ioq[i]); 228 } 229 } 230 nvme_notify_fail_consumers(ctrlr); 231 } 232 233 /* 234 * Wait for RDY to change. 235 * 236 * Starts sleeping for 1us and geometrically increases it the longer we wait, 237 * capped at 1ms. 238 */ 239 static int 240 nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val) 241 { 242 int timeout = ticks + MSEC_2_TICKS(ctrlr->ready_timeout_in_ms); 243 sbintime_t delta_t = SBT_1US; 244 uint32_t csts; 245 246 while (1) { 247 csts = nvme_mmio_read_4(ctrlr, csts); 248 if (csts == NVME_GONE) /* Hot unplug. */ 249 return (ENXIO); 250 if (NVMEV(NVME_CSTS_REG_RDY, csts) == desired_val) 251 break; 252 if (timeout - ticks < 0) { 253 nvme_printf(ctrlr, "controller ready did not become %d " 254 "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms); 255 return (ENXIO); 256 } 257 258 pause_sbt("nvmerdy", delta_t, 0, C_PREL(1)); 259 delta_t = min(SBT_1MS, delta_t * 3 / 2); 260 } 261 262 return (0); 263 } 264 265 static int 266 nvme_ctrlr_disable(struct nvme_controller *ctrlr) 267 { 268 uint32_t cc; 269 uint32_t csts; 270 uint8_t en, rdy; 271 int err; 272 273 cc = nvme_mmio_read_4(ctrlr, cc); 274 csts = nvme_mmio_read_4(ctrlr, csts); 275 276 en = NVMEV(NVME_CC_REG_EN, cc); 277 rdy = NVMEV(NVME_CSTS_REG_RDY, csts); 278 279 /* 280 * Per 3.1.5 in NVME 1.3 spec, transitioning CC.EN from 0 to 1 281 * when CSTS.RDY is 1 or transitioning CC.EN from 1 to 0 when 282 * CSTS.RDY is 0 "has undefined results" So make sure that CSTS.RDY 283 * isn't the desired value. Short circuit if we're already disabled. 284 */ 285 if (en == 0) { 286 /* Wait for RDY == 0 or timeout & fail */ 287 if (rdy == 0) 288 return (0); 289 return (nvme_ctrlr_wait_for_ready(ctrlr, 0)); 290 } 291 if (rdy == 0) { 292 /* EN == 1, wait for RDY == 1 or timeout & fail */ 293 err = nvme_ctrlr_wait_for_ready(ctrlr, 1); 294 if (err != 0) 295 return (err); 296 } 297 298 cc &= ~NVMEM(NVME_CC_REG_EN); 299 nvme_mmio_write_4(ctrlr, cc, cc); 300 301 /* 302 * A few drives have firmware bugs that freeze the drive if we access 303 * the mmio too soon after we disable. 304 */ 305 if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY) 306 pause("nvmeR", MSEC_2_TICKS(B4_CHK_RDY_DELAY_MS)); 307 return (nvme_ctrlr_wait_for_ready(ctrlr, 0)); 308 } 309 310 static int 311 nvme_ctrlr_enable(struct nvme_controller *ctrlr) 312 { 313 uint32_t cc; 314 uint32_t csts; 315 uint32_t aqa; 316 uint32_t qsize; 317 uint8_t en, rdy; 318 int err; 319 320 cc = nvme_mmio_read_4(ctrlr, cc); 321 csts = nvme_mmio_read_4(ctrlr, csts); 322 323 en = NVMEV(NVME_CC_REG_EN, cc); 324 rdy = NVMEV(NVME_CSTS_REG_RDY, csts); 325 326 /* 327 * See note in nvme_ctrlr_disable. Short circuit if we're already enabled. 328 */ 329 if (en == 1) { 330 if (rdy == 1) 331 return (0); 332 return (nvme_ctrlr_wait_for_ready(ctrlr, 1)); 333 } 334 335 /* EN == 0 already wait for RDY == 0 or timeout & fail */ 336 err = nvme_ctrlr_wait_for_ready(ctrlr, 0); 337 if (err != 0) 338 return (err); 339 340 nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr); 341 nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr); 342 343 /* acqs and asqs are 0-based. */ 344 qsize = ctrlr->adminq.num_entries - 1; 345 346 aqa = 0; 347 aqa |= NVMEF(NVME_AQA_REG_ACQS, qsize); 348 aqa |= NVMEF(NVME_AQA_REG_ASQS, qsize); 349 nvme_mmio_write_4(ctrlr, aqa, aqa); 350 351 /* Initialization values for CC */ 352 cc = 0; 353 cc |= NVMEF(NVME_CC_REG_EN, 1); 354 cc |= NVMEF(NVME_CC_REG_CSS, 0); 355 cc |= NVMEF(NVME_CC_REG_AMS, 0); 356 cc |= NVMEF(NVME_CC_REG_SHN, 0); 357 cc |= NVMEF(NVME_CC_REG_IOSQES, 6); /* SQ entry size == 64 == 2^6 */ 358 cc |= NVMEF(NVME_CC_REG_IOCQES, 4); /* CQ entry size == 16 == 2^4 */ 359 360 /* 361 * Use the Memory Page Size selected during device initialization. Note 362 * that value stored in mps is suitable to use here without adjusting by 363 * NVME_MPS_SHIFT. 364 */ 365 cc |= NVMEF(NVME_CC_REG_MPS, ctrlr->mps); 366 367 nvme_ctrlr_barrier(ctrlr, BUS_SPACE_BARRIER_WRITE); 368 nvme_mmio_write_4(ctrlr, cc, cc); 369 370 return (nvme_ctrlr_wait_for_ready(ctrlr, 1)); 371 } 372 373 static void 374 nvme_ctrlr_disable_qpairs(struct nvme_controller *ctrlr) 375 { 376 int i; 377 378 nvme_admin_qpair_disable(&ctrlr->adminq); 379 /* 380 * I/O queues are not allocated before the initial HW 381 * reset, so do not try to disable them. Use is_initialized 382 * to determine if this is the initial HW reset. 383 */ 384 if (ctrlr->is_initialized) { 385 for (i = 0; i < ctrlr->num_io_queues; i++) 386 nvme_io_qpair_disable(&ctrlr->ioq[i]); 387 } 388 } 389 390 static int 391 nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr) 392 { 393 int err; 394 395 TSENTER(); 396 397 nvme_ctrlr_disable_qpairs(ctrlr); 398 399 err = nvme_ctrlr_disable(ctrlr); 400 if (err != 0) 401 goto out; 402 403 err = nvme_ctrlr_enable(ctrlr); 404 out: 405 406 TSEXIT(); 407 return (err); 408 } 409 410 void 411 nvme_ctrlr_reset(struct nvme_controller *ctrlr) 412 { 413 int cmpset; 414 415 cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1); 416 417 if (cmpset == 0 || ctrlr->is_failed) 418 /* 419 * Controller is already resetting or has failed. Return 420 * immediately since there is no need to kick off another 421 * reset in these cases. 422 */ 423 return; 424 425 if (!ctrlr->is_dying) 426 taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task); 427 } 428 429 static int 430 nvme_ctrlr_identify(struct nvme_controller *ctrlr) 431 { 432 struct nvme_completion_poll_status status; 433 434 status.done = 0; 435 nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata, 436 nvme_completion_poll_cb, &status); 437 nvme_completion_poll(&status); 438 if (nvme_completion_is_error(&status.cpl)) { 439 nvme_printf(ctrlr, "nvme_identify_controller failed!\n"); 440 return (ENXIO); 441 } 442 443 /* Convert data to host endian */ 444 nvme_controller_data_swapbytes(&ctrlr->cdata); 445 446 /* 447 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the 448 * controller supports. 449 */ 450 if (ctrlr->cdata.mdts > 0) 451 ctrlr->max_xfer_size = min(ctrlr->max_xfer_size, 452 1 << (ctrlr->cdata.mdts + NVME_MPS_SHIFT + 453 NVME_CAP_HI_MPSMIN(ctrlr->cap_hi))); 454 455 return (0); 456 } 457 458 static int 459 nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr) 460 { 461 struct nvme_completion_poll_status status; 462 int cq_allocated, sq_allocated; 463 464 status.done = 0; 465 nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues, 466 nvme_completion_poll_cb, &status); 467 nvme_completion_poll(&status); 468 if (nvme_completion_is_error(&status.cpl)) { 469 nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n"); 470 return (ENXIO); 471 } 472 473 /* 474 * Data in cdw0 is 0-based. 475 * Lower 16-bits indicate number of submission queues allocated. 476 * Upper 16-bits indicate number of completion queues allocated. 477 */ 478 sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1; 479 cq_allocated = (status.cpl.cdw0 >> 16) + 1; 480 481 /* 482 * Controller may allocate more queues than we requested, 483 * so use the minimum of the number requested and what was 484 * actually allocated. 485 */ 486 ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated); 487 ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated); 488 if (ctrlr->num_io_queues > vm_ndomains) 489 ctrlr->num_io_queues -= ctrlr->num_io_queues % vm_ndomains; 490 491 return (0); 492 } 493 494 static int 495 nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr) 496 { 497 struct nvme_completion_poll_status status; 498 struct nvme_qpair *qpair; 499 int i; 500 501 for (i = 0; i < ctrlr->num_io_queues; i++) { 502 qpair = &ctrlr->ioq[i]; 503 504 status.done = 0; 505 nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, 506 nvme_completion_poll_cb, &status); 507 nvme_completion_poll(&status); 508 if (nvme_completion_is_error(&status.cpl)) { 509 nvme_printf(ctrlr, "nvme_create_io_cq failed!\n"); 510 return (ENXIO); 511 } 512 513 status.done = 0; 514 nvme_ctrlr_cmd_create_io_sq(ctrlr, qpair, 515 nvme_completion_poll_cb, &status); 516 nvme_completion_poll(&status); 517 if (nvme_completion_is_error(&status.cpl)) { 518 nvme_printf(ctrlr, "nvme_create_io_sq failed!\n"); 519 return (ENXIO); 520 } 521 } 522 523 return (0); 524 } 525 526 static int 527 nvme_ctrlr_delete_qpairs(struct nvme_controller *ctrlr) 528 { 529 struct nvme_completion_poll_status status; 530 struct nvme_qpair *qpair; 531 532 for (int i = 0; i < ctrlr->num_io_queues; i++) { 533 qpair = &ctrlr->ioq[i]; 534 535 status.done = 0; 536 nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair, 537 nvme_completion_poll_cb, &status); 538 nvme_completion_poll(&status); 539 if (nvme_completion_is_error(&status.cpl)) { 540 nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n"); 541 return (ENXIO); 542 } 543 544 status.done = 0; 545 nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair, 546 nvme_completion_poll_cb, &status); 547 nvme_completion_poll(&status); 548 if (nvme_completion_is_error(&status.cpl)) { 549 nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n"); 550 return (ENXIO); 551 } 552 } 553 554 return (0); 555 } 556 557 static int 558 nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr) 559 { 560 struct nvme_namespace *ns; 561 uint32_t i; 562 563 for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) { 564 ns = &ctrlr->ns[i]; 565 nvme_ns_construct(ns, i+1, ctrlr); 566 } 567 568 return (0); 569 } 570 571 static bool 572 is_log_page_id_valid(uint8_t page_id) 573 { 574 575 switch (page_id) { 576 case NVME_LOG_ERROR: 577 case NVME_LOG_HEALTH_INFORMATION: 578 case NVME_LOG_FIRMWARE_SLOT: 579 case NVME_LOG_CHANGED_NAMESPACE: 580 case NVME_LOG_COMMAND_EFFECT: 581 case NVME_LOG_RES_NOTIFICATION: 582 case NVME_LOG_SANITIZE_STATUS: 583 return (true); 584 } 585 586 return (false); 587 } 588 589 static uint32_t 590 nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id) 591 { 592 uint32_t log_page_size; 593 594 switch (page_id) { 595 case NVME_LOG_ERROR: 596 log_page_size = min( 597 sizeof(struct nvme_error_information_entry) * 598 (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE); 599 break; 600 case NVME_LOG_HEALTH_INFORMATION: 601 log_page_size = sizeof(struct nvme_health_information_page); 602 break; 603 case NVME_LOG_FIRMWARE_SLOT: 604 log_page_size = sizeof(struct nvme_firmware_page); 605 break; 606 case NVME_LOG_CHANGED_NAMESPACE: 607 log_page_size = sizeof(struct nvme_ns_list); 608 break; 609 case NVME_LOG_COMMAND_EFFECT: 610 log_page_size = sizeof(struct nvme_command_effects_page); 611 break; 612 case NVME_LOG_RES_NOTIFICATION: 613 log_page_size = sizeof(struct nvme_res_notification_page); 614 break; 615 case NVME_LOG_SANITIZE_STATUS: 616 log_page_size = sizeof(struct nvme_sanitize_status_page); 617 break; 618 default: 619 log_page_size = 0; 620 break; 621 } 622 623 return (log_page_size); 624 } 625 626 static void 627 nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr, 628 uint8_t state) 629 { 630 631 if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE) 632 nvme_ctrlr_devctl_log(ctrlr, "critical", 633 "available spare space below threshold"); 634 635 if (state & NVME_CRIT_WARN_ST_TEMPERATURE) 636 nvme_ctrlr_devctl_log(ctrlr, "critical", 637 "temperature above threshold"); 638 639 if (state & NVME_CRIT_WARN_ST_DEVICE_RELIABILITY) 640 nvme_ctrlr_devctl_log(ctrlr, "critical", 641 "device reliability degraded"); 642 643 if (state & NVME_CRIT_WARN_ST_READ_ONLY) 644 nvme_ctrlr_devctl_log(ctrlr, "critical", 645 "media placed in read only mode"); 646 647 if (state & NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP) 648 nvme_ctrlr_devctl_log(ctrlr, "critical", 649 "volatile memory backup device failed"); 650 651 if (state & NVME_CRIT_WARN_ST_RESERVED_MASK) 652 nvme_ctrlr_devctl_log(ctrlr, "critical", 653 "unknown critical warning(s): state = 0x%02x", state); 654 } 655 656 static void 657 nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl) 658 { 659 struct nvme_async_event_request *aer = arg; 660 struct nvme_health_information_page *health_info; 661 struct nvme_ns_list *nsl; 662 struct nvme_error_information_entry *err; 663 int i; 664 665 /* 666 * If the log page fetch for some reason completed with an error, 667 * don't pass log page data to the consumers. In practice, this case 668 * should never happen. 669 */ 670 if (nvme_completion_is_error(cpl)) 671 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl, 672 aer->log_page_id, NULL, 0); 673 else { 674 /* Convert data to host endian */ 675 switch (aer->log_page_id) { 676 case NVME_LOG_ERROR: 677 err = (struct nvme_error_information_entry *)aer->log_page_buffer; 678 for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++) 679 nvme_error_information_entry_swapbytes(err++); 680 break; 681 case NVME_LOG_HEALTH_INFORMATION: 682 nvme_health_information_page_swapbytes( 683 (struct nvme_health_information_page *)aer->log_page_buffer); 684 break; 685 case NVME_LOG_FIRMWARE_SLOT: 686 nvme_firmware_page_swapbytes( 687 (struct nvme_firmware_page *)aer->log_page_buffer); 688 break; 689 case NVME_LOG_CHANGED_NAMESPACE: 690 nvme_ns_list_swapbytes( 691 (struct nvme_ns_list *)aer->log_page_buffer); 692 break; 693 case NVME_LOG_COMMAND_EFFECT: 694 nvme_command_effects_page_swapbytes( 695 (struct nvme_command_effects_page *)aer->log_page_buffer); 696 break; 697 case NVME_LOG_RES_NOTIFICATION: 698 nvme_res_notification_page_swapbytes( 699 (struct nvme_res_notification_page *)aer->log_page_buffer); 700 break; 701 case NVME_LOG_SANITIZE_STATUS: 702 nvme_sanitize_status_page_swapbytes( 703 (struct nvme_sanitize_status_page *)aer->log_page_buffer); 704 break; 705 case INTEL_LOG_TEMP_STATS: 706 intel_log_temp_stats_swapbytes( 707 (struct intel_log_temp_stats *)aer->log_page_buffer); 708 break; 709 default: 710 break; 711 } 712 713 if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) { 714 health_info = (struct nvme_health_information_page *) 715 aer->log_page_buffer; 716 nvme_ctrlr_log_critical_warnings(aer->ctrlr, 717 health_info->critical_warning); 718 /* 719 * Critical warnings reported through the 720 * SMART/health log page are persistent, so 721 * clear the associated bits in the async event 722 * config so that we do not receive repeated 723 * notifications for the same event. 724 */ 725 aer->ctrlr->async_event_config &= 726 ~health_info->critical_warning; 727 nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr, 728 aer->ctrlr->async_event_config, NULL, NULL); 729 } else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE && 730 !nvme_use_nvd) { 731 nsl = (struct nvme_ns_list *)aer->log_page_buffer; 732 for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) { 733 if (nsl->ns[i] > NVME_MAX_NAMESPACES) 734 break; 735 nvme_notify_ns(aer->ctrlr, nsl->ns[i]); 736 } 737 } 738 739 /* 740 * Pass the cpl data from the original async event completion, 741 * not the log page fetch. 742 */ 743 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl, 744 aer->log_page_id, aer->log_page_buffer, aer->log_page_size); 745 } 746 747 /* 748 * Repost another asynchronous event request to replace the one 749 * that just completed. 750 */ 751 nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer); 752 } 753 754 static void 755 nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl) 756 { 757 struct nvme_async_event_request *aer = arg; 758 759 if (nvme_completion_is_error(cpl)) { 760 /* 761 * Do not retry failed async event requests. This avoids 762 * infinite loops where a new async event request is submitted 763 * to replace the one just failed, only to fail again and 764 * perpetuate the loop. 765 */ 766 return; 767 } 768 769 /* Associated log page is in bits 23:16 of completion entry dw0. */ 770 aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16; 771 772 nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x," 773 " page 0x%02x)\n", (cpl->cdw0 & 0x07), (cpl->cdw0 & 0xFF00) >> 8, 774 aer->log_page_id); 775 776 if (is_log_page_id_valid(aer->log_page_id)) { 777 aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr, 778 aer->log_page_id); 779 memcpy(&aer->cpl, cpl, sizeof(*cpl)); 780 nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id, 781 NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer, 782 aer->log_page_size, nvme_ctrlr_async_event_log_page_cb, 783 aer); 784 /* Wait to notify consumers until after log page is fetched. */ 785 } else { 786 nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id, 787 NULL, 0); 788 789 /* 790 * Repost another asynchronous event request to replace the one 791 * that just completed. 792 */ 793 nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer); 794 } 795 } 796 797 static void 798 nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr, 799 struct nvme_async_event_request *aer) 800 { 801 struct nvme_request *req; 802 803 aer->ctrlr = ctrlr; 804 req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer); 805 aer->req = req; 806 807 /* 808 * Disable timeout here, since asynchronous event requests should by 809 * nature never be timed out. 810 */ 811 req->timeout = false; 812 req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST; 813 nvme_ctrlr_submit_admin_request(ctrlr, req); 814 } 815 816 static void 817 nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr) 818 { 819 struct nvme_completion_poll_status status; 820 struct nvme_async_event_request *aer; 821 uint32_t i; 822 823 ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE | 824 NVME_CRIT_WARN_ST_DEVICE_RELIABILITY | 825 NVME_CRIT_WARN_ST_READ_ONLY | 826 NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP; 827 if (ctrlr->cdata.ver >= NVME_REV(1, 2)) 828 ctrlr->async_event_config |= 829 ctrlr->cdata.oaes & (NVME_ASYNC_EVENT_NS_ATTRIBUTE | 830 NVME_ASYNC_EVENT_FW_ACTIVATE); 831 832 status.done = 0; 833 nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD, 834 0, NULL, 0, nvme_completion_poll_cb, &status); 835 nvme_completion_poll(&status); 836 if (nvme_completion_is_error(&status.cpl) || 837 (status.cpl.cdw0 & 0xFFFF) == 0xFFFF || 838 (status.cpl.cdw0 & 0xFFFF) == 0x0000) { 839 nvme_printf(ctrlr, "temperature threshold not supported\n"); 840 } else 841 ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE; 842 843 nvme_ctrlr_cmd_set_async_event_config(ctrlr, 844 ctrlr->async_event_config, NULL, NULL); 845 846 /* aerl is a zero-based value, so we need to add 1 here. */ 847 ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1)); 848 849 for (i = 0; i < ctrlr->num_aers; i++) { 850 aer = &ctrlr->aer[i]; 851 nvme_ctrlr_construct_and_submit_aer(ctrlr, aer); 852 } 853 } 854 855 static void 856 nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr) 857 { 858 859 ctrlr->int_coal_time = 0; 860 TUNABLE_INT_FETCH("hw.nvme.int_coal_time", 861 &ctrlr->int_coal_time); 862 863 ctrlr->int_coal_threshold = 0; 864 TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold", 865 &ctrlr->int_coal_threshold); 866 867 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time, 868 ctrlr->int_coal_threshold, NULL, NULL); 869 } 870 871 static void 872 nvme_ctrlr_hmb_free(struct nvme_controller *ctrlr) 873 { 874 struct nvme_hmb_chunk *hmbc; 875 int i; 876 877 if (ctrlr->hmb_desc_paddr) { 878 bus_dmamap_unload(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map); 879 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr, 880 ctrlr->hmb_desc_map); 881 ctrlr->hmb_desc_paddr = 0; 882 } 883 if (ctrlr->hmb_desc_tag) { 884 bus_dma_tag_destroy(ctrlr->hmb_desc_tag); 885 ctrlr->hmb_desc_tag = NULL; 886 } 887 for (i = 0; i < ctrlr->hmb_nchunks; i++) { 888 hmbc = &ctrlr->hmb_chunks[i]; 889 bus_dmamap_unload(ctrlr->hmb_tag, hmbc->hmbc_map); 890 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr, 891 hmbc->hmbc_map); 892 } 893 ctrlr->hmb_nchunks = 0; 894 if (ctrlr->hmb_tag) { 895 bus_dma_tag_destroy(ctrlr->hmb_tag); 896 ctrlr->hmb_tag = NULL; 897 } 898 if (ctrlr->hmb_chunks) { 899 free(ctrlr->hmb_chunks, M_NVME); 900 ctrlr->hmb_chunks = NULL; 901 } 902 } 903 904 static void 905 nvme_ctrlr_hmb_alloc(struct nvme_controller *ctrlr) 906 { 907 struct nvme_hmb_chunk *hmbc; 908 size_t pref, min, minc, size; 909 int err, i; 910 uint64_t max; 911 912 /* Limit HMB to 5% of RAM size per device by default. */ 913 max = (uint64_t)physmem * PAGE_SIZE / 20; 914 TUNABLE_UINT64_FETCH("hw.nvme.hmb_max", &max); 915 916 /* 917 * Units of Host Memory Buffer in the Identify info are always in terms 918 * of 4k units. 919 */ 920 min = (long long unsigned)ctrlr->cdata.hmmin * NVME_HMB_UNITS; 921 if (max == 0 || max < min) 922 return; 923 pref = MIN((long long unsigned)ctrlr->cdata.hmpre * NVME_HMB_UNITS, max); 924 minc = MAX(ctrlr->cdata.hmminds * NVME_HMB_UNITS, ctrlr->page_size); 925 if (min > 0 && ctrlr->cdata.hmmaxd > 0) 926 minc = MAX(minc, min / ctrlr->cdata.hmmaxd); 927 ctrlr->hmb_chunk = pref; 928 929 again: 930 /* 931 * However, the chunk sizes, number of chunks, and alignment of chunks 932 * are all based on the current MPS (ctrlr->page_size). 933 */ 934 ctrlr->hmb_chunk = roundup2(ctrlr->hmb_chunk, ctrlr->page_size); 935 ctrlr->hmb_nchunks = howmany(pref, ctrlr->hmb_chunk); 936 if (ctrlr->cdata.hmmaxd > 0 && ctrlr->hmb_nchunks > ctrlr->cdata.hmmaxd) 937 ctrlr->hmb_nchunks = ctrlr->cdata.hmmaxd; 938 ctrlr->hmb_chunks = malloc(sizeof(struct nvme_hmb_chunk) * 939 ctrlr->hmb_nchunks, M_NVME, M_WAITOK); 940 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 941 ctrlr->page_size, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 942 ctrlr->hmb_chunk, 1, ctrlr->hmb_chunk, 0, NULL, NULL, &ctrlr->hmb_tag); 943 if (err != 0) { 944 nvme_printf(ctrlr, "HMB tag create failed %d\n", err); 945 nvme_ctrlr_hmb_free(ctrlr); 946 return; 947 } 948 949 for (i = 0; i < ctrlr->hmb_nchunks; i++) { 950 hmbc = &ctrlr->hmb_chunks[i]; 951 if (bus_dmamem_alloc(ctrlr->hmb_tag, 952 (void **)&hmbc->hmbc_vaddr, BUS_DMA_NOWAIT, 953 &hmbc->hmbc_map)) { 954 nvme_printf(ctrlr, "failed to alloc HMB\n"); 955 break; 956 } 957 if (bus_dmamap_load(ctrlr->hmb_tag, hmbc->hmbc_map, 958 hmbc->hmbc_vaddr, ctrlr->hmb_chunk, nvme_single_map, 959 &hmbc->hmbc_paddr, BUS_DMA_NOWAIT) != 0) { 960 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr, 961 hmbc->hmbc_map); 962 nvme_printf(ctrlr, "failed to load HMB\n"); 963 break; 964 } 965 bus_dmamap_sync(ctrlr->hmb_tag, hmbc->hmbc_map, 966 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 967 } 968 969 if (i < ctrlr->hmb_nchunks && i * ctrlr->hmb_chunk < min && 970 ctrlr->hmb_chunk / 2 >= minc) { 971 ctrlr->hmb_nchunks = i; 972 nvme_ctrlr_hmb_free(ctrlr); 973 ctrlr->hmb_chunk /= 2; 974 goto again; 975 } 976 ctrlr->hmb_nchunks = i; 977 if (ctrlr->hmb_nchunks * ctrlr->hmb_chunk < min) { 978 nvme_ctrlr_hmb_free(ctrlr); 979 return; 980 } 981 982 size = sizeof(struct nvme_hmb_desc) * ctrlr->hmb_nchunks; 983 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 984 16, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 985 size, 1, size, 0, NULL, NULL, &ctrlr->hmb_desc_tag); 986 if (err != 0) { 987 nvme_printf(ctrlr, "HMB desc tag create failed %d\n", err); 988 nvme_ctrlr_hmb_free(ctrlr); 989 return; 990 } 991 if (bus_dmamem_alloc(ctrlr->hmb_desc_tag, 992 (void **)&ctrlr->hmb_desc_vaddr, BUS_DMA_WAITOK, 993 &ctrlr->hmb_desc_map)) { 994 nvme_printf(ctrlr, "failed to alloc HMB desc\n"); 995 nvme_ctrlr_hmb_free(ctrlr); 996 return; 997 } 998 if (bus_dmamap_load(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map, 999 ctrlr->hmb_desc_vaddr, size, nvme_single_map, 1000 &ctrlr->hmb_desc_paddr, BUS_DMA_NOWAIT) != 0) { 1001 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr, 1002 ctrlr->hmb_desc_map); 1003 nvme_printf(ctrlr, "failed to load HMB desc\n"); 1004 nvme_ctrlr_hmb_free(ctrlr); 1005 return; 1006 } 1007 1008 for (i = 0; i < ctrlr->hmb_nchunks; i++) { 1009 memset(&ctrlr->hmb_desc_vaddr[i], 0, 1010 sizeof(struct nvme_hmb_desc)); 1011 ctrlr->hmb_desc_vaddr[i].addr = 1012 htole64(ctrlr->hmb_chunks[i].hmbc_paddr); 1013 ctrlr->hmb_desc_vaddr[i].size = htole32(ctrlr->hmb_chunk / ctrlr->page_size); 1014 } 1015 bus_dmamap_sync(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map, 1016 BUS_DMASYNC_PREWRITE); 1017 1018 nvme_printf(ctrlr, "Allocated %lluMB host memory buffer\n", 1019 (long long unsigned)ctrlr->hmb_nchunks * ctrlr->hmb_chunk 1020 / 1024 / 1024); 1021 } 1022 1023 static void 1024 nvme_ctrlr_hmb_enable(struct nvme_controller *ctrlr, bool enable, bool memret) 1025 { 1026 struct nvme_completion_poll_status status; 1027 uint32_t cdw11; 1028 1029 cdw11 = 0; 1030 if (enable) 1031 cdw11 |= 1; 1032 if (memret) 1033 cdw11 |= 2; 1034 status.done = 0; 1035 nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_HOST_MEMORY_BUFFER, cdw11, 1036 ctrlr->hmb_nchunks * ctrlr->hmb_chunk / ctrlr->page_size, 1037 ctrlr->hmb_desc_paddr, ctrlr->hmb_desc_paddr >> 32, 1038 ctrlr->hmb_nchunks, NULL, 0, 1039 nvme_completion_poll_cb, &status); 1040 nvme_completion_poll(&status); 1041 if (nvme_completion_is_error(&status.cpl)) 1042 nvme_printf(ctrlr, "nvme_ctrlr_hmb_enable failed!\n"); 1043 } 1044 1045 static void 1046 nvme_ctrlr_start(void *ctrlr_arg, bool resetting) 1047 { 1048 struct nvme_controller *ctrlr = ctrlr_arg; 1049 uint32_t old_num_io_queues; 1050 int i; 1051 1052 TSENTER(); 1053 1054 /* 1055 * Only reset adminq here when we are restarting the 1056 * controller after a reset. During initialization, 1057 * we have already submitted admin commands to get 1058 * the number of I/O queues supported, so cannot reset 1059 * the adminq again here. 1060 */ 1061 if (resetting) { 1062 nvme_qpair_reset(&ctrlr->adminq); 1063 nvme_admin_qpair_enable(&ctrlr->adminq); 1064 } 1065 1066 if (ctrlr->ioq != NULL) { 1067 for (i = 0; i < ctrlr->num_io_queues; i++) 1068 nvme_qpair_reset(&ctrlr->ioq[i]); 1069 } 1070 1071 /* 1072 * If it was a reset on initialization command timeout, just 1073 * return here, letting initialization code fail gracefully. 1074 */ 1075 if (resetting && !ctrlr->is_initialized) 1076 return; 1077 1078 if (resetting && nvme_ctrlr_identify(ctrlr) != 0) { 1079 nvme_ctrlr_fail(ctrlr); 1080 return; 1081 } 1082 1083 /* 1084 * The number of qpairs are determined during controller initialization, 1085 * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the 1086 * HW limit. We call SET_FEATURES again here so that it gets called 1087 * after any reset for controllers that depend on the driver to 1088 * explicit specify how many queues it will use. This value should 1089 * never change between resets, so panic if somehow that does happen. 1090 */ 1091 if (resetting) { 1092 old_num_io_queues = ctrlr->num_io_queues; 1093 if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) { 1094 nvme_ctrlr_fail(ctrlr); 1095 return; 1096 } 1097 1098 if (old_num_io_queues != ctrlr->num_io_queues) { 1099 panic("num_io_queues changed from %u to %u", 1100 old_num_io_queues, ctrlr->num_io_queues); 1101 } 1102 } 1103 1104 if (ctrlr->cdata.hmpre > 0 && ctrlr->hmb_nchunks == 0) { 1105 nvme_ctrlr_hmb_alloc(ctrlr); 1106 if (ctrlr->hmb_nchunks > 0) 1107 nvme_ctrlr_hmb_enable(ctrlr, true, false); 1108 } else if (ctrlr->hmb_nchunks > 0) 1109 nvme_ctrlr_hmb_enable(ctrlr, true, true); 1110 1111 if (nvme_ctrlr_create_qpairs(ctrlr) != 0) { 1112 nvme_ctrlr_fail(ctrlr); 1113 return; 1114 } 1115 1116 if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) { 1117 nvme_ctrlr_fail(ctrlr); 1118 return; 1119 } 1120 1121 nvme_ctrlr_configure_aer(ctrlr); 1122 nvme_ctrlr_configure_int_coalescing(ctrlr); 1123 1124 for (i = 0; i < ctrlr->num_io_queues; i++) 1125 nvme_io_qpair_enable(&ctrlr->ioq[i]); 1126 TSEXIT(); 1127 } 1128 1129 void 1130 nvme_ctrlr_start_config_hook(void *arg) 1131 { 1132 struct nvme_controller *ctrlr = arg; 1133 1134 TSENTER(); 1135 1136 if (nvme_ctrlr_hw_reset(ctrlr) != 0) { 1137 fail: 1138 nvme_ctrlr_fail(ctrlr); 1139 config_intrhook_disestablish(&ctrlr->config_hook); 1140 return; 1141 } 1142 1143 nvme_qpair_reset(&ctrlr->adminq); 1144 nvme_admin_qpair_enable(&ctrlr->adminq); 1145 1146 if (nvme_ctrlr_identify(ctrlr) == 0 && 1147 nvme_ctrlr_set_num_qpairs(ctrlr) == 0 && 1148 nvme_ctrlr_construct_io_qpairs(ctrlr) == 0) 1149 nvme_ctrlr_start(ctrlr, false); 1150 else 1151 goto fail; 1152 1153 nvme_sysctl_initialize_ctrlr(ctrlr); 1154 config_intrhook_disestablish(&ctrlr->config_hook); 1155 1156 ctrlr->is_initialized = 1; 1157 nvme_notify_new_controller(ctrlr); 1158 TSEXIT(); 1159 } 1160 1161 static void 1162 nvme_ctrlr_reset_task(void *arg, int pending) 1163 { 1164 struct nvme_controller *ctrlr = arg; 1165 int status; 1166 1167 nvme_ctrlr_devctl_log(ctrlr, "RESET", "resetting controller"); 1168 status = nvme_ctrlr_hw_reset(ctrlr); 1169 if (status == 0) 1170 nvme_ctrlr_start(ctrlr, true); 1171 else 1172 nvme_ctrlr_fail(ctrlr); 1173 1174 atomic_cmpset_32(&ctrlr->is_resetting, 1, 0); 1175 } 1176 1177 /* 1178 * Poll all the queues enabled on the device for completion. 1179 */ 1180 void 1181 nvme_ctrlr_poll(struct nvme_controller *ctrlr) 1182 { 1183 int i; 1184 1185 nvme_qpair_process_completions(&ctrlr->adminq); 1186 1187 for (i = 0; i < ctrlr->num_io_queues; i++) 1188 if (ctrlr->ioq && ctrlr->ioq[i].cpl) 1189 nvme_qpair_process_completions(&ctrlr->ioq[i]); 1190 } 1191 1192 /* 1193 * Poll the single-vector interrupt case: num_io_queues will be 1 and 1194 * there's only a single vector. While we're polling, we mask further 1195 * interrupts in the controller. 1196 */ 1197 void 1198 nvme_ctrlr_shared_handler(void *arg) 1199 { 1200 struct nvme_controller *ctrlr = arg; 1201 1202 nvme_mmio_write_4(ctrlr, intms, 1); 1203 nvme_ctrlr_poll(ctrlr); 1204 nvme_mmio_write_4(ctrlr, intmc, 1); 1205 } 1206 1207 static void 1208 nvme_pt_done(void *arg, const struct nvme_completion *cpl) 1209 { 1210 struct nvme_pt_command *pt = arg; 1211 struct mtx *mtx = pt->driver_lock; 1212 uint16_t status; 1213 1214 bzero(&pt->cpl, sizeof(pt->cpl)); 1215 pt->cpl.cdw0 = cpl->cdw0; 1216 1217 status = cpl->status; 1218 status &= ~NVMEM(NVME_STATUS_P); 1219 pt->cpl.status = status; 1220 1221 mtx_lock(mtx); 1222 pt->driver_lock = NULL; 1223 wakeup(pt); 1224 mtx_unlock(mtx); 1225 } 1226 1227 int 1228 nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr, 1229 struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer, 1230 int is_admin_cmd) 1231 { 1232 struct nvme_request *req; 1233 struct mtx *mtx; 1234 struct buf *buf = NULL; 1235 int ret = 0; 1236 1237 if (pt->len > 0) { 1238 if (pt->len > ctrlr->max_xfer_size) { 1239 nvme_printf(ctrlr, "pt->len (%d) " 1240 "exceeds max_xfer_size (%d)\n", pt->len, 1241 ctrlr->max_xfer_size); 1242 return EIO; 1243 } 1244 if (is_user_buffer) { 1245 /* 1246 * Ensure the user buffer is wired for the duration of 1247 * this pass-through command. 1248 */ 1249 PHOLD(curproc); 1250 buf = uma_zalloc(pbuf_zone, M_WAITOK); 1251 buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE; 1252 if (vmapbuf(buf, pt->buf, pt->len, 1) < 0) { 1253 ret = EFAULT; 1254 goto err; 1255 } 1256 req = nvme_allocate_request_vaddr(buf->b_data, pt->len, 1257 nvme_pt_done, pt); 1258 } else 1259 req = nvme_allocate_request_vaddr(pt->buf, pt->len, 1260 nvme_pt_done, pt); 1261 } else 1262 req = nvme_allocate_request_null(nvme_pt_done, pt); 1263 1264 /* Assume user space already converted to little-endian */ 1265 req->cmd.opc = pt->cmd.opc; 1266 req->cmd.fuse = pt->cmd.fuse; 1267 req->cmd.rsvd2 = pt->cmd.rsvd2; 1268 req->cmd.rsvd3 = pt->cmd.rsvd3; 1269 req->cmd.cdw10 = pt->cmd.cdw10; 1270 req->cmd.cdw11 = pt->cmd.cdw11; 1271 req->cmd.cdw12 = pt->cmd.cdw12; 1272 req->cmd.cdw13 = pt->cmd.cdw13; 1273 req->cmd.cdw14 = pt->cmd.cdw14; 1274 req->cmd.cdw15 = pt->cmd.cdw15; 1275 1276 req->cmd.nsid = htole32(nsid); 1277 1278 mtx = mtx_pool_find(mtxpool_sleep, pt); 1279 pt->driver_lock = mtx; 1280 1281 if (is_admin_cmd) 1282 nvme_ctrlr_submit_admin_request(ctrlr, req); 1283 else 1284 nvme_ctrlr_submit_io_request(ctrlr, req); 1285 1286 mtx_lock(mtx); 1287 while (pt->driver_lock != NULL) 1288 mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0); 1289 mtx_unlock(mtx); 1290 1291 if (buf != NULL) { 1292 vunmapbuf(buf); 1293 err: 1294 uma_zfree(pbuf_zone, buf); 1295 PRELE(curproc); 1296 } 1297 1298 return (ret); 1299 } 1300 1301 static int 1302 nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag, 1303 struct thread *td) 1304 { 1305 struct nvme_controller *ctrlr; 1306 struct nvme_pt_command *pt; 1307 1308 ctrlr = cdev->si_drv1; 1309 1310 switch (cmd) { 1311 case NVME_RESET_CONTROLLER: 1312 nvme_ctrlr_reset(ctrlr); 1313 break; 1314 case NVME_PASSTHROUGH_CMD: 1315 pt = (struct nvme_pt_command *)arg; 1316 return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid), 1317 1 /* is_user_buffer */, 1 /* is_admin_cmd */)); 1318 case NVME_GET_NSID: 1319 { 1320 struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg; 1321 strncpy(gnsid->cdev, device_get_nameunit(ctrlr->dev), 1322 sizeof(gnsid->cdev)); 1323 gnsid->cdev[sizeof(gnsid->cdev) - 1] = '\0'; 1324 gnsid->nsid = 0; 1325 break; 1326 } 1327 case NVME_GET_MAX_XFER_SIZE: 1328 *(uint64_t *)arg = ctrlr->max_xfer_size; 1329 break; 1330 default: 1331 return (ENOTTY); 1332 } 1333 1334 return (0); 1335 } 1336 1337 static struct cdevsw nvme_ctrlr_cdevsw = { 1338 .d_version = D_VERSION, 1339 .d_flags = 0, 1340 .d_ioctl = nvme_ctrlr_ioctl 1341 }; 1342 1343 int 1344 nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev) 1345 { 1346 struct make_dev_args md_args; 1347 uint32_t cap_lo; 1348 uint32_t cap_hi; 1349 uint32_t to, vs, pmrcap; 1350 int status, timeout_period; 1351 1352 ctrlr->dev = dev; 1353 1354 mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF); 1355 if (bus_get_domain(dev, &ctrlr->domain) != 0) 1356 ctrlr->domain = 0; 1357 1358 ctrlr->cap_lo = cap_lo = nvme_mmio_read_4(ctrlr, cap_lo); 1359 if (bootverbose) { 1360 device_printf(dev, "CapLo: 0x%08x: MQES %u%s%s%s%s, TO %u\n", 1361 cap_lo, NVME_CAP_LO_MQES(cap_lo), 1362 NVME_CAP_LO_CQR(cap_lo) ? ", CQR" : "", 1363 NVME_CAP_LO_AMS(cap_lo) ? ", AMS" : "", 1364 (NVME_CAP_LO_AMS(cap_lo) & 0x1) ? " WRRwUPC" : "", 1365 (NVME_CAP_LO_AMS(cap_lo) & 0x2) ? " VS" : "", 1366 NVME_CAP_LO_TO(cap_lo)); 1367 } 1368 ctrlr->cap_hi = cap_hi = nvme_mmio_read_4(ctrlr, cap_hi); 1369 if (bootverbose) { 1370 device_printf(dev, "CapHi: 0x%08x: DSTRD %u%s, CSS %x%s, " 1371 "CPS %x, MPSMIN %u, MPSMAX %u%s%s%s%s%s\n", cap_hi, 1372 NVME_CAP_HI_DSTRD(cap_hi), 1373 NVME_CAP_HI_NSSRS(cap_hi) ? ", NSSRS" : "", 1374 NVME_CAP_HI_CSS(cap_hi), 1375 NVME_CAP_HI_BPS(cap_hi) ? ", BPS" : "", 1376 NVME_CAP_HI_CPS(cap_hi), 1377 NVME_CAP_HI_MPSMIN(cap_hi), 1378 NVME_CAP_HI_MPSMAX(cap_hi), 1379 NVME_CAP_HI_PMRS(cap_hi) ? ", PMRS" : "", 1380 NVME_CAP_HI_CMBS(cap_hi) ? ", CMBS" : "", 1381 NVME_CAP_HI_NSSS(cap_hi) ? ", NSSS" : "", 1382 NVME_CAP_HI_CRWMS(cap_hi) ? ", CRWMS" : "", 1383 NVME_CAP_HI_CRIMS(cap_hi) ? ", CRIMS" : ""); 1384 } 1385 if (bootverbose) { 1386 vs = nvme_mmio_read_4(ctrlr, vs); 1387 device_printf(dev, "Version: 0x%08x: %d.%d\n", vs, 1388 NVME_MAJOR(vs), NVME_MINOR(vs)); 1389 } 1390 if (bootverbose && NVME_CAP_HI_PMRS(cap_hi)) { 1391 pmrcap = nvme_mmio_read_4(ctrlr, pmrcap); 1392 device_printf(dev, "PMRCap: 0x%08x: BIR %u%s%s, PMRTU %u, " 1393 "PMRWBM %x, PMRTO %u%s\n", pmrcap, 1394 NVME_PMRCAP_BIR(pmrcap), 1395 NVME_PMRCAP_RDS(pmrcap) ? ", RDS" : "", 1396 NVME_PMRCAP_WDS(pmrcap) ? ", WDS" : "", 1397 NVME_PMRCAP_PMRTU(pmrcap), 1398 NVME_PMRCAP_PMRWBM(pmrcap), 1399 NVME_PMRCAP_PMRTO(pmrcap), 1400 NVME_PMRCAP_CMSS(pmrcap) ? ", CMSS" : ""); 1401 } 1402 1403 ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2; 1404 1405 ctrlr->mps = NVME_CAP_HI_MPSMIN(cap_hi); 1406 ctrlr->page_size = 1 << (NVME_MPS_SHIFT + ctrlr->mps); 1407 1408 /* Get ready timeout value from controller, in units of 500ms. */ 1409 to = NVME_CAP_LO_TO(cap_lo) + 1; 1410 ctrlr->ready_timeout_in_ms = to * 500; 1411 1412 timeout_period = NVME_ADMIN_TIMEOUT_PERIOD; 1413 TUNABLE_INT_FETCH("hw.nvme.admin_timeout_period", &timeout_period); 1414 timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD); 1415 timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD); 1416 ctrlr->admin_timeout_period = timeout_period; 1417 1418 timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD; 1419 TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period); 1420 timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD); 1421 timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD); 1422 ctrlr->timeout_period = timeout_period; 1423 1424 nvme_retry_count = NVME_DEFAULT_RETRY_COUNT; 1425 TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count); 1426 1427 ctrlr->enable_aborts = 0; 1428 TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts); 1429 1430 /* Cap transfers by the maximum addressable by page-sized PRP (4KB pages -> 2MB). */ 1431 ctrlr->max_xfer_size = MIN(maxphys, (ctrlr->page_size / 8 * ctrlr->page_size)); 1432 if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0) 1433 return (ENXIO); 1434 1435 /* 1436 * Create 2 threads for the taskqueue. The reset thread will block when 1437 * it detects that the controller has failed until all I/O has been 1438 * failed up the stack. The fail_req task needs to be able to run in 1439 * this case to finish the request failure for some cases. 1440 * 1441 * We could partially solve this race by draining the failed requeust 1442 * queue before proceding to free the sim, though nothing would stop 1443 * new I/O from coming in after we do that drain, but before we reach 1444 * cam_sim_free, so this big hammer is used instead. 1445 */ 1446 ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK, 1447 taskqueue_thread_enqueue, &ctrlr->taskqueue); 1448 taskqueue_start_threads(&ctrlr->taskqueue, 2, PI_DISK, "nvme taskq"); 1449 1450 ctrlr->is_resetting = 0; 1451 ctrlr->is_initialized = 0; 1452 ctrlr->notification_sent = 0; 1453 TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr); 1454 STAILQ_INIT(&ctrlr->fail_req); 1455 ctrlr->is_failed = false; 1456 1457 make_dev_args_init(&md_args); 1458 md_args.mda_devsw = &nvme_ctrlr_cdevsw; 1459 md_args.mda_uid = UID_ROOT; 1460 md_args.mda_gid = GID_WHEEL; 1461 md_args.mda_mode = 0600; 1462 md_args.mda_unit = device_get_unit(dev); 1463 md_args.mda_si_drv1 = (void *)ctrlr; 1464 status = make_dev_s(&md_args, &ctrlr->cdev, "nvme%d", 1465 device_get_unit(dev)); 1466 if (status != 0) 1467 return (ENXIO); 1468 1469 return (0); 1470 } 1471 1472 void 1473 nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev) 1474 { 1475 int gone, i; 1476 1477 ctrlr->is_dying = true; 1478 1479 if (ctrlr->resource == NULL) 1480 goto nores; 1481 if (!mtx_initialized(&ctrlr->adminq.lock)) 1482 goto noadminq; 1483 1484 /* 1485 * Check whether it is a hot unplug or a clean driver detach. 1486 * If device is not there any more, skip any shutdown commands. 1487 */ 1488 gone = (nvme_mmio_read_4(ctrlr, csts) == NVME_GONE); 1489 if (gone) 1490 nvme_ctrlr_fail(ctrlr); 1491 else 1492 nvme_notify_fail_consumers(ctrlr); 1493 1494 for (i = 0; i < NVME_MAX_NAMESPACES; i++) 1495 nvme_ns_destruct(&ctrlr->ns[i]); 1496 1497 if (ctrlr->cdev) 1498 destroy_dev(ctrlr->cdev); 1499 1500 if (ctrlr->is_initialized) { 1501 if (!gone) { 1502 if (ctrlr->hmb_nchunks > 0) 1503 nvme_ctrlr_hmb_enable(ctrlr, false, false); 1504 nvme_ctrlr_delete_qpairs(ctrlr); 1505 } 1506 nvme_ctrlr_hmb_free(ctrlr); 1507 } 1508 if (ctrlr->ioq != NULL) { 1509 for (i = 0; i < ctrlr->num_io_queues; i++) 1510 nvme_io_qpair_destroy(&ctrlr->ioq[i]); 1511 free(ctrlr->ioq, M_NVME); 1512 } 1513 nvme_admin_qpair_destroy(&ctrlr->adminq); 1514 1515 /* 1516 * Notify the controller of a shutdown, even though this is due to 1517 * a driver unload, not a system shutdown (this path is not invoked 1518 * during shutdown). This ensures the controller receives a 1519 * shutdown notification in case the system is shutdown before 1520 * reloading the driver. 1521 */ 1522 if (!gone) 1523 nvme_ctrlr_shutdown(ctrlr); 1524 1525 if (!gone) 1526 nvme_ctrlr_disable(ctrlr); 1527 1528 noadminq: 1529 if (ctrlr->taskqueue) 1530 taskqueue_free(ctrlr->taskqueue); 1531 1532 if (ctrlr->tag) 1533 bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag); 1534 1535 if (ctrlr->res) 1536 bus_release_resource(ctrlr->dev, SYS_RES_IRQ, 1537 rman_get_rid(ctrlr->res), ctrlr->res); 1538 1539 if (ctrlr->bar4_resource != NULL) { 1540 bus_release_resource(dev, SYS_RES_MEMORY, 1541 ctrlr->bar4_resource_id, ctrlr->bar4_resource); 1542 } 1543 1544 bus_release_resource(dev, SYS_RES_MEMORY, 1545 ctrlr->resource_id, ctrlr->resource); 1546 1547 nores: 1548 mtx_destroy(&ctrlr->lock); 1549 } 1550 1551 void 1552 nvme_ctrlr_shutdown(struct nvme_controller *ctrlr) 1553 { 1554 uint32_t cc; 1555 uint32_t csts; 1556 int timeout; 1557 1558 cc = nvme_mmio_read_4(ctrlr, cc); 1559 cc &= ~NVMEM(NVME_CC_REG_SHN); 1560 cc |= NVMEF(NVME_CC_REG_SHN, NVME_SHN_NORMAL); 1561 nvme_mmio_write_4(ctrlr, cc, cc); 1562 1563 timeout = ticks + (ctrlr->cdata.rtd3e == 0 ? 5 * hz : 1564 ((uint64_t)ctrlr->cdata.rtd3e * hz + 999999) / 1000000); 1565 while (1) { 1566 csts = nvme_mmio_read_4(ctrlr, csts); 1567 if (csts == NVME_GONE) /* Hot unplug. */ 1568 break; 1569 if (NVME_CSTS_GET_SHST(csts) == NVME_SHST_COMPLETE) 1570 break; 1571 if (timeout - ticks < 0) { 1572 nvme_printf(ctrlr, "shutdown timeout\n"); 1573 break; 1574 } 1575 pause("nvmeshut", 1); 1576 } 1577 } 1578 1579 void 1580 nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr, 1581 struct nvme_request *req) 1582 { 1583 1584 nvme_qpair_submit_request(&ctrlr->adminq, req); 1585 } 1586 1587 void 1588 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr, 1589 struct nvme_request *req) 1590 { 1591 struct nvme_qpair *qpair; 1592 1593 qpair = &ctrlr->ioq[QP(ctrlr, curcpu)]; 1594 nvme_qpair_submit_request(qpair, req); 1595 } 1596 1597 device_t 1598 nvme_ctrlr_get_device(struct nvme_controller *ctrlr) 1599 { 1600 1601 return (ctrlr->dev); 1602 } 1603 1604 const struct nvme_controller_data * 1605 nvme_ctrlr_get_data(struct nvme_controller *ctrlr) 1606 { 1607 1608 return (&ctrlr->cdata); 1609 } 1610 1611 int 1612 nvme_ctrlr_suspend(struct nvme_controller *ctrlr) 1613 { 1614 int to = hz; 1615 1616 /* 1617 * Can't touch failed controllers, so it's already suspended. 1618 */ 1619 if (ctrlr->is_failed) 1620 return (0); 1621 1622 /* 1623 * We don't want the reset taskqueue running, since it does similar 1624 * things, so prevent it from running after we start. Wait for any reset 1625 * that may have been started to complete. The reset process we follow 1626 * will ensure that any new I/O will queue and be given to the hardware 1627 * after we resume (though there should be none). 1628 */ 1629 while (atomic_cmpset_32(&ctrlr->is_resetting, 0, 1) == 0 && to-- > 0) 1630 pause("nvmesusp", 1); 1631 if (to <= 0) { 1632 nvme_printf(ctrlr, 1633 "Competing reset task didn't finish. Try again later.\n"); 1634 return (EWOULDBLOCK); 1635 } 1636 1637 if (ctrlr->hmb_nchunks > 0) 1638 nvme_ctrlr_hmb_enable(ctrlr, false, false); 1639 1640 /* 1641 * Per Section 7.6.2 of NVMe spec 1.4, to properly suspend, we need to 1642 * delete the hardware I/O queues, and then shutdown. This properly 1643 * flushes any metadata the drive may have stored so it can survive 1644 * having its power removed and prevents the unsafe shutdown count from 1645 * incriminating. Once we delete the qpairs, we have to disable them 1646 * before shutting down. 1647 */ 1648 nvme_ctrlr_delete_qpairs(ctrlr); 1649 nvme_ctrlr_disable_qpairs(ctrlr); 1650 nvme_ctrlr_shutdown(ctrlr); 1651 1652 return (0); 1653 } 1654 1655 int 1656 nvme_ctrlr_resume(struct nvme_controller *ctrlr) 1657 { 1658 1659 /* 1660 * Can't touch failed controllers, so nothing to do to resume. 1661 */ 1662 if (ctrlr->is_failed) 1663 return (0); 1664 1665 if (nvme_ctrlr_hw_reset(ctrlr) != 0) 1666 goto fail; 1667 1668 /* 1669 * Now that we've reset the hardware, we can restart the controller. Any 1670 * I/O that was pending is requeued. Any admin commands are aborted with 1671 * an error. Once we've restarted, take the controller out of reset. 1672 */ 1673 nvme_ctrlr_start(ctrlr, true); 1674 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0); 1675 1676 return (0); 1677 fail: 1678 /* 1679 * Since we can't bring the controller out of reset, announce and fail 1680 * the controller. However, we have to return success for the resume 1681 * itself, due to questionable APIs. 1682 */ 1683 nvme_printf(ctrlr, "Failed to reset on resume, failing.\n"); 1684 nvme_ctrlr_fail(ctrlr); 1685 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0); 1686 return (0); 1687 } 1688