/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2016 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cam.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/uio.h>
#include <sys/endian.h>

#include "nvme_private.h"

#define B4_CHK_RDY_DELAY_MS	2300	/* work around controller bug */

static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer);

static int
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
	struct nvme_qpair *qpair;
	uint32_t num_entries;
	int error;

	qpair = &ctrlr->adminq;

	num_entries = NVME_ADMIN_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
	/*
	 * If admin_entries was overridden to an invalid value, revert it
	 * back to our default value.
	 */
	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
		    "specified\n", num_entries);
		num_entries = NVME_ADMIN_ENTRIES;
	}

	/*
	 * The admin queue's max xfer size is treated differently than the
	 * max I/O xfer size.  16KB is sufficient here - maybe even less?
	 */
	error = nvme_qpair_construct(qpair,
	    0, /* qpair ID */
	    0, /* vector */
	    num_entries,
	    NVME_ADMIN_TRACKERS,
	    ctrlr);
	return (error);
}

static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_qpair *qpair;
	uint32_t cap_lo;
	uint16_t mqes;
	int i, error, num_entries, num_trackers, max_entries;

	/*
	 * NVMe spec sets a hard limit of 64K max entries, but devices may
	 * specify a smaller limit, so we need to check the MQES field in the
	 * capabilities register.  We also have to cap the number of entries
	 * to what the current doorbell stride allows for in BAR 0/1,
	 * otherwise the remainder entries are inaccessible.  MQES should
	 * reflect this, and this is just a fail-safe.
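	 *
	 * Illustrative arithmetic: with the smallest doorbell stride
	 * (CAP.DSTRD == 0, hence ctrlr->dstrd == 2), the divisor below is
	 * 1 << (2 + 1) == 8 bytes, i.e. one 4-byte submission doorbell plus
	 * one 4-byte completion doorbell per queue pair.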
	 */
	max_entries =
	    (rman_get_size(ctrlr->resource) - nvme_mmio_offsetof(doorbell[0])) /
	    (1 << (ctrlr->dstrd + 1));
	num_entries = NVME_IO_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
	mqes = NVME_CAP_LO_MQES(cap_lo);
	num_entries = min(num_entries, mqes + 1);
	num_entries = min(num_entries, max_entries);

	num_trackers = NVME_IO_TRACKERS;
	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
	/*
	 * No need to have more trackers than entries in the submit queue.
	 * Note also that for a queue size of N, we can only have (N-1)
	 * commands outstanding, hence the "-1" here.
	 */
	num_trackers = min(num_trackers, (num_entries - 1));

	/*
	 * Our best estimate for the maximum number of I/Os that we should
	 * normally have in flight at one time.  This should be viewed as a
	 * hint, not a hard limit, and will need to be revisited when the
	 * upper layers of the storage system grow multi-queue support.
	 */
	ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;

	/*
	 * This was calculated previously when setting up interrupts, but
	 * a controller could theoretically support fewer I/O queues than
	 * MSI-X vectors.  So calculate again here just to be safe.
	 */
	ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);

	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
	    M_NVME, M_ZERO | M_WAITOK);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		/*
		 * Admin queue has ID=0.  IO queues start at ID=1 -
		 * hence the 'i+1' here.
		 *
		 * For I/O queues, use the controller-wide max_xfer_size
		 * calculated in nvme_attach().
		 */
		error = nvme_qpair_construct(qpair,
		    i + 1, /* qpair ID */
		    ctrlr->msix_enabled ? i + 1 : 0, /* vector */
		    num_entries,
		    num_trackers,
		    ctrlr);
		if (error)
			return (error);

		/*
		 * Do not bother binding interrupts if we only have one I/O
		 * interrupt thread for this controller.
		 */
		if (ctrlr->num_io_queues > 1)
			bus_bind_intr(ctrlr->dev, qpair->res,
			    i * ctrlr->num_cpus_per_ioq);
	}

	return (0);
}

static void
nvme_ctrlr_fail(struct nvme_controller *ctrlr)
{
	int i;

	ctrlr->is_failed = TRUE;
	nvme_admin_qpair_disable(&ctrlr->adminq);
	nvme_qpair_fail(&ctrlr->adminq);
	if (ctrlr->ioq != NULL) {
		for (i = 0; i < ctrlr->num_io_queues; i++) {
			nvme_io_qpair_disable(&ctrlr->ioq[i]);
			nvme_qpair_fail(&ctrlr->ioq[i]);
		}
	}
	nvme_notify_fail_consumers(ctrlr);
}

void
nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	mtx_lock(&ctrlr->lock);
	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
	mtx_unlock(&ctrlr->lock);
	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
}

static void
nvme_ctrlr_fail_req_task(void *arg, int pending)
{
	struct nvme_controller *ctrlr = arg;
	struct nvme_request *req;

	mtx_lock(&ctrlr->lock);
	while ((req = STAILQ_FIRST(&ctrlr->fail_req)) != NULL) {
		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
		mtx_unlock(&ctrlr->lock);
		nvme_qpair_manual_complete_request(req->qpair, req,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST);
		mtx_lock(&ctrlr->lock);
	}
	mtx_unlock(&ctrlr->lock);
}

static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
{
	int ms_waited;
	uint32_t csts;

	ms_waited = 0;
	while (1) {
		csts = nvme_mmio_read_4(ctrlr, csts);
		if (csts == 0xffffffff)		/* Hot unplug. */
			return (ENXIO);
		if (((csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK)
		    == desired_val)
			break;
		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
			nvme_printf(ctrlr, "controller ready did not become %d "
			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
			return (ENXIO);
		}
		DELAY(1000);
	}

	return (0);
}

static int
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
	uint32_t cc;
	uint32_t csts;
	uint8_t en, rdy;
	int err;

	cc = nvme_mmio_read_4(ctrlr, cc);
	csts = nvme_mmio_read_4(ctrlr, csts);

	en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
	rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;

	/*
	 * Per 3.1.5 in the NVMe 1.3 spec, transitioning CC.EN from 0 to 1
	 * when CSTS.RDY is 1, or transitioning CC.EN from 1 to 0 when
	 * CSTS.RDY is 0, "has undefined results".  So make sure CSTS.RDY
	 * isn't the desired value.  Short circuit if we're already disabled.
	 */
	if (en == 1) {
		if (rdy == 0) {
			/* EN == 1, wait for RDY == 1 or fail */
			err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
			if (err != 0)
				return (err);
		}
	} else {
		/* EN == 0 already, wait for RDY == 0 */
		if (rdy == 0)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
	}

	cc &= ~NVME_CC_REG_EN_MASK;
	nvme_mmio_write_4(ctrlr, cc, cc);
	/*
	 * Some drives have issues with accessing the mmio after we
	 * disable, so delay for a bit after we write the bit to
	 * cope with these issues.
	 */
	if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
		pause("nvmeR", B4_CHK_RDY_DELAY_MS * hz / 1000);
	return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
}

static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
	uint32_t cc;
	uint32_t csts;
	uint32_t aqa;
	uint32_t qsize;
	uint8_t en, rdy;
	int err;

	cc = nvme_mmio_read_4(ctrlr, cc);
	csts = nvme_mmio_read_4(ctrlr, csts);

	en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
	rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;

	/*
	 * See note in nvme_ctrlr_disable.  Short circuit if we're already
	 * enabled.
	 */
	if (en == 1) {
		if (rdy == 1)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
	} else {
		/* EN == 0 already, wait for RDY == 0 or fail */
		err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
		if (err != 0)
			return (err);
	}

	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
	DELAY(5000);
	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
	DELAY(5000);

	/* acqs and asqs are 0-based. */
	qsize = ctrlr->adminq.num_entries - 1;

	aqa = 0;
	aqa = (qsize & NVME_AQA_REG_ACQS_MASK) << NVME_AQA_REG_ACQS_SHIFT;
	aqa |= (qsize & NVME_AQA_REG_ASQS_MASK) << NVME_AQA_REG_ASQS_SHIFT;
	nvme_mmio_write_4(ctrlr, aqa, aqa);
	DELAY(5000);

	/* Initialization values for CC */
	cc = 0;
	cc |= 1 << NVME_CC_REG_EN_SHIFT;
	cc |= 0 << NVME_CC_REG_CSS_SHIFT;
	cc |= 0 << NVME_CC_REG_AMS_SHIFT;
	cc |= 0 << NVME_CC_REG_SHN_SHIFT;
	cc |= 6 << NVME_CC_REG_IOSQES_SHIFT;	/* SQ entry size == 64 == 2^6 */
	cc |= 4 << NVME_CC_REG_IOCQES_SHIFT;	/* CQ entry size == 16 == 2^4 */

	/* This evaluates to 0, which is according to spec. */
	cc |= (PAGE_SIZE >> 13) << NVME_CC_REG_MPS_SHIFT;

	nvme_mmio_write_4(ctrlr, cc, cc);

	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
}

static void
nvme_ctrlr_disable_qpairs(struct nvme_controller *ctrlr)
{
	int i;

	nvme_admin_qpair_disable(&ctrlr->adminq);
	/*
	 * I/O queues are not allocated before the initial HW
	 * reset, so do not try to disable them.  Use is_initialized
	 * to determine if this is the initial HW reset.
	 */
	if (ctrlr->is_initialized) {
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_io_qpair_disable(&ctrlr->ioq[i]);
	}
}

int
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
{
	int err;

	nvme_ctrlr_disable_qpairs(ctrlr);

	DELAY(100*1000);

	err = nvme_ctrlr_disable(ctrlr);
	if (err != 0)
		return err;
	return (nvme_ctrlr_enable(ctrlr));
}

void
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{
	int cmpset;

	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);

	if (cmpset == 0 || ctrlr->is_failed)
		/*
		 * Controller is already resetting or has failed.  Return
		 * immediately since there is no need to kick off another
		 * reset in these cases.
		 */
		return;

	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}

static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;

	status.done = 0;
	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
	    nvme_completion_poll_cb, &status);
	nvme_completion_poll(&status);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
		return (ENXIO);
	}

	/* Convert data to host endian */
	nvme_controller_data_swapbytes(&ctrlr->cdata);

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
	 * controller supports.
	 */
	if (ctrlr->cdata.mdts > 0)
		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));

	return (0);
}

static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	int cq_allocated, sq_allocated;

	status.done = 0;
	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
	    nvme_completion_poll_cb, &status);
	nvme_completion_poll(&status);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
		return (ENXIO);
	}

	/*
	 * Data in cdw0 is 0-based.
	 * Lower 16-bits indicate number of submission queues allocated.
	 * Upper 16-bits indicate number of completion queues allocated.
	 */
	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
	cq_allocated = (status.cpl.cdw0 >> 16) + 1;

	/*
	 * Controller may allocate more queues than we requested,
	 * so use the minimum of the number requested and what was
	 * actually allocated.
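	 *
	 * For example (illustrative values), a returned cdw0 of 0x000f000f
	 * decodes to sq_allocated == 16 and cq_allocated == 16 above, so
	 * num_io_queues would be capped at 16.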
	 */
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);

	return (0);
}

static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	struct nvme_qpair *qpair;
	int i;

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		status.done = 0;
		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
		    nvme_completion_poll_cb, &status);
		nvme_completion_poll(&status);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
			return (ENXIO);
		}

		status.done = 0;
		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		nvme_completion_poll(&status);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
			return (ENXIO);
		}
	}

	return (0);
}

static int
nvme_ctrlr_delete_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	struct nvme_qpair *qpair;

	for (int i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		status.done = 0;
		nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		nvme_completion_poll(&status);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
			return (ENXIO);
		}

		status.done = 0;
		nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		nvme_completion_poll(&status);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
			return (ENXIO);
		}
	}

	return (0);
}

static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
{
	struct nvme_namespace *ns;
	uint32_t i;

	for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
		ns = &ctrlr->ns[i];
		nvme_ns_construct(ns, i + 1, ctrlr);
	}

	return (0);
}

static boolean_t
is_log_page_id_valid(uint8_t page_id)
{

	switch (page_id) {
	case NVME_LOG_ERROR:
	case NVME_LOG_HEALTH_INFORMATION:
	case NVME_LOG_FIRMWARE_SLOT:
	case NVME_LOG_CHANGED_NAMESPACE:
	case NVME_LOG_COMMAND_EFFECT:
	case NVME_LOG_RES_NOTIFICATION:
	case NVME_LOG_SANITIZE_STATUS:
		return (TRUE);
	}

	return (FALSE);
}

static uint32_t
nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
{
	uint32_t log_page_size;

	switch (page_id) {
	case NVME_LOG_ERROR:
		log_page_size = min(
		    sizeof(struct nvme_error_information_entry) *
		    (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
		break;
	case NVME_LOG_HEALTH_INFORMATION:
		log_page_size = sizeof(struct nvme_health_information_page);
		break;
	case NVME_LOG_FIRMWARE_SLOT:
		log_page_size = sizeof(struct nvme_firmware_page);
		break;
	case NVME_LOG_CHANGED_NAMESPACE:
		log_page_size = sizeof(struct nvme_ns_list);
		break;
	case NVME_LOG_COMMAND_EFFECT:
		log_page_size = sizeof(struct nvme_command_effects_page);
		break;
	case NVME_LOG_RES_NOTIFICATION:
		log_page_size = sizeof(struct nvme_res_notification_page);
		break;
	case NVME_LOG_SANITIZE_STATUS:
		log_page_size = sizeof(struct nvme_sanitize_status_page);
		break;
	default:
		log_page_size = 0;
		break;
	}

	return (log_page_size);
}

static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
    uint8_t state)
{

	if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
		nvme_printf(ctrlr, "available spare space below threshold\n");

	if (state & NVME_CRIT_WARN_ST_TEMPERATURE)
		nvme_printf(ctrlr, "temperature above threshold\n");

	if (state & NVME_CRIT_WARN_ST_DEVICE_RELIABILITY)
		nvme_printf(ctrlr, "device reliability degraded\n");

	if (state & NVME_CRIT_WARN_ST_READ_ONLY)
		nvme_printf(ctrlr, "media placed in read only mode\n");

	if (state & NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP)
		nvme_printf(ctrlr, "volatile memory backup device failed\n");

	if (state & NVME_CRIT_WARN_ST_RESERVED_MASK)
		nvme_printf(ctrlr,
		    "unknown critical warning(s): state = 0x%02x\n", state);
}

static void
nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request *aer = arg;
	struct nvme_health_information_page *health_info;
	struct nvme_ns_list *nsl;
	struct nvme_error_information_entry *err;
	int i;

	/*
	 * If the log page fetch for some reason completed with an error,
	 * don't pass log page data to the consumers.  In practice, this case
	 * should never happen.
	 */
	if (nvme_completion_is_error(cpl))
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, NULL, 0);
	else {
		/* Convert data to host endian */
		switch (aer->log_page_id) {
		case NVME_LOG_ERROR:
			err = (struct nvme_error_information_entry *)aer->log_page_buffer;
			for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
				nvme_error_information_entry_swapbytes(err++);
			break;
		case NVME_LOG_HEALTH_INFORMATION:
			nvme_health_information_page_swapbytes(
			    (struct nvme_health_information_page *)aer->log_page_buffer);
			break;
		case NVME_LOG_FIRMWARE_SLOT:
			nvme_firmware_page_swapbytes(
			    (struct nvme_firmware_page *)aer->log_page_buffer);
			break;
		case NVME_LOG_CHANGED_NAMESPACE:
			nvme_ns_list_swapbytes(
			    (struct nvme_ns_list *)aer->log_page_buffer);
			break;
		case NVME_LOG_COMMAND_EFFECT:
			nvme_command_effects_page_swapbytes(
			    (struct nvme_command_effects_page *)aer->log_page_buffer);
			break;
		case NVME_LOG_RES_NOTIFICATION:
			nvme_res_notification_page_swapbytes(
			    (struct nvme_res_notification_page *)aer->log_page_buffer);
			break;
		case NVME_LOG_SANITIZE_STATUS:
			nvme_sanitize_status_page_swapbytes(
			    (struct nvme_sanitize_status_page *)aer->log_page_buffer);
			break;
		case INTEL_LOG_TEMP_STATS:
			intel_log_temp_stats_swapbytes(
			    (struct intel_log_temp_stats *)aer->log_page_buffer);
			break;
		default:
			break;
		}

		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
			health_info = (struct nvme_health_information_page *)
			    aer->log_page_buffer;
			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
			    health_info->critical_warning);
			/*
			 * Critical warnings reported through the
			 * SMART/health log page are persistent, so
			 * clear the associated bits in the async event
			 * config so that we do not receive repeated
			 * notifications for the same event.
			 */
			aer->ctrlr->async_event_config &=
			    ~health_info->critical_warning;
			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
			    aer->ctrlr->async_event_config, NULL, NULL);
		} else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE &&
		    !nvme_use_nvd) {
			nsl = (struct nvme_ns_list *)aer->log_page_buffer;
			for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
				if (nsl->ns[i] > NVME_MAX_NAMESPACES)
					break;
				nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
			}
		}

		/*
		 * Pass the cpl data from the original async event completion,
		 * not the log page fetch.
		 */
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
	}

	/*
	 * Repost another asynchronous event request to replace the one
	 * that just completed.
	 */
	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
}

static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request *aer = arg;

	if (nvme_completion_is_error(cpl)) {
		/*
		 * Do not retry failed async event requests.  This avoids
		 * infinite loops where a new async event request is submitted
		 * to replace the one just failed, only to fail again and
		 * perpetuate the loop.
		 */
		return;
	}

	/* Associated log page is in bits 23:16 of completion entry dw0. */
	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;

	nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
	    " page 0x%02x)\n", (cpl->cdw0 & 0x07), (cpl->cdw0 & 0xFF00) >> 8,
	    aer->log_page_id);

	if (is_log_page_id_valid(aer->log_page_id)) {
		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
		    aer->log_page_id);
		memcpy(&aer->cpl, cpl, sizeof(*cpl));
		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
		    aer);
		/* Wait to notify consumers until after log page is fetched. */
	} else {
		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
		    NULL, 0);

		/*
		 * Repost another asynchronous event request to replace the one
		 * that just completed.
		 */
		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
	}
}

static void
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)
{
	struct nvme_request *req;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
	aer->req = req;

	/*
	 * Disable timeout here, since asynchronous event requests should by
	 * nature never be timed out.
	 */
	req->timeout = FALSE;
	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	struct nvme_async_event_request *aer;
	uint32_t i;

	ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
	    NVME_CRIT_WARN_ST_DEVICE_RELIABILITY |
	    NVME_CRIT_WARN_ST_READ_ONLY |
	    NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP;
	/* 0x300 == Namespace Attribute and Firmware Activation notices. */
	if (ctrlr->cdata.ver >= NVME_REV(1, 2))
		ctrlr->async_event_config |= 0x300;

	status.done = 0;
	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
	    0, NULL, 0, nvme_completion_poll_cb, &status);
	nvme_completion_poll(&status);
	if (nvme_completion_is_error(&status.cpl) ||
	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
		nvme_printf(ctrlr, "temperature threshold not supported\n");
	} else
		ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;

	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
	    ctrlr->async_event_config, NULL, NULL);

	/* aerl is a zero-based value, so we need to add 1 here. */
	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));

	for (i = 0; i < ctrlr->num_aers; i++) {
		aer = &ctrlr->aer[i];
		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
	}
}

static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{

	ctrlr->int_coal_time = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
	    &ctrlr->int_coal_time);

	ctrlr->int_coal_threshold = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
	    &ctrlr->int_coal_threshold);

	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
	    ctrlr->int_coal_threshold, NULL, NULL);
}

static void
nvme_ctrlr_start(void *ctrlr_arg, bool resetting)
{
	struct nvme_controller *ctrlr = ctrlr_arg;
	uint32_t old_num_io_queues;
	int i;

	/*
	 * Only reset adminq here when we are restarting the
	 * controller after a reset.  During initialization,
	 * we have already submitted admin commands to get
	 * the number of I/O queues supported, so cannot reset
	 * the adminq again here.
	 */
	if (resetting)
		nvme_qpair_reset(&ctrlr->adminq);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_reset(&ctrlr->ioq[i]);

	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_identify(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	/*
	 * The number of qpairs is determined during controller initialization,
	 * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
	 * HW limit.  We call SET_FEATURES again here so that it gets called
	 * after any reset for controllers that depend on the driver to
	 * explicitly specify how many queues it will use.  This value should
	 * never change between resets, so panic if somehow that does happen.
	 */
	if (resetting) {
		old_num_io_queues = ctrlr->num_io_queues;
		if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
			nvme_ctrlr_fail(ctrlr);
			return;
		}

		if (old_num_io_queues != ctrlr->num_io_queues) {
			panic("num_io_queues changed from %u to %u",
			    old_num_io_queues, ctrlr->num_io_queues);
		}
	}

	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	nvme_ctrlr_configure_aer(ctrlr);
	nvme_ctrlr_configure_int_coalescing(ctrlr);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_enable(&ctrlr->ioq[i]);
}

void
nvme_ctrlr_start_config_hook(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_qpair_reset(&ctrlr->adminq);
	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
	    nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
		nvme_ctrlr_start(ctrlr, false);
	else
		nvme_ctrlr_fail(ctrlr);

	nvme_sysctl_initialize_ctrlr(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);

	ctrlr->is_initialized = 1;
	nvme_notify_new_controller(ctrlr);
}

static void
nvme_ctrlr_reset_task(void *arg, int pending)
{
	struct nvme_controller *ctrlr = arg;
	int status;

	nvme_printf(ctrlr, "resetting controller\n");
	status = nvme_ctrlr_hw_reset(ctrlr);
	/*
	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
	 * handlers on this CPU that were blocked on a qpair lock.  We want
	 * all nvme interrupts completed before proceeding with restarting the
	 * controller.
	 *
	 * XXX - any way to guarantee the interrupt handlers have quiesced?
	 */
	pause("nvmereset", hz / 10);
	if (status == 0)
		nvme_ctrlr_start(ctrlr, true);
	else
		nvme_ctrlr_fail(ctrlr);

	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
}

/*
 * Poll all the queues enabled on the device for completion.
 */
void
nvme_ctrlr_poll(struct nvme_controller *ctrlr)
{
	int i;

	nvme_qpair_process_completions(&ctrlr->adminq);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		if (ctrlr->ioq && ctrlr->ioq[i].cpl)
			nvme_qpair_process_completions(&ctrlr->ioq[i]);
}

/*
 * Poll the single-vector interrupt case: num_io_queues will be 1 and
 * there's only a single vector.  While we're polling, we mask further
 * interrupts in the controller.
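 *
 * As a note on the register writes below: INTMS and INTMC are the NVMe
 * interrupt mask set / mask clear registers, and writing 1 to bit 0 masks
 * or unmasks interrupt vector 0, the only vector in use in this mode.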
 */
void
nvme_ctrlr_intx_handler(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_mmio_write_4(ctrlr, intms, 1);
	nvme_ctrlr_poll(ctrlr);
	nvme_mmio_write_4(ctrlr, intmc, 1);
}

static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_pt_command *pt = arg;
	struct mtx *mtx = pt->driver_lock;
	uint16_t status;

	bzero(&pt->cpl, sizeof(pt->cpl));
	pt->cpl.cdw0 = cpl->cdw0;

	status = cpl->status;
	status &= ~NVME_STATUS_P_MASK;
	pt->cpl.status = status;

	mtx_lock(mtx);
	pt->driver_lock = NULL;
	wakeup(pt);
	mtx_unlock(mtx);
}

int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
    struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
    int is_admin_cmd)
{
	struct nvme_request *req;
	struct mtx *mtx;
	struct buf *buf = NULL;
	int ret = 0;
	vm_offset_t addr, end;

	if (pt->len > 0) {
		/*
		 * vmapbuf calls vm_fault_quick_hold_pages which only maps full
		 * pages.  Ensure this request has fewer than MAXPHYS bytes when
		 * extended to full pages.
		 */
		addr = (vm_offset_t)pt->buf;
		end = round_page(addr + pt->len);
		addr = trunc_page(addr);
		if (end - addr > MAXPHYS)
			return EIO;

		if (pt->len > ctrlr->max_xfer_size) {
			nvme_printf(ctrlr, "pt->len (%d) "
			    "exceeds max_xfer_size (%d)\n", pt->len,
			    ctrlr->max_xfer_size);
			return EIO;
		}
		if (is_user_buffer) {
			/*
			 * Ensure the user buffer is wired for the duration of
			 * this pass-through command.
			 */
			PHOLD(curproc);
			buf = uma_zalloc(pbuf_zone, M_WAITOK);
			buf->b_data = pt->buf;
			buf->b_bufsize = pt->len;
			buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
			if (vmapbuf(buf, 1) < 0) {
				ret = EFAULT;
				goto err;
			}
			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
			    nvme_pt_done, pt);
		} else
			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
			    nvme_pt_done, pt);
	} else
		req = nvme_allocate_request_null(nvme_pt_done, pt);

	/* Assume user space already converted to little-endian */
	req->cmd.opc = pt->cmd.opc;
	req->cmd.fuse = pt->cmd.fuse;
	req->cmd.rsvd2 = pt->cmd.rsvd2;
	req->cmd.rsvd3 = pt->cmd.rsvd3;
	req->cmd.cdw10 = pt->cmd.cdw10;
	req->cmd.cdw11 = pt->cmd.cdw11;
	req->cmd.cdw12 = pt->cmd.cdw12;
	req->cmd.cdw13 = pt->cmd.cdw13;
	req->cmd.cdw14 = pt->cmd.cdw14;
	req->cmd.cdw15 = pt->cmd.cdw15;

	req->cmd.nsid = htole32(nsid);

	mtx = mtx_pool_find(mtxpool_sleep, pt);
	pt->driver_lock = mtx;

	if (is_admin_cmd)
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	else
		nvme_ctrlr_submit_io_request(ctrlr, req);

	mtx_lock(mtx);
	while (pt->driver_lock != NULL)
		mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
	mtx_unlock(mtx);

err:
	if (buf != NULL) {
		uma_zfree(pbuf_zone, buf);
		PRELE(curproc);
	}

	return (ret);
}

static int
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct nvme_controller *ctrlr;
	struct nvme_pt_command *pt;

	ctrlr = cdev->si_drv1;

	switch (cmd) {
	case NVME_RESET_CONTROLLER:
		nvme_ctrlr_reset(ctrlr);
		break;
	case NVME_PASSTHROUGH_CMD:
		pt = (struct nvme_pt_command *)arg;
		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
	case NVME_GET_NSID:
	{
		struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;
		strncpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
		    sizeof(gnsid->cdev));
		gnsid->nsid = 0;
		break;
	}
	default:
		return (ENOTTY);
	}

	return (0);
}

static struct cdevsw nvme_ctrlr_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_ioctl =	nvme_ctrlr_ioctl
};

int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
	struct make_dev_args md_args;
	uint32_t cap_lo;
	uint32_t cap_hi;
	uint32_t to;
	uint8_t mpsmin;
	int status, timeout_period;

	ctrlr->dev = dev;

	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);

	cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
	ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2;

	mpsmin = NVME_CAP_HI_MPSMIN(cap_hi);
	ctrlr->min_page_size = 1 << (12 + mpsmin);

	/* Get ready timeout value from controller, in units of 500ms. */
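	/*
	 * Illustrative arithmetic: a controller reporting CAP.TO == 3 ends
	 * up with a ready_timeout_in_ms of (3 + 1) * 500 == 2000 ms below.
	 */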
	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
	to = NVME_CAP_LO_TO(cap_lo) + 1;
	ctrlr->ready_timeout_in_ms = to * 500;

	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;

	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);

	ctrlr->enable_aborts = 0;
	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);

	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
	if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
		return (ENXIO);

	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");

	ctrlr->is_resetting = 0;
	ctrlr->is_initialized = 0;
	ctrlr->notification_sent = 0;
	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
	STAILQ_INIT(&ctrlr->fail_req);
	ctrlr->is_failed = FALSE;

	make_dev_args_init(&md_args);
	md_args.mda_devsw = &nvme_ctrlr_cdevsw;
	md_args.mda_uid = UID_ROOT;
	md_args.mda_gid = GID_WHEEL;
	md_args.mda_mode = 0600;
	md_args.mda_unit = device_get_unit(dev);
	md_args.mda_si_drv1 = (void *)ctrlr;
	status = make_dev_s(&md_args, &ctrlr->cdev, "nvme%d",
	    device_get_unit(dev));
	if (status != 0)
		return (ENXIO);

	return (0);
}

void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
{
	int gone, i;

	if (ctrlr->resource == NULL)
		goto nores;

	/*
	 * Check whether it is a hot unplug or a clean driver detach.
	 * If device is not there any more, skip any shutdown commands.
	 */
	gone = (nvme_mmio_read_4(ctrlr, csts) == 0xffffffff);
	if (gone)
		nvme_ctrlr_fail(ctrlr);
	else
		nvme_notify_fail_consumers(ctrlr);

	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
		nvme_ns_destruct(&ctrlr->ns[i]);

	if (ctrlr->cdev)
		destroy_dev(ctrlr->cdev);

	if (ctrlr->is_initialized) {
		if (!gone)
			nvme_ctrlr_delete_qpairs(ctrlr);
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_io_qpair_destroy(&ctrlr->ioq[i]);
		free(ctrlr->ioq, M_NVME);
		nvme_admin_qpair_destroy(&ctrlr->adminq);
	}

	/*
	 * Notify the controller of a shutdown, even though this is due to
	 * a driver unload, not a system shutdown (this path is not invoked
	 * during shutdown).  This ensures the controller receives a
	 * shutdown notification in case the system is shut down before
	 * reloading the driver.
	 */
	if (!gone)
		nvme_ctrlr_shutdown(ctrlr);

	if (!gone)
		nvme_ctrlr_disable(ctrlr);

	if (ctrlr->taskqueue)
		taskqueue_free(ctrlr->taskqueue);

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	if (ctrlr->bar4_resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
	}

	bus_release_resource(dev, SYS_RES_MEMORY,
	    ctrlr->resource_id, ctrlr->resource);

nores:
	mtx_destroy(&ctrlr->lock);
}

void
nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
{
	uint32_t cc;
	uint32_t csts;
	int ticks = 0;

	cc = nvme_mmio_read_4(ctrlr, cc);
	cc &= ~(NVME_CC_REG_SHN_MASK << NVME_CC_REG_SHN_SHIFT);
	cc |= NVME_SHN_NORMAL << NVME_CC_REG_SHN_SHIFT;
	nvme_mmio_write_4(ctrlr, cc, cc);

	while (1) {
		csts = nvme_mmio_read_4(ctrlr, csts);
		if (csts == 0xffffffff)		/* Hot unplug. */
			break;
		if (NVME_CSTS_GET_SHST(csts) == NVME_SHST_COMPLETE)
			break;
		if (ticks++ > 5*hz) {
			nvme_printf(ctrlr, "did not complete shutdown within"
			    " 5 seconds of notification\n");
			break;
		}
		pause("nvme shn", 1);
	}
}

void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	nvme_qpair_submit_request(&ctrlr->adminq, req);
}

void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
	struct nvme_qpair *qpair;

	qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
	nvme_qpair_submit_request(qpair, req);
}

device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{

	return (ctrlr->dev);
}

const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{

	return (&ctrlr->cdata);
}

int
nvme_ctrlr_suspend(struct nvme_controller *ctrlr)
{
	int to = hz;

	/*
	 * Can't touch failed controllers, so it's already suspended.
	 */
	if (ctrlr->is_failed)
		return (0);

	/*
	 * We don't want the reset taskqueue running, since it does similar
	 * things, so prevent it from running after we start.  Wait for any
	 * reset that may have been started to complete.  The reset process we
	 * follow will ensure that any new I/O will queue and be given to the
	 * hardware after we resume (though there should be none).
	 */
	while (atomic_cmpset_32(&ctrlr->is_resetting, 0, 1) == 0 && to-- > 0)
		pause("nvmesusp", 1);
	if (to <= 0) {
		nvme_printf(ctrlr,
		    "Competing reset task didn't finish. Try again later.\n");
		return (EWOULDBLOCK);
	}

	/*
	 * Per Section 7.6.2 of NVMe spec 1.4, to properly suspend, we need to
	 * delete the hardware I/O queues, and then shut down.  This properly
	 * flushes any metadata the drive may have stored so it can survive
	 * having its power removed and prevents the unsafe shutdown count from
	 * incrementing.  Once we delete the qpairs, we have to disable them
	 * before shutting down.  The delay is out of paranoia in
	 * nvme_ctrlr_hw_reset, and is repeated here (though we should have no
	 * pending I/O that the delay copes with).
1352 */ 1353 nvme_ctrlr_delete_qpairs(ctrlr); 1354 nvme_ctrlr_disable_qpairs(ctrlr); 1355 DELAY(100*1000); 1356 nvme_ctrlr_shutdown(ctrlr); 1357 1358 return (0); 1359 } 1360 1361 int 1362 nvme_ctrlr_resume(struct nvme_controller *ctrlr) 1363 { 1364 1365 /* 1366 * Can't touch failed controllers, so nothing to do to resume. 1367 */ 1368 if (ctrlr->is_failed) 1369 return (0); 1370 1371 /* 1372 * Have to reset the hardware twice, just like we do on attach. See 1373 * nmve_attach() for why. 1374 */ 1375 if (nvme_ctrlr_hw_reset(ctrlr) != 0) 1376 goto fail; 1377 if (nvme_ctrlr_hw_reset(ctrlr) != 0) 1378 goto fail; 1379 1380 /* 1381 * Now that we're reset the hardware, we can restart the controller. Any 1382 * I/O that was pending is requeued. Any admin commands are aborted with 1383 * an error. Once we've restarted, take the controller out of reset. 1384 */ 1385 nvme_ctrlr_start(ctrlr, true); 1386 atomic_cmpset_32(&ctrlr->is_resetting, 1, 0); 1387 1388 return (0); 1389 fail: 1390 /* 1391 * Since we can't bring the controller out of reset, announce and fail 1392 * the controller. However, we have to return success for the resume 1393 * itself, due to questionable APIs. 1394 */ 1395 nvme_printf(ctrlr, "Failed to reset on resume, failing.\n"); 1396 nvme_ctrlr_fail(ctrlr); 1397 atomic_cmpset_32(&ctrlr->is_resetting, 1, 0); 1398 return (0); 1399 } 1400