/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2016 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cam.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/uio.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

#define B4_CHK_RDY_DELAY_MS	2300	/* work around controller bug */

static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer);
static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);

static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{

	ctrlr->resource_id = PCIR_BAR(0);

	ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->resource_id, RF_ACTIVE);

	if (ctrlr->resource == NULL) {
		nvme_printf(ctrlr, "unable to allocate pci resource\n");
		return (ENOMEM);
	}

	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed behind
	 * BAR 4/5, separate from the control/doorbell registers.  Always
	 * try to map this bar, because it must be mapped prior to calling
	 * pci_alloc_msix().  If the table isn't behind BAR 4/5,
	 * bus_alloc_resource() will just return NULL which is OK.
	 */
	ctrlr->bar4_resource_id = PCIR_BAR(4);
	ctrlr->bar4_resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->bar4_resource_id, RF_ACTIVE);

	return (0);
}
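/*
 * Construct the admin queue pair (qpair ID 0, interrupt vector 0).  The
 * queue size may be overridden with the hw.nvme.admin_entries loader
 * tunable; values outside the supported range are reverted to the
 * driver default.
 */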
static int
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
	struct nvme_qpair *qpair;
	uint32_t num_entries;
	int error;

	qpair = &ctrlr->adminq;

	num_entries = NVME_ADMIN_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
	/*
	 * If admin_entries was overridden to an invalid value, revert it
	 * back to our default value.
	 */
	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
		    "specified\n", num_entries);
		num_entries = NVME_ADMIN_ENTRIES;
	}

	/*
	 * The admin queue's max xfer size is treated differently than the
	 * max I/O xfer size.  16KB is sufficient here - maybe even less?
	 */
	error = nvme_qpair_construct(qpair,
	    0, /* qpair ID */
	    0, /* vector */
	    num_entries,
	    NVME_ADMIN_TRACKERS,
	    ctrlr);
	return (error);
}

static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_qpair *qpair;
	union cap_lo_register cap_lo;
	int i, error, num_entries, num_trackers;

	num_entries = NVME_IO_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);

	/*
	 * NVMe spec sets a hard limit of 64K max entries, but
	 * devices may specify a smaller limit, so we need to check
	 * the MQES field in the capabilities register.
	 */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	num_entries = min(num_entries, cap_lo.bits.mqes + 1);

	num_trackers = NVME_IO_TRACKERS;
	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
	/*
	 * No need to have more trackers than entries in the submit queue.
	 * Note also that for a queue size of N, we can only have (N-1)
	 * commands outstanding, hence the "-1" here.
	 */
	num_trackers = min(num_trackers, (num_entries - 1));

	/*
	 * Our best estimate for the maximum number of I/Os that we should
	 * normally have in flight at one time.  This should be viewed as a
	 * hint, not a hard limit, and will need to be revisited when the
	 * upper layers of the storage system grow multi-queue support.
	 */
	ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;

	/*
	 * This was calculated previously when setting up interrupts, but
	 * a controller could theoretically support fewer I/O queues than
	 * MSI-X vectors.  So calculate again here just to be safe.
	 */
	ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);

	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
	    M_NVME, M_ZERO | M_WAITOK);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		/*
		 * Admin queue has ID=0.  IO queues start at ID=1 -
		 * hence the 'i+1' here.
		 *
		 * For I/O queues, use the controller-wide max_xfer_size
		 * calculated in nvme_attach().
		 */
		error = nvme_qpair_construct(qpair,
		    i + 1, /* qpair ID */
		    ctrlr->msix_enabled ? i + 1 : 0, /* vector */
		    num_entries,
		    num_trackers,
		    ctrlr);
		if (error)
			return (error);

		/*
		 * Do not bother binding interrupts if we only have one I/O
		 * interrupt thread for this controller.
		 */
		if (ctrlr->num_io_queues > 1)
			bus_bind_intr(ctrlr->dev, qpair->res,
			    i * ctrlr->num_cpus_per_ioq);
	}

	return (0);
}
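/*
 * Mark the controller as failed: fail the admin qpair and any I/O
 * qpairs that have been constructed, then notify registered consumers
 * so upper layers can react to the failure.
 */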
static void
nvme_ctrlr_fail(struct nvme_controller *ctrlr)
{
	int i;

	ctrlr->is_failed = TRUE;
	nvme_qpair_fail(&ctrlr->adminq);
	if (ctrlr->ioq != NULL) {
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_qpair_fail(&ctrlr->ioq[i]);
	}
	nvme_notify_fail_consumers(ctrlr);
}

void
nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	mtx_lock(&ctrlr->lock);
	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
	mtx_unlock(&ctrlr->lock);
	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
}

static void
nvme_ctrlr_fail_req_task(void *arg, int pending)
{
	struct nvme_controller *ctrlr = arg;
	struct nvme_request *req;

	mtx_lock(&ctrlr->lock);
	while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
		req = STAILQ_FIRST(&ctrlr->fail_req);
		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
		nvme_qpair_manual_complete_request(req->qpair, req,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
	}
	mtx_unlock(&ctrlr->lock);
}

static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
{
	int ms_waited;
	union csts_register csts;

	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	ms_waited = 0;
	while (csts.bits.rdy != desired_val) {
		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
			nvme_printf(ctrlr, "controller ready did not become %d "
			    "within %d ms\n", desired_val,
			    ctrlr->ready_timeout_in_ms);
			return (ENXIO);
		}
		DELAY(1000);
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}

	return (0);
}

static int
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;
	int err;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	/*
	 * Per 3.1.5 in the NVMe 1.3 spec, transitioning CC.EN from 0 to 1
	 * when CSTS.RDY is 1, or transitioning CC.EN from 1 to 0 when
	 * CSTS.RDY is 0, "has undefined results".  So wait for CSTS.RDY to
	 * match the current CC.EN value before toggling it.  Short circuit
	 * if we're already disabled.
	 */
	if (cc.bits.en == 1) {
		if (csts.bits.rdy == 0) {
			/* EN == 1, wait for RDY == 1 or fail */
			err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
			if (err != 0)
				return (err);
		}
	} else {
		/* EN == 0 already; wait for RDY == 0 */
		if (csts.bits.rdy == 0)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
	}

	cc.bits.en = 0;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	/*
	 * Some drives have issues with accessing the mmio after we
	 * disable, so delay for a bit after we write the bit to
	 * cope with these issues.
	 */
	if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
		pause("nvmeR", B4_CHK_RDY_DELAY_MS * hz / 1000);
	return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
}
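/*
 * Bring the controller to the enabled (CC.EN=1, CSTS.RDY=1) state.  The
 * admin submission/completion queue base addresses and sizes must be
 * programmed before CC.EN is set; the spec-defined entry sizes (64-byte
 * SQEs, 16-byte CQEs) are encoded as powers of two in CC.IOSQES and
 * CC.IOCQES.
 */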
static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;
	union aqa_register aqa;
	int err;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	/*
	 * See note in nvme_ctrlr_disable.  Short circuit if we're already
	 * enabled.
	 */
	if (cc.bits.en == 1) {
		if (csts.bits.rdy == 1)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
	} else {
		/* EN == 0 already; wait for RDY == 0 or fail */
		err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
		if (err != 0)
			return (err);
	}

	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
	DELAY(5000);
	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
	DELAY(5000);

	aqa.raw = 0;
	/* acqs and asqs are 0-based. */
	aqa.bits.acqs = ctrlr->adminq.num_entries - 1;
	aqa.bits.asqs = ctrlr->adminq.num_entries - 1;
	nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
	DELAY(5000);

	cc.bits.en = 1;
	cc.bits.css = 0;
	cc.bits.ams = 0;
	cc.bits.shn = 0;
	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */

	/* For 4KB pages this evaluates to 0, which is according to spec. */
	cc.bits.mps = (PAGE_SIZE >> 13);

	nvme_mmio_write_4(ctrlr, cc, cc.raw);

	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
}

int
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
{
	int i, err;

	nvme_admin_qpair_disable(&ctrlr->adminq);
	/*
	 * I/O queues are not allocated before the initial HW
	 * reset, so do not try to disable them.  Use is_initialized
	 * to determine if this is the initial HW reset.
	 */
	if (ctrlr->is_initialized) {
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_io_qpair_disable(&ctrlr->ioq[i]);
	}

	DELAY(100*1000);

	err = nvme_ctrlr_disable(ctrlr);
	if (err != 0)
		return (err);
	return (nvme_ctrlr_enable(ctrlr));
}

void
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{
	int cmpset;

	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);

	if (cmpset == 0 || ctrlr->is_failed)
		/*
		 * Controller is already resetting or has failed.  Return
		 * immediately since there is no need to kick off another
		 * reset in these cases.
		 */
		return;

	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}

static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;

	status.done = FALSE;
	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
		return (ENXIO);
	}

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what
	 * the controller supports.
	 */
	if (ctrlr->cdata.mdts > 0)
		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));

	return (0);
}
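/*
 * Negotiate the number of I/O queue pairs with the controller via the
 * SET_FEATURES/NUMBER_OF_QUEUES admin command.  The driver requests
 * num_io_queues of each type and scales back to whatever the controller
 * actually grants.
 */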
static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	int cq_allocated, sq_allocated;

	status.done = FALSE;
	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
		return (ENXIO);
	}

	/*
	 * Data in cdw0 is 0-based.
	 * Lower 16-bits indicate number of submission queues allocated.
	 * Upper 16-bits indicate number of completion queues allocated.
	 */
	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
	cq_allocated = (status.cpl.cdw0 >> 16) + 1;

	/*
	 * Controller may allocate more queues than we requested,
	 * so use the minimum of the number requested and what was
	 * actually allocated.
	 */
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);

	return (0);
}

static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	struct nvme_qpair *qpair;
	int i;

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		status.done = FALSE;
		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
		    nvme_completion_poll_cb, &status);
		while (status.done == FALSE)
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
			return (ENXIO);
		}

		status.done = FALSE;
		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		while (status.done == FALSE)
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
			return (ENXIO);
		}
	}

	return (0);
}

static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
{
	struct nvme_namespace *ns;
	uint32_t i;

	for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
		ns = &ctrlr->ns[i];
		nvme_ns_construct(ns, i + 1, ctrlr);
	}

	return (0);
}

static boolean_t
is_log_page_id_valid(uint8_t page_id)
{

	switch (page_id) {
	case NVME_LOG_ERROR:
	case NVME_LOG_HEALTH_INFORMATION:
	case NVME_LOG_FIRMWARE_SLOT:
		return (TRUE);
	}

	return (FALSE);
}

static uint32_t
nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
{
	uint32_t log_page_size;

	switch (page_id) {
	case NVME_LOG_ERROR:
		log_page_size = min(
		    sizeof(struct nvme_error_information_entry) *
		    ctrlr->cdata.elpe,
		    NVME_MAX_AER_LOG_SIZE);
		break;
	case NVME_LOG_HEALTH_INFORMATION:
		log_page_size = sizeof(struct nvme_health_information_page);
		break;
	case NVME_LOG_FIRMWARE_SLOT:
		log_page_size = sizeof(struct nvme_firmware_page);
		break;
	default:
		log_page_size = 0;
		break;
	}

	return (log_page_size);
}

static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
    union nvme_critical_warning_state state)
{

	if (state.bits.available_spare == 1)
		nvme_printf(ctrlr, "available spare space below threshold\n");

	if (state.bits.temperature == 1)
		nvme_printf(ctrlr, "temperature above threshold\n");

	if (state.bits.device_reliability == 1)
		nvme_printf(ctrlr, "device reliability degraded\n");

	if (state.bits.read_only == 1)
		nvme_printf(ctrlr, "media placed in read only mode\n");

	if (state.bits.volatile_memory_backup == 1)
		nvme_printf(ctrlr, "volatile memory backup device failed\n");

	if (state.bits.reserved != 0)
		nvme_printf(ctrlr,
		    "unknown critical warning(s): state = 0x%02x\n", state.raw);
}
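/*
 * Asynchronous event handling: the driver keeps several Asynchronous
 * Event Request (AER) commands outstanding.  When one completes,
 * nvme_ctrlr_async_event_cb() extracts the log page ID from the
 * completion and fetches that log page; the callback below then
 * notifies consumers and re-arms a replacement AER.
 */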
static void
nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request *aer = arg;
	struct nvme_health_information_page *health_info;

	/*
	 * If the log page fetch for some reason completed with an error,
	 * don't pass log page data to the consumers.  In practice, this case
	 * should never happen.
	 */
	if (nvme_completion_is_error(cpl))
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, NULL, 0);
	else {
		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
			health_info = (struct nvme_health_information_page *)
			    aer->log_page_buffer;
			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
			    health_info->critical_warning);
			/*
			 * Critical warnings reported through the
			 * SMART/health log page are persistent, so
			 * clear the associated bits in the async event
			 * config so that we do not receive repeated
			 * notifications for the same event.
			 */
			aer->ctrlr->async_event_config.raw &=
			    ~health_info->critical_warning.raw;
			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
			    aer->ctrlr->async_event_config, NULL, NULL);
		}

		/*
		 * Pass the cpl data from the original async event completion,
		 * not the log page fetch.
		 */
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
	}

	/*
	 * Repost another asynchronous event request to replace the one
	 * that just completed.
	 */
	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
}

static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request *aer = arg;

	if (nvme_completion_is_error(cpl)) {
		/*
		 * Do not retry failed async event requests.  This avoids
		 * infinite loops where a new async event request is submitted
		 * to replace the one just failed, only to fail again and
		 * perpetuate the loop.
		 */
		return;
	}

	/* Associated log page is in bits 23:16 of completion entry dw0. */
	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;

	nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
	    aer->log_page_id);

	if (is_log_page_id_valid(aer->log_page_id)) {
		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
		    aer->log_page_id);
		memcpy(&aer->cpl, cpl, sizeof(*cpl));
		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
		    aer);
		/* Wait to notify consumers until after log page is fetched. */
	} else {
		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
		    NULL, 0);

		/*
		 * Repost another asynchronous event request to replace the one
		 * that just completed.
		 */
		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
	}
}
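/*
 * Allocate a no-payload admin request carrying an Asynchronous Event
 * Request command and submit it.  AERs are left outstanding
 * indefinitely by design, so the normal request timeout is disabled
 * for them.
 */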
static void
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)
{
	struct nvme_request *req;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
	aer->req = req;

	/*
	 * Disable timeout here, since asynchronous event requests should by
	 * nature never be timed out.
	 */
	req->timeout = FALSE;
	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	struct nvme_async_event_request *aer;
	uint32_t i;

	ctrlr->async_event_config.raw = 0xFF;
	ctrlr->async_event_config.bits.reserved = 0;

	status.done = FALSE;
	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
	    0, NULL, 0, nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl) ||
	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
		nvme_printf(ctrlr, "temperature threshold not supported\n");
		ctrlr->async_event_config.bits.temperature = 0;
	}

	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
	    ctrlr->async_event_config, NULL, NULL);

	/* aerl is a zero-based value, so we need to add 1 here. */
	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));

	for (i = 0; i < ctrlr->num_aers; i++) {
		aer = &ctrlr->aer[i];
		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
	}
}

static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{

	ctrlr->int_coal_time = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
	    &ctrlr->int_coal_time);

	ctrlr->int_coal_threshold = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
	    &ctrlr->int_coal_threshold);

	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
	    ctrlr->int_coal_threshold, NULL, NULL);
}

static void
nvme_ctrlr_start(void *ctrlr_arg)
{
	struct nvme_controller *ctrlr = ctrlr_arg;
	uint32_t old_num_io_queues;
	int i;

	/*
	 * Only reset adminq here when we are restarting the
	 * controller after a reset.  During initialization,
	 * we have already submitted admin commands to get
	 * the number of I/O queues supported, so cannot reset
	 * the adminq again here.
	 */
	if (ctrlr->is_resetting) {
		nvme_qpair_reset(&ctrlr->adminq);
	}

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_reset(&ctrlr->ioq[i]);

	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_identify(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	/*
	 * The number of qpairs is determined during controller initialization,
	 * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
	 * HW limit.  We call SET_FEATURES again here so that it gets called
	 * after any reset for controllers that depend on the driver to
	 * explicitly specify how many queues it will use.  This value should
	 * never change between resets, so panic if somehow that does happen.
	 */
	if (ctrlr->is_resetting) {
		old_num_io_queues = ctrlr->num_io_queues;
		if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
			nvme_ctrlr_fail(ctrlr);
			return;
		}

		if (old_num_io_queues != ctrlr->num_io_queues) {
			panic("num_io_queues changed from %u to %u",
			    old_num_io_queues, ctrlr->num_io_queues);
		}
	}

	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	nvme_ctrlr_configure_aer(ctrlr);
	nvme_ctrlr_configure_int_coalescing(ctrlr);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_enable(&ctrlr->ioq[i]);
}
void
nvme_ctrlr_start_config_hook(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_qpair_reset(&ctrlr->adminq);
	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
	    nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	nvme_sysctl_initialize_ctrlr(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);

	ctrlr->is_initialized = 1;
	nvme_notify_new_controller(ctrlr);
}

static void
nvme_ctrlr_reset_task(void *arg, int pending)
{
	struct nvme_controller *ctrlr = arg;
	int status;

	nvme_printf(ctrlr, "resetting controller\n");
	status = nvme_ctrlr_hw_reset(ctrlr);
	/*
	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
	 * handlers on this CPU that were blocked on a qpair lock.  We want
	 * all nvme interrupts completed before proceeding with restarting the
	 * controller.
	 *
	 * XXX - any way to guarantee the interrupt handlers have quiesced?
	 */
	pause("nvmereset", hz / 10);
	if (status == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
}

/*
 * Poll all the queues enabled on the device for completion.
 */
void
nvme_ctrlr_poll(struct nvme_controller *ctrlr)
{
	int i;

	nvme_qpair_process_completions(&ctrlr->adminq);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		if (ctrlr->ioq && ctrlr->ioq[i].cpl)
			nvme_qpair_process_completions(&ctrlr->ioq[i]);
}
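/*
 * Note: per the NVMe spec, the INTMS/INTMC interrupt mask registers
 * must not be used when MSI-X is enabled, so the mask/poll/unmask
 * sequence below applies only to the legacy INTx case.
 */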
/*
 * Poll the single-vector interrupt case: num_io_queues will be 1 and
 * there's only a single vector.  While we're polling, we mask further
 * interrupts in the controller.
 */
void
nvme_ctrlr_intx_handler(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_mmio_write_4(ctrlr, intms, 1);
	nvme_ctrlr_poll(ctrlr);
	nvme_mmio_write_4(ctrlr, intmc, 1);
}

static int
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{

	ctrlr->msix_enabled = 0;
	ctrlr->num_io_queues = 1;
	ctrlr->num_cpus_per_ioq = mp_ncpus;
	ctrlr->rid = 0;
	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);

	if (ctrlr->res == NULL) {
		nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
		return (ENOMEM);
	}

	bus_setup_intr(ctrlr->dev, ctrlr->res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
	    ctrlr, &ctrlr->tag);

	if (ctrlr->tag == NULL) {
		nvme_printf(ctrlr, "unable to setup intx handler\n");
		return (ENOMEM);
	}

	return (0);
}

static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_pt_command *pt = arg;

	bzero(&pt->cpl, sizeof(pt->cpl));
	pt->cpl.cdw0 = cpl->cdw0;
	pt->cpl.status = cpl->status;
	pt->cpl.status.p = 0;

	mtx_lock(pt->driver_lock);
	wakeup(pt);
	mtx_unlock(pt->driver_lock);
}
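/*
 * Execute a passthrough NVMe command on behalf of a user or kernel
 * consumer, wiring the caller's buffer (if any) for the duration of
 * the command and sleeping until nvme_pt_done() wakes us up.
 *
 * Userspace reaches this path through the NVME_PASSTHROUGH_CMD ioctl
 * on the controller device.  A rough sketch (field values are
 * illustrative only, not a complete command):
 *
 *	struct nvme_pt_command pt;
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.cmd.opc = NVME_OPC_GET_LOG_PAGE;
 *	pt.buf = buffer;
 *	pt.len = buffer_len;
 *	pt.is_read = 1;
 *	ioctl(fd, NVME_PASSTHROUGH_CMD, &pt);
 */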
int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
    struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
    int is_admin_cmd)
{
	struct nvme_request *req;
	struct mtx *mtx;
	struct buf *buf = NULL;
	int ret = 0;
	vm_offset_t addr, end;

	if (pt->len > 0) {
		/*
		 * vmapbuf calls vm_fault_quick_hold_pages which only maps full
		 * pages.  Ensure this request has fewer than MAXPHYS bytes when
		 * extended to full pages.
		 */
		addr = (vm_offset_t)pt->buf;
		end = round_page(addr + pt->len);
		addr = trunc_page(addr);
		if (end - addr > MAXPHYS)
			return (EIO);

		if (pt->len > ctrlr->max_xfer_size) {
			nvme_printf(ctrlr, "pt->len (%d) "
			    "exceeds max_xfer_size (%d)\n", pt->len,
			    ctrlr->max_xfer_size);
			return (EIO);
		}
		if (is_user_buffer) {
			/*
			 * Ensure the user buffer is wired for the duration of
			 * this passthrough command.
			 */
			PHOLD(curproc);
			buf = getpbuf(NULL);
			buf->b_data = pt->buf;
			buf->b_bufsize = pt->len;
			buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
			if (vmapbuf(buf, 1) < 0) {
#else
			if (vmapbuf(buf) < 0) {
#endif
				ret = EFAULT;
				goto err;
			}
			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
			    nvme_pt_done, pt);
		} else
			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
			    nvme_pt_done, pt);
	} else
		req = nvme_allocate_request_null(nvme_pt_done, pt);

	req->cmd.opc = pt->cmd.opc;
	req->cmd.cdw10 = pt->cmd.cdw10;
	req->cmd.cdw11 = pt->cmd.cdw11;
	req->cmd.cdw12 = pt->cmd.cdw12;
	req->cmd.cdw13 = pt->cmd.cdw13;
	req->cmd.cdw14 = pt->cmd.cdw14;
	req->cmd.cdw15 = pt->cmd.cdw15;

	req->cmd.nsid = nsid;

	if (is_admin_cmd)
		mtx = &ctrlr->lock;
	else
		mtx = &ctrlr->ns[nsid - 1].lock;

	mtx_lock(mtx);
	pt->driver_lock = mtx;

	if (is_admin_cmd)
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	else
		nvme_ctrlr_submit_io_request(ctrlr, req);

	mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
	mtx_unlock(mtx);

	pt->driver_lock = NULL;

err:
	if (buf != NULL) {
		relpbuf(buf, NULL);
		PRELE(curproc);
	}

	return (ret);
}

static int
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct nvme_controller *ctrlr;
	struct nvme_pt_command *pt;

	ctrlr = cdev->si_drv1;

	switch (cmd) {
	case NVME_RESET_CONTROLLER:
		nvme_ctrlr_reset(ctrlr);
		break;
	case NVME_PASSTHROUGH_CMD:
		pt = (struct nvme_pt_command *)arg;
		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid,
		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
	default:
		return (ENOTTY);
	}

	return (0);
}

static struct cdevsw nvme_ctrlr_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_ioctl =	nvme_ctrlr_ioctl
};
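/*
 * Interrupt allocation policy can be adjusted with loader tunables,
 * e.g. in loader.conf (values shown are examples, not the defaults):
 *
 *	hw.nvme.per_cpu_io_queues="0"	# share one I/O queue set across CPUs
 *	hw.nvme.min_cpus_per_ioq="2"	# at least 2 CPUs per I/O queue
 *	hw.nvme.force_intx="1"		# force legacy INTx interrupts
 */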
static void
nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
{
	device_t dev;
	int per_cpu_io_queues;
	int min_cpus_per_ioq;
	int num_vectors_requested, num_vectors_allocated;
	int num_vectors_available;

	dev = ctrlr->dev;
	min_cpus_per_ioq = 1;
	TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);

	if (min_cpus_per_ioq < 1) {
		min_cpus_per_ioq = 1;
	} else if (min_cpus_per_ioq > mp_ncpus) {
		min_cpus_per_ioq = mp_ncpus;
	}

	per_cpu_io_queues = 1;
	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);

	if (per_cpu_io_queues == 0) {
		min_cpus_per_ioq = mp_ncpus;
	}

	ctrlr->force_intx = 0;
	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);

	/*
	 * FreeBSD currently cannot allocate more than about 190 vectors at
	 * boot, meaning that systems with high core count and many devices
	 * requesting per-CPU interrupt vectors will not get their full
	 * allotment.  So first, try to allocate as many as we may need to
	 * understand what is available, then immediately release them.
	 * Then figure out how many of those we will actually use, based on
	 * assigning an equal number of cores to each I/O queue.
	 */

	/* One vector per core for I/O queues, plus one for the admin queue. */
	num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
	if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
		num_vectors_available = 0;
	}
	pci_release_msi(dev);

	if (ctrlr->force_intx || num_vectors_available < 2) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	/*
	 * Do not use all vectors for I/O queues - one must be saved for the
	 * admin queue.
	 */
	ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
	    howmany(mp_ncpus, num_vectors_available - 1));

	ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
	num_vectors_requested = ctrlr->num_io_queues + 1;
	num_vectors_allocated = num_vectors_requested;

	/*
	 * Now just allocate the number of vectors we need.  This should
	 * succeed, since we previously called pci_alloc_msix() successfully
	 * returning at least this many vectors, but just to be safe, if
	 * something goes wrong just revert to INTx.
	 */
	if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	if (num_vectors_allocated < num_vectors_requested) {
		pci_release_msi(dev);
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	ctrlr->msix_enabled = 1;
}
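/*
 * One-time controller construction at attach: map the register BARs,
 * sanity-check controller capabilities, derive timeouts from CAP.TO
 * (units of 500 ms), set up interrupts and the admin queue pair, and
 * create the /dev/nvmeX character device and per-controller taskqueue.
 */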
int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
	union cap_lo_register cap_lo;
	union cap_hi_register cap_hi;
	int status, timeout_period;

	ctrlr->dev = dev;

	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);

	status = nvme_ctrlr_allocate_bar(ctrlr);

	if (status != 0)
		return (status);

	/*
	 * Software emulators may set the doorbell stride to something
	 * other than zero, but this driver is not set up to handle that.
	 */
	cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
	if (cap_hi.bits.dstrd != 0)
		return (ENXIO);

	ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin);

	/* Get ready timeout value from controller, in units of 500ms. */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;

	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;

	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);

	ctrlr->enable_aborts = 0;
	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);

	nvme_ctrlr_setup_interrupts(ctrlr);

	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
	if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
		return (ENXIO);

	ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_WHEEL, 0600, "nvme%d", device_get_unit(dev));

	if (ctrlr->cdev == NULL)
		return (ENXIO);

	ctrlr->cdev->si_drv1 = (void *)ctrlr;

	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");

	ctrlr->is_resetting = 0;
	ctrlr->is_initialized = 0;
	ctrlr->notification_sent = 0;
	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);

	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
	STAILQ_INIT(&ctrlr->fail_req);
	ctrlr->is_failed = FALSE;

	return (0);
}
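/*
 * Tear down a controller in roughly the reverse order of construction:
 * notify the device of shutdown, disable it, then release namespaces,
 * queues, device nodes, interrupt resources, and BAR mappings.
 */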
void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
{
	int i;

	/*
	 * Notify the controller of a shutdown, even though this is due to
	 * a driver unload, not a system shutdown (this path is not invoked
	 * during shutdown).  This ensures the controller receives a
	 * shutdown notification in case the system is shutdown before
	 * reloading the driver.
	 */
	nvme_ctrlr_shutdown(ctrlr);

	nvme_ctrlr_disable(ctrlr);
	taskqueue_free(ctrlr->taskqueue);

	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
		nvme_ns_destruct(&ctrlr->ns[i]);

	if (ctrlr->cdev)
		destroy_dev(ctrlr->cdev);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		nvme_io_qpair_destroy(&ctrlr->ioq[i]);
	}

	free(ctrlr->ioq, M_NVME);

	nvme_admin_qpair_destroy(&ctrlr->adminq);

	if (ctrlr->resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->resource_id, ctrlr->resource);
	}

	if (ctrlr->bar4_resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
	}

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	if (ctrlr->msix_enabled)
		pci_release_msi(dev);
}

void
nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;
	int ticks = 0;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	cc.bits.shn = NVME_SHN_NORMAL;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);
	while ((csts.bits.shst != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) {
		pause("nvme shn", 1);
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}
	if (csts.bits.shst != NVME_SHST_COMPLETE)
		nvme_printf(ctrlr, "did not complete shutdown within 5 seconds "
		    "of notification\n");
}

void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	nvme_qpair_submit_request(&ctrlr->adminq, req);
}

void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
	struct nvme_qpair *qpair;

	qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
	nvme_qpair_submit_request(qpair, req);
}

device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{

	return (ctrlr->dev);
}

const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{

	return (&ctrlr->cdata);
}