/*-
 * Copyright (C) 2012-2016 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/uio.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer);
static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);

static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{

	ctrlr->resource_id = PCIR_BAR(0);

	ctrlr->resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->resource_id, 0, ~0, 1, RF_ACTIVE);

	if (ctrlr->resource == NULL) {
		nvme_printf(ctrlr, "unable to allocate pci resource\n");
		return (ENOMEM);
	}

	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed behind
	 * BAR 4/5, separate from the control/doorbell registers.  Always
	 * try to map this bar, because it must be mapped prior to calling
	 * pci_alloc_msix().  If the table isn't behind BAR 4/5,
	 * bus_alloc_resource() will just return NULL which is OK.
	 */
	ctrlr->bar4_resource_id = PCIR_BAR(4);
	ctrlr->bar4_resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->bar4_resource_id, 0, ~0, 1, RF_ACTIVE);

	return (0);
}

static void
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
	struct nvme_qpair *qpair;
	uint32_t num_entries;

	qpair = &ctrlr->adminq;

	num_entries = NVME_ADMIN_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
	/*
	 * If admin_entries was overridden to an invalid value, revert it
	 * back to our default value.
	 */
	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
		    "specified\n", num_entries);
		num_entries = NVME_ADMIN_ENTRIES;
	}

	/*
	 * The admin queue's max xfer size is treated differently than the
	 * max I/O xfer size.  16KB is sufficient here - maybe even less?
	 */
	nvme_qpair_construct(qpair,
	    0, /* qpair ID */
	    0, /* vector */
	    num_entries,
	    NVME_ADMIN_TRACKERS,
	    ctrlr);
}

static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_qpair *qpair;
	union cap_lo_register cap_lo;
	int i, num_entries, num_trackers;

	num_entries = NVME_IO_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);

	/*
	 * NVMe spec sets a hard limit of 64K max entries, but
	 * devices may specify a smaller limit, so we need to check
	 * the MQES field in the capabilities register.
	 */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	num_entries = min(num_entries, cap_lo.bits.mqes + 1);

	num_trackers = NVME_IO_TRACKERS;
	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
	/*
	 * No need to have more trackers than entries in the submit queue.
	 * Note also that for a queue size of N, we can only have (N-1)
	 * commands outstanding, hence the "-1" here.
	 */
	num_trackers = min(num_trackers, (num_entries - 1));

	/*
	 * This was calculated previously when setting up interrupts, but
	 * a controller could theoretically support fewer I/O queues than
	 * MSI-X vectors.  So calculate again here just to be safe.
	 */
	ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);

	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
	    M_NVME, M_ZERO | M_WAITOK);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		/*
		 * Admin queue has ID=0.  IO queues start at ID=1 -
		 * hence the 'i+1' here.
		 *
		 * For I/O queues, use the controller-wide max_xfer_size
		 * calculated in nvme_attach().
		 */
		nvme_qpair_construct(qpair,
		    i + 1, /* qpair ID */
		    ctrlr->msix_enabled ? i + 1 : 0, /* vector */
		    num_entries,
		    num_trackers,
		    ctrlr);

		/*
		 * Do not bother binding interrupts if we only have one I/O
		 * interrupt thread for this controller.
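		 *
		 * When we do bind, queue i is pinned to the first CPU of its
		 * CPU group (CPU i * num_cpus_per_ioq), so each queue's
		 * interrupt handler runs near the cores that submit to it.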
		 */
		if (ctrlr->num_io_queues > 1)
			bus_bind_intr(ctrlr->dev, qpair->res,
			    i * ctrlr->num_cpus_per_ioq);
	}

	return (0);
}

static void
nvme_ctrlr_fail(struct nvme_controller *ctrlr)
{
	int i;

	ctrlr->is_failed = TRUE;
	nvme_qpair_fail(&ctrlr->adminq);
	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_fail(&ctrlr->ioq[i]);
	nvme_notify_fail_consumers(ctrlr);
}

void
nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	mtx_lock(&ctrlr->lock);
	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
	mtx_unlock(&ctrlr->lock);
	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
}

static void
nvme_ctrlr_fail_req_task(void *arg, int pending)
{
	struct nvme_controller *ctrlr = arg;
	struct nvme_request *req;

	mtx_lock(&ctrlr->lock);
	while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
		req = STAILQ_FIRST(&ctrlr->fail_req);
		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
		nvme_qpair_manual_complete_request(req->qpair, req,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
	}
	mtx_unlock(&ctrlr->lock);
}

static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
{
	int ms_waited;
	union cc_register cc;
	union csts_register csts;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en != desired_val) {
		nvme_printf(ctrlr, "%s called with desired_val = %d "
		    "but cc.en = %d\n", __func__, desired_val, cc.bits.en);
		return (ENXIO);
	}

	ms_waited = 0;

	while (csts.bits.rdy != desired_val) {
		DELAY(1000);
		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
			nvme_printf(ctrlr, "controller ready did not become %d "
			    "within %d ms\n", desired_val,
			    ctrlr->ready_timeout_in_ms);
			return (ENXIO);
		}
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}

	return (0);
}

static void
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en == 1 && csts.bits.rdy == 0)
		nvme_ctrlr_wait_for_ready(ctrlr, 1);

	cc.bits.en = 0;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	DELAY(5000);
	nvme_ctrlr_wait_for_ready(ctrlr, 0);
}

static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;
	union aqa_register aqa;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en == 1) {
		if (csts.bits.rdy == 1)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
	}

	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
	DELAY(5000);
	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
	DELAY(5000);

	aqa.raw = 0;
	/* acqs and asqs are 0-based. */
	aqa.bits.acqs = ctrlr->adminq.num_entries - 1;
	aqa.bits.asqs = ctrlr->adminq.num_entries - 1;
	nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
	DELAY(5000);

	cc.bits.en = 1;
	cc.bits.css = 0;
	cc.bits.ams = 0;
	cc.bits.shn = 0;
	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */

	/* This evaluates to 0, which is according to spec.
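	 * CC.MPS selects a host memory page size of 2^(12 + MPS) bytes, so
	 * MPS = 0 means the 4KB base page size; (PAGE_SIZE >> 13) is 0 for
	 * the 4KB PAGE_SIZE assumed here.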
	 */
	cc.bits.mps = (PAGE_SIZE >> 13);

	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	DELAY(5000);

	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
}

int
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
{
	int i;

	nvme_admin_qpair_disable(&ctrlr->adminq);
	/*
	 * I/O queues are not allocated before the initial HW
	 * reset, so do not try to disable them.  Use is_initialized
	 * to determine if this is the initial HW reset.
	 */
	if (ctrlr->is_initialized) {
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_io_qpair_disable(&ctrlr->ioq[i]);
	}

	DELAY(100*1000);

	nvme_ctrlr_disable(ctrlr);
	return (nvme_ctrlr_enable(ctrlr));
}

void
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{
	int cmpset;

	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);

	if (cmpset == 0 || ctrlr->is_failed)
		/*
		 * Controller is already resetting or has failed.  Return
		 * immediately since there is no need to kick off another
		 * reset in these cases.
		 */
		return;

	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}

static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;

	status.done = FALSE;
	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
		return (ENXIO);
	}

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
	 * controller supports.
	 */
	if (ctrlr->cdata.mdts > 0)
		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));

	return (0);
}

static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	int cq_allocated, sq_allocated;

	status.done = FALSE;
	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_set_num_queues failed!\n");
		return (ENXIO);
	}

	/*
	 * Data in cdw0 is 0-based.
	 * Lower 16-bits indicate number of submission queues allocated.
	 * Upper 16-bits indicate number of completion queues allocated.
	 */
	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
	cq_allocated = (status.cpl.cdw0 >> 16) + 1;

	/*
	 * Controller may allocate more queues than we requested,
	 * so use the minimum of the number requested and what was
	 * actually allocated.
	 */
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);

	return (0);
}

static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	struct nvme_qpair *qpair;
	int i;

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		status.done = FALSE;
		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
		    nvme_completion_poll_cb, &status);
		while (status.done == FALSE)
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
			return (ENXIO);
		}

		status.done = FALSE;
		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		while (status.done == FALSE)
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
			return (ENXIO);
		}
	}

	return (0);
}

static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
{
	struct nvme_namespace *ns;
	int i, status;

	for (i = 0; i < ctrlr->cdata.nn; i++) {
		ns = &ctrlr->ns[i];
		status = nvme_ns_construct(ns, i + 1, ctrlr);
		if (status != 0)
			return (status);
	}

	return (0);
}

static boolean_t
is_log_page_id_valid(uint8_t page_id)
{

	switch (page_id) {
	case NVME_LOG_ERROR:
	case NVME_LOG_HEALTH_INFORMATION:
	case NVME_LOG_FIRMWARE_SLOT:
		return (TRUE);
	}

	return (FALSE);
}

static uint32_t
nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
{
	uint32_t log_page_size;

	switch (page_id) {
	case NVME_LOG_ERROR:
		log_page_size = min(
		    sizeof(struct nvme_error_information_entry) *
		    ctrlr->cdata.elpe,
		    NVME_MAX_AER_LOG_SIZE);
		break;
	case NVME_LOG_HEALTH_INFORMATION:
		log_page_size = sizeof(struct nvme_health_information_page);
		break;
	case NVME_LOG_FIRMWARE_SLOT:
		log_page_size = sizeof(struct nvme_firmware_page);
		break;
	default:
		log_page_size = 0;
		break;
	}

	return (log_page_size);
}

static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
    union nvme_critical_warning_state state)
{

	if (state.bits.available_spare == 1)
		nvme_printf(ctrlr, "available spare space below threshold\n");

	if (state.bits.temperature == 1)
		nvme_printf(ctrlr, "temperature above threshold\n");

	if (state.bits.device_reliability == 1)
		nvme_printf(ctrlr, "device reliability degraded\n");

	if (state.bits.read_only == 1)
		nvme_printf(ctrlr, "media placed in read only mode\n");

	if (state.bits.volatile_memory_backup == 1)
		nvme_printf(ctrlr, "volatile memory backup device failed\n");

	if (state.bits.reserved != 0)
		nvme_printf(ctrlr,
		    "unknown critical warning(s): state = 0x%02x\n", state.raw);
}

static void
nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request *aer = arg;
	struct nvme_health_information_page *health_info;

	/*
	 * If the log page fetch for some reason completed with an error,
	 * don't pass log page data to the consumers.  In practice, this case
	 * should never happen.
	 */
	if (nvme_completion_is_error(cpl))
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, NULL, 0);
	else {
		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
			health_info = (struct nvme_health_information_page *)
			    aer->log_page_buffer;
			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
			    health_info->critical_warning);
			/*
			 * Critical warnings reported through the
			 * SMART/health log page are persistent, so
			 * clear the associated bits in the async event
			 * config so that we do not receive repeated
			 * notifications for the same event.
			 */
			aer->ctrlr->async_event_config.raw &=
			    ~health_info->critical_warning.raw;
			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
			    aer->ctrlr->async_event_config, NULL, NULL);
		}

		/*
		 * Pass the cpl data from the original async event completion,
		 * not the log page fetch.
		 */
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
	}

	/*
	 * Repost another asynchronous event request to replace the one
	 * that just completed.
	 */
	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
}

static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request *aer = arg;

	if (nvme_completion_is_error(cpl)) {
		/*
		 * Do not retry failed async event requests.  This avoids
		 * infinite loops where a new async event request is submitted
		 * to replace the one just failed, only to fail again and
		 * perpetuate the loop.
		 */
		return;
	}

	/* Associated log page is in bits 23:16 of completion entry dw0. */
	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;

	nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
	    aer->log_page_id);

	if (is_log_page_id_valid(aer->log_page_id)) {
		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
		    aer->log_page_id);
		memcpy(&aer->cpl, cpl, sizeof(*cpl));
		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
		    aer);
		/* Wait to notify consumers until after log page is fetched. */
	} else {
		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
		    NULL, 0);

		/*
		 * Repost another asynchronous event request to replace the one
		 * that just completed.
		 */
		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
	}
}

static void
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)
{
	struct nvme_request *req;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
	aer->req = req;

	/*
	 * Disable timeout here, since asynchronous event requests should by
	 * nature never be timed out.
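	 * The controller holds on to an AER until an event actually occurs,
	 * which may be arbitrarily far in the future, so any completion
	 * timeout on this command would fire spuriously.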
	 */
	req->timeout = FALSE;
	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	struct nvme_async_event_request *aer;
	uint32_t i;

	ctrlr->async_event_config.raw = 0xFF;
	ctrlr->async_event_config.bits.reserved = 0;

	status.done = FALSE;
	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
	    0, NULL, 0, nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl) ||
	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
		nvme_printf(ctrlr, "temperature threshold not supported\n");
		ctrlr->async_event_config.bits.temperature = 0;
	}

	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
	    ctrlr->async_event_config, NULL, NULL);

	/* aerl is a zero-based value, so we need to add 1 here. */
	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));

	for (i = 0; i < ctrlr->num_aers; i++) {
		aer = &ctrlr->aer[i];
		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
	}
}

static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{

	ctrlr->int_coal_time = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
	    &ctrlr->int_coal_time);

	ctrlr->int_coal_threshold = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
	    &ctrlr->int_coal_threshold);

	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
	    ctrlr->int_coal_threshold, NULL, NULL);
}

static void
nvme_ctrlr_start(void *ctrlr_arg)
{
	struct nvme_controller *ctrlr = ctrlr_arg;
	uint32_t old_num_io_queues;
	int i;

	/*
	 * Only reset adminq here when we are restarting the
	 * controller after a reset.  During initialization,
	 * we have already submitted admin commands to get
	 * the number of I/O queues supported, so cannot reset
	 * the adminq again here.
	 */
	if (ctrlr->is_resetting) {
		nvme_qpair_reset(&ctrlr->adminq);
	}

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_reset(&ctrlr->ioq[i]);

	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_identify(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	/*
	 * The number of qpairs is determined during controller initialization,
	 * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
	 * HW limit.  We call SET_FEATURES again here so that it gets called
	 * after any reset for controllers that depend on the driver to
	 * explicitly specify how many queues it will use.  This value should
	 * never change between resets, so panic if somehow that does happen.
	 */
	old_num_io_queues = ctrlr->num_io_queues;
	if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (old_num_io_queues != ctrlr->num_io_queues) {
		panic("num_io_queues changed from %u to %u", old_num_io_queues,
		    ctrlr->num_io_queues);
	}

	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	nvme_ctrlr_configure_aer(ctrlr);
	nvme_ctrlr_configure_int_coalescing(ctrlr);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_enable(&ctrlr->ioq[i]);
}

void
nvme_ctrlr_start_config_hook(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_qpair_reset(&ctrlr->adminq);
	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
	    nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	nvme_sysctl_initialize_ctrlr(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);

	ctrlr->is_initialized = 1;
	nvme_notify_new_controller(ctrlr);
}

static void
nvme_ctrlr_reset_task(void *arg, int pending)
{
	struct nvme_controller *ctrlr = arg;
	int status;

	nvme_printf(ctrlr, "resetting controller\n");
	status = nvme_ctrlr_hw_reset(ctrlr);
	/*
	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
	 * handlers on this CPU that were blocked on a qpair lock.  We want
	 * all nvme interrupts completed before proceeding with restarting the
	 * controller.
	 *
	 * XXX - any way to guarantee the interrupt handlers have quiesced?
	 */
	pause("nvmereset", hz / 10);
	if (status == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
}

static void
nvme_ctrlr_intx_handler(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_mmio_write_4(ctrlr, intms, 1);

	nvme_qpair_process_completions(&ctrlr->adminq);

	if (ctrlr->ioq[0].cpl)
		nvme_qpair_process_completions(&ctrlr->ioq[0]);

	nvme_mmio_write_4(ctrlr, intmc, 1);
}

static int
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{

	ctrlr->msix_enabled = 0;
	ctrlr->num_io_queues = 1;
	ctrlr->num_cpus_per_ioq = mp_ncpus;
	ctrlr->rid = 0;
	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);

	if (ctrlr->res == NULL) {
		nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
		return (ENOMEM);
	}

	bus_setup_intr(ctrlr->dev, ctrlr->res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
	    ctrlr, &ctrlr->tag);

	if (ctrlr->tag == NULL) {
		nvme_printf(ctrlr, "unable to setup intx handler\n");
		return (ENOMEM);
	}

	return (0);
}

static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_pt_command *pt = arg;

	bzero(&pt->cpl, sizeof(pt->cpl));
	pt->cpl.cdw0 = cpl->cdw0;
	pt->cpl.status = cpl->status;
	pt->cpl.status.p = 0;

	mtx_lock(pt->driver_lock);
	wakeup(pt);
	mtx_unlock(pt->driver_lock);
}

int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
    struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
    int is_admin_cmd)
{
	struct nvme_request *req;
	struct mtx *mtx;
	struct buf *buf = NULL;
	int ret = 0;

	if (pt->len > 0) {
		if (pt->len > ctrlr->max_xfer_size) {
			nvme_printf(ctrlr, "pt->len (%d) "
			    "exceeds max_xfer_size (%d)\n", pt->len,
			    ctrlr->max_xfer_size);
			return EIO;
		}
		if (is_user_buffer) {
			/*
			 * Ensure the user buffer is wired for the duration of
			 * this passthrough command.
			 */
			PHOLD(curproc);
			buf = getpbuf(NULL);
			buf->b_data = pt->buf;
			buf->b_bufsize = pt->len;
			buf->b_iocmd = pt->is_read ?
			    BIO_READ : BIO_WRITE;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
			if (vmapbuf(buf, 1) < 0) {
#else
			if (vmapbuf(buf) < 0) {
#endif
				ret = EFAULT;
				goto err;
			}
			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
			    nvme_pt_done, pt);
		} else
			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
			    nvme_pt_done, pt);
	} else
		req = nvme_allocate_request_null(nvme_pt_done, pt);

	req->cmd.opc = pt->cmd.opc;
	req->cmd.cdw10 = pt->cmd.cdw10;
	req->cmd.cdw11 = pt->cmd.cdw11;
	req->cmd.cdw12 = pt->cmd.cdw12;
	req->cmd.cdw13 = pt->cmd.cdw13;
	req->cmd.cdw14 = pt->cmd.cdw14;
	req->cmd.cdw15 = pt->cmd.cdw15;

	req->cmd.nsid = nsid;

	if (is_admin_cmd)
		mtx = &ctrlr->lock;
	else
		mtx = &ctrlr->ns[nsid - 1].lock;

	mtx_lock(mtx);
	pt->driver_lock = mtx;

	if (is_admin_cmd)
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	else
		nvme_ctrlr_submit_io_request(ctrlr, req);

	mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
	mtx_unlock(mtx);

	pt->driver_lock = NULL;

err:
	if (buf != NULL) {
		relpbuf(buf, NULL);
		PRELE(curproc);
	}

	return (ret);
}

static int
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct nvme_controller *ctrlr;
	struct nvme_pt_command *pt;

	ctrlr = cdev->si_drv1;

	switch (cmd) {
	case NVME_RESET_CONTROLLER:
		nvme_ctrlr_reset(ctrlr);
		break;
	case NVME_PASSTHROUGH_CMD:
		pt = (struct nvme_pt_command *)arg;
		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid,
		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
	default:
		return (ENOTTY);
	}

	return (0);
}

static struct cdevsw nvme_ctrlr_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_ioctl = nvme_ctrlr_ioctl
};

static void
nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
{
	device_t dev;
	int per_cpu_io_queues;
	int min_cpus_per_ioq;
	int num_vectors_requested, num_vectors_allocated;
	int num_vectors_available;

	dev = ctrlr->dev;
	min_cpus_per_ioq = 1;
	TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);

	if (min_cpus_per_ioq < 1) {
		min_cpus_per_ioq = 1;
	} else if (min_cpus_per_ioq > mp_ncpus) {
		min_cpus_per_ioq = mp_ncpus;
	}

	per_cpu_io_queues = 1;
	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);

	if (per_cpu_io_queues == 0) {
		min_cpus_per_ioq = mp_ncpus;
	}

	ctrlr->force_intx = 0;
	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);

	/*
	 * FreeBSD currently cannot allocate more than about 190 vectors at
	 * boot, meaning that systems with high core count and many devices
	 * requesting per-CPU interrupt vectors will not get their full
	 * allotment.  So first, try to allocate as many as we may need to
	 * understand what is available, then immediately release them.
	 * Then figure out how many of those we will actually use, based on
	 * assigning an equal number of cores to each I/O queue.
	 */

	/* One vector for per core I/O queue, plus one vector for admin queue.
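	 * The request below is clamped to what the device actually advertises
	 * via pci_msix_count() before probing with pci_alloc_msix().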
	 */
	num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
	if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
		num_vectors_available = 0;
	}
	pci_release_msi(dev);

	if (ctrlr->force_intx || num_vectors_available < 2) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	/*
	 * Do not use all vectors for I/O queues - one must be saved for the
	 * admin queue.
	 */
	ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
	    howmany(mp_ncpus, num_vectors_available - 1));

	ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
	num_vectors_requested = ctrlr->num_io_queues + 1;
	num_vectors_allocated = num_vectors_requested;

	/*
	 * Now just allocate the number of vectors we need.  This should
	 * succeed, since we previously called pci_alloc_msix()
	 * successfully returning at least this many vectors, but just to
	 * be safe, if something goes wrong just revert to INTx.
	 */
	if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	if (num_vectors_allocated < num_vectors_requested) {
		pci_release_msi(dev);
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	ctrlr->msix_enabled = 1;
}

int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
	union cap_lo_register cap_lo;
	union cap_hi_register cap_hi;
	int status, timeout_period;

	ctrlr->dev = dev;

	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);

	status = nvme_ctrlr_allocate_bar(ctrlr);

	if (status != 0)
		return (status);

	/*
	 * Software emulators may set the doorbell stride to something
	 * other than zero, but this driver is not set up to handle that.
	 */
	cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
	if (cap_hi.bits.dstrd != 0)
		return (ENXIO);

	ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin);

	/* Get ready timeout value from controller, in units of 500ms.
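	 * CAP.TO is the worst-case time the controller may take to set or
	 * clear CSTS.RDY after CC.EN is toggled, reported in 500ms units.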
	 */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;

	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;

	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);

	ctrlr->enable_aborts = 0;
	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);

	nvme_ctrlr_setup_interrupts(ctrlr);

	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
	nvme_ctrlr_construct_admin_qpair(ctrlr);

	ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_WHEEL, 0600, "nvme%d", device_get_unit(dev));

	if (ctrlr->cdev == NULL)
		return (ENXIO);

	ctrlr->cdev->si_drv1 = (void *)ctrlr;

	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");

	ctrlr->is_resetting = 0;
	ctrlr->is_initialized = 0;
	ctrlr->notification_sent = 0;
	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);

	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
	STAILQ_INIT(&ctrlr->fail_req);
	ctrlr->is_failed = FALSE;

	return (0);
}

void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
{
	int i;

	/*
	 * Notify the controller of a shutdown, even though this is due to
	 * a driver unload, not a system shutdown (this path is not invoked
	 * during shutdown).  This ensures the controller receives a
	 * shutdown notification in case the system is shutdown before
	 * reloading the driver.
	 */
	nvme_ctrlr_shutdown(ctrlr);

	nvme_ctrlr_disable(ctrlr);
	taskqueue_free(ctrlr->taskqueue);

	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
		nvme_ns_destruct(&ctrlr->ns[i]);

	if (ctrlr->cdev)
		destroy_dev(ctrlr->cdev);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		nvme_io_qpair_destroy(&ctrlr->ioq[i]);
	}

	free(ctrlr->ioq, M_NVME);

	nvme_admin_qpair_destroy(&ctrlr->adminq);

	if (ctrlr->resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->resource_id, ctrlr->resource);
	}

	if (ctrlr->bar4_resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
	}

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	if (ctrlr->msix_enabled)
		pci_release_msi(dev);
}

void
nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;
	int ticks = 0;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	cc.bits.shn = NVME_SHN_NORMAL;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);
	while ((csts.bits.shst != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) {
		pause("nvme shn", 1);
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}
	if (csts.bits.shst != NVME_SHST_COMPLETE)
		nvme_printf(ctrlr, "did not complete shutdown within 5 seconds "
		    "of notification\n");
}

void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	nvme_qpair_submit_request(&ctrlr->adminq, req);
}

void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
	struct nvme_qpair *qpair;

	qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
	nvme_qpair_submit_request(qpair, req);
}

device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{

	return (ctrlr->dev);
}

const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{

	return (&ctrlr->cdata);
}