1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (C) 2012-2014 Intel Corporation 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/bus.h> 34 35 #include <dev/pci/pcivar.h> 36 37 #include "nvme_private.h" 38 39 static void _nvme_qpair_submit_request(struct nvme_qpair *qpair, 40 struct nvme_request *req); 41 static void nvme_qpair_destroy(struct nvme_qpair *qpair); 42 43 struct nvme_opcode_string { 44 45 uint16_t opc; 46 const char * str; 47 }; 48 49 static struct nvme_opcode_string admin_opcode[] = { 50 { NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" }, 51 { NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" }, 52 { NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" }, 53 { NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" }, 54 { NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" }, 55 { NVME_OPC_IDENTIFY, "IDENTIFY" }, 56 { NVME_OPC_ABORT, "ABORT" }, 57 { NVME_OPC_SET_FEATURES, "SET FEATURES" }, 58 { NVME_OPC_GET_FEATURES, "GET FEATURES" }, 59 { NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" }, 60 { NVME_OPC_FIRMWARE_ACTIVATE, "FIRMWARE ACTIVATE" }, 61 { NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" }, 62 { NVME_OPC_FORMAT_NVM, "FORMAT NVM" }, 63 { NVME_OPC_SECURITY_SEND, "SECURITY SEND" }, 64 { NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" }, 65 { 0xFFFF, "ADMIN COMMAND" } 66 }; 67 68 static struct nvme_opcode_string io_opcode[] = { 69 { NVME_OPC_FLUSH, "FLUSH" }, 70 { NVME_OPC_WRITE, "WRITE" }, 71 { NVME_OPC_READ, "READ" }, 72 { NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" }, 73 { NVME_OPC_COMPARE, "COMPARE" }, 74 { NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" }, 75 { 0xFFFF, "IO COMMAND" } 76 }; 77 78 static const char * 79 get_admin_opcode_string(uint16_t opc) 80 { 81 struct nvme_opcode_string *entry; 82 83 entry = admin_opcode; 84 85 while (entry->opc != 0xFFFF) { 86 if (entry->opc == opc) 87 return (entry->str); 88 entry++; 89 } 90 return (entry->str); 91 } 92 93 static const char * 94 get_io_opcode_string(uint16_t opc) 95 { 96 struct nvme_opcode_string *entry; 97 98 entry = io_opcode; 99 100 while (entry->opc != 0xFFFF) { 101 if (entry->opc == opc) 102 return (entry->str); 103 entry++; 104 } 105 return (entry->str); 106 } 107 108 109 static void 110 nvme_admin_qpair_print_command(struct nvme_qpair *qpair, 111 struct nvme_command *cmd) 112 { 113 114 nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x " 115 "cdw10:%08x cdw11:%08x\n", 116 get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid, 117 cmd->nsid, cmd->cdw10, cmd->cdw11); 118 } 119 120 static void 121 nvme_io_qpair_print_command(struct nvme_qpair *qpair, 122 struct nvme_command *cmd) 123 { 124 125 switch (cmd->opc) { 126 case NVME_OPC_WRITE: 127 case NVME_OPC_READ: 128 case NVME_OPC_WRITE_UNCORRECTABLE: 129 case NVME_OPC_COMPARE: 130 nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d " 131 "lba:%llu len:%d\n", 132 get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, 133 cmd->nsid, 134 ((unsigned long long)cmd->cdw11 << 32) + cmd->cdw10, 135 (cmd->cdw12 & 0xFFFF) + 1); 136 break; 137 case NVME_OPC_FLUSH: 138 case NVME_OPC_DATASET_MANAGEMENT: 139 nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n", 140 get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, 141 cmd->nsid); 142 break; 143 default: 144 nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n", 145 get_io_opcode_string(cmd->opc), cmd->opc, qpair->id, 146 cmd->cid, cmd->nsid); 147 break; 148 } 149 } 150 151 static void 152 nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd) 153 { 154 if (qpair->id == 0) 155 nvme_admin_qpair_print_command(qpair, cmd); 156 else 157 nvme_io_qpair_print_command(qpair, cmd); 158 } 159 160 struct nvme_status_string { 161 162 uint16_t sc; 163 const char * str; 164 }; 165 166 static struct nvme_status_string generic_status[] = { 167 { NVME_SC_SUCCESS, "SUCCESS" }, 168 { NVME_SC_INVALID_OPCODE, "INVALID OPCODE" }, 169 { NVME_SC_INVALID_FIELD, "INVALID_FIELD" }, 170 { NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" }, 171 { NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" }, 172 { NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" }, 173 { NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" }, 174 { NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" }, 175 { NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" }, 176 { NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" }, 177 { NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" }, 178 { NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" }, 179 { NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" }, 180 { NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" }, 181 { NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" }, 182 { NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" }, 183 { 0xFFFF, "GENERIC" } 184 }; 185 186 static struct nvme_status_string command_specific_status[] = { 187 { NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" }, 188 { NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" }, 189 { NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" }, 190 { NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" }, 191 { NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" }, 192 { NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" }, 193 { NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" }, 194 { NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" }, 195 { NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" }, 196 { NVME_SC_INVALID_FORMAT, "INVALID FORMAT" }, 197 { NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" }, 198 { NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" }, 199 { NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" }, 200 { NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" }, 201 { 0xFFFF, "COMMAND SPECIFIC" } 202 }; 203 204 static struct nvme_status_string media_error_status[] = { 205 { NVME_SC_WRITE_FAULTS, "WRITE FAULTS" }, 206 { NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" }, 207 { NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" }, 208 { NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" }, 209 { NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" }, 210 { NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" }, 211 { NVME_SC_ACCESS_DENIED, "ACCESS DENIED" }, 212 { 0xFFFF, "MEDIA ERROR" } 213 }; 214 215 static const char * 216 get_status_string(uint16_t sct, uint16_t sc) 217 { 218 struct nvme_status_string *entry; 219 220 switch (sct) { 221 case NVME_SCT_GENERIC: 222 entry = generic_status; 223 break; 224 case NVME_SCT_COMMAND_SPECIFIC: 225 entry = command_specific_status; 226 break; 227 case NVME_SCT_MEDIA_ERROR: 228 entry = media_error_status; 229 break; 230 case NVME_SCT_VENDOR_SPECIFIC: 231 return ("VENDOR SPECIFIC"); 232 default: 233 return ("RESERVED"); 234 } 235 236 while (entry->sc != 0xFFFF) { 237 if (entry->sc == sc) 238 return (entry->str); 239 entry++; 240 } 241 return (entry->str); 242 } 243 244 static void 245 nvme_qpair_print_completion(struct nvme_qpair *qpair, 246 struct nvme_completion *cpl) 247 { 248 nvme_printf(qpair->ctrlr, "%s (%02x/%02x) sqid:%d cid:%d cdw0:%x\n", 249 get_status_string(cpl->status.sct, cpl->status.sc), 250 cpl->status.sct, cpl->status.sc, cpl->sqid, cpl->cid, cpl->cdw0); 251 } 252 253 static boolean_t 254 nvme_completion_is_retry(const struct nvme_completion *cpl) 255 { 256 /* 257 * TODO: spec is not clear how commands that are aborted due 258 * to TLER will be marked. So for now, it seems 259 * NAMESPACE_NOT_READY is the only case where we should 260 * look at the DNR bit. 261 */ 262 switch (cpl->status.sct) { 263 case NVME_SCT_GENERIC: 264 switch (cpl->status.sc) { 265 case NVME_SC_ABORTED_BY_REQUEST: 266 case NVME_SC_NAMESPACE_NOT_READY: 267 if (cpl->status.dnr) 268 return (0); 269 else 270 return (1); 271 case NVME_SC_INVALID_OPCODE: 272 case NVME_SC_INVALID_FIELD: 273 case NVME_SC_COMMAND_ID_CONFLICT: 274 case NVME_SC_DATA_TRANSFER_ERROR: 275 case NVME_SC_ABORTED_POWER_LOSS: 276 case NVME_SC_INTERNAL_DEVICE_ERROR: 277 case NVME_SC_ABORTED_SQ_DELETION: 278 case NVME_SC_ABORTED_FAILED_FUSED: 279 case NVME_SC_ABORTED_MISSING_FUSED: 280 case NVME_SC_INVALID_NAMESPACE_OR_FORMAT: 281 case NVME_SC_COMMAND_SEQUENCE_ERROR: 282 case NVME_SC_LBA_OUT_OF_RANGE: 283 case NVME_SC_CAPACITY_EXCEEDED: 284 default: 285 return (0); 286 } 287 case NVME_SCT_COMMAND_SPECIFIC: 288 case NVME_SCT_MEDIA_ERROR: 289 case NVME_SCT_VENDOR_SPECIFIC: 290 default: 291 return (0); 292 } 293 } 294 295 static void 296 nvme_qpair_complete_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr, 297 struct nvme_completion *cpl, boolean_t print_on_error) 298 { 299 struct nvme_request *req; 300 boolean_t retry, error; 301 302 req = tr->req; 303 error = nvme_completion_is_error(cpl); 304 retry = error && nvme_completion_is_retry(cpl) && 305 req->retries < nvme_retry_count; 306 307 if (error && print_on_error) { 308 nvme_qpair_print_command(qpair, &req->cmd); 309 nvme_qpair_print_completion(qpair, cpl); 310 } 311 312 qpair->act_tr[cpl->cid] = NULL; 313 314 KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n")); 315 316 if (req->cb_fn && !retry) 317 req->cb_fn(req->cb_arg, cpl); 318 319 mtx_lock(&qpair->lock); 320 callout_stop(&tr->timer); 321 322 if (retry) { 323 req->retries++; 324 nvme_qpair_submit_tracker(qpair, tr); 325 } else { 326 if (req->type != NVME_REQUEST_NULL) 327 bus_dmamap_unload(qpair->dma_tag_payload, 328 tr->payload_dma_map); 329 330 nvme_free_request(req); 331 tr->req = NULL; 332 333 TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq); 334 TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq); 335 336 /* 337 * If the controller is in the middle of resetting, don't 338 * try to submit queued requests here - let the reset logic 339 * handle that instead. 340 */ 341 if (!STAILQ_EMPTY(&qpair->queued_req) && 342 !qpair->ctrlr->is_resetting) { 343 req = STAILQ_FIRST(&qpair->queued_req); 344 STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq); 345 _nvme_qpair_submit_request(qpair, req); 346 } 347 } 348 349 mtx_unlock(&qpair->lock); 350 } 351 352 static void 353 nvme_qpair_manual_complete_tracker(struct nvme_qpair *qpair, 354 struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr, 355 boolean_t print_on_error) 356 { 357 struct nvme_completion cpl; 358 359 memset(&cpl, 0, sizeof(cpl)); 360 cpl.sqid = qpair->id; 361 cpl.cid = tr->cid; 362 cpl.status.sct = sct; 363 cpl.status.sc = sc; 364 cpl.status.dnr = dnr; 365 nvme_qpair_complete_tracker(qpair, tr, &cpl, print_on_error); 366 } 367 368 void 369 nvme_qpair_manual_complete_request(struct nvme_qpair *qpair, 370 struct nvme_request *req, uint32_t sct, uint32_t sc, 371 boolean_t print_on_error) 372 { 373 struct nvme_completion cpl; 374 boolean_t error; 375 376 memset(&cpl, 0, sizeof(cpl)); 377 cpl.sqid = qpair->id; 378 cpl.status.sct = sct; 379 cpl.status.sc = sc; 380 381 error = nvme_completion_is_error(&cpl); 382 383 if (error && print_on_error) { 384 nvme_qpair_print_command(qpair, &req->cmd); 385 nvme_qpair_print_completion(qpair, &cpl); 386 } 387 388 if (req->cb_fn) 389 req->cb_fn(req->cb_arg, &cpl); 390 391 nvme_free_request(req); 392 } 393 394 void 395 nvme_qpair_process_completions(struct nvme_qpair *qpair) 396 { 397 struct nvme_tracker *tr; 398 struct nvme_completion *cpl; 399 400 qpair->num_intr_handler_calls++; 401 402 if (!qpair->is_enabled) 403 /* 404 * qpair is not enabled, likely because a controller reset is 405 * is in progress. Ignore the interrupt - any I/O that was 406 * associated with this interrupt will get retried when the 407 * reset is complete. 408 */ 409 return; 410 411 while (1) { 412 cpl = &qpair->cpl[qpair->cq_head]; 413 414 if (cpl->status.p != qpair->phase) 415 break; 416 417 tr = qpair->act_tr[cpl->cid]; 418 419 if (tr != NULL) { 420 nvme_qpair_complete_tracker(qpair, tr, cpl, TRUE); 421 qpair->sq_head = cpl->sqhd; 422 } else { 423 nvme_printf(qpair->ctrlr, 424 "cpl does not map to outstanding cmd\n"); 425 nvme_dump_completion(cpl); 426 KASSERT(0, ("received completion for unknown cmd\n")); 427 } 428 429 if (++qpair->cq_head == qpair->num_entries) { 430 qpair->cq_head = 0; 431 qpair->phase = !qpair->phase; 432 } 433 434 nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].cq_hdbl, 435 qpair->cq_head); 436 } 437 } 438 439 static void 440 nvme_qpair_msix_handler(void *arg) 441 { 442 struct nvme_qpair *qpair = arg; 443 444 nvme_qpair_process_completions(qpair); 445 } 446 447 int 448 nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id, 449 uint16_t vector, uint32_t num_entries, uint32_t num_trackers, 450 struct nvme_controller *ctrlr) 451 { 452 struct nvme_tracker *tr; 453 size_t cmdsz, cplsz, prpsz, allocsz, prpmemsz; 454 uint64_t queuemem_phys, prpmem_phys, list_phys; 455 uint8_t *queuemem, *prpmem, *prp_list; 456 int i, err; 457 458 qpair->id = id; 459 qpair->vector = vector; 460 qpair->num_entries = num_entries; 461 qpair->num_trackers = num_trackers; 462 qpair->ctrlr = ctrlr; 463 464 if (ctrlr->msix_enabled) { 465 466 /* 467 * MSI-X vector resource IDs start at 1, so we add one to 468 * the queue's vector to get the corresponding rid to use. 469 */ 470 qpair->rid = vector + 1; 471 472 qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ, 473 &qpair->rid, RF_ACTIVE); 474 bus_setup_intr(ctrlr->dev, qpair->res, 475 INTR_TYPE_MISC | INTR_MPSAFE, NULL, 476 nvme_qpair_msix_handler, qpair, &qpair->tag); 477 } 478 479 mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF); 480 481 /* Note: NVMe PRP format is restricted to 4-byte alignment. */ 482 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 483 4, PAGE_SIZE, BUS_SPACE_MAXADDR, 484 BUS_SPACE_MAXADDR, NULL, NULL, NVME_MAX_XFER_SIZE, 485 (NVME_MAX_XFER_SIZE/PAGE_SIZE)+1, PAGE_SIZE, 0, 486 NULL, NULL, &qpair->dma_tag_payload); 487 if (err != 0) { 488 nvme_printf(ctrlr, "payload tag create failed %d\n", err); 489 goto out; 490 } 491 492 /* 493 * Each component must be page aligned, and individual PRP lists 494 * cannot cross a page boundary. 495 */ 496 cmdsz = qpair->num_entries * sizeof(struct nvme_command); 497 cmdsz = roundup2(cmdsz, PAGE_SIZE); 498 cplsz = qpair->num_entries * sizeof(struct nvme_completion); 499 cplsz = roundup2(cplsz, PAGE_SIZE); 500 prpsz = sizeof(uint64_t) * NVME_MAX_PRP_LIST_ENTRIES;; 501 prpmemsz = qpair->num_trackers * prpsz; 502 allocsz = cmdsz + cplsz + prpmemsz; 503 504 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 505 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 506 allocsz, 1, allocsz, 0, NULL, NULL, &qpair->dma_tag); 507 if (err != 0) { 508 nvme_printf(ctrlr, "tag create failed %d\n", err); 509 goto out; 510 } 511 512 if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem, 513 BUS_DMA_NOWAIT, &qpair->queuemem_map)) { 514 nvme_printf(ctrlr, "failed to alloc qpair memory\n"); 515 goto out; 516 } 517 518 if (bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map, 519 queuemem, allocsz, nvme_single_map, &queuemem_phys, 0) != 0) { 520 nvme_printf(ctrlr, "failed to load qpair memory\n"); 521 goto out; 522 } 523 524 qpair->num_cmds = 0; 525 qpair->num_intr_handler_calls = 0; 526 qpair->cmd = (struct nvme_command *)queuemem; 527 qpair->cpl = (struct nvme_completion *)(queuemem + cmdsz); 528 prpmem = (uint8_t *)(queuemem + cmdsz + cplsz); 529 qpair->cmd_bus_addr = queuemem_phys; 530 qpair->cpl_bus_addr = queuemem_phys + cmdsz; 531 prpmem_phys = queuemem_phys + cmdsz + cplsz; 532 533 qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[id].sq_tdbl); 534 qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[id].cq_hdbl); 535 536 TAILQ_INIT(&qpair->free_tr); 537 TAILQ_INIT(&qpair->outstanding_tr); 538 STAILQ_INIT(&qpair->queued_req); 539 540 list_phys = prpmem_phys; 541 prp_list = prpmem; 542 for (i = 0; i < qpair->num_trackers; i++) { 543 544 if (list_phys + prpsz > prpmem_phys + prpmemsz) { 545 qpair->num_trackers = i; 546 break; 547 } 548 549 /* 550 * Make sure that the PRP list for this tracker doesn't 551 * overflow to another page. 552 */ 553 if (trunc_page(list_phys) != 554 trunc_page(list_phys + prpsz - 1)) { 555 list_phys = roundup2(list_phys, PAGE_SIZE); 556 prp_list = 557 (uint8_t *)roundup2((uintptr_t)prp_list, PAGE_SIZE); 558 } 559 560 tr = malloc(sizeof(*tr), M_NVME, M_ZERO | M_WAITOK); 561 bus_dmamap_create(qpair->dma_tag_payload, 0, 562 &tr->payload_dma_map); 563 callout_init(&tr->timer, 1); 564 tr->cid = i; 565 tr->qpair = qpair; 566 tr->prp = (uint64_t *)prp_list; 567 tr->prp_bus_addr = list_phys; 568 TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq); 569 list_phys += prpsz; 570 prp_list += prpsz; 571 } 572 573 if (qpair->num_trackers == 0) { 574 nvme_printf(ctrlr, "failed to allocate enough trackers\n"); 575 goto out; 576 } 577 578 qpair->act_tr = malloc(sizeof(struct nvme_tracker *) * 579 qpair->num_entries, M_NVME, M_ZERO | M_WAITOK); 580 return (0); 581 582 out: 583 nvme_qpair_destroy(qpair); 584 return (ENOMEM); 585 } 586 587 static void 588 nvme_qpair_destroy(struct nvme_qpair *qpair) 589 { 590 struct nvme_tracker *tr; 591 592 if (qpair->tag) 593 bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag); 594 595 if (mtx_initialized(&qpair->lock)) 596 mtx_destroy(&qpair->lock); 597 598 if (qpair->res) 599 bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ, 600 rman_get_rid(qpair->res), qpair->res); 601 602 if (qpair->cmd != NULL) { 603 bus_dmamap_unload(qpair->dma_tag, qpair->queuemem_map); 604 bus_dmamem_free(qpair->dma_tag, qpair->cmd, 605 qpair->queuemem_map); 606 } 607 608 if (qpair->dma_tag) 609 bus_dma_tag_destroy(qpair->dma_tag); 610 611 if (qpair->dma_tag_payload) 612 bus_dma_tag_destroy(qpair->dma_tag_payload); 613 614 if (qpair->act_tr) 615 free(qpair->act_tr, M_NVME); 616 617 while (!TAILQ_EMPTY(&qpair->free_tr)) { 618 tr = TAILQ_FIRST(&qpair->free_tr); 619 TAILQ_REMOVE(&qpair->free_tr, tr, tailq); 620 bus_dmamap_destroy(qpair->dma_tag, tr->payload_dma_map); 621 free(tr, M_NVME); 622 } 623 } 624 625 static void 626 nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair) 627 { 628 struct nvme_tracker *tr; 629 630 tr = TAILQ_FIRST(&qpair->outstanding_tr); 631 while (tr != NULL) { 632 if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) { 633 nvme_qpair_manual_complete_tracker(qpair, tr, 634 NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0, 635 FALSE); 636 tr = TAILQ_FIRST(&qpair->outstanding_tr); 637 } else { 638 tr = TAILQ_NEXT(tr, tailq); 639 } 640 } 641 } 642 643 void 644 nvme_admin_qpair_destroy(struct nvme_qpair *qpair) 645 { 646 647 nvme_admin_qpair_abort_aers(qpair); 648 nvme_qpair_destroy(qpair); 649 } 650 651 void 652 nvme_io_qpair_destroy(struct nvme_qpair *qpair) 653 { 654 655 nvme_qpair_destroy(qpair); 656 } 657 658 static void 659 nvme_abort_complete(void *arg, const struct nvme_completion *status) 660 { 661 struct nvme_tracker *tr = arg; 662 663 /* 664 * If cdw0 == 1, the controller was not able to abort the command 665 * we requested. We still need to check the active tracker array, 666 * to cover race where I/O timed out at same time controller was 667 * completing the I/O. 668 */ 669 if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) { 670 /* 671 * An I/O has timed out, and the controller was unable to 672 * abort it for some reason. Construct a fake completion 673 * status, and then complete the I/O's tracker manually. 674 */ 675 nvme_printf(tr->qpair->ctrlr, 676 "abort command failed, aborting command manually\n"); 677 nvme_qpair_manual_complete_tracker(tr->qpair, tr, 678 NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 0, TRUE); 679 } 680 } 681 682 static void 683 nvme_timeout(void *arg) 684 { 685 struct nvme_tracker *tr = arg; 686 struct nvme_qpair *qpair = tr->qpair; 687 struct nvme_controller *ctrlr = qpair->ctrlr; 688 union csts_register csts; 689 690 /* Read csts to get value of cfs - controller fatal status. */ 691 csts.raw = nvme_mmio_read_4(ctrlr, csts); 692 693 if (ctrlr->enable_aborts && csts.bits.cfs == 0) { 694 /* 695 * If aborts are enabled, only use them if the controller is 696 * not reporting fatal status. 697 */ 698 nvme_ctrlr_cmd_abort(ctrlr, tr->cid, qpair->id, 699 nvme_abort_complete, tr); 700 } else 701 nvme_ctrlr_reset(ctrlr); 702 } 703 704 void 705 nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr) 706 { 707 struct nvme_request *req; 708 struct nvme_controller *ctrlr; 709 710 mtx_assert(&qpair->lock, MA_OWNED); 711 712 req = tr->req; 713 req->cmd.cid = tr->cid; 714 qpair->act_tr[tr->cid] = tr; 715 ctrlr = qpair->ctrlr; 716 717 if (req->timeout) 718 #if __FreeBSD_version >= 800030 719 callout_reset_curcpu(&tr->timer, ctrlr->timeout_period * hz, 720 nvme_timeout, tr); 721 #else 722 callout_reset(&tr->timer, ctrlr->timeout_period * hz, 723 nvme_timeout, tr); 724 #endif 725 726 /* Copy the command from the tracker to the submission queue. */ 727 memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd)); 728 729 if (++qpair->sq_tail == qpair->num_entries) 730 qpair->sq_tail = 0; 731 732 wmb(); 733 nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].sq_tdbl, 734 qpair->sq_tail); 735 736 qpair->num_cmds++; 737 } 738 739 static void 740 nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error) 741 { 742 struct nvme_tracker *tr = arg; 743 uint32_t cur_nseg; 744 745 /* 746 * If the mapping operation failed, return immediately. The caller 747 * is responsible for detecting the error status and failing the 748 * tracker manually. 749 */ 750 if (error != 0) { 751 nvme_printf(tr->qpair->ctrlr, 752 "nvme_payload_map err %d\n", error); 753 return; 754 } 755 756 /* 757 * Note that we specified PAGE_SIZE for alignment and max 758 * segment size when creating the bus dma tags. So here 759 * we can safely just transfer each segment to its 760 * associated PRP entry. 761 */ 762 tr->req->cmd.prp1 = seg[0].ds_addr; 763 764 if (nseg == 2) { 765 tr->req->cmd.prp2 = seg[1].ds_addr; 766 } else if (nseg > 2) { 767 cur_nseg = 1; 768 tr->req->cmd.prp2 = (uint64_t)tr->prp_bus_addr; 769 while (cur_nseg < nseg) { 770 tr->prp[cur_nseg-1] = 771 (uint64_t)seg[cur_nseg].ds_addr; 772 cur_nseg++; 773 } 774 } else { 775 /* 776 * prp2 should not be used by the controller 777 * since there is only one segment, but set 778 * to 0 just to be safe. 779 */ 780 tr->req->cmd.prp2 = 0; 781 } 782 783 nvme_qpair_submit_tracker(tr->qpair, tr); 784 } 785 786 static void 787 _nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req) 788 { 789 struct nvme_tracker *tr; 790 int err = 0; 791 792 mtx_assert(&qpair->lock, MA_OWNED); 793 794 tr = TAILQ_FIRST(&qpair->free_tr); 795 req->qpair = qpair; 796 797 if (tr == NULL || !qpair->is_enabled) { 798 /* 799 * No tracker is available, or the qpair is disabled due to 800 * an in-progress controller-level reset or controller 801 * failure. 802 */ 803 804 if (qpair->ctrlr->is_failed) { 805 /* 806 * The controller has failed. Post the request to a 807 * task where it will be aborted, so that we do not 808 * invoke the request's callback in the context 809 * of the submission. 810 */ 811 nvme_ctrlr_post_failed_request(qpair->ctrlr, req); 812 } else { 813 /* 814 * Put the request on the qpair's request queue to be 815 * processed when a tracker frees up via a command 816 * completion or when the controller reset is 817 * completed. 818 */ 819 STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq); 820 } 821 return; 822 } 823 824 TAILQ_REMOVE(&qpair->free_tr, tr, tailq); 825 TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq); 826 tr->req = req; 827 828 switch (req->type) { 829 case NVME_REQUEST_VADDR: 830 KASSERT(req->payload_size <= qpair->ctrlr->max_xfer_size, 831 ("payload_size (%d) exceeds max_xfer_size (%d)\n", 832 req->payload_size, qpair->ctrlr->max_xfer_size)); 833 err = bus_dmamap_load(tr->qpair->dma_tag_payload, 834 tr->payload_dma_map, req->u.payload, req->payload_size, 835 nvme_payload_map, tr, 0); 836 if (err != 0) 837 nvme_printf(qpair->ctrlr, 838 "bus_dmamap_load returned 0x%x!\n", err); 839 break; 840 case NVME_REQUEST_NULL: 841 nvme_qpair_submit_tracker(tr->qpair, tr); 842 break; 843 #ifdef NVME_UNMAPPED_BIO_SUPPORT 844 case NVME_REQUEST_BIO: 845 KASSERT(req->u.bio->bio_bcount <= qpair->ctrlr->max_xfer_size, 846 ("bio->bio_bcount (%jd) exceeds max_xfer_size (%d)\n", 847 (intmax_t)req->u.bio->bio_bcount, 848 qpair->ctrlr->max_xfer_size)); 849 err = bus_dmamap_load_bio(tr->qpair->dma_tag_payload, 850 tr->payload_dma_map, req->u.bio, nvme_payload_map, tr, 0); 851 if (err != 0) 852 nvme_printf(qpair->ctrlr, 853 "bus_dmamap_load_bio returned 0x%x!\n", err); 854 break; 855 #endif 856 case NVME_REQUEST_CCB: 857 err = bus_dmamap_load_ccb(tr->qpair->dma_tag_payload, 858 tr->payload_dma_map, req->u.payload, 859 nvme_payload_map, tr, 0); 860 if (err != 0) 861 nvme_printf(qpair->ctrlr, 862 "bus_dmamap_load_ccb returned 0x%x!\n", err); 863 break; 864 default: 865 panic("unknown nvme request type 0x%x\n", req->type); 866 break; 867 } 868 869 if (err != 0) { 870 /* 871 * The dmamap operation failed, so we manually fail the 872 * tracker here with DATA_TRANSFER_ERROR status. 873 * 874 * nvme_qpair_manual_complete_tracker must not be called 875 * with the qpair lock held. 876 */ 877 mtx_unlock(&qpair->lock); 878 nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC, 879 NVME_SC_DATA_TRANSFER_ERROR, 1 /* do not retry */, TRUE); 880 mtx_lock(&qpair->lock); 881 } 882 } 883 884 void 885 nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req) 886 { 887 888 mtx_lock(&qpair->lock); 889 _nvme_qpair_submit_request(qpair, req); 890 mtx_unlock(&qpair->lock); 891 } 892 893 static void 894 nvme_qpair_enable(struct nvme_qpair *qpair) 895 { 896 897 qpair->is_enabled = TRUE; 898 } 899 900 void 901 nvme_qpair_reset(struct nvme_qpair *qpair) 902 { 903 904 qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0; 905 906 /* 907 * First time through the completion queue, HW will set phase 908 * bit on completions to 1. So set this to 1 here, indicating 909 * we're looking for a 1 to know which entries have completed. 910 * we'll toggle the bit each time when the completion queue 911 * rolls over. 912 */ 913 qpair->phase = 1; 914 915 memset(qpair->cmd, 0, 916 qpair->num_entries * sizeof(struct nvme_command)); 917 memset(qpair->cpl, 0, 918 qpair->num_entries * sizeof(struct nvme_completion)); 919 } 920 921 void 922 nvme_admin_qpair_enable(struct nvme_qpair *qpair) 923 { 924 struct nvme_tracker *tr; 925 struct nvme_tracker *tr_temp; 926 927 /* 928 * Manually abort each outstanding admin command. Do not retry 929 * admin commands found here, since they will be left over from 930 * a controller reset and its likely the context in which the 931 * command was issued no longer applies. 932 */ 933 TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) { 934 nvme_printf(qpair->ctrlr, 935 "aborting outstanding admin command\n"); 936 nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC, 937 NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, TRUE); 938 } 939 940 nvme_qpair_enable(qpair); 941 } 942 943 void 944 nvme_io_qpair_enable(struct nvme_qpair *qpair) 945 { 946 STAILQ_HEAD(, nvme_request) temp; 947 struct nvme_tracker *tr; 948 struct nvme_tracker *tr_temp; 949 struct nvme_request *req; 950 951 /* 952 * Manually abort each outstanding I/O. This normally results in a 953 * retry, unless the retry count on the associated request has 954 * reached its limit. 955 */ 956 TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) { 957 nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n"); 958 nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC, 959 NVME_SC_ABORTED_BY_REQUEST, 0, TRUE); 960 } 961 962 mtx_lock(&qpair->lock); 963 964 nvme_qpair_enable(qpair); 965 966 STAILQ_INIT(&temp); 967 STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request); 968 969 while (!STAILQ_EMPTY(&temp)) { 970 req = STAILQ_FIRST(&temp); 971 STAILQ_REMOVE_HEAD(&temp, stailq); 972 nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n"); 973 nvme_qpair_print_command(qpair, &req->cmd); 974 _nvme_qpair_submit_request(qpair, req); 975 } 976 977 mtx_unlock(&qpair->lock); 978 } 979 980 static void 981 nvme_qpair_disable(struct nvme_qpair *qpair) 982 { 983 struct nvme_tracker *tr; 984 985 qpair->is_enabled = FALSE; 986 mtx_lock(&qpair->lock); 987 TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq) 988 callout_stop(&tr->timer); 989 mtx_unlock(&qpair->lock); 990 } 991 992 void 993 nvme_admin_qpair_disable(struct nvme_qpair *qpair) 994 { 995 996 nvme_qpair_disable(qpair); 997 nvme_admin_qpair_abort_aers(qpair); 998 } 999 1000 void 1001 nvme_io_qpair_disable(struct nvme_qpair *qpair) 1002 { 1003 1004 nvme_qpair_disable(qpair); 1005 } 1006 1007 void 1008 nvme_qpair_fail(struct nvme_qpair *qpair) 1009 { 1010 struct nvme_tracker *tr; 1011 struct nvme_request *req; 1012 1013 if (!mtx_initialized(&qpair->lock)) 1014 return; 1015 1016 mtx_lock(&qpair->lock); 1017 1018 while (!STAILQ_EMPTY(&qpair->queued_req)) { 1019 req = STAILQ_FIRST(&qpair->queued_req); 1020 STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq); 1021 nvme_printf(qpair->ctrlr, "failing queued i/o\n"); 1022 mtx_unlock(&qpair->lock); 1023 nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC, 1024 NVME_SC_ABORTED_BY_REQUEST, TRUE); 1025 mtx_lock(&qpair->lock); 1026 } 1027 1028 /* Manually abort each outstanding I/O. */ 1029 while (!TAILQ_EMPTY(&qpair->outstanding_tr)) { 1030 tr = TAILQ_FIRST(&qpair->outstanding_tr); 1031 /* 1032 * Do not remove the tracker. The abort_tracker path will 1033 * do that for us. 1034 */ 1035 nvme_printf(qpair->ctrlr, "failing outstanding i/o\n"); 1036 mtx_unlock(&qpair->lock); 1037 nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC, 1038 NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, TRUE); 1039 mtx_lock(&qpair->lock); 1040 } 1041 1042 mtx_unlock(&qpair->lock); 1043 } 1044 1045