/*-
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>

#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static void _nvme_qpair_submit_request(struct nvme_qpair *qpair,
    struct nvme_request *req);
static void nvme_qpair_destroy(struct nvme_qpair *qpair);

struct nvme_opcode_string {

        uint16_t        opc;
        const char *    str;
};

static struct nvme_opcode_string admin_opcode[] = {
        { NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" },
        { NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" },
        { NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" },
        { NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" },
        { NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" },
        { NVME_OPC_IDENTIFY, "IDENTIFY" },
        { NVME_OPC_ABORT, "ABORT" },
        { NVME_OPC_SET_FEATURES, "SET FEATURES" },
        { NVME_OPC_GET_FEATURES, "GET FEATURES" },
        { NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" },
        { NVME_OPC_FIRMWARE_ACTIVATE, "FIRMWARE ACTIVATE" },
        { NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" },
        { NVME_OPC_FORMAT_NVM, "FORMAT NVM" },
        { NVME_OPC_SECURITY_SEND, "SECURITY SEND" },
        { NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" },
        { 0xFFFF, "ADMIN COMMAND" }
};

static struct nvme_opcode_string io_opcode[] = {
        { NVME_OPC_FLUSH, "FLUSH" },
        { NVME_OPC_WRITE, "WRITE" },
        { NVME_OPC_READ, "READ" },
        { NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" },
        { NVME_OPC_COMPARE, "COMPARE" },
        { NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" },
        { 0xFFFF, "IO COMMAND" }
};

static const char *
get_admin_opcode_string(uint16_t opc)
{
        struct nvme_opcode_string *entry;

        entry = admin_opcode;

        while (entry->opc != 0xFFFF) {
                if (entry->opc == opc)
                        return (entry->str);
                entry++;
        }
        return (entry->str);
}

static const char *
get_io_opcode_string(uint16_t opc)
{
        struct nvme_opcode_string *entry;

        entry = io_opcode;

        while (entry->opc != 0xFFFF) {
                if (entry->opc == opc)
                        return (entry->str);
                entry++;
        }
        return (entry->str);
}

static void
nvme_admin_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

        nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x "
            "cdw10:%08x cdw11:%08x\n",
            get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid,
            cmd->nsid, cmd->cdw10, cmd->cdw11);
}

static void
nvme_io_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

        switch (cmd->opc) {
        case NVME_OPC_WRITE:
        case NVME_OPC_READ:
        case NVME_OPC_WRITE_UNCORRECTABLE:
        case NVME_OPC_COMPARE:
                nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d "
                    "lba:%llu len:%d\n",
                    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid,
                    cmd->nsid,
                    ((unsigned long long)cmd->cdw11 << 32) + cmd->cdw10,
                    (cmd->cdw12 & 0xFFFF) + 1);
                break;
        case NVME_OPC_FLUSH:
        case NVME_OPC_DATASET_MANAGEMENT:
                nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n",
                    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid,
                    cmd->nsid);
                break;
        default:
                nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n",
                    get_io_opcode_string(cmd->opc), cmd->opc, qpair->id,
                    cmd->cid, cmd->nsid);
                break;
        }
}

static void
nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd)
{
        if (qpair->id == 0)
                nvme_admin_qpair_print_command(qpair, cmd);
        else
                nvme_io_qpair_print_command(qpair, cmd);
}

struct nvme_status_string {

        uint16_t        sc;
        const char *    str;
};

static struct nvme_status_string generic_status[] = {
        { NVME_SC_SUCCESS, "SUCCESS" },
        { NVME_SC_INVALID_OPCODE, "INVALID OPCODE" },
        { NVME_SC_INVALID_FIELD, "INVALID FIELD" },
        { NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" },
        { NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" },
        { NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" },
        { NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" },
        { NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" },
        { NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" },
        { NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" },
        { NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" },
        { NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" },
        { NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" },
        { NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" },
        { NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" },
        { NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" },
        { 0xFFFF, "GENERIC" }
};

static struct nvme_status_string command_specific_status[] = {
        { NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" },
        { NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" },
        { NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" },
        { NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" },
        { NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" },
        { NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" },
        { NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" },
        { NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" },
        { NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" },
        { NVME_SC_INVALID_FORMAT, "INVALID FORMAT" },
        { NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" },
        { NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" },
        { NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" },
        { NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" },
        { 0xFFFF, "COMMAND SPECIFIC" }
};

static struct nvme_status_string media_error_status[] = {
        { NVME_SC_WRITE_FAULTS, "WRITE FAULTS" },
        { NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" },
        { NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" },
        { NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" },
        { NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" },
        { NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" },
        { NVME_SC_ACCESS_DENIED, "ACCESS DENIED" },
        { 0xFFFF, "MEDIA ERROR" }
};

static const char *
get_status_string(uint16_t sct, uint16_t sc)
{
        struct nvme_status_string *entry;

        switch (sct) {
        case NVME_SCT_GENERIC:
                entry = generic_status;
                break;
        case NVME_SCT_COMMAND_SPECIFIC:
                entry = command_specific_status;
                break;
        case NVME_SCT_MEDIA_ERROR:
                entry = media_error_status;
                break;
        case NVME_SCT_VENDOR_SPECIFIC:
                return ("VENDOR SPECIFIC");
        default:
                return ("RESERVED");
        }

        while (entry->sc != 0xFFFF) {
                if (entry->sc == sc)
                        return (entry->str);
                entry++;
        }
        return (entry->str);
}

static void
nvme_qpair_print_completion(struct nvme_qpair *qpair,
    struct nvme_completion *cpl)
{
        nvme_printf(qpair->ctrlr, "%s (%02x/%02x) sqid:%d cid:%d cdw0:%x\n",
            get_status_string(cpl->status.sct, cpl->status.sc),
            cpl->status.sct, cpl->status.sc, cpl->sqid, cpl->cid, cpl->cdw0);
}

static boolean_t
nvme_completion_is_retry(const struct nvme_completion *cpl)
{
        /*
         * TODO: spec is not clear how commands that are aborted due
         *  to TLER will be marked.  So for now, it seems
         *  NAMESPACE_NOT_READY is the only case where we should
         *  look at the DNR bit.
         */
        switch (cpl->status.sct) {
        case NVME_SCT_GENERIC:
                switch (cpl->status.sc) {
                case NVME_SC_ABORTED_BY_REQUEST:
                case NVME_SC_NAMESPACE_NOT_READY:
                        if (cpl->status.dnr)
                                return (0);
                        else
                                return (1);
                case NVME_SC_INVALID_OPCODE:
                case NVME_SC_INVALID_FIELD:
                case NVME_SC_COMMAND_ID_CONFLICT:
                case NVME_SC_DATA_TRANSFER_ERROR:
                case NVME_SC_ABORTED_POWER_LOSS:
                case NVME_SC_INTERNAL_DEVICE_ERROR:
                case NVME_SC_ABORTED_SQ_DELETION:
                case NVME_SC_ABORTED_FAILED_FUSED:
                case NVME_SC_ABORTED_MISSING_FUSED:
                case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
                case NVME_SC_COMMAND_SEQUENCE_ERROR:
                case NVME_SC_LBA_OUT_OF_RANGE:
                case NVME_SC_CAPACITY_EXCEEDED:
                default:
                        return (0);
                }
        case NVME_SCT_COMMAND_SPECIFIC:
        case NVME_SCT_MEDIA_ERROR:
        case NVME_SCT_VENDOR_SPECIFIC:
        default:
                return (0);
        }
}

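/*
 * Handle completion of a tracked command:  on error, optionally log the
 * command and its completion status.  If the completion indicates the
 * command should be retried, resubmit it on the same tracker; otherwise
 * invoke the request's callback, return the tracker to the free list, and
 * submit the next queued request (if any) as long as no controller reset
 * is in progress.
 */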
static void
nvme_qpair_complete_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
    struct nvme_completion *cpl, boolean_t print_on_error)
{
        struct nvme_request *req;
        boolean_t retry, error;

        req = tr->req;
        error = nvme_completion_is_error(cpl);
        retry = error && nvme_completion_is_retry(cpl) &&
            req->retries < nvme_retry_count;

        if (error && print_on_error) {
                nvme_qpair_print_command(qpair, &req->cmd);
                nvme_qpair_print_completion(qpair, cpl);
        }

        qpair->act_tr[cpl->cid] = NULL;

        KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n"));

        if (req->cb_fn && !retry)
                req->cb_fn(req->cb_arg, cpl);

        mtx_lock(&qpair->lock);
        callout_stop(&tr->timer);

        if (retry) {
                req->retries++;
                nvme_qpair_submit_tracker(qpair, tr);
        } else {
                if (req->type != NVME_REQUEST_NULL)
                        bus_dmamap_unload(qpair->dma_tag_payload,
                            tr->payload_dma_map);

                nvme_free_request(req);
                tr->req = NULL;

                TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq);
                TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);

                /*
                 * If the controller is in the middle of resetting, don't
                 * try to submit queued requests here - let the reset logic
                 * handle that instead.
                 */
                if (!STAILQ_EMPTY(&qpair->queued_req) &&
                    !qpair->ctrlr->is_resetting) {
                        req = STAILQ_FIRST(&qpair->queued_req);
                        STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
                        _nvme_qpair_submit_request(qpair, req);
                }
        }

        mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_manual_complete_tracker(struct nvme_qpair *qpair,
    struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr,
    boolean_t print_on_error)
{
        struct nvme_completion cpl;

        memset(&cpl, 0, sizeof(cpl));
        cpl.sqid = qpair->id;
        cpl.cid = tr->cid;
        cpl.status.sct = sct;
        cpl.status.sc = sc;
        cpl.status.dnr = dnr;
        nvme_qpair_complete_tracker(qpair, tr, &cpl, print_on_error);
}

void
nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
    struct nvme_request *req, uint32_t sct, uint32_t sc,
    boolean_t print_on_error)
{
        struct nvme_completion cpl;
        boolean_t error;

        memset(&cpl, 0, sizeof(cpl));
        cpl.sqid = qpair->id;
        cpl.status.sct = sct;
        cpl.status.sc = sc;

        error = nvme_completion_is_error(&cpl);

        if (error && print_on_error) {
                nvme_qpair_print_command(qpair, &req->cmd);
                nvme_qpair_print_completion(qpair, &cpl);
        }

        if (req->cb_fn)
                req->cb_fn(req->cb_arg, &cpl);

        nvme_free_request(req);
}

void
nvme_qpair_process_completions(struct nvme_qpair *qpair)
{
        struct nvme_tracker *tr;
        struct nvme_completion *cpl;

        qpair->num_intr_handler_calls++;

        if (!qpair->is_enabled)
                /*
                 * qpair is not enabled, likely because a controller reset is
                 * in progress.  Ignore the interrupt - any I/O that was
                 * associated with this interrupt will get retried when the
                 * reset is complete.
                 */
                return;

        while (1) {
                cpl = &qpair->cpl[qpair->cq_head];

                if (cpl->status.p != qpair->phase)
                        break;

                tr = qpair->act_tr[cpl->cid];

                if (tr != NULL) {
                        nvme_qpair_complete_tracker(qpair, tr, cpl, TRUE);
                        qpair->sq_head = cpl->sqhd;
                } else {
                        nvme_printf(qpair->ctrlr,
                            "cpl does not map to outstanding cmd\n");
                        nvme_dump_completion(cpl);
                        KASSERT(0, ("received completion for unknown cmd\n"));
                }

                if (++qpair->cq_head == qpair->num_entries) {
                        qpair->cq_head = 0;
                        qpair->phase = !qpair->phase;
                }

                nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].cq_hdbl,
                    qpair->cq_head);
        }
}

static void
nvme_qpair_msix_handler(void *arg)
{
        struct nvme_qpair *qpair = arg;

        nvme_qpair_process_completions(qpair);
}

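/*
 * Queue pair construction.  All per-qpair host memory (submission queue
 * entries, completion queue entries, and the per-tracker PRP lists) is
 * carved out of a single physically contiguous DMA allocation:
 *
 *      [ commands | completions | PRP lists ]
 *
 * The command and completion regions are each rounded up to a page
 * boundary; see the cmdsz/cplsz/prpmemsz calculations below.
 */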
int
nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
    uint16_t vector, uint32_t num_entries, uint32_t num_trackers,
    struct nvme_controller *ctrlr)
{
        struct nvme_tracker *tr;
        size_t cmdsz, cplsz, prpsz, allocsz, prpmemsz;
        uint64_t queuemem_phys, prpmem_phys, list_phys;
        uint8_t *queuemem, *prpmem, *prp_list;
        int i, err;

        qpair->id = id;
        qpair->vector = vector;
        qpair->num_entries = num_entries;
        qpair->num_trackers = num_trackers;
        qpair->ctrlr = ctrlr;

        if (ctrlr->msix_enabled) {

                /*
                 * MSI-X vector resource IDs start at 1, so we add one to
                 * the queue's vector to get the corresponding rid to use.
                 */
                qpair->rid = vector + 1;

                qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
                    &qpair->rid, RF_ACTIVE);
                bus_setup_intr(ctrlr->dev, qpair->res,
                    INTR_TYPE_MISC | INTR_MPSAFE, NULL,
                    nvme_qpair_msix_handler, qpair, &qpair->tag);
        }

        mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF);

        /* Note: NVMe PRP format is restricted to 4-byte alignment. */
        err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
            4, PAGE_SIZE, BUS_SPACE_MAXADDR,
            BUS_SPACE_MAXADDR, NULL, NULL, NVME_MAX_XFER_SIZE,
            (NVME_MAX_XFER_SIZE/PAGE_SIZE)+1, PAGE_SIZE, 0,
            NULL, NULL, &qpair->dma_tag_payload);
        if (err != 0) {
                nvme_printf(ctrlr, "payload tag create failed %d\n", err);
                goto out;
        }

        /*
         * Each component must be page aligned, and individual PRP lists
         * cannot cross a page boundary.
         */
        cmdsz = qpair->num_entries * sizeof(struct nvme_command);
        cmdsz = roundup2(cmdsz, PAGE_SIZE);
        cplsz = qpair->num_entries * sizeof(struct nvme_completion);
        cplsz = roundup2(cplsz, PAGE_SIZE);
        prpsz = sizeof(uint64_t) * NVME_MAX_PRP_LIST_ENTRIES;
        prpmemsz = qpair->num_trackers * prpsz;
        allocsz = cmdsz + cplsz + prpmemsz;

        err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
            PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            allocsz, 1, allocsz, 0, NULL, NULL, &qpair->dma_tag);
        if (err != 0) {
                nvme_printf(ctrlr, "tag create failed %d\n", err);
                goto out;
        }

        if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem,
            BUS_DMA_NOWAIT, &qpair->queuemem_map)) {
                nvme_printf(ctrlr, "failed to alloc qpair memory\n");
                goto out;
        }

        if (bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map,
            queuemem, allocsz, nvme_single_map, &queuemem_phys, 0) != 0) {
                nvme_printf(ctrlr, "failed to load qpair memory\n");
                goto out;
        }

        qpair->num_cmds = 0;
        qpair->num_intr_handler_calls = 0;
        qpair->cmd = (struct nvme_command *)queuemem;
        qpair->cpl = (struct nvme_completion *)(queuemem + cmdsz);
        prpmem = (uint8_t *)(queuemem + cmdsz + cplsz);
        qpair->cmd_bus_addr = queuemem_phys;
        qpair->cpl_bus_addr = queuemem_phys + cmdsz;
        prpmem_phys = queuemem_phys + cmdsz + cplsz;

        qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[id].sq_tdbl);
        qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[id].cq_hdbl);

        TAILQ_INIT(&qpair->free_tr);
        TAILQ_INIT(&qpair->outstanding_tr);
        STAILQ_INIT(&qpair->queued_req);

        list_phys = prpmem_phys;
        prp_list = prpmem;
        for (i = 0; i < qpair->num_trackers; i++) {

                if (list_phys + prpsz > prpmem_phys + prpmemsz) {
                        qpair->num_trackers = i;
                        break;
                }

                /*
                 * Make sure that the PRP list for this tracker doesn't
                 * overflow to another page.
                 */
                if (trunc_page(list_phys) !=
                    trunc_page(list_phys + prpsz - 1)) {
                        list_phys = roundup2(list_phys, PAGE_SIZE);
                        prp_list =
                            (uint8_t *)roundup2((uintptr_t)prp_list, PAGE_SIZE);
                }

                tr = malloc(sizeof(*tr), M_NVME, M_ZERO | M_WAITOK);
                bus_dmamap_create(qpair->dma_tag_payload, 0,
                    &tr->payload_dma_map);
                callout_init(&tr->timer, 1);
                tr->cid = i;
                tr->qpair = qpair;
                tr->prp = (uint64_t *)prp_list;
                tr->prp_bus_addr = list_phys;
                TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
                list_phys += prpsz;
                prp_list += prpsz;
        }

        if (qpair->num_trackers == 0) {
                nvme_printf(ctrlr, "failed to allocate enough trackers\n");
                goto out;
        }

        qpair->act_tr = malloc(sizeof(struct nvme_tracker *) *
            qpair->num_entries, M_NVME, M_ZERO | M_WAITOK);
        return (0);

out:
        nvme_qpair_destroy(qpair);
        return (ENOMEM);
}

static void
nvme_qpair_destroy(struct nvme_qpair *qpair)
{
        struct nvme_tracker *tr;

        if (qpair->tag)
                bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag);

        if (mtx_initialized(&qpair->lock))
                mtx_destroy(&qpair->lock);

        if (qpair->res)
                bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ,
                    rman_get_rid(qpair->res), qpair->res);

        if (qpair->cmd != NULL) {
                bus_dmamap_unload(qpair->dma_tag, qpair->queuemem_map);
                bus_dmamem_free(qpair->dma_tag, qpair->cmd,
                    qpair->queuemem_map);
        }

        if (qpair->act_tr)
                free(qpair->act_tr, M_NVME);

        /*
         * Destroy the payload DMA maps with the tag they were created on
         * (dma_tag_payload), and do so before destroying that tag below.
         */
        while (!TAILQ_EMPTY(&qpair->free_tr)) {
                tr = TAILQ_FIRST(&qpair->free_tr);
                TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
                bus_dmamap_destroy(qpair->dma_tag_payload,
                    tr->payload_dma_map);
                free(tr, M_NVME);
        }

        if (qpair->dma_tag)
                bus_dma_tag_destroy(qpair->dma_tag);

        if (qpair->dma_tag_payload)
                bus_dma_tag_destroy(qpair->dma_tag_payload);
}

static void
nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair)
{
        struct nvme_tracker *tr;

        tr = TAILQ_FIRST(&qpair->outstanding_tr);
        while (tr != NULL) {
                if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) {
                        nvme_qpair_manual_complete_tracker(qpair, tr,
                            NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0,
                            FALSE);
                        tr = TAILQ_FIRST(&qpair->outstanding_tr);
                } else {
                        tr = TAILQ_NEXT(tr, tailq);
                }
        }
}

void
nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
{

        nvme_admin_qpair_abort_aers(qpair);
        nvme_qpair_destroy(qpair);
}

void
nvme_io_qpair_destroy(struct nvme_qpair *qpair)
{

        nvme_qpair_destroy(qpair);
}

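/*
 * Command timeout handling.  When a tracked command times out,
 * nvme_timeout() either issues an ABORT admin command for it (if aborts
 * are enabled and the controller is not reporting fatal status) or resets
 * the controller.  If the controller could not abort the command (cdw0 == 1
 * in the ABORT completion) and the command is still outstanding,
 * nvme_abort_complete() completes the tracker manually with
 * ABORTED - BY REQUEST status.
 */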
static void
nvme_abort_complete(void *arg, const struct nvme_completion *status)
{
        struct nvme_tracker *tr = arg;

        /*
         * If cdw0 == 1, the controller was not able to abort the command
         * we requested.  We still need to check the active tracker array,
         * to cover the race where the I/O timed out at the same time the
         * controller was completing it.
         */
        if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) {
                /*
                 * An I/O has timed out, and the controller was unable to
                 * abort it for some reason.  Construct a fake completion
                 * status, and then complete the I/O's tracker manually.
                 */
                nvme_printf(tr->qpair->ctrlr,
                    "abort command failed, aborting command manually\n");
                nvme_qpair_manual_complete_tracker(tr->qpair, tr,
                    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 0, TRUE);
        }
}

static void
nvme_timeout(void *arg)
{
        struct nvme_tracker *tr = arg;
        struct nvme_qpair *qpair = tr->qpair;
        struct nvme_controller *ctrlr = qpair->ctrlr;
        union csts_register csts;

        /* Read csts to get value of cfs - controller fatal status. */
        csts.raw = nvme_mmio_read_4(ctrlr, csts);

        if (ctrlr->enable_aborts && csts.bits.cfs == 0) {
                /*
                 * If aborts are enabled, only use them if the controller is
                 * not reporting fatal status.
                 */
                nvme_ctrlr_cmd_abort(ctrlr, tr->cid, qpair->id,
                    nvme_abort_complete, tr);
        } else
                nvme_ctrlr_reset(ctrlr);
}

void
nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
{
        struct nvme_request *req;
        struct nvme_controller *ctrlr;

        mtx_assert(&qpair->lock, MA_OWNED);

        req = tr->req;
        req->cmd.cid = tr->cid;
        qpair->act_tr[tr->cid] = tr;
        ctrlr = qpair->ctrlr;

        if (req->timeout)
#if __FreeBSD_version >= 800030
                callout_reset_curcpu(&tr->timer, ctrlr->timeout_period * hz,
                    nvme_timeout, tr);
#else
                callout_reset(&tr->timer, ctrlr->timeout_period * hz,
                    nvme_timeout, tr);
#endif

        /* Copy the command from the tracker to the submission queue. */
        memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd));

        if (++qpair->sq_tail == qpair->num_entries)
                qpair->sq_tail = 0;

        wmb();
        nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].sq_tdbl,
            qpair->sq_tail);

        qpair->num_cmds++;
}

static void
nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
        struct nvme_tracker *tr = arg;
        uint32_t cur_nseg;

        /*
         * If the mapping operation failed, return immediately.  The caller
         * is responsible for detecting the error status and failing the
         * tracker manually.
         */
        if (error != 0) {
                nvme_printf(tr->qpair->ctrlr,
                    "nvme_payload_map err %d\n", error);
                return;
        }

        /*
         * Note that we specified PAGE_SIZE for alignment and max
         * segment size when creating the bus dma tags.  So here
         * we can safely just transfer each segment to its
         * associated PRP entry.
         */
        tr->req->cmd.prp1 = seg[0].ds_addr;

        if (nseg == 2) {
                tr->req->cmd.prp2 = seg[1].ds_addr;
        } else if (nseg > 2) {
                cur_nseg = 1;
                tr->req->cmd.prp2 = (uint64_t)tr->prp_bus_addr;
                while (cur_nseg < nseg) {
                        tr->prp[cur_nseg-1] =
                            (uint64_t)seg[cur_nseg].ds_addr;
                        cur_nseg++;
                }
        } else {
                /*
                 * prp2 should not be used by the controller
                 * since there is only one segment, but set
                 * to 0 just to be safe.
                 */
                tr->req->cmd.prp2 = 0;
        }

        nvme_qpair_submit_tracker(tr->qpair, tr);
}

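/*
 * Submit a request on a queue pair.  Called with the qpair lock held.  If a
 * tracker is available and the qpair is enabled, the payload (if any) is
 * DMA-mapped and the command is placed in the submission queue; otherwise
 * the request is either queued for later submission or handed to the
 * failure path if the controller has failed.
 */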
static void
_nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{
        struct nvme_tracker *tr;
        int err = 0;

        mtx_assert(&qpair->lock, MA_OWNED);

        tr = TAILQ_FIRST(&qpair->free_tr);
        req->qpair = qpair;

        if (tr == NULL || !qpair->is_enabled) {
                /*
                 * No tracker is available, or the qpair is disabled due to
                 * an in-progress controller-level reset or controller
                 * failure.
                 */

                if (qpair->ctrlr->is_failed) {
                        /*
                         * The controller has failed.  Post the request to a
                         * task where it will be aborted, so that we do not
                         * invoke the request's callback in the context
                         * of the submission.
                         */
                        nvme_ctrlr_post_failed_request(qpair->ctrlr, req);
                } else {
                        /*
                         * Put the request on the qpair's request queue to be
                         * processed when a tracker frees up via a command
                         * completion or when the controller reset is
                         * completed.
                         */
                        STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
                }
                return;
        }

        TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
        TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq);
        tr->req = req;

        switch (req->type) {
        case NVME_REQUEST_VADDR:
                KASSERT(req->payload_size <= qpair->ctrlr->max_xfer_size,
                    ("payload_size (%d) exceeds max_xfer_size (%d)\n",
                    req->payload_size, qpair->ctrlr->max_xfer_size));
                err = bus_dmamap_load(tr->qpair->dma_tag_payload,
                    tr->payload_dma_map, req->u.payload, req->payload_size,
                    nvme_payload_map, tr, 0);
                if (err != 0)
                        nvme_printf(qpair->ctrlr,
                            "bus_dmamap_load returned 0x%x!\n", err);
                break;
        case NVME_REQUEST_NULL:
                nvme_qpair_submit_tracker(tr->qpair, tr);
                break;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
        case NVME_REQUEST_BIO:
                KASSERT(req->u.bio->bio_bcount <= qpair->ctrlr->max_xfer_size,
                    ("bio->bio_bcount (%jd) exceeds max_xfer_size (%d)\n",
                    (intmax_t)req->u.bio->bio_bcount,
                    qpair->ctrlr->max_xfer_size));
                err = bus_dmamap_load_bio(tr->qpair->dma_tag_payload,
                    tr->payload_dma_map, req->u.bio, nvme_payload_map, tr, 0);
                if (err != 0)
                        nvme_printf(qpair->ctrlr,
                            "bus_dmamap_load_bio returned 0x%x!\n", err);
                break;
#endif
        default:
                panic("unknown nvme request type 0x%x\n", req->type);
                break;
        }

        if (err != 0) {
                /*
                 * The dmamap operation failed, so we manually fail the
                 * tracker here with DATA_TRANSFER_ERROR status.
                 *
                 * nvme_qpair_manual_complete_tracker must not be called
                 * with the qpair lock held.
                 */
                mtx_unlock(&qpair->lock);
                nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
                    NVME_SC_DATA_TRANSFER_ERROR, 1 /* do not retry */, TRUE);
                mtx_lock(&qpair->lock);
        }
}

void
nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{

        mtx_lock(&qpair->lock);
        _nvme_qpair_submit_request(qpair, req);
        mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_enable(struct nvme_qpair *qpair)
{

        qpair->is_enabled = TRUE;
}

void
nvme_qpair_reset(struct nvme_qpair *qpair)
{

        qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;

        /*
         * First time through the completion queue, HW will set the phase
         * bit on completions to 1.  So set this to 1 here, indicating
         * we're looking for a 1 to know which entries have completed.
         * We'll toggle the bit each time the completion queue rolls over.
         */
        qpair->phase = 1;

        memset(qpair->cmd, 0,
            qpair->num_entries * sizeof(struct nvme_command));
        memset(qpair->cpl, 0,
            qpair->num_entries * sizeof(struct nvme_completion));
}

void
nvme_admin_qpair_enable(struct nvme_qpair *qpair)
{
        struct nvme_tracker *tr;
        struct nvme_tracker *tr_temp;

        /*
         * Manually abort each outstanding admin command.  Do not retry
         * admin commands found here, since they will be left over from
         * a controller reset and it's likely the context in which the
         * command was issued no longer applies.
         */
        TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
                nvme_printf(qpair->ctrlr,
                    "aborting outstanding admin command\n");
                nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
                    NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, TRUE);
        }

        nvme_qpair_enable(qpair);
}

void
nvme_io_qpair_enable(struct nvme_qpair *qpair)
{
        STAILQ_HEAD(, nvme_request) temp;
        struct nvme_tracker *tr;
        struct nvme_tracker *tr_temp;
        struct nvme_request *req;

        /*
         * Manually abort each outstanding I/O.  This normally results in a
         * retry, unless the retry count on the associated request has
         * reached its limit.
         */
        TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
                nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n");
                nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
                    NVME_SC_ABORTED_BY_REQUEST, 0, TRUE);
        }

        mtx_lock(&qpair->lock);

        nvme_qpair_enable(qpair);

        STAILQ_INIT(&temp);
        STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request);

        while (!STAILQ_EMPTY(&temp)) {
                req = STAILQ_FIRST(&temp);
                STAILQ_REMOVE_HEAD(&temp, stailq);
                nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n");
                nvme_qpair_print_command(qpair, &req->cmd);
                _nvme_qpair_submit_request(qpair, req);
        }

        mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_disable(struct nvme_qpair *qpair)
{
        struct nvme_tracker *tr;

        qpair->is_enabled = FALSE;
        mtx_lock(&qpair->lock);
        TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq)
                callout_stop(&tr->timer);
        mtx_unlock(&qpair->lock);
}

void
nvme_admin_qpair_disable(struct nvme_qpair *qpair)
{

        nvme_qpair_disable(qpair);
        nvme_admin_qpair_abort_aers(qpair);
}

void
nvme_io_qpair_disable(struct nvme_qpair *qpair)
{

        nvme_qpair_disable(qpair);
}

void
nvme_qpair_fail(struct nvme_qpair *qpair)
{
        struct nvme_tracker *tr;
        struct nvme_request *req;

        if (!mtx_initialized(&qpair->lock))
                return;

        mtx_lock(&qpair->lock);

        while (!STAILQ_EMPTY(&qpair->queued_req)) {
                req = STAILQ_FIRST(&qpair->queued_req);
                STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
                nvme_printf(qpair->ctrlr, "failing queued i/o\n");
                mtx_unlock(&qpair->lock);
                nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC,
                    NVME_SC_ABORTED_BY_REQUEST, TRUE);
                mtx_lock(&qpair->lock);
        }

        /* Manually abort each outstanding I/O. */
        while (!TAILQ_EMPTY(&qpair->outstanding_tr)) {
                tr = TAILQ_FIRST(&qpair->outstanding_tr);
                /*
                 * Do not remove the tracker.  The abort_tracker path will
                 * do that for us.
                 */
                nvme_printf(qpair->ctrlr, "failing outstanding i/o\n");
                mtx_unlock(&qpair->lock);
                nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
                    NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, TRUE);
                mtx_lock(&qpair->lock);
        }

        mtx_unlock(&qpair->lock);
}