/*-
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>

#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static void	_nvme_qpair_submit_request(struct nvme_qpair *qpair,
    struct nvme_request *req);

struct nvme_opcode_string {

	uint16_t	opc;
	const char *	str;
};

static struct nvme_opcode_string admin_opcode[] = {
	{ NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" },
	{ NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" },
	{ NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" },
	{ NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" },
	{ NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" },
	{ NVME_OPC_IDENTIFY, "IDENTIFY" },
	{ NVME_OPC_ABORT, "ABORT" },
	{ NVME_OPC_SET_FEATURES, "SET FEATURES" },
	{ NVME_OPC_GET_FEATURES, "GET FEATURES" },
	{ NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" },
	{ NVME_OPC_FIRMWARE_ACTIVATE, "FIRMWARE ACTIVATE" },
	{ NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" },
	{ NVME_OPC_FORMAT_NVM, "FORMAT NVM" },
	{ NVME_OPC_SECURITY_SEND, "SECURITY SEND" },
	{ NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" },
	{ 0xFFFF, "ADMIN COMMAND" }
};

static struct nvme_opcode_string io_opcode[] = {
	{ NVME_OPC_FLUSH, "FLUSH" },
	{ NVME_OPC_WRITE, "WRITE" },
	{ NVME_OPC_READ, "READ" },
	{ NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" },
	{ NVME_OPC_COMPARE, "COMPARE" },
	{ NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" },
	{ 0xFFFF, "IO COMMAND" }
};

static const char *
get_admin_opcode_string(uint16_t opc)
{
	struct nvme_opcode_string *entry;

	entry = admin_opcode;

	while (entry->opc != 0xFFFF) {
		if (entry->opc == opc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}

static const char *
get_io_opcode_string(uint16_t opc)
{
	struct nvme_opcode_string *entry;

	entry = io_opcode;

	while (entry->opc != 0xFFFF) {
		if (entry->opc == opc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}

static void
nvme_admin_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

	nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x "
	    "cdw10:%08x cdw11:%08x\n",
	    get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid,
	    cmd->nsid, cmd->cdw10, cmd->cdw11);
}

static void
nvme_io_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

	switch (cmd->opc) {
	case NVME_OPC_WRITE:
	case NVME_OPC_READ:
	case NVME_OPC_WRITE_UNCORRECTABLE:
	case NVME_OPC_COMPARE:
		nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d "
		    "lba:%llu len:%d\n",
		    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid,
		    cmd->nsid,
		    ((unsigned long long)cmd->cdw11 << 32) + cmd->cdw10,
		    (cmd->cdw12 & 0xFFFF) + 1);
		break;
	case NVME_OPC_FLUSH:
	case NVME_OPC_DATASET_MANAGEMENT:
		nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n",
		    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid,
		    cmd->nsid);
		break;
	default:
		nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n",
		    get_io_opcode_string(cmd->opc), cmd->opc, qpair->id,
		    cmd->cid, cmd->nsid);
		break;
	}
}

static void
nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd)
{
	if (qpair->id == 0)
		nvme_admin_qpair_print_command(qpair, cmd);
	else
		nvme_io_qpair_print_command(qpair, cmd);
}

struct nvme_status_string {

	uint16_t	sc;
	const char *	str;
};

static struct nvme_status_string generic_status[] = {
	{ NVME_SC_SUCCESS, "SUCCESS" },
	{ NVME_SC_INVALID_OPCODE, "INVALID OPCODE" },
	{ NVME_SC_INVALID_FIELD, "INVALID_FIELD" },
	{ NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" },
	{ NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" },
	{ NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" },
	{ NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" },
	{ NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" },
	{ NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" },
	{ NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" },
	{ NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" },
	{ NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" },
	{ NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" },
	{ NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" },
	{ NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" },
	{ NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" },
	{ 0xFFFF, "GENERIC" }
};

static struct nvme_status_string command_specific_status[] = {
	{ NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" },
	{ NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" },
	{ NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" },
	{ NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" },
	{ NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" },
	{ NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" },
	{ NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" },
	{ NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" },
	{ NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" },
	{ NVME_SC_INVALID_FORMAT, "INVALID FORMAT" },
	{ NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" },
	{ NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" },
	{ NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" },
	{ NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" },
	{ 0xFFFF, "COMMAND SPECIFIC" }
};

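/*
 * Each of these status string tables is terminated by a 0xFFFF sentinel
 * entry whose string names the status code type; the lookup routine below
 * falls back to that entry when no specific status code matches.
 */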
static struct nvme_status_string media_error_status[] = {
	{ NVME_SC_WRITE_FAULTS, "WRITE FAULTS" },
	{ NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" },
	{ NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" },
	{ NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" },
	{ NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" },
	{ NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" },
	{ NVME_SC_ACCESS_DENIED, "ACCESS DENIED" },
	{ 0xFFFF, "MEDIA ERROR" }
};

static const char *
get_status_string(uint16_t sct, uint16_t sc)
{
	struct nvme_status_string *entry;

	switch (sct) {
	case NVME_SCT_GENERIC:
		entry = generic_status;
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		entry = command_specific_status;
		break;
	case NVME_SCT_MEDIA_ERROR:
		entry = media_error_status;
		break;
	case NVME_SCT_VENDOR_SPECIFIC:
		return ("VENDOR SPECIFIC");
	default:
		return ("RESERVED");
	}

	while (entry->sc != 0xFFFF) {
		if (entry->sc == sc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}

static void
nvme_qpair_print_completion(struct nvme_qpair *qpair,
    struct nvme_completion *cpl)
{
	nvme_printf(qpair->ctrlr, "%s (%02x/%02x) sqid:%d cid:%d cdw0:%x\n",
	    get_status_string(cpl->status.sct, cpl->status.sc),
	    cpl->status.sct, cpl->status.sc, cpl->sqid, cpl->cid, cpl->cdw0);
}

static boolean_t
nvme_completion_is_retry(const struct nvme_completion *cpl)
{
	/*
	 * TODO: spec is not clear how commands that are aborted due
	 *  to TLER will be marked.  So for now, it seems
	 *  NAMESPACE_NOT_READY is the only case where we should
	 *  look at the DNR bit.
	 */
	switch (cpl->status.sct) {
	case NVME_SCT_GENERIC:
		switch (cpl->status.sc) {
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_NAMESPACE_NOT_READY:
			if (cpl->status.dnr)
				return (0);
			else
				return (1);
		case NVME_SC_INVALID_OPCODE:
		case NVME_SC_INVALID_FIELD:
		case NVME_SC_COMMAND_ID_CONFLICT:
		case NVME_SC_DATA_TRANSFER_ERROR:
		case NVME_SC_ABORTED_POWER_LOSS:
		case NVME_SC_INTERNAL_DEVICE_ERROR:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
		case NVME_SC_COMMAND_SEQUENCE_ERROR:
		case NVME_SC_LBA_OUT_OF_RANGE:
		case NVME_SC_CAPACITY_EXCEEDED:
		default:
			return (0);
		}
	case NVME_SCT_COMMAND_SPECIFIC:
	case NVME_SCT_MEDIA_ERROR:
	case NVME_SCT_VENDOR_SPECIFIC:
	default:
		return (0);
	}
}

static void
nvme_qpair_construct_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
    uint16_t cid)
{

	bus_dmamap_create(qpair->dma_tag_payload, 0, &tr->payload_dma_map);
	bus_dmamap_create(qpair->dma_tag, 0, &tr->prp_dma_map);

	bus_dmamap_load(qpair->dma_tag, tr->prp_dma_map, tr->prp,
	    sizeof(tr->prp), nvme_single_map, &tr->prp_bus_addr, 0);

	callout_init(&tr->timer, 1);
	tr->cid = cid;
	tr->qpair = qpair;
}

static void
nvme_qpair_complete_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
    struct nvme_completion *cpl, boolean_t print_on_error)
{
	struct nvme_request	*req;
	boolean_t		retry, error;

	req = tr->req;
	error = nvme_completion_is_error(cpl);
	retry = error && nvme_completion_is_retry(cpl) &&
	    req->retries < nvme_retry_count;

	if (error && print_on_error) {
		nvme_qpair_print_command(qpair, &req->cmd);
		nvme_qpair_print_completion(qpair, cpl);
	}

	qpair->act_tr[cpl->cid] = NULL;

	KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n"));

	if (req->cb_fn && !retry)
		req->cb_fn(req->cb_arg, cpl);

	mtx_lock(&qpair->lock);
	callout_stop(&tr->timer);

	if (retry) {
		req->retries++;
		nvme_qpair_submit_tracker(qpair, tr);
	} else {
		if (req->type != NVME_REQUEST_NULL)
			bus_dmamap_unload(qpair->dma_tag_payload,
			    tr->payload_dma_map);

		nvme_free_request(req);
		tr->req = NULL;

		TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq);
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);

		/*
		 * If the controller is in the middle of resetting, don't
		 * try to submit queued requests here - let the reset logic
		 * handle that instead.
		 */
		if (!STAILQ_EMPTY(&qpair->queued_req) &&
		    !qpair->ctrlr->is_resetting) {
			req = STAILQ_FIRST(&qpair->queued_req);
			STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
			_nvme_qpair_submit_request(qpair, req);
		}
	}

	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_manual_complete_tracker(struct nvme_qpair *qpair,
    struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr,
    boolean_t print_on_error)
{
	struct nvme_completion	cpl;

	memset(&cpl, 0, sizeof(cpl));
	cpl.sqid = qpair->id;
	cpl.cid = tr->cid;
	cpl.status.sct = sct;
	cpl.status.sc = sc;
	cpl.status.dnr = dnr;
	nvme_qpair_complete_tracker(qpair, tr, &cpl, print_on_error);
}

void
nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
    struct nvme_request *req, uint32_t sct, uint32_t sc,
    boolean_t print_on_error)
{
	struct nvme_completion	cpl;
	boolean_t		error;

	memset(&cpl, 0, sizeof(cpl));
	cpl.sqid = qpair->id;
	cpl.status.sct = sct;
	cpl.status.sc = sc;

	error = nvme_completion_is_error(&cpl);

	if (error && print_on_error) {
		nvme_qpair_print_command(qpair, &req->cmd);
		nvme_qpair_print_completion(qpair, &cpl);
	}

	if (req->cb_fn)
		req->cb_fn(req->cb_arg, &cpl);

	nvme_free_request(req);
}

void
nvme_qpair_process_completions(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;
	struct nvme_completion	*cpl;

	qpair->num_intr_handler_calls++;

	if (!qpair->is_enabled)
		/*
		 * qpair is not enabled, likely because a controller reset
		 * is in progress.  Ignore the interrupt - any I/O that was
		 * associated with this interrupt will get retried when the
		 * reset is complete.
		 */
		return;

	while (1) {
		cpl = &qpair->cpl[qpair->cq_head];

		if (cpl->status.p != qpair->phase)
			break;

		tr = qpair->act_tr[cpl->cid];

		if (tr != NULL) {
			nvme_qpair_complete_tracker(qpair, tr, cpl, TRUE);
			qpair->sq_head = cpl->sqhd;
		} else {
			nvme_printf(qpair->ctrlr,
			    "cpl does not map to outstanding cmd\n");
			nvme_dump_completion(cpl);
			KASSERT(0, ("received completion for unknown cmd\n"));
		}

		if (++qpair->cq_head == qpair->num_entries) {
			qpair->cq_head = 0;
			qpair->phase = !qpair->phase;
		}

		nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].cq_hdbl,
		    qpair->cq_head);
	}
}

static void
nvme_qpair_msix_handler(void *arg)
{
	struct nvme_qpair *qpair = arg;

	nvme_qpair_process_completions(qpair);
}

void
nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
    uint16_t vector, uint32_t num_entries, uint32_t num_trackers,
    struct nvme_controller *ctrlr)
{
	struct nvme_tracker	*tr;
	uint32_t		i;
	int			err;

	qpair->id = id;
	qpair->vector = vector;
	qpair->num_entries = num_entries;
	qpair->num_trackers = num_trackers;
	qpair->ctrlr = ctrlr;

	if (ctrlr->msix_enabled) {

		/*
		 * MSI-X vector resource IDs start at 1, so we add one to
		 * the queue's vector to get the corresponding rid to use.
		 */
		qpair->rid = vector + 1;

		qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
		    &qpair->rid, RF_ACTIVE);
		bus_setup_intr(ctrlr->dev, qpair->res,
		    INTR_TYPE_MISC | INTR_MPSAFE, NULL,
		    nvme_qpair_msix_handler, qpair, &qpair->tag);
	}

	mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF);

	/* Note: NVMe PRP format is restricted to 4-byte alignment. */
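	/*
	 * Two DMA tags are created below: dma_tag_payload maps caller data
	 * buffers, using PAGE_SIZE segments and a PAGE_SIZE boundary so that
	 * each segment can be described by a single PRP entry, while dma_tag
	 * maps the queue memory and the per-tracker PRP lists as single
	 * contiguous segments.
	 */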
	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    4, PAGE_SIZE, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, NVME_MAX_XFER_SIZE,
	    (NVME_MAX_XFER_SIZE/PAGE_SIZE)+1, PAGE_SIZE, 0,
	    NULL, NULL, &qpair->dma_tag_payload);
	if (err != 0)
		nvme_printf(ctrlr, "payload tag create failed %d\n", err);

	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    4, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE, 1, BUS_SPACE_MAXSIZE, 0,
	    NULL, NULL, &qpair->dma_tag);
	if (err != 0)
		nvme_printf(ctrlr, "tag create failed %d\n", err);

	qpair->num_cmds = 0;
	qpair->num_intr_handler_calls = 0;

	qpair->cmd = contigmalloc(qpair->num_entries *
	    sizeof(struct nvme_command), M_NVME, M_ZERO,
	    0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
	qpair->cpl = contigmalloc(qpair->num_entries *
	    sizeof(struct nvme_completion), M_NVME, M_ZERO,
	    0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);

	err = bus_dmamap_create(qpair->dma_tag, 0, &qpair->cmd_dma_map);
	if (err != 0)
		nvme_printf(ctrlr, "cmd_dma_map create failed %d\n", err);

	err = bus_dmamap_create(qpair->dma_tag, 0, &qpair->cpl_dma_map);
	if (err != 0)
		nvme_printf(ctrlr, "cpl_dma_map create failed %d\n", err);

	bus_dmamap_load(qpair->dma_tag, qpair->cmd_dma_map,
	    qpair->cmd, qpair->num_entries * sizeof(struct nvme_command),
	    nvme_single_map, &qpair->cmd_bus_addr, 0);
	bus_dmamap_load(qpair->dma_tag, qpair->cpl_dma_map,
	    qpair->cpl, qpair->num_entries * sizeof(struct nvme_completion),
	    nvme_single_map, &qpair->cpl_bus_addr, 0);

	qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[id].sq_tdbl);
	qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[id].cq_hdbl);

	TAILQ_INIT(&qpair->free_tr);
	TAILQ_INIT(&qpair->outstanding_tr);
	STAILQ_INIT(&qpair->queued_req);

	for (i = 0; i < qpair->num_trackers; i++) {
		tr = malloc(sizeof(*tr), M_NVME, M_ZERO | M_WAITOK);
		nvme_qpair_construct_tracker(qpair, tr, i);
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
	}

	qpair->act_tr = malloc(sizeof(struct nvme_tracker *) *
	    qpair->num_entries, M_NVME, M_ZERO | M_WAITOK);
}

static void
nvme_qpair_destroy(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	if (qpair->tag)
		bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag);

	if (qpair->res)
		bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(qpair->res), qpair->res);

	if (qpair->cmd) {
		bus_dmamap_unload(qpair->dma_tag, qpair->cmd_dma_map);
		bus_dmamap_destroy(qpair->dma_tag, qpair->cmd_dma_map);
		contigfree(qpair->cmd,
		    qpair->num_entries * sizeof(struct nvme_command), M_NVME);
	}

	if (qpair->cpl) {
		bus_dmamap_unload(qpair->dma_tag, qpair->cpl_dma_map);
		bus_dmamap_destroy(qpair->dma_tag, qpair->cpl_dma_map);
		contigfree(qpair->cpl,
		    qpair->num_entries * sizeof(struct nvme_completion),
		    M_NVME);
	}

	if (qpair->dma_tag)
		bus_dma_tag_destroy(qpair->dma_tag);

	if (qpair->dma_tag_payload)
		bus_dma_tag_destroy(qpair->dma_tag_payload);

	if (qpair->act_tr)
		free(qpair->act_tr, M_NVME);

	while (!TAILQ_EMPTY(&qpair->free_tr)) {
		tr = TAILQ_FIRST(&qpair->free_tr);
		TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
		/* The payload map was created from dma_tag_payload. */
		bus_dmamap_destroy(qpair->dma_tag_payload, tr->payload_dma_map);
		bus_dmamap_destroy(qpair->dma_tag, tr->prp_dma_map);
		free(tr, M_NVME);
	}
}

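/*
 * The controller only completes an asynchronous event request when an event
 * actually occurs, so any outstanding AER trackers must be completed
 * manually (with an ABORTED - SQ DELETION status) when the admin queue is
 * being disabled or destroyed.
 */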
static void
nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	tr = TAILQ_FIRST(&qpair->outstanding_tr);
	while (tr != NULL) {
		if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) {
			nvme_qpair_manual_complete_tracker(qpair, tr,
			    NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0,
			    FALSE);
			tr = TAILQ_FIRST(&qpair->outstanding_tr);
		} else {
			tr = TAILQ_NEXT(tr, tailq);
		}
	}
}

void
nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
{

	nvme_admin_qpair_abort_aers(qpair);
	nvme_qpair_destroy(qpair);
}

void
nvme_io_qpair_destroy(struct nvme_qpair *qpair)
{

	nvme_qpair_destroy(qpair);
}

static void
nvme_abort_complete(void *arg, const struct nvme_completion *status)
{
	struct nvme_tracker	*tr = arg;

	/*
	 * If cdw0 == 1, the controller was not able to abort the command
	 * we requested.  We still need to check the active tracker array,
	 * to cover the race where the I/O timed out at the same time the
	 * controller was completing it.
	 */
	if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) {
		/*
		 * An I/O has timed out, and the controller was unable to
		 * abort it for some reason.  Construct a fake completion
		 * status, and then complete the I/O's tracker manually.
		 */
		nvme_printf(tr->qpair->ctrlr,
		    "abort command failed, aborting command manually\n");
		nvme_qpair_manual_complete_tracker(tr->qpair, tr,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 0, TRUE);
	}
}

static void
nvme_timeout(void *arg)
{
	struct nvme_tracker	*tr = arg;
	struct nvme_qpair	*qpair = tr->qpair;
	struct nvme_controller	*ctrlr = qpair->ctrlr;
	union csts_register	csts;

	/* Read csts to get value of cfs - controller fatal status. */
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (ctrlr->enable_aborts && csts.bits.cfs == 0) {
		/*
		 * If aborts are enabled, only use them if the controller is
		 * not reporting fatal status.
		 */
		nvme_ctrlr_cmd_abort(ctrlr, tr->cid, qpair->id,
		    nvme_abort_complete, tr);
	} else
		nvme_ctrlr_reset(ctrlr);
}

void
nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
{
	struct nvme_request	*req;
	struct nvme_controller	*ctrlr;

	mtx_assert(&qpair->lock, MA_OWNED);

	req = tr->req;
	req->cmd.cid = tr->cid;
	qpair->act_tr[tr->cid] = tr;
	ctrlr = qpair->ctrlr;

	if (req->timeout)
#if __FreeBSD_version >= 800030
		callout_reset_curcpu(&tr->timer, ctrlr->timeout_period * hz,
		    nvme_timeout, tr);
#else
		callout_reset(&tr->timer, ctrlr->timeout_period * hz,
		    nvme_timeout, tr);
#endif

	/* Copy the command from the tracker to the submission queue. */
	memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd));

	if (++qpair->sq_tail == qpair->num_entries)
		qpair->sq_tail = 0;

	wmb();
	nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].sq_tdbl,
	    qpair->sq_tail);

	qpair->num_cmds++;
}

static void
nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct nvme_tracker	*tr = arg;
	uint32_t		cur_nseg;

	/*
	 * If the mapping operation failed, return immediately.  The caller
	 * is responsible for detecting the error status and failing the
	 * tracker manually.
	 */
	if (error != 0) {
		nvme_printf(tr->qpair->ctrlr,
		    "nvme_payload_map err %d\n", error);
		return;
	}

	/*
	 * Note that we specified PAGE_SIZE for the boundary and max
	 * segment size when creating the bus dma tags.  So here
	 * we can safely just transfer each segment to its
	 * associated PRP entry.
	 */
	tr->req->cmd.prp1 = seg[0].ds_addr;

	if (nseg == 2) {
		tr->req->cmd.prp2 = seg[1].ds_addr;
	} else if (nseg > 2) {
		cur_nseg = 1;
		tr->req->cmd.prp2 = (uint64_t)tr->prp_bus_addr;
		while (cur_nseg < nseg) {
			tr->prp[cur_nseg-1] =
			    (uint64_t)seg[cur_nseg].ds_addr;
			cur_nseg++;
		}
	} else {
		/*
		 * prp2 should not be used by the controller
		 * since there is only one segment, but set
		 * to 0 just to be safe.
		 */
		tr->req->cmd.prp2 = 0;
	}

	nvme_qpair_submit_tracker(tr->qpair, tr);
}

static void
_nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{
	struct nvme_tracker	*tr;
	int			err = 0;

	mtx_assert(&qpair->lock, MA_OWNED);

	tr = TAILQ_FIRST(&qpair->free_tr);
	req->qpair = qpair;

	if (tr == NULL || !qpair->is_enabled) {
		/*
		 * No tracker is available, or the qpair is disabled due to
		 * an in-progress controller-level reset or controller
		 * failure.
		 */

		if (qpair->ctrlr->is_failed) {
			/*
			 * The controller has failed.  Post the request to a
			 * task where it will be aborted, so that we do not
			 * invoke the request's callback in the context
			 * of the submission.
			 */
			nvme_ctrlr_post_failed_request(qpair->ctrlr, req);
		} else {
			/*
			 * Put the request on the qpair's request queue to be
			 * processed when a tracker frees up via a command
			 * completion or when the controller reset is
			 * completed.
			 */
			STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
		}
		return;
	}

	TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
	TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq);
	tr->req = req;

	switch (req->type) {
	case NVME_REQUEST_VADDR:
		KASSERT(req->payload_size <= qpair->ctrlr->max_xfer_size,
		    ("payload_size (%d) exceeds max_xfer_size (%d)\n",
		    req->payload_size, qpair->ctrlr->max_xfer_size));
		err = bus_dmamap_load(tr->qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.payload, req->payload_size,
		    nvme_payload_map, tr, 0);
		if (err != 0)
			nvme_printf(qpair->ctrlr,
			    "bus_dmamap_load returned 0x%x!\n", err);
		break;
	case NVME_REQUEST_NULL:
		nvme_qpair_submit_tracker(tr->qpair, tr);
		break;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
	case NVME_REQUEST_BIO:
		KASSERT(req->u.bio->bio_bcount <= qpair->ctrlr->max_xfer_size,
		    ("bio->bio_bcount (%jd) exceeds max_xfer_size (%d)\n",
		    (intmax_t)req->u.bio->bio_bcount,
		    qpair->ctrlr->max_xfer_size));
		err = bus_dmamap_load_bio(tr->qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.bio, nvme_payload_map, tr, 0);
		if (err != 0)
			nvme_printf(qpair->ctrlr,
			    "bus_dmamap_load_bio returned 0x%x!\n", err);
		break;
#endif
	default:
		panic("unknown nvme request type 0x%x\n", req->type);
		break;
	}

	if (err != 0) {
		/*
		 * The dmamap operation failed, so we manually fail the
		 * tracker here with DATA_TRANSFER_ERROR status.
		 *
		 * nvme_qpair_manual_complete_tracker must not be called
		 * with the qpair lock held.
		 */
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
		    NVME_SC_DATA_TRANSFER_ERROR, 1 /* do not retry */, TRUE);
		mtx_lock(&qpair->lock);
	}
}

void
nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{

	mtx_lock(&qpair->lock);
	_nvme_qpair_submit_request(qpair, req);
	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_enable(struct nvme_qpair *qpair)
{

	qpair->is_enabled = TRUE;
}

void
nvme_qpair_reset(struct nvme_qpair *qpair)
{

	qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;

	/*
	 * First time through the completion queue, HW will set the phase
	 * bit on completions to 1.  So set this to 1 here, indicating
	 * we're looking for a 1 to know which entries have completed.
	 * We'll toggle the bit each time the completion queue rolls over.
	 */
	qpair->phase = 1;

	memset(qpair->cmd, 0,
	    qpair->num_entries * sizeof(struct nvme_command));
	memset(qpair->cpl, 0,
	    qpair->num_entries * sizeof(struct nvme_completion));
}

void
nvme_admin_qpair_enable(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;
	struct nvme_tracker	*tr_temp;

	/*
	 * Manually abort each outstanding admin command.  Do not retry
	 * admin commands found here, since they will be left over from
	 * a controller reset and it's likely the context in which the
	 * command was issued no longer applies.
	 */
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		nvme_printf(qpair->ctrlr,
		    "aborting outstanding admin command\n");
		nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, TRUE);
	}

	nvme_qpair_enable(qpair);
}

void
nvme_io_qpair_enable(struct nvme_qpair *qpair)
{
	STAILQ_HEAD(, nvme_request)	temp;
	struct nvme_tracker		*tr;
	struct nvme_tracker		*tr_temp;
	struct nvme_request		*req;

	/*
	 * Manually abort each outstanding I/O.  This normally results in a
	 * retry, unless the retry count on the associated request has
	 * reached its limit.
	 */
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n");
		nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, 0, TRUE);
	}

	mtx_lock(&qpair->lock);

	nvme_qpair_enable(qpair);

	STAILQ_INIT(&temp);
	STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request);

	while (!STAILQ_EMPTY(&temp)) {
		req = STAILQ_FIRST(&temp);
		STAILQ_REMOVE_HEAD(&temp, stailq);
		nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n");
		nvme_qpair_print_command(qpair, &req->cmd);
		_nvme_qpair_submit_request(qpair, req);
	}

	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_disable(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	qpair->is_enabled = FALSE;
	mtx_lock(&qpair->lock);
	TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq)
		callout_stop(&tr->timer);
	mtx_unlock(&qpair->lock);
}

void
nvme_admin_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
	nvme_admin_qpair_abort_aers(qpair);
}

void
nvme_io_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
}

void
nvme_qpair_fail(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;
	struct nvme_request	*req;

	mtx_lock(&qpair->lock);

	while (!STAILQ_EMPTY(&qpair->queued_req)) {
		req = STAILQ_FIRST(&qpair->queued_req);
		STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
		nvme_printf(qpair->ctrlr, "failing queued i/o\n");
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, TRUE);
		mtx_lock(&qpair->lock);
	}

	/* Manually abort each outstanding I/O. */
	while (!TAILQ_EMPTY(&qpair->outstanding_tr)) {
		tr = TAILQ_FIRST(&qpair->outstanding_tr);
		/*
		 * Do not remove the tracker.  The abort_tracker path will
		 * do that for us.
		 */
		nvme_printf(qpair->ctrlr, "failing outstanding i/o\n");
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, TRUE);
		mtx_lock(&qpair->lock);
	}

	mtx_unlock(&qpair->lock);
}