/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>

#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static void	_nvme_qpair_submit_request(struct nvme_qpair *qpair,
		    struct nvme_request *req);
static void	nvme_qpair_destroy(struct nvme_qpair *qpair);

struct nvme_opcode_string {

	uint16_t	opc;
	const char *	str;
};

static struct nvme_opcode_string admin_opcode[] = {
	{ NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" },
	{ NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" },
	{ NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" },
	{ NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" },
	{ NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" },
	{ NVME_OPC_IDENTIFY, "IDENTIFY" },
	{ NVME_OPC_ABORT, "ABORT" },
	{ NVME_OPC_SET_FEATURES, "SET FEATURES" },
	{ NVME_OPC_GET_FEATURES, "GET FEATURES" },
	{ NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" },
	{ NVME_OPC_FIRMWARE_ACTIVATE, "FIRMWARE ACTIVATE" },
	{ NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" },
	{ NVME_OPC_DEVICE_SELF_TEST, "DEVICE SELF-TEST" },
	{ NVME_OPC_NAMESPACE_ATTACHMENT, "NAMESPACE ATTACHMENT" },
	{ NVME_OPC_KEEP_ALIVE, "KEEP ALIVE" },
	{ NVME_OPC_DIRECTIVE_SEND, "DIRECTIVE SEND" },
	{ NVME_OPC_DIRECTIVE_RECEIVE, "DIRECTIVE RECEIVE" },
	{ NVME_OPC_VIRTUALIZATION_MANAGEMENT, "VIRTUALIZATION MANAGEMENT" },
	{ NVME_OPC_NVME_MI_SEND, "NVME-MI SEND" },
	{ NVME_OPC_NVME_MI_RECEIVE, "NVME-MI RECEIVE" },
	{ NVME_OPC_DOORBELL_BUFFER_CONFIG, "DOORBELL BUFFER CONFIG" },
	{ NVME_OPC_FORMAT_NVM, "FORMAT NVM" },
	{ NVME_OPC_SECURITY_SEND, "SECURITY SEND" },
	{ NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" },
	{ NVME_OPC_SANITIZE, "SANITIZE" },
	{ 0xFFFF, "ADMIN COMMAND" }
};

static struct nvme_opcode_string io_opcode[] = {
	{ NVME_OPC_FLUSH, "FLUSH" },
	{ NVME_OPC_WRITE, "WRITE" },
	{ NVME_OPC_READ, "READ" },
	{ NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" },
	{ NVME_OPC_COMPARE, "COMPARE" },
	{ NVME_OPC_WRITE_ZEROES, "WRITE ZEROES" },
	{ NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" },
	{ NVME_OPC_RESERVATION_REGISTER, "RESERVATION REGISTER" },
	{ NVME_OPC_RESERVATION_REPORT, "RESERVATION REPORT" },
	{ NVME_OPC_RESERVATION_ACQUIRE, "RESERVATION ACQUIRE" },
	{ NVME_OPC_RESERVATION_RELEASE, "RESERVATION RELEASE" },
	{ 0xFFFF, "IO COMMAND" }
};

static const char *
get_admin_opcode_string(uint16_t opc)
{
	struct nvme_opcode_string *entry;

	entry = admin_opcode;

	while (entry->opc != 0xFFFF) {
		if (entry->opc == opc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}

static const char *
get_io_opcode_string(uint16_t opc)
{
	struct nvme_opcode_string *entry;

	entry = io_opcode;

	while (entry->opc != 0xFFFF) {
		if (entry->opc == opc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}

static void
nvme_admin_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

	nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x "
	    "cdw10:%08x cdw11:%08x\n",
	    get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid,
	    le32toh(cmd->nsid), le32toh(cmd->cdw10), le32toh(cmd->cdw11));
}

static void
nvme_io_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

	switch (cmd->opc) {
	case NVME_OPC_WRITE:
	case NVME_OPC_READ:
	case NVME_OPC_WRITE_UNCORRECTABLE:
	case NVME_OPC_COMPARE:
	case NVME_OPC_WRITE_ZEROES:
		nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d "
		    "lba:%llu len:%d\n",
		    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid,
		    le32toh(cmd->nsid),
		    ((unsigned long long)le32toh(cmd->cdw11) << 32) +
		    le32toh(cmd->cdw10),
		    (le32toh(cmd->cdw12) & 0xFFFF) + 1);
		break;
	case NVME_OPC_FLUSH:
	case NVME_OPC_DATASET_MANAGEMENT:
	case NVME_OPC_RESERVATION_REGISTER:
	case NVME_OPC_RESERVATION_REPORT:
	case NVME_OPC_RESERVATION_ACQUIRE:
	case NVME_OPC_RESERVATION_RELEASE:
		nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n",
		    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid,
		    le32toh(cmd->nsid));
		break;
	default:
		nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n",
		    get_io_opcode_string(cmd->opc), cmd->opc, qpair->id,
		    cmd->cid, le32toh(cmd->nsid));
		break;
	}
}

static void
nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd)
{
	if (qpair->id == 0)
		nvme_admin_qpair_print_command(qpair, cmd);
	else
		nvme_io_qpair_print_command(qpair, cmd);
}

struct nvme_status_string {

	uint16_t	sc;
	const char *	str;
};

static struct nvme_status_string generic_status[] = {
	{ NVME_SC_SUCCESS, "SUCCESS" },
	{ NVME_SC_INVALID_OPCODE, "INVALID OPCODE" },
	{ NVME_SC_INVALID_FIELD, "INVALID_FIELD" },
	{ NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" },
	{ NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" },
	{ NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" },
	{ NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" },
	{ NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" },
	{ NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" },
	{ NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" },
	{ NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" },
	{ NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" },
	{ NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" },
	{ NVME_SC_INVALID_SGL_SEGMENT_DESCR, "INVALID SGL SEGMENT DESCRIPTOR" },
	{ NVME_SC_INVALID_NUMBER_OF_SGL_DESCR, "INVALID NUMBER OF SGL DESCRIPTORS" },
	{ NVME_SC_DATA_SGL_LENGTH_INVALID, "DATA SGL LENGTH INVALID" },
	{ NVME_SC_METADATA_SGL_LENGTH_INVALID, "METADATA SGL LENGTH INVALID" },
	{ NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID, "SGL DESCRIPTOR TYPE INVALID" },
	{ NVME_SC_INVALID_USE_OF_CMB, "INVALID USE OF CONTROLLER MEMORY BUFFER" },
	{ NVME_SC_PRP_OFFET_INVALID, "PRP OFFSET INVALID" },
	{ NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED, "ATOMIC WRITE UNIT EXCEEDED" },
	{ NVME_SC_OPERATION_DENIED, "OPERATION DENIED" },
	{ NVME_SC_SGL_OFFSET_INVALID, "SGL OFFSET INVALID" },
	{ NVME_SC_HOST_ID_INCONSISTENT_FORMAT, "HOST IDENTIFIER INCONSISTENT FORMAT" },
	{ NVME_SC_KEEP_ALIVE_TIMEOUT_EXPIRED, "KEEP ALIVE TIMEOUT EXPIRED" },
	{ NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID, "KEEP ALIVE TIMEOUT INVALID" },
	{ NVME_SC_ABORTED_DUE_TO_PREEMPT, "COMMAND ABORTED DUE TO PREEMPT AND ABORT" },
	{ NVME_SC_SANITIZE_FAILED, "SANITIZE FAILED" },
	{ NVME_SC_SANITIZE_IN_PROGRESS, "SANITIZE IN PROGRESS" },
	{ NVME_SC_SGL_DATA_BLOCK_GRAN_INVALID, "SGL_DATA_BLOCK_GRANULARITY_INVALID" },
	{ NVME_SC_NOT_SUPPORTED_IN_CMB, "COMMAND NOT SUPPORTED FOR QUEUE IN CMB" },

	{ NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" },
	{ NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" },
	{ NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" },
	{ NVME_SC_RESERVATION_CONFLICT, "RESERVATION CONFLICT" },
	{ NVME_SC_FORMAT_IN_PROGRESS, "FORMAT IN PROGRESS" },
	{ 0xFFFF, "GENERIC" }
};

static struct nvme_status_string command_specific_status[] = {
	{ NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" },
	{ NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" },
	{ NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" },
	{ NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" },
	{ NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" },
	{ NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" },
	{ NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" },
	{ NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" },
	{ NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" },
	{ NVME_SC_INVALID_FORMAT, "INVALID FORMAT" },
	{ NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" },
	{ NVME_SC_INVALID_QUEUE_DELETION, "INVALID QUEUE DELETION" },
	{ NVME_SC_FEATURE_NOT_SAVEABLE, "FEATURE IDENTIFIER NOT SAVEABLE" },
	{ NVME_SC_FEATURE_NOT_CHANGEABLE, "FEATURE NOT CHANGEABLE" },
	{ NVME_SC_FEATURE_NOT_NS_SPECIFIC, "FEATURE NOT NAMESPACE SPECIFIC" },
	{ NVME_SC_FW_ACT_REQUIRES_NVMS_RESET, "FIRMWARE ACTIVATION REQUIRES NVM SUBSYSTEM RESET" },
	{ NVME_SC_FW_ACT_REQUIRES_RESET, "FIRMWARE ACTIVATION REQUIRES RESET" },
	{ NVME_SC_FW_ACT_REQUIRES_TIME, "FIRMWARE ACTIVATION REQUIRES MAXIMUM TIME VIOLATION" },
	{ NVME_SC_FW_ACT_PROHIBITED, "FIRMWARE ACTIVATION PROHIBITED" },
	{ NVME_SC_OVERLAPPING_RANGE, "OVERLAPPING RANGE" },
	{ NVME_SC_NS_INSUFFICIENT_CAPACITY, "NAMESPACE INSUFFICIENT CAPACITY" },
	{ NVME_SC_NS_ID_UNAVAILABLE, "NAMESPACE IDENTIFIER UNAVAILABLE" },
	{ NVME_SC_NS_ALREADY_ATTACHED, "NAMESPACE ALREADY ATTACHED" },
	{ NVME_SC_NS_IS_PRIVATE, "NAMESPACE IS PRIVATE" },
	{ NVME_SC_NS_NOT_ATTACHED, "NS NOT ATTACHED" },
	{ NVME_SC_THIN_PROV_NOT_SUPPORTED, "THIN PROVISIONING NOT SUPPORTED" },
	{ NVME_SC_CTRLR_LIST_INVALID, "CONTROLLER LIST INVALID" },
	{ NVME_SC_SELT_TEST_IN_PROGRESS, "DEVICE SELF-TEST IN PROGRESS" },
	{ NVME_SC_BOOT_PART_WRITE_PROHIB, "BOOT PARTITION WRITE PROHIBITED" },
	{ NVME_SC_INVALID_CTRLR_ID, "INVALID CONTROLLER IDENTIFIER" },
	{ NVME_SC_INVALID_SEC_CTRLR_STATE, "INVALID SECONDARY CONTROLLER STATE" },
	{ NVME_SC_INVALID_NUM_OF_CTRLR_RESRC, "INVALID NUMBER OF CONTROLLER RESOURCES" },
	{ NVME_SC_INVALID_RESOURCE_ID, "INVALID RESOURCE IDENTIFIER" },

	{ NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" },
	{ NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" },
	{ NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" },
	{ 0xFFFF, "COMMAND SPECIFIC" }
};

static struct nvme_status_string media_error_status[] = {
	{ NVME_SC_WRITE_FAULTS, "WRITE FAULTS" },
	{ NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" },
	{ NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" },
	{ NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" },
	{ NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" },
	{ NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" },
	{ NVME_SC_ACCESS_DENIED, "ACCESS DENIED" },
	{ NVME_SC_DEALLOCATED_OR_UNWRITTEN, "DEALLOCATED OR UNWRITTEN LOGICAL BLOCK" },
	{ 0xFFFF, "MEDIA ERROR" }
};
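
/*
 * Translate an (SCT, SC) pair into a human readable string.  As with the
 * opcode tables above, each status table is terminated by a sentinel entry
 * (0xFFFF) whose string doubles as the fallback description when no
 * specific entry matches.
 */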
static const char *
get_status_string(uint16_t sct, uint16_t sc)
{
	struct nvme_status_string *entry;

	switch (sct) {
	case NVME_SCT_GENERIC:
		entry = generic_status;
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		entry = command_specific_status;
		break;
	case NVME_SCT_MEDIA_ERROR:
		entry = media_error_status;
		break;
	case NVME_SCT_VENDOR_SPECIFIC:
		return ("VENDOR SPECIFIC");
	default:
		return ("RESERVED");
	}

	while (entry->sc != 0xFFFF) {
		if (entry->sc == sc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}

static void
nvme_qpair_print_completion(struct nvme_qpair *qpair,
    struct nvme_completion *cpl)
{
	uint16_t sct, sc;

	sct = NVME_STATUS_GET_SCT(cpl->status);
	sc = NVME_STATUS_GET_SC(cpl->status);

	nvme_printf(qpair->ctrlr, "%s (%02x/%02x) sqid:%d cid:%d cdw0:%x\n",
	    get_status_string(sct, sc), sct, sc, cpl->sqid, cpl->cid,
	    cpl->cdw0);
}

static boolean_t
nvme_completion_is_retry(const struct nvme_completion *cpl)
{
	uint8_t sct, sc, dnr;

	sct = NVME_STATUS_GET_SCT(cpl->status);
	sc = NVME_STATUS_GET_SC(cpl->status);
	dnr = NVME_STATUS_GET_DNR(cpl->status);

	/*
	 * TODO: spec is not clear how commands that are aborted due
	 * to TLER will be marked.  So for now, it seems
	 * NAMESPACE_NOT_READY is the only case where we should
	 * look at the DNR bit.  Requests failed with ABORTED_BY_REQUEST
	 * set the DNR bit correctly since the driver controls that.
	 */
	switch (sct) {
	case NVME_SCT_GENERIC:
		switch (sc) {
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_NAMESPACE_NOT_READY:
			if (dnr)
				return (0);
			else
				return (1);
		case NVME_SC_INVALID_OPCODE:
		case NVME_SC_INVALID_FIELD:
		case NVME_SC_COMMAND_ID_CONFLICT:
		case NVME_SC_DATA_TRANSFER_ERROR:
		case NVME_SC_ABORTED_POWER_LOSS:
		case NVME_SC_INTERNAL_DEVICE_ERROR:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
		case NVME_SC_COMMAND_SEQUENCE_ERROR:
		case NVME_SC_LBA_OUT_OF_RANGE:
		case NVME_SC_CAPACITY_EXCEEDED:
		default:
			return (0);
		}
	case NVME_SCT_COMMAND_SPECIFIC:
	case NVME_SCT_MEDIA_ERROR:
	case NVME_SCT_VENDOR_SPECIFIC:
	default:
		return (0);
	}
}
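
/*
 * Complete the command associated with a tracker.  A failed command is
 * resubmitted if its status is retryable and it has retries left; otherwise
 * the request callback is invoked, the payload DMA mapping is torn down,
 * the tracker is returned to the free list, and the next queued request (if
 * any) is submitted, unless a controller reset is in progress.
 */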
static void
nvme_qpair_complete_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
    struct nvme_completion *cpl, boolean_t print_on_error)
{
	struct nvme_request	*req;
	boolean_t		retry, error;

	req = tr->req;
	error = nvme_completion_is_error(cpl);
	retry = error && nvme_completion_is_retry(cpl) &&
	    req->retries < nvme_retry_count;

	if (error && print_on_error) {
		nvme_qpair_print_command(qpair, &req->cmd);
		nvme_qpair_print_completion(qpair, cpl);
	}

	qpair->act_tr[cpl->cid] = NULL;

	KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n"));

	if (req->cb_fn && !retry)
		req->cb_fn(req->cb_arg, cpl);

	mtx_lock(&qpair->lock);
	callout_stop(&tr->timer);

	if (retry) {
		req->retries++;
		nvme_qpair_submit_tracker(qpair, tr);
	} else {
		if (req->type != NVME_REQUEST_NULL) {
			bus_dmamap_sync(qpair->dma_tag_payload,
			    tr->payload_dma_map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(qpair->dma_tag_payload,
			    tr->payload_dma_map);
		}

		nvme_free_request(req);
		tr->req = NULL;

		TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq);
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);

		/*
		 * If the controller is in the middle of resetting, don't
		 * try to submit queued requests here - let the reset logic
		 * handle that instead.
		 */
		if (!STAILQ_EMPTY(&qpair->queued_req) &&
		    !qpair->ctrlr->is_resetting) {
			req = STAILQ_FIRST(&qpair->queued_req);
			STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
			_nvme_qpair_submit_request(qpair, req);
		}
	}

	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_manual_complete_tracker(struct nvme_qpair *qpair,
    struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr,
    boolean_t print_on_error)
{
	struct nvme_completion	cpl;

	memset(&cpl, 0, sizeof(cpl));
	cpl.sqid = qpair->id;
	cpl.cid = tr->cid;
	cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT;
	cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;
	cpl.status |= (dnr & NVME_STATUS_DNR_MASK) << NVME_STATUS_DNR_SHIFT;
	nvme_qpair_complete_tracker(qpair, tr, &cpl, print_on_error);
}

void
nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
    struct nvme_request *req, uint32_t sct, uint32_t sc,
    boolean_t print_on_error)
{
	struct nvme_completion	cpl;
	boolean_t		error;

	memset(&cpl, 0, sizeof(cpl));
	cpl.sqid = qpair->id;
	cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT;
	cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;

	error = nvme_completion_is_error(&cpl);

	if (error && print_on_error) {
		nvme_qpair_print_command(qpair, &req->cmd);
		nvme_qpair_print_completion(qpair, &cpl);
	}

	if (req->cb_fn)
		req->cb_fn(req->cb_arg, &cpl);

	nvme_free_request(req);
}
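
/*
 * Drain the completion queue: consume entries until the phase bit of the
 * next entry no longer matches the qpair's current phase, completing the
 * tracker for each one and ringing the CQ head doorbell as we go.  The
 * phase is toggled every time the head index wraps around the queue.
 */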
bool
nvme_qpair_process_completions(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;
	struct nvme_completion	cpl;
	int done = 0;

	qpair->num_intr_handler_calls++;

	if (!qpair->is_enabled)
		/*
		 * qpair is not enabled, likely because a controller reset
		 * is in progress.  Ignore the interrupt - any I/O that was
		 * associated with this interrupt will get retried when the
		 * reset is complete.
		 */
		return (false);

	bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	while (1) {
		cpl = qpair->cpl[qpair->cq_head];

		/* Convert to host endian */
		nvme_completion_swapbytes(&cpl);

		if (NVME_STATUS_GET_P(cpl.status) != qpair->phase)
			break;

		tr = qpair->act_tr[cpl.cid];

		if (tr != NULL) {
			nvme_qpair_complete_tracker(qpair, tr, &cpl, TRUE);
			qpair->sq_head = cpl.sqhd;
			done++;
		} else {
			nvme_printf(qpair->ctrlr,
			    "cpl does not map to outstanding cmd\n");
			/* nvme_dump_completion expects device endianness */
			nvme_dump_completion(&qpair->cpl[qpair->cq_head]);
			KASSERT(0, ("received completion for unknown cmd\n"));
		}

		if (++qpair->cq_head == qpair->num_entries) {
			qpair->cq_head = 0;
			qpair->phase = !qpair->phase;
		}

		nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].cq_hdbl,
		    qpair->cq_head);
	}
	return (done != 0);
}

static void
nvme_qpair_msix_handler(void *arg)
{
	struct nvme_qpair *qpair = arg;

	nvme_qpair_process_completions(qpair);
}
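
/*
 * Construct a qpair: set up its MSI-X interrupt (when enabled), create the
 * DMA tags, and carve a single physically contiguous allocation into the
 * submission queue, the completion queue, and the per-tracker PRP lists.
 * The trackers themselves are then allocated and placed on the free list.
 */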
int
nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
    uint16_t vector, uint32_t num_entries, uint32_t num_trackers,
    struct nvme_controller *ctrlr)
{
	struct nvme_tracker	*tr;
	size_t			cmdsz, cplsz, prpsz, allocsz, prpmemsz;
	uint64_t		queuemem_phys, prpmem_phys, list_phys;
	uint8_t			*queuemem, *prpmem, *prp_list;
	int			i, err;

	qpair->id = id;
	qpair->vector = vector;
	qpair->num_entries = num_entries;
	qpair->num_trackers = num_trackers;
	qpair->ctrlr = ctrlr;

	if (ctrlr->msix_enabled) {

		/*
		 * MSI-X vector resource IDs start at 1, so we add one to
		 * the queue's vector to get the corresponding rid to use.
		 */
		qpair->rid = vector + 1;

		qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
		    &qpair->rid, RF_ACTIVE);
		bus_setup_intr(ctrlr->dev, qpair->res,
		    INTR_TYPE_MISC | INTR_MPSAFE, NULL,
		    nvme_qpair_msix_handler, qpair, &qpair->tag);
		if (id == 0) {
			bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
			    "admin");
		} else {
			bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
			    "io%d", id - 1);
		}
	}

	mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF);

	/* Note: NVMe PRP format is restricted to 4-byte alignment. */
	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    4, PAGE_SIZE, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, NVME_MAX_XFER_SIZE,
	    (NVME_MAX_XFER_SIZE/PAGE_SIZE)+1, PAGE_SIZE, 0,
	    NULL, NULL, &qpair->dma_tag_payload);
	if (err != 0) {
		nvme_printf(ctrlr, "payload tag create failed %d\n", err);
		goto out;
	}

	/*
	 * Each component must be page aligned, and individual PRP lists
	 * cannot cross a page boundary.
	 */
	cmdsz = qpair->num_entries * sizeof(struct nvme_command);
	cmdsz = roundup2(cmdsz, PAGE_SIZE);
	cplsz = qpair->num_entries * sizeof(struct nvme_completion);
	cplsz = roundup2(cplsz, PAGE_SIZE);
	prpsz = sizeof(uint64_t) * NVME_MAX_PRP_LIST_ENTRIES;
	prpmemsz = qpair->num_trackers * prpsz;
	allocsz = cmdsz + cplsz + prpmemsz;

	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    allocsz, 1, allocsz, 0, NULL, NULL, &qpair->dma_tag);
	if (err != 0) {
		nvme_printf(ctrlr, "tag create failed %d\n", err);
		goto out;
	}

	if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem,
	    BUS_DMA_NOWAIT, &qpair->queuemem_map)) {
		nvme_printf(ctrlr, "failed to alloc qpair memory\n");
		goto out;
	}

	if (bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map,
	    queuemem, allocsz, nvme_single_map, &queuemem_phys, 0) != 0) {
		nvme_printf(ctrlr, "failed to load qpair memory\n");
		goto out;
	}

	qpair->num_cmds = 0;
	qpair->num_intr_handler_calls = 0;
	qpair->cmd = (struct nvme_command *)queuemem;
	qpair->cpl = (struct nvme_completion *)(queuemem + cmdsz);
	prpmem = (uint8_t *)(queuemem + cmdsz + cplsz);
	qpair->cmd_bus_addr = queuemem_phys;
	qpair->cpl_bus_addr = queuemem_phys + cmdsz;
	prpmem_phys = queuemem_phys + cmdsz + cplsz;

	qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[id].sq_tdbl);
	qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[id].cq_hdbl);

	TAILQ_INIT(&qpair->free_tr);
	TAILQ_INIT(&qpair->outstanding_tr);
	STAILQ_INIT(&qpair->queued_req);

	list_phys = prpmem_phys;
	prp_list = prpmem;
	for (i = 0; i < qpair->num_trackers; i++) {

		if (list_phys + prpsz > prpmem_phys + prpmemsz) {
			qpair->num_trackers = i;
			break;
		}

		/*
		 * Make sure that the PRP list for this tracker doesn't
		 * overflow to another page.
		 */
		if (trunc_page(list_phys) !=
		    trunc_page(list_phys + prpsz - 1)) {
			list_phys = roundup2(list_phys, PAGE_SIZE);
			prp_list =
			    (uint8_t *)roundup2((uintptr_t)prp_list, PAGE_SIZE);
		}

		tr = malloc(sizeof(*tr), M_NVME, M_ZERO | M_WAITOK);
		bus_dmamap_create(qpair->dma_tag_payload, 0,
		    &tr->payload_dma_map);
		callout_init(&tr->timer, 1);
		tr->cid = i;
		tr->qpair = qpair;
		tr->prp = (uint64_t *)prp_list;
		tr->prp_bus_addr = list_phys;
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
		list_phys += prpsz;
		prp_list += prpsz;
	}

	if (qpair->num_trackers == 0) {
		nvme_printf(ctrlr, "failed to allocate enough trackers\n");
		goto out;
	}

	qpair->act_tr = malloc(sizeof(struct nvme_tracker *) *
	    qpair->num_entries, M_NVME, M_ZERO | M_WAITOK);
	return (0);

out:
	nvme_qpair_destroy(qpair);
	return (ENOMEM);
}

static void
nvme_qpair_destroy(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	if (qpair->tag)
		bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag);

	if (mtx_initialized(&qpair->lock))
		mtx_destroy(&qpair->lock);

	if (qpair->res)
		bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(qpair->res), qpair->res);

	if (qpair->cmd != NULL) {
		bus_dmamap_unload(qpair->dma_tag, qpair->queuemem_map);
		bus_dmamem_free(qpair->dma_tag, qpair->cmd,
		    qpair->queuemem_map);
	}

	if (qpair->act_tr)
		free(qpair->act_tr, M_NVME);

	while (!TAILQ_EMPTY(&qpair->free_tr)) {
		tr = TAILQ_FIRST(&qpair->free_tr);
		TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
		bus_dmamap_destroy(qpair->dma_tag_payload,
		    tr->payload_dma_map);
		free(tr, M_NVME);
	}

	if (qpair->dma_tag)
		bus_dma_tag_destroy(qpair->dma_tag);

	if (qpair->dma_tag_payload)
		bus_dma_tag_destroy(qpair->dma_tag_payload);
}

static void
nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	tr = TAILQ_FIRST(&qpair->outstanding_tr);
	while (tr != NULL) {
		if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) {
			nvme_qpair_manual_complete_tracker(qpair, tr,
			    NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0,
			    FALSE);
			tr = TAILQ_FIRST(&qpair->outstanding_tr);
		} else {
			tr = TAILQ_NEXT(tr, tailq);
		}
	}
}

void
nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
{

	nvme_admin_qpair_abort_aers(qpair);
	nvme_qpair_destroy(qpair);
}

void
nvme_io_qpair_destroy(struct nvme_qpair *qpair)
{

	nvme_qpair_destroy(qpair);
}
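
/*
 * Completion callback for the ABORT admin command issued from
 * nvme_timeout() below.
 */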
static void
nvme_abort_complete(void *arg, const struct nvme_completion *status)
{
	struct nvme_tracker	*tr = arg;

	/*
	 * If cdw0 == 1, the controller was not able to abort the command
	 * we requested.  We still need to check the active tracker array,
	 * to cover the race where the I/O timed out at the same time the
	 * controller was completing it.
	 */
	if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) {
		/*
		 * An I/O has timed out, and the controller was unable to
		 * abort it for some reason.  Construct a fake completion
		 * status, and then complete the I/O's tracker manually.
		 */
		nvme_printf(tr->qpair->ctrlr,
		    "abort command failed, aborting command manually\n");
		nvme_qpair_manual_complete_tracker(tr->qpair, tr,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 0, TRUE);
	}
}

static void
nvme_timeout(void *arg)
{
	struct nvme_tracker	*tr = arg;
	struct nvme_qpair	*qpair = tr->qpair;
	struct nvme_controller	*ctrlr = qpair->ctrlr;
	uint32_t		csts;
	uint8_t			cfs;

	/*
	 * Read csts to get the value of cfs - controller fatal status.
	 * If there is no fatal status, try to call the completion routine;
	 * if that completes any transactions, report a missed interrupt and
	 * return (this may need to be rate limited).  Otherwise, if aborts
	 * are enabled and the controller is not reporting fatal status,
	 * abort the command.  Otherwise, just reset the controller and hope
	 * for the best.
	 */
	csts = nvme_mmio_read_4(ctrlr, csts);
	cfs = (csts >> NVME_CSTS_REG_CFS_SHIFT) & NVME_CSTS_REG_CFS_MASK;
	if (cfs == 0 && nvme_qpair_process_completions(qpair)) {
		nvme_printf(ctrlr, "Missing interrupt\n");
		return;
	}
	if (ctrlr->enable_aborts && cfs == 0) {
		nvme_printf(ctrlr, "Aborting command due to a timeout.\n");
		nvme_ctrlr_cmd_abort(ctrlr, tr->cid, qpair->id,
		    nvme_abort_complete, tr);
	} else {
		nvme_printf(ctrlr, "Resetting controller due to a timeout%s.\n",
		    cfs ? " and fatal error status" : "");
		nvme_ctrlr_reset(ctrlr);
	}
}

void
nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
{
	struct nvme_request	*req;
	struct nvme_controller	*ctrlr;

	mtx_assert(&qpair->lock, MA_OWNED);

	req = tr->req;
	req->cmd.cid = tr->cid;
	qpair->act_tr[tr->cid] = tr;
	ctrlr = qpair->ctrlr;

	if (req->timeout)
		callout_reset_curcpu(&tr->timer, ctrlr->timeout_period * hz,
		    nvme_timeout, tr);

	/* Copy the command from the tracker to the submission queue. */
	memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd));

	if (++qpair->sq_tail == qpair->num_entries)
		qpair->sq_tail = 0;

	bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#ifndef __powerpc__
	/*
	 * powerpc's bus_dmamap_sync() already includes a heavyweight sync,
	 * but no other archs do.
	 */
	wmb();
#endif

	nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].sq_tdbl,
	    qpair->sq_tail);

	qpair->num_cmds++;
}
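
/*
 * bus_dma callback that fills in the command's PRP entries once the payload
 * has been mapped: prp1 gets the first segment, and prp2 gets either the
 * second segment or, when more than two segments are needed, the physical
 * address of the tracker's PRP list holding the rest.
 */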
static void
nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct nvme_tracker	*tr = arg;
	uint32_t		cur_nseg;

	/*
	 * If the mapping operation failed, return immediately.  The caller
	 * is responsible for detecting the error status and failing the
	 * tracker manually.
	 */
	if (error != 0) {
		nvme_printf(tr->qpair->ctrlr,
		    "nvme_payload_map err %d\n", error);
		return;
	}

	/*
	 * Note that we specified PAGE_SIZE for alignment and max
	 * segment size when creating the bus dma tags.  So here
	 * we can safely just transfer each segment to its
	 * associated PRP entry.
	 */
	tr->req->cmd.prp1 = htole64(seg[0].ds_addr);

	if (nseg == 2) {
		tr->req->cmd.prp2 = htole64(seg[1].ds_addr);
	} else if (nseg > 2) {
		cur_nseg = 1;
		tr->req->cmd.prp2 = htole64((uint64_t)tr->prp_bus_addr);
		while (cur_nseg < nseg) {
			tr->prp[cur_nseg-1] =
			    htole64((uint64_t)seg[cur_nseg].ds_addr);
			cur_nseg++;
		}
	} else {
		/*
		 * prp2 should not be used by the controller
		 * since there is only one segment, but set
		 * to 0 just to be safe.
		 */
		tr->req->cmd.prp2 = 0;
	}

	bus_dmamap_sync(tr->qpair->dma_tag_payload, tr->payload_dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	nvme_qpair_submit_tracker(tr->qpair, tr);
}
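
/*
 * Submit a request on the qpair.  Must be called with the qpair lock held;
 * nvme_qpair_submit_request() below is the wrapper that takes the lock.
 */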
static void
_nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{
	struct nvme_tracker	*tr;
	int			err = 0;

	mtx_assert(&qpair->lock, MA_OWNED);

	tr = TAILQ_FIRST(&qpair->free_tr);
	req->qpair = qpair;

	if (tr == NULL || !qpair->is_enabled) {
		/*
		 * No tracker is available, or the qpair is disabled due to
		 * an in-progress controller-level reset or controller
		 * failure.
		 */

		if (qpair->ctrlr->is_failed) {
			/*
			 * The controller has failed.  Post the request to a
			 * task where it will be aborted, so that we do not
			 * invoke the request's callback in the context
			 * of the submission.
			 */
			nvme_ctrlr_post_failed_request(qpair->ctrlr, req);
		} else {
			/*
			 * Put the request on the qpair's request queue to be
			 * processed when a tracker frees up via a command
			 * completion or when the controller reset is
			 * completed.
			 */
			STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
		}
		return;
	}

	TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
	TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq);
	tr->req = req;

	switch (req->type) {
	case NVME_REQUEST_VADDR:
		KASSERT(req->payload_size <= qpair->ctrlr->max_xfer_size,
		    ("payload_size (%d) exceeds max_xfer_size (%d)\n",
		    req->payload_size, qpair->ctrlr->max_xfer_size));
		err = bus_dmamap_load(tr->qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.payload, req->payload_size,
		    nvme_payload_map, tr, 0);
		if (err != 0)
			nvme_printf(qpair->ctrlr,
			    "bus_dmamap_load returned 0x%x!\n", err);
		break;
	case NVME_REQUEST_NULL:
		nvme_qpair_submit_tracker(tr->qpair, tr);
		break;
	case NVME_REQUEST_BIO:
		KASSERT(req->u.bio->bio_bcount <= qpair->ctrlr->max_xfer_size,
		    ("bio->bio_bcount (%jd) exceeds max_xfer_size (%d)\n",
		    (intmax_t)req->u.bio->bio_bcount,
		    qpair->ctrlr->max_xfer_size));
		err = bus_dmamap_load_bio(tr->qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.bio, nvme_payload_map, tr, 0);
		if (err != 0)
			nvme_printf(qpair->ctrlr,
			    "bus_dmamap_load_bio returned 0x%x!\n", err);
		break;
	case NVME_REQUEST_CCB:
		err = bus_dmamap_load_ccb(tr->qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.payload,
		    nvme_payload_map, tr, 0);
		if (err != 0)
			nvme_printf(qpair->ctrlr,
			    "bus_dmamap_load_ccb returned 0x%x!\n", err);
		break;
	default:
		panic("unknown nvme request type 0x%x\n", req->type);
		break;
	}

	if (err != 0) {
		/*
		 * The dmamap operation failed, so we manually fail the
		 * tracker here with DATA_TRANSFER_ERROR status.
		 *
		 * nvme_qpair_manual_complete_tracker must not be called
		 * with the qpair lock held.
		 */
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
		    NVME_SC_DATA_TRANSFER_ERROR, 1 /* do not retry */, TRUE);
		mtx_lock(&qpair->lock);
	}
}

void
nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{

	mtx_lock(&qpair->lock);
	_nvme_qpair_submit_request(qpair, req);
	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_enable(struct nvme_qpair *qpair)
{

	qpair->is_enabled = TRUE;
}

void
nvme_qpair_reset(struct nvme_qpair *qpair)
{

	qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;

	/*
	 * First time through the completion queue, HW will set the phase
	 * bit on completions to 1.  So set this to 1 here, indicating
	 * we're looking for a 1 to know which entries have completed.
	 * We'll toggle the bit each time the completion queue rolls over.
	 */
	qpair->phase = 1;

	memset(qpair->cmd, 0,
	    qpair->num_entries * sizeof(struct nvme_command));
	memset(qpair->cpl, 0,
	    qpair->num_entries * sizeof(struct nvme_completion));
}

void
nvme_admin_qpair_enable(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;
	struct nvme_tracker	*tr_temp;

	/*
	 * Manually abort each outstanding admin command.  Do not retry
	 * admin commands found here, since they will be left over from
	 * a controller reset and it's likely the context in which the
	 * command was issued no longer applies.
	 */
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		nvme_printf(qpair->ctrlr,
		    "aborting outstanding admin command\n");
		nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, TRUE);
	}

	nvme_qpair_enable(qpair);
}

void
nvme_io_qpair_enable(struct nvme_qpair *qpair)
{
	STAILQ_HEAD(, nvme_request)	temp;
	struct nvme_tracker		*tr;
	struct nvme_tracker		*tr_temp;
	struct nvme_request		*req;

	/*
	 * Manually abort each outstanding I/O.  This normally results in a
	 * retry, unless the retry count on the associated request has
	 * reached its limit.
	 */
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n");
		nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, 0, TRUE);
	}

	mtx_lock(&qpair->lock);

	nvme_qpair_enable(qpair);

	STAILQ_INIT(&temp);
	STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request);

	while (!STAILQ_EMPTY(&temp)) {
		req = STAILQ_FIRST(&temp);
		STAILQ_REMOVE_HEAD(&temp, stailq);
		nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n");
		nvme_qpair_print_command(qpair, &req->cmd);
		_nvme_qpair_submit_request(qpair, req);
	}

	mtx_unlock(&qpair->lock);
}
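
/*
 * Mark the qpair disabled and stop the timeout callouts for all outstanding
 * trackers.  The outstanding commands themselves are completed or retried
 * later by the enable and fail paths.
 */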
static void
nvme_qpair_disable(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	qpair->is_enabled = FALSE;
	mtx_lock(&qpair->lock);
	TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq)
		callout_stop(&tr->timer);
	mtx_unlock(&qpair->lock);
}

void
nvme_admin_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
	nvme_admin_qpair_abort_aers(qpair);
}

void
nvme_io_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
}

void
nvme_qpair_fail(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;
	struct nvme_request	*req;

	if (!mtx_initialized(&qpair->lock))
		return;

	mtx_lock(&qpair->lock);

	while (!STAILQ_EMPTY(&qpair->queued_req)) {
		req = STAILQ_FIRST(&qpair->queued_req);
		STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
		nvme_printf(qpair->ctrlr, "failing queued i/o\n");
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, TRUE);
		mtx_lock(&qpair->lock);
	}

	/* Manually abort each outstanding I/O. */
	while (!TAILQ_EMPTY(&qpair->outstanding_tr)) {
		tr = TAILQ_FIRST(&qpair->outstanding_tr);
		/*
		 * Do not remove the tracker.  The abort_tracker path will
		 * do that for us.
		 */
		nvme_printf(qpair->ctrlr, "failing outstanding i/o\n");
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, TRUE);
		mtx_lock(&qpair->lock);
	}

	mtx_unlock(&qpair->lock);
}