1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (C) 2012-2014 Intel Corporation 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/param.h> 30 #include <sys/bus.h> 31 #include <sys/conf.h> 32 #include <sys/domainset.h> 33 #include <sys/proc.h> 34 35 #include <dev/pci/pcivar.h> 36 37 #include "nvme_private.h" 38 39 typedef enum error_print { ERROR_PRINT_NONE, ERROR_PRINT_NO_RETRY, ERROR_PRINT_ALL } error_print_t; 40 #define DO_NOT_RETRY 1 41 42 static void _nvme_qpair_submit_request(struct nvme_qpair *qpair, 43 struct nvme_request *req); 44 static void nvme_qpair_destroy(struct nvme_qpair *qpair); 45 46 #define DEFAULT_INDEX 256 47 #define DEFAULT_ENTRY(x) [DEFAULT_INDEX] = x 48 #define OPC_ENTRY(x) [NVME_OPC_ ## x] = #x 49 50 static const char *admin_opcode[DEFAULT_INDEX + 1] = { 51 OPC_ENTRY(DELETE_IO_SQ), 52 OPC_ENTRY(CREATE_IO_SQ), 53 OPC_ENTRY(GET_LOG_PAGE), 54 OPC_ENTRY(DELETE_IO_CQ), 55 OPC_ENTRY(CREATE_IO_CQ), 56 OPC_ENTRY(IDENTIFY), 57 OPC_ENTRY(ABORT), 58 OPC_ENTRY(SET_FEATURES), 59 OPC_ENTRY(GET_FEATURES), 60 OPC_ENTRY(ASYNC_EVENT_REQUEST), 61 OPC_ENTRY(NAMESPACE_MANAGEMENT), 62 OPC_ENTRY(FIRMWARE_ACTIVATE), 63 OPC_ENTRY(FIRMWARE_IMAGE_DOWNLOAD), 64 OPC_ENTRY(DEVICE_SELF_TEST), 65 OPC_ENTRY(NAMESPACE_ATTACHMENT), 66 OPC_ENTRY(KEEP_ALIVE), 67 OPC_ENTRY(DIRECTIVE_SEND), 68 OPC_ENTRY(DIRECTIVE_RECEIVE), 69 OPC_ENTRY(VIRTUALIZATION_MANAGEMENT), 70 OPC_ENTRY(NVME_MI_SEND), 71 OPC_ENTRY(NVME_MI_RECEIVE), 72 OPC_ENTRY(CAPACITY_MANAGEMENT), 73 OPC_ENTRY(LOCKDOWN), 74 OPC_ENTRY(DOORBELL_BUFFER_CONFIG), 75 OPC_ENTRY(FABRICS_COMMANDS), 76 OPC_ENTRY(FORMAT_NVM), 77 OPC_ENTRY(SECURITY_SEND), 78 OPC_ENTRY(SECURITY_RECEIVE), 79 OPC_ENTRY(SANITIZE), 80 OPC_ENTRY(GET_LBA_STATUS), 81 DEFAULT_ENTRY("ADMIN COMMAND"), 82 }; 83 84 static const char *io_opcode[DEFAULT_INDEX + 1] = { 85 OPC_ENTRY(FLUSH), 86 OPC_ENTRY(WRITE), 87 OPC_ENTRY(READ), 88 OPC_ENTRY(WRITE_UNCORRECTABLE), 89 OPC_ENTRY(COMPARE), 90 OPC_ENTRY(WRITE_ZEROES), 91 OPC_ENTRY(DATASET_MANAGEMENT), 92 OPC_ENTRY(VERIFY), 93 OPC_ENTRY(RESERVATION_REGISTER), 94 OPC_ENTRY(RESERVATION_REPORT), 95 OPC_ENTRY(RESERVATION_ACQUIRE), 96 OPC_ENTRY(RESERVATION_RELEASE), 97 OPC_ENTRY(COPY), 98 DEFAULT_ENTRY("IO COMMAND"), 99 }; 100 101 static const char * 102 get_opcode_string(const char *op[DEFAULT_INDEX + 1], uint16_t opc) 103 { 104 const char *nm = opc < DEFAULT_INDEX ? op[opc] : op[DEFAULT_INDEX]; 105 106 return (nm != NULL ? nm : op[DEFAULT_INDEX]); 107 } 108 109 static const char * 110 get_admin_opcode_string(uint16_t opc) 111 { 112 return (get_opcode_string(admin_opcode, opc)); 113 } 114 115 static const char * 116 get_io_opcode_string(uint16_t opc) 117 { 118 return (get_opcode_string(io_opcode, opc)); 119 } 120 121 static void 122 nvme_admin_qpair_print_command(struct nvme_qpair *qpair, 123 struct nvme_command *cmd) 124 { 125 126 nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x " 127 "cdw10:%08x cdw11:%08x\n", 128 get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid, 129 le32toh(cmd->nsid), le32toh(cmd->cdw10), le32toh(cmd->cdw11)); 130 } 131 132 static void 133 nvme_io_qpair_print_command(struct nvme_qpair *qpair, 134 struct nvme_command *cmd) 135 { 136 137 switch (cmd->opc) { 138 case NVME_OPC_WRITE: 139 case NVME_OPC_READ: 140 case NVME_OPC_WRITE_UNCORRECTABLE: 141 case NVME_OPC_COMPARE: 142 case NVME_OPC_WRITE_ZEROES: 143 case NVME_OPC_VERIFY: 144 nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d " 145 "lba:%llu len:%d\n", 146 get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid), 147 ((unsigned long long)le32toh(cmd->cdw11) << 32) + le32toh(cmd->cdw10), 148 (le32toh(cmd->cdw12) & 0xFFFF) + 1); 149 break; 150 case NVME_OPC_FLUSH: 151 case NVME_OPC_DATASET_MANAGEMENT: 152 case NVME_OPC_RESERVATION_REGISTER: 153 case NVME_OPC_RESERVATION_REPORT: 154 case NVME_OPC_RESERVATION_ACQUIRE: 155 case NVME_OPC_RESERVATION_RELEASE: 156 nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n", 157 get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid)); 158 break; 159 default: 160 nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n", 161 get_io_opcode_string(cmd->opc), cmd->opc, qpair->id, 162 cmd->cid, le32toh(cmd->nsid)); 163 break; 164 } 165 } 166 167 void 168 nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd) 169 { 170 if (qpair->id == 0) 171 nvme_admin_qpair_print_command(qpair, cmd); 172 else 173 nvme_io_qpair_print_command(qpair, cmd); 174 if (nvme_verbose_cmd_dump) { 175 nvme_printf(qpair->ctrlr, 176 "nsid:%#x rsvd2:%#x rsvd3:%#x mptr:%#jx prp1:%#jx prp2:%#jx\n", 177 cmd->nsid, cmd->rsvd2, cmd->rsvd3, (uintmax_t)cmd->mptr, 178 (uintmax_t)cmd->prp1, (uintmax_t)cmd->prp2); 179 nvme_printf(qpair->ctrlr, 180 "cdw10: %#x cdw11:%#x cdw12:%#x cdw13:%#x cdw14:%#x cdw15:%#x\n", 181 cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14, 182 cmd->cdw15); 183 } 184 } 185 186 struct nvme_status_string { 187 uint16_t sc; 188 const char * str; 189 }; 190 191 static struct nvme_status_string generic_status[] = { 192 { NVME_SC_SUCCESS, "SUCCESS" }, 193 { NVME_SC_INVALID_OPCODE, "INVALID OPCODE" }, 194 { NVME_SC_INVALID_FIELD, "INVALID_FIELD" }, 195 { NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" }, 196 { NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" }, 197 { NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" }, 198 { NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" }, 199 { NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" }, 200 { NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" }, 201 { NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" }, 202 { NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" }, 203 { NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" }, 204 { NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" }, 205 { NVME_SC_INVALID_SGL_SEGMENT_DESCR, "INVALID SGL SEGMENT DESCRIPTOR" }, 206 { NVME_SC_INVALID_NUMBER_OF_SGL_DESCR, "INVALID NUMBER OF SGL DESCRIPTORS" }, 207 { NVME_SC_DATA_SGL_LENGTH_INVALID, "DATA SGL LENGTH INVALID" }, 208 { NVME_SC_METADATA_SGL_LENGTH_INVALID, "METADATA SGL LENGTH INVALID" }, 209 { NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID, "SGL DESCRIPTOR TYPE INVALID" }, 210 { NVME_SC_INVALID_USE_OF_CMB, "INVALID USE OF CONTROLLER MEMORY BUFFER" }, 211 { NVME_SC_PRP_OFFET_INVALID, "PRP OFFET INVALID" }, 212 { NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED, "ATOMIC WRITE UNIT EXCEEDED" }, 213 { NVME_SC_OPERATION_DENIED, "OPERATION DENIED" }, 214 { NVME_SC_SGL_OFFSET_INVALID, "SGL OFFSET INVALID" }, 215 { NVME_SC_HOST_ID_INCONSISTENT_FORMAT, "HOST IDENTIFIER INCONSISTENT FORMAT" }, 216 { NVME_SC_KEEP_ALIVE_TIMEOUT_EXPIRED, "KEEP ALIVE TIMEOUT EXPIRED" }, 217 { NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID, "KEEP ALIVE TIMEOUT INVALID" }, 218 { NVME_SC_ABORTED_DUE_TO_PREEMPT, "COMMAND ABORTED DUE TO PREEMPT AND ABORT" }, 219 { NVME_SC_SANITIZE_FAILED, "SANITIZE FAILED" }, 220 { NVME_SC_SANITIZE_IN_PROGRESS, "SANITIZE IN PROGRESS" }, 221 { NVME_SC_SGL_DATA_BLOCK_GRAN_INVALID, "SGL_DATA_BLOCK_GRANULARITY_INVALID" }, 222 { NVME_SC_NOT_SUPPORTED_IN_CMB, "COMMAND NOT SUPPORTED FOR QUEUE IN CMB" }, 223 { NVME_SC_NAMESPACE_IS_WRITE_PROTECTED, "NAMESPACE IS WRITE PROTECTED" }, 224 { NVME_SC_COMMAND_INTERRUPTED, "COMMAND INTERRUPTED" }, 225 { NVME_SC_TRANSIENT_TRANSPORT_ERROR, "TRANSIENT TRANSPORT ERROR" }, 226 227 { NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" }, 228 { NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" }, 229 { NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" }, 230 { NVME_SC_RESERVATION_CONFLICT, "RESERVATION CONFLICT" }, 231 { NVME_SC_FORMAT_IN_PROGRESS, "FORMAT IN PROGRESS" }, 232 { 0xFFFF, "GENERIC" } 233 }; 234 235 static struct nvme_status_string command_specific_status[] = { 236 { NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" }, 237 { NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" }, 238 { NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" }, 239 { NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" }, 240 { NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" }, 241 { NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" }, 242 { NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" }, 243 { NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" }, 244 { NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" }, 245 { NVME_SC_INVALID_FORMAT, "INVALID FORMAT" }, 246 { NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" }, 247 { NVME_SC_INVALID_QUEUE_DELETION, "INVALID QUEUE DELETION" }, 248 { NVME_SC_FEATURE_NOT_SAVEABLE, "FEATURE IDENTIFIER NOT SAVEABLE" }, 249 { NVME_SC_FEATURE_NOT_CHANGEABLE, "FEATURE NOT CHANGEABLE" }, 250 { NVME_SC_FEATURE_NOT_NS_SPECIFIC, "FEATURE NOT NAMESPACE SPECIFIC" }, 251 { NVME_SC_FW_ACT_REQUIRES_NVMS_RESET, "FIRMWARE ACTIVATION REQUIRES NVM SUBSYSTEM RESET" }, 252 { NVME_SC_FW_ACT_REQUIRES_RESET, "FIRMWARE ACTIVATION REQUIRES RESET" }, 253 { NVME_SC_FW_ACT_REQUIRES_TIME, "FIRMWARE ACTIVATION REQUIRES MAXIMUM TIME VIOLATION" }, 254 { NVME_SC_FW_ACT_PROHIBITED, "FIRMWARE ACTIVATION PROHIBITED" }, 255 { NVME_SC_OVERLAPPING_RANGE, "OVERLAPPING RANGE" }, 256 { NVME_SC_NS_INSUFFICIENT_CAPACITY, "NAMESPACE INSUFFICIENT CAPACITY" }, 257 { NVME_SC_NS_ID_UNAVAILABLE, "NAMESPACE IDENTIFIER UNAVAILABLE" }, 258 { NVME_SC_NS_ALREADY_ATTACHED, "NAMESPACE ALREADY ATTACHED" }, 259 { NVME_SC_NS_IS_PRIVATE, "NAMESPACE IS PRIVATE" }, 260 { NVME_SC_NS_NOT_ATTACHED, "NS NOT ATTACHED" }, 261 { NVME_SC_THIN_PROV_NOT_SUPPORTED, "THIN PROVISIONING NOT SUPPORTED" }, 262 { NVME_SC_CTRLR_LIST_INVALID, "CONTROLLER LIST INVALID" }, 263 { NVME_SC_SELF_TEST_IN_PROGRESS, "DEVICE SELF-TEST IN PROGRESS" }, 264 { NVME_SC_BOOT_PART_WRITE_PROHIB, "BOOT PARTITION WRITE PROHIBITED" }, 265 { NVME_SC_INVALID_CTRLR_ID, "INVALID CONTROLLER IDENTIFIER" }, 266 { NVME_SC_INVALID_SEC_CTRLR_STATE, "INVALID SECONDARY CONTROLLER STATE" }, 267 { NVME_SC_INVALID_NUM_OF_CTRLR_RESRC, "INVALID NUMBER OF CONTROLLER RESOURCES" }, 268 { NVME_SC_INVALID_RESOURCE_ID, "INVALID RESOURCE IDENTIFIER" }, 269 { NVME_SC_SANITIZE_PROHIBITED_WPMRE, "SANITIZE PROHIBITED WRITE PERSISTENT MEMORY REGION ENABLED" }, 270 { NVME_SC_ANA_GROUP_ID_INVALID, "ANA GROUP IDENTIFIED INVALID" }, 271 { NVME_SC_ANA_ATTACH_FAILED, "ANA ATTACH FAILED" }, 272 273 { NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" }, 274 { NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" }, 275 { NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" }, 276 { 0xFFFF, "COMMAND SPECIFIC" } 277 }; 278 279 static struct nvme_status_string media_error_status[] = { 280 { NVME_SC_WRITE_FAULTS, "WRITE FAULTS" }, 281 { NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" }, 282 { NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" }, 283 { NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" }, 284 { NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" }, 285 { NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" }, 286 { NVME_SC_ACCESS_DENIED, "ACCESS DENIED" }, 287 { NVME_SC_DEALLOCATED_OR_UNWRITTEN, "DEALLOCATED OR UNWRITTEN LOGICAL BLOCK" }, 288 { 0xFFFF, "MEDIA ERROR" } 289 }; 290 291 static struct nvme_status_string path_related_status[] = { 292 { NVME_SC_INTERNAL_PATH_ERROR, "INTERNAL PATH ERROR" }, 293 { NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS, "ASYMMETRIC ACCESS PERSISTENT LOSS" }, 294 { NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE, "ASYMMETRIC ACCESS INACCESSIBLE" }, 295 { NVME_SC_ASYMMETRIC_ACCESS_TRANSITION, "ASYMMETRIC ACCESS TRANSITION" }, 296 { NVME_SC_CONTROLLER_PATHING_ERROR, "CONTROLLER PATHING ERROR" }, 297 { NVME_SC_HOST_PATHING_ERROR, "HOST PATHING ERROR" }, 298 { NVME_SC_COMMAND_ABORTED_BY_HOST, "COMMAND ABORTED BY HOST" }, 299 { 0xFFFF, "PATH RELATED" }, 300 }; 301 302 static const char * 303 get_status_string(uint16_t sct, uint16_t sc) 304 { 305 struct nvme_status_string *entry; 306 307 switch (sct) { 308 case NVME_SCT_GENERIC: 309 entry = generic_status; 310 break; 311 case NVME_SCT_COMMAND_SPECIFIC: 312 entry = command_specific_status; 313 break; 314 case NVME_SCT_MEDIA_ERROR: 315 entry = media_error_status; 316 break; 317 case NVME_SCT_PATH_RELATED: 318 entry = path_related_status; 319 break; 320 case NVME_SCT_VENDOR_SPECIFIC: 321 return ("VENDOR SPECIFIC"); 322 default: 323 return ("RESERVED"); 324 } 325 326 while (entry->sc != 0xFFFF) { 327 if (entry->sc == sc) 328 return (entry->str); 329 entry++; 330 } 331 return (entry->str); 332 } 333 334 void 335 nvme_qpair_print_completion(struct nvme_qpair *qpair, 336 struct nvme_completion *cpl) 337 { 338 uint8_t sct, sc, crd, m, dnr, p; 339 340 sct = NVME_STATUS_GET_SCT(cpl->status); 341 sc = NVME_STATUS_GET_SC(cpl->status); 342 crd = NVME_STATUS_GET_CRD(cpl->status); 343 m = NVME_STATUS_GET_M(cpl->status); 344 dnr = NVME_STATUS_GET_DNR(cpl->status); 345 p = NVME_STATUS_GET_P(cpl->status); 346 347 nvme_printf(qpair->ctrlr, "%s (%02x/%02x) crd:%x m:%x dnr:%x p:%d " 348 "sqid:%d cid:%d cdw0:%x\n", 349 get_status_string(sct, sc), sct, sc, crd, m, dnr, p, 350 cpl->sqid, cpl->cid, cpl->cdw0); 351 } 352 353 static bool 354 nvme_completion_is_retry(const struct nvme_completion *cpl) 355 { 356 uint8_t sct, sc, dnr; 357 358 sct = NVME_STATUS_GET_SCT(cpl->status); 359 sc = NVME_STATUS_GET_SC(cpl->status); 360 dnr = NVME_STATUS_GET_DNR(cpl->status); /* Do Not Retry Bit */ 361 362 /* 363 * TODO: spec is not clear how commands that are aborted due 364 * to TLER will be marked. So for now, it seems 365 * NAMESPACE_NOT_READY is the only case where we should 366 * look at the DNR bit. Requests failed with ABORTED_BY_REQUEST 367 * set the DNR bit correctly since the driver controls that. 368 */ 369 switch (sct) { 370 case NVME_SCT_GENERIC: 371 switch (sc) { 372 case NVME_SC_ABORTED_BY_REQUEST: 373 case NVME_SC_NAMESPACE_NOT_READY: 374 if (dnr) 375 return (0); 376 else 377 return (1); 378 case NVME_SC_INVALID_OPCODE: 379 case NVME_SC_INVALID_FIELD: 380 case NVME_SC_COMMAND_ID_CONFLICT: 381 case NVME_SC_DATA_TRANSFER_ERROR: 382 case NVME_SC_ABORTED_POWER_LOSS: 383 case NVME_SC_INTERNAL_DEVICE_ERROR: 384 case NVME_SC_ABORTED_SQ_DELETION: 385 case NVME_SC_ABORTED_FAILED_FUSED: 386 case NVME_SC_ABORTED_MISSING_FUSED: 387 case NVME_SC_INVALID_NAMESPACE_OR_FORMAT: 388 case NVME_SC_COMMAND_SEQUENCE_ERROR: 389 case NVME_SC_LBA_OUT_OF_RANGE: 390 case NVME_SC_CAPACITY_EXCEEDED: 391 default: 392 return (0); 393 } 394 case NVME_SCT_COMMAND_SPECIFIC: 395 case NVME_SCT_MEDIA_ERROR: 396 return (0); 397 case NVME_SCT_PATH_RELATED: 398 switch (sc) { 399 case NVME_SC_INTERNAL_PATH_ERROR: 400 if (dnr) 401 return (0); 402 else 403 return (1); 404 default: 405 return (0); 406 } 407 case NVME_SCT_VENDOR_SPECIFIC: 408 default: 409 return (0); 410 } 411 } 412 413 static void 414 nvme_qpair_complete_tracker(struct nvme_tracker *tr, 415 struct nvme_completion *cpl, error_print_t print_on_error) 416 { 417 struct nvme_qpair *qpair = tr->qpair; 418 struct nvme_request *req; 419 bool retry, error, retriable; 420 421 mtx_assert(&qpair->lock, MA_NOTOWNED); 422 423 req = tr->req; 424 error = nvme_completion_is_error(cpl); 425 retriable = nvme_completion_is_retry(cpl); 426 retry = error && retriable && req->retries < nvme_retry_count; 427 if (retry) 428 qpair->num_retries++; 429 if (error && req->retries >= nvme_retry_count && retriable) 430 qpair->num_failures++; 431 432 if (error && (print_on_error == ERROR_PRINT_ALL || 433 (!retry && print_on_error == ERROR_PRINT_NO_RETRY))) { 434 nvme_qpair_print_command(qpair, &req->cmd); 435 nvme_qpair_print_completion(qpair, cpl); 436 } 437 438 qpair->act_tr[cpl->cid] = NULL; 439 440 KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n")); 441 442 if (!retry) { 443 if (req->payload_valid) { 444 bus_dmamap_sync(qpair->dma_tag_payload, 445 tr->payload_dma_map, 446 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 447 } 448 if (req->cb_fn) 449 req->cb_fn(req->cb_arg, cpl); 450 } 451 452 mtx_lock(&qpair->lock); 453 454 if (retry) { 455 req->retries++; 456 nvme_qpair_submit_tracker(qpair, tr); 457 } else { 458 if (req->payload_valid) { 459 bus_dmamap_unload(qpair->dma_tag_payload, 460 tr->payload_dma_map); 461 } 462 463 nvme_free_request(req); 464 tr->req = NULL; 465 466 TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq); 467 TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq); 468 469 /* 470 * If the controller is in the middle of resetting, don't 471 * try to submit queued requests here - let the reset logic 472 * handle that instead. 473 */ 474 if (!STAILQ_EMPTY(&qpair->queued_req) && 475 !qpair->ctrlr->is_resetting) { 476 req = STAILQ_FIRST(&qpair->queued_req); 477 STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq); 478 _nvme_qpair_submit_request(qpair, req); 479 } 480 } 481 482 mtx_unlock(&qpair->lock); 483 } 484 485 static uint32_t 486 nvme_qpair_make_status(uint32_t sct, uint32_t sc, uint32_t dnr) 487 { 488 uint32_t status = 0; 489 490 status |= NVMEF(NVME_STATUS_SCT, sct); 491 status |= NVMEF(NVME_STATUS_SC, sc); 492 status |= NVMEF(NVME_STATUS_DNR, dnr); 493 /* M=0 : this is artificial so no data in error log page */ 494 /* CRD=0 : this is artificial and no delayed retry support anyway */ 495 /* P=0 : phase not checked */ 496 return (status); 497 } 498 499 static void 500 nvme_qpair_manual_complete_tracker( 501 struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr, 502 error_print_t print_on_error) 503 { 504 struct nvme_completion cpl; 505 struct nvme_qpair * qpair = tr->qpair; 506 507 mtx_assert(&qpair->lock, MA_NOTOWNED); 508 509 memset(&cpl, 0, sizeof(cpl)); 510 511 cpl.sqid = qpair->id; 512 cpl.cid = tr->cid; 513 cpl.status = nvme_qpair_make_status(sct, sc, dnr); 514 nvme_qpair_complete_tracker(tr, &cpl, print_on_error); 515 } 516 517 static void 518 nvme_qpair_manual_complete_request(struct nvme_qpair *qpair, 519 struct nvme_request *req, uint32_t sct, uint32_t sc, uint32_t dnr, 520 error_print_t print_on_error) 521 { 522 struct nvme_completion cpl; 523 bool error; 524 525 memset(&cpl, 0, sizeof(cpl)); 526 cpl.sqid = qpair->id; 527 cpl.status = nvme_qpair_make_status(sct, sc, dnr); 528 error = nvme_completion_is_error(&cpl); 529 530 if (error && print_on_error == ERROR_PRINT_ALL) { 531 nvme_qpair_print_command(qpair, &req->cmd); 532 nvme_qpair_print_completion(qpair, &cpl); 533 } 534 535 if (req->cb_fn) 536 req->cb_fn(req->cb_arg, &cpl); 537 538 nvme_free_request(req); 539 } 540 541 /* Locked version of completion processor */ 542 static bool 543 _nvme_qpair_process_completions(struct nvme_qpair *qpair) 544 { 545 struct nvme_tracker *tr; 546 struct nvme_completion cpl; 547 bool done = false; 548 bool in_panic = dumping || SCHEDULER_STOPPED(); 549 550 mtx_assert(&qpair->recovery, MA_OWNED); 551 552 /* 553 * qpair is not enabled, likely because a controller reset is in 554 * progress. Ignore the interrupt - any I/O that was associated with 555 * this interrupt will get retried when the reset is complete. Any 556 * pending completions for when we're in startup will be completed 557 * as soon as initialization is complete and we start sending commands 558 * to the device. 559 */ 560 if (qpair->recovery_state != RECOVERY_NONE) { 561 qpair->num_ignored++; 562 return (false); 563 } 564 565 /* 566 * Sanity check initialization. After we reset the hardware, the phase 567 * is defined to be 1. So if we get here with zero prior calls and the 568 * phase is 0, it means that we've lost a race between the 569 * initialization and the ISR running. With the phase wrong, we'll 570 * process a bunch of completions that aren't really completions leading 571 * to a KASSERT below. 572 */ 573 KASSERT(!(qpair->num_intr_handler_calls == 0 && qpair->phase == 0), 574 ("%s: Phase wrong for first interrupt call.", 575 device_get_nameunit(qpair->ctrlr->dev))); 576 577 qpair->num_intr_handler_calls++; 578 579 bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map, 580 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 581 /* 582 * A panic can stop the CPU this routine is running on at any point. If 583 * we're called during a panic, complete the sq_head wrap protocol for 584 * the case where we are interrupted just after the increment at 1 585 * below, but before we can reset cq_head to zero at 2. Also cope with 586 * the case where we do the zero at 2, but may or may not have done the 587 * phase adjustment at step 3. The panic machinery flushes all pending 588 * memory writes, so we can make these strong ordering assumptions 589 * that would otherwise be unwise if we were racing in real time. 590 */ 591 if (__predict_false(in_panic)) { 592 if (qpair->cq_head == qpair->num_entries) { 593 /* 594 * Here we know that we need to zero cq_head and then negate 595 * the phase, which hasn't been assigned if cq_head isn't 596 * zero due to the atomic_store_rel. 597 */ 598 qpair->cq_head = 0; 599 qpair->phase = !qpair->phase; 600 } else if (qpair->cq_head == 0) { 601 /* 602 * In this case, we know that the assignment at 2 603 * happened below, but we don't know if it 3 happened or 604 * not. To do this, we look at the last completion 605 * entry and set the phase to the opposite phase 606 * that it has. This gets us back in sync 607 */ 608 cpl = qpair->cpl[qpair->num_entries - 1]; 609 nvme_completion_swapbytes(&cpl); 610 qpair->phase = !NVME_STATUS_GET_P(cpl.status); 611 } 612 } 613 614 while (1) { 615 uint16_t status; 616 617 /* 618 * We need to do this dance to avoid a race between the host and 619 * the device where the device overtakes the host while the host 620 * is reading this record, leaving the status field 'new' and 621 * the sqhd and cid fields potentially stale. If the phase 622 * doesn't match, that means status hasn't yet been updated and 623 * we'll get any pending changes next time. It also means that 624 * the phase must be the same the second time. We have to sync 625 * before reading to ensure any bouncing completes. 626 */ 627 status = le16toh(qpair->cpl[qpair->cq_head].status); 628 if (NVME_STATUS_GET_P(status) != qpair->phase) 629 break; 630 631 bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map, 632 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 633 cpl = qpair->cpl[qpair->cq_head]; 634 nvme_completion_swapbytes(&cpl); 635 636 KASSERT( 637 NVME_STATUS_GET_P(status) == NVME_STATUS_GET_P(cpl.status), 638 ("Phase unexpectedly inconsistent")); 639 640 if (cpl.cid < qpair->num_trackers) 641 tr = qpair->act_tr[cpl.cid]; 642 else 643 tr = NULL; 644 645 done = true; 646 if (tr != NULL) { 647 nvme_qpair_complete_tracker(tr, &cpl, ERROR_PRINT_ALL); 648 qpair->sq_head = cpl.sqhd; 649 } else if (!in_panic) { 650 /* 651 * A missing tracker is normally an error. However, a 652 * panic can stop the CPU this routine is running on 653 * after completing an I/O but before updating 654 * qpair->cq_head at 1 below. Later, we re-enter this 655 * routine to poll I/O associated with the kernel 656 * dump. We find that the tr has been set to null before 657 * calling the completion routine. If it hasn't 658 * completed (or it triggers a panic), then '1' below 659 * won't have updated cq_head. Rather than panic again, 660 * ignore this condition because it's not unexpected. 661 */ 662 nvme_printf(qpair->ctrlr, 663 "cpl (cid = %u) does not map to outstanding cmd\n", 664 cpl.cid); 665 nvme_qpair_print_completion(qpair, 666 &qpair->cpl[qpair->cq_head]); 667 KASSERT(0, ("received completion for unknown cmd")); 668 } 669 670 /* 671 * There's a number of races with the following (see above) when 672 * the system panics. We compensate for each one of them by 673 * using the atomic store to force strong ordering (at least when 674 * viewed in the aftermath of a panic). 675 */ 676 if (++qpair->cq_head == qpair->num_entries) { /* 1 */ 677 atomic_store_rel_int(&qpair->cq_head, 0); /* 2 */ 678 qpair->phase = !qpair->phase; /* 3 */ 679 } 680 } 681 682 if (done) { 683 bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle, 684 qpair->cq_hdbl_off, qpair->cq_head); 685 } 686 687 return (done); 688 } 689 690 bool 691 nvme_qpair_process_completions(struct nvme_qpair *qpair) 692 { 693 bool done = false; 694 695 /* 696 * Interlock with reset / recovery code. This is an usually uncontended 697 * to make sure that we drain out of the ISRs before we reset the card 698 * and to prevent races with the recovery process called from a timeout 699 * context. 700 */ 701 mtx_lock(&qpair->recovery); 702 703 if (__predict_true(qpair->recovery_state == RECOVERY_NONE)) 704 done = _nvme_qpair_process_completions(qpair); 705 else 706 qpair->num_recovery_nolock++; // XXX likely need to rename 707 708 mtx_unlock(&qpair->recovery); 709 710 return (done); 711 } 712 713 static void 714 nvme_qpair_msi_handler(void *arg) 715 { 716 struct nvme_qpair *qpair = arg; 717 718 nvme_qpair_process_completions(qpair); 719 } 720 721 int 722 nvme_qpair_construct(struct nvme_qpair *qpair, 723 uint32_t num_entries, uint32_t num_trackers, 724 struct nvme_controller *ctrlr) 725 { 726 struct nvme_tracker *tr; 727 size_t cmdsz, cplsz, prpsz, allocsz, prpmemsz; 728 uint64_t queuemem_phys, prpmem_phys, list_phys; 729 uint8_t *queuemem, *prpmem, *prp_list; 730 int i, err; 731 732 qpair->vector = ctrlr->msi_count > 1 ? qpair->id : 0; 733 qpair->num_entries = num_entries; 734 qpair->num_trackers = num_trackers; 735 qpair->ctrlr = ctrlr; 736 737 mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF); 738 mtx_init(&qpair->recovery, "nvme qpair recovery", NULL, MTX_DEF); 739 740 callout_init_mtx(&qpair->timer, &qpair->recovery, 0); 741 qpair->timer_armed = false; 742 qpair->recovery_state = RECOVERY_WAITING; 743 744 /* Note: NVMe PRP format is restricted to 4-byte alignment. */ 745 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 746 4, ctrlr->page_size, BUS_SPACE_MAXADDR, 747 BUS_SPACE_MAXADDR, NULL, NULL, ctrlr->max_xfer_size, 748 howmany(ctrlr->max_xfer_size, ctrlr->page_size) + 1, 749 ctrlr->page_size, 0, 750 NULL, NULL, &qpair->dma_tag_payload); 751 if (err != 0) { 752 nvme_printf(ctrlr, "payload tag create failed %d\n", err); 753 goto out; 754 } 755 756 /* 757 * Each component must be page aligned, and individual PRP lists 758 * cannot cross a page boundary. 759 */ 760 cmdsz = qpair->num_entries * sizeof(struct nvme_command); 761 cmdsz = roundup2(cmdsz, ctrlr->page_size); 762 cplsz = qpair->num_entries * sizeof(struct nvme_completion); 763 cplsz = roundup2(cplsz, ctrlr->page_size); 764 /* 765 * For commands requiring more than 2 PRP entries, one PRP will be 766 * embedded in the command (prp1), and the rest of the PRP entries 767 * will be in a list pointed to by the command (prp2). 768 */ 769 prpsz = sizeof(uint64_t) * 770 howmany(ctrlr->max_xfer_size, ctrlr->page_size); 771 prpmemsz = qpair->num_trackers * prpsz; 772 allocsz = cmdsz + cplsz + prpmemsz; 773 774 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 775 ctrlr->page_size, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 776 allocsz, 1, allocsz, 0, NULL, NULL, &qpair->dma_tag); 777 if (err != 0) { 778 nvme_printf(ctrlr, "tag create failed %d\n", err); 779 goto out; 780 } 781 bus_dma_tag_set_domain(qpair->dma_tag, qpair->domain); 782 783 if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem, 784 BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &qpair->queuemem_map)) { 785 nvme_printf(ctrlr, "failed to alloc qpair memory\n"); 786 goto out; 787 } 788 789 if (bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map, 790 queuemem, allocsz, nvme_single_map, &queuemem_phys, 0) != 0) { 791 nvme_printf(ctrlr, "failed to load qpair memory\n"); 792 bus_dmamem_free(qpair->dma_tag, qpair->cmd, 793 qpair->queuemem_map); 794 goto out; 795 } 796 797 qpair->num_cmds = 0; 798 qpair->num_intr_handler_calls = 0; 799 qpair->num_retries = 0; 800 qpair->num_failures = 0; 801 qpair->num_ignored = 0; 802 qpair->cmd = (struct nvme_command *)queuemem; 803 qpair->cpl = (struct nvme_completion *)(queuemem + cmdsz); 804 prpmem = (uint8_t *)(queuemem + cmdsz + cplsz); 805 qpair->cmd_bus_addr = queuemem_phys; 806 qpair->cpl_bus_addr = queuemem_phys + cmdsz; 807 prpmem_phys = queuemem_phys + cmdsz + cplsz; 808 809 /* 810 * Calcuate the stride of the doorbell register. Many emulators set this 811 * value to correspond to a cache line. However, some hardware has set 812 * it to various small values. 813 */ 814 qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[0]) + 815 (qpair->id << (ctrlr->dstrd + 1)); 816 qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[0]) + 817 (qpair->id << (ctrlr->dstrd + 1)) + (1 << ctrlr->dstrd); 818 819 TAILQ_INIT(&qpair->free_tr); 820 TAILQ_INIT(&qpair->outstanding_tr); 821 STAILQ_INIT(&qpair->queued_req); 822 823 list_phys = prpmem_phys; 824 prp_list = prpmem; 825 for (i = 0; i < qpair->num_trackers; i++) { 826 if (list_phys + prpsz > prpmem_phys + prpmemsz) { 827 qpair->num_trackers = i; 828 break; 829 } 830 831 /* 832 * Make sure that the PRP list for this tracker doesn't 833 * overflow to another nvme page. 834 */ 835 if (trunc_page(list_phys) != 836 trunc_page(list_phys + prpsz - 1)) { 837 list_phys = roundup2(list_phys, ctrlr->page_size); 838 prp_list = 839 (uint8_t *)roundup2((uintptr_t)prp_list, ctrlr->page_size); 840 } 841 842 tr = malloc_domainset(sizeof(*tr), M_NVME, 843 DOMAINSET_PREF(qpair->domain), M_ZERO | M_WAITOK); 844 bus_dmamap_create(qpair->dma_tag_payload, 0, 845 &tr->payload_dma_map); 846 tr->cid = i; 847 tr->qpair = qpair; 848 tr->prp = (uint64_t *)prp_list; 849 tr->prp_bus_addr = list_phys; 850 TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq); 851 list_phys += prpsz; 852 prp_list += prpsz; 853 } 854 855 if (qpair->num_trackers == 0) { 856 nvme_printf(ctrlr, "failed to allocate enough trackers\n"); 857 goto out; 858 } 859 860 qpair->act_tr = malloc_domainset(sizeof(struct nvme_tracker *) * 861 qpair->num_entries, M_NVME, DOMAINSET_PREF(qpair->domain), 862 M_ZERO | M_WAITOK); 863 864 if (ctrlr->msi_count > 1) { 865 /* 866 * MSI-X vector resource IDs start at 1, so we add one to 867 * the queue's vector to get the corresponding rid to use. 868 */ 869 qpair->rid = qpair->vector + 1; 870 871 qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ, 872 &qpair->rid, RF_ACTIVE); 873 if (qpair->res == NULL) { 874 nvme_printf(ctrlr, "unable to allocate MSI\n"); 875 goto out; 876 } 877 if (bus_setup_intr(ctrlr->dev, qpair->res, 878 INTR_TYPE_MISC | INTR_MPSAFE, NULL, 879 nvme_qpair_msi_handler, qpair, &qpair->tag) != 0) { 880 nvme_printf(ctrlr, "unable to setup MSI\n"); 881 goto out; 882 } 883 if (qpair->id == 0) { 884 bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag, 885 "admin"); 886 } else { 887 bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag, 888 "io%d", qpair->id - 1); 889 } 890 } 891 892 return (0); 893 894 out: 895 nvme_qpair_destroy(qpair); 896 return (ENOMEM); 897 } 898 899 static void 900 nvme_qpair_destroy(struct nvme_qpair *qpair) 901 { 902 struct nvme_tracker *tr; 903 904 mtx_lock(&qpair->recovery); 905 qpair->timer_armed = false; 906 mtx_unlock(&qpair->recovery); 907 callout_drain(&qpair->timer); 908 909 if (qpair->tag) { 910 bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag); 911 qpair->tag = NULL; 912 } 913 914 if (qpair->act_tr) { 915 free(qpair->act_tr, M_NVME); 916 qpair->act_tr = NULL; 917 } 918 919 while (!TAILQ_EMPTY(&qpair->free_tr)) { 920 tr = TAILQ_FIRST(&qpair->free_tr); 921 TAILQ_REMOVE(&qpair->free_tr, tr, tailq); 922 bus_dmamap_destroy(qpair->dma_tag_payload, 923 tr->payload_dma_map); 924 free(tr, M_NVME); 925 } 926 927 if (qpair->cmd != NULL) { 928 bus_dmamap_unload(qpair->dma_tag, qpair->queuemem_map); 929 bus_dmamem_free(qpair->dma_tag, qpair->cmd, 930 qpair->queuemem_map); 931 qpair->cmd = NULL; 932 } 933 934 if (qpair->dma_tag) { 935 bus_dma_tag_destroy(qpair->dma_tag); 936 qpair->dma_tag = NULL; 937 } 938 939 if (qpair->dma_tag_payload) { 940 bus_dma_tag_destroy(qpair->dma_tag_payload); 941 qpair->dma_tag_payload = NULL; 942 } 943 944 if (mtx_initialized(&qpair->lock)) 945 mtx_destroy(&qpair->lock); 946 if (mtx_initialized(&qpair->recovery)) 947 mtx_destroy(&qpair->recovery); 948 949 if (qpair->res) { 950 bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ, 951 rman_get_rid(qpair->res), qpair->res); 952 qpair->res = NULL; 953 } 954 } 955 956 static void 957 nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair) 958 { 959 struct nvme_tracker *tr; 960 961 /* 962 * nvme_complete_tracker must be called without the qpair lock held. It 963 * takes the lock to adjust outstanding_tr list, so make sure we don't 964 * have it yet. We need the lock to make the list traverse safe, but 965 * have to drop the lock to complete any AER. We restart the list scan 966 * when we do this to make this safe. There's interlock with the ISR so 967 * we know this tracker won't be completed twice. 968 */ 969 mtx_assert(&qpair->lock, MA_NOTOWNED); 970 971 mtx_lock(&qpair->lock); 972 tr = TAILQ_FIRST(&qpair->outstanding_tr); 973 while (tr != NULL) { 974 if (tr->req->cmd.opc != NVME_OPC_ASYNC_EVENT_REQUEST) { 975 tr = TAILQ_NEXT(tr, tailq); 976 continue; 977 } 978 mtx_unlock(&qpair->lock); 979 nvme_qpair_manual_complete_tracker(tr, 980 NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0, 981 ERROR_PRINT_NONE); 982 mtx_lock(&qpair->lock); 983 tr = TAILQ_FIRST(&qpair->outstanding_tr); 984 } 985 mtx_unlock(&qpair->lock); 986 } 987 988 void 989 nvme_admin_qpair_destroy(struct nvme_qpair *qpair) 990 { 991 mtx_assert(&qpair->lock, MA_NOTOWNED); 992 993 nvme_admin_qpair_abort_aers(qpair); 994 nvme_qpair_destroy(qpair); 995 } 996 997 void 998 nvme_io_qpair_destroy(struct nvme_qpair *qpair) 999 { 1000 1001 nvme_qpair_destroy(qpair); 1002 } 1003 1004 static void 1005 nvme_abort_complete(void *arg, const struct nvme_completion *status) 1006 { 1007 struct nvme_tracker *tr = arg; 1008 1009 /* 1010 * If cdw0 == 1, the controller was not able to abort the command 1011 * we requested. We still need to check the active tracker array, 1012 * to cover race where I/O timed out at same time controller was 1013 * completing the I/O. 1014 */ 1015 if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) { 1016 /* 1017 * An I/O has timed out, and the controller was unable to 1018 * abort it for some reason. Construct a fake completion 1019 * status, and then complete the I/O's tracker manually. 1020 */ 1021 nvme_printf(tr->qpair->ctrlr, 1022 "abort command failed, aborting command manually\n"); 1023 nvme_qpair_manual_complete_tracker(tr, 1024 NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 0, ERROR_PRINT_ALL); 1025 } 1026 } 1027 1028 static void 1029 nvme_qpair_timeout(void *arg) 1030 { 1031 struct nvme_qpair *qpair = arg; 1032 struct nvme_controller *ctrlr = qpair->ctrlr; 1033 struct nvme_tracker *tr; 1034 sbintime_t now; 1035 bool idle = false; 1036 bool needs_reset; 1037 uint32_t csts; 1038 uint8_t cfs; 1039 1040 mtx_assert(&qpair->recovery, MA_OWNED); 1041 1042 /* 1043 * If the controller is failed, then stop polling. This ensures that any 1044 * failure processing that races with the qpair timeout will fail 1045 * safely. 1046 */ 1047 if (qpair->ctrlr->is_failed) { 1048 nvme_printf(qpair->ctrlr, 1049 "Failed controller, stopping watchdog timeout.\n"); 1050 qpair->timer_armed = false; 1051 return; 1052 } 1053 1054 /* 1055 * Shutdown condition: We set qpair->timer_armed to false in 1056 * nvme_qpair_destroy before calling callout_drain. When we call that, 1057 * this routine might get called one last time. Exit w/o setting a 1058 * timeout. None of the watchdog stuff needs to be done since we're 1059 * destroying the qpair. 1060 */ 1061 if (!qpair->timer_armed) { 1062 nvme_printf(qpair->ctrlr, 1063 "Timeout fired during nvme_qpair_destroy\n"); 1064 return; 1065 } 1066 1067 switch (qpair->recovery_state) { 1068 case RECOVERY_NONE: 1069 /* 1070 * Read csts to get value of cfs - controller fatal status. If 1071 * we are in the hot-plug or controller failed status proceed 1072 * directly to reset. We also bail early if the status reads all 1073 * 1's or the control fatal status bit is now 1. The latter is 1074 * always true when the former is true, but not vice versa. The 1075 * intent of the code is that if the card is gone (all 1's) or 1076 * we've failed, then try to do a reset (which someitmes 1077 * unwedges a card reading all 1's that's not gone away, but 1078 * usually doesn't). 1079 */ 1080 csts = nvme_mmio_read_4(ctrlr, csts); 1081 cfs = NVMEV(NVME_CSTS_REG_CFS, csts); 1082 if (csts == NVME_GONE || cfs == 1) 1083 goto do_reset; 1084 1085 /* 1086 * Process completions. We already have the recovery lock, so 1087 * call the locked version. 1088 */ 1089 _nvme_qpair_process_completions(qpair); 1090 1091 /* 1092 * Check to see if we need to timeout any commands. If we do, then 1093 * we also enter a recovery phase. 1094 */ 1095 now = getsbinuptime(); 1096 needs_reset = false; 1097 idle = true; 1098 mtx_lock(&qpair->lock); 1099 TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq) { 1100 /* 1101 * Skip async commands, they are posted to the card for 1102 * an indefinite amount of time and have no deadline. 1103 */ 1104 if (tr->deadline == SBT_MAX) 1105 continue; 1106 if (now > tr->deadline) { 1107 if (tr->req->cb_fn != nvme_abort_complete && 1108 ctrlr->enable_aborts) { 1109 /* 1110 * This isn't an abort command, ask 1111 * for a hardware abort. 1112 */ 1113 nvme_ctrlr_cmd_abort(ctrlr, tr->cid, 1114 qpair->id, nvme_abort_complete, tr); 1115 } else { 1116 /* 1117 * Otherwise we have a live command in 1118 * the card (either one we couldn't 1119 * abort, or aborts weren't enabled). 1120 * The only safe way to proceed is to do 1121 * a reset. 1122 */ 1123 needs_reset = true; 1124 } 1125 } else { 1126 idle = false; 1127 } 1128 } 1129 mtx_unlock(&qpair->lock); 1130 if (!needs_reset) 1131 break; 1132 1133 /* 1134 * We've had a command timeout that we weren't able to abort 1135 * 1136 * If we get here due to a possible surprise hot-unplug event, 1137 * then we let nvme_ctrlr_reset confirm and fail the 1138 * controller. 1139 */ 1140 do_reset: 1141 nvme_printf(ctrlr, "Resetting controller due to a timeout%s.\n", 1142 (csts == 0xffffffff) ? " and possible hot unplug" : 1143 (cfs ? " and fatal error status" : "")); 1144 qpair->recovery_state = RECOVERY_WAITING; 1145 nvme_ctrlr_reset(ctrlr); 1146 idle = false; /* We want to keep polling */ 1147 break; 1148 case RECOVERY_WAITING: 1149 /* 1150 * These messages aren't interesting while we're suspended. We 1151 * put the queues into waiting state while 1152 * suspending. Suspending takes a while, so we'll see these 1153 * during that time and they aren't diagnostic. At other times, 1154 * they indicate a problem that's worth complaining about. 1155 */ 1156 if (!device_is_suspended(ctrlr->dev)) 1157 nvme_printf(ctrlr, "Waiting for reset to complete\n"); 1158 idle = false; /* We want to keep polling */ 1159 break; 1160 } 1161 1162 /* 1163 * Rearm the timeout. 1164 */ 1165 if (!idle) { 1166 callout_schedule_sbt(&qpair->timer, SBT_1S / 2, SBT_1S / 2, 0); 1167 } else { 1168 qpair->timer_armed = false; 1169 } 1170 } 1171 1172 /* 1173 * Submit the tracker to the hardware. Must already be in the 1174 * outstanding queue when called. 1175 */ 1176 void 1177 nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr) 1178 { 1179 struct nvme_request *req; 1180 struct nvme_controller *ctrlr; 1181 int timeout; 1182 1183 mtx_assert(&qpair->lock, MA_OWNED); 1184 1185 req = tr->req; 1186 req->cmd.cid = tr->cid; 1187 qpair->act_tr[tr->cid] = tr; 1188 ctrlr = qpair->ctrlr; 1189 1190 if (req->timeout) { 1191 if (req->cb_fn == nvme_completion_poll_cb) 1192 timeout = 1; 1193 else if (qpair->id == 0) 1194 timeout = ctrlr->admin_timeout_period; 1195 else 1196 timeout = ctrlr->timeout_period; 1197 tr->deadline = getsbinuptime() + timeout * SBT_1S; 1198 if (!qpair->timer_armed) { 1199 qpair->timer_armed = true; 1200 callout_reset_sbt_on(&qpair->timer, SBT_1S / 2, SBT_1S / 2, 1201 nvme_qpair_timeout, qpair, qpair->cpu, 0); 1202 } 1203 } else 1204 tr->deadline = SBT_MAX; 1205 1206 /* Copy the command from the tracker to the submission queue. */ 1207 memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd)); 1208 1209 if (++qpair->sq_tail == qpair->num_entries) 1210 qpair->sq_tail = 0; 1211 1212 bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map, 1213 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1214 bus_space_write_4(ctrlr->bus_tag, ctrlr->bus_handle, 1215 qpair->sq_tdbl_off, qpair->sq_tail); 1216 qpair->num_cmds++; 1217 } 1218 1219 static void 1220 nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error) 1221 { 1222 struct nvme_tracker *tr = arg; 1223 uint32_t cur_nseg; 1224 1225 /* 1226 * If the mapping operation failed, return immediately. The caller 1227 * is responsible for detecting the error status and failing the 1228 * tracker manually. 1229 */ 1230 if (error != 0) { 1231 nvme_printf(tr->qpair->ctrlr, 1232 "nvme_payload_map err %d\n", error); 1233 return; 1234 } 1235 1236 /* 1237 * Note that we specified ctrlr->page_size for alignment and max 1238 * segment size when creating the bus dma tags. So here we can safely 1239 * just transfer each segment to its associated PRP entry. 1240 */ 1241 tr->req->cmd.prp1 = htole64(seg[0].ds_addr); 1242 1243 if (nseg == 2) { 1244 tr->req->cmd.prp2 = htole64(seg[1].ds_addr); 1245 } else if (nseg > 2) { 1246 cur_nseg = 1; 1247 tr->req->cmd.prp2 = htole64((uint64_t)tr->prp_bus_addr); 1248 while (cur_nseg < nseg) { 1249 tr->prp[cur_nseg-1] = 1250 htole64((uint64_t)seg[cur_nseg].ds_addr); 1251 cur_nseg++; 1252 } 1253 } else { 1254 /* 1255 * prp2 should not be used by the controller 1256 * since there is only one segment, but set 1257 * to 0 just to be safe. 1258 */ 1259 tr->req->cmd.prp2 = 0; 1260 } 1261 1262 bus_dmamap_sync(tr->qpair->dma_tag_payload, tr->payload_dma_map, 1263 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1264 nvme_qpair_submit_tracker(tr->qpair, tr); 1265 } 1266 1267 static void 1268 _nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req) 1269 { 1270 struct nvme_tracker *tr; 1271 int err = 0; 1272 1273 mtx_assert(&qpair->lock, MA_OWNED); 1274 1275 tr = TAILQ_FIRST(&qpair->free_tr); 1276 req->qpair = qpair; 1277 1278 /* 1279 * The controller has failed, so fail the request. Note, that this races 1280 * the recovery / timeout code. Since we hold the qpair lock, we know 1281 * it's safe to fail directly. is_failed is set when we fail the controller. 1282 * It is only ever reset in the ioctl reset controller path, which is safe 1283 * to race (for failed controllers, we make no guarantees about bringing 1284 * it out of failed state relative to other commands). 1285 */ 1286 if (qpair->ctrlr->is_failed) { 1287 nvme_qpair_manual_complete_request(qpair, req, 1288 NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 1, 1289 ERROR_PRINT_NONE); 1290 return; 1291 } 1292 1293 /* 1294 * No tracker is available, or the qpair is disabled due to an 1295 * in-progress controller-level reset. If we lose the race with 1296 * recovery_state, then we may add an extra request to the queue which 1297 * will be resubmitted later. We only set recovery_state to NONE with 1298 * qpair->lock also held, so if we observe that the state is not NONE, 1299 * we know it won't transition back to NONE without retrying queued 1300 * request. 1301 */ 1302 if (tr == NULL || qpair->recovery_state != RECOVERY_NONE) { 1303 STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq); 1304 return; 1305 } 1306 1307 TAILQ_REMOVE(&qpair->free_tr, tr, tailq); 1308 TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq); 1309 tr->deadline = SBT_MAX; 1310 tr->req = req; 1311 1312 if (!req->payload_valid) { 1313 nvme_qpair_submit_tracker(tr->qpair, tr); 1314 return; 1315 } 1316 1317 /* 1318 * tr->deadline updating when nvme_payload_map calls 1319 * nvme_qpair_submit_tracker (we call it above directly 1320 * when there's no map to load). 1321 */ 1322 err = bus_dmamap_load_mem(tr->qpair->dma_tag_payload, 1323 tr->payload_dma_map, &req->payload, nvme_payload_map, tr, 0); 1324 if (err != 0) { 1325 /* 1326 * The dmamap operation failed, so we manually fail the 1327 * tracker here with DATA_TRANSFER_ERROR status. 1328 * 1329 * nvme_qpair_manual_complete_tracker must not be called 1330 * with the qpair lock held. 1331 */ 1332 nvme_printf(qpair->ctrlr, 1333 "bus_dmamap_load_mem returned 0x%x!\n", err); 1334 mtx_unlock(&qpair->lock); 1335 nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC, 1336 NVME_SC_DATA_TRANSFER_ERROR, DO_NOT_RETRY, ERROR_PRINT_ALL); 1337 mtx_lock(&qpair->lock); 1338 } 1339 } 1340 1341 void 1342 nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req) 1343 { 1344 1345 mtx_lock(&qpair->lock); 1346 _nvme_qpair_submit_request(qpair, req); 1347 mtx_unlock(&qpair->lock); 1348 } 1349 1350 static void 1351 nvme_qpair_enable(struct nvme_qpair *qpair) 1352 { 1353 if (mtx_initialized(&qpair->recovery)) 1354 mtx_assert(&qpair->recovery, MA_OWNED); 1355 if (mtx_initialized(&qpair->lock)) 1356 mtx_assert(&qpair->lock, MA_OWNED); 1357 KASSERT(!qpair->ctrlr->is_failed, 1358 ("Enabling a failed qpair\n")); 1359 1360 qpair->recovery_state = RECOVERY_NONE; 1361 } 1362 1363 void 1364 nvme_qpair_reset(struct nvme_qpair *qpair) 1365 { 1366 1367 qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0; 1368 1369 /* 1370 * First time through the completion queue, HW will set phase 1371 * bit on completions to 1. So set this to 1 here, indicating 1372 * we're looking for a 1 to know which entries have completed. 1373 * we'll toggle the bit each time when the completion queue 1374 * rolls over. 1375 */ 1376 qpair->phase = 1; 1377 1378 memset(qpair->cmd, 0, 1379 qpair->num_entries * sizeof(struct nvme_command)); 1380 memset(qpair->cpl, 0, 1381 qpair->num_entries * sizeof(struct nvme_completion)); 1382 } 1383 1384 void 1385 nvme_admin_qpair_enable(struct nvme_qpair *qpair) 1386 { 1387 struct nvme_tracker *tr; 1388 struct nvme_tracker *tr_temp; 1389 bool rpt; 1390 1391 /* 1392 * Manually abort each outstanding admin command. Do not retry 1393 * admin commands found here, since they will be left over from 1394 * a controller reset and its likely the context in which the 1395 * command was issued no longer applies. 1396 */ 1397 rpt = !TAILQ_EMPTY(&qpair->outstanding_tr); 1398 if (rpt) 1399 nvme_printf(qpair->ctrlr, 1400 "aborting outstanding admin command\n"); 1401 TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) { 1402 nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC, 1403 NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL); 1404 } 1405 if (rpt) 1406 nvme_printf(qpair->ctrlr, 1407 "done aborting outstanding admin\n"); 1408 1409 mtx_lock(&qpair->recovery); 1410 mtx_lock(&qpair->lock); 1411 nvme_qpair_enable(qpair); 1412 mtx_unlock(&qpair->lock); 1413 mtx_unlock(&qpair->recovery); 1414 } 1415 1416 void 1417 nvme_io_qpair_enable(struct nvme_qpair *qpair) 1418 { 1419 STAILQ_HEAD(, nvme_request) temp; 1420 struct nvme_tracker *tr; 1421 struct nvme_tracker *tr_temp; 1422 struct nvme_request *req; 1423 bool report; 1424 1425 /* 1426 * Manually abort each outstanding I/O. This normally results in a 1427 * retry, unless the retry count on the associated request has 1428 * reached its limit. 1429 */ 1430 report = !TAILQ_EMPTY(&qpair->outstanding_tr); 1431 if (report) 1432 nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n"); 1433 TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) { 1434 nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC, 1435 NVME_SC_ABORTED_BY_REQUEST, 0, ERROR_PRINT_NO_RETRY); 1436 } 1437 if (report) 1438 nvme_printf(qpair->ctrlr, "done aborting outstanding i/o\n"); 1439 1440 mtx_lock(&qpair->recovery); 1441 mtx_lock(&qpair->lock); 1442 nvme_qpair_enable(qpair); 1443 1444 STAILQ_INIT(&temp); 1445 STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request); 1446 1447 report = !STAILQ_EMPTY(&temp); 1448 if (report) 1449 nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n"); 1450 while (!STAILQ_EMPTY(&temp)) { 1451 req = STAILQ_FIRST(&temp); 1452 STAILQ_REMOVE_HEAD(&temp, stailq); 1453 nvme_qpair_print_command(qpair, &req->cmd); 1454 _nvme_qpair_submit_request(qpair, req); 1455 } 1456 if (report) 1457 nvme_printf(qpair->ctrlr, "done resubmitting i/o\n"); 1458 1459 mtx_unlock(&qpair->lock); 1460 mtx_unlock(&qpair->recovery); 1461 } 1462 1463 static void 1464 nvme_qpair_disable(struct nvme_qpair *qpair) 1465 { 1466 struct nvme_tracker *tr, *tr_temp; 1467 1468 if (mtx_initialized(&qpair->recovery)) 1469 mtx_assert(&qpair->recovery, MA_OWNED); 1470 if (mtx_initialized(&qpair->lock)) 1471 mtx_assert(&qpair->lock, MA_OWNED); 1472 1473 qpair->recovery_state = RECOVERY_WAITING; 1474 TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) { 1475 tr->deadline = SBT_MAX; 1476 } 1477 } 1478 1479 void 1480 nvme_admin_qpair_disable(struct nvme_qpair *qpair) 1481 { 1482 mtx_lock(&qpair->recovery); 1483 1484 mtx_lock(&qpair->lock); 1485 nvme_qpair_disable(qpair); 1486 mtx_unlock(&qpair->lock); 1487 1488 nvme_admin_qpair_abort_aers(qpair); 1489 1490 mtx_unlock(&qpair->recovery); 1491 } 1492 1493 void 1494 nvme_io_qpair_disable(struct nvme_qpair *qpair) 1495 { 1496 mtx_lock(&qpair->recovery); 1497 mtx_lock(&qpair->lock); 1498 1499 nvme_qpair_disable(qpair); 1500 1501 mtx_unlock(&qpair->lock); 1502 mtx_unlock(&qpair->recovery); 1503 } 1504 1505 void 1506 nvme_qpair_fail(struct nvme_qpair *qpair) 1507 { 1508 struct nvme_tracker *tr; 1509 struct nvme_request *req; 1510 1511 if (!mtx_initialized(&qpair->lock)) 1512 return; 1513 1514 mtx_lock(&qpair->lock); 1515 1516 if (!STAILQ_EMPTY(&qpair->queued_req)) { 1517 nvme_printf(qpair->ctrlr, "failing queued i/o\n"); 1518 } 1519 while (!STAILQ_EMPTY(&qpair->queued_req)) { 1520 req = STAILQ_FIRST(&qpair->queued_req); 1521 STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq); 1522 mtx_unlock(&qpair->lock); 1523 nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC, 1524 NVME_SC_ABORTED_BY_REQUEST, 1, ERROR_PRINT_ALL); 1525 mtx_lock(&qpair->lock); 1526 } 1527 1528 if (!TAILQ_EMPTY(&qpair->outstanding_tr)) { 1529 nvme_printf(qpair->ctrlr, "failing outstanding i/o\n"); 1530 } 1531 /* Manually abort each outstanding I/O. */ 1532 while (!TAILQ_EMPTY(&qpair->outstanding_tr)) { 1533 tr = TAILQ_FIRST(&qpair->outstanding_tr); 1534 /* 1535 * Do not remove the tracker. The abort_tracker path will 1536 * do that for us. 1537 */ 1538 mtx_unlock(&qpair->lock); 1539 nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC, 1540 NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL); 1541 mtx_lock(&qpair->lock); 1542 } 1543 1544 mtx_unlock(&qpair->lock); 1545 } 1546