xref: /freebsd/sys/dev/nvme/nvme_qpair.c (revision 4f8f43b06ed07e96a250855488cc531799d5b78f)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/proc.h>

#include <dev/pci/pcivar.h>

#include "nvme_private.h"

typedef enum error_print {
	ERROR_PRINT_NONE,
	ERROR_PRINT_NO_RETRY,
	ERROR_PRINT_ALL
} error_print_t;
#define DO_NOT_RETRY	1

static void	_nvme_qpair_submit_request(struct nvme_qpair *qpair,
					   struct nvme_request *req);
static void	nvme_qpair_destroy(struct nvme_qpair *qpair);

#define DEFAULT_INDEX	256
#define DEFAULT_ENTRY(x)	[DEFAULT_INDEX] = x
#define OPC_ENTRY(x)		[NVME_OPC_ ## x] = #x

static const char *admin_opcode[DEFAULT_INDEX + 1] = {
	OPC_ENTRY(DELETE_IO_SQ),
	OPC_ENTRY(CREATE_IO_SQ),
	OPC_ENTRY(GET_LOG_PAGE),
	OPC_ENTRY(DELETE_IO_CQ),
	OPC_ENTRY(CREATE_IO_CQ),
	OPC_ENTRY(IDENTIFY),
	OPC_ENTRY(ABORT),
	OPC_ENTRY(SET_FEATURES),
	OPC_ENTRY(GET_FEATURES),
	OPC_ENTRY(ASYNC_EVENT_REQUEST),
	OPC_ENTRY(NAMESPACE_MANAGEMENT),
	OPC_ENTRY(FIRMWARE_ACTIVATE),
	OPC_ENTRY(FIRMWARE_IMAGE_DOWNLOAD),
	OPC_ENTRY(DEVICE_SELF_TEST),
	OPC_ENTRY(NAMESPACE_ATTACHMENT),
	OPC_ENTRY(KEEP_ALIVE),
	OPC_ENTRY(DIRECTIVE_SEND),
	OPC_ENTRY(DIRECTIVE_RECEIVE),
	OPC_ENTRY(VIRTUALIZATION_MANAGEMENT),
	OPC_ENTRY(NVME_MI_SEND),
	OPC_ENTRY(NVME_MI_RECEIVE),
	OPC_ENTRY(CAPACITY_MANAGEMENT),
	OPC_ENTRY(LOCKDOWN),
	OPC_ENTRY(DOORBELL_BUFFER_CONFIG),
	OPC_ENTRY(FABRICS_COMMANDS),
	OPC_ENTRY(FORMAT_NVM),
	OPC_ENTRY(SECURITY_SEND),
	OPC_ENTRY(SECURITY_RECEIVE),
	OPC_ENTRY(SANITIZE),
	OPC_ENTRY(GET_LBA_STATUS),
	DEFAULT_ENTRY("ADMIN COMMAND"),
};

static const char *io_opcode[DEFAULT_INDEX + 1] = {
	OPC_ENTRY(FLUSH),
	OPC_ENTRY(WRITE),
	OPC_ENTRY(READ),
	OPC_ENTRY(WRITE_UNCORRECTABLE),
	OPC_ENTRY(COMPARE),
	OPC_ENTRY(WRITE_ZEROES),
	OPC_ENTRY(DATASET_MANAGEMENT),
	OPC_ENTRY(VERIFY),
	OPC_ENTRY(RESERVATION_REGISTER),
	OPC_ENTRY(RESERVATION_REPORT),
	OPC_ENTRY(RESERVATION_ACQUIRE),
	OPC_ENTRY(RESERVATION_RELEASE),
	OPC_ENTRY(COPY),
	DEFAULT_ENTRY("IO COMMAND"),
};

static const char *
get_opcode_string(const char *op[DEFAULT_INDEX + 1], uint16_t opc)
{
	const char *nm = opc < DEFAULT_INDEX ? op[opc] : op[DEFAULT_INDEX];

	return (nm != NULL ? nm : op[DEFAULT_INDEX]);
}

static const char *
get_admin_opcode_string(uint16_t opc)
{
	return (get_opcode_string(admin_opcode, opc));
}

static const char *
get_io_opcode_string(uint16_t opc)
{
	return (get_opcode_string(io_opcode, opc));
}
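
/*
 * Example (illustrative, not compiled): opcodes with an OPC_ENTRY map to
 * their name, while any opcode without an entry (or one >= DEFAULT_INDEX)
 * falls through to the DEFAULT_ENTRY string, so callers always get a
 * printable name:
 *
 *	get_admin_opcode_string(NVME_OPC_IDENTIFY)	-> "IDENTIFY"
 *	get_admin_opcode_string(0xd5)			-> "ADMIN COMMAND"
 *	get_io_opcode_string(NVME_OPC_FLUSH)		-> "FLUSH"
 */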

static void
nvme_admin_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

	nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x "
	    "cdw10:%08x cdw11:%08x\n",
	    get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid,
	    le32toh(cmd->nsid), le32toh(cmd->cdw10), le32toh(cmd->cdw11));
}

static void
nvme_io_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

	switch (cmd->opc) {
	case NVME_OPC_WRITE:
	case NVME_OPC_READ:
	case NVME_OPC_WRITE_UNCORRECTABLE:
	case NVME_OPC_COMPARE:
	case NVME_OPC_WRITE_ZEROES:
	case NVME_OPC_VERIFY:
		nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d "
		    "lba:%llu len:%d\n",
		    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid),
		    ((unsigned long long)le32toh(cmd->cdw11) << 32) + le32toh(cmd->cdw10),
		    (le32toh(cmd->cdw12) & 0xFFFF) + 1);
		break;
	case NVME_OPC_FLUSH:
	case NVME_OPC_DATASET_MANAGEMENT:
	case NVME_OPC_RESERVATION_REGISTER:
	case NVME_OPC_RESERVATION_REPORT:
	case NVME_OPC_RESERVATION_ACQUIRE:
	case NVME_OPC_RESERVATION_RELEASE:
		nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n",
		    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid));
		break;
	default:
		nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n",
		    get_io_opcode_string(cmd->opc), cmd->opc, qpair->id,
		    cmd->cid, le32toh(cmd->nsid));
		break;
	}
}

void
nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd)
{
	if (qpair->id == 0)
		nvme_admin_qpair_print_command(qpair, cmd);
	else
		nvme_io_qpair_print_command(qpair, cmd);
	if (nvme_verbose_cmd_dump) {
		nvme_printf(qpair->ctrlr,
		    "nsid:%#x rsvd2:%#x rsvd3:%#x mptr:%#jx prp1:%#jx prp2:%#jx\n",
		    cmd->nsid, cmd->rsvd2, cmd->rsvd3, (uintmax_t)cmd->mptr,
		    (uintmax_t)cmd->prp1, (uintmax_t)cmd->prp2);
		nvme_printf(qpair->ctrlr,
		    "cdw10:%#x cdw11:%#x cdw12:%#x cdw13:%#x cdw14:%#x cdw15:%#x\n",
		    cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14,
		    cmd->cdw15);
	}
}

struct nvme_status_string {
	uint16_t	sc;
	const char *	str;
};

static struct nvme_status_string generic_status[] = {
	{ NVME_SC_SUCCESS, "SUCCESS" },
	{ NVME_SC_INVALID_OPCODE, "INVALID OPCODE" },
	{ NVME_SC_INVALID_FIELD, "INVALID FIELD" },
	{ NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" },
	{ NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" },
	{ NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" },
	{ NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" },
	{ NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" },
	{ NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" },
	{ NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" },
	{ NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" },
	{ NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" },
	{ NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" },
	{ NVME_SC_INVALID_SGL_SEGMENT_DESCR, "INVALID SGL SEGMENT DESCRIPTOR" },
	{ NVME_SC_INVALID_NUMBER_OF_SGL_DESCR, "INVALID NUMBER OF SGL DESCRIPTORS" },
	{ NVME_SC_DATA_SGL_LENGTH_INVALID, "DATA SGL LENGTH INVALID" },
	{ NVME_SC_METADATA_SGL_LENGTH_INVALID, "METADATA SGL LENGTH INVALID" },
	{ NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID, "SGL DESCRIPTOR TYPE INVALID" },
	{ NVME_SC_INVALID_USE_OF_CMB, "INVALID USE OF CONTROLLER MEMORY BUFFER" },
	{ NVME_SC_PRP_OFFET_INVALID, "PRP OFFSET INVALID" }, /* sic: identifier spelling follows nvme.h */
	{ NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED, "ATOMIC WRITE UNIT EXCEEDED" },
	{ NVME_SC_OPERATION_DENIED, "OPERATION DENIED" },
	{ NVME_SC_SGL_OFFSET_INVALID, "SGL OFFSET INVALID" },
	{ NVME_SC_HOST_ID_INCONSISTENT_FORMAT, "HOST IDENTIFIER INCONSISTENT FORMAT" },
	{ NVME_SC_KEEP_ALIVE_TIMEOUT_EXPIRED, "KEEP ALIVE TIMEOUT EXPIRED" },
	{ NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID, "KEEP ALIVE TIMEOUT INVALID" },
	{ NVME_SC_ABORTED_DUE_TO_PREEMPT, "COMMAND ABORTED DUE TO PREEMPT AND ABORT" },
	{ NVME_SC_SANITIZE_FAILED, "SANITIZE FAILED" },
	{ NVME_SC_SANITIZE_IN_PROGRESS, "SANITIZE IN PROGRESS" },
	{ NVME_SC_SGL_DATA_BLOCK_GRAN_INVALID, "SGL DATA BLOCK GRANULARITY INVALID" },
	{ NVME_SC_NOT_SUPPORTED_IN_CMB, "COMMAND NOT SUPPORTED FOR QUEUE IN CMB" },
	{ NVME_SC_NAMESPACE_IS_WRITE_PROTECTED, "NAMESPACE IS WRITE PROTECTED" },
	{ NVME_SC_COMMAND_INTERRUPTED, "COMMAND INTERRUPTED" },
	{ NVME_SC_TRANSIENT_TRANSPORT_ERROR, "TRANSIENT TRANSPORT ERROR" },

	{ NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" },
	{ NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" },
	{ NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" },
	{ NVME_SC_RESERVATION_CONFLICT, "RESERVATION CONFLICT" },
	{ NVME_SC_FORMAT_IN_PROGRESS, "FORMAT IN PROGRESS" },
	{ 0xFFFF, "GENERIC" }
};

static struct nvme_status_string command_specific_status[] = {
	{ NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" },
	{ NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" },
	{ NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" },
	{ NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" },
	{ NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" },
	{ NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" },
	{ NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" },
	{ NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" },
	{ NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" },
	{ NVME_SC_INVALID_FORMAT, "INVALID FORMAT" },
	{ NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" },
	{ NVME_SC_INVALID_QUEUE_DELETION, "INVALID QUEUE DELETION" },
	{ NVME_SC_FEATURE_NOT_SAVEABLE, "FEATURE IDENTIFIER NOT SAVEABLE" },
	{ NVME_SC_FEATURE_NOT_CHANGEABLE, "FEATURE NOT CHANGEABLE" },
	{ NVME_SC_FEATURE_NOT_NS_SPECIFIC, "FEATURE NOT NAMESPACE SPECIFIC" },
	{ NVME_SC_FW_ACT_REQUIRES_NVMS_RESET, "FIRMWARE ACTIVATION REQUIRES NVM SUBSYSTEM RESET" },
	{ NVME_SC_FW_ACT_REQUIRES_RESET, "FIRMWARE ACTIVATION REQUIRES RESET" },
	{ NVME_SC_FW_ACT_REQUIRES_TIME, "FIRMWARE ACTIVATION REQUIRES MAXIMUM TIME VIOLATION" },
	{ NVME_SC_FW_ACT_PROHIBITED, "FIRMWARE ACTIVATION PROHIBITED" },
	{ NVME_SC_OVERLAPPING_RANGE, "OVERLAPPING RANGE" },
	{ NVME_SC_NS_INSUFFICIENT_CAPACITY, "NAMESPACE INSUFFICIENT CAPACITY" },
	{ NVME_SC_NS_ID_UNAVAILABLE, "NAMESPACE IDENTIFIER UNAVAILABLE" },
	{ NVME_SC_NS_ALREADY_ATTACHED, "NAMESPACE ALREADY ATTACHED" },
	{ NVME_SC_NS_IS_PRIVATE, "NAMESPACE IS PRIVATE" },
	{ NVME_SC_NS_NOT_ATTACHED, "NAMESPACE NOT ATTACHED" },
	{ NVME_SC_THIN_PROV_NOT_SUPPORTED, "THIN PROVISIONING NOT SUPPORTED" },
	{ NVME_SC_CTRLR_LIST_INVALID, "CONTROLLER LIST INVALID" },
	{ NVME_SC_SELF_TEST_IN_PROGRESS, "DEVICE SELF-TEST IN PROGRESS" },
	{ NVME_SC_BOOT_PART_WRITE_PROHIB, "BOOT PARTITION WRITE PROHIBITED" },
	{ NVME_SC_INVALID_CTRLR_ID, "INVALID CONTROLLER IDENTIFIER" },
	{ NVME_SC_INVALID_SEC_CTRLR_STATE, "INVALID SECONDARY CONTROLLER STATE" },
	{ NVME_SC_INVALID_NUM_OF_CTRLR_RESRC, "INVALID NUMBER OF CONTROLLER RESOURCES" },
	{ NVME_SC_INVALID_RESOURCE_ID, "INVALID RESOURCE IDENTIFIER" },
	{ NVME_SC_SANITIZE_PROHIBITED_WPMRE, "SANITIZE PROHIBITED WRITE PERSISTENT MEMORY REGION ENABLED" },
	{ NVME_SC_ANA_GROUP_ID_INVALID, "ANA GROUP IDENTIFIER INVALID" },
	{ NVME_SC_ANA_ATTACH_FAILED, "ANA ATTACH FAILED" },

	{ NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" },
	{ NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" },
	{ NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" },
	{ 0xFFFF, "COMMAND SPECIFIC" }
};

static struct nvme_status_string media_error_status[] = {
	{ NVME_SC_WRITE_FAULTS, "WRITE FAULTS" },
	{ NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" },
	{ NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" },
	{ NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" },
	{ NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" },
	{ NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" },
	{ NVME_SC_ACCESS_DENIED, "ACCESS DENIED" },
	{ NVME_SC_DEALLOCATED_OR_UNWRITTEN, "DEALLOCATED OR UNWRITTEN LOGICAL BLOCK" },
	{ 0xFFFF, "MEDIA ERROR" }
};

static struct nvme_status_string path_related_status[] = {
	{ NVME_SC_INTERNAL_PATH_ERROR, "INTERNAL PATH ERROR" },
	{ NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS, "ASYMMETRIC ACCESS PERSISTENT LOSS" },
	{ NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE, "ASYMMETRIC ACCESS INACCESSIBLE" },
	{ NVME_SC_ASYMMETRIC_ACCESS_TRANSITION, "ASYMMETRIC ACCESS TRANSITION" },
	{ NVME_SC_CONTROLLER_PATHING_ERROR, "CONTROLLER PATHING ERROR" },
	{ NVME_SC_HOST_PATHING_ERROR, "HOST PATHING ERROR" },
	{ NVME_SC_COMMAND_ABORTED_BY_HOST, "COMMAND ABORTED BY HOST" },
	{ 0xFFFF, "PATH RELATED" },
};

static const char *
get_status_string(uint16_t sct, uint16_t sc)
{
	struct nvme_status_string *entry;

	switch (sct) {
	case NVME_SCT_GENERIC:
		entry = generic_status;
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		entry = command_specific_status;
		break;
	case NVME_SCT_MEDIA_ERROR:
		entry = media_error_status;
		break;
	case NVME_SCT_PATH_RELATED:
		entry = path_related_status;
		break;
	case NVME_SCT_VENDOR_SPECIFIC:
		return ("VENDOR SPECIFIC");
	default:
		return ("RESERVED");
	}

	while (entry->sc != 0xFFFF) {
		if (entry->sc == sc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}
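
/*
 * Example (illustrative, not compiled): the status code type (SCT) selects
 * the table and the status code (SC) selects the row; unknown codes fall
 * through to the table's 0xFFFF sentinel entry:
 *
 *	get_status_string(NVME_SCT_GENERIC, NVME_SC_SUCCESS)	-> "SUCCESS"
 *	get_status_string(NVME_SCT_MEDIA_ERROR, 0xff)		-> "MEDIA ERROR"
 *	get_status_string(0x5, 0x00)				-> "RESERVED"
 */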

void
nvme_qpair_print_completion(struct nvme_qpair *qpair,
    struct nvme_completion *cpl)
{
	uint8_t sct, sc, crd, m, dnr, p;

	sct = NVME_STATUS_GET_SCT(cpl->status);
	sc = NVME_STATUS_GET_SC(cpl->status);
	crd = NVME_STATUS_GET_CRD(cpl->status);
	m = NVME_STATUS_GET_M(cpl->status);
	dnr = NVME_STATUS_GET_DNR(cpl->status);
	p = NVME_STATUS_GET_P(cpl->status);

	nvme_printf(qpair->ctrlr, "%s (%02x/%02x) crd:%x m:%x dnr:%x p:%d "
	    "sqid:%d cid:%d cdw0:%x\n",
	    get_status_string(sct, sc), sct, sc, crd, m, dnr, p,
	    cpl->sqid, cpl->cid, cpl->cdw0);
}

static bool
nvme_completion_is_retry(const struct nvme_completion *cpl)
{
	uint8_t sct, sc, dnr;

	sct = NVME_STATUS_GET_SCT(cpl->status);
	sc = NVME_STATUS_GET_SC(cpl->status);
	dnr = NVME_STATUS_GET_DNR(cpl->status);	/* Do Not Retry bit */

	/*
	 * TODO: spec is not clear how commands that are aborted due
	 *  to TLER will be marked.  So for now, it seems
	 *  NAMESPACE_NOT_READY is the only case where we should
	 *  look at the DNR bit. Requests failed with ABORTED_BY_REQUEST
	 *  set the DNR bit correctly since the driver controls that.
	 */
	switch (sct) {
	case NVME_SCT_GENERIC:
		switch (sc) {
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_NAMESPACE_NOT_READY:
			if (dnr)
				return (false);
			else
				return (true);
		case NVME_SC_INVALID_OPCODE:
		case NVME_SC_INVALID_FIELD:
		case NVME_SC_COMMAND_ID_CONFLICT:
		case NVME_SC_DATA_TRANSFER_ERROR:
		case NVME_SC_ABORTED_POWER_LOSS:
		case NVME_SC_INTERNAL_DEVICE_ERROR:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
		case NVME_SC_COMMAND_SEQUENCE_ERROR:
		case NVME_SC_LBA_OUT_OF_RANGE:
		case NVME_SC_CAPACITY_EXCEEDED:
		default:
			return (false);
		}
	case NVME_SCT_COMMAND_SPECIFIC:
	case NVME_SCT_MEDIA_ERROR:
		return (false);
	case NVME_SCT_PATH_RELATED:
		switch (sc) {
		case NVME_SC_INTERNAL_PATH_ERROR:
			if (dnr)
				return (false);
			else
				return (true);
		default:
			return (false);
		}
	case NVME_SCT_VENDOR_SPECIFIC:
	default:
		return (false);
	}
}
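
/*
 * Worked example (illustrative): a READ failing with GENERIC /
 * NAMESPACE_NOT_READY and DNR == 0 is considered transient and is retried
 * until nvme_retry_count is exhausted; the same status with DNR == 1, or
 * any MEDIA_ERROR status, is terminal and the completion is delivered to
 * the caller with the error status:
 *
 *	sct = NVME_SCT_GENERIC, sc = NVME_SC_NAMESPACE_NOT_READY, dnr = 0
 *		-> retry
 *	sct = NVME_SCT_MEDIA_ERROR, sc = NVME_SC_UNRECOVERED_READ_ERROR
 *		-> no retry
 */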

static void
nvme_qpair_complete_tracker(struct nvme_tracker *tr,
    struct nvme_completion *cpl, error_print_t print_on_error)
{
	struct nvme_qpair	*qpair = tr->qpair;
	struct nvme_request	*req;
	bool			retry, error, retriable;

	req = tr->req;
	error = nvme_completion_is_error(cpl);
	retriable = nvme_completion_is_retry(cpl);
	retry = error && retriable && req->retries < nvme_retry_count;
	if (retry)
		qpair->num_retries++;
	if (error && req->retries >= nvme_retry_count && retriable)
		qpair->num_failures++;

	if (error && (print_on_error == ERROR_PRINT_ALL ||
		(!retry && print_on_error == ERROR_PRINT_NO_RETRY))) {
		nvme_qpair_print_command(qpair, &req->cmd);
		nvme_qpair_print_completion(qpair, cpl);
	}

	qpair->act_tr[cpl->cid] = NULL;

	KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid"));

	if (!retry) {
		if (req->payload_valid) {
			bus_dmamap_sync(qpair->dma_tag_payload,
			    tr->payload_dma_map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		}
		if (req->cb_fn)
			req->cb_fn(req->cb_arg, cpl);
	}

	mtx_lock(&qpair->lock);

	if (retry) {
		req->retries++;
		nvme_qpair_submit_tracker(qpair, tr);
	} else {
		if (req->payload_valid) {
			bus_dmamap_unload(qpair->dma_tag_payload,
			    tr->payload_dma_map);
		}

		nvme_free_request(req);
		tr->req = NULL;

		TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq);
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);

		/*
		 * If the controller is in the middle of resetting, don't
		 *  try to submit queued requests here - let the reset logic
		 *  handle that instead.
		 */
		if (!STAILQ_EMPTY(&qpair->queued_req) &&
		    !qpair->ctrlr->is_resetting) {
			req = STAILQ_FIRST(&qpair->queued_req);
			STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
			_nvme_qpair_submit_request(qpair, req);
		}
	}

	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_manual_complete_tracker(
    struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr,
    error_print_t print_on_error)
{
	struct nvme_qpair	*qpair = tr->qpair;
	struct nvme_completion	cpl;

	memset(&cpl, 0, sizeof(cpl));

	cpl.sqid = qpair->id;
	cpl.cid = tr->cid;
	cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT;
	cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;
	cpl.status |= (dnr & NVME_STATUS_DNR_MASK) << NVME_STATUS_DNR_SHIFT;
	/* M=0 : this is artificial so no data in error log page */
	/* CRD=0 : this is artificial and no delayed retry support anyway */
	/* P=0 : phase not checked */
	nvme_qpair_complete_tracker(tr, &cpl, print_on_error);
}
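
/*
 * Worked example (illustrative; assumes the field layout encoded by the
 * NVME_STATUS_*_SHIFT/_MASK definitions in nvme.h: P at bit 0, SC at bits
 * 8:1, SCT at bits 11:9, DNR at bit 15): an artificial ABORTED_SQ_DELETION
 * completion built by this function looks like
 *
 *	sct = NVME_SCT_GENERIC (0x0), sc = NVME_SC_ABORTED_SQ_DELETION (0x08)
 *	cpl.status = (0x0 << 9) | (0x08 << 1) = 0x0010
 *
 * with M, CRD and P left at zero as noted above.
 */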

void
nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
    struct nvme_request *req, uint32_t sct, uint32_t sc)
{
	struct nvme_completion	cpl;
	bool			error;

	memset(&cpl, 0, sizeof(cpl));
	cpl.sqid = qpair->id;
	cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT;
	cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;

	error = nvme_completion_is_error(&cpl);

	if (error) {
		nvme_qpair_print_command(qpair, &req->cmd);
		nvme_qpair_print_completion(qpair, &cpl);
	}

	if (req->cb_fn)
		req->cb_fn(req->cb_arg, &cpl);

	nvme_free_request(req);
}

/* Locked version of the completion processor */
static bool
_nvme_qpair_process_completions(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;
	struct nvme_completion	cpl;
	bool done = false;
	bool in_panic = dumping || SCHEDULER_STOPPED();

	mtx_assert(&qpair->recovery, MA_OWNED);

	/*
	 * If the qpair is not enabled, likely because a controller reset is
	 * in progress, ignore the interrupt - any I/O that was associated
	 * with this interrupt will get retried when the reset is complete.
	 * Any completions pending while we're in startup will be processed as
	 * soon as initialization is complete and we start sending commands to
	 * the device.
	 */
	if (qpair->recovery_state != RECOVERY_NONE) {
		qpair->num_ignored++;
		return (false);
	}

	/*
	 * Sanity check initialization. After we reset the hardware, the phase
	 * is defined to be 1. So if we get here with zero prior calls and the
	 * phase is 0, it means that we've lost a race between the
	 * initialization and the ISR running. With the phase wrong, we'll
	 * process a bunch of completions that aren't really completions,
	 * leading to a KASSERT below.
	 */
	KASSERT(!(qpair->num_intr_handler_calls == 0 && qpair->phase == 0),
	    ("%s: Phase wrong for first interrupt call.",
		device_get_nameunit(qpair->ctrlr->dev)));

	qpair->num_intr_handler_calls++;

	bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * A panic can stop the CPU this routine is running on at any point.  If
	 * we're called during a panic, complete the cq_head wrap protocol for
	 * the case where we are interrupted just after the increment at 1
	 * below, but before we can reset cq_head to zero at 2. Also cope with
	 * the case where we do the zero at 2, but may or may not have done the
	 * phase adjustment at step 3. The panic machinery flushes all pending
	 * memory writes, so we can make these strong ordering assumptions
	 * that would otherwise be unwise if we were racing in real time.
	 */
	if (__predict_false(in_panic)) {
		if (qpair->cq_head == qpair->num_entries) {
			/*
			 * Here we know that the increment at 1 ran but the
			 * atomic_store_rel at 2 did not, and therefore neither
			 * did the phase flip at 3. Zero cq_head and flip the
			 * phase ourselves.
			 */
			qpair->cq_head = 0;
			qpair->phase = !qpair->phase;
		} else if (qpair->cq_head == 0) {
			/*
			 * In this case, we know that the assignment at 2
			 * happened below, but we don't know whether the phase
			 * flip at 3 happened or not. To figure that out, we
			 * look at the last completion entry and set the phase
			 * to the opposite of the phase that it carries. This
			 * gets us back in sync.
			 */
			cpl = qpair->cpl[qpair->num_entries - 1];
			nvme_completion_swapbytes(&cpl);
			qpair->phase = !NVME_STATUS_GET_P(cpl.status);
		}
	}

	while (1) {
		uint16_t status;

		/*
		 * We need to do this dance to avoid a race between the host and
		 * the device where the device overtakes the host while the host
		 * is reading this record, leaving the status field 'new' and
		 * the sqhd and cid fields potentially stale. If the phase
		 * doesn't match, that means status hasn't yet been updated and
		 * we'll get any pending changes next time. It also means that
		 * the phase must be the same the second time. We have to sync
		 * before reading to ensure any bouncing completes.
		 */
		status = le16toh(qpair->cpl[qpair->cq_head].status);
		if (NVME_STATUS_GET_P(status) != qpair->phase)
			break;

		bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		cpl = qpair->cpl[qpair->cq_head];
		nvme_completion_swapbytes(&cpl);

		KASSERT(
		    NVME_STATUS_GET_P(status) == NVME_STATUS_GET_P(cpl.status),
		    ("Phase unexpectedly inconsistent"));

		if (cpl.cid < qpair->num_trackers)
			tr = qpair->act_tr[cpl.cid];
		else
			tr = NULL;

		done = true;
		if (tr != NULL) {
			nvme_qpair_complete_tracker(tr, &cpl, ERROR_PRINT_ALL);
			qpair->sq_head = cpl.sqhd;
		} else if (!in_panic) {
			/*
			 * A missing tracker is normally an error.  However, a
			 * panic can stop the CPU this routine is running on
			 * after completing an I/O but before updating
			 * qpair->cq_head at 1 below.  Later, we re-enter this
			 * routine to poll I/O associated with the kernel
			 * dump. We find that the tr has been set to NULL before
			 * calling the completion routine.  If it hasn't
			 * completed (or it triggers a panic), then '1' below
			 * won't have updated cq_head. Rather than panic again,
			 * ignore this condition because it's not unexpected.
			 */
			nvme_printf(qpair->ctrlr,
			    "cpl (cid = %u) does not map to outstanding cmd\n",
			    cpl.cid);
			nvme_qpair_print_completion(qpair,
			    &qpair->cpl[qpair->cq_head]);
			KASSERT(0, ("received completion for unknown cmd"));
		}

		/*
		 * There are a number of races with the following (see above)
		 * when the system panics. We compensate for each one of them
		 * by using the atomic store to force strong ordering (at
		 * least when viewed in the aftermath of a panic).
		 */
		if (++qpair->cq_head == qpair->num_entries) {		/* 1 */
			atomic_store_rel_int(&qpair->cq_head, 0);	/* 2 */
			qpair->phase = !qpair->phase;			/* 3 */
		}
	}

	if (done) {
		bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle,
		    qpair->cq_hdbl_off, qpair->cq_head);
	}

	return (done);
}
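
/*
 * Worked example (illustrative) of the phase protocol consumed above: for a
 * 4-entry queue freshly reset with qpair->phase == 1, the hardware posts
 * completions with P == 1 until it wraps, then posts with P == 0:
 *
 *	cq_head: 0 1 2 3 -> wrap to 0, host phase flips to 0
 *	entry P: 1 1 1 1 -> subsequent valid entries carry P == 0
 *
 * An entry whose P bit still matches the previous pass is not yet valid,
 * which is exactly the "phase doesn't match -> break" test in the loop.
 */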

bool
nvme_qpair_process_completions(struct nvme_qpair *qpair)
{
	bool done;

	/*
	 * Interlock with the reset / recovery code. This is a usually
	 * uncontended lock taken to make sure that we drain out of the ISRs
	 * before we reset the card and to prevent races with the recovery
	 * process called from a timeout context.
	 */
	if (!mtx_trylock(&qpair->recovery)) {
		qpair->num_recovery_nolock++;
		return (false);
	}

	done = _nvme_qpair_process_completions(qpair);

	mtx_unlock(&qpair->recovery);

	return (done);
}

static void
nvme_qpair_msi_handler(void *arg)
{
	struct nvme_qpair *qpair = arg;

	nvme_qpair_process_completions(qpair);
}

int
nvme_qpair_construct(struct nvme_qpair *qpair,
    uint32_t num_entries, uint32_t num_trackers,
    struct nvme_controller *ctrlr)
{
	struct nvme_tracker	*tr;
	size_t			cmdsz, cplsz, prpsz, allocsz, prpmemsz;
	uint64_t		queuemem_phys, prpmem_phys, list_phys;
	uint8_t			*queuemem, *prpmem, *prp_list;
	int			i, err;

	qpair->vector = ctrlr->msi_count > 1 ? qpair->id : 0;
	qpair->num_entries = num_entries;
	qpair->num_trackers = num_trackers;
	qpair->ctrlr = ctrlr;

	mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF);
	mtx_init(&qpair->recovery, "nvme qpair recovery", NULL, MTX_DEF);

	callout_init_mtx(&qpair->timer, &qpair->recovery, 0);
	qpair->timer_armed = false;
	qpair->recovery_state = RECOVERY_WAITING;

	/* Note: NVMe PRP format is restricted to 4-byte alignment. */
	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    4, ctrlr->page_size, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, ctrlr->max_xfer_size,
	    howmany(ctrlr->max_xfer_size, ctrlr->page_size) + 1,
	    ctrlr->page_size, 0,
	    NULL, NULL, &qpair->dma_tag_payload);
	if (err != 0) {
		nvme_printf(ctrlr, "payload tag create failed %d\n", err);
		goto out;
	}

	/*
	 * Each component must be page aligned, and individual PRP lists
	 * cannot cross a page boundary.
	 */
	cmdsz = qpair->num_entries * sizeof(struct nvme_command);
	cmdsz = roundup2(cmdsz, ctrlr->page_size);
	cplsz = qpair->num_entries * sizeof(struct nvme_completion);
	cplsz = roundup2(cplsz, ctrlr->page_size);
	/*
	 * For commands requiring more than 2 PRP entries, one PRP will be
	 * embedded in the command (prp1), and the rest of the PRP entries
	 * will be in a list pointed to by the command (prp2).
	 */
	prpsz = sizeof(uint64_t) *
	    howmany(ctrlr->max_xfer_size, ctrlr->page_size);
	prpmemsz = qpair->num_trackers * prpsz;
	allocsz = cmdsz + cplsz + prpmemsz;

	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    ctrlr->page_size, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    allocsz, 1, allocsz, 0, NULL, NULL, &qpair->dma_tag);
	if (err != 0) {
		nvme_printf(ctrlr, "tag create failed %d\n", err);
		goto out;
	}
	bus_dma_tag_set_domain(qpair->dma_tag, qpair->domain);

	if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem,
	     BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &qpair->queuemem_map)) {
		nvme_printf(ctrlr, "failed to alloc qpair memory\n");
		goto out;
	}

	if (bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map,
	    queuemem, allocsz, nvme_single_map, &queuemem_phys, 0) != 0) {
		nvme_printf(ctrlr, "failed to load qpair memory\n");
		/* qpair->cmd is not set up yet; free via the local pointer. */
		bus_dmamem_free(qpair->dma_tag, queuemem,
		    qpair->queuemem_map);
		goto out;
	}

	qpair->num_cmds = 0;
	qpair->num_intr_handler_calls = 0;
	qpair->num_retries = 0;
	qpair->num_failures = 0;
	qpair->num_ignored = 0;
	qpair->cmd = (struct nvme_command *)queuemem;
	qpair->cpl = (struct nvme_completion *)(queuemem + cmdsz);
	prpmem = (uint8_t *)(queuemem + cmdsz + cplsz);
	qpair->cmd_bus_addr = queuemem_phys;
	qpair->cpl_bus_addr = queuemem_phys + cmdsz;
	prpmem_phys = queuemem_phys + cmdsz + cplsz;

	/*
	 * Calculate the stride of the doorbell registers. Many emulators set
	 * this value to correspond to a cache line. However, some hardware
	 * has set it to various small values.
	 */
	qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[0]) +
	    (qpair->id << (ctrlr->dstrd + 1));
	qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[0]) +
	    (qpair->id << (ctrlr->dstrd + 1)) + (1 << ctrlr->dstrd);
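
	/*
	 * Worked example (illustrative; assumes ctrlr->dstrd was biased when
	 * CAP was parsed so that (1 << ctrlr->dstrd) is the doorbell stride
	 * in bytes, which is what the (1 << ctrlr->dstrd) term above implies):
	 * with a 4-byte stride (CAP.DSTRD == 0, ctrlr->dstrd == 2) and
	 * doorbell[0] at byte offset 0x1000, qpair 1 gets
	 * sq_tdbl_off = 0x1000 + (1 << 3) = 0x1008 and
	 * cq_hdbl_off = 0x1008 + (1 << 2) = 0x100c, so each qpair owns an
	 * adjacent (SQ tail, CQ head) doorbell pair.
	 */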

	TAILQ_INIT(&qpair->free_tr);
	TAILQ_INIT(&qpair->outstanding_tr);
	STAILQ_INIT(&qpair->queued_req);

	list_phys = prpmem_phys;
	prp_list = prpmem;
	for (i = 0; i < qpair->num_trackers; i++) {
		if (list_phys + prpsz > prpmem_phys + prpmemsz) {
			qpair->num_trackers = i;
			break;
		}

		/*
		 * Make sure that the PRP list for this tracker doesn't
		 * overflow to another nvme page.
		 */
		if (trunc_page(list_phys) !=
		    trunc_page(list_phys + prpsz - 1)) {
			list_phys = roundup2(list_phys, ctrlr->page_size);
			prp_list =
			    (uint8_t *)roundup2((uintptr_t)prp_list, ctrlr->page_size);
		}

		tr = malloc_domainset(sizeof(*tr), M_NVME,
		    DOMAINSET_PREF(qpair->domain), M_ZERO | M_WAITOK);
		bus_dmamap_create(qpair->dma_tag_payload, 0,
		    &tr->payload_dma_map);
		tr->cid = i;
		tr->qpair = qpair;
		tr->prp = (uint64_t *)prp_list;
		tr->prp_bus_addr = list_phys;
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
		list_phys += prpsz;
		prp_list += prpsz;
	}

	if (qpair->num_trackers == 0) {
		nvme_printf(ctrlr, "failed to allocate enough trackers\n");
		goto out;
	}

	qpair->act_tr = malloc_domainset(sizeof(struct nvme_tracker *) *
	    qpair->num_entries, M_NVME, DOMAINSET_PREF(qpair->domain),
	    M_ZERO | M_WAITOK);

	if (ctrlr->msi_count > 1) {
		/*
		 * MSI-X vector resource IDs start at 1, so we add one to
		 *  the queue's vector to get the corresponding rid to use.
		 */
		qpair->rid = qpair->vector + 1;

		qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
		    &qpair->rid, RF_ACTIVE);
		if (qpair->res == NULL) {
			nvme_printf(ctrlr, "unable to allocate MSI\n");
			goto out;
		}
		if (bus_setup_intr(ctrlr->dev, qpair->res,
		    INTR_TYPE_MISC | INTR_MPSAFE, NULL,
		    nvme_qpair_msi_handler, qpair, &qpair->tag) != 0) {
			nvme_printf(ctrlr, "unable to setup MSI\n");
			goto out;
		}
		if (qpair->id == 0) {
			bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
			    "admin");
		} else {
			bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
			    "io%d", qpair->id - 1);
		}
	}

	return (0);

out:
	nvme_qpair_destroy(qpair);
	return (ENOMEM);
}

static void
nvme_qpair_destroy(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	mtx_lock(&qpair->recovery);
	qpair->timer_armed = false;
	mtx_unlock(&qpair->recovery);
	callout_drain(&qpair->timer);

	if (qpair->tag) {
		bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag);
		qpair->tag = NULL;
	}

	if (qpair->act_tr) {
		free(qpair->act_tr, M_NVME);
		qpair->act_tr = NULL;
	}

	while (!TAILQ_EMPTY(&qpair->free_tr)) {
		tr = TAILQ_FIRST(&qpair->free_tr);
		TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
		bus_dmamap_destroy(qpair->dma_tag_payload,
		    tr->payload_dma_map);
		free(tr, M_NVME);
	}

	if (qpair->cmd != NULL) {
		bus_dmamap_unload(qpair->dma_tag, qpair->queuemem_map);
		bus_dmamem_free(qpair->dma_tag, qpair->cmd,
		    qpair->queuemem_map);
		qpair->cmd = NULL;
	}

	if (qpair->dma_tag) {
		bus_dma_tag_destroy(qpair->dma_tag);
		qpair->dma_tag = NULL;
	}

	if (qpair->dma_tag_payload) {
		bus_dma_tag_destroy(qpair->dma_tag_payload);
		qpair->dma_tag_payload = NULL;
	}

	if (mtx_initialized(&qpair->lock))
		mtx_destroy(&qpair->lock);
	if (mtx_initialized(&qpair->recovery))
		mtx_destroy(&qpair->recovery);

	if (qpair->res) {
		bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(qpair->res), qpair->res);
		qpair->res = NULL;
	}
}

static void
nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	/*
	 * nvme_complete_tracker must be called without the qpair lock held.
	 * It takes the lock to adjust the outstanding_tr list, so make sure
	 * we don't have it yet (since this is a general purpose routine). We
	 * take the lock to make the list traversal safe, but have to drop the
	 * lock to complete any AER. We restart the list scan when we do this
	 * to make this safe. There's an interlock with the ISR so we know
	 * this tracker won't be completed twice.
	 */
	mtx_assert(&qpair->lock, MA_NOTOWNED);

	mtx_lock(&qpair->lock);
	tr = TAILQ_FIRST(&qpair->outstanding_tr);
	while (tr != NULL) {
		if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) {
			mtx_unlock(&qpair->lock);
			nvme_qpair_manual_complete_tracker(tr,
			    NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0,
			    ERROR_PRINT_NONE);
			mtx_lock(&qpair->lock);
			tr = TAILQ_FIRST(&qpair->outstanding_tr);
		} else {
			tr = TAILQ_NEXT(tr, tailq);
		}
	}
	mtx_unlock(&qpair->lock);
}

void
nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
{
	mtx_assert(&qpair->lock, MA_NOTOWNED);

	nvme_admin_qpair_abort_aers(qpair);
	nvme_qpair_destroy(qpair);
}

void
nvme_io_qpair_destroy(struct nvme_qpair *qpair)
{

	nvme_qpair_destroy(qpair);
}

static void
nvme_abort_complete(void *arg, const struct nvme_completion *status)
{
	struct nvme_tracker	*tr = arg;

	/*
	 * If cdw0 == 1, the controller was not able to abort the command
	 *  we requested.  We still need to check the active tracker array,
	 *  to cover the race where the I/O timed out at the same time the
	 *  controller was completing it.
	 */
	if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) {
		/*
		 * An I/O has timed out, and the controller was unable to
		 *  abort it for some reason.  Construct a fake completion
		 *  status, and then complete the I/O's tracker manually.
		 */
		nvme_printf(tr->qpair->ctrlr,
		    "abort command failed, aborting command manually\n");
		nvme_qpair_manual_complete_tracker(tr,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 0, ERROR_PRINT_ALL);
	}
}

static void
nvme_qpair_timeout(void *arg)
{
	struct nvme_qpair	*qpair = arg;
	struct nvme_controller	*ctrlr = qpair->ctrlr;
	struct nvme_tracker	*tr;
	sbintime_t		now;
	bool			idle = false;
	bool			needs_reset;
	uint32_t		csts;
	uint8_t			cfs;

	mtx_assert(&qpair->recovery, MA_OWNED);

	/*
	 * If the controller is failed, then stop polling. This ensures that
	 * any failure processing that races with the qpair timeout will fail
	 * safely.
	 */
	if (qpair->ctrlr->is_failed) {
		nvme_printf(qpair->ctrlr,
		    "Failed controller, stopping watchdog timeout.\n");
		qpair->timer_armed = false;
		return;
	}

	/*
	 * Shutdown condition: We set qpair->timer_armed to false in
	 * nvme_qpair_destroy before calling callout_drain. When we call that,
	 * this routine might get called one last time. Exit w/o setting a
	 * timeout. None of the watchdog stuff needs to be done since we're
	 * destroying the qpair.
	 */
	if (!qpair->timer_armed) {
		nvme_printf(qpair->ctrlr,
		    "Timeout fired during nvme_qpair_destroy\n");
		return;
	}

	switch (qpair->recovery_state) {
	case RECOVERY_NONE:
		/*
		 * Read csts to get the value of cfs - the controller fatal
		 * status bit. We bail early and proceed directly to reset if
		 * the register reads all 1's (a likely surprise hot-unplug)
		 * or if the controller fatal status bit is now 1. The latter
		 * is always true when the former is true, but not vice versa.
		 * The intent of the code is that if the card is gone (all
		 * 1's) or we've failed, then try to do a reset (which
		 * sometimes unwedges a card reading all 1's that's not gone
		 * away, but usually doesn't).
		 */
		csts = nvme_mmio_read_4(ctrlr, csts);
		cfs = (csts >> NVME_CSTS_REG_CFS_SHIFT) & NVME_CSTS_REG_CFS_MASK;
		if (csts == NVME_GONE || cfs == 1)
			goto do_reset;

		/*
		 * Process completions. We already have the recovery lock, so
		 * call the locked version.
		 */
		_nvme_qpair_process_completions(qpair);

		/*
		 * Check to see if we need to time out any commands. If we do,
		 * then we also enter a recovery phase.
		 */
		now = getsbinuptime();
		needs_reset = false;
		idle = true;
		mtx_lock(&qpair->lock);
		TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq) {
			/*
			 * Skip async commands, they are posted to the card for
			 * an indefinite amount of time and have no deadline.
			 */
			if (tr->deadline == SBT_MAX)
				continue;
			if (now > tr->deadline) {
				if (tr->req->cb_fn != nvme_abort_complete &&
				    ctrlr->enable_aborts) {
					/*
					 * This isn't an abort command, ask
					 * for a hardware abort.
					 */
					nvme_ctrlr_cmd_abort(ctrlr, tr->cid,
					    qpair->id, nvme_abort_complete, tr);
				} else {
					/*
					 * Otherwise we have a live command in
					 * the card (either one we couldn't
					 * abort, or aborts weren't enabled).
					 * The only safe way to proceed is to do
					 * a reset.
					 */
					needs_reset = true;
				}
			} else {
				idle = false;
			}
		}
		mtx_unlock(&qpair->lock);
		if (!needs_reset)
			break;

		/*
		 * We've had a command timeout that we weren't able to abort.
		 *
		 * If we get here due to a possible surprise hot-unplug event,
		 * then we let nvme_ctrlr_reset confirm and fail the
		 * controller.
		 */
	do_reset:
		nvme_printf(ctrlr, "Resetting controller due to a timeout%s.\n",
		    (csts == 0xffffffff) ? " and possible hot unplug" :
		    (cfs ? " and fatal error status" : ""));
		qpair->recovery_state = RECOVERY_WAITING;
		nvme_ctrlr_reset(ctrlr);
		idle = false;			/* We want to keep polling */
		break;
	case RECOVERY_WAITING:
		/*
		 * These messages aren't interesting while we're suspended. We
		 * put the queues into the waiting state while suspending.
		 * Suspending takes a while, so we'll see these during that
		 * time and they aren't diagnostic. At other times, they
		 * indicate a problem that's worth complaining about.
		 */
		if (!device_is_suspended(ctrlr->dev))
			nvme_printf(ctrlr, "Waiting for reset to complete\n");
		idle = false;		/* We want to keep polling */
		break;
	}

	/*
	 * Rearm the timeout.
	 */
	if (!idle) {
		callout_schedule_sbt(&qpair->timer, SBT_1S / 2, SBT_1S / 2, 0);
	} else {
		qpair->timer_armed = false;
	}
}

/*
 * Submit the tracker to the hardware. Must already be in the
 * outstanding queue when called.
 */
void
nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
{
	struct nvme_request	*req;
	struct nvme_controller	*ctrlr;
	int timeout;

	mtx_assert(&qpair->lock, MA_OWNED);

	req = tr->req;
	req->cmd.cid = tr->cid;
	qpair->act_tr[tr->cid] = tr;
	ctrlr = qpair->ctrlr;

	if (req->timeout) {
		if (req->cb_fn == nvme_completion_poll_cb)
			timeout = 1;
		else
			timeout = ctrlr->timeout_period;
		tr->deadline = getsbinuptime() + timeout * SBT_1S;
		if (!qpair->timer_armed) {
			qpair->timer_armed = true;
			callout_reset_sbt_on(&qpair->timer, SBT_1S / 2, SBT_1S / 2,
			    nvme_qpair_timeout, qpair, qpair->cpu, 0);
		}
	} else
		tr->deadline = SBT_MAX;

	/* Copy the command from the tracker to the submission queue. */
	memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd));

	if (++qpair->sq_tail == qpair->num_entries)
		qpair->sq_tail = 0;

	bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle,
	    qpair->sq_tdbl_off, qpair->sq_tail);
	qpair->num_cmds++;
}
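
/*
 * Illustrative sequence for a queue with num_entries == 4 and sq_tail == 3:
 * the command is copied into cmd[3], sq_tail wraps to 0, the PRE sync
 * flushes the host copy, and writing the new tail (0) to the SQ tail
 * doorbell hands all entries up to, but not including, slot 0 to the
 * controller.
 */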

static void
nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct nvme_tracker	*tr = arg;
	uint32_t		cur_nseg;

	/*
	 * If the mapping operation failed, return immediately.  The caller
	 *  is responsible for detecting the error status and failing the
	 *  tracker manually.
	 */
	if (error != 0) {
		nvme_printf(tr->qpair->ctrlr,
		    "nvme_payload_map err %d\n", error);
		return;
	}

	/*
	 * Note that we specified ctrlr->page_size for alignment and max
	 * segment size when creating the bus dma tags.  So here we can safely
	 * just transfer each segment to its associated PRP entry.
	 */
	tr->req->cmd.prp1 = htole64(seg[0].ds_addr);

	if (nseg == 2) {
		tr->req->cmd.prp2 = htole64(seg[1].ds_addr);
	} else if (nseg > 2) {
		cur_nseg = 1;
		tr->req->cmd.prp2 = htole64((uint64_t)tr->prp_bus_addr);
		while (cur_nseg < nseg) {
			tr->prp[cur_nseg - 1] =
			    htole64((uint64_t)seg[cur_nseg].ds_addr);
			cur_nseg++;
		}
	} else {
		/*
		 * prp2 should not be used by the controller
		 *  since there is only one segment, but set
		 *  to 0 just to be safe.
		 */
		tr->req->cmd.prp2 = 0;
	}

	bus_dmamap_sync(tr->qpair->dma_tag_payload, tr->payload_dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	nvme_qpair_submit_tracker(tr->qpair, tr);
}
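
/*
 * Worked example (illustrative): a 16 KiB page-aligned transfer with a
 * 4 KiB controller page size maps to nseg == 4. prp1 gets the address of
 * segment 0, prp2 points at tr->prp_bus_addr, and the in-memory PRP list
 * holds the addresses of the remaining segments:
 *
 *	tr->prp[0] = seg[1].ds_addr
 *	tr->prp[1] = seg[2].ds_addr
 *	tr->prp[2] = seg[3].ds_addr
 *
 * A two-segment (8 KiB) transfer needs no list; prp2 holds the second
 * segment's address directly.
 */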

static void
_nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{
	struct nvme_tracker	*tr;
	int			err = 0;

	mtx_assert(&qpair->lock, MA_OWNED);

	tr = TAILQ_FIRST(&qpair->free_tr);
	req->qpair = qpair;

	if (tr == NULL || qpair->recovery_state != RECOVERY_NONE) {
		/*
		 * No tracker is available, or the qpair is disabled due to an
		 * in-progress controller-level reset. If we lose the race with
		 * recovery_state, then we may add an extra request to the queue
		 * which will be resubmitted later.  We only set recovery_state
		 * to NONE with qpair->lock also held, so if we observe that the
		 * state is not NONE, we know it can't transition to NONE below
		 * when we've submitted the request to hardware.
		 *
		 * Also, as part of the failure process, we set recovery_state
		 * to RECOVERY_WAITING, so we check here to see if we've failed
		 * the controller. We set it before we call the qpair_fail
		 * functions, which take out the lock before messing with
		 * queued_req. Since we hold that lock, we know it's safe to
		 * either fail directly, or queue the failure should is_failed
		 * be stale. If we lose the race reading is_failed, then
		 * nvme_qpair_fail will fail the queued request.
		 */

		if (qpair->ctrlr->is_failed) {
			/*
			 * The controller has failed, so fail the request.
			 */
			nvme_qpair_manual_complete_request(qpair, req,
			    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST);
		} else {
			/*
			 * Put the request on the qpair's request queue to be
			 *  processed when a tracker frees up via a command
			 *  completion or when the controller reset is
			 *  completed.
			 */
			STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
		}
		return;
	}

	TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
	TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq);
	tr->deadline = SBT_MAX;
	tr->req = req;

	if (!req->payload_valid) {
		nvme_qpair_submit_tracker(tr->qpair, tr);
		return;
	}

	err = bus_dmamap_load_mem(tr->qpair->dma_tag_payload,
	    tr->payload_dma_map, &req->payload, nvme_payload_map, tr, 0);
	if (err != 0) {
		/*
		 * The dmamap operation failed, so we manually fail the
		 *  tracker here with DATA_TRANSFER_ERROR status.
		 *
		 * nvme_qpair_manual_complete_tracker must not be called
		 *  with the qpair lock held.
		 */
		nvme_printf(qpair->ctrlr,
		    "bus_dmamap_load_mem returned 0x%x!\n", err);
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_DATA_TRANSFER_ERROR, DO_NOT_RETRY, ERROR_PRINT_ALL);
		mtx_lock(&qpair->lock);
	}
}

void
nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{

	mtx_lock(&qpair->lock);
	_nvme_qpair_submit_request(qpair, req);
	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_enable(struct nvme_qpair *qpair)
{
	if (mtx_initialized(&qpair->recovery))
		mtx_assert(&qpair->recovery, MA_OWNED);
	if (mtx_initialized(&qpair->lock))
		mtx_assert(&qpair->lock, MA_OWNED);
	KASSERT(!qpair->ctrlr->is_failed,
	    ("Enabling a failed qpair"));

	qpair->recovery_state = RECOVERY_NONE;
}

void
nvme_qpair_reset(struct nvme_qpair *qpair)
{

	qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;

	/*
	 * The first time through the completion queue, HW will set the phase
	 *  bit on completions to 1.  So set this to 1 here, indicating
	 *  we're looking for a 1 to know which entries have completed.
	 *  We'll toggle the bit each time the completion queue rolls over.
	 */
	qpair->phase = 1;

	memset(qpair->cmd, 0,
	    qpair->num_entries * sizeof(struct nvme_command));
	memset(qpair->cpl, 0,
	    qpair->num_entries * sizeof(struct nvme_completion));
}

void
nvme_admin_qpair_enable(struct nvme_qpair *qpair)
{
	struct nvme_tracker		*tr;
	struct nvme_tracker		*tr_temp;
	bool				rpt;

	/*
	 * Manually abort each outstanding admin command.  Do not retry
	 * admin commands found here, since they will be left over from
	 * a controller reset and it's likely the context in which the
	 * command was issued no longer applies.
	 */
	rpt = !TAILQ_EMPTY(&qpair->outstanding_tr);
	if (rpt)
		nvme_printf(qpair->ctrlr,
		    "aborting outstanding admin commands\n");
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL);
	}
	if (rpt)
		nvme_printf(qpair->ctrlr,
		    "done aborting outstanding admin commands\n");

	mtx_lock(&qpair->recovery);
	mtx_lock(&qpair->lock);
	nvme_qpair_enable(qpair);
	mtx_unlock(&qpair->lock);
	mtx_unlock(&qpair->recovery);
}

void
nvme_io_qpair_enable(struct nvme_qpair *qpair)
{
	STAILQ_HEAD(, nvme_request)	temp;
	struct nvme_tracker		*tr;
	struct nvme_tracker		*tr_temp;
	struct nvme_request		*req;
	bool				report;

	/*
	 * Manually abort each outstanding I/O.  This normally results in a
	 * retry, unless the retry count on the associated request has
	 * reached its limit.
	 */
	report = !TAILQ_EMPTY(&qpair->outstanding_tr);
	if (report)
		nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n");
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, 0, ERROR_PRINT_NO_RETRY);
	}
	if (report)
		nvme_printf(qpair->ctrlr, "done aborting outstanding i/o\n");

	mtx_lock(&qpair->recovery);
	mtx_lock(&qpair->lock);
	nvme_qpair_enable(qpair);

	STAILQ_INIT(&temp);
	STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request);

	report = !STAILQ_EMPTY(&temp);
	if (report)
		nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n");
	while (!STAILQ_EMPTY(&temp)) {
		req = STAILQ_FIRST(&temp);
		STAILQ_REMOVE_HEAD(&temp, stailq);
		nvme_qpair_print_command(qpair, &req->cmd);
		_nvme_qpair_submit_request(qpair, req);
	}
	if (report)
		nvme_printf(qpair->ctrlr, "done resubmitting i/o\n");

	mtx_unlock(&qpair->lock);
	mtx_unlock(&qpair->recovery);
}

static void
nvme_qpair_disable(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr, *tr_temp;

	if (mtx_initialized(&qpair->recovery))
		mtx_assert(&qpair->recovery, MA_OWNED);
	if (mtx_initialized(&qpair->lock))
		mtx_assert(&qpair->lock, MA_OWNED);

	qpair->recovery_state = RECOVERY_WAITING;
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		tr->deadline = SBT_MAX;
	}
}

void
nvme_admin_qpair_disable(struct nvme_qpair *qpair)
{
	mtx_lock(&qpair->recovery);

	mtx_lock(&qpair->lock);
	nvme_qpair_disable(qpair);
	mtx_unlock(&qpair->lock);

	nvme_admin_qpair_abort_aers(qpair);

	mtx_unlock(&qpair->recovery);
}

void
nvme_io_qpair_disable(struct nvme_qpair *qpair)
{
	mtx_lock(&qpair->recovery);
	mtx_lock(&qpair->lock);

	nvme_qpair_disable(qpair);

	mtx_unlock(&qpair->lock);
	mtx_unlock(&qpair->recovery);
}

void
nvme_qpair_fail(struct nvme_qpair *qpair)
{
	struct nvme_tracker		*tr;
	struct nvme_request		*req;

	if (!mtx_initialized(&qpair->lock))
		return;

	mtx_lock(&qpair->lock);

	if (!STAILQ_EMPTY(&qpair->queued_req)) {
		nvme_printf(qpair->ctrlr, "failing queued i/o\n");
	}
	while (!STAILQ_EMPTY(&qpair->queued_req)) {
		req = STAILQ_FIRST(&qpair->queued_req);
		STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST);
		mtx_lock(&qpair->lock);
	}

	if (!TAILQ_EMPTY(&qpair->outstanding_tr)) {
		nvme_printf(qpair->ctrlr, "failing outstanding i/o\n");
	}
	/* Manually abort each outstanding I/O. */
	while (!TAILQ_EMPTY(&qpair->outstanding_tr)) {
		tr = TAILQ_FIRST(&qpair->outstanding_tr);
		/*
		 * Do not remove the tracker.  The abort_tracker path will
		 *  do that for us.
		 */
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL);
		mtx_lock(&qpair->lock);
	}

	mtx_unlock(&qpair->lock);
}
1539