xref: /freebsd/sys/dev/nvme/nvme_qpair.c (revision 587aa25525e54ea775298c402acd7a647f9838fb)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/proc.h>

#include <dev/pci/pcivar.h>

#include "nvme_private.h"

typedef enum error_print {
	ERROR_PRINT_NONE,
	ERROR_PRINT_NO_RETRY,
	ERROR_PRINT_ALL
} error_print_t;
#define DO_NOT_RETRY	1
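
/*
 * error_print_t selects how much logging the completion paths below do:
 * ERROR_PRINT_NONE suppresses logging entirely, ERROR_PRINT_NO_RETRY logs
 * only errors that will not be retried, and ERROR_PRINT_ALL logs every error
 * completion.  DO_NOT_RETRY is passed as the DNR bit when manually completing
 * a tracker whose command must not be resubmitted.
 */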

static void	_nvme_qpair_submit_request(struct nvme_qpair *qpair,
					   struct nvme_request *req);
static void	nvme_qpair_destroy(struct nvme_qpair *qpair);

struct nvme_opcode_string {
	uint16_t	opc;
	const char *	str;
};

static struct nvme_opcode_string admin_opcode[] = {
	{ NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" },
	{ NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" },
	{ NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" },
	{ NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" },
	{ NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" },
	{ NVME_OPC_IDENTIFY, "IDENTIFY" },
	{ NVME_OPC_ABORT, "ABORT" },
	{ NVME_OPC_SET_FEATURES, "SET FEATURES" },
	{ NVME_OPC_GET_FEATURES, "GET FEATURES" },
	{ NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" },
	{ NVME_OPC_FIRMWARE_ACTIVATE, "FIRMWARE ACTIVATE" },
	{ NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" },
	{ NVME_OPC_DEVICE_SELF_TEST, "DEVICE SELF-TEST" },
	{ NVME_OPC_NAMESPACE_ATTACHMENT, "NAMESPACE ATTACHMENT" },
	{ NVME_OPC_KEEP_ALIVE, "KEEP ALIVE" },
	{ NVME_OPC_DIRECTIVE_SEND, "DIRECTIVE SEND" },
	{ NVME_OPC_DIRECTIVE_RECEIVE, "DIRECTIVE RECEIVE" },
	{ NVME_OPC_VIRTUALIZATION_MANAGEMENT, "VIRTUALIZATION MANAGEMENT" },
	{ NVME_OPC_NVME_MI_SEND, "NVME-MI SEND" },
	{ NVME_OPC_NVME_MI_RECEIVE, "NVME-MI RECEIVE" },
	{ NVME_OPC_DOORBELL_BUFFER_CONFIG, "DOORBELL BUFFER CONFIG" },
	{ NVME_OPC_FORMAT_NVM, "FORMAT NVM" },
	{ NVME_OPC_SECURITY_SEND, "SECURITY SEND" },
	{ NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" },
	{ NVME_OPC_SANITIZE, "SANITIZE" },
	{ NVME_OPC_GET_LBA_STATUS, "GET LBA STATUS" },
	{ 0xFFFF, "ADMIN COMMAND" }
};

static struct nvme_opcode_string io_opcode[] = {
	{ NVME_OPC_FLUSH, "FLUSH" },
	{ NVME_OPC_WRITE, "WRITE" },
	{ NVME_OPC_READ, "READ" },
	{ NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" },
	{ NVME_OPC_COMPARE, "COMPARE" },
	{ NVME_OPC_WRITE_ZEROES, "WRITE ZEROES" },
	{ NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" },
	{ NVME_OPC_VERIFY, "VERIFY" },
	{ NVME_OPC_RESERVATION_REGISTER, "RESERVATION REGISTER" },
	{ NVME_OPC_RESERVATION_REPORT, "RESERVATION REPORT" },
	{ NVME_OPC_RESERVATION_ACQUIRE, "RESERVATION ACQUIRE" },
	{ NVME_OPC_RESERVATION_RELEASE, "RESERVATION RELEASE" },
	{ 0xFFFF, "IO COMMAND" }
};

static const char *
get_admin_opcode_string(uint16_t opc)
{
	struct nvme_opcode_string *entry;

	entry = admin_opcode;

	while (entry->opc != 0xFFFF) {
		if (entry->opc == opc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}

static const char *
get_io_opcode_string(uint16_t opc)
{
	struct nvme_opcode_string *entry;

	entry = io_opcode;

	while (entry->opc != 0xFFFF) {
		if (entry->opc == opc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}

static void
nvme_admin_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

	nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x "
	    "cdw10:%08x cdw11:%08x\n",
	    get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid,
	    le32toh(cmd->nsid), le32toh(cmd->cdw10), le32toh(cmd->cdw11));
}

static void
nvme_io_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

	switch (cmd->opc) {
	case NVME_OPC_WRITE:
	case NVME_OPC_READ:
	case NVME_OPC_WRITE_UNCORRECTABLE:
	case NVME_OPC_COMPARE:
	case NVME_OPC_WRITE_ZEROES:
	case NVME_OPC_VERIFY:
		nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d "
		    "lba:%llu len:%d\n",
		    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid),
		    ((unsigned long long)le32toh(cmd->cdw11) << 32) + le32toh(cmd->cdw10),
		    (le32toh(cmd->cdw12) & 0xFFFF) + 1);
		break;
	case NVME_OPC_FLUSH:
	case NVME_OPC_DATASET_MANAGEMENT:
	case NVME_OPC_RESERVATION_REGISTER:
	case NVME_OPC_RESERVATION_REPORT:
	case NVME_OPC_RESERVATION_ACQUIRE:
	case NVME_OPC_RESERVATION_RELEASE:
		nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n",
		    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid));
		break;
	default:
		nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n",
		    get_io_opcode_string(cmd->opc), cmd->opc, qpair->id,
		    cmd->cid, le32toh(cmd->nsid));
		break;
	}
}

static void
nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd)
{
	if (qpair->id == 0)
		nvme_admin_qpair_print_command(qpair, cmd);
	else
		nvme_io_qpair_print_command(qpair, cmd);
	if (nvme_verbose_cmd_dump) {
		nvme_printf(qpair->ctrlr,
		    "nsid:%#x rsvd2:%#x rsvd3:%#x mptr:%#jx prp1:%#jx prp2:%#jx\n",
		    cmd->nsid, cmd->rsvd2, cmd->rsvd3, (uintmax_t)cmd->mptr,
		    (uintmax_t)cmd->prp1, (uintmax_t)cmd->prp2);
		nvme_printf(qpair->ctrlr,
		    "cdw10: %#x cdw11:%#x cdw12:%#x cdw13:%#x cdw14:%#x cdw15:%#x\n",
		    cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14,
		    cmd->cdw15);
	}
}

struct nvme_status_string {
	uint16_t	sc;
	const char *	str;
};

static struct nvme_status_string generic_status[] = {
	{ NVME_SC_SUCCESS, "SUCCESS" },
	{ NVME_SC_INVALID_OPCODE, "INVALID OPCODE" },
	{ NVME_SC_INVALID_FIELD, "INVALID FIELD" },
	{ NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" },
	{ NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" },
	{ NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" },
	{ NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" },
	{ NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" },
	{ NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" },
	{ NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" },
	{ NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" },
	{ NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" },
	{ NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" },
	{ NVME_SC_INVALID_SGL_SEGMENT_DESCR, "INVALID SGL SEGMENT DESCRIPTOR" },
	{ NVME_SC_INVALID_NUMBER_OF_SGL_DESCR, "INVALID NUMBER OF SGL DESCRIPTORS" },
	{ NVME_SC_DATA_SGL_LENGTH_INVALID, "DATA SGL LENGTH INVALID" },
	{ NVME_SC_METADATA_SGL_LENGTH_INVALID, "METADATA SGL LENGTH INVALID" },
	{ NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID, "SGL DESCRIPTOR TYPE INVALID" },
	{ NVME_SC_INVALID_USE_OF_CMB, "INVALID USE OF CONTROLLER MEMORY BUFFER" },
	{ NVME_SC_PRP_OFFET_INVALID, "PRP OFFSET INVALID" },
	{ NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED, "ATOMIC WRITE UNIT EXCEEDED" },
	{ NVME_SC_OPERATION_DENIED, "OPERATION DENIED" },
	{ NVME_SC_SGL_OFFSET_INVALID, "SGL OFFSET INVALID" },
	{ NVME_SC_HOST_ID_INCONSISTENT_FORMAT, "HOST IDENTIFIER INCONSISTENT FORMAT" },
	{ NVME_SC_KEEP_ALIVE_TIMEOUT_EXPIRED, "KEEP ALIVE TIMEOUT EXPIRED" },
	{ NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID, "KEEP ALIVE TIMEOUT INVALID" },
	{ NVME_SC_ABORTED_DUE_TO_PREEMPT, "COMMAND ABORTED DUE TO PREEMPT AND ABORT" },
	{ NVME_SC_SANITIZE_FAILED, "SANITIZE FAILED" },
	{ NVME_SC_SANITIZE_IN_PROGRESS, "SANITIZE IN PROGRESS" },
	{ NVME_SC_SGL_DATA_BLOCK_GRAN_INVALID, "SGL DATA BLOCK GRANULARITY INVALID" },
	{ NVME_SC_NOT_SUPPORTED_IN_CMB, "COMMAND NOT SUPPORTED FOR QUEUE IN CMB" },
	{ NVME_SC_NAMESPACE_IS_WRITE_PROTECTED, "NAMESPACE IS WRITE PROTECTED" },
	{ NVME_SC_COMMAND_INTERRUPTED, "COMMAND INTERRUPTED" },
	{ NVME_SC_TRANSIENT_TRANSPORT_ERROR, "TRANSIENT TRANSPORT ERROR" },

	{ NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" },
	{ NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" },
	{ NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" },
	{ NVME_SC_RESERVATION_CONFLICT, "RESERVATION CONFLICT" },
	{ NVME_SC_FORMAT_IN_PROGRESS, "FORMAT IN PROGRESS" },
	{ 0xFFFF, "GENERIC" }
};

static struct nvme_status_string command_specific_status[] = {
	{ NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" },
	{ NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" },
	{ NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" },
	{ NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" },
	{ NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" },
	{ NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" },
	{ NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" },
	{ NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" },
	{ NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" },
	{ NVME_SC_INVALID_FORMAT, "INVALID FORMAT" },
	{ NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" },
	{ NVME_SC_INVALID_QUEUE_DELETION, "INVALID QUEUE DELETION" },
	{ NVME_SC_FEATURE_NOT_SAVEABLE, "FEATURE IDENTIFIER NOT SAVEABLE" },
	{ NVME_SC_FEATURE_NOT_CHANGEABLE, "FEATURE NOT CHANGEABLE" },
	{ NVME_SC_FEATURE_NOT_NS_SPECIFIC, "FEATURE NOT NAMESPACE SPECIFIC" },
	{ NVME_SC_FW_ACT_REQUIRES_NVMS_RESET, "FIRMWARE ACTIVATION REQUIRES NVM SUBSYSTEM RESET" },
	{ NVME_SC_FW_ACT_REQUIRES_RESET, "FIRMWARE ACTIVATION REQUIRES RESET" },
	{ NVME_SC_FW_ACT_REQUIRES_TIME, "FIRMWARE ACTIVATION REQUIRES MAXIMUM TIME VIOLATION" },
	{ NVME_SC_FW_ACT_PROHIBITED, "FIRMWARE ACTIVATION PROHIBITED" },
	{ NVME_SC_OVERLAPPING_RANGE, "OVERLAPPING RANGE" },
	{ NVME_SC_NS_INSUFFICIENT_CAPACITY, "NAMESPACE INSUFFICIENT CAPACITY" },
	{ NVME_SC_NS_ID_UNAVAILABLE, "NAMESPACE IDENTIFIER UNAVAILABLE" },
	{ NVME_SC_NS_ALREADY_ATTACHED, "NAMESPACE ALREADY ATTACHED" },
	{ NVME_SC_NS_IS_PRIVATE, "NAMESPACE IS PRIVATE" },
	{ NVME_SC_NS_NOT_ATTACHED, "NS NOT ATTACHED" },
	{ NVME_SC_THIN_PROV_NOT_SUPPORTED, "THIN PROVISIONING NOT SUPPORTED" },
	{ NVME_SC_CTRLR_LIST_INVALID, "CONTROLLER LIST INVALID" },
	{ NVME_SC_SELF_TEST_IN_PROGRESS, "DEVICE SELF-TEST IN PROGRESS" },
	{ NVME_SC_BOOT_PART_WRITE_PROHIB, "BOOT PARTITION WRITE PROHIBITED" },
	{ NVME_SC_INVALID_CTRLR_ID, "INVALID CONTROLLER IDENTIFIER" },
	{ NVME_SC_INVALID_SEC_CTRLR_STATE, "INVALID SECONDARY CONTROLLER STATE" },
	{ NVME_SC_INVALID_NUM_OF_CTRLR_RESRC, "INVALID NUMBER OF CONTROLLER RESOURCES" },
	{ NVME_SC_INVALID_RESOURCE_ID, "INVALID RESOURCE IDENTIFIER" },
	{ NVME_SC_SANITIZE_PROHIBITED_WPMRE, "SANITIZE PROHIBITED WRITE PERSISTENT MEMORY REGION ENABLED" },
	{ NVME_SC_ANA_GROUP_ID_INVALID, "ANA GROUP IDENTIFIER INVALID" },
	{ NVME_SC_ANA_ATTACH_FAILED, "ANA ATTACH FAILED" },

	{ NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" },
	{ NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" },
	{ NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" },
	{ 0xFFFF, "COMMAND SPECIFIC" }
};

static struct nvme_status_string media_error_status[] = {
	{ NVME_SC_WRITE_FAULTS, "WRITE FAULTS" },
	{ NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" },
	{ NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" },
	{ NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" },
	{ NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" },
	{ NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" },
	{ NVME_SC_ACCESS_DENIED, "ACCESS DENIED" },
	{ NVME_SC_DEALLOCATED_OR_UNWRITTEN, "DEALLOCATED OR UNWRITTEN LOGICAL BLOCK" },
	{ 0xFFFF, "MEDIA ERROR" }
};

static struct nvme_status_string path_related_status[] = {
	{ NVME_SC_INTERNAL_PATH_ERROR, "INTERNAL PATH ERROR" },
	{ NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS, "ASYMMETRIC ACCESS PERSISTENT LOSS" },
	{ NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE, "ASYMMETRIC ACCESS INACCESSIBLE" },
	{ NVME_SC_ASYMMETRIC_ACCESS_TRANSITION, "ASYMMETRIC ACCESS TRANSITION" },
	{ NVME_SC_CONTROLLER_PATHING_ERROR, "CONTROLLER PATHING ERROR" },
	{ NVME_SC_HOST_PATHING_ERROR, "HOST PATHING ERROR" },
	{ NVME_SC_COMMAND_ABOTHED_BY_HOST, "COMMAND ABORTED BY HOST" },
	{ 0xFFFF, "PATH RELATED" },
};

static const char *
get_status_string(uint16_t sct, uint16_t sc)
{
	struct nvme_status_string *entry;

	switch (sct) {
	case NVME_SCT_GENERIC:
		entry = generic_status;
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		entry = command_specific_status;
		break;
	case NVME_SCT_MEDIA_ERROR:
		entry = media_error_status;
		break;
	case NVME_SCT_PATH_RELATED:
		entry = path_related_status;
		break;
	case NVME_SCT_VENDOR_SPECIFIC:
		return ("VENDOR SPECIFIC");
	default:
		return ("RESERVED");
	}

	while (entry->sc != 0xFFFF) {
		if (entry->sc == sc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}
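
/*
 * Example: get_status_string(NVME_SCT_GENERIC, NVME_SC_SUCCESS) returns
 * "SUCCESS"; a status code with no table entry falls through to the 0xFFFF
 * catch-all and returns that table's generic name ("GENERIC",
 * "COMMAND SPECIFIC", etc.).
 */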

static void
nvme_qpair_print_completion(struct nvme_qpair *qpair,
    struct nvme_completion *cpl)
{
	uint16_t sct, sc;

	sct = NVME_STATUS_GET_SCT(cpl->status);
	sc = NVME_STATUS_GET_SC(cpl->status);

	nvme_printf(qpair->ctrlr, "%s (%02x/%02x) sqid:%d cid:%d cdw0:%x\n",
	    get_status_string(sct, sc), sct, sc, cpl->sqid, cpl->cid,
	    cpl->cdw0);
}

static bool
nvme_completion_is_retry(const struct nvme_completion *cpl)
{
	uint8_t sct, sc, dnr;

	sct = NVME_STATUS_GET_SCT(cpl->status);
	sc = NVME_STATUS_GET_SC(cpl->status);
	dnr = NVME_STATUS_GET_DNR(cpl->status);	/* Do Not Retry Bit */

	/*
	 * TODO: spec is not clear how commands that are aborted due
	 *  to TLER will be marked.  So for now, it seems
	 *  NAMESPACE_NOT_READY is the only case where we should
	 *  look at the DNR bit. Requests failed with ABORTED_BY_REQUEST
	 *  set the DNR bit correctly since the driver controls that.
	 */
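	/*
	 * Illustration of the policy below: a NAMESPACE_NOT_READY completion
	 * with DNR clear is retried (the namespace may become ready), while
	 * the same status with DNR set fails immediately.  Command-specific
	 * and media errors are never retried.
	 */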
	switch (sct) {
	case NVME_SCT_GENERIC:
		switch (sc) {
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_NAMESPACE_NOT_READY:
			if (dnr)
				return (0);
			else
				return (1);
		case NVME_SC_INVALID_OPCODE:
		case NVME_SC_INVALID_FIELD:
		case NVME_SC_COMMAND_ID_CONFLICT:
		case NVME_SC_DATA_TRANSFER_ERROR:
		case NVME_SC_ABORTED_POWER_LOSS:
		case NVME_SC_INTERNAL_DEVICE_ERROR:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
		case NVME_SC_COMMAND_SEQUENCE_ERROR:
		case NVME_SC_LBA_OUT_OF_RANGE:
		case NVME_SC_CAPACITY_EXCEEDED:
		default:
			return (0);
		}
	case NVME_SCT_COMMAND_SPECIFIC:
	case NVME_SCT_MEDIA_ERROR:
		return (0);
	case NVME_SCT_PATH_RELATED:
		switch (sc) {
		case NVME_SC_INTERNAL_PATH_ERROR:
			if (dnr)
				return (0);
			else
				return (1);
		default:
			return (0);
		}
	case NVME_SCT_VENDOR_SPECIFIC:
	default:
		return (0);
	}
}

static void
nvme_qpair_complete_tracker(struct nvme_tracker *tr,
    struct nvme_completion *cpl, error_print_t print_on_error)
{
	struct nvme_qpair	*qpair = tr->qpair;
	struct nvme_request	*req;
	bool			retry, error, retriable;

	req = tr->req;
	error = nvme_completion_is_error(cpl);
	retriable = nvme_completion_is_retry(cpl);
	retry = error && retriable && req->retries < nvme_retry_count;
	if (retry)
		qpair->num_retries++;
	if (error && req->retries >= nvme_retry_count && retriable)
		qpair->num_failures++;

	if (error && (print_on_error == ERROR_PRINT_ALL ||
		(!retry && print_on_error == ERROR_PRINT_NO_RETRY))) {
		nvme_qpair_print_command(qpair, &req->cmd);
		nvme_qpair_print_completion(qpair, cpl);
	}

	qpair->act_tr[cpl->cid] = NULL;

	KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n"));

	if (!retry) {
		if (req->type != NVME_REQUEST_NULL) {
			bus_dmamap_sync(qpair->dma_tag_payload,
			    tr->payload_dma_map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		}
		if (req->cb_fn)
			req->cb_fn(req->cb_arg, cpl);
	}

	mtx_lock(&qpair->lock);

	if (retry) {
		req->retries++;
		nvme_qpair_submit_tracker(qpair, tr);
	} else {
		if (req->type != NVME_REQUEST_NULL) {
			bus_dmamap_unload(qpair->dma_tag_payload,
			    tr->payload_dma_map);
		}

		nvme_free_request(req);
		tr->req = NULL;

		TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq);
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);

		/*
		 * If the controller is in the middle of resetting, don't
		 *  try to submit queued requests here - let the reset logic
		 *  handle that instead.
		 */
		if (!STAILQ_EMPTY(&qpair->queued_req) &&
		    !qpair->ctrlr->is_resetting) {
			req = STAILQ_FIRST(&qpair->queued_req);
			STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
			_nvme_qpair_submit_request(qpair, req);
		}
	}

	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_manual_complete_tracker(
    struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr,
    error_print_t print_on_error)
{
	struct nvme_completion	cpl;

	memset(&cpl, 0, sizeof(cpl));

	struct nvme_qpair *qpair = tr->qpair;

	cpl.sqid = qpair->id;
	cpl.cid = tr->cid;
	cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT;
	cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;
	cpl.status |= (dnr & NVME_STATUS_DNR_MASK) << NVME_STATUS_DNR_SHIFT;
	nvme_qpair_complete_tracker(tr, &cpl, print_on_error);
}
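
/*
 * A sketch of the status word this builds, assuming the field layout from
 * nvme.h (SC at bit 1, SCT at bit 9, DNR at bit 15; the phase bit stays 0):
 * for sct = NVME_SCT_GENERIC (0), sc = NVME_SC_ABORTED_BY_REQUEST (0x07) and
 * dnr = 1, cpl.status becomes (1 << 15) | (0x07 << 1) == 0x800e.
 */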

void
nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
    struct nvme_request *req, uint32_t sct, uint32_t sc)
{
	struct nvme_completion	cpl;
	bool			error;

	memset(&cpl, 0, sizeof(cpl));
	cpl.sqid = qpair->id;
	cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT;
	cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;

	error = nvme_completion_is_error(&cpl);

	if (error) {
		nvme_qpair_print_command(qpair, &req->cmd);
		nvme_qpair_print_completion(qpair, &cpl);
	}

	if (req->cb_fn)
		req->cb_fn(req->cb_arg, &cpl);

	nvme_free_request(req);
}

bool
nvme_qpair_process_completions(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;
	struct nvme_completion	cpl;
	int done = 0;
	bool in_panic = dumping || SCHEDULER_STOPPED();

	/*
	 * The qpair is not enabled, likely because a controller reset is in
	 * progress.  Ignore the interrupt - any I/O that was associated with
	 * this interrupt will get retried when the reset is complete.  Any
	 * completions pending during startup will be processed as soon as
	 * initialization is complete and we start sending commands to the
	 * device.
	 */
	if (qpair->recovery_state != RECOVERY_NONE) {
		qpair->num_ignored++;
		return (false);
	}

	/*
	 * Sanity check initialization. After we reset the hardware, the phase
	 * is defined to be 1. So if we get here with zero prior calls and the
	 * phase is 0, it means that we've lost a race between the
	 * initialization and the ISR running. With the phase wrong, we'll
	 * process a bunch of completions that aren't really completions,
	 * leading to a KASSERT below.
	 */
	KASSERT(!(qpair->num_intr_handler_calls == 0 && qpair->phase == 0),
	    ("%s: Phase wrong for first interrupt call.",
		device_get_nameunit(qpair->ctrlr->dev)));

	qpair->num_intr_handler_calls++;

	bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * A panic can stop the CPU this routine is running on at any point.  If
	 * we're called during a panic, complete the cq_head wrap protocol for
	 * the case where we are interrupted just after the increment at 1
	 * below, but before we can reset cq_head to zero at 2. Also cope with
	 * the case where we do the zero at 2, but may or may not have done the
	 * phase adjustment at step 3. The panic machinery flushes all pending
	 * memory writes, so we can make these strong ordering assumptions
	 * that would otherwise be unwise if we were racing in real time.
	 */
	if (__predict_false(in_panic)) {
		if (qpair->cq_head == qpair->num_entries) {
			/*
			 * The increment at 1 ran past the end of the queue,
			 * but the atomic_store_rel at 2 (and therefore the
			 * phase flip at 3) never happened.  Do both here.
			 */
			qpair->cq_head = 0;
			qpair->phase = !qpair->phase;
		} else if (qpair->cq_head == 0) {
			/*
			 * In this case, we know that the zeroing at 2
			 * happened below, but we don't know whether the phase
			 * flip at 3 did.  To find out, we look at the last
			 * completion entry and set the phase to the opposite
			 * of the phase recorded there.  This gets us back in
			 * sync.
			 */
			cpl = qpair->cpl[qpair->num_entries - 1];
			nvme_completion_swapbytes(&cpl);
			qpair->phase = !NVME_STATUS_GET_P(cpl.status);
		}
	}

	while (1) {
		uint16_t status;

		/*
		 * We need to do this dance to avoid a race between the host and
		 * the device where the device overtakes the host while the host
		 * is reading this record, leaving the status field 'new' and
		 * the sqhd and cid fields potentially stale. If the phase
		 * doesn't match, that means status hasn't yet been updated and
		 * we'll get any pending changes next time. It also means that
		 * the phase must be the same the second time. We have to sync
		 * before reading to ensure any bouncing completes.
		 */
		status = le16toh(qpair->cpl[qpair->cq_head].status);
		if (NVME_STATUS_GET_P(status) != qpair->phase)
			break;

		bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		cpl = qpair->cpl[qpair->cq_head];
		nvme_completion_swapbytes(&cpl);

		KASSERT(
		    NVME_STATUS_GET_P(status) == NVME_STATUS_GET_P(cpl.status),
		    ("Phase unexpectedly inconsistent"));

		tr = qpair->act_tr[cpl.cid];

		if (tr != NULL) {
			nvme_qpair_complete_tracker(tr, &cpl, ERROR_PRINT_ALL);
			qpair->sq_head = cpl.sqhd;
			done++;
		} else if (!in_panic) {
			/*
			 * A missing tracker is normally an error.  However, a
			 * panic can stop the CPU this routine is running on
			 * after completing an I/O but before updating
			 * qpair->cq_head at 1 below.  Later, we re-enter this
			 * routine to poll I/O associated with the kernel
			 * dump.  We find that the tracker was already set to
			 * NULL before the completion routine was called.  If
			 * the command hadn't completed (or it triggered a
			 * panic), then 1 below won't have updated cq_head.
			 * Rather than panic again, ignore this condition
			 * because it's not unexpected.
			 */
			nvme_printf(qpair->ctrlr,
			    "cpl does not map to outstanding cmd\n");
			/* nvme_dump_completion expects device endianness */
			nvme_dump_completion(&qpair->cpl[qpair->cq_head]);
			KASSERT(0, ("received completion for unknown cmd"));
		}

		/*
		 * There are a number of races with the following (see above)
		 * when the system panics.  We compensate for each of them by
		 * using the atomic store to force strong ordering (at least
		 * when viewed in the aftermath of a panic).
		 */
		if (++qpair->cq_head == qpair->num_entries) {		/* 1 */
			atomic_store_rel_int(&qpair->cq_head, 0);	/* 2 */
			qpair->phase = !qpair->phase;			/* 3 */
		}
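
		/*
		 * Worked example of the phase protocol: with num_entries == 4
		 * and phase == 1, entries 0..3 are consumed while their P bit
		 * reads 1; the wrap above resets cq_head to 0 and flips phase
		 * to 0, so the next pass consumes entries whose P bit reads 0.
		 */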

		bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle,
		    qpair->cq_hdbl_off, qpair->cq_head);
	}
	return (done != 0);
}

static void
nvme_qpair_msi_handler(void *arg)
{
	struct nvme_qpair *qpair = arg;

	nvme_qpair_process_completions(qpair);
}

int
nvme_qpair_construct(struct nvme_qpair *qpair,
    uint32_t num_entries, uint32_t num_trackers,
    struct nvme_controller *ctrlr)
{
	struct nvme_tracker	*tr;
	size_t			cmdsz, cplsz, prpsz, allocsz, prpmemsz;
	uint64_t		queuemem_phys, prpmem_phys, list_phys;
	uint8_t			*queuemem, *prpmem, *prp_list;
	int			i, err;

	qpair->vector = ctrlr->msi_count > 1 ? qpair->id : 0;
	qpair->num_entries = num_entries;
	qpair->num_trackers = num_trackers;
	qpair->ctrlr = ctrlr;

	mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF);

	/* Note: NVMe PRP format is restricted to 4-byte alignment. */
	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    4, PAGE_SIZE, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, ctrlr->max_xfer_size,
	    btoc(ctrlr->max_xfer_size) + 1, PAGE_SIZE, 0,
	    NULL, NULL, &qpair->dma_tag_payload);
	if (err != 0) {
		nvme_printf(ctrlr, "payload tag create failed %d\n", err);
		goto out;
	}

	/*
	 * Each component must be page aligned, and individual PRP lists
	 * cannot cross a page boundary.
	 */
	cmdsz = qpair->num_entries * sizeof(struct nvme_command);
	cmdsz = roundup2(cmdsz, PAGE_SIZE);
	cplsz = qpair->num_entries * sizeof(struct nvme_completion);
	cplsz = roundup2(cplsz, PAGE_SIZE);
	/*
	 * For commands requiring more than 2 PRP entries, one PRP will be
	 * embedded in the command (prp1), and the rest of the PRP entries
	 * will be in a list pointed to by the command (prp2).
	 */
	prpsz = sizeof(uint64_t) * btoc(ctrlr->max_xfer_size);
	prpmemsz = qpair->num_trackers * prpsz;
	allocsz = cmdsz + cplsz + prpmemsz;
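
	/*
	 * Sizing sketch (hypothetical numbers): with 4 KiB pages and
	 * max_xfer_size == 1 MiB, btoc() yields 256 pages, so each tracker's
	 * worst-case PRP list is 256 * 8 == 2 KiB, and 128 trackers would add
	 * 256 KiB of PRP memory to the queue allocation.
	 */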

	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    allocsz, 1, allocsz, 0, NULL, NULL, &qpair->dma_tag);
	if (err != 0) {
		nvme_printf(ctrlr, "tag create failed %d\n", err);
		goto out;
	}
	bus_dma_tag_set_domain(qpair->dma_tag, qpair->domain);

	if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem,
	     BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &qpair->queuemem_map)) {
		nvme_printf(ctrlr, "failed to alloc qpair memory\n");
		goto out;
	}

	if (bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map,
	    queuemem, allocsz, nvme_single_map, &queuemem_phys, 0) != 0) {
		nvme_printf(ctrlr, "failed to load qpair memory\n");
		bus_dmamem_free(qpair->dma_tag, queuemem,
		    qpair->queuemem_map);
		goto out;
	}

	qpair->num_cmds = 0;
	qpair->num_intr_handler_calls = 0;
	qpair->num_retries = 0;
	qpair->num_failures = 0;
	qpair->num_ignored = 0;
	qpair->cmd = (struct nvme_command *)queuemem;
	qpair->cpl = (struct nvme_completion *)(queuemem + cmdsz);
	prpmem = (uint8_t *)(queuemem + cmdsz + cplsz);
	qpair->cmd_bus_addr = queuemem_phys;
	qpair->cpl_bus_addr = queuemem_phys + cmdsz;
	prpmem_phys = queuemem_phys + cmdsz + cplsz;

	callout_init(&qpair->timer, 1);
	qpair->timer_armed = false;
	qpair->recovery_state = RECOVERY_WAITING;

	/*
	 * Calculate the stride of the doorbell register.  Many emulators set
	 * this value to correspond to a cache line.  However, some hardware
	 * has set it to various small values.
	 */
	qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[0]) +
	    (qpair->id << (ctrlr->dstrd + 1));
	qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[0]) +
	    (qpair->id << (ctrlr->dstrd + 1)) + (1 << ctrlr->dstrd);
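
	/*
	 * Doorbell layout sketch, assuming ctrlr->dstrd holds log2 of the
	 * stride in bytes (CAP.DSTRD + 2): with CAP.DSTRD == 0 the stride is
	 * 4 bytes, so queue 1's submission doorbell lands at doorbell[0] + 8
	 * and its completion doorbell at doorbell[0] + 12, matching the
	 * spec's alternating SQ tail/CQ head registers.
	 */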

	TAILQ_INIT(&qpair->free_tr);
	TAILQ_INIT(&qpair->outstanding_tr);
	STAILQ_INIT(&qpair->queued_req);

	list_phys = prpmem_phys;
	prp_list = prpmem;
	for (i = 0; i < qpair->num_trackers; i++) {
		if (list_phys + prpsz > prpmem_phys + prpmemsz) {
			qpair->num_trackers = i;
			break;
		}

		/*
		 * Make sure that the PRP list for this tracker doesn't
		 * overflow to another page.
		 */
		if (trunc_page(list_phys) !=
		    trunc_page(list_phys + prpsz - 1)) {
			list_phys = roundup2(list_phys, PAGE_SIZE);
			prp_list =
			    (uint8_t *)roundup2((uintptr_t)prp_list, PAGE_SIZE);
		}

		tr = malloc_domainset(sizeof(*tr), M_NVME,
		    DOMAINSET_PREF(qpair->domain), M_ZERO | M_WAITOK);
		bus_dmamap_create(qpair->dma_tag_payload, 0,
		    &tr->payload_dma_map);
		tr->cid = i;
		tr->qpair = qpair;
		tr->prp = (uint64_t *)prp_list;
		tr->prp_bus_addr = list_phys;
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
		list_phys += prpsz;
		prp_list += prpsz;
	}

	if (qpair->num_trackers == 0) {
		nvme_printf(ctrlr, "failed to allocate enough trackers\n");
		goto out;
	}

	qpair->act_tr = malloc_domainset(sizeof(struct nvme_tracker *) *
	    qpair->num_entries, M_NVME, DOMAINSET_PREF(qpair->domain),
	    M_ZERO | M_WAITOK);

	if (ctrlr->msi_count > 1) {
		/*
		 * MSI-X vector resource IDs start at 1, so we add one to
		 *  the queue's vector to get the corresponding rid to use.
		 */
		qpair->rid = qpair->vector + 1;

		qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
		    &qpair->rid, RF_ACTIVE);
		if (qpair->res == NULL) {
			nvme_printf(ctrlr, "unable to allocate MSI\n");
			goto out;
		}
		if (bus_setup_intr(ctrlr->dev, qpair->res,
		    INTR_TYPE_MISC | INTR_MPSAFE, NULL,
		    nvme_qpair_msi_handler, qpair, &qpair->tag) != 0) {
			nvme_printf(ctrlr, "unable to setup MSI\n");
			goto out;
		}
		if (qpair->id == 0) {
			bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
			    "admin");
		} else {
			bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
			    "io%d", qpair->id - 1);
		}
	}

	return (0);

out:
	nvme_qpair_destroy(qpair);
	return (ENOMEM);
}

static void
nvme_qpair_destroy(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	callout_drain(&qpair->timer);

	if (qpair->tag) {
		bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag);
		qpair->tag = NULL;
	}

	if (qpair->act_tr) {
		free(qpair->act_tr, M_NVME);
		qpair->act_tr = NULL;
	}

	while (!TAILQ_EMPTY(&qpair->free_tr)) {
		tr = TAILQ_FIRST(&qpair->free_tr);
		TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
		bus_dmamap_destroy(qpair->dma_tag_payload,
		    tr->payload_dma_map);
		free(tr, M_NVME);
	}

	if (qpair->cmd != NULL) {
		bus_dmamap_unload(qpair->dma_tag, qpair->queuemem_map);
		bus_dmamem_free(qpair->dma_tag, qpair->cmd,
		    qpair->queuemem_map);
		qpair->cmd = NULL;
	}

	if (qpair->dma_tag) {
		bus_dma_tag_destroy(qpair->dma_tag);
		qpair->dma_tag = NULL;
	}

	if (qpair->dma_tag_payload) {
		bus_dma_tag_destroy(qpair->dma_tag_payload);
		qpair->dma_tag_payload = NULL;
	}

	if (mtx_initialized(&qpair->lock))
		mtx_destroy(&qpair->lock);

	if (qpair->res) {
		bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(qpair->res), qpair->res);
		qpair->res = NULL;
	}
}

static void
nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

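	/*
	 * Completing a tracker unlinks it from outstanding_tr (see
	 * nvme_qpair_complete_tracker), so restart the scan from the list
	 * head after each manual completion rather than continuing the walk.
	 */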
	tr = TAILQ_FIRST(&qpair->outstanding_tr);
	while (tr != NULL) {
		if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) {
			nvme_qpair_manual_complete_tracker(tr,
			    NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0,
			    ERROR_PRINT_NONE);
			tr = TAILQ_FIRST(&qpair->outstanding_tr);
		} else {
			tr = TAILQ_NEXT(tr, tailq);
		}
	}
}

void
nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
{

	nvme_admin_qpair_abort_aers(qpair);
	nvme_qpair_destroy(qpair);
}

void
nvme_io_qpair_destroy(struct nvme_qpair *qpair)
{

	nvme_qpair_destroy(qpair);
}

static void
nvme_qpair_timeout(void *arg)
{
	struct nvme_qpair	*qpair = arg;
	struct nvme_controller	*ctrlr = qpair->ctrlr;
	struct nvme_tracker	*tr;
	struct nvme_tracker	*tr_temp;
	sbintime_t		now;
	bool			idle;
	uint32_t		csts;
	uint8_t			cfs;

	mtx_lock(&qpair->lock);
	idle = TAILQ_EMPTY(&qpair->outstanding_tr);
again:
	switch (qpair->recovery_state) {
	case RECOVERY_NONE:
		if (idle)
			break;
		now = getsbinuptime();
		TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
			if (now > tr->deadline && tr->deadline != 0) {
				/*
				 * We're now past our earliest deadline.  We
				 * need to do expensive things to cope, but
				 * next time.  Flag that and close the door to
				 * any further processing.
				 */
				qpair->recovery_state = RECOVERY_START;
				nvme_printf(ctrlr, "RECOVERY_START %jd vs %jd\n",
				    (uintmax_t)now, (uintmax_t)tr->deadline);
				break;
			}
		}
		break;
	case RECOVERY_START:
		/*
		 * Read csts to get the value of cfs - controller fatal status.
		 * If no fatal status, try to call the completion routine, and
		 * if that completes transactions, report a missed interrupt
		 * and return (this may need to be rate limited).  Otherwise,
		 * if aborts are enabled and the controller is not reporting
		 * fatal status, abort the command.  Otherwise, just reset the
		 * controller and hope for the best.
		 */
		csts = nvme_mmio_read_4(ctrlr, csts);
		cfs = (csts >> NVME_CSTS_REG_CFS_SHIFT) & NVME_CSTS_REG_CFS_MASK;
		if (cfs) {
			nvme_printf(ctrlr, "Controller in fatal status, resetting\n");
			qpair->recovery_state = RECOVERY_RESET;
			goto again;
		}
		mtx_unlock(&qpair->lock);
		if (nvme_qpair_process_completions(qpair)) {
			nvme_printf(ctrlr, "Completions present in output without an interrupt\n");
			qpair->recovery_state = RECOVERY_NONE;
		} else {
			nvme_printf(ctrlr, "timeout with nothing complete, resetting\n");
			qpair->recovery_state = RECOVERY_RESET;
			mtx_lock(&qpair->lock);
			goto again;
		}
		mtx_lock(&qpair->lock);
		break;
	case RECOVERY_RESET:
		/*
		 * If we get here due to a possible surprise hot-unplug event,
		 * then we let nvme_ctrlr_reset confirm and fail the
		 * controller.
		 */
		nvme_printf(ctrlr, "Resetting controller due to a timeout%s.\n",
		    cfs ? " and fatal error status" : "");
		nvme_printf(ctrlr, "RECOVERY_WAITING\n");
		qpair->recovery_state = RECOVERY_WAITING;
		nvme_ctrlr_reset(ctrlr);
		break;
	case RECOVERY_WAITING:
		nvme_printf(ctrlr, "waiting\n");
		break;
	}

	/*
	 * Rearm the timeout.
	 */
	if (!idle) {
		callout_schedule(&qpair->timer, hz / 2);
	} else {
		qpair->timer_armed = false;
	}
	mtx_unlock(&qpair->lock);
}

/*
 * Submit the tracker to the hardware. Must already be in the
 * outstanding queue when called.
 */
void
nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
{
	struct nvme_request	*req;
	struct nvme_controller	*ctrlr;
	int timeout;

	mtx_assert(&qpair->lock, MA_OWNED);

	req = tr->req;
	req->cmd.cid = tr->cid;
	qpair->act_tr[tr->cid] = tr;
	ctrlr = qpair->ctrlr;

	if (req->timeout) {
		if (req->cb_fn == nvme_completion_poll_cb)
			timeout = 1;
		else
			timeout = ctrlr->timeout_period;
		tr->deadline = getsbinuptime() + timeout * SBT_1S;
		if (!qpair->timer_armed) {
			qpair->timer_armed = true;
			callout_reset_on(&qpair->timer, hz / 2,
			    nvme_qpair_timeout, qpair, qpair->cpu);
		}
	} else
		tr->deadline = SBT_MAX;

	/* Copy the command from the tracker to the submission queue. */
	memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd));

	if (++qpair->sq_tail == qpair->num_entries)
		qpair->sq_tail = 0;

	bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle,
	    qpair->sq_tdbl_off, qpair->sq_tail);
	qpair->num_cmds++;
}

static void
nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct nvme_tracker	*tr = arg;
	uint32_t		cur_nseg;

	/*
	 * If the mapping operation failed, return immediately.  The caller
	 *  is responsible for detecting the error status and failing the
	 *  tracker manually.
	 */
	if (error != 0) {
		nvme_printf(tr->qpair->ctrlr,
		    "nvme_payload_map err %d\n", error);
		return;
	}

	/*
	 * Note that we specified PAGE_SIZE for alignment and max
	 *  segment size when creating the bus dma tags.  So here
	 *  we can safely just transfer each segment to its
	 *  associated PRP entry.
	 */
	tr->req->cmd.prp1 = htole64(seg[0].ds_addr);

	if (nseg == 2) {
		tr->req->cmd.prp2 = htole64(seg[1].ds_addr);
	} else if (nseg > 2) {
		cur_nseg = 1;
		tr->req->cmd.prp2 = htole64((uint64_t)tr->prp_bus_addr);
		while (cur_nseg < nseg) {
			tr->prp[cur_nseg-1] =
			    htole64((uint64_t)seg[cur_nseg].ds_addr);
			cur_nseg++;
		}
	} else {
		/*
		 * prp2 should not be used by the controller
		 *  since there is only one segment, but set
		 *  to 0 just to be safe.
		 */
		tr->req->cmd.prp2 = 0;
	}
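
	/*
	 * For instance, a three-segment mapping ends up as: prp1 = seg[0],
	 * prp2 = tr->prp_bus_addr, and the PRP list holds seg[1] and seg[2]
	 * in tr->prp[0] and tr->prp[1].
	 */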

	bus_dmamap_sync(tr->qpair->dma_tag_payload, tr->payload_dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	nvme_qpair_submit_tracker(tr->qpair, tr);
}

static void
_nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{
	struct nvme_tracker	*tr;
	int			err = 0;

	mtx_assert(&qpair->lock, MA_OWNED);

	tr = TAILQ_FIRST(&qpair->free_tr);
	req->qpair = qpair;

	if (tr == NULL || qpair->recovery_state != RECOVERY_NONE) {
		/*
		 * No tracker is available, or the qpair is disabled due to
		 *  an in-progress controller-level reset or controller
		 *  failure.
		 */

		if (qpair->ctrlr->is_failed) {
			/*
			 * The controller has failed, so fail the request.
			 */
			nvme_qpair_manual_complete_request(qpair, req,
			    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST);
		} else {
			/*
			 * Put the request on the qpair's request queue to be
			 *  processed when a tracker frees up via a command
			 *  completion or when the controller reset is
			 *  completed.
			 */
			STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
		}
		return;
	}

	TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
	TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq);
	if (!qpair->timer_armed)
		tr->deadline = SBT_MAX;
	tr->req = req;

	switch (req->type) {
	case NVME_REQUEST_VADDR:
		KASSERT(req->payload_size <= qpair->ctrlr->max_xfer_size,
		    ("payload_size (%d) exceeds max_xfer_size (%d)\n",
		    req->payload_size, qpair->ctrlr->max_xfer_size));
		err = bus_dmamap_load(tr->qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.payload, req->payload_size,
		    nvme_payload_map, tr, 0);
		if (err != 0)
			nvme_printf(qpair->ctrlr,
			    "bus_dmamap_load returned 0x%x!\n", err);
		break;
	case NVME_REQUEST_NULL:
		nvme_qpair_submit_tracker(tr->qpair, tr);
		break;
	case NVME_REQUEST_BIO:
		KASSERT(req->u.bio->bio_bcount <= qpair->ctrlr->max_xfer_size,
		    ("bio->bio_bcount (%jd) exceeds max_xfer_size (%d)\n",
		    (intmax_t)req->u.bio->bio_bcount,
		    qpair->ctrlr->max_xfer_size));
		err = bus_dmamap_load_bio(tr->qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.bio, nvme_payload_map, tr, 0);
		if (err != 0)
			nvme_printf(qpair->ctrlr,
			    "bus_dmamap_load_bio returned 0x%x!\n", err);
		break;
	case NVME_REQUEST_CCB:
		err = bus_dmamap_load_ccb(tr->qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.payload,
		    nvme_payload_map, tr, 0);
		if (err != 0)
			nvme_printf(qpair->ctrlr,
			    "bus_dmamap_load_ccb returned 0x%x!\n", err);
		break;
	default:
		panic("unknown nvme request type 0x%x\n", req->type);
		break;
	}

	if (err != 0) {
		/*
		 * The dmamap operation failed, so we manually fail the
		 *  tracker here with DATA_TRANSFER_ERROR status.
		 *
		 * nvme_qpair_manual_complete_tracker must not be called
		 *  with the qpair lock held.
		 */
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_DATA_TRANSFER_ERROR, DO_NOT_RETRY, ERROR_PRINT_ALL);
		mtx_lock(&qpair->lock);
	}
}

void
nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{

	mtx_lock(&qpair->lock);
	_nvme_qpair_submit_request(qpair, req);
	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_enable(struct nvme_qpair *qpair)
{
	mtx_assert(&qpair->lock, MA_OWNED);

	qpair->recovery_state = RECOVERY_NONE;
}

void
nvme_qpair_reset(struct nvme_qpair *qpair)
{

	qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;

	/*
	 * The first time through the completion queue, HW will set the phase
	 *  bit on completions to 1.  So set this to 1 here, indicating
	 *  we're looking for a 1 to know which entries have completed.
	 *  We'll toggle the bit each time the completion queue rolls over.
	 */
	qpair->phase = 1;

	memset(qpair->cmd, 0,
	    qpair->num_entries * sizeof(struct nvme_command));
	memset(qpair->cpl, 0,
	    qpair->num_entries * sizeof(struct nvme_completion));
}

void
nvme_admin_qpair_enable(struct nvme_qpair *qpair)
{
	struct nvme_tracker		*tr;
	struct nvme_tracker		*tr_temp;

	/*
	 * Manually abort each outstanding admin command.  Do not retry
	 *  admin commands found here, since they will be left over from
	 *  a controller reset and it's likely the context in which the
	 *  command was issued no longer applies.
	 */
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		nvme_printf(qpair->ctrlr,
		    "aborting outstanding admin command\n");
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL);
	}

	mtx_lock(&qpair->lock);
	nvme_qpair_enable(qpair);
	mtx_unlock(&qpair->lock);
}

void
nvme_io_qpair_enable(struct nvme_qpair *qpair)
{
	STAILQ_HEAD(, nvme_request)	temp;
	struct nvme_tracker		*tr;
	struct nvme_tracker		*tr_temp;
	struct nvme_request		*req;

	/*
	 * Manually abort each outstanding I/O.  This normally results in a
	 *  retry, unless the retry count on the associated request has
	 *  reached its limit.
	 */
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n");
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, 0, ERROR_PRINT_NO_RETRY);
	}

	mtx_lock(&qpair->lock);

	nvme_qpair_enable(qpair);

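	/*
	 * Drain queued requests onto a local list before resubmitting:
	 * _nvme_qpair_submit_request() can put a request back on queued_req
	 * when no tracker is free, and iterating the live list while
	 * resubmitting could otherwise loop forever.
	 */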
	STAILQ_INIT(&temp);
	STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request);

	while (!STAILQ_EMPTY(&temp)) {
		req = STAILQ_FIRST(&temp);
		STAILQ_REMOVE_HEAD(&temp, stailq);
		nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n");
		nvme_qpair_print_command(qpair, &req->cmd);
		_nvme_qpair_submit_request(qpair, req);
	}

	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_disable(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr, *tr_temp;

	mtx_lock(&qpair->lock);
	qpair->recovery_state = RECOVERY_WAITING;
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		tr->deadline = SBT_MAX;
	}
	mtx_unlock(&qpair->lock);
}

void
nvme_admin_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
	nvme_admin_qpair_abort_aers(qpair);
}

void
nvme_io_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
}

void
nvme_qpair_fail(struct nvme_qpair *qpair)
{
	struct nvme_tracker		*tr;
	struct nvme_request		*req;

	if (!mtx_initialized(&qpair->lock))
		return;

	mtx_lock(&qpair->lock);

	while (!STAILQ_EMPTY(&qpair->queued_req)) {
		req = STAILQ_FIRST(&qpair->queued_req);
		STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
		nvme_printf(qpair->ctrlr, "failing queued i/o\n");
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST);
		mtx_lock(&qpair->lock);
	}

	/* Manually abort each outstanding I/O. */
	while (!TAILQ_EMPTY(&qpair->outstanding_tr)) {
		tr = TAILQ_FIRST(&qpair->outstanding_tr);
		/*
		 * Do not remove the tracker.  The manual completion path
		 *  will do that for us.
		 */
		nvme_printf(qpair->ctrlr, "failing outstanding i/o\n");
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL);
		mtx_lock(&qpair->lock);
	}

	mtx_unlock(&qpair->lock);
}