/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/proc.h>

#include <dev/pci/pcivar.h>

#include "nvme_private.h"

typedef enum error_print {
	ERROR_PRINT_NONE,
	ERROR_PRINT_NO_RETRY,
	ERROR_PRINT_ALL
} error_print_t;
#define DO_NOT_RETRY	1

static void	_nvme_qpair_submit_request(struct nvme_qpair *qpair,
					   struct nvme_request *req);
static void	nvme_qpair_destroy(struct nvme_qpair *qpair);

#define DEFAULT_INDEX	256
#define DEFAULT_ENTRY(x)	[DEFAULT_INDEX] = x
#define OPC_ENTRY(x)		[NVME_OPC_ ## x] = #x

static const char *admin_opcode[DEFAULT_INDEX + 1] = {
	OPC_ENTRY(DELETE_IO_SQ),
	OPC_ENTRY(CREATE_IO_SQ),
	OPC_ENTRY(GET_LOG_PAGE),
	OPC_ENTRY(DELETE_IO_CQ),
	OPC_ENTRY(CREATE_IO_CQ),
	OPC_ENTRY(IDENTIFY),
	OPC_ENTRY(ABORT),
	OPC_ENTRY(SET_FEATURES),
	OPC_ENTRY(GET_FEATURES),
	OPC_ENTRY(ASYNC_EVENT_REQUEST),
	OPC_ENTRY(NAMESPACE_MANAGEMENT),
	OPC_ENTRY(FIRMWARE_ACTIVATE),
	OPC_ENTRY(FIRMWARE_IMAGE_DOWNLOAD),
	OPC_ENTRY(DEVICE_SELF_TEST),
	OPC_ENTRY(NAMESPACE_ATTACHMENT),
	OPC_ENTRY(KEEP_ALIVE),
	OPC_ENTRY(DIRECTIVE_SEND),
	OPC_ENTRY(DIRECTIVE_RECEIVE),
	OPC_ENTRY(VIRTUALIZATION_MANAGEMENT),
	OPC_ENTRY(NVME_MI_SEND),
	OPC_ENTRY(NVME_MI_RECEIVE),
	OPC_ENTRY(CAPACITY_MANAGEMENT),
	OPC_ENTRY(LOCKDOWN),
	OPC_ENTRY(DOORBELL_BUFFER_CONFIG),
	OPC_ENTRY(FABRICS_COMMANDS),
	OPC_ENTRY(FORMAT_NVM),
	OPC_ENTRY(SECURITY_SEND),
	OPC_ENTRY(SECURITY_RECEIVE),
	OPC_ENTRY(SANITIZE),
	OPC_ENTRY(GET_LBA_STATUS),
	DEFAULT_ENTRY("ADMIN COMMAND"),
};

static const char *io_opcode[DEFAULT_INDEX + 1] = {
	OPC_ENTRY(FLUSH),
	OPC_ENTRY(WRITE),
	OPC_ENTRY(READ),
	OPC_ENTRY(WRITE_UNCORRECTABLE),
	OPC_ENTRY(COMPARE),
	OPC_ENTRY(WRITE_ZEROES),
	OPC_ENTRY(DATASET_MANAGEMENT),
	OPC_ENTRY(VERIFY),
	OPC_ENTRY(RESERVATION_REGISTER),
	OPC_ENTRY(RESERVATION_REPORT),
	OPC_ENTRY(RESERVATION_ACQUIRE),
	OPC_ENTRY(RESERVATION_RELEASE),
	OPC_ENTRY(COPY),
	DEFAULT_ENTRY("IO COMMAND"),
};

static const char *
get_opcode_string(const char *op[DEFAULT_INDEX + 1], uint16_t opc)
{
	const char *nm = opc < DEFAULT_INDEX ? op[opc] : op[DEFAULT_INDEX];

	return (nm != NULL ? nm : op[DEFAULT_INDEX]);
}
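
/*
 * Lookup illustration (opcode values chosen for this example): listed
 * opcodes resolve to their name, unlisted opcodes below DEFAULT_INDEX hit
 * a NULL slot and fall back to the DEFAULT_ENTRY string, and out-of-range
 * opcodes index the DEFAULT_ENTRY directly:
 *
 *	get_admin_opcode_string(NVME_OPC_GET_LOG_PAGE)	-> "GET_LOG_PAGE"
 *	get_admin_opcode_string(0x55)			-> "ADMIN COMMAND"
 *	get_io_opcode_string(0x1ff)			-> "IO COMMAND"
 */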

static const char *
get_admin_opcode_string(uint16_t opc)
{
	return (get_opcode_string(admin_opcode, opc));
}

static const char *
get_io_opcode_string(uint16_t opc)
{
	return (get_opcode_string(io_opcode, opc));
}

static void
nvme_admin_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

	nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x "
	    "cdw10:%08x cdw11:%08x\n",
	    get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid,
	    le32toh(cmd->nsid), le32toh(cmd->cdw10), le32toh(cmd->cdw11));
}

static void
nvme_io_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

	switch (cmd->opc) {
	case NVME_OPC_WRITE:
	case NVME_OPC_READ:
	case NVME_OPC_WRITE_UNCORRECTABLE:
	case NVME_OPC_COMPARE:
	case NVME_OPC_WRITE_ZEROES:
	case NVME_OPC_VERIFY:
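		/*
		 * For these opcodes, cdw10/cdw11 carry the 64-bit starting
		 * LBA and cdw12[15:0] carries a 0's-based block count
		 * (NLB == 0 means one block), hence the "+ 1" below.
		 */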
		nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d "
		    "lba:%llu len:%d\n",
		    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid),
		    ((unsigned long long)le32toh(cmd->cdw11) << 32) + le32toh(cmd->cdw10),
		    (le32toh(cmd->cdw12) & 0xFFFF) + 1);
		break;
	case NVME_OPC_FLUSH:
	case NVME_OPC_DATASET_MANAGEMENT:
	case NVME_OPC_RESERVATION_REGISTER:
	case NVME_OPC_RESERVATION_REPORT:
	case NVME_OPC_RESERVATION_ACQUIRE:
	case NVME_OPC_RESERVATION_RELEASE:
		nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n",
		    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid));
		break;
	default:
		nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n",
		    get_io_opcode_string(cmd->opc), cmd->opc, qpair->id,
		    cmd->cid, le32toh(cmd->nsid));
		break;
	}
}

void
nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd)
{
	if (qpair->id == 0)
		nvme_admin_qpair_print_command(qpair, cmd);
	else
		nvme_io_qpair_print_command(qpair, cmd);
	if (nvme_verbose_cmd_dump) {
		nvme_printf(qpair->ctrlr,
		    "nsid:%#x rsvd2:%#x rsvd3:%#x mptr:%#jx prp1:%#jx prp2:%#jx\n",
		    cmd->nsid, cmd->rsvd2, cmd->rsvd3, (uintmax_t)cmd->mptr,
		    (uintmax_t)cmd->prp1, (uintmax_t)cmd->prp2);
		nvme_printf(qpair->ctrlr,
		    "cdw10: %#x cdw11:%#x cdw12:%#x cdw13:%#x cdw14:%#x cdw15:%#x\n",
		    cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14,
		    cmd->cdw15);
	}
}

struct nvme_status_string {
	uint16_t	sc;
	const char *	str;
};

static struct nvme_status_string generic_status[] = {
	{ NVME_SC_SUCCESS, "SUCCESS" },
	{ NVME_SC_INVALID_OPCODE, "INVALID OPCODE" },
	{ NVME_SC_INVALID_FIELD, "INVALID FIELD" },
	{ NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" },
	{ NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" },
	{ NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" },
	{ NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" },
	{ NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" },
	{ NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" },
	{ NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" },
	{ NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" },
	{ NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" },
	{ NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" },
	{ NVME_SC_INVALID_SGL_SEGMENT_DESCR, "INVALID SGL SEGMENT DESCRIPTOR" },
	{ NVME_SC_INVALID_NUMBER_OF_SGL_DESCR, "INVALID NUMBER OF SGL DESCRIPTORS" },
	{ NVME_SC_DATA_SGL_LENGTH_INVALID, "DATA SGL LENGTH INVALID" },
	{ NVME_SC_METADATA_SGL_LENGTH_INVALID, "METADATA SGL LENGTH INVALID" },
	{ NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID, "SGL DESCRIPTOR TYPE INVALID" },
	{ NVME_SC_INVALID_USE_OF_CMB, "INVALID USE OF CONTROLLER MEMORY BUFFER" },
	{ NVME_SC_PRP_OFFET_INVALID, "PRP OFFSET INVALID" },
	{ NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED, "ATOMIC WRITE UNIT EXCEEDED" },
	{ NVME_SC_OPERATION_DENIED, "OPERATION DENIED" },
	{ NVME_SC_SGL_OFFSET_INVALID, "SGL OFFSET INVALID" },
	{ NVME_SC_HOST_ID_INCONSISTENT_FORMAT, "HOST IDENTIFIER INCONSISTENT FORMAT" },
	{ NVME_SC_KEEP_ALIVE_TIMEOUT_EXPIRED, "KEEP ALIVE TIMEOUT EXPIRED" },
	{ NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID, "KEEP ALIVE TIMEOUT INVALID" },
	{ NVME_SC_ABORTED_DUE_TO_PREEMPT, "COMMAND ABORTED DUE TO PREEMPT AND ABORT" },
	{ NVME_SC_SANITIZE_FAILED, "SANITIZE FAILED" },
	{ NVME_SC_SANITIZE_IN_PROGRESS, "SANITIZE IN PROGRESS" },
	{ NVME_SC_SGL_DATA_BLOCK_GRAN_INVALID, "SGL DATA BLOCK GRANULARITY INVALID" },
	{ NVME_SC_NOT_SUPPORTED_IN_CMB, "COMMAND NOT SUPPORTED FOR QUEUE IN CMB" },
	{ NVME_SC_NAMESPACE_IS_WRITE_PROTECTED, "NAMESPACE IS WRITE PROTECTED" },
	{ NVME_SC_COMMAND_INTERRUPTED, "COMMAND INTERRUPTED" },
	{ NVME_SC_TRANSIENT_TRANSPORT_ERROR, "TRANSIENT TRANSPORT ERROR" },

	{ NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" },
	{ NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" },
	{ NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" },
	{ NVME_SC_RESERVATION_CONFLICT, "RESERVATION CONFLICT" },
	{ NVME_SC_FORMAT_IN_PROGRESS, "FORMAT IN PROGRESS" },
	{ 0xFFFF, "GENERIC" }
};

static struct nvme_status_string command_specific_status[] = {
	{ NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" },
	{ NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" },
	{ NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" },
	{ NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" },
	{ NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" },
	{ NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" },
	{ NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" },
	{ NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" },
	{ NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" },
	{ NVME_SC_INVALID_FORMAT, "INVALID FORMAT" },
	{ NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" },
	{ NVME_SC_INVALID_QUEUE_DELETION, "INVALID QUEUE DELETION" },
	{ NVME_SC_FEATURE_NOT_SAVEABLE, "FEATURE IDENTIFIER NOT SAVEABLE" },
	{ NVME_SC_FEATURE_NOT_CHANGEABLE, "FEATURE NOT CHANGEABLE" },
	{ NVME_SC_FEATURE_NOT_NS_SPECIFIC, "FEATURE NOT NAMESPACE SPECIFIC" },
	{ NVME_SC_FW_ACT_REQUIRES_NVMS_RESET, "FIRMWARE ACTIVATION REQUIRES NVM SUBSYSTEM RESET" },
	{ NVME_SC_FW_ACT_REQUIRES_RESET, "FIRMWARE ACTIVATION REQUIRES RESET" },
	{ NVME_SC_FW_ACT_REQUIRES_TIME, "FIRMWARE ACTIVATION REQUIRES MAXIMUM TIME VIOLATION" },
	{ NVME_SC_FW_ACT_PROHIBITED, "FIRMWARE ACTIVATION PROHIBITED" },
	{ NVME_SC_OVERLAPPING_RANGE, "OVERLAPPING RANGE" },
	{ NVME_SC_NS_INSUFFICIENT_CAPACITY, "NAMESPACE INSUFFICIENT CAPACITY" },
	{ NVME_SC_NS_ID_UNAVAILABLE, "NAMESPACE IDENTIFIER UNAVAILABLE" },
	{ NVME_SC_NS_ALREADY_ATTACHED, "NAMESPACE ALREADY ATTACHED" },
	{ NVME_SC_NS_IS_PRIVATE, "NAMESPACE IS PRIVATE" },
	{ NVME_SC_NS_NOT_ATTACHED, "NAMESPACE NOT ATTACHED" },
	{ NVME_SC_THIN_PROV_NOT_SUPPORTED, "THIN PROVISIONING NOT SUPPORTED" },
	{ NVME_SC_CTRLR_LIST_INVALID, "CONTROLLER LIST INVALID" },
	{ NVME_SC_SELF_TEST_IN_PROGRESS, "DEVICE SELF-TEST IN PROGRESS" },
	{ NVME_SC_BOOT_PART_WRITE_PROHIB, "BOOT PARTITION WRITE PROHIBITED" },
	{ NVME_SC_INVALID_CTRLR_ID, "INVALID CONTROLLER IDENTIFIER" },
	{ NVME_SC_INVALID_SEC_CTRLR_STATE, "INVALID SECONDARY CONTROLLER STATE" },
	{ NVME_SC_INVALID_NUM_OF_CTRLR_RESRC, "INVALID NUMBER OF CONTROLLER RESOURCES" },
	{ NVME_SC_INVALID_RESOURCE_ID, "INVALID RESOURCE IDENTIFIER" },
	{ NVME_SC_SANITIZE_PROHIBITED_WPMRE, "SANITIZE PROHIBITED WRITE PERSISTENT MEMORY REGION ENABLED" },
	{ NVME_SC_ANA_GROUP_ID_INVALID, "ANA GROUP IDENTIFIER INVALID" },
	{ NVME_SC_ANA_ATTACH_FAILED, "ANA ATTACH FAILED" },

	{ NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" },
	{ NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" },
	{ NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" },
	{ 0xFFFF, "COMMAND SPECIFIC" }
};

static struct nvme_status_string media_error_status[] = {
	{ NVME_SC_WRITE_FAULTS, "WRITE FAULTS" },
	{ NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" },
	{ NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" },
	{ NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" },
	{ NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" },
	{ NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" },
	{ NVME_SC_ACCESS_DENIED, "ACCESS DENIED" },
	{ NVME_SC_DEALLOCATED_OR_UNWRITTEN, "DEALLOCATED OR UNWRITTEN LOGICAL BLOCK" },
	{ 0xFFFF, "MEDIA ERROR" }
};

static struct nvme_status_string path_related_status[] = {
	{ NVME_SC_INTERNAL_PATH_ERROR, "INTERNAL PATH ERROR" },
	{ NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS, "ASYMMETRIC ACCESS PERSISTENT LOSS" },
	{ NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE, "ASYMMETRIC ACCESS INACCESSIBLE" },
	{ NVME_SC_ASYMMETRIC_ACCESS_TRANSITION, "ASYMMETRIC ACCESS TRANSITION" },
	{ NVME_SC_CONTROLLER_PATHING_ERROR, "CONTROLLER PATHING ERROR" },
	{ NVME_SC_HOST_PATHING_ERROR, "HOST PATHING ERROR" },
	{ NVME_SC_COMMAND_ABORTED_BY_HOST, "COMMAND ABORTED BY HOST" },
	{ 0xFFFF, "PATH RELATED" },
};

static const char *
get_status_string(uint16_t sct, uint16_t sc)
{
	struct nvme_status_string *entry;

	switch (sct) {
	case NVME_SCT_GENERIC:
		entry = generic_status;
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		entry = command_specific_status;
		break;
	case NVME_SCT_MEDIA_ERROR:
		entry = media_error_status;
		break;
	case NVME_SCT_PATH_RELATED:
		entry = path_related_status;
		break;
	case NVME_SCT_VENDOR_SPECIFIC:
		return ("VENDOR SPECIFIC");
	default:
		return ("RESERVED");
	}

	while (entry->sc != 0xFFFF) {
		if (entry->sc == sc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}
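
/*
 * Usage illustration: the 0xFFFF sentinel terminating each table doubles
 * as the fallback string, so
 *
 *	get_status_string(NVME_SCT_GENERIC, NVME_SC_SUCCESS) -> "SUCCESS"
 *	get_status_string(NVME_SCT_GENERIC, <unlisted sc>)   -> "GENERIC"
 *	get_status_string(NVME_SCT_VENDOR_SPECIFIC, <any>)   -> "VENDOR SPECIFIC"
 */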

void
nvme_qpair_print_completion(struct nvme_qpair *qpair,
    struct nvme_completion *cpl)
{
	uint8_t sct, sc, crd, m, dnr, p;

	sct = NVME_STATUS_GET_SCT(cpl->status);
	sc = NVME_STATUS_GET_SC(cpl->status);
	crd = NVME_STATUS_GET_CRD(cpl->status);
	m = NVME_STATUS_GET_M(cpl->status);
	dnr = NVME_STATUS_GET_DNR(cpl->status);
	p = NVME_STATUS_GET_P(cpl->status);

	nvme_printf(qpair->ctrlr, "%s (%02x/%02x) crd:%x m:%x dnr:%x p:%d "
	    "sqid:%d cid:%d cdw0:%x\n",
	    get_status_string(sct, sc), sct, sc, crd, m, dnr, p,
	    cpl->sqid, cpl->cid, cpl->cdw0);
}

static bool
nvme_completion_is_retry(const struct nvme_completion *cpl)
{
	uint8_t sct, sc, dnr;

	sct = NVME_STATUS_GET_SCT(cpl->status);
	sc = NVME_STATUS_GET_SC(cpl->status);
	dnr = NVME_STATUS_GET_DNR(cpl->status);	/* Do Not Retry Bit */

	/*
	 * TODO: the spec is not clear on how commands that are aborted due
	 *  to TLER will be marked, so for now NAMESPACE_NOT_READY seems to be
	 *  the only case where we should look at the DNR bit.  Requests that
	 *  fail with ABORTED_BY_REQUEST have the DNR bit set correctly, since
	 *  the driver controls that.
	 */
	switch (sct) {
	case NVME_SCT_GENERIC:
		switch (sc) {
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_NAMESPACE_NOT_READY:
			if (dnr)
				return (false);
			else
				return (true);
		case NVME_SC_INVALID_OPCODE:
		case NVME_SC_INVALID_FIELD:
		case NVME_SC_COMMAND_ID_CONFLICT:
		case NVME_SC_DATA_TRANSFER_ERROR:
		case NVME_SC_ABORTED_POWER_LOSS:
		case NVME_SC_INTERNAL_DEVICE_ERROR:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
		case NVME_SC_COMMAND_SEQUENCE_ERROR:
		case NVME_SC_LBA_OUT_OF_RANGE:
		case NVME_SC_CAPACITY_EXCEEDED:
		default:
			return (false);
		}
	case NVME_SCT_COMMAND_SPECIFIC:
	case NVME_SCT_MEDIA_ERROR:
		return (false);
	case NVME_SCT_PATH_RELATED:
		switch (sc) {
		case NVME_SC_INTERNAL_PATH_ERROR:
			if (dnr)
				return (false);
			else
				return (true);
		default:
			return (false);
		}
	case NVME_SCT_VENDOR_SPECIFIC:
	default:
		return (false);
	}
}

static void
nvme_qpair_complete_tracker(struct nvme_tracker *tr,
    struct nvme_completion *cpl, error_print_t print_on_error)
{
	struct nvme_qpair	*qpair = tr->qpair;
	struct nvme_request	*req;
	bool			retry, error, retriable;

	req = tr->req;
	error = nvme_completion_is_error(cpl);
	retriable = nvme_completion_is_retry(cpl);
	retry = error && retriable && req->retries < nvme_retry_count;
	if (retry)
		qpair->num_retries++;
	if (error && req->retries >= nvme_retry_count && retriable)
		qpair->num_failures++;
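	/*
	 * Bookkeeping illustration (with nvme_retry_count set to 4, say):
	 * a command that keeps failing with a retriable status is
	 * resubmitted four times; the fifth failure is charged to
	 * num_failures and the completion is delivered to the caller.
	 */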

	if (error && (print_on_error == ERROR_PRINT_ALL ||
		(!retry && print_on_error == ERROR_PRINT_NO_RETRY))) {
		nvme_qpair_print_command(qpair, &req->cmd);
		nvme_qpair_print_completion(qpair, cpl);
	}

	qpair->act_tr[cpl->cid] = NULL;

	KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid"));

	if (!retry) {
		if (req->payload_valid) {
			bus_dmamap_sync(qpair->dma_tag_payload,
			    tr->payload_dma_map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		}
		if (req->cb_fn)
			req->cb_fn(req->cb_arg, cpl);
	}

	mtx_lock(&qpair->lock);

	if (retry) {
		req->retries++;
		nvme_qpair_submit_tracker(qpair, tr);
	} else {
		if (req->payload_valid) {
			bus_dmamap_unload(qpair->dma_tag_payload,
			    tr->payload_dma_map);
		}

		nvme_free_request(req);
		tr->req = NULL;

		TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq);
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);

		/*
		 * If the controller is in the middle of resetting, don't
		 *  try to submit queued requests here - let the reset logic
		 *  handle that instead.
		 */
		if (!STAILQ_EMPTY(&qpair->queued_req) &&
		    !qpair->ctrlr->is_resetting) {
			req = STAILQ_FIRST(&qpair->queued_req);
			STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
			_nvme_qpair_submit_request(qpair, req);
		}
	}

	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_manual_complete_tracker(
    struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr,
    error_print_t print_on_error)
{
	struct nvme_qpair	*qpair = tr->qpair;
	struct nvme_completion	cpl;

	memset(&cpl, 0, sizeof(cpl));

	cpl.sqid = qpair->id;
	cpl.cid = tr->cid;
	cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT;
	cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;
	cpl.status |= (dnr & NVME_STATUS_DNR_MASK) << NVME_STATUS_DNR_SHIFT;
	/* M=0 : this is artificial so no data in error log page */
	/* CRD=0 : this is artificial and no delayed retry support anyway */
	/* P=0 : phase not checked */
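	/*
	 * Encoding illustration (using the standard NVMe status layout,
	 * where SC occupies bits 8:1 and SCT bits 11:9 of the field):
	 * sct = NVME_SCT_GENERIC (0), sc = NVME_SC_ABORTED_SQ_DELETION
	 * (0x08) and dnr = 0 compose to cpl.status == 0x08 << 1 == 0x10.
	 */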
	nvme_qpair_complete_tracker(tr, &cpl, print_on_error);
}

void
nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
    struct nvme_request *req, uint32_t sct, uint32_t sc)
{
	struct nvme_completion	cpl;
	bool			error;

	memset(&cpl, 0, sizeof(cpl));
	cpl.sqid = qpair->id;
	cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT;
	cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;

	error = nvme_completion_is_error(&cpl);

	if (error) {
		nvme_qpair_print_command(qpair, &req->cmd);
		nvme_qpair_print_completion(qpair, &cpl);
	}

	if (req->cb_fn)
		req->cb_fn(req->cb_arg, &cpl);

	nvme_free_request(req);
}

bool
nvme_qpair_process_completions(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;
	struct nvme_completion	cpl;
	int done = 0;
	bool in_panic = dumping || SCHEDULER_STOPPED();

	/*
	 * The qpair is not enabled, likely because a controller reset is in
	 * progress.  Ignore the interrupt - any I/O that was associated with
	 * this interrupt will get retried when the reset is complete.  Any
	 * completions still pending while we're in startup will be processed
	 * as soon as initialization is complete and we start sending commands
	 * to the device.
	 */
	if (qpair->recovery_state != RECOVERY_NONE) {
		qpair->num_ignored++;
		return (false);
	}

	/*
	 * Sanity check initialization.  After we reset the hardware, the phase
	 * is defined to be 1.  So if we get here with zero prior calls and the
	 * phase is 0, it means that we've lost a race between initialization
	 * and the ISR running.  With the phase wrong, we'd process a bunch of
	 * entries that aren't real completions, leading to the KASSERT below.
	 */
	KASSERT(!(qpair->num_intr_handler_calls == 0 && qpair->phase == 0),
	    ("%s: Phase wrong for first interrupt call.",
		device_get_nameunit(qpair->ctrlr->dev)));

	qpair->num_intr_handler_calls++;

	bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * A panic can stop the CPU this routine is running on at any point.  If
	 * we're called during a panic, complete the cq_head wrap protocol for
	 * the case where we are interrupted just after the increment at 1
	 * below, but before we can reset cq_head to zero at 2. Also cope with
	 * the case where we do the zero at 2, but may or may not have done the
	 * phase adjustment at step 3. The panic machinery flushes all pending
	 * memory writes, so we can make these strong ordering assumptions
	 * that would otherwise be unwise if we were racing in real time.
	 */
	if (__predict_false(in_panic)) {
		if (qpair->cq_head == qpair->num_entries) {
			/*
			 * Here we know we were stopped after the increment at
			 * 1 but before the zeroing at 2: neither the zeroing
			 * nor the phase flip at 3 has happened yet, since the
			 * atomic_store_rel orders them after the increment.
			 * Do both now.
			 */
			qpair->cq_head = 0;
			qpair->phase = !qpair->phase;
		} else if (qpair->cq_head == 0) {
			/*
			 * In this case, we know that the zeroing at 2
			 * happened below, but we don't know whether the phase
			 * flip at 3 did. To find out, we look at the last
			 * completion entry and set the phase to the opposite
			 * of the phase recorded there. This gets us back in
			 * sync.
			 */
			cpl = qpair->cpl[qpair->num_entries - 1];
			nvme_completion_swapbytes(&cpl);
			qpair->phase = !NVME_STATUS_GET_P(cpl.status);
		}
	}

	while (1) {
		uint16_t status;

		/*
		 * We need to do this dance to avoid a race between the host and
		 * the device where the device overtakes the host while the host
		 * is reading this record, leaving the status field 'new' and
		 * the sqhd and cid fields potentially stale. If the phase
		 * doesn't match, that means status hasn't yet been updated and
		 * we'll get any pending changes next time. It also means that
		 * the phase must be the same the second time. We have to sync
		 * before reading to ensure any bouncing completes.
		 */
		status = le16toh(qpair->cpl[qpair->cq_head].status);
		if (NVME_STATUS_GET_P(status) != qpair->phase)
			break;

		bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		cpl = qpair->cpl[qpair->cq_head];
		nvme_completion_swapbytes(&cpl);

		KASSERT(
		    NVME_STATUS_GET_P(status) == NVME_STATUS_GET_P(cpl.status),
		    ("Phase unexpectedly inconsistent"));

		if (cpl.cid < qpair->num_trackers)
			tr = qpair->act_tr[cpl.cid];
		else
			tr = NULL;

		done++;
		if (tr != NULL) {
			nvme_qpair_complete_tracker(tr, &cpl, ERROR_PRINT_ALL);
			qpair->sq_head = cpl.sqhd;
		} else if (!in_panic) {
			/*
			 * A missing tracker is normally an error.  However, a
			 * panic can stop the CPU this routine is running on
			 * after it completes an I/O but before it updates
			 * qpair->cq_head at 1 below.  When we later re-enter
			 * this routine to poll I/O associated with the kernel
			 * dump, we find the tracker was already set to NULL,
			 * but the completion routine either hadn't finished or
			 * itself triggered the panic, so '1' below never
			 * updated cq_head and we see this entry again.  Rather
			 * than panic again, ignore this condition because
			 * it's not unexpected.
			 */
			nvme_printf(qpair->ctrlr,
			    "cpl (cid = %u) does not map to outstanding cmd\n",
				cpl.cid);
			nvme_qpair_print_completion(qpair,
			    &qpair->cpl[qpair->cq_head]);
			KASSERT(0, ("received completion for unknown cmd"));
		}

		/*
		 * There are a number of races with the following (see above)
		 * when the system panics. We compensate for each one of them
		 * by using the atomic store to force strong ordering (at
		 * least when viewed in the aftermath of a panic).
		 */
		if (++qpair->cq_head == qpair->num_entries) {		/* 1 */
			atomic_store_rel_int(&qpair->cq_head, 0);	/* 2 */
			qpair->phase = !qpair->phase;			/* 3 */
		}
	}

	if (done != 0) {
		bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle,
		    qpair->cq_hdbl_off, qpair->cq_head);
	}

	return (done != 0);
}

static void
nvme_qpair_msi_handler(void *arg)
{
	struct nvme_qpair *qpair = arg;

	nvme_qpair_process_completions(qpair);
}

int
nvme_qpair_construct(struct nvme_qpair *qpair,
    uint32_t num_entries, uint32_t num_trackers,
    struct nvme_controller *ctrlr)
{
	struct nvme_tracker	*tr;
	size_t			cmdsz, cplsz, prpsz, allocsz, prpmemsz;
	uint64_t		queuemem_phys, prpmem_phys, list_phys;
	uint8_t			*queuemem, *prpmem, *prp_list;
	int			i, err;

	qpair->vector = ctrlr->msi_count > 1 ? qpair->id : 0;
	qpair->num_entries = num_entries;
	qpair->num_trackers = num_trackers;
	qpair->ctrlr = ctrlr;

	mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF);

	/* Note: NVMe PRP format is restricted to 4-byte alignment. */
	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    4, ctrlr->page_size, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, ctrlr->max_xfer_size,
	    howmany(ctrlr->max_xfer_size, ctrlr->page_size) + 1,
	    ctrlr->page_size, 0,
	    NULL, NULL, &qpair->dma_tag_payload);
	if (err != 0) {
		nvme_printf(ctrlr, "payload tag create failed %d\n", err);
		goto out;
	}

	/*
	 * Each component must be page aligned, and individual PRP lists
	 * cannot cross a page boundary.
	 */
	cmdsz = qpair->num_entries * sizeof(struct nvme_command);
	cmdsz = roundup2(cmdsz, ctrlr->page_size);
	cplsz = qpair->num_entries * sizeof(struct nvme_completion);
	cplsz = roundup2(cplsz, ctrlr->page_size);
	/*
	 * For commands requiring more than 2 PRP entries, one PRP will be
	 * embedded in the command (prp1), and the rest of the PRP entries
	 * will be in a list pointed to by the command (prp2).
	 */
	prpsz = sizeof(uint64_t) *
	    howmany(ctrlr->max_xfer_size, ctrlr->page_size);
	prpmemsz = qpair->num_trackers * prpsz;
	allocsz = cmdsz + cplsz + prpmemsz;
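	/*
	 * Sizing illustration (numbers chosen as an example): with 4 KiB
	 * pages and a 1 MiB max_xfer_size, each tracker reserves
	 * howmany(1 MiB, 4 KiB) == 256 PRP entries of 8 bytes, i.e. 2 KiB;
	 * prp1 covers the first page of a transfer directly and prp2 points
	 * at the per-tracker list for the rest.
	 */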

	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    ctrlr->page_size, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    allocsz, 1, allocsz, 0, NULL, NULL, &qpair->dma_tag);
	if (err != 0) {
		nvme_printf(ctrlr, "tag create failed %d\n", err);
		goto out;
	}
	bus_dma_tag_set_domain(qpair->dma_tag, qpair->domain);

	if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem,
	     BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &qpair->queuemem_map)) {
		nvme_printf(ctrlr, "failed to alloc qpair memory\n");
		goto out;
	}

	if (bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map,
	    queuemem, allocsz, nvme_single_map, &queuemem_phys, 0) != 0) {
		nvme_printf(ctrlr, "failed to load qpair memory\n");
		/* Free queuemem here; qpair->cmd has not been set yet. */
		bus_dmamem_free(qpair->dma_tag, queuemem,
		    qpair->queuemem_map);
		goto out;
	}

	qpair->num_cmds = 0;
	qpair->num_intr_handler_calls = 0;
	qpair->num_retries = 0;
	qpair->num_failures = 0;
	qpair->num_ignored = 0;
	qpair->cmd = (struct nvme_command *)queuemem;
	qpair->cpl = (struct nvme_completion *)(queuemem + cmdsz);
	prpmem = (uint8_t *)(queuemem + cmdsz + cplsz);
	qpair->cmd_bus_addr = queuemem_phys;
	qpair->cpl_bus_addr = queuemem_phys + cmdsz;
	prpmem_phys = queuemem_phys + cmdsz + cplsz;

	callout_init(&qpair->timer, 1);
	qpair->timer_armed = false;
	qpair->recovery_state = RECOVERY_WAITING;

	/*
	 * Calculate the stride of the doorbell register.  Many emulators set
	 * this value to correspond to a cache line.  However, some hardware
	 * has set it to various small values.
	 */
	qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[0]) +
	    (qpair->id << (ctrlr->dstrd + 1));
	qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[0]) +
	    (qpair->id << (ctrlr->dstrd + 1)) + (1 << ctrlr->dstrd);
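	/*
	 * Worked example (assuming ctrlr->dstrd holds CAP.DSTRD + 2, so
	 * that 1 << dstrd is the doorbell stride in bytes): with
	 * CAP.DSTRD == 0, dstrd == 2 and the stride is 4 bytes, so queue
	 * 1's SQ tail doorbell lands at doorbell[0] + 8 and its CQ head
	 * doorbell at doorbell[0] + 12.
	 */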

	TAILQ_INIT(&qpair->free_tr);
	TAILQ_INIT(&qpair->outstanding_tr);
	STAILQ_INIT(&qpair->queued_req);

	list_phys = prpmem_phys;
	prp_list = prpmem;
	for (i = 0; i < qpair->num_trackers; i++) {
		if (list_phys + prpsz > prpmem_phys + prpmemsz) {
			qpair->num_trackers = i;
			break;
		}

		/*
		 * Make sure that the PRP list for this tracker doesn't
		 * overflow to another nvme page.
		 */
		if (trunc_page(list_phys) !=
		    trunc_page(list_phys + prpsz - 1)) {
			list_phys = roundup2(list_phys, ctrlr->page_size);
			prp_list =
			    (uint8_t *)roundup2((uintptr_t)prp_list, ctrlr->page_size);
		}

		tr = malloc_domainset(sizeof(*tr), M_NVME,
		    DOMAINSET_PREF(qpair->domain), M_ZERO | M_WAITOK);
		bus_dmamap_create(qpair->dma_tag_payload, 0,
		    &tr->payload_dma_map);
		tr->cid = i;
		tr->qpair = qpair;
		tr->prp = (uint64_t *)prp_list;
		tr->prp_bus_addr = list_phys;
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
		list_phys += prpsz;
		prp_list += prpsz;
	}

	if (qpair->num_trackers == 0) {
		nvme_printf(ctrlr, "failed to allocate enough trackers\n");
		goto out;
	}

	qpair->act_tr = malloc_domainset(sizeof(struct nvme_tracker *) *
	    qpair->num_entries, M_NVME, DOMAINSET_PREF(qpair->domain),
	    M_ZERO | M_WAITOK);

	if (ctrlr->msi_count > 1) {
		/*
		 * MSI-X vector resource IDs start at 1, so we add one to
		 *  the queue's vector to get the corresponding rid to use.
		 */
		qpair->rid = qpair->vector + 1;

		qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
		    &qpair->rid, RF_ACTIVE);
		if (qpair->res == NULL) {
			nvme_printf(ctrlr, "unable to allocate MSI\n");
			goto out;
		}
		if (bus_setup_intr(ctrlr->dev, qpair->res,
		    INTR_TYPE_MISC | INTR_MPSAFE, NULL,
		    nvme_qpair_msi_handler, qpair, &qpair->tag) != 0) {
			nvme_printf(ctrlr, "unable to setup MSI\n");
			goto out;
		}
		if (qpair->id == 0) {
			bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
			    "admin");
		} else {
			bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
			    "io%d", qpair->id - 1);
		}
	}

	return (0);

out:
	nvme_qpair_destroy(qpair);
	return (ENOMEM);
}

static void
nvme_qpair_destroy(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	callout_drain(&qpair->timer);

	if (qpair->tag) {
		bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag);
		qpair->tag = NULL;
	}

	if (qpair->act_tr) {
		free(qpair->act_tr, M_NVME);
		qpair->act_tr = NULL;
	}

	while (!TAILQ_EMPTY(&qpair->free_tr)) {
		tr = TAILQ_FIRST(&qpair->free_tr);
		TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
		bus_dmamap_destroy(qpair->dma_tag_payload,
		    tr->payload_dma_map);
		free(tr, M_NVME);
	}

	if (qpair->cmd != NULL) {
		bus_dmamap_unload(qpair->dma_tag, qpair->queuemem_map);
		bus_dmamem_free(qpair->dma_tag, qpair->cmd,
		    qpair->queuemem_map);
		qpair->cmd = NULL;
	}

	if (qpair->dma_tag) {
		bus_dma_tag_destroy(qpair->dma_tag);
		qpair->dma_tag = NULL;
	}

	if (qpair->dma_tag_payload) {
		bus_dma_tag_destroy(qpair->dma_tag_payload);
		qpair->dma_tag_payload = NULL;
	}

	if (mtx_initialized(&qpair->lock))
		mtx_destroy(&qpair->lock);

	if (qpair->res) {
		bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(qpair->res), qpair->res);
		qpair->res = NULL;
	}
}

static void
nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	tr = TAILQ_FIRST(&qpair->outstanding_tr);
	while (tr != NULL) {
		if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) {
			nvme_qpair_manual_complete_tracker(tr,
			    NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0,
			    ERROR_PRINT_NONE);
			tr = TAILQ_FIRST(&qpair->outstanding_tr);
		} else {
			tr = TAILQ_NEXT(tr, tailq);
		}
	}
}

void
nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
{

	nvme_admin_qpair_abort_aers(qpair);
	nvme_qpair_destroy(qpair);
}

void
nvme_io_qpair_destroy(struct nvme_qpair *qpair)
{

	nvme_qpair_destroy(qpair);
}

static void
nvme_abort_complete(void *arg, const struct nvme_completion *status)
{
	struct nvme_tracker     *tr = arg;

	/*
	 * If cdw0 == 1, the controller was not able to abort the command
	 *  we requested.  We still need to check the active tracker array,
	 *  to cover the race where the I/O timed out at the same time the
	 *  controller was completing it.
	 */
	if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) {
		/*
		 * An I/O has timed out, and the controller was unable to
		 *  abort it for some reason.  Construct a fake completion
		 *  status, and then complete the I/O's tracker manually.
		 */
		nvme_printf(tr->qpair->ctrlr,
		    "abort command failed, aborting command manually\n");
		nvme_qpair_manual_complete_tracker(tr,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 0, ERROR_PRINT_ALL);
	}
}

static void
nvme_qpair_timeout(void *arg)
{
	struct nvme_qpair	*qpair = arg;
	struct nvme_controller	*ctrlr = qpair->ctrlr;
	struct nvme_tracker	*tr;
	sbintime_t		now;
	bool			idle;
	bool			needs_reset;
	uint32_t		csts;
	uint8_t			cfs;

	mtx_lock(&qpair->lock);
	idle = TAILQ_EMPTY(&qpair->outstanding_tr);

	switch (qpair->recovery_state) {
	case RECOVERY_NONE:
		/*
		 * Read csts to get the value of cfs - the controller fatal
		 * status.  If we are in hot-plug or controller-failed status,
		 * proceed directly to reset.  We bail early if the register
		 * reads all 1's or the controller fatal status (CFS) bit is
		 * now 1.  The latter is always true when the former is, but
		 * not vice versa.  The intent of the code is that if the card
		 * is gone (all 1's) or we've failed, then try to do a reset
		 * (which sometimes unwedges a card reading all 1's that's not
		 * gone away, but usually doesn't).
		 */
		csts = nvme_mmio_read_4(ctrlr, csts);
		cfs = (csts >> NVME_CSTS_REG_CFS_SHIFT) & NVME_CSTS_REG_CFS_MASK;
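		/*
		 * NVME_GONE is the all-1's pattern a PCIe MMIO read returns
		 * after a surprise removal, so csts == NVME_GONE means the
		 * read never reached a live controller.
		 */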
		if (csts == NVME_GONE || cfs == 1)
			goto do_reset;

		/*
		 * Next, check to see if we have any completions. If we do,
		 * we've likely missed an interrupt, but the card is otherwise
		 * fine. This will also catch all the commands that are about
		 * to timeout (but there's still a tiny race). Since the timeout
		 * is long relative to the race between here and the check below,
		 * this is still a win.
		 */
		mtx_unlock(&qpair->lock);
		nvme_qpair_process_completions(qpair);
		mtx_lock(&qpair->lock);
		if (qpair->recovery_state != RECOVERY_NONE) {
			/*
			 * Somebody else adjusted the recovery state while we
			 * were unlocked, so we should bail. Unlock the qpair
			 * and return without doing anything else.
			 */
			mtx_unlock(&qpair->lock);
			return;
		}

		/*
		 * Check to see if we need to timeout any commands. If we do, then
		 * we also enter a recovery phase.
		 */
		now = getsbinuptime();
		needs_reset = false;
		TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq) {
			if (tr->deadline == SBT_MAX)
				continue;
			if (now > tr->deadline) {
				if (tr->req->cb_fn != nvme_abort_complete &&
				    ctrlr->enable_aborts) {
					/*
					 * This isn't an abort command, ask
					 * for a hardware abort.
					 */
					nvme_ctrlr_cmd_abort(ctrlr, tr->cid,
					    qpair->id, nvme_abort_complete, tr);
				} else {
					/*
					 * Otherwise we have a live command in
					 * the card (either one we couldn't
					 * abort, or aborts weren't enabled).
					 * The only safe way to proceed is to do
					 * a reset.
					 */
					needs_reset = true;
				}
			} else {
				idle = false;
			}
		}
		if (!needs_reset)
			break;

		/*
		 * We've had a command timeout that we weren't able to abort.
		 *
		 * If we get here due to a possible surprise hot-unplug event,
		 * then we let nvme_ctrlr_reset confirm and fail the
		 * controller.
		 */
	do_reset:
		nvme_printf(ctrlr, "Resetting controller due to a timeout%s.\n",
		    (csts == NVME_GONE) ? " and possible hot unplug" :
		    (cfs ? " and fatal error status" : ""));
		nvme_printf(ctrlr, "RECOVERY_WAITING\n");
		qpair->recovery_state = RECOVERY_WAITING;
		nvme_ctrlr_reset(ctrlr);
		idle = false;			/* We want to keep polling */
		break;
	case RECOVERY_WAITING:
		nvme_printf(ctrlr, "waiting for reset to complete\n");
		break;
	}

	/*
	 * Rearm the timeout.
	 */
	if (!idle) {
		callout_schedule_sbt(&qpair->timer, SBT_1S / 2, SBT_1S / 2, 0);
	} else {
		qpair->timer_armed = false;
	}
	mtx_unlock(&qpair->lock);
}

/*
 * Submit the tracker to the hardware. Must already be in the
 * outstanding queue when called.
 */
void
nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
{
	struct nvme_request	*req;
	struct nvme_controller	*ctrlr;
	int timeout;

	mtx_assert(&qpair->lock, MA_OWNED);

	req = tr->req;
	req->cmd.cid = tr->cid;
	qpair->act_tr[tr->cid] = tr;
	ctrlr = qpair->ctrlr;

	if (req->timeout) {
		if (req->cb_fn == nvme_completion_poll_cb)
			timeout = 1;
		else
			timeout = ctrlr->timeout_period;
		tr->deadline = getsbinuptime() + timeout * SBT_1S;
		if (!qpair->timer_armed) {
			qpair->timer_armed = true;
			callout_reset_sbt_on(&qpair->timer, SBT_1S / 2, SBT_1S / 2,
			    nvme_qpair_timeout, qpair, qpair->cpu, 0);
		}
	} else
		tr->deadline = SBT_MAX;

	/* Copy the command from the tracker to the submission queue. */
	memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd));

	if (++qpair->sq_tail == qpair->num_entries)
		qpair->sq_tail = 0;

	bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle,
	    qpair->sq_tdbl_off, qpair->sq_tail);
	qpair->num_cmds++;
}

static void
nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct nvme_tracker	*tr = arg;
	uint32_t		cur_nseg;

	/*
	 * If the mapping operation failed, return immediately.  The caller
	 *  is responsible for detecting the error status and failing the
	 *  tracker manually.
	 */
	if (error != 0) {
		nvme_printf(tr->qpair->ctrlr,
		    "nvme_payload_map err %d\n", error);
		return;
	}

	/*
	 * Note that we specified ctrlr->page_size for alignment and max
	 * segment size when creating the bus dma tags.  So here we can safely
	 * just transfer each segment to its associated PRP entry.
	 */
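	/*
	 * Illustration (hypothetical mapping): three page-sized segments at
	 * bus addresses A, B and C yield prp1 = A, prp2 = tr->prp_bus_addr,
	 * and tr->prp[] = { B, C }; two segments yield prp1 = A, prp2 = B
	 * with no list at all.
	 */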
	tr->req->cmd.prp1 = htole64(seg[0].ds_addr);

	if (nseg == 2) {
		tr->req->cmd.prp2 = htole64(seg[1].ds_addr);
	} else if (nseg > 2) {
		cur_nseg = 1;
		tr->req->cmd.prp2 = htole64((uint64_t)tr->prp_bus_addr);
		while (cur_nseg < nseg) {
			tr->prp[cur_nseg-1] =
			    htole64((uint64_t)seg[cur_nseg].ds_addr);
			cur_nseg++;
		}
	} else {
		/*
		 * prp2 should not be used by the controller
		 *  since there is only one segment, but set
		 *  to 0 just to be safe.
		 */
		tr->req->cmd.prp2 = 0;
	}

	bus_dmamap_sync(tr->qpair->dma_tag_payload, tr->payload_dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	nvme_qpair_submit_tracker(tr->qpair, tr);
}

static void
_nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{
	struct nvme_tracker	*tr;
	int			err = 0;

	mtx_assert(&qpair->lock, MA_OWNED);

	tr = TAILQ_FIRST(&qpair->free_tr);
	req->qpair = qpair;

	if (tr == NULL || qpair->recovery_state != RECOVERY_NONE) {
		/*
		 * No tracker is available, or the qpair is disabled due to
		 *  an in-progress controller-level reset or controller
		 *  failure.
		 */

		if (qpair->ctrlr->is_failed) {
			/*
			 * The controller has failed, so fail the request.
			 */
			nvme_qpair_manual_complete_request(qpair, req,
			    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST);
		} else {
			/*
			 * Put the request on the qpair's request queue to be
			 *  processed when a tracker frees up via a command
			 *  completion or when the controller reset is
			 *  completed.
			 */
			STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
		}
		return;
	}

	TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
	TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq);
	tr->deadline = SBT_MAX;
	tr->req = req;

	if (!req->payload_valid) {
		nvme_qpair_submit_tracker(tr->qpair, tr);
		return;
	}

	err = bus_dmamap_load_mem(tr->qpair->dma_tag_payload,
	    tr->payload_dma_map, &req->payload, nvme_payload_map, tr, 0);
	if (err != 0) {
		/*
		 * The dmamap operation failed, so we manually fail the
		 *  tracker here with DATA_TRANSFER_ERROR status.
		 *
		 * nvme_qpair_manual_complete_tracker must not be called
		 *  with the qpair lock held.
		 */
		nvme_printf(qpair->ctrlr,
		    "bus_dmamap_load_mem returned 0x%x!\n", err);
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_DATA_TRANSFER_ERROR, DO_NOT_RETRY, ERROR_PRINT_ALL);
		mtx_lock(&qpair->lock);
	}
}

void
nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{

	mtx_lock(&qpair->lock);
	_nvme_qpair_submit_request(qpair, req);
	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_enable(struct nvme_qpair *qpair)
{
	mtx_assert(&qpair->lock, MA_OWNED);

	qpair->recovery_state = RECOVERY_NONE;
}

void
nvme_qpair_reset(struct nvme_qpair *qpair)
{

	qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;

	/*
	 * The first time through the completion queue, HW will set the phase
	 *  bit on completions to 1.  So set this to 1 here, indicating
	 *  we're looking for a 1 to know which entries have completed.
	 *  We'll toggle the bit each time the completion queue rolls over.
	 */
	qpair->phase = 1;
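	/*
	 * Phase illustration: with a 4-entry queue, completions are
	 * consumed with P == 1 on the first pass through the queue, P == 0
	 * on the second, and so on; an entry whose phase bit doesn't match
	 * qpair->phase hasn't been written by the controller yet.
	 */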

	memset(qpair->cmd, 0,
	    qpair->num_entries * sizeof(struct nvme_command));
	memset(qpair->cpl, 0,
	    qpair->num_entries * sizeof(struct nvme_completion));
}

void
nvme_admin_qpair_enable(struct nvme_qpair *qpair)
{
	struct nvme_tracker		*tr;
	struct nvme_tracker		*tr_temp;
	bool				rpt;

	/*
	 * Manually abort each outstanding admin command.  Do not retry
	 * admin commands found here, since they will be left over from
	 * a controller reset and it's likely the context in which the
	 * command was issued no longer applies.
	 */
	rpt = !TAILQ_EMPTY(&qpair->outstanding_tr);
	if (rpt)
		nvme_printf(qpair->ctrlr,
		    "aborting outstanding admin command\n");
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL);
	}
	if (rpt)
		nvme_printf(qpair->ctrlr,
		    "done aborting outstanding admin\n");

	mtx_lock(&qpair->lock);
	nvme_qpair_enable(qpair);
	mtx_unlock(&qpair->lock);
}

void
nvme_io_qpair_enable(struct nvme_qpair *qpair)
{
	STAILQ_HEAD(, nvme_request)	temp;
	struct nvme_tracker		*tr;
	struct nvme_tracker		*tr_temp;
	struct nvme_request		*req;
	bool				report;

	/*
	 * Manually abort each outstanding I/O.  This normally results in a
	 * retry, unless the retry count on the associated request has
	 * reached its limit.
	 */
	report = !TAILQ_EMPTY(&qpair->outstanding_tr);
	if (report)
		nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n");
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, 0, ERROR_PRINT_NO_RETRY);
	}
	if (report)
		nvme_printf(qpair->ctrlr, "done aborting outstanding i/o\n");

	mtx_lock(&qpair->lock);

	nvme_qpair_enable(qpair);

	STAILQ_INIT(&temp);
	STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request);

	report = !STAILQ_EMPTY(&temp);
	if (report)
		nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n");
	while (!STAILQ_EMPTY(&temp)) {
		req = STAILQ_FIRST(&temp);
		STAILQ_REMOVE_HEAD(&temp, stailq);
		nvme_qpair_print_command(qpair, &req->cmd);
		_nvme_qpair_submit_request(qpair, req);
	}
	if (report)
		nvme_printf(qpair->ctrlr, "done resubmitting i/o\n");

	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_disable(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr, *tr_temp;

	mtx_lock(&qpair->lock);
	qpair->recovery_state = RECOVERY_WAITING;
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		tr->deadline = SBT_MAX;
	}
	mtx_unlock(&qpair->lock);
}

void
nvme_admin_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
	nvme_admin_qpair_abort_aers(qpair);
}

void
nvme_io_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
}

void
nvme_qpair_fail(struct nvme_qpair *qpair)
{
	struct nvme_tracker		*tr;
	struct nvme_request		*req;

	if (!mtx_initialized(&qpair->lock))
		return;

	mtx_lock(&qpair->lock);

	while (!STAILQ_EMPTY(&qpair->queued_req)) {
		req = STAILQ_FIRST(&qpair->queued_req);
		STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
		nvme_printf(qpair->ctrlr, "failing queued i/o\n");
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST);
		mtx_lock(&qpair->lock);
	}

	/* Manually abort each outstanding I/O. */
	while (!TAILQ_EMPTY(&qpair->outstanding_tr)) {
		tr = TAILQ_FIRST(&qpair->outstanding_tr);
		/*
		 * Do not remove the tracker.  The abort_tracker path will
		 *  do that for us.
		 */
		nvme_printf(qpair->ctrlr, "failing outstanding i/o\n");
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL);
		mtx_lock(&qpair->lock);
	}

	mtx_unlock(&qpair->lock);
}