xref: /freebsd/sys/dev/nvme/nvme_ctrlr.c (revision 2037e9880c8c1bf3f8c82cebb27547b04e89accb)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2012-2016 Intel Corporation
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include "opt_cam.h"
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/buf.h>
37 #include <sys/bus.h>
38 #include <sys/conf.h>
39 #include <sys/ioccom.h>
40 #include <sys/proc.h>
41 #include <sys/smp.h>
42 #include <sys/uio.h>
43 #include <sys/endian.h>
44 
45 #include "nvme_private.h"
46 
47 #define B4_CHK_RDY_DELAY_MS	2300		/* work around controller bug */
48 
49 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
50 						struct nvme_async_event_request *aer);
51 
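/*
 * Construct the admin queue pair (qpair ID 0).  The queue depth comes from
 * the hw.nvme.admin_entries tunable, reverted to NVME_ADMIN_ENTRIES if the
 * tunable falls outside the supported range.
 */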
52 static int
53 nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
54 {
55 	struct nvme_qpair	*qpair;
56 	uint32_t		num_entries;
57 	int			error;
58 
59 	qpair = &ctrlr->adminq;
60 
61 	num_entries = NVME_ADMIN_ENTRIES;
62 	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
63 	/*
64 	 * If admin_entries was overridden to an invalid value, revert it
65 	 *  back to our default value.
66 	 */
67 	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
68 	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
69 		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
70 		    "specified\n", num_entries);
71 		num_entries = NVME_ADMIN_ENTRIES;
72 	}
73 
74 	/*
75 	 * The admin queue's max xfer size is treated differently than the
76 	 *  max I/O xfer size.  16KB is sufficient here - maybe even less?
77 	 */
78 	error = nvme_qpair_construct(qpair,
79 				     0, /* qpair ID */
80 				     0, /* vector */
81 				     num_entries,
82 				     NVME_ADMIN_TRACKERS,
83 				     ctrlr);
84 	return (error);
85 }
86 
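/*
 * Construct all I/O queue pairs.  Queue depth and tracker counts come from
 * the hw.nvme.io_entries and hw.nvme.io_trackers tunables, bounded by the
 * MQES field of the capabilities register, and each qpair's interrupt is
 * bound to a CPU when more than one I/O queue is in use.
 */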
87 static int
88 nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
89 {
90 	struct nvme_qpair	*qpair;
91 	uint32_t		cap_lo;
92 	uint16_t		mqes;
93 	int			i, error, num_entries, num_trackers;
94 
95 	num_entries = NVME_IO_ENTRIES;
96 	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
97 
98 	/*
99 	 * NVMe spec sets a hard limit of 64K max entries, but
100 	 *  devices may specify a smaller limit, so we need to check
101 	 *  the MQES field in the capabilities register.
102 	 */
103 	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
104 	mqes = NVME_CAP_LO_MQES(cap_lo);
105 	num_entries = min(num_entries, mqes + 1);
106 
107 	num_trackers = NVME_IO_TRACKERS;
108 	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);
109 
110 	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
111 	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
112 	/*
113 	 * No need to have more trackers than entries in the submit queue.
114 	 *  Note also that for a queue size of N, we can only have (N-1)
115 	 *  commands outstanding, hence the "-1" here.
116 	 */
117 	num_trackers = min(num_trackers, (num_entries-1));
118 
119 	/*
120 	 * Our best estimate for the maximum number of I/Os that we should
121 	 * normally have in flight at one time. This should be viewed as a hint,
122 	 * not a hard limit, and will need to be revisited when the upper layers
123 	 * of the storage system grow multi-queue support.
124 	 */
125 	ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;
126 
127 	/*
128 	 * This was calculated previously when setting up interrupts, but
129 	 *  a controller could theoretically support fewer I/O queues than
130 	 *  MSI-X vectors.  So calculate again here just to be safe.
131 	 */
132 	ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);
133 
134 	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
135 	    M_NVME, M_ZERO | M_WAITOK);
136 
137 	for (i = 0; i < ctrlr->num_io_queues; i++) {
138 		qpair = &ctrlr->ioq[i];
139 
140 		/*
141 		 * Admin queue has ID=0. IO queues start at ID=1 -
142 		 *  hence the 'i+1' here.
143 		 *
144 		 * For I/O queues, use the controller-wide max_xfer_size
145 		 *  calculated in nvme_attach().
146 		 */
147 		error = nvme_qpair_construct(qpair,
148 				     i+1, /* qpair ID */
149 				     ctrlr->msix_enabled ? i+1 : 0, /* vector */
150 				     num_entries,
151 				     num_trackers,
152 				     ctrlr);
153 		if (error)
154 			return (error);
155 
156 		/*
157 		 * Do not bother binding interrupts if we only have one I/O
158 		 *  interrupt thread for this controller.
159 		 */
160 		if (ctrlr->num_io_queues > 1)
161 			bus_bind_intr(ctrlr->dev, qpair->res,
162 			    i * ctrlr->num_cpus_per_ioq);
163 	}
164 
165 	return (0);
166 }
167 
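/*
 * Mark the controller as failed: disable and fail the admin and I/O qpairs
 * so that outstanding and future requests complete with an error, then
 * notify any registered consumers of the failure.
 */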
168 static void
169 nvme_ctrlr_fail(struct nvme_controller *ctrlr)
170 {
171 	int i;
172 
173 	ctrlr->is_failed = TRUE;
174 	nvme_admin_qpair_disable(&ctrlr->adminq);
175 	nvme_qpair_fail(&ctrlr->adminq);
176 	if (ctrlr->ioq != NULL) {
177 		for (i = 0; i < ctrlr->num_io_queues; i++) {
178 			nvme_io_qpair_disable(&ctrlr->ioq[i]);
179 			nvme_qpair_fail(&ctrlr->ioq[i]);
180 		}
181 	}
182 	nvme_notify_fail_consumers(ctrlr);
183 }
184 
185 void
186 nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
187     struct nvme_request *req)
188 {
189 
190 	mtx_lock(&ctrlr->lock);
191 	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
192 	mtx_unlock(&ctrlr->lock);
193 	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
194 }
195 
196 static void
197 nvme_ctrlr_fail_req_task(void *arg, int pending)
198 {
199 	struct nvme_controller	*ctrlr = arg;
200 	struct nvme_request	*req;
201 
202 	mtx_lock(&ctrlr->lock);
203 	while ((req = STAILQ_FIRST(&ctrlr->fail_req)) != NULL) {
204 		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
205 		mtx_unlock(&ctrlr->lock);
206 		nvme_qpair_manual_complete_request(req->qpair, req,
207 		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST);
208 		mtx_lock(&ctrlr->lock);
209 	}
210 	mtx_unlock(&ctrlr->lock);
211 }
212 
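/*
 * Poll CSTS.RDY until it reaches desired_val, returning ENXIO if the
 * controller disappears (register reads as all-ones) or the controller's
 * ready timeout expires first.
 */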
213 static int
214 nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
215 {
216 	int ms_waited;
217 	uint32_t csts;
218 
219 	ms_waited = 0;
220 	while (1) {
221 		csts = nvme_mmio_read_4(ctrlr, csts);
222 		if (csts == 0xffffffff)		/* Hot unplug. */
223 			return (ENXIO);
224 		if (((csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK)
225 		    == desired_val)
226 			break;
227 		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
228 			nvme_printf(ctrlr, "controller ready did not become %d "
229 			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
230 			return (ENXIO);
231 		}
232 		DELAY(1000);
233 	}
234 
235 	return (0);
236 }
237 
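/*
 * Clear CC.EN to disable the controller.  If an enable is still in
 * progress, first wait for CSTS.RDY to become 1 so that EN is not toggled
 * while RDY is in the opposite state (see the spec note below), then wait
 * for CSTS.RDY to clear.
 */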
238 static int
239 nvme_ctrlr_disable(struct nvme_controller *ctrlr)
240 {
241 	uint32_t cc;
242 	uint32_t csts;
243 	uint8_t  en, rdy;
244 	int err;
245 
246 	cc = nvme_mmio_read_4(ctrlr, cc);
247 	csts = nvme_mmio_read_4(ctrlr, csts);
248 
249 	en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
250 	rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;
251 
252 	/*
253 	 * Per section 3.1.5 of the NVMe 1.3 spec, transitioning CC.EN from 0 to 1
254 	 * when CSTS.RDY is 1, or from 1 to 0 when CSTS.RDY is 0, "has undefined
255 	 * results", so wait until CSTS.RDY reflects the current CC.EN state
256 	 * before changing it.  Short circuit if we're already disabled.
257 	 */
258 	if (en == 1) {
259 		if (rdy == 0) {
260 			/* EN == 1, wait for RDY == 1 or fail */
261 			err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
262 			if (err != 0)
263 				return (err);
264 		}
265 	} else {
266 		/* EN == 0 already; wait for RDY == 0 */
267 		if (rdy == 0)
268 			return (0);
269 		else
270 			return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
271 	}
272 
273 	cc &= ~NVME_CC_REG_EN_MASK;
274 	nvme_mmio_write_4(ctrlr, cc, cc);
275 	/*
276 	 * Some drives have issues with accessing the mmio after we
277 	 * disable, so delay for a bit after we write the bit to
278 	 * cope with these issues.
279 	 */
280 	if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
281 		pause("nvmeR", B4_CHK_RDY_DELAY_MS * hz / 1000);
282 	return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
283 }
284 
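/*
 * Program the admin queue registers (ASQ, ACQ, AQA) and set CC to enable
 * the controller, then wait for CSTS.RDY to become 1.  Short circuits if
 * the controller is already enabled and ready.
 */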
285 static int
286 nvme_ctrlr_enable(struct nvme_controller *ctrlr)
287 {
288 	uint32_t	cc;
289 	uint32_t	csts;
290 	uint32_t	aqa;
291 	uint32_t	qsize;
292 	uint8_t		en, rdy;
293 	int		err;
294 
295 	cc = nvme_mmio_read_4(ctrlr, cc);
296 	csts = nvme_mmio_read_4(ctrlr, csts);
297 
298 	en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
299 	rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;
300 
301 	/*
302 	 * See note in nvme_ctrlr_disable. Short circuit if we're already enabled.
303 	 */
304 	if (en == 1) {
305 		if (rdy == 1)
306 			return (0);
307 		else
308 			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
309 	} else {
310 		/* EN == 0 already; wait for RDY == 0 or fail */
311 		err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
312 		if (err != 0)
313 			return (err);
314 	}
315 
316 	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
317 	DELAY(5000);
318 	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
319 	DELAY(5000);
320 
321 	/* ACQS and ASQS are 0-based. */
322 	qsize = ctrlr->adminq.num_entries - 1;
323 
324 	aqa = 0;
325 	aqa = (qsize & NVME_AQA_REG_ACQS_MASK) << NVME_AQA_REG_ACQS_SHIFT;
326 	aqa |= (qsize & NVME_AQA_REG_ASQS_MASK) << NVME_AQA_REG_ASQS_SHIFT;
327 	nvme_mmio_write_4(ctrlr, aqa, aqa);
328 	DELAY(5000);
329 
330 	/* Initialization values for CC */
331 	cc = 0;
332 	cc |= 1 << NVME_CC_REG_EN_SHIFT;
333 	cc |= 0 << NVME_CC_REG_CSS_SHIFT;
334 	cc |= 0 << NVME_CC_REG_AMS_SHIFT;
335 	cc |= 0 << NVME_CC_REG_SHN_SHIFT;
336 	cc |= 6 << NVME_CC_REG_IOSQES_SHIFT; /* SQ entry size == 64 == 2^6 */
337 	cc |= 4 << NVME_CC_REG_IOCQES_SHIFT; /* CQ entry size == 16 == 2^4 */
338 
339 	/* For 4KB pages this evaluates to 0, the spec-minimum memory page size. */
340 	cc |= (PAGE_SIZE >> 13) << NVME_CC_REG_MPS_SHIFT;
341 
342 	nvme_mmio_write_4(ctrlr, cc, cc);
343 
344 	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
345 }
346 
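/*
 * Perform a full hardware reset: disable the admin and (if already
 * initialized) I/O qpairs, then disable and re-enable the controller.
 */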
347 int
348 nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
349 {
350 	int i, err;
351 
352 	nvme_admin_qpair_disable(&ctrlr->adminq);
353 	/*
354 	 * I/O queues are not allocated before the initial HW
355 	 *  reset, so do not try to disable them.  Use is_initialized
356 	 *  to determine if this is the initial HW reset.
357 	 */
358 	if (ctrlr->is_initialized) {
359 		for (i = 0; i < ctrlr->num_io_queues; i++)
360 			nvme_io_qpair_disable(&ctrlr->ioq[i]);
361 	}
362 
363 	DELAY(100*1000);
364 
365 	err = nvme_ctrlr_disable(ctrlr);
366 	if (err != 0)
367 		return err;
368 	return (nvme_ctrlr_enable(ctrlr));
369 }
370 
371 void
372 nvme_ctrlr_reset(struct nvme_controller *ctrlr)
373 {
374 	int cmpset;
375 
376 	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
377 
378 	if (cmpset == 0 || ctrlr->is_failed)
379 		/*
380 		 * Controller is already resetting or has failed.  Return
381 		 *  immediately since there is no need to kick off another
382 		 *  reset in these cases.
383 		 */
384 		return;
385 
386 	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
387 }
388 
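/*
 * Issue IDENTIFY CONTROLLER, convert the returned data to host endianness,
 * and, if the controller reports a non-zero MDTS, clamp max_xfer_size so
 * it does not exceed that limit.
 */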
389 static int
390 nvme_ctrlr_identify(struct nvme_controller *ctrlr)
391 {
392 	struct nvme_completion_poll_status	status;
393 
394 	status.done = 0;
395 	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
396 	    nvme_completion_poll_cb, &status);
397 	while (!atomic_load_acq_int(&status.done))
398 		pause("nvme", 1);
399 	if (nvme_completion_is_error(&status.cpl)) {
400 		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
401 		return (ENXIO);
402 	}
403 
404 	/* Convert data to host endian */
405 	nvme_controller_data_swapbytes(&ctrlr->cdata);
406 
407 	/*
408 	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
409 	 *  controller supports.
410 	 */
411 	if (ctrlr->cdata.mdts > 0)
412 		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
413 		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
414 
415 	return (0);
416 }
417 
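/*
 * Use the SET FEATURES / NUMBER OF QUEUES command to request I/O queues,
 * then reduce num_io_queues if the controller allocated fewer submission
 * or completion queues than requested.
 */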
418 static int
419 nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
420 {
421 	struct nvme_completion_poll_status	status;
422 	int					cq_allocated, sq_allocated;
423 
424 	status.done = 0;
425 	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
426 	    nvme_completion_poll_cb, &status);
427 	while (!atomic_load_acq_int(&status.done))
428 		pause("nvme", 1);
429 	if (nvme_completion_is_error(&status.cpl)) {
430 		nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
431 		return (ENXIO);
432 	}
433 
434 	/*
435 	 * Data in cdw0 is 0-based.
436 	 * Lower 16-bits indicate number of submission queues allocated.
437 	 * Upper 16-bits indicate number of completion queues allocated.
438 	 */
439 	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
440 	cq_allocated = (status.cpl.cdw0 >> 16) + 1;
441 
442 	/*
443 	 * Controller may allocate more queues than we requested,
444 	 *  so use the minimum of the number requested and what was
445 	 *  actually allocated.
446 	 */
447 	ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
448 	ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
449 
450 	return (0);
451 }
452 
453 static int
454 nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
455 {
456 	struct nvme_completion_poll_status	status;
457 	struct nvme_qpair			*qpair;
458 	int					i;
459 
460 	for (i = 0; i < ctrlr->num_io_queues; i++) {
461 		qpair = &ctrlr->ioq[i];
462 
463 		status.done = 0;
464 		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
465 		    nvme_completion_poll_cb, &status);
466 		while (!atomic_load_acq_int(&status.done))
467 			pause("nvme", 1);
468 		if (nvme_completion_is_error(&status.cpl)) {
469 			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
470 			return (ENXIO);
471 		}
472 
473 		status.done = 0;
474 		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
475 		    nvme_completion_poll_cb, &status);
476 		while (!atomic_load_acq_int(&status.done))
477 			pause("nvme", 1);
478 		if (nvme_completion_is_error(&status.cpl)) {
479 			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
480 			return (ENXIO);
481 		}
482 	}
483 
484 	return (0);
485 }
486 
487 static int
488 nvme_ctrlr_destroy_qpairs(struct nvme_controller *ctrlr)
489 {
490 	struct nvme_completion_poll_status	status;
491 	struct nvme_qpair			*qpair;
492 
493 	for (int i = 0; i < ctrlr->num_io_queues; i++) {
494 		qpair = &ctrlr->ioq[i];
495 
496 		status.done = 0;
497 		nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
498 		    nvme_completion_poll_cb, &status);
499 		while (!atomic_load_acq_int(&status.done))
500 			pause("nvme", 1);
501 		if (nvme_completion_is_error(&status.cpl)) {
502 			nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
503 			return (ENXIO);
504 		}
505 
506 		status.done = 0;
507 		nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
508 		    nvme_completion_poll_cb, &status);
509 		while (!atomic_load_acq_int(&status.done))
510 			pause("nvme", 1);
511 		if (nvme_completion_is_error(&status.cpl)) {
512 			nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
513 			return (ENXIO);
514 		}
515 	}
516 
517 	return (0);
518 }
519 
520 static int
521 nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
522 {
523 	struct nvme_namespace	*ns;
524 	uint32_t 		i;
525 
526 	for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
527 		ns = &ctrlr->ns[i];
528 		nvme_ns_construct(ns, i+1, ctrlr);
529 	}
530 
531 	return (0);
532 }
533 
534 static boolean_t
535 is_log_page_id_valid(uint8_t page_id)
536 {
537 
538 	switch (page_id) {
539 	case NVME_LOG_ERROR:
540 	case NVME_LOG_HEALTH_INFORMATION:
541 	case NVME_LOG_FIRMWARE_SLOT:
542 	case NVME_LOG_CHANGED_NAMESPACE:
543 	case NVME_LOG_COMMAND_EFFECT:
544 	case NVME_LOG_RES_NOTIFICATION:
545 	case NVME_LOG_SANITIZE_STATUS:
546 		return (TRUE);
547 	}
548 
549 	return (FALSE);
550 }
551 
552 static uint32_t
553 nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
554 {
555 	uint32_t	log_page_size;
556 
557 	switch (page_id) {
558 	case NVME_LOG_ERROR:
559 		log_page_size = min(
560 		    sizeof(struct nvme_error_information_entry) *
561 		    (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
562 		break;
563 	case NVME_LOG_HEALTH_INFORMATION:
564 		log_page_size = sizeof(struct nvme_health_information_page);
565 		break;
566 	case NVME_LOG_FIRMWARE_SLOT:
567 		log_page_size = sizeof(struct nvme_firmware_page);
568 		break;
569 	case NVME_LOG_CHANGED_NAMESPACE:
570 		log_page_size = sizeof(struct nvme_ns_list);
571 		break;
572 	case NVME_LOG_COMMAND_EFFECT:
573 		log_page_size = sizeof(struct nvme_command_effects_page);
574 		break;
575 	case NVME_LOG_RES_NOTIFICATION:
576 		log_page_size = sizeof(struct nvme_res_notification_page);
577 		break;
578 	case NVME_LOG_SANITIZE_STATUS:
579 		log_page_size = sizeof(struct nvme_sanitize_status_page);
580 		break;
581 	default:
582 		log_page_size = 0;
583 		break;
584 	}
585 
586 	return (log_page_size);
587 }
588 
589 static void
590 nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
591     uint8_t state)
592 {
593 
594 	if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
595 		nvme_printf(ctrlr, "available spare space below threshold\n");
596 
597 	if (state & NVME_CRIT_WARN_ST_TEMPERATURE)
598 		nvme_printf(ctrlr, "temperature above threshold\n");
599 
600 	if (state & NVME_CRIT_WARN_ST_DEVICE_RELIABILITY)
601 		nvme_printf(ctrlr, "device reliability degraded\n");
602 
603 	if (state & NVME_CRIT_WARN_ST_READ_ONLY)
604 		nvme_printf(ctrlr, "media placed in read only mode\n");
605 
606 	if (state & NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP)
607 		nvme_printf(ctrlr, "volatile memory backup device failed\n");
608 
609 	if (state & NVME_CRIT_WARN_ST_RESERVED_MASK)
610 		nvme_printf(ctrlr,
611 		    "unknown critical warning(s): state = 0x%02x\n", state);
612 }
613 
614 static void
615 nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
616 {
617 	struct nvme_async_event_request		*aer = arg;
618 	struct nvme_health_information_page	*health_info;
619 	struct nvme_ns_list			*nsl;
620 	struct nvme_error_information_entry	*err;
621 	int i;
622 
623 	/*
624 	 * If the log page fetch for some reason completed with an error,
625 	 *  don't pass log page data to the consumers.  In practice, this case
626 	 *  should never happen.
627 	 */
628 	if (nvme_completion_is_error(cpl))
629 		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
630 		    aer->log_page_id, NULL, 0);
631 	else {
632 		/* Convert data to host endian */
633 		switch (aer->log_page_id) {
634 		case NVME_LOG_ERROR:
635 			err = (struct nvme_error_information_entry *)aer->log_page_buffer;
636 			for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
637 				nvme_error_information_entry_swapbytes(err++);
638 			break;
639 		case NVME_LOG_HEALTH_INFORMATION:
640 			nvme_health_information_page_swapbytes(
641 			    (struct nvme_health_information_page *)aer->log_page_buffer);
642 			break;
643 		case NVME_LOG_FIRMWARE_SLOT:
644 			nvme_firmware_page_swapbytes(
645 			    (struct nvme_firmware_page *)aer->log_page_buffer);
646 			break;
647 		case NVME_LOG_CHANGED_NAMESPACE:
648 			nvme_ns_list_swapbytes(
649 			    (struct nvme_ns_list *)aer->log_page_buffer);
650 			break;
651 		case NVME_LOG_COMMAND_EFFECT:
652 			nvme_command_effects_page_swapbytes(
653 			    (struct nvme_command_effects_page *)aer->log_page_buffer);
654 			break;
655 		case NVME_LOG_RES_NOTIFICATION:
656 			nvme_res_notification_page_swapbytes(
657 			    (struct nvme_res_notification_page *)aer->log_page_buffer);
658 			break;
659 		case NVME_LOG_SANITIZE_STATUS:
660 			nvme_sanitize_status_page_swapbytes(
661 			    (struct nvme_sanitize_status_page *)aer->log_page_buffer);
662 			break;
663 		case INTEL_LOG_TEMP_STATS:
664 			intel_log_temp_stats_swapbytes(
665 			    (struct intel_log_temp_stats *)aer->log_page_buffer);
666 			break;
667 		default:
668 			break;
669 		}
670 
671 		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
672 			health_info = (struct nvme_health_information_page *)
673 			    aer->log_page_buffer;
674 			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
675 			    health_info->critical_warning);
676 			/*
677 			 * Critical warnings reported through the
678 			 *  SMART/health log page are persistent, so
679 			 *  clear the associated bits in the async event
680 			 *  config so that we do not receive repeated
681 			 *  notifications for the same event.
682 			 */
683 			aer->ctrlr->async_event_config &=
684 			    ~health_info->critical_warning;
685 			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
686 			    aer->ctrlr->async_event_config, NULL, NULL);
687 		} else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE &&
688 		    !nvme_use_nvd) {
689 			nsl = (struct nvme_ns_list *)aer->log_page_buffer;
690 			for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
691 				if (nsl->ns[i] > NVME_MAX_NAMESPACES)
692 					break;
693 				nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
694 			}
695 		}
696 
697 
698 		/*
699 		 * Pass the cpl data from the original async event completion,
700 		 *  not the log page fetch.
701 		 */
702 		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
703 		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
704 	}
705 
706 	/*
707 	 * Repost another asynchronous event request to replace the one
708 	 *  that just completed.
709 	 */
710 	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
711 }
712 
713 static void
714 nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
715 {
716 	struct nvme_async_event_request	*aer = arg;
717 
718 	if (nvme_completion_is_error(cpl)) {
719 		/*
720 		 *  Do not retry failed async event requests.  This avoids
721 		 *  infinite loops where a new async event request is submitted
722 		 *  to replace the one just failed, only to fail again and
723 		 *  perpetuate the loop.
724 		 */
725 		return;
726 	}
727 
728 	/* Associated log page is in bits 23:16 of completion entry dw0. */
729 	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;
730 
731 	nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
732 	    " page 0x%02x)\n", (cpl->cdw0 & 0x07), (cpl->cdw0 & 0xFF00) >> 8,
733 	    aer->log_page_id);
734 
735 	if (is_log_page_id_valid(aer->log_page_id)) {
736 		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
737 		    aer->log_page_id);
738 		memcpy(&aer->cpl, cpl, sizeof(*cpl));
739 		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
740 		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
741 		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
742 		    aer);
743 		/* Wait to notify consumers until after log page is fetched. */
744 	} else {
745 		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
746 		    NULL, 0);
747 
748 		/*
749 		 * Repost another asynchronous event request to replace the one
750 		 *  that just completed.
751 		 */
752 		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
753 	}
754 }
755 
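/*
 * (Re)initialize an asynchronous event request and submit it on the admin
 * queue.  Timeouts are disabled since an AER may legitimately remain
 * outstanding indefinitely.
 */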
756 static void
757 nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
758     struct nvme_async_event_request *aer)
759 {
760 	struct nvme_request *req;
761 
762 	aer->ctrlr = ctrlr;
763 	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
764 	aer->req = req;
765 
766 	/*
767 	 * Disable timeout here, since asynchronous event requests should by
768 	 *  nature never be timed out.
769 	 */
770 	req->timeout = FALSE;
771 	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
772 	nvme_ctrlr_submit_admin_request(ctrlr, req);
773 }
774 
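/*
 * Decide which asynchronous events to enable (temperature warnings only
 * when the controller supports a temperature threshold), program the async
 * event configuration, and post one AER per supported slot, up to
 * NVME_MAX_ASYNC_EVENTS.
 */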
775 static void
776 nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
777 {
778 	struct nvme_completion_poll_status	status;
779 	struct nvme_async_event_request		*aer;
780 	uint32_t				i;
781 
782 	ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
783 	    NVME_CRIT_WARN_ST_DEVICE_RELIABILITY |
784 	    NVME_CRIT_WARN_ST_READ_ONLY |
785 	    NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP;
786 	if (ctrlr->cdata.ver >= NVME_REV(1, 2))
787 		ctrlr->async_event_config |= 0x300;
788 
789 	status.done = 0;
790 	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
791 	    0, NULL, 0, nvme_completion_poll_cb, &status);
792 	while (!atomic_load_acq_int(&status.done))
793 		pause("nvme", 1);
794 	if (nvme_completion_is_error(&status.cpl) ||
795 	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
796 	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
797 		nvme_printf(ctrlr, "temperature threshold not supported\n");
798 	} else
799 		ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;
800 
801 	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
802 	    ctrlr->async_event_config, NULL, NULL);
803 
804 	/* aerl is a zero-based value, so we need to add 1 here. */
805 	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
806 
807 	for (i = 0; i < ctrlr->num_aers; i++) {
808 		aer = &ctrlr->aer[i];
809 		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
810 	}
811 }
812 
813 static void
814 nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
815 {
816 
817 	ctrlr->int_coal_time = 0;
818 	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
819 	    &ctrlr->int_coal_time);
820 
821 	ctrlr->int_coal_threshold = 0;
822 	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
823 	    &ctrlr->int_coal_threshold);
824 
825 	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
826 	    ctrlr->int_coal_threshold, NULL, NULL);
827 }
828 
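/*
 * Bring the controller to a fully running state: reset and enable the
 * qpairs as needed, identify the controller, create the I/O queues,
 * construct namespaces, and configure asynchronous events and interrupt
 * coalescing.  Called from the config hook at attach time and again after
 * every controller reset.
 */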
829 static void
830 nvme_ctrlr_start(void *ctrlr_arg)
831 {
832 	struct nvme_controller *ctrlr = ctrlr_arg;
833 	uint32_t old_num_io_queues;
834 	int i;
835 
836 	/*
837 	 * Only reset adminq here when we are restarting the
838 	 *  controller after a reset.  During initialization,
839 	 *  we have already submitted admin commands to get
840 	 *  the number of I/O queues supported, so cannot reset
841 	 *  the adminq again here.
842 	 */
843 	if (ctrlr->is_resetting)
844 		nvme_qpair_reset(&ctrlr->adminq);
845 
846 	for (i = 0; i < ctrlr->num_io_queues; i++)
847 		nvme_qpair_reset(&ctrlr->ioq[i]);
848 
849 	nvme_admin_qpair_enable(&ctrlr->adminq);
850 
851 	if (nvme_ctrlr_identify(ctrlr) != 0) {
852 		nvme_ctrlr_fail(ctrlr);
853 		return;
854 	}
855 
856 	/*
857 	 * The number of qpairs is determined during controller initialization,
858 	 *  including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
859 	 *  HW limit.  We call SET_FEATURES again here so that it gets called
860 	 *  after any reset for controllers that depend on the driver to
861 	 *  explicitly specify how many queues it will use.  This value should
862 	 *  never change between resets, so panic if somehow that does happen.
863 	 */
864 	if (ctrlr->is_resetting) {
865 		old_num_io_queues = ctrlr->num_io_queues;
866 		if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
867 			nvme_ctrlr_fail(ctrlr);
868 			return;
869 		}
870 
871 		if (old_num_io_queues != ctrlr->num_io_queues) {
872 			panic("num_io_queues changed from %u to %u",
873 			      old_num_io_queues, ctrlr->num_io_queues);
874 		}
875 	}
876 
877 	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
878 		nvme_ctrlr_fail(ctrlr);
879 		return;
880 	}
881 
882 	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
883 		nvme_ctrlr_fail(ctrlr);
884 		return;
885 	}
886 
887 	nvme_ctrlr_configure_aer(ctrlr);
888 	nvme_ctrlr_configure_int_coalescing(ctrlr);
889 
890 	for (i = 0; i < ctrlr->num_io_queues; i++)
891 		nvme_io_qpair_enable(&ctrlr->ioq[i]);
892 }
893 
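/*
 * Config intrhook entry point: perform the initial controller bring-up once
 * interrupts are available, then publish sysctls and notify consumers of
 * the new controller.
 */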
894 void
895 nvme_ctrlr_start_config_hook(void *arg)
896 {
897 	struct nvme_controller *ctrlr = arg;
898 
899 	nvme_qpair_reset(&ctrlr->adminq);
900 	nvme_admin_qpair_enable(&ctrlr->adminq);
901 
902 	if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
903 	    nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
904 		nvme_ctrlr_start(ctrlr);
905 	else
906 		nvme_ctrlr_fail(ctrlr);
907 
908 	nvme_sysctl_initialize_ctrlr(ctrlr);
909 	config_intrhook_disestablish(&ctrlr->config_hook);
910 
911 	ctrlr->is_initialized = 1;
912 	nvme_notify_new_controller(ctrlr);
913 }
914 
915 static void
916 nvme_ctrlr_reset_task(void *arg, int pending)
917 {
918 	struct nvme_controller	*ctrlr = arg;
919 	int			status;
920 
921 	nvme_printf(ctrlr, "resetting controller\n");
922 	status = nvme_ctrlr_hw_reset(ctrlr);
923 	/*
924 	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
925 	 *  handlers on this CPU that were blocked on a qpair lock. We want
926 	 *  all nvme interrupts completed before proceeding with restarting the
927 	 *  controller.
928 	 *
929 	 * XXX - any way to guarantee the interrupt handlers have quiesced?
930 	 */
931 	pause("nvmereset", hz / 10);
932 	if (status == 0)
933 		nvme_ctrlr_start(ctrlr);
934 	else
935 		nvme_ctrlr_fail(ctrlr);
936 
937 	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
938 }
939 
940 /*
941  * Poll all the queues enabled on the device for completion.
942  */
943 void
944 nvme_ctrlr_poll(struct nvme_controller *ctrlr)
945 {
946 	int i;
947 
948 	nvme_qpair_process_completions(&ctrlr->adminq);
949 
950 	for (i = 0; i < ctrlr->num_io_queues; i++)
951 		if (ctrlr->ioq && ctrlr->ioq[i].cpl)
952 			nvme_qpair_process_completions(&ctrlr->ioq[i]);
953 }
954 
955 /*
956  * Poll the single-vector interrupt case: num_io_queues will be 1 and
957  * there's only a single vector. While we're polling, we mask further
958  * interrupts in the controller.
959  */
960 void
961 nvme_ctrlr_intx_handler(void *arg)
962 {
963 	struct nvme_controller *ctrlr = arg;
964 
965 	nvme_mmio_write_4(ctrlr, intms, 1);
966 	nvme_ctrlr_poll(ctrlr);
967 	nvme_mmio_write_4(ctrlr, intmc, 1);
968 }
969 
970 static void
971 nvme_pt_done(void *arg, const struct nvme_completion *cpl)
972 {
973 	struct nvme_pt_command *pt = arg;
974 	struct mtx *mtx = pt->driver_lock;
975 	uint16_t status;
976 
977 	bzero(&pt->cpl, sizeof(pt->cpl));
978 	pt->cpl.cdw0 = cpl->cdw0;
979 
980 	status = cpl->status;
981 	status &= ~NVME_STATUS_P_MASK;
982 	pt->cpl.status = status;
983 
984 	mtx_lock(mtx);
985 	pt->driver_lock = NULL;
986 	wakeup(pt);
987 	mtx_unlock(mtx);
988 }
989 
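/*
 * Submit a passthrough command on behalf of an ioctl caller, wiring the
 * user buffer (if any) for the duration of the command and sleeping until
 * the completion callback releases the per-command lock.
 */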
990 int
991 nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
992     struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
993     int is_admin_cmd)
994 {
995 	struct nvme_request	*req;
996 	struct mtx		*mtx;
997 	struct buf		*buf = NULL;
998 	int			ret = 0;
999 	vm_offset_t		addr, end;
1000 
1001 	if (pt->len > 0) {
1002 		/*
1003 		 * vmapbuf calls vm_fault_quick_hold_pages which only maps full
1004 		 * pages. Ensure this request has fewer than MAXPHYS bytes when
1005 		 * extended to full pages.
1006 		 */
1007 		addr = (vm_offset_t)pt->buf;
1008 		end = round_page(addr + pt->len);
1009 		addr = trunc_page(addr);
1010 		if (end - addr > MAXPHYS)
1011 			return EIO;
1012 
1013 		if (pt->len > ctrlr->max_xfer_size) {
1014 			nvme_printf(ctrlr, "pt->len (%d) "
1015 			    "exceeds max_xfer_size (%d)\n", pt->len,
1016 			    ctrlr->max_xfer_size);
1017 			return EIO;
1018 		}
1019 		if (is_user_buffer) {
1020 			/*
1021 			 * Ensure the user buffer is wired for the duration of
1022 			 *  this passthrough command.
1023 			 */
1024 			PHOLD(curproc);
1025 			buf = uma_zalloc(pbuf_zone, M_WAITOK);
1026 			buf->b_data = pt->buf;
1027 			buf->b_bufsize = pt->len;
1028 			buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
1029 			if (vmapbuf(buf, 1) < 0) {
1030 				ret = EFAULT;
1031 				goto err;
1032 			}
1033 			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
1034 			    nvme_pt_done, pt);
1035 		} else
1036 			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
1037 			    nvme_pt_done, pt);
1038 	} else
1039 		req = nvme_allocate_request_null(nvme_pt_done, pt);
1040 
1041 	/* Assume userspace already converted to little-endian */
1042 	req->cmd.opc = pt->cmd.opc;
1043 	req->cmd.fuse = pt->cmd.fuse;
1044 	req->cmd.rsvd2 = pt->cmd.rsvd2;
1045 	req->cmd.rsvd3 = pt->cmd.rsvd3;
1046 	req->cmd.cdw10 = pt->cmd.cdw10;
1047 	req->cmd.cdw11 = pt->cmd.cdw11;
1048 	req->cmd.cdw12 = pt->cmd.cdw12;
1049 	req->cmd.cdw13 = pt->cmd.cdw13;
1050 	req->cmd.cdw14 = pt->cmd.cdw14;
1051 	req->cmd.cdw15 = pt->cmd.cdw15;
1052 
1053 	req->cmd.nsid = htole32(nsid);
1054 
1055 	mtx = mtx_pool_find(mtxpool_sleep, pt);
1056 	pt->driver_lock = mtx;
1057 
1058 	if (is_admin_cmd)
1059 		nvme_ctrlr_submit_admin_request(ctrlr, req);
1060 	else
1061 		nvme_ctrlr_submit_io_request(ctrlr, req);
1062 
1063 	mtx_lock(mtx);
1064 	while (pt->driver_lock != NULL)
1065 		mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
1066 	mtx_unlock(mtx);
1067 
1068 err:
1069 	if (buf != NULL) {
1070 		uma_zfree(pbuf_zone, buf);
1071 		PRELE(curproc);
1072 	}
1073 
1074 	return (ret);
1075 }
1076 
1077 static int
1078 nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
1079     struct thread *td)
1080 {
1081 	struct nvme_controller			*ctrlr;
1082 	struct nvme_pt_command			*pt;
1083 
1084 	ctrlr = cdev->si_drv1;
1085 
1086 	switch (cmd) {
1087 	case NVME_RESET_CONTROLLER:
1088 		nvme_ctrlr_reset(ctrlr);
1089 		break;
1090 	case NVME_PASSTHROUGH_CMD:
1091 		pt = (struct nvme_pt_command *)arg;
1092 		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
1093 		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
1094 	case NVME_GET_NSID:
1095 	{
1096 		struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;
1097 		strncpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
1098 		    sizeof(gnsid->cdev));
1099 		gnsid->nsid = 0;
1100 		break;
1101 	}
1102 	default:
1103 		return (ENOTTY);
1104 	}
1105 
1106 	return (0);
1107 }
1108 
1109 static struct cdevsw nvme_ctrlr_cdevsw = {
1110 	.d_version =	D_VERSION,
1111 	.d_flags =	0,
1112 	.d_ioctl =	nvme_ctrlr_ioctl
1113 };
1114 
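/*
 * Early controller construction: read the capability registers, apply the
 * timeout/retry/abort tunables, construct the admin qpair, create the
 * taskqueue used for reset and failure handling, and create the /dev/nvmeX
 * character device.
 */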
1115 int
1116 nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
1117 {
1118 	struct make_dev_args	md_args;
1119 	uint32_t	cap_lo;
1120 	uint32_t	cap_hi;
1121 	uint32_t	to;
1122 	uint8_t		dstrd;
1123 	uint8_t		mpsmin;
1124 	int		status, timeout_period;
1125 
1126 	ctrlr->dev = dev;
1127 
1128 	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
1129 
1130 	/*
1131 	 * Software emulators may set the doorbell stride to something
1132 	 *  other than zero, but this driver is not set up to handle that.
1133 	 */
1134 	cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
1135 	dstrd = NVME_CAP_HI_DSTRD(cap_hi);
1136 	if (dstrd != 0)
1137 		return (ENXIO);
1138 
1139 	mpsmin = NVME_CAP_HI_MPSMIN(cap_hi);
1140 	ctrlr->min_page_size = 1 << (12 + mpsmin);
1141 
1142 	/* Get ready timeout value from controller, in units of 500ms. */
1143 	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
1144 	to = NVME_CAP_LO_TO(cap_lo) + 1;
1145 	ctrlr->ready_timeout_in_ms = to * 500;
1146 
1147 	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
1148 	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
1149 	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
1150 	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
1151 	ctrlr->timeout_period = timeout_period;
1152 
1153 	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
1154 	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);
1155 
1156 	ctrlr->enable_aborts = 0;
1157 	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
1158 
1159 	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
1160 	if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
1161 		return (ENXIO);
1162 
1163 	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
1164 	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
1165 	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");
1166 
1167 	ctrlr->is_resetting = 0;
1168 	ctrlr->is_initialized = 0;
1169 	ctrlr->notification_sent = 0;
1170 	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
1171 	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
1172 	STAILQ_INIT(&ctrlr->fail_req);
1173 	ctrlr->is_failed = FALSE;
1174 
1175 	make_dev_args_init(&md_args);
1176 	md_args.mda_devsw = &nvme_ctrlr_cdevsw;
1177 	md_args.mda_uid = UID_ROOT;
1178 	md_args.mda_gid = GID_WHEEL;
1179 	md_args.mda_mode = 0600;
1180 	md_args.mda_unit = device_get_unit(dev);
1181 	md_args.mda_si_drv1 = (void *)ctrlr;
1182 	status = make_dev_s(&md_args, &ctrlr->cdev, "nvme%d",
1183 	    device_get_unit(dev));
1184 	if (status != 0)
1185 		return (ENXIO);
1186 
1187 	return (0);
1188 }
1189 
1190 void
1191 nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
1192 {
1193 	int	gone, i;
1194 
1195 	if (ctrlr->resource == NULL)
1196 		goto nores;
1197 
1198 	/*
1199 	 * Check whether it is a hot unplug or a clean driver detach.
1200 	 * If device is not there any more, skip any shutdown commands.
1201 	 */
1202 	gone = (nvme_mmio_read_4(ctrlr, csts) == 0xffffffff);
1203 	if (gone)
1204 		nvme_ctrlr_fail(ctrlr);
1205 	else
1206 		nvme_notify_fail_consumers(ctrlr);
1207 
1208 	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
1209 		nvme_ns_destruct(&ctrlr->ns[i]);
1210 
1211 	if (ctrlr->cdev)
1212 		destroy_dev(ctrlr->cdev);
1213 
1214 	if (!gone)
1215 		nvme_ctrlr_destroy_qpairs(ctrlr);
1216 	for (i = 0; i < ctrlr->num_io_queues; i++)
1217 		nvme_io_qpair_destroy(&ctrlr->ioq[i]);
1218 	free(ctrlr->ioq, M_NVME);
1219 	nvme_admin_qpair_destroy(&ctrlr->adminq);
1220 
1221 	/*
1222 	 *  Notify the controller of a shutdown, even though this is due to
1223 	 *   a driver unload, not a system shutdown (this path is not invoked
1224 	 *   during shutdown).  This ensures the controller receives a
1225 	 *   shutdown notification in case the system is shut down before
1226 	 *   reloading the driver.
1227 	 */
1228 	if (!gone)
1229 		nvme_ctrlr_shutdown(ctrlr);
1230 
1231 	if (!gone)
1232 		nvme_ctrlr_disable(ctrlr);
1233 
1234 	if (ctrlr->taskqueue)
1235 		taskqueue_free(ctrlr->taskqueue);
1236 
1237 	if (ctrlr->tag)
1238 		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
1239 
1240 	if (ctrlr->res)
1241 		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
1242 		    rman_get_rid(ctrlr->res), ctrlr->res);
1243 
1244 	if (ctrlr->bar4_resource != NULL) {
1245 		bus_release_resource(dev, SYS_RES_MEMORY,
1246 		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
1247 	}
1248 
1249 	bus_release_resource(dev, SYS_RES_MEMORY,
1250 	    ctrlr->resource_id, ctrlr->resource);
1251 
1252 nores:
1253 	mtx_destroy(&ctrlr->lock);
1254 }
1255 
1256 void
1257 nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
1258 {
1259 	uint32_t	cc;
1260 	uint32_t	csts;
1261 	int		ticks = 0;
1262 
1263 	cc = nvme_mmio_read_4(ctrlr, cc);
1264 	cc &= ~(NVME_CC_REG_SHN_MASK << NVME_CC_REG_SHN_SHIFT);
1265 	cc |= NVME_SHN_NORMAL << NVME_CC_REG_SHN_SHIFT;
1266 	nvme_mmio_write_4(ctrlr, cc, cc);
1267 
1268 	while (1) {
1269 		csts = nvme_mmio_read_4(ctrlr, csts);
1270 		if (csts == 0xffffffff)		/* Hot unplug. */
1271 			break;
1272 		if (NVME_CSTS_GET_SHST(csts) == NVME_SHST_COMPLETE)
1273 			break;
1274 		if (ticks++ > 5*hz) {
1275 			nvme_printf(ctrlr, "did not complete shutdown within"
1276 			    " 5 seconds of notification\n");
1277 			break;
1278 		}
1279 		pause("nvme shn", 1);
1280 	}
1281 }
1282 
1283 void
1284 nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
1285     struct nvme_request *req)
1286 {
1287 
1288 	nvme_qpair_submit_request(&ctrlr->adminq, req);
1289 }
1290 
1291 void
1292 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
1293     struct nvme_request *req)
1294 {
1295 	struct nvme_qpair       *qpair;
1296 
1297 	qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
1298 	nvme_qpair_submit_request(qpair, req);
1299 }
1300 
1301 device_t
1302 nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
1303 {
1304 
1305 	return (ctrlr->dev);
1306 }
1307 
1308 const struct nvme_controller_data *
1309 nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
1310 {
1311 
1312 	return (&ctrlr->cdata);
1313 }
1314