xref: /freebsd/sys/dev/nvme/nvme_ctrlr.c (revision 3fc36ee018bb836bd1796067cf4ef8683f166ebc)
1 /*-
2  * Copyright (C) 2012-2016 Intel Corporation
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_cam.h"
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/buf.h>
35 #include <sys/bus.h>
36 #include <sys/conf.h>
37 #include <sys/ioccom.h>
38 #include <sys/proc.h>
39 #include <sys/smp.h>
40 #include <sys/uio.h>
41 
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
44 
45 #include "nvme_private.h"
46 
47 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
48 						struct nvme_async_event_request *aer);
49 static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);
50 
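/*
 * Map the controller's register BAR (BAR 0/1) and, if present, BAR 4/5,
 *  which some devices use to expose their MSI-X table.
 */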
51 static int
52 nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
53 {
54 
55 	ctrlr->resource_id = PCIR_BAR(0);
56 
57 	ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
58 	    &ctrlr->resource_id, RF_ACTIVE);
59 
60 	if (ctrlr->resource == NULL) {
61 		nvme_printf(ctrlr, "unable to allocate pci resource\n");
62 		return (ENOMEM);
63 	}
64 
65 	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
66 	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
67 	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;
68 
69 	/*
70 	 * The NVMe spec allows for the MSI-X table to be placed behind
71 	 *  BAR 4/5, separate from the control/doorbell registers.  Always
72 	 *  try to map this BAR, because it must be mapped prior to calling
73 	 *  pci_alloc_msix().  If the table isn't behind BAR 4/5,
74 	 *  bus_alloc_resource() will just return NULL, which is OK.
75 	 */
76 	ctrlr->bar4_resource_id = PCIR_BAR(4);
77 	ctrlr->bar4_resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
78 	    &ctrlr->bar4_resource_id, RF_ACTIVE);
79 
80 	return (0);
81 }
82 
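/*
 * Set up the admin queue pair.  Its depth comes from the
 *  hw.nvme.admin_entries tunable, clamped to the driver's supported range.
 */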
83 static void
84 nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
85 {
86 	struct nvme_qpair	*qpair;
87 	uint32_t		num_entries;
88 
89 	qpair = &ctrlr->adminq;
90 
91 	num_entries = NVME_ADMIN_ENTRIES;
92 	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
93 	/*
94 	 * If admin_entries was overridden to an invalid value, revert it
95 	 *  to our default value.
96 	 */
97 	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
98 	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
99 		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
100 		    "specified\n", num_entries);
101 		num_entries = NVME_ADMIN_ENTRIES;
102 	}
103 
104 	/*
105 	 * The admin queue's max xfer size is treated differently than the
106 	 *  max I/O xfer size.  16KB is sufficient here - maybe even less?
107 	 */
108 	nvme_qpair_construct(qpair,
109 			     0, /* qpair ID */
110 			     0, /* vector */
111 			     num_entries,
112 			     NVME_ADMIN_TRACKERS,
113 			     ctrlr);
114 }
115 
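/*
 * Allocate and construct the I/O queue pairs.  Queue depth comes from the
 *  hw.nvme.io_entries tunable, capped by the device's MQES limit; tracker
 *  counts are tunable as well.  With multiple queues, each queue's
 *  interrupt is bound to a subset of the CPUs.
 */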
116 static int
117 nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
118 {
119 	struct nvme_qpair	*qpair;
120 	union cap_lo_register	cap_lo;
121 	int			i, num_entries, num_trackers;
122 
123 	num_entries = NVME_IO_ENTRIES;
124 	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
125 
126 	/*
127 	 * NVMe spec sets a hard limit of 64K max entries, but
128 	 *  devices may specify a smaller limit, so we need to check
129 	 *  the MQES field in the capabilities register.
130 	 */
131 	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
132 	num_entries = min(num_entries, cap_lo.bits.mqes+1);
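	/* e.g. MQES == 0x3ff (zero-based) allows at most 1024 entries per queue. */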
133 
134 	num_trackers = NVME_IO_TRACKERS;
135 	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);
136 
137 	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
138 	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
139 	/*
140 	 * No need to have more trackers than entries in the submit queue.
141 	 *  Note also that for a queue size of N, we can only have (N-1)
142 	 *  commands outstanding, hence the "-1" here.
143 	 */
144 	num_trackers = min(num_trackers, (num_entries-1));
145 
146 	/*
147 	 * This was calculated previously when setting up interrupts, but
148 	 *  a controller could theoretically support fewer I/O queues than
149 	 *  MSI-X vectors.  So calculate again here just to be safe.
150 	 */
151 	ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);
152 
153 	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
154 	    M_NVME, M_ZERO | M_WAITOK);
155 
156 	for (i = 0; i < ctrlr->num_io_queues; i++) {
157 		qpair = &ctrlr->ioq[i];
158 
159 		/*
160 		 * Admin queue has ID=0. IO queues start at ID=1 -
161 		 *  hence the 'i+1' here.
162 		 *
163 		 * For I/O queues, use the controller-wide max_xfer_size
164 		 *  calculated in nvme_attach().
165 		 */
166 		nvme_qpair_construct(qpair,
167 				     i+1, /* qpair ID */
168 				     ctrlr->msix_enabled ? i+1 : 0, /* vector */
169 				     num_entries,
170 				     num_trackers,
171 				     ctrlr);
172 
173 		/*
174 		 * Do not bother binding interrupts if we only have one I/O
175 		 *  interrupt thread for this controller.
176 		 */
177 		if (ctrlr->num_io_queues > 1)
178 			bus_bind_intr(ctrlr->dev, qpair->res,
179 			    i * ctrlr->num_cpus_per_ioq);
180 	}
181 
182 	return (0);
183 }
184 
185 static void
186 nvme_ctrlr_fail(struct nvme_controller *ctrlr)
187 {
188 	int i;
189 
190 	ctrlr->is_failed = TRUE;
191 	nvme_qpair_fail(&ctrlr->adminq);
192 	for (i = 0; i < ctrlr->num_io_queues; i++)
193 		nvme_qpair_fail(&ctrlr->ioq[i]);
194 	nvme_notify_fail_consumers(ctrlr);
195 }
196 
197 void
198 nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
199     struct nvme_request *req)
200 {
201 
202 	mtx_lock(&ctrlr->lock);
203 	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
204 	mtx_unlock(&ctrlr->lock);
205 	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
206 }
207 
208 static void
209 nvme_ctrlr_fail_req_task(void *arg, int pending)
210 {
211 	struct nvme_controller	*ctrlr = arg;
212 	struct nvme_request	*req;
213 
214 	mtx_lock(&ctrlr->lock);
215 	while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
216 		req = STAILQ_FIRST(&ctrlr->fail_req);
217 		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
218 		nvme_qpair_manual_complete_request(req->qpair, req,
219 		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
220 	}
221 	mtx_unlock(&ctrlr->lock);
222 }
223 
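/*
 * Poll CSTS.RDY until it reaches desired_val, failing if the controller's
 *  advertised ready timeout (derived from CAP.TO) expires first.
 */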
224 static int
225 nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
226 {
227 	int ms_waited;
228 	union cc_register cc;
229 	union csts_register csts;
230 
231 	cc.raw = nvme_mmio_read_4(ctrlr, cc);
232 	csts.raw = nvme_mmio_read_4(ctrlr, csts);
233 
234 	if (cc.bits.en != desired_val) {
235 		nvme_printf(ctrlr, "%s called with desired_val = %d "
236 		    "but cc.en = %d\n", __func__, desired_val, cc.bits.en);
237 		return (ENXIO);
238 	}
239 
240 	ms_waited = 0;
241 
242 	while (csts.bits.rdy != desired_val) {
243 		DELAY(1000);
244 		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
245 			nvme_printf(ctrlr, "controller ready did not become %d "
246 			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
247 			return (ENXIO);
248 		}
249 		csts.raw = nvme_mmio_read_4(ctrlr, csts);
250 	}
251 
252 	return (0);
253 }
254 
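/*
 * Clear CC.EN to disable the controller.  If an enable is still in
 *  progress, wait for CSTS.RDY to assert first so the transition is orderly.
 */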
255 static void
256 nvme_ctrlr_disable(struct nvme_controller *ctrlr)
257 {
258 	union cc_register cc;
259 	union csts_register csts;
260 
261 	cc.raw = nvme_mmio_read_4(ctrlr, cc);
262 	csts.raw = nvme_mmio_read_4(ctrlr, csts);
263 
264 	if (cc.bits.en == 1 && csts.bits.rdy == 0)
265 		nvme_ctrlr_wait_for_ready(ctrlr, 1);
266 
267 	cc.bits.en = 0;
268 	nvme_mmio_write_4(ctrlr, cc, cc.raw);
269 	DELAY(5000);
270 	nvme_ctrlr_wait_for_ready(ctrlr, 0);
271 }
272 
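/*
 * Program the admin queue registers (ASQ/ACQ/AQA) and the CC fields
 *  (entry sizes, page size), then set CC.EN and wait for CSTS.RDY.
 */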
273 static int
274 nvme_ctrlr_enable(struct nvme_controller *ctrlr)
275 {
276 	union cc_register	cc;
277 	union csts_register	csts;
278 	union aqa_register	aqa;
279 
280 	cc.raw = nvme_mmio_read_4(ctrlr, cc);
281 	csts.raw = nvme_mmio_read_4(ctrlr, csts);
282 
283 	if (cc.bits.en == 1) {
284 		if (csts.bits.rdy == 1)
285 			return (0);
286 		else
287 			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
288 	}
289 
290 	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
291 	DELAY(5000);
292 	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
293 	DELAY(5000);
294 
295 	aqa.raw = 0;
296 	/* acqs and asqs are 0-based. */
297 	aqa.bits.acqs = ctrlr->adminq.num_entries-1;
298 	aqa.bits.asqs = ctrlr->adminq.num_entries-1;
299 	nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
300 	DELAY(5000);
301 
302 	cc.bits.en = 1;
303 	cc.bits.css = 0;
304 	cc.bits.ams = 0;
305 	cc.bits.shn = 0;
306 	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
307 	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
308 
309 	/* For a 4KB PAGE_SIZE this evaluates to 0 (2^12 byte pages), per spec. */
310 	cc.bits.mps = (PAGE_SIZE >> 13);
311 
312 	nvme_mmio_write_4(ctrlr, cc, cc.raw);
313 	DELAY(5000);
314 
315 	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
316 }
317 
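/*
 * Perform a full hardware reset: quiesce the admin and any I/O queues,
 *  disable the controller, then re-enable it.
 */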
318 int
319 nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
320 {
321 	int i;
322 
323 	nvme_admin_qpair_disable(&ctrlr->adminq);
324 	/*
325 	 * I/O queues are not allocated before the initial HW
326 	 *  reset, so do not try to disable them.  Use is_initialized
327 	 *  to determine if this is the initial HW reset.
328 	 */
329 	if (ctrlr->is_initialized) {
330 		for (i = 0; i < ctrlr->num_io_queues; i++)
331 			nvme_io_qpair_disable(&ctrlr->ioq[i]);
332 	}
333 
334 	DELAY(100*1000);
335 
336 	nvme_ctrlr_disable(ctrlr);
337 	return (nvme_ctrlr_enable(ctrlr));
338 }
339 
340 void
341 nvme_ctrlr_reset(struct nvme_controller *ctrlr)
342 {
343 	int cmpset;
344 
345 	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
346 
347 	if (cmpset == 0 || ctrlr->is_failed)
348 		/*
349 		 * Controller is already resetting or has failed.  Return
350 		 *  immediately since there is no need to kick off another
351 		 *  reset in these cases.
352 		 */
353 		return;
354 
355 	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
356 }
357 
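/*
 * Issue IDENTIFY CONTROLLER (polled) and cache the controller data,
 *  using MDTS to cap max_xfer_size when the device reports a limit.
 */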
358 static int
359 nvme_ctrlr_identify(struct nvme_controller *ctrlr)
360 {
361 	struct nvme_completion_poll_status	status;
362 
363 	status.done = FALSE;
364 	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
365 	    nvme_completion_poll_cb, &status);
366 	while (status.done == FALSE)
367 		pause("nvme", 1);
368 	if (nvme_completion_is_error(&status.cpl)) {
369 		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
370 		return (ENXIO);
371 	}
372 
373 	/*
374 	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
375 	 *  controller supports.
376 	 */
377 	if (ctrlr->cdata.mdts > 0)
378 		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
379 		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
380 
381 	return (0);
382 }
383 
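/*
 * Negotiate the number of I/O queue pairs via SET_FEATURES/NUMBER_OF_QUEUES
 *  and clamp num_io_queues to what the controller actually granted.
 */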
384 static int
385 nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
386 {
387 	struct nvme_completion_poll_status	status;
388 	int					cq_allocated, sq_allocated;
389 
390 	status.done = FALSE;
391 	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
392 	    nvme_completion_poll_cb, &status);
393 	while (status.done == FALSE)
394 		pause("nvme", 1);
395 	if (nvme_completion_is_error(&status.cpl)) {
396 		nvme_printf(ctrlr, "nvme_set_num_queues failed!\n");
397 		return (ENXIO);
398 	}
399 
400 	/*
401 	 * Data in cdw0 is 0-based.
402 	 * Lower 16-bits indicate number of submission queues allocated.
403 	 * Upper 16-bits indicate number of completion queues allocated.
404 	 */
405 	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
406 	cq_allocated = (status.cpl.cdw0 >> 16) + 1;
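	/* e.g. cdw0 == 0x00070007 grants 8 submission and 8 completion queues. */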
407 
408 	/*
409 	 * Controller may allocate more queues than we requested,
410 	 *  so use the minimum of the number requested and what was
411 	 *  actually allocated.
412 	 */
413 	ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
414 	ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
415 
416 	return (0);
417 }
418 
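/*
 * Create the I/O completion and submission queues on the controller,
 *  one CQ/SQ pair per qpair, using polled admin commands.
 */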
419 static int
420 nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
421 {
422 	struct nvme_completion_poll_status	status;
423 	struct nvme_qpair			*qpair;
424 	int					i;
425 
426 	for (i = 0; i < ctrlr->num_io_queues; i++) {
427 		qpair = &ctrlr->ioq[i];
428 
429 		status.done = FALSE;
430 		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
431 		    nvme_completion_poll_cb, &status);
432 		while (status.done == FALSE)
433 			pause("nvme", 1);
434 		if (nvme_completion_is_error(&status.cpl)) {
435 			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
436 			return (ENXIO);
437 		}
438 
439 		status.done = FALSE;
440 		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
441 		    nvme_completion_poll_cb, &status);
442 		while (status.done == FALSE)
443 			pause("nvme", 1);
444 		if (nvme_completion_is_error(&status.cpl)) {
445 			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
446 			return (ENXIO);
447 		}
448 	}
449 
450 	return (0);
451 }
452 
453 static int
454 nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
455 {
456 	struct nvme_namespace	*ns;
457 	int			i, status;
458 
459 	for (i = 0; i < ctrlr->cdata.nn; i++) {
460 		ns = &ctrlr->ns[i];
461 		status = nvme_ns_construct(ns, i+1, ctrlr);
462 		if (status != 0)
463 			return (status);
464 	}
465 
466 	return (0);
467 }
468 
469 static boolean_t
470 is_log_page_id_valid(uint8_t page_id)
471 {
472 
473 	switch (page_id) {
474 	case NVME_LOG_ERROR:
475 	case NVME_LOG_HEALTH_INFORMATION:
476 	case NVME_LOG_FIRMWARE_SLOT:
477 		return (TRUE);
478 	}
479 
480 	return (FALSE);
481 }
482 
483 static uint32_t
484 nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
485 {
486 	uint32_t	log_page_size;
487 
488 	switch (page_id) {
489 	case NVME_LOG_ERROR:
490 		log_page_size = min(
491 		    sizeof(struct nvme_error_information_entry) *
492 		    ctrlr->cdata.elpe,
493 		    NVME_MAX_AER_LOG_SIZE);
494 		break;
495 	case NVME_LOG_HEALTH_INFORMATION:
496 		log_page_size = sizeof(struct nvme_health_information_page);
497 		break;
498 	case NVME_LOG_FIRMWARE_SLOT:
499 		log_page_size = sizeof(struct nvme_firmware_page);
500 		break;
501 	default:
502 		log_page_size = 0;
503 		break;
504 	}
505 
506 	return (log_page_size);
507 }
508 
509 static void
510 nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
511     union nvme_critical_warning_state state)
512 {
513 
514 	if (state.bits.available_spare == 1)
515 		nvme_printf(ctrlr, "available spare space below threshold\n");
516 
517 	if (state.bits.temperature == 1)
518 		nvme_printf(ctrlr, "temperature above threshold\n");
519 
520 	if (state.bits.device_reliability == 1)
521 		nvme_printf(ctrlr, "device reliability degraded\n");
522 
523 	if (state.bits.read_only == 1)
524 		nvme_printf(ctrlr, "media placed in read only mode\n");
525 
526 	if (state.bits.volatile_memory_backup == 1)
527 		nvme_printf(ctrlr, "volatile memory backup device failed\n");
528 
529 	if (state.bits.reserved != 0)
530 		nvme_printf(ctrlr,
531 		    "unknown critical warning(s): state = 0x%02x\n", state.raw);
532 }
533 
534 static void
535 nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
536 {
537 	struct nvme_async_event_request		*aer = arg;
538 	struct nvme_health_information_page	*health_info;
539 
540 	/*
541 	 * If the log page fetch for some reason completed with an error,
542 	 *  don't pass log page data to the consumers.  In practice, this case
543 	 *  should never happen.
544 	 */
545 	if (nvme_completion_is_error(cpl))
546 		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
547 		    aer->log_page_id, NULL, 0);
548 	else {
549 		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
550 			health_info = (struct nvme_health_information_page *)
551 			    aer->log_page_buffer;
552 			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
553 			    health_info->critical_warning);
554 			/*
555 			 * Critical warnings reported through the
556 			 *  SMART/health log page are persistent, so
557 			 *  clear the associated bits in the async event
558 			 *  config so that we do not receive repeated
559 			 *  notifications for the same event.
560 			 */
561 			aer->ctrlr->async_event_config.raw &=
562 			    ~health_info->critical_warning.raw;
563 			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
564 			    aer->ctrlr->async_event_config, NULL, NULL);
565 		}
566 
567 
568 		/*
569 		 * Pass the cpl data from the original async event completion,
570 		 *  not the log page fetch.
571 		 */
572 		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
573 		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
574 	}
575 
576 	/*
577 	 * Repost another asynchronous event request to replace the one
578 	 *  that just completed.
579 	 */
580 	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
581 }
582 
583 static void
584 nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
585 {
586 	struct nvme_async_event_request	*aer = arg;
587 
588 	if (nvme_completion_is_error(cpl)) {
589 		/*
590 		 *  Do not retry failed async event requests.  This avoids
591 		 *  infinite loops where a new async event request is submitted
592 		 *  to replace the one just failed, only to fail again and
593 		 *  perpetuate the loop.
594 		 */
595 		return;
596 	}
597 
598 	/* Associated log page is in bits 23:16 of completion entry dw0. */
599 	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;
600 
601 	nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
602 	    aer->log_page_id);
603 
604 	if (is_log_page_id_valid(aer->log_page_id)) {
605 		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
606 		    aer->log_page_id);
607 		memcpy(&aer->cpl, cpl, sizeof(*cpl));
608 		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
609 		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
610 		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
611 		    aer);
612 		/* Wait to notify consumers until after log page is fetched. */
613 	} else {
614 		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
615 		    NULL, 0);
616 
617 		/*
618 		 * Repost another asynchronous event request to replace the one
619 		 *  that just completed.
620 		 */
621 		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
622 	}
623 }
624 
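/*
 * Build a payload-less ASYNC_EVENT_REQUEST command and submit it on the
 *  admin queue; such requests are never subject to timeout.
 */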
625 static void
626 nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
627     struct nvme_async_event_request *aer)
628 {
629 	struct nvme_request *req;
630 
631 	aer->ctrlr = ctrlr;
632 	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
633 	aer->req = req;
634 
635 	/*
636 	 * Disable timeout here, since asynchronous event requests should by
637 	 *  nature never be timed out.
638 	 */
639 	req->timeout = FALSE;
640 	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
641 	nvme_ctrlr_submit_admin_request(ctrlr, req);
642 }
643 
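/*
 * Configure which asynchronous events the controller should report and
 *  post the initial asynchronous event requests (up to AERL+1 of them).
 */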
644 static void
645 nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
646 {
647 	struct nvme_completion_poll_status	status;
648 	struct nvme_async_event_request		*aer;
649 	uint32_t				i;
650 
651 	ctrlr->async_event_config.raw = 0xFF;
652 	ctrlr->async_event_config.bits.reserved = 0;
653 
654 	status.done = FALSE;
655 	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
656 	    0, NULL, 0, nvme_completion_poll_cb, &status);
657 	while (status.done == FALSE)
658 		pause("nvme", 1);
659 	if (nvme_completion_is_error(&status.cpl) ||
660 	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
661 	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
662 		nvme_printf(ctrlr, "temperature threshold not supported\n");
663 		ctrlr->async_event_config.bits.temperature = 0;
664 	}
665 
666 	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
667 	    ctrlr->async_event_config, NULL, NULL);
668 
669 	/* aerl is a zero-based value, so we need to add 1 here. */
670 	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
671 
672 	for (i = 0; i < ctrlr->num_aers; i++) {
673 		aer = &ctrlr->aer[i];
674 		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
675 	}
676 }
677 
678 static void
679 nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
680 {
681 
682 	ctrlr->int_coal_time = 0;
683 	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
684 	    &ctrlr->int_coal_time);
685 
686 	ctrlr->int_coal_threshold = 0;
687 	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
688 	    &ctrlr->int_coal_threshold);
689 
690 	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
691 	    ctrlr->int_coal_threshold, NULL, NULL);
692 }
693 
694 static void
695 nvme_ctrlr_start(void *ctrlr_arg)
696 {
697 	struct nvme_controller *ctrlr = ctrlr_arg;
698 	uint32_t old_num_io_queues;
699 	int i;
700 
701 	/*
702 	 * Only reset adminq here when we are restarting the
703 	 *  controller after a reset.  During initialization,
704 	 *  we have already submitted admin commands to get
705 	 *  the number of I/O queues supported, so we cannot reset
706 	 *  the adminq again here.
707 	 */
708 	if (ctrlr->is_resetting) {
709 		nvme_qpair_reset(&ctrlr->adminq);
710 	}
711 
712 	for (i = 0; i < ctrlr->num_io_queues; i++)
713 		nvme_qpair_reset(&ctrlr->ioq[i]);
714 
715 	nvme_admin_qpair_enable(&ctrlr->adminq);
716 
717 	if (nvme_ctrlr_identify(ctrlr) != 0) {
718 		nvme_ctrlr_fail(ctrlr);
719 		return;
720 	}
721 
722 	/*
723 	 * The number of qpairs is determined during controller initialization,
724 	 *  including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
725 	 *  HW limit.  We call SET_FEATURES again here so that it gets issued
726 	 *  after any reset, for controllers that depend on the driver to
727 	 *  explicitly specify how many queues it will use.  This value should
728 	 *  never change between resets, so panic if it somehow does.
729 	 */
730 	if (ctrlr->is_resetting) {
731 		old_num_io_queues = ctrlr->num_io_queues;
732 		if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
733 			nvme_ctrlr_fail(ctrlr);
734 			return;
735 		}
736 
737 		if (old_num_io_queues != ctrlr->num_io_queues) {
738 			panic("num_io_queues changed from %u to %u",
739 			      old_num_io_queues, ctrlr->num_io_queues);
740 		}
741 	}
742 
743 	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
744 		nvme_ctrlr_fail(ctrlr);
745 		return;
746 	}
747 
748 	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
749 		nvme_ctrlr_fail(ctrlr);
750 		return;
751 	}
752 
753 	nvme_ctrlr_configure_aer(ctrlr);
754 	nvme_ctrlr_configure_int_coalescing(ctrlr);
755 
756 	for (i = 0; i < ctrlr->num_io_queues; i++)
757 		nvme_io_qpair_enable(&ctrlr->ioq[i]);
758 }
759 
760 void
761 nvme_ctrlr_start_config_hook(void *arg)
762 {
763 	struct nvme_controller *ctrlr = arg;
764 
765 	nvme_qpair_reset(&ctrlr->adminq);
766 	nvme_admin_qpair_enable(&ctrlr->adminq);
767 
768 	if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
769 	    nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
770 		nvme_ctrlr_start(ctrlr);
771 	else
772 		nvme_ctrlr_fail(ctrlr);
773 
774 	nvme_sysctl_initialize_ctrlr(ctrlr);
775 	config_intrhook_disestablish(&ctrlr->config_hook);
776 
777 	ctrlr->is_initialized = 1;
778 	nvme_notify_new_controller(ctrlr);
779 }
780 
781 static void
782 nvme_ctrlr_reset_task(void *arg, int pending)
783 {
784 	struct nvme_controller	*ctrlr = arg;
785 	int			status;
786 
787 	nvme_printf(ctrlr, "resetting controller\n");
788 	status = nvme_ctrlr_hw_reset(ctrlr);
789 	/*
790 	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
791 	 *  handlers on this CPU that were blocked on a qpair lock. We want
792 	 *  all nvme interrupts completed before proceeding with restarting the
793 	 *  controller.
794 	 *
795 	 * XXX - any way to guarantee the interrupt handlers have quiesced?
796 	 */
797 	pause("nvmereset", hz / 10);
798 	if (status == 0)
799 		nvme_ctrlr_start(ctrlr);
800 	else
801 		nvme_ctrlr_fail(ctrlr);
802 
803 	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
804 }
805 
806 void
807 nvme_ctrlr_intx_handler(void *arg)
808 {
809 	struct nvme_controller *ctrlr = arg;
810 
811 	nvme_mmio_write_4(ctrlr, intms, 1);
812 
813 	nvme_qpair_process_completions(&ctrlr->adminq);
814 
815 	if (ctrlr->ioq && ctrlr->ioq[0].cpl)
816 		nvme_qpair_process_completions(&ctrlr->ioq[0]);
817 
818 	nvme_mmio_write_4(ctrlr, intmc, 1);
819 }
820 
821 static int
822 nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
823 {
824 
825 	ctrlr->msix_enabled = 0;
826 	ctrlr->num_io_queues = 1;
827 	ctrlr->num_cpus_per_ioq = mp_ncpus;
828 	ctrlr->rid = 0;
829 	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
830 	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);
831 
832 	if (ctrlr->res == NULL) {
833 		nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
834 		return (ENOMEM);
835 	}
836 
837 	bus_setup_intr(ctrlr->dev, ctrlr->res,
838 	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
839 	    ctrlr, &ctrlr->tag);
840 
841 	if (ctrlr->tag == NULL) {
842 		nvme_printf(ctrlr, "unable to setup intx handler\n");
843 		return (ENOMEM);
844 	}
845 
846 	return (0);
847 }
848 
849 static void
850 nvme_pt_done(void *arg, const struct nvme_completion *cpl)
851 {
852 	struct nvme_pt_command *pt = arg;
853 
854 	bzero(&pt->cpl, sizeof(pt->cpl));
855 	pt->cpl.cdw0 = cpl->cdw0;
856 	pt->cpl.status = cpl->status;
857 	pt->cpl.status.p = 0;
858 
859 	mtx_lock(pt->driver_lock);
860 	wakeup(pt);
861 	mtx_unlock(pt->driver_lock);
862 }
863 
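/*
 * Execute a pass-through command on behalf of an ioctl caller: wire the
 *  user buffer (if any), submit on the admin or I/O path, and sleep until
 *  the completion callback wakes us.
 */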
864 int
865 nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
866     struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
867     int is_admin_cmd)
868 {
869 	struct nvme_request	*req;
870 	struct mtx		*mtx;
871 	struct buf		*buf = NULL;
872 	int			ret = 0;
873 
874 	if (pt->len > 0) {
875 		if (pt->len > ctrlr->max_xfer_size) {
876 			nvme_printf(ctrlr, "pt->len (%d) "
877 			    "exceeds max_xfer_size (%d)\n", pt->len,
878 			    ctrlr->max_xfer_size);
879 			return (EIO);
880 		}
881 		if (is_user_buffer) {
882 			/*
883 			 * Ensure the user buffer is wired for the duration of
884 			 *  this passthrough command.
885 			 */
886 			PHOLD(curproc);
887 			buf = getpbuf(NULL);
888 			buf->b_data = pt->buf;
889 			buf->b_bufsize = pt->len;
890 			buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
891 #ifdef NVME_UNMAPPED_BIO_SUPPORT
892 			if (vmapbuf(buf, 1) < 0) {
893 #else
894 			if (vmapbuf(buf) < 0) {
895 #endif
896 				ret = EFAULT;
897 				goto err;
898 			}
899 			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
900 			    nvme_pt_done, pt);
901 		} else
902 			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
903 			    nvme_pt_done, pt);
904 	} else
905 		req = nvme_allocate_request_null(nvme_pt_done, pt);
906 
907 	req->cmd.opc	= pt->cmd.opc;
908 	req->cmd.cdw10	= pt->cmd.cdw10;
909 	req->cmd.cdw11	= pt->cmd.cdw11;
910 	req->cmd.cdw12	= pt->cmd.cdw12;
911 	req->cmd.cdw13	= pt->cmd.cdw13;
912 	req->cmd.cdw14	= pt->cmd.cdw14;
913 	req->cmd.cdw15	= pt->cmd.cdw15;
914 
915 	req->cmd.nsid = nsid;
916 
917 	if (is_admin_cmd)
918 		mtx = &ctrlr->lock;
919 	else
920 		mtx = &ctrlr->ns[nsid-1].lock;
921 
922 	mtx_lock(mtx);
923 	pt->driver_lock = mtx;
924 
925 	if (is_admin_cmd)
926 		nvme_ctrlr_submit_admin_request(ctrlr, req);
927 	else
928 		nvme_ctrlr_submit_io_request(ctrlr, req);
929 
930 	mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
931 	mtx_unlock(mtx);
932 
933 	pt->driver_lock = NULL;
934 
935 err:
936 	if (buf != NULL) {
937 		relpbuf(buf, NULL);
938 		PRELE(curproc);
939 	}
940 
941 	return (ret);
942 }
943 
944 static int
945 nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
946     struct thread *td)
947 {
948 	struct nvme_controller			*ctrlr;
949 	struct nvme_pt_command			*pt;
950 
951 	ctrlr = cdev->si_drv1;
952 
953 	switch (cmd) {
954 	case NVME_RESET_CONTROLLER:
955 		nvme_ctrlr_reset(ctrlr);
956 		break;
957 	case NVME_PASSTHROUGH_CMD:
958 		pt = (struct nvme_pt_command *)arg;
959 		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid,
960 		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
961 	default:
962 		return (ENOTTY);
963 	}
964 
965 	return (0);
966 }
967 
968 static struct cdevsw nvme_ctrlr_cdevsw = {
969 	.d_version =	D_VERSION,
970 	.d_flags =	0,
971 	.d_ioctl =	nvme_ctrlr_ioctl
972 };
973 
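/*
 * Choose between MSI-X and INTx.  Probe how many MSI-X vectors are really
 *  available, derive the number of I/O queues from that and the per-queue
 *  CPU tunables, then allocate one vector per I/O queue plus one for the
 *  admin queue, falling back to INTx on any failure.
 */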
974 static void
975 nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
976 {
977 	device_t	dev;
978 	int		per_cpu_io_queues;
979 	int		min_cpus_per_ioq;
980 	int		num_vectors_requested, num_vectors_allocated;
981 	int		num_vectors_available;
982 
983 	dev = ctrlr->dev;
984 	min_cpus_per_ioq = 1;
985 	TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);
986 
987 	if (min_cpus_per_ioq < 1) {
988 		min_cpus_per_ioq = 1;
989 	} else if (min_cpus_per_ioq > mp_ncpus) {
990 		min_cpus_per_ioq = mp_ncpus;
991 	}
992 
993 	per_cpu_io_queues = 1;
994 	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
995 
996 	if (per_cpu_io_queues == 0) {
997 		min_cpus_per_ioq = mp_ncpus;
998 	}
999 
1000 	ctrlr->force_intx = 0;
1001 	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);
1002 
1003 	/*
1004 	 * FreeBSD currently cannot allocate more than about 190 vectors at
1005 	 *  boot, meaning that systems with a high core count and many devices
1006 	 *  requesting per-CPU interrupt vectors will not get their full
1007 	 *  allotment.  So first, try to allocate as many as we may need to
1008 	 *  understand what is available, then immediately release them.
1009 	 *  Then figure out how many of those we will actually use, based on
1010 	 *  assigning an equal number of cores to each I/O queue.
1011 	 */
1012 
1013 	/* One vector per CPU for the I/O queues, plus one vector for the admin queue. */
1014 	num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
1015 	if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
1016 		num_vectors_available = 0;
1017 	}
1018 	pci_release_msi(dev);
1019 
1020 	if (ctrlr->force_intx || num_vectors_available < 2) {
1021 		nvme_ctrlr_configure_intx(ctrlr);
1022 		return;
1023 	}
1024 
1025 	/*
1026 	 * Do not use all vectors for I/O queues - one must be saved for the
1027 	 *  admin queue.
1028 	 */
1029 	ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
1030 	    howmany(mp_ncpus, num_vectors_available - 1));
1031 
1032 	ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
1033 	num_vectors_requested = ctrlr->num_io_queues + 1;
1034 	num_vectors_allocated = num_vectors_requested;
1035 
1036 	/*
1037 	 * Now just allocate the number of vectors we need.  This should
1038 	 *  succeed, since the earlier pci_alloc_msix() call returned at
1039 	 *  least this many vectors.  But just to be safe, revert to INTx
1040 	 *  if something goes wrong.
1041 	 */
1042 	if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
1043 		nvme_ctrlr_configure_intx(ctrlr);
1044 		return;
1045 	}
1046 
1047 	if (num_vectors_allocated < num_vectors_requested) {
1048 		pci_release_msi(dev);
1049 		nvme_ctrlr_configure_intx(ctrlr);
1050 		return;
1051 	}
1052 
1053 	ctrlr->msix_enabled = 1;
1054 }
1055 
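/*
 * One-time controller construction: map the register BARs, read CAP,
 *  apply tunables, set up interrupts, and create the admin queue pair,
 *  character device, and task queue.
 */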
1056 int
1057 nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
1058 {
1059 	union cap_lo_register	cap_lo;
1060 	union cap_hi_register	cap_hi;
1061 	int			status, timeout_period;
1062 
1063 	ctrlr->dev = dev;
1064 
1065 	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
1066 
1067 	status = nvme_ctrlr_allocate_bar(ctrlr);
1068 
1069 	if (status != 0)
1070 		return (status);
1071 
1072 	/*
1073 	 * Software emulators may set the doorbell stride to something
1074 	 *  other than zero, but this driver is not set up to handle that.
1075 	 */
1076 	cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
1077 	if (cap_hi.bits.dstrd != 0)
1078 		return (ENXIO);
1079 
1080 	ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin);
1081 
1082 	/* Get ready timeout value from controller, in units of 500ms. */
1083 	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
1084 	ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;
1085 
1086 	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
1087 	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
1088 	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
1089 	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
1090 	ctrlr->timeout_period = timeout_period;
1091 
1092 	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
1093 	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);
1094 
1095 	ctrlr->enable_aborts = 0;
1096 	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
1097 
1098 	nvme_ctrlr_setup_interrupts(ctrlr);
1099 
1100 	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
1101 	nvme_ctrlr_construct_admin_qpair(ctrlr);
1102 
1103 	ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, device_get_unit(dev),
1104 	    UID_ROOT, GID_WHEEL, 0600, "nvme%d", device_get_unit(dev));
1105 
1106 	if (ctrlr->cdev == NULL)
1107 		return (ENXIO);
1108 
1109 	ctrlr->cdev->si_drv1 = (void *)ctrlr;
1110 
1111 	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
1112 	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
1113 	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");
1114 
1115 	ctrlr->is_resetting = 0;
1116 	ctrlr->is_initialized = 0;
1117 	ctrlr->notification_sent = 0;
1118 	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
1119 
1120 	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
1121 	STAILQ_INIT(&ctrlr->fail_req);
1122 	ctrlr->is_failed = FALSE;
1123 
1124 	return (0);
1125 }
1126 
1127 void
1128 nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
1129 {
1130 	int				i;
1131 
1132 	/*
1133 	 *  Notify the controller of a shutdown, even though this is due to
1134 	 *   a driver unload, not a system shutdown (this path is not invoked
1135 	 *   during shutdown).  This ensures the controller receives a
1136 	 *   shutdown notification in case the system is shut down before
1137 	 *   reloading the driver.
1138 	 */
1139 	nvme_ctrlr_shutdown(ctrlr);
1140 
1141 	nvme_ctrlr_disable(ctrlr);
1142 	taskqueue_free(ctrlr->taskqueue);
1143 
1144 	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
1145 		nvme_ns_destruct(&ctrlr->ns[i]);
1146 
1147 	if (ctrlr->cdev)
1148 		destroy_dev(ctrlr->cdev);
1149 
1150 	for (i = 0; i < ctrlr->num_io_queues; i++) {
1151 		nvme_io_qpair_destroy(&ctrlr->ioq[i]);
1152 	}
1153 
1154 	free(ctrlr->ioq, M_NVME);
1155 
1156 	nvme_admin_qpair_destroy(&ctrlr->adminq);
1157 
1158 	if (ctrlr->resource != NULL) {
1159 		bus_release_resource(dev, SYS_RES_MEMORY,
1160 		    ctrlr->resource_id, ctrlr->resource);
1161 	}
1162 
1163 	if (ctrlr->bar4_resource != NULL) {
1164 		bus_release_resource(dev, SYS_RES_MEMORY,
1165 		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
1166 	}
1167 
1168 	if (ctrlr->tag)
1169 		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
1170 
1171 	if (ctrlr->res)
1172 		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
1173 		    rman_get_rid(ctrlr->res), ctrlr->res);
1174 
1175 	if (ctrlr->msix_enabled)
1176 		pci_release_msi(dev);
1177 }
1178 
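/*
 * Request a normal shutdown via CC.SHN and wait up to five seconds for
 *  CSTS.SHST to report shutdown complete.
 */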
1179 void
1180 nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
1181 {
1182 	union cc_register	cc;
1183 	union csts_register	csts;
1184 	int			ticks = 0;
1185 
1186 	cc.raw = nvme_mmio_read_4(ctrlr, cc);
1187 	cc.bits.shn = NVME_SHN_NORMAL;
1188 	nvme_mmio_write_4(ctrlr, cc, cc.raw);
1189 	csts.raw = nvme_mmio_read_4(ctrlr, csts);
1190 	while ((csts.bits.shst != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) {
1191 		pause("nvme shn", 1);
1192 		csts.raw = nvme_mmio_read_4(ctrlr, csts);
1193 	}
1194 	if (csts.bits.shst != NVME_SHST_COMPLETE)
1195 		nvme_printf(ctrlr, "did not complete shutdown within 5 seconds "
1196 		    "of notification\n");
1197 }
1198 
1199 void
1200 nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
1201     struct nvme_request *req)
1202 {
1203 
1204 	nvme_qpair_submit_request(&ctrlr->adminq, req);
1205 }
1206 
1207 void
1208 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
1209     struct nvme_request *req)
1210 {
1211 	struct nvme_qpair       *qpair;
1212 
1213 	qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
1214 	nvme_qpair_submit_request(qpair, req);
1215 }
1216 
1217 device_t
1218 nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
1219 {
1220 
1221 	return (ctrlr->dev);
1222 }
1223 
1224 const struct nvme_controller_data *
1225 nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
1226 {
1227 
1228 	return (&ctrlr->cdata);
1229 }
1230