xref: /freebsd/sys/dev/nvme/nvme_ctrlr.c (revision 97cb52fa9aefd90fad38790fded50905aeeb9b9e)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2012-2016 Intel Corporation
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include "opt_cam.h"
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/buf.h>
37 #include <sys/bus.h>
38 #include <sys/conf.h>
39 #include <sys/ioccom.h>
40 #include <sys/proc.h>
41 #include <sys/smp.h>
42 #include <sys/uio.h>
43 
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 
47 #include "nvme_private.h"
48 
49 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
50 						struct nvme_async_event_request *aer);
51 static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);
52 
53 static int
54 nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
55 {
56 
57 	ctrlr->resource_id = PCIR_BAR(0);
58 
59 	ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
60 	    &ctrlr->resource_id, RF_ACTIVE);
61 
62 	if (ctrlr->resource == NULL) {
63 		nvme_printf(ctrlr, "unable to allocate pci resource\n");
64 		return (ENOMEM);
65 	}
66 
67 	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
68 	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
69 	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;
70 
71 	/*
72 	 * The NVMe spec allows for the MSI-X table to be placed behind
73 	 *  BAR 4/5, separate from the control/doorbell registers.  Always
74 	 *  try to map this bar, because it must be mapped prior to calling
75 	 *  pci_alloc_msix().  If the table isn't behind BAR 4/5,
76 	 *  bus_alloc_resource() will just return NULL which is OK.
77 	 */
78 	ctrlr->bar4_resource_id = PCIR_BAR(4);
79 	ctrlr->bar4_resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
80 	    &ctrlr->bar4_resource_id, RF_ACTIVE);
81 
82 	return (0);
83 }
84 
85 static int
86 nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
87 {
88 	struct nvme_qpair	*qpair;
89 	uint32_t		num_entries;
90 	int			error;
91 
92 	qpair = &ctrlr->adminq;
93 
94 	num_entries = NVME_ADMIN_ENTRIES;
95 	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
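	/*
	 * The tunable is read from the kernel environment, so it can be set
	 *  at boot time in /boot/loader.conf, e.g. (illustrative value only):
	 *	hw.nvme.admin_entries="256"
	 */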
96 	/*
97 	 * If admin_entries was overridden to an invalid value, revert it
98 	 *  to our default value.
99 	 */
100 	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
101 	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
102 		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
103 		    "specified\n", num_entries);
104 		num_entries = NVME_ADMIN_ENTRIES;
105 	}
106 
107 	/*
108 	 * The admin queue's max xfer size is treated differently than the
109 	 *  max I/O xfer size.  16KB is sufficient here - maybe even less?
110 	 */
111 	error = nvme_qpair_construct(qpair,
112 				     0, /* qpair ID */
113 				     0, /* vector */
114 				     num_entries,
115 				     NVME_ADMIN_TRACKERS,
116 				     ctrlr);
117 	return (error);
118 }
119 
120 static int
121 nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
122 {
123 	struct nvme_qpair	*qpair;
124 	union cap_lo_register	cap_lo;
125 	int			i, error, num_entries, num_trackers;
126 
127 	num_entries = NVME_IO_ENTRIES;
128 	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
129 
130 	/*
131 	 * NVMe spec sets a hard limit of 64K max entries, but
132 	 *  devices may specify a smaller limit, so we need to check
133 	 *  the MQES field in the capabilities register.
134 	 */
135 	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
136 	num_entries = min(num_entries, cap_lo.bits.mqes+1);
137 
138 	num_trackers = NVME_IO_TRACKERS;
139 	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);
140 
141 	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
142 	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
143 	/*
144 	 * No need to have more trackers than entries in the submit queue.
145 	 *  Note also that for a queue size of N, we can only have (N-1)
146 	 *  commands outstanding, hence the "-1" here.
147 	 */
148 	num_trackers = min(num_trackers, (num_entries-1));
149 
150 	/*
151 	 * Our best estimate for the maximum number of I/Os that we should
152 	 * normally have in flight at one time. This should be viewed as a hint,
153 	 * not a hard limit, and will need to be revisited when the upper layers
154 	 * of the storage system grow multi-queue support.
155 	 */
156 	ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;
157 
158 	/*
159 	 * This was calculated previously when setting up interrupts, but
160 	 *  a controller could theoretically support fewer I/O queues than
161 	 *  MSI-X vectors.  So calculate again here just to be safe.
162 	 */
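	/*
	 * For example (hypothetical numbers): with mp_ncpus == 8 and
	 *  num_io_queues == 3, howmany(8, 3) == 3, i.e. up to three CPUs
	 *  share each I/O queue pair.
	 */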
163 	ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);
164 
165 	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
166 	    M_NVME, M_ZERO | M_WAITOK);
167 
168 	for (i = 0; i < ctrlr->num_io_queues; i++) {
169 		qpair = &ctrlr->ioq[i];
170 
171 		/*
172 		 * Admin queue has ID=0. IO queues start at ID=1 -
173 		 *  hence the 'i+1' here.
174 		 *
175 		 * For I/O queues, use the controller-wide max_xfer_size
176 		 *  calculated in nvme_attach().
177 		 */
178 		error = nvme_qpair_construct(qpair,
179 				     i+1, /* qpair ID */
180 				     ctrlr->msix_enabled ? i+1 : 0, /* vector */
181 				     num_entries,
182 				     num_trackers,
183 				     ctrlr);
184 		if (error)
185 			return (error);
186 
187 		/*
188 		 * Do not bother binding interrupts if we only have one I/O
189 		 *  interrupt thread for this controller.
190 		 */
191 		if (ctrlr->num_io_queues > 1)
192 			bus_bind_intr(ctrlr->dev, qpair->res,
193 			    i * ctrlr->num_cpus_per_ioq);
194 	}
195 
196 	return (0);
197 }
198 
199 static void
200 nvme_ctrlr_fail(struct nvme_controller *ctrlr)
201 {
202 	int i;
203 
204 	ctrlr->is_failed = TRUE;
205 	nvme_qpair_fail(&ctrlr->adminq);
206 	if (ctrlr->ioq != NULL) {
207 		for (i = 0; i < ctrlr->num_io_queues; i++)
208 			nvme_qpair_fail(&ctrlr->ioq[i]);
209 	}
210 	nvme_notify_fail_consumers(ctrlr);
211 }
212 
213 void
214 nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
215     struct nvme_request *req)
216 {
217 
218 	mtx_lock(&ctrlr->lock);
219 	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
220 	mtx_unlock(&ctrlr->lock);
221 	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
222 }
223 
224 static void
225 nvme_ctrlr_fail_req_task(void *arg, int pending)
226 {
227 	struct nvme_controller	*ctrlr = arg;
228 	struct nvme_request	*req;
229 
230 	mtx_lock(&ctrlr->lock);
231 	while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
232 		req = STAILQ_FIRST(&ctrlr->fail_req);
233 		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
234 		nvme_qpair_manual_complete_request(req->qpair, req,
235 		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
236 	}
237 	mtx_unlock(&ctrlr->lock);
238 }
239 
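/*
 * Poll CSTS.RDY until it matches desired_val (CC.EN is expected to have been
 *  set to desired_val already), checking roughly once per millisecond for at
 *  most ready_timeout_in_ms before giving up.
 */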
240 static int
241 nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
242 {
243 	int ms_waited;
244 	union cc_register cc;
245 	union csts_register csts;
246 
247 	cc.raw = nvme_mmio_read_4(ctrlr, cc);
248 	csts.raw = nvme_mmio_read_4(ctrlr, csts);
249 
250 	if (cc.bits.en != desired_val) {
251 		nvme_printf(ctrlr, "%s called with desired_val = %d "
252 		    "but cc.en = %d\n", __func__, desired_val, cc.bits.en);
253 		return (ENXIO);
254 	}
255 
256 	ms_waited = 0;
257 
258 	while (csts.bits.rdy != desired_val) {
259 		DELAY(1000);
260 		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
261 			nvme_printf(ctrlr, "controller ready did not become %d "
262 			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
263 			return (ENXIO);
264 		}
265 		csts.raw = nvme_mmio_read_4(ctrlr, csts);
266 	}
267 
268 	return (0);
269 }
270 
271 static void
272 nvme_ctrlr_disable(struct nvme_controller *ctrlr)
273 {
274 	union cc_register cc;
275 	union csts_register csts;
276 
277 	cc.raw = nvme_mmio_read_4(ctrlr, cc);
278 	csts.raw = nvme_mmio_read_4(ctrlr, csts);
279 
280 	if (cc.bits.en == 1 && csts.bits.rdy == 0)
281 		nvme_ctrlr_wait_for_ready(ctrlr, 1);
282 
283 	cc.bits.en = 0;
284 	nvme_mmio_write_4(ctrlr, cc, cc.raw);
285 	DELAY(5000);
286 	nvme_ctrlr_wait_for_ready(ctrlr, 0);
287 }
288 
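/*
 * If the controller is not already enabled, program the admin queue
 *  registers (ASQ, ACQ, AQA), then set CC to enable the controller and wait
 *  for CSTS.RDY to be set.
 */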
289 static int
290 nvme_ctrlr_enable(struct nvme_controller *ctrlr)
291 {
292 	union cc_register	cc;
293 	union csts_register	csts;
294 	union aqa_register	aqa;
295 
296 	cc.raw = nvme_mmio_read_4(ctrlr, cc);
297 	csts.raw = nvme_mmio_read_4(ctrlr, csts);
298 
299 	if (cc.bits.en == 1) {
300 		if (csts.bits.rdy == 1)
301 			return (0);
302 		else
303 			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
304 	}
305 
306 	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
307 	DELAY(5000);
308 	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
309 	DELAY(5000);
310 
311 	aqa.raw = 0;
312 	/* acqs and asqs are 0-based. */
313 	aqa.bits.acqs = ctrlr->adminq.num_entries-1;
314 	aqa.bits.asqs = ctrlr->adminq.num_entries-1;
315 	nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
316 	DELAY(5000);
317 
318 	cc.bits.en = 1;
319 	cc.bits.css = 0;
320 	cc.bits.ams = 0;
321 	cc.bits.shn = 0;
322 	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
323 	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
324 
325 	/* For 4KB pages this evaluates to 0, the value required by the spec. */
326 	cc.bits.mps = (PAGE_SIZE >> 13);
327 
328 	nvme_mmio_write_4(ctrlr, cc, cc.raw);
329 	DELAY(5000);
330 
331 	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
332 }
333 
334 int
335 nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
336 {
337 	int i;
338 
339 	nvme_admin_qpair_disable(&ctrlr->adminq);
340 	/*
341 	 * I/O queues are not allocated before the initial HW
342 	 *  reset, so do not try to disable them.  Use is_initialized
343 	 *  to determine if this is the initial HW reset.
344 	 */
345 	if (ctrlr->is_initialized) {
346 		for (i = 0; i < ctrlr->num_io_queues; i++)
347 			nvme_io_qpair_disable(&ctrlr->ioq[i]);
348 	}
349 
350 	DELAY(100*1000);
351 
352 	nvme_ctrlr_disable(ctrlr);
353 	return (nvme_ctrlr_enable(ctrlr));
354 }
355 
356 void
357 nvme_ctrlr_reset(struct nvme_controller *ctrlr)
358 {
359 	int cmpset;
360 
361 	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
362 
363 	if (cmpset == 0 || ctrlr->is_failed)
364 		/*
365 		 * Controller is already resetting or has failed.  Return
366 		 *  immediately since there is no need to kick off another
367 		 *  reset in these cases.
368 		 */
369 		return;
370 
371 	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
372 }
373 
374 static int
375 nvme_ctrlr_identify(struct nvme_controller *ctrlr)
376 {
377 	struct nvme_completion_poll_status	status;
378 
379 	status.done = FALSE;
380 	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
381 	    nvme_completion_poll_cb, &status);
382 	while (status.done == FALSE)
383 		pause("nvme", 1);
384 	if (nvme_completion_is_error(&status.cpl)) {
385 		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
386 		return (ENXIO);
387 	}
388 
389 	/*
390 	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
391 	 *  controller supports.
392 	 */
393 	if (ctrlr->cdata.mdts > 0)
394 		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
395 		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
396 
397 	return (0);
398 }
399 
400 static int
401 nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
402 {
403 	struct nvme_completion_poll_status	status;
404 	int					cq_allocated, sq_allocated;
405 
406 	status.done = FALSE;
407 	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
408 	    nvme_completion_poll_cb, &status);
409 	while (status.done == FALSE)
410 		pause("nvme", 1);
411 	if (nvme_completion_is_error(&status.cpl)) {
412 		nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
413 		return (ENXIO);
414 	}
415 
416 	/*
417 	 * Data in cdw0 is 0-based.
418 	 * The lower 16 bits indicate the number of submission queues allocated.
419 	 * The upper 16 bits indicate the number of completion queues allocated.
420 	 */
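	/*
	 * For example (hypothetical value): cdw0 == 0x00030003 means the
	 *  controller allocated 4 submission and 4 completion queues.
	 */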
421 	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
422 	cq_allocated = (status.cpl.cdw0 >> 16) + 1;
423 
424 	/*
425 	 * Controller may allocate more queues than we requested,
426 	 *  so use the minimum of the number requested and what was
427 	 *  actually allocated.
428 	 */
429 	ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
430 	ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
431 
432 	return (0);
433 }
434 
435 static int
436 nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
437 {
438 	struct nvme_completion_poll_status	status;
439 	struct nvme_qpair			*qpair;
440 	int					i;
441 
442 	for (i = 0; i < ctrlr->num_io_queues; i++) {
443 		qpair = &ctrlr->ioq[i];
444 
445 		status.done = FALSE;
446 		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
447 		    nvme_completion_poll_cb, &status);
448 		while (status.done == FALSE)
449 			pause("nvme", 1);
450 		if (nvme_completion_is_error(&status.cpl)) {
451 			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
452 			return (ENXIO);
453 		}
454 
455 		status.done = FALSE;
456 		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
457 		    nvme_completion_poll_cb, &status);
458 		while (status.done == FALSE)
459 			pause("nvme", 1);
460 		if (nvme_completion_is_error(&status.cpl)) {
461 			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
462 			return (ENXIO);
463 		}
464 	}
465 
466 	return (0);
467 }
468 
469 static int
470 nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
471 {
472 	struct nvme_namespace	*ns;
473 	uint32_t 		i;
474 
475 	for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
476 		ns = &ctrlr->ns[i];
477 		nvme_ns_construct(ns, i+1, ctrlr);
478 	}
479 
480 	return (0);
481 }
482 
483 static boolean_t
484 is_log_page_id_valid(uint8_t page_id)
485 {
486 
487 	switch (page_id) {
488 	case NVME_LOG_ERROR:
489 	case NVME_LOG_HEALTH_INFORMATION:
490 	case NVME_LOG_FIRMWARE_SLOT:
491 		return (TRUE);
492 	}
493 
494 	return (FALSE);
495 }
496 
497 static uint32_t
498 nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
499 {
500 	uint32_t	log_page_size;
501 
502 	switch (page_id) {
503 	case NVME_LOG_ERROR:
504 		log_page_size = min(
505 		    sizeof(struct nvme_error_information_entry) *
506 		    ctrlr->cdata.elpe,
507 		    NVME_MAX_AER_LOG_SIZE);
508 		break;
509 	case NVME_LOG_HEALTH_INFORMATION:
510 		log_page_size = sizeof(struct nvme_health_information_page);
511 		break;
512 	case NVME_LOG_FIRMWARE_SLOT:
513 		log_page_size = sizeof(struct nvme_firmware_page);
514 		break;
515 	default:
516 		log_page_size = 0;
517 		break;
518 	}
519 
520 	return (log_page_size);
521 }
522 
523 static void
524 nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
525     union nvme_critical_warning_state state)
526 {
527 
528 	if (state.bits.available_spare == 1)
529 		nvme_printf(ctrlr, "available spare space below threshold\n");
530 
531 	if (state.bits.temperature == 1)
532 		nvme_printf(ctrlr, "temperature above threshold\n");
533 
534 	if (state.bits.device_reliability == 1)
535 		nvme_printf(ctrlr, "device reliability degraded\n");
536 
537 	if (state.bits.read_only == 1)
538 		nvme_printf(ctrlr, "media placed in read only mode\n");
539 
540 	if (state.bits.volatile_memory_backup == 1)
541 		nvme_printf(ctrlr, "volatile memory backup device failed\n");
542 
543 	if (state.bits.reserved != 0)
544 		nvme_printf(ctrlr,
545 		    "unknown critical warning(s): state = 0x%02x\n", state.raw);
546 }
547 
548 static void
549 nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
550 {
551 	struct nvme_async_event_request		*aer = arg;
552 	struct nvme_health_information_page	*health_info;
553 
554 	/*
555 	 * If the log page fetch for some reason completed with an error,
556 	 *  don't pass log page data to the consumers.  In practice, this case
557 	 *  should never happen.
558 	 */
559 	if (nvme_completion_is_error(cpl))
560 		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
561 		    aer->log_page_id, NULL, 0);
562 	else {
563 		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
564 			health_info = (struct nvme_health_information_page *)
565 			    aer->log_page_buffer;
566 			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
567 			    health_info->critical_warning);
568 			/*
569 			 * Critical warnings reported through the
570 			 *  SMART/health log page are persistent, so
571 			 *  clear the associated bits in the async event
572 			 *  config so that we do not receive repeated
573 			 *  notifications for the same event.
574 			 */
575 			aer->ctrlr->async_event_config.raw &=
576 			    ~health_info->critical_warning.raw;
577 			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
578 			    aer->ctrlr->async_event_config, NULL, NULL);
579 		}
580 
581 
582 		/*
583 		 * Pass the cpl data from the original async event completion,
584 		 *  not the log page fetch.
585 		 */
586 		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
587 		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
588 	}
589 
590 	/*
591 	 * Repost another asynchronous event request to replace the one
592 	 *  that just completed.
593 	 */
594 	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
595 }
596 
597 static void
598 nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
599 {
600 	struct nvme_async_event_request	*aer = arg;
601 
602 	if (nvme_completion_is_error(cpl)) {
603 		/*
604 		 *  Do not retry failed async event requests.  This avoids
605 		 *  infinite loops where a new async event request is submitted
606 		 *  to replace the one just failed, only to fail again and
607 		 *  perpetuate the loop.
608 		 */
609 		return;
610 	}
611 
612 	/* Associated log page is in bits 23:16 of completion entry dw0. */
613 	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;
614 
615 	nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
616 	    aer->log_page_id);
617 
618 	if (is_log_page_id_valid(aer->log_page_id)) {
619 		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
620 		    aer->log_page_id);
621 		memcpy(&aer->cpl, cpl, sizeof(*cpl));
622 		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
623 		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
624 		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
625 		    aer);
626 		/* Wait to notify consumers until after log page is fetched. */
627 	} else {
628 		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
629 		    NULL, 0);
630 
631 		/*
632 		 * Repost another asynchronous event request to replace the one
633 		 *  that just completed.
634 		 */
635 		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
636 	}
637 }
638 
639 static void
640 nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
641     struct nvme_async_event_request *aer)
642 {
643 	struct nvme_request *req;
644 
645 	aer->ctrlr = ctrlr;
646 	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
647 	aer->req = req;
648 
649 	/*
650 	 * Disable timeout here, since asynchronous event requests should by
651 	 *  nature never be timed out.
652 	 */
653 	req->timeout = FALSE;
654 	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
655 	nvme_ctrlr_submit_admin_request(ctrlr, req);
656 }
657 
658 static void
659 nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
660 {
661 	struct nvme_completion_poll_status	status;
662 	struct nvme_async_event_request		*aer;
663 	uint32_t				i;
664 
665 	ctrlr->async_event_config.raw = 0xFF;
666 	ctrlr->async_event_config.bits.reserved = 0;
667 
668 	status.done = FALSE;
669 	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
670 	    0, NULL, 0, nvme_completion_poll_cb, &status);
671 	while (status.done == FALSE)
672 		pause("nvme", 1);
673 	if (nvme_completion_is_error(&status.cpl) ||
674 	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
675 	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
676 		nvme_printf(ctrlr, "temperature threshold not supported\n");
677 		ctrlr->async_event_config.bits.temperature = 0;
678 	}
679 
680 	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
681 	    ctrlr->async_event_config, NULL, NULL);
682 
683 	/* aerl is a zero-based value, so we need to add 1 here. */
684 	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
685 
686 	for (i = 0; i < ctrlr->num_aers; i++) {
687 		aer = &ctrlr->aer[i];
688 		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
689 	}
690 }
691 
692 static void
693 nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
694 {
695 
696 	ctrlr->int_coal_time = 0;
697 	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
698 	    &ctrlr->int_coal_time);
699 
700 	ctrlr->int_coal_threshold = 0;
701 	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
702 	    &ctrlr->int_coal_threshold);
703 
704 	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
705 	    ctrlr->int_coal_threshold, NULL, NULL);
706 }
707 
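/*
 * Bring the controller to an operational state: identify it, (re)establish
 *  the I/O queue pairs, construct namespaces, and post asynchronous event
 *  requests.  Called both from the config intrhook at attach time and from
 *  the reset task after a controller reset.
 */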
708 static void
709 nvme_ctrlr_start(void *ctrlr_arg)
710 {
711 	struct nvme_controller *ctrlr = ctrlr_arg;
712 	uint32_t old_num_io_queues;
713 	int i;
714 
715 	/*
716 	 * Only reset adminq here when we are restarting the
717 	 *  controller after a reset.  During initialization,
718 	 *  we have already submitted admin commands to get
719 	 *  the number of I/O queues supported, so we cannot reset
720 	 *  the adminq again here.
721 	 */
722 	if (ctrlr->is_resetting) {
723 		nvme_qpair_reset(&ctrlr->adminq);
724 	}
725 
726 	for (i = 0; i < ctrlr->num_io_queues; i++)
727 		nvme_qpair_reset(&ctrlr->ioq[i]);
728 
729 	nvme_admin_qpair_enable(&ctrlr->adminq);
730 
731 	if (nvme_ctrlr_identify(ctrlr) != 0) {
732 		nvme_ctrlr_fail(ctrlr);
733 		return;
734 	}
735 
736 	/*
737 	 * The number of qpairs is determined during controller initialization,
738 	 *  including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
739 	 *  HW limit.  We call SET_FEATURES again here so that it gets called
740 	 *  after any reset for controllers that depend on the driver to
741 	 *  explicitly specify how many queues it will use.  This value should
742 	 *  never change between resets, so panic if somehow that does happen.
743 	 */
744 	if (ctrlr->is_resetting) {
745 		old_num_io_queues = ctrlr->num_io_queues;
746 		if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
747 			nvme_ctrlr_fail(ctrlr);
748 			return;
749 		}
750 
751 		if (old_num_io_queues != ctrlr->num_io_queues) {
752 			panic("num_io_queues changed from %u to %u",
753 			      old_num_io_queues, ctrlr->num_io_queues);
754 		}
755 	}
756 
757 	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
758 		nvme_ctrlr_fail(ctrlr);
759 		return;
760 	}
761 
762 	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
763 		nvme_ctrlr_fail(ctrlr);
764 		return;
765 	}
766 
767 	nvme_ctrlr_configure_aer(ctrlr);
768 	nvme_ctrlr_configure_int_coalescing(ctrlr);
769 
770 	for (i = 0; i < ctrlr->num_io_queues; i++)
771 		nvme_io_qpair_enable(&ctrlr->ioq[i]);
772 }
773 
774 void
775 nvme_ctrlr_start_config_hook(void *arg)
776 {
777 	struct nvme_controller *ctrlr = arg;
778 
779 	nvme_qpair_reset(&ctrlr->adminq);
780 	nvme_admin_qpair_enable(&ctrlr->adminq);
781 
782 	if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
783 	    nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
784 		nvme_ctrlr_start(ctrlr);
785 	else
786 		nvme_ctrlr_fail(ctrlr);
787 
788 	nvme_sysctl_initialize_ctrlr(ctrlr);
789 	config_intrhook_disestablish(&ctrlr->config_hook);
790 
791 	ctrlr->is_initialized = 1;
792 	nvme_notify_new_controller(ctrlr);
793 }
794 
795 static void
796 nvme_ctrlr_reset_task(void *arg, int pending)
797 {
798 	struct nvme_controller	*ctrlr = arg;
799 	int			status;
800 
801 	nvme_printf(ctrlr, "resetting controller\n");
802 	status = nvme_ctrlr_hw_reset(ctrlr);
803 	/*
804 	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
805 	 *  handlers on this CPU that were blocked on a qpair lock. We want
806 	 *  all nvme interrupts completed before proceeding with restarting the
807 	 *  controller.
808 	 *
809 	 * XXX - any way to guarantee the interrupt handlers have quiesced?
810 	 */
811 	pause("nvmereset", hz / 10);
812 	if (status == 0)
813 		nvme_ctrlr_start(ctrlr);
814 	else
815 		nvme_ctrlr_fail(ctrlr);
816 
817 	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
818 }
819 
820 /*
821  * Poll all the queues enabled on the device for completion.
822  */
823 void
824 nvme_ctrlr_poll(struct nvme_controller *ctrlr)
825 {
826 	int i;
827 
828 	nvme_qpair_process_completions(&ctrlr->adminq);
829 
830 	for (i = 0; i < ctrlr->num_io_queues; i++)
831 		if (ctrlr->ioq && ctrlr->ioq[i].cpl)
832 			nvme_qpair_process_completions(&ctrlr->ioq[i]);
833 }
834 
835 /*
836  * Poll the single-vector interrupt case: num_io_queues will be 1 and
837  * there's only a single vector. While we're polling, we mask further
838  * interrupts in the controller.
839  */
840 void
841 nvme_ctrlr_intx_handler(void *arg)
842 {
843 	struct nvme_controller *ctrlr = arg;
844 
845 	nvme_mmio_write_4(ctrlr, intms, 1);
846 	nvme_ctrlr_poll(ctrlr);
847 	nvme_mmio_write_4(ctrlr, intmc, 1);
848 }
849 
850 static int
851 nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
852 {
853 
854 	ctrlr->msix_enabled = 0;
855 	ctrlr->num_io_queues = 1;
856 	ctrlr->num_cpus_per_ioq = mp_ncpus;
857 	ctrlr->rid = 0;
858 	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
859 	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);
860 
861 	if (ctrlr->res == NULL) {
862 		nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
863 		return (ENOMEM);
864 	}
865 
866 	bus_setup_intr(ctrlr->dev, ctrlr->res,
867 	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
868 	    ctrlr, &ctrlr->tag);
869 
870 	if (ctrlr->tag == NULL) {
871 		nvme_printf(ctrlr, "unable to setup intx handler\n");
872 		return (ENOMEM);
873 	}
874 
875 	return (0);
876 }
877 
878 static void
879 nvme_pt_done(void *arg, const struct nvme_completion *cpl)
880 {
881 	struct nvme_pt_command *pt = arg;
882 
883 	bzero(&pt->cpl, sizeof(pt->cpl));
884 	pt->cpl.cdw0 = cpl->cdw0;
885 	pt->cpl.status = cpl->status;
886 	pt->cpl.status.p = 0;
887 
888 	mtx_lock(pt->driver_lock);
889 	wakeup(pt);
890 	mtx_unlock(pt->driver_lock);
891 }
892 
893 int
894 nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
895     struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
896     int is_admin_cmd)
897 {
898 	struct nvme_request	*req;
899 	struct mtx		*mtx;
900 	struct buf		*buf = NULL;
901 	int			ret = 0;
902 	vm_offset_t		addr, end;
903 
904 	if (pt->len > 0) {
905 		/*
906 		 * vmapbuf calls vm_fault_quick_hold_pages which only maps full
907 		 * pages. Ensure this request has fewer than MAXPHYS bytes when
908 		 * extended to full pages.
909 		 */
910 		addr = (vm_offset_t)pt->buf;
911 		end = round_page(addr + pt->len);
912 		addr = trunc_page(addr);
913 		if (end - addr > MAXPHYS)
914 			return EIO;
915 
916 		if (pt->len > ctrlr->max_xfer_size) {
917 			nvme_printf(ctrlr, "pt->len (%d) "
918 			    "exceeds max_xfer_size (%d)\n", pt->len,
919 			    ctrlr->max_xfer_size);
920 			return EIO;
921 		}
922 		if (is_user_buffer) {
923 			/*
924 			 * Ensure the user buffer is wired for the duration of
925 			 *  this passthrough command.
926 			 */
927 			PHOLD(curproc);
928 			buf = getpbuf(NULL);
929 			buf->b_data = pt->buf;
930 			buf->b_bufsize = pt->len;
931 			buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
932 #ifdef NVME_UNMAPPED_BIO_SUPPORT
933 			if (vmapbuf(buf, 1) < 0) {
934 #else
935 			if (vmapbuf(buf) < 0) {
936 #endif
937 				ret = EFAULT;
938 				goto err;
939 			}
940 			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
941 			    nvme_pt_done, pt);
942 		} else
943 			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
944 			    nvme_pt_done, pt);
945 	} else
946 		req = nvme_allocate_request_null(nvme_pt_done, pt);
947 
948 	req->cmd.opc	= pt->cmd.opc;
949 	req->cmd.cdw10	= pt->cmd.cdw10;
950 	req->cmd.cdw11	= pt->cmd.cdw11;
951 	req->cmd.cdw12	= pt->cmd.cdw12;
952 	req->cmd.cdw13	= pt->cmd.cdw13;
953 	req->cmd.cdw14	= pt->cmd.cdw14;
954 	req->cmd.cdw15	= pt->cmd.cdw15;
955 
956 	req->cmd.nsid = nsid;
957 
958 	if (is_admin_cmd)
959 		mtx = &ctrlr->lock;
960 	else
961 		mtx = &ctrlr->ns[nsid-1].lock;
962 
963 	mtx_lock(mtx);
964 	pt->driver_lock = mtx;
965 
966 	if (is_admin_cmd)
967 		nvme_ctrlr_submit_admin_request(ctrlr, req);
968 	else
969 		nvme_ctrlr_submit_io_request(ctrlr, req);
970 
971 	mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
972 	mtx_unlock(mtx);
973 
974 	pt->driver_lock = NULL;
975 
976 err:
977 	if (buf != NULL) {
978 		relpbuf(buf, NULL);
979 		PRELE(curproc);
980 	}
981 
982 	return (ret);
983 }
984 
985 static int
986 nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
987     struct thread *td)
988 {
989 	struct nvme_controller			*ctrlr;
990 	struct nvme_pt_command			*pt;
991 
992 	ctrlr = cdev->si_drv1;
993 
994 	switch (cmd) {
995 	case NVME_RESET_CONTROLLER:
996 		nvme_ctrlr_reset(ctrlr);
997 		break;
998 	case NVME_PASSTHROUGH_CMD:
999 		pt = (struct nvme_pt_command *)arg;
1000 		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid,
1001 		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
1002 	default:
1003 		return (ENOTTY);
1004 	}
1005 
1006 	return (0);
1007 }
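
/*
 * Illustrative sketch (not part of the driver proper): a userland consumer
 *  could reach nvme_ctrlr_passthrough_cmd() through the ioctl above, e.g. an
 *  identify-controller command (CNS=1 in cdw10) that reads into a buffer of
 *  sizeof(struct nvme_controller_data) bytes:
 *
 *	struct nvme_pt_command pt;
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.cmd.opc = NVME_OPC_IDENTIFY;
 *	pt.cmd.cdw10 = 1;
 *	pt.buf = buffer;
 *	pt.len = sizeof(struct nvme_controller_data);
 *	pt.is_read = 1;
 *	ioctl(fd, NVME_PASSTHROUGH_CMD, &pt);
 */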
1008 
1009 static struct cdevsw nvme_ctrlr_cdevsw = {
1010 	.d_version =	D_VERSION,
1011 	.d_flags =	0,
1012 	.d_ioctl =	nvme_ctrlr_ioctl
1013 };
1014 
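/*
 * Choose between MSI-X and INTx and size the I/O queue set based on the
 *  number of vectors the system can provide, the CPU count, and the
 *  hw.nvme.* tunables.
 */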
1015 static void
1016 nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
1017 {
1018 	device_t	dev;
1019 	int		per_cpu_io_queues;
1020 	int		min_cpus_per_ioq;
1021 	int		num_vectors_requested, num_vectors_allocated;
1022 	int		num_vectors_available;
1023 
1024 	dev = ctrlr->dev;
1025 	min_cpus_per_ioq = 1;
1026 	TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);
1027 
1028 	if (min_cpus_per_ioq < 1) {
1029 		min_cpus_per_ioq = 1;
1030 	} else if (min_cpus_per_ioq > mp_ncpus) {
1031 		min_cpus_per_ioq = mp_ncpus;
1032 	}
1033 
1034 	per_cpu_io_queues = 1;
1035 	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
1036 
1037 	if (per_cpu_io_queues == 0) {
1038 		min_cpus_per_ioq = mp_ncpus;
1039 	}
1040 
1041 	ctrlr->force_intx = 0;
1042 	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);
1043 
1044 	/*
1045 	 * FreeBSD currently cannot allocate more than about 190 vectors at
1046 	 *  boot, meaning that systems with high core count and many devices
1047 	 *  requesting per-CPU interrupt vectors will not get their full
1048 	 *  allotment.  So first, try to allocate as many as we may need to
1049 	 *  understand what is available, then immediately release them.
1050 	 *  Then figure out how many of those we will actually use, based on
1051 	 *  assigning an equal number of cores to each I/O queue.
1052 	 */
1053 
1054 	/* One vector per core for I/O queues, plus one vector for the admin queue. */
1055 	num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
1056 	if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
1057 		num_vectors_available = 0;
1058 	}
1059 	pci_release_msi(dev);
1060 
1061 	if (ctrlr->force_intx || num_vectors_available < 2) {
1062 		nvme_ctrlr_configure_intx(ctrlr);
1063 		return;
1064 	}
1065 
1066 	/*
1067 	 * Do not use all vectors for I/O queues - one must be saved for the
1068 	 *  admin queue.
1069 	 */
1070 	ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
1071 	    howmany(mp_ncpus, num_vectors_available - 1));
1072 
1073 	ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
1074 	num_vectors_requested = ctrlr->num_io_queues + 1;
1075 	num_vectors_allocated = num_vectors_requested;
1076 
1077 	/*
1078 	 * Now just allocate the number of vectors we need.  This should
1079 	 *  succeed, since the earlier pci_alloc_msix() call successfully
1080 	 *  returned at least this many vectors.  Just to be safe, though,
1081 	 *  revert to INTx if anything goes wrong.
1082 	 */
1083 	if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
1084 		nvme_ctrlr_configure_intx(ctrlr);
1085 		return;
1086 	}
1087 
1088 	if (num_vectors_allocated < num_vectors_requested) {
1089 		pci_release_msi(dev);
1090 		nvme_ctrlr_configure_intx(ctrlr);
1091 		return;
1092 	}
1093 
1094 	ctrlr->msix_enabled = 1;
1095 }
1096 
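/*
 * One-time controller construction: map the register BARs, read the
 *  capability registers, set up interrupts and the admin queue pair, and
 *  create the character device plus the taskqueue used by the reset and
 *  failed-request handlers.
 */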
1097 int
1098 nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
1099 {
1100 	union cap_lo_register	cap_lo;
1101 	union cap_hi_register	cap_hi;
1102 	int			status, timeout_period;
1103 
1104 	ctrlr->dev = dev;
1105 
1106 	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
1107 
1108 	status = nvme_ctrlr_allocate_bar(ctrlr);
1109 
1110 	if (status != 0)
1111 		return (status);
1112 
1113 	/*
1114 	 * Software emulators may set the doorbell stride to something
1115 	 *  other than zero, but this driver is not set up to handle that.
1116 	 */
1117 	cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
1118 	if (cap_hi.bits.dstrd != 0)
1119 		return (ENXIO);
1120 
1121 	ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin);
1122 
1123 	/* Get ready timeout value from controller, in units of 500ms. */
1124 	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
1125 	ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;
1126 
1127 	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
1128 	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
1129 	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
1130 	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
1131 	ctrlr->timeout_period = timeout_period;
1132 
1133 	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
1134 	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);
1135 
1136 	ctrlr->enable_aborts = 0;
1137 	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
1138 
1139 	nvme_ctrlr_setup_interrupts(ctrlr);
1140 
1141 	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
1142 	if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
1143 		return (ENXIO);
1144 
1145 	ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, device_get_unit(dev),
1146 	    UID_ROOT, GID_WHEEL, 0600, "nvme%d", device_get_unit(dev));
1147 
1148 	if (ctrlr->cdev == NULL)
1149 		return (ENXIO);
1150 
1151 	ctrlr->cdev->si_drv1 = (void *)ctrlr;
1152 
1153 	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
1154 	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
1155 	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");
1156 
1157 	ctrlr->is_resetting = 0;
1158 	ctrlr->is_initialized = 0;
1159 	ctrlr->notification_sent = 0;
1160 	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
1161 
1162 	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
1163 	STAILQ_INIT(&ctrlr->fail_req);
1164 	ctrlr->is_failed = FALSE;
1165 
1166 	return (0);
1167 }
1168 
1169 void
1170 nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
1171 {
1172 	int				i;
1173 
1174 	/*
1175 	 *  Notify the controller of a shutdown, even though this is due to
1176 	 *   a driver unload, not a system shutdown (this path is not invoked
1177 	 *   during shutdown).  This ensures the controller receives a
1178 	 *   shutdown notification in case the system is shut down before
1179 	 *   reloading the driver.
1180 	 */
1181 	nvme_ctrlr_shutdown(ctrlr);
1182 
1183 	nvme_ctrlr_disable(ctrlr);
1184 	taskqueue_free(ctrlr->taskqueue);
1185 
1186 	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
1187 		nvme_ns_destruct(&ctrlr->ns[i]);
1188 
1189 	if (ctrlr->cdev)
1190 		destroy_dev(ctrlr->cdev);
1191 
1192 	for (i = 0; i < ctrlr->num_io_queues; i++) {
1193 		nvme_io_qpair_destroy(&ctrlr->ioq[i]);
1194 	}
1195 
1196 	free(ctrlr->ioq, M_NVME);
1197 
1198 	nvme_admin_qpair_destroy(&ctrlr->adminq);
1199 
1200 	if (ctrlr->resource != NULL) {
1201 		bus_release_resource(dev, SYS_RES_MEMORY,
1202 		    ctrlr->resource_id, ctrlr->resource);
1203 	}
1204 
1205 	if (ctrlr->bar4_resource != NULL) {
1206 		bus_release_resource(dev, SYS_RES_MEMORY,
1207 		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
1208 	}
1209 
1210 	if (ctrlr->tag)
1211 		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
1212 
1213 	if (ctrlr->res)
1214 		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
1215 		    rman_get_rid(ctrlr->res), ctrlr->res);
1216 
1217 	if (ctrlr->msix_enabled)
1218 		pci_release_msi(dev);
1219 }
1220 
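/*
 * Request a normal shutdown via CC.SHN and wait up to five seconds for the
 *  controller to report shutdown complete in CSTS.SHST.
 */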
1221 void
1222 nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
1223 {
1224 	union cc_register	cc;
1225 	union csts_register	csts;
1226 	int			ticks = 0;
1227 
1228 	cc.raw = nvme_mmio_read_4(ctrlr, cc);
1229 	cc.bits.shn = NVME_SHN_NORMAL;
1230 	nvme_mmio_write_4(ctrlr, cc, cc.raw);
1231 	csts.raw = nvme_mmio_read_4(ctrlr, csts);
1232 	while ((csts.bits.shst != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) {
1233 		pause("nvme shn", 1);
1234 		csts.raw = nvme_mmio_read_4(ctrlr, csts);
1235 	}
1236 	if (csts.bits.shst != NVME_SHST_COMPLETE)
1237 		nvme_printf(ctrlr, "did not complete shutdown within 5 seconds "
1238 		    "of notification\n");
1239 }
1240 
1241 void
1242 nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
1243     struct nvme_request *req)
1244 {
1245 
1246 	nvme_qpair_submit_request(&ctrlr->adminq, req);
1247 }
1248 
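/*
 * I/O requests are distributed across the queue pairs based on the
 *  submitting CPU; the qpair index is curcpu / num_cpus_per_ioq (see
 *  nvme_ctrlr_construct_io_qpairs() for how interrupts are bound).
 */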
1249 void
1250 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
1251     struct nvme_request *req)
1252 {
1253 	struct nvme_qpair       *qpair;
1254 
1255 	qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
1256 	nvme_qpair_submit_request(qpair, req);
1257 }
1258 
1259 device_t
1260 nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
1261 {
1262 
1263 	return (ctrlr->dev);
1264 }
1265 
1266 const struct nvme_controller_data *
1267 nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
1268 {
1269 
1270 	return (&ctrlr->cdata);
1271 }
1272