xref: /freebsd/sys/dev/nvme/nvme_ctrlr.c (revision 74fe6c29fb7eef3418d7919dcd41dc1a04a982a1)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2012-2016 Intel Corporation
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include "opt_cam.h"
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/buf.h>
37 #include <sys/bus.h>
38 #include <sys/conf.h>
39 #include <sys/ioccom.h>
40 #include <sys/proc.h>
41 #include <sys/smp.h>
42 #include <sys/uio.h>
43 #include <sys/endian.h>
44 
45 #include <dev/pci/pcireg.h>
46 #include <dev/pci/pcivar.h>
47 
48 #include "nvme_private.h"
49 
50 #define B4_CHK_RDY_DELAY_MS	2300		/* work around controller bug */
51 
52 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
53 						struct nvme_async_event_request *aer);
54 static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);
55 
56 static int
57 nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
58 {
59 
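	/*
	 * BAR 0/1 is the memory BAR that maps the controller registers
	 *  and doorbells.
	 */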
60 	ctrlr->resource_id = PCIR_BAR(0);
61 
62 	ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
63 	    &ctrlr->resource_id, RF_ACTIVE);
64 
65 	if (ctrlr->resource == NULL) {
66 		nvme_printf(ctrlr, "unable to allocate pci resource\n");
67 		return (ENOMEM);
68 	}
69 
70 	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
71 	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
72 	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;
73 
74 	/*
75 	 * The NVMe spec allows for the MSI-X table to be placed behind
76 	 *  BAR 4/5, separate from the control/doorbell registers.  Always
77 	 *  try to map this bar, because it must be mapped prior to calling
78 	 *  pci_alloc_msix().  If the table isn't behind BAR 4/5,
79 	 *  bus_alloc_resource() will just return NULL which is OK.
80 	 */
81 	ctrlr->bar4_resource_id = PCIR_BAR(4);
82 	ctrlr->bar4_resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
83 	    &ctrlr->bar4_resource_id, RF_ACTIVE);
84 
85 	return (0);
86 }
87 
88 static int
89 nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
90 {
91 	struct nvme_qpair	*qpair;
92 	uint32_t		num_entries;
93 	int			error;
94 
95 	qpair = &ctrlr->adminq;
96 
97 	num_entries = NVME_ADMIN_ENTRIES;
98 	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
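	/*
	 * hw.nvme.admin_entries is a loader tunable, e.g. setting
	 *  hw.nvme.admin_entries=256 in loader.conf (an illustrative value,
	 *  not a recommendation) overrides the default.
	 */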
99 	/*
100 	 * If admin_entries was overridden to an invalid value, revert to
101 	 *  our default value.
102 	 */
103 	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
104 	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
105 		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
106 		    "specified\n", num_entries);
107 		num_entries = NVME_ADMIN_ENTRIES;
108 	}
109 
110 	/*
111 	 * The admin queue's max xfer size is treated differently than the
112 	 *  max I/O xfer size.  16KB is sufficient here - maybe even less?
113 	 */
114 	error = nvme_qpair_construct(qpair,
115 				     0, /* qpair ID */
116 				     0, /* vector */
117 				     num_entries,
118 				     NVME_ADMIN_TRACKERS,
119 				     ctrlr);
120 	return (error);
121 }
122 
123 static int
124 nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
125 {
126 	struct nvme_qpair	*qpair;
127 	uint32_t		cap_lo;
128 	uint16_t		mqes;
129 	int			i, error, num_entries, num_trackers;
130 
131 	num_entries = NVME_IO_ENTRIES;
132 	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
133 
134 	/*
135 	 * NVMe spec sets a hard limit of 64K max entries, but
136 	 *  devices may specify a smaller limit, so we need to check
137 	 *  the MQES field in the capabilities register.
138 	 */
139 	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
140 	mqes = (cap_lo >> NVME_CAP_LO_REG_MQES_SHIFT) & NVME_CAP_LO_REG_MQES_MASK;
141 	num_entries = min(num_entries, mqes + 1);
142 
143 	num_trackers = NVME_IO_TRACKERS;
144 	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);
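	/*
	 * hw.nvme.io_entries and hw.nvme.io_trackers are loader tunables,
	 *  e.g. hw.nvme.io_trackers=128 in loader.conf (an illustrative
	 *  value); io_trackers is clamped to the driver limits below.
	 */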
145 
146 	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
147 	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
148 	/*
149 	 * No need to have more trackers than entries in the submit queue.
150 	 *  Note also that for a queue size of N, we can only have (N-1)
151 	 *  commands outstanding, hence the "-1" here.
152 	 */
153 	num_trackers = min(num_trackers, (num_entries-1));
154 
155 	/*
156 	 * Our best estimate for the maximum number of I/Os that we should
157 	 * normally have in flight at one time.  This should be viewed as a hint,
158 	 * not a hard limit, and will need to be revisited when the upper layers
159 	 * of the storage system grow multi-queue support.
160 	 */
161 	ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;
162 
163 	/*
164 	 * This was calculated previously when setting up interrupts, but
165 	 *  a controller could theoretically support fewer I/O queues than
166 	 *  MSI-X vectors.  So calculate again here just to be safe.
167 	 */
168 	ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);
169 
170 	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
171 	    M_NVME, M_ZERO | M_WAITOK);
172 
173 	for (i = 0; i < ctrlr->num_io_queues; i++) {
174 		qpair = &ctrlr->ioq[i];
175 
176 		/*
177 		 * Admin queue has ID=0. IO queues start at ID=1 -
178 		 *  hence the 'i+1' here.
179 		 *
180 		 * For I/O queues, use the controller-wide max_xfer_size
181 		 *  calculated in nvme_attach().
182 		 */
183 		error = nvme_qpair_construct(qpair,
184 				     i+1, /* qpair ID */
185 				     ctrlr->msix_enabled ? i+1 : 0, /* vector */
186 				     num_entries,
187 				     num_trackers,
188 				     ctrlr);
189 		if (error)
190 			return (error);
191 
192 		/*
193 		 * Do not bother binding interrupts if we only have one I/O
194 		 *  interrupt thread for this controller.
195 		 */
196 		if (ctrlr->num_io_queues > 1)
197 			bus_bind_intr(ctrlr->dev, qpair->res,
198 			    i * ctrlr->num_cpus_per_ioq);
199 	}
200 
201 	return (0);
202 }
203 
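/*
 * Mark the controller as failed and fail the admin and I/O qpairs so that
 *  their requests complete with an error, then notify registered consumers.
 */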
204 static void
205 nvme_ctrlr_fail(struct nvme_controller *ctrlr)
206 {
207 	int i;
208 
209 	ctrlr->is_failed = TRUE;
210 	nvme_qpair_fail(&ctrlr->adminq);
211 	if (ctrlr->ioq != NULL) {
212 		for (i = 0; i < ctrlr->num_io_queues; i++)
213 			nvme_qpair_fail(&ctrlr->ioq[i]);
214 	}
215 	nvme_notify_fail_consumers(ctrlr);
216 }
217 
218 void
219 nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
220     struct nvme_request *req)
221 {
222 
223 	mtx_lock(&ctrlr->lock);
224 	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
225 	mtx_unlock(&ctrlr->lock);
226 	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
227 }
228 
229 static void
230 nvme_ctrlr_fail_req_task(void *arg, int pending)
231 {
232 	struct nvme_controller	*ctrlr = arg;
233 	struct nvme_request	*req;
234 
235 	mtx_lock(&ctrlr->lock);
236 	while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
237 		req = STAILQ_FIRST(&ctrlr->fail_req);
238 		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
239 		nvme_qpair_manual_complete_request(req->qpair, req,
240 		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
241 	}
242 	mtx_unlock(&ctrlr->lock);
243 }
244 
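/*
 * Poll CSTS.RDY roughly once per millisecond until it matches desired_val,
 *  giving up with ENXIO once the controller's advertised ready timeout
 *  (derived from CAP.TO) has elapsed.
 */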
245 static int
246 nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
247 {
248 	int ms_waited;
249 	uint32_t csts;
250 
251 	csts = nvme_mmio_read_4(ctrlr, csts);
252 
253 	ms_waited = 0;
254 	while (((csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK) != desired_val) {
255 		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
256 			nvme_printf(ctrlr, "controller ready did not become %d "
257 			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
258 			return (ENXIO);
259 		}
260 		DELAY(1000);
261 		csts = nvme_mmio_read_4(ctrlr, csts);
262 	}
263 
264 	return (0);
265 }
266 
267 static int
268 nvme_ctrlr_disable(struct nvme_controller *ctrlr)
269 {
270 	uint32_t cc;
271 	uint32_t csts;
272 	uint8_t  en, rdy;
273 	int err;
274 
275 	cc = nvme_mmio_read_4(ctrlr, cc);
276 	csts = nvme_mmio_read_4(ctrlr, csts);
277 
278 	en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
279 	rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;
280 
281 	/*
282 	 * Per section 3.1.5 of the NVMe 1.3 spec, transitioning CC.EN from 0
283 	 * to 1 when CSTS.RDY is 1, or from 1 to 0 when CSTS.RDY is 0, "has
284 	 * undefined results".  So make sure CSTS.RDY isn't the desired value
285 	 * before changing CC.EN.  Short circuit if we're already disabled.
286 	 */
287 	if (en == 1) {
288 		if (rdy == 0) {
289 			/* EN == 1, wait for  RDY == 1 or fail */
290 			err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
291 			if (err != 0)
292 				return (err);
293 		}
294 	} else {
295 		/* EN == 0 already, wait for RDY == 0 */
296 		if (rdy == 0)
297 			return (0);
298 		else
299 			return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
300 	}
301 
302 	cc &= ~NVME_CC_REG_EN_MASK;
303 	nvme_mmio_write_4(ctrlr, cc, cc);
304 	/*
305 	 * Some drives have issues with accessing the mmio after we
306 	 * disable, so delay for a bit after clearing CC.EN to cope
307 	 * with these issues.
308 	 */
309 	if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
310 		pause("nvmeR", B4_CHK_RDY_DELAY_MS * hz / 1000);
311 	return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
312 }
313 
314 static int
315 nvme_ctrlr_enable(struct nvme_controller *ctrlr)
316 {
317 	uint32_t	cc;
318 	uint32_t	csts;
319 	uint32_t	aqa;
320 	uint32_t	qsize;
321 	uint8_t		en, rdy;
322 	int		err;
323 
324 	cc = nvme_mmio_read_4(ctrlr, cc);
325 	csts = nvme_mmio_read_4(ctrlr, csts);
326 
327 	en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
328 	rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;
329 
330 	/*
331 	 * See note in nvme_ctrlr_disable. Short circuit if we're already enabled.
332 	 */
333 	if (en == 1) {
334 		if (rdy == 1)
335 			return (0);
336 		else
337 			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
338 	} else {
339 		/* EN == 0 already, wait for RDY == 0 or fail */
340 		err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
341 		if (err != 0)
342 			return (err);
343 	}
344 
345 	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
346 	DELAY(5000);
347 	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
348 	DELAY(5000);
349 
350 	/* acqs and asqs are 0-based. */
351 	qsize = ctrlr->adminq.num_entries - 1;
352 
353 	aqa = 0;
354 	aqa = (qsize & NVME_AQA_REG_ACQS_MASK) << NVME_AQA_REG_ACQS_SHIFT;
355 	aqa |= (qsize & NVME_AQA_REG_ASQS_MASK) << NVME_AQA_REG_ASQS_SHIFT;
356 	nvme_mmio_write_4(ctrlr, aqa, aqa);
357 	DELAY(5000);
358 
359 	/* Initialization values for CC */
360 	cc = 0;
361 	cc |= 1 << NVME_CC_REG_EN_SHIFT;
362 	cc |= 0 << NVME_CC_REG_CSS_SHIFT;
363 	cc |= 0 << NVME_CC_REG_AMS_SHIFT;
364 	cc |= 0 << NVME_CC_REG_SHN_SHIFT;
365 	cc |= 6 << NVME_CC_REG_IOSQES_SHIFT; /* SQ entry size == 64 == 2^6 */
366 	cc |= 4 << NVME_CC_REG_IOCQES_SHIFT; /* CQ entry size == 16 == 2^4 */
367 
368 	/* With a 4KB PAGE_SIZE this evaluates to 0, i.e. a 4KB MPS per the spec. */
369 	cc |= (PAGE_SIZE >> 13) << NVME_CC_REG_MPS_SHIFT;
370 
371 	nvme_mmio_write_4(ctrlr, cc, cc);
372 
373 	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
374 }
375 
376 int
377 nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
378 {
379 	int i, err;
380 
381 	nvme_admin_qpair_disable(&ctrlr->adminq);
382 	/*
383 	 * I/O queues are not allocated before the initial HW
384 	 *  reset, so do not try to disable them.  Use is_initialized
385 	 *  to determine if this is the initial HW reset.
386 	 */
387 	if (ctrlr->is_initialized) {
388 		for (i = 0; i < ctrlr->num_io_queues; i++)
389 			nvme_io_qpair_disable(&ctrlr->ioq[i]);
390 	}
391 
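	/* Brief 100ms pause; presumably lets in-flight activity settle first. */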
392 	DELAY(100*1000);
393 
394 	err = nvme_ctrlr_disable(ctrlr);
395 	if (err != 0)
396 		return (err);
397 	return (nvme_ctrlr_enable(ctrlr));
398 }
399 
400 void
401 nvme_ctrlr_reset(struct nvme_controller *ctrlr)
402 {
403 	int cmpset;
404 
405 	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
406 
407 	if (cmpset == 0 || ctrlr->is_failed)
408 		/*
409 		 * Controller is already resetting or has failed.  Return
410 		 *  immediately since there is no need to kick off another
411 		 *  reset in these cases.
412 		 */
413 		return;
414 
415 	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
416 }
417 
418 static int
419 nvme_ctrlr_identify(struct nvme_controller *ctrlr)
420 {
421 	struct nvme_completion_poll_status	status;
422 
423 	status.done = 0;
424 	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
425 	    nvme_completion_poll_cb, &status);
426 	while (!atomic_load_acq_int(&status.done))
427 		pause("nvme", 1);
428 	if (nvme_completion_is_error(&status.cpl)) {
429 		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
430 		return (ENXIO);
431 	}
432 
433 	/* Convert data to host endian */
434 	nvme_controller_data_swapbytes(&ctrlr->cdata);
435 
436 	/*
437 	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
438 	 *  controller supports.
439 	 */
440 	if (ctrlr->cdata.mdts > 0)
441 		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
442 		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
443 
444 	return (0);
445 }
446 
447 static int
448 nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
449 {
450 	struct nvme_completion_poll_status	status;
451 	int					cq_allocated, sq_allocated;
452 
453 	status.done = 0;
454 	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
455 	    nvme_completion_poll_cb, &status);
456 	while (!atomic_load_acq_int(&status.done))
457 		pause("nvme", 1);
458 	if (nvme_completion_is_error(&status.cpl)) {
459 		nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
460 		return (ENXIO);
461 	}
462 
463 	/*
464 	 * Data in cdw0 is 0-based.
465 	 * Lower 16-bits indicate number of submission queues allocated.
466 	 * Upper 16-bits indicate number of completion queues allocated.
467 	 */
468 	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
469 	cq_allocated = (status.cpl.cdw0 >> 16) + 1;
470 
471 	/*
472 	 * Controller may allocate more queues than we requested,
473 	 *  so use the minimum of the number requested and what was
474 	 *  actually allocated.
475 	 */
476 	ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
477 	ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
478 
479 	return (0);
480 }
481 
482 static int
483 nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
484 {
485 	struct nvme_completion_poll_status	status;
486 	struct nvme_qpair			*qpair;
487 	int					i;
488 
489 	for (i = 0; i < ctrlr->num_io_queues; i++) {
490 		qpair = &ctrlr->ioq[i];
491 
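		/*
		 * The completion queue must exist before the submission queue
		 *  that posts to it, so create the CQ first.
		 */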
492 		status.done = 0;
493 		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
494 		    nvme_completion_poll_cb, &status);
495 		while (!atomic_load_acq_int(&status.done))
496 			pause("nvme", 1);
497 		if (nvme_completion_is_error(&status.cpl)) {
498 			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
499 			return (ENXIO);
500 		}
501 
502 		status.done = 0;
503 		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
504 		    nvme_completion_poll_cb, &status);
505 		while (!atomic_load_acq_int(&status.done))
506 			pause("nvme", 1);
507 		if (nvme_completion_is_error(&status.cpl)) {
508 			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
509 			return (ENXIO);
510 		}
511 	}
512 
513 	return (0);
514 }
515 
516 static int
517 nvme_ctrlr_destroy_qpair(struct nvme_controller *ctrlr, struct nvme_qpair *qpair)
518 {
519 	struct nvme_completion_poll_status	status;
520 
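	/*
	 * Delete the submission queue before the completion queue it posts
	 *  to, reversing the order in which they were created.
	 */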
521 	status.done = 0;
522 	nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
523 	    nvme_completion_poll_cb, &status);
524 	while (!atomic_load_acq_int(&status.done))
525 		pause("nvme", 1);
526 	if (nvme_completion_is_error(&status.cpl)) {
527 		nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
528 		return (ENXIO);
529 	}
530 
531 	status.done = 0;
532 	nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
533 	    nvme_completion_poll_cb, &status);
534 	while (!atomic_load_acq_int(&status.done))
535 		pause("nvme", 1);
536 	if (nvme_completion_is_error(&status.cpl)) {
537 		nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
538 		return (ENXIO);
539 	}
540 
541 	return (0);
542 }
543 
544 static int
545 nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
546 {
547 	struct nvme_namespace	*ns;
548 	uint32_t 		i;
549 
550 	for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
551 		ns = &ctrlr->ns[i];
552 		nvme_ns_construct(ns, i+1, ctrlr);
553 	}
554 
555 	return (0);
556 }
557 
558 static boolean_t
559 is_log_page_id_valid(uint8_t page_id)
560 {
561 
562 	switch (page_id) {
563 	case NVME_LOG_ERROR:
564 	case NVME_LOG_HEALTH_INFORMATION:
565 	case NVME_LOG_FIRMWARE_SLOT:
566 		return (TRUE);
567 	}
568 
569 	return (FALSE);
570 }
571 
572 static uint32_t
573 nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
574 {
575 	uint32_t	log_page_size;
576 
577 	switch (page_id) {
578 	case NVME_LOG_ERROR:
579 		log_page_size = min(
580 		    sizeof(struct nvme_error_information_entry) *
581 		    (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
582 		break;
583 	case NVME_LOG_HEALTH_INFORMATION:
584 		log_page_size = sizeof(struct nvme_health_information_page);
585 		break;
586 	case NVME_LOG_FIRMWARE_SLOT:
587 		log_page_size = sizeof(struct nvme_firmware_page);
588 		break;
589 	default:
590 		log_page_size = 0;
591 		break;
592 	}
593 
594 	return (log_page_size);
595 }
596 
597 static void
598 nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
599     uint8_t state)
600 {
601 
602 	if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
603 		nvme_printf(ctrlr, "available spare space below threshold\n");
604 
605 	if (state & NVME_CRIT_WARN_ST_TEMPERATURE)
606 		nvme_printf(ctrlr, "temperature above threshold\n");
607 
608 	if (state & NVME_CRIT_WARN_ST_DEVICE_RELIABILITY)
609 		nvme_printf(ctrlr, "device reliability degraded\n");
610 
611 	if (state & NVME_CRIT_WARN_ST_READ_ONLY)
612 		nvme_printf(ctrlr, "media placed in read only mode\n");
613 
614 	if (state & NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP)
615 		nvme_printf(ctrlr, "volatile memory backup device failed\n");
616 
617 	if (state & NVME_CRIT_WARN_ST_RESERVED_MASK)
618 		nvme_printf(ctrlr,
619 		    "unknown critical warning(s): state = 0x%02x\n", state);
620 }
621 
622 static void
623 nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
624 {
625 	struct nvme_async_event_request		*aer = arg;
626 	struct nvme_health_information_page	*health_info;
627 	struct nvme_error_information_entry	*err;
628 	int i;
629 
630 	/*
631 	 * If the log page fetch for some reason completed with an error,
632 	 *  don't pass log page data to the consumers.  In practice, this case
633 	 *  should never happen.
634 	 */
635 	if (nvme_completion_is_error(cpl))
636 		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
637 		    aer->log_page_id, NULL, 0);
638 	else {
639 		/* Convert data to host endian */
640 		switch (aer->log_page_id) {
641 		case NVME_LOG_ERROR:
642 			err = (struct nvme_error_information_entry *)aer->log_page_buffer;
643 			for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
644 				nvme_error_information_entry_swapbytes(err++);
645 			break;
646 		case NVME_LOG_HEALTH_INFORMATION:
647 			nvme_health_information_page_swapbytes(
648 			    (struct nvme_health_information_page *)aer->log_page_buffer);
649 			break;
650 		case NVME_LOG_FIRMWARE_SLOT:
651 			nvme_firmware_page_swapbytes(
652 			    (struct nvme_firmware_page *)aer->log_page_buffer);
653 			break;
654 		case INTEL_LOG_TEMP_STATS:
655 			intel_log_temp_stats_swapbytes(
656 			    (struct intel_log_temp_stats *)aer->log_page_buffer);
657 			break;
658 		default:
659 			break;
660 		}
661 
662 		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
663 			health_info = (struct nvme_health_information_page *)
664 			    aer->log_page_buffer;
665 			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
666 			    health_info->critical_warning);
667 			/*
668 			 * Critical warnings reported through the
669 			 *  SMART/health log page are persistent, so
670 			 *  clear the associated bits in the async event
671 			 *  config so that we do not receive repeated
672 			 *  notifications for the same event.
673 			 */
674 			aer->ctrlr->async_event_config &=
675 			    ~health_info->critical_warning;
676 			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
677 			    aer->ctrlr->async_event_config, NULL, NULL);
678 		}
679 
680 
681 		/*
682 		 * Pass the cpl data from the original async event completion,
683 		 *  not the log page fetch.
684 		 */
685 		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
686 		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
687 	}
688 
689 	/*
690 	 * Repost another asynchronous event request to replace the one
691 	 *  that just completed.
692 	 */
693 	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
694 }
695 
696 static void
697 nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
698 {
699 	struct nvme_async_event_request	*aer = arg;
700 
701 	if (nvme_completion_is_error(cpl)) {
702 		/*
703 		 *  Do not retry failed async event requests.  This avoids
704 		 *  infinite loops where a new async event request is submitted
705 		 *  to replace the one just failed, only to fail again and
706 		 *  perpetuate the loop.
707 		 */
708 		return;
709 	}
710 
711 	/* Associated log page is in bits 23:16 of completion entry dw0. */
712 	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;
713 
714 	nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
715 	    aer->log_page_id);
716 
717 	if (is_log_page_id_valid(aer->log_page_id)) {
718 		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
719 		    aer->log_page_id);
720 		memcpy(&aer->cpl, cpl, sizeof(*cpl));
721 		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
722 		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
723 		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
724 		    aer);
725 		/* Wait to notify consumers until after log page is fetched. */
726 	} else {
727 		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
728 		    NULL, 0);
729 
730 		/*
731 		 * Repost another asynchronous event request to replace the one
732 		 *  that just completed.
733 		 */
734 		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
735 	}
736 }
737 
738 static void
739 nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
740     struct nvme_async_event_request *aer)
741 {
742 	struct nvme_request *req;
743 
744 	aer->ctrlr = ctrlr;
745 	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
746 	aer->req = req;
747 
748 	/*
749 	 * Disable timeout here, since asynchronous event requests should by
750 	 *  nature never be timed out.
751 	 */
752 	req->timeout = FALSE;
753 	req->cmd.opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_ASYNC_EVENT_REQUEST);
754 	nvme_ctrlr_submit_admin_request(ctrlr, req);
755 }
756 
757 static void
758 nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
759 {
760 	struct nvme_completion_poll_status	status;
761 	struct nvme_async_event_request		*aer;
762 	uint32_t				i;
763 
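	/* Enable every critical-warning async event, then mask off reserved bits. */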
764 	ctrlr->async_event_config = 0xFF;
765 	ctrlr->async_event_config &= ~NVME_CRIT_WARN_ST_RESERVED_MASK;
766 
767 	status.done = 0;
768 	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
769 	    0, NULL, 0, nvme_completion_poll_cb, &status);
770 	while (!atomic_load_acq_int(&status.done))
771 		pause("nvme", 1);
772 	if (nvme_completion_is_error(&status.cpl) ||
773 	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
774 	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
775 		nvme_printf(ctrlr, "temperature threshold not supported\n");
776 		ctrlr->async_event_config &= ~NVME_CRIT_WARN_ST_TEMPERATURE;
777 	}
778 
779 	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
780 	    ctrlr->async_event_config, NULL, NULL);
781 
782 	/* aerl is a zero-based value, so we need to add 1 here. */
783 	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
784 
785 	for (i = 0; i < ctrlr->num_aers; i++) {
786 		aer = &ctrlr->aer[i];
787 		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
788 	}
789 }
790 
791 static void
792 nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
793 {
794 
795 	ctrlr->int_coal_time = 0;
796 	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
797 	    &ctrlr->int_coal_time);
798 
799 	ctrlr->int_coal_threshold = 0;
800 	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
801 	    &ctrlr->int_coal_threshold);
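	/*
	 * Both settings default to 0 (coalescing disabled) and may be set as
	 *  loader tunables, e.g. hw.nvme.int_coal_time=100 in loader.conf
	 *  (an illustrative value passed to the controller's Interrupt
	 *  Coalescing feature).
	 */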
802 
803 	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
804 	    ctrlr->int_coal_threshold, NULL, NULL);
805 }
806 
807 static void
808 nvme_ctrlr_start(void *ctrlr_arg)
809 {
810 	struct nvme_controller *ctrlr = ctrlr_arg;
811 	uint32_t old_num_io_queues;
812 	int i;
813 
814 	/*
815 	 * Only reset adminq here when we are restarting the
816 	 *  controller after a reset.  During initialization,
817 	 *  we have already submitted admin commands to get
818 	 *  the number of I/O queues supported, so cannot reset
819 	 *  the adminq again here.
820 	 */
821 	if (ctrlr->is_resetting) {
822 		nvme_qpair_reset(&ctrlr->adminq);
823 	}
824 
825 	for (i = 0; i < ctrlr->num_io_queues; i++)
826 		nvme_qpair_reset(&ctrlr->ioq[i]);
827 
828 	nvme_admin_qpair_enable(&ctrlr->adminq);
829 
830 	if (nvme_ctrlr_identify(ctrlr) != 0) {
831 		nvme_ctrlr_fail(ctrlr);
832 		return;
833 	}
834 
835 	/*
836 	 * The number of qpairs is determined during controller initialization,
837 	 *  including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
838 	 *  HW limit.  We call SET_FEATURES again here so that it gets called
839 	 *  after any reset for controllers that depend on the driver to
840 	 *  explicitly specify how many queues it will use.  This value should
841 	 *  never change between resets, so panic if somehow that does happen.
842 	 */
843 	if (ctrlr->is_resetting) {
844 		old_num_io_queues = ctrlr->num_io_queues;
845 		if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
846 			nvme_ctrlr_fail(ctrlr);
847 			return;
848 		}
849 
850 		if (old_num_io_queues != ctrlr->num_io_queues) {
851 			panic("num_io_queues changed from %u to %u",
852 			      old_num_io_queues, ctrlr->num_io_queues);
853 		}
854 	}
855 
856 	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
857 		nvme_ctrlr_fail(ctrlr);
858 		return;
859 	}
860 
861 	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
862 		nvme_ctrlr_fail(ctrlr);
863 		return;
864 	}
865 
866 	nvme_ctrlr_configure_aer(ctrlr);
867 	nvme_ctrlr_configure_int_coalescing(ctrlr);
868 
869 	for (i = 0; i < ctrlr->num_io_queues; i++)
870 		nvme_io_qpair_enable(&ctrlr->ioq[i]);
871 }
872 
873 void
874 nvme_ctrlr_start_config_hook(void *arg)
875 {
876 	struct nvme_controller *ctrlr = arg;
877 
878 	nvme_qpair_reset(&ctrlr->adminq);
879 	nvme_admin_qpair_enable(&ctrlr->adminq);
880 
881 	if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
882 	    nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
883 		nvme_ctrlr_start(ctrlr);
884 	else
885 		nvme_ctrlr_fail(ctrlr);
886 
887 	nvme_sysctl_initialize_ctrlr(ctrlr);
888 	config_intrhook_disestablish(&ctrlr->config_hook);
889 
890 	ctrlr->is_initialized = 1;
891 	nvme_notify_new_controller(ctrlr);
892 }
893 
894 static void
895 nvme_ctrlr_reset_task(void *arg, int pending)
896 {
897 	struct nvme_controller	*ctrlr = arg;
898 	int			status;
899 
900 	nvme_printf(ctrlr, "resetting controller\n");
901 	status = nvme_ctrlr_hw_reset(ctrlr);
902 	/*
903 	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
904 	 *  handlers on this CPU that were blocked on a qpair lock. We want
905 	 *  all nvme interrupts completed before proceeding with restarting the
906 	 *  controller.
907 	 *
908 	 * XXX - any way to guarantee the interrupt handlers have quiesced?
909 	 */
910 	pause("nvmereset", hz / 10);
911 	if (status == 0)
912 		nvme_ctrlr_start(ctrlr);
913 	else
914 		nvme_ctrlr_fail(ctrlr);
915 
916 	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
917 }
918 
919 /*
920  * Poll all the queues enabled on the device for completion.
921  */
922 void
923 nvme_ctrlr_poll(struct nvme_controller *ctrlr)
924 {
925 	int i;
926 
927 	nvme_qpair_process_completions(&ctrlr->adminq);
928 
929 	for (i = 0; i < ctrlr->num_io_queues; i++)
930 		if (ctrlr->ioq && ctrlr->ioq[i].cpl)
931 			nvme_qpair_process_completions(&ctrlr->ioq[i]);
932 }
933 
934 /*
935  * Poll the single-vector interrupt case: num_io_queues will be 1 and
936  * there's only a single vector. While we're polling, we mask further
937  * interrupts in the controller.
938  */
939 void
940 nvme_ctrlr_intx_handler(void *arg)
941 {
942 	struct nvme_controller *ctrlr = arg;
943 
944 	nvme_mmio_write_4(ctrlr, intms, 1);
945 	nvme_ctrlr_poll(ctrlr);
946 	nvme_mmio_write_4(ctrlr, intmc, 1);
947 }
948 
949 static int
950 nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
951 {
952 
953 	ctrlr->msix_enabled = 0;
954 	ctrlr->num_io_queues = 1;
955 	ctrlr->num_cpus_per_ioq = mp_ncpus;
956 	ctrlr->rid = 0;
957 	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
958 	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);
959 
960 	if (ctrlr->res == NULL) {
961 		nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
962 		return (ENOMEM);
963 	}
964 
965 	bus_setup_intr(ctrlr->dev, ctrlr->res,
966 	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
967 	    ctrlr, &ctrlr->tag);
968 
969 	if (ctrlr->tag == NULL) {
970 		nvme_printf(ctrlr, "unable to setup intx handler\n");
971 		return (ENOMEM);
972 	}
973 
974 	return (0);
975 }
976 
977 static void
978 nvme_pt_done(void *arg, const struct nvme_completion *cpl)
979 {
980 	struct nvme_pt_command *pt = arg;
981 	uint16_t status;
982 
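	/*
	 * Copy the completion back for the waiting ioctl caller, clearing the
	 *  phase bit, then wake up the thread sleeping in
	 *  nvme_ctrlr_passthrough_cmd().
	 */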
983 	bzero(&pt->cpl, sizeof(pt->cpl));
984 	pt->cpl.cdw0 = cpl->cdw0;
985 
986 	status = cpl->status;
987 	status &= ~NVME_STATUS_P_MASK;
988 	pt->cpl.status = status;
989 
990 	mtx_lock(pt->driver_lock);
991 	wakeup(pt);
992 	mtx_unlock(pt->driver_lock);
993 }
994 
995 int
996 nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
997     struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
998     int is_admin_cmd)
999 {
1000 	struct nvme_request	*req;
1001 	struct mtx		*mtx;
1002 	struct buf		*buf = NULL;
1003 	int			ret = 0;
1004 	vm_offset_t		addr, end;
1005 
1006 	if (pt->len > 0) {
1007 		/*
1008 		 * vmapbuf calls vm_fault_quick_hold_pages which only maps full
1009 		 * pages.  Ensure this request has no more than MAXPHYS bytes when
1010 		 * extended to full pages.
1011 		 */
1012 		addr = (vm_offset_t)pt->buf;
1013 		end = round_page(addr + pt->len);
1014 		addr = trunc_page(addr);
1015 		if (end - addr > MAXPHYS)
1016 			return EIO;
1017 
1018 		if (pt->len > ctrlr->max_xfer_size) {
1019 			nvme_printf(ctrlr, "pt->len (%d) "
1020 			    "exceeds max_xfer_size (%d)\n", pt->len,
1021 			    ctrlr->max_xfer_size);
1022 			return EIO;
1023 		}
1024 		if (is_user_buffer) {
1025 			/*
1026 			 * Ensure the user buffer is wired for the duration of
1027 			 *  this passthrough command.
1028 			 */
1029 			PHOLD(curproc);
1030 			buf = getpbuf(NULL);
1031 			buf->b_data = pt->buf;
1032 			buf->b_bufsize = pt->len;
1033 			buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
1034 #ifdef NVME_UNMAPPED_BIO_SUPPORT
1035 			if (vmapbuf(buf, 1) < 0) {
1036 #else
1037 			if (vmapbuf(buf) < 0) {
1038 #endif
1039 				ret = EFAULT;
1040 				goto err;
1041 			}
1042 			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
1043 			    nvme_pt_done, pt);
1044 		} else
1045 			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
1046 			    nvme_pt_done, pt);
1047 	} else
1048 		req = nvme_allocate_request_null(nvme_pt_done, pt);
1049 
1050 	/* Assume userspace already converted to little-endian */
1051 	req->cmd.opc_fuse = pt->cmd.opc_fuse;
1052 	req->cmd.cdw10 = pt->cmd.cdw10;
1053 	req->cmd.cdw11 = pt->cmd.cdw11;
1054 	req->cmd.cdw12 = pt->cmd.cdw12;
1055 	req->cmd.cdw13 = pt->cmd.cdw13;
1056 	req->cmd.cdw14 = pt->cmd.cdw14;
1057 	req->cmd.cdw15 = pt->cmd.cdw15;
1058 
1059 	req->cmd.nsid = htole32(nsid);
1060 
1061 	if (is_admin_cmd)
1062 		mtx = &ctrlr->lock;
1063 	else {
1064 		KASSERT((nsid-1) >= 0 && (nsid-1) < NVME_MAX_NAMESPACES,
1065 		    ("%s: invalid namespace ID %d\n", __func__, nsid));
1066 		mtx = &ctrlr->ns[nsid-1].lock;
1067 	}
1068 
1069 	mtx_lock(mtx);
1070 	pt->driver_lock = mtx;
1071 
1072 	if (is_admin_cmd)
1073 		nvme_ctrlr_submit_admin_request(ctrlr, req);
1074 	else
1075 		nvme_ctrlr_submit_io_request(ctrlr, req);
1076 
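	/* Sleep until nvme_pt_done() posts the completion and wakes us up. */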
1077 	mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
1078 	mtx_unlock(mtx);
1079 
1080 	pt->driver_lock = NULL;
1081 
1082 err:
1083 	if (buf != NULL) {
1084 		relpbuf(buf, NULL);
1085 		PRELE(curproc);
1086 	}
1087 
1088 	return (ret);
1089 }
1090 
1091 static int
1092 nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
1093     struct thread *td)
1094 {
1095 	struct nvme_controller			*ctrlr;
1096 	struct nvme_pt_command			*pt;
1097 
1098 	ctrlr = cdev->si_drv1;
1099 
1100 	switch (cmd) {
1101 	case NVME_RESET_CONTROLLER:
1102 		nvme_ctrlr_reset(ctrlr);
1103 		break;
1104 	case NVME_PASSTHROUGH_CMD:
1105 		pt = (struct nvme_pt_command *)arg;
1106 		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
1107 		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
1108 	default:
1109 		return (ENOTTY);
1110 	}
1111 
1112 	return (0);
1113 }
1114 
1115 static struct cdevsw nvme_ctrlr_cdevsw = {
1116 	.d_version =	D_VERSION,
1117 	.d_flags =	0,
1118 	.d_ioctl =	nvme_ctrlr_ioctl
1119 };
1120 
1121 static void
1122 nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
1123 {
1124 	device_t	dev;
1125 	int		per_cpu_io_queues;
1126 	int		min_cpus_per_ioq;
1127 	int		num_vectors_requested, num_vectors_allocated;
1128 	int		num_vectors_available;
1129 
1130 	dev = ctrlr->dev;
1131 	min_cpus_per_ioq = 1;
1132 	TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);
1133 
1134 	if (min_cpus_per_ioq < 1) {
1135 		min_cpus_per_ioq = 1;
1136 	} else if (min_cpus_per_ioq > mp_ncpus) {
1137 		min_cpus_per_ioq = mp_ncpus;
1138 	}
1139 
1140 	per_cpu_io_queues = 1;
1141 	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
1142 
1143 	if (per_cpu_io_queues == 0) {
1144 		min_cpus_per_ioq = mp_ncpus;
1145 	}
1146 
1147 	ctrlr->force_intx = 0;
1148 	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);
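	/*
	 * hw.nvme.min_cpus_per_ioq, hw.nvme.per_cpu_io_queues and
	 *  hw.nvme.force_intx are loader tunables; e.g. hw.nvme.force_intx=1
	 *  in loader.conf forces the single-vector INTx path below.
	 */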
1149 
1150 	/*
1151 	 * FreeBSD currently cannot allocate more than about 190 vectors at
1152 	 *  boot, meaning that systems with high core count and many devices
1153 	 *  requesting per-CPU interrupt vectors will not get their full
1154 	 *  allotment.  So first, try to allocate as many as we might need in
1155 	 *  order to learn how many are available, then immediately release them.
1156 	 *  Then figure out how many of those we will actually use, based on
1157 	 *  assigning an equal number of cores to each I/O queue.
1158 	 */
1159 
1160 	/* One vector per CPU for I/O queues, plus one vector for the admin queue. */
1161 	num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
1162 	if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
1163 		num_vectors_available = 0;
1164 	}
1165 	pci_release_msi(dev);
1166 
1167 	if (ctrlr->force_intx || num_vectors_available < 2) {
1168 		nvme_ctrlr_configure_intx(ctrlr);
1169 		return;
1170 	}
1171 
1172 	/*
1173 	 * Do not use all vectors for I/O queues - one must be saved for the
1174 	 *  admin queue.
1175 	 */
1176 	ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
1177 	    howmany(mp_ncpus, num_vectors_available - 1));
1178 
1179 	ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
1180 	num_vectors_requested = ctrlr->num_io_queues + 1;
1181 	num_vectors_allocated = num_vectors_requested;
1182 
1183 	/*
1184 	 * Now just allocate the number of vectors we need.  This should
1185 	 *  succeed, since the earlier pci_alloc_msix() call returned at
1186 	 *  least this many vectors.  But just to be safe, revert to INTx
1187 	 *  if anything goes wrong.
1188 	 */
1189 	if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
1190 		nvme_ctrlr_configure_intx(ctrlr);
1191 		return;
1192 	}
1193 
1194 	if (num_vectors_allocated < num_vectors_requested) {
1195 		pci_release_msi(dev);
1196 		nvme_ctrlr_configure_intx(ctrlr);
1197 		return;
1198 	}
1199 
1200 	ctrlr->msix_enabled = 1;
1201 }
1202 
1203 int
1204 nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
1205 {
1206 	uint32_t	cap_lo;
1207 	uint32_t	cap_hi;
1208 	uint8_t		to;
1209 	uint8_t		dstrd;
1210 	uint8_t		mpsmin;
1211 	int		status, timeout_period;
1212 
1213 	ctrlr->dev = dev;
1214 
1215 	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
1216 
1217 	status = nvme_ctrlr_allocate_bar(ctrlr);
1218 
1219 	if (status != 0)
1220 		return (status);
1221 
1222 	/*
1223 	 * Software emulators may set the doorbell stride to something
1224 	 *  other than zero, but this driver is not set up to handle that.
1225 	 */
1226 	cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
1227 	dstrd = (cap_hi >> NVME_CAP_HI_REG_DSTRD_SHIFT) & NVME_CAP_HI_REG_DSTRD_MASK;
1228 	if (dstrd != 0)
1229 		return (ENXIO);
1230 
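	/* CAP.MPSMIN is an exponent: the minimum page size is 2^(12 + MPSMIN) bytes. */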
1231 	mpsmin = (cap_hi >> NVME_CAP_HI_REG_MPSMIN_SHIFT) & NVME_CAP_HI_REG_MPSMIN_MASK;
1232 	ctrlr->min_page_size = 1 << (12 + mpsmin);
1233 
1234 	/* Get ready timeout value from controller, in units of 500ms. */
1235 	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
1236 	to = (cap_lo >> NVME_CAP_LO_REG_TO_SHIFT) & NVME_CAP_LO_REG_TO_MASK;
1237 	ctrlr->ready_timeout_in_ms = to * 500;
1238 
1239 	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
1240 	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
1241 	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
1242 	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
1243 	ctrlr->timeout_period = timeout_period;
1244 
1245 	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
1246 	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);
1247 
1248 	ctrlr->enable_aborts = 0;
1249 	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
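	/*
	 * hw.nvme.timeout_period, hw.nvme.retry_count and
	 *  hw.nvme.enable_aborts are likewise loader tunables; timeout_period
	 *  is clamped to the driver's min/max above.
	 */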
1250 
1251 	nvme_ctrlr_setup_interrupts(ctrlr);
1252 
1253 	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
1254 	if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
1255 		return (ENXIO);
1256 
1257 	ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, device_get_unit(dev),
1258 	    UID_ROOT, GID_WHEEL, 0600, "nvme%d", device_get_unit(dev));
1259 
1260 	if (ctrlr->cdev == NULL)
1261 		return (ENXIO);
1262 
1263 	ctrlr->cdev->si_drv1 = (void *)ctrlr;
1264 
1265 	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
1266 	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
1267 	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");
1268 
1269 	ctrlr->is_resetting = 0;
1270 	ctrlr->is_initialized = 0;
1271 	ctrlr->notification_sent = 0;
1272 	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
1273 
1274 	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
1275 	STAILQ_INIT(&ctrlr->fail_req);
1276 	ctrlr->is_failed = FALSE;
1277 
1278 	return (0);
1279 }
1280 
1281 void
1282 nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
1283 {
1284 	int				i;
1285 
1286 	/*
1287 	 *  Notify the controller of a shutdown, even though this is due to
1288 	 *   a driver unload, not a system shutdown (this path is not invoked
1289 	 *   during shutdown).  This ensures the controller receives a
1290 	 *   shutdown notification in case the system is shut down before
1291 	 *   reloading the driver.
1292 	 */
1293 	nvme_ctrlr_shutdown(ctrlr);
1294 
1295 	nvme_ctrlr_disable(ctrlr);
1296 	taskqueue_free(ctrlr->taskqueue);
1297 
1298 	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
1299 		nvme_ns_destruct(&ctrlr->ns[i]);
1300 
1301 	if (ctrlr->cdev)
1302 		destroy_dev(ctrlr->cdev);
1303 
1304 	for (i = 0; i < ctrlr->num_io_queues; i++) {
1305 		nvme_ctrlr_destroy_qpair(ctrlr, &ctrlr->ioq[i]);
1306 		nvme_io_qpair_destroy(&ctrlr->ioq[i]);
1307 	}
1308 
1309 	free(ctrlr->ioq, M_NVME);
1310 
1311 	nvme_admin_qpair_destroy(&ctrlr->adminq);
1312 
1313 	if (ctrlr->resource != NULL) {
1314 		bus_release_resource(dev, SYS_RES_MEMORY,
1315 		    ctrlr->resource_id, ctrlr->resource);
1316 	}
1317 
1318 	if (ctrlr->bar4_resource != NULL) {
1319 		bus_release_resource(dev, SYS_RES_MEMORY,
1320 		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
1321 	}
1322 
1323 	if (ctrlr->tag)
1324 		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
1325 
1326 	if (ctrlr->res)
1327 		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
1328 		    rman_get_rid(ctrlr->res), ctrlr->res);
1329 
1330 	if (ctrlr->msix_enabled)
1331 		pci_release_msi(dev);
1332 }
1333 
1334 void
1335 nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
1336 {
1337 	uint32_t	cc;
1338 	uint32_t	csts;
1339 	int		ticks = 0;
1340 
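	/*
	 * Request a normal (not abrupt) shutdown via CC.SHN, then poll
	 *  CSTS.SHST for up to five seconds waiting for the controller to
	 *  report that shutdown processing is complete.
	 */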
1341 	cc = nvme_mmio_read_4(ctrlr, cc);
1342 	cc &= ~(NVME_CC_REG_SHN_MASK << NVME_CC_REG_SHN_SHIFT);
1343 	cc |= NVME_SHN_NORMAL << NVME_CC_REG_SHN_SHIFT;
1344 	nvme_mmio_write_4(ctrlr, cc, cc);
1345 
1346 	csts = nvme_mmio_read_4(ctrlr, csts);
1347 	while ((NVME_CSTS_GET_SHST(csts) != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) {
1348 		pause("nvme shn", 1);
1349 		csts = nvme_mmio_read_4(ctrlr, csts);
1350 	}
1351 	if (NVME_CSTS_GET_SHST(csts) != NVME_SHST_COMPLETE)
1352 		nvme_printf(ctrlr, "did not complete shutdown within 5 seconds "
1353 		    "of notification\n");
1354 }
1355 
1356 void
1357 nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
1358     struct nvme_request *req)
1359 {
1360 
1361 	nvme_qpair_submit_request(&ctrlr->adminq, req);
1362 }
1363 
1364 void
1365 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
1366     struct nvme_request *req)
1367 {
1368 	struct nvme_qpair       *qpair;
1369 
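	/* Steer the request to the I/O queue assigned to the submitting CPU. */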
1370 	qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
1371 	nvme_qpair_submit_request(qpair, req);
1372 }
1373 
1374 device_t
1375 nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
1376 {
1377 
1378 	return (ctrlr->dev);
1379 }
1380 
1381 const struct nvme_controller_data *
1382 nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
1383 {
1384 
1385 	return (&ctrlr->cdata);
1386 }
1387