xref: /freebsd/sys/dev/nvme/nvme_ctrlr.c (revision 0bf48626aaa33768078f5872b922b1487b3a9296)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2012-2016 Intel Corporation
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include "opt_cam.h"
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/buf.h>
37 #include <sys/bus.h>
38 #include <sys/conf.h>
39 #include <sys/ioccom.h>
40 #include <sys/proc.h>
41 #include <sys/smp.h>
42 #include <sys/uio.h>
43 #include <sys/endian.h>
44 
45 #include <dev/pci/pcireg.h>
46 #include <dev/pci/pcivar.h>
47 
48 #include "nvme_private.h"
49 
50 #define B4_CHK_RDY_DELAY_MS	2300		/* work around controller bug */
51 
52 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
53 						struct nvme_async_event_request *aer);
54 static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);
55 
56 static int
57 nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
58 {
59 
60 	ctrlr->resource_id = PCIR_BAR(0);
61 
62 	ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
63 	    &ctrlr->resource_id, RF_ACTIVE);
64 
65 	if (ctrlr->resource == NULL) {
66 		nvme_printf(ctrlr, "unable to allocate pci resource\n");
67 		return (ENOMEM);
68 	}
69 
70 	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
71 	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
72 	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;
73 
74 	/*
75 	 * The NVMe spec allows for the MSI-X table to be placed behind
76 	 *  BAR 4/5, separate from the control/doorbell registers.  Always
77 	 *  try to map this bar, because it must be mapped prior to calling
78 	 *  pci_alloc_msix().  If the table isn't behind BAR 4/5,
79 	 *  bus_alloc_resource() will just return NULL which is OK.
80 	 */
81 	ctrlr->bar4_resource_id = PCIR_BAR(4);
82 	ctrlr->bar4_resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
83 	    &ctrlr->bar4_resource_id, RF_ACTIVE);
84 
85 	return (0);
86 }
87 
88 static int
89 nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
90 {
91 	struct nvme_qpair	*qpair;
92 	uint32_t		num_entries;
93 	int			error;
94 
95 	qpair = &ctrlr->adminq;
96 
97 	num_entries = NVME_ADMIN_ENTRIES;
98 	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
99 	/*
100 	 * If admin_entries was overridden to an invalid value, revert it
101 	 *  to our default value.
102 	 */
103 	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
104 	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
105 		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
106 		    "specified\n", num_entries);
107 		num_entries = NVME_ADMIN_ENTRIES;
108 	}
109 
110 	/*
111 	 * The admin queue's max xfer size is treated differently than the
112 	 *  max I/O xfer size.  16KB is sufficient here - maybe even less?
113 	 */
114 	error = nvme_qpair_construct(qpair,
115 				     0, /* qpair ID */
116 				     0, /* vector */
117 				     num_entries,
118 				     NVME_ADMIN_TRACKERS,
119 				     ctrlr);
120 	return (error);
121 }
122 
123 static int
124 nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
125 {
126 	struct nvme_qpair	*qpair;
127 	uint32_t		cap_lo;
128 	uint16_t		mqes;
129 	int			i, error, num_entries, num_trackers;
130 
131 	num_entries = NVME_IO_ENTRIES;
132 	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
133 
134 	/*
135 	 * NVMe spec sets a hard limit of 64K max entries, but
136 	 *  devices may specify a smaller limit, so we need to check
137 	 *  the MQES field in the capabilities register.
138 	 */
139 	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
140 	mqes = NVME_CAP_LO_MQES(cap_lo);
141 	num_entries = min(num_entries, mqes + 1);
142 
143 	num_trackers = NVME_IO_TRACKERS;
144 	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);
145 
146 	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
147 	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
148 	/*
149 	 * No need to have more trackers than entries in the submit queue.
150 	 *  Note also that for a queue size of N, we can only have (N-1)
151 	 *  commands outstanding, hence the "-1" here.
152 	 */
153 	num_trackers = min(num_trackers, (num_entries-1));
154 
155 	/*
156 	 * Our best estimate for the maximum number of I/Os that we should
157 	 * normally have in flight at one time.  This should be viewed as a
158 	 * hint, not a hard limit, and will need to be revisited when the
159 	 * upper layers of the storage system grow multi-queue support.
160 	 */
161 	ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;
162 
163 	/*
164 	 * This was calculated previously when setting up interrupts, but
165 	 *  a controller could theoretically support fewer I/O queues than
166 	 *  MSI-X vectors.  So calculate again here just to be safe.
167 	 */
168 	ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);
169 
170 	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
171 	    M_NVME, M_ZERO | M_WAITOK);
172 
173 	for (i = 0; i < ctrlr->num_io_queues; i++) {
174 		qpair = &ctrlr->ioq[i];
175 
176 		/*
177 		 * Admin queue has ID=0. IO queues start at ID=1 -
178 		 *  hence the 'i+1' here.
179 		 *
180 		 * For I/O queues, use the controller-wide max_xfer_size
181 		 *  calculated in nvme_attach().
182 		 */
183 		error = nvme_qpair_construct(qpair,
184 				     i+1, /* qpair ID */
185 				     ctrlr->msix_enabled ? i+1 : 0, /* vector */
186 				     num_entries,
187 				     num_trackers,
188 				     ctrlr);
189 		if (error)
190 			return (error);
191 
192 		/*
193 		 * Do not bother binding interrupts if we only have one I/O
194 		 *  interrupt thread for this controller.
195 		 */
196 		if (ctrlr->num_io_queues > 1)
197 			bus_bind_intr(ctrlr->dev, qpair->res,
198 			    i * ctrlr->num_cpus_per_ioq);
199 	}
200 
201 	return (0);
202 }
203 
204 static void
205 nvme_ctrlr_fail(struct nvme_controller *ctrlr)
206 {
207 	int i;
208 
209 	ctrlr->is_failed = TRUE;
210 	nvme_qpair_fail(&ctrlr->adminq);
211 	if (ctrlr->ioq != NULL) {
212 		for (i = 0; i < ctrlr->num_io_queues; i++)
213 			nvme_qpair_fail(&ctrlr->ioq[i]);
214 	}
215 	nvme_notify_fail_consumers(ctrlr);
216 }
217 
218 void
219 nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
220     struct nvme_request *req)
221 {
222 
223 	mtx_lock(&ctrlr->lock);
224 	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
225 	mtx_unlock(&ctrlr->lock);
226 	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
227 }
228 
229 static void
230 nvme_ctrlr_fail_req_task(void *arg, int pending)
231 {
232 	struct nvme_controller	*ctrlr = arg;
233 	struct nvme_request	*req;
234 
235 	mtx_lock(&ctrlr->lock);
236 	while ((req = STAILQ_FIRST(&ctrlr->fail_req)) != NULL) {
237 		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
238 		mtx_unlock(&ctrlr->lock);
239 		nvme_qpair_manual_complete_request(req->qpair, req,
240 		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST);
241 		mtx_lock(&ctrlr->lock);
242 	}
243 	mtx_unlock(&ctrlr->lock);
244 }
245 
246 static int
247 nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
248 {
249 	int ms_waited;
250 	uint32_t csts;
251 
252 	csts = nvme_mmio_read_4(ctrlr, csts);
253 
254 	ms_waited = 0;
255 	while (((csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK) != desired_val) {
256 		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
257 			nvme_printf(ctrlr, "controller ready did not become %d "
258 			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
259 			return (ENXIO);
260 		}
261 		DELAY(1000);
262 		csts = nvme_mmio_read_4(ctrlr, csts);
263 	}
264 
265 	return (0);
266 }
267 
268 static int
269 nvme_ctrlr_disable(struct nvme_controller *ctrlr)
270 {
271 	uint32_t cc;
272 	uint32_t csts;
273 	uint8_t  en, rdy;
274 	int err;
275 
276 	cc = nvme_mmio_read_4(ctrlr, cc);
277 	csts = nvme_mmio_read_4(ctrlr, csts);
278 
279 	en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
280 	rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;
281 
282 	/*
283 	 * Per section 3.1.5 of the NVMe 1.3 spec, transitioning CC.EN from 0
284 	 * to 1 when CSTS.RDY is 1, or from 1 to 0 when CSTS.RDY is 0, "has
285 	 * undefined results".  So wait for CSTS.RDY to match the current CC.EN
286 	 * before changing EN.  Short circuit if we're already disabled.
287 	 */
288 	if (en == 1) {
289 		if (rdy == 0) {
290 			/* EN == 1, wait for RDY == 1 or fail. */
291 			err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
292 			if (err != 0)
293 				return (err);
294 		}
295 	} else {
296 		/* EN == 0; wait for RDY == 0 if it isn't already. */
297 		if (rdy == 0)
298 			return (0);
299 		else
300 			return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
301 	}
302 
303 	cc &= ~NVME_CC_REG_EN_MASK;
304 	nvme_mmio_write_4(ctrlr, cc, cc);
305 	/*
306 	 * Some drives have issues with accessing the mmio after we
307 	 * disable, so delay for a bit after we write the bit to
308 	 * cope with these issues.
309 	 */
310 	if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
311 		pause("nvmeR", B4_CHK_RDY_DELAY_MS * hz / 1000);
312 	return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
313 }
314 
315 static int
316 nvme_ctrlr_enable(struct nvme_controller *ctrlr)
317 {
318 	uint32_t	cc;
319 	uint32_t	csts;
320 	uint32_t	aqa;
321 	uint32_t	qsize;
322 	uint8_t		en, rdy;
323 	int		err;
324 
325 	cc = nvme_mmio_read_4(ctrlr, cc);
326 	csts = nvme_mmio_read_4(ctrlr, csts);
327 
328 	en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
329 	rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;
330 
331 	/*
332 	 * See note in nvme_ctrlr_disable. Short circuit if we're already enabled.
333 	 */
334 	if (en == 1) {
335 		if (rdy == 1)
336 			return (0);
337 		else
338 			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
339 	} else {
340 		/* EN == 0; wait for RDY == 0 before enabling, or fail. */
341 		err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
342 		if (err != 0)
343 			return (err);
344 	}
345 
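	/*
	 * Program the admin submission and completion queue base addresses
	 *  before enabling the controller.
	 */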
346 	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
347 	DELAY(5000);
348 	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
349 	DELAY(5000);
350 
351 	/* The ACQS and ASQS fields are 0-based, i.e. hold the queue size minus one. */
352 	qsize = ctrlr->adminq.num_entries - 1;
353 
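	/*
	 * The AQA register carries the admin submission queue size in its low
	 *  half and the admin completion queue size in its high half.
	 */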
354 	aqa = 0;
355 	aqa = (qsize & NVME_AQA_REG_ACQS_MASK) << NVME_AQA_REG_ACQS_SHIFT;
356 	aqa |= (qsize & NVME_AQA_REG_ASQS_MASK) << NVME_AQA_REG_ASQS_SHIFT;
357 	nvme_mmio_write_4(ctrlr, aqa, aqa);
358 	DELAY(5000);
359 
360 	/* Initialization values for CC */
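	/*
	 * CSS = 0 selects the NVM command set, AMS = 0 selects round-robin
	 *  arbitration, and SHN = 0 requests no shutdown notification.
	 */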
361 	cc = 0;
362 	cc |= 1 << NVME_CC_REG_EN_SHIFT;
363 	cc |= 0 << NVME_CC_REG_CSS_SHIFT;
364 	cc |= 0 << NVME_CC_REG_AMS_SHIFT;
365 	cc |= 0 << NVME_CC_REG_SHN_SHIFT;
366 	cc |= 6 << NVME_CC_REG_IOSQES_SHIFT; /* SQ entry size == 64 == 2^6 */
367 	cc |= 4 << NVME_CC_REG_IOCQES_SHIFT; /* CQ entry size == 16 == 2^4 */
368 
369 	/* On 4K-page systems this evaluates to MPS = 0 (2^(12+0) byte pages), per the spec. */
370 	cc |= (PAGE_SIZE >> 13) << NVME_CC_REG_MPS_SHIFT;
371 
372 	nvme_mmio_write_4(ctrlr, cc, cc);
373 
374 	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
375 }
376 
377 int
378 nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
379 {
380 	int i, err;
381 
382 	nvme_admin_qpair_disable(&ctrlr->adminq);
383 	/*
384 	 * I/O queues are not allocated before the initial HW
385 	 *  reset, so do not try to disable them.  Use is_initialized
386 	 *  to determine if this is the initial HW reset.
387 	 */
388 	if (ctrlr->is_initialized) {
389 		for (i = 0; i < ctrlr->num_io_queues; i++)
390 			nvme_io_qpair_disable(&ctrlr->ioq[i]);
391 	}
392 
393 	DELAY(100*1000);
394 
395 	err = nvme_ctrlr_disable(ctrlr);
396 	if (err != 0)
397 		return err;
398 	return (nvme_ctrlr_enable(ctrlr));
399 }
400 
401 void
402 nvme_ctrlr_reset(struct nvme_controller *ctrlr)
403 {
404 	int cmpset;
405 
406 	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
407 
408 	if (cmpset == 0 || ctrlr->is_failed)
409 		/*
410 		 * Controller is already resetting or has failed.  Return
411 		 *  immediately since there is no need to kick off another
412 		 *  reset in these cases.
413 		 */
414 		return;
415 
416 	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
417 }
418 
419 static int
420 nvme_ctrlr_identify(struct nvme_controller *ctrlr)
421 {
422 	struct nvme_completion_poll_status	status;
423 
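	/*
	 * Issue IDENTIFY CONTROLLER and sleep a tick at a time until the
	 *  polled completion callback marks it done.
	 */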
424 	status.done = 0;
425 	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
426 	    nvme_completion_poll_cb, &status);
427 	while (!atomic_load_acq_int(&status.done))
428 		pause("nvme", 1);
429 	if (nvme_completion_is_error(&status.cpl)) {
430 		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
431 		return (ENXIO);
432 	}
433 
434 	/* Convert data to host endian */
435 	nvme_controller_data_swapbytes(&ctrlr->cdata);
436 
437 	/*
438 	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
439 	 *  controller supports.
440 	 */
441 	if (ctrlr->cdata.mdts > 0)
442 		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
443 		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
444 
445 	return (0);
446 }
447 
448 static int
449 nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
450 {
451 	struct nvme_completion_poll_status	status;
452 	int					cq_allocated, sq_allocated;
453 
454 	status.done = 0;
455 	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
456 	    nvme_completion_poll_cb, &status);
457 	while (!atomic_load_acq_int(&status.done))
458 		pause("nvme", 1);
459 	if (nvme_completion_is_error(&status.cpl)) {
460 		nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
461 		return (ENXIO);
462 	}
463 
464 	/*
465 	 * Data in cdw0 is 0-based.
466 	 * Lower 16-bits indicate number of submission queues allocated.
467 	 * Upper 16-bits indicate number of completion queues allocated.
468 	 */
469 	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
470 	cq_allocated = (status.cpl.cdw0 >> 16) + 1;
471 
472 	/*
473 	 * Controller may allocate more queues than we requested,
474 	 *  so use the minimum of the number requested and what was
475 	 *  actually allocated.
476 	 */
477 	ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
478 	ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
479 
480 	return (0);
481 }
482 
483 static int
484 nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
485 {
486 	struct nvme_completion_poll_status	status;
487 	struct nvme_qpair			*qpair;
488 	int					i;
489 
490 	for (i = 0; i < ctrlr->num_io_queues; i++) {
491 		qpair = &ctrlr->ioq[i];
492 
493 		status.done = 0;
494 		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
495 		    nvme_completion_poll_cb, &status);
496 		while (!atomic_load_acq_int(&status.done))
497 			pause("nvme", 1);
498 		if (nvme_completion_is_error(&status.cpl)) {
499 			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
500 			return (ENXIO);
501 		}
502 
503 		status.done = 0;
504 		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
505 		    nvme_completion_poll_cb, &status);
506 		while (!atomic_load_acq_int(&status.done))
507 			pause("nvme", 1);
508 		if (nvme_completion_is_error(&status.cpl)) {
509 			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
510 			return (ENXIO);
511 		}
512 	}
513 
514 	return (0);
515 }
516 
517 static int
518 nvme_ctrlr_destroy_qpairs(struct nvme_controller *ctrlr)
519 {
520 	struct nvme_completion_poll_status	status;
521 	struct nvme_qpair			*qpair;
522 
523 	for (int i = 0; i < ctrlr->num_io_queues; i++) {
524 		qpair = &ctrlr->ioq[i];
525 
526 		status.done = 0;
527 		nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
528 		    nvme_completion_poll_cb, &status);
529 		while (!atomic_load_acq_int(&status.done))
530 			pause("nvme", 1);
531 		if (nvme_completion_is_error(&status.cpl)) {
532 			nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
533 			return (ENXIO);
534 		}
535 
536 		status.done = 0;
537 		nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
538 		    nvme_completion_poll_cb, &status);
539 		while (!atomic_load_acq_int(&status.done))
540 			pause("nvme", 1);
541 		if (nvme_completion_is_error(&status.cpl)) {
542 			nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
543 			return (ENXIO);
544 		}
545 	}
546 
547 	return (0);
548 }
549 
550 static int
551 nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
552 {
553 	struct nvme_namespace	*ns;
554 	uint32_t 		i;
555 
556 	for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
557 		ns = &ctrlr->ns[i];
558 		nvme_ns_construct(ns, i+1, ctrlr);
559 	}
560 
561 	return (0);
562 }
563 
564 static boolean_t
565 is_log_page_id_valid(uint8_t page_id)
566 {
567 
568 	switch (page_id) {
569 	case NVME_LOG_ERROR:
570 	case NVME_LOG_HEALTH_INFORMATION:
571 	case NVME_LOG_FIRMWARE_SLOT:
572 	case NVME_LOG_CHANGED_NAMESPACE:
573 	case NVME_LOG_COMMAND_EFFECT:
574 	case NVME_LOG_RES_NOTIFICATION:
575 	case NVME_LOG_SANITIZE_STATUS:
576 		return (TRUE);
577 	}
578 
579 	return (FALSE);
580 }
581 
582 static uint32_t
583 nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
584 {
585 	uint32_t	log_page_size;
586 
587 	switch (page_id) {
588 	case NVME_LOG_ERROR:
589 		log_page_size = min(
590 		    sizeof(struct nvme_error_information_entry) *
591 		    (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
592 		break;
593 	case NVME_LOG_HEALTH_INFORMATION:
594 		log_page_size = sizeof(struct nvme_health_information_page);
595 		break;
596 	case NVME_LOG_FIRMWARE_SLOT:
597 		log_page_size = sizeof(struct nvme_firmware_page);
598 		break;
599 	case NVME_LOG_CHANGED_NAMESPACE:
600 		log_page_size = sizeof(struct nvme_ns_list);
601 		break;
602 	case NVME_LOG_COMMAND_EFFECT:
603 		log_page_size = sizeof(struct nvme_command_effects_page);
604 		break;
605 	case NVME_LOG_RES_NOTIFICATION:
606 		log_page_size = sizeof(struct nvme_res_notification_page);
607 		break;
608 	case NVME_LOG_SANITIZE_STATUS:
609 		log_page_size = sizeof(struct nvme_sanitize_status_page);
610 		break;
611 	default:
612 		log_page_size = 0;
613 		break;
614 	}
615 
616 	return (log_page_size);
617 }
618 
619 static void
620 nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
621     uint8_t state)
622 {
623 
624 	if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
625 		nvme_printf(ctrlr, "available spare space below threshold\n");
626 
627 	if (state & NVME_CRIT_WARN_ST_TEMPERATURE)
628 		nvme_printf(ctrlr, "temperature above threshold\n");
629 
630 	if (state & NVME_CRIT_WARN_ST_DEVICE_RELIABILITY)
631 		nvme_printf(ctrlr, "device reliability degraded\n");
632 
633 	if (state & NVME_CRIT_WARN_ST_READ_ONLY)
634 		nvme_printf(ctrlr, "media placed in read only mode\n");
635 
636 	if (state & NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP)
637 		nvme_printf(ctrlr, "volatile memory backup device failed\n");
638 
639 	if (state & NVME_CRIT_WARN_ST_RESERVED_MASK)
640 		nvme_printf(ctrlr,
641 		    "unknown critical warning(s): state = 0x%02x\n", state);
642 }
643 
644 static void
645 nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
646 {
647 	struct nvme_async_event_request		*aer = arg;
648 	struct nvme_health_information_page	*health_info;
649 	struct nvme_ns_list			*nsl;
650 	struct nvme_error_information_entry	*err;
651 	int i;
652 
653 	/*
654 	 * If the log page fetch for some reason completed with an error,
655 	 *  don't pass log page data to the consumers.  In practice, this case
656 	 *  should never happen.
657 	 */
658 	if (nvme_completion_is_error(cpl))
659 		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
660 		    aer->log_page_id, NULL, 0);
661 	else {
662 		/* Convert data to host endian */
663 		switch (aer->log_page_id) {
664 		case NVME_LOG_ERROR:
665 			err = (struct nvme_error_information_entry *)aer->log_page_buffer;
666 			for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
667 				nvme_error_information_entry_swapbytes(err++);
668 			break;
669 		case NVME_LOG_HEALTH_INFORMATION:
670 			nvme_health_information_page_swapbytes(
671 			    (struct nvme_health_information_page *)aer->log_page_buffer);
672 			break;
673 		case NVME_LOG_FIRMWARE_SLOT:
674 			nvme_firmware_page_swapbytes(
675 			    (struct nvme_firmware_page *)aer->log_page_buffer);
676 			break;
677 		case NVME_LOG_CHANGED_NAMESPACE:
678 			nvme_ns_list_swapbytes(
679 			    (struct nvme_ns_list *)aer->log_page_buffer);
680 			break;
681 		case NVME_LOG_COMMAND_EFFECT:
682 			nvme_command_effects_page_swapbytes(
683 			    (struct nvme_command_effects_page *)aer->log_page_buffer);
684 			break;
685 		case NVME_LOG_RES_NOTIFICATION:
686 			nvme_res_notification_page_swapbytes(
687 			    (struct nvme_res_notification_page *)aer->log_page_buffer);
688 			break;
689 		case NVME_LOG_SANITIZE_STATUS:
690 			nvme_sanitize_status_page_swapbytes(
691 			    (struct nvme_sanitize_status_page *)aer->log_page_buffer);
692 			break;
693 		case INTEL_LOG_TEMP_STATS:
694 			intel_log_temp_stats_swapbytes(
695 			    (struct intel_log_temp_stats *)aer->log_page_buffer);
696 			break;
697 		default:
698 			break;
699 		}
700 
701 		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
702 			health_info = (struct nvme_health_information_page *)
703 			    aer->log_page_buffer;
704 			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
705 			    health_info->critical_warning);
706 			/*
707 			 * Critical warnings reported through the
708 			 *  SMART/health log page are persistent, so
709 			 *  clear the associated bits in the async event
710 			 *  config so that we do not receive repeated
711 			 *  notifications for the same event.
712 			 */
713 			aer->ctrlr->async_event_config &=
714 			    ~health_info->critical_warning;
715 			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
716 			    aer->ctrlr->async_event_config, NULL, NULL);
717 		} else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE &&
718 		    !nvme_use_nvd) {
719 			nsl = (struct nvme_ns_list *)aer->log_page_buffer;
720 			for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
721 				if (nsl->ns[i] > NVME_MAX_NAMESPACES)
722 					break;
723 				nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
724 			}
725 		}
726 
727 
728 		/*
729 		 * Pass the cpl data from the original async event completion,
730 		 *  not the log page fetch.
731 		 */
732 		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
733 		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
734 	}
735 
736 	/*
737 	 * Repost another asynchronous event request to replace the one
738 	 *  that just completed.
739 	 */
740 	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
741 }
742 
743 static void
744 nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
745 {
746 	struct nvme_async_event_request	*aer = arg;
747 
748 	if (nvme_completion_is_error(cpl)) {
749 		/*
750 		 *  Do not retry failed async event requests.  This avoids
751 		 *  infinite loops where a new async event request is submitted
752 		 *  to replace the one just failed, only to fail again and
753 		 *  perpetuate the loop.
754 		 */
755 		return;
756 	}
757 
758 	/* Associated log page is in bits 23:16 of completion entry dw0. */
759 	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;
760 
761 	nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
762 	    " page 0x%02x)\n", (cpl->cdw0 & 0x07), (cpl->cdw0 & 0xFF00) >> 8,
763 	    aer->log_page_id);
764 
765 	if (is_log_page_id_valid(aer->log_page_id)) {
766 		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
767 		    aer->log_page_id);
768 		memcpy(&aer->cpl, cpl, sizeof(*cpl));
769 		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
770 		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
771 		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
772 		    aer);
773 		/* Wait to notify consumers until after log page is fetched. */
774 	} else {
775 		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
776 		    NULL, 0);
777 
778 		/*
779 		 * Repost another asynchronous event request to replace the one
780 		 *  that just completed.
781 		 */
782 		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
783 	}
784 }
785 
786 static void
787 nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
788     struct nvme_async_event_request *aer)
789 {
790 	struct nvme_request *req;
791 
792 	aer->ctrlr = ctrlr;
793 	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
794 	aer->req = req;
795 
796 	/*
797 	 * Disable timeout here, since asynchronous event requests should by
798 	 *  nature never be timed out.
799 	 */
800 	req->timeout = FALSE;
801 	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
802 	nvme_ctrlr_submit_admin_request(ctrlr, req);
803 }
804 
805 static void
806 nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
807 {
808 	struct nvme_completion_poll_status	status;
809 	struct nvme_async_event_request		*aer;
810 	uint32_t				i;
811 
812 	ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
813 	    NVME_CRIT_WARN_ST_DEVICE_RELIABILITY |
814 	    NVME_CRIT_WARN_ST_READ_ONLY |
815 	    NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP;
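	/*
	 * For NVMe 1.2 and later controllers, also enable Namespace Attribute
	 *  and Firmware Activation notices (bits 8 and 9 of the async event
	 *  configuration, i.e. the 0x300 below).
	 */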
816 	if (ctrlr->cdata.ver >= NVME_REV(1, 2))
817 		ctrlr->async_event_config |= 0x300;
818 
819 	status.done = 0;
820 	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
821 	    0, NULL, 0, nvme_completion_poll_cb, &status);
822 	while (!atomic_load_acq_int(&status.done))
823 		pause("nvme", 1);
824 	if (nvme_completion_is_error(&status.cpl) ||
825 	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
826 	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
827 		nvme_printf(ctrlr, "temperature threshold not supported\n");
828 	} else
829 		ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;
830 
831 	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
832 	    ctrlr->async_event_config, NULL, NULL);
833 
834 	/* aerl is a zero-based value, so we need to add 1 here. */
835 	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
836 
837 	for (i = 0; i < ctrlr->num_aers; i++) {
838 		aer = &ctrlr->aer[i];
839 		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
840 	}
841 }
842 
843 static void
844 nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
845 {
846 
847 	ctrlr->int_coal_time = 0;
848 	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
849 	    &ctrlr->int_coal_time);
850 
851 	ctrlr->int_coal_threshold = 0;
852 	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
853 	    &ctrlr->int_coal_threshold);
854 
855 	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
856 	    ctrlr->int_coal_threshold, NULL, NULL);
857 }
858 
859 static void
860 nvme_ctrlr_start(void *ctrlr_arg)
861 {
862 	struct nvme_controller *ctrlr = ctrlr_arg;
863 	uint32_t old_num_io_queues;
864 	int i;
865 
866 	/*
867 	 * Only reset adminq here when we are restarting the
868 	 *  controller after a reset.  During initialization,
869 	 *  we have already submitted admin commands to get
870 	 *  the number of I/O queues supported, so cannot reset
871 	 *  the adminq again here.
872 	 */
873 	if (ctrlr->is_resetting) {
874 		nvme_qpair_reset(&ctrlr->adminq);
875 	}
876 
877 	for (i = 0; i < ctrlr->num_io_queues; i++)
878 		nvme_qpair_reset(&ctrlr->ioq[i]);
879 
880 	nvme_admin_qpair_enable(&ctrlr->adminq);
881 
882 	if (nvme_ctrlr_identify(ctrlr) != 0) {
883 		nvme_ctrlr_fail(ctrlr);
884 		return;
885 	}
886 
887 	/*
888 	 * The number of qpairs is determined during controller initialization,
889 	 *  including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
890 	 *  HW limit.  We call SET_FEATURES again here so that it gets called
891 	 *  after any reset for controllers that depend on the driver to
892 	 *  explicitly specify how many queues it will use.  This value should
893 	 *  never change between resets, so panic if somehow that does happen.
894 	 */
895 	if (ctrlr->is_resetting) {
896 		old_num_io_queues = ctrlr->num_io_queues;
897 		if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
898 			nvme_ctrlr_fail(ctrlr);
899 			return;
900 		}
901 
902 		if (old_num_io_queues != ctrlr->num_io_queues) {
903 			panic("num_io_queues changed from %u to %u",
904 			      old_num_io_queues, ctrlr->num_io_queues);
905 		}
906 	}
907 
908 	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
909 		nvme_ctrlr_fail(ctrlr);
910 		return;
911 	}
912 
913 	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
914 		nvme_ctrlr_fail(ctrlr);
915 		return;
916 	}
917 
918 	nvme_ctrlr_configure_aer(ctrlr);
919 	nvme_ctrlr_configure_int_coalescing(ctrlr);
920 
921 	for (i = 0; i < ctrlr->num_io_queues; i++)
922 		nvme_io_qpair_enable(&ctrlr->ioq[i]);
923 }
924 
925 void
926 nvme_ctrlr_start_config_hook(void *arg)
927 {
928 	struct nvme_controller *ctrlr = arg;
929 
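	/*
	 * This runs once from the config intrhook after interrupts are
	 *  available: bring up the admin queue, size and construct the I/O
	 *  queues, then start the controller.
	 */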
930 	nvme_qpair_reset(&ctrlr->adminq);
931 	nvme_admin_qpair_enable(&ctrlr->adminq);
932 
933 	if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
934 	    nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
935 		nvme_ctrlr_start(ctrlr);
936 	else
937 		nvme_ctrlr_fail(ctrlr);
938 
939 	nvme_sysctl_initialize_ctrlr(ctrlr);
940 	config_intrhook_disestablish(&ctrlr->config_hook);
941 
942 	ctrlr->is_initialized = 1;
943 	nvme_notify_new_controller(ctrlr);
944 }
945 
946 static void
947 nvme_ctrlr_reset_task(void *arg, int pending)
948 {
949 	struct nvme_controller	*ctrlr = arg;
950 	int			status;
951 
952 	nvme_printf(ctrlr, "resetting controller\n");
953 	status = nvme_ctrlr_hw_reset(ctrlr);
954 	/*
955 	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
956 	 *  handlers on this CPU that were blocked on a qpair lock. We want
957 	 *  all nvme interrupts completed before proceeding with restarting the
958 	 *  controller.
959 	 *
960 	 * XXX - any way to guarantee the interrupt handlers have quiesced?
961 	 */
962 	pause("nvmereset", hz / 10);
963 	if (status == 0)
964 		nvme_ctrlr_start(ctrlr);
965 	else
966 		nvme_ctrlr_fail(ctrlr);
967 
968 	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
969 }
970 
971 /*
972  * Poll all the queues enabled on the device for completion.
973  */
974 void
975 nvme_ctrlr_poll(struct nvme_controller *ctrlr)
976 {
977 	int i;
978 
979 	nvme_qpair_process_completions(&ctrlr->adminq);
980 
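	/* Only poll I/O queues that have been fully constructed. */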
981 	for (i = 0; i < ctrlr->num_io_queues; i++)
982 		if (ctrlr->ioq && ctrlr->ioq[i].cpl)
983 			nvme_qpair_process_completions(&ctrlr->ioq[i]);
984 }
985 
986 /*
987  * Poll the single-vector interrupt case: num_io_queues will be 1 and
988  * there's only a single vector. While we're polling, we mask further
989  * interrupts in the controller.
990  */
991 void
992 nvme_ctrlr_intx_handler(void *arg)
993 {
994 	struct nvme_controller *ctrlr = arg;
995 
996 	nvme_mmio_write_4(ctrlr, intms, 1);
997 	nvme_ctrlr_poll(ctrlr);
998 	nvme_mmio_write_4(ctrlr, intmc, 1);
999 }
1000 
1001 static int
1002 nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
1003 {
1004 
1005 	ctrlr->msix_enabled = 0;
1006 	ctrlr->num_io_queues = 1;
1007 	ctrlr->num_cpus_per_ioq = mp_ncpus;
1008 	ctrlr->rid = 0;
1009 	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
1010 	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);
1011 
1012 	if (ctrlr->res == NULL) {
1013 		nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
1014 		return (ENOMEM);
1015 	}
1016 
1017 	bus_setup_intr(ctrlr->dev, ctrlr->res,
1018 	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
1019 	    ctrlr, &ctrlr->tag);
1020 
1021 	if (ctrlr->tag == NULL) {
1022 		nvme_printf(ctrlr, "unable to setup intx handler\n");
1023 		return (ENOMEM);
1024 	}
1025 
1026 	return (0);
1027 }
1028 
1029 static void
1030 nvme_pt_done(void *arg, const struct nvme_completion *cpl)
1031 {
1032 	struct nvme_pt_command *pt = arg;
1033 	struct mtx *mtx = pt->driver_lock;
1034 	uint16_t status;
1035 
1036 	bzero(&pt->cpl, sizeof(pt->cpl));
1037 	pt->cpl.cdw0 = cpl->cdw0;
1038 
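	/* Return the completion status with the phase bit cleared. */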
1039 	status = cpl->status;
1040 	status &= ~NVME_STATUS_P_MASK;
1041 	pt->cpl.status = status;
1042 
1043 	mtx_lock(mtx);
1044 	pt->driver_lock = NULL;
1045 	wakeup(pt);
1046 	mtx_unlock(mtx);
1047 }
1048 
1049 int
1050 nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
1051     struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
1052     int is_admin_cmd)
1053 {
1054 	struct nvme_request	*req;
1055 	struct mtx		*mtx;
1056 	struct buf		*buf = NULL;
1057 	int			ret = 0;
1058 	vm_offset_t		addr, end;
1059 
1060 	if (pt->len > 0) {
1061 		/*
1062 		 * vmapbuf calls vm_fault_quick_hold_pages which only maps full
1063 		 * pages. Ensure this request has fewer than MAXPHYS bytes when
1064 		 * extended to full pages.
1065 		 */
1066 		addr = (vm_offset_t)pt->buf;
1067 		end = round_page(addr + pt->len);
1068 		addr = trunc_page(addr);
1069 		if (end - addr > MAXPHYS)
1070 			return EIO;
1071 
1072 		if (pt->len > ctrlr->max_xfer_size) {
1073 			nvme_printf(ctrlr, "pt->len (%d) "
1074 			    "exceeds max_xfer_size (%d)\n", pt->len,
1075 			    ctrlr->max_xfer_size);
1076 			return EIO;
1077 		}
1078 		if (is_user_buffer) {
1079 			/*
1080 			 * Ensure the user buffer is wired for the duration of
1081 			 *  this passthrough command.
1082 			 */
1083 			PHOLD(curproc);
1084 			buf = uma_zalloc(pbuf_zone, M_WAITOK);
1085 			buf->b_data = pt->buf;
1086 			buf->b_bufsize = pt->len;
1087 			buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
1088 			if (vmapbuf(buf, 1) < 0) {
1089 				ret = EFAULT;
1090 				goto err;
1091 			}
1092 			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
1093 			    nvme_pt_done, pt);
1094 		} else
1095 			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
1096 			    nvme_pt_done, pt);
1097 	} else
1098 		req = nvme_allocate_request_null(nvme_pt_done, pt);
1099 
1100 	/* Assume userspace already converted to little-endian */
1101 	req->cmd.opc = pt->cmd.opc;
1102 	req->cmd.fuse = pt->cmd.fuse;
1103 	req->cmd.rsvd2 = pt->cmd.rsvd2;
1104 	req->cmd.rsvd3 = pt->cmd.rsvd3;
1105 	req->cmd.cdw10 = pt->cmd.cdw10;
1106 	req->cmd.cdw11 = pt->cmd.cdw11;
1107 	req->cmd.cdw12 = pt->cmd.cdw12;
1108 	req->cmd.cdw13 = pt->cmd.cdw13;
1109 	req->cmd.cdw14 = pt->cmd.cdw14;
1110 	req->cmd.cdw15 = pt->cmd.cdw15;
1111 
1112 	req->cmd.nsid = htole32(nsid);
1113 
1114 	mtx = mtx_pool_find(mtxpool_sleep, pt);
1115 	pt->driver_lock = mtx;
1116 
1117 	if (is_admin_cmd)
1118 		nvme_ctrlr_submit_admin_request(ctrlr, req);
1119 	else
1120 		nvme_ctrlr_submit_io_request(ctrlr, req);
1121 
1122 	mtx_lock(mtx);
1123 	while (pt->driver_lock != NULL)
1124 		mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
1125 	mtx_unlock(mtx);
1126 
1127 err:
1128 	if (buf != NULL) {
1129 		uma_zfree(pbuf_zone, buf);
1130 		PRELE(curproc);
1131 	}
1132 
1133 	return (ret);
1134 }
1135 
1136 static int
1137 nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
1138     struct thread *td)
1139 {
1140 	struct nvme_controller			*ctrlr;
1141 	struct nvme_pt_command			*pt;
1142 
1143 	ctrlr = cdev->si_drv1;
1144 
1145 	switch (cmd) {
1146 	case NVME_RESET_CONTROLLER:
1147 		nvme_ctrlr_reset(ctrlr);
1148 		break;
1149 	case NVME_PASSTHROUGH_CMD:
1150 		pt = (struct nvme_pt_command *)arg;
1151 		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
1152 		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
1153 	case NVME_GET_NSID:
1154 	{
1155 		struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;
1156 		strncpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
1157 		    sizeof(gnsid->cdev));
1158 		gnsid->nsid = 0;
1159 		break;
1160 	}
1161 	default:
1162 		return (ENOTTY);
1163 	}
1164 
1165 	return (0);
1166 }
1167 
1168 static struct cdevsw nvme_ctrlr_cdevsw = {
1169 	.d_version =	D_VERSION,
1170 	.d_flags =	0,
1171 	.d_ioctl =	nvme_ctrlr_ioctl
1172 };
1173 
1174 static void
1175 nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
1176 {
1177 	device_t	dev;
1178 	int		per_cpu_io_queues;
1179 	int		min_cpus_per_ioq;
1180 	int		num_vectors_requested, num_vectors_allocated;
1181 	int		num_vectors_available;
1182 
1183 	dev = ctrlr->dev;
1184 	min_cpus_per_ioq = 1;
1185 	TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);
1186 
1187 	if (min_cpus_per_ioq < 1) {
1188 		min_cpus_per_ioq = 1;
1189 	} else if (min_cpus_per_ioq > mp_ncpus) {
1190 		min_cpus_per_ioq = mp_ncpus;
1191 	}
1192 
1193 	per_cpu_io_queues = 1;
1194 	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
1195 
1196 	if (per_cpu_io_queues == 0) {
1197 		min_cpus_per_ioq = mp_ncpus;
1198 	}
1199 
1200 	ctrlr->force_intx = 0;
1201 	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);
1202 
1203 	/*
1204 	 * FreeBSD currently cannot allocate more than about 190 vectors at
1205 	 *  boot, meaning that systems with high core count and many devices
1206 	 *  requesting per-CPU interrupt vectors will not get their full
1207 	 *  allotment.  So first, try to allocate as many as we may need to
1208 	 *  understand what is available, then immediately release them.
1209 	 *  Then figure out how many of those we will actually use, based on
1210 	 *  assigning an equal number of cores to each I/O queue.
1211 	 */
1212 
1213 	/* One vector per core for I/O queues, plus one vector for the admin queue. */
1214 	num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
1215 	if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
1216 		num_vectors_available = 0;
1217 	}
1218 	pci_release_msi(dev);
1219 
1220 	if (ctrlr->force_intx || num_vectors_available < 2) {
1221 		nvme_ctrlr_configure_intx(ctrlr);
1222 		return;
1223 	}
1224 
1225 	/*
1226 	 * Do not use all vectors for I/O queues - one must be saved for the
1227 	 *  admin queue.
1228 	 */
1229 	ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
1230 	    howmany(mp_ncpus, num_vectors_available - 1));
1231 
1232 	ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
1233 	num_vectors_requested = ctrlr->num_io_queues + 1;
1234 	num_vectors_allocated = num_vectors_requested;
1235 
1236 	/*
1237 	 * Now just allocate the number of vectors we need.  This should
1238 	 *  succeed, since the earlier pci_alloc_msix() call successfully
1239 	 *  returned at least this many vectors.  Just to be safe, revert
1240 	 *  to INTx if anything goes wrong.
1241 	 */
1242 	if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
1243 		nvme_ctrlr_configure_intx(ctrlr);
1244 		return;
1245 	}
1246 
1247 	if (num_vectors_allocated < num_vectors_requested) {
1248 		pci_release_msi(dev);
1249 		nvme_ctrlr_configure_intx(ctrlr);
1250 		return;
1251 	}
1252 
1253 	ctrlr->msix_enabled = 1;
1254 }
1255 
1256 int
1257 nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
1258 {
1259 	struct make_dev_args	md_args;
1260 	uint32_t	cap_lo;
1261 	uint32_t	cap_hi;
1262 	uint32_t	to;
1263 	uint8_t		dstrd;
1264 	uint8_t		mpsmin;
1265 	int		status, timeout_period;
1266 
1267 	ctrlr->dev = dev;
1268 
1269 	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
1270 
1271 	status = nvme_ctrlr_allocate_bar(ctrlr);
1272 
1273 	if (status != 0)
1274 		return (status);
1275 
1276 	/*
1277 	 * Software emulators may set the doorbell stride to something
1278 	 *  other than zero, but this driver is not set up to handle that.
1279 	 */
1280 	cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
1281 	dstrd = NVME_CAP_HI_DSTRD(cap_hi);
1282 	if (dstrd != 0)
1283 		return (ENXIO);
1284 
1285 	mpsmin = NVME_CAP_HI_MPSMIN(cap_hi);
1286 	ctrlr->min_page_size = 1 << (12 + mpsmin);
1287 
1288 	/* Get ready timeout value from controller, in units of 500ms. */
1289 	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
1290 	to = NVME_CAP_LO_TO(cap_lo) + 1;
1291 	ctrlr->ready_timeout_in_ms = to * 500;
1292 
1293 	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
1294 	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
1295 	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
1296 	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
1297 	ctrlr->timeout_period = timeout_period;
1298 
1299 	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
1300 	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);
1301 
1302 	ctrlr->enable_aborts = 0;
1303 	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
1304 
1305 	nvme_ctrlr_setup_interrupts(ctrlr);
1306 
1307 	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
1308 	if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
1309 		return (ENXIO);
1310 
1311 	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
1312 	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
1313 	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");
1314 
1315 	ctrlr->is_resetting = 0;
1316 	ctrlr->is_initialized = 0;
1317 	ctrlr->notification_sent = 0;
1318 	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
1319 	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
1320 	STAILQ_INIT(&ctrlr->fail_req);
1321 	ctrlr->is_failed = FALSE;
1322 
1323 	make_dev_args_init(&md_args);
1324 	md_args.mda_devsw = &nvme_ctrlr_cdevsw;
1325 	md_args.mda_uid = UID_ROOT;
1326 	md_args.mda_gid = GID_WHEEL;
1327 	md_args.mda_mode = 0600;
1328 	md_args.mda_unit = device_get_unit(dev);
1329 	md_args.mda_si_drv1 = (void *)ctrlr;
1330 	status = make_dev_s(&md_args, &ctrlr->cdev, "nvme%d",
1331 	    device_get_unit(dev));
1332 	if (status != 0)
1333 		return (ENXIO);
1334 
1335 	return (0);
1336 }
1337 
1338 void
1339 nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
1340 {
1341 	int				i;
1342 
1343 	if (ctrlr->resource == NULL)
1344 		goto nores;
1345 
1346 	nvme_notify_fail_consumers(ctrlr);
1347 
1348 	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
1349 		nvme_ns_destruct(&ctrlr->ns[i]);
1350 
1351 	if (ctrlr->cdev)
1352 		destroy_dev(ctrlr->cdev);
1353 
1354 	nvme_ctrlr_destroy_qpairs(ctrlr);
1355 	for (i = 0; i < ctrlr->num_io_queues; i++) {
1356 		nvme_io_qpair_destroy(&ctrlr->ioq[i]);
1357 	}
1358 	free(ctrlr->ioq, M_NVME);
1359 
1360 	nvme_admin_qpair_destroy(&ctrlr->adminq);
1361 
1362 	/*
1363 	 *  Notify the controller of a shutdown, even though this is due to
1364 	 *   a driver unload, not a system shutdown (this path is not invoked
1365 	 *   during shutdown).  This ensures the controller receives a
1366 	 *   shutdown notification in case the system is shut down before
1367 	 *   reloading the driver.
1368 	 */
1369 	nvme_ctrlr_shutdown(ctrlr);
1370 
1371 	nvme_ctrlr_disable(ctrlr);
1372 
1373 	if (ctrlr->taskqueue)
1374 		taskqueue_free(ctrlr->taskqueue);
1375 
1376 	if (ctrlr->tag)
1377 		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
1378 
1379 	if (ctrlr->res)
1380 		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
1381 		    rman_get_rid(ctrlr->res), ctrlr->res);
1382 
1383 	if (ctrlr->msix_enabled)
1384 		pci_release_msi(dev);
1385 
1386 	if (ctrlr->bar4_resource != NULL) {
1387 		bus_release_resource(dev, SYS_RES_MEMORY,
1388 		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
1389 	}
1390 
1391 	bus_release_resource(dev, SYS_RES_MEMORY,
1392 	    ctrlr->resource_id, ctrlr->resource);
1393 
1394 nores:
1395 	mtx_destroy(&ctrlr->lock);
1396 }
1397 
1398 void
1399 nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
1400 {
1401 	uint32_t	cc;
1402 	uint32_t	csts;
1403 	int		ticks = 0;
1404 
1405 	cc = nvme_mmio_read_4(ctrlr, cc);
1406 	cc &= ~(NVME_CC_REG_SHN_MASK << NVME_CC_REG_SHN_SHIFT);
1407 	cc |= NVME_SHN_NORMAL << NVME_CC_REG_SHN_SHIFT;
1408 	nvme_mmio_write_4(ctrlr, cc, cc);
1409 
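	/*
	 * Poll CSTS.SHST until the controller reports that shutdown processing
	 *  is complete, waiting at most 5 seconds.
	 */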
1410 	csts = nvme_mmio_read_4(ctrlr, csts);
1411 	while ((NVME_CSTS_GET_SHST(csts) != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) {
1412 		pause("nvme shn", 1);
1413 		csts = nvme_mmio_read_4(ctrlr, csts);
1414 	}
1415 	if (NVME_CSTS_GET_SHST(csts) != NVME_SHST_COMPLETE)
1416 		nvme_printf(ctrlr, "did not complete shutdown within 5 seconds "
1417 		    "of notification\n");
1418 }
1419 
1420 void
1421 nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
1422     struct nvme_request *req)
1423 {
1424 
1425 	nvme_qpair_submit_request(&ctrlr->adminq, req);
1426 }
1427 
1428 void
1429 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
1430     struct nvme_request *req)
1431 {
1432 	struct nvme_qpair       *qpair;
1433 
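	/*
	 * Each I/O queue serves a contiguous block of num_cpus_per_ioq CPUs;
	 *  submit on the queue that serves the current CPU.
	 */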
1434 	qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
1435 	nvme_qpair_submit_request(qpair, req);
1436 }
1437 
1438 device_t
1439 nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
1440 {
1441 
1442 	return (ctrlr->dev);
1443 }
1444 
1445 const struct nvme_controller_data *
1446 nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
1447 {
1448 
1449 	return (&ctrlr->cdata);
1450 }
1451