xref: /freebsd/sys/dev/nvme/nvme_ctrlr.c (revision 4f52dfbb8d6c4d446500c5b097e3806ec219fbd4)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2012-2016 Intel Corporation
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include "opt_cam.h"
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/buf.h>
37 #include <sys/bus.h>
38 #include <sys/conf.h>
39 #include <sys/ioccom.h>
40 #include <sys/proc.h>
41 #include <sys/smp.h>
42 #include <sys/uio.h>
43 #include <sys/endian.h>
44 
45 #include <dev/pci/pcireg.h>
46 #include <dev/pci/pcivar.h>
47 
48 #include "nvme_private.h"
49 
50 #define B4_CHK_RDY_DELAY_MS	2300		/* work around controller bug */
51 
52 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
53 						struct nvme_async_event_request *aer);
54 static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);
55 
56 static int
57 nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
58 {
59 
60 	ctrlr->resource_id = PCIR_BAR(0);
61 
62 	ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
63 	    &ctrlr->resource_id, RF_ACTIVE);
64 
65 	if(ctrlr->resource == NULL) {
66 		nvme_printf(ctrlr, "unable to allocate pci resource\n");
67 		return (ENOMEM);
68 	}
69 
70 	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
71 	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
72 	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;
73 
74 	/*
75 	 * The NVMe spec allows for the MSI-X table to be placed behind
76 	 *  BAR 4/5, separate from the control/doorbell registers.  Always
77 	 *  try to map this bar, because it must be mapped prior to calling
78 	 *  pci_alloc_msix().  If the table isn't behind BAR 4/5,
79 	 *  bus_alloc_resource() will just return NULL which is OK.
80 	 */
81 	ctrlr->bar4_resource_id = PCIR_BAR(4);
82 	ctrlr->bar4_resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
83 	    &ctrlr->bar4_resource_id, RF_ACTIVE);
84 
85 	return (0);
86 }
87 
88 static int
89 nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
90 {
91 	struct nvme_qpair	*qpair;
92 	uint32_t		num_entries;
93 	int			error;
94 
95 	qpair = &ctrlr->adminq;
96 
97 	num_entries = NVME_ADMIN_ENTRIES;
98 	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
99 	/*
100 	 * If admin_entries was overridden to an invalid value, revert it
101 	 *  back to our default value.
102 	 */
103 	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
104 	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
105 		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
106 		    "specified\n", num_entries);
107 		num_entries = NVME_ADMIN_ENTRIES;
108 	}
109 
110 	/*
111 	 * The admin queue's max xfer size is treated differently than the
112 	 *  max I/O xfer size.  16KB is sufficient here - maybe even less?
113 	 */
114 	error = nvme_qpair_construct(qpair,
115 				     0, /* qpair ID */
116 				     0, /* vector */
117 				     num_entries,
118 				     NVME_ADMIN_TRACKERS,
119 				     ctrlr);
120 	return (error);
121 }
122 
123 static int
124 nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
125 {
126 	struct nvme_qpair	*qpair;
127 	uint32_t		cap_lo;
128 	uint16_t		mqes;
129 	int			i, error, num_entries, num_trackers;
130 
131 	num_entries = NVME_IO_ENTRIES;
132 	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
133 
134 	/*
135 	 * NVMe spec sets a hard limit of 64K max entries, but
136 	 *  devices may specify a smaller limit, so we need to check
137 	 *  the MQES field in the capabilities register.
138 	 */
139 	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
140 	mqes = (cap_lo >> NVME_CAP_LO_REG_MQES_SHIFT) & NVME_CAP_LO_REG_MQES_MASK;
141 	num_entries = min(num_entries, mqes + 1);
142 
143 	num_trackers = NVME_IO_TRACKERS;
144 	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);
145 
146 	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
147 	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
148 	/*
149 	 * No need to have more trackers than entries in the submit queue.
150 	 *  Note also that for a queue size of N, we can only have (N-1)
151 	 *  commands outstanding, hence the "-1" here.
152 	 */
153 	num_trackers = min(num_trackers, (num_entries-1));
154 
155 	/*
156 	 * Our best estimate for the maximum number of I/Os that we should
157 	 * noramlly have in flight at one time. This should be viewed as a hint,
158 	 * not a hard limit and will need to be revisitted when the upper layers
159 	 * of the storage system grows multi-queue support.
160 	 */
161 	ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;
162 
163 	/*
164 	 * This was calculated previously when setting up interrupts, but
165 	 *  a controller could theoretically support fewer I/O queues than
166 	 *  MSI-X vectors.  So calculate again here just to be safe.
167 	 */
168 	ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);
169 
170 	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
171 	    M_NVME, M_ZERO | M_WAITOK);
172 
173 	for (i = 0; i < ctrlr->num_io_queues; i++) {
174 		qpair = &ctrlr->ioq[i];
175 
176 		/*
177 		 * Admin queue has ID=0. IO queues start at ID=1 -
178 		 *  hence the 'i+1' here.
179 		 *
180 		 * For I/O queues, use the controller-wide max_xfer_size
181 		 *  calculated in nvme_attach().
182 		 */
183 		error = nvme_qpair_construct(qpair,
184 				     i+1, /* qpair ID */
185 				     ctrlr->msix_enabled ? i+1 : 0, /* vector */
186 				     num_entries,
187 				     num_trackers,
188 				     ctrlr);
189 		if (error)
190 			return (error);
191 
192 		/*
193 		 * Do not bother binding interrupts if we only have one I/O
194 		 *  interrupt thread for this controller.
195 		 */
196 		if (ctrlr->num_io_queues > 1)
197 			bus_bind_intr(ctrlr->dev, qpair->res,
198 			    i * ctrlr->num_cpus_per_ioq);
199 	}
200 
201 	return (0);
202 }
203 
204 static void
205 nvme_ctrlr_fail(struct nvme_controller *ctrlr)
206 {
207 	int i;
208 
209 	ctrlr->is_failed = TRUE;
210 	nvme_qpair_fail(&ctrlr->adminq);
211 	if (ctrlr->ioq != NULL) {
212 		for (i = 0; i < ctrlr->num_io_queues; i++)
213 			nvme_qpair_fail(&ctrlr->ioq[i]);
214 	}
215 	nvme_notify_fail_consumers(ctrlr);
216 }
217 
218 void
219 nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
220     struct nvme_request *req)
221 {
222 
223 	mtx_lock(&ctrlr->lock);
224 	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
225 	mtx_unlock(&ctrlr->lock);
226 	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
227 }
228 
229 static void
230 nvme_ctrlr_fail_req_task(void *arg, int pending)
231 {
232 	struct nvme_controller	*ctrlr = arg;
233 	struct nvme_request	*req;
234 
235 	mtx_lock(&ctrlr->lock);
236 	while ((req = STAILQ_FIRST(&ctrlr->fail_req)) != NULL) {
237 		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
238 		mtx_unlock(&ctrlr->lock);
239 		nvme_qpair_manual_complete_request(req->qpair, req,
240 		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
241 		mtx_lock(&ctrlr->lock);
242 	}
243 	mtx_unlock(&ctrlr->lock);
244 }
245 
246 static int
247 nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
248 {
249 	int ms_waited;
250 	uint32_t csts;
251 
252 	csts = nvme_mmio_read_4(ctrlr, csts);
253 
254 	ms_waited = 0;
255 	while (((csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK) != desired_val) {
256 		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
257 			nvme_printf(ctrlr, "controller ready did not become %d "
258 			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
259 			return (ENXIO);
260 		}
261 		DELAY(1000);
262 		csts = nvme_mmio_read_4(ctrlr, csts);
263 	}
264 
265 	return (0);
266 }
267 
268 static int
269 nvme_ctrlr_disable(struct nvme_controller *ctrlr)
270 {
271 	uint32_t cc;
272 	uint32_t csts;
273 	uint8_t  en, rdy;
274 	int err;
275 
276 	cc = nvme_mmio_read_4(ctrlr, cc);
277 	csts = nvme_mmio_read_4(ctrlr, csts);
278 
279 	en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
280 	rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;
281 
282 	/*
283 	 * Per 3.1.5 in NVME 1.3 spec, transitioning CC.EN from 0 to 1
284 	 * when CSTS.RDY is 1 or transitioning CC.EN from 1 to 0 when
285 	 * CSTS.RDY is 0 "has undefined results" So make sure that CSTS.RDY
286 	 * isn't the desired value. Short circuit if we're already disabled.
287 	 */
288 	if (en == 1) {
289 		if (rdy == 0) {
290 			/* EN == 1, wait for  RDY == 1 or fail */
291 			err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
292 			if (err != 0)
293 				return (err);
294 		}
295 	} else {
296 		/* EN == 0 already wait for RDY == 0 */
297 		if (rdy == 0)
298 			return (0);
299 		else
300 			return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
301 	}
302 
303 	cc &= ~NVME_CC_REG_EN_MASK;
304 	nvme_mmio_write_4(ctrlr, cc, cc);
305 	/*
306 	 * Some drives have issues with accessing the mmio after we
307 	 * disable, so delay for a bit after we write the bit to
308 	 * cope with these issues.
309 	 */
310 	if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
311 		pause("nvmeR", B4_CHK_RDY_DELAY_MS * hz / 1000);
312 	return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
313 }
314 
315 static int
316 nvme_ctrlr_enable(struct nvme_controller *ctrlr)
317 {
318 	uint32_t	cc;
319 	uint32_t	csts;
320 	uint32_t	aqa;
321 	uint32_t	qsize;
322 	uint8_t		en, rdy;
323 	int		err;
324 
325 	cc = nvme_mmio_read_4(ctrlr, cc);
326 	csts = nvme_mmio_read_4(ctrlr, csts);
327 
328 	en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
329 	rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;
330 
331 	/*
332 	 * See note in nvme_ctrlr_disable. Short circuit if we're already enabled.
333 	 */
334 	if (en == 1) {
335 		if (rdy == 1)
336 			return (0);
337 		else
338 			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
339 	} else {
340 		/* EN == 0 already wait for RDY == 0 or fail */
341 		err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
342 		if (err != 0)
343 			return (err);
344 	}
345 
346 	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
347 	DELAY(5000);
348 	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
349 	DELAY(5000);
350 
351 	/* acqs and asqs are 0-based. */
352 	qsize = ctrlr->adminq.num_entries - 1;
353 
354 	aqa = 0;
355 	aqa = (qsize & NVME_AQA_REG_ACQS_MASK) << NVME_AQA_REG_ACQS_SHIFT;
356 	aqa |= (qsize & NVME_AQA_REG_ASQS_MASK) << NVME_AQA_REG_ASQS_SHIFT;
357 	nvme_mmio_write_4(ctrlr, aqa, aqa);
358 	DELAY(5000);
359 
360 	/* Initialization values for CC */
361 	cc = 0;
362 	cc |= 1 << NVME_CC_REG_EN_SHIFT;
363 	cc |= 0 << NVME_CC_REG_CSS_SHIFT;
364 	cc |= 0 << NVME_CC_REG_AMS_SHIFT;
365 	cc |= 0 << NVME_CC_REG_SHN_SHIFT;
366 	cc |= 6 << NVME_CC_REG_IOSQES_SHIFT; /* SQ entry size == 64 == 2^6 */
367 	cc |= 4 << NVME_CC_REG_IOCQES_SHIFT; /* CQ entry size == 16 == 2^4 */
368 
369 	/* This evaluates to 0, which is according to spec. */
370 	cc |= (PAGE_SIZE >> 13) << NVME_CC_REG_MPS_SHIFT;
371 
372 	nvme_mmio_write_4(ctrlr, cc, cc);
373 
374 	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
375 }
376 
377 int
378 nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
379 {
380 	int i, err;
381 
382 	nvme_admin_qpair_disable(&ctrlr->adminq);
383 	/*
384 	 * I/O queues are not allocated before the initial HW
385 	 *  reset, so do not try to disable them.  Use is_initialized
386 	 *  to determine if this is the initial HW reset.
387 	 */
388 	if (ctrlr->is_initialized) {
389 		for (i = 0; i < ctrlr->num_io_queues; i++)
390 			nvme_io_qpair_disable(&ctrlr->ioq[i]);
391 	}
392 
393 	DELAY(100*1000);
394 
395 	err = nvme_ctrlr_disable(ctrlr);
396 	if (err != 0)
397 		return err;
398 	return (nvme_ctrlr_enable(ctrlr));
399 }
400 
401 void
402 nvme_ctrlr_reset(struct nvme_controller *ctrlr)
403 {
404 	int cmpset;
405 
406 	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
407 
408 	if (cmpset == 0 || ctrlr->is_failed)
409 		/*
410 		 * Controller is already resetting or has failed.  Return
411 		 *  immediately since there is no need to kick off another
412 		 *  reset in these cases.
413 		 */
414 		return;
415 
416 	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
417 }
418 
419 static int
420 nvme_ctrlr_identify(struct nvme_controller *ctrlr)
421 {
422 	struct nvme_completion_poll_status	status;
423 
424 	status.done = 0;
425 	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
426 	    nvme_completion_poll_cb, &status);
427 	while (!atomic_load_acq_int(&status.done))
428 		pause("nvme", 1);
429 	if (nvme_completion_is_error(&status.cpl)) {
430 		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
431 		return (ENXIO);
432 	}
433 
434 	/* Convert data to host endian */
435 	nvme_controller_data_swapbytes(&ctrlr->cdata);
436 
437 	/*
438 	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
439 	 *  controller supports.
440 	 */
441 	if (ctrlr->cdata.mdts > 0)
442 		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
443 		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
444 
445 	return (0);
446 }
447 
448 static int
449 nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
450 {
451 	struct nvme_completion_poll_status	status;
452 	int					cq_allocated, sq_allocated;
453 
454 	status.done = 0;
455 	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
456 	    nvme_completion_poll_cb, &status);
457 	while (!atomic_load_acq_int(&status.done))
458 		pause("nvme", 1);
459 	if (nvme_completion_is_error(&status.cpl)) {
460 		nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
461 		return (ENXIO);
462 	}
463 
464 	/*
465 	 * Data in cdw0 is 0-based.
466 	 * Lower 16-bits indicate number of submission queues allocated.
467 	 * Upper 16-bits indicate number of completion queues allocated.
468 	 */
469 	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
470 	cq_allocated = (status.cpl.cdw0 >> 16) + 1;
471 
472 	/*
473 	 * Controller may allocate more queues than we requested,
474 	 *  so use the minimum of the number requested and what was
475 	 *  actually allocated.
476 	 */
477 	ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
478 	ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
479 
480 	return (0);
481 }
482 
483 static int
484 nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
485 {
486 	struct nvme_completion_poll_status	status;
487 	struct nvme_qpair			*qpair;
488 	int					i;
489 
490 	for (i = 0; i < ctrlr->num_io_queues; i++) {
491 		qpair = &ctrlr->ioq[i];
492 
493 		status.done = 0;
494 		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
495 		    nvme_completion_poll_cb, &status);
496 		while (!atomic_load_acq_int(&status.done))
497 			pause("nvme", 1);
498 		if (nvme_completion_is_error(&status.cpl)) {
499 			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
500 			return (ENXIO);
501 		}
502 
503 		status.done = 0;
504 		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
505 		    nvme_completion_poll_cb, &status);
506 		while (!atomic_load_acq_int(&status.done))
507 			pause("nvme", 1);
508 		if (nvme_completion_is_error(&status.cpl)) {
509 			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
510 			return (ENXIO);
511 		}
512 	}
513 
514 	return (0);
515 }
516 
517 static int
518 nvme_ctrlr_destroy_qpair(struct nvme_controller *ctrlr, struct nvme_qpair *qpair)
519 {
520 	struct nvme_completion_poll_status	status;
521 
522 	status.done = 0;
523 	nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
524 	    nvme_completion_poll_cb, &status);
525 	while (!atomic_load_acq_int(&status.done))
526 		pause("nvme", 1);
527 	if (nvme_completion_is_error(&status.cpl)) {
528 		nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
529 		return (ENXIO);
530 	}
531 
532 	status.done = 0;
533 	nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
534 	    nvme_completion_poll_cb, &status);
535 	while (!atomic_load_acq_int(&status.done))
536 		pause("nvme", 1);
537 	if (nvme_completion_is_error(&status.cpl)) {
538 		nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
539 		return (ENXIO);
540 	}
541 
542 	return (0);
543 }
544 
545 static int
546 nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
547 {
548 	struct nvme_namespace	*ns;
549 	uint32_t 		i;
550 
551 	for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
552 		ns = &ctrlr->ns[i];
553 		nvme_ns_construct(ns, i+1, ctrlr);
554 	}
555 
556 	return (0);
557 }
558 
559 static boolean_t
560 is_log_page_id_valid(uint8_t page_id)
561 {
562 
563 	switch (page_id) {
564 	case NVME_LOG_ERROR:
565 	case NVME_LOG_HEALTH_INFORMATION:
566 	case NVME_LOG_FIRMWARE_SLOT:
567 		return (TRUE);
568 	}
569 
570 	return (FALSE);
571 }
572 
573 static uint32_t
574 nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
575 {
576 	uint32_t	log_page_size;
577 
578 	switch (page_id) {
579 	case NVME_LOG_ERROR:
580 		log_page_size = min(
581 		    sizeof(struct nvme_error_information_entry) *
582 		    (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
583 		break;
584 	case NVME_LOG_HEALTH_INFORMATION:
585 		log_page_size = sizeof(struct nvme_health_information_page);
586 		break;
587 	case NVME_LOG_FIRMWARE_SLOT:
588 		log_page_size = sizeof(struct nvme_firmware_page);
589 		break;
590 	default:
591 		log_page_size = 0;
592 		break;
593 	}
594 
595 	return (log_page_size);
596 }
597 
598 static void
599 nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
600     uint8_t state)
601 {
602 
603 	if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
604 		nvme_printf(ctrlr, "available spare space below threshold\n");
605 
606 	if (state & NVME_CRIT_WARN_ST_TEMPERATURE)
607 		nvme_printf(ctrlr, "temperature above threshold\n");
608 
609 	if (state & NVME_CRIT_WARN_ST_DEVICE_RELIABILITY)
610 		nvme_printf(ctrlr, "device reliability degraded\n");
611 
612 	if (state & NVME_CRIT_WARN_ST_READ_ONLY)
613 		nvme_printf(ctrlr, "media placed in read only mode\n");
614 
615 	if (state & NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP)
616 		nvme_printf(ctrlr, "volatile memory backup device failed\n");
617 
618 	if (state & NVME_CRIT_WARN_ST_RESERVED_MASK)
619 		nvme_printf(ctrlr,
620 		    "unknown critical warning(s): state = 0x%02x\n", state);
621 }
622 
623 static void
624 nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
625 {
626 	struct nvme_async_event_request		*aer = arg;
627 	struct nvme_health_information_page	*health_info;
628 	struct nvme_error_information_entry	*err;
629 	int i;
630 
631 	/*
632 	 * If the log page fetch for some reason completed with an error,
633 	 *  don't pass log page data to the consumers.  In practice, this case
634 	 *  should never happen.
635 	 */
636 	if (nvme_completion_is_error(cpl))
637 		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
638 		    aer->log_page_id, NULL, 0);
639 	else {
640 		/* Convert data to host endian */
641 		switch (aer->log_page_id) {
642 		case NVME_LOG_ERROR:
643 			err = (struct nvme_error_information_entry *)aer->log_page_buffer;
644 			for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
645 				nvme_error_information_entry_swapbytes(err++);
646 			break;
647 		case NVME_LOG_HEALTH_INFORMATION:
648 			nvme_health_information_page_swapbytes(
649 			    (struct nvme_health_information_page *)aer->log_page_buffer);
650 			break;
651 		case NVME_LOG_FIRMWARE_SLOT:
652 			nvme_firmware_page_swapbytes(
653 			    (struct nvme_firmware_page *)aer->log_page_buffer);
654 			break;
655 		case INTEL_LOG_TEMP_STATS:
656 			intel_log_temp_stats_swapbytes(
657 			    (struct intel_log_temp_stats *)aer->log_page_buffer);
658 			break;
659 		default:
660 			break;
661 		}
662 
663 		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
664 			health_info = (struct nvme_health_information_page *)
665 			    aer->log_page_buffer;
666 			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
667 			    health_info->critical_warning);
668 			/*
669 			 * Critical warnings reported through the
670 			 *  SMART/health log page are persistent, so
671 			 *  clear the associated bits in the async event
672 			 *  config so that we do not receive repeated
673 			 *  notifications for the same event.
674 			 */
675 			aer->ctrlr->async_event_config &=
676 			    ~health_info->critical_warning;
677 			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
678 			    aer->ctrlr->async_event_config, NULL, NULL);
679 		}
680 
681 
682 		/*
683 		 * Pass the cpl data from the original async event completion,
684 		 *  not the log page fetch.
685 		 */
686 		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
687 		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
688 	}
689 
690 	/*
691 	 * Repost another asynchronous event request to replace the one
692 	 *  that just completed.
693 	 */
694 	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
695 }
696 
697 static void
698 nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
699 {
700 	struct nvme_async_event_request	*aer = arg;
701 
702 	if (nvme_completion_is_error(cpl)) {
703 		/*
704 		 *  Do not retry failed async event requests.  This avoids
705 		 *  infinite loops where a new async event request is submitted
706 		 *  to replace the one just failed, only to fail again and
707 		 *  perpetuate the loop.
708 		 */
709 		return;
710 	}
711 
712 	/* Associated log page is in bits 23:16 of completion entry dw0. */
713 	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;
714 
715 	nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
716 	    aer->log_page_id);
717 
718 	if (is_log_page_id_valid(aer->log_page_id)) {
719 		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
720 		    aer->log_page_id);
721 		memcpy(&aer->cpl, cpl, sizeof(*cpl));
722 		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
723 		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
724 		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
725 		    aer);
726 		/* Wait to notify consumers until after log page is fetched. */
727 	} else {
728 		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
729 		    NULL, 0);
730 
731 		/*
732 		 * Repost another asynchronous event request to replace the one
733 		 *  that just completed.
734 		 */
735 		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
736 	}
737 }
738 
739 static void
740 nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
741     struct nvme_async_event_request *aer)
742 {
743 	struct nvme_request *req;
744 
745 	aer->ctrlr = ctrlr;
746 	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
747 	aer->req = req;
748 
749 	/*
750 	 * Disable timeout here, since asynchronous event requests should by
751 	 *  nature never be timed out.
752 	 */
753 	req->timeout = FALSE;
754 	req->cmd.opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_ASYNC_EVENT_REQUEST);
755 	nvme_ctrlr_submit_admin_request(ctrlr, req);
756 }
757 
758 static void
759 nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
760 {
761 	struct nvme_completion_poll_status	status;
762 	struct nvme_async_event_request		*aer;
763 	uint32_t				i;
764 
765 	ctrlr->async_event_config = 0xFF;
766 	ctrlr->async_event_config &= ~NVME_CRIT_WARN_ST_RESERVED_MASK;
767 
768 	status.done = 0;
769 	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
770 	    0, NULL, 0, nvme_completion_poll_cb, &status);
771 	while (!atomic_load_acq_int(&status.done))
772 		pause("nvme", 1);
773 	if (nvme_completion_is_error(&status.cpl) ||
774 	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
775 	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
776 		nvme_printf(ctrlr, "temperature threshold not supported\n");
777 		ctrlr->async_event_config &= ~NVME_CRIT_WARN_ST_TEMPERATURE;
778 	}
779 
780 	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
781 	    ctrlr->async_event_config, NULL, NULL);
782 
783 	/* aerl is a zero-based value, so we need to add 1 here. */
784 	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
785 
786 	for (i = 0; i < ctrlr->num_aers; i++) {
787 		aer = &ctrlr->aer[i];
788 		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
789 	}
790 }
791 
792 static void
793 nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
794 {
795 
796 	ctrlr->int_coal_time = 0;
797 	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
798 	    &ctrlr->int_coal_time);
799 
800 	ctrlr->int_coal_threshold = 0;
801 	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
802 	    &ctrlr->int_coal_threshold);
803 
804 	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
805 	    ctrlr->int_coal_threshold, NULL, NULL);
806 }
807 
808 static void
809 nvme_ctrlr_start(void *ctrlr_arg)
810 {
811 	struct nvme_controller *ctrlr = ctrlr_arg;
812 	uint32_t old_num_io_queues;
813 	int i;
814 
815 	/*
816 	 * Only reset adminq here when we are restarting the
817 	 *  controller after a reset.  During initialization,
818 	 *  we have already submitted admin commands to get
819 	 *  the number of I/O queues supported, so cannot reset
820 	 *  the adminq again here.
821 	 */
822 	if (ctrlr->is_resetting) {
823 		nvme_qpair_reset(&ctrlr->adminq);
824 	}
825 
826 	for (i = 0; i < ctrlr->num_io_queues; i++)
827 		nvme_qpair_reset(&ctrlr->ioq[i]);
828 
829 	nvme_admin_qpair_enable(&ctrlr->adminq);
830 
831 	if (nvme_ctrlr_identify(ctrlr) != 0) {
832 		nvme_ctrlr_fail(ctrlr);
833 		return;
834 	}
835 
836 	/*
837 	 * The number of qpairs are determined during controller initialization,
838 	 *  including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
839 	 *  HW limit.  We call SET_FEATURES again here so that it gets called
840 	 *  after any reset for controllers that depend on the driver to
841 	 *  explicit specify how many queues it will use.  This value should
842 	 *  never change between resets, so panic if somehow that does happen.
843 	 */
844 	if (ctrlr->is_resetting) {
845 		old_num_io_queues = ctrlr->num_io_queues;
846 		if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
847 			nvme_ctrlr_fail(ctrlr);
848 			return;
849 		}
850 
851 		if (old_num_io_queues != ctrlr->num_io_queues) {
852 			panic("num_io_queues changed from %u to %u",
853 			      old_num_io_queues, ctrlr->num_io_queues);
854 		}
855 	}
856 
857 	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
858 		nvme_ctrlr_fail(ctrlr);
859 		return;
860 	}
861 
862 	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
863 		nvme_ctrlr_fail(ctrlr);
864 		return;
865 	}
866 
867 	nvme_ctrlr_configure_aer(ctrlr);
868 	nvme_ctrlr_configure_int_coalescing(ctrlr);
869 
870 	for (i = 0; i < ctrlr->num_io_queues; i++)
871 		nvme_io_qpair_enable(&ctrlr->ioq[i]);
872 }
873 
874 void
875 nvme_ctrlr_start_config_hook(void *arg)
876 {
877 	struct nvme_controller *ctrlr = arg;
878 
879 	nvme_qpair_reset(&ctrlr->adminq);
880 	nvme_admin_qpair_enable(&ctrlr->adminq);
881 
882 	if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
883 	    nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
884 		nvme_ctrlr_start(ctrlr);
885 	else
886 		nvme_ctrlr_fail(ctrlr);
887 
888 	nvme_sysctl_initialize_ctrlr(ctrlr);
889 	config_intrhook_disestablish(&ctrlr->config_hook);
890 
891 	ctrlr->is_initialized = 1;
892 	nvme_notify_new_controller(ctrlr);
893 }
894 
895 static void
896 nvme_ctrlr_reset_task(void *arg, int pending)
897 {
898 	struct nvme_controller	*ctrlr = arg;
899 	int			status;
900 
901 	nvme_printf(ctrlr, "resetting controller\n");
902 	status = nvme_ctrlr_hw_reset(ctrlr);
903 	/*
904 	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
905 	 *  handlers on this CPU that were blocked on a qpair lock. We want
906 	 *  all nvme interrupts completed before proceeding with restarting the
907 	 *  controller.
908 	 *
909 	 * XXX - any way to guarantee the interrupt handlers have quiesced?
910 	 */
911 	pause("nvmereset", hz / 10);
912 	if (status == 0)
913 		nvme_ctrlr_start(ctrlr);
914 	else
915 		nvme_ctrlr_fail(ctrlr);
916 
917 	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
918 }
919 
920 /*
921  * Poll all the queues enabled on the device for completion.
922  */
923 void
924 nvme_ctrlr_poll(struct nvme_controller *ctrlr)
925 {
926 	int i;
927 
928 	nvme_qpair_process_completions(&ctrlr->adminq);
929 
930 	for (i = 0; i < ctrlr->num_io_queues; i++)
931 		if (ctrlr->ioq && ctrlr->ioq[i].cpl)
932 			nvme_qpair_process_completions(&ctrlr->ioq[i]);
933 }
934 
935 /*
936  * Poll the single-vector intertrupt case: num_io_queues will be 1 and
937  * there's only a single vector. While we're polling, we mask further
938  * interrupts in the controller.
939  */
940 void
941 nvme_ctrlr_intx_handler(void *arg)
942 {
943 	struct nvme_controller *ctrlr = arg;
944 
945 	nvme_mmio_write_4(ctrlr, intms, 1);
946 	nvme_ctrlr_poll(ctrlr);
947 	nvme_mmio_write_4(ctrlr, intmc, 1);
948 }
949 
950 static int
951 nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
952 {
953 
954 	ctrlr->msix_enabled = 0;
955 	ctrlr->num_io_queues = 1;
956 	ctrlr->num_cpus_per_ioq = mp_ncpus;
957 	ctrlr->rid = 0;
958 	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
959 	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);
960 
961 	if (ctrlr->res == NULL) {
962 		nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
963 		return (ENOMEM);
964 	}
965 
966 	bus_setup_intr(ctrlr->dev, ctrlr->res,
967 	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
968 	    ctrlr, &ctrlr->tag);
969 
970 	if (ctrlr->tag == NULL) {
971 		nvme_printf(ctrlr, "unable to setup intx handler\n");
972 		return (ENOMEM);
973 	}
974 
975 	return (0);
976 }
977 
978 static void
979 nvme_pt_done(void *arg, const struct nvme_completion *cpl)
980 {
981 	struct nvme_pt_command *pt = arg;
982 	struct mtx *mtx = pt->driver_lock;
983 	uint16_t status;
984 
985 	bzero(&pt->cpl, sizeof(pt->cpl));
986 	pt->cpl.cdw0 = cpl->cdw0;
987 
988 	status = cpl->status;
989 	status &= ~NVME_STATUS_P_MASK;
990 	pt->cpl.status = status;
991 
992 	mtx_lock(mtx);
993 	pt->driver_lock = NULL;
994 	wakeup(pt);
995 	mtx_unlock(mtx);
996 }
997 
998 int
999 nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
1000     struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
1001     int is_admin_cmd)
1002 {
1003 	struct nvme_request	*req;
1004 	struct mtx		*mtx;
1005 	struct buf		*buf = NULL;
1006 	int			ret = 0;
1007 	vm_offset_t		addr, end;
1008 
1009 	if (pt->len > 0) {
1010 		/*
1011 		 * vmapbuf calls vm_fault_quick_hold_pages which only maps full
1012 		 * pages. Ensure this request has fewer than MAXPHYS bytes when
1013 		 * extended to full pages.
1014 		 */
1015 		addr = (vm_offset_t)pt->buf;
1016 		end = round_page(addr + pt->len);
1017 		addr = trunc_page(addr);
1018 		if (end - addr > MAXPHYS)
1019 			return EIO;
1020 
1021 		if (pt->len > ctrlr->max_xfer_size) {
1022 			nvme_printf(ctrlr, "pt->len (%d) "
1023 			    "exceeds max_xfer_size (%d)\n", pt->len,
1024 			    ctrlr->max_xfer_size);
1025 			return EIO;
1026 		}
1027 		if (is_user_buffer) {
1028 			/*
1029 			 * Ensure the user buffer is wired for the duration of
1030 			 *  this passthrough command.
1031 			 */
1032 			PHOLD(curproc);
1033 			buf = getpbuf(NULL);
1034 			buf->b_data = pt->buf;
1035 			buf->b_bufsize = pt->len;
1036 			buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
1037 #ifdef NVME_UNMAPPED_BIO_SUPPORT
1038 			if (vmapbuf(buf, 1) < 0) {
1039 #else
1040 			if (vmapbuf(buf) < 0) {
1041 #endif
1042 				ret = EFAULT;
1043 				goto err;
1044 			}
1045 			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
1046 			    nvme_pt_done, pt);
1047 		} else
1048 			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
1049 			    nvme_pt_done, pt);
1050 	} else
1051 		req = nvme_allocate_request_null(nvme_pt_done, pt);
1052 
1053 	/* Assume userspace already converted to little-endian */
1054 	req->cmd.opc_fuse = pt->cmd.opc_fuse;
1055 	req->cmd.cdw10 = pt->cmd.cdw10;
1056 	req->cmd.cdw11 = pt->cmd.cdw11;
1057 	req->cmd.cdw12 = pt->cmd.cdw12;
1058 	req->cmd.cdw13 = pt->cmd.cdw13;
1059 	req->cmd.cdw14 = pt->cmd.cdw14;
1060 	req->cmd.cdw15 = pt->cmd.cdw15;
1061 
1062 	req->cmd.nsid = htole32(nsid);
1063 
1064 	mtx = mtx_pool_find(mtxpool_sleep, pt);
1065 	pt->driver_lock = mtx;
1066 
1067 	if (is_admin_cmd)
1068 		nvme_ctrlr_submit_admin_request(ctrlr, req);
1069 	else
1070 		nvme_ctrlr_submit_io_request(ctrlr, req);
1071 
1072 	mtx_lock(mtx);
1073 	while (pt->driver_lock != NULL)
1074 		mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
1075 	mtx_unlock(mtx);
1076 
1077 err:
1078 	if (buf != NULL) {
1079 		relpbuf(buf, NULL);
1080 		PRELE(curproc);
1081 	}
1082 
1083 	return (ret);
1084 }
1085 
1086 static int
1087 nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
1088     struct thread *td)
1089 {
1090 	struct nvme_controller			*ctrlr;
1091 	struct nvme_pt_command			*pt;
1092 
1093 	ctrlr = cdev->si_drv1;
1094 
1095 	switch (cmd) {
1096 	case NVME_RESET_CONTROLLER:
1097 		nvme_ctrlr_reset(ctrlr);
1098 		break;
1099 	case NVME_PASSTHROUGH_CMD:
1100 		pt = (struct nvme_pt_command *)arg;
1101 		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
1102 		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
1103 	default:
1104 		return (ENOTTY);
1105 	}
1106 
1107 	return (0);
1108 }
1109 
1110 static struct cdevsw nvme_ctrlr_cdevsw = {
1111 	.d_version =	D_VERSION,
1112 	.d_flags =	0,
1113 	.d_ioctl =	nvme_ctrlr_ioctl
1114 };
1115 
1116 static void
1117 nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
1118 {
1119 	device_t	dev;
1120 	int		per_cpu_io_queues;
1121 	int		min_cpus_per_ioq;
1122 	int		num_vectors_requested, num_vectors_allocated;
1123 	int		num_vectors_available;
1124 
1125 	dev = ctrlr->dev;
1126 	min_cpus_per_ioq = 1;
1127 	TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);
1128 
1129 	if (min_cpus_per_ioq < 1) {
1130 		min_cpus_per_ioq = 1;
1131 	} else if (min_cpus_per_ioq > mp_ncpus) {
1132 		min_cpus_per_ioq = mp_ncpus;
1133 	}
1134 
1135 	per_cpu_io_queues = 1;
1136 	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
1137 
1138 	if (per_cpu_io_queues == 0) {
1139 		min_cpus_per_ioq = mp_ncpus;
1140 	}
1141 
1142 	ctrlr->force_intx = 0;
1143 	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);
1144 
1145 	/*
1146 	 * FreeBSD currently cannot allocate more than about 190 vectors at
1147 	 *  boot, meaning that systems with high core count and many devices
1148 	 *  requesting per-CPU interrupt vectors will not get their full
1149 	 *  allotment.  So first, try to allocate as many as we may need to
1150 	 *  understand what is available, then immediately release them.
1151 	 *  Then figure out how many of those we will actually use, based on
1152 	 *  assigning an equal number of cores to each I/O queue.
1153 	 */
1154 
1155 	/* One vector for per core I/O queue, plus one vector for admin queue. */
1156 	num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
1157 	if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
1158 		num_vectors_available = 0;
1159 	}
1160 	pci_release_msi(dev);
1161 
1162 	if (ctrlr->force_intx || num_vectors_available < 2) {
1163 		nvme_ctrlr_configure_intx(ctrlr);
1164 		return;
1165 	}
1166 
1167 	/*
1168 	 * Do not use all vectors for I/O queues - one must be saved for the
1169 	 *  admin queue.
1170 	 */
1171 	ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
1172 	    howmany(mp_ncpus, num_vectors_available - 1));
1173 
1174 	ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
1175 	num_vectors_requested = ctrlr->num_io_queues + 1;
1176 	num_vectors_allocated = num_vectors_requested;
1177 
1178 	/*
1179 	 * Now just allocate the number of vectors we need.  This should
1180 	 *  succeed, since we previously called pci_alloc_msix()
1181 	 *  successfully returning at least this many vectors, but just to
1182 	 *  be safe, if something goes wrong just revert to INTx.
1183 	 */
1184 	if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
1185 		nvme_ctrlr_configure_intx(ctrlr);
1186 		return;
1187 	}
1188 
1189 	if (num_vectors_allocated < num_vectors_requested) {
1190 		pci_release_msi(dev);
1191 		nvme_ctrlr_configure_intx(ctrlr);
1192 		return;
1193 	}
1194 
1195 	ctrlr->msix_enabled = 1;
1196 }
1197 
1198 int
1199 nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
1200 {
1201 	struct make_dev_args	md_args;
1202 	uint32_t	cap_lo;
1203 	uint32_t	cap_hi;
1204 	uint8_t		to;
1205 	uint8_t		dstrd;
1206 	uint8_t		mpsmin;
1207 	int		status, timeout_period;
1208 
1209 	ctrlr->dev = dev;
1210 
1211 	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
1212 
1213 	status = nvme_ctrlr_allocate_bar(ctrlr);
1214 
1215 	if (status != 0)
1216 		return (status);
1217 
1218 	/*
1219 	 * Software emulators may set the doorbell stride to something
1220 	 *  other than zero, but this driver is not set up to handle that.
1221 	 */
1222 	cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
1223 	dstrd = (cap_hi >> NVME_CAP_HI_REG_DSTRD_SHIFT) & NVME_CAP_HI_REG_DSTRD_MASK;
1224 	if (dstrd != 0)
1225 		return (ENXIO);
1226 
1227 	mpsmin = (cap_hi >> NVME_CAP_HI_REG_MPSMIN_SHIFT) & NVME_CAP_HI_REG_MPSMIN_MASK;
1228 	ctrlr->min_page_size = 1 << (12 + mpsmin);
1229 
1230 	/* Get ready timeout value from controller, in units of 500ms. */
1231 	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
1232 	to = (cap_lo >> NVME_CAP_LO_REG_TO_SHIFT) & NVME_CAP_LO_REG_TO_MASK;
1233 	ctrlr->ready_timeout_in_ms = to * 500;
1234 
1235 	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
1236 	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
1237 	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
1238 	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
1239 	ctrlr->timeout_period = timeout_period;
1240 
1241 	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
1242 	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);
1243 
1244 	ctrlr->enable_aborts = 0;
1245 	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
1246 
1247 	nvme_ctrlr_setup_interrupts(ctrlr);
1248 
1249 	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
1250 	if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
1251 		return (ENXIO);
1252 
1253 	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
1254 	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
1255 	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");
1256 
1257 	ctrlr->is_resetting = 0;
1258 	ctrlr->is_initialized = 0;
1259 	ctrlr->notification_sent = 0;
1260 	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
1261 	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
1262 	STAILQ_INIT(&ctrlr->fail_req);
1263 	ctrlr->is_failed = FALSE;
1264 
1265 	make_dev_args_init(&md_args);
1266 	md_args.mda_devsw = &nvme_ctrlr_cdevsw;
1267 	md_args.mda_uid = UID_ROOT;
1268 	md_args.mda_gid = GID_WHEEL;
1269 	md_args.mda_mode = 0600;
1270 	md_args.mda_unit = device_get_unit(dev);
1271 	md_args.mda_si_drv1 = (void *)ctrlr;
1272 	status = make_dev_s(&md_args, &ctrlr->cdev, "nvme%d",
1273 	    device_get_unit(dev));
1274 	if (status != 0)
1275 		return (ENXIO);
1276 
1277 	return (0);
1278 }
1279 
1280 void
1281 nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
1282 {
1283 	int				i;
1284 
1285 	if (ctrlr->resource == NULL)
1286 		goto nores;
1287 
1288 	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
1289 		nvme_ns_destruct(&ctrlr->ns[i]);
1290 
1291 	if (ctrlr->cdev)
1292 		destroy_dev(ctrlr->cdev);
1293 
1294 	for (i = 0; i < ctrlr->num_io_queues; i++) {
1295 		nvme_ctrlr_destroy_qpair(ctrlr, &ctrlr->ioq[i]);
1296 		nvme_io_qpair_destroy(&ctrlr->ioq[i]);
1297 	}
1298 	free(ctrlr->ioq, M_NVME);
1299 
1300 	nvme_admin_qpair_destroy(&ctrlr->adminq);
1301 
1302 	/*
1303 	 *  Notify the controller of a shutdown, even though this is due to
1304 	 *   a driver unload, not a system shutdown (this path is not invoked
1305 	 *   during shutdown).  This ensures the controller receives a
1306 	 *   shutdown notification in case the system is shutdown before
1307 	 *   reloading the driver.
1308 	 */
1309 	nvme_ctrlr_shutdown(ctrlr);
1310 
1311 	nvme_ctrlr_disable(ctrlr);
1312 
1313 	if (ctrlr->taskqueue)
1314 		taskqueue_free(ctrlr->taskqueue);
1315 
1316 	if (ctrlr->tag)
1317 		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
1318 
1319 	if (ctrlr->res)
1320 		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
1321 		    rman_get_rid(ctrlr->res), ctrlr->res);
1322 
1323 	if (ctrlr->msix_enabled)
1324 		pci_release_msi(dev);
1325 
1326 	if (ctrlr->bar4_resource != NULL) {
1327 		bus_release_resource(dev, SYS_RES_MEMORY,
1328 		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
1329 	}
1330 
1331 	bus_release_resource(dev, SYS_RES_MEMORY,
1332 	    ctrlr->resource_id, ctrlr->resource);
1333 
1334 nores:
1335 	mtx_destroy(&ctrlr->lock);
1336 }
1337 
1338 void
1339 nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
1340 {
1341 	uint32_t	cc;
1342 	uint32_t	csts;
1343 	int		ticks = 0;
1344 
1345 	cc = nvme_mmio_read_4(ctrlr, cc);
1346 	cc &= ~(NVME_CC_REG_SHN_MASK << NVME_CC_REG_SHN_SHIFT);
1347 	cc |= NVME_SHN_NORMAL << NVME_CC_REG_SHN_SHIFT;
1348 	nvme_mmio_write_4(ctrlr, cc, cc);
1349 
1350 	csts = nvme_mmio_read_4(ctrlr, csts);
1351 	while ((NVME_CSTS_GET_SHST(csts) != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) {
1352 		pause("nvme shn", 1);
1353 		csts = nvme_mmio_read_4(ctrlr, csts);
1354 	}
1355 	if (NVME_CSTS_GET_SHST(csts) != NVME_SHST_COMPLETE)
1356 		nvme_printf(ctrlr, "did not complete shutdown within 5 seconds "
1357 		    "of notification\n");
1358 }
1359 
1360 void
1361 nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
1362     struct nvme_request *req)
1363 {
1364 
1365 	nvme_qpair_submit_request(&ctrlr->adminq, req);
1366 }
1367 
1368 void
1369 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
1370     struct nvme_request *req)
1371 {
1372 	struct nvme_qpair       *qpair;
1373 
1374 	qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
1375 	nvme_qpair_submit_request(qpair, req);
1376 }
1377 
1378 device_t
1379 nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
1380 {
1381 
1382 	return (ctrlr->dev);
1383 }
1384 
1385 const struct nvme_controller_data *
1386 nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
1387 {
1388 
1389 	return (&ctrlr->cdata);
1390 }
1391