xref: /freebsd/sys/dev/nvme/nvme_ctrlr.c (revision 848602856f49c3937b8c6d168fd988263954b43a)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (C) 2012-2016 Intel Corporation
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include "opt_nvme.h"
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/buf.h>
34 #include <sys/bus.h>
35 #include <sys/conf.h>
36 #include <sys/ioccom.h>
37 #include <sys/proc.h>
38 #include <sys/smp.h>
39 #include <sys/uio.h>
40 #include <sys/sbuf.h>
41 #include <sys/endian.h>
42 #include <sys/stdarg.h>
43 #include <vm/vm.h>
44 #include <vm/vm_page.h>
45 #include <vm/vm_extern.h>
46 #include <vm/vm_map.h>
47 
48 #include "nvme_private.h"
49 #include "nvme_linux.h"
50 
51 #include "nvme_if.h"
52 
53 #define B4_CHK_RDY_DELAY_MS	2300		/* work around controller bug */
54 
55 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
56     struct nvme_async_event_request *aer);
57 
58 static void
59 nvme_ctrlr_barrier(struct nvme_controller *ctrlr, int flags)
60 {
61 	bus_barrier(ctrlr->resource, 0, rman_get_size(ctrlr->resource), flags);
62 }
63 
64 static void
65 nvme_ctrlr_devctl_va(struct nvme_controller *ctrlr, const char *type,
66     const char *msg, va_list ap)
67 {
68 	struct sbuf sb;
69 	int error;
70 
71 	if (sbuf_new(&sb, NULL, 0, SBUF_AUTOEXTEND | SBUF_NOWAIT) == NULL)
72 		return;
73 	sbuf_printf(&sb, "name=\"%s\" ", device_get_nameunit(ctrlr->dev));
74 	sbuf_vprintf(&sb, msg, ap);
75 	error = sbuf_finish(&sb);
76 	if (error == 0)
77 		devctl_notify("nvme", "controller", type, sbuf_data(&sb));
78 	sbuf_delete(&sb);
79 }
80 
81 static void
82 nvme_ctrlr_devctl(struct nvme_controller *ctrlr, const char *type, const char *msg, ...)
83 {
84 	va_list ap;
85 
86 	va_start(ap, msg);
87 	nvme_ctrlr_devctl_va(ctrlr, type, msg, ap);
88 	va_end(ap);
89 }
90 
91 static void
92 nvme_ctrlr_devctl_log(struct nvme_controller *ctrlr, const char *type, const char *msg, ...)
93 {
94 	struct sbuf sb;
95 	va_list ap;
96 	int error;
97 
98 	if (sbuf_new(&sb, NULL, 0, SBUF_AUTOEXTEND | SBUF_NOWAIT) == NULL)
99 		return;
100 	sbuf_printf(&sb, "%s: ", device_get_nameunit(ctrlr->dev));
101 	va_start(ap, msg);
102 	sbuf_vprintf(&sb, msg, ap);
103 	va_end(ap);
104 	error = sbuf_finish(&sb);
105 	if (error == 0)
106 		printf("%s\n", sbuf_data(&sb));
107 	sbuf_delete(&sb);
108 	va_start(ap, msg);
109 	nvme_ctrlr_devctl_va(ctrlr, type, msg, ap);
110 	va_end(ap);
111 }
112 
113 static int
114 nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
115 {
116 	struct nvme_qpair	*qpair;
117 	uint32_t		num_entries;
118 	int			error;
119 
120 	qpair = &ctrlr->adminq;
121 	qpair->id = 0;
122 	qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
123 	qpair->domain = ctrlr->domain;
124 
125 	num_entries = NVME_ADMIN_ENTRIES;
126 	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
127 	/*
128 	 * If admin_entries was overridden to an invalid value, revert it
129 	 *  back to our default value.
130 	 */
131 	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
132 	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
133 		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
134 		    "specified\n", num_entries);
135 		num_entries = NVME_ADMIN_ENTRIES;
136 	}
137 
138 	/*
139 	 * The admin queue's max xfer size is treated differently than the
140 	 *  max I/O xfer size.  16KB is sufficient here - maybe even less?
141 	 */
142 	error = nvme_qpair_construct(qpair, num_entries, NVME_ADMIN_TRACKERS,
143 	     ctrlr);
144 	return (error);
145 }
146 
147 #define QP(ctrlr, c)	((c) * (ctrlr)->num_io_queues / mp_ncpus)
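/*
 * E.g. with mp_ncpus == 16 and num_io_queues == 4 (illustrative values only),
 * QP() maps CPUs 0-3 to queue 0, CPUs 4-7 to queue 1, and so on.
 */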
148 
149 static int
150 nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
151 {
152 	struct nvme_qpair	*qpair;
153 	uint32_t		cap_lo;
154 	uint16_t		mqes;
155 	int			c, error, i, n;
156 	int			num_entries, num_trackers, max_entries;
157 
158 	/*
159 	 * NVMe spec sets a hard limit of 64K max entries, but devices may
160 	 * specify a smaller limit, so we need to check the MQES field in the
161 	 * capabilities register. We have to cap the number of entries to what
162 	 * the current doorbell stride allows for in BAR 0/1, otherwise the
163 	 * remaining entries are inaccessible. MQES should reflect this, and this is just a
164 	 * fail-safe.
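	 * For example, with CAP.DSTRD == 0 (dstrd == 2, i.e. 8 bytes of doorbells
	 * per queue pair) and a 16 KiB BAR whose doorbells start at the usual
	 * 0x1000 offset, this works out to (16384 - 4096) / 8 = 1536.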
165 	 */
166 	max_entries =
167 	    (rman_get_size(ctrlr->resource) - nvme_mmio_offsetof(doorbell[0])) /
168 	    (1 << (ctrlr->dstrd + 1));
169 	num_entries = NVME_IO_ENTRIES;
170 	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
171 	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
172 	mqes = NVME_CAP_LO_MQES(cap_lo);
173 	num_entries = min(num_entries, mqes + 1);
174 	num_entries = min(num_entries, max_entries);
175 
176 	num_trackers = NVME_IO_TRACKERS;
177 	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);
178 
179 	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
180 	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
181 	/*
182 	 * No need to have more trackers than entries in the submit queue.  Note
183 	 * also that for a queue size of N, we can only have (N-1) commands
184 	 * outstanding, hence the "-1" here.
185 	 */
186 	num_trackers = min(num_trackers, (num_entries-1));
187 
188 	/*
189 	 * Our best estimate for the maximum number of I/Os that we should
190 	 * normally have in flight at one time. This should be viewed as a hint,
191 	 * not a hard limit, and will need to be revisited when the upper layers
192 	 * of the storage system grow multi-queue support.
193 	 */
194 	ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;
195 
196 	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
197 	    M_NVME, M_ZERO | M_WAITOK);
198 
199 	for (i = c = n = 0; i < ctrlr->num_io_queues; i++, c += n) {
200 		qpair = &ctrlr->ioq[i];
201 
202 		/*
203 		 * Admin queue has ID=0. IO queues start at ID=1 -
204 		 *  hence the 'i+1' here.
205 		 */
206 		qpair->id = i + 1;
207 		if (ctrlr->num_io_queues > 1) {
208 			/* Find number of CPUs served by this queue. */
209 			for (n = 1; QP(ctrlr, c + n) == i; n++)
210 				;
211 			/* Shuffle multiple NVMe devices between CPUs. */
212 			qpair->cpu = c + (device_get_unit(ctrlr->dev)+n/2) % n;
213 			qpair->domain = pcpu_find(qpair->cpu)->pc_domain;
214 		} else {
215 			qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
216 			qpair->domain = ctrlr->domain;
217 		}
218 
219 		/*
220 		 * For I/O queues, use the controller-wide max_xfer_size
221 		 *  calculated in nvme_attach().
222 		 */
223 		error = nvme_qpair_construct(qpair, num_entries, num_trackers,
224 		    ctrlr);
225 		if (error)
226 			return (error);
227 
228 		/*
229 		 * Do not bother binding interrupts if we only have one I/O
230 		 *  interrupt thread for this controller.
231 		 */
232 		if (ctrlr->num_io_queues > 1)
233 			bus_bind_intr(ctrlr->dev, qpair->res, qpair->cpu);
234 	}
235 
236 	return (0);
237 }
238 
239 static void
240 nvme_ctrlr_fail(struct nvme_controller *ctrlr, bool admin_also)
241 {
242 	int i;
243 
244 	/*
245 	 * No need to disable queues before failing them. Failing is a superset
246 	 * of disabling (though pedantically we'd abort the AERs silently with
247 	 * a different error; when we fail, that hardly matters).
248 	 */
249 	ctrlr->is_failed = true;
250 	if (admin_also) {
251 		ctrlr->is_failed_admin = true;
252 		nvme_qpair_fail(&ctrlr->adminq);
253 	}
254 	if (ctrlr->ioq != NULL) {
255 		for (i = 0; i < ctrlr->num_io_queues; i++) {
256 			nvme_qpair_fail(&ctrlr->ioq[i]);
257 		}
258 	}
259 	nvme_notify_fail(ctrlr);
260 }
261 
262 /*
263  * Wait for RDY to change.
264  *
265  * Starts sleeping for 1us and geometrically increases it the longer we wait,
266  * capped at 1ms.
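 * For example, successive sleeps are roughly 1us, 1.5us, 2.25us, 3.4us, ...
 * (each 1.5x the previous one) until the 1ms cap is reached.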
267  */
268 static int
269 nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
270 {
271 	int timeout = ticks + MSEC_2_TICKS(ctrlr->ready_timeout_in_ms);
272 	sbintime_t delta_t = SBT_1US;
273 	uint32_t csts;
274 
275 	while (1) {
276 		csts = nvme_mmio_read_4(ctrlr, csts);
277 		if (csts == NVME_GONE)		/* Hot unplug. */
278 			return (ENXIO);
279 		if (NVMEV(NVME_CSTS_REG_RDY, csts) == desired_val)
280 			break;
281 		if (timeout - ticks < 0) {
282 			nvme_printf(ctrlr, "controller ready did not become %d "
283 			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
284 			return (ENXIO);
285 		}
286 
287 		pause_sbt("nvmerdy", delta_t, 0, C_PREL(1));
288 		delta_t = min(SBT_1MS, delta_t * 3 / 2);
289 	}
290 
291 	return (0);
292 }
293 
294 static int
295 nvme_ctrlr_disable(struct nvme_controller *ctrlr)
296 {
297 	uint32_t cc;
298 	uint32_t csts;
299 	uint8_t  en, rdy;
300 	int err;
301 
302 	cc = nvme_mmio_read_4(ctrlr, cc);
303 	csts = nvme_mmio_read_4(ctrlr, csts);
304 
305 	en = NVMEV(NVME_CC_REG_EN, cc);
306 	rdy = NVMEV(NVME_CSTS_REG_RDY, csts);
307 
308 	/*
309 	 * Per 3.1.5 in NVME 1.3 spec, transitioning CC.EN from 0 to 1
310 	 * when CSTS.RDY is 1 or transitioning CC.EN from 1 to 0 when
311 	 * CSTS.RDY is 0 "has undefined results". So make sure that CSTS.RDY
312 	 * isn't the desired value. Short circuit if we're already disabled.
313 	 */
314 	if (en == 0) {
315 		/* Wait for RDY == 0 or timeout & fail */
316 		if (rdy == 0)
317 			return (0);
318 		return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
319 	}
320 	if (rdy == 0) {
321 		/* EN == 1, wait for RDY == 1 or timeout & fail */
322 		err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
323 		if (err != 0)
324 			return (err);
325 	}
326 
327 	cc &= ~NVMEM(NVME_CC_REG_EN);
328 	nvme_mmio_write_4(ctrlr, cc, cc);
329 
330 	/*
331 	 * A few drives have firmware bugs that freeze the drive if we access
332 	 * the mmio too soon after we disable.
333 	 */
334 	if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
335 		pause("nvmeR", MSEC_2_TICKS(B4_CHK_RDY_DELAY_MS));
336 	return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
337 }
338 
339 static int
340 nvme_ctrlr_enable(struct nvme_controller *ctrlr)
341 {
342 	uint32_t	cc;
343 	uint32_t	csts;
344 	uint32_t	aqa;
345 	uint32_t	qsize;
346 	uint8_t		en, rdy;
347 	int		err;
348 
349 	cc = nvme_mmio_read_4(ctrlr, cc);
350 	csts = nvme_mmio_read_4(ctrlr, csts);
351 
352 	en = NVMEV(NVME_CC_REG_EN, cc);
353 	rdy = NVMEV(NVME_CSTS_REG_RDY, csts);
354 
355 	/*
356 	 * See note in nvme_ctrlr_disable. Short circuit if we're already enabled.
357 	 */
358 	if (en == 1) {
359 		if (rdy == 1)
360 			return (0);
361 		return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
362 	}
363 
364 	/* EN == 0 already, wait for RDY == 0 or timeout & fail */
365 	err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
366 	if (err != 0)
367 		return (err);
368 
369 	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
370 	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
371 
372 	/* acqs and asqs are 0-based. */
373 	qsize = ctrlr->adminq.num_entries - 1;
374 
375 	aqa = 0;
376 	aqa |= NVMEF(NVME_AQA_REG_ACQS, qsize);
377 	aqa |= NVMEF(NVME_AQA_REG_ASQS, qsize);
378 	nvme_mmio_write_4(ctrlr, aqa, aqa);
379 
380 	/* Initialization values for CC */
381 	cc = 0;
382 	cc |= NVMEF(NVME_CC_REG_EN, 1);
383 	cc |= NVMEF(NVME_CC_REG_CSS, 0);
384 	cc |= NVMEF(NVME_CC_REG_AMS, 0);
385 	cc |= NVMEF(NVME_CC_REG_SHN, 0);
386 	cc |= NVMEF(NVME_CC_REG_IOSQES, 6); /* SQ entry size == 64 == 2^6 */
387 	cc |= NVMEF(NVME_CC_REG_IOCQES, 4); /* CQ entry size == 16 == 2^4 */
388 
389 	/*
390 	 * Use the Memory Page Size selected during device initialization.  Note
391 	 * that the value stored in mps is suitable to use here without adjusting by
392 	 * NVME_MPS_SHIFT.
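	 * E.g. mps == 0 selects 2^(12 + 0) = 4 KiB pages (NVME_MPS_SHIFT is 12).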
393 	 */
394 	cc |= NVMEF(NVME_CC_REG_MPS, ctrlr->mps);
395 
396 	nvme_ctrlr_barrier(ctrlr, BUS_SPACE_BARRIER_WRITE);
397 	nvme_mmio_write_4(ctrlr, cc, cc);
398 
399 	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
400 }
401 
402 static void
403 nvme_ctrlr_disable_qpairs(struct nvme_controller *ctrlr)
404 {
405 	int i;
406 
407 	nvme_admin_qpair_disable(&ctrlr->adminq);
408 	/*
409 	 * I/O queues are not allocated before the initial HW
410 	 *  reset, so do not try to disable them.  Use is_initialized
411 	 *  to determine if this is the initial HW reset.
412 	 */
413 	if (ctrlr->is_initialized) {
414 		for (i = 0; i < ctrlr->num_io_queues; i++)
415 			nvme_io_qpair_disable(&ctrlr->ioq[i]);
416 	}
417 }
418 
419 static int
420 nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
421 {
422 	int err;
423 
424 	TSENTER();
425 
426 	ctrlr->is_failed_admin = true;
427 	nvme_ctrlr_disable_qpairs(ctrlr);
428 
429 	err = nvme_ctrlr_disable(ctrlr);
430 	if (err != 0)
431 		goto out;
432 
433 	err = nvme_ctrlr_enable(ctrlr);
434 out:
435 	if (err == 0)
436 		ctrlr->is_failed_admin = false;
437 
438 	TSEXIT();
439 	return (err);
440 }
441 
442 void
443 nvme_ctrlr_reset(struct nvme_controller *ctrlr)
444 {
445 	int cmpset;
446 
447 	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
448 
449 	if (cmpset == 0)
450 		/*
451 		 * Controller is already resetting.  Return immediately since
452 		 * there is no need to kick off another reset.
453 		 */
454 		return;
455 
456 	if (!ctrlr->is_dying)
457 		taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
458 }
459 
460 static int
461 nvme_ctrlr_identify(struct nvme_controller *ctrlr)
462 {
463 	struct nvme_completion_poll_status	status;
464 
465 	status.done = 0;
466 	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
467 	    nvme_completion_poll_cb, &status);
468 	nvme_completion_poll(&status);
469 	if (nvme_completion_is_error(&status.cpl)) {
470 		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
471 		return (ENXIO);
472 	}
473 
474 	/* Convert data to host endian */
475 	nvme_controller_data_swapbytes(&ctrlr->cdata);
476 
477 	/*
478 	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
479 	 *  controller supports.
480 	 */
481 	if (ctrlr->cdata.mdts > 0)
482 		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
483 		    1 << (ctrlr->cdata.mdts + NVME_MPS_SHIFT +
484 			NVME_CAP_HI_MPSMIN(ctrlr->cap_hi)));
485 
486 	return (0);
487 }
488 
489 static int
490 nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
491 {
492 	struct nvme_completion_poll_status	status;
493 	int					cq_allocated, sq_allocated;
494 
495 	status.done = 0;
496 	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
497 	    nvme_completion_poll_cb, &status);
498 	nvme_completion_poll(&status);
499 	if (nvme_completion_is_error(&status.cpl)) {
500 		nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
501 		return (ENXIO);
502 	}
503 
504 	/*
505 	 * Data in cdw0 is 0-based.
506 	 * Lower 16-bits indicate number of submission queues allocated.
507 	 * Upper 16-bits indicate number of completion queues allocated.
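	 * E.g. cdw0 == 0x00070007 means 8 submission and 8 completion queues
	 * were allocated.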
508 	 */
509 	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
510 	cq_allocated = (status.cpl.cdw0 >> 16) + 1;
511 
512 	/*
513 	 * Controller may allocate more queues than we requested,
514 	 *  so use the minimum of the number requested and what was
515 	 *  actually allocated.
516 	 */
517 	ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
518 	ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
519 	if (ctrlr->num_io_queues > vm_ndomains)
520 		ctrlr->num_io_queues -= ctrlr->num_io_queues % vm_ndomains;
521 
522 	return (0);
523 }
524 
525 static int
526 nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
527 {
528 	struct nvme_completion_poll_status	status;
529 	struct nvme_qpair			*qpair;
530 	int					i;
531 
532 	for (i = 0; i < ctrlr->num_io_queues; i++) {
533 		qpair = &ctrlr->ioq[i];
534 
535 		status.done = 0;
536 		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair,
537 		    nvme_completion_poll_cb, &status);
538 		nvme_completion_poll(&status);
539 		if (nvme_completion_is_error(&status.cpl)) {
540 			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
541 			return (ENXIO);
542 		}
543 
544 		status.done = 0;
545 		nvme_ctrlr_cmd_create_io_sq(ctrlr, qpair,
546 		    nvme_completion_poll_cb, &status);
547 		nvme_completion_poll(&status);
548 		if (nvme_completion_is_error(&status.cpl)) {
549 			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
550 			return (ENXIO);
551 		}
552 	}
553 
554 	return (0);
555 }
556 
557 static int
558 nvme_ctrlr_delete_qpairs(struct nvme_controller *ctrlr)
559 {
560 	struct nvme_completion_poll_status	status;
561 	struct nvme_qpair			*qpair;
562 
563 	for (int i = 0; i < ctrlr->num_io_queues; i++) {
564 		qpair = &ctrlr->ioq[i];
565 
566 		status.done = 0;
567 		nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
568 		    nvme_completion_poll_cb, &status);
569 		nvme_completion_poll(&status);
570 		if (nvme_completion_is_error(&status.cpl)) {
571 			nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
572 			return (ENXIO);
573 		}
574 
575 		status.done = 0;
576 		nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
577 		    nvme_completion_poll_cb, &status);
578 		nvme_completion_poll(&status);
579 		if (nvme_completion_is_error(&status.cpl)) {
580 			nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
581 			return (ENXIO);
582 		}
583 	}
584 
585 	return (0);
586 }
587 
588 static int
589 nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
590 {
591 	struct nvme_namespace	*ns;
592 	uint32_t 		i;
593 
594 	for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
595 		ns = &ctrlr->ns[i];
596 		nvme_ns_construct(ns, i+1, ctrlr);
597 	}
598 
599 	return (0);
600 }
601 
602 static bool
603 is_log_page_id_valid(uint8_t page_id)
604 {
605 	switch (page_id) {
606 	case NVME_LOG_ERROR:
607 	case NVME_LOG_HEALTH_INFORMATION:
608 	case NVME_LOG_FIRMWARE_SLOT:
609 	case NVME_LOG_CHANGED_NAMESPACE:
610 	case NVME_LOG_COMMAND_EFFECT:
611 	case NVME_LOG_RES_NOTIFICATION:
612 	case NVME_LOG_SANITIZE_STATUS:
613 		return (true);
614 	}
615 
616 	return (false);
617 }
618 
619 static uint32_t
620 nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
621 {
622 	uint32_t	log_page_size;
623 
624 	switch (page_id) {
625 	case NVME_LOG_ERROR:
626 		log_page_size = min(
627 		    sizeof(struct nvme_error_information_entry) *
628 		    (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
629 		break;
630 	case NVME_LOG_HEALTH_INFORMATION:
631 		log_page_size = sizeof(struct nvme_health_information_page);
632 		break;
633 	case NVME_LOG_FIRMWARE_SLOT:
634 		log_page_size = sizeof(struct nvme_firmware_page);
635 		break;
636 	case NVME_LOG_CHANGED_NAMESPACE:
637 		log_page_size = sizeof(struct nvme_ns_list);
638 		break;
639 	case NVME_LOG_COMMAND_EFFECT:
640 		log_page_size = sizeof(struct nvme_command_effects_page);
641 		break;
642 	case NVME_LOG_RES_NOTIFICATION:
643 		log_page_size = sizeof(struct nvme_res_notification_page);
644 		break;
645 	case NVME_LOG_SANITIZE_STATUS:
646 		log_page_size = sizeof(struct nvme_sanitize_status_page);
647 		break;
648 	default:
649 		log_page_size = 0;
650 		break;
651 	}
652 
653 	return (log_page_size);
654 }
655 
656 static void
657 nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
658     uint8_t state)
659 {
660 	if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
661 		nvme_printf(ctrlr, "SMART WARNING: available spare space below threshold\n");
662 
663 	if (state & NVME_CRIT_WARN_ST_TEMPERATURE)
664 		nvme_printf(ctrlr, "SMART WARNING: temperature above threshold\n");
665 
666 	if (state & NVME_CRIT_WARN_ST_DEVICE_RELIABILITY)
667 		nvme_printf(ctrlr, "SMART WARNING: device reliability degraded\n");
668 
669 	if (state & NVME_CRIT_WARN_ST_READ_ONLY)
670 		nvme_printf(ctrlr, "SMART WARNING: media placed in read only mode\n");
671 
672 	if (state & NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP)
673 		nvme_printf(ctrlr, "SMART WARNING: volatile memory backup device failed\n");
674 
675 	if (state & NVME_CRIT_WARN_ST_PERSISTENT_MEMORY_REGION)
676 		nvme_printf(ctrlr, "SMART WARNING: persistent memory read only or unreliable\n");
677 
678 	if (state & NVME_CRIT_WARN_ST_RESERVED_MASK)
679 		nvme_printf(ctrlr, "SMART WARNING: unknown critical warning(s): state = 0x%02x\n",
680 		    state & NVME_CRIT_WARN_ST_RESERVED_MASK);
681 
682 	nvme_ctrlr_devctl(ctrlr, "SMART_ERROR", "state=0x%02x", state);
683 }
684 
685 static void
686 nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
687 {
688 	struct nvme_async_event_request	*aer = arg;
689 
690 	if (nvme_completion_is_error(cpl)) {
691 		/*
692 		 *  Do not retry failed async event requests.  This avoids
693 		 *  infinite loops where a new async event request is submitted
694 		 *  to replace the one just failed, only to fail again and
695 		 *  perpetuate the loop.
696 		 */
697 		return;
698 	}
699 
700 	/*
701 	 * Save the completion status. The associated log page ID is in bits
702 	 * 23:16 of completion entry dw0. Print a message and queue it for further
703 	 * processing.
704 	 */
705 	memcpy(&aer->cpl, cpl, sizeof(*cpl));
706 	aer->log_page_id = NVMEV(NVME_ASYNC_EVENT_LOG_PAGE_ID, cpl->cdw0);
707 	nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
708 	    " page 0x%02x)\n", NVMEV(NVME_ASYNC_EVENT_TYPE, cpl->cdw0),
709 	    NVMEV(NVME_ASYNC_EVENT_INFO, cpl->cdw0),
710 	    aer->log_page_id);
711 	taskqueue_enqueue(aer->ctrlr->taskqueue, &aer->task);
712 }
713 
714 static void
715 nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
716     struct nvme_async_event_request *aer)
717 {
718 	struct nvme_request *req;
719 
720 	/*
721 	 * We're racing the reset thread, so let that process submit this again.
722 	 * XXX does this really solve that race? And is that race even possible
723 	 * since we only reset when we've not heard from the card in a long
724 	 * time. Why would we get an AER in the middle of that just before we
725 	 * kick off the reset?
726 	 */
727 	if (ctrlr->is_resetting)
728 		return;
729 
730 	aer->ctrlr = ctrlr;
731 	req = nvme_allocate_request_null(M_WAITOK, nvme_ctrlr_async_event_cb,
732 	    aer);
733 	aer->req = req;
734 	aer->log_page_id = 0;		/* Not a valid page */
735 
736 	/*
737 	 * Disable timeout here, since asynchronous event requests should by
738 	 *  nature never be timed out.
739 	 */
740 	req->timeout = false;
741 	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
742 	nvme_ctrlr_submit_admin_request(ctrlr, req);
743 }
744 
745 static void
746 nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
747 {
748 	struct nvme_completion_poll_status	status;
749 	struct nvme_async_event_request		*aer;
750 	uint32_t				i;
751 
752 	ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
753 	    NVME_CRIT_WARN_ST_DEVICE_RELIABILITY |
754 	    NVME_CRIT_WARN_ST_READ_ONLY |
755 	    NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP;
756 	if (ctrlr->cdata.ver >= NVME_REV(1, 2))
757 		ctrlr->async_event_config |=
758 		    ctrlr->cdata.oaes & (NVME_ASYNC_EVENT_NS_ATTRIBUTE |
759 			NVME_ASYNC_EVENT_FW_ACTIVATE);
760 
761 	status.done = 0;
762 	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
763 	    0, NULL, 0, nvme_completion_poll_cb, &status);
764 	nvme_completion_poll(&status);
765 	if (nvme_completion_is_error(&status.cpl) ||
766 	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
767 	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
768 		nvme_printf(ctrlr, "temperature threshold not supported\n");
769 	} else
770 		ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;
771 
772 	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
773 	    ctrlr->async_event_config, NULL, NULL);
774 
775 	/* aerl is a zero-based value, so we need to add 1 here. */
776 	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
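	/* E.g. cdata.aerl == 3 means the controller supports up to 4 outstanding AERs. */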
777 
778 	for (i = 0; i < ctrlr->num_aers; i++) {
779 		aer = &ctrlr->aer[i];
780 		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
781 	}
782 }
783 
784 static void
785 nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
786 {
787 	ctrlr->int_coal_time = 0;
788 	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
789 	    &ctrlr->int_coal_time);
790 
791 	ctrlr->int_coal_threshold = 0;
792 	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
793 	    &ctrlr->int_coal_threshold);
794 
795 	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
796 	    ctrlr->int_coal_threshold, NULL, NULL);
797 }
798 
799 static void
800 nvme_ctrlr_hmb_free(struct nvme_controller *ctrlr)
801 {
802 	struct nvme_hmb_chunk *hmbc;
803 	int i;
804 
805 	if (ctrlr->hmb_desc_paddr) {
806 		bus_dmamap_unload(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map);
807 		bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
808 		    ctrlr->hmb_desc_map);
809 		ctrlr->hmb_desc_paddr = 0;
810 	}
811 	if (ctrlr->hmb_desc_tag) {
812 		bus_dma_tag_destroy(ctrlr->hmb_desc_tag);
813 		ctrlr->hmb_desc_tag = NULL;
814 	}
815 	for (i = 0; i < ctrlr->hmb_nchunks; i++) {
816 		hmbc = &ctrlr->hmb_chunks[i];
817 		bus_dmamap_unload(ctrlr->hmb_tag, hmbc->hmbc_map);
818 		bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
819 		    hmbc->hmbc_map);
820 	}
821 	ctrlr->hmb_nchunks = 0;
822 	if (ctrlr->hmb_tag) {
823 		bus_dma_tag_destroy(ctrlr->hmb_tag);
824 		ctrlr->hmb_tag = NULL;
825 	}
826 	if (ctrlr->hmb_chunks) {
827 		free(ctrlr->hmb_chunks, M_NVME);
828 		ctrlr->hmb_chunks = NULL;
829 	}
830 }
831 
832 static void
833 nvme_ctrlr_hmb_alloc(struct nvme_controller *ctrlr)
834 {
835 	struct nvme_hmb_chunk *hmbc;
836 	size_t pref, min, minc, size;
837 	int err, i;
838 	uint64_t max;
839 
840 	/* Limit HMB to 5% of RAM size per device by default. */
841 	max = (uint64_t)physmem * PAGE_SIZE / 20;
842 	TUNABLE_UINT64_FETCH("hw.nvme.hmb_max", &max);
843 
844 	/*
845 	 * Units of Host Memory Buffer in the Identify info are always in terms
846 	 * of 4k units.
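	 * E.g. hmmin == 256 asks the host for at least 256 * 4 KiB = 1 MiB.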
847 	 */
848 	min = (long long unsigned)ctrlr->cdata.hmmin * NVME_HMB_UNITS;
849 	if (max == 0 || max < min)
850 		return;
851 	pref = MIN((long long unsigned)ctrlr->cdata.hmpre * NVME_HMB_UNITS, max);
852 	minc = MAX(ctrlr->cdata.hmminds * NVME_HMB_UNITS, ctrlr->page_size);
853 	if (min > 0 && ctrlr->cdata.hmmaxd > 0)
854 		minc = MAX(minc, min / ctrlr->cdata.hmmaxd);
855 	ctrlr->hmb_chunk = pref;
856 
857 again:
858 	/*
859 	 * However, the chunk sizes, number of chunks, and alignment of chunks
860 	 * are all based on the current MPS (ctrlr->page_size).
861 	 */
862 	ctrlr->hmb_chunk = roundup2(ctrlr->hmb_chunk, ctrlr->page_size);
863 	ctrlr->hmb_nchunks = howmany(pref, ctrlr->hmb_chunk);
864 	if (ctrlr->cdata.hmmaxd > 0 && ctrlr->hmb_nchunks > ctrlr->cdata.hmmaxd)
865 		ctrlr->hmb_nchunks = ctrlr->cdata.hmmaxd;
866 	ctrlr->hmb_chunks = malloc(sizeof(struct nvme_hmb_chunk) *
867 	    ctrlr->hmb_nchunks, M_NVME, M_WAITOK);
868 	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
869 	    ctrlr->page_size, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
870 	    ctrlr->hmb_chunk, 1, ctrlr->hmb_chunk, 0, NULL, NULL, &ctrlr->hmb_tag);
871 	if (err != 0) {
872 		nvme_printf(ctrlr, "HMB tag create failed %d\n", err);
873 		nvme_ctrlr_hmb_free(ctrlr);
874 		return;
875 	}
876 
877 	for (i = 0; i < ctrlr->hmb_nchunks; i++) {
878 		hmbc = &ctrlr->hmb_chunks[i];
879 		if (bus_dmamem_alloc(ctrlr->hmb_tag,
880 		    (void **)&hmbc->hmbc_vaddr, BUS_DMA_NOWAIT,
881 		    &hmbc->hmbc_map)) {
882 			nvme_printf(ctrlr, "failed to alloc HMB\n");
883 			break;
884 		}
885 		if (bus_dmamap_load(ctrlr->hmb_tag, hmbc->hmbc_map,
886 		    hmbc->hmbc_vaddr, ctrlr->hmb_chunk, nvme_single_map,
887 		    &hmbc->hmbc_paddr, BUS_DMA_NOWAIT) != 0) {
888 			bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
889 			    hmbc->hmbc_map);
890 			nvme_printf(ctrlr, "failed to load HMB\n");
891 			break;
892 		}
893 		bus_dmamap_sync(ctrlr->hmb_tag, hmbc->hmbc_map,
894 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
895 	}
896 
897 	if (i < ctrlr->hmb_nchunks && i * ctrlr->hmb_chunk < min &&
898 	    ctrlr->hmb_chunk / 2 >= minc) {
899 		ctrlr->hmb_nchunks = i;
900 		nvme_ctrlr_hmb_free(ctrlr);
901 		ctrlr->hmb_chunk /= 2;
902 		goto again;
903 	}
904 	ctrlr->hmb_nchunks = i;
905 	if (ctrlr->hmb_nchunks * ctrlr->hmb_chunk < min) {
906 		nvme_ctrlr_hmb_free(ctrlr);
907 		return;
908 	}
909 
910 	size = sizeof(struct nvme_hmb_desc) * ctrlr->hmb_nchunks;
911 	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
912 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
913 	    size, 1, size, 0, NULL, NULL, &ctrlr->hmb_desc_tag);
914 	if (err != 0) {
915 		nvme_printf(ctrlr, "HMB desc tag create failed %d\n", err);
916 		nvme_ctrlr_hmb_free(ctrlr);
917 		return;
918 	}
919 	if (bus_dmamem_alloc(ctrlr->hmb_desc_tag,
920 	    (void **)&ctrlr->hmb_desc_vaddr, BUS_DMA_WAITOK,
921 	    &ctrlr->hmb_desc_map)) {
922 		nvme_printf(ctrlr, "failed to alloc HMB desc\n");
923 		nvme_ctrlr_hmb_free(ctrlr);
924 		return;
925 	}
926 	if (bus_dmamap_load(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
927 	    ctrlr->hmb_desc_vaddr, size, nvme_single_map,
928 	    &ctrlr->hmb_desc_paddr, BUS_DMA_NOWAIT) != 0) {
929 		bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
930 		    ctrlr->hmb_desc_map);
931 		nvme_printf(ctrlr, "failed to load HMB desc\n");
932 		nvme_ctrlr_hmb_free(ctrlr);
933 		return;
934 	}
935 
936 	for (i = 0; i < ctrlr->hmb_nchunks; i++) {
937 		memset(&ctrlr->hmb_desc_vaddr[i], 0,
938 		    sizeof(struct nvme_hmb_desc));
939 		ctrlr->hmb_desc_vaddr[i].addr =
940 		    htole64(ctrlr->hmb_chunks[i].hmbc_paddr);
941 		ctrlr->hmb_desc_vaddr[i].size = htole32(ctrlr->hmb_chunk / ctrlr->page_size);
942 	}
943 	bus_dmamap_sync(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
944 	    BUS_DMASYNC_PREWRITE);
945 
946 	nvme_printf(ctrlr, "Allocated %lluMB host memory buffer\n",
947 	    (long long unsigned)ctrlr->hmb_nchunks * ctrlr->hmb_chunk
948 	    / 1024 / 1024);
949 }
950 
951 static void
952 nvme_ctrlr_hmb_enable(struct nvme_controller *ctrlr, bool enable, bool memret)
953 {
954 	struct nvme_completion_poll_status	status;
955 	uint32_t cdw11;
956 
957 	cdw11 = 0;
958 	if (enable)
959 		cdw11 |= 1;
960 	if (memret)
961 		cdw11 |= 2;
962 	status.done = 0;
963 	nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_HOST_MEMORY_BUFFER, cdw11,
964 	    ctrlr->hmb_nchunks * ctrlr->hmb_chunk / ctrlr->page_size,
965 	    ctrlr->hmb_desc_paddr, ctrlr->hmb_desc_paddr >> 32,
966 	    ctrlr->hmb_nchunks, NULL, 0,
967 	    nvme_completion_poll_cb, &status);
968 	nvme_completion_poll(&status);
969 	if (nvme_completion_is_error(&status.cpl))
970 		nvme_printf(ctrlr, "nvme_ctrlr_hmb_enable failed!\n");
971 }
972 
973 static void
974 nvme_ctrlr_start(void *ctrlr_arg, bool resetting)
975 {
976 	struct nvme_controller *ctrlr = ctrlr_arg;
977 	uint32_t old_num_io_queues;
978 	int i;
979 
980 	TSENTER();
981 
982 	/*
983 	 * Only reset adminq here when we are restarting the
984 	 *  controller after a reset.  During initialization,
985 	 *  we have already submitted admin commands to get
986 	 *  the number of I/O queues supported, so cannot reset
987 	 *  the adminq again here.
988 	 */
989 	if (resetting) {
990 		nvme_qpair_reset(&ctrlr->adminq);
991 		nvme_admin_qpair_enable(&ctrlr->adminq);
992 	}
993 
994 	if (ctrlr->ioq != NULL) {
995 		for (i = 0; i < ctrlr->num_io_queues; i++)
996 			nvme_qpair_reset(&ctrlr->ioq[i]);
997 	}
998 
999 	/*
1000 	 * If it was a reset on initialization command timeout, just
1001 	 * return here, letting initialization code fail gracefully.
1002 	 */
1003 	if (resetting && !ctrlr->is_initialized)
1004 		return;
1005 
1006 	if (resetting && nvme_ctrlr_identify(ctrlr) != 0) {
1007 		nvme_ctrlr_fail(ctrlr, false);
1008 		return;
1009 	}
1010 
1011 	/*
1012 	 * The number of qpairs is determined during controller initialization,
1013 	 *  including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
1014 	 *  HW limit.  We call SET_FEATURES again here so that it gets called
1015 	 *  after any reset for controllers that depend on the driver to
1016 	 *  explicitly specify how many queues it will use.  This value should
1017 	 *  never change between resets, so panic if somehow that does happen.
1018 	 */
1019 	if (resetting) {
1020 		old_num_io_queues = ctrlr->num_io_queues;
1021 		if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
1022 			nvme_ctrlr_fail(ctrlr, false);
1023 			return;
1024 		}
1025 
1026 		if (old_num_io_queues != ctrlr->num_io_queues) {
1027 			panic("num_io_queues changed from %u to %u",
1028 			      old_num_io_queues, ctrlr->num_io_queues);
1029 		}
1030 	}
1031 
1032 	if (ctrlr->cdata.hmpre > 0 && ctrlr->hmb_nchunks == 0) {
1033 		nvme_ctrlr_hmb_alloc(ctrlr);
1034 		if (ctrlr->hmb_nchunks > 0)
1035 			nvme_ctrlr_hmb_enable(ctrlr, true, false);
1036 	} else if (ctrlr->hmb_nchunks > 0)
1037 		nvme_ctrlr_hmb_enable(ctrlr, true, true);
1038 
1039 	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
1040 		nvme_ctrlr_fail(ctrlr, false);
1041 		return;
1042 	}
1043 
1044 	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
1045 		nvme_ctrlr_fail(ctrlr, false);
1046 		return;
1047 	}
1048 
1049 	nvme_ctrlr_configure_aer(ctrlr);
1050 	nvme_ctrlr_configure_int_coalescing(ctrlr);
1051 
1052 	for (i = 0; i < ctrlr->num_io_queues; i++)
1053 		nvme_io_qpair_enable(&ctrlr->ioq[i]);
1054 	TSEXIT();
1055 }
1056 
1057 void
1058 nvme_ctrlr_start_config_hook(void *arg)
1059 {
1060 	struct nvme_controller *ctrlr = arg;
1061 
1062 	TSENTER();
1063 
1064 	if (nvme_ctrlr_hw_reset(ctrlr) != 0 || ctrlr->fail_on_reset != 0) {
1065 		nvme_ctrlr_fail(ctrlr, true);
1066 		config_intrhook_disestablish(&ctrlr->config_hook);
1067 		return;
1068 	}
1069 
1070 	nvme_qpair_reset(&ctrlr->adminq);
1071 	nvme_admin_qpair_enable(&ctrlr->adminq);
1072 
1073 	if (nvme_ctrlr_identify(ctrlr) == 0 &&
1074 	    nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
1075 	    nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
1076 		nvme_ctrlr_start(ctrlr, false);
1077 	else
1078 		nvme_ctrlr_fail(ctrlr, false);
1079 
1080 	nvme_sysctl_initialize_ctrlr(ctrlr);
1081 	config_intrhook_disestablish(&ctrlr->config_hook);
1082 
1083 	if (!ctrlr->is_failed) {
1084 		device_t child;
1085 
1086 		ctrlr->is_initialized = true;
1087 		child = device_add_child(ctrlr->dev, NULL, DEVICE_UNIT_ANY);
1088 		device_set_ivars(child, ctrlr);
1089 		bus_attach_children(ctrlr->dev);
1090 
1091 		/*
1092 		 * Now notify the child of all the known namespaces
1093 		 */
1094 		for (int i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
1095 			struct nvme_namespace	*ns = &ctrlr->ns[i];
1096 
1097 			if (ns->data.nsze == 0)
1098 				continue;
1099 			NVME_NS_ADDED(child, ns);
1100 		}
1101 	}
1102 	TSEXIT();
1103 }
1104 
1105 static void
1106 nvme_ctrlr_reset_task(void *arg, int pending)
1107 {
1108 	struct nvme_controller	*ctrlr = arg;
1109 	int			status;
1110 
1111 	nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"start\"");
1112 	status = nvme_ctrlr_hw_reset(ctrlr);
1113 	if (status == 0) {
1114 		nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"success\"");
1115 		nvme_ctrlr_start(ctrlr, true);
1116 	} else {
1117 		nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"timed_out\"");
1118 		nvme_ctrlr_fail(ctrlr, true);
1119 	}
1120 
1121 	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1122 }
1123 
1124 static void
1125 nvme_ctrlr_aer_done(void *arg, const struct nvme_completion *cpl)
1126 {
1127 	struct nvme_async_event_request	*aer = arg;
1128 
1129 	mtx_lock(&aer->mtx);
1130 	if (nvme_completion_is_error(cpl))
1131 		aer->log_page_size = (uint32_t)-1;
1132 	else
1133 		aer->log_page_size = nvme_ctrlr_get_log_page_size(
1134 		    aer->ctrlr, aer->log_page_id);
1135 	wakeup(aer);
1136 	mtx_unlock(&aer->mtx);
1137 }
1138 
1139 static void
1140 nvme_ctrlr_aer_task(void *arg, int pending)
1141 {
1142 	struct nvme_async_event_request	*aer = arg;
1143 	struct nvme_controller	*ctrlr = aer->ctrlr;
1144 	uint32_t len;
1145 
1146 	/*
1147 	 * We're resetting, so just punt.
1148 	 */
1149 	if (ctrlr->is_resetting)
1150 		return;
1151 
1152 	if (!is_log_page_id_valid(aer->log_page_id)) {
1153 		/*
1154 		 * Repost another asynchronous event request to replace the one
1155 		 * that just completed.
1156 		 */
1157 		nvme_notify_async(ctrlr, &aer->cpl, aer->log_page_id, NULL, 0);
1158 		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
1159 		goto out;
1160 	}
1161 
1162 	nvme_ctrlr_devctl(ctrlr, "aen", "type=0x%x info=0x%x page=0x%x",
1163 	    NVMEV(NVME_ASYNC_EVENT_TYPE, aer->cpl.cdw0),
1164 	    NVMEV(NVME_ASYNC_EVENT_INFO, aer->cpl.cdw0), aer->log_page_id);
1165 
1166 	aer->log_page_size = 0;
1167 	len = nvme_ctrlr_get_log_page_size(aer->ctrlr, aer->log_page_id);
1168 	nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
1169 	    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer, len,
1170 	    nvme_ctrlr_aer_done, aer);
1171 	mtx_lock(&aer->mtx);
1172 	while (aer->log_page_size == 0)
1173 		mtx_sleep(aer, &aer->mtx, PRIBIO, "nvme_pt", 0);
1174 	mtx_unlock(&aer->mtx);
1175 
1176 	if (aer->log_page_size == (uint32_t)-1) {
1177 		/*
1178 		 * If the log page fetch for some reason completed with an
1179 		 * error, don't pass log page data to the consumers.  In
1180 		 * practice, this case should never happen.
1181 		 */
1182 		nvme_notify_async(aer->ctrlr, &aer->cpl, aer->log_page_id,
1183 		    NULL, 0);
1184 		goto out;
1185 	}
1186 
1187 	/* Convert data to host endian */
1188 	switch (aer->log_page_id) {
1189 	case NVME_LOG_ERROR: {
1190 		struct nvme_error_information_entry *err =
1191 		    (struct nvme_error_information_entry *)aer->log_page_buffer;
1192 		for (int i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
1193 			nvme_error_information_entry_swapbytes(err++);
1194 		break;
1195 	}
1196 	case NVME_LOG_HEALTH_INFORMATION:
1197 		nvme_health_information_page_swapbytes(
1198 			(struct nvme_health_information_page *)aer->log_page_buffer);
1199 		break;
1200 	case NVME_LOG_CHANGED_NAMESPACE:
1201 		nvme_ns_list_swapbytes(
1202 			(struct nvme_ns_list *)aer->log_page_buffer);
1203 		break;
1204 	case NVME_LOG_COMMAND_EFFECT:
1205 		nvme_command_effects_page_swapbytes(
1206 			(struct nvme_command_effects_page *)aer->log_page_buffer);
1207 		break;
1208 	case NVME_LOG_RES_NOTIFICATION:
1209 		nvme_res_notification_page_swapbytes(
1210 			(struct nvme_res_notification_page *)aer->log_page_buffer);
1211 		break;
1212 	case NVME_LOG_SANITIZE_STATUS:
1213 		nvme_sanitize_status_page_swapbytes(
1214 			(struct nvme_sanitize_status_page *)aer->log_page_buffer);
1215 		break;
1216 	default:
1217 		break;
1218 	}
1219 
1220 	if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
1221 		struct nvme_health_information_page *health_info =
1222 		    (struct nvme_health_information_page *)aer->log_page_buffer;
1223 
1224 		/*
1225 		 * Critical warnings reported through the SMART/health log page
1226 		 * are persistent, so clear the associated bits in the async
1227 		 * event config so that we do not receive repeated notifications
1228 		 * for the same event.
1229 		 */
1230 		nvme_ctrlr_log_critical_warnings(aer->ctrlr,
1231 		    health_info->critical_warning);
1232 		aer->ctrlr->async_event_config &=
1233 		    ~health_info->critical_warning;
1234 		nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
1235 		    aer->ctrlr->async_event_config, NULL, NULL);
1236 	} else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE) {
1237 		device_t *children;
1238 		int n_children;
1239 		struct nvme_ns_list *nsl;
1240 
1241 		if (device_get_children(aer->ctrlr->dev, &children, &n_children) != 0) {
1242 			children = NULL;
1243 			n_children = 0;
1244 		}
1245 		nsl = (struct nvme_ns_list *)aer->log_page_buffer;
1246 		for (int i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
1247 			/*
1248 			 * I think we need to query the name space here and see
1249 			 * if it went away, arrived, or changed in size and call
1250 			 * the nuanced routine (after constructing or before
1251 			 * destructing the namespace). XXX needs more work XXX.
1252 			 */
1253 			for (int j = 0; j < n_children; j++)
1254 				NVME_NS_CHANGED(children[j], nsl->ns[i]);
1255 		}
1256 		free(children, M_TEMP);
1257 	}
1258 
1259 	/*
1260 	 * Pass the cpl data from the original async event completion, not the
1261 	 * log page fetch.
1262 	 */
1263 	nvme_notify_async(aer->ctrlr, &aer->cpl, aer->log_page_id,
1264 	    aer->log_page_buffer, aer->log_page_size);
1265 
1266 	/*
1267 	 * Repost another asynchronous event request to replace the one
1268 	 *  that just completed.
1269 	 */
1270 out:
1271 	nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
1272 }
1273 
1274 /*
1275  * Poll all the queues enabled on the device for completion.
1276  */
1277 void
1278 nvme_ctrlr_poll(struct nvme_controller *ctrlr)
1279 {
1280 	int i;
1281 
1282 	nvme_qpair_process_completions(&ctrlr->adminq);
1283 
1284 	for (i = 0; i < ctrlr->num_io_queues; i++)
1285 		if (ctrlr->ioq && ctrlr->ioq[i].cpl)
1286 			nvme_qpair_process_completions(&ctrlr->ioq[i]);
1287 }
1288 
1289 /*
1290  * Poll the single-vector interrupt case: num_io_queues will be 1 and
1291  * there's only a single vector. While we're polling, we mask further
1292  * interrupts in the controller.
1293  */
1294 void
1295 nvme_ctrlr_shared_handler(void *arg)
1296 {
1297 	struct nvme_controller *ctrlr = arg;
1298 
1299 	nvme_mmio_write_4(ctrlr, intms, 1);
1300 	nvme_ctrlr_poll(ctrlr);
1301 	nvme_mmio_write_4(ctrlr, intmc, 1);
1302 }
1303 
1304 #define NVME_MAX_PAGES  (int)(1024 / sizeof(vm_page_t))
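/*
 * With 8-byte pointers this evaluates to 128 pages, i.e. up to 512 KiB of
 * wired user buffer per passthrough request (assuming 4 KiB pages).
 */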
1305 
1306 static int
1307 nvme_user_ioctl_req(vm_offset_t addr, size_t len, bool is_read,
1308     vm_page_t *upages, int max_pages, int *npagesp, struct nvme_request **req,
1309     nvme_cb_fn_t cb_fn, void *cb_arg)
1310 {
1311 	vm_prot_t prot = VM_PROT_READ;
1312 	int err;
1313 
1314 	if (is_read)
1315 		prot |= VM_PROT_WRITE;	/* Device will write to host memory */
1316 	err = vm_fault_hold_pages(&curproc->p_vmspace->vm_map,
1317 	    addr, len, prot, upages, max_pages, npagesp);
1318 	if (err != 0)
1319 		return (err);
1320 	*req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);
1321 	(*req)->payload = memdesc_vmpages(upages, len, addr & PAGE_MASK);
1322 	(*req)->payload_valid = true;
1323 	return (0);
1324 }
1325 
1326 static void
1327 nvme_user_ioctl_free(vm_page_t *pages, int npage)
1328 {
1329 	vm_page_unhold_pages(pages, npage);
1330 }
1331 
1332 static void
1333 nvme_pt_done(void *arg, const struct nvme_completion *cpl)
1334 {
1335 	struct nvme_pt_command *pt = arg;
1336 	struct mtx *mtx = pt->driver_lock;
1337 	uint16_t status;
1338 
1339 	bzero(&pt->cpl, sizeof(pt->cpl));
1340 	pt->cpl.cdw0 = cpl->cdw0;
1341 
1342 	status = cpl->status;
1343 	status &= ~NVMEM(NVME_STATUS_P);
1344 	pt->cpl.status = status;
1345 
1346 	mtx_lock(mtx);
1347 	pt->driver_lock = NULL;
1348 	wakeup(pt);
1349 	mtx_unlock(mtx);
1350 }
1351 
1352 int
1353 nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
1354     struct nvme_pt_command *pt, uint32_t nsid, int is_user,
1355     int is_admin_cmd)
1356 {
1357 	struct nvme_request *req;
1358 	struct mtx *mtx;
1359 	int ret = 0;
1360 	int npages = 0;
1361 	vm_page_t upages[NVME_MAX_PAGES];
1362 
1363 	if (pt->len > 0) {
1364 		if (pt->len > ctrlr->max_xfer_size) {
1365 			nvme_printf(ctrlr,
1366 			    "len (%d) exceeds max_xfer_size (%d)\n",
1367 			    pt->len, ctrlr->max_xfer_size);
1368 			return (EIO);
1369 		}
1370 		if (is_user) {
1371 			ret = nvme_user_ioctl_req((vm_offset_t)pt->buf, pt->len,
1372 			    pt->is_read, upages, nitems(upages), &npages, &req,
1373 			    nvme_pt_done, pt);
1374 			if (ret != 0)
1375 				return (ret);
1376 		} else
1377 			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
1378 			    M_WAITOK, nvme_pt_done, pt);
1379 	} else
1380 		req = nvme_allocate_request_null(M_WAITOK, nvme_pt_done, pt);
1381 
1382 	/* Assume user space already converted to little-endian */
1383 	req->cmd.opc = pt->cmd.opc;
1384 	req->cmd.fuse = pt->cmd.fuse;
1385 	req->cmd.rsvd2 = pt->cmd.rsvd2;
1386 	req->cmd.rsvd3 = pt->cmd.rsvd3;
1387 	req->cmd.cdw10 = pt->cmd.cdw10;
1388 	req->cmd.cdw11 = pt->cmd.cdw11;
1389 	req->cmd.cdw12 = pt->cmd.cdw12;
1390 	req->cmd.cdw13 = pt->cmd.cdw13;
1391 	req->cmd.cdw14 = pt->cmd.cdw14;
1392 	req->cmd.cdw15 = pt->cmd.cdw15;
1393 
1394 	req->cmd.nsid = htole32(nsid);
1395 
1396 	mtx = mtx_pool_find(mtxpool_sleep, pt);
1397 	pt->driver_lock = mtx;
1398 
1399 	if (is_admin_cmd)
1400 		nvme_ctrlr_submit_admin_request(ctrlr, req);
1401 	else
1402 		nvme_ctrlr_submit_io_request(ctrlr, req);
1403 
1404 	mtx_lock(mtx);
1405 	while (pt->driver_lock != NULL)
1406 		mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
1407 	mtx_unlock(mtx);
1408 
1409 	if (npages > 0)
1410 		nvme_user_ioctl_free(upages, npages);
1411 
1412 	return (ret);
1413 }
1414 
1415 static void
1416 nvme_npc_done(void *arg, const struct nvme_completion *cpl)
1417 {
1418 	struct nvme_passthru_cmd *npc = arg;
1419 	struct mtx *mtx = (void *)(uintptr_t)npc->metadata;
1420 
1421 	npc->result = cpl->cdw0;	/* cpl in host order by now */
1422 	mtx_lock(mtx);
1423 	npc->metadata = 0;
1424 	wakeup(npc);
1425 	mtx_unlock(mtx);
1426 }
1427 
1428 /* XXX refactor? */
1429 
1430 int
1431 nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
1432     struct nvme_passthru_cmd *npc, uint32_t nsid, bool is_user, bool is_admin)
1433 {
1434 	struct nvme_request	*req;
1435 	struct mtx		*mtx;
1436 	int			ret = 0;
1437 	int			npages = 0;
1438 	vm_page_t		upages[NVME_MAX_PAGES];
1439 
1440 	/*
1441 	 * We don't support metadata.
1442 	 */
1443 	if (npc->metadata != 0 || npc->metadata_len != 0)
1444 		return (EIO);
1445 
1446 	if (npc->data_len > 0 && npc->addr != 0) {
1447 		if (npc->data_len > ctrlr->max_xfer_size) {
1448 			nvme_printf(ctrlr,
1449 			    "data_len (%d) exceeds max_xfer_size (%d)\n",
1450 			    npc->data_len, ctrlr->max_xfer_size);
1451 			return (EIO);
1452 		}
1453 		if (is_user) {
1454 			ret = nvme_user_ioctl_req(npc->addr, npc->data_len,
1455 			    npc->opcode & 0x1, upages, nitems(upages), &npages,
1456 			    &req, nvme_npc_done, npc);
1457 			if (ret != 0)
1458 				return (ret);
1459 		} else
1460 			req = nvme_allocate_request_vaddr(
1461 			    (void *)(uintptr_t)npc->addr, npc->data_len,
1462 			    M_WAITOK, nvme_npc_done, npc);
1463 	} else
1464 		req = nvme_allocate_request_null(M_WAITOK, nvme_npc_done, npc);
1465 
1466 	req->cmd.opc = npc->opcode;
1467 	req->cmd.fuse = npc->flags;
1468 	req->cmd.rsvd2 = htole32(npc->cdw2);
1469 	req->cmd.rsvd3 = htole32(npc->cdw3);
1470 	req->cmd.cdw10 = htole32(npc->cdw10);
1471 	req->cmd.cdw11 = htole32(npc->cdw11);
1472 	req->cmd.cdw12 = htole32(npc->cdw12);
1473 	req->cmd.cdw13 = htole32(npc->cdw13);
1474 	req->cmd.cdw14 = htole32(npc->cdw14);
1475 	req->cmd.cdw15 = htole32(npc->cdw15);
1476 
1477 	req->cmd.nsid = htole32(nsid);
1478 
1479 	mtx = mtx_pool_find(mtxpool_sleep, npc);
1480 	npc->metadata = (uintptr_t) mtx;
1481 
1482 	/* XXX no timeout passed down */
1483 	if (is_admin)
1484 		nvme_ctrlr_submit_admin_request(ctrlr, req);
1485 	else
1486 		nvme_ctrlr_submit_io_request(ctrlr, req);
1487 
1488 	mtx_lock(mtx);
1489 	while (npc->metadata != 0)
1490 		mtx_sleep(npc, mtx, PRIBIO, "nvme_npc", 0);
1491 	mtx_unlock(mtx);
1492 
1493 	if (npages > 0)
1494 		nvme_user_ioctl_free(upages, npages);
1495 
1496 	return (ret);
1497 }
1498 
1499 static int
1500 nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
1501     struct thread *td)
1502 {
1503 	struct nvme_controller			*ctrlr;
1504 	struct nvme_pt_command			*pt;
1505 
1506 	ctrlr = cdev->si_drv1;
1507 
1508 	switch (cmd) {
1509 	case NVME_IOCTL_RESET: /* Linux compat */
1510 	case NVME_RESET_CONTROLLER:
1511 		nvme_ctrlr_reset(ctrlr);
1512 		break;
1513 	case NVME_PASSTHROUGH_CMD:
1514 		pt = (struct nvme_pt_command *)arg;
1515 		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
1516 		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
1517 	case NVME_GET_NSID:
1518 	{
1519 		struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;
1520 		strlcpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
1521 		    sizeof(gnsid->cdev));
1522 		gnsid->nsid = 0;
1523 		break;
1524 	}
1525 	case NVME_GET_MAX_XFER_SIZE:
1526 		*(uint64_t *)arg = ctrlr->max_xfer_size;
1527 		break;
1528 	case NVME_GET_CONTROLLER_DATA:
1529 		memcpy(arg, &ctrlr->cdata, sizeof(ctrlr->cdata));
1530 		break;
1531 	case DIOCGIDENT: {
1532 		uint8_t *sn = arg;
1533 		nvme_cdata_get_disk_ident(&ctrlr->cdata, sn);
1534 		break;
1535 	}
1536 	/* Linux Compatible (see nvme_linux.h) */
1537 	case NVME_IOCTL_ID:
1538 		td->td_retval[0] = 0xfffffffful;
1539 		return (0);
1540 
1541 	case NVME_IOCTL_ADMIN_CMD:
1542 	case NVME_IOCTL_IO_CMD: {
1543 		struct nvme_passthru_cmd *npc = (struct nvme_passthru_cmd *)arg;
1544 
1545 		return (nvme_ctrlr_linux_passthru_cmd(ctrlr, npc, npc->nsid, true,
1546 		    cmd == NVME_IOCTL_ADMIN_CMD));
1547 	}
1548 
1549 	default:
1550 		return (ENOTTY);
1551 	}
1552 
1553 	return (0);
1554 }
1555 
1556 static struct cdevsw nvme_ctrlr_cdevsw = {
1557 	.d_version =	D_VERSION,
1558 	.d_flags =	0,
1559 	.d_ioctl =	nvme_ctrlr_ioctl
1560 };
1561 
1562 int
1563 nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
1564 {
1565 	struct make_dev_args	md_args;
1566 	uint32_t	cap_lo;
1567 	uint32_t	cap_hi;
1568 	uint32_t	to, vs, pmrcap;
1569 	int		status, timeout_period;
1570 
1571 	ctrlr->dev = dev;
1572 
1573 	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
1574 	if (bus_get_domain(dev, &ctrlr->domain) != 0)
1575 		ctrlr->domain = 0;
1576 
1577 	ctrlr->cap_lo = cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
1578 	if (bootverbose) {
1579 		device_printf(dev, "CapLo: 0x%08x: MQES %u%s%s%s%s, TO %u\n",
1580 		    cap_lo, NVME_CAP_LO_MQES(cap_lo),
1581 		    NVME_CAP_LO_CQR(cap_lo) ? ", CQR" : "",
1582 		    NVME_CAP_LO_AMS(cap_lo) ? ", AMS" : "",
1583 		    (NVME_CAP_LO_AMS(cap_lo) & 0x1) ? " WRRwUPC" : "",
1584 		    (NVME_CAP_LO_AMS(cap_lo) & 0x2) ? " VS" : "",
1585 		    NVME_CAP_LO_TO(cap_lo));
1586 	}
1587 	ctrlr->cap_hi = cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
1588 	if (bootverbose) {
1589 		device_printf(dev, "CapHi: 0x%08x: DSTRD %u%s, CSS %x%s, "
1590 		    "CPS %x, MPSMIN %u, MPSMAX %u%s%s%s%s%s\n", cap_hi,
1591 		    NVME_CAP_HI_DSTRD(cap_hi),
1592 		    NVME_CAP_HI_NSSRS(cap_hi) ? ", NSSRS" : "",
1593 		    NVME_CAP_HI_CSS(cap_hi),
1594 		    NVME_CAP_HI_BPS(cap_hi) ? ", BPS" : "",
1595 		    NVME_CAP_HI_CPS(cap_hi),
1596 		    NVME_CAP_HI_MPSMIN(cap_hi),
1597 		    NVME_CAP_HI_MPSMAX(cap_hi),
1598 		    NVME_CAP_HI_PMRS(cap_hi) ? ", PMRS" : "",
1599 		    NVME_CAP_HI_CMBS(cap_hi) ? ", CMBS" : "",
1600 		    NVME_CAP_HI_NSSS(cap_hi) ? ", NSSS" : "",
1601 		    NVME_CAP_HI_CRWMS(cap_hi) ? ", CRWMS" : "",
1602 		    NVME_CAP_HI_CRIMS(cap_hi) ? ", CRIMS" : "");
1603 	}
1604 	if (bootverbose) {
1605 		vs = nvme_mmio_read_4(ctrlr, vs);
1606 		device_printf(dev, "Version: 0x%08x: %d.%d\n", vs,
1607 		    NVME_MAJOR(vs), NVME_MINOR(vs));
1608 	}
1609 	if (bootverbose && NVME_CAP_HI_PMRS(cap_hi)) {
1610 		pmrcap = nvme_mmio_read_4(ctrlr, pmrcap);
1611 		device_printf(dev, "PMRCap: 0x%08x: BIR %u%s%s, PMRTU %u, "
1612 		    "PMRWBM %x, PMRTO %u%s\n", pmrcap,
1613 		    NVME_PMRCAP_BIR(pmrcap),
1614 		    NVME_PMRCAP_RDS(pmrcap) ? ", RDS" : "",
1615 		    NVME_PMRCAP_WDS(pmrcap) ? ", WDS" : "",
1616 		    NVME_PMRCAP_PMRTU(pmrcap),
1617 		    NVME_PMRCAP_PMRWBM(pmrcap),
1618 		    NVME_PMRCAP_PMRTO(pmrcap),
1619 		    NVME_PMRCAP_CMSS(pmrcap) ? ", CMSS" : "");
1620 	}
1621 
1622 	ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2;
1623 
1624 	ctrlr->mps = NVME_CAP_HI_MPSMIN(cap_hi);
1625 	ctrlr->page_size = 1 << (NVME_MPS_SHIFT + ctrlr->mps);
1626 
1627 	/* Get ready timeout value from controller, in units of 500ms. */
1628 	to = NVME_CAP_LO_TO(cap_lo) + 1;
1629 	ctrlr->ready_timeout_in_ms = to * 500;
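	/* E.g. CAP.TO == 3 gives (3 + 1) * 500 = 2000 ms. */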
1630 
1631 	timeout_period = NVME_ADMIN_TIMEOUT_PERIOD;
1632 	TUNABLE_INT_FETCH("hw.nvme.admin_timeout_period", &timeout_period);
1633 	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
1634 	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
1635 	ctrlr->admin_timeout_period = timeout_period;
1636 
1637 	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
1638 	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
1639 	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
1640 	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
1641 	ctrlr->timeout_period = timeout_period;
1642 
1643 	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
1644 	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);
1645 
1646 	ctrlr->enable_aborts = 0;
1647 	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
1648 
1649 	ctrlr->alignment_splits = counter_u64_alloc(M_WAITOK);
1650 
1651 	/* Cap transfers by the maximum addressable by page-sized PRP (4KB pages -> 2MB). */
1652 	ctrlr->max_xfer_size = MIN(maxphys, (ctrlr->page_size / 8 * ctrlr->page_size));
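	/* E.g. with 4 KiB pages: (4096 / 8) * 4096 bytes = 2 MiB, further capped by maxphys. */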
1653 	if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
1654 		return (ENXIO);
1655 
1656 	/*
1657 	 * Create 2 threads for the taskqueue. The reset thread will, when it
1658 	 * detects that the controller has failed, block until all I/O has been
1659 	 * failed up the stack. The second thread is used for AER events, which
1660 	 * can block, but only briefly for memory and log page fetching.
1661 	 */
1662 	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
1663 	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
1664 	taskqueue_start_threads(&ctrlr->taskqueue, 2, PI_DISK, "nvme taskq");
1665 
1666 	ctrlr->is_resetting = 0;
1667 	ctrlr->is_initialized = false;
1668 	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
1669 	for (int i = 0; i < NVME_MAX_ASYNC_EVENTS; i++) {
1670 		struct nvme_async_event_request *aer = &ctrlr->aer[i];
1671 
1672 		TASK_INIT(&aer->task, 0, nvme_ctrlr_aer_task, aer);
1673 		mtx_init(&aer->mtx, "AER mutex", NULL, MTX_DEF);
1674 	}
1675 	ctrlr->is_failed = false;
1676 
1677 	make_dev_args_init(&md_args);
1678 	md_args.mda_devsw = &nvme_ctrlr_cdevsw;
1679 	md_args.mda_uid = UID_ROOT;
1680 	md_args.mda_gid = GID_WHEEL;
1681 	md_args.mda_mode = 0600;
1682 	md_args.mda_unit = device_get_unit(dev);
1683 	md_args.mda_si_drv1 = (void *)ctrlr;
1684 	status = make_dev_s(&md_args, &ctrlr->cdev, "%s",
1685 	    device_get_nameunit(dev));
1686 	if (status != 0)
1687 		return (ENXIO);
1688 
1689 	return (0);
1690 }
1691 
1692 /*
1693  * Called on detach, or on error on attach. The nvme_controller won't be used
1694  * again once we return, so we have to tear everything down (so nothing
1695  * references this, no callbacks, etc), but don't need to reset all the state
1696  * since nvme_controller will be freed soon.
1697  */
1698 void
1699 nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
1700 {
1701 	int	i;
1702 	bool	gone;
1703 
1704 	ctrlr->is_dying = true;
1705 
1706 	if (ctrlr->resource == NULL)
1707 		goto nores;
1708 	if (!mtx_initialized(&ctrlr->adminq.lock))
1709 		goto noadminq;
1710 
1711 	/*
1712 	 * Check whether it is a hot unplug or a clean driver detach.  If device
1713 	 * is not there any more, skip any shutdown commands.  Some hotplug
1714 	 * bridges will return zeros instead of ff's when the device is
1715 	 * departing, so ask the bridge if the device is gone. Some systems can
1716 	 * remove the drive w/o the bridge knowing it's gone (they don't really
1717 	 * do hotplug), so fail safe by treating all ff's (impossible with
1718 	 * this hardware) as the device being gone.
1719 	 */
1720 	gone = bus_child_present(dev) == 0 ||
1721 	    (nvme_mmio_read_4(ctrlr, csts) == NVME_GONE);
1722 	if (gone)
1723 		nvme_ctrlr_fail(ctrlr, true);
1724 	else
1725 		nvme_notify_fail(ctrlr);
1726 
1727 	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
1728 		nvme_ns_destruct(&ctrlr->ns[i]);
1729 
1730 	if (ctrlr->cdev)
1731 		destroy_dev(ctrlr->cdev);
1732 
1733 	if (ctrlr->is_initialized) {
1734 		if (!gone) {
1735 			if (ctrlr->hmb_nchunks > 0)
1736 				nvme_ctrlr_hmb_enable(ctrlr, false, false);
1737 			nvme_ctrlr_delete_qpairs(ctrlr);
1738 		}
1739 		nvme_ctrlr_hmb_free(ctrlr);
1740 	}
1741 	if (ctrlr->ioq != NULL) {
1742 		for (i = 0; i < ctrlr->num_io_queues; i++)
1743 			nvme_io_qpair_destroy(&ctrlr->ioq[i]);
1744 		free(ctrlr->ioq, M_NVME);
1745 	}
1746 	nvme_admin_qpair_destroy(&ctrlr->adminq);
1747 
1748 	/*
1749 	 * Notify the controller of a shutdown, even though this is due to a
1750 	 * driver unload, not a system shutdown (this path is not invoked during
1751 	 * shutdown).  This ensures the controller receives a shutdown
1752 	 * notification in case the system is shutdown before reloading the
1753 	 * driver. Some NVMe drives need this to flush their cache to stable
1754 	 * media and consider it a safe shutdown in SMART stats.
1755 	 */
1756 	if (!gone) {
1757 		nvme_ctrlr_shutdown(ctrlr);
1758 		nvme_ctrlr_disable(ctrlr);
1759 	}
1760 
1761 noadminq:
1762 	if (ctrlr->taskqueue) {
1763 		taskqueue_free(ctrlr->taskqueue);
1764 		for (int i = 0; i < NVME_MAX_ASYNC_EVENTS; i++) {
1765 			struct nvme_async_event_request *aer = &ctrlr->aer[i];
1766 
1767 			mtx_destroy(&aer->mtx);
1768 		}
1769 	}
1770 
1771 	if (ctrlr->tag)
1772 		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
1773 
1774 	if (ctrlr->res)
1775 		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
1776 		    rman_get_rid(ctrlr->res), ctrlr->res);
1777 
1778 	if (ctrlr->msix_table_resource != NULL) {
1779 		bus_release_resource(dev, SYS_RES_MEMORY,
1780 		    ctrlr->msix_table_resource_id, ctrlr->msix_table_resource);
1781 	}
1782 
1783 	if (ctrlr->msix_pba_resource != NULL) {
1784 		bus_release_resource(dev, SYS_RES_MEMORY,
1785 		    ctrlr->msix_pba_resource_id, ctrlr->msix_pba_resource);
1786 	}
1787 
1788 	bus_release_resource(dev, SYS_RES_MEMORY,
1789 	    ctrlr->resource_id, ctrlr->resource);
1790 
1791 nores:
1792 	if (ctrlr->alignment_splits)
1793 		counter_u64_free(ctrlr->alignment_splits);
1794 
1795 	mtx_destroy(&ctrlr->lock);
1796 }
1797 
1798 void
1799 nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
1800 {
1801 	uint32_t	cc;
1802 	uint32_t	csts;
1803 	int		timeout;
1804 
1805 	cc = nvme_mmio_read_4(ctrlr, cc);
1806 	cc &= ~NVMEM(NVME_CC_REG_SHN);
1807 	cc |= NVMEF(NVME_CC_REG_SHN, NVME_SHN_NORMAL);
1808 	nvme_mmio_write_4(ctrlr, cc, cc);
1809 
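	/*
	 * RTD3E is the RTD3 entry latency the controller reports in
	 * microseconds; round it up to ticks, falling back to 5 seconds when
	 * the controller reports 0 (e.g. 1500000us -> 1.5s worth of ticks).
	 */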
1810 	timeout = ticks + (ctrlr->cdata.rtd3e == 0 ? 5 * hz :
1811 	    ((uint64_t)ctrlr->cdata.rtd3e * hz + 999999) / 1000000);
1812 	while (1) {
1813 		csts = nvme_mmio_read_4(ctrlr, csts);
1814 		if (csts == NVME_GONE)		/* Hot unplug. */
1815 			break;
1816 		if (NVME_CSTS_GET_SHST(csts) == NVME_SHST_COMPLETE)
1817 			break;
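		/* Signed subtraction keeps this check safe across a ticks wrap. */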
1818 		if (timeout - ticks < 0) {
1819 			nvme_printf(ctrlr, "shutdown timeout\n");
1820 			break;
1821 		}
1822 		pause("nvmeshut", 1);
1823 	}
1824 }
1825 
1826 void
1827 nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
1828     struct nvme_request *req)
1829 {
1830 	nvme_qpair_submit_request(&ctrlr->adminq, req);
1831 }
1832 
1833 void
1834 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
1835     struct nvme_request *req)
1836 {
1837 	struct nvme_qpair       *qpair;
1838 
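	/*
	 * Pick the I/O queue based on the submitting CPU so that submissions
	 * from different CPUs spread across the available hardware queues.
	 */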
1839 	qpair = &ctrlr->ioq[QP(ctrlr, curcpu)];
1840 	nvme_qpair_submit_request(qpair, req);
1841 }
1842 
1843 device_t
1844 nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
1845 {
1846 	return (ctrlr->dev);
1847 }
1848 
1849 const struct nvme_controller_data *
1850 nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
1851 {
1852 	return (&ctrlr->cdata);
1853 }
1854 
1855 int
1856 nvme_ctrlr_suspend(struct nvme_controller *ctrlr)
1857 {
1858 	int to = hz;
1859 
1860 	/*
1861 	 * Can't touch failed controllers, so treat it as already suspended. The
1862 	 * user will need to do an explicit reset to bring it back, if that's even
1863 	 * possible.
1864 	 */
1865 	if (ctrlr->is_failed)
1866 		return (0);
1867 
1868 	/*
1869 	 * We don't want the reset taskqueue running, since it does similar
1870 	 * things, so block it from starting a new reset once we begin, and wait
1871 	 * for any reset already in progress to complete. The reset process we
1872 	 * follow ensures that any new I/O will queue and be given to the
1873 	 * hardware only after we resume (though there should be none).
1874 	 */
1875 	while (atomic_cmpset_32(&ctrlr->is_resetting, 0, 1) == 0 && to-- > 0)
1876 		pause("nvmesusp", 1);
1877 	if (to <= 0) {
1878 		nvme_printf(ctrlr,
1879 		    "Competing reset task didn't finish. Try again later.\n");
1880 		return (EWOULDBLOCK);
1881 	}
1882 
1883 	if (ctrlr->hmb_nchunks > 0)
1884 		nvme_ctrlr_hmb_enable(ctrlr, false, false);
1885 
1886 	/*
1887 	 * Per Section 7.6.2 of NVMe spec 1.4, to properly suspend, we need to
1888 	 * delete the hardware I/O queues, and then shut down. This properly
1889 	 * flushes any metadata the drive may have stored so it can survive
1890 	 * having its power removed and prevents the unsafe shutdown count from
1891 	 * incrementing. Once we delete the qpairs, we have to disable them
1892 	 * before shutting down.
1893 	 */
1894 	nvme_ctrlr_delete_qpairs(ctrlr);
1895 	nvme_ctrlr_disable_qpairs(ctrlr);
1896 	nvme_ctrlr_shutdown(ctrlr);
1897 
1898 	return (0);
1899 }
1900 
1901 int
1902 nvme_ctrlr_resume(struct nvme_controller *ctrlr)
1903 {
1904 	/*
1905 	 * Can't touch failed controllers, so nothing to do to resume.
1906 	 */
1907 	if (ctrlr->is_failed)
1908 		return (0);
1909 
1910 	if (nvme_ctrlr_hw_reset(ctrlr) != 0)
1911 		goto fail;
1912 
1913 	/*
1914 	 * Now that we've reset the hardware, we can restart the controller. Any
1915 	 * I/O that was pending is requeued. Any admin commands are aborted with
1916 	 * an error. Once we've restarted, stop flagging the controller as being
1917 	 * in the reset phase.
1918 	 */
1919 	nvme_ctrlr_start(ctrlr, true);
1920 	(void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1921 
1922 	return (0);
1923 fail:
1924 	/*
1925 	 * Since we can't bring the controller out of reset, announce and fail
1926 	 * the controller. However, we have to return success for the resume
1927 	 * itself, due to questionable APIs.
1928 	 */
1929 	nvme_printf(ctrlr, "Failed to reset on resume, failing.\n");
1930 	nvme_ctrlr_fail(ctrlr, true);
1931 	(void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1932 	return (0);
1933 }
1934