/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2016 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cam.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/uio.h>
#include <sys/sbuf.h>
#include <sys/endian.h>
#include <machine/stdarg.h>
#include <vm/vm.h>

#include "nvme_private.h"

#define B4_CHK_RDY_DELAY_MS	2300		/* work around controller bug */

static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer);
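/*
 * Log a message to both the console and devctl(4), so userland consumers
 * such as devd(8) can react to controller events.  The same format string
 * is expanded twice: once as a plain console line and once as a quoted
 * "reason" attribute in the devctl notification.
 */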
static void
nvme_ctrlr_devctl_log(struct nvme_controller *ctrlr, const char *type, const char *msg, ...)
{
	struct sbuf sb;
	va_list ap;
	int error;

	sbuf_new(&sb, NULL, 0, SBUF_AUTOEXTEND | SBUF_NOWAIT);
	sbuf_printf(&sb, "%s: ", device_get_nameunit(ctrlr->dev));
	va_start(ap, msg);
	sbuf_vprintf(&sb, msg, ap);
	va_end(ap);
	error = sbuf_finish(&sb);
	if (error == 0)
		printf("%s\n", sbuf_data(&sb));

	sbuf_clear(&sb);
	sbuf_printf(&sb, "name=\"%s\" reason=\"", device_get_nameunit(ctrlr->dev));
	va_start(ap, msg);
	sbuf_vprintf(&sb, msg, ap);
	va_end(ap);
	sbuf_printf(&sb, "\"");
	error = sbuf_finish(&sb);
	if (error == 0)
		devctl_notify("nvme", "controller", type, sbuf_data(&sb));
	sbuf_delete(&sb);
}

static int
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
	struct nvme_qpair *qpair;
	uint32_t num_entries;
	int error;

	qpair = &ctrlr->adminq;
	qpair->id = 0;
	qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
	qpair->domain = ctrlr->domain;

	num_entries = NVME_ADMIN_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
	/*
	 * If admin_entries was overridden to an invalid value, revert it
	 * back to our default value.
	 */
	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
		    "specified\n", num_entries);
		num_entries = NVME_ADMIN_ENTRIES;
	}

	/*
	 * The admin queue's max xfer size is treated differently than the
	 * max I/O xfer size.  16KB is sufficient here - maybe even less?
	 */
	error = nvme_qpair_construct(qpair, num_entries, NVME_ADMIN_TRACKERS,
	    ctrlr);
	return (error);
}
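/*
 * Map a CPU number to the I/O queue that serves it.  CPUs are divided
 * evenly among the queues.  As an illustration (hypothetical numbers):
 * with 8 CPUs and 2 I/O queues, QP() maps CPUs 0-3 to queue 0 and
 * CPUs 4-7 to queue 1.
 */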
#define QP(ctrlr, c)	((c) * (ctrlr)->num_io_queues / mp_ncpus)

static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_qpair *qpair;
	uint32_t cap_lo;
	uint16_t mqes;
	int c, error, i, n;
	int num_entries, num_trackers, max_entries;

	/*
	 * NVMe spec sets a hard limit of 64K max entries, but devices may
	 * specify a smaller limit, so we need to check the MQES field in the
	 * capabilities register.  We have to cap the number of entries to what
	 * the current stride allows for in BAR 0/1, otherwise the remainder
	 * entries are inaccessible.  MQES should reflect this, and this is
	 * just a fail-safe.
	 */
	max_entries =
	    (rman_get_size(ctrlr->resource) - nvme_mmio_offsetof(doorbell[0])) /
	    (1 << (ctrlr->dstrd + 1));
	num_entries = NVME_IO_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
	mqes = NVME_CAP_LO_MQES(cap_lo);
	num_entries = min(num_entries, mqes + 1);
	num_entries = min(num_entries, max_entries);

	num_trackers = NVME_IO_TRACKERS;
	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
	/*
	 * No need to have more trackers than entries in the submit queue.  Note
	 * also that for a queue size of N, we can only have (N-1) commands
	 * outstanding, hence the "-1" here.
	 */
	num_trackers = min(num_trackers, (num_entries-1));

	/*
	 * Our best estimate for the maximum number of I/Os that we should
	 * normally have in flight at one time.  This should be viewed as a hint,
	 * not a hard limit, and will need to be revisited when the upper layers
	 * of the storage system grow multi-queue support.
	 */
	ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;

	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
	    M_NVME, M_ZERO | M_WAITOK);
	for (i = c = n = 0; i < ctrlr->num_io_queues; i++, c += n) {
		qpair = &ctrlr->ioq[i];

		/*
		 * Admin queue has ID=0. IO queues start at ID=1 -
		 * hence the 'i+1' here.
		 */
		qpair->id = i + 1;
		if (ctrlr->num_io_queues > 1) {
			/* Find number of CPUs served by this queue. */
			for (n = 1; QP(ctrlr, c + n) == i; n++)
				;
			/* Shuffle multiple NVMe devices between CPUs. */
			qpair->cpu = c + (device_get_unit(ctrlr->dev)+n/2) % n;
			qpair->domain = pcpu_find(qpair->cpu)->pc_domain;
		} else {
			qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
			qpair->domain = ctrlr->domain;
		}

		/*
		 * For I/O queues, use the controller-wide max_xfer_size
		 * calculated in nvme_attach().
		 */
		error = nvme_qpair_construct(qpair, num_entries, num_trackers,
		    ctrlr);
		if (error)
			return (error);

		/*
		 * Do not bother binding interrupts if we only have one I/O
		 * interrupt thread for this controller.
		 */
		if (ctrlr->num_io_queues > 1)
			bus_bind_intr(ctrlr->dev, qpair->res, qpair->cpu);
	}

	return (0);
}
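/*
 * Transition the controller to the failed state: disable the admin and
 * I/O queues, complete everything still queued with an error, and notify
 * consumers so they can detach from the failed device.
 */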
static void
nvme_ctrlr_fail(struct nvme_controller *ctrlr)
{
	int i;

	ctrlr->is_failed = true;
	nvme_admin_qpair_disable(&ctrlr->adminq);
	nvme_qpair_fail(&ctrlr->adminq);
	if (ctrlr->ioq != NULL) {
		for (i = 0; i < ctrlr->num_io_queues; i++) {
			nvme_io_qpair_disable(&ctrlr->ioq[i]);
			nvme_qpair_fail(&ctrlr->ioq[i]);
		}
	}
	nvme_notify_fail_consumers(ctrlr);
}

void
nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	mtx_lock(&ctrlr->lock);
	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
	mtx_unlock(&ctrlr->lock);
	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
}

static void
nvme_ctrlr_fail_req_task(void *arg, int pending)
{
	struct nvme_controller	*ctrlr = arg;
	struct nvme_request	*req;

	mtx_lock(&ctrlr->lock);
	while ((req = STAILQ_FIRST(&ctrlr->fail_req)) != NULL) {
		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
		mtx_unlock(&ctrlr->lock);
		nvme_qpair_manual_complete_request(req->qpair, req,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST);
		mtx_lock(&ctrlr->lock);
	}
	mtx_unlock(&ctrlr->lock);
}
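/*
 * Poll CSTS.RDY until it reaches desired_val, checking roughly once per
 * millisecond.  ctrlr->ready_timeout_in_ms is expected to be derived
 * elsewhere in the driver from the controller's CAP.TO field, which per
 * the NVMe spec advertises the worst-case ready transition time in
 * 500 ms units.
 */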
static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
{
	int ms_waited;
	uint32_t csts;

	ms_waited = 0;
	while (1) {
		csts = nvme_mmio_read_4(ctrlr, csts);
		if (csts == 0xffffffff)		/* Hot unplug. */
			return (ENXIO);
		if (((csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK)
		    == desired_val)
			break;
		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
			nvme_printf(ctrlr, "controller ready did not become %d "
			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
			return (ENXIO);
		}
		DELAY(1000);
	}

	return (0);
}

static int
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
	uint32_t cc;
	uint32_t csts;
	uint8_t  en, rdy;
	int err;

	cc = nvme_mmio_read_4(ctrlr, cc);
	csts = nvme_mmio_read_4(ctrlr, csts);

	en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
	rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;

	/*
	 * Per 3.1.5 in NVME 1.3 spec, transitioning CC.EN from 0 to 1
	 * when CSTS.RDY is 1 or transitioning CC.EN from 1 to 0 when
	 * CSTS.RDY is 0 "has undefined results", so make sure that CSTS.RDY
	 * isn't the desired value.  Short circuit if we're already disabled.
	 */
	if (en == 1) {
		if (rdy == 0) {
			/* EN == 1, wait for RDY == 1 or fail */
			err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
			if (err != 0)
				return (err);
		}
	} else {
		/* EN == 0 already, so just wait for RDY == 0 */
		if (rdy == 0)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
	}

	cc &= ~NVME_CC_REG_EN_MASK;
	nvme_mmio_write_4(ctrlr, cc, cc);
	/*
	 * Some drives have issues with accessing the mmio after we
	 * disable, so delay for a bit after we write the bit to
	 * cope with these issues.
	 */
	if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
		pause("nvmeR", B4_CHK_RDY_DELAY_MS * hz / 1000);
	return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
}
static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
	uint32_t cc;
	uint32_t csts;
	uint32_t aqa;
	uint32_t qsize;
	uint8_t  en, rdy;
	int err;

	cc = nvme_mmio_read_4(ctrlr, cc);
	csts = nvme_mmio_read_4(ctrlr, csts);

	en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
	rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;

	/*
	 * See note in nvme_ctrlr_disable. Short circuit if we're already enabled.
	 */
	if (en == 1) {
		if (rdy == 1)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
	} else {
		/* EN == 0 already, so wait for RDY == 0 or fail */
		err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
		if (err != 0)
			return (err);
	}

	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
	DELAY(5000);
	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
	DELAY(5000);

	/* acqs and asqs are 0-based. */
	qsize = ctrlr->adminq.num_entries - 1;

	aqa = 0;
	aqa = (qsize & NVME_AQA_REG_ACQS_MASK) << NVME_AQA_REG_ACQS_SHIFT;
	aqa |= (qsize & NVME_AQA_REG_ASQS_MASK) << NVME_AQA_REG_ASQS_SHIFT;
	nvme_mmio_write_4(ctrlr, aqa, aqa);
	DELAY(5000);

	/* Initialization values for CC */
	cc = 0;
	cc |= 1 << NVME_CC_REG_EN_SHIFT;
	cc |= 0 << NVME_CC_REG_CSS_SHIFT;
	cc |= 0 << NVME_CC_REG_AMS_SHIFT;
	cc |= 0 << NVME_CC_REG_SHN_SHIFT;
	cc |= 6 << NVME_CC_REG_IOSQES_SHIFT;	/* SQ entry size == 64 == 2^6 */
	cc |= 4 << NVME_CC_REG_IOCQES_SHIFT;	/* CQ entry size == 16 == 2^4 */

	/* This evaluates to 0, which is according to spec. */
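	/*
	 * CC.MPS encodes the host memory page size as a power of two in
	 * units of 4KB: page size = 2^(12 + MPS).  With the usual 4KB
	 * PAGE_SIZE, PAGE_SIZE >> 13 is 0, selecting 4KB pages.
	 */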
	cc |= (PAGE_SIZE >> 13) << NVME_CC_REG_MPS_SHIFT;

	nvme_mmio_write_4(ctrlr, cc, cc);

	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
}

static void
nvme_ctrlr_disable_qpairs(struct nvme_controller *ctrlr)
{
	int i;

	nvme_admin_qpair_disable(&ctrlr->adminq);
	/*
	 * I/O queues are not allocated before the initial HW
	 * reset, so do not try to disable them.  Use is_initialized
	 * to determine if this is the initial HW reset.
	 */
	if (ctrlr->is_initialized) {
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_io_qpair_disable(&ctrlr->ioq[i]);
	}
}

int
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
{
	int err;

	nvme_ctrlr_disable_qpairs(ctrlr);

	DELAY(100*1000);

	err = nvme_ctrlr_disable(ctrlr);
	if (err != 0)
		return (err);
	return (nvme_ctrlr_enable(ctrlr));
}

void
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{
	int cmpset;

	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);

	if (cmpset == 0 || ctrlr->is_failed)
		/*
		 * Controller is already resetting or has failed.  Return
		 * immediately since there is no need to kick off another
		 * reset in these cases.
		 */
		return;

	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}
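/*
 * Fetch and byte-swap the IDENTIFY CONTROLLER data.  Note that MDTS is
 * expressed as a power of two in units of the minimum page size
 * (CAP.MPSMIN); a value of 0 means "no limit", which is why the transfer
 * size is only capped when MDTS is nonzero.
 */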
static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;

	status.done = 0;
	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
	    nvme_completion_poll_cb, &status);
	nvme_completion_poll(&status);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
		return (ENXIO);
	}

	/* Convert data to host endian */
	nvme_controller_data_swapbytes(&ctrlr->cdata);

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
	 * controller supports.
	 */
	if (ctrlr->cdata.mdts > 0)
		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));

	return (0);
}

static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	int					cq_allocated, sq_allocated;

	status.done = 0;
	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
	    nvme_completion_poll_cb, &status);
	nvme_completion_poll(&status);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
		return (ENXIO);
	}

	/*
	 * Data in cdw0 is 0-based.
	 * Lower 16-bits indicate number of submission queues allocated.
	 * Upper 16-bits indicate number of completion queues allocated.
	 */
	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
	cq_allocated = (status.cpl.cdw0 >> 16) + 1;

	/*
	 * Controller may allocate more queues than we requested,
	 * so use the minimum of the number requested and what was
	 * actually allocated.
	 */
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
	if (ctrlr->num_io_queues > vm_ndomains)
		ctrlr->num_io_queues -= ctrlr->num_io_queues % vm_ndomains;

	return (0);
}
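/*
 * Create the hardware I/O queue pairs.  Each completion queue is created
 * before its paired submission queue, since the CREATE I/O SQ command
 * must reference an already existing completion queue.
 */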
static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	struct nvme_qpair			*qpair;
	int					i;

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		status.done = 0;
		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		nvme_completion_poll(&status);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
			return (ENXIO);
		}

		status.done = 0;
		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		nvme_completion_poll(&status);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
			return (ENXIO);
		}
	}

	return (0);
}

static int
nvme_ctrlr_delete_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	struct nvme_qpair			*qpair;

	for (int i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		status.done = 0;
		nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		nvme_completion_poll(&status);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
			return (ENXIO);
		}

		status.done = 0;
		nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		nvme_completion_poll(&status);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
			return (ENXIO);
		}
	}

	return (0);
}

static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
{
	struct nvme_namespace	*ns;
	uint32_t 		i;

	for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
		ns = &ctrlr->ns[i];
		nvme_ns_construct(ns, i+1, ctrlr);
	}

	return (0);
}

static bool
is_log_page_id_valid(uint8_t page_id)
{

	switch (page_id) {
	case NVME_LOG_ERROR:
	case NVME_LOG_HEALTH_INFORMATION:
	case NVME_LOG_FIRMWARE_SLOT:
	case NVME_LOG_CHANGED_NAMESPACE:
	case NVME_LOG_COMMAND_EFFECT:
	case NVME_LOG_RES_NOTIFICATION:
	case NVME_LOG_SANITIZE_STATUS:
		return (true);
	}

	return (false);
}
static uint32_t
nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
{
	uint32_t	log_page_size;

	switch (page_id) {
	case NVME_LOG_ERROR:
		log_page_size = min(
		    sizeof(struct nvme_error_information_entry) *
		    (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
		break;
	case NVME_LOG_HEALTH_INFORMATION:
		log_page_size = sizeof(struct nvme_health_information_page);
		break;
	case NVME_LOG_FIRMWARE_SLOT:
		log_page_size = sizeof(struct nvme_firmware_page);
		break;
	case NVME_LOG_CHANGED_NAMESPACE:
		log_page_size = sizeof(struct nvme_ns_list);
		break;
	case NVME_LOG_COMMAND_EFFECT:
		log_page_size = sizeof(struct nvme_command_effects_page);
		break;
	case NVME_LOG_RES_NOTIFICATION:
		log_page_size = sizeof(struct nvme_res_notification_page);
		break;
	case NVME_LOG_SANITIZE_STATUS:
		log_page_size = sizeof(struct nvme_sanitize_status_page);
		break;
	default:
		log_page_size = 0;
		break;
	}

	return (log_page_size);
}

static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
    uint8_t state)
{

	if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
		nvme_ctrlr_devctl_log(ctrlr, "critical",
		    "available spare space below threshold");

	if (state & NVME_CRIT_WARN_ST_TEMPERATURE)
		nvme_ctrlr_devctl_log(ctrlr, "critical",
		    "temperature above threshold");

	if (state & NVME_CRIT_WARN_ST_DEVICE_RELIABILITY)
		nvme_ctrlr_devctl_log(ctrlr, "critical",
		    "device reliability degraded");

	if (state & NVME_CRIT_WARN_ST_READ_ONLY)
		nvme_ctrlr_devctl_log(ctrlr, "critical",
		    "media placed in read only mode");

	if (state & NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP)
		nvme_ctrlr_devctl_log(ctrlr, "critical",
		    "volatile memory backup device failed");

	if (state & NVME_CRIT_WARN_ST_RESERVED_MASK)
		nvme_ctrlr_devctl_log(ctrlr, "critical",
		    "unknown critical warning(s): state = 0x%02x", state);
}
static void
nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request		*aer = arg;
	struct nvme_health_information_page	*health_info;
	struct nvme_ns_list			*nsl;
	struct nvme_error_information_entry	*err;
	int i;

	/*
	 * If the log page fetch for some reason completed with an error,
	 * don't pass log page data to the consumers.  In practice, this case
	 * should never happen.
	 */
	if (nvme_completion_is_error(cpl))
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, NULL, 0);
	else {
		/* Convert data to host endian */
		switch (aer->log_page_id) {
		case NVME_LOG_ERROR:
			err = (struct nvme_error_information_entry *)aer->log_page_buffer;
			for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
				nvme_error_information_entry_swapbytes(err++);
			break;
		case NVME_LOG_HEALTH_INFORMATION:
			nvme_health_information_page_swapbytes(
			    (struct nvme_health_information_page *)aer->log_page_buffer);
			break;
		case NVME_LOG_FIRMWARE_SLOT:
			nvme_firmware_page_swapbytes(
			    (struct nvme_firmware_page *)aer->log_page_buffer);
			break;
		case NVME_LOG_CHANGED_NAMESPACE:
			nvme_ns_list_swapbytes(
			    (struct nvme_ns_list *)aer->log_page_buffer);
			break;
		case NVME_LOG_COMMAND_EFFECT:
			nvme_command_effects_page_swapbytes(
			    (struct nvme_command_effects_page *)aer->log_page_buffer);
			break;
		case NVME_LOG_RES_NOTIFICATION:
			nvme_res_notification_page_swapbytes(
			    (struct nvme_res_notification_page *)aer->log_page_buffer);
			break;
		case NVME_LOG_SANITIZE_STATUS:
			nvme_sanitize_status_page_swapbytes(
			    (struct nvme_sanitize_status_page *)aer->log_page_buffer);
			break;
		case INTEL_LOG_TEMP_STATS:
			intel_log_temp_stats_swapbytes(
			    (struct intel_log_temp_stats *)aer->log_page_buffer);
			break;
		default:
			break;
		}
		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
			health_info = (struct nvme_health_information_page *)
			    aer->log_page_buffer;
			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
			    health_info->critical_warning);
			/*
			 * Critical warnings reported through the
			 * SMART/health log page are persistent, so
			 * clear the associated bits in the async event
			 * config so that we do not receive repeated
			 * notifications for the same event.
			 */
			aer->ctrlr->async_event_config &=
			    ~health_info->critical_warning;
			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
			    aer->ctrlr->async_event_config, NULL, NULL);
		} else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE &&
		    !nvme_use_nvd) {
			nsl = (struct nvme_ns_list *)aer->log_page_buffer;
			for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
				if (nsl->ns[i] > NVME_MAX_NAMESPACES)
					break;
				nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
			}
		}

		/*
		 * Pass the cpl data from the original async event completion,
		 * not the log page fetch.
		 */
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
	}

	/*
	 * Repost another asynchronous event request to replace the one
	 * that just completed.
	 */
	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
}
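/*
 * Completion of an ASYNC EVENT REQUEST.  Per the NVMe spec, dword 0 of
 * the completion encodes the event: bits 2:0 are the event type, bits
 * 15:8 the event information, and bits 23:16 the associated log page.
 * For example (illustrative only), cdw0 = 0x00020101 would decode as a
 * SMART/health event (type 0x1, info 0x01) with details in log page 0x02.
 */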
static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request	*aer = arg;

	if (nvme_completion_is_error(cpl)) {
		/*
		 * Do not retry failed async event requests.  This avoids
		 * infinite loops where a new async event request is submitted
		 * to replace the one just failed, only to fail again and
		 * perpetuate the loop.
		 */
		return;
	}

	/* Associated log page is in bits 23:16 of completion entry dw0. */
	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;

	nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
	    " page 0x%02x)\n", (cpl->cdw0 & 0x07), (cpl->cdw0 & 0xFF00) >> 8,
	    aer->log_page_id);

	if (is_log_page_id_valid(aer->log_page_id)) {
		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
		    aer->log_page_id);
		memcpy(&aer->cpl, cpl, sizeof(*cpl));
		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
		    aer);
		/* Wait to notify consumers until after log page is fetched. */
	} else {
		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
		    NULL, 0);

		/*
		 * Repost another asynchronous event request to replace the one
		 * that just completed.
		 */
		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
	}
}

static void
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)
{
	struct nvme_request *req;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
	aer->req = req;

	/*
	 * Disable timeout here, since asynchronous event requests should by
	 * nature never be timed out.
	 */
	req->timeout = false;
	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
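/*
 * Choose which asynchronous events to subscribe to.  The temperature
 * threshold is probed first via GET FEATURES; controllers that return an
 * error or an out-of-range value don't get the temperature warning bit.
 * The 0x300 added for NVMe 1.2+ controllers enables the namespace
 * attribute change and firmware activation notices (bits 8 and 9).
 */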
static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	struct nvme_async_event_request		*aer;
	uint32_t				i;

	ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
	    NVME_CRIT_WARN_ST_DEVICE_RELIABILITY |
	    NVME_CRIT_WARN_ST_READ_ONLY |
	    NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP;
	if (ctrlr->cdata.ver >= NVME_REV(1, 2))
		ctrlr->async_event_config |= 0x300;

	status.done = 0;
	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
	    0, NULL, 0, nvme_completion_poll_cb, &status);
	nvme_completion_poll(&status);
	if (nvme_completion_is_error(&status.cpl) ||
	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
		nvme_printf(ctrlr, "temperature threshold not supported\n");
	} else
		ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;

	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
	    ctrlr->async_event_config, NULL, NULL);

	/* aerl is a zero-based value, so we need to add 1 here. */
	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));

	for (i = 0; i < ctrlr->num_aers; i++) {
		aer = &ctrlr->aer[i];
		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
	}
}

static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{

	ctrlr->int_coal_time = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
	    &ctrlr->int_coal_time);

	ctrlr->int_coal_threshold = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
	    &ctrlr->int_coal_threshold);

	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
	    ctrlr->int_coal_threshold, NULL, NULL);
}

static void
nvme_ctrlr_hmb_free(struct nvme_controller *ctrlr)
{
	struct nvme_hmb_chunk *hmbc;
	int i;

	if (ctrlr->hmb_desc_paddr) {
		bus_dmamap_unload(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map);
		bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
		    ctrlr->hmb_desc_map);
		ctrlr->hmb_desc_paddr = 0;
	}
	if (ctrlr->hmb_desc_tag) {
		bus_dma_tag_destroy(ctrlr->hmb_desc_tag);
		ctrlr->hmb_desc_tag = NULL;
	}
	for (i = 0; i < ctrlr->hmb_nchunks; i++) {
		hmbc = &ctrlr->hmb_chunks[i];
		bus_dmamap_unload(ctrlr->hmb_tag, hmbc->hmbc_map);
		bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
		    hmbc->hmbc_map);
	}
	ctrlr->hmb_nchunks = 0;
	if (ctrlr->hmb_tag) {
		bus_dma_tag_destroy(ctrlr->hmb_tag);
		ctrlr->hmb_tag = NULL;
	}
	if (ctrlr->hmb_chunks) {
		free(ctrlr->hmb_chunks, M_NVME);
		ctrlr->hmb_chunks = NULL;
	}
}
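/*
 * Allocate a host memory buffer for controllers that request one.  The
 * controller advertises its preferred (HMPRE) and minimum (HMMIN) sizes
 * in 4KB units.  We aim for the preferred size; if contiguous chunks of
 * the chosen size can't be allocated, retry with half the chunk size
 * until we either satisfy at least the minimum or give up entirely.
 */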
static void
nvme_ctrlr_hmb_alloc(struct nvme_controller *ctrlr)
{
	struct nvme_hmb_chunk *hmbc;
	size_t pref, min, minc, size;
	int err, i;
	uint64_t max;

	/* Limit HMB to 5% of RAM size per device by default. */
	max = (uint64_t)physmem * PAGE_SIZE / 20;
	TUNABLE_UINT64_FETCH("hw.nvme.hmb_max", &max);

	min = (long long unsigned)ctrlr->cdata.hmmin * 4096;
	if (max == 0 || max < min)
		return;
	pref = MIN((long long unsigned)ctrlr->cdata.hmpre * 4096, max);
	minc = MAX(ctrlr->cdata.hmminds * 4096, PAGE_SIZE);
	if (min > 0 && ctrlr->cdata.hmmaxd > 0)
		minc = MAX(minc, min / ctrlr->cdata.hmmaxd);
	ctrlr->hmb_chunk = pref;

again:
	ctrlr->hmb_chunk = roundup2(ctrlr->hmb_chunk, PAGE_SIZE);
	ctrlr->hmb_nchunks = howmany(pref, ctrlr->hmb_chunk);
	if (ctrlr->cdata.hmmaxd > 0 && ctrlr->hmb_nchunks > ctrlr->cdata.hmmaxd)
		ctrlr->hmb_nchunks = ctrlr->cdata.hmmaxd;
	ctrlr->hmb_chunks = malloc(sizeof(struct nvme_hmb_chunk) *
	    ctrlr->hmb_nchunks, M_NVME, M_WAITOK);
	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    ctrlr->hmb_chunk, 1, ctrlr->hmb_chunk, 0, NULL, NULL, &ctrlr->hmb_tag);
	if (err != 0) {
		nvme_printf(ctrlr, "HMB tag create failed %d\n", err);
		nvme_ctrlr_hmb_free(ctrlr);
		return;
	}

	for (i = 0; i < ctrlr->hmb_nchunks; i++) {
		hmbc = &ctrlr->hmb_chunks[i];
		if (bus_dmamem_alloc(ctrlr->hmb_tag,
		    (void **)&hmbc->hmbc_vaddr, BUS_DMA_NOWAIT,
		    &hmbc->hmbc_map)) {
			nvme_printf(ctrlr, "failed to alloc HMB\n");
			break;
		}
		if (bus_dmamap_load(ctrlr->hmb_tag, hmbc->hmbc_map,
		    hmbc->hmbc_vaddr, ctrlr->hmb_chunk, nvme_single_map,
		    &hmbc->hmbc_paddr, BUS_DMA_NOWAIT) != 0) {
			bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
			    hmbc->hmbc_map);
			nvme_printf(ctrlr, "failed to load HMB\n");
			break;
		}
		bus_dmamap_sync(ctrlr->hmb_tag, hmbc->hmbc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	if (i < ctrlr->hmb_nchunks && i * ctrlr->hmb_chunk < min &&
	    ctrlr->hmb_chunk / 2 >= minc) {
		ctrlr->hmb_nchunks = i;
		nvme_ctrlr_hmb_free(ctrlr);
		ctrlr->hmb_chunk /= 2;
		goto again;
	}
	ctrlr->hmb_nchunks = i;
	if (ctrlr->hmb_nchunks * ctrlr->hmb_chunk < min) {
		nvme_ctrlr_hmb_free(ctrlr);
		return;
	}
	size = sizeof(struct nvme_hmb_desc) * ctrlr->hmb_nchunks;
	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    16, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    size, 1, size, 0, NULL, NULL, &ctrlr->hmb_desc_tag);
	if (err != 0) {
		nvme_printf(ctrlr, "HMB desc tag create failed %d\n", err);
		nvme_ctrlr_hmb_free(ctrlr);
		return;
	}
	if (bus_dmamem_alloc(ctrlr->hmb_desc_tag,
	    (void **)&ctrlr->hmb_desc_vaddr, BUS_DMA_WAITOK,
	    &ctrlr->hmb_desc_map)) {
		nvme_printf(ctrlr, "failed to alloc HMB desc\n");
		nvme_ctrlr_hmb_free(ctrlr);
		return;
	}
	if (bus_dmamap_load(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
	    ctrlr->hmb_desc_vaddr, size, nvme_single_map,
	    &ctrlr->hmb_desc_paddr, BUS_DMA_NOWAIT) != 0) {
		bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
		    ctrlr->hmb_desc_map);
		nvme_printf(ctrlr, "failed to load HMB desc\n");
		nvme_ctrlr_hmb_free(ctrlr);
		return;
	}

	for (i = 0; i < ctrlr->hmb_nchunks; i++) {
		ctrlr->hmb_desc_vaddr[i].addr =
		    htole64(ctrlr->hmb_chunks[i].hmbc_paddr);
		ctrlr->hmb_desc_vaddr[i].size = htole32(ctrlr->hmb_chunk / 4096);
	}
	bus_dmamap_sync(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
	    BUS_DMASYNC_PREWRITE);

	nvme_printf(ctrlr, "Allocated %lluMB host memory buffer\n",
	    (long long unsigned)ctrlr->hmb_nchunks * ctrlr->hmb_chunk
	    / 1024 / 1024);
}
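/*
 * Enable or disable the host memory buffer via SET FEATURES.  In CDW11,
 * bit 0 (EHM) enables the buffer and bit 1 (MR) tells the controller the
 * memory was returned unchanged after a reset, so its previous contents
 * may still be used.
 */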
static void
nvme_ctrlr_hmb_enable(struct nvme_controller *ctrlr, bool enable, bool memret)
{
	struct nvme_completion_poll_status	status;
	uint32_t cdw11;

	cdw11 = 0;
	if (enable)
		cdw11 |= 1;
	if (memret)
		cdw11 |= 2;
	status.done = 0;
	nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_HOST_MEMORY_BUFFER, cdw11,
	    ctrlr->hmb_nchunks * ctrlr->hmb_chunk / 4096, ctrlr->hmb_desc_paddr,
	    ctrlr->hmb_desc_paddr >> 32, ctrlr->hmb_nchunks, NULL, 0,
	    nvme_completion_poll_cb, &status);
	nvme_completion_poll(&status);
	if (nvme_completion_is_error(&status.cpl))
		nvme_printf(ctrlr, "nvme_ctrlr_hmb_enable failed!\n");
}

static void
nvme_ctrlr_start(void *ctrlr_arg, bool resetting)
{
	struct nvme_controller *ctrlr = ctrlr_arg;
	uint32_t old_num_io_queues;
	int i;

	/*
	 * Only reset adminq here when we are restarting the
	 * controller after a reset.  During initialization,
	 * we have already submitted admin commands to get
	 * the number of I/O queues supported, so cannot reset
	 * the adminq again here.
	 */
	if (resetting)
		nvme_qpair_reset(&ctrlr->adminq);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_reset(&ctrlr->ioq[i]);

	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_identify(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	/*
	 * The number of qpairs is determined during controller initialization,
	 * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
	 * HW limit.  We call SET_FEATURES again here so that it gets called
	 * after any reset for controllers that depend on the driver to
	 * explicitly specify how many queues it will use.  This value should
	 * never change between resets, so panic if somehow that does happen.
	 */
	if (resetting) {
		old_num_io_queues = ctrlr->num_io_queues;
		if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
			nvme_ctrlr_fail(ctrlr);
			return;
		}

		if (old_num_io_queues != ctrlr->num_io_queues) {
			panic("num_io_queues changed from %u to %u",
			    old_num_io_queues, ctrlr->num_io_queues);
		}
	}

	if (ctrlr->cdata.hmpre > 0 && ctrlr->hmb_nchunks == 0) {
		nvme_ctrlr_hmb_alloc(ctrlr);
		if (ctrlr->hmb_nchunks > 0)
			nvme_ctrlr_hmb_enable(ctrlr, true, false);
	} else if (ctrlr->hmb_nchunks > 0)
		nvme_ctrlr_hmb_enable(ctrlr, true, true);

	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	nvme_ctrlr_configure_aer(ctrlr);
	nvme_ctrlr_configure_int_coalescing(ctrlr);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_enable(&ctrlr->ioq[i]);
}

void
nvme_ctrlr_start_config_hook(void *arg)
{
	struct nvme_controller *ctrlr = arg;
	int status;

	/*
	 * Reset controller twice to ensure we do a transition from cc.en==1 to
	 * cc.en==0.  This is because we don't really know what status the
	 * controller was left in when boot handed off to OS.  Linux doesn't do
	 * this, however.  If we adopt that policy, see also nvme_ctrlr_resume().
	 */
112466e59850SWarner Losh */ 112566e59850SWarner Losh status = nvme_ctrlr_hw_reset(ctrlr); 112666e59850SWarner Losh if (status != 0) { 112766e59850SWarner Losh nvme_ctrlr_fail(ctrlr); 112866e59850SWarner Losh return; 112966e59850SWarner Losh } 113066e59850SWarner Losh 113166e59850SWarner Losh status = nvme_ctrlr_hw_reset(ctrlr); 113266e59850SWarner Losh if (status != 0) { 113366e59850SWarner Losh nvme_ctrlr_fail(ctrlr); 113466e59850SWarner Losh return; 113566e59850SWarner Losh } 1136be34f216SJim Harris 11372b647da7SJim Harris nvme_qpair_reset(&ctrlr->adminq); 11382b647da7SJim Harris nvme_admin_qpair_enable(&ctrlr->adminq); 11392b647da7SJim Harris 11402b647da7SJim Harris if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 && 11412b647da7SJim Harris nvme_ctrlr_construct_io_qpairs(ctrlr) == 0) 11424d547561SWarner Losh nvme_ctrlr_start(ctrlr, false); 11432b647da7SJim Harris else 11442b647da7SJim Harris nvme_ctrlr_fail(ctrlr); 11452b647da7SJim Harris 11462b647da7SJim Harris nvme_sysctl_initialize_ctrlr(ctrlr); 1147be34f216SJim Harris config_intrhook_disestablish(&ctrlr->config_hook); 1148496a2752SJim Harris 1149496a2752SJim Harris ctrlr->is_initialized = 1; 1150496a2752SJim Harris nvme_notify_new_controller(ctrlr); 1151b846efd7SJim Harris } 1152b846efd7SJim Harris 1153bb0ec6b3SJim Harris static void 115448ce3178SJim Harris nvme_ctrlr_reset_task(void *arg, int pending) 115512d191ecSJim Harris { 115612d191ecSJim Harris struct nvme_controller *ctrlr = arg; 115748ce3178SJim Harris int status; 115812d191ecSJim Harris 1159*244b8053SWarner Losh nvme_ctrlr_devctl_log(ctrlr, "RESET", "resetting controller"); 116048ce3178SJim Harris status = nvme_ctrlr_hw_reset(ctrlr); 116148ce3178SJim Harris /* 116248ce3178SJim Harris * Use pause instead of DELAY, so that we yield to any nvme interrupt 116348ce3178SJim Harris * handlers on this CPU that were blocked on a qpair lock. We want 116448ce3178SJim Harris * all nvme interrupts completed before proceeding with restarting the 116548ce3178SJim Harris * controller. 116648ce3178SJim Harris * 116748ce3178SJim Harris * XXX - any way to guarantee the interrupt handlers have quiesced? 116848ce3178SJim Harris */ 116948ce3178SJim Harris pause("nvmereset", hz / 10); 117048ce3178SJim Harris if (status == 0) 11714d547561SWarner Losh nvme_ctrlr_start(ctrlr, true); 1172232e2edbSJim Harris else 1173232e2edbSJim Harris nvme_ctrlr_fail(ctrlr); 1174f37c22a3SJim Harris 1175f37c22a3SJim Harris atomic_cmpset_32(&ctrlr->is_resetting, 1, 0); 117612d191ecSJim Harris } 117712d191ecSJim Harris 1178bb1c7be4SWarner Losh /* 1179bb1c7be4SWarner Losh * Poll all the queues enabled on the device for completion. 1180bb1c7be4SWarner Losh */ 1181bb1c7be4SWarner Losh void 1182bb1c7be4SWarner Losh nvme_ctrlr_poll(struct nvme_controller *ctrlr) 1183bb1c7be4SWarner Losh { 1184bb1c7be4SWarner Losh int i; 1185bb1c7be4SWarner Losh 1186bb1c7be4SWarner Losh nvme_qpair_process_completions(&ctrlr->adminq); 1187bb1c7be4SWarner Losh 1188bb1c7be4SWarner Losh for (i = 0; i < ctrlr->num_io_queues; i++) 1189bb1c7be4SWarner Losh if (ctrlr->ioq && ctrlr->ioq[i].cpl) 1190bb1c7be4SWarner Losh nvme_qpair_process_completions(&ctrlr->ioq[i]); 1191bb1c7be4SWarner Losh } 1192bb1c7be4SWarner Losh 1193bb1c7be4SWarner Losh /* 11944d547561SWarner Losh * Poll the single-vector interrupt case: num_io_queues will be 1 and 1195bb1c7be4SWarner Losh * there's only a single vector. While we're polling, we mask further 1196bb1c7be4SWarner Losh * interrupts in the controller. 
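 * (INTMS and INTMC are the NVMe interrupt mask set/clear registers: writing a
 * 1 to bit 0 of INTMS masks vector 0 while nvme_ctrlr_poll() drains the admin
 * and I/O completion queues, and writing a 1 to INTMC unmasks it again.)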
1197bb1c7be4SWarner Losh */ 1198f24c011bSWarner Losh void 11994d6abcb1SJim Harris nvme_ctrlr_intx_handler(void *arg) 1200bb0ec6b3SJim Harris { 1201bb0ec6b3SJim Harris struct nvme_controller *ctrlr = arg; 1202bb0ec6b3SJim Harris 12034d6abcb1SJim Harris nvme_mmio_write_4(ctrlr, intms, 1); 1204bb1c7be4SWarner Losh nvme_ctrlr_poll(ctrlr); 1205bb0ec6b3SJim Harris nvme_mmio_write_4(ctrlr, intmc, 1); 1206bb0ec6b3SJim Harris } 1207bb0ec6b3SJim Harris 12087c3f19d7SJim Harris static void 12097c3f19d7SJim Harris nvme_pt_done(void *arg, const struct nvme_completion *cpl) 12107c3f19d7SJim Harris { 12117c3f19d7SJim Harris struct nvme_pt_command *pt = arg; 1212c252f637SAlexander Motin struct mtx *mtx = pt->driver_lock; 12130d787e9bSWojciech Macek uint16_t status; 12147c3f19d7SJim Harris 12157c3f19d7SJim Harris bzero(&pt->cpl, sizeof(pt->cpl)); 12167c3f19d7SJim Harris pt->cpl.cdw0 = cpl->cdw0; 12170d787e9bSWojciech Macek 12180d787e9bSWojciech Macek status = cpl->status; 12190d787e9bSWojciech Macek status &= ~NVME_STATUS_P_MASK; 12200d787e9bSWojciech Macek pt->cpl.status = status; 12217c3f19d7SJim Harris 1222c252f637SAlexander Motin mtx_lock(mtx); 1223c252f637SAlexander Motin pt->driver_lock = NULL; 12247c3f19d7SJim Harris wakeup(pt); 1225c252f637SAlexander Motin mtx_unlock(mtx); 12267c3f19d7SJim Harris } 12277c3f19d7SJim Harris 12287c3f19d7SJim Harris int 12297c3f19d7SJim Harris nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr, 12307c3f19d7SJim Harris struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer, 12317c3f19d7SJim Harris int is_admin_cmd) 12327c3f19d7SJim Harris { 12337c3f19d7SJim Harris struct nvme_request *req; 12347c3f19d7SJim Harris struct mtx *mtx; 12357c3f19d7SJim Harris struct buf *buf = NULL; 12367c3f19d7SJim Harris int ret = 0; 1237a3a6c48dSWarner Losh vm_offset_t addr, end; 12387c3f19d7SJim Harris 12397b68ae1eSJim Harris if (pt->len > 0) { 1240a3a6c48dSWarner Losh /* 1241a3a6c48dSWarner Losh * vmapbuf calls vm_fault_quick_hold_pages which only maps full 1242a3a6c48dSWarner Losh * pages. Ensure this request has fewer than MAXPHYS bytes when 1243a3a6c48dSWarner Losh * extended to full pages. 1244a3a6c48dSWarner Losh */ 1245a3a6c48dSWarner Losh addr = (vm_offset_t)pt->buf; 1246a3a6c48dSWarner Losh end = round_page(addr + pt->len); 1247a3a6c48dSWarner Losh addr = trunc_page(addr); 1248a3a6c48dSWarner Losh if (end - addr > MAXPHYS) 1249a3a6c48dSWarner Losh return EIO; 1250a3a6c48dSWarner Losh 12517b68ae1eSJim Harris if (pt->len > ctrlr->max_xfer_size) { 12527b68ae1eSJim Harris nvme_printf(ctrlr, "pt->len (%d) " 12537b68ae1eSJim Harris "exceeds max_xfer_size (%d)\n", pt->len, 12547b68ae1eSJim Harris ctrlr->max_xfer_size); 12557b68ae1eSJim Harris return EIO; 12567b68ae1eSJim Harris } 12577c3f19d7SJim Harris if (is_user_buffer) { 12587c3f19d7SJim Harris /* 12597c3f19d7SJim Harris * Ensure the user buffer is wired for the duration of 12604d547561SWarner Losh * this pass-through command. 12617c3f19d7SJim Harris */ 12627c3f19d7SJim Harris PHOLD(curproc); 1263756a5412SGleb Smirnoff buf = uma_zalloc(pbuf_zone, M_WAITOK); 12647c3f19d7SJim Harris buf->b_data = pt->buf; 12657c3f19d7SJim Harris buf->b_bufsize = pt->len; 12667c3f19d7SJim Harris buf->b_iocmd = pt->is_read ? 
BIO_READ : BIO_WRITE; 12677c3f19d7SJim Harris if (vmapbuf(buf, 1) < 0) { 12687c3f19d7SJim Harris ret = EFAULT; 12697c3f19d7SJim Harris goto err; 12707c3f19d7SJim Harris } 12717c3f19d7SJim Harris req = nvme_allocate_request_vaddr(buf->b_data, pt->len, 12727c3f19d7SJim Harris nvme_pt_done, pt); 12737c3f19d7SJim Harris } else 12747c3f19d7SJim Harris req = nvme_allocate_request_vaddr(pt->buf, pt->len, 12757c3f19d7SJim Harris nvme_pt_done, pt); 12767b68ae1eSJim Harris } else 12777c3f19d7SJim Harris req = nvme_allocate_request_null(nvme_pt_done, pt); 12787c3f19d7SJim Harris 12790d787e9bSWojciech Macek /* Assume user space already converted to little-endian */ 12809544e6dcSChuck Tuffli req->cmd.opc = pt->cmd.opc; 12819544e6dcSChuck Tuffli req->cmd.fuse = pt->cmd.fuse; 128291182bcfSWarner Losh req->cmd.rsvd2 = pt->cmd.rsvd2; 128391182bcfSWarner Losh req->cmd.rsvd3 = pt->cmd.rsvd3; 12847c3f19d7SJim Harris req->cmd.cdw10 = pt->cmd.cdw10; 12857c3f19d7SJim Harris req->cmd.cdw11 = pt->cmd.cdw11; 12867c3f19d7SJim Harris req->cmd.cdw12 = pt->cmd.cdw12; 12877c3f19d7SJim Harris req->cmd.cdw13 = pt->cmd.cdw13; 12887c3f19d7SJim Harris req->cmd.cdw14 = pt->cmd.cdw14; 12897c3f19d7SJim Harris req->cmd.cdw15 = pt->cmd.cdw15; 12907c3f19d7SJim Harris 12910d787e9bSWojciech Macek req->cmd.nsid = htole32(nsid); 12927c3f19d7SJim Harris 1293c252f637SAlexander Motin mtx = mtx_pool_find(mtxpool_sleep, pt); 12947c3f19d7SJim Harris pt->driver_lock = mtx; 12957c3f19d7SJim Harris 12967c3f19d7SJim Harris if (is_admin_cmd) 12977c3f19d7SJim Harris nvme_ctrlr_submit_admin_request(ctrlr, req); 12987c3f19d7SJim Harris else 12997c3f19d7SJim Harris nvme_ctrlr_submit_io_request(ctrlr, req); 13007c3f19d7SJim Harris 1301c252f637SAlexander Motin mtx_lock(mtx); 1302c252f637SAlexander Motin while (pt->driver_lock != NULL) 13037c3f19d7SJim Harris mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0); 13047c3f19d7SJim Harris mtx_unlock(mtx); 13057c3f19d7SJim Harris 13067c3f19d7SJim Harris err: 13077c3f19d7SJim Harris if (buf != NULL) { 1308756a5412SGleb Smirnoff uma_zfree(pbuf_zone, buf); 13097c3f19d7SJim Harris PRELE(curproc); 13107c3f19d7SJim Harris } 13117c3f19d7SJim Harris 13127c3f19d7SJim Harris return (ret); 13137c3f19d7SJim Harris } 13147c3f19d7SJim Harris 1315bb0ec6b3SJim Harris static int 1316bb0ec6b3SJim Harris nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag, 1317bb0ec6b3SJim Harris struct thread *td) 1318bb0ec6b3SJim Harris { 1319bb0ec6b3SJim Harris struct nvme_controller *ctrlr; 13207c3f19d7SJim Harris struct nvme_pt_command *pt; 1321bb0ec6b3SJim Harris 1322bb0ec6b3SJim Harris ctrlr = cdev->si_drv1; 1323bb0ec6b3SJim Harris 1324bb0ec6b3SJim Harris switch (cmd) { 1325b846efd7SJim Harris case NVME_RESET_CONTROLLER: 1326b846efd7SJim Harris nvme_ctrlr_reset(ctrlr); 1327b846efd7SJim Harris break; 13287c3f19d7SJim Harris case NVME_PASSTHROUGH_CMD: 13297c3f19d7SJim Harris pt = (struct nvme_pt_command *)arg; 13300d787e9bSWojciech Macek return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid), 13317c3f19d7SJim Harris 1 /* is_user_buffer */, 1 /* is_admin_cmd */)); 1332a7bf63beSAlexander Motin case NVME_GET_NSID: 1333a7bf63beSAlexander Motin { 1334a7bf63beSAlexander Motin struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg; 1335a7bf63beSAlexander Motin strncpy(gnsid->cdev, device_get_nameunit(ctrlr->dev), 1336a7bf63beSAlexander Motin sizeof(gnsid->cdev)); 1337a7bf63beSAlexander Motin gnsid->nsid = 0; 1338a7bf63beSAlexander Motin break; 1339a7bf63beSAlexander Motin } 1340bb0ec6b3SJim Harris default: 
1341bb0ec6b3SJim Harris return (ENOTTY); 1342bb0ec6b3SJim Harris } 1343bb0ec6b3SJim Harris 1344bb0ec6b3SJim Harris return (0); 1345bb0ec6b3SJim Harris } 1346bb0ec6b3SJim Harris 1347bb0ec6b3SJim Harris static struct cdevsw nvme_ctrlr_cdevsw = { 1348bb0ec6b3SJim Harris .d_version = D_VERSION, 1349bb0ec6b3SJim Harris .d_flags = 0, 1350bb0ec6b3SJim Harris .d_ioctl = nvme_ctrlr_ioctl 1351bb0ec6b3SJim Harris }; 1352bb0ec6b3SJim Harris 1353bb0ec6b3SJim Harris int 1354bb0ec6b3SJim Harris nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev) 1355bb0ec6b3SJim Harris { 1356e134ecdcSAlexander Motin struct make_dev_args md_args; 13570d787e9bSWojciech Macek uint32_t cap_lo; 13580d787e9bSWojciech Macek uint32_t cap_hi; 135908a607e0SWarner Losh uint32_t to; 13600d787e9bSWojciech Macek uint8_t mpsmin; 1361f42ca756SJim Harris int status, timeout_period; 1362bb0ec6b3SJim Harris 1363bb0ec6b3SJim Harris ctrlr->dev = dev; 1364bb0ec6b3SJim Harris 1365a90b8104SJim Harris mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF); 13661eab19cbSAlexander Motin if (bus_get_domain(dev, &ctrlr->domain) != 0) 13671eab19cbSAlexander Motin ctrlr->domain = 0; 1368a90b8104SJim Harris 13690d787e9bSWojciech Macek cap_hi = nvme_mmio_read_4(ctrlr, cap_hi); 1370f93b7f95SWarner Losh ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2; 1371bb0ec6b3SJim Harris 137262d2cf18SWarner Losh mpsmin = NVME_CAP_HI_MPSMIN(cap_hi); 13730d787e9bSWojciech Macek ctrlr->min_page_size = 1 << (12 + mpsmin); 137402e33484SJim Harris 1375bb0ec6b3SJim Harris /* Get ready timeout value from controller, in units of 500ms. */ 13760d787e9bSWojciech Macek cap_lo = nvme_mmio_read_4(ctrlr, cap_lo); 137762d2cf18SWarner Losh to = NVME_CAP_LO_TO(cap_lo) + 1; 13780d787e9bSWojciech Macek ctrlr->ready_timeout_in_ms = to * 500; 1379bb0ec6b3SJim Harris 138094143332SJim Harris timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD; 138194143332SJim Harris TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period); 138294143332SJim Harris timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD); 138394143332SJim Harris timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD); 138494143332SJim Harris ctrlr->timeout_period = timeout_period; 138594143332SJim Harris 1386cb5b7c13SJim Harris nvme_retry_count = NVME_DEFAULT_RETRY_COUNT; 1387cb5b7c13SJim Harris TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count); 1388cb5b7c13SJim Harris 138948ce3178SJim Harris ctrlr->enable_aborts = 0; 139048ce3178SJim Harris TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts); 139148ce3178SJim Harris 13928d09e3c4SJim Harris ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE; 1393a965389bSScott Long if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0) 1394a965389bSScott Long return (ENXIO); 1395bb0ec6b3SJim Harris 139612d191ecSJim Harris ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK, 139712d191ecSJim Harris taskqueue_thread_enqueue, &ctrlr->taskqueue); 139812d191ecSJim Harris taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq"); 139912d191ecSJim Harris 1400f37c22a3SJim Harris ctrlr->is_resetting = 0; 1401496a2752SJim Harris ctrlr->is_initialized = 0; 1402496a2752SJim Harris ctrlr->notification_sent = 0; 1403232e2edbSJim Harris TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr); 1404232e2edbSJim Harris TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr); 1405232e2edbSJim Harris STAILQ_INIT(&ctrlr->fail_req); 14067588c6ccSWarner Losh ctrlr->is_failed = false; 1407f37c22a3SJim Harris 1408e134ecdcSAlexander Motin 
make_dev_args_init(&md_args); 1409e134ecdcSAlexander Motin md_args.mda_devsw = &nvme_ctrlr_cdevsw; 1410e134ecdcSAlexander Motin md_args.mda_uid = UID_ROOT; 1411e134ecdcSAlexander Motin md_args.mda_gid = GID_WHEEL; 1412e134ecdcSAlexander Motin md_args.mda_mode = 0600; 1413e134ecdcSAlexander Motin md_args.mda_unit = device_get_unit(dev); 1414e134ecdcSAlexander Motin md_args.mda_si_drv1 = (void *)ctrlr; 1415e134ecdcSAlexander Motin status = make_dev_s(&md_args, &ctrlr->cdev, "nvme%d", 1416e134ecdcSAlexander Motin device_get_unit(dev)); 1417e134ecdcSAlexander Motin if (status != 0) 1418e134ecdcSAlexander Motin return (ENXIO); 1419e134ecdcSAlexander Motin 1420bb0ec6b3SJim Harris return (0); 1421bb0ec6b3SJim Harris } 1422d281e8fbSJim Harris 1423d281e8fbSJim Harris void 1424990e741cSJim Harris nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev) 1425990e741cSJim Harris { 142671a28181SAlexander Motin int gone, i; 1427990e741cSJim Harris 1428e134ecdcSAlexander Motin if (ctrlr->resource == NULL) 1429e134ecdcSAlexander Motin goto nores; 143012d191ecSJim Harris 143171a28181SAlexander Motin /* 143271a28181SAlexander Motin * Check whether it is a hot unplug or a clean driver detach. 143371a28181SAlexander Motin * If device is not there any more, skip any shutdown commands. 143471a28181SAlexander Motin */ 143571a28181SAlexander Motin gone = (nvme_mmio_read_4(ctrlr, csts) == 0xffffffff); 143671a28181SAlexander Motin if (gone) 143771a28181SAlexander Motin nvme_ctrlr_fail(ctrlr); 143871a28181SAlexander Motin else 1439f439e3a4SAlexander Motin nvme_notify_fail_consumers(ctrlr); 1440f439e3a4SAlexander Motin 1441b846efd7SJim Harris for (i = 0; i < NVME_MAX_NAMESPACES; i++) 1442b846efd7SJim Harris nvme_ns_destruct(&ctrlr->ns[i]); 1443990e741cSJim Harris 1444990e741cSJim Harris if (ctrlr->cdev) 1445990e741cSJim Harris destroy_dev(ctrlr->cdev); 1446990e741cSJim Harris 14478e61280bSWarner Losh if (ctrlr->is_initialized) { 144867abaee9SAlexander Motin if (!gone) { 144967abaee9SAlexander Motin if (ctrlr->hmb_nchunks > 0) 145067abaee9SAlexander Motin nvme_ctrlr_hmb_enable(ctrlr, false, false); 14514d547561SWarner Losh nvme_ctrlr_delete_qpairs(ctrlr); 145267abaee9SAlexander Motin } 145371a28181SAlexander Motin for (i = 0; i < ctrlr->num_io_queues; i++) 1454990e741cSJim Harris nvme_io_qpair_destroy(&ctrlr->ioq[i]); 1455990e741cSJim Harris free(ctrlr->ioq, M_NVME); 145667abaee9SAlexander Motin nvme_ctrlr_hmb_free(ctrlr); 1457990e741cSJim Harris nvme_admin_qpair_destroy(&ctrlr->adminq); 14588e61280bSWarner Losh } 1459990e741cSJim Harris 1460e134ecdcSAlexander Motin /* 1461e134ecdcSAlexander Motin * Notify the controller of a shutdown, even though this is due to 1462e134ecdcSAlexander Motin * a driver unload, not a system shutdown (this path is not invoked 1463e134ecdcSAlexander Motin * during shutdown). This ensures the controller receives a 1464e134ecdcSAlexander Motin * shutdown notification in case the system is shutdown before 1465e134ecdcSAlexander Motin * reloading the driver. 
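 * nvme_ctrlr_shutdown() below performs the actual notification: it sets
 * CC.SHN to request a normal shutdown and then polls CSTS.SHST until the
 * controller reports that shutdown processing is complete (or the wait
 * times out, or the device turns out to have been hot-unplugged).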
1466e134ecdcSAlexander Motin */ 146771a28181SAlexander Motin if (!gone) 1468e134ecdcSAlexander Motin nvme_ctrlr_shutdown(ctrlr); 1469990e741cSJim Harris 147071a28181SAlexander Motin if (!gone) 1471e134ecdcSAlexander Motin nvme_ctrlr_disable(ctrlr); 1472e134ecdcSAlexander Motin 1473e134ecdcSAlexander Motin if (ctrlr->taskqueue) 1474e134ecdcSAlexander Motin taskqueue_free(ctrlr->taskqueue); 1475990e741cSJim Harris 1476990e741cSJim Harris if (ctrlr->tag) 1477990e741cSJim Harris bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag); 1478990e741cSJim Harris 1479990e741cSJim Harris if (ctrlr->res) 1480990e741cSJim Harris bus_release_resource(ctrlr->dev, SYS_RES_IRQ, 1481990e741cSJim Harris rman_get_rid(ctrlr->res), ctrlr->res); 1482990e741cSJim Harris 1483e134ecdcSAlexander Motin if (ctrlr->bar4_resource != NULL) { 1484e134ecdcSAlexander Motin bus_release_resource(dev, SYS_RES_MEMORY, 1485e134ecdcSAlexander Motin ctrlr->bar4_resource_id, ctrlr->bar4_resource); 1486e134ecdcSAlexander Motin } 1487e134ecdcSAlexander Motin 1488e134ecdcSAlexander Motin bus_release_resource(dev, SYS_RES_MEMORY, 1489e134ecdcSAlexander Motin ctrlr->resource_id, ctrlr->resource); 1490e134ecdcSAlexander Motin 1491e134ecdcSAlexander Motin nores: 1492e134ecdcSAlexander Motin mtx_destroy(&ctrlr->lock); 1493990e741cSJim Harris } 1494990e741cSJim Harris 1495990e741cSJim Harris void 149656183abcSJim Harris nvme_ctrlr_shutdown(struct nvme_controller *ctrlr) 149756183abcSJim Harris { 14980d787e9bSWojciech Macek uint32_t cc; 14990d787e9bSWojciech Macek uint32_t csts; 150056183abcSJim Harris int ticks = 0; 150156183abcSJim Harris 15020d787e9bSWojciech Macek cc = nvme_mmio_read_4(ctrlr, cc); 15030d787e9bSWojciech Macek cc &= ~(NVME_CC_REG_SHN_MASK << NVME_CC_REG_SHN_SHIFT); 15040d787e9bSWojciech Macek cc |= NVME_SHN_NORMAL << NVME_CC_REG_SHN_SHIFT; 15050d787e9bSWojciech Macek nvme_mmio_write_4(ctrlr, cc, cc); 15060d787e9bSWojciech Macek 150771a28181SAlexander Motin while (1) { 15080d787e9bSWojciech Macek csts = nvme_mmio_read_4(ctrlr, csts); 150971a28181SAlexander Motin if (csts == 0xffffffff) /* Hot unplug. 
*/ 151071a28181SAlexander Motin break; 151171a28181SAlexander Motin if (NVME_CSTS_GET_SHST(csts) == NVME_SHST_COMPLETE) 151271a28181SAlexander Motin break; 151371a28181SAlexander Motin if (ticks++ > 5*hz) { 151471a28181SAlexander Motin nvme_printf(ctrlr, "did not complete shutdown within" 151571a28181SAlexander Motin " 5 seconds of notification\n"); 151671a28181SAlexander Motin break; 151756183abcSJim Harris } 151871a28181SAlexander Motin pause("nvme shn", 1); 151971a28181SAlexander Motin } 152056183abcSJim Harris } 152156183abcSJim Harris 152256183abcSJim Harris void 1523d281e8fbSJim Harris nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr, 1524d281e8fbSJim Harris struct nvme_request *req) 1525d281e8fbSJim Harris { 1526d281e8fbSJim Harris 15275ae9ed68SJim Harris nvme_qpair_submit_request(&ctrlr->adminq, req); 1528d281e8fbSJim Harris } 1529d281e8fbSJim Harris 1530d281e8fbSJim Harris void 1531d281e8fbSJim Harris nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr, 1532d281e8fbSJim Harris struct nvme_request *req) 1533d281e8fbSJim Harris { 1534d281e8fbSJim Harris struct nvme_qpair *qpair; 1535d281e8fbSJim Harris 15361eab19cbSAlexander Motin qpair = &ctrlr->ioq[QP(ctrlr, curcpu)]; 15375ae9ed68SJim Harris nvme_qpair_submit_request(qpair, req); 1538d281e8fbSJim Harris } 1539038a5ee4SJim Harris 1540038a5ee4SJim Harris device_t 1541038a5ee4SJim Harris nvme_ctrlr_get_device(struct nvme_controller *ctrlr) 1542038a5ee4SJim Harris { 1543038a5ee4SJim Harris 1544038a5ee4SJim Harris return (ctrlr->dev); 1545038a5ee4SJim Harris } 1546dbba7442SJim Harris 1547dbba7442SJim Harris const struct nvme_controller_data * 1548dbba7442SJim Harris nvme_ctrlr_get_data(struct nvme_controller *ctrlr) 1549dbba7442SJim Harris { 1550dbba7442SJim Harris 1551dbba7442SJim Harris return (&ctrlr->cdata); 1552dbba7442SJim Harris } 15534d547561SWarner Losh 15544d547561SWarner Losh int 15554d547561SWarner Losh nvme_ctrlr_suspend(struct nvme_controller *ctrlr) 15564d547561SWarner Losh { 15574d547561SWarner Losh int to = hz; 15584d547561SWarner Losh 15594d547561SWarner Losh /* 15604d547561SWarner Losh * Can't touch failed controllers, so it's already suspended. 15614d547561SWarner Losh */ 15624d547561SWarner Losh if (ctrlr->is_failed) 15634d547561SWarner Losh return (0); 15644d547561SWarner Losh 15654d547561SWarner Losh /* 15664d547561SWarner Losh * We don't want the reset taskqueue running, since it does similar 15674d547561SWarner Losh * things, so prevent it from running after we start. Wait for any reset 15684d547561SWarner Losh * that may have been started to complete. The reset process we follow 15694d547561SWarner Losh * will ensure that any new I/O will queue and be given to the hardware 15704d547561SWarner Losh * after we resume (though there should be none). 15714d547561SWarner Losh */ 15724d547561SWarner Losh while (atomic_cmpset_32(&ctrlr->is_resetting, 0, 1) == 0 && to-- > 0) 15734d547561SWarner Losh pause("nvmesusp", 1); 15744d547561SWarner Losh if (to <= 0) { 15754d547561SWarner Losh nvme_printf(ctrlr, 15764d547561SWarner Losh "Competing reset task didn't finish. 
Try again later.\n"); 15774d547561SWarner Losh return (EWOULDBLOCK); 15784d547561SWarner Losh } 15794d547561SWarner Losh 158067abaee9SAlexander Motin if (ctrlr->hmb_nchunks > 0) 158167abaee9SAlexander Motin nvme_ctrlr_hmb_enable(ctrlr, false, false); 158267abaee9SAlexander Motin 15834d547561SWarner Losh /* 15844d547561SWarner Losh * Per Section 7.6.2 of NVMe spec 1.4, to properly suspend, we need to 15854d547561SWarner Losh * delete the hardware I/O queues, and then shut down. This properly 15864d547561SWarner Losh * flushes any metadata the drive may have stored so it can survive 15874d547561SWarner Losh * having its power removed and prevents the unsafe shutdown count from 15884d547561SWarner Losh * incrementing. Once we delete the qpairs, we have to disable them 15894d547561SWarner Losh * before shutting down. The delay is out of paranoia in 15904d547561SWarner Losh * nvme_ctrlr_hw_reset, and is repeated here (though we should have no 15914d547561SWarner Losh * pending I/O that the delay copes with). 15924d547561SWarner Losh */ 15934d547561SWarner Losh nvme_ctrlr_delete_qpairs(ctrlr); 15944d547561SWarner Losh nvme_ctrlr_disable_qpairs(ctrlr); 15954d547561SWarner Losh DELAY(100*1000); 15964d547561SWarner Losh nvme_ctrlr_shutdown(ctrlr); 15974d547561SWarner Losh 15984d547561SWarner Losh return (0); 15994d547561SWarner Losh } 16004d547561SWarner Losh 16014d547561SWarner Losh int 16024d547561SWarner Losh nvme_ctrlr_resume(struct nvme_controller *ctrlr) 16034d547561SWarner Losh { 16044d547561SWarner Losh 16054d547561SWarner Losh /* 16064d547561SWarner Losh * Can't touch failed controllers, so nothing to do to resume. 16074d547561SWarner Losh */ 16084d547561SWarner Losh if (ctrlr->is_failed) 16094d547561SWarner Losh return (0); 16104d547561SWarner Losh 16114d547561SWarner Losh /* 16124d547561SWarner Losh * Have to reset the hardware twice, just like we do on attach. See 16134d547561SWarner Losh * nvme_attach() for why. 16144d547561SWarner Losh */ 16154d547561SWarner Losh if (nvme_ctrlr_hw_reset(ctrlr) != 0) 16164d547561SWarner Losh goto fail; 16174d547561SWarner Losh if (nvme_ctrlr_hw_reset(ctrlr) != 0) 16184d547561SWarner Losh goto fail; 16194d547561SWarner Losh 16204d547561SWarner Losh /* 16214d547561SWarner Losh * Now that we've reset the hardware, we can restart the controller. Any 16224d547561SWarner Losh * I/O that was pending is requeued. Any admin commands are aborted with 16234d547561SWarner Losh * an error. Once we've restarted, take the controller out of reset. 16244d547561SWarner Losh */ 16254d547561SWarner Losh nvme_ctrlr_start(ctrlr, true); 16264d547561SWarner Losh atomic_cmpset_32(&ctrlr->is_resetting, 1, 0); 16274d547561SWarner Losh 16284d547561SWarner Losh return (0); 16294d547561SWarner Losh fail: 16304d547561SWarner Losh /* 16314d547561SWarner Losh * Since we can't bring the controller out of reset, announce and fail 16324d547561SWarner Losh * the controller. However, we have to return success for the resume 16334d547561SWarner Losh * itself, due to questionable APIs. 16344d547561SWarner Losh */ 16354d547561SWarner Losh nvme_printf(ctrlr, "Failed to reset on resume, failing.\n"); 16364d547561SWarner Losh nvme_ctrlr_fail(ctrlr); 16374d547561SWarner Losh atomic_cmpset_32(&ctrlr->is_resetting, 1, 0); 16384d547561SWarner Losh return (0); 16394d547561SWarner Losh } 1640
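/*
 * Illustrative only -- not part of the driver: a minimal userland sketch of
 * the NVME_PASSTHROUGH_CMD ioctl implemented by nvme_ctrlr_ioctl() above,
 * issuing an admin IDENTIFY (opcode 0x06, CNS=1) against /dev/nvme0.  The
 * struct nvme_pt_command fields used here (cmd, buf, len, is_read, cpl) are
 * the ones referenced in the passthrough path above; as noted there, the
 * command dwords are passed through as-is, so user space supplies them in
 * little-endian byte order.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/endian.h>
 *	#include <dev/nvme/nvme.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct nvme_pt_command pt;
 *		struct nvme_controller_data cdata;
 *		int fd;
 *
 *		fd = open("/dev/nvme0", O_RDWR);
 *		if (fd < 0)
 *			err(1, "open");
 *
 *		memset(&pt, 0, sizeof(pt));
 *		pt.cmd.opc = NVME_OPC_IDENTIFY;		// admin opcode 0x06
 *		pt.cmd.cdw10 = htole32(1);		// CNS=1: identify controller
 *		pt.buf = &cdata;
 *		pt.len = sizeof(cdata);			// 4096-byte identify data
 *		pt.is_read = 1;				// controller-to-host transfer
 *
 *		if (ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) < 0)
 *			err(1, "ioctl");
 *		if (pt.cpl.status != 0)
 *			errx(1, "command failed, status %#x", pt.cpl.status);
 *		printf("model: %.40s\n", (const char *)cdata.mn);
 *		return (0);
 *	}
 */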