1 /*- 2 * Copyright (C) 2012-2016 Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/smp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

/*
 * PCI bus attachment for the NVMe driver: newbus probe/attach/detach glue,
 * BAR mapping, and interrupt (MSI-X or fallback INTx) configuration.  The
 * controller-generic logic lives behind nvme_attach()/nvme_detach() in
 * nvme_private.h.
 */

static int    nvme_pci_probe(device_t);
static int    nvme_pci_attach(device_t);
static int    nvme_pci_detach(device_t);

static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);

static device_method_t nvme_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nvme_pci_probe),
	DEVMETHOD(device_attach,	nvme_pci_attach),
	DEVMETHOD(device_detach,	nvme_pci_detach),
	DEVMETHOD(device_shutdown,	nvme_shutdown),
	{ 0, 0 }
};

static driver_t nvme_pci_driver = {
	"nvme",
	nvme_pci_methods,
	sizeof(struct nvme_controller),
};

DRIVER_MODULE(nvme, pci, nvme_pci_driver, nvme_devclass, NULL, 0);

/*
 * Table of known NVMe devices, matched by PCI device ID and (optionally)
 * subdevice ID.  Entries may carry quirk flags (e.g. QUIRK_DELAY_B4_CHK_RDY
 * for controllers that need a delay before the ready-bit check).  The table
 * is terminated by a zero devid sentinel.
 */
static struct _pcsid
{
	uint32_t	devid;		/* PCI vendor+device ID */
	int		match_subdevice; /* nonzero: subdevice must also match */
	uint16_t	subdevice;	/* PCI subdevice ID, if matched */
	const char	*desc;		/* human-readable description */
	uint32_t	quirks;		/* quirk flags copied into the softc */
} pci_ids[] = {
	{ 0x01118086,		0, 0, "NVMe Controller" },
	{ IDT32_PCI_ID,		0, 0, "IDT NVMe Controller (32 channel)" },
	{ IDT8_PCI_ID,		0, 0, "IDT NVMe Controller (8 channel)" },
	{ 0x09538086,		1, 0x3702, "DC P3700 SSD" },
	{ 0x09538086,		1, 0x3703, "DC P3700 SSD [2.5\" SFF]" },
	{ 0x09538086,		1, 0x3704, "DC P3500 SSD [Add-in Card]" },
	{ 0x09538086,		1, 0x3705, "DC P3500 SSD [2.5\" SFF]" },
	{ 0x09538086,		1, 0x3709, "DC P3600 SSD [Add-in Card]" },
	{ 0x09538086,		1, 0x370a, "DC P3600 SSD [2.5\" SFF]" },
	{ 0x00031c58,		0, 0, "HGST SN100", QUIRK_DELAY_B4_CHK_RDY },
	{ 0x00231c58,		0, 0, "WDC SN200", QUIRK_DELAY_B4_CHK_RDY },
	{ 0x05401c5f,		0, 0, "Memblaze Pblaze4", QUIRK_DELAY_B4_CHK_RDY },
	{ 0xa821144d,		0, 0, "Samsung PM1725", QUIRK_DELAY_B4_CHK_RDY },
	{ 0xa822144d,		0, 0, "Samsung PM1725a", QUIRK_DELAY_B4_CHK_RDY },
	{ 0x00000000,		0, 0, NULL }
};

/*
 * Return 1 if the device's devid (and, when the entry requires it, the
 * subdevice ID) matches table entry ep; 0 otherwise.
 */
static int
nvme_match(uint32_t devid, uint16_t subdevice, struct _pcsid *ep)
{
	if (devid != ep->devid)
		return 0;

	if (!ep->match_subdevice)
		return 1;

	if (subdevice == ep->subdevice)
		return 1;
	else
		return 0;
}

/*
 * Newbus probe: look the device up in pci_ids[].  On a table hit, stash the
 * entry's quirks in the softc and claim the device at BUS_PROBE_DEFAULT.
 * Otherwise, if built against headers that define the NVM storage subclass,
 * claim any device with the generic NVMe class/subclass/progif at the lower
 * BUS_PROBE_GENERIC priority.
 */
static int
nvme_pci_probe (device_t device)
{
	struct nvme_controller *ctrlr = DEVICE2SOFTC(device);
	struct _pcsid	*ep;
	uint32_t	devid;
	uint16_t	subdevice;

	devid = pci_get_devid(device);
	subdevice = pci_get_subdevice(device);
	ep = pci_ids;

	while (ep->devid) {
		if (nvme_match(devid, subdevice, ep))
			break;
		++ep;
	}
	/* Only a real table hit carries quirks; the sentinel has devid 0. */
	if (ep->devid)
		ctrlr->quirks = ep->quirks;

	if (ep->desc) {
		device_set_desc(device, ep->desc);
		return (BUS_PROBE_DEFAULT);
	}

#if defined(PCIS_STORAGE_NVM)
	if (pci_get_class(device)    == PCIC_STORAGE &&
	    pci_get_subclass(device) == PCIS_STORAGE_NVM &&
	    pci_get_progif(device)   == PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0) {
		device_set_desc(device, "Generic NVMe Device");
		return (BUS_PROBE_GENERIC);
	}
#endif

	return (ENXIO);
}

/*
 * Map the controller's register BARs.  BAR 0 holds the control/doorbell
 * registers and is mandatory; failure to map it returns ENOMEM (caller
 * releases any allocated resources).  BAR 4/5 is mapped opportunistically
 * for the MSI-X table (see comment below).
 */
static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{

	ctrlr->resource_id = PCIR_BAR(0);

	ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->resource_id, RF_ACTIVE);

	if(ctrlr->resource == NULL) {
		nvme_printf(ctrlr, "unable to allocate pci resource\n");
		return (ENOMEM);
	}

	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed behind
	 * BAR 4/5, separate from the control/doorbell registers.  Always
	 * try to map this bar, because it must be mapped prior to calling
	 * pci_alloc_msix().  If the table isn't behind BAR 4/5,
	 * bus_alloc_resource() will just return NULL which is OK.
	 */
	ctrlr->bar4_resource_id = PCIR_BAR(4);
	ctrlr->bar4_resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->bar4_resource_id, RF_ACTIVE);

	return (0);
}

/*
 * Newbus attach: map BARs, enable bus mastering, configure interrupts, then
 * hand off to the generic nvme_attach().  The "bad" label unwinds everything
 * this function may have allocated (memory BARs, the INTx IRQ resource and
 * handler, MSI-X vectors).
 *
 * NOTE(review): nvme_ctrlr_setup_interrupts() returns void, so an interrupt
 * setup failure (e.g. INTx allocation failing) is not propagated here and
 * attach proceeds into nvme_attach() regardless — confirm this is handled
 * downstream.
 */
static int
nvme_pci_attach(device_t dev)
{
	struct nvme_controller *ctrlr = DEVICE2SOFTC(dev);
	int status;

	ctrlr->dev = dev;
	status = nvme_ctrlr_allocate_bar(ctrlr);
	if (status != 0)
		goto bad;
	pci_enable_busmaster(dev);
	nvme_ctrlr_setup_interrupts(ctrlr);
	return nvme_attach(dev);
bad:
	if (ctrlr->resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->resource_id, ctrlr->resource);
	}

	if (ctrlr->bar4_resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
	}

	if (ctrlr->tag)
		bus_teardown_intr(dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	if (ctrlr->msix_enabled)
		pci_release_msi(dev);

	return status;
}

/*
 * Newbus detach: run the generic nvme_detach() first, then release MSI-X
 * vectors (if any) and disable bus mastering.  Returns nvme_detach()'s
 * status.
 */
static int
nvme_pci_detach(device_t dev)
{
	struct nvme_controller *ctrlr = DEVICE2SOFTC(dev);
	int rv;

	rv = nvme_detach(dev);
	if (ctrlr->msix_enabled)
		pci_release_msi(dev);
	pci_disable_busmaster(dev);
	return (rv);
}

/*
 * Fall back to legacy INTx: a single shared level-triggered IRQ serving one
 * I/O queue shared by all CPUs.  Returns 0 on success, ENOMEM on failure.
 *
 * NOTE(review): bus_setup_intr()'s return value is not checked; the
 * ctrlr->tag == NULL test below relies on the softc being zero-initialized
 * by newbus — confirm.  Also, on the handler-setup failure path the
 * already-allocated IRQ resource in ctrlr->res is not released here; verify
 * the caller cleans it up.
 */
static int
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{

	ctrlr->msix_enabled = 0;
	ctrlr->num_io_queues = 1;
	ctrlr->num_cpus_per_ioq = mp_ncpus;
	ctrlr->rid = 0;
	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);

	if (ctrlr->res == NULL) {
		nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
		return (ENOMEM);
	}

	bus_setup_intr(ctrlr->dev, ctrlr->res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
	    ctrlr, &ctrlr->tag);

	if (ctrlr->tag == NULL) {
		nvme_printf(ctrlr, "unable to setup intx handler\n");
		return (ENOMEM);
	}

	return (0);
}

/*
 * Decide the interrupt configuration for the controller: MSI-X with up to
 * one I/O queue per CPU (plus one admin vector), bounded by the tunables
 * hw.nvme.min_cpus_per_ioq / hw.nvme.per_cpu_io_queues and by how many
 * vectors the platform can actually provide; otherwise fall back to INTx
 * (forced via hw.nvme.force_intx or when fewer than 2 vectors are
 * available).  On success sets num_io_queues, num_cpus_per_ioq and
 * msix_enabled in the softc.
 */
static void
nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
{
	device_t	dev;
	int		per_cpu_io_queues;
	int		min_cpus_per_ioq;
	int		num_vectors_requested, num_vectors_allocated;
	int		num_vectors_available;

	dev = ctrlr->dev;
	min_cpus_per_ioq = 1;
	TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);

	/* Clamp the tunable to [1, mp_ncpus]. */
	if (min_cpus_per_ioq < 1) {
		min_cpus_per_ioq = 1;
	} else if (min_cpus_per_ioq > mp_ncpus) {
		min_cpus_per_ioq = mp_ncpus;
	}

	per_cpu_io_queues = 1;
	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);

	/* per_cpu_io_queues=0 forces a single I/O queue shared by all CPUs. */
	if (per_cpu_io_queues == 0) {
		min_cpus_per_ioq = mp_ncpus;
	}

	ctrlr->force_intx = 0;
	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);

	/*
	 * FreeBSD currently cannot allocate more than about 190 vectors at
	 * boot, meaning that systems with high core count and many devices
	 * requesting per-CPU interrupt vectors will not get their full
	 * allotment.  So first, try to allocate as many as we may need to
	 * understand what is available, then immediately release them.
	 * Then figure out how many of those we will actually use, based on
	 * assigning an equal number of cores to each I/O queue.
	 */

	/* One vector for per core I/O queue, plus one vector for admin queue. */
	num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
	if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
		num_vectors_available = 0;
	}
	pci_release_msi(dev);

	if (ctrlr->force_intx || num_vectors_available < 2) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	/*
	 * Do not use all vectors for I/O queues - one must be saved for the
	 * admin queue.
	 */
	ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
	    howmany(mp_ncpus, num_vectors_available - 1));

	ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
	num_vectors_requested = ctrlr->num_io_queues + 1;
	num_vectors_allocated = num_vectors_requested;

	/*
	 * Now just allocate the number of vectors we need.  This should
	 * succeed, since we previously called pci_alloc_msix()
	 * successfully returning at least this many vectors, but just to
	 * be safe, if something goes wrong just revert to INTx.
	 */
	if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	if (num_vectors_allocated < num_vectors_requested) {
		pci_release_msi(dev);
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	ctrlr->msix_enabled = 1;
}