/*-
 * Copyright (C) 2012-2016 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * PCI bus attachment for the NVMe driver: probe/attach/detach glue,
 * BAR mapping, and MSI-X / INTx interrupt-vector negotiation.  The
 * controller-independent logic lives behind nvme_attach()/nvme_detach()
 * (see nvme_private.h).
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/smp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static int    nvme_pci_probe(device_t);
static int    nvme_pci_attach(device_t);
static int    nvme_pci_detach(device_t);

static void   nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);

static device_method_t nvme_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     nvme_pci_probe),
	DEVMETHOD(device_attach,    nvme_pci_attach),
	DEVMETHOD(device_detach,    nvme_pci_detach),
	DEVMETHOD(device_shutdown,  nvme_shutdown),
	{ 0, 0 }
};

static driver_t nvme_pci_driver = {
	"nvme",
	nvme_pci_methods,
	sizeof(struct nvme_controller),
};

DRIVER_MODULE(nvme, pci, nvme_pci_driver, nvme_devclass, NULL, 0);
MODULE_VERSION(nvme_pci, 1);

/*
 * Table of explicitly recognized NVMe devices.  A nonzero match_subdevice
 * requires the PCI subdevice ID to match as well (used below to tell the
 * various Intel DC P35xx/P36xx/P37xx SKUs apart, which share device ID
 * 0x0953).  The quirks field is copied into the softc on a match;
 * QUIRK_DELAY_B4_CHK_RDY marks parts that need a delay before the ready
 * check.  Devices not listed here can still attach via the generic NVMe
 * class-code match in nvme_pci_probe().
 */
static struct _pcsid
{
	uint32_t	devid;		/* combined PCI device/vendor ID */
	int		match_subdevice; /* nonzero: subdevice must match too */
	uint16_t	subdevice;
	const char	*desc;
	uint32_t	quirks;
} pci_ids[] = {
	{ 0x01118086,		0, 0, "NVMe Controller"  },
	{ IDT32_PCI_ID,		0, 0, "IDT NVMe Controller (32 channel)"  },
	{ IDT8_PCI_ID,		0, 0, "IDT NVMe Controller (8 channel)" },
	{ 0x09538086,		1, 0x3702, "DC P3700 SSD" },
	{ 0x09538086,		1, 0x3703, "DC P3700 SSD [2.5\" SFF]" },
	{ 0x09538086,		1, 0x3704, "DC P3500 SSD [Add-in Card]" },
	{ 0x09538086,		1, 0x3705, "DC P3500 SSD [2.5\" SFF]" },
	{ 0x09538086,		1, 0x3709, "DC P3600 SSD [Add-in Card]" },
	{ 0x09538086,		1, 0x370a, "DC P3600 SSD [2.5\" SFF]" },
	{ 0x00031c58,		0, 0, "HGST SN100",	QUIRK_DELAY_B4_CHK_RDY },
	{ 0x00231c58,		0, 0, "WDC SN200",	QUIRK_DELAY_B4_CHK_RDY },
	{ 0x05401c5f,		0, 0, "Memblaze Pblaze4", QUIRK_DELAY_B4_CHK_RDY },
	{ 0xa821144d,		0, 0, "Samsung PM1725", QUIRK_DELAY_B4_CHK_RDY },
	{ 0xa822144d,		0, 0, "Samsung PM1725a", QUIRK_DELAY_B4_CHK_RDY },
	{ 0x00000000,		0, 0, NULL  }	/* table terminator */
};


/*
 * Return 1 if (devid, subdevice) matches table entry ep, else 0.
 * The subdevice ID only participates when the entry sets match_subdevice.
 */
static int
nvme_match(uint32_t devid, uint16_t subdevice, struct _pcsid *ep)
{
	if (devid != ep->devid)
		return 0;

	if (!ep->match_subdevice)
		return 1;

	if (subdevice == ep->subdevice)
		return 1;
	else
		return 0;
}

/*
 * Device probe method.  First consult the explicit pci_ids table (which
 * also supplies quirks); failing that, accept any device advertising the
 * standard NVMe PCI class/subclass/progif at generic probe priority.
 */
static int
nvme_pci_probe (device_t device)
{
	struct nvme_controller *ctrlr = DEVICE2SOFTC(device);
	struct _pcsid	*ep;
	uint32_t	devid;
	uint16_t	subdevice;

	devid = pci_get_devid(device);
	subdevice = pci_get_subdevice(device);
	ep = pci_ids;

	/* Walk the table until the NULL-desc terminator or a match. */
	while (ep->devid) {
		if (nvme_match(devid, subdevice, ep))
			break;
		++ep;
	}
	if (ep->devid)
		ctrlr->quirks = ep->quirks;

	if (ep->desc) {
		device_set_desc(device, ep->desc);
		return (BUS_PROBE_DEFAULT);
	}

#if defined(PCIS_STORAGE_NVM)
	/* Not in the table: fall back to the generic NVMe class match. */
	if (pci_get_class(device)    == PCIC_STORAGE &&
	    pci_get_subclass(device) == PCIS_STORAGE_NVM &&
	    pci_get_progif(device)   == PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0) {
		device_set_desc(device, "Generic NVMe Device");
		return (BUS_PROBE_GENERIC);
	}
#endif

	return (ENXIO);
}

/*
 * Map BAR 0 (controller registers/doorbells) and opportunistically BAR 4/5.
 * Returns 0 on success or ENOMEM if the mandatory BAR 0 mapping fails.
 * On failure, any state set here is cleaned up by the caller's error path.
 */
static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{

	ctrlr->resource_id = PCIR_BAR(0);

	ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->resource_id, RF_ACTIVE);

	if(ctrlr->resource == NULL) {
		nvme_printf(ctrlr, "unable to allocate pci resource\n");
		return (ENOMEM);
	}

	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed behind
	 * BAR 4/5, separate from the control/doorbell registers.  Always
	 * try to map this bar, because it must be mapped prior to calling
	 * pci_alloc_msix().  If the table isn't behind BAR 4/5,
	 * bus_alloc_resource() will just return NULL which is OK.
	 */
	ctrlr->bar4_resource_id = PCIR_BAR(4);
	ctrlr->bar4_resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->bar4_resource_id, RF_ACTIVE);

	return (0);
}

/*
 * Device attach method: map BARs, enable bus mastering, set up
 * interrupts, then hand off to the shared nvme_attach().  On BAR
 * failure, release whatever resources were acquired.
 *
 * NOTE(review): nvme_ctrlr_setup_interrupts() is void, so an interrupt
 * setup failure is not observable here and attach proceeds regardless —
 * confirm nvme_attach() copes with a missing interrupt handler.
 */
static int
nvme_pci_attach(device_t dev)
{
	struct nvme_controller*ctrlr = DEVICE2SOFTC(dev);
	int status;

	ctrlr->dev = dev;
	status = nvme_ctrlr_allocate_bar(ctrlr);
	if (status != 0)
		goto bad;
	pci_enable_busmaster(dev);
	nvme_ctrlr_setup_interrupts(ctrlr);
	return nvme_attach(dev);
bad:
	/* Release in reverse order of acquisition; NULL checks make this
	 * safe whichever step failed (softc starts zeroed). */
	if (ctrlr->resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->resource_id, ctrlr->resource);
	}

	if (ctrlr->bar4_resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
	}

	if (ctrlr->tag)
		bus_teardown_intr(dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	if (ctrlr->msix_enabled)
		pci_release_msi(dev);

	return status;
}

/*
 * Device detach method: release MSI-X vectors (if used), disable bus
 * mastering, and let the shared nvme_detach() tear down the rest
 * (BARs, IRQ resource, queues).
 */
static int
nvme_pci_detach(device_t dev)
{
	struct nvme_controller*ctrlr = DEVICE2SOFTC(dev);

	if (ctrlr->msix_enabled)
		pci_release_msi(dev);
	pci_disable_busmaster(dev);
	return (nvme_detach(dev));
}

/*
 * Fall back to legacy shared INTx: a single I/O queue serving all CPUs,
 * with one shareable IRQ handled by nvme_ctrlr_intx_handler().
 * Returns 0 on success or ENOMEM.
 *
 * NOTE(review): the bus_setup_intr() status is discarded; failure is
 * inferred from ctrlr->tag staying NULL, which presumes the softc was
 * zero-initialized and that a failing bus_setup_intr() leaves the tag
 * untouched — confirm, and note the allocated IRQ resource is not
 * released here on that failure (left for the caller's cleanup).
 */
static int
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{

	ctrlr->msix_enabled = 0;
	ctrlr->num_io_queues = 1;
	ctrlr->num_cpus_per_ioq = mp_ncpus;
	ctrlr->rid = 0;
	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);

	if (ctrlr->res == NULL) {
		nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
		return (ENOMEM);
	}

	bus_setup_intr(ctrlr->dev, ctrlr->res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
	    ctrlr, &ctrlr->tag);

	if (ctrlr->tag == NULL) {
		nvme_printf(ctrlr, "unable to setup intx handler\n");
		return (ENOMEM);
	}

	return (0);
}

/*
 * Decide between MSI-X and INTx and size the I/O queue set.
 *
 * Tunables consulted:
 *   hw.nvme.min_cpus_per_ioq   - lower bound on CPUs sharing one I/O queue
 *                                (clamped to [1, mp_ncpus])
 *   hw.nvme.per_cpu_io_queues  - 0 forces a single I/O queue for all CPUs
 *   hw.nvme.force_intx         - nonzero forces legacy INTx
 *
 * Probes MSI-X availability with a trial pci_alloc_msix(), releases it,
 * computes the queue count from the vectors actually available (always
 * reserving one vector for the admin queue), then re-allocates exactly
 * what is needed.  Any allocation shortfall reverts to INTx.
 *
 * NOTE(review): the return value of nvme_ctrlr_configure_intx() is
 * ignored on every fallback path, so an INTx setup failure is silent
 * here — confirm downstream code tolerates that.
 */
static void
nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
{
	device_t	dev;
	int		per_cpu_io_queues;
	int		min_cpus_per_ioq;
	int		num_vectors_requested, num_vectors_allocated;
	int		num_vectors_available;

	dev = ctrlr->dev;
	min_cpus_per_ioq = 1;
	TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);

	if (min_cpus_per_ioq < 1) {
		min_cpus_per_ioq = 1;
	} else if (min_cpus_per_ioq > mp_ncpus) {
		min_cpus_per_ioq = mp_ncpus;
	}

	per_cpu_io_queues = 1;
	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);

	if (per_cpu_io_queues == 0) {
		min_cpus_per_ioq = mp_ncpus;
	}

	ctrlr->force_intx = 0;
	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);

	/*
	 * FreeBSD currently cannot allocate more than about 190 vectors at
	 * boot, meaning that systems with high core count and many devices
	 * requesting per-CPU interrupt vectors will not get their full
	 * allotment.  So first, try to allocate as many as we may need to
	 * understand what is available, then immediately release them.
	 * Then figure out how many of those we will actually use, based on
	 * assigning an equal number of cores to each I/O queue.
	 */

	/* One vector for per core I/O queue, plus one vector for admin queue. */
	num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
	if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
		num_vectors_available = 0;
	}
	pci_release_msi(dev);

	/* Fewer than 2 vectors means no room for admin + one I/O queue. */
	if (ctrlr->force_intx || num_vectors_available < 2) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	/*
	 * Do not use all vectors for I/O queues - one must be saved for the
	 * admin queue.
	 */
	ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
	    howmany(mp_ncpus, num_vectors_available - 1));

	ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
	num_vectors_requested = ctrlr->num_io_queues + 1;
	num_vectors_allocated = num_vectors_requested;

	/*
	 * Now just allocate the number of vectors we need.  This should
	 * succeed, since we previously called pci_alloc_msix()
	 * successfully returning at least this many vectors, but just to
	 * be safe, if something goes wrong just revert to INTx.
	 */
	if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	if (num_vectors_allocated < num_vectors_requested) {
		pci_release_msi(dev);
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	ctrlr->msix_enabled = 1;
}