1 /*-
2 * Copyright (C) 2012-2016 Intel Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/buf.h>
30 #include <sys/bus.h>
31 #include <sys/conf.h>
32 #include <sys/proc.h>
33 #include <sys/smp.h>
34 #include <vm/vm.h>
35
36 #include <dev/pci/pcireg.h>
37 #include <dev/pci/pcivar.h>
38
39 #include "nvme_private.h"
40
41 static int nvme_pci_probe(device_t);
42 static int nvme_pci_attach(device_t);
43 static int nvme_pci_detach(device_t);
44 static int nvme_pci_suspend(device_t);
45 static int nvme_pci_resume(device_t);
46
47 static int nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);
48
/*
 * newbus device method table for the PCI front-end.  probe/attach/
 * detach/suspend/resume are implemented in this file; shutdown is
 * handled by the common nvme_shutdown routine from the core driver.
 */
static device_method_t nvme_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, nvme_pci_probe),
	DEVMETHOD(device_attach, nvme_pci_attach),
	DEVMETHOD(device_detach, nvme_pci_detach),
	DEVMETHOD(device_suspend, nvme_pci_suspend),
	DEVMETHOD(device_resume, nvme_pci_resume),
	DEVMETHOD(device_shutdown, nvme_shutdown),
	{ 0, 0 }		/* terminator */
};
59
/*
 * Driver glue: attaches as "nvme" on the pci bus; the softc is the
 * controller structure shared with the core driver.
 */
static driver_t nvme_pci_driver = {
	"nvme",
	nvme_pci_methods,
	sizeof(struct nvme_controller),
};

DRIVER_MODULE(nvme, pci, nvme_pci_driver, NULL, NULL);
67
/*
 * Table of PCI device IDs this driver recognizes explicitly, with
 * per-device description strings and quirk flags.  nvme_pci_probe()
 * takes the FIRST matching entry, so rows that match on subdevice ID
 * must precede the catch-all row for the same devid (see the
 * 0x09538086 entries below).
 */
static struct _pcsid
{
	uint32_t	devid;		/* devid|vendor as returned by pci_get_devid() */
	int		match_subdevice; /* nonzero: subdevice must match too */
	uint16_t	subdevice;
	const char	*desc;		/* device description for probe */
	uint32_t	quirks;		/* QUIRK_* flags copied into the softc */
} pci_ids[] = {
	{ 0x01118086,		0, 0, "NVMe Controller"  },
	{ IDT32_PCI_ID,		0, 0, "IDT NVMe Controller (32 channel)"  },
	{ IDT8_PCI_ID,		0, 0, "IDT NVMe Controller (8 channel)" },
	/* Subdevice-specific Intel rows first, then the generic fallback. */
	{ 0x09538086,		1, 0x3702, "DC P3700 SSD" },
	{ 0x09538086,		1, 0x3703, "DC P3700 SSD [2.5\" SFF]" },
	{ 0x09538086,		1, 0x3704, "DC P3500 SSD [Add-in Card]" },
	{ 0x09538086,		1, 0x3705, "DC P3500 SSD [2.5\" SFF]" },
	{ 0x09538086,		1, 0x3709, "DC P3600 SSD [Add-in Card]" },
	{ 0x09538086,		1, 0x370a, "DC P3600 SSD [2.5\" SFF]" },
	{ 0x09538086,		0, 0, "Intel DC PC3500", QUIRK_INTEL_ALIGNMENT },
	{ 0x0a538086,		0, 0, "Intel DC PC3520", QUIRK_INTEL_ALIGNMENT },
	{ 0x0a548086,		0, 0, "Intel DC PC4500", QUIRK_INTEL_ALIGNMENT },
	{ 0x0a558086,		0, 0, "Dell Intel P4600", QUIRK_INTEL_ALIGNMENT },
	{ 0x00031c58,		0, 0, "HGST SN100",	QUIRK_DELAY_B4_CHK_RDY },
	{ 0x00231c58,		0, 0, "WDC SN200",	QUIRK_DELAY_B4_CHK_RDY },
	{ 0x05401c5f,		0, 0, "Memblaze Pblaze4", QUIRK_DELAY_B4_CHK_RDY },
	{ 0xa821144d,		0, 0, "Samsung PM1725", QUIRK_DELAY_B4_CHK_RDY },
	{ 0xa822144d,		0, 0, "Samsung PM1725a", QUIRK_DELAY_B4_CHK_RDY },
	{ 0x07f015ad,		0, 0, "VMware NVMe Controller" },
	{ 0x2003106b,		0, 0, "Apple S3X NVMe Controller" },
	{ 0x00000000,		0, 0, NULL  }	/* terminator */
};
98
99 static int
nvme_match(uint32_t devid,uint16_t subdevice,struct _pcsid * ep)100 nvme_match(uint32_t devid, uint16_t subdevice, struct _pcsid *ep)
101 {
102 if (devid != ep->devid)
103 return 0;
104
105 if (!ep->match_subdevice)
106 return 1;
107
108 if (subdevice == ep->subdevice)
109 return 1;
110 else
111 return 0;
112 }
113
114 static int
nvme_pci_probe(device_t device)115 nvme_pci_probe (device_t device)
116 {
117 struct nvme_controller *ctrlr = DEVICE2SOFTC(device);
118 struct _pcsid *ep;
119 uint32_t devid;
120 uint16_t subdevice;
121
122 devid = pci_get_devid(device);
123 subdevice = pci_get_subdevice(device);
124 ep = pci_ids;
125
126 while (ep->devid) {
127 if (nvme_match(devid, subdevice, ep))
128 break;
129 ++ep;
130 }
131 if (ep->devid)
132 ctrlr->quirks = ep->quirks;
133
134 if (ep->desc) {
135 device_set_desc(device, ep->desc);
136 return (BUS_PROBE_DEFAULT);
137 }
138
139 #if defined(PCIS_STORAGE_NVM)
140 if (pci_get_class(device) == PCIC_STORAGE &&
141 pci_get_subclass(device) == PCIS_STORAGE_NVM &&
142 pci_get_progif(device) == PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0) {
143 device_set_desc(device, "Generic NVMe Device");
144 return (BUS_PROBE_GENERIC);
145 }
146 #endif
147
148 return (ENXIO);
149 }
150
/*
 * Map the controller's register BAR (BAR 0) and, when the MSI-X table
 * and/or PBA live behind a different BAR, map those as well.
 *
 * Returns 0 on success or ENOMEM when a required resource cannot be
 * allocated.  On failure, any resources already allocated are left in
 * the softc; the caller (nvme_pci_attach) is responsible for releasing
 * them.
 */
static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{
	/* Initialize so the caller's error path can tell what was mapped. */
	ctrlr->resource_id = PCIR_BAR(0);
	ctrlr->msix_table_resource_id = -1;
	ctrlr->msix_table_resource = NULL;
	ctrlr->msix_pba_resource_id = -1;
	ctrlr->msix_pba_resource = NULL;

	ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->resource_id, RF_ACTIVE);

	if (ctrlr->resource == NULL) {
		nvme_printf(ctrlr, "unable to allocate pci resource\n");
		return (ENOMEM);
	}

	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
	/* NOTE(review): treats the bus handle as a direct register pointer;
	 * presumably valid for memory-mapped BARs on supported platforms. */
	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

	/*
	 * The NVMe spec allows for the MSI-X tables to be placed behind
	 * BAR 4 and/or 5, separate from the control/doorbell registers.
	 */

	ctrlr->msix_table_resource_id = pci_msix_table_bar(ctrlr->dev);
	ctrlr->msix_pba_resource_id = pci_msix_pba_bar(ctrlr->dev);

	/* Only map the MSI-X table BAR when it differs from BAR 0. */
	if (ctrlr->msix_table_resource_id >= 0 &&
	    ctrlr->msix_table_resource_id != ctrlr->resource_id) {
		ctrlr->msix_table_resource = bus_alloc_resource_any(ctrlr->dev,
		    SYS_RES_MEMORY, &ctrlr->msix_table_resource_id, RF_ACTIVE);
		if (ctrlr->msix_table_resource == NULL) {
			nvme_printf(ctrlr, "unable to allocate msi-x table resource\n");
			return (ENOMEM);
		}
	}
	/* The PBA may share a BAR with the registers or the MSI-X table. */
	if (ctrlr->msix_pba_resource_id >= 0 &&
	    ctrlr->msix_pba_resource_id != ctrlr->resource_id &&
	    ctrlr->msix_pba_resource_id != ctrlr->msix_table_resource_id) {
		ctrlr->msix_pba_resource = bus_alloc_resource_any(ctrlr->dev,
		    SYS_RES_MEMORY, &ctrlr->msix_pba_resource_id, RF_ACTIVE);
		if (ctrlr->msix_pba_resource == NULL) {
			nvme_printf(ctrlr, "unable to allocate msi-x pba resource\n");
			return (ENOMEM);
		}
	}

	return (0);
}
202
203 static int
nvme_pci_attach(device_t dev)204 nvme_pci_attach(device_t dev)
205 {
206 struct nvme_controller*ctrlr = DEVICE2SOFTC(dev);
207 int status;
208
209 ctrlr->dev = dev;
210 status = nvme_ctrlr_allocate_bar(ctrlr);
211 if (status != 0)
212 goto bad;
213 pci_enable_busmaster(dev);
214 status = nvme_ctrlr_setup_interrupts(ctrlr);
215 if (status != 0)
216 goto bad;
217 return nvme_attach(dev);
218 bad:
219 if (ctrlr->resource != NULL) {
220 bus_release_resource(dev, SYS_RES_MEMORY,
221 ctrlr->resource_id, ctrlr->resource);
222 }
223
224 if (ctrlr->msix_table_resource != NULL) {
225 bus_release_resource(dev, SYS_RES_MEMORY,
226 ctrlr->msix_table_resource_id, ctrlr->msix_table_resource);
227 }
228
229 if (ctrlr->msix_pba_resource != NULL) {
230 bus_release_resource(dev, SYS_RES_MEMORY,
231 ctrlr->msix_pba_resource_id, ctrlr->msix_pba_resource);
232 }
233
234 if (ctrlr->tag)
235 bus_teardown_intr(dev, ctrlr->res, ctrlr->tag);
236
237 if (ctrlr->res)
238 bus_release_resource(dev, SYS_RES_IRQ,
239 rman_get_rid(ctrlr->res), ctrlr->res);
240
241 if (ctrlr->msi_count > 0)
242 pci_release_msi(dev);
243
244 return status;
245 }
246
247 static int
nvme_pci_detach(device_t dev)248 nvme_pci_detach(device_t dev)
249 {
250 struct nvme_controller*ctrlr = DEVICE2SOFTC(dev);
251 int rv;
252
253 rv = nvme_detach(dev);
254 if (ctrlr->msi_count > 0)
255 pci_release_msi(dev);
256 pci_disable_busmaster(dev);
257 return (rv);
258 }
259
/*
 * Set up a single (possibly shared) interrupt for the controller:
 * rid 0 for legacy INTx, rid 1 for a single MSI/MSI-X vector.  In
 * this mode one vector services both the admin and the single I/O
 * queue via nvme_ctrlr_shared_handler.
 *
 * Returns 0 on success or an errno.  On bus_setup_intr failure the
 * allocated ctrlr->res is deliberately left in place; the caller's
 * error path (nvme_pci_attach) releases it.
 */
static int
nvme_ctrlr_setup_shared(struct nvme_controller *ctrlr, int rid)
{
	int error;

	ctrlr->num_io_queues = 1;
	ctrlr->rid = rid;
	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);
	if (ctrlr->res == NULL) {
		nvme_printf(ctrlr, "unable to allocate shared interrupt\n");
		return (ENOMEM);
	}

	error = bus_setup_intr(ctrlr->dev, ctrlr->res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_shared_handler,
	    ctrlr, &ctrlr->tag);
	if (error) {
		nvme_printf(ctrlr, "unable to setup shared interrupt\n");
		return (error);
	}

	return (0);
}
284
/*
 * Choose and allocate the controller's interrupt scheme, setting
 * ctrlr->num_io_queues and ctrlr->msi_count as a side effect.
 *
 * Preference order: per-CPU MSI-X (one vector per I/O queue plus one
 * for the admin queue), then plain MSI (2 vectors, or 1 shared), then
 * legacy INTx.  Behavior is adjustable via the hw.nvme.* tunables.
 *
 * Returns 0 on success or an errno from nvme_ctrlr_setup_shared.
 */
static int
nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
{
	device_t dev;
	int force_intx, num_io_queues, per_cpu_io_queues;
	int min_cpus_per_ioq;
	int num_vectors_requested;

	dev = ctrlr->dev;

	/* hw.nvme.force_intx overrides everything: single INTx, rid 0. */
	force_intx = 0;
	TUNABLE_INT_FETCH("hw.nvme.force_intx", &force_intx);
	if (force_intx)
		return (nvme_ctrlr_setup_shared(ctrlr, 0));

	if (pci_msix_count(dev) == 0)
		goto msi;

	/*
	 * Try to allocate one MSI-X per core for I/O queues, plus one
	 * for admin queue, but accept single shared MSI-X if have to.
	 * Fall back to MSI if can't get any MSI-X.
	 */
	num_io_queues = mp_ncpus;
	TUNABLE_INT_FETCH("hw.nvme.num_io_queues", &num_io_queues);
	/* Clamp bogus tunable values back to one queue per CPU. */
	if (num_io_queues < 1 || num_io_queues > mp_ncpus)
		num_io_queues = mp_ncpus;

	per_cpu_io_queues = 1;
	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
	if (per_cpu_io_queues == 0)
		num_io_queues = 1;

	/* Default: don't split SMT siblings across different I/O queues. */
	min_cpus_per_ioq = smp_threads_per_core;
	TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);
	if (min_cpus_per_ioq > 1) {
		num_io_queues = min(num_io_queues,
		    max(1, mp_ncpus / min_cpus_per_ioq));
	}

	/* Reserve one MSI-X vector for the admin queue. */
	num_io_queues = min(num_io_queues, max(1, pci_msix_count(dev) - 1));

again:
	/*
	 * When there are multiple queues and multiple memory domains,
	 * round the queue count down to a multiple of the domain count
	 * so queues spread evenly across domains.
	 */
	if (num_io_queues > vm_ndomains)
		num_io_queues -= num_io_queues % vm_ndomains;
	/* One vector per I/O queue plus one for the admin queue. */
	num_vectors_requested = min(num_io_queues + 1, pci_msix_count(dev));
	ctrlr->msi_count = num_vectors_requested;
	if (pci_alloc_msix(dev, &ctrlr->msi_count) != 0) {
		nvme_printf(ctrlr, "unable to allocate MSI-X\n");
		ctrlr->msi_count = 0;
		goto msi;
	}
	/* Only one vector granted: share it between admin and I/O. */
	if (ctrlr->msi_count == 1)
		return (nvme_ctrlr_setup_shared(ctrlr, 1));
	/*
	 * pci_alloc_msix may grant fewer vectors than requested; retry
	 * the sizing computation with what's actually available.
	 */
	if (ctrlr->msi_count != num_vectors_requested) {
		pci_release_msi(dev);
		num_io_queues = ctrlr->msi_count - 1;
		goto again;
	}

	ctrlr->num_io_queues = num_io_queues;
	return (0);

msi:
	/*
	 * Try to allocate 2 MSIs (admin and I/O queues), but accept single
	 * shared if have to.  Fall back to INTx if can't get any MSI.
	 */
	ctrlr->msi_count = min(pci_msi_count(dev), 2);
	if (ctrlr->msi_count > 0) {
		if (pci_alloc_msi(dev, &ctrlr->msi_count) != 0) {
			nvme_printf(ctrlr, "unable to allocate MSI\n");
			ctrlr->msi_count = 0;
		} else if (ctrlr->msi_count == 2) {
			ctrlr->num_io_queues = 1;
			return (0);
		}
	}
	/* Single MSI uses rid 1; no MSI at all falls back to INTx (rid 0). */
	return (nvme_ctrlr_setup_shared(ctrlr, ctrlr->msi_count > 0 ? 1 : 0));
}
365
366 static int
nvme_pci_suspend(device_t dev)367 nvme_pci_suspend(device_t dev)
368 {
369 struct nvme_controller *ctrlr;
370
371 ctrlr = DEVICE2SOFTC(dev);
372 return (nvme_ctrlr_suspend(ctrlr));
373 }
374
375 static int
nvme_pci_resume(device_t dev)376 nvme_pci_resume(device_t dev)
377 {
378 struct nvme_controller *ctrlr;
379
380 ctrlr = DEVICE2SOFTC(dev);
381 return (nvme_ctrlr_resume(ctrlr));
382 }
383