/*-
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/module.h>

#include <vm/uma.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

struct nvme_consumer {
	uint32_t		id;
	nvme_cons_ns_fn_t	ns_fn;
	nvme_cons_ctrlr_fn_t	ctrlr_fn;
	nvme_cons_async_fn_t	async_fn;
	nvme_cons_fail_fn_t	fail_fn;
};

struct nvme_consumer nvme_consumer[NVME_MAX_CONSUMERS];
#define	INVALID_CONSUMER_ID	0xFFFF

uma_zone_t	nvme_request_zone;
int32_t		nvme_retry_count;

MALLOC_DEFINE(M_NVME, "nvme", "nvme(4) memory allocations");

static int    nvme_probe(device_t);
static int    nvme_attach(device_t);
static int    nvme_detach(device_t);
static int    nvme_modevent(module_t mod, int type, void *arg);

static devclass_t nvme_devclass;

static device_method_t nvme_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     nvme_probe),
	DEVMETHOD(device_attach,    nvme_attach),
	DEVMETHOD(device_detach,    nvme_detach),
	{ 0, 0 }
};

static driver_t nvme_pci_driver = {
	"nvme",
	nvme_pci_methods,
	sizeof(struct nvme_controller),
};

DRIVER_MODULE(nvme, pci, nvme_pci_driver, nvme_devclass, nvme_modevent, 0);
MODULE_VERSION(nvme, 1);

static struct _pcsid
{
	uint32_t	devid;
	int		match_subdevice;
	uint16_t	subdevice;
	const char	*desc;
} pci_ids[] = {
	{ 0x01118086,		0, 0, "NVMe Controller"  },
	{ IDT32_PCI_ID,		0, 0, "IDT NVMe Controller (32 channel)"  },
	{ IDT8_PCI_ID,		0, 0, "IDT NVMe Controller (8 channel)" },
	{ 0x09538086,		1, 0x3702, "DC P3700 SSD" },
	{ 0x09538086,		1, 0x3703, "DC P3700 SSD [2.5\" SFF]" },
	{ 0x09538086,		1, 0x3704, "DC P3500 SSD [Add-in Card]" },
	{ 0x09538086,		1, 0x3705, "DC P3500 SSD [2.5\" SFF]" },
	{ 0x09538086,		1, 0x3709, "DC P3600 SSD [Add-in Card]" },
	{ 0x09538086,		1, 0x370a, "DC P3600 SSD [2.5\" SFF]" },
	{ 0x00000000,		0, 0, NULL  }
};

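/*
 * Match a device against one pci_ids[] entry: the PCI device ID must match,
 * and the subsystem device ID is compared only when the entry sets
 * match_subdevice.
 */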
static int
nvme_match(uint32_t devid, uint16_t subdevice, struct _pcsid *ep)
{
	if (devid != ep->devid)
		return 0;

	if (!ep->match_subdevice)
		return 1;

	if (subdevice == ep->subdevice)
		return 1;
	else
		return 0;
}

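/*
 * Probe: report a match for any device listed in pci_ids[]; otherwise fall
 * back to the generic NVMe PCI class/subclass/programming-interface check
 * below.
 */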
static int
nvme_probe (device_t device)
{
	struct _pcsid	*ep;
	uint32_t	devid;
	uint16_t	subdevice;

	devid = pci_get_devid(device);
	subdevice = pci_get_subdevice(device);
	ep = pci_ids;

	while (ep->devid) {
		if (nvme_match(devid, subdevice, ep))
			break;
		++ep;
	}

	if (ep->desc) {
		device_set_desc(device, ep->desc);
		return (BUS_PROBE_DEFAULT);
	}

#if defined(PCIS_STORAGE_NVM)
	if (pci_get_class(device)    == PCIC_STORAGE &&
	    pci_get_subclass(device) == PCIS_STORAGE_NVM &&
	    pci_get_progif(device)   == PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0) {
		device_set_desc(device, "Generic NVMe Device");
		return (BUS_PROBE_GENERIC);
	}
#endif

	return (ENXIO);
}

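/*
 * Module-wide initialization, run via SYSINIT when the driver is loaded:
 * create the UMA zone used to allocate struct nvme_request and mark every
 * consumer slot as unused.
 */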
static void
nvme_init(void)
{
	uint32_t	i;

	nvme_request_zone = uma_zcreate("nvme_request",
	    sizeof(struct nvme_request), NULL, NULL, NULL, NULL, 0, 0);

	for (i = 0; i < NVME_MAX_CONSUMERS; i++)
		nvme_consumer[i].id = INVALID_CONSUMER_ID;
}

SYSINIT(nvme_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_init, NULL);

static void
nvme_uninit(void)
{
	uma_zdestroy(nvme_request_zone);
}

SYSUNINIT(nvme_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_uninit, NULL);

static void
nvme_load(void)
{
}

static void
nvme_unload(void)
{
}

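/*
 * On MOD_SHUTDOWN, walk every attached nvme device and perform an orderly
 * controller shutdown before the system powers off or reboots.
 */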
static void
nvme_shutdown(void)
{
	device_t		*devlist;
	struct nvme_controller	*ctrlr;
	int			dev, devcount;

	if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
		return;

	for (dev = 0; dev < devcount; dev++) {
		ctrlr = DEVICE2SOFTC(devlist[dev]);
		nvme_ctrlr_shutdown(ctrlr);
	}

	free(devlist, M_TEMP);
}

static int
nvme_modevent(module_t mod, int type, void *arg)
{

	switch (type) {
	case MOD_LOAD:
		nvme_load();
		break;
	case MOD_UNLOAD:
		nvme_unload();
		break;
	case MOD_SHUTDOWN:
		nvme_shutdown();
		break;
	default:
		break;
	}

	return (0);
}

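/*
 * Debug helpers: print the fields of a submission queue entry and a
 * completion queue entry, one line each.
 */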
void
nvme_dump_command(struct nvme_command *cmd)
{
	printf(
"opc:%x f:%x r1:%x cid:%x nsid:%x r2:%x r3:%x mptr:%jx prp1:%jx prp2:%jx cdw:%x %x %x %x %x %x\n",
	    cmd->opc, cmd->fuse, cmd->rsvd1, cmd->cid, cmd->nsid,
	    cmd->rsvd2, cmd->rsvd3,
	    (uintmax_t)cmd->mptr, (uintmax_t)cmd->prp1, (uintmax_t)cmd->prp2,
	    cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14,
	    cmd->cdw15);
}

void
nvme_dump_completion(struct nvme_completion *cpl)
{
	printf("cdw0:%08x sqhd:%04x sqid:%04x "
	    "cid:%04x p:%x sc:%02x sct:%x m:%x dnr:%x\n",
	    cpl->cdw0, cpl->sqhd, cpl->sqid,
	    cpl->cid, cpl->status.p, cpl->status.sc, cpl->status.sct,
	    cpl->status.m, cpl->status.dnr);
}

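/*
 * Attach: construct the controller softc, reset the controller into a known
 * state, hook up sysctls, enable bus mastering, and defer the remaining
 * setup to nvme_ctrlr_start_config_hook() via a config intrhook, so that it
 * runs after interrupts are enabled.
 */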
static int
nvme_attach(device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);
	int			status;

	status = nvme_ctrlr_construct(ctrlr, dev);

	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	/*
	 * Reset the controller twice to ensure a transition from cc.en==1
	 *  to cc.en==0.  This is because we don't really know what state the
	 *  controller was left in when the boot code handed off to the OS.
	 */
	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	nvme_sysctl_initialize_ctrlr(ctrlr);

	pci_enable_busmaster(dev);

	ctrlr->config_hook.ich_func = nvme_ctrlr_start_config_hook;
	ctrlr->config_hook.ich_arg = ctrlr;

	config_intrhook_establish(&ctrlr->config_hook);

	return (0);
}

static int
nvme_detach (device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);

	nvme_ctrlr_destruct(ctrlr, dev);
	pci_disable_busmaster(dev);
	return (0);
}

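/*
 * Notify a single consumer about one controller and its namespaces: hand the
 * controller to the consumer's ctrlr_fn, record the cookie it returns, and
 * then either report a failed controller via fail_fn or announce each
 * namespace via ns_fn.
 */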
static void
nvme_notify(struct nvme_consumer *cons,
	    struct nvme_controller *ctrlr)
{
	struct nvme_namespace	*ns;
	void			*ctrlr_cookie;
	int			cmpset, ns_idx;

	/*
	 * The consumer may register itself after the nvme devices
	 *  have registered with the kernel, but before the
	 *  driver has completed initialization.  In that case,
	 *  return here, and when initialization completes, the
	 *  controller will make sure the consumer gets notified.
	 */
	if (!ctrlr->is_initialized)
		return;

	cmpset = atomic_cmpset_32(&ctrlr->notification_sent, 0, 1);

	if (cmpset == 0)
		return;

	if (cons->ctrlr_fn != NULL)
		ctrlr_cookie = (*cons->ctrlr_fn)(ctrlr);
	else
		ctrlr_cookie = NULL;
	ctrlr->cons_cookie[cons->id] = ctrlr_cookie;
	if (ctrlr->is_failed) {
		if (cons->fail_fn != NULL)
			(*cons->fail_fn)(ctrlr_cookie);
		/*
		 * Do not notify consumers about the namespaces of a
		 *  failed controller.
		 */
		return;
	}
	for (ns_idx = 0; ns_idx < ctrlr->cdata.nn; ns_idx++) {
		ns = &ctrlr->ns[ns_idx];
		if (cons->ns_fn != NULL)
			ns->cons_cookie[cons->id] =
			    (*cons->ns_fn)(ns, ctrlr_cookie);
	}
}

void
nvme_notify_new_controller(struct nvme_controller *ctrlr)
{
	int i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		if (nvme_consumer[i].id != INVALID_CONSUMER_ID) {
			nvme_notify(&nvme_consumer[i], ctrlr);
		}
	}
}

static void
nvme_notify_new_consumer(struct nvme_consumer *cons)
{
	device_t		*devlist;
	struct nvme_controller	*ctrlr;
	int			dev_idx, devcount;

	if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
		return;

	for (dev_idx = 0; dev_idx < devcount; dev_idx++) {
		ctrlr = DEVICE2SOFTC(devlist[dev_idx]);
		nvme_notify(cons, ctrlr);
	}

	free(devlist, M_TEMP);
}

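/*
 * Fan an asynchronous event (and any associated log page data) out to every
 * registered consumer that installed an async_fn handler.
 */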
void
nvme_notify_async_consumers(struct nvme_controller *ctrlr,
			    const struct nvme_completion *async_cpl,
			    uint32_t log_page_id, void *log_page_buffer,
			    uint32_t log_page_size)
{
	struct nvme_consumer	*cons;
	uint32_t		i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID && cons->async_fn != NULL)
			(*cons->async_fn)(ctrlr->cons_cookie[i], async_cpl,
			    log_page_id, log_page_buffer, log_page_size);
	}
}

void
nvme_notify_fail_consumers(struct nvme_controller *ctrlr)
{
	struct nvme_consumer	*cons;
	uint32_t		i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID && cons->fail_fn != NULL)
			cons->fail_fn(ctrlr->cons_cookie[i]);
	}
}

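/*
 * Register a new consumer of NVMe controllers and namespaces.  An
 * illustrative sketch of the calling convention (the callback and variable
 * names here are hypothetical, not taken from nvd(4) or any other real
 * consumer):
 *
 *	static struct nvme_consumer *my_consumer;
 *
 *	my_consumer = nvme_register_consumer(my_ns_new, my_ctrlr_new,
 *	    my_async_handler, my_ctrlr_fail);
 *	if (my_consumer == NULL)
 *		printf("no free nvme consumer slots\n");
 *
 * Registration immediately notifies the consumer about controllers that
 * have already attached (see nvme_notify_new_consumer() above).
 */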
struct nvme_consumer *
nvme_register_consumer(nvme_cons_ns_fn_t ns_fn, nvme_cons_ctrlr_fn_t ctrlr_fn,
		       nvme_cons_async_fn_t async_fn,
		       nvme_cons_fail_fn_t fail_fn)
{
	int i;

	/*
	 * TODO: add locking around consumer registration.  Not an issue
	 *  right now since we only have one nvme consumer - nvd(4).
	 */
	for (i = 0; i < NVME_MAX_CONSUMERS; i++)
		if (nvme_consumer[i].id == INVALID_CONSUMER_ID) {
			nvme_consumer[i].id = i;
			nvme_consumer[i].ns_fn = ns_fn;
			nvme_consumer[i].ctrlr_fn = ctrlr_fn;
			nvme_consumer[i].async_fn = async_fn;
			nvme_consumer[i].fail_fn = fail_fn;

			nvme_notify_new_consumer(&nvme_consumer[i]);
			return (&nvme_consumer[i]);
		}

	printf("nvme(4): consumer not registered - no slots available\n");
	return (NULL);
}

void
nvme_unregister_consumer(struct nvme_consumer *consumer)
{

	consumer->id = INVALID_CONSUMER_ID;
}

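/*
 * Completion callback for synchronous (polled) requests.  A typical caller
 * pattern, sketched here only as an illustration (the command function name
 * is a placeholder):
 *
 *	struct nvme_completion_poll_status	status;
 *
 *	status.done = FALSE;
 *	nvme_ctrlr_cmd_xxx(ctrlr, ..., nvme_completion_poll_cb, &status);
 *	while (status.done == FALSE)
 *		pause("nvmecmd", 1);
 *	if (nvme_completion_is_error(&status.cpl))
 *		handle the error;
 */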
void
nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_completion_poll_status	*status = arg;

	/*
	 * Copy status into the argument passed by the caller, so that
	 *  the caller can check the status to determine if the
	 *  request passed or failed.
	 */
	memcpy(&status->cpl, cpl, sizeof(*cpl));
	wmb();
	status->done = TRUE;
}
448