/*-
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/module.h>

#include <vm/uma.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

struct nvme_consumer {
	uint32_t		id;
	nvme_cons_ns_fn_t	ns_fn;
	nvme_cons_ctrlr_fn_t	ctrlr_fn;
	nvme_cons_async_fn_t	async_fn;
	nvme_cons_fail_fn_t	fail_fn;
};

struct nvme_consumer nvme_consumer[NVME_MAX_CONSUMERS];
#define	INVALID_CONSUMER_ID	0xFFFF

uma_zone_t	nvme_request_zone;
int32_t		nvme_retry_count;

MALLOC_DEFINE(M_NVME, "nvme", "nvme(4) memory allocations");

static int    nvme_probe(device_t);
static int    nvme_attach(device_t);
static int    nvme_detach(device_t);
static int    nvme_shutdown(device_t);
static int    nvme_modevent(module_t mod, int type, void *arg);

static devclass_t nvme_devclass;

static device_method_t nvme_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     nvme_probe),
	DEVMETHOD(device_attach,    nvme_attach),
	DEVMETHOD(device_detach,    nvme_detach),
	DEVMETHOD(device_shutdown,  nvme_shutdown),
	{ 0, 0 }
};

static driver_t nvme_pci_driver = {
	"nvme",
	nvme_pci_methods,
	sizeof(struct nvme_controller),
};

DRIVER_MODULE(nvme, pci, nvme_pci_driver, nvme_devclass, nvme_modevent, 0);
MODULE_VERSION(nvme, 1);

static struct _pcsid
{
	uint32_t	devid;
	int		match_subdevice;
	uint16_t	subdevice;
	const char	*desc;
} pci_ids[] = {
	{ 0x01118086,		0, 0, "NVMe Controller"  },
	{ IDT32_PCI_ID,		0, 0, "IDT NVMe Controller (32 channel)"  },
	{ IDT8_PCI_ID,		0, 0, "IDT NVMe Controller (8 channel)" },
	{ 0x09538086,		1, 0x3702, "DC P3700 SSD" },
	{ 0x09538086,		1, 0x3703, "DC P3700 SSD [2.5\" SFF]" },
	{ 0x09538086,		1, 0x3704, "DC P3500 SSD [Add-in Card]" },
	{ 0x09538086,		1, 0x3705, "DC P3500 SSD [2.5\" SFF]" },
	{ 0x09538086,		1, 0x3709, "DC P3600 SSD [Add-in Card]" },
	{ 0x09538086,		1, 0x370a, "DC P3600 SSD [2.5\" SFF]" },
	{ 0x00000000,		0, 0, NULL  }
};
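
/*
 * A note on the devid values in the table above: pci_get_devid() returns
 * the 16-bit PCI device ID in the upper half of the 32-bit value and the
 * vendor ID in the lower half, so 0x09538086 matches device 0x0953 from
 * vendor 0x8086 (Intel).  Entries with match_subdevice set are further
 * narrowed by the PCI subdevice ID, which is how the individual Intel
 * DC P3x00 models get their own descriptions while sharing one device ID.
 */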

static int
nvme_match(uint32_t devid, uint16_t subdevice, struct _pcsid *ep)
{
	if (devid != ep->devid)
		return 0;

	if (!ep->match_subdevice)
		return 1;

	if (subdevice == ep->subdevice)
		return 1;
	else
		return 0;
}

static int
nvme_probe (device_t device)
{
	struct _pcsid	*ep;
	uint32_t	devid;
	uint16_t	subdevice;

	devid = pci_get_devid(device);
	subdevice = pci_get_subdevice(device);
	ep = pci_ids;

	while (ep->devid) {
		if (nvme_match(devid, subdevice, ep))
			break;
		++ep;
	}

	if (ep->desc) {
		device_set_desc(device, ep->desc);
		return (BUS_PROBE_DEFAULT);
	}

#if defined(PCIS_STORAGE_NVM)
	if (pci_get_class(device)    == PCIC_STORAGE &&
	    pci_get_subclass(device) == PCIS_STORAGE_NVM &&
	    pci_get_progif(device)   == PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0) {
		device_set_desc(device, "Generic NVMe Device");
		return (BUS_PROBE_GENERIC);
	}
#endif

	return (ENXIO);
}

static void
nvme_init(void)
{
	uint32_t	i;

	nvme_request_zone = uma_zcreate("nvme_request",
	    sizeof(struct nvme_request), NULL, NULL, NULL, NULL, 0, 0);

	for (i = 0; i < NVME_MAX_CONSUMERS; i++)
		nvme_consumer[i].id = INVALID_CONSUMER_ID;
}

SYSINIT(nvme_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_init, NULL);

static void
nvme_uninit(void)
{
	uma_zdestroy(nvme_request_zone);
}

SYSUNINIT(nvme_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_uninit, NULL);

static void
nvme_load(void)
{
}

static void
nvme_unload(void)
{
}

static int
nvme_shutdown(device_t dev)
{
	struct nvme_controller	*ctrlr;

	ctrlr = DEVICE2SOFTC(dev);
	nvme_ctrlr_shutdown(ctrlr);

	return (0);
}

static int
nvme_modevent(module_t mod, int type, void *arg)
{

	switch (type) {
	case MOD_LOAD:
		nvme_load();
		break;
	case MOD_UNLOAD:
		nvme_unload();
		break;
	default:
		break;
	}

	return (0);
}

void
nvme_dump_command(struct nvme_command *cmd)
{
	printf(
"opc:%x f:%x r1:%x cid:%x nsid:%x r2:%x r3:%x mptr:%jx prp1:%jx prp2:%jx cdw:%x %x %x %x %x %x\n",
	    cmd->opc, cmd->fuse, cmd->rsvd1, cmd->cid, cmd->nsid,
	    cmd->rsvd2, cmd->rsvd3,
	    (uintmax_t)cmd->mptr, (uintmax_t)cmd->prp1, (uintmax_t)cmd->prp2,
	    cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14,
	    cmd->cdw15);
}

void
nvme_dump_completion(struct nvme_completion *cpl)
{
	printf("cdw0:%08x sqhd:%04x sqid:%04x "
	    "cid:%04x p:%x sc:%02x sct:%x m:%x dnr:%x\n",
	    cpl->cdw0, cpl->sqhd, cpl->sqid,
	    cpl->cid, cpl->status.p, cpl->status.sc, cpl->status.sct,
	    cpl->status.m, cpl->status.dnr);
}

static int
nvme_attach(device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);
	int			status;

	status = nvme_ctrlr_construct(ctrlr, dev);

	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	/*
	 * Reset the controller twice to ensure we do a transition from
	 *  cc.en==1 to cc.en==0.  This is because we don't really know what
	 *  state the controller was left in when the boot loader handed off
	 *  to the OS.
	 */
	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	pci_enable_busmaster(dev);

	ctrlr->config_hook.ich_func = nvme_ctrlr_start_config_hook;
	ctrlr->config_hook.ich_arg = ctrlr;

	config_intrhook_establish(&ctrlr->config_hook);

	return (0);
}
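
/*
 * The config intrhook defers nvme_ctrlr_start_config_hook() until interrupts
 * are enabled during boot; the kernel also holds off the root file system
 * mount until every such hook has disestablished itself, so the controller
 * has a chance to finish creating its queues before any consumer tries to
 * use it.
 */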

static int
nvme_detach (device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);

	nvme_ctrlr_destruct(ctrlr, dev);
	pci_disable_busmaster(dev);
	return (0);
}

static void
nvme_notify(struct nvme_consumer *cons,
	    struct nvme_controller *ctrlr)
{
	struct nvme_namespace	*ns;
	void			*ctrlr_cookie;
	int			cmpset, ns_idx;

	/*
	 * The consumer may register itself after the nvme devices
	 *  have registered with the kernel, but before the
	 *  driver has completed initialization.  In that case,
	 *  return here, and when initialization completes, the
	 *  controller will make sure the consumer gets notified.
	 */
	if (!ctrlr->is_initialized)
		return;

	cmpset = atomic_cmpset_32(&ctrlr->notification_sent, 0, 1);

	if (cmpset == 0)
		return;

	if (cons->ctrlr_fn != NULL)
		ctrlr_cookie = (*cons->ctrlr_fn)(ctrlr);
	else
		ctrlr_cookie = NULL;
	ctrlr->cons_cookie[cons->id] = ctrlr_cookie;
	if (ctrlr->is_failed) {
		if (cons->fail_fn != NULL)
			(*cons->fail_fn)(ctrlr_cookie);
		/*
		 * Do not notify consumers about the namespaces of a
		 *  failed controller.
		 */
		return;
	}
	for (ns_idx = 0; ns_idx < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); ns_idx++) {
		ns = &ctrlr->ns[ns_idx];
		if (ns->data.nsze == 0)
			continue;
		if (cons->ns_fn != NULL)
			ns->cons_cookie[cons->id] =
			    (*cons->ns_fn)(ns, ctrlr_cookie);
	}
}

void
nvme_notify_new_controller(struct nvme_controller *ctrlr)
{
	int i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		if (nvme_consumer[i].id != INVALID_CONSUMER_ID) {
			nvme_notify(&nvme_consumer[i], ctrlr);
		}
	}
}

static void
nvme_notify_new_consumer(struct nvme_consumer *cons)
{
	device_t		*devlist;
	struct nvme_controller	*ctrlr;
	int			dev_idx, devcount;

	if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
		return;

	for (dev_idx = 0; dev_idx < devcount; dev_idx++) {
		ctrlr = DEVICE2SOFTC(devlist[dev_idx]);
		nvme_notify(cons, ctrlr);
	}

	free(devlist, M_TEMP);
}

void
nvme_notify_async_consumers(struct nvme_controller *ctrlr,
			    const struct nvme_completion *async_cpl,
			    uint32_t log_page_id, void *log_page_buffer,
			    uint32_t log_page_size)
{
	struct nvme_consumer	*cons;
	uint32_t		i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID && cons->async_fn != NULL)
			(*cons->async_fn)(ctrlr->cons_cookie[i], async_cpl,
			    log_page_id, log_page_buffer, log_page_size);
	}
}

void
nvme_notify_fail_consumers(struct nvme_controller *ctrlr)
{
	struct nvme_consumer	*cons;
	uint32_t		i;

	/*
	 * This controller failed during initialization (i.e. IDENTIFY
	 *  command failed or timed out).  Do not notify any nvme
	 *  consumers of the failure here, since the consumer does not
	 *  even know about the controller yet.
	 */
	if (!ctrlr->is_initialized)
		return;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID && cons->fail_fn != NULL)
			cons->fail_fn(ctrlr->cons_cookie[i]);
	}
}

struct nvme_consumer *
nvme_register_consumer(nvme_cons_ns_fn_t ns_fn, nvme_cons_ctrlr_fn_t ctrlr_fn,
		       nvme_cons_async_fn_t async_fn,
		       nvme_cons_fail_fn_t fail_fn)
{
	int i;

	/*
	 * TODO: add locking around consumer registration.  Not an issue
	 *  right now since we only have one nvme consumer - nvd(4).
	 */
	for (i = 0; i < NVME_MAX_CONSUMERS; i++)
		if (nvme_consumer[i].id == INVALID_CONSUMER_ID) {
			nvme_consumer[i].id = i;
			nvme_consumer[i].ns_fn = ns_fn;
			nvme_consumer[i].ctrlr_fn = ctrlr_fn;
			nvme_consumer[i].async_fn = async_fn;
			nvme_consumer[i].fail_fn = fail_fn;

			nvme_notify_new_consumer(&nvme_consumer[i]);
			return (&nvme_consumer[i]);
		}

	printf("nvme(4): consumer not registered - no slots available\n");
	return (NULL);
}
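
/*
 * Usage sketch: a kernel consumer (nvd(4) is the only one at this point)
 * registers a set of callbacks and keeps the returned handle for a later
 * nvme_unregister_consumer() call.  The callback names below are
 * hypothetical and exist only to illustrate the calling convention implied
 * by the nvme_cons_*_fn_t function pointer types used above:
 *
 *	static struct nvme_consumer *example_consumer;
 *
 *	static void *
 *	example_new_ctrlr(struct nvme_controller *ctrlr)
 *	{
 *		return (NULL);	// per-controller cookie, passed back later
 *	}
 *
 *	static void *
 *	example_new_ns(struct nvme_namespace *ns, void *ctrlr_cookie)
 *	{
 *		return (NULL);	// per-namespace cookie
 *	}
 *
 *	static void
 *	example_ctrlr_fail(void *ctrlr_cookie)
 *	{
 *		// tear down whatever example_new_ctrlr() created
 *	}
 *
 *	example_consumer = nvme_register_consumer(example_new_ns,
 *	    example_new_ctrlr, NULL, example_ctrlr_fail);
 *	if (example_consumer == NULL)
 *		printf("all NVME_MAX_CONSUMERS slots are taken\n");
 */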

void
nvme_unregister_consumer(struct nvme_consumer *consumer)
{

	consumer->id = INVALID_CONSUMER_ID;
}

void
nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_completion_poll_status	*status = arg;

	/*
	 * Copy status into the argument passed by the caller, so that
	 *  the caller can check the status to determine if the
	 *  request passed or failed.
	 */
	memcpy(&status->cpl, cpl, sizeof(*cpl));
	wmb();
	status->done = TRUE;
}
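
/*
 * Usage sketch for the poll callback above: a caller that needs a command
 * to complete synchronously passes nvme_completion_poll_cb together with a
 * stack-allocated struct nvme_completion_poll_status (from nvme_private.h)
 * as the completion callback and argument, then waits on the done flag.
 * The identify command below is only an example of a command submitted
 * this way:
 *
 *	struct nvme_completion_poll_status	status;
 *
 *	status.done = FALSE;
 *	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
 *	    nvme_completion_poll_cb, &status);
 *	while (status.done == FALSE)
 *		DELAY(5);
 *	if (nvme_completion_is_error(&status.cpl))
 *		printf("nvme: identify failed\n");
 *
 * The wmb() before setting done ensures the copied completion entry is
 * visible to the polling thread before it observes done == TRUE.
 */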
449