xref: /freebsd/sys/dev/nvme/nvme.c (revision a25896ca1270e25b657ceaa8d47d5699515f5c25)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/module.h>

#include <vm/uma.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

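/*
 * A consumer (e.g. nvd(4)) registers a set of callbacks that the driver
 * invokes when a controller or namespace is discovered, when an asynchronous
 * event completes, and when a controller fails.
 */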
struct nvme_consumer {
	uint32_t		id;
	nvme_cons_ns_fn_t	ns_fn;
	nvme_cons_ctrlr_fn_t	ctrlr_fn;
	nvme_cons_async_fn_t	async_fn;
	nvme_cons_fail_fn_t	fail_fn;
};

struct nvme_consumer nvme_consumer[NVME_MAX_CONSUMERS];
#define	INVALID_CONSUMER_ID	0xFFFF

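/* UMA zone for request allocations and the global request retry limit. */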
uma_zone_t	nvme_request_zone;
int32_t		nvme_retry_count;

MALLOC_DEFINE(M_NVME, "nvme", "nvme(4) memory allocations");

static int    nvme_probe(device_t);
static int    nvme_attach(device_t);
static int    nvme_detach(device_t);
static int    nvme_shutdown(device_t);
static int    nvme_modevent(module_t mod, int type, void *arg);

static devclass_t nvme_devclass;

static device_method_t nvme_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     nvme_probe),
	DEVMETHOD(device_attach,    nvme_attach),
	DEVMETHOD(device_detach,    nvme_detach),
	DEVMETHOD(device_shutdown,  nvme_shutdown),
	{ 0, 0 }
};

static driver_t nvme_pci_driver = {
	"nvme",
	nvme_pci_methods,
	sizeof(struct nvme_controller),
};

DRIVER_MODULE(nvme, pci, nvme_pci_driver, nvme_devclass, nvme_modevent, 0);
MODULE_VERSION(nvme, 1);
MODULE_DEPEND(nvme, cam, 1, 1, 1);

static struct _pcsid
{
	uint32_t	devid;
	int		match_subdevice;
	uint16_t	subdevice;
	const char	*desc;
	uint32_t	quirks;
} pci_ids[] = {
	{ 0x01118086,		0, 0, "NVMe Controller"  },
	{ IDT32_PCI_ID,		0, 0, "IDT NVMe Controller (32 channel)"  },
	{ IDT8_PCI_ID,		0, 0, "IDT NVMe Controller (8 channel)" },
	{ 0x09538086,		1, 0x3702, "DC P3700 SSD" },
	{ 0x09538086,		1, 0x3703, "DC P3700 SSD [2.5\" SFF]" },
	{ 0x09538086,		1, 0x3704, "DC P3500 SSD [Add-in Card]" },
	{ 0x09538086,		1, 0x3705, "DC P3500 SSD [2.5\" SFF]" },
	{ 0x09538086,		1, 0x3709, "DC P3600 SSD [Add-in Card]" },
	{ 0x09538086,		1, 0x370a, "DC P3600 SSD [2.5\" SFF]" },
	{ 0x00031c58,		0, 0, "HGST SN100",	QUIRK_DELAY_B4_CHK_RDY },
	{ 0x00231c58,		0, 0, "WDC SN200",	QUIRK_DELAY_B4_CHK_RDY },
	{ 0x05401c5f,		0, 0, "Memblaze Pblaze4", QUIRK_DELAY_B4_CHK_RDY },
	{ 0xa821144d,		0, 0, "Samsung PM1725", QUIRK_DELAY_B4_CHK_RDY },
	{ 0xa822144d,		0, 0, "Samsung PM1725a", QUIRK_DELAY_B4_CHK_RDY },
	{ 0x01161179,		0, 0, "Toshiba XG5", QUIRK_DISABLE_TIMEOUT },
	{ 0x00000000,		0, 0, NULL  }
};

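/*
 * Return non-zero if the table entry matches the PCI device ID and, when the
 * entry requests it, the PCI subdevice ID as well.
 */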
static int
nvme_match(uint32_t devid, uint16_t subdevice, struct _pcsid *ep)
{
	if (devid != ep->devid)
		return 0;

	if (!ep->match_subdevice)
		return 1;

	if (subdevice == ep->subdevice)
		return 1;
	else
		return 0;
}

static int
nvme_probe (device_t device)
{
	struct _pcsid	*ep;
	uint32_t	devid;
	uint16_t	subdevice;

	devid = pci_get_devid(device);
	subdevice = pci_get_subdevice(device);
	ep = pci_ids;

	while (ep->devid) {
		if (nvme_match(devid, subdevice, ep))
			break;
		++ep;
	}

	if (ep->desc) {
		device_set_desc(device, ep->desc);
		return (BUS_PROBE_DEFAULT);
	}

#if defined(PCIS_STORAGE_NVM)
	if (pci_get_class(device)    == PCIC_STORAGE &&
	    pci_get_subclass(device) == PCIS_STORAGE_NVM &&
	    pci_get_progif(device)   == PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0) {
		device_set_desc(device, "Generic NVMe Device");
		return (BUS_PROBE_GENERIC);
	}
#endif

	return (ENXIO);
}

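/*
 * Driver-wide setup, run via SYSINIT at SI_SUB_DRIVERS: create the UMA zone
 * used for request allocation and mark every consumer slot as free.
 */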
static void
nvme_init(void)
{
	uint32_t	i;

	nvme_request_zone = uma_zcreate("nvme_request",
	    sizeof(struct nvme_request), NULL, NULL, NULL, NULL, 0, 0);

	for (i = 0; i < NVME_MAX_CONSUMERS; i++)
		nvme_consumer[i].id = INVALID_CONSUMER_ID;
}

SYSINIT(nvme_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_init, NULL);

static void
nvme_uninit(void)
{
	uma_zdestroy(nvme_request_zone);
}

SYSUNINIT(nvme_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_uninit, NULL);

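/*
 * Module load/unload hooks.  All global setup and teardown is handled by the
 * SYSINIT/SYSUNINIT routines above, so these are intentionally empty.
 */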
static void
nvme_load(void)
{
}

static void
nvme_unload(void)
{
}

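/* Perform an orderly controller shutdown when the system goes down. */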
static int
nvme_shutdown(device_t dev)
{
	struct nvme_controller	*ctrlr;

	ctrlr = DEVICE2SOFTC(dev);
	nvme_ctrlr_shutdown(ctrlr);

	return (0);
}

static int
nvme_modevent(module_t mod, int type, void *arg)
{

	switch (type) {
	case MOD_LOAD:
		nvme_load();
		break;
	case MOD_UNLOAD:
		nvme_unload();
		break;
	default:
		break;
	}

	return (0);
}

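/* Debug helper: print every field of a submission queue entry. */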
void
nvme_dump_command(struct nvme_command *cmd)
{

	printf(
"opc:%x f:%x cid:%x nsid:%x r2:%x r3:%x mptr:%jx prp1:%jx prp2:%jx cdw:%x %x %x %x %x %x\n",
	    cmd->opc, cmd->fuse, cmd->cid, le32toh(cmd->nsid),
	    cmd->rsvd2, cmd->rsvd3,
	    (uintmax_t)le64toh(cmd->mptr), (uintmax_t)le64toh(cmd->prp1), (uintmax_t)le64toh(cmd->prp2),
	    le32toh(cmd->cdw10), le32toh(cmd->cdw11), le32toh(cmd->cdw12),
	    le32toh(cmd->cdw13), le32toh(cmd->cdw14), le32toh(cmd->cdw15));
}

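/* Debug helper: decode the status word and print a completion queue entry. */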
void
nvme_dump_completion(struct nvme_completion *cpl)
{
	uint8_t p, sc, sct, m, dnr;
	uint16_t status;

	status = le16toh(cpl->status);

	p = NVME_STATUS_GET_P(status);
	sc = NVME_STATUS_GET_SC(status);
	sct = NVME_STATUS_GET_SCT(status);
	m = NVME_STATUS_GET_M(status);
	dnr = NVME_STATUS_GET_DNR(status);

	printf("cdw0:%08x sqhd:%04x sqid:%04x "
	    "cid:%04x p:%x sc:%02x sct:%x m:%x dnr:%x\n",
	    le32toh(cpl->cdw0), le16toh(cpl->sqhd), le16toh(cpl->sqid),
	    cpl->cid, p, sc, sct, m, dnr);
}

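/*
 * Attach: look up quirks for this device, construct the controller, apply
 * PCI-level workarounds, reset the hardware, and defer the remainder of
 * controller initialization to a config intrhook.
 */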
static int
nvme_attach(device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);
	int			status;
	struct _pcsid		*ep;
	uint32_t		devid;
	uint16_t		subdevice;

	devid = pci_get_devid(dev);
	subdevice = pci_get_subdevice(dev);
	ep = pci_ids;
	while (ep->devid) {
		if (nvme_match(devid, subdevice, ep))
			break;
		++ep;
	}
	ctrlr->quirks = ep->quirks;

	status = nvme_ctrlr_construct(ctrlr, dev);

	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	/*
	 * Some drives do not implement the PCIe completion timeout feature
	 *  correctly.  The manufacturer's workaround is to simply disable it;
	 *  the driver would not respond correctly to a completion timeout
	 *  anyway.
	 */
	if (ep->quirks & QUIRK_DISABLE_TIMEOUT) {
		int ptr;
		uint16_t devctl2;

		status = pci_find_cap(dev, PCIY_EXPRESS, &ptr);
		if (status) {
			device_printf(dev, "Can't locate PCIe capability?\n");
			return (status);
		}
		devctl2 = pci_read_config(dev, ptr + PCIER_DEVICE_CTL2, sizeof(devctl2));
		devctl2 |= PCIEM_CTL2_COMP_TIMO_DISABLE;
		pci_write_config(dev, ptr + PCIER_DEVICE_CTL2, devctl2, sizeof(devctl2));
	}

	/*
	 * Enable bus mastering so the controller can DMA completion status
	 *  messages back to host memory.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Reset the controller twice to ensure we do a transition from
	 *  cc.en==1 to cc.en==0.  This is because we don't really know what
	 *  state the controller was left in when the boot environment handed
	 *  off to the OS.
	 */
	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	ctrlr->config_hook.ich_func = nvme_ctrlr_start_config_hook;
	ctrlr->config_hook.ich_arg = ctrlr;

	config_intrhook_establish(&ctrlr->config_hook);

	return (0);
}

static int
nvme_detach (device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);

	nvme_ctrlr_destruct(ctrlr, dev);
	pci_disable_busmaster(dev);
	return (0);
}

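/*
 * Notify a single consumer about one controller: hand it the controller
 * cookie, report a failed controller through fail_fn, and otherwise walk the
 * active namespaces and invoke ns_fn for each one.
 */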
static void
nvme_notify(struct nvme_consumer *cons,
	    struct nvme_controller *ctrlr)
{
	struct nvme_namespace	*ns;
	void			*ctrlr_cookie;
	int			cmpset, ns_idx;

	/*
	 * The consumer may register itself after the nvme devices
	 *  have registered with the kernel, but before the
	 *  driver has completed initialization.  In that case,
	 *  return here, and when initialization completes, the
	 *  controller will make sure the consumer gets notified.
	 */
	if (!ctrlr->is_initialized)
		return;

	cmpset = atomic_cmpset_32(&ctrlr->notification_sent, 0, 1);

	if (cmpset == 0)
		return;

	if (cons->ctrlr_fn != NULL)
		ctrlr_cookie = (*cons->ctrlr_fn)(ctrlr);
	else
		ctrlr_cookie = NULL;
	ctrlr->cons_cookie[cons->id] = ctrlr_cookie;
	if (ctrlr->is_failed) {
		if (cons->fail_fn != NULL)
			(*cons->fail_fn)(ctrlr_cookie);
		/*
		 * Do not notify consumers about the namespaces of a
		 *  failed controller.
		 */
		return;
	}
	for (ns_idx = 0; ns_idx < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); ns_idx++) {
		ns = &ctrlr->ns[ns_idx];
		if (ns->data.nsze == 0)
			continue;
		if (cons->ns_fn != NULL)
			ns->cons_cookie[cons->id] =
			    (*cons->ns_fn)(ns, ctrlr_cookie);
	}
}

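/* Notify every registered consumer about a newly arrived controller. */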
void
nvme_notify_new_controller(struct nvme_controller *ctrlr)
{
	int i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		if (nvme_consumer[i].id != INVALID_CONSUMER_ID) {
			nvme_notify(&nvme_consumer[i], ctrlr);
		}
	}
}

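/*
 * Notify a newly registered consumer about every controller that is already
 * attached.
 */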
static void
nvme_notify_new_consumer(struct nvme_consumer *cons)
{
	device_t		*devlist;
	struct nvme_controller	*ctrlr;
	int			dev_idx, devcount;

	if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
		return;

	for (dev_idx = 0; dev_idx < devcount; dev_idx++) {
		ctrlr = DEVICE2SOFTC(devlist[dev_idx]);
		nvme_notify(cons, ctrlr);
	}

	free(devlist, M_TEMP);
}

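/*
 * Fan an asynchronous event completion (and any associated log page) out to
 * every consumer that registered an async callback.
 */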
void
nvme_notify_async_consumers(struct nvme_controller *ctrlr,
			    const struct nvme_completion *async_cpl,
			    uint32_t log_page_id, void *log_page_buffer,
			    uint32_t log_page_size)
{
	struct nvme_consumer	*cons;
	uint32_t		i;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID && cons->async_fn != NULL)
			(*cons->async_fn)(ctrlr->cons_cookie[i], async_cpl,
			    log_page_id, log_page_buffer, log_page_size);
	}
}

void
nvme_notify_fail_consumers(struct nvme_controller *ctrlr)
{
	struct nvme_consumer	*cons;
	uint32_t		i;

	/*
	 * If this controller failed during initialization (i.e. the IDENTIFY
	 *  command failed or timed out), do not notify any nvme consumers
	 *  of the failure here, since no consumer knows about the
	 *  controller yet.
	 */
	if (!ctrlr->is_initialized)
		return;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID && cons->fail_fn != NULL)
			cons->fail_fn(ctrlr->cons_cookie[i]);
	}
}

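/* Notify all registered consumers about a single (1-based) namespace ID. */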
void
nvme_notify_ns(struct nvme_controller *ctrlr, int nsid)
{
	struct nvme_consumer	*cons;
	struct nvme_namespace	*ns = &ctrlr->ns[nsid - 1];
	uint32_t		i;

	if (!ctrlr->is_initialized)
		return;

	for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
		cons = &nvme_consumer[i];
		if (cons->id != INVALID_CONSUMER_ID && cons->ns_fn != NULL)
			ns->cons_cookie[cons->id] =
			    (*cons->ns_fn)(ns, ctrlr->cons_cookie[cons->id]);
	}
}

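/*
 * Register a consumer and return a handle for it, or NULL if every slot is
 * already taken.  A minimal usage sketch (the my_* callbacks are
 * hypothetical, not part of this driver):
 *
 *	struct nvme_consumer *cons;
 *
 *	cons = nvme_register_consumer(my_ns_fn, my_ctrlr_fn, NULL, my_fail_fn);
 *	if (cons == NULL)
 *		return (ENOMEM);
 *	...
 *	nvme_unregister_consumer(cons);
 */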
struct nvme_consumer *
nvme_register_consumer(nvme_cons_ns_fn_t ns_fn, nvme_cons_ctrlr_fn_t ctrlr_fn,
		       nvme_cons_async_fn_t async_fn,
		       nvme_cons_fail_fn_t fail_fn)
{
	int i;

	/*
	 * TODO: add locking around consumer registration.  Not an issue
	 *  right now since we only have one nvme consumer - nvd(4).
	 */
	for (i = 0; i < NVME_MAX_CONSUMERS; i++)
		if (nvme_consumer[i].id == INVALID_CONSUMER_ID) {
			nvme_consumer[i].id = i;
			nvme_consumer[i].ns_fn = ns_fn;
			nvme_consumer[i].ctrlr_fn = ctrlr_fn;
			nvme_consumer[i].async_fn = async_fn;
			nvme_consumer[i].fail_fn = fail_fn;

			nvme_notify_new_consumer(&nvme_consumer[i]);
			return (&nvme_consumer[i]);
		}

	printf("nvme(4): consumer not registered - no slots available\n");
	return (NULL);
}

void
nvme_unregister_consumer(struct nvme_consumer *consumer)
{

	consumer->id = INVALID_CONSUMER_ID;
}

void
nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_completion_poll_status	*status = arg;

	/*
	 * Copy the completion into the status structure passed by the
	 *  caller, so that the caller can check it to determine whether
	 *  the request succeeded or failed.
	 */
	memcpy(&status->cpl, cpl, sizeof(*cpl));
	atomic_store_rel_int(&status->done, 1);
}
523