xref: /freebsd/sys/dev/nvme/nvme_sim.c (revision 63b0c00eb043cf8cfabeb63528eda3190608a805)
1 /*-
2  * Copyright (c) 2016 Netflix, Inc.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer,
9  *    without modification, immediately at the beginning of the file.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25 
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/buf.h>
32 #include <sys/bus.h>
33 #include <sys/conf.h>
34 #include <sys/ioccom.h>
35 #include <sys/malloc.h>
36 #include <sys/proc.h>
37 #include <sys/smp.h>
38 
39 #include <cam/cam.h>
40 #include <cam/cam_ccb.h>
41 #include <cam/cam_sim.h>
42 #include <cam/cam_xpt_sim.h>
43 #include <cam/cam_debug.h>
44 
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pcireg.h>
47 
48 #include "nvme_private.h"
49 
50 #define ccb_accb_ptr spriv_ptr0
51 #define ccb_ctrlr_ptr spriv_ptr1
52 static void	nvme_sim_action(struct cam_sim *sim, union ccb *ccb);
53 static void	nvme_sim_poll(struct cam_sim *sim);
54 
55 #define sim2softc(sim)	((struct nvme_sim_softc *)cam_sim_softc(sim))
56 #define sim2ctrlr(sim)	(sim2softc(sim)->s_ctrlr)
57 
/* Per-controller state for the CAM SIM consumer. */
struct nvme_sim_softc
{
	struct nvme_controller	*s_ctrlr;	/* backing NVMe controller */
	struct cam_sim		*s_sim;		/* CAM SIM registered for it */
	struct cam_path		*s_path;	/* wildcard path used for async events */
};
64 
65 static void
66 nvme_sim_nvmeio_done(void *ccb_arg, const struct nvme_completion *cpl)
67 {
68 	union ccb *ccb = (union ccb *)ccb_arg;
69 
70 	/*
71 	 * Let the periph know the completion, and let it sort out what
72 	 * it means. Report an error or success based on SC and SCT.
73 	 * We do not try to fetch additional data from the error log,
74 	 * though maybe we should in the future.
75 	 */
76 	memcpy(&ccb->nvmeio.cpl, cpl, sizeof(*cpl));
77 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
78 	if (nvme_completion_is_error(cpl)) {
79 		ccb->ccb_h.status = CAM_NVME_STATUS_ERROR;
80 		xpt_done(ccb);
81 	} else {
82 		ccb->ccb_h.status = CAM_REQ_CMP;
83 		xpt_done_direct(ccb);
84 	}
85 }
86 
/*
 * Translate an XPT_NVME_IO / XPT_NVME_ADMIN CCB into an nvme_request and
 * submit it to the controller's I/O or admin queue.  Completion is
 * delivered asynchronously through nvme_sim_nvmeio_done().
 */
static void
nvme_sim_nvmeio(struct cam_sim *sim, union ccb *ccb)
{
	struct ccb_nvmeio	*nvmeio = &ccb->nvmeio;
	struct nvme_request	*req;
	void			*payload;
	uint32_t		size;
	struct nvme_controller *ctrlr;

	ctrlr = sim2ctrlr(sim);
	payload = nvmeio->data_ptr;
	size = nvmeio->dxfer_len;
	/* SG LIST ??? */
	if ((nvmeio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
		req = nvme_allocate_request_bio((struct bio *)payload,
		    nvme_sim_nvmeio_done, ccb);
	else if ((nvmeio->ccb_h.flags & CAM_DATA_SG) == CAM_DATA_SG)
		/*
		 * NOTE(review): this tests one bit rather than the full
		 * CAM_DATA_MASK field, so a CAM_DATA_SG_PADDR CCB would also
		 * land here — confirm that is intended.
		 */
		req = nvme_allocate_request_ccb(ccb, nvme_sim_nvmeio_done, ccb);
	else if (payload == NULL)
		/* No data phase at all. */
		req = nvme_allocate_request_null(nvme_sim_nvmeio_done, ccb);
	else
		req = nvme_allocate_request_vaddr(payload, size,
		    nvme_sim_nvmeio_done, ccb);

	if (req == NULL) {
		/* Out of request resources; report it so CAM can retry. */
		nvmeio->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	/* The CCB carries a fully-formed NVMe submission entry; copy it verbatim. */
	memcpy(&req->cmd, &ccb->nvmeio.cmd, sizeof(ccb->nvmeio.cmd));

	if (ccb->ccb_h.func_code == XPT_NVME_IO)
		nvme_ctrlr_submit_io_request(ctrlr, req);
	else
		nvme_ctrlr_submit_admin_request(ctrlr, req);
}
125 
126 static uint32_t
127 nvme_link_kBps(struct nvme_controller *ctrlr)
128 {
129 	uint32_t speed, lanes, link[] = { 1, 250000, 500000, 985000, 1970000 };
130 	uint32_t status;
131 
132 	status = pcie_read_config(ctrlr->dev, PCIER_LINK_STA, 2);
133 	speed = status & PCIEM_LINK_STA_SPEED;
134 	lanes = (status & PCIEM_LINK_STA_WIDTH) >> 4;
135 	/*
136 	 * Failsafe on link speed indicator. If it is insane report the number of
137 	 * lanes as the speed. Not 100% accurate, but may be diagnostic.
138 	 */
139 	if (speed >= nitems(link))
140 		speed = 0;
141 	return link[speed] * lanes;
142 }
143 
144 static void
145 nvme_sim_action(struct cam_sim *sim, union ccb *ccb)
146 {
147 	struct nvme_controller *ctrlr;
148 
149 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
150 	    ("nvme_sim_action: func= %#x\n",
151 		ccb->ccb_h.func_code));
152 
153 	ctrlr = sim2ctrlr(sim);
154 
155 	switch (ccb->ccb_h.func_code) {
156 	case XPT_CALC_GEOMETRY:		/* Calculate Geometry Totally nuts ? XXX */
157 		/*
158 		 * Only meaningful for old-school SCSI disks since only the SCSI
159 		 * da driver generates them. Reject all these that slip through.
160 		 */
161 		/*FALLTHROUGH*/
162 	case XPT_ABORT:			/* Abort the specified CCB */
163 		ccb->ccb_h.status = CAM_REQ_INVALID;
164 		break;
165 	case XPT_SET_TRAN_SETTINGS:
166 		/*
167 		 * NVMe doesn't really have different transfer settings, but
168 		 * other parts of CAM think failure here is a big deal.
169 		 */
170 		ccb->ccb_h.status = CAM_REQ_CMP;
171 		break;
172 	case XPT_PATH_INQ:		/* Path routing inquiry */
173 	{
174 		struct ccb_pathinq	*cpi = &ccb->cpi;
175 		device_t		dev = ctrlr->dev;
176 
177 		/*
178 		 * For devices that are reported as children of the AHCI
179 		 * controller, which has no access to the config space for this
180 		 * controller, report the AHCI controller's data.
181 		 */
182 		if (ctrlr->quirks & QUIRK_AHCI)
183 			dev = device_get_parent(dev);
184 		cpi->version_num = 1;
185 		cpi->hba_inquiry = 0;
186 		cpi->target_sprt = 0;
187 		cpi->hba_misc =  PIM_UNMAPPED | PIM_NOSCAN;
188 		cpi->hba_eng_cnt = 0;
189 		cpi->max_target = 0;
190 		cpi->max_lun = ctrlr->cdata.nn;
191 		cpi->maxio = ctrlr->max_xfer_size;
192 		cpi->initiator_id = 0;
193 		cpi->bus_id = cam_sim_bus(sim);
194 		cpi->base_transfer_speed = nvme_link_kBps(ctrlr);
195 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
196 		strlcpy(cpi->hba_vid, "NVMe", HBA_IDLEN);
197 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
198 		cpi->unit_number = cam_sim_unit(sim);
199 		cpi->transport = XPORT_NVME;		/* XXX XPORT_PCIE ? */
200 		cpi->transport_version = nvme_mmio_read_4(ctrlr, vs);
201 		cpi->protocol = PROTO_NVME;
202 		cpi->protocol_version = nvme_mmio_read_4(ctrlr, vs);
203 		cpi->xport_specific.nvme.nsid = xpt_path_lun_id(ccb->ccb_h.path);
204 		cpi->xport_specific.nvme.domain = pci_get_domain(dev);
205 		cpi->xport_specific.nvme.bus = pci_get_bus(dev);
206 		cpi->xport_specific.nvme.slot = pci_get_slot(dev);
207 		cpi->xport_specific.nvme.function = pci_get_function(dev);
208 		cpi->xport_specific.nvme.extra = 0;
209 		strncpy(cpi->xport_specific.nvme.dev_name, device_get_nameunit(dev),
210 		    sizeof(cpi->xport_specific.nvme.dev_name));
211 		cpi->hba_vendor = pci_get_vendor(dev);
212 		cpi->hba_device = pci_get_device(dev);
213 		cpi->hba_subvendor = pci_get_subvendor(dev);
214 		cpi->hba_subdevice = pci_get_subdevice(dev);
215 		cpi->ccb_h.status = CAM_REQ_CMP;
216 		break;
217 	}
218 	case XPT_GET_TRAN_SETTINGS:	/* Get transport settings */
219 	{
220 		struct ccb_trans_settings	*cts;
221 		struct ccb_trans_settings_nvme	*nvmep;
222 		struct ccb_trans_settings_nvme	*nvmex;
223 		device_t dev;
224 		uint32_t status, caps, flags;
225 
226 		dev = ctrlr->dev;
227 		cts = &ccb->cts;
228 		nvmex = &cts->xport_specific.nvme;
229 		nvmep = &cts->proto_specific.nvme;
230 
231 		nvmex->spec = nvme_mmio_read_4(ctrlr, vs);
232 		nvmex->valid = CTS_NVME_VALID_SPEC;
233 		if ((ctrlr->quirks & QUIRK_AHCI) == 0) {
234 			/* AHCI redirect makes it impossible to query */
235 			status = pcie_read_config(dev, PCIER_LINK_STA, 2);
236 			caps = pcie_read_config(dev, PCIER_LINK_CAP, 2);
237 			flags = pcie_read_config(dev, PCIER_FLAGS, 2);
238 			if ((flags & PCIEM_FLAGS_TYPE) == PCIEM_TYPE_ENDPOINT) {
239 				nvmex->valid |= CTS_NVME_VALID_LINK;
240 				nvmex->speed = status & PCIEM_LINK_STA_SPEED;
241 				nvmex->lanes = (status & PCIEM_LINK_STA_WIDTH) >> 4;
242 				nvmex->max_speed = caps & PCIEM_LINK_CAP_MAX_SPEED;
243 				nvmex->max_lanes = (caps & PCIEM_LINK_CAP_MAX_WIDTH) >> 4;
244 			}
245 		}
246 
247 		/* XXX these should be something else maybe ? */
248 		nvmep->valid = CTS_NVME_VALID_SPEC;
249 		nvmep->spec = nvmex->spec;
250 
251 		cts->transport = XPORT_NVME;
252 		cts->transport_version = nvmex->spec;
253 		cts->protocol = PROTO_NVME;
254 		cts->protocol_version = nvmex->spec;
255 		cts->ccb_h.status = CAM_REQ_CMP;
256 		break;
257 	}
258 	case XPT_TERM_IO:		/* Terminate the I/O process */
259 		/*
260 		 * every driver handles this, but nothing generates it. Assume
261 		 * it's OK to just say 'that worked'.
262 		 */
263 		/*FALLTHROUGH*/
264 	case XPT_RESET_DEV:		/* Bus Device Reset the specified device */
265 	case XPT_RESET_BUS:		/* Reset the specified bus */
266 		/*
267 		 * NVMe doesn't really support physically resetting the bus. It's part
268 		 * of the bus scanning dance, so return sucess to tell the process to
269 		 * proceed.
270 		 */
271 		ccb->ccb_h.status = CAM_REQ_CMP;
272 		break;
273 	case XPT_NVME_IO:		/* Execute the requested I/O operation */
274 	case XPT_NVME_ADMIN:		/* or Admin operation */
275 		if (ctrlr->is_failed) {
276 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
277 			break;
278 		}
279 		nvme_sim_nvmeio(sim, ccb);
280 		return;			/* no done */
281 	default:
282 		ccb->ccb_h.status = CAM_REQ_INVALID;
283 		break;
284 	}
285 	xpt_done(ccb);
286 }
287 
/*
 * CAM polled-completion hook: drive the controller's completion
 * processing without relying on interrupts.
 */
static void
nvme_sim_poll(struct cam_sim *sim)
{
	struct nvme_controller *ctrlr = sim2ctrlr(sim);

	nvme_ctrlr_poll(ctrlr);
}
294 
/*
 * Consumer callback: a new NVMe controller has arrived.  Allocate a CAM
 * devq sized to the controller's pending-I/O limit, create and register a
 * SIM/bus for it, and create a wildcard path used later for async events.
 * Returns the softc (this consumer's per-controller cookie) or NULL on
 * failure.
 */
static void *
nvme_sim_new_controller(struct nvme_controller *ctrlr)
{
	struct nvme_sim_softc *sc;
	struct cam_devq *devq;
	int max_trans;

	/* Bound CAM's queue depth by what the hardware can keep in flight. */
	max_trans = ctrlr->max_hw_pend_io;
	devq = cam_simq_alloc(max_trans);
	if (devq == NULL)
		return (NULL);

	/* M_WAITOK: this allocation sleeps rather than fail. */
	sc = malloc(sizeof(*sc), M_NVME, M_ZERO | M_WAITOK);
	sc->s_ctrlr = ctrlr;

	sc->s_sim = cam_sim_alloc(nvme_sim_action, nvme_sim_poll,
	    "nvme", sc, device_get_unit(ctrlr->dev),
	    NULL, max_trans, max_trans, devq);
	if (sc->s_sim == NULL) {
		printf("Failed to allocate a sim\n");
		/* The sim does not own devq yet; free it explicitly. */
		cam_simq_free(devq);
		goto err1;
	}
	if (xpt_bus_register(sc->s_sim, ctrlr->dev, 0) != CAM_SUCCESS) {
		printf("Failed to create a bus\n");
		goto err2;
	}
	if (xpt_create_path(&sc->s_path, /*periph*/NULL, cam_sim_path(sc->s_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		printf("Failed to create a path\n");
		goto err3;
	}

	return (sc);

	/* Unwind in reverse order of acquisition. */
err3:
	xpt_bus_deregister(cam_sim_path(sc->s_sim));
err2:
	/* free_devq=TRUE: the sim owns devq from cam_sim_alloc() on. */
	cam_sim_free(sc->s_sim, /*free_devq*/TRUE);
err1:
	free(sc, M_NVME);
	return (NULL);
}
338 
339 static void *
340 nvme_sim_ns_change(struct nvme_namespace *ns, void *sc_arg)
341 {
342 	struct nvme_sim_softc *sc = sc_arg;
343 	union ccb *ccb;
344 
345 	ccb = xpt_alloc_ccb_nowait();
346 	if (ccb == NULL) {
347 		printf("unable to alloc CCB for rescan\n");
348 		return (NULL);
349 	}
350 
351 	/*
352 	 * We map the NVMe namespace idea onto the CAM unit LUN. For
353 	 * each new namespace, we create a new CAM path for it. We then
354 	 * rescan the path to get it to enumerate.
355 	 */
356 	if (xpt_create_path(&ccb->ccb_h.path, /*periph*/NULL,
357 	    cam_sim_path(sc->s_sim), 0, ns->id) != CAM_REQ_CMP) {
358 		printf("unable to create path for rescan\n");
359 		xpt_free_ccb(ccb);
360 		return (NULL);
361 	}
362 	xpt_rescan(ccb);
363 
364 	return (sc_arg);
365 }
366 
/*
 * Consumer callback: the controller has failed.  Tear down in roughly the
 * reverse order of nvme_sim_new_controller(): announce the loss first so
 * periphs can detach, then release the path, bus, sim and softc.
 */
static void
nvme_sim_controller_fail(void *ctrlr_arg)
{
	struct nvme_sim_softc *sc = ctrlr_arg;

	/* Tell CAM the devices behind this path are gone. */
	xpt_async(AC_LOST_DEVICE, sc->s_path, NULL);
	xpt_free_path(sc->s_path);
	xpt_bus_deregister(cam_sim_path(sc->s_sim));
	/* free_devq=TRUE releases the devq allocated at controller arrival. */
	cam_sim_free(sc->s_sim, /*free_devq*/TRUE);
	free(sc, M_NVME);
}
378 
379 struct nvme_consumer *consumer_cookie;
380 
381 static void
382 nvme_sim_init(void)
383 {
384 	if (nvme_use_nvd)
385 		return;
386 
387 	consumer_cookie = nvme_register_consumer(nvme_sim_ns_change,
388 	    nvme_sim_new_controller, NULL, nvme_sim_controller_fail);
389 }
390 
391 SYSINIT(nvme_sim_register, SI_SUB_DRIVERS, SI_ORDER_ANY,
392     nvme_sim_init, NULL);
393 
394 static void
395 nvme_sim_uninit(void)
396 {
397 	if (nvme_use_nvd)
398 		return;
399 	/* XXX Cleanup */
400 
401 	nvme_unregister_consumer(consumer_cookie);
402 }
403 
404 SYSUNINIT(nvme_sim_unregister, SI_SUB_DRIVERS, SI_ORDER_ANY,
405     nvme_sim_uninit, NULL);
406