xref: /freebsd/sys/dev/nvme/nvme_sim.c (revision 53bb5613a8a15363718b6e6de8d965bf9a2c5469)
/*-
 * Copyright (c) 2016 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/smp.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "nvme_private.h"

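/*
 * Names for the two SIM-private pointer slots CAM reserves in each CCB
 * header (spriv_ptr0/spriv_ptr1 from cam_ccb.h).  They appear unused in
 * this file at present.
 */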
#define ccb_accb_ptr spriv_ptr0
#define ccb_ctrlr_ptr spriv_ptr1
static void	nvme_sim_action(struct cam_sim *sim, union ccb *ccb);
static void	nvme_sim_poll(struct cam_sim *sim);

#define sim2softc(sim)	((struct nvme_sim_softc *)cam_sim_softc(sim))
#define sim2ctrlr(sim)	(sim2softc(sim)->s_ctrlr)

struct nvme_sim_softc
{
	struct nvme_controller	*s_ctrlr;
	struct cam_sim		*s_sim;
	struct cam_path		*s_path;
};

static void
nvme_sim_nvmeio_done(void *ccb_arg, const struct nvme_completion *cpl)
{
	union ccb *ccb = (union ccb *)ccb_arg;

	/*
	 * Let the periph know the completion, and let it sort out what
	 * it means. Report an error or success based on SC and SCT.
	 * We do not try to fetch additional data from the error log,
	 * though maybe we should in the future.
	 */
	memcpy(&ccb->nvmeio.cpl, cpl, sizeof(*cpl));
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	if (nvme_completion_is_error(cpl)) {
		ccb->ccb_h.status = CAM_NVME_STATUS_ERROR;
		xpt_done(ccb);
	} else {
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done_direct(ccb);
	}
}

static void
nvme_sim_nvmeio(struct cam_sim *sim, union ccb *ccb)
{
	struct ccb_nvmeio	*nvmeio = &ccb->nvmeio;
	struct nvme_request	*req;
	void			*payload;
	uint32_t		size;
	struct nvme_controller *ctrlr;

	ctrlr = sim2ctrlr(sim);
	payload = nvmeio->data_ptr;
	size = nvmeio->dxfer_len;
	/* SG LIST ??? */
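	/*
	 * Pick a request type that matches how CAM handed us the data: a
	 * struct bio for block I/O, the CCB itself when a scatter/gather
	 * list was supplied, a data-less (NULL) request, or a plain
	 * kernel-virtual buffer for everything else.
	 */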
	if ((nvmeio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
		req = nvme_allocate_request_bio((struct bio *)payload,
		    nvme_sim_nvmeio_done, ccb);
	else if ((nvmeio->ccb_h.flags & CAM_DATA_SG) == CAM_DATA_SG)
		req = nvme_allocate_request_ccb(ccb, nvme_sim_nvmeio_done, ccb);
	else if (payload == NULL)
		req = nvme_allocate_request_null(nvme_sim_nvmeio_done, ccb);
	else
		req = nvme_allocate_request_vaddr(payload, size,
		    nvme_sim_nvmeio_done, ccb);

	if (req == NULL) {
		nvmeio->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	memcpy(&req->cmd, &ccb->nvmeio.cmd, sizeof(ccb->nvmeio.cmd));

	if (ccb->ccb_h.func_code == XPT_NVME_IO)
		nvme_ctrlr_submit_io_request(ctrlr, req);
	else
		nvme_ctrlr_submit_admin_request(ctrlr, req);
}

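/*
 * Approximate the link bandwidth in kB/s from the negotiated PCIe link
 * width and speed.  The link[] table holds the usable per-lane bandwidth
 * for each Link Status speed encoding (1 = 2.5GT/s, 2 = 5GT/s, 3 = 8GT/s,
 * 4 = 16GT/s), so e.g. a Gen3 x4 link reports roughly
 * 985000 * 4 = 3940000 kB/s.
 */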
static uint32_t
nvme_link_kBps(struct nvme_controller *ctrlr)
{
	uint32_t speed, lanes, link[] = { 1, 250000, 500000, 985000, 1970000 };
	uint32_t status;

	status = pcie_read_config(ctrlr->dev, PCIER_LINK_STA, 2);
	speed = status & PCIEM_LINK_STA_SPEED;
	lanes = (status & PCIEM_LINK_STA_WIDTH) >> 4;
	/*
	 * Failsafe on the link speed indicator. If it is insane, report the
	 * number of lanes as the speed. Not 100% accurate, but it may still
	 * be diagnostic.
	 */
	if (speed >= nitems(link))
		speed = 0;
	return link[speed] * lanes;
}

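/*
 * CAM action entry point for the SIM: dispatch on the CCB function code.
 * Most CCBs complete immediately via the xpt_done() at the bottom of the
 * switch; NVMe I/O and admin CCBs return early instead, since they complete
 * asynchronously from nvme_sim_nvmeio_done().
 */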
static void
nvme_sim_action(struct cam_sim *sim, union ccb *ccb)
{
	struct nvme_controller *ctrlr;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("nvme_sim_action: func= %#x\n",
		ccb->ccb_h.func_code));

	ctrlr = sim2ctrlr(sim);

	switch (ccb->ccb_h.func_code) {
	case XPT_CALC_GEOMETRY:		/* Calculate Geometry Totally nuts ? XXX */
		/*
		 * Only meaningful for old-school SCSI disks, since only the
		 * SCSI da driver generates them. Reject any that slip through.
		 */
		/*FALLTHROUGH*/
	case XPT_ABORT:			/* Abort the specified CCB */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	case XPT_SET_TRAN_SETTINGS:
		/*
		 * NVMe doesn't really have different transfer settings, but
		 * other parts of CAM think failure here is a big deal.
		 */
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq	*cpi = &ccb->cpi;
		device_t		dev = ctrlr->dev;

		/*
		 * For devices that are reported as children of the AHCI
		 * controller, which has no access to the config space for this
		 * controller, report the AHCI controller's data.
		 */
		if (ctrlr->quirks & QUIRK_AHCI)
			dev = device_get_parent(dev);
		cpi->version_num = 1;
		cpi->hba_inquiry = 0;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = ctrlr->cdata.nn;
		cpi->maxio = ctrlr->max_xfer_size;
		cpi->initiator_id = 0;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = nvme_link_kBps(ctrlr);
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "NVMe", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->transport = XPORT_NVME;		/* XXX XPORT_PCIE ? */
		cpi->transport_version = nvme_mmio_read_4(ctrlr, vs);
		cpi->protocol = PROTO_NVME;
		cpi->protocol_version = nvme_mmio_read_4(ctrlr, vs);
		cpi->xport_specific.nvme.nsid = xpt_path_lun_id(ccb->ccb_h.path);
		cpi->xport_specific.nvme.domain = pci_get_domain(dev);
		cpi->xport_specific.nvme.bus = pci_get_bus(dev);
		cpi->xport_specific.nvme.slot = pci_get_slot(dev);
		cpi->xport_specific.nvme.function = pci_get_function(dev);
		cpi->xport_specific.nvme.extra = 0;
		strlcpy(cpi->xport_specific.nvme.dev_name, device_get_nameunit(dev),
		    sizeof(cpi->xport_specific.nvme.dev_name));
		cpi->hba_vendor = pci_get_vendor(dev);
		cpi->hba_device = pci_get_device(dev);
		cpi->hba_subvendor = pci_get_subvendor(dev);
		cpi->hba_subdevice = pci_get_subdevice(dev);
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:	/* Get transport settings */
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_nvme	*nvmep;
		struct ccb_trans_settings_nvme	*nvmex;
		device_t dev;
		uint32_t status, caps, flags;

		dev = ctrlr->dev;
		cts = &ccb->cts;
		nvmex = &cts->xport_specific.nvme;
		nvmep = &cts->proto_specific.nvme;

		nvmex->spec = nvme_mmio_read_4(ctrlr, vs);
		nvmex->valid = CTS_NVME_VALID_SPEC;
		if ((ctrlr->quirks & QUIRK_AHCI) == 0) {
			/* AHCI redirect makes it impossible to query */
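			/*
			 * The negotiated and maximum link speed/width come
			 * straight from the PCIe Link Status and Link
			 * Capabilities registers; in both, the width field
			 * sits four bits above the speed field, hence the
			 * >> 4. The PCIER_FLAGS check confirms this really
			 * is a PCI Express endpoint before the values are
			 * trusted.
			 */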
			status = pcie_read_config(dev, PCIER_LINK_STA, 2);
			caps = pcie_read_config(dev, PCIER_LINK_CAP, 2);
			flags = pcie_read_config(dev, PCIER_FLAGS, 2);
			if ((flags & PCIEM_FLAGS_TYPE) == PCIEM_TYPE_ENDPOINT) {
				nvmex->valid |= CTS_NVME_VALID_LINK;
				nvmex->speed = status & PCIEM_LINK_STA_SPEED;
				nvmex->lanes = (status & PCIEM_LINK_STA_WIDTH) >> 4;
				nvmex->max_speed = caps & PCIEM_LINK_CAP_MAX_SPEED;
				nvmex->max_lanes = (caps & PCIEM_LINK_CAP_MAX_WIDTH) >> 4;
			}
		}

		/* XXX these should be something else maybe ? */
		nvmep->valid = CTS_NVME_VALID_SPEC;
		nvmep->spec = nvmex->spec;

		cts->transport = XPORT_NVME;
		cts->transport_version = nvmex->spec;
		cts->protocol = PROTO_NVME;
		cts->protocol_version = nvmex->spec;
		cts->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/*
		 * Every driver handles this, but nothing generates it. Assume
		 * it's OK to just say 'that worked'.
		 */
		/*FALLTHROUGH*/
	case XPT_RESET_DEV:		/* Bus Device Reset the specified device */
	case XPT_RESET_BUS:		/* Reset the specified bus */
		/*
		 * NVMe doesn't really support physically resetting the bus. It's part
		 * of the bus scanning dance, so return success to tell the process to
		 * proceed.
		 */
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_NVME_IO:		/* Execute the requested I/O operation */
		if (ctrlr->is_failed) {
			/*
			 * I/O came in while we were failing the drive, so drop
			 * it. Once the failure is complete, we'll be destroyed.
			 */
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		}
		nvme_sim_nvmeio(sim, ccb);
		return;			/* no done */
	case XPT_NVME_ADMIN:		/* or Admin operation */
		if (ctrlr->is_failed_admin) {
			/*
			 * Admin request came in when we can't send admin
			 * commands, so drop it. Once the failure is complete,
			 * we'll be destroyed.
			 */
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		}
		nvme_sim_nvmeio(sim, ccb);
		return;			/* no done */
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}

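/*
 * Polled completion path for the SIM. CAM calls this when it has to make
 * forward progress without relying on interrupts (for example while
 * dumping), so just ask the controller to process any pending completions
 * directly.
 */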
static void
nvme_sim_poll(struct cam_sim *sim)
{

	nvme_ctrlr_poll(sim2ctrlr(sim));
}

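/*
 * nvme(4) consumer callback for a newly attached controller: size a devq to
 * the controller's outstanding-I/O limit, wrap it in a SIM, register a new
 * CAM bus for it, and keep a wildcard path around for the async
 * notification and teardown done in nvme_sim_controller_fail().
 */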
static void *
nvme_sim_new_controller(struct nvme_controller *ctrlr)
{
	struct nvme_sim_softc *sc;
	struct cam_devq *devq;
	int max_trans;

	max_trans = ctrlr->max_hw_pend_io;
	devq = cam_simq_alloc(max_trans);
	if (devq == NULL)
		return (NULL);

	sc = malloc(sizeof(*sc), M_NVME, M_ZERO | M_WAITOK);
	sc->s_ctrlr = ctrlr;

	sc->s_sim = cam_sim_alloc(nvme_sim_action, nvme_sim_poll,
	    "nvme", sc, device_get_unit(ctrlr->dev),
	    NULL, max_trans, max_trans, devq);
	if (sc->s_sim == NULL) {
		printf("Failed to allocate a sim\n");
		cam_simq_free(devq);
		goto err1;
	}
	if (xpt_bus_register(sc->s_sim, ctrlr->dev, 0) != CAM_SUCCESS) {
		printf("Failed to create a bus\n");
		goto err2;
	}
	if (xpt_create_path(&sc->s_path, /*periph*/NULL, cam_sim_path(sc->s_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		printf("Failed to create a path\n");
		goto err3;
	}

	return (sc);

err3:
	xpt_bus_deregister(cam_sim_path(sc->s_sim));
err2:
	cam_sim_free(sc->s_sim, /*free_devq*/TRUE);
err1:
	free(sc, M_NVME);
	return (NULL);
}

static void *
nvme_sim_ns_change(struct nvme_namespace *ns, void *sc_arg)
{
	struct nvme_sim_softc *sc = sc_arg;
	union ccb *ccb;

	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		printf("unable to alloc CCB for rescan\n");
		return (NULL);
	}

	/*
	 * We map NVMe namespaces onto CAM LUNs. For each new namespace, we
	 * create a new CAM path for it and then rescan that path so the
	 * namespace gets enumerated.
	 */
	if (xpt_create_path(&ccb->ccb_h.path, /*periph*/NULL,
	    cam_sim_path(sc->s_sim), 0, ns->id) != CAM_REQ_CMP) {
		printf("unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return (NULL);
	}
	xpt_rescan(ccb);

	return (sc_arg);
}

static void
nvme_sim_controller_fail(void *ctrlr_arg)
{
	struct nvme_sim_softc *sc = ctrlr_arg;

	xpt_async(AC_LOST_DEVICE, sc->s_path, NULL);
	xpt_free_path(sc->s_path);
	xpt_bus_deregister(cam_sim_path(sc->s_sim));
	cam_sim_free(sc->s_sim, /*free_devq*/TRUE);
	free(sc, M_NVME);
}

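/*
 * Cookie returned by nvme_register_consumer() below; kept so that
 * nvme_sim_uninit() can unregister this consumer again later.
 */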
struct nvme_consumer *consumer_cookie;

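/*
 * Register as an nvme(4) consumer at boot or module load, unless the
 * system has been configured to use the older nvd(4) block front end
 * instead (nvme_use_nvd).
 */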
static void
nvme_sim_init(void)
{
	if (nvme_use_nvd)
		return;

	consumer_cookie = nvme_register_consumer(nvme_sim_ns_change,
	    nvme_sim_new_controller, NULL, nvme_sim_controller_fail);
}

SYSINIT(nvme_sim_register, SI_SUB_DRIVERS, SI_ORDER_ANY,
    nvme_sim_init, NULL);

static void
nvme_sim_uninit(void)
{
	if (nvme_use_nvd)
		return;
	/* XXX Cleanup */

	nvme_unregister_consumer(consumer_cookie);
}

SYSUNINIT(nvme_sim_unregister, SI_SUB_DRIVERS, SI_ORDER_ANY,
    nvme_sim_uninit, NULL);