xref: /freebsd/sys/cam/nvme/nvme_xpt.c (revision 22cf89c938886d14f5796fc49f9f020c23ea8eaf)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2015 Netflix, Inc.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * derived from ata_xpt.c: Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
28  */
29 
30 #include <sys/cdefs.h>
31 #include <sys/param.h>
32 #include <sys/bus.h>
33 #include <sys/endian.h>
34 #include <sys/systm.h>
35 #include <sys/types.h>
36 #include <sys/malloc.h>
37 #include <sys/kernel.h>
38 #include <sys/time.h>
39 #include <sys/conf.h>
40 #include <sys/fcntl.h>
41 #include <sys/sbuf.h>
42 
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/sysctl.h>
46 
47 #include <cam/cam.h>
48 #include <cam/cam_ccb.h>
49 #include <cam/cam_queue.h>
50 #include <cam/cam_periph.h>
51 #include <cam/cam_sim.h>
52 #include <cam/cam_xpt.h>
53 #include <cam/cam_xpt_sim.h>
54 #include <cam/cam_xpt_periph.h>
55 #include <cam/cam_xpt_internal.h>
56 #include <cam/cam_debug.h>
57 
58 #include <cam/scsi/scsi_all.h>
59 #include <cam/scsi/scsi_message.h>
60 #include <cam/nvme/nvme_all.h>
61 #include <machine/stdarg.h>	/* for xpt_print below */
62 #include "opt_cam.h"
63 
64 struct nvme_quirk_entry {
65 	u_int quirks;
66 #define CAM_QUIRK_MAXTAGS 1
67 	u_int mintags;
68 	u_int maxtags;
69 };
70 
71 /* Periph driver init hook; nvme_probe has no global state to initialize. */
72 static periph_init_t nvme_probe_periph_init;
73 
74 static struct periph_driver nvme_probe_driver =
75 {
76 	nvme_probe_periph_init, "nvme_probe",
77 	TAILQ_HEAD_INITIALIZER(nvme_probe_driver.units), /* generation */ 0,
78 	CAM_PERIPH_DRV_EARLY
79 };
80 
81 PERIPHDRIVER_DECLARE(nvme_probe, nvme_probe_driver);
82 
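/*
 * Probe state machine: first IDENTIFY the controller data, then IDENTIFY
 * the namespace whose ID matches this path's LUN, then we're done.
 */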
83 typedef enum {
84 	NVME_PROBE_IDENTIFY_CD,
85 	NVME_PROBE_IDENTIFY_NS,
86 	NVME_PROBE_DONE,
87 	NVME_PROBE_INVALID
88 } nvme_probe_action;
89 
90 static char *nvme_probe_action_text[] = {
91 	"NVME_PROBE_IDENTIFY_CD",
92 	"NVME_PROBE_IDENTIFY_NS",
93 	"NVME_PROBE_DONE",
94 	"NVME_PROBE_INVALID"
95 };
96 
97 #define NVME_PROBE_SET_ACTION(softc, newaction)	\
98 do {									\
99 	char **text;							\
100 	text = nvme_probe_action_text;					\
101 	CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE,		\
102 	    ("Probe %s to %s\n", text[(softc)->action],			\
103 	    text[(newaction)]));					\
104 	(softc)->action = (newaction);					\
105 } while(0)
106 
107 typedef enum {
108 	NVME_PROBE_NO_ANNOUNCE	= 0x04
109 } nvme_probe_flags;
110 
111 typedef struct {
112 	TAILQ_HEAD(, ccb_hdr) request_ccbs;
113 	union {
114 		struct nvme_controller_data	cd;
115 		struct nvme_namespace_data	ns;
116 	};
117 	nvme_probe_action	action;
118 	nvme_probe_flags	flags;
119 	int		restart;
120 	struct cam_periph *periph;
121 } nvme_probe_softc;
122 
123 static struct nvme_quirk_entry nvme_quirk_table[] =
124 {
125 	{
126 //		{
127 //		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
128 //		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
129 //		},
130 		.quirks = 0, .mintags = 0, .maxtags = 0
131 	},
132 };
133 
134 static const int nvme_quirk_table_size =
135 	sizeof(nvme_quirk_table) / sizeof(*nvme_quirk_table);
136 
137 static cam_status	nvme_probe_register(struct cam_periph *periph,
138 				      void *arg);
139 static void	 nvme_probe_schedule(struct cam_periph *nvme_probe_periph);
140 static void	 nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb);
141 static void	 nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb);
142 static void	 nvme_probe_cleanup(struct cam_periph *periph);
143 //static void	 nvme_find_quirk(struct cam_ed *device);
144 static void	 nvme_scan_lun(struct cam_periph *periph,
145 			       struct cam_path *path, cam_flags flags,
146 			       union ccb *ccb);
147 static struct cam_ed *
148 		 nvme_alloc_device(struct cam_eb *bus, struct cam_et *target,
149 				   lun_id_t lun_id);
150 static void	 nvme_device_transport(struct cam_path *path);
151 static void	 nvme_dev_async(uint32_t async_code,
152 				struct cam_eb *bus,
153 				struct cam_et *target,
154 				struct cam_ed *device,
155 				void *async_arg);
156 static void	 nvme_action(union ccb *start_ccb);
157 static void	 nvme_announce_periph_sbuf(struct cam_periph *periph,
158     struct sbuf *sb);
159 static void	 nvme_proto_announce_sbuf(struct cam_ed *device,
160     struct sbuf *sb);
161 static void	 nvme_proto_denounce_sbuf(struct cam_ed *device,
162     struct sbuf *sb);
163 static void	 nvme_proto_debug_out(union ccb *ccb);
164 
165 static struct xpt_xport_ops nvme_xport_ops = {
166 	.alloc_device = nvme_alloc_device,
167 	.action = nvme_action,
168 	.async = nvme_dev_async,
169 	.announce_sbuf = nvme_announce_periph_sbuf,
170 };
171 #define NVME_XPT_XPORT(x, X)			\
172 static struct xpt_xport nvme_xport_ ## x = {	\
173 	.xport = XPORT_ ## X,			\
174 	.name = #x,				\
175 	.ops = &nvme_xport_ops,			\
176 };						\
177 CAM_XPT_XPORT(nvme_xport_ ## x);
178 
179 NVME_XPT_XPORT(nvme, NVME);
180 
181 #undef NVME_XPT_XPORT
182 
183 static struct xpt_proto_ops nvme_proto_ops = {
184 	.announce_sbuf = nvme_proto_announce_sbuf,
185 	.denounce_sbuf = nvme_proto_denounce_sbuf,
186 	.debug_out = nvme_proto_debug_out,
187 };
188 static struct xpt_proto nvme_proto = {
189 	.proto = PROTO_NVME,
190 	.name = "nvme",
191 	.ops = &nvme_proto_ops,
192 };
193 CAM_XPT_PROTO(nvme_proto);
194 
195 static void
196 nvme_probe_periph_init(void)
197 {
198 }
199 
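/*
 * Attach the probe periph to the path: allocate its softc, queue the
 * request CCB that triggered the probe, take a periph reference, and
 * schedule the first probe step.
 */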
200 static cam_status
201 nvme_probe_register(struct cam_periph *periph, void *arg)
202 {
203 	union ccb *request_ccb;	/* CCB representing the probe request */
204 	nvme_probe_softc *softc;
205 
206 	request_ccb = (union ccb *)arg;
207 	if (request_ccb == NULL) {
208 		printf("nvme_probe_register: no probe CCB, "
209 		       "can't register device\n");
210 		return(CAM_REQ_CMP_ERR);
211 	}
212 
213 	softc = (nvme_probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_ZERO | M_NOWAIT);
214 
215 	if (softc == NULL) {
216 		printf("nvme_probe_register: Unable to probe new device. "
217 		       "Unable to allocate softc\n");
218 		return(CAM_REQ_CMP_ERR);
219 	}
220 	TAILQ_INIT(&softc->request_ccbs);
221 	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
222 			  periph_links.tqe);
223 	softc->flags = 0;
224 	periph->softc = softc;
225 	softc->periph = periph;
226 	softc->action = NVME_PROBE_INVALID;
227 	if (cam_periph_acquire(periph) != 0)
228 		return (CAM_REQ_CMP_ERR);
229 
230 	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n"));
231 
232 //	nvme_device_transport(periph->path);
233 	nvme_probe_schedule(periph);
234 
235 	return(CAM_REQ_CMP);
236 }
237 
238 static void
239 nvme_probe_schedule(struct cam_periph *periph)
240 {
241 	union ccb *ccb;
242 	nvme_probe_softc *softc;
243 
244 	softc = (nvme_probe_softc *)periph->softc;
245 	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
246 
247 	NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD);
248 
249 	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
250 		softc->flags |= NVME_PROBE_NO_ANNOUNCE;
251 	else
252 		softc->flags &= ~NVME_PROBE_NO_ANNOUNCE;
253 
254 	xpt_schedule(periph, CAM_PRIORITY_XPT);
255 }
256 
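/*
 * Issue the admin IDENTIFY command for the current probe state (restarting
 * from the controller-data step if a rescan was requested).  The CCB is
 * sent with CAM_DEV_QFREEZE set so the device queue stays frozen until
 * nvme_probe_done releases it.
 */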
257 static void
258 nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb)
259 {
260 	struct ccb_nvmeio *nvmeio;
261 	nvme_probe_softc *softc;
262 	lun_id_t lun;
263 
264 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_start\n"));
265 
266 	softc = (nvme_probe_softc *)periph->softc;
267 	nvmeio = &start_ccb->nvmeio;
268 	lun = xpt_path_lun_id(periph->path);
269 
270 	if (softc->restart) {
271 		softc->restart = 0;
272 		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD);
273 	}
274 
275 	switch (softc->action) {
276 	case NVME_PROBE_IDENTIFY_CD:
277 		cam_fill_nvmeadmin(nvmeio,
278 		    0,			/* retries */
279 		    nvme_probe_done,	/* cbfcnp */
280 		    CAM_DIR_IN,		/* flags */
281 		    (uint8_t *)&softc->cd,	/* data_ptr */
282 		    sizeof(softc->cd),		/* dxfer_len */
283 		    30 * 1000); /* timeout 30s */
284 		nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, 0,
285 		    1, 0, 0, 0, 0, 0);
286 		break;
287 	case NVME_PROBE_IDENTIFY_NS:
288 		cam_fill_nvmeadmin(nvmeio,
289 		    0,			/* retries */
290 		    nvme_probe_done,	/* cbfcnp */
291 		    CAM_DIR_IN,		/* flags */
292 		    (uint8_t *)&softc->ns,	/* data_ptr */
293 		    sizeof(softc->ns),		/* dxfer_len */
294 		    30 * 1000); /* timeout 30s */
295 		nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, lun,
296 		    0, 0, 0, 0, 0, 0);
297 		break;
298 	default:
299 		panic("nvme_probe_start: invalid action state 0x%x\n", softc->action);
300 	}
301 	start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
302 	xpt_action(start_ccb);
303 }
304 
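/*
 * Handle IDENTIFY completion.  For controller data we stash the
 * byte-swapped copy and the serial number, then move on to the namespace
 * step; for namespace data we synthesize a SCSI VPD device ID from the
 * NGUID and/or EUI64 and announce the device if it was unconfigured.  If
 * the IDENTIFY failed and can't be retried, any previously configured
 * device is reported lost.
 */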
305 static void
306 nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb)
307 {
308 	struct nvme_namespace_data *nvme_data;
309 	struct nvme_controller_data *nvme_cdata;
310 	nvme_probe_softc *softc;
311 	struct cam_path *path;
312 	struct scsi_vpd_device_id *did;
313 	struct scsi_vpd_id_descriptor *idd;
314 	uint32_t  priority;
315 	int found = 1, e, g, len;
316 
317 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_done\n"));
318 
319 	softc = (nvme_probe_softc *)periph->softc;
320 	path = done_ccb->ccb_h.path;
321 	priority = done_ccb->ccb_h.pinfo.priority;
322 
323 	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
324 		if (cam_periph_error(done_ccb,
325 			0, softc->restart ? (SF_NO_RECOVERY | SF_NO_RETRY) : 0
326 		    ) == ERESTART) {
327 out:
328 			/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
329 			cam_release_devq(path, 0, 0, 0, FALSE);
330 			return;
331 		}
332 		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
333 			/* Don't wedge the queue */
334 			xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
335 		}
336 
337 		/*
338 		 * If we get to this point, we got an error status back
339 		 * from the IDENTIFY command and the error status doesn't
340 		 * require automatically retrying it.  Therefore, the
341 		 * IDENTIFY failed.  If we had identify data before for
342 		 * this device, but this latest IDENTIFY command failed,
343 		 * the device has probably gone away.  If this device isn't
344 		 * already marked unconfigured, notify the peripheral
345 		 * drivers that this device is no more.
346 		 */
347 device_fail:	if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
348 			xpt_async(AC_LOST_DEVICE, path, NULL);
349 		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_INVALID);
350 		found = 0;
351 		goto done;
352 	}
353 	if (softc->restart)
354 		goto done;
355 	switch (softc->action) {
356 	case NVME_PROBE_IDENTIFY_CD:
357 		nvme_controller_data_swapbytes(&softc->cd);
358 
359 		nvme_cdata = path->device->nvme_cdata;
360 		if (nvme_cdata == NULL) {
361 			nvme_cdata = malloc(sizeof(*nvme_cdata), M_CAMXPT,
362 			    M_NOWAIT);
363 			if (nvme_cdata == NULL) {
364 				xpt_print(path, "Can't allocate memory\n");
365 				goto device_fail;
366 			}
367 		}
368 		bcopy(&softc->cd, nvme_cdata, sizeof(*nvme_cdata));
369 		path->device->nvme_cdata = nvme_cdata;
370 
371 		/* Save/update serial number. */
372 		if (path->device->serial_num != NULL) {
373 			free(path->device->serial_num, M_CAMXPT);
374 			path->device->serial_num = NULL;
375 			path->device->serial_num_len = 0;
376 		}
377 		path->device->serial_num = (uint8_t *)
378 		    malloc(NVME_SERIAL_NUMBER_LENGTH + 1, M_CAMXPT, M_NOWAIT);
379 		if (path->device->serial_num != NULL) {
380 			cam_strvis_flag(path->device->serial_num,
381 			    nvme_cdata->sn, sizeof(nvme_cdata->sn),
382 			    NVME_SERIAL_NUMBER_LENGTH + 1,
383 			    CAM_STRVIS_FLAG_NONASCII_SPC);
384 
385 			path->device->serial_num_len =
386 			    strlen(path->device->serial_num);
387 		}
388 
389 //		nvme_find_quirk(path->device);
390 		nvme_device_transport(path);
391 		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_NS);
392 		xpt_release_ccb(done_ccb);
393 		xpt_schedule(periph, priority);
394 		goto out;
395 	case NVME_PROBE_IDENTIFY_NS:
396 		nvme_namespace_data_swapbytes(&softc->ns);
397 
398 		/* Check that the namespace exists. */
399 		if (softc->ns.nsze == 0)
400 			goto device_fail;
401 
402 		nvme_data = path->device->nvme_data;
403 		if (nvme_data == NULL) {
404 			nvme_data = malloc(sizeof(*nvme_data), M_CAMXPT,
405 			    M_NOWAIT);
406 			if (nvme_data == NULL) {
407 				xpt_print(path, "Can't allocate memory\n");
408 				goto device_fail;
409 			}
410 		}
411 		bcopy(&softc->ns, nvme_data, sizeof(*nvme_data));
412 		path->device->nvme_data = nvme_data;
413 
414 		/* Save/update device_id based on NGUID and/or EUI64. */
415 		if (path->device->device_id != NULL) {
416 			free(path->device->device_id, M_CAMXPT);
417 			path->device->device_id = NULL;
418 			path->device->device_id_len = 0;
419 		}
420 		len = 0;
421 		for (g = 0; g < sizeof(nvme_data->nguid); g++) {
422 			if (nvme_data->nguid[g] != 0)
423 				break;
424 		}
425 		if (g < sizeof(nvme_data->nguid))
426 			len += sizeof(struct scsi_vpd_id_descriptor) + 16;
427 		for (e = 0; e < sizeof(nvme_data->eui64); e++) {
428 			if (nvme_data->eui64[e] != 0)
429 				break;
430 		}
431 		if (e < sizeof(nvme_data->eui64))
432 			len += sizeof(struct scsi_vpd_id_descriptor) + 8;
433 		if (len > 0) {
434 			path->device->device_id = (uint8_t *)
435 			    malloc(SVPD_DEVICE_ID_HDR_LEN + len,
436 			    M_CAMXPT, M_NOWAIT);
437 		}
438 		if (path->device->device_id != NULL) {
439 			did = (struct scsi_vpd_device_id *)path->device->device_id;
440 			did->device = SID_QUAL_LU_CONNECTED | T_DIRECT;
441 			did->page_code = SVPD_DEVICE_ID;
442 			scsi_ulto2b(len, did->length);
443 			idd = (struct scsi_vpd_id_descriptor *)(did + 1);
444 			if (g < sizeof(nvme_data->nguid)) {
445 				idd->proto_codeset = SVPD_ID_CODESET_BINARY;
446 				idd->id_type = SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64;
447 				idd->length = 16;
448 				bcopy(nvme_data->nguid, idd->identifier, 16);
449 				idd = (struct scsi_vpd_id_descriptor *)
450 				    &idd->identifier[16];
451 			}
452 			if (e < sizeof(nvme_data->eui64)) {
453 				idd->proto_codeset = SVPD_ID_CODESET_BINARY;
454 				idd->id_type = SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64;
455 				idd->length = 8;
456 				bcopy(nvme_data->eui64, idd->identifier, 8);
457 			}
458 			path->device->device_id_len = SVPD_DEVICE_ID_HDR_LEN + len;
459 		}
460 
461 		if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) {
462 			path->device->flags &= ~CAM_DEV_UNCONFIGURED;
463 			xpt_acquire_device(path->device);
464 			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
465 			xpt_action(done_ccb);
466 			xpt_async(AC_FOUND_DEVICE, path, done_ccb);
467 		}
468 		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_DONE);
469 		break;
470 	default:
471 		panic("nvme_probe_done: invalid action state 0x%x\n", softc->action);
472 	}
473 done:
474 	if (softc->restart) {
475 		softc->restart = 0;
476 		xpt_release_ccb(done_ccb);
477 		nvme_probe_schedule(periph);
478 		goto out;
479 	}
480 	xpt_release_ccb(done_ccb);
481 	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n"));
482 	while ((done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs))) {
483 		TAILQ_REMOVE(&softc->request_ccbs,
484 		    &done_ccb->ccb_h, periph_links.tqe);
485 		done_ccb->ccb_h.status = found ? CAM_REQ_CMP : CAM_REQ_CMP_ERR;
486 		xpt_done(done_ccb);
487 	}
488 	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
489 	cam_release_devq(path, 0, 0, 0, FALSE);
490 	cam_periph_invalidate(periph);
491 	cam_periph_release_locked(periph);
492 }
493 
494 static void
495 nvme_probe_cleanup(struct cam_periph *periph)
496 {
497 
498 	free(periph->softc, M_CAMXPT);
499 }
500 
501 #if 0
502 /* XXX should be used, don't delete */
503 static void
504 nvme_find_quirk(struct cam_ed *device)
505 {
506 	struct nvme_quirk_entry *quirk;
507 	caddr_t	match;
508 
509 	match = cam_quirkmatch((caddr_t)&device->nvme_data,
510 			       (caddr_t)nvme_quirk_table,
511 			       nvme_quirk_table_size,
512 			       sizeof(*nvme_quirk_table), nvme_identify_match);
513 
514 	if (match == NULL)
515 		panic("nvme_find_quirk: device didn't match wildcard entry!!");
516 
517 	quirk = (struct nvme_quirk_entry *)match;
518 	device->quirk = quirk;
519 	if (quirk->quirks & CAM_QUIRK_MAXTAGS) {
520 		device->mintags = quirk->mintags;
521 		device->maxtags = quirk->maxtags;
522 	}
523 }
524 #endif
525 
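/*
 * Entry point for XPT_SCAN_{BUS,TGT,LUN}: wildcard LUN scans complete
 * immediately; otherwise restart an existing nvme_probe periph if one is
 * already attached to this path, or allocate a new one to run the probe.
 */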
526 static void
527 nvme_scan_lun(struct cam_periph *periph, struct cam_path *path,
528 	     cam_flags flags, union ccb *request_ccb)
529 {
530 	struct ccb_pathinq cpi;
531 	cam_status status;
532 	struct cam_periph *old_periph;
533 	int lock;
534 
535 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun\n"));
536 
537 	xpt_path_inq(&cpi, path);
538 
539 	if (cpi.ccb_h.status != CAM_REQ_CMP) {
540 		if (request_ccb != NULL) {
541 			request_ccb->ccb_h.status = cpi.ccb_h.status;
542 			xpt_done(request_ccb);
543 		}
544 		return;
545 	}
546 
547 	if (xpt_path_lun_id(path) == CAM_LUN_WILDCARD) {
548 		CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun ignoring bus\n"));
549 		request_ccb->ccb_h.status = CAM_REQ_CMP;	/* XXX signal error ? */
550 		xpt_done(request_ccb);
551 		return;
552 	}
553 
554 	lock = (xpt_path_owned(path) == 0);
555 	if (lock)
556 		xpt_path_lock(path);
557 	if ((old_periph = cam_periph_find(path, "nvme_probe")) != NULL) {
558 		if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) {
559 			nvme_probe_softc *softc;
560 
561 			softc = (nvme_probe_softc *)old_periph->softc;
562 			TAILQ_INSERT_TAIL(&softc->request_ccbs,
563 				&request_ccb->ccb_h, periph_links.tqe);
564 			softc->restart = 1;
565 			CAM_DEBUG(path, CAM_DEBUG_TRACE,
566 			    ("restarting nvme_probe device\n"));
567 		} else {
568 			request_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
569 			CAM_DEBUG(path, CAM_DEBUG_TRACE,
570 			    ("Failing to restart nvme_probe device\n"));
571 			xpt_done(request_ccb);
572 		}
573 	} else {
574 		CAM_DEBUG(path, CAM_DEBUG_TRACE,
575 		    ("Adding nvme_probe device\n"));
576 		status = cam_periph_alloc(nvme_probe_register, NULL, nvme_probe_cleanup,
577 					  nvme_probe_start, "nvme_probe",
578 					  CAM_PERIPH_BIO,
579 					  request_ccb->ccb_h.path, NULL, 0,
580 					  request_ccb);
581 
582 		if (status != CAM_REQ_CMP) {
583 			xpt_print(path, "nvme_scan_lun: cam_periph_alloc "
584 			    "returned an error, can't continue probe\n");
585 			request_ccb->ccb_h.status = status;
586 			xpt_done(request_ccb);
587 		}
588 	}
589 	if (lock)
590 		xpt_path_unlock(path);
591 }
592 
593 static struct cam_ed *
594 nvme_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
595 {
596 	struct nvme_quirk_entry *quirk;
597 	struct cam_ed *device;
598 
599 	device = xpt_alloc_device(bus, target, lun_id);
600 	if (device == NULL)
601 		return (NULL);
602 
603 	/*
604 	 * Take the default quirk entry until we have inquiry
605 	 * data from nvme and can determine a better quirk to use.
606 	 */
607 	quirk = &nvme_quirk_table[nvme_quirk_table_size - 1];
608 	device->quirk = (void *)quirk;
609 	device->mintags = 0;
610 	device->maxtags = 0;
611 	device->inq_flags = 0;
612 	device->queue_flags = 0;
613 	device->device_id = NULL;
614 	device->device_id_len = 0;
615 	device->serial_num = NULL;
616 	device->serial_num_len = 0;
617 	return (device);
618 }
619 
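/*
 * Copy the transport and protocol identity reported by the SIM into the
 * device, then push the resulting settings back down with
 * XPT_SET_TRAN_SETTINGS.
 */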
620 static void
621 nvme_device_transport(struct cam_path *path)
622 {
623 	struct ccb_pathinq cpi;
624 	struct ccb_trans_settings cts;
625 	/* XXX get data from nvme namespace and other info ??? */
626 
627 	/* Get transport information from the SIM */
628 	xpt_path_inq(&cpi, path);
629 
630 	path->device->transport = cpi.transport;
631 	path->device->transport_version = cpi.transport_version;
632 
633 	path->device->protocol = cpi.protocol;
634 	path->device->protocol_version = cpi.protocol_version;
635 
636 	/* Tell the controller what we think */
637 	memset(&cts, 0, sizeof(cts));
638 	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE);
639 	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
640 	cts.type = CTS_TYPE_CURRENT_SETTINGS;
641 	cts.transport = path->device->transport;
642 	cts.transport_version = path->device->transport_version;
643 	cts.protocol = path->device->protocol;
644 	cts.protocol_version = path->device->protocol_version;
645 	cts.proto_specific.valid = 0;
646 	cts.xport_specific.valid = 0;
647 	xpt_action((union ccb *)&cts);
648 }
649 
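/*
 * XPT_DEV_ADVINFO handler: report (and, for the physical path, store) the
 * device ID, serial number, physical path, and raw NVMe controller and
 * namespace identify data for this device.
 */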
650 static void
651 nvme_dev_advinfo(union ccb *start_ccb)
652 {
653 	struct cam_ed *device;
654 	struct ccb_dev_advinfo *cdai;
655 	off_t amt;
656 
657 	xpt_path_assert(start_ccb->ccb_h.path, MA_OWNED);
658 	start_ccb->ccb_h.status = CAM_REQ_INVALID;
659 	device = start_ccb->ccb_h.path->device;
660 	cdai = &start_ccb->cdai;
661 	switch(cdai->buftype) {
662 	case CDAI_TYPE_SCSI_DEVID:
663 		if (cdai->flags & CDAI_FLAG_STORE)
664 			return;
665 		cdai->provsiz = device->device_id_len;
666 		if (device->device_id_len == 0)
667 			break;
668 		amt = device->device_id_len;
669 		if (cdai->provsiz > cdai->bufsiz)
670 			amt = cdai->bufsiz;
671 		memcpy(cdai->buf, device->device_id, amt);
672 		break;
673 	case CDAI_TYPE_SERIAL_NUM:
674 		if (cdai->flags & CDAI_FLAG_STORE)
675 			return;
676 		cdai->provsiz = device->serial_num_len;
677 		if (device->serial_num_len == 0)
678 			break;
679 		amt = device->serial_num_len;
680 		if (cdai->provsiz > cdai->bufsiz)
681 			amt = cdai->bufsiz;
682 		memcpy(cdai->buf, device->serial_num, amt);
683 		break;
684 	case CDAI_TYPE_PHYS_PATH:
685 		if (cdai->flags & CDAI_FLAG_STORE) {
686 			if (device->physpath != NULL) {
687 				free(device->physpath, M_CAMXPT);
688 				device->physpath = NULL;
689 				device->physpath_len = 0;
690 			}
691 			/* Clear existing buffer if zero length */
692 			if (cdai->bufsiz == 0)
693 				break;
694 			device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT);
695 			if (device->physpath == NULL) {
696 				start_ccb->ccb_h.status = CAM_REQ_ABORTED;
697 				return;
698 			}
699 			device->physpath_len = cdai->bufsiz;
700 			memcpy(device->physpath, cdai->buf, cdai->bufsiz);
701 		} else {
702 			cdai->provsiz = device->physpath_len;
703 			if (device->physpath_len == 0)
704 				break;
705 			amt = device->physpath_len;
706 			if (cdai->provsiz > cdai->bufsiz)
707 				amt = cdai->bufsiz;
708 			memcpy(cdai->buf, device->physpath, amt);
709 		}
710 		break;
711 	case CDAI_TYPE_NVME_CNTRL:
712 		if (cdai->flags & CDAI_FLAG_STORE)
713 			return;
714 		amt = sizeof(struct nvme_controller_data);
715 		cdai->provsiz = amt;
716 		if (amt > cdai->bufsiz)
717 			amt = cdai->bufsiz;
718 		memcpy(cdai->buf, device->nvme_cdata, amt);
719 		break;
720 	case CDAI_TYPE_NVME_NS:
721 		if (cdai->flags & CDAI_FLAG_STORE)
722 			return;
723 		amt = sizeof(struct nvme_namespace_data);
724 		cdai->provsiz = amt;
725 		if (amt > cdai->bufsiz)
726 			amt = cdai->bufsiz;
727 		memcpy(cdai->buf, device->nvme_data, amt);
728 		break;
729 	default:
730 		return;
731 	}
732 	start_ccb->ccb_h.status = CAM_REQ_CMP;
733 
734 	if (cdai->flags & CDAI_FLAG_STORE) {
735 		xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path,
736 			  (void *)(uintptr_t)cdai->buftype);
737 	}
738 }
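/*
 * Transport action hook: route scan requests to nvme_scan_lun and
 * advanced-info requests to nvme_dev_advinfo; everything else falls
 * through to xpt_action_default().
 */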
739 
740 static void
741 nvme_action(union ccb *start_ccb)
742 {
743 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
744 	    ("nvme_action: func= %#x\n", start_ccb->ccb_h.func_code));
745 
746 	switch (start_ccb->ccb_h.func_code) {
747 	case XPT_SCAN_BUS:
748 	case XPT_SCAN_TGT:
749 	case XPT_SCAN_LUN:
750 		nvme_scan_lun(start_ccb->ccb_h.path->periph,
751 			      start_ccb->ccb_h.path, start_ccb->crcn.flags,
752 			      start_ccb);
753 		break;
754 	case XPT_DEV_ADVINFO:
755 		nvme_dev_advinfo(start_ccb);
756 		break;
757 
758 	default:
759 		xpt_action_default(start_ccb);
760 		break;
761 	}
762 }
763 
764 /*
765  * Handle any per-device event notifications that require action by the XPT.
766  */
767 static void
768 nvme_dev_async(uint32_t async_code, struct cam_eb *bus, struct cam_et *target,
769 	      struct cam_ed *device, void *async_arg)
770 {
771 
772 	/*
773 	 * We only need to handle events for real devices.
774 	 */
775 	if (target->target_id == CAM_TARGET_WILDCARD
776 	 || device->lun_id == CAM_LUN_WILDCARD)
777 		return;
778 
779 	if (async_code == AC_LOST_DEVICE &&
780 	    (device->flags & CAM_DEV_UNCONFIGURED) == 0) {
781 		device->flags |= CAM_DEV_UNCONFIGURED;
782 		xpt_release_device(device);
783 	}
784 }
785 
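/*
 * Append this periph's announcement: the NVMe protocol version in use and,
 * when the SIM reports link information, the current and maximum PCIe lane
 * count and generation.
 */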
786 static void
787 nvme_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
788 {
789 	struct	ccb_pathinq cpi;
790 	struct	ccb_trans_settings cts;
791 	struct	cam_path *path = periph->path;
792 	struct ccb_trans_settings_nvme	*nvmex;
793 
794 	cam_periph_assert(periph, MA_OWNED);
795 
796 	/* Ask the SIM for connection details */
797 	memset(&cts, 0, sizeof(cts));
798 	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
799 	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
800 	cts.type = CTS_TYPE_CURRENT_SETTINGS;
801 	xpt_action((union ccb*)&cts);
802 	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
803 		return;
804 
805 	/* Ask the SIM for its base transfer speed */
806 	xpt_path_inq(&cpi, periph->path);
807 	sbuf_printf(sb, "%s%d: nvme version %d.%d",
808 	    periph->periph_name, periph->unit_number,
809 	    NVME_MAJOR(cts.protocol_version),
810 	    NVME_MINOR(cts.protocol_version));
811 	if (cts.transport == XPORT_NVME) {
812 		nvmex = &cts.proto_specific.nvme;
813 		if (nvmex->valid & CTS_NVME_VALID_LINK)
814 			sbuf_printf(sb,
815 			    " x%d (max x%d) lanes PCIe Gen%d (max Gen%d) link",
816 			    nvmex->lanes, nvmex->max_lanes,
817 			    nvmex->speed, nvmex->max_speed);
818 	}
819 	sbuf_printf(sb, "\n");
820 }
821 
822 static void
823 nvme_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb)
824 {
825 	nvme_print_ident(device->nvme_cdata, device->nvme_data, sb);
826 }
827 
828 static void
829 nvme_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb)
830 {
831 	nvme_print_ident_short(device->nvme_cdata, device->nvme_data, sb);
832 }
833 
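/*
 * CAM_DEBUG_CDB hook: decode and log the NVMe command carried by an NVMe
 * I/O or admin CCB.
 */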
834 static void
835 nvme_proto_debug_out(union ccb *ccb)
836 {
837 	char cdb_str[(sizeof(struct nvme_command) * 3) + 1];
838 
839 	if (ccb->ccb_h.func_code != XPT_NVME_IO &&
840 	    ccb->ccb_h.func_code != XPT_NVME_ADMIN)
841 		return;
842 
843 	CAM_DEBUG(ccb->ccb_h.path,
844 	    CAM_DEBUG_CDB,("%s. NCB: %s\n", nvme_op_string(&ccb->nvmeio.cmd,
845 		ccb->ccb_h.func_code == XPT_NVME_ADMIN),
846 		nvme_cmd_string(&ccb->nvmeio.cmd, cdb_str, sizeof(cdb_str))));
847 }
848