/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * derived from ata_xpt.c: Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/sbuf.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/nvme/nvme_all.h>
#include <machine/stdarg.h>	/* for xpt_print below */
#include "opt_cam.h"

struct nvme_quirk_entry {
	u_int quirks;
#define CAM_QUIRK_MAXTAGS 1
	u_int mintags;
	u_int maxtags;
};

/* Not even sure why we need this */
static periph_init_t nvme_probe_periph_init;

static struct periph_driver nvme_probe_driver =
{
	nvme_probe_periph_init, "nvme_probe",
	TAILQ_HEAD_INITIALIZER(nvme_probe_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(nvme_probe, nvme_probe_driver);

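/*
 * The probe is a small state machine: NVME_PROBE_IDENTIFY_CD fetches the
 * controller's IDENTIFY data, NVME_PROBE_IDENTIFY_NS then fetches the
 * namespace's IDENTIFY data, after which the probe is NVME_PROBE_DONE.
 */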
typedef enum {
	NVME_PROBE_IDENTIFY_CD,
	NVME_PROBE_IDENTIFY_NS,
	NVME_PROBE_DONE,
	NVME_PROBE_INVALID
} nvme_probe_action;

static char *nvme_probe_action_text[] = {
	"NVME_PROBE_IDENTIFY_CD",
	"NVME_PROBE_IDENTIFY_NS",
	"NVME_PROBE_DONE",
	"NVME_PROBE_INVALID"
};

#define NVME_PROBE_SET_ACTION(softc, newaction)	\
do {									\
	char **text;							\
	text = nvme_probe_action_text;					\
	CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE,		\
	    ("Probe %s to %s\n", text[(softc)->action],			\
	    text[(newaction)]));					\
	(softc)->action = (newaction);					\
} while(0)

typedef enum {
	NVME_PROBE_NO_ANNOUNCE	= 0x04
} nvme_probe_flags;

typedef struct {
	TAILQ_HEAD(, ccb_hdr) request_ccbs;
	union {
		struct nvme_controller_data	cd;
		struct nvme_namespace_data	ns;
	};
	nvme_probe_action	action;
	nvme_probe_flags	flags;
	int		restart;
	struct cam_periph *periph;
} nvme_probe_softc;

static struct nvme_quirk_entry nvme_quirk_table[] =
{
	{
//		{
//		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
//		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
//		},
		.quirks = 0, .mintags = 0, .maxtags = 0
	},
};

static const int nvme_quirk_table_size =
	sizeof(nvme_quirk_table) / sizeof(*nvme_quirk_table);

static cam_status	nvme_probe_register(struct cam_periph *periph,
				      void *arg);
static void	 nvme_probe_schedule(struct cam_periph *nvme_probe_periph);
static void	 nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb);
static void	 nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb);
static void	 nvme_probe_cleanup(struct cam_periph *periph);
//static void	 nvme_find_quirk(struct cam_ed *device);
static void	 nvme_scan_lun(struct cam_periph *periph,
			       struct cam_path *path, cam_flags flags,
			       union ccb *ccb);
static struct cam_ed *
		 nvme_alloc_device(struct cam_eb *bus, struct cam_et *target,
				   lun_id_t lun_id);
static void	 nvme_device_transport(struct cam_path *path);
static void	 nvme_dev_async(uint32_t async_code,
				struct cam_eb *bus,
				struct cam_et *target,
				struct cam_ed *device,
				void *async_arg);
static void	 nvme_action(union ccb *start_ccb);
static void	 nvme_announce_periph_sbuf(struct cam_periph *periph,
    struct sbuf *sb);
static void	 nvme_proto_announce_sbuf(struct cam_ed *device,
    struct sbuf *sb);
static void	 nvme_proto_denounce_sbuf(struct cam_ed *device,
    struct sbuf *sb);
static void	 nvme_proto_debug_out(union ccb *ccb);

static struct xpt_xport_ops nvme_xport_ops = {
	.alloc_device = nvme_alloc_device,
	.action = nvme_action,
	.async = nvme_dev_async,
	.announce_sbuf = nvme_announce_periph_sbuf,
};
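/*
 * Define and register an xpt_xport that routes the given transport type
 * through nvme_xport_ops; only XPORT_NVME is registered below.
 */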
#define NVME_XPT_XPORT(x, X)			\
static struct xpt_xport nvme_xport_ ## x = {	\
	.xport = XPORT_ ## X,			\
	.name = #x,				\
	.ops = &nvme_xport_ops,			\
};						\
CAM_XPT_XPORT(nvme_xport_ ## x);

NVME_XPT_XPORT(nvme, NVME);

#undef NVME_XPT_XPORT

static struct xpt_proto_ops nvme_proto_ops = {
	.announce_sbuf = nvme_proto_announce_sbuf,
	.denounce_sbuf = nvme_proto_denounce_sbuf,
	.debug_out = nvme_proto_debug_out,
};
static struct xpt_proto nvme_proto = {
	.proto = PROTO_NVME,
	.name = "nvme",
	.ops = &nvme_proto_ops,
};
CAM_XPT_PROTO(nvme_proto);

static void
nvme_probe_periph_init(void)
{
}

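/*
 * Allocate per-probe state, queue the requesting CCB on it, and kick off the
 * probe state machine for this periph.
 */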
static cam_status
nvme_probe_register(struct cam_periph *periph, void *arg)
{
	union ccb *request_ccb;	/* CCB representing the probe request */
	nvme_probe_softc *softc;

	request_ccb = (union ccb *)arg;
	if (request_ccb == NULL) {
		printf("nvme_probe_register: no probe CCB, "
		       "can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (nvme_probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_ZERO | M_NOWAIT);

	if (softc == NULL) {
		printf("nvme_probe_register: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}
	TAILQ_INIT(&softc->request_ccbs);
	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
			  periph_links.tqe);
	softc->flags = 0;
	periph->softc = softc;
	softc->periph = periph;
	softc->action = NVME_PROBE_INVALID;
	if (cam_periph_acquire(periph) != 0)
		return (CAM_REQ_CMP_ERR);

	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n"));

//	nvme_device_transport(periph->path);
	nvme_probe_schedule(periph);

	return(CAM_REQ_CMP);
}

static void
nvme_probe_schedule(struct cam_periph *periph)
{
	union ccb *ccb;
	nvme_probe_softc *softc;

	softc = (nvme_probe_softc *)periph->softc;
	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);

	NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD);

	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
		softc->flags |= NVME_PROBE_NO_ANNOUNCE;
	else
		softc->flags &= ~NVME_PROBE_NO_ANNOUNCE;

	xpt_schedule(periph, CAM_PRIORITY_XPT);
}

static void
nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ccb_nvmeio *nvmeio;
	nvme_probe_softc *softc;
	lun_id_t lun;

	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_start\n"));

	softc = (nvme_probe_softc *)periph->softc;
	nvmeio = &start_ccb->nvmeio;
	lun = xpt_path_lun_id(periph->path);

	if (softc->restart) {
		softc->restart = 0;
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_CD);
	}

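	/*
	 * Issue the IDENTIFY admin command for the current state: CNS=1 in
	 * cdw10 returns the controller data structure, while CNS=0 with the
	 * namespace id (lun) returns the namespace data structure.
	 */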
	switch (softc->action) {
	case NVME_PROBE_IDENTIFY_CD:
		cam_fill_nvmeadmin(nvmeio,
		    0,			/* retries */
		    nvme_probe_done,	/* cbfcnp */
		    CAM_DIR_IN,		/* flags */
		    (uint8_t *)&softc->cd,	/* data_ptr */
		    sizeof(softc->cd),		/* dxfer_len */
		    30 * 1000); /* timeout 30s */
		nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, 0,
		    1, 0, 0, 0, 0, 0);
		break;
	case NVME_PROBE_IDENTIFY_NS:
		cam_fill_nvmeadmin(nvmeio,
		    0,			/* retries */
		    nvme_probe_done,	/* cbfcnp */
		    CAM_DIR_IN,		/* flags */
		    (uint8_t *)&softc->ns,	/* data_ptr */
		    sizeof(softc->ns),		/* dxfer_len */
		    30 * 1000); /* timeout 30s */
		nvme_ns_cmd(nvmeio, NVME_OPC_IDENTIFY, lun,
		    0, 0, 0, 0, 0, 0);
		break;
	default:
		panic("nvme_probe_start: invalid action state 0x%x\n", softc->action);
	}
	start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
	xpt_action(start_ccb);
}

static void
nvme_probe_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct nvme_namespace_data *nvme_data;
	struct nvme_controller_data *nvme_cdata;
	nvme_probe_softc *softc;
	struct cam_path *path;
	struct scsi_vpd_device_id *did;
	struct scsi_vpd_id_descriptor *idd;
	uint32_t  priority;
	int found = 1, e, g, len;

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_done\n"));

	softc = (nvme_probe_softc *)periph->softc;
	path = done_ccb->ccb_h.path;
	priority = done_ccb->ccb_h.pinfo.priority;

	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if (cam_periph_error(done_ccb,
			0, softc->restart ? (SF_NO_RECOVERY | SF_NO_RETRY) : 0
		    ) == ERESTART) {
out:
			/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
			cam_release_devq(path, 0, 0, 0, FALSE);
			return;
		}
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
		}

		/*
		 * If we get to this point, we got an error status back
		 * from the identify command and the error status doesn't
		 * require automatically retrying it, so the identify
		 * failed.  If we had identify data for this device before,
		 * but this latest attempt failed, the device has probably
		 * gone away.  If this device isn't already marked
		 * unconfigured, notify the peripheral drivers that this
		 * device is no more.
		 */
device_fail:	if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
			xpt_async(AC_LOST_DEVICE, path, NULL);
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_INVALID);
		found = 0;
		goto done;
	}
	if (softc->restart)
		goto done;
	switch (softc->action) {
	case NVME_PROBE_IDENTIFY_CD:
		nvme_controller_data_swapbytes(&softc->cd);

		nvme_cdata = path->device->nvme_cdata;
		if (nvme_cdata == NULL) {
			nvme_cdata = malloc(sizeof(*nvme_cdata), M_CAMXPT,
			    M_NOWAIT);
			if (nvme_cdata == NULL) {
				xpt_print(path, "Can't allocate memory");
				goto device_fail;
			}
		}
		bcopy(&softc->cd, nvme_cdata, sizeof(*nvme_cdata));
		path->device->nvme_cdata = nvme_cdata;

		/* Save/update serial number. */
		if (path->device->serial_num != NULL) {
			free(path->device->serial_num, M_CAMXPT);
			path->device->serial_num = NULL;
			path->device->serial_num_len = 0;
		}
		path->device->serial_num = (uint8_t *)
		    malloc(NVME_SERIAL_NUMBER_LENGTH + 1, M_CAMXPT, M_NOWAIT);
		if (path->device->serial_num != NULL) {
			cam_strvis_flag(path->device->serial_num,
			    nvme_cdata->sn, sizeof(nvme_cdata->sn),
			    NVME_SERIAL_NUMBER_LENGTH + 1,
			    CAM_STRVIS_FLAG_NONASCII_SPC);

			path->device->serial_num_len =
			    strlen(path->device->serial_num);
		}

//		nvme_find_quirk(path->device);
		nvme_device_transport(path);
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY_NS);
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		goto out;
	case NVME_PROBE_IDENTIFY_NS:
		nvme_namespace_data_swapbytes(&softc->ns);

		/* Check that the namespace exists. */
		if (softc->ns.nsze == 0)
			goto device_fail;

		nvme_data = path->device->nvme_data;
		if (nvme_data == NULL) {
			nvme_data = malloc(sizeof(*nvme_data), M_CAMXPT,
			    M_NOWAIT);
			if (nvme_data == NULL) {
				xpt_print(path, "Can't allocate memory");
				goto device_fail;
			}
		}
		bcopy(&softc->ns, nvme_data, sizeof(*nvme_data));
		path->device->nvme_data = nvme_data;

		/* Save/update device_id based on NGUID and/or EUI64. */
		if (path->device->device_id != NULL) {
			free(path->device->device_id, M_CAMXPT);
			path->device->device_id = NULL;
			path->device->device_id_len = 0;
		}
		len = 0;
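		/*
		 * An all-zero NGUID or EUI64 means the namespace does not
		 * provide that identifier, so only build a descriptor for
		 * the identifiers that are actually present.
		 */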
		for (g = 0; g < sizeof(nvme_data->nguid); g++) {
			if (nvme_data->nguid[g] != 0)
				break;
		}
		if (g < sizeof(nvme_data->nguid))
			len += sizeof(struct scsi_vpd_id_descriptor) + 16;
		for (e = 0; e < sizeof(nvme_data->eui64); e++) {
			if (nvme_data->eui64[e] != 0)
				break;
		}
		if (e < sizeof(nvme_data->eui64))
			len += sizeof(struct scsi_vpd_id_descriptor) + 8;
		if (len > 0) {
			path->device->device_id = (uint8_t *)
			    malloc(SVPD_DEVICE_ID_HDR_LEN + len,
			    M_CAMXPT, M_NOWAIT);
		}
		if (path->device->device_id != NULL) {
			did = (struct scsi_vpd_device_id *)path->device->device_id;
			did->device = SID_QUAL_LU_CONNECTED | T_DIRECT;
			did->page_code = SVPD_DEVICE_ID;
			scsi_ulto2b(len, did->length);
			idd = (struct scsi_vpd_id_descriptor *)(did + 1);
			if (g < sizeof(nvme_data->nguid)) {
				idd->proto_codeset = SVPD_ID_CODESET_BINARY;
				idd->id_type = SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64;
				idd->length = 16;
				bcopy(nvme_data->nguid, idd->identifier, 16);
				idd = (struct scsi_vpd_id_descriptor *)
				    &idd->identifier[16];
			}
			if (e < sizeof(nvme_data->eui64)) {
				idd->proto_codeset = SVPD_ID_CODESET_BINARY;
				idd->id_type = SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64;
				idd->length = 8;
				bcopy(nvme_data->eui64, idd->identifier, 8);
			}
			path->device->device_id_len = SVPD_DEVICE_ID_HDR_LEN + len;
		}

		if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) {
			path->device->flags &= ~CAM_DEV_UNCONFIGURED;
			xpt_acquire_device(path->device);
			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action(done_ccb);
			xpt_async(AC_FOUND_DEVICE, path, done_ccb);
		}
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_DONE);
		break;
	default:
		panic("nvme_probe_done: invalid action state 0x%x\n", softc->action);
	}
done:
	if (softc->restart) {
		softc->restart = 0;
		xpt_release_ccb(done_ccb);
		nvme_probe_schedule(periph);
		goto out;
	}
	xpt_release_ccb(done_ccb);
	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n"));
	while ((done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs))) {
		TAILQ_REMOVE(&softc->request_ccbs,
		    &done_ccb->ccb_h, periph_links.tqe);
		done_ccb->ccb_h.status = found ? CAM_REQ_CMP : CAM_REQ_CMP_ERR;
		xpt_done(done_ccb);
	}
	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
	cam_release_devq(path, 0, 0, 0, FALSE);
	cam_periph_invalidate(periph);
	cam_periph_release_locked(periph);
}

static void
nvme_probe_cleanup(struct cam_periph *periph)
{

	free(periph->softc, M_CAMXPT);
}

#if 0
/* XXX should be used, don't delete */
static void
nvme_find_quirk(struct cam_ed *device)
{
	struct nvme_quirk_entry *quirk;
	caddr_t	match;

	match = cam_quirkmatch((caddr_t)&device->nvme_data,
			       (caddr_t)nvme_quirk_table,
			       nvme_quirk_table_size,
			       sizeof(*nvme_quirk_table), nvme_identify_match);

	if (match == NULL)
		panic("xpt_find_quirk: device didn't match wildcard entry!!");

	quirk = (struct nvme_quirk_entry *)match;
	device->quirk = quirk;
	if (quirk->quirks & CAM_QUIRK_MAXTAGS) {
		device->mintags = quirk->mintags;
		device->maxtags = quirk->maxtags;
	}
}
#endif

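/*
 * Kick off (or restart) a probe of the given LUN: if a nvme_probe periph
 * already exists on the path, queue the request and flag a restart;
 * otherwise allocate a new nvme_probe periph to do the work.
 */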
static void
nvme_scan_lun(struct cam_periph *periph, struct cam_path *path,
	     cam_flags flags, union ccb *request_ccb)
{
	struct ccb_pathinq cpi;
	cam_status status;
	struct cam_periph *old_periph;
	int lock;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun\n"));

	xpt_path_inq(&cpi, path);

	if (cpi.ccb_h.status != CAM_REQ_CMP) {
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = cpi.ccb_h.status;
			xpt_done(request_ccb);
		}
		return;
	}

	if (xpt_path_lun_id(path) == CAM_LUN_WILDCARD) {
		CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun ignoring bus\n"));
		request_ccb->ccb_h.status = CAM_REQ_CMP;	/* XXX signal error ? */
		xpt_done(request_ccb);
		return;
	}

	lock = (xpt_path_owned(path) == 0);
	if (lock)
		xpt_path_lock(path);
	if ((old_periph = cam_periph_find(path, "nvme_probe")) != NULL) {
		if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) {
			nvme_probe_softc *softc;

			softc = (nvme_probe_softc *)old_periph->softc;
			TAILQ_INSERT_TAIL(&softc->request_ccbs,
				&request_ccb->ccb_h, periph_links.tqe);
			softc->restart = 1;
			CAM_DEBUG(path, CAM_DEBUG_TRACE,
			    ("restarting nvme_probe device\n"));
		} else {
			request_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			CAM_DEBUG(path, CAM_DEBUG_TRACE,
			    ("Failing to restart nvme_probe device\n"));
			xpt_done(request_ccb);
		}
	} else {
		CAM_DEBUG(path, CAM_DEBUG_TRACE,
		    ("Adding nvme_probe device\n"));
		status = cam_periph_alloc(nvme_probe_register, NULL, nvme_probe_cleanup,
					  nvme_probe_start, "nvme_probe",
					  CAM_PERIPH_BIO,
					  request_ccb->ccb_h.path, NULL, 0,
					  request_ccb);

		if (status != CAM_REQ_CMP) {
			xpt_print(path, "nvme_scan_lun: cam_periph_alloc "
			    "returned an error, can't continue probe\n");
			request_ccb->ccb_h.status = status;
			xpt_done(request_ccb);
		}
	}
	if (lock)
		xpt_path_unlock(path);
}

static struct cam_ed *
nvme_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
	struct nvme_quirk_entry *quirk;
	struct cam_ed *device;

	device = xpt_alloc_device(bus, target, lun_id);
	if (device == NULL)
		return (NULL);

	/*
	 * Take the default quirk entry until we have inquiry
	 * data from nvme and can determine a better quirk to use.
	 */
	quirk = &nvme_quirk_table[nvme_quirk_table_size - 1];
	device->quirk = (void *)quirk;
	device->mintags = 0;
	device->maxtags = 0;
	device->inq_flags = 0;
	device->queue_flags = 0;
	device->device_id = NULL;
	device->device_id_len = 0;
	device->serial_num = NULL;
	device->serial_num_len = 0;
	return (device);
}

static void
nvme_device_transport(struct cam_path *path)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cts;
	/* XXX get data from nvme namespace and other info ??? */

	/* Get transport information from the SIM */
	xpt_path_inq(&cpi, path);

	path->device->transport = cpi.transport;
	path->device->transport_version = cpi.transport_version;

	path->device->protocol = cpi.protocol;
	path->device->protocol_version = cpi.protocol_version;

	/* Tell the controller what we think */
	memset(&cts, 0, sizeof(cts));
	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE);
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cts.transport = path->device->transport;
	cts.transport_version = path->device->transport_version;
	cts.protocol = path->device->protocol;
	cts.protocol_version = path->device->protocol_version;
	cts.proto_specific.valid = 0;
	cts.xport_specific.valid = 0;
	xpt_action((union ccb *)&cts);
}

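/*
 * Handle XPT_DEV_ADVINFO: report the cached device id, serial number,
 * physical path and NVMe identify data for this device, and allow a new
 * physical path string to be stored.
 */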
static void
nvme_dev_advinfo(union ccb *start_ccb)
{
	struct cam_ed *device;
	struct ccb_dev_advinfo *cdai;
	off_t amt;

	xpt_path_assert(start_ccb->ccb_h.path, MA_OWNED);
	start_ccb->ccb_h.status = CAM_REQ_INVALID;
	device = start_ccb->ccb_h.path->device;
	cdai = &start_ccb->cdai;
	switch(cdai->buftype) {
	case CDAI_TYPE_SCSI_DEVID:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		cdai->provsiz = device->device_id_len;
		if (device->device_id_len == 0)
			break;
		amt = device->device_id_len;
		if (cdai->provsiz > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->device_id, amt);
		break;
	case CDAI_TYPE_SERIAL_NUM:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		cdai->provsiz = device->serial_num_len;
		if (device->serial_num_len == 0)
			break;
		amt = device->serial_num_len;
		if (cdai->provsiz > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->serial_num, amt);
		break;
	case CDAI_TYPE_PHYS_PATH:
		if (cdai->flags & CDAI_FLAG_STORE) {
			if (device->physpath != NULL) {
				free(device->physpath, M_CAMXPT);
				device->physpath = NULL;
				device->physpath_len = 0;
			}
			/* Clear existing buffer if zero length */
			if (cdai->bufsiz == 0)
				break;
			device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT);
			if (device->physpath == NULL) {
				start_ccb->ccb_h.status = CAM_REQ_ABORTED;
				return;
			}
			device->physpath_len = cdai->bufsiz;
			memcpy(device->physpath, cdai->buf, cdai->bufsiz);
		} else {
			cdai->provsiz = device->physpath_len;
			if (device->physpath_len == 0)
				break;
			amt = device->physpath_len;
			if (cdai->provsiz > cdai->bufsiz)
				amt = cdai->bufsiz;
			memcpy(cdai->buf, device->physpath, amt);
		}
		break;
	case CDAI_TYPE_NVME_CNTRL:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		amt = sizeof(struct nvme_controller_data);
		cdai->provsiz = amt;
		if (amt > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->nvme_cdata, amt);
		break;
	case CDAI_TYPE_NVME_NS:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		amt = sizeof(struct nvme_namespace_data);
		cdai->provsiz = amt;
		if (amt > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->nvme_data, amt);
		break;
	default:
		return;
	}
	start_ccb->ccb_h.status = CAM_REQ_CMP;

	if (cdai->flags & CDAI_FLAG_STORE) {
		xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path,
			  (void *)(uintptr_t)cdai->buftype);
	}
}

static void
nvme_action(union ccb *start_ccb)
{
	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("nvme_action: func= %#x\n", start_ccb->ccb_h.func_code));

	switch (start_ccb->ccb_h.func_code) {
	case XPT_SCAN_BUS:
	case XPT_SCAN_TGT:
	case XPT_SCAN_LUN:
		nvme_scan_lun(start_ccb->ccb_h.path->periph,
			      start_ccb->ccb_h.path, start_ccb->crcn.flags,
			      start_ccb);
		break;
	case XPT_DEV_ADVINFO:
		nvme_dev_advinfo(start_ccb);
		break;

	default:
		xpt_action_default(start_ccb);
		break;
	}
}

/*
 * Handle any per-device event notifications that require action by the XPT.
 */
static void
nvme_dev_async(uint32_t async_code, struct cam_eb *bus, struct cam_et *target,
	      struct cam_ed *device, void *async_arg)
{

	/*
	 * We only need to handle events for real devices.
	 */
	if (target->target_id == CAM_TARGET_WILDCARD
	 || device->lun_id == CAM_LUN_WILDCARD)
		return;

	if (async_code == AC_LOST_DEVICE &&
	    (device->flags & CAM_DEV_UNCONFIGURED) == 0) {
		device->flags |= CAM_DEV_UNCONFIGURED;
		xpt_release_device(device);
	}
}

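/*
 * Announce the periph: query the SIM's current transport settings and report
 * the negotiated NVMe protocol version plus, when the SIM provides it, the
 * PCIe link width and speed.
 */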
static void
nvme_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
{
	struct	ccb_pathinq cpi;
	struct	ccb_trans_settings cts;
	struct	cam_path *path = periph->path;
	struct ccb_trans_settings_nvme	*nvmex;

	cam_periph_assert(periph, MA_OWNED);

	/* Ask the SIM for connection details */
	memset(&cts, 0, sizeof(cts));
	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		return;

	/* Ask the SIM for its base transfer speed */
	xpt_path_inq(&cpi, periph->path);
	sbuf_printf(sb, "%s%d: nvme version %d.%d",
	    periph->periph_name, periph->unit_number,
	    NVME_MAJOR(cts.protocol_version),
	    NVME_MINOR(cts.protocol_version));
	if (cts.transport == XPORT_NVME) {
		nvmex = &cts.proto_specific.nvme;
		if (nvmex->valid & CTS_NVME_VALID_LINK)
			sbuf_printf(sb,
			    " x%d (max x%d) lanes PCIe Gen%d (max Gen%d) link",
			    nvmex->lanes, nvmex->max_lanes,
			    nvmex->speed, nvmex->max_speed);
	}
	sbuf_printf(sb, "\n");
}

static void
nvme_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb)
{
	nvme_print_ident(device->nvme_cdata, device->nvme_data, sb);
}

static void
nvme_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb)
{
	nvme_print_ident_short(device->nvme_cdata, device->nvme_data, sb);
}

static void
nvme_proto_debug_out(union ccb *ccb)
{
	char cdb_str[(sizeof(struct nvme_command) * 3) + 1];

	if (ccb->ccb_h.func_code != XPT_NVME_IO &&
	    ccb->ccb_h.func_code != XPT_NVME_ADMIN)
		return;

	CAM_DEBUG(ccb->ccb_h.path,
	    CAM_DEBUG_CDB,("%s. NCB: %s\n", nvme_op_string(&ccb->nvmeio.cmd,
		ccb->ccb_h.func_code == XPT_NVME_ADMIN),
		nvme_cmd_string(&ccb->nvmeio.cmd, cdb_str, sizeof(cdb_str))));
}