xref: /freebsd/sys/cam/cam_periph.c (revision ba3c1f5972d7b90feb6e6da47905ff2757e0fe57)
1 /*-
2  * Common functions for CAM "type" (peripheral) drivers.
3  *
4  * SPDX-License-Identifier: BSD-2-Clause
5  *
6  * Copyright (c) 1997, 1998 Justin T. Gibbs.
7  * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions, and the following disclaimer,
15  *    without modification, immediately at the beginning of the file.
16  * 2. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/types.h>
38 #include <sys/malloc.h>
39 #include <sys/kernel.h>
40 #include <sys/bio.h>
41 #include <sys/conf.h>
42 #include <sys/devctl.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/buf.h>
46 #include <sys/proc.h>
47 #include <sys/devicestat.h>
48 #include <sys/sbuf.h>
49 #include <sys/sysctl.h>
50 #include <vm/vm.h>
51 #include <vm/vm_extern.h>
52 
53 #include <cam/cam.h>
54 #include <cam/cam_ccb.h>
55 #include <cam/cam_compat.h>
56 #include <cam/cam_queue.h>
57 #include <cam/cam_xpt_periph.h>
58 #include <cam/cam_xpt_internal.h>
59 #include <cam/cam_periph.h>
60 #include <cam/cam_debug.h>
61 #include <cam/cam_sim.h>
62 
63 #include <cam/scsi/scsi_all.h>
64 #include <cam/scsi/scsi_message.h>
65 #include <cam/scsi/scsi_pass.h>
66 
67 static	u_int		camperiphnextunit(struct periph_driver *p_drv,
68 					  u_int newunit, bool wired,
69 					  path_id_t pathid, target_id_t target,
70 					  lun_id_t lun);
71 static	u_int		camperiphunit(struct periph_driver *p_drv,
72 				      path_id_t pathid, target_id_t target,
73 				      lun_id_t lun,
74 				      const char *sn);
75 static	void		camperiphdone(struct cam_periph *periph,
76 					union ccb *done_ccb);
77 static  void		camperiphfree(struct cam_periph *periph);
78 static int		camperiphscsistatuserror(union ccb *ccb,
79 					        union ccb **orig_ccb,
80 						 cam_flags camflags,
81 						 uint32_t sense_flags,
82 						 int *openings,
83 						 uint32_t *relsim_flags,
84 						 uint32_t *timeout,
85 						 uint32_t  *action,
86 						 const char **action_string);
87 static	int		camperiphscsisenseerror(union ccb *ccb,
88 					        union ccb **orig_ccb,
89 					        cam_flags camflags,
90 					        uint32_t sense_flags,
91 					        int *openings,
92 					        uint32_t *relsim_flags,
93 					        uint32_t *timeout,
94 					        uint32_t *action,
95 					        const char **action_string);
96 static void		cam_periph_devctl_notify(union ccb *ccb);
97 
98 static int nperiph_drivers;
99 static int initialized = 0;
100 struct periph_driver **periph_drivers;
101 
102 static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
103 
104 static int periph_selto_delay = 1000;
105 TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
106 static int periph_noresrc_delay = 500;
107 TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
108 static int periph_busy_delay = 500;
109 TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
110 
111 static u_int periph_mapmem_thresh = 65536;
112 SYSCTL_UINT(_kern_cam, OID_AUTO, mapmem_thresh, CTLFLAG_RWTUN,
113     &periph_mapmem_thresh, 0, "Threshold for user-space buffer mapping");
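
/*
 * Example (illustrative values, not defaults): the delays above are loader
 * tunables and the mapping threshold is a read/write sysctl and tunable, so
 * they can be overridden from loader.conf(5), e.g.:
 *
 *	kern.cam.periph_selto_delay="2000"
 *	kern.cam.mapmem_thresh="131072"
 */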
114 
115 void
116 periphdriver_register(void *data)
117 {
118 	struct periph_driver *drv = (struct periph_driver *)data;
119 	struct periph_driver **newdrivers, **old;
120 	int ndrivers;
121 
122 again:
123 	ndrivers = nperiph_drivers + 2;
124 	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
125 			    M_WAITOK);
126 	xpt_lock_buses();
127 	if (ndrivers != nperiph_drivers + 2) {
128 		/*
129 		 * Lost a race with a concurrent registration; go around.
130 		 */
131 		xpt_unlock_buses();
132 		free(newdrivers, M_CAMPERIPH);
133 		goto again;
134 	}
135 	if (periph_drivers)
136 		bcopy(periph_drivers, newdrivers,
137 		      sizeof(*newdrivers) * nperiph_drivers);
138 	newdrivers[nperiph_drivers] = drv;
139 	newdrivers[nperiph_drivers + 1] = NULL;
140 	old = periph_drivers;
141 	periph_drivers = newdrivers;
142 	nperiph_drivers++;
143 	xpt_unlock_buses();
144 	if (old)
145 		free(old, M_CAMPERIPH);
146 	/* If driver marked as early or it is late now, initialize it. */
147 	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
148 	    initialized > 1)
149 		(*drv->init)();
150 }
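
/*
 * Illustrative sketch (names are hypothetical): peripheral drivers normally
 * reach periphdriver_register() through the PERIPHDRIVER_DECLARE() macro in
 * cam_periph.h, which arranges for registration at boot or module load:
 *
 *	static struct periph_driver fooperiphdriver = {
 *		fooinit, "foo",
 *		TAILQ_HEAD_INITIALIZER(fooperiphdriver.units), 0
 *	};
 *	PERIPHDRIVER_DECLARE(foo, fooperiphdriver);
 */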
151 
152 int
153 periphdriver_unregister(void *data)
154 {
155 	struct periph_driver *drv = (struct periph_driver *)data;
156 	int error, n;
157 
158 	/* If driver marked as early or it is late now, deinitialize it. */
159 	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
160 	    initialized > 1) {
161 		if (drv->deinit == NULL) {
162 			printf("CAM periph driver '%s' doesn't have deinit.\n",
163 			    drv->driver_name);
164 			return (EOPNOTSUPP);
165 		}
166 		error = drv->deinit();
167 		if (error != 0)
168 			return (error);
169 	}
170 
171 	xpt_lock_buses();
172 	for (n = 0; n < nperiph_drivers && periph_drivers[n] != drv; n++)
173 		;
174 	KASSERT(n < nperiph_drivers,
175 	    ("Periph driver '%s' was not registered", drv->driver_name));
176 	for (; n + 1 < nperiph_drivers; n++)
177 		periph_drivers[n] = periph_drivers[n + 1];
178 	periph_drivers[n] = NULL;
179 	nperiph_drivers--;
180 	xpt_unlock_buses();
181 	return (0);
182 }
183 
184 void
185 periphdriver_init(int level)
186 {
187 	int	i, early;
188 
189 	initialized = max(initialized, level);
190 	for (i = 0; periph_drivers[i] != NULL; i++) {
191 		early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
192 		if (early == initialized)
193 			(*periph_drivers[i]->init)();
194 	}
195 }
196 
197 cam_status
198 cam_periph_alloc(periph_ctor_t *periph_ctor,
199 		 periph_oninv_t *periph_oninvalidate,
200 		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
201 		 char *name, cam_periph_type type, struct cam_path *path,
202 		 ac_callback_t *ac_callback, ac_code code, void *arg)
203 {
204 	struct		periph_driver **p_drv;
205 	struct		cam_sim *sim;
206 	struct		cam_periph *periph;
207 	struct		cam_periph *cur_periph;
208 	path_id_t	path_id;
209 	target_id_t	target_id;
210 	lun_id_t	lun_id;
211 	cam_status	status;
212 	u_int		init_level;
213 
214 	init_level = 0;
215 	/*
216 	 * Handle Hot-Plug scenarios.  If there is already a peripheral
217 	 * of our type assigned to this path, we are likely waiting for
218 	 * final close on an old, invalidated, peripheral.  If this is
219 	 * the case, queue up a deferred call to the peripheral's async
220 	 * handler.  If it looks like a mistaken re-allocation, complain.
221 	 */
222 	if ((periph = cam_periph_find(path, name)) != NULL) {
223 		if ((periph->flags & CAM_PERIPH_INVALID) != 0
224 		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
225 			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
226 			periph->deferred_callback = ac_callback;
227 			periph->deferred_ac = code;
228 			return (CAM_REQ_INPROG);
229 		} else {
230 			printf("cam_periph_alloc: attempt to re-allocate "
231 			       "valid device %s%d rejected flags %#x "
232 			       "refcount %d\n", periph->periph_name,
233 			       periph->unit_number, periph->flags,
234 			       periph->refcount);
235 		}
236 		return (CAM_REQ_INVALID);
237 	}
238 
239 	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
240 					     M_NOWAIT|M_ZERO);
241 
242 	if (periph == NULL)
243 		return (CAM_RESRC_UNAVAIL);
244 
245 	init_level++;
246 
247 	sim = xpt_path_sim(path);
248 	path_id = xpt_path_path_id(path);
249 	target_id = xpt_path_target_id(path);
250 	lun_id = xpt_path_lun_id(path);
251 	periph->periph_start = periph_start;
252 	periph->periph_dtor = periph_dtor;
253 	periph->periph_oninval = periph_oninvalidate;
254 	periph->type = type;
255 	periph->periph_name = name;
256 	periph->scheduled_priority = CAM_PRIORITY_NONE;
257 	periph->immediate_priority = CAM_PRIORITY_NONE;
258 	periph->refcount = 1;		/* Dropped by invalidation. */
259 	periph->sim = sim;
260 	SLIST_INIT(&periph->ccb_list);
261 	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
262 	if (status != CAM_REQ_CMP)
263 		goto failure;
264 	periph->path = path;
265 
266 	xpt_lock_buses();
267 	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
268 		if (strcmp((*p_drv)->driver_name, name) == 0)
269 			break;
270 	}
271 	if (*p_drv == NULL) {
272 		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
273 		xpt_unlock_buses();
274 		xpt_free_path(periph->path);
275 		free(periph, M_CAMPERIPH);
276 		return (CAM_REQ_INVALID);
277 	}
278 	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id,
279 	    path->device->serial_num);
280 	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
281 	while (cur_periph != NULL
282 	    && cur_periph->unit_number < periph->unit_number)
283 		cur_periph = TAILQ_NEXT(cur_periph, unit_links);
284 	if (cur_periph != NULL) {
285 		KASSERT(cur_periph->unit_number != periph->unit_number,
286 		    ("duplicate units on periph list"));
287 		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
288 	} else {
289 		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
290 		(*p_drv)->generation++;
291 	}
292 	xpt_unlock_buses();
293 
294 	init_level++;
295 
296 	status = xpt_add_periph(periph);
297 	if (status != CAM_REQ_CMP)
298 		goto failure;
299 
300 	init_level++;
301 	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));
302 
303 	status = periph_ctor(periph, arg);
304 
305 	if (status == CAM_REQ_CMP)
306 		init_level++;
307 
308 failure:
309 	switch (init_level) {
310 	case 4:
311 		/* Initialized successfully */
312 		break;
313 	case 3:
314 		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
315 		xpt_remove_periph(periph);
316 		/* FALLTHROUGH */
317 	case 2:
318 		xpt_lock_buses();
319 		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
320 		xpt_unlock_buses();
321 		xpt_free_path(periph->path);
322 		/* FALLTHROUGH */
323 	case 1:
324 		free(periph, M_CAMPERIPH);
325 		/* FALLTHROUGH */
326 	case 0:
327 		/* No cleanup to perform. */
328 		break;
329 	default:
330 		panic("%s: Unknown init level", __func__);
331 	}
332 	return(status);
333 }
334 
335 /*
336  * Find a peripheral structure with the specified path and (optionally)
337  * driver name.  If the name is NULL, this function will return
338  * the first peripheral driver that matches the specified path.
339  */
340 struct cam_periph *
341 cam_periph_find(struct cam_path *path, char *name)
342 {
343 	struct periph_driver **p_drv;
344 	struct cam_periph *periph;
345 
346 	xpt_lock_buses();
347 	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
348 		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
349 			continue;
350 
351 		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
352 			if (xpt_path_comp(periph->path, path) == 0) {
353 				xpt_unlock_buses();
354 				cam_periph_assert(periph, MA_OWNED);
355 				return(periph);
356 			}
357 		}
358 		if (name != NULL) {
359 			xpt_unlock_buses();
360 			return(NULL);
361 		}
362 	}
363 	xpt_unlock_buses();
364 	return(NULL);
365 }
366 
367 /*
368  * Find peripheral driver instances attached to the specified path.
369  */
370 int
371 cam_periph_list(struct cam_path *path, struct sbuf *sb)
372 {
373 	struct sbuf local_sb;
374 	struct periph_driver **p_drv;
375 	struct cam_periph *periph;
376 	int count;
377 	int sbuf_alloc_len;
378 
379 	sbuf_alloc_len = 16;
380 retry:
381 	sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
382 	count = 0;
383 	xpt_lock_buses();
384 	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
385 		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
386 			if (xpt_path_comp(periph->path, path) != 0)
387 				continue;
388 
389 			if (sbuf_len(&local_sb) != 0)
390 				sbuf_cat(&local_sb, ",");
391 
392 			sbuf_printf(&local_sb, "%s%d", periph->periph_name,
393 				    periph->unit_number);
394 
395 			if (sbuf_error(&local_sb) == ENOMEM) {
396 				sbuf_alloc_len *= 2;
397 				xpt_unlock_buses();
398 				sbuf_delete(&local_sb);
399 				goto retry;
400 			}
401 			count++;
402 		}
403 	}
404 	xpt_unlock_buses();
405 	sbuf_finish(&local_sb);
406 	if (sbuf_len(sb) != 0)
407 		sbuf_cat(sb, ",");
408 	sbuf_cat(sb, sbuf_data(&local_sb));
409 	sbuf_delete(&local_sb);
410 	return (count);
411 }
412 
413 int
414 cam_periph_acquire(struct cam_periph *periph)
415 {
416 	int status;
417 
418 	if (periph == NULL)
419 		return (EINVAL);
420 
421 	status = ENOENT;
422 	xpt_lock_buses();
423 	if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
424 		periph->refcount++;
425 		status = 0;
426 	}
427 	xpt_unlock_buses();
428 
429 	return (status);
430 }
431 
432 void
433 cam_periph_doacquire(struct cam_periph *periph)
434 {
435 
436 	xpt_lock_buses();
437 	KASSERT(periph->refcount >= 1,
438 	    ("cam_periph_doacquire() with refcount == %d", periph->refcount));
439 	periph->refcount++;
440 	xpt_unlock_buses();
441 }
442 
443 void
444 cam_periph_release_locked_buses(struct cam_periph *periph)
445 {
446 
447 	cam_periph_assert(periph, MA_OWNED);
448 	KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
449 	if (--periph->refcount == 0)
450 		camperiphfree(periph);
451 }
452 
453 void
454 cam_periph_release_locked(struct cam_periph *periph)
455 {
456 
457 	if (periph == NULL)
458 		return;
459 
460 	xpt_lock_buses();
461 	cam_periph_release_locked_buses(periph);
462 	xpt_unlock_buses();
463 }
464 
465 void
466 cam_periph_release(struct cam_periph *periph)
467 {
468 	struct mtx *mtx;
469 
470 	if (periph == NULL)
471 		return;
472 
473 	cam_periph_assert(periph, MA_NOTOWNED);
474 	mtx = cam_periph_mtx(periph);
475 	mtx_lock(mtx);
476 	cam_periph_release_locked(periph);
477 	mtx_unlock(mtx);
478 }
479 
480 /*
481  * hold/unhold act as mutual exclusion for sections of the code that
482  * need to sleep and want to make sure that other sections that
483  * will interfere are held off. This only protects exclusive sections
484  * from each other.
485  */
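/*
 * Minimal usage sketch (illustrative, not taken from a specific driver): an
 * open routine that must sleep during setup might serialize like this:
 *
 *	cam_periph_lock(periph);
 *	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
 *		cam_periph_unlock(periph);
 *		return (error);
 *	}
 *	... sleepable setup work ...
 *	cam_periph_unhold(periph);
 *	cam_periph_unlock(periph);
 */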
486 int
487 cam_periph_hold(struct cam_periph *periph, int priority)
488 {
489 	int error;
490 
491 	/*
492 	 * Increment the reference count on the peripheral
493 	 * while we wait for our lock attempt to succeed
494 	 * to ensure the peripheral doesn't disappear out
495 	 * from under us while we sleep.
496 	 */
497 
498 	if (cam_periph_acquire(periph) != 0)
499 		return (ENXIO);
500 
501 	cam_periph_assert(periph, MA_OWNED);
502 	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
503 		periph->flags |= CAM_PERIPH_LOCK_WANTED;
504 		if ((error = cam_periph_sleep(periph, periph, priority,
505 		    "caplck", 0)) != 0) {
506 			cam_periph_release_locked(periph);
507 			return (error);
508 		}
509 		if (periph->flags & CAM_PERIPH_INVALID) {
510 			cam_periph_release_locked(periph);
511 			return (ENXIO);
512 		}
513 	}
514 
515 	periph->flags |= CAM_PERIPH_LOCKED;
516 	return (0);
517 }
518 
519 void
520 cam_periph_unhold(struct cam_periph *periph)
521 {
522 
523 	cam_periph_assert(periph, MA_OWNED);
524 
525 	periph->flags &= ~CAM_PERIPH_LOCKED;
526 	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
527 		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
528 		wakeup(periph);
529 	}
530 
531 	cam_periph_release_locked(periph);
532 }
533 
534 void
535 cam_periph_hold_boot(struct cam_periph *periph)
536 {
537 
538 	root_mount_hold_token(periph->periph_name, &periph->periph_rootmount);
539 }
540 
541 void
542 cam_periph_release_boot(struct cam_periph *periph)
543 {
544 
545 	root_mount_rel(&periph->periph_rootmount);
546 }
547 
548 /*
549  * Look for the next unit number that is not currently in use for this
550  * peripheral type starting at "newunit".  Also exclude unit numbers that
551  * are reserved for future "hardwiring" unless we already know that this
552  * is a potential wired device.  Only assume that the device is "wired" the
553  * first time through the loop since after that we'll be looking at unit
554  * numbers that did not match a wiring entry.
555  */
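/*
 * For reference, the wiring entries consulted here come from loader(8)
 * hints.  An entry wiring a unit to a bus/target/lun (illustrative names
 * and values) might look like:
 *
 *	hint.da.4.at="scbus2"
 *	hint.da.4.target="3"
 *	hint.da.4.lun="0"
 */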
556 static u_int
557 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, bool wired,
558 		  path_id_t pathid, target_id_t target, lun_id_t lun)
559 {
560 	struct	cam_periph *periph;
561 	char	*periph_name;
562 	int	i, val, dunit, r;
563 	const char *dname, *strval;
564 
565 	periph_name = p_drv->driver_name;
566 	for (;;newunit++) {
567 		for (periph = TAILQ_FIRST(&p_drv->units);
568 		     periph != NULL && periph->unit_number != newunit;
569 		     periph = TAILQ_NEXT(periph, unit_links))
570 			;
571 
572 		if (periph != NULL && periph->unit_number == newunit) {
573 			if (wired) {
574 				xpt_print(periph->path, "Duplicate Wired "
575 				    "Device entry!\n");
576 				xpt_print(periph->path, "Second device (%s "
577 				    "device at scbus%d target %d lun %d) will "
578 				    "not be wired\n", periph_name, pathid,
579 				    target, lun);
580 				wired = false;
581 			}
582 			continue;
583 		}
584 		if (wired)
585 			break;
586 
587 		/*
588 		 * Don't treat the mere presence of any attributes for a device
589 		 * as meaning that it is a wired-down entry.  Instead, insist that
590 		 * one of the matching criteria from camperiphunit() be present
591 		 * for the device.
592 		 */
593 		i = 0;
594 		dname = periph_name;
595 		for (;;) {
596 			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
597 			if (r != 0)
598 				break;
599 
600 			if (newunit != dunit)
601 				continue;
602 			if (resource_string_value(dname, dunit, "sn", &strval) == 0 ||
603 			    resource_int_value(dname, dunit, "lun", &val) == 0 ||
604 			    resource_int_value(dname, dunit, "target", &val) == 0 ||
605 			    resource_string_value(dname, dunit, "at", &strval) == 0)
606 				break;
607 		}
608 		if (r != 0)
609 			break;
610 	}
611 	return (newunit);
612 }
613 
614 static u_int
615 camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
616     target_id_t target, lun_id_t lun, const char *sn)
617 {
618 	bool	wired = false;
619 	u_int	unit;
620 	int	i, val, dunit;
621 	const char *dname, *strval;
622 	char	pathbuf[32], *periph_name;
623 
624 	periph_name = p_drv->driver_name;
625 	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
626 	unit = 0;
627 	i = 0;
628 	dname = periph_name;
629 
630 	for (wired = false; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
631 	     wired = false) {
632 		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
633 			if (strcmp(strval, pathbuf) != 0)
634 				continue;
635 			wired = true;
636 		}
637 		if (resource_int_value(dname, dunit, "target", &val) == 0) {
638 			if (val != target)
639 				continue;
640 			wired = true;
641 		}
642 		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
643 			if (val != lun)
644 				continue;
645 			wired = true;
646 		}
647 		if (resource_string_value(dname, dunit, "sn", &strval) == 0) {
648 			if (sn == NULL || strcmp(strval, sn) != 0)
649 				continue;
650 			wired = true;
651 		}
652 		if (wired) {
653 			unit = dunit;
654 			break;
655 		}
656 	}
657 
658 	/*
659 	 * Either start from 0 looking for the next unit or from
660 	 * the unit number given in the resource config.  This way,
661 	 * if we have wildcard matches, we don't return the same
662 	 * unit number twice.
663 	 */
664 	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);
665 
666 	return (unit);
667 }
668 
669 void
670 cam_periph_invalidate(struct cam_periph *periph)
671 {
672 
673 	cam_periph_assert(periph, MA_OWNED);
674 	/*
675 	 * We only tear down the device the first time a peripheral is
676 	 * invalidated.
677 	 */
678 	if ((periph->flags & CAM_PERIPH_INVALID) != 0)
679 		return;
680 
681 	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
682 	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) {
683 		struct sbuf sb;
684 		char buffer[160];
685 
686 		sbuf_new(&sb, buffer, 160, SBUF_FIXEDLEN);
687 		xpt_denounce_periph_sbuf(periph, &sb);
688 		sbuf_finish(&sb);
689 		sbuf_putbuf(&sb);
690 	}
691 	periph->flags |= CAM_PERIPH_INVALID;
692 	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
693 	if (periph->periph_oninval != NULL)
694 		periph->periph_oninval(periph);
695 	cam_periph_release_locked(periph);
696 }
697 
698 static void
699 camperiphfree(struct cam_periph *periph)
700 {
701 	struct periph_driver **p_drv;
702 	struct periph_driver *drv;
703 
704 	cam_periph_assert(periph, MA_OWNED);
705 	KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
706 	    periph->periph_name, periph->unit_number));
707 	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
708 		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
709 			break;
710 	}
711 	if (*p_drv == NULL) {
712 		printf("camperiphfree: attempt to free non-existent periph\n");
713 		return;
714 	}
715 	/*
716 	 * Cache a pointer to the periph_driver structure.  If a
717 	 * periph_driver is added or removed from the array (see
718 	 * periphdriver_register()) while we drop the topology lock
719 	 * below, p_drv may change.  This doesn't protect against this
720 	 * particular periph_driver going away.  That will require full
721 	 * reference counting in the periph_driver infrastructure.
722 	 */
723 	drv = *p_drv;
724 
725 	/*
726 	 * We need to set this flag before dropping the topology lock, to
727 	 * let anyone who is traversing the list know that this peripheral is
728 	 * about to be freed, and there will be no more reference count
729 	 * checks.
730 	 */
731 	periph->flags |= CAM_PERIPH_FREE;
732 
733 	/*
734 	 * The peripheral destructor semantics dictate calling with only the
735 	 * SIM mutex held.  Since it might sleep, it should not be called
736 	 * with the topology lock held.
737 	 */
738 	xpt_unlock_buses();
739 
740 	/*
741 	 * We need to call the peripheral destructor prior to removing the
742 	 * peripheral from the list.  Otherwise, we risk running into a
743 	 * scenario where the peripheral unit number may get reused
744 	 * (because it has been removed from the list), but some resources
745 	 * used by the peripheral are still hanging around.  In particular,
746 	 * the devfs nodes used by some peripherals like the pass(4) driver
747 	 * aren't fully cleaned up until the destructor is run.  If the
748 	 * unit number is reused before the devfs instance is fully gone,
749 	 * devfs will panic.
750 	 */
751 	if (periph->periph_dtor != NULL)
752 		periph->periph_dtor(periph);
753 
754 	/*
755 	 * The peripheral list is protected by the topology lock. We have to
756 	 * remove the periph from the drv list before we call deferred_ac. The
757 	 * AC_FOUND_DEVICE callback won't create a new periph while the old one is there.
758 	 */
759 	xpt_lock_buses();
760 
761 	TAILQ_REMOVE(&drv->units, periph, unit_links);
762 	drv->generation++;
763 
764 	xpt_remove_periph(periph);
765 
766 	xpt_unlock_buses();
767 	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
768 		xpt_print(periph->path, "Periph destroyed\n");
769 	else
770 		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
771 
772 	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
773 		union ccb ccb;
774 		void *arg;
775 
776 		memset(&ccb, 0, sizeof(ccb));
777 		switch (periph->deferred_ac) {
778 		case AC_FOUND_DEVICE:
779 			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
780 			xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
781 			xpt_action(&ccb);
782 			arg = &ccb;
783 			break;
784 		case AC_PATH_REGISTERED:
785 			xpt_path_inq(&ccb.cpi, periph->path);
786 			arg = &ccb;
787 			break;
788 		default:
789 			arg = NULL;
790 			break;
791 		}
792 		periph->deferred_callback(NULL, periph->deferred_ac,
793 					  periph->path, arg);
794 	}
795 	xpt_free_path(periph->path);
796 	free(periph, M_CAMPERIPH);
797 	xpt_lock_buses();
798 }
799 
800 /*
801  * Map user virtual pointers into kernel virtual address space, so we can
802  * access the memory.  This is now a generic function that centralizes most
803  * of the sanity checks on the data flags, if any.
804  * This also only works for up to maxphys memory.  Since we use
805  * buffers to map stuff in and out, we're limited to the buffer size.
806  */
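/*
 * Illustrative calling pattern (a sketch in the style of pass(4); the error
 * routine and devstat pointer are assumptions): each successful map must be
 * paired with exactly one cam_periph_unmapmem() once the CCB completes:
 *
 *	error = cam_periph_mapmem(ccb, &mapinfo, maxphys);
 *	if (error == 0) {
 *		error = cam_periph_runccb(ccb, fooerror, 0, SF_RETRY_UA,
 *		    softc->device_stats);
 *		cam_periph_unmapmem(ccb, &mapinfo);
 *	}
 */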
807 int
808 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
809     u_int maxmap)
810 {
811 	int numbufs, i;
812 	uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
813 	uint32_t lengths[CAM_PERIPH_MAXMAPS];
814 	uint32_t dirs[CAM_PERIPH_MAXMAPS];
815 
816 	bzero(mapinfo, sizeof(*mapinfo));
817 	if (maxmap == 0)
818 		maxmap = DFLTPHYS;	/* traditional default */
819 	else if (maxmap > maxphys)
820 		maxmap = maxphys;	/* for safety */
821 	switch(ccb->ccb_h.func_code) {
822 	case XPT_DEV_MATCH:
823 		if (ccb->cdm.match_buf_len == 0) {
824 			printf("cam_periph_mapmem: invalid match buffer "
825 			       "length 0\n");
826 			return(EINVAL);
827 		}
828 		if (ccb->cdm.pattern_buf_len > 0) {
829 			data_ptrs[0] = (uint8_t **)&ccb->cdm.patterns;
830 			lengths[0] = ccb->cdm.pattern_buf_len;
831 			dirs[0] = CAM_DIR_OUT;
832 			data_ptrs[1] = (uint8_t **)&ccb->cdm.matches;
833 			lengths[1] = ccb->cdm.match_buf_len;
834 			dirs[1] = CAM_DIR_IN;
835 			numbufs = 2;
836 		} else {
837 			data_ptrs[0] = (uint8_t **)&ccb->cdm.matches;
838 			lengths[0] = ccb->cdm.match_buf_len;
839 			dirs[0] = CAM_DIR_IN;
840 			numbufs = 1;
841 		}
842 		/*
843 		 * This request will not go to the hardware, no reason
844 		 * to be so strict. vmapbuf() is able to map up to maxphys.
845 		 */
846 		maxmap = maxphys;
847 		break;
848 	case XPT_SCSI_IO:
849 	case XPT_CONT_TARGET_IO:
850 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
851 			return(0);
852 		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
853 			return (EINVAL);
854 		data_ptrs[0] = &ccb->csio.data_ptr;
855 		lengths[0] = ccb->csio.dxfer_len;
856 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
857 		numbufs = 1;
858 		break;
859 	case XPT_ATA_IO:
860 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
861 			return(0);
862 		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
863 			return (EINVAL);
864 		data_ptrs[0] = &ccb->ataio.data_ptr;
865 		lengths[0] = ccb->ataio.dxfer_len;
866 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
867 		numbufs = 1;
868 		break;
869 	case XPT_MMC_IO:
870 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
871 			return(0);
872 		/* Two mappings: one for cmd->data and one for cmd->data->data */
873 		data_ptrs[0] = (unsigned char **)&ccb->mmcio.cmd.data;
874 		lengths[0] = sizeof(struct mmc_data *);
875 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
876 		data_ptrs[1] = (unsigned char **)&ccb->mmcio.cmd.data->data;
877 		lengths[1] = ccb->mmcio.cmd.data->len;
878 		dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
879 		numbufs = 2;
880 		break;
881 	case XPT_SMP_IO:
882 		data_ptrs[0] = &ccb->smpio.smp_request;
883 		lengths[0] = ccb->smpio.smp_request_len;
884 		dirs[0] = CAM_DIR_OUT;
885 		data_ptrs[1] = &ccb->smpio.smp_response;
886 		lengths[1] = ccb->smpio.smp_response_len;
887 		dirs[1] = CAM_DIR_IN;
888 		numbufs = 2;
889 		break;
890 	case XPT_NVME_IO:
891 	case XPT_NVME_ADMIN:
892 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
893 			return (0);
894 		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
895 			return (EINVAL);
896 		data_ptrs[0] = &ccb->nvmeio.data_ptr;
897 		lengths[0] = ccb->nvmeio.dxfer_len;
898 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
899 		numbufs = 1;
900 		break;
901 	case XPT_DEV_ADVINFO:
902 		if (ccb->cdai.bufsiz == 0)
903 			return (0);
904 
905 		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
906 		lengths[0] = ccb->cdai.bufsiz;
907 		dirs[0] = CAM_DIR_IN;
908 		numbufs = 1;
909 
910 		/*
911 		 * This request will not go to the hardware, no reason
912 		 * to be so strict. vmapbuf() is able to map up to maxphys.
913 		 */
914 		maxmap = maxphys;
915 		break;
916 	default:
917 		return(EINVAL);
918 		break; /* NOTREACHED */
919 	}
920 
921 	/*
922 	 * Check the transfer length and permissions first, so we don't
923 	 * have to unmap any previously mapped buffers.
924 	 */
925 	for (i = 0; i < numbufs; i++) {
926 		if (lengths[i] > maxmap) {
927 			printf("cam_periph_mapmem: attempt to map %lu bytes, "
928 			       "which is greater than %lu\n",
929 			       (u_long)lengths[i], (u_long)maxmap);
930 			return (E2BIG);
931 		}
932 	}
933 
934 	/*
935 	 * This keeps the kernel stack of the current thread from getting
936 	 * swapped.  In low-memory situations where the kernel stack might
937 	 * otherwise get swapped out, this holds it and allows the thread
938 	 * to make progress and release the kernel mapped pages sooner.
939 	 *
940 	 * XXX KDM should I use P_NOSWAP instead?
941 	 */
942 	PHOLD(curproc);
943 
944 	for (i = 0; i < numbufs; i++) {
945 		/* Save the user's data address. */
946 		mapinfo->orig[i] = *data_ptrs[i];
947 
948 		/*
949 		 * For small buffers use malloc+copyin/copyout instead of
950 		 * mapping to KVA to avoid expensive TLB shootdowns.  For
951 		 * small allocations malloc is backed by UMA, and so much
952 		 * cheaper on SMP systems.
953 		 */
954 		if (lengths[i] <= periph_mapmem_thresh &&
955 		    ccb->ccb_h.func_code != XPT_MMC_IO) {
956 			*data_ptrs[i] = malloc(lengths[i], M_CAMPERIPH,
957 			    M_WAITOK);
958 			if (dirs[i] != CAM_DIR_IN) {
959 				if (copyin(mapinfo->orig[i], *data_ptrs[i],
960 				    lengths[i]) != 0) {
961 					free(*data_ptrs[i], M_CAMPERIPH);
962 					*data_ptrs[i] = mapinfo->orig[i];
963 					goto fail;
964 				}
965 			} else
966 				bzero(*data_ptrs[i], lengths[i]);
967 			continue;
968 		}
969 
970 		/*
971 		 * Get the buffer.
972 		 */
973 		mapinfo->bp[i] = uma_zalloc(pbuf_zone, M_WAITOK);
974 
975 		/* set the direction */
976 		mapinfo->bp[i]->b_iocmd = (dirs[i] == CAM_DIR_OUT) ?
977 		    BIO_WRITE : BIO_READ;
978 
979 		/* Map the buffer into kernel memory. */
980 		if (vmapbuf(mapinfo->bp[i], *data_ptrs[i], lengths[i], 1) < 0) {
981 			uma_zfree(pbuf_zone, mapinfo->bp[i]);
982 			goto fail;
983 		}
984 
985 		/* set our pointer to the new mapped area */
986 		*data_ptrs[i] = mapinfo->bp[i]->b_data;
987 	}
988 
989 	/*
990 	 * Now that we've gotten this far, change ownership of the buffers
991 	 * to the kernel so that we don't run afoul of returning to user
992 	 * space with locks (on the buffer) held.
993 	 */
994 	for (i = 0; i < numbufs; i++) {
995 		if (mapinfo->bp[i])
996 			BUF_KERNPROC(mapinfo->bp[i]);
997 	}
998 
999 	mapinfo->num_bufs_used = numbufs;
1000 	return(0);
1001 
1002 fail:
1003 	for (i--; i >= 0; i--) {
1004 		if (mapinfo->bp[i]) {
1005 			vunmapbuf(mapinfo->bp[i]);
1006 			uma_zfree(pbuf_zone, mapinfo->bp[i]);
1007 		} else
1008 			free(*data_ptrs[i], M_CAMPERIPH);
1009 		*data_ptrs[i] = mapinfo->orig[i];
1010 	}
1011 	PRELE(curproc);
1012 	return(EACCES);
1013 }
1014 
1015 /*
1016  * Unmap memory segments mapped into kernel virtual address space by
1017  * cam_periph_mapmem().
1018  */
1019 void
1020 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
1021 {
1022 	int numbufs, i;
1023 	uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
1024 	uint32_t lengths[CAM_PERIPH_MAXMAPS];
1025 	uint32_t dirs[CAM_PERIPH_MAXMAPS];
1026 
1027 	if (mapinfo->num_bufs_used <= 0) {
1028 		/* nothing to free and the process wasn't held. */
1029 		return;
1030 	}
1031 
1032 	switch (ccb->ccb_h.func_code) {
1033 	case XPT_DEV_MATCH:
1034 		if (ccb->cdm.pattern_buf_len > 0) {
1035 			data_ptrs[0] = (uint8_t **)&ccb->cdm.patterns;
1036 			lengths[0] = ccb->cdm.pattern_buf_len;
1037 			dirs[0] = CAM_DIR_OUT;
1038 			data_ptrs[1] = (uint8_t **)&ccb->cdm.matches;
1039 			lengths[1] = ccb->cdm.match_buf_len;
1040 			dirs[1] = CAM_DIR_IN;
1041 			numbufs = 2;
1042 		} else {
1043 			data_ptrs[0] = (uint8_t **)&ccb->cdm.matches;
1044 			lengths[0] = ccb->cdm.match_buf_len;
1045 			dirs[0] = CAM_DIR_IN;
1046 			numbufs = 1;
1047 		}
1048 		break;
1049 	case XPT_SCSI_IO:
1050 	case XPT_CONT_TARGET_IO:
1051 		data_ptrs[0] = &ccb->csio.data_ptr;
1052 		lengths[0] = ccb->csio.dxfer_len;
1053 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1054 		numbufs = 1;
1055 		break;
1056 	case XPT_ATA_IO:
1057 		data_ptrs[0] = &ccb->ataio.data_ptr;
1058 		lengths[0] = ccb->ataio.dxfer_len;
1059 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1060 		numbufs = 1;
1061 		break;
1062 	case XPT_MMC_IO:
1063 		data_ptrs[0] = (uint8_t **)&ccb->mmcio.cmd.data;
1064 		lengths[0] = sizeof(struct mmc_data *);
1065 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1066 		data_ptrs[1] = (uint8_t **)&ccb->mmcio.cmd.data->data;
1067 		lengths[1] = ccb->mmcio.cmd.data->len;
1068 		dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
1069 		numbufs = 2;
1070 		break;
1071 	case XPT_SMP_IO:
1072 		data_ptrs[0] = &ccb->smpio.smp_request;
1073 		lengths[0] = ccb->smpio.smp_request_len;
1074 		dirs[0] = CAM_DIR_OUT;
1075 		data_ptrs[1] = &ccb->smpio.smp_response;
1076 		lengths[1] = ccb->smpio.smp_response_len;
1077 		dirs[1] = CAM_DIR_IN;
1078 		numbufs = 2;
1079 		break;
1080 	case XPT_NVME_IO:
1081 	case XPT_NVME_ADMIN:
1082 		data_ptrs[0] = &ccb->nvmeio.data_ptr;
1083 		lengths[0] = ccb->nvmeio.dxfer_len;
1084 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1085 		numbufs = 1;
1086 		break;
1087 	case XPT_DEV_ADVINFO:
1088 		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
1089 		lengths[0] = ccb->cdai.bufsiz;
1090 		dirs[0] = CAM_DIR_IN;
1091 		numbufs = 1;
1092 		break;
1093 	default:
1094 		/* allow ourselves to be swapped once again */
1095 		PRELE(curproc);
1096 		return;
1097 		break; /* NOTREACHED */
1098 	}
1099 
1100 	for (i = 0; i < numbufs; i++) {
1101 		if (mapinfo->bp[i]) {
1102 			/* unmap the buffer */
1103 			vunmapbuf(mapinfo->bp[i]);
1104 
1105 			/* release the buffer */
1106 			uma_zfree(pbuf_zone, mapinfo->bp[i]);
1107 		} else {
1108 			if (dirs[i] != CAM_DIR_OUT) {
1109 				copyout(*data_ptrs[i], mapinfo->orig[i],
1110 				    lengths[i]);
1111 			}
1112 			free(*data_ptrs[i], M_CAMPERIPH);
1113 		}
1114 
1115 		/* Set the user's pointer back to the original value */
1116 		*data_ptrs[i] = mapinfo->orig[i];
1117 	}
1118 
1119 	/* allow ourselves to be swapped once again */
1120 	PRELE(curproc);
1121 }
1122 
1123 int
1124 cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
1125 		 int (*error_routine)(union ccb *ccb,
1126 				      cam_flags camflags,
1127 				      uint32_t sense_flags))
1128 {
1129 	union ccb 	     *ccb;
1130 	int 		     error;
1131 	int		     found;
1132 
1133 	error = found = 0;
1134 
1135 	switch(cmd){
1136 	case CAMGETPASSTHRU_0x19:
1137 	case CAMGETPASSTHRU:
1138 		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
1139 		xpt_setup_ccb(&ccb->ccb_h,
1140 			      ccb->ccb_h.path,
1141 			      CAM_PRIORITY_NORMAL);
1142 		ccb->ccb_h.func_code = XPT_GDEVLIST;
1143 
1144 		/*
1145 		 * The idea here is that we walk the list of devices
1146 		 * until we find a passthrough
1147 		 * device.  In the current version of the CAM code, the
1148 		 * only way to determine what type of device we're dealing
1149 		 * with is by its name.
1150 		 */
1151 		while (found == 0) {
1152 			ccb->cgdl.index = 0;
1153 			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
1154 			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
1155 				/* we want the next device in the list */
1156 				xpt_action(ccb);
1157 				if (strncmp(ccb->cgdl.periph_name,
1158 				    "pass", 4) == 0){
1159 					found = 1;
1160 					break;
1161 				}
1162 			}
1163 			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
1164 			    (found == 0)) {
1165 				ccb->cgdl.periph_name[0] = '\0';
1166 				ccb->cgdl.unit_number = 0;
1167 				break;
1168 			}
1169 		}
1170 
1171 		/* copy the result back out */
1172 		bcopy(ccb, addr, sizeof(union ccb));
1173 
1174 		/* and release the ccb */
1175 		xpt_release_ccb(ccb);
1176 
1177 		break;
1178 	default:
1179 		error = ENOTTY;
1180 		break;
1181 	}
1182 	return(error);
1183 }
1184 
1185 static void
1186 cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb)
1187 {
1188 
1189 	panic("%s: already done with ccb %p", __func__, done_ccb);
1190 }
1191 
1192 static void
1193 cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
1194 {
1195 
1196 	/* Caller will release the CCB */
1197 	xpt_path_assert(done_ccb->ccb_h.path, MA_OWNED);
1198 	done_ccb->ccb_h.cbfcnp = cam_periph_done_panic;
1199 	wakeup(&done_ccb->ccb_h.cbfcnp);
1200 }
1201 
1202 static void
1203 cam_periph_ccbwait(union ccb *ccb)
1204 {
1205 
1206 	if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
1207 		while (ccb->ccb_h.cbfcnp != cam_periph_done_panic)
1208 			xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp,
1209 			    PRIBIO, "cbwait", 0);
1210 	}
1211 	KASSERT(ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX &&
1212 	    (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG,
1213 	    ("%s: proceeding with incomplete ccb: ccb=%p, func_code=%#x, "
1214 	     "status=%#x, index=%d", __func__, ccb, ccb->ccb_h.func_code,
1215 	     ccb->ccb_h.status, ccb->ccb_h.pinfo.index));
1216 }
1217 
1218 /*
1219  * Dispatch a CCB and wait for it to complete.  If the CCB has set a
1220  * callback function (ccb->ccb_h.cbfcnp), it will be overwritten and lost.
1221  */
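/*
 * Illustrative synchronous use (a sketch; fooerror and softc->device_stats
 * stand in for a driver's own error routine and devstat; cbfcnp may be left
 * NULL since it is overwritten here):
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	scsi_test_unit_ready(&ccb->csio, 4, NULL, MSG_SIMPLE_Q_TAG,
 *	    SSD_FULL_SIZE, 5000);
 *	error = cam_periph_runccb(ccb, fooerror, 0, SF_RETRY_UA,
 *	    softc->device_stats);
 *	xpt_release_ccb(ccb);
 */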
1222 int
1223 cam_periph_runccb(union ccb *ccb,
1224 		  int (*error_routine)(union ccb *ccb,
1225 				       cam_flags camflags,
1226 				       uint32_t sense_flags),
1227 		  cam_flags camflags, uint32_t sense_flags,
1228 		  struct devstat *ds)
1229 {
1230 	struct bintime *starttime;
1231 	struct bintime ltime;
1232 	int error;
1233 	bool must_poll;
1234 	uint32_t timeout = 1;
1235 
1236 	starttime = NULL;
1237 	xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
1238 	KASSERT((ccb->ccb_h.flags & CAM_UNLOCKED) == 0,
1239 	    ("%s: ccb=%p, func_code=%#x, flags=%#x", __func__, ccb,
1240 	     ccb->ccb_h.func_code, ccb->ccb_h.flags));
1241 
1242 	/*
1243 	 * If the user has supplied a stats structure, and if we understand
1244 	 * this particular type of ccb, record the transaction start.
1245 	 */
1246 	if (ds != NULL &&
1247 	    (ccb->ccb_h.func_code == XPT_SCSI_IO ||
1248 	    ccb->ccb_h.func_code == XPT_ATA_IO ||
1249 	    ccb->ccb_h.func_code == XPT_NVME_IO)) {
1250 		starttime = &ltime;
1251 		binuptime(starttime);
1252 		devstat_start_transaction(ds, starttime);
1253 	}
1254 
1255 	/*
1256 	 * We must poll the I/O while we're dumping. The scheduler is normally
1257 	 * stopped for dumping, except when we call doadump from ddb. While the
1258 	 * scheduler is running in this case, we still need to poll the I/O to
1259 	 * avoid sleeping waiting for the ccb to complete.
1260 	 *
1261 	 * A panic-triggered dump stops the scheduler; any callback from the
1262 	 * shutdown_post_sync event will run with the scheduler stopped, but
1263 	 * before we're officially dumping. To avoid hanging in adashutdown
1264 	 * initiated commands (or other similar situations), we have to test for
1265 	 * either dumping or SCHEDULER_STOPPED() here.
1266 	 *
1267 	 * To avoid locking problems, dumping/polling callers must call
1268 	 * without a periph lock held.
1269 	 */
1270 	must_poll = dumping || SCHEDULER_STOPPED();
1271 	ccb->ccb_h.cbfcnp = cam_periph_done;
1272 
1273 	/*
1274 	 * If we're polling, then we need to ensure that we have ample resources
1275 	 * in the periph.  cam_periph_error can reschedule the ccb by calling
1276 	 * xpt_action and returning ERESTART, so we have to effect the polling
1277 	 * in the do loop below.
1278 	 */
1279 	if (must_poll) {
1280 		if (cam_sim_pollable(ccb->ccb_h.path->bus->sim))
1281 			timeout = xpt_poll_setup(ccb);
1282 		else
1283 			timeout = 0;
1284 	}
1285 
1286 	if (timeout == 0) {
1287 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1288 		error = EBUSY;
1289 	} else {
1290 		xpt_action(ccb);
1291 		do {
1292 			if (must_poll) {
1293 				xpt_pollwait(ccb, timeout);
1294 				timeout = ccb->ccb_h.timeout * 10;
1295 			} else {
1296 				cam_periph_ccbwait(ccb);
1297 			}
1298 			if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
1299 				error = 0;
1300 			else if (error_routine != NULL) {
1301 				/*
1302 				 * cbfcnp is modified by cam_periph_ccbwait so
1303 				 * reset it before we call the error routine
1304 				 * which may call xpt_done.
1305 				 */
1306 				ccb->ccb_h.cbfcnp = cam_periph_done;
1307 				error = (*error_routine)(ccb, camflags, sense_flags);
1308 			} else
1309 				error = 0;
1310 		} while (error == ERESTART);
1311 	}
1312 
1313 	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1314 		cam_release_devq(ccb->ccb_h.path,
1315 				 /* relsim_flags */0,
1316 				 /* openings */0,
1317 				 /* timeout */0,
1318 				 /* getcount_only */ FALSE);
1319 		ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1320 	}
1321 
1322 	if (ds != NULL) {
1323 		uint32_t bytes;
1324 		devstat_tag_type tag;
1325 		bool valid = true;
1326 
1327 		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
1328 			bytes = ccb->csio.dxfer_len - ccb->csio.resid;
1329 			tag = (devstat_tag_type)(ccb->csio.tag_action & 0x3);
1330 		} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1331 			bytes = ccb->ataio.dxfer_len - ccb->ataio.resid;
1332 			tag = (devstat_tag_type)0;
1333 		} else if (ccb->ccb_h.func_code == XPT_NVME_IO) {
1334 			bytes = ccb->nvmeio.dxfer_len; /* NB: resid not possible */
1335 			tag = (devstat_tag_type)0;
1336 		} else {
1337 			valid = false;
1338 		}
1339 		if (valid)
1340 			devstat_end_transaction(ds, bytes, tag,
1341 			    ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ?
1342 			    DEVSTAT_NO_DATA : (ccb->ccb_h.flags & CAM_DIR_OUT) ?
1343 			    DEVSTAT_WRITE : DEVSTAT_READ, NULL, starttime);
1344 	}
1345 
1346 	return(error);
1347 }
1348 
1349 void
1350 cam_freeze_devq(struct cam_path *path)
1351 {
1352 	struct ccb_hdr ccb_h;
1353 
1354 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
1355 	memset(&ccb_h, 0, sizeof(ccb_h));
1356 	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
1357 	ccb_h.func_code = XPT_NOOP;
1358 	ccb_h.flags = CAM_DEV_QFREEZE;
1359 	xpt_action((union ccb *)&ccb_h);
1360 }
1361 
1362 uint32_t
1363 cam_release_devq(struct cam_path *path, uint32_t relsim_flags,
1364 		 uint32_t openings, uint32_t arg,
1365 		 int getcount_only)
1366 {
1367 	struct ccb_relsim crs;
1368 
1369 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
1370 	    relsim_flags, openings, arg, getcount_only));
1371 	memset(&crs, 0, sizeof(crs));
1372 	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
1373 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1374 	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
1375 	crs.release_flags = relsim_flags;
1376 	crs.openings = openings;
1377 	crs.release_timeout = arg;
1378 	xpt_action((union ccb *)&crs);
1379 	return (crs.qfrozen_cnt);
1380 }
1381 
1382 #define saved_ccb_ptr ppriv_ptr0
1383 static void
1384 camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
1385 {
1386 	union ccb      *saved_ccb;
1387 	cam_status	status;
1388 	struct scsi_start_stop_unit *scsi_cmd;
1389 	int		error = 0, error_code, sense_key, asc, ascq;
1390 	uint16_t	done_flags;
1391 
1392 	scsi_cmd = (struct scsi_start_stop_unit *)
1393 	    &done_ccb->csio.cdb_io.cdb_bytes;
1394 	status = done_ccb->ccb_h.status;
1395 
1396 	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1397 		if (scsi_extract_sense_ccb(done_ccb,
1398 		    &error_code, &sense_key, &asc, &ascq)) {
1399 			/*
1400 			 * If the error is "invalid field in CDB",
1401 			 * and the load/eject flag is set, turn the
1402 			 * flag off and try again.  This is just in
1403 			 * case the drive in question barfs on the
1404 			 * load eject flag.  The CAM code should set
1405 			 * the load/eject flag by default for
1406 			 * removable media.
1407 			 */
1408 			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
1409 			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
1410 			     (asc == 0x24) && (ascq == 0x00)) {
1411 				scsi_cmd->how &= ~SSS_LOEJ;
1412 				if (status & CAM_DEV_QFRZN) {
1413 					cam_release_devq(done_ccb->ccb_h.path,
1414 					    0, 0, 0, 0);
1415 					done_ccb->ccb_h.status &=
1416 					    ~CAM_DEV_QFRZN;
1417 				}
1418 				xpt_action(done_ccb);
1419 				goto out;
1420 			}
1421 		}
1422 		error = cam_periph_error(done_ccb, 0,
1423 		    SF_RETRY_UA | SF_NO_PRINT);
1424 		if (error == ERESTART)
1425 			goto out;
1426 		if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
1427 			cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
1428 			done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1429 		}
1430 	} else {
1431 		/*
1432 		 * If we have successfully taken a device from the not
1433 		 * ready to ready state, re-scan the device and re-get
1434 		 * the inquiry information.  Many devices (mostly disks)
1435 		 * don't properly report their inquiry information unless
1436 		 * they are spun up.
1437 		 */
1438 		if (scsi_cmd->opcode == START_STOP_UNIT)
1439 			xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
1440 	}
1441 
1442 	/* If we tried the long wait and still failed, remember that. */
1443 	if ((periph->flags & CAM_PERIPH_RECOVERY_WAIT) &&
1444 	    (done_ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY)) {
1445 		periph->flags &= ~CAM_PERIPH_RECOVERY_WAIT;
1446 		if (error != 0 && done_ccb->ccb_h.retry_count == 0)
1447 			periph->flags |= CAM_PERIPH_RECOVERY_WAIT_FAILED;
1448 	}
1449 
1450 	/*
1451 	 * After the recovery action(s) have completed, return to the original
1452 	 * CCB.  If the recovery CCB has failed, considering its own possible
1453 	 * retries and recovery, assume we are back in the state where we
1454 	 * were originally, but without any recovery hopes left.  In that case,
1455 	 * after the final attempt below, we cancel any further retries,
1456 	 * which also blocks any new recovery attempts for this CCB,
1457 	 * and the result will be the final one returned to the CCB owner.
1458 	 */
1459 	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
1460 	KASSERT(saved_ccb->ccb_h.func_code == XPT_SCSI_IO,
1461 	    ("%s: saved_ccb func_code %#x != XPT_SCSI_IO",
1462 	     __func__, saved_ccb->ccb_h.func_code));
1463 	KASSERT(done_ccb->ccb_h.func_code == XPT_SCSI_IO,
1464 	    ("%s: done_ccb func_code %#x != XPT_SCSI_IO",
1465 	     __func__, done_ccb->ccb_h.func_code));
1466 	saved_ccb->ccb_h.periph_links = done_ccb->ccb_h.periph_links;
1467 	done_flags = done_ccb->ccb_h.alloc_flags;
1468 	bcopy(saved_ccb, done_ccb, sizeof(struct ccb_scsiio));
1469 	done_ccb->ccb_h.alloc_flags = done_flags;
1470 	xpt_free_ccb(saved_ccb);
1471 	if (done_ccb->ccb_h.cbfcnp != camperiphdone)
1472 		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1473 	if (error != 0)
1474 		done_ccb->ccb_h.retry_count = 0;
1475 	xpt_action(done_ccb);
1476 
1477 out:
1478 	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
1479 	cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
1480 }
1481 
1482 /*
1483  * Generic Async Event handler.  Peripheral drivers usually
1484  * filter out the events that require personal attention,
1485  * and leave the rest to this function.
1486  */
1487 void
1488 cam_periph_async(struct cam_periph *periph, uint32_t code,
1489 		 struct cam_path *path, void *arg)
1490 {
1491 	switch (code) {
1492 	case AC_LOST_DEVICE:
1493 		cam_periph_invalidate(periph);
1494 		break;
1495 	default:
1496 		break;
1497 	}
1498 }
1499 
1500 void
1501 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1502 {
1503 	struct ccb_getdevstats cgds;
1504 
1505 	memset(&cgds, 0, sizeof(cgds));
1506 	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1507 	cgds.ccb_h.func_code = XPT_GDEV_STATS;
1508 	xpt_action((union ccb *)&cgds);
1509 	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
1510 }
1511 
1512 void
1513 cam_periph_freeze_after_event(struct cam_periph *periph,
1514 			      struct timeval* event_time, u_int duration_ms)
1515 {
1516 	struct timeval delta;
1517 	struct timeval duration_tv;
1518 
1519 	if (!timevalisset(event_time))
1520 		return;
1521 
1522 	microtime(&delta);
1523 	timevalsub(&delta, event_time);
1524 	duration_tv.tv_sec = duration_ms / 1000;
1525 	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
1526 	if (timevalcmp(&delta, &duration_tv, <)) {
1527 		timevalsub(&duration_tv, &delta);
1528 
1529 		duration_ms = duration_tv.tv_sec * 1000;
1530 		duration_ms += duration_tv.tv_usec / 1000;
1531 		cam_freeze_devq(periph->path);
1532 		cam_release_devq(periph->path,
1533 				RELSIM_RELEASE_AFTER_TIMEOUT,
1534 				/*reduction*/0,
1535 				/*timeout*/duration_ms,
1536 				/*getcount_only*/0);
1537 	}
1538 
1539 }
1540 
1541 static int
1542 camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
1543     cam_flags camflags, uint32_t sense_flags,
1544     int *openings, uint32_t *relsim_flags,
1545     uint32_t *timeout, uint32_t *action, const char **action_string)
1546 {
1547 	struct cam_periph *periph;
1548 	int error;
1549 
1550 	switch (ccb->csio.scsi_status) {
1551 	case SCSI_STATUS_OK:
1552 	case SCSI_STATUS_COND_MET:
1553 	case SCSI_STATUS_INTERMED:
1554 	case SCSI_STATUS_INTERMED_COND_MET:
1555 		error = 0;
1556 		break;
1557 	case SCSI_STATUS_CMD_TERMINATED:
1558 	case SCSI_STATUS_CHECK_COND:
1559 		error = camperiphscsisenseerror(ccb, orig_ccb,
1560 					        camflags,
1561 					        sense_flags,
1562 					        openings,
1563 					        relsim_flags,
1564 					        timeout,
1565 					        action,
1566 					        action_string);
1567 		break;
1568 	case SCSI_STATUS_QUEUE_FULL:
1569 	{
1570 		/* no decrement */
1571 		struct ccb_getdevstats cgds;
1572 
1573 		/*
1574 		 * First off, find out what the current
1575 		 * transaction counts are.
1576 		 */
1577 		memset(&cgds, 0, sizeof(cgds));
1578 		xpt_setup_ccb(&cgds.ccb_h,
1579 			      ccb->ccb_h.path,
1580 			      CAM_PRIORITY_NORMAL);
1581 		cgds.ccb_h.func_code = XPT_GDEV_STATS;
1582 		xpt_action((union ccb *)&cgds);
1583 
1584 		/*
1585 		 * If we were the only transaction active, treat
1586 		 * the QUEUE FULL as if it were a BUSY condition.
1587 		 */
1588 		if (cgds.dev_active != 0) {
1589 			int total_openings;
1590 
1591 			/*
1592 		 	 * Reduce the number of openings to
1593 			 * be 1 less than the amount it took
1594 			 * to get a queue full bounded by the
1595 			 * minimum allowed tag count for this
1596 			 * device.
1597 		 	 */
1598 			total_openings = cgds.dev_active + cgds.dev_openings;
1599 			*openings = cgds.dev_active;
1600 			if (*openings < cgds.mintags)
1601 				*openings = cgds.mintags;
1602 			if (*openings < total_openings)
1603 				*relsim_flags = RELSIM_ADJUST_OPENINGS;
1604 			else {
1605 				/*
1606 				 * Some devices report queue full for
1607 				 * temporary resource shortages.  For
1608 				 * this reason, we allow a minimum
1609 				 * tag count to be entered via a
1610 				 * quirk entry to prevent the queue
1611 				 * count on these devices from falling
1612 				 * to a pessimistically low value.  We
1613 				 * still wait for the next successful
1614 				 * completion, however, before queueing
1615 				 * more transactions to the device.
1616 				 */
1617 				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
1618 			}
1619 			*timeout = 0;
1620 			error = ERESTART;
1621 			*action &= ~SSQ_PRINT_SENSE;
1622 			break;
1623 		}
1624 		/* FALLTHROUGH */
1625 	}
1626 	case SCSI_STATUS_BUSY:
1627 		/*
1628 		 * Restart the queue after either another
1629 		 * command completes or a 1 second timeout.
1630 		 */
1631 		periph = xpt_path_periph(ccb->ccb_h.path);
1632 		if (periph->flags & CAM_PERIPH_INVALID) {
1633 			error = ENXIO;
1634 			*action_string = "Periph was invalidated";
1635 		} else if ((sense_flags & SF_RETRY_BUSY) != 0 ||
1636 		    ccb->ccb_h.retry_count > 0) {
1637 			if ((sense_flags & SF_RETRY_BUSY) == 0)
1638 				ccb->ccb_h.retry_count--;
1639 			error = ERESTART;
1640 			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1641 				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
1642 			*timeout = 1000;
1643 		} else {
1644 			error = EIO;
1645 			*action_string = "Retries exhausted";
1646 		}
1647 		break;
1648 	case SCSI_STATUS_RESERV_CONFLICT:
1649 	default:
1650 		error = EIO;
1651 		break;
1652 	}
1653 	return (error);
1654 }
1655 
1656 static int
1657 camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
1658     cam_flags camflags, uint32_t sense_flags,
1659     int *openings, uint32_t *relsim_flags,
1660     uint32_t *timeout, uint32_t *action, const char **action_string)
1661 {
1662 	struct cam_periph *periph;
1663 	union ccb *orig_ccb = ccb;
1664 	int error, recoveryccb;
1665 	uint16_t flags;
1666 
1667 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
1668 	if (ccb->ccb_h.func_code == XPT_SCSI_IO && ccb->csio.bio != NULL)
1669 		biotrack(ccb->csio.bio, __func__);
1670 #endif
1671 
1672 	periph = xpt_path_periph(ccb->ccb_h.path);
1673 	recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
1674 	if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
1675 		/*
1676 		 * If error recovery is already in progress, don't attempt
1677 		 * to process this error, but requeue it unconditionally
1678 		 * and attempt to process it once error recovery has
1679 		 * completed.  This failed command is probably related to
1680 		 * the error that caused the currently active error recovery
1681 		 * action, so our current recovery efforts should also
1682 		 * address this command.  Be aware that the error recovery
1683 		 * code assumes that only one recovery action is in progress
1684 		 * on a particular peripheral instance at any given time
1685 		 * (e.g. only one saved CCB for error recovery) so it is
1686 		 * imperative that we don't violate this assumption.
1687 		 */
1688 		error = ERESTART;
1689 		*action &= ~SSQ_PRINT_SENSE;
1690 	} else {
1691 		scsi_sense_action err_action;
1692 		struct ccb_getdev cgd;
1693 
1694 		/*
1695 		 * Grab the inquiry data for this device.
1696 		 */
1697 		memset(&cgd, 0, sizeof(cgd));
1698 		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
1699 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1700 		xpt_action((union ccb *)&cgd);
1701 
1702 		err_action = scsi_error_action(&ccb->csio, &cgd.inq_data,
1703 		    sense_flags);
1704 		error = err_action & SS_ERRMASK;
1705 
1706 		/*
1707 		 * Do not autostart sequential access devices
1708 		 * to avoid unexpected tape loading.
1709 		 */
1710 		if ((err_action & SS_MASK) == SS_START &&
1711 		    SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
1712 			*action_string = "Will not autostart a "
1713 			    "sequential access device";
1714 			goto sense_error_done;
1715 		}
1716 
1717 		/*
1718 		 * Avoid recovery recursion if recovery action is the same.
1719 		 */
1720 		if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
1721 			if (((err_action & SS_MASK) == SS_START &&
1722 			     ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) ||
1723 			    ((err_action & SS_MASK) == SS_TUR &&
1724 			     (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) {
1725 				err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
1726 				*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1727 				*timeout = 500;
1728 			}
1729 		}
1730 
1731 		/*
1732 		 * If the recovery action will consume a retry,
1733 		 * make sure we actually have retries available.
1734 		 */
1735 		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
1736 		 	if (ccb->ccb_h.retry_count > 0 &&
1737 			    (periph->flags & CAM_PERIPH_INVALID) == 0)
1738 		 		ccb->ccb_h.retry_count--;
1739 			else {
1740 				*action_string = "Retries exhausted";
1741 				goto sense_error_done;
1742 			}
1743 		}
1744 
1745 		if ((err_action & SS_MASK) >= SS_START) {
1746 			/*
1747 			 * Do common portions of commands that
1748 			 * use recovery CCBs.
1749 			 */
1750 			orig_ccb = xpt_alloc_ccb_nowait();
1751 			if (orig_ccb == NULL) {
1752 				*action_string = "Can't allocate recovery CCB";
1753 				goto sense_error_done;
1754 			}
1755 			/*
1756 			 * Clear freeze flag for original request here, as
1757 			 * this freeze will be dropped as part of ERESTART.
1758 			 */
1759 			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1760 
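			/*
			 * Save a private copy of the original SCSI CCB in the
			 * spare CCB (keeping the spare's own allocation flags)
			 * so that the request can be restored and re-queued
			 * once the recovery command completes.
			 */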
1761 			KASSERT(ccb->ccb_h.func_code == XPT_SCSI_IO,
1762 			    ("%s: ccb func_code %#x != XPT_SCSI_IO",
1763 			     __func__, ccb->ccb_h.func_code));
1764 			flags = orig_ccb->ccb_h.alloc_flags;
1765 			bcopy(ccb, orig_ccb, sizeof(struct ccb_scsiio));
1766 			orig_ccb->ccb_h.alloc_flags = flags;
1767 		}
1768 
1769 		switch (err_action & SS_MASK) {
1770 		case SS_NOP:
1771 			*action_string = "No recovery action needed";
1772 			error = 0;
1773 			break;
1774 		case SS_RETRY:
1775 			*action_string = "Retrying command (per sense data)";
1776 			error = ERESTART;
1777 			break;
1778 		case SS_FAIL:
1779 			*action_string = "Unretryable error";
1780 			break;
1781 		case SS_START:
1782 		{
1783 			int le;
1784 
1785 			/*
1786 			 * Send a start unit command to the device, and
1787 			 * then retry the command.
1788 			 */
1789 			*action_string = "Attempting to start unit";
1790 			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1791 
1792 			/*
1793 			 * Check for removable media and set
1794 			 * load/eject flag appropriately.
1795 			 */
1796 			if (SID_IS_REMOVABLE(&cgd.inq_data))
1797 				le = TRUE;
1798 			else
1799 				le = FALSE;
1800 
1801 			scsi_start_stop(&ccb->csio,
1802 					/*retries*/1,
1803 					camperiphdone,
1804 					MSG_SIMPLE_Q_TAG,
1805 					/*start*/TRUE,
1806 					/*load/eject*/le,
1807 					/*immediate*/FALSE,
1808 					SSD_FULL_SIZE,
1809 					/*timeout*/50000);
1810 			break;
1811 		}
1812 		case SS_TUR:
1813 		{
1814 			/*
1815 			 * Send a Test Unit Ready to the device.
1816 			 * If the 'many' flag is set, we send 120
1817 			 * test unit ready commands, one every half
1818 			 * second.  Otherwise, we just send one TUR.
1819 			 * We only want to do this if the retry
1820 			 * count has not been exhausted.
1821 			 */
1822 			int retries;
1823 
1824 			if ((err_action & SSQ_MANY) != 0 && (periph->flags &
1825 			     CAM_PERIPH_RECOVERY_WAIT_FAILED) == 0) {
1826 				periph->flags |= CAM_PERIPH_RECOVERY_WAIT;
1827 				*action_string = "Polling device for readiness";
1828 				retries = 120;
1829 			} else {
1830 				*action_string = "Testing device for readiness";
1831 				retries = 1;
1832 			}
1833 			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1834 			scsi_test_unit_ready(&ccb->csio,
1835 					     retries,
1836 					     camperiphdone,
1837 					     MSG_SIMPLE_Q_TAG,
1838 					     SSD_FULL_SIZE,
1839 					     /*timeout*/5000);
1840 
1841 			/*
1842 			 * Accomplish our 500ms delay by deferring
1843 			 * the release of our device queue appropriately.
1844 			 */
1845 			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1846 			*timeout = 500;
1847 			break;
1848 		}
1849 		default:
1850 			panic("Unhandled error action %x", err_action);
1851 		}
1852 
1853 		if ((err_action & SS_MASK) >= SS_START) {
1854 			/*
1855 			 * Drop the priority value (which raises the scheduling
1856 			 * priority) so the recovery CCB executes first.  Freeze the queue
1857 			 * after this command is sent so that we can
1858 			 * restore the old csio and have it queued in
1859 			 * the proper order before we release normal
1860 			 * transactions to the device.
1861 			 */
1862 			ccb->ccb_h.pinfo.priority--;
1863 			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1864 			ccb->ccb_h.saved_ccb_ptr = orig_ccb;
1865 			error = ERESTART;
1866 			*orig = orig_ccb;
1867 		}
1868 
1869 sense_error_done:
1870 		*action = err_action;
1871 	}
1872 	return (error);
1873 }
1874 
1875 /*
1876  * Generic error handler.  Peripheral drivers usually filter out the
1877  * errors that they handle in a driver-specific manner, then call this
1878  * function.  Returns 0, ERESTART (the CCB was requeued), or an errno.
1879  */
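/*
 * For illustration only, the usual call pattern in a peripheral driver's
 * completion routine looks roughly like this (the flags passed vary from
 * driver to driver):
 *
 *	error = cam_periph_error(done_ccb, 0, SF_RETRY_UA);
 *	if (error == ERESTART)
 *		return;		(the CCB was reissued and will complete again)
 *	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
 *		cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0,
 *		    /*getcount_only*/0);
 */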
1880 int
1881 cam_periph_error(union ccb *ccb, cam_flags camflags,
1882 		 uint32_t sense_flags)
1883 {
1884 	struct cam_path *newpath;
1885 	union ccb  *orig_ccb, *scan_ccb;
1886 	struct cam_periph *periph;
1887 	const char *action_string;
1888 	cam_status  status;
1889 	int	    frozen, error, openings, devctl_err;
1890 	uint32_t   action, relsim_flags, timeout;
1891 
1892 	action = SSQ_PRINT_SENSE;
1893 	periph = xpt_path_periph(ccb->ccb_h.path);
1894 	action_string = NULL;
1895 	status = ccb->ccb_h.status;
1896 	frozen = (status & CAM_DEV_QFRZN) != 0;
1897 	status &= CAM_STATUS_MASK;
1898 	devctl_err = openings = relsim_flags = timeout = 0;
1899 	orig_ccb = ccb;
1900 
1901 	/* Filter the errors that should be reported via devctl */
1902 	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
1903 	case CAM_CMD_TIMEOUT:
1904 	case CAM_REQ_ABORTED:
1905 	case CAM_REQ_CMP_ERR:
1906 	case CAM_REQ_TERMIO:
1907 	case CAM_UNREC_HBA_ERROR:
1908 	case CAM_DATA_RUN_ERR:
1909 	case CAM_SCSI_STATUS_ERROR:
1910 	case CAM_ATA_STATUS_ERROR:
1911 	case CAM_SMP_STATUS_ERROR:
1912 	case CAM_DEV_NOT_THERE:
1913 	case CAM_NVME_STATUS_ERROR:
1914 		devctl_err++;
1915 		break;
1916 	default:
1917 		break;
1918 	}
1919 
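	/* Map the CAM status into an errno and any follow-up recovery action. */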
1920 	switch (status) {
1921 	case CAM_REQ_CMP:
1922 		error = 0;
1923 		action &= ~SSQ_PRINT_SENSE;
1924 		break;
1925 	case CAM_SCSI_STATUS_ERROR:
1926 		error = camperiphscsistatuserror(ccb, &orig_ccb,
1927 		    camflags, sense_flags, &openings, &relsim_flags,
1928 		    &timeout, &action, &action_string);
1929 		break;
1930 	case CAM_AUTOSENSE_FAIL:
1931 		error = EIO;	/* we have to kill the command */
1932 		break;
1933 	case CAM_UA_ABORT:
1934 	case CAM_UA_TERMIO:
1935 	case CAM_MSG_REJECT_REC:
1936 		/* XXX Don't know that these are correct */
1937 		error = EIO;
1938 		break;
1939 	case CAM_SEL_TIMEOUT:
1940 		if ((camflags & CAM_RETRY_SELTO) != 0) {
1941 			if (ccb->ccb_h.retry_count > 0 &&
1942 			    (periph->flags & CAM_PERIPH_INVALID) == 0) {
1943 				ccb->ccb_h.retry_count--;
1944 				error = ERESTART;
1945 
1946 				/*
1947 				 * Wait a bit to give the device
1948 				 * time to recover before we try again.
1949 				 */
1950 				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1951 				timeout = periph_selto_delay;
1952 				break;
1953 			}
1954 			action_string = "Retries exhausted";
1955 		}
1956 		/* FALLTHROUGH */
1957 	case CAM_DEV_NOT_THERE:
1958 		error = ENXIO;
1959 		action = SSQ_LOST;
1960 		break;
1961 	case CAM_REQ_INVALID:
1962 	case CAM_PATH_INVALID:
1963 	case CAM_NO_HBA:
1964 	case CAM_PROVIDE_FAIL:
1965 	case CAM_REQ_TOO_BIG:
1966 	case CAM_LUN_INVALID:
1967 	case CAM_TID_INVALID:
1968 	case CAM_FUNC_NOTAVAIL:
1969 		error = EINVAL;
1970 		break;
1971 	case CAM_SCSI_BUS_RESET:
1972 	case CAM_BDR_SENT:
1973 		/*
1974 		 * Commands that repeatedly time out and cause these
1975 		 * kinds of error recovery actions should return
1976 		 * CAM_CMD_TIMEOUT, which allows us to safely assume
1977 		 * that this command was an innocent bystander to
1978 		 * these events and should be unconditionally
1979 		 * retried.
1980 		 */
1981 	case CAM_REQUEUE_REQ:
1982 		/* Unconditional requeue if device is still there */
1983 		if (periph->flags & CAM_PERIPH_INVALID) {
1984 			action_string = "Periph was invalidated";
1985 			error = ENXIO;
1986 		} else if (sense_flags & SF_NO_RETRY) {
1987 			error = EIO;
1988 			action_string = "Retry was blocked";
1989 		} else {
1990 			error = ERESTART;
1991 			action &= ~SSQ_PRINT_SENSE;
1992 		}
1993 		break;
1994 	case CAM_RESRC_UNAVAIL:
1995 		/* Wait a bit for the resource shortage to abate. */
1996 		timeout = periph_noresrc_delay;
1997 		/* FALLTHROUGH */
1998 	case CAM_BUSY:
1999 		if (timeout == 0) {
2000 			/* Wait a bit for the busy condition to abate. */
2001 			timeout = periph_busy_delay;
2002 		}
2003 		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
2004 		/* FALLTHROUGH */
2005 	case CAM_ATA_STATUS_ERROR:
2006 	case CAM_NVME_STATUS_ERROR:
2007 	case CAM_SMP_STATUS_ERROR:
2008 	case CAM_REQ_CMP_ERR:
2009 	case CAM_CMD_TIMEOUT:
2010 	case CAM_UNEXP_BUSFREE:
2011 	case CAM_UNCOR_PARITY:
2012 	case CAM_DATA_RUN_ERR:
2013 	default:
2014 		if (periph->flags & CAM_PERIPH_INVALID) {
2015 			error = ENXIO;
2016 			action_string = "Periph was invalidated";
2017 		} else if (ccb->ccb_h.retry_count == 0) {
2018 			error = EIO;
2019 			action_string = "Retries exhausted";
2020 		} else if (sense_flags & SF_NO_RETRY) {
2021 			error = EIO;
2022 			action_string = "Retry was blocked";
2023 		} else {
2024 			ccb->ccb_h.retry_count--;
2025 			error = ERESTART;
2026 		}
2027 		break;
2028 	}
2029 
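	/*
	 * Decide whether to log this failure: SF_PRINT_ALWAYS or the
	 * CAM_DEBUG_INFO path debug flag forces the printout, while
	 * SF_NO_PRINT suppresses it.
	 */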
2030 	if ((sense_flags & SF_PRINT_ALWAYS) ||
2031 	    CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
2032 		action |= SSQ_PRINT_SENSE;
2033 	else if (sense_flags & SF_NO_PRINT)
2034 		action &= ~SSQ_PRINT_SENSE;
2035 	if ((action & SSQ_PRINT_SENSE) != 0)
2036 		cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
2037 	if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
2038 		if (error != ERESTART) {
2039 			if (action_string == NULL)
2040 				action_string = "Unretryable error";
2041 			xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
2042 			    error, action_string);
2043 		} else if (action_string != NULL)
2044 			xpt_print(ccb->ccb_h.path, "%s\n", action_string);
2045 		else {
2046 			xpt_print(ccb->ccb_h.path,
2047 			    "Retrying command, %d more tries remain\n",
2048 			    ccb->ccb_h.retry_count);
2049 		}
2050 	}
2051 
2052 	if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0))
2053 		cam_periph_devctl_notify(orig_ccb);
2054 
2055 	if ((action & SSQ_LOST) != 0) {
2056 		lun_id_t lun_id;
2057 
2058 		/*
2059 		 * For a selection timeout, we consider all of the LUNs on
2060 		 * the target to be gone.  If the status is CAM_DEV_NOT_THERE,
2061 		 * then we only get rid of the device(s) specified by the
2062 		 * path in the original CCB.
2063 		 */
2064 		if (status == CAM_SEL_TIMEOUT)
2065 			lun_id = CAM_LUN_WILDCARD;
2066 		else
2067 			lun_id = xpt_path_lun_id(ccb->ccb_h.path);
2068 
2069 		/* Should we do more if we can't create the path?? */
2070 		if (xpt_create_path(&newpath, periph,
2071 				    xpt_path_path_id(ccb->ccb_h.path),
2072 				    xpt_path_target_id(ccb->ccb_h.path),
2073 				    lun_id) == CAM_REQ_CMP) {
2074 			/*
2075 			 * Let peripheral drivers know that this
2076 			 * device has gone away.
2077 			 */
2078 			xpt_async(AC_LOST_DEVICE, newpath, NULL);
2079 			xpt_free_path(newpath);
2080 		}
2081 	}
2082 
2083 	/* Broadcast UNIT ATTENTIONs to all periphs. */
2084 	if ((action & SSQ_UA) != 0)
2085 		xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);
2086 
2087 	/* Rescan target on "Reported LUNs data has changed" */
2088 	if ((action & SSQ_RESCAN) != 0) {
2089 		if (xpt_create_path(&newpath, NULL,
2090 				    xpt_path_path_id(ccb->ccb_h.path),
2091 				    xpt_path_target_id(ccb->ccb_h.path),
2092 				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
2093 			scan_ccb = xpt_alloc_ccb_nowait();
2094 			if (scan_ccb != NULL) {
2095 				scan_ccb->ccb_h.path = newpath;
2096 				scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
2097 				scan_ccb->crcn.flags = 0;
2098 				xpt_rescan(scan_ccb);
2099 			} else {
2100 				xpt_print(newpath,
2101 				    "Can't allocate CCB to rescan target\n");
2102 				xpt_free_path(newpath);
2103 			}
2104 		}
2105 	}
2106 
2107 	/* Attempt a retry */
2108 	if (error == ERESTART || error == 0) {
2109 		if (frozen != 0)
2110 			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
2111 		if (error == ERESTART)
2112 			xpt_action(ccb);
2113 		if (frozen != 0)
2114 			cam_release_devq(ccb->ccb_h.path,
2115 					 relsim_flags,
2116 					 openings,
2117 					 timeout,
2118 					 /*getcount_only*/0);
2119 	}
2120 
2121 	return (error);
2122 }
2123 
2124 #define CAM_PERIPH_DEVD_MSG_SIZE	256
2125 
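/*
 * Build a devctl(4) notification describing a failed CCB: the device name,
 * serial number, CAM status, status-specific details (SCSI sense bytes or
 * ATA result registers), and the CDB/ACB, then hand it to devd(8) via
 * devctl_notify().
 */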
2126 static void
2127 cam_periph_devctl_notify(union ccb *ccb)
2128 {
2129 	struct cam_periph *periph;
2130 	struct ccb_getdev *cgd;
2131 	struct sbuf sb;
2132 	int serr, sk, asc, ascq;
2133 	char *sbmsg, *type;
2134 
2135 	sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT);
2136 	if (sbmsg == NULL)
2137 		return;
2138 
2139 	sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN);
2140 
2141 	periph = xpt_path_periph(ccb->ccb_h.path);
2142 	sbuf_printf(&sb, "device=%s%d ", periph->periph_name,
2143 	    periph->unit_number);
2144 
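	/*
	 * Fetch the serial number with an XPT_GDEV_TYPE CCB, if one can
	 * be allocated.
	 */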
2145 	sbuf_printf(&sb, "serial=\"");
2146 	if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) {
2147 		xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path,
2148 		    CAM_PRIORITY_NORMAL);
2149 		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
2150 		xpt_action((union ccb *)cgd);
2151 
2152 		if (cgd->ccb_h.status == CAM_REQ_CMP)
2153 			sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len);
2154 		xpt_free_ccb((union ccb *)cgd);
2155 	}
2156 	sbuf_printf(&sb, "\" ");
2157 	sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status);
2158 
2159 	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
2160 	case CAM_CMD_TIMEOUT:
2161 		sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout);
2162 		type = "timeout";
2163 		break;
2164 	case CAM_SCSI_STATUS_ERROR:
2165 		sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status);
2166 		if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq))
2167 			sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ",
2168 			    serr, sk, asc, ascq);
2169 		type = "error";
2170 		break;
2171 	case CAM_ATA_STATUS_ERROR:
2172 		sbuf_printf(&sb, "RES=\"");
2173 		ata_res_sbuf(&ccb->ataio.res, &sb);
2174 		sbuf_printf(&sb, "\" ");
2175 		type = "error";
2176 		break;
2177 	default:
2178 		type = "error";
2179 		break;
2180 	}
2181 
2182 	if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
2183 		sbuf_printf(&sb, "CDB=\"");
2184 		scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb);
2185 		sbuf_printf(&sb, "\" ");
2186 	} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
2187 		sbuf_printf(&sb, "ACB=\"");
2188 		ata_cmd_sbuf(&ccb->ataio.cmd, &sb);
2189 		sbuf_printf(&sb, "\" ");
2190 	}
2191 
2192 	if (sbuf_finish(&sb) == 0)
2193 		devctl_notify("CAM", "periph", type, sbuf_data(&sb));
2194 	sbuf_delete(&sb);
2195 	free(sbmsg, M_CAMPERIPH);
2196 }
2197 
2198 /*
2199  * Sysctl to force an invalidation of the drive right now. Can be
2200  * called with CTLFLAG_MPSAFE since we take the periph lock.
2201  */
2202 int
2203 cam_periph_invalidate_sysctl(SYSCTL_HANDLER_ARGS)
2204 {
2205 	struct cam_periph *periph;
2206 	int error, value;
2207 
2208 	periph = arg1;
2209 	value = 0;
2210 	error = sysctl_handle_int(oidp, &value, 0, req);
2211 	if (error != 0 || req->newptr == NULL || value != 1)
2212 		return (error);
2213 
2214 	cam_periph_lock(periph);
2215 	cam_periph_invalidate(periph);
2216 	cam_periph_unlock(periph);
2217 
2218 	return (0);
2219 }
2220