xref: /freebsd/sys/cam/cam_periph.c (revision de9468837c92cab304c658480bd32dbe4e022d01)
1 /*-
2  * Common functions for CAM "type" (peripheral) drivers.
3  *
4  * SPDX-License-Identifier: BSD-2-Clause
5  *
6  * Copyright (c) 1997, 1998 Justin T. Gibbs.
7  * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions, and the following disclaimer,
15  *    without modification, immediately at the beginning of the file.
16  * 2. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/types.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
37 #include <sys/bio.h>
38 #include <sys/conf.h>
39 #include <sys/devctl.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/buf.h>
43 #include <sys/proc.h>
44 #include <sys/devicestat.h>
45 #include <sys/sbuf.h>
46 #include <sys/sysctl.h>
47 #include <vm/vm.h>
48 #include <vm/vm_extern.h>
49 
50 #include <cam/cam.h>
51 #include <cam/cam_ccb.h>
52 #include <cam/cam_compat.h>
53 #include <cam/cam_queue.h>
54 #include <cam/cam_xpt_periph.h>
55 #include <cam/cam_xpt_internal.h>
56 #include <cam/cam_periph.h>
57 #include <cam/cam_debug.h>
58 #include <cam/cam_sim.h>
59 
60 #include <cam/scsi/scsi_all.h>
61 #include <cam/scsi/scsi_message.h>
62 #include <cam/scsi/scsi_pass.h>
63 
64 static	u_int		camperiphnextunit(struct periph_driver *p_drv,
65 					  u_int newunit, bool wired,
66 					  path_id_t pathid, target_id_t target,
67 					  lun_id_t lun);
68 static	u_int		camperiphunit(struct periph_driver *p_drv,
69 				      path_id_t pathid, target_id_t target,
70 				      lun_id_t lun,
71 				      const char *sn);
72 static	void		camperiphdone(struct cam_periph *periph,
73 					union ccb *done_ccb);
74 static  void		camperiphfree(struct cam_periph *periph);
75 static int		camperiphscsistatuserror(union ccb *ccb,
76 					        union ccb **orig_ccb,
77 						 cam_flags camflags,
78 						 uint32_t sense_flags,
79 						 int *openings,
80 						 uint32_t *relsim_flags,
81 						 uint32_t *timeout,
82 						 uint32_t  *action,
83 						 const char **action_string);
84 static	int		camperiphscsisenseerror(union ccb *ccb,
85 					        union ccb **orig_ccb,
86 					        cam_flags camflags,
87 					        uint32_t sense_flags,
88 					        int *openings,
89 					        uint32_t *relsim_flags,
90 					        uint32_t *timeout,
91 					        uint32_t *action,
92 					        const char **action_string);
93 static void		cam_periph_devctl_notify(union ccb *ccb);
94 
95 static int nperiph_drivers;
96 static int initialized = 0;
97 struct periph_driver **periph_drivers;
98 
99 static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
100 
101 static int periph_selto_delay = 1000;
102 TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
103 static int periph_noresrc_delay = 500;
104 TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
105 static int periph_busy_delay = 500;
106 TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
107 
108 static u_int periph_mapmem_thresh = 65536;
109 SYSCTL_UINT(_kern_cam, OID_AUTO, mapmem_thresh, CTLFLAG_RWTUN,
110     &periph_mapmem_thresh, 0, "Threshold for user-space buffer mapping");
111 
112 void
113 periphdriver_register(void *data)
114 {
115 	struct periph_driver *drv = (struct periph_driver *)data;
116 	struct periph_driver **newdrivers, **old;
117 	int ndrivers;
118 
119 again:
120 	ndrivers = nperiph_drivers + 2;
121 	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
122 			    M_WAITOK);
123 	xpt_lock_buses();
124 	if (ndrivers != nperiph_drivers + 2) {
125 		/*
126 		 * Lost race against itself; go around.
127 		 */
128 		xpt_unlock_buses();
129 		free(newdrivers, M_CAMPERIPH);
130 		goto again;
131 	}
132 	if (periph_drivers)
133 		bcopy(periph_drivers, newdrivers,
134 		      sizeof(*newdrivers) * nperiph_drivers);
135 	newdrivers[nperiph_drivers] = drv;
136 	newdrivers[nperiph_drivers + 1] = NULL;
137 	old = periph_drivers;
138 	periph_drivers = newdrivers;
139 	nperiph_drivers++;
140 	xpt_unlock_buses();
141 	if (old)
142 		free(old, M_CAMPERIPH);
143 	/* If driver marked as early or it is late now, initialize it. */
144 	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
145 	    initialized > 1)
146 		(*drv->init)();
147 }
148 
149 int
150 periphdriver_unregister(void *data)
151 {
152 	struct periph_driver *drv = (struct periph_driver *)data;
153 	int error, n;
154 
155 	/* If driver marked as early or it is late now, deinitialize it. */
156 	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
157 	    initialized > 1) {
158 		if (drv->deinit == NULL) {
159 			printf("CAM periph driver '%s' doesn't have deinit.\n",
160 			    drv->driver_name);
161 			return (EOPNOTSUPP);
162 		}
163 		error = drv->deinit();
164 		if (error != 0)
165 			return (error);
166 	}
167 
168 	xpt_lock_buses();
169 	for (n = 0; n < nperiph_drivers && periph_drivers[n] != drv; n++)
170 		;
171 	KASSERT(n < nperiph_drivers,
172 	    ("Periph driver '%s' was not registered", drv->driver_name));
173 	for (; n + 1 < nperiph_drivers; n++)
174 		periph_drivers[n] = periph_drivers[n + 1];
175 	periph_drivers[n + 1] = NULL;
176 	nperiph_drivers--;
177 	xpt_unlock_buses();
178 	return (0);
179 }
180 
181 void
182 periphdriver_init(int level)
183 {
184 	int	i, early;
185 
186 	initialized = max(initialized, level);
187 	for (i = 0; periph_drivers[i] != NULL; i++) {
188 		early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
189 		if (early == initialized)
190 			(*periph_drivers[i]->init)();
191 	}
192 }
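
/*
 * Illustrative sketch only (not part of this file): peripheral drivers
 * normally reach periphdriver_register() through the PERIPHDRIVER_DECLARE()
 * macro rather than calling it directly.  The "xx" driver and its callbacks
 * below are hypothetical.
 *
 *	static struct periph_driver xxdriver = {
 *		xxinit, "xx",
 *		TAILQ_HEAD_INITIALIZER(xxdriver.units),
 *		0,		// generation
 *		0,		// flags
 *		xxdeinit
 *	};
 *	PERIPHDRIVER_DECLARE(xx, xxdriver);
 */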
193 
194 cam_status
195 cam_periph_alloc(periph_ctor_t *periph_ctor,
196 		 periph_oninv_t *periph_oninvalidate,
197 		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
198 		 char *name, cam_periph_type type, struct cam_path *path,
199 		 ac_callback_t *ac_callback, ac_code code, void *arg)
200 {
201 	struct		periph_driver **p_drv;
202 	struct		cam_sim *sim;
203 	struct		cam_periph *periph;
204 	struct		cam_periph *cur_periph;
205 	path_id_t	path_id;
206 	target_id_t	target_id;
207 	lun_id_t	lun_id;
208 	cam_status	status;
209 	u_int		init_level;
210 
211 	init_level = 0;
212 	/*
213 	 * Handle Hot-Plug scenarios.  If there is already a peripheral
214 	 * of our type assigned to this path, we are likely waiting for
215 	 * final close on an old, invalidated, peripheral.  If this is
216 	 * the case, queue up a deferred call to the peripheral's async
217 	 * handler.  If it looks like a mistaken re-allocation, complain.
218 	 */
219 	if ((periph = cam_periph_find(path, name)) != NULL) {
220 		if ((periph->flags & CAM_PERIPH_INVALID) != 0
221 		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
222 			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
223 			periph->deferred_callback = ac_callback;
224 			periph->deferred_ac = code;
225 			return (CAM_REQ_INPROG);
226 		} else {
227 			printf("cam_periph_alloc: attempt to re-allocate "
228 			       "valid device %s%d rejected flags %#x "
229 			       "refcount %d\n", periph->periph_name,
230 			       periph->unit_number, periph->flags,
231 			       periph->refcount);
232 		}
233 		return (CAM_REQ_INVALID);
234 	}
235 
236 	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
237 					     M_NOWAIT|M_ZERO);
238 
239 	if (periph == NULL)
240 		return (CAM_RESRC_UNAVAIL);
241 
242 	init_level++;
243 
244 	sim = xpt_path_sim(path);
245 	path_id = xpt_path_path_id(path);
246 	target_id = xpt_path_target_id(path);
247 	lun_id = xpt_path_lun_id(path);
248 	periph->periph_start = periph_start;
249 	periph->periph_dtor = periph_dtor;
250 	periph->periph_oninval = periph_oninvalidate;
251 	periph->type = type;
252 	periph->periph_name = name;
253 	periph->scheduled_priority = CAM_PRIORITY_NONE;
254 	periph->immediate_priority = CAM_PRIORITY_NONE;
255 	periph->refcount = 1;		/* Dropped by invalidation. */
256 	periph->sim = sim;
257 	SLIST_INIT(&periph->ccb_list);
258 	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
259 	if (status != CAM_REQ_CMP)
260 		goto failure;
261 	periph->path = path;
262 
263 	xpt_lock_buses();
264 	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
265 		if (strcmp((*p_drv)->driver_name, name) == 0)
266 			break;
267 	}
268 	if (*p_drv == NULL) {
269 		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
270 		xpt_unlock_buses();
271 		xpt_free_path(periph->path);
272 		free(periph, M_CAMPERIPH);
273 		return (CAM_REQ_INVALID);
274 	}
275 	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id,
276 	    path->device->serial_num);
277 	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
278 	while (cur_periph != NULL
279 	    && cur_periph->unit_number < periph->unit_number)
280 		cur_periph = TAILQ_NEXT(cur_periph, unit_links);
281 	if (cur_periph != NULL) {
282 		KASSERT(cur_periph->unit_number != periph->unit_number,
283 		    ("duplicate units on periph list"));
284 		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
285 	} else {
286 		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
287 		(*p_drv)->generation++;
288 	}
289 	xpt_unlock_buses();
290 
291 	init_level++;
292 
293 	status = xpt_add_periph(periph);
294 	if (status != CAM_REQ_CMP)
295 		goto failure;
296 
297 	init_level++;
298 	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));
299 
300 	status = periph_ctor(periph, arg);
301 
302 	if (status == CAM_REQ_CMP)
303 		init_level++;
304 
305 failure:
306 	switch (init_level) {
307 	case 4:
308 		/* Initialized successfully */
309 		break;
310 	case 3:
311 		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
312 		xpt_remove_periph(periph);
313 		/* FALLTHROUGH */
314 	case 2:
315 		xpt_lock_buses();
316 		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
317 		xpt_unlock_buses();
318 		xpt_free_path(periph->path);
319 		/* FALLTHROUGH */
320 	case 1:
321 		free(periph, M_CAMPERIPH);
322 		/* FALLTHROUGH */
323 	case 0:
324 		/* No cleanup to perform. */
325 		break;
326 	default:
327 		panic("%s: Unknown init level", __func__);
328 	}
329 	return(status);
330 }
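
/*
 * Illustrative sketch only: cam_periph_alloc() is typically invoked from a
 * driver's AC_FOUND_DEVICE async callback.  The "xx" names are hypothetical.
 *
 *	status = cam_periph_alloc(xxregister, xxoninvalidate, xxcleanup,
 *	    xxstart, "xx", CAM_PERIPH_BIO, path, xxasync,
 *	    AC_FOUND_DEVICE, cgd);
 *	if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
 *		printf("xxasync: Unable to attach to new device "
 *		    "due to status %#x\n", status);
 */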
331 
332 /*
333  * Find a peripheral structure with the specified path, target, lun,
334  * and (optionally) driver name.  If the name is NULL, this function will return
335  * the first peripheral driver that matches the specified path.
336  */
337 struct cam_periph *
338 cam_periph_find(struct cam_path *path, char *name)
339 {
340 	struct periph_driver **p_drv;
341 	struct cam_periph *periph;
342 
343 	xpt_lock_buses();
344 	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
345 		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
346 			continue;
347 
348 		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
349 			if (xpt_path_comp(periph->path, path) == 0) {
350 				xpt_unlock_buses();
351 				cam_periph_assert(periph, MA_OWNED);
352 				return(periph);
353 			}
354 		}
355 		if (name != NULL) {
356 			xpt_unlock_buses();
357 			return(NULL);
358 		}
359 	}
360 	xpt_unlock_buses();
361 	return(NULL);
362 }
363 
364 /*
365  * Find peripheral driver instances attached to the specified path.
366  */
367 int
368 cam_periph_list(struct cam_path *path, struct sbuf *sb)
369 {
370 	struct sbuf local_sb;
371 	struct periph_driver **p_drv;
372 	struct cam_periph *periph;
373 	int count;
374 	int sbuf_alloc_len;
375 
376 	sbuf_alloc_len = 16;
377 retry:
378 	sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
379 	count = 0;
380 	xpt_lock_buses();
381 	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
382 		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
383 			if (xpt_path_comp(periph->path, path) != 0)
384 				continue;
385 
386 			if (sbuf_len(&local_sb) != 0)
387 				sbuf_cat(&local_sb, ",");
388 
389 			sbuf_printf(&local_sb, "%s%d", periph->periph_name,
390 				    periph->unit_number);
391 
392 			if (sbuf_error(&local_sb) == ENOMEM) {
393 				sbuf_alloc_len *= 2;
394 				xpt_unlock_buses();
395 				sbuf_delete(&local_sb);
396 				goto retry;
397 			}
398 			count++;
399 		}
400 	}
401 	xpt_unlock_buses();
402 	sbuf_finish(&local_sb);
403 	if (sbuf_len(sb) != 0)
404 		sbuf_cat(sb, ",");
405 	sbuf_cat(sb, sbuf_data(&local_sb));
406 	sbuf_delete(&local_sb);
407 	return (count);
408 }
409 
410 int
411 cam_periph_acquire(struct cam_periph *periph)
412 {
413 	int status;
414 
415 	if (periph == NULL)
416 		return (EINVAL);
417 
418 	status = ENOENT;
419 	xpt_lock_buses();
420 	if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
421 		periph->refcount++;
422 		status = 0;
423 	}
424 	xpt_unlock_buses();
425 
426 	return (status);
427 }
428 
429 void
430 cam_periph_doacquire(struct cam_periph *periph)
431 {
432 
433 	xpt_lock_buses();
434 	KASSERT(periph->refcount >= 1,
435 	    ("cam_periph_doacquire() with refcount == %d", periph->refcount));
436 	periph->refcount++;
437 	xpt_unlock_buses();
438 }
439 
440 void
441 cam_periph_release_locked_buses(struct cam_periph *periph)
442 {
443 
444 	cam_periph_assert(periph, MA_OWNED);
445 	KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
446 	if (--periph->refcount == 0)
447 		camperiphfree(periph);
448 }
449 
450 void
451 cam_periph_release_locked(struct cam_periph *periph)
452 {
453 
454 	if (periph == NULL)
455 		return;
456 
457 	xpt_lock_buses();
458 	cam_periph_release_locked_buses(periph);
459 	xpt_unlock_buses();
460 }
461 
462 void
463 cam_periph_release(struct cam_periph *periph)
464 {
465 	struct mtx *mtx;
466 
467 	if (periph == NULL)
468 		return;
469 
470 	cam_periph_assert(periph, MA_NOTOWNED);
471 	mtx = cam_periph_mtx(periph);
472 	mtx_lock(mtx);
473 	cam_periph_release_locked(periph);
474 	mtx_unlock(mtx);
475 }
476 
477 /*
478  * hold/unhold act as mutual exclusion for sections of the code that
479  * need to sleep and want to make sure that other sections that
480  * will interfere are held off. This only protects exclusive sections
481  * from each other.
482  */
483 int
484 cam_periph_hold(struct cam_periph *periph, int priority)
485 {
486 	int error;
487 
488 	/*
489 	 * Increment the reference count on the peripheral
490 	 * while we wait for our lock attempt to succeed
491 	 * to ensure the peripheral doesn't disappear out
492 	 * from under us while we sleep.
493 	 */
494 
495 	if (cam_periph_acquire(periph) != 0)
496 		return (ENXIO);
497 
498 	cam_periph_assert(periph, MA_OWNED);
499 	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
500 		periph->flags |= CAM_PERIPH_LOCK_WANTED;
501 		if ((error = cam_periph_sleep(periph, periph, priority,
502 		    "caplck", 0)) != 0) {
503 			cam_periph_release_locked(periph);
504 			return (error);
505 		}
506 		if (periph->flags & CAM_PERIPH_INVALID) {
507 			cam_periph_release_locked(periph);
508 			return (ENXIO);
509 		}
510 	}
511 
512 	periph->flags |= CAM_PERIPH_LOCKED;
513 	return (0);
514 }
515 
516 void
517 cam_periph_unhold(struct cam_periph *periph)
518 {
519 
520 	cam_periph_assert(periph, MA_OWNED);
521 
522 	periph->flags &= ~CAM_PERIPH_LOCKED;
523 	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
524 		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
525 		wakeup(periph);
526 	}
527 
528 	cam_periph_release_locked(periph);
529 }
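
/*
 * Illustrative sketch only: a typical periph open routine pairs
 * cam_periph_hold()/cam_periph_unhold() around its sleepable, exclusive
 * section.  The hypothetical xxopen() below assumes "periph" was recovered
 * from the device's softc.
 *
 *	cam_periph_lock(periph);
 *	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
 *		cam_periph_unlock(periph);
 *		return (error);
 *	}
 *	// ... sleepable work, e.g. media checks in xxopen() ...
 *	cam_periph_unhold(periph);
 *	cam_periph_unlock(periph);
 */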
530 
531 void
532 cam_periph_hold_boot(struct cam_periph *periph)
533 {
534 
535 	root_mount_hold_token(periph->periph_name, &periph->periph_rootmount);
536 }
537 
538 void
539 cam_periph_release_boot(struct cam_periph *periph)
540 {
541 
542 	root_mount_rel(&periph->periph_rootmount);
543 }
544 
545 /*
546  * Look for the next unit number that is not currently in use for this
547  * peripheral type starting at "newunit".  Also exclude unit numbers that
548  * are reserved for future "hardwiring" unless we already know that this
549  * is a potential wired device.  Only assume that the device is "wired" the
550  * first time through the loop since after that we'll be looking at unit
551  * numbers that did not match a wiring entry.
552  */
553 static u_int
554 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, bool wired,
555 		  path_id_t pathid, target_id_t target, lun_id_t lun)
556 {
557 	struct	cam_periph *periph;
558 	char	*periph_name;
559 	int	i, val, dunit, r;
560 	const char *dname, *strval;
561 
562 	periph_name = p_drv->driver_name;
563 	for (;;newunit++) {
564 		for (periph = TAILQ_FIRST(&p_drv->units);
565 		     periph != NULL && periph->unit_number != newunit;
566 		     periph = TAILQ_NEXT(periph, unit_links))
567 			;
568 
569 		if (periph != NULL && periph->unit_number == newunit) {
570 			if (wired) {
571 				xpt_print(periph->path, "Duplicate Wired "
572 				    "Device entry!\n");
573 				xpt_print(periph->path, "Second device (%s "
574 				    "device at scbus%d target %d lun %d) will "
575 				    "not be wired\n", periph_name, pathid,
576 				    target, lun);
577 				wired = false;
578 			}
579 			continue;
580 		}
581 		if (wired)
582 			break;
583 
584 		/*
585 		 * Don't take the mere presence of any attributes of a device
586 		 * to mean that it is for a wired down entry. Instead, insist that
587 		 * one of the matching criteria from camperiphunit be present
588 		 * for the device.
589 		 */
590 		i = 0;
591 		dname = periph_name;
592 		for (;;) {
593 			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
594 			if (r != 0)
595 				break;
596 
597 			if (newunit != dunit)
598 				continue;
599 			if (resource_string_value(dname, dunit, "sn", &strval) == 0 ||
600 			    resource_int_value(dname, dunit, "lun", &val) == 0 ||
601 			    resource_int_value(dname, dunit, "target", &val) == 0 ||
602 			    resource_string_value(dname, dunit, "at", &strval) == 0)
603 				break;
604 		}
605 		if (r != 0)
606 			break;
607 	}
608 	return (newunit);
609 }
610 
611 static u_int
612 camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
613     target_id_t target, lun_id_t lun, const char *sn)
614 {
615 	bool	wired = false;
616 	u_int	unit;
617 	int	i, val, dunit;
618 	const char *dname, *strval;
619 	char	pathbuf[32], *periph_name;
620 
621 	periph_name = p_drv->driver_name;
622 	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
623 	unit = 0;
624 	i = 0;
625 	dname = periph_name;
626 
627 	for (wired = false; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
628 	     wired = false) {
629 		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
630 			if (strcmp(strval, pathbuf) != 0)
631 				continue;
632 			wired = true;
633 		}
634 		if (resource_int_value(dname, dunit, "target", &val) == 0) {
635 			if (val != target)
636 				continue;
637 			wired = true;
638 		}
639 		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
640 			if (val != lun)
641 				continue;
642 			wired = true;
643 		}
644 		if (resource_string_value(dname, dunit, "sn", &strval) == 0) {
645 			if (sn == NULL || strcmp(strval, sn) != 0)
646 				continue;
647 			wired = true;
648 		}
649 		if (wired) {
650 			unit = dunit;
651 			break;
652 		}
653 	}
654 
655 	/*
656 	 * Either start from 0 looking for the next unit or from
657 	 * the unit number given in the resource config.  This way,
658 	 * if we have wildcard matches, we don't return the same
659 	 * unit number twice.
660 	 */
661 	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);
662 
663 	return (unit);
664 }
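
/*
 * Illustrative sketch only: the "at", "target", "lun" and "sn" keywords
 * consulted above come from the kernel environment, e.g. /boot/device.hints
 * entries such as the hypothetical ones below, which would wire unit 4 of
 * an "xx" periph driver to a specific bus/target/lun and serial number.
 *
 *	hint.xx.4.at="scbus2"
 *	hint.xx.4.target="0"
 *	hint.xx.4.lun="0"
 *	hint.xx.4.sn="ABC12345"
 */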
665 
666 void
667 cam_periph_invalidate(struct cam_periph *periph)
668 {
669 
670 	cam_periph_assert(periph, MA_OWNED);
671 	/*
672 	 * We only tear down the device the first time a peripheral is
673 	 * invalidated.
674 	 */
675 	if ((periph->flags & CAM_PERIPH_INVALID) != 0)
676 		return;
677 
678 	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
679 	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) {
680 		struct sbuf sb;
681 		char buffer[160];
682 
683 		sbuf_new(&sb, buffer, 160, SBUF_FIXEDLEN);
684 		xpt_denounce_periph_sbuf(periph, &sb);
685 		sbuf_finish(&sb);
686 		sbuf_putbuf(&sb);
687 	}
688 	periph->flags |= CAM_PERIPH_INVALID;
689 	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
690 	if (periph->periph_oninval != NULL)
691 		periph->periph_oninval(periph);
692 	cam_periph_release_locked(periph);
693 }
694 
695 static void
696 camperiphfree(struct cam_periph *periph)
697 {
698 	struct periph_driver **p_drv;
699 	struct periph_driver *drv;
700 
701 	cam_periph_assert(periph, MA_OWNED);
702 	KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
703 	    periph->periph_name, periph->unit_number));
704 	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
705 		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
706 			break;
707 	}
708 	if (*p_drv == NULL) {
709 		printf("camperiphfree: attempt to free non-existent periph\n");
710 		return;
711 	}
712 	/*
713 	 * Cache a pointer to the periph_driver structure.  If a
714 	 * periph_driver is added or removed from the array (see
715 	 * periphdriver_register()) while we drop the topology lock
716 	 * below, p_drv may change.  This doesn't protect against this
717 	 * particular periph_driver going away.  That will require full
718 	 * reference counting in the periph_driver infrastructure.
719 	 */
720 	drv = *p_drv;
721 
722 	/*
723 	 * We need to set this flag before dropping the topology lock, to
724 	 * let anyone who is traversing the list know that this peripheral is
725 	 * about to be freed, and that there will be no more reference count
726 	 * checks.
727 	 */
728 	periph->flags |= CAM_PERIPH_FREE;
729 
730 	/*
731 	 * The peripheral destructor semantics dictate calling with only the
732 	 * SIM mutex held.  Since it might sleep, it should not be called
733 	 * with the topology lock held.
734 	 */
735 	xpt_unlock_buses();
736 
737 	/*
738 	 * We need to call the peripheral destructor prior to removing the
739 	 * peripheral from the list.  Otherwise, we risk running into a
740 	 * scenario where the peripheral unit number may get reused
741 	 * (because it has been removed from the list), but some resources
742 	 * used by the peripheral are still hanging around.  In particular,
743 	 * the devfs nodes used by some peripherals like the pass(4) driver
744 	 * aren't fully cleaned up until the destructor is run.  If the
745 	 * unit number is reused before the devfs instance is fully gone,
746 	 * devfs will panic.
747 	 */
748 	if (periph->periph_dtor != NULL)
749 		periph->periph_dtor(periph);
750 
751 	/*
752 	 * The peripheral list is protected by the topology lock. We have to
753 	 * remove the periph from the drv list before we call deferred_ac. The
754 	 * AC_FOUND_DEVICE callback won't create a new periph if it's still there.
755 	 */
756 	xpt_lock_buses();
757 
758 	TAILQ_REMOVE(&drv->units, periph, unit_links);
759 	drv->generation++;
760 
761 	xpt_remove_periph(periph);
762 
763 	xpt_unlock_buses();
764 	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
765 		xpt_print(periph->path, "Periph destroyed\n");
766 	else
767 		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
768 
769 	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
770 		union ccb ccb;
771 		void *arg;
772 
773 		memset(&ccb, 0, sizeof(ccb));
774 		switch (periph->deferred_ac) {
775 		case AC_FOUND_DEVICE:
776 			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
777 			xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
778 			xpt_action(&ccb);
779 			arg = &ccb;
780 			break;
781 		case AC_PATH_REGISTERED:
782 			xpt_path_inq(&ccb.cpi, periph->path);
783 			arg = &ccb;
784 			break;
785 		default:
786 			arg = NULL;
787 			break;
788 		}
789 		periph->deferred_callback(NULL, periph->deferred_ac,
790 					  periph->path, arg);
791 	}
792 	xpt_free_path(periph->path);
793 	free(periph, M_CAMPERIPH);
794 	xpt_lock_buses();
795 }
796 
797 /*
798  * Map user virtual pointers into kernel virtual address space, so we can
799  * access the memory.  This is now a generic function that centralizes most
800  * of the sanity checks on the data flags, if any.
801  * This also only works for up to maxphys memory.  Since we use
802  * buffers to map stuff in and out, we're limited to the buffer size.
803  */
804 int
805 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
806     u_int maxmap)
807 {
808 	int numbufs, i;
809 	uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
810 	uint32_t lengths[CAM_PERIPH_MAXMAPS];
811 	uint32_t dirs[CAM_PERIPH_MAXMAPS];
812 
813 	bzero(mapinfo, sizeof(*mapinfo));
814 	if (maxmap == 0)
815 		maxmap = DFLTPHYS;	/* traditional default */
816 	else if (maxmap > maxphys)
817 		maxmap = maxphys;	/* for safety */
818 	switch(ccb->ccb_h.func_code) {
819 	case XPT_DEV_MATCH:
820 		if (ccb->cdm.match_buf_len == 0) {
821 			printf("cam_periph_mapmem: invalid match buffer "
822 			       "length 0\n");
823 			return(EINVAL);
824 		}
825 		if (ccb->cdm.pattern_buf_len > 0) {
826 			data_ptrs[0] = (uint8_t **)&ccb->cdm.patterns;
827 			lengths[0] = ccb->cdm.pattern_buf_len;
828 			dirs[0] = CAM_DIR_OUT;
829 			data_ptrs[1] = (uint8_t **)&ccb->cdm.matches;
830 			lengths[1] = ccb->cdm.match_buf_len;
831 			dirs[1] = CAM_DIR_IN;
832 			numbufs = 2;
833 		} else {
834 			data_ptrs[0] = (uint8_t **)&ccb->cdm.matches;
835 			lengths[0] = ccb->cdm.match_buf_len;
836 			dirs[0] = CAM_DIR_IN;
837 			numbufs = 1;
838 		}
839 		/*
840 		 * This request will not go to the hardware, so there is no
841 		 * reason to be so strict.  vmapbuf() is able to map up to maxphys.
842 		 */
843 		maxmap = maxphys;
844 		break;
845 	case XPT_SCSI_IO:
846 	case XPT_CONT_TARGET_IO:
847 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
848 			return(0);
849 		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
850 			return (EINVAL);
851 		data_ptrs[0] = &ccb->csio.data_ptr;
852 		lengths[0] = ccb->csio.dxfer_len;
853 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
854 		numbufs = 1;
855 		break;
856 	case XPT_ATA_IO:
857 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
858 			return(0);
859 		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
860 			return (EINVAL);
861 		data_ptrs[0] = &ccb->ataio.data_ptr;
862 		lengths[0] = ccb->ataio.dxfer_len;
863 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
864 		numbufs = 1;
865 		break;
866 	case XPT_MMC_IO:
867 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
868 			return(0);
869 		/* Two mappings: one for cmd->data and one for cmd->data->data */
870 		data_ptrs[0] = (unsigned char **)&ccb->mmcio.cmd.data;
871 		lengths[0] = sizeof(struct mmc_data *);
872 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
873 		data_ptrs[1] = (unsigned char **)&ccb->mmcio.cmd.data->data;
874 		lengths[1] = ccb->mmcio.cmd.data->len;
875 		dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
876 		numbufs = 2;
877 		break;
878 	case XPT_SMP_IO:
879 		data_ptrs[0] = &ccb->smpio.smp_request;
880 		lengths[0] = ccb->smpio.smp_request_len;
881 		dirs[0] = CAM_DIR_OUT;
882 		data_ptrs[1] = &ccb->smpio.smp_response;
883 		lengths[1] = ccb->smpio.smp_response_len;
884 		dirs[1] = CAM_DIR_IN;
885 		numbufs = 2;
886 		break;
887 	case XPT_NVME_IO:
888 	case XPT_NVME_ADMIN:
889 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
890 			return (0);
891 		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
892 			return (EINVAL);
893 		data_ptrs[0] = &ccb->nvmeio.data_ptr;
894 		lengths[0] = ccb->nvmeio.dxfer_len;
895 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
896 		numbufs = 1;
897 		break;
898 	case XPT_DEV_ADVINFO:
899 		if (ccb->cdai.bufsiz == 0)
900 			return (0);
901 
902 		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
903 		lengths[0] = ccb->cdai.bufsiz;
904 		dirs[0] = CAM_DIR_IN;
905 		numbufs = 1;
906 
907 		/*
908 		 * This request will not go to the hardware, so there is no
909 		 * reason to be so strict.  vmapbuf() is able to map up to maxphys.
910 		 */
911 		maxmap = maxphys;
912 		break;
913 	default:
914 		return(EINVAL);
915 		break; /* NOTREACHED */
916 	}
917 
918 	/*
919 	 * Check the transfer length and permissions first, so we don't
920 	 * have to unmap any previously mapped buffers.
921 	 */
922 	for (i = 0; i < numbufs; i++) {
923 		if (lengths[i] > maxmap) {
924 			printf("cam_periph_mapmem: attempt to map %lu bytes, "
925 			       "which is greater than %lu\n",
926 			       (long)(lengths[i]), (u_long)maxmap);
927 			return (E2BIG);
928 		}
929 	}
930 
931 	for (i = 0; i < numbufs; i++) {
932 		/* Save the user's data address. */
933 		mapinfo->orig[i] = *data_ptrs[i];
934 
935 		/*
936 		 * For small buffers use malloc+copyin/copyout instead of
937 		 * mapping to KVA to avoid expensive TLB shootdowns.  For
938 		 * small allocations malloc is backed by UMA, and so much
939 		 * cheaper on SMP systems.
940 		 */
941 		if (lengths[i] <= periph_mapmem_thresh &&
942 		    ccb->ccb_h.func_code != XPT_MMC_IO) {
943 			*data_ptrs[i] = malloc(lengths[i], M_CAMPERIPH,
944 			    M_WAITOK);
945 			if (dirs[i] != CAM_DIR_IN) {
946 				if (copyin(mapinfo->orig[i], *data_ptrs[i],
947 				    lengths[i]) != 0) {
948 					free(*data_ptrs[i], M_CAMPERIPH);
949 					*data_ptrs[i] = mapinfo->orig[i];
950 					goto fail;
951 				}
952 			} else
953 				bzero(*data_ptrs[i], lengths[i]);
954 			continue;
955 		}
956 
957 		/*
958 		 * Get the buffer.
959 		 */
960 		mapinfo->bp[i] = uma_zalloc(pbuf_zone, M_WAITOK);
961 
962 		/* set the direction */
963 		mapinfo->bp[i]->b_iocmd = (dirs[i] == CAM_DIR_OUT) ?
964 		    BIO_WRITE : BIO_READ;
965 
966 		/* Map the buffer into kernel memory. */
967 		if (vmapbuf(mapinfo->bp[i], *data_ptrs[i], lengths[i], 1) < 0) {
968 			uma_zfree(pbuf_zone, mapinfo->bp[i]);
969 			goto fail;
970 		}
971 
972 		/* set our pointer to the new mapped area */
973 		*data_ptrs[i] = mapinfo->bp[i]->b_data;
974 	}
975 
976 	/*
977 	 * Now that we've gotten this far, change ownership to the kernel
978 	 * of the buffers so that we don't run afoul of returning to user
979 	 * space with locks (on the buffer) held.
980 	 */
981 	for (i = 0; i < numbufs; i++) {
982 		if (mapinfo->bp[i])
983 			BUF_KERNPROC(mapinfo->bp[i]);
984 	}
985 
986 	mapinfo->num_bufs_used = numbufs;
987 	return(0);
988 
989 fail:
990 	for (i--; i >= 0; i--) {
991 		if (mapinfo->bp[i]) {
992 			vunmapbuf(mapinfo->bp[i]);
993 			uma_zfree(pbuf_zone, mapinfo->bp[i]);
994 		} else
995 			free(*data_ptrs[i], M_CAMPERIPH);
996 		*data_ptrs[i] = mapinfo->orig[i];
997 	}
998 	return(EACCES);
999 }
1000 
1001 /*
1002  * Unmap memory segments mapped into kernel virtual address space by
1003  * cam_periph_mapmem().
1004  */
1005 int
1006 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
1007 {
1008 	int error, numbufs, i;
1009 	uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
1010 	uint32_t lengths[CAM_PERIPH_MAXMAPS];
1011 	uint32_t dirs[CAM_PERIPH_MAXMAPS];
1012 
1013 	if (mapinfo->num_bufs_used <= 0) {
1014 		/* nothing to free and the process wasn't held. */
1015 		return (0);
1016 	}
1017 
1018 	switch (ccb->ccb_h.func_code) {
1019 	case XPT_DEV_MATCH:
1020 		if (ccb->cdm.pattern_buf_len > 0) {
1021 			data_ptrs[0] = (uint8_t **)&ccb->cdm.patterns;
1022 			lengths[0] = ccb->cdm.pattern_buf_len;
1023 			dirs[0] = CAM_DIR_OUT;
1024 			data_ptrs[1] = (uint8_t **)&ccb->cdm.matches;
1025 			lengths[1] = ccb->cdm.match_buf_len;
1026 			dirs[1] = CAM_DIR_IN;
1027 			numbufs = 2;
1028 		} else {
1029 			data_ptrs[0] = (uint8_t **)&ccb->cdm.matches;
1030 			lengths[0] = ccb->cdm.match_buf_len;
1031 			dirs[0] = CAM_DIR_IN;
1032 			numbufs = 1;
1033 		}
1034 		break;
1035 	case XPT_SCSI_IO:
1036 	case XPT_CONT_TARGET_IO:
1037 		data_ptrs[0] = &ccb->csio.data_ptr;
1038 		lengths[0] = ccb->csio.dxfer_len;
1039 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1040 		numbufs = 1;
1041 		break;
1042 	case XPT_ATA_IO:
1043 		data_ptrs[0] = &ccb->ataio.data_ptr;
1044 		lengths[0] = ccb->ataio.dxfer_len;
1045 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1046 		numbufs = 1;
1047 		break;
1048 	case XPT_MMC_IO:
1049 		data_ptrs[0] = (uint8_t **)&ccb->mmcio.cmd.data;
1050 		lengths[0] = sizeof(struct mmc_data *);
1051 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1052 		data_ptrs[1] = (uint8_t **)&ccb->mmcio.cmd.data->data;
1053 		lengths[1] = ccb->mmcio.cmd.data->len;
1054 		dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
1055 		numbufs = 2;
1056 		break;
1057 	case XPT_SMP_IO:
1058 		data_ptrs[0] = &ccb->smpio.smp_request;
1059 		lengths[0] = ccb->smpio.smp_request_len;
1060 		dirs[0] = CAM_DIR_OUT;
1061 		data_ptrs[1] = &ccb->smpio.smp_response;
1062 		lengths[1] = ccb->smpio.smp_response_len;
1063 		dirs[1] = CAM_DIR_IN;
1064 		numbufs = 2;
1065 		break;
1066 	case XPT_NVME_IO:
1067 	case XPT_NVME_ADMIN:
1068 		data_ptrs[0] = &ccb->nvmeio.data_ptr;
1069 		lengths[0] = ccb->nvmeio.dxfer_len;
1070 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1071 		numbufs = 1;
1072 		break;
1073 	case XPT_DEV_ADVINFO:
1074 		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
1075 		lengths[0] = ccb->cdai.bufsiz;
1076 		dirs[0] = CAM_DIR_IN;
1077 		numbufs = 1;
1078 		break;
1079 	default:
1080 		numbufs = 0;
1081 		break;
1082 	}
1083 
1084 	error = 0;
1085 	for (i = 0; i < numbufs; i++) {
1086 		if (mapinfo->bp[i]) {
1087 			/* unmap the buffer */
1088 			vunmapbuf(mapinfo->bp[i]);
1089 
1090 			/* release the buffer */
1091 			uma_zfree(pbuf_zone, mapinfo->bp[i]);
1092 		} else {
1093 			if (dirs[i] != CAM_DIR_OUT) {
1094 				int error1;
1095 
1096 				error1 = copyout(*data_ptrs[i], mapinfo->orig[i],
1097 				    lengths[i]);
1098 				if (error == 0)
1099 					error = error1;
1100 			}
1101 			free(*data_ptrs[i], M_CAMPERIPH);
1102 		}
1103 
1104 		/* Set the user's pointer back to the original value */
1105 		*data_ptrs[i] = mapinfo->orig[i];
1106 	}
1107 
1108 	return (error);
1109 }
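
/*
 * Illustrative sketch only: a user-originated CCB (e.g. from a pass(4)-style
 * ioctl handler) is typically wrapped as below; "xxerror" and "softc" are
 * hypothetical.
 *
 *	struct cam_periph_map_info mapinfo;
 *	int error;
 *
 *	error = cam_periph_mapmem(ccb, &mapinfo, maxphys);
 *	if (error != 0)
 *		return (error);
 *	error = cam_periph_runccb(ccb, xxerror, CAM_RETRY_SELTO, SF_RETRY_UA,
 *	    softc->device_stats);
 *	cam_periph_unmapmem(ccb, &mapinfo);
 *	return (error);
 */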
1110 
1111 int
1112 cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
1113 		 int (*error_routine)(union ccb *ccb,
1114 				      cam_flags camflags,
1115 				      uint32_t sense_flags))
1116 {
1117 	union ccb 	     *ccb;
1118 	int 		     error;
1119 	int		     found;
1120 
1121 	error = found = 0;
1122 
1123 	switch(cmd){
1124 	case CAMGETPASSTHRU_0x19:
1125 	case CAMGETPASSTHRU:
1126 		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
1127 		xpt_setup_ccb(&ccb->ccb_h,
1128 			      ccb->ccb_h.path,
1129 			      CAM_PRIORITY_NORMAL);
1130 		ccb->ccb_h.func_code = XPT_GDEVLIST;
1131 
1132 		/*
1133 		 * Basically, the point of this is that we go through
1134 		 * getting the list of devices, until we find a passthrough
1135 		 * device.  In the current version of the CAM code, the
1136 		 * only way to determine what type of device we're dealing
1137 		 * with is by its name.
1138 		 */
1139 		while (found == 0) {
1140 			ccb->cgdl.index = 0;
1141 			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
1142 			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
1143 				/* we want the next device in the list */
1144 				xpt_action(ccb);
1145 				if (strncmp(ccb->cgdl.periph_name,
1146 				    "pass", 4) == 0){
1147 					found = 1;
1148 					break;
1149 				}
1150 			}
1151 			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
1152 			    (found == 0)) {
1153 				ccb->cgdl.periph_name[0] = '\0';
1154 				ccb->cgdl.unit_number = 0;
1155 				break;
1156 			}
1157 		}
1158 
1159 		/* copy the result back out */
1160 		bcopy(ccb, addr, sizeof(union ccb));
1161 
1162 		/* and release the ccb */
1163 		xpt_release_ccb(ccb);
1164 
1165 		break;
1166 	default:
1167 		error = ENOTTY;
1168 		break;
1169 	}
1170 	return(error);
1171 }
1172 
1173 static void
1174 cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb)
1175 {
1176 
1177 	panic("%s: already done with ccb %p", __func__, done_ccb);
1178 }
1179 
1180 static void
1181 cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
1182 {
1183 
1184 	/* Caller will release the CCB */
1185 	xpt_path_assert(done_ccb->ccb_h.path, MA_OWNED);
1186 	done_ccb->ccb_h.cbfcnp = cam_periph_done_panic;
1187 	wakeup(&done_ccb->ccb_h.cbfcnp);
1188 }
1189 
1190 static void
1191 cam_periph_ccbwait(union ccb *ccb)
1192 {
1193 
1194 	if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
1195 		while (ccb->ccb_h.cbfcnp != cam_periph_done_panic)
1196 			xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp,
1197 			    PRIBIO, "cbwait", 0);
1198 	}
1199 	KASSERT(ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX &&
1200 	    (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG,
1201 	    ("%s: proceeding with incomplete ccb: ccb=%p, func_code=%#x, "
1202 	     "status=%#x, index=%d", __func__, ccb, ccb->ccb_h.func_code,
1203 	     ccb->ccb_h.status, ccb->ccb_h.pinfo.index));
1204 }
1205 
1206 /*
1207  * Dispatch a CCB and wait for it to complete.  If the CCB has set a
1208  * callback function (ccb->ccb_h.cbfcnp), it will be overwritten and lost.
1209  */
1210 int
1211 cam_periph_runccb(union ccb *ccb,
1212 		  int (*error_routine)(union ccb *ccb,
1213 				       cam_flags camflags,
1214 				       uint32_t sense_flags),
1215 		  cam_flags camflags, uint32_t sense_flags,
1216 		  struct devstat *ds)
1217 {
1218 	struct bintime *starttime;
1219 	struct bintime ltime;
1220 	int error;
1221 	bool must_poll;
1222 	uint32_t timeout = 1;
1223 
1224 	starttime = NULL;
1225 	xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
1226 	KASSERT((ccb->ccb_h.flags & CAM_UNLOCKED) == 0,
1227 	    ("%s: ccb=%p, func_code=%#x, flags=%#x", __func__, ccb,
1228 	     ccb->ccb_h.func_code, ccb->ccb_h.flags));
1229 
1230 	/*
1231 	 * If the user has supplied a stats structure, and if we understand
1232 	 * this particular type of ccb, record the transaction start.
1233 	 */
1234 	if (ds != NULL &&
1235 	    (ccb->ccb_h.func_code == XPT_SCSI_IO ||
1236 	    ccb->ccb_h.func_code == XPT_ATA_IO ||
1237 	    ccb->ccb_h.func_code == XPT_NVME_IO)) {
1238 		starttime = &ltime;
1239 		binuptime(starttime);
1240 		devstat_start_transaction(ds, starttime);
1241 	}
1242 
1243 	/*
1244 	 * We must poll the I/O while we're dumping. The scheduler is normally
1245 	 * stopped for dumping, except when we call doadump from ddb. While the
1246 	 * scheduler is running in this case, we still need to poll the I/O to
1247 	 * avoid sleeping waiting for the ccb to complete.
1248 	 *
1249 	 * A panic-triggered dump stops the scheduler; any callback from the
1250 	 * shutdown_post_sync event will run with the scheduler stopped, but
1251 	 * before we're officially dumping. To avoid hanging in adashutdown
1252 	 * initiated commands (or other similar situations), we have to test for
1253 	 * either dumping or SCHEDULER_STOPPED() here.
1254 	 *
1255 	 * To avoid locking problems, dumping/polling callers must call
1256 	 * without a periph lock held.
1257 	 */
1258 	must_poll = dumping || SCHEDULER_STOPPED();
1259 	ccb->ccb_h.cbfcnp = cam_periph_done;
1260 
1261 	/*
1262 	 * If we're polling, then we need to ensure that we have ample resources
1263 	 * in the periph.  cam_periph_error can reschedule the ccb by calling
1264 	 * xpt_action and returning ERESTART, so we have to effect the polling
1265 	 * in the do loop below.
1266 	 */
1267 	if (must_poll) {
1268 		if (cam_sim_pollable(ccb->ccb_h.path->bus->sim))
1269 			timeout = xpt_poll_setup(ccb);
1270 		else
1271 			timeout = 0;
1272 	}
1273 
1274 	if (timeout == 0) {
1275 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1276 		error = EBUSY;
1277 	} else {
1278 		xpt_action(ccb);
1279 		do {
1280 			if (must_poll) {
1281 				xpt_pollwait(ccb, timeout);
1282 				timeout = ccb->ccb_h.timeout * 10;
1283 			} else {
1284 				cam_periph_ccbwait(ccb);
1285 			}
1286 			if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
1287 				error = 0;
1288 			else if (error_routine != NULL) {
1289 				/*
1290 				 * cbfcnp is modified by cam_periph_ccbwait so
1291 				 * reset it before we call the error routine
1292 				 * which may call xpt_done.
1293 				 */
1294 				ccb->ccb_h.cbfcnp = cam_periph_done;
1295 				error = (*error_routine)(ccb, camflags, sense_flags);
1296 			} else
1297 				error = 0;
1298 		} while (error == ERESTART);
1299 	}
1300 
1301 	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1302 		cam_release_devq(ccb->ccb_h.path,
1303 				 /* relsim_flags */0,
1304 				 /* openings */0,
1305 				 /* timeout */0,
1306 				 /* getcount_only */ FALSE);
1307 		ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1308 	}
1309 
1310 	if (ds != NULL) {
1311 		uint32_t bytes;
1312 		devstat_tag_type tag;
1313 		bool valid = true;
1314 
1315 		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
1316 			bytes = ccb->csio.dxfer_len - ccb->csio.resid;
1317 			tag = (devstat_tag_type)(ccb->csio.tag_action & 0x3);
1318 		} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1319 			bytes = ccb->ataio.dxfer_len - ccb->ataio.resid;
1320 			tag = (devstat_tag_type)0;
1321 		} else if (ccb->ccb_h.func_code == XPT_NVME_IO) {
1322 			bytes = ccb->nvmeio.dxfer_len; /* NB: resid not possible */
1323 			tag = (devstat_tag_type)0;
1324 		} else {
1325 			valid = false;
1326 		}
1327 		if (valid)
1328 			devstat_end_transaction(ds, bytes, tag,
1329 			    ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ?
1330 			    DEVSTAT_NO_DATA : (ccb->ccb_h.flags & CAM_DIR_OUT) ?
1331 			    DEVSTAT_WRITE : DEVSTAT_READ, NULL, starttime);
1332 	}
1333 
1334 	return(error);
1335 }
1336 
1337 void
1338 cam_freeze_devq(struct cam_path *path)
1339 {
1340 	struct ccb_hdr ccb_h;
1341 
1342 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
1343 	memset(&ccb_h, 0, sizeof(ccb_h));
1344 	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
1345 	ccb_h.func_code = XPT_NOOP;
1346 	ccb_h.flags = CAM_DEV_QFREEZE;
1347 	xpt_action((union ccb *)&ccb_h);
1348 }
1349 
1350 uint32_t
1351 cam_release_devq(struct cam_path *path, uint32_t relsim_flags,
1352 		 uint32_t openings, uint32_t arg,
1353 		 int getcount_only)
1354 {
1355 	struct ccb_relsim crs;
1356 
1357 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
1358 	    relsim_flags, openings, arg, getcount_only));
1359 	memset(&crs, 0, sizeof(crs));
1360 	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
1361 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1362 	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
1363 	crs.release_flags = relsim_flags;
1364 	crs.openings = openings;
1365 	crs.release_timeout = arg;
1366 	xpt_action((union ccb *)&crs);
1367 	return (crs.qfrozen_cnt);
1368 }
1369 
1370 #define saved_ccb_ptr ppriv_ptr0
1371 static void
1372 camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
1373 {
1374 	union ccb      *saved_ccb;
1375 	cam_status	status;
1376 	struct scsi_start_stop_unit *scsi_cmd;
1377 	int		error = 0, error_code, sense_key, asc, ascq;
1378 	uint16_t	done_flags;
1379 
1380 	scsi_cmd = (struct scsi_start_stop_unit *)
1381 	    &done_ccb->csio.cdb_io.cdb_bytes;
1382 	status = done_ccb->ccb_h.status;
1383 
1384 	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1385 		if (scsi_extract_sense_ccb(done_ccb,
1386 		    &error_code, &sense_key, &asc, &ascq)) {
1387 			/*
1388 			 * If the error is "invalid field in CDB",
1389 			 * and the load/eject flag is set, turn the
1390 			 * flag off and try again.  This is just in
1391 			 * case the drive in question barfs on the
1392 			 * load eject flag.  The CAM code should set
1393 			 * the load/eject flag by default for
1394 			 * removable media.
1395 			 */
1396 			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
1397 			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
1398 			     (asc == 0x24) && (ascq == 0x00)) {
1399 				scsi_cmd->how &= ~SSS_LOEJ;
1400 				if (status & CAM_DEV_QFRZN) {
1401 					cam_release_devq(done_ccb->ccb_h.path,
1402 					    0, 0, 0, 0);
1403 					done_ccb->ccb_h.status &=
1404 					    ~CAM_DEV_QFRZN;
1405 				}
1406 				xpt_action(done_ccb);
1407 				goto out;
1408 			}
1409 		}
1410 		error = cam_periph_error(done_ccb, 0,
1411 		    SF_RETRY_UA | SF_NO_PRINT);
1412 		if (error == ERESTART)
1413 			goto out;
1414 		if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
1415 			cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
1416 			done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1417 		}
1418 	} else {
1419 		/*
1420 		 * If we have successfully taken a device from the not
1421 		 * ready to ready state, re-scan the device and re-get
1422 		 * the inquiry information.  Many devices (mostly disks)
1423 		 * don't properly report their inquiry information unless
1424 		 * they are spun up.
1425 		 */
1426 		if (scsi_cmd->opcode == START_STOP_UNIT)
1427 			xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
1428 	}
1429 
1430 	/* If we tried a long wait and still failed, remember that. */
1431 	if ((periph->flags & CAM_PERIPH_RECOVERY_WAIT) &&
1432 	    (done_ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY)) {
1433 		periph->flags &= ~CAM_PERIPH_RECOVERY_WAIT;
1434 		if (error != 0 && done_ccb->ccb_h.retry_count == 0)
1435 			periph->flags |= CAM_PERIPH_RECOVERY_WAIT_FAILED;
1436 	}
1437 
1438 	/*
1439 	 * After recovery action(s) completed, return to the original CCB.
1440 	 * If the recovery CCB has failed, considering its own possible
1441 	 * retries and recovery, assume we are back in the state where we
1442 	 * were originally, but without any recovery hopes left.  In such case,
1443 	 * after the final attempt below, we cancel any further retries,
1444 	 * blocking by that also any new recovery attempts for this CCB,
1445 	 * and the result will be the final one returned to the CCB owner.
1446 	 */
1447 	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
1448 	KASSERT(saved_ccb->ccb_h.func_code == XPT_SCSI_IO,
1449 	    ("%s: saved_ccb func_code %#x != XPT_SCSI_IO",
1450 	     __func__, saved_ccb->ccb_h.func_code));
1451 	KASSERT(done_ccb->ccb_h.func_code == XPT_SCSI_IO,
1452 	    ("%s: done_ccb func_code %#x != XPT_SCSI_IO",
1453 	     __func__, done_ccb->ccb_h.func_code));
1454 	saved_ccb->ccb_h.periph_links = done_ccb->ccb_h.periph_links;
1455 	done_flags = done_ccb->ccb_h.alloc_flags;
1456 	bcopy(saved_ccb, done_ccb, sizeof(struct ccb_scsiio));
1457 	done_ccb->ccb_h.alloc_flags = done_flags;
1458 	xpt_free_ccb(saved_ccb);
1459 	if (done_ccb->ccb_h.cbfcnp != camperiphdone)
1460 		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1461 	if (error != 0)
1462 		done_ccb->ccb_h.retry_count = 0;
1463 	xpt_action(done_ccb);
1464 
1465 out:
1466 	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
1467 	cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
1468 }
1469 
1470 /*
1471  * Generic Async Event handler.  Peripheral drivers usually
1472  * filter out the events that require personal attention,
1473  * and leave the rest to this function.
1474  */
1475 void
1476 cam_periph_async(struct cam_periph *periph, uint32_t code,
1477 		 struct cam_path *path, void *arg)
1478 {
1479 	switch (code) {
1480 	case AC_LOST_DEVICE:
1481 		cam_periph_invalidate(periph);
1482 		break;
1483 	default:
1484 		break;
1485 	}
1486 }
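
/*
 * Illustrative sketch only: a periph driver's async callback usually handles
 * the events it cares about itself and hands everything else to
 * cam_periph_async().  "xxasync" is hypothetical.
 *
 *	static void
 *	xxasync(void *callback_arg, uint32_t code, struct cam_path *path,
 *	    void *arg)
 *	{
 *		struct cam_periph *periph = (struct cam_periph *)callback_arg;
 *
 *		switch (code) {
 *		case AC_FOUND_DEVICE:
 *			// driver-specific attach handling
 *			break;
 *		default:
 *			cam_periph_async(periph, code, path, arg);
 *			break;
 *		}
 *	}
 */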
1487 
1488 void
1489 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1490 {
1491 	struct ccb_getdevstats cgds;
1492 
1493 	memset(&cgds, 0, sizeof(cgds));
1494 	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1495 	cgds.ccb_h.func_code = XPT_GDEV_STATS;
1496 	xpt_action((union ccb *)&cgds);
1497 	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
1498 }
1499 
1500 void
1501 cam_periph_freeze_after_event(struct cam_periph *periph,
1502 			      struct timeval* event_time, u_int duration_ms)
1503 {
1504 	struct timeval delta;
1505 	struct timeval duration_tv;
1506 
1507 	if (!timevalisset(event_time))
1508 		return;
1509 
1510 	microtime(&delta);
1511 	timevalsub(&delta, event_time);
1512 	duration_tv.tv_sec = duration_ms / 1000;
1513 	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
1514 	if (timevalcmp(&delta, &duration_tv, <)) {
1515 		timevalsub(&duration_tv, &delta);
1516 
1517 		duration_ms = duration_tv.tv_sec * 1000;
1518 		duration_ms += duration_tv.tv_usec / 1000;
1519 		cam_freeze_devq(periph->path);
1520 		cam_release_devq(periph->path,
1521 				RELSIM_RELEASE_AFTER_TIMEOUT,
1522 				/*reduction*/0,
1523 				/*timeout*/duration_ms,
1524 				/*getcount_only*/0);
1525 	}
1526 
1527 }
1528 
1529 static int
1530 camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
1531     cam_flags camflags, uint32_t sense_flags,
1532     int *openings, uint32_t *relsim_flags,
1533     uint32_t *timeout, uint32_t *action, const char **action_string)
1534 {
1535 	struct cam_periph *periph;
1536 	int error;
1537 
1538 	switch (ccb->csio.scsi_status) {
1539 	case SCSI_STATUS_OK:
1540 	case SCSI_STATUS_COND_MET:
1541 	case SCSI_STATUS_INTERMED:
1542 	case SCSI_STATUS_INTERMED_COND_MET:
1543 		error = 0;
1544 		break;
1545 	case SCSI_STATUS_CMD_TERMINATED:
1546 	case SCSI_STATUS_CHECK_COND:
1547 		error = camperiphscsisenseerror(ccb, orig_ccb,
1548 					        camflags,
1549 					        sense_flags,
1550 					        openings,
1551 					        relsim_flags,
1552 					        timeout,
1553 					        action,
1554 					        action_string);
1555 		break;
1556 	case SCSI_STATUS_QUEUE_FULL:
1557 	{
1558 		/* no decrement */
1559 		struct ccb_getdevstats cgds;
1560 
1561 		/*
1562 		 * First off, find out what the current
1563 		 * transaction counts are.
1564 		 */
1565 		memset(&cgds, 0, sizeof(cgds));
1566 		xpt_setup_ccb(&cgds.ccb_h,
1567 			      ccb->ccb_h.path,
1568 			      CAM_PRIORITY_NORMAL);
1569 		cgds.ccb_h.func_code = XPT_GDEV_STATS;
1570 		xpt_action((union ccb *)&cgds);
1571 
1572 		/*
1573 		 * If we were the only transaction active, treat
1574 		 * the QUEUE FULL as if it were a BUSY condition.
1575 		 */
1576 		if (cgds.dev_active != 0) {
1577 			int total_openings;
1578 
1579 			/*
1580 		 	 * Reduce the number of openings to
1581 			 * be 1 less than the amount it took
1582 			 * to get a queue full bounded by the
1583 			 * minimum allowed tag count for this
1584 			 * device.
1585 		 	 */
1586 			total_openings = cgds.dev_active + cgds.dev_openings;
1587 			*openings = cgds.dev_active;
1588 			if (*openings < cgds.mintags)
1589 				*openings = cgds.mintags;
1590 			if (*openings < total_openings)
1591 				*relsim_flags = RELSIM_ADJUST_OPENINGS;
1592 			else {
1593 				/*
1594 				 * Some devices report queue full for
1595 				 * temporary resource shortages.  For
1596 				 * this reason, we allow a minimum
1597 				 * tag count to be entered via a
1598 				 * quirk entry to prevent the queue
1599 				 * count on these devices from falling
1600 				 * to a pessimistically low value.  We
1601 				 * still wait for the next successful
1602 				 * completion, however, before queueing
1603 				 * more transactions to the device.
1604 				 */
1605 				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
1606 			}
1607 			*timeout = 0;
1608 			error = ERESTART;
1609 			*action &= ~SSQ_PRINT_SENSE;
1610 			break;
1611 		}
1612 		/* FALLTHROUGH */
1613 	}
1614 	case SCSI_STATUS_BUSY:
1615 		/*
1616 		 * Restart the queue after either another
1617 		 * command completes or a 1 second timeout.
1618 		 */
1619 		periph = xpt_path_periph(ccb->ccb_h.path);
1620 		if (periph->flags & CAM_PERIPH_INVALID) {
1621 			error = ENXIO;
1622 			*action_string = "Periph was invalidated";
1623 		} else if ((sense_flags & SF_RETRY_BUSY) != 0 ||
1624 		    ccb->ccb_h.retry_count > 0) {
1625 			if ((sense_flags & SF_RETRY_BUSY) == 0)
1626 				ccb->ccb_h.retry_count--;
1627 			error = ERESTART;
1628 			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1629 				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
1630 			*timeout = 1000;
1631 		} else {
1632 			error = EIO;
1633 			*action_string = "Retries exhausted";
1634 		}
1635 		break;
1636 	case SCSI_STATUS_RESERV_CONFLICT:
1637 	default:
1638 		error = EIO;
1639 		break;
1640 	}
1641 	return (error);
1642 }
1643 
1644 static int
1645 camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
1646     cam_flags camflags, uint32_t sense_flags,
1647     int *openings, uint32_t *relsim_flags,
1648     uint32_t *timeout, uint32_t *action, const char **action_string)
1649 {
1650 	struct cam_periph *periph;
1651 	union ccb *orig_ccb = ccb;
1652 	int error, recoveryccb;
1653 	uint16_t flags;
1654 
1655 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
1656 	if (ccb->ccb_h.func_code == XPT_SCSI_IO && ccb->csio.bio != NULL)
1657 		biotrack(ccb->csio.bio, __func__);
1658 #endif
1659 
1660 	periph = xpt_path_periph(ccb->ccb_h.path);
1661 	recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
1662 	if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
1663 		/*
1664 		 * If error recovery is already in progress, don't attempt
1665 		 * to process this error, but requeue it unconditionally
1666 		 * and attempt to process it once error recovery has
1667 		 * completed.  This failed command is probably related to
1668 		 * the error that caused the currently active error recovery
1669 		 * action so our current recovery efforts should also
1670 		 * address this command.  Be aware that the error recovery
1671 		 * code assumes that only one recovery action is in progress
1672 		 * on a particular peripheral instance at any given time
1673 		 * (e.g. only one saved CCB for error recovery) so it is
1674 		 * imperative that we don't violate this assumption.
1675 		 */
1676 		error = ERESTART;
1677 		*action &= ~SSQ_PRINT_SENSE;
1678 	} else {
1679 		scsi_sense_action err_action;
1680 		struct ccb_getdev cgd;
1681 
1682 		/*
1683 		 * Grab the inquiry data for this device.
1684 		 */
1685 		memset(&cgd, 0, sizeof(cgd));
1686 		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
1687 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1688 		xpt_action((union ccb *)&cgd);
1689 
1690 		err_action = scsi_error_action(&ccb->csio, &cgd.inq_data,
1691 		    sense_flags);
1692 		error = err_action & SS_ERRMASK;
1693 
1694 		/*
1695 		 * Do not autostart sequential access devices
1696 		 * to avoid unexpected tape loading.
1697 		 */
1698 		if ((err_action & SS_MASK) == SS_START &&
1699 		    SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
1700 			*action_string = "Will not autostart a "
1701 			    "sequential access device";
1702 			goto sense_error_done;
1703 		}
1704 
1705 		/*
1706 		 * Avoid recovery recursion if recovery action is the same.
1707 		 */
1708 		if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
1709 			if (((err_action & SS_MASK) == SS_START &&
1710 			     ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) ||
1711 			    ((err_action & SS_MASK) == SS_TUR &&
1712 			     (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) {
1713 				err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
1714 				*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1715 				*timeout = 500;
1716 			}
1717 		}
1718 
1719 		/*
1720 		 * If the recovery action will consume a retry,
1721 		 * make sure we actually have retries available.
1722 		 */
1723 		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
1724 		 	if (ccb->ccb_h.retry_count > 0 &&
1725 			    (periph->flags & CAM_PERIPH_INVALID) == 0)
1726 		 		ccb->ccb_h.retry_count--;
1727 			else {
1728 				*action_string = "Retries exhausted";
1729 				goto sense_error_done;
1730 			}
1731 		}
1732 
1733 		if ((err_action & SS_MASK) >= SS_START) {
1734 			/*
1735 			 * Do common portions of commands that
1736 			 * use recovery CCBs.
1737 			 */
1738 			orig_ccb = xpt_alloc_ccb_nowait();
1739 			if (orig_ccb == NULL) {
1740 				*action_string = "Can't allocate recovery CCB";
1741 				goto sense_error_done;
1742 			}
1743 			/*
1744 			 * Clear freeze flag for original request here, as
1745 			 * this freeze will be dropped as part of ERESTART.
1746 			 */
1747 			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1748 
1749 			KASSERT(ccb->ccb_h.func_code == XPT_SCSI_IO,
1750 			    ("%s: ccb func_code %#x != XPT_SCSI_IO",
1751 			     __func__, ccb->ccb_h.func_code));
1752 			flags = orig_ccb->ccb_h.alloc_flags;
1753 			bcopy(ccb, orig_ccb, sizeof(struct ccb_scsiio));
1754 			orig_ccb->ccb_h.alloc_flags = flags;
1755 		}
1756 
1757 		switch (err_action & SS_MASK) {
1758 		case SS_NOP:
1759 			*action_string = "No recovery action needed";
1760 			error = 0;
1761 			break;
1762 		case SS_RETRY:
1763 			*action_string = "Retrying command (per sense data)";
1764 			error = ERESTART;
1765 			break;
1766 		case SS_FAIL:
1767 			*action_string = "Unretryable error";
1768 			break;
1769 		case SS_START:
1770 		{
1771 			int le;
1772 
1773 			/*
1774 			 * Send a start unit command to the device, and
1775 			 * then retry the command.
1776 			 */
1777 			*action_string = "Attempting to start unit";
1778 			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1779 
1780 			/*
1781 			 * Check for removable media and set
1782 			 * load/eject flag appropriately.
1783 			 */
1784 			if (SID_IS_REMOVABLE(&cgd.inq_data))
1785 				le = TRUE;
1786 			else
1787 				le = FALSE;
1788 
1789 			scsi_start_stop(&ccb->csio,
1790 					/*retries*/1,
1791 					camperiphdone,
1792 					MSG_SIMPLE_Q_TAG,
1793 					/*start*/TRUE,
1794 					/*load/eject*/le,
1795 					/*immediate*/FALSE,
1796 					SSD_FULL_SIZE,
1797 					/*timeout*/50000);
1798 			break;
1799 		}
1800 		case SS_TUR:
1801 		{
1802 			/*
1803 			 * Send a Test Unit Ready to the device.
1804 			 * If the 'many' flag is set, we send 120
1805 			 * test unit ready commands, one every half
1806 			 * second.  Otherwise, we just send one TUR.
1807 			 * We only want to do this if the retry
1808 			 * count has not been exhausted.
1809 			 */
1810 			int retries;
1811 
1812 			if ((err_action & SSQ_MANY) != 0 && (periph->flags &
1813 			     CAM_PERIPH_RECOVERY_WAIT_FAILED) == 0) {
1814 				periph->flags |= CAM_PERIPH_RECOVERY_WAIT;
1815 				*action_string = "Polling device for readiness";
1816 				retries = 120;
1817 			} else {
1818 				*action_string = "Testing device for readiness";
1819 				retries = 1;
1820 			}
1821 			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1822 			scsi_test_unit_ready(&ccb->csio,
1823 					     retries,
1824 					     camperiphdone,
1825 					     MSG_SIMPLE_Q_TAG,
1826 					     SSD_FULL_SIZE,
1827 					     /*timeout*/5000);
1828 
1829 			/*
1830 			 * Accomplish our 500ms delay by deferring
1831 			 * the release of our device queue appropriately.
1832 			 */
1833 			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1834 			*timeout = 500;
1835 			break;
1836 		}
1837 		default:
1838 			panic("Unhandled error action %x", err_action);
1839 		}
1840 
1841 		if ((err_action & SS_MASK) >= SS_START) {
1842 			/*
1843 			 * Drop the priority, so that the recovery
1844 			 * CCB is the first to execute.  Freeze the queue
1845 			 * after this command is sent so that we can
1846 			 * restore the old csio and have it queued in
1847 			 * the proper order before we release normal
1848 			 * transactions to the device.
1849 			 */
1850 			ccb->ccb_h.pinfo.priority--;
1851 			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1852 			ccb->ccb_h.saved_ccb_ptr = orig_ccb;
1853 			error = ERESTART;
1854 			*orig = orig_ccb;
1855 		}
1856 
1857 sense_error_done:
1858 		*action = err_action;
1859 	}
1860 	return (error);
1861 }
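
A quick aside on the scsi_sense_action word decoded above: scsi_error_action()
packs an errno into the low bits (SS_ERRMASK), the recovery action into the
SS_MASK field, and SSQ_* qualifier flags into the rest, which is why the same
value is masked three different ways in this function.  A minimal sketch
decomposing a value like the SS_RETRY|SSQ_DECREMENT_COUNT|EIO word constructed
in the recovery-recursion check; this fragment is illustrative only and assumes
the definitions from <cam/scsi/scsi_all.h>:

	scsi_sense_action err_action = SS_RETRY | SSQ_DECREMENT_COUNT | EIO;
	int error;

	/* errno to hand back if recovery ultimately fails: EIO. */
	error = err_action & SS_ERRMASK;

	if ((err_action & SS_MASK) == SS_RETRY) {
		/* Recovery action: retry the original command. */
	}
	if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
		/* Qualifier: this retry consumes one of the CCB's retries. */
	}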
1862 
1863 /*
1864  * Generic error handler.  Peripheral drivers usually filter
1865  * out the errors that they handle in a unique manner, then
1866  * call this function.
1867  */
1868 int
1869 cam_periph_error(union ccb *ccb, cam_flags camflags,
1870 		 uint32_t sense_flags)
1871 {
1872 	struct cam_path *newpath;
1873 	union ccb  *orig_ccb, *scan_ccb;
1874 	struct cam_periph *periph;
1875 	const char *action_string;
1876 	cam_status  status;
1877 	int	    frozen, error, openings, devctl_err;
1878 	uint32_t   action, relsim_flags, timeout;
1879 
1880 	action = SSQ_PRINT_SENSE;
1881 	periph = xpt_path_periph(ccb->ccb_h.path);
1882 	action_string = NULL;
1883 	status = ccb->ccb_h.status;
1884 	frozen = (status & CAM_DEV_QFRZN) != 0;
1885 	status &= CAM_STATUS_MASK;
1886 	devctl_err = openings = relsim_flags = timeout = 0;
1887 	orig_ccb = ccb;
1888 
1889 	/* Filter the errors that should be reported via devctl */
1890 	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
1891 	case CAM_CMD_TIMEOUT:
1892 	case CAM_REQ_ABORTED:
1893 	case CAM_REQ_CMP_ERR:
1894 	case CAM_REQ_TERMIO:
1895 	case CAM_UNREC_HBA_ERROR:
1896 	case CAM_DATA_RUN_ERR:
1897 	case CAM_SCSI_STATUS_ERROR:
1898 	case CAM_ATA_STATUS_ERROR:
1899 	case CAM_SMP_STATUS_ERROR:
1900 	case CAM_DEV_NOT_THERE:
1901 	case CAM_NVME_STATUS_ERROR:
1902 		devctl_err++;
1903 		break;
1904 	default:
1905 		break;
1906 	}
1907 
1908 	switch (status) {
1909 	case CAM_REQ_CMP:
1910 		error = 0;
1911 		action &= ~SSQ_PRINT_SENSE;
1912 		break;
1913 	case CAM_SCSI_STATUS_ERROR:
1914 		error = camperiphscsistatuserror(ccb, &orig_ccb,
1915 		    camflags, sense_flags, &openings, &relsim_flags,
1916 		    &timeout, &action, &action_string);
1917 		break;
1918 	case CAM_AUTOSENSE_FAIL:
1919 		error = EIO;	/* we have to kill the command */
1920 		break;
1921 	case CAM_UA_ABORT:
1922 	case CAM_UA_TERMIO:
1923 	case CAM_MSG_REJECT_REC:
1924 		/* XXX Don't know that these are correct */
1925 		error = EIO;
1926 		break;
1927 	case CAM_SEL_TIMEOUT:
1928 		if ((camflags & CAM_RETRY_SELTO) != 0) {
1929 			if (ccb->ccb_h.retry_count > 0 &&
1930 			    (periph->flags & CAM_PERIPH_INVALID) == 0) {
1931 				ccb->ccb_h.retry_count--;
1932 				error = ERESTART;
1933 
1934 				/*
1935 				 * Wait a bit to give the device
1936 				 * time to recover before we try again.
1937 				 */
1938 				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1939 				timeout = periph_selto_delay;
1940 				break;
1941 			}
1942 			action_string = "Retries exhausted";
1943 		}
1944 		/* FALLTHROUGH */
1945 	case CAM_DEV_NOT_THERE:
1946 		error = ENXIO;
1947 		action = SSQ_LOST;
1948 		break;
1949 	case CAM_REQ_INVALID:
1950 	case CAM_PATH_INVALID:
1951 	case CAM_NO_HBA:
1952 	case CAM_PROVIDE_FAIL:
1953 	case CAM_REQ_TOO_BIG:
1954 	case CAM_LUN_INVALID:
1955 	case CAM_TID_INVALID:
1956 	case CAM_FUNC_NOTAVAIL:
1957 		error = EINVAL;
1958 		break;
1959 	case CAM_SCSI_BUS_RESET:
1960 	case CAM_BDR_SENT:
1961 		/*
1962 		 * Commands that repeatedly time out and cause these
1963 		 * kinds of error recovery actions should return
1964 		 * CAM_CMD_TIMEOUT, which allows us to safely assume
1965 		 * that this command was an innocent bystander to
1966 		 * these events and should be unconditionally
1967 		 * retried.
1968 		 */
1969 	case CAM_REQUEUE_REQ:
1970 		/* Unconditional requeue if device is still there */
1971 		if (periph->flags & CAM_PERIPH_INVALID) {
1972 			action_string = "Periph was invalidated";
1973 			error = ENXIO;
1974 		} else if (sense_flags & SF_NO_RETRY) {
1975 			error = EIO;
1976 			action_string = "Retry was blocked";
1977 		} else {
1978 			error = ERESTART;
1979 			action &= ~SSQ_PRINT_SENSE;
1980 		}
1981 		break;
1982 	case CAM_RESRC_UNAVAIL:
1983 		/* Wait a bit for the resource shortage to abate. */
1984 		timeout = periph_noresrc_delay;
1985 		/* FALLTHROUGH */
1986 	case CAM_BUSY:
1987 		if (timeout == 0) {
1988 			/* Wait a bit for the busy condition to abate. */
1989 			timeout = periph_busy_delay;
1990 		}
1991 		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1992 		/* FALLTHROUGH */
1993 	case CAM_ATA_STATUS_ERROR:
1994 	case CAM_NVME_STATUS_ERROR:
1995 	case CAM_SMP_STATUS_ERROR:
1996 	case CAM_REQ_CMP_ERR:
1997 	case CAM_CMD_TIMEOUT:
1998 	case CAM_UNEXP_BUSFREE:
1999 	case CAM_UNCOR_PARITY:
2000 	case CAM_DATA_RUN_ERR:
2001 	default:
2002 		if (periph->flags & CAM_PERIPH_INVALID) {
2003 			error = ENXIO;
2004 			action_string = "Periph was invalidated";
2005 		} else if (ccb->ccb_h.retry_count == 0) {
2006 			error = EIO;
2007 			action_string = "Retries exhausted";
2008 		} else if (sense_flags & SF_NO_RETRY) {
2009 			error = EIO;
2010 			action_string = "Retry was blocked";
2011 		} else {
2012 			ccb->ccb_h.retry_count--;
2013 			error = ERESTART;
2014 		}
2015 		break;
2016 	}
2017 
2018 	if ((sense_flags & SF_PRINT_ALWAYS) ||
2019 	    CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
2020 		action |= SSQ_PRINT_SENSE;
2021 	else if (sense_flags & SF_NO_PRINT)
2022 		action &= ~SSQ_PRINT_SENSE;
2023 	if ((action & SSQ_PRINT_SENSE) != 0)
2024 		cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
2025 	if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
2026 		if (error != ERESTART) {
2027 			if (action_string == NULL)
2028 				action_string = "Unretryable error";
2029 			xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
2030 			    error, action_string);
2031 		} else if (action_string != NULL)
2032 			xpt_print(ccb->ccb_h.path, "%s\n", action_string);
2033 		else {
2034 			xpt_print(ccb->ccb_h.path,
2035 			    "Retrying command, %d more tries remain\n",
2036 			    ccb->ccb_h.retry_count);
2037 		}
2038 	}
2039 
2040 	if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0))
2041 		cam_periph_devctl_notify(orig_ccb);
2042 
2043 	if ((action & SSQ_LOST) != 0) {
2044 		lun_id_t lun_id;
2045 
2046 		/*
2047 		 * For a selection timeout, we consider all of the LUNs on
2048 		 * the target to be gone.  If the status is CAM_DEV_NOT_THERE,
2049 		 * then we only get rid of the device(s) specified by the
2050 		 * path in the original CCB.
2051 		 */
2052 		if (status == CAM_SEL_TIMEOUT)
2053 			lun_id = CAM_LUN_WILDCARD;
2054 		else
2055 			lun_id = xpt_path_lun_id(ccb->ccb_h.path);
2056 
2057 		/* Should we do more if we can't create the path?? */
2058 		if (xpt_create_path(&newpath, periph,
2059 				    xpt_path_path_id(ccb->ccb_h.path),
2060 				    xpt_path_target_id(ccb->ccb_h.path),
2061 				    lun_id) == CAM_REQ_CMP) {
2062 			/*
2063 			 * Let peripheral drivers know that this
2064 			 * device has gone away.
2065 			 */
2066 			xpt_async(AC_LOST_DEVICE, newpath, NULL);
2067 			xpt_free_path(newpath);
2068 		}
2069 	}
2070 
2071 	/* Broadcast UNIT ATTENTIONs to all periphs. */
2072 	if ((action & SSQ_UA) != 0)
2073 		xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);
2074 
2075 	/* Rescan target on "Reported LUNs data has changed" */
2076 	if ((action & SSQ_RESCAN) != 0) {
2077 		if (xpt_create_path(&newpath, NULL,
2078 				    xpt_path_path_id(ccb->ccb_h.path),
2079 				    xpt_path_target_id(ccb->ccb_h.path),
2080 				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
2081 			scan_ccb = xpt_alloc_ccb_nowait();
2082 			if (scan_ccb != NULL) {
2083 				scan_ccb->ccb_h.path = newpath;
2084 				scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
2085 				scan_ccb->crcn.flags = 0;
2086 				xpt_rescan(scan_ccb);
2087 			} else {
2088 				xpt_print(newpath,
2089 				    "Can't allocate CCB to rescan target\n");
2090 				xpt_free_path(newpath);
2091 			}
2092 		}
2093 	}
2094 
2095 	/* Attempt a retry */
2096 	if (error == ERESTART || error == 0) {
2097 		if (frozen != 0)
2098 			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
2099 		if (error == ERESTART)
2100 			xpt_action(ccb);
2101 		if (frozen != 0)
2102 			cam_release_devq(ccb->ccb_h.path,
2103 					 relsim_flags,
2104 					 openings,
2105 					 timeout,
2106 					 /*getcount_only*/0);
2107 	}
2108 
2109 	return (error);
2110 }
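
As the comment above this function notes, peripheral drivers usually handle the
statuses they care about themselves and pass everything else here.  A minimal
sketch of that calling pattern from a driver's completion callback, in the
spirit of the existing SCSI periph drivers; xxdone() is a hypothetical driver
routine, not an existing function:

	static void
	xxdone(struct cam_periph *periph, union ccb *done_ccb)
	{
		int error;

		error = 0;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			/*
			 * Retry selection timeouts and unit attentions; let
			 * the generic handler decide about everything else.
			 */
			error = cam_periph_error(done_ccb, CAM_RETRY_SELTO,
			    SF_RETRY_UA);
			if (error == ERESTART) {
				/* A retry or recovery CCB was queued. */
				return;
			}
			/*
			 * cam_periph_error() only releases the device queue
			 * when it succeeds or retries, so drop any freeze
			 * that is still pending here.
			 */
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
				    /*relsim_flags*/0, /*openings*/0,
				    /*timeout*/0, /*getcount_only*/0);
		}

		/* ... complete the original request with 'error' ... */
		xpt_release_ccb(done_ccb);
	}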
2111 
2112 #define CAM_PERIPH_DEVD_MSG_SIZE	256
2113 
2114 static void
2115 cam_periph_devctl_notify(union ccb *ccb)
2116 {
2117 	struct cam_periph *periph;
2118 	struct ccb_getdev *cgd;
2119 	struct sbuf sb;
2120 	int serr, sk, asc, ascq;
2121 	char *sbmsg, *type;
2122 
2123 	sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT);
2124 	if (sbmsg == NULL)
2125 		return;
2126 
2127 	sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN);
2128 
2129 	periph = xpt_path_periph(ccb->ccb_h.path);
2130 	sbuf_printf(&sb, "device=%s%d ", periph->periph_name,
2131 	    periph->unit_number);
2132 
2133 	sbuf_cat(&sb, "serial=\"");
2134 	if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) {
2135 		xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path,
2136 		    CAM_PRIORITY_NORMAL);
2137 		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
2138 		xpt_action((union ccb *)cgd);
2139 
2140 		if (cgd->ccb_h.status == CAM_REQ_CMP)
2141 			sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len);
2142 		xpt_free_ccb((union ccb *)cgd);
2143 	}
2144 	sbuf_cat(&sb, "\" ");
2145 	sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status);
2146 
2147 	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
2148 	case CAM_CMD_TIMEOUT:
2149 		sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout);
2150 		type = "timeout";
2151 		break;
2152 	case CAM_SCSI_STATUS_ERROR:
2153 		sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status);
2154 		if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq))
2155 			sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ",
2156 			    serr, sk, asc, ascq);
2157 		type = "error";
2158 		break;
2159 	case CAM_ATA_STATUS_ERROR:
2160 		sbuf_cat(&sb, "RES=\"");
2161 		ata_res_sbuf(&ccb->ataio.res, &sb);
2162 		sbuf_cat(&sb, "\" ");
2163 		type = "error";
2164 		break;
2165 	case CAM_NVME_STATUS_ERROR:
2166 	{
2167 		struct ccb_nvmeio *n = &ccb->nvmeio;
2168 
2169 		sbuf_printf(&sb, "sc=\"%02x\" sct=\"%02x\" cdw0=\"%08x\" ",
2170 		    NVME_STATUS_GET_SC(n->cpl.status),
2171 		    NVME_STATUS_GET_SCT(n->cpl.status), n->cpl.cdw0);
2172 		type = "error";
2173 		break;
2174 	}
2175 	default:
2176 		type = "error";
2177 		break;
2178 	}
2179 
2180 
2181 	switch (ccb->ccb_h.func_code) {
2182 	case XPT_SCSI_IO:
2183 		sbuf_cat(&sb, "CDB=\"");
2184 		scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb);
2185 		sbuf_cat(&sb, "\" ");
2186 		break;
2187 	case XPT_ATA_IO:
2188 		sbuf_cat(&sb, "ACB=\"");
2189 		ata_cmd_sbuf(&ccb->ataio.cmd, &sb);
2190 		sbuf_cat(&sb, "\" ");
2191 		break;
2192 	case XPT_NVME_IO:
2193 	case XPT_NVME_ADMIN:
2194 	{
2195 		struct ccb_nvmeio *n = &ccb->nvmeio;
2196 		struct nvme_command *cmd = &n->cmd;
2197 
2198 		// XXX Likely should be nvme_cmd_sbuf
2199 		sbuf_printf(&sb, "opc=\"%02x\" fuse=\"%02x\" cid=\"%04x\" "
2200 		    "nsid=\"%08x\" cdw10=\"%08x\" cdw11=\"%08x\" cdw12=\"%08x\" "
2201 		    "cdw13=\"%08x\" cdw14=\"%08x\" cdw15=\"%08x\" ",
2202 		    cmd->opc, cmd->fuse, cmd->cid, cmd->nsid, cmd->cdw10,
2203 		    cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14, cmd->cdw15);
2204 		break;
2205 	}
2206 	default:
2207 		break;
2208 	}
2209 
2210 	if (sbuf_finish(&sb) == 0)
2211 		devctl_notify("CAM", "periph", type, sbuf_data(&sb));
2212 	sbuf_delete(&sb);
2213 	free(sbmsg, M_CAMPERIPH);
2214 }
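
The string built above is delivered to userland through devctl_notify() with
system "CAM", subsystem "periph" and a type of "timeout" or "error", so it can
be matched from devd(8).  A minimal devd.conf(5) sketch that logs command
timeouts; the rule and its priority are an assumption about local policy, not a
stock entry, and it relies on devd turning the key=value pairs emitted above
into variables such as $device and $cam_status:

	notify 10 {
		match "system"		"CAM";
		match "subsystem"	"periph";
		match "type"		"timeout";
		action "logger -p kern.warn 'CAM timeout on $device, status $cam_status'";
	};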
2215 
2216 /*
2217  * Sysctl to force an invalidation of the drive right now. Can be
2218  * called with CTLFLAG_MPSAFE since we take periph lock.
2219  */
2220 int
2221 cam_periph_invalidate_sysctl(SYSCTL_HANDLER_ARGS)
2222 {
2223 	struct cam_periph *periph;
2224 	int error, value;
2225 
2226 	periph = arg1;
2227 	value = 0;
2228 	error = sysctl_handle_int(oidp, &value, 0, req);
2229 	if (error != 0 || req->newptr == NULL || value != 1)
2230 		return (error);
2231 
2232 	cam_periph_lock(periph);
2233 	cam_periph_invalidate(periph);
2234 	cam_periph_unlock(periph);
2235 
2236 	return (0);
2237 }
2238
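
The handler above only becomes useful once a peripheral driver exposes it: it
expects the periph pointer as arg1 and triggers when 1 is written to the node.
A minimal registration sketch, modeled on how existing periph drivers hook up
their per-instance sysctl trees; softc->sysctl_ctx and softc->sysctl_tree are
assumed fields of a hypothetical driver soft-state structure:

	SYSCTL_ADD_PROC(&softc->sysctl_ctx,
	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "invalidate",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, periph, 0,
	    cam_periph_invalidate_sysctl, "I",
	    "Write 1 to invalidate the device immediately");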