/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>
#include <cam/cam_compat.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

#include <machine/md_var.h>	/* geometry translation */
#include <machine/stdarg.h>	/* for xpt_print below */

#include "opt_cam.h"

/*
 * This is the maximum number of high-powered commands (e.g. start unit)
 * that can be outstanding at any given time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* Data structures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");

/* Object for deferring XPT actions to a taskqueue */
struct xpt_task {
	struct task	task;
	void		*data1;
	uintptr_t	data2;
};

typedef enum {
	XPT_FLAG_OPEN		= 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags		flags;
	u_int32_t		xpt_generation;

	/* number of high-powered commands that can go through right now */
	STAILQ_HEAD(highpowerlist, ccb_hdr)	highpowerq;
	int			num_highpower;

	/* queue for handling async rescan requests. */
	TAILQ_HEAD(, ccb_hdr) ccb_scanq;
	int buses_to_config;
	int buses_config_done;

	/* Registered busses */
	TAILQ_HEAD(,cam_eb)	xpt_busses;
	u_int			bus_generation;

	struct intr_config_hook	*xpt_config_hook;

	int			boot_delay;
	struct callout 		boot_callout;

	struct mtx		xpt_topo_lock;
	struct mtx		xpt_lock;
};

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
           &xsoftc.boot_delay, 0, "Bus registration wait time");

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
static cam_simq_t cam_simq;
static struct mtx cam_simq_lock;

/* Pointers to software interrupt handlers */
static void *cambio_ih;

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;
static d_ioctl_t xptdoioctl;

static struct cdevsw xpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	xptopen,
	.d_close =	xptclose,
	.d_ioctl =	xptioctl,
	.d_name =	"xpt",
};

/* Storage for debugging data structures */
struct cam_path *cam_dpath;
u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
TUNABLE_INT("kern.cam.dflags", &cam_dflags);
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RW,
	&cam_dflags, 0, "Enabled debug flags");
u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
TUNABLE_INT("kern.cam.debug_delay", &cam_debug_delay);
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RW,
	&cam_debug_delay, 0, "Delay in us after each debug message");

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static int	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);


static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static void	 xpt_run_dev_allocq(struct cam_ed *device);
static void	 xpt_run_devq(struct cam_devq *devq);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_simq_timeout(void *arg) __unused;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
		    int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_et *target);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static void	 camisr(void *);
static void	 camisr_runqueue(void *);
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static void		xpt_finishconfig_task(void *context, int pending);
static void		xpt_dev_async_default(u_int32_t async_code,
					      struct cam_eb *bus,
					      struct cam_et *target,
					      struct cam_ed *device,
					      void *async_arg);
static struct cam_ed *	xpt_alloc_device_default(struct cam_eb *bus,
						 struct cam_et *target,
						 lun_id_t lun_id);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_queued(struct cam_ed *device);

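/*
 * Schedule the device on the controller's send queue if it has CCBs
 * queued, has device openings available, and is not frozen.  The device
 * is queued at the priority of its highest priority CCB.
 */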
static __inline int
xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
{
	int	retval;

	if ((dev->ccbq.queue.entries > 0) &&
	    (dev->ccbq.dev_openings > 0) &&
	    (dev->ccbq.queue.qfrozen_cnt == 0)) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&devq->send_queue,
				     &dev->devq_entry.pinfo,
				     CAMQ_GET_PRIO(&dev->ccbq.queue));
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_queued(struct cam_ed *device)
{
	return (device->devq_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static void
xpt_periph_init()
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("%s: can't do nonblocking access\n", devtoname(dev));
		return(ENODEV);
	}

	/* Mark ourselves open */
	mtx_lock(&xsoftc.xpt_lock);
	xsoftc.flags |= XPT_FLAG_OPEN;
	mtx_unlock(&xsoftc.xpt_lock);

	return(0);
}

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

	/* Mark ourselves closed */
	mtx_lock(&xsoftc.xpt_lock);
	xsoftc.flags &= ~XPT_FLAG_OPEN;
	mtx_unlock(&xsoftc.xpt_lock);

	return(0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
		error = cam_compat_ioctl(dev, &cmd, &addr, &flag, td);
		if (error == EAGAIN)
			return (xptdoioctl(dev, cmd, addr, flag, td));
	}
	return (error);
}

static int
xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	error = 0;

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
	 * in the CAM spec.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;
		struct cam_eb *bus;

		inccb = (union ccb *)addr;

		bus = xpt_find_bus(inccb->ccb_h.path_id);
		if (bus == NULL)
			return (EINVAL);

		switch (inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		case XPT_SCAN_TGT:
			if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		default:
			break;
		}

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:
		case XPT_SCAN_TGT:

			ccb = xpt_alloc_ccb();

			CAM_SIM_LOCK(bus->sim);

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				CAM_SIM_UNLOCK(bus->sim);
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			CAM_SIM_UNLOCK(bus->sim);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			CAM_SIM_LOCK(bus->sim);

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				CAM_SIM_UNLOCK(bus->sim);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			CAM_SIM_UNLOCK(bus->sim);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
			    CAM_DATA_VADDR) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, so we can send it on directly.
			 */
			CAM_SIM_LOCK(xpt_path_sim(xpt_periph->path));
			xpt_action(inccb);
			CAM_SIM_UNLOCK(xpt_path_sim(xpt_periph->path));

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		xpt_release_bus(bus);
		break;
	}
	/*
	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
	 * with the peripheral driver name and unit name filled in.  The other
	 * fields don't really matter as input.  The passthrough driver name
	 * ("pass") and unit number are passed back in the ccb.  The current
	 * device generation number, the index into the device peripheral
	 * driver list, and the status are also passed back.  Note that
	 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
	 * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change since we look at the whole thing in one pass, and
	 * we do it with lock protection.
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		u_int unit;
		u_int cur_generation;
		int base_periph_found;
		int splbreaknum;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to drop our lock protection to
		 * give the software interrupt handler a chance to run.
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)addr;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		xpt_lock_buses();
ptstartover:
		cur_generation = xsoftc.xpt_generation;

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			xpt_unlock_buses();
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				xpt_unlock_buses();
				xpt_lock_buses();
				splbreaknum = 100;
				if (cur_generation != xsoftc.xpt_generation)
				       goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass\" in "
				       "your kernel config file\n");
			}
		}
		xpt_unlock_buses();
		break;
		}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}

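/*
 * Module event handler for the "cam" module: run xpt_init() on load.
 * CAM cannot be unloaded, so MOD_UNLOAD always fails with EBUSY.
 */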
static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		if ((error = xpt_init(NULL)) != 0)
			return (error);
		break;
	case MOD_UNLOAD:
		return EBUSY;
	default:
		return EOPNOTSUPP;
	}

	return 0;
}

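/*
 * Completion handler for rescan CCBs queued via xpt_rescan().  If the
 * original caller supplied no completion callback (stashed in ppriv_ptr1),
 * free the path and CCB here; otherwise restore and invoke that callback.
 * Either way, release the boot hold taken for the scan.
 */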
static void
xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
{

	if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
		xpt_free_path(done_ccb->ccb_h.path);
		xpt_free_ccb(done_ccb);
	} else {
		done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
	}
	xpt_release_boot();
}

/* thread to handle bus rescans */
static void
xpt_scanner_thread(void *dummy)
{
	union ccb	*ccb;
	struct cam_sim	*sim;

	xpt_lock_buses();
	for (;;) {
		if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
			msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
			       "ccb_scanq", 0);
		if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
			xpt_unlock_buses();

			sim = ccb->ccb_h.path->bus->sim;
			CAM_SIM_LOCK(sim);
			xpt_action(ccb);
			CAM_SIM_UNLOCK(sim);

			xpt_lock_buses();
		}
	}
}

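/*
 * Queue an asynchronous rescan request for the scanner thread.  The scan
 * depth (bus, target, or lun) is derived from how much of the CCB's path
 * is wildcarded, and the caller's completion callback is stashed in
 * ppriv_ptr1 to be chained through xpt_rescan_done().
 */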
void
xpt_rescan(union ccb *ccb)
{
	struct ccb_hdr *hdr;

	/* Prepare request */
	if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_TGT;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_LUN;
	else {
		xpt_print(ccb->ccb_h.path, "illegal scan path\n");
		xpt_free_path(ccb->ccb_h.path);
		xpt_free_ccb(ccb);
		return;
	}
	ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
	ccb->ccb_h.cbfcnp = xpt_rescan_done;
	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
	/* Don't make duplicate entries for the same paths. */
	xpt_lock_buses();
	if (ccb->ccb_h.ppriv_ptr1 == NULL) {
		TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
			if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
				wakeup(&xsoftc.ccb_scanq);
				xpt_unlock_buses();
				xpt_print(ccb->ccb_h.path, "rescan already queued\n");
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
				return;
			}
		}
	}
	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	xsoftc.buses_to_config++;
	wakeup(&xsoftc.ccb_scanq);
	xpt_unlock_buses();
}

/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;

	TAILQ_INIT(&xsoftc.xpt_busses);
	TAILQ_INIT(&cam_simq);
	TAILQ_INIT(&xsoftc.ccb_scanq);
	STAILQ_INIT(&xsoftc.highpowerq);
	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

	mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
	mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
	mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);

#ifdef CAM_BOOT_DELAY
	/*
	 * Override this value at compile time to assist our users
	 * who don't use loader to boot a kernel.
	 */
	xsoftc.boot_delay = CAM_BOOT_DELAY;
#endif
	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*mtx*/&xsoftc.xpt_lock,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	if (xpt_sim == NULL)
		return (ENOMEM);

	mtx_lock(&xsoftc.xpt_lock);
	if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
		mtx_unlock(&xsoftc.xpt_lock);
		printf("xpt_init: xpt_bus_register failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		mtx_unlock(&xsoftc.xpt_lock);
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, xpt_sim);
	xpt_free_path(path);
	mtx_unlock(&xsoftc.xpt_lock);
	/* Install our software interrupt handlers */
	swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih);
	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xsoftc.xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_CAMXPT, M_NOWAIT | M_ZERO);
	if (xsoftc.xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return (ENOMEM);
	}
	xsoftc.xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
		free (xsoftc.xpt_config_hook, M_CAMXPT);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	return (0);
}

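/*
 * Registration callback for the xpt peripheral itself, invoked via
 * cam_periph_alloc() from xpt_init().  Wires the new periph into the
 * xpt SIM's softc and records it in the global xpt_periph pointer.
 */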
static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	struct cam_sim *xpt_sim;

	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	xpt_sim = (struct cam_sim *)arg;
	xpt_sim->softc = periph;
	xpt_periph = periph;
	periph->softc = NULL;

	return(CAM_REQ_CMP);
}

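/*
 * Attach a peripheral to its device's periph list, growing the device's
 * driver queue so the new periph can be scheduled, and bump the topology
 * generation count so concurrent list traversals can detect the change.
 */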
int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	mtx_assert(periph->sim->mtx, MA_OWNED);

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);
	}

	xpt_lock_buses();
	xsoftc.xpt_generation++;
	xpt_unlock_buses();

	return (status);
}

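/*
 * Detach a peripheral from its device's periph list and release its slot
 * in the device's driver queue.  The caller indicates whether it already
 * holds the topology lock.
 */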
void
xpt_remove_periph(struct cam_periph *periph, int topology_lock_held)
{
	struct cam_ed *device;

	mtx_assert(periph->sim->mtx, MA_OWNED);

	device = periph->path->device;

	if (device != NULL) {
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
	}

	if (topology_lock_held == 0)
		xpt_lock_buses();

	xsoftc.xpt_generation++;

	if (topology_lock_held == 0)
		xpt_unlock_buses();
}


void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	struct	cam_path *path = periph->path;

	mtx_assert(periph->sim->mtx, MA_OWNED);

	printf("%s%d at %s%d bus %d scbus%d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->bus->path_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	if (path->device->protocol == PROTO_SCSI)
		scsi_print_inquiry(&path->device->inq_data);
	else if (path->device->protocol == PROTO_ATA ||
	    path->device->protocol == PROTO_SATAPM)
		ata_print_ident(&path->device->ident_data);
	else if (path->device->protocol == PROTO_SEMB)
		semb_print_ident(
		    (struct sep_identify_data *)&path->device->ident_data);
	else
		printf("Unknown protocol device\n");
	if (bootverbose && path->device->serial_num_len > 0) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	/* Announce transport details. */
	(*(path->bus->xport->announce))(periph);
	/* Announce command queueing. */
	if (path->device->inq_flags & SID_CmdQue
	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("%s%d: Command Queueing enabled\n",
		       periph->periph_name, periph->unit_number);
	}
	/* Announce the caller's details if any were passed in. */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
}

void
xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
{
	if (quirks != 0) {
		printf("%s%d: quirks=0x%b\n", periph->periph_name,
		    periph->unit_number, quirks, bit_string);
	}
}

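/*
 * Look up a GEOM attribute ("GEOM::ident", "GEOM::physpath", or
 * "GEOM::lunid") for the device on the given path using an XPT_DEV_ADVINFO
 * CCB, formatting the result into the caller's buffer.  Returns 0 on
 * success, an errno on failure, or -1 for an unrecognized attribute.
 */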
int
xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
{
	int ret = -1, l;
	struct ccb_dev_advinfo cdai;
	struct scsi_vpd_id_descriptor *idd;

	mtx_assert(path->bus->sim->mtx, MA_OWNED);

	memset(&cdai, 0, sizeof(cdai));
	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
	cdai.bufsiz = len;

	if (!strcmp(attr, "GEOM::ident"))
		cdai.buftype = CDAI_TYPE_SERIAL_NUM;
	else if (!strcmp(attr, "GEOM::physpath"))
		cdai.buftype = CDAI_TYPE_PHYS_PATH;
	else if (!strcmp(attr, "GEOM::lunid")) {
		cdai.buftype = CDAI_TYPE_SCSI_DEVID;
		cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
	} else
		goto out;

	cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
	if (cdai.buf == NULL) {
		ret = ENOMEM;
		goto out;
	}
	xpt_action((union ccb *)&cdai); /* can only be synchronous */
	if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
	if (cdai.provsiz == 0)
		goto out;
	if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) {
		idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
		    cdai.provsiz, scsi_devid_is_lun_naa);
		if (idd == NULL)
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_eui64);
		if (idd == NULL)
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_t10);
		if (idd == NULL)
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_name);
		if (idd == NULL)
			goto out;
		ret = 0;
		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII ||
		    (idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) {
			l = strnlen(idd->identifier, idd->length);
			if (l < len) {
				bcopy(idd->identifier, buf, l);
				buf[l] = 0;
			} else
				ret = EFAULT;
		} else {
			if (idd->length * 2 < len) {
				for (l = 0; l < idd->length; l++)
					sprintf(buf + l * 2, "%02x",
					    idd->identifier[l]);
			} else
				ret = EFAULT;
		}
	} else {
		ret = 0;
		if (strlcpy(buf, cdai.buf, len) >= len)
			ret = EFAULT;
	}

out:
	if (cdai.buf != NULL)
		free(cdai.buf, M_CAMXPT);
	return ret;
}

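/*
 * Match a single bus against any number of match patterns.  The returned
 * flags tell the caller whether to copy this bus into the result buffer
 * and whether to descend to the target level of the EDT.
 */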
static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * bus node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

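/*
 * Match a single device against any number of match patterns.  Inquiry
 * and device-ID patterns are mutually exclusive; the returned flags tell
 * the caller whether to copy this device out and whether to descend to
 * the peripheral level of the EDT.
 */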
static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;
		struct scsi_vpd_device_id *device_id_page;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/* Error out if mutually exclusive options are specified. */
		if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
		 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
			return(DM_RET_ERROR);

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY)
			goto copy_dev_node;

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->data.inq_pat,
				    1, sizeof(cur_pattern->data.inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		device_id_page = (struct scsi_vpd_device_id *)device->device_id;
		if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
		 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
		  || scsi_devid_match((uint8_t *)device_id_page->desc_list,
				      device->device_id_len
				    - SVPD_DEVICE_ID_HDR_LEN,
				      cur_pattern->data.devid_pat.id,
				      cur_pattern->data.devid_pat.id_len) != 0))
			continue;

copy_dev_node:
		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun id's, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}

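/*
 * Per-bus callback for an EDT match traversal: match this bus against
 * the user's patterns, copy it out if requested (saving our position and
 * returning CAM_DEV_MATCH_MORE if the result buffer fills), then walk
 * this bus's targets unless told to stop.
 */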
1531 static int
1532 xptedtbusfunc(struct cam_eb *bus, void *arg)
1533 {
1534 	struct ccb_dev_match *cdm;
1535 	dev_match_ret retval;
1536 
1537 	cdm = (struct ccb_dev_match *)arg;
1538 
1539 	/*
1540 	 * If our position is for something deeper in the tree, that means
1541 	 * that we've already seen this node.  So, we keep going down.
1542 	 */
1543 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1544 	 && (cdm->pos.cookie.bus == bus)
1545 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1546 	 && (cdm->pos.cookie.target != NULL))
1547 		retval = DM_RET_DESCEND;
1548 	else
1549 		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1550 
1551 	/*
1552 	 * If we got an error, bail out of the search.
1553 	 */
1554 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1555 		cdm->status = CAM_DEV_MATCH_ERROR;
1556 		return(0);
1557 	}
1558 
1559 	/*
1560 	 * If the copy flag is set, copy this bus out.
1561 	 */
1562 	if (retval & DM_RET_COPY) {
1563 		int spaceleft, j;
1564 
1565 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1566 			sizeof(struct dev_match_result));
1567 
1568 		/*
1569 		 * If we don't have enough space to put in another
1570 		 * match result, save our position and tell the
1571 		 * user there are more devices to check.
1572 		 */
1573 		if (spaceleft < sizeof(struct dev_match_result)) {
1574 			bzero(&cdm->pos, sizeof(cdm->pos));
1575 			cdm->pos.position_type =
1576 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1577 
1578 			cdm->pos.cookie.bus = bus;
1579 			cdm->pos.generations[CAM_BUS_GENERATION]=
1580 				xsoftc.bus_generation;
1581 			cdm->status = CAM_DEV_MATCH_MORE;
1582 			return(0);
1583 		}
1584 		j = cdm->num_matches;
1585 		cdm->num_matches++;
1586 		cdm->matches[j].type = DEV_MATCH_BUS;
1587 		cdm->matches[j].result.bus_result.path_id = bus->path_id;
1588 		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1589 		cdm->matches[j].result.bus_result.unit_number =
1590 			bus->sim->unit_number;
1591 		strncpy(cdm->matches[j].result.bus_result.dev_name,
1592 			bus->sim->sim_name, DEV_IDLEN);
1593 	}
1594 
1595 	/*
1596 	 * If the user is only interested in busses, there's no
1597 	 * reason to descend to the next level in the tree.
1598 	 */
1599 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1600 		return(1);
1601 
1602 	/*
1603 	 * If there is a target generation recorded, check it to
1604 	 * make sure the target list hasn't changed.
1605 	 */
1606 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1607 	 && (bus == cdm->pos.cookie.bus)
1608 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1609 	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
1610 	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
1611 	     bus->generation)) {
1612 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1613 		return(0);
1614 	}
1615 
1616 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1617 	 && (cdm->pos.cookie.bus == bus)
1618 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1619 	 && (cdm->pos.cookie.target != NULL))
1620 		return(xpttargettraverse(bus,
1621 					(struct cam_et *)cdm->pos.cookie.target,
1622 					 xptedttargetfunc, arg));
1623 	else
1624 		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
1625 }
1626 
1627 static int
1628 xptedttargetfunc(struct cam_et *target, void *arg)
1629 {
1630 	struct ccb_dev_match *cdm;
1631 
1632 	cdm = (struct ccb_dev_match *)arg;
1633 
1634 	/*
1635 	 * If there is a device list generation recorded, check it to
1636 	 * make sure the device list hasn't changed.
1637 	 */
1638 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1639 	 && (cdm->pos.cookie.bus == target->bus)
1640 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1641 	 && (cdm->pos.cookie.target == target)
1642 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1643 	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
1644 	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
1645 	     target->generation)) {
1646 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1647 		return(0);
1648 	}
1649 
1650 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1651 	 && (cdm->pos.cookie.bus == target->bus)
1652 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1653 	 && (cdm->pos.cookie.target == target)
1654 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1655 	 && (cdm->pos.cookie.device != NULL))
1656 		return(xptdevicetraverse(target,
1657 					(struct cam_ed *)cdm->pos.cookie.device,
1658 					 xptedtdevicefunc, arg));
1659 	else
1660 		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
1661 }
1662 
1663 static int
1664 xptedtdevicefunc(struct cam_ed *device, void *arg)
1665 {
1666 
1667 	struct ccb_dev_match *cdm;
1668 	dev_match_ret retval;
1669 
1670 	cdm = (struct ccb_dev_match *)arg;
1671 
1672 	/*
1673 	 * If our position is for something deeper in the tree, that means
1674 	 * that we've already seen this node.  So, we keep going down.
1675 	 */
1676 	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1677 	 && (cdm->pos.cookie.device == device)
1678 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1679 	 && (cdm->pos.cookie.periph != NULL))
1680 		retval = DM_RET_DESCEND;
1681 	else
1682 		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
1683 					device);
1684 
1685 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1686 		cdm->status = CAM_DEV_MATCH_ERROR;
1687 		return(0);
1688 	}
1689 
1690 	/*
1691 	 * If the copy flag is set, copy this device out.
1692 	 */
1693 	if (retval & DM_RET_COPY) {
1694 		int spaceleft, j;
1695 
1696 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1697 			sizeof(struct dev_match_result));
1698 
1699 		/*
1700 		 * If we don't have enough space to put in another
1701 		 * match result, save our position and tell the
1702 		 * user there are more devices to check.
1703 		 */
1704 		if (spaceleft < sizeof(struct dev_match_result)) {
1705 			bzero(&cdm->pos, sizeof(cdm->pos));
1706 			cdm->pos.position_type =
1707 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1708 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
1709 
1710 			cdm->pos.cookie.bus = device->target->bus;
1711 			cdm->pos.generations[CAM_BUS_GENERATION]=
1712 				xsoftc.bus_generation;
1713 			cdm->pos.cookie.target = device->target;
1714 			cdm->pos.generations[CAM_TARGET_GENERATION] =
1715 				device->target->bus->generation;
1716 			cdm->pos.cookie.device = device;
1717 			cdm->pos.generations[CAM_DEV_GENERATION] =
1718 				device->target->generation;
1719 			cdm->status = CAM_DEV_MATCH_MORE;
1720 			return(0);
1721 		}
1722 		j = cdm->num_matches;
1723 		cdm->num_matches++;
1724 		cdm->matches[j].type = DEV_MATCH_DEVICE;
1725 		cdm->matches[j].result.device_result.path_id =
1726 			device->target->bus->path_id;
1727 		cdm->matches[j].result.device_result.target_id =
1728 			device->target->target_id;
1729 		cdm->matches[j].result.device_result.target_lun =
1730 			device->lun_id;
1731 		cdm->matches[j].result.device_result.protocol =
1732 			device->protocol;
1733 		bcopy(&device->inq_data,
1734 		      &cdm->matches[j].result.device_result.inq_data,
1735 		      sizeof(struct scsi_inquiry_data));
1736 		bcopy(&device->ident_data,
1737 		      &cdm->matches[j].result.device_result.ident_data,
1738 		      sizeof(struct ata_params));
1739 
1740 		/* Let the user know whether this device is unconfigured */
1741 		if (device->flags & CAM_DEV_UNCONFIGURED)
1742 			cdm->matches[j].result.device_result.flags =
1743 				DEV_RESULT_UNCONFIGURED;
1744 		else
1745 			cdm->matches[j].result.device_result.flags =
1746 				DEV_RESULT_NOFLAG;
1747 	}
1748 
1749 	/*
1750 	 * If the user isn't interested in peripherals, don't descend
1751 	 * the tree any further.
1752 	 */
1753 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1754 		return(1);
1755 
1756 	/*
1757 	 * If there is a peripheral list generation recorded, make sure
1758 	 * it hasn't changed.
1759 	 */
1760 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1761 	 && (device->target->bus == cdm->pos.cookie.bus)
1762 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1763 	 && (device->target == cdm->pos.cookie.target)
1764 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1765 	 && (device == cdm->pos.cookie.device)
1766 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1767 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
1768 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1769 	     device->generation)) {
1770 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1771 		return(0);
1772 	}
1773 
1774 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1775 	 && (cdm->pos.cookie.bus == device->target->bus)
1776 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1777 	 && (cdm->pos.cookie.target == device->target)
1778 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1779 	 && (cdm->pos.cookie.device == device)
1780 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1781 	 && (cdm->pos.cookie.periph != NULL))
1782 		return(xptperiphtraverse(device,
1783 				(struct cam_periph *)cdm->pos.cookie.periph,
1784 				xptedtperiphfunc, arg));
1785 	else
1786 		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
1787 }
1788 
1789 static int
1790 xptedtperiphfunc(struct cam_periph *periph, void *arg)
1791 {
1792 	struct ccb_dev_match *cdm;
1793 	dev_match_ret retval;
1794 
1795 	cdm = (struct ccb_dev_match *)arg;
1796 
1797 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1798 
1799 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1800 		cdm->status = CAM_DEV_MATCH_ERROR;
1801 		return(0);
1802 	}
1803 
1804 	/*
1805 	 * If the copy flag is set, copy this peripheral out.
1806 	 */
1807 	if (retval & DM_RET_COPY) {
1808 		int spaceleft, j;
1809 
1810 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1811 			sizeof(struct dev_match_result));
1812 
1813 		/*
1814 		 * If we don't have enough space to put in another
1815 		 * match result, save our position and tell the
1816 		 * user there are more devices to check.
1817 		 */
1818 		if (spaceleft < sizeof(struct dev_match_result)) {
1819 			bzero(&cdm->pos, sizeof(cdm->pos));
1820 			cdm->pos.position_type =
1821 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1822 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1823 				CAM_DEV_POS_PERIPH;
1824 
1825 			cdm->pos.cookie.bus = periph->path->bus;
1826 			cdm->pos.generations[CAM_BUS_GENERATION] =
1827 				xsoftc.bus_generation;
1828 			cdm->pos.cookie.target = periph->path->target;
1829 			cdm->pos.generations[CAM_TARGET_GENERATION] =
1830 				periph->path->bus->generation;
1831 			cdm->pos.cookie.device = periph->path->device;
1832 			cdm->pos.generations[CAM_DEV_GENERATION] =
1833 				periph->path->target->generation;
1834 			cdm->pos.cookie.periph = periph;
1835 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1836 				periph->path->device->generation;
1837 			cdm->status = CAM_DEV_MATCH_MORE;
1838 			return(0);
1839 		}
1840 
1841 		j = cdm->num_matches;
1842 		cdm->num_matches++;
1843 		cdm->matches[j].type = DEV_MATCH_PERIPH;
1844 		cdm->matches[j].result.periph_result.path_id =
1845 			periph->path->bus->path_id;
1846 		cdm->matches[j].result.periph_result.target_id =
1847 			periph->path->target->target_id;
1848 		cdm->matches[j].result.periph_result.target_lun =
1849 			periph->path->device->lun_id;
1850 		cdm->matches[j].result.periph_result.unit_number =
1851 			periph->unit_number;
1852 		strncpy(cdm->matches[j].result.periph_result.periph_name,
1853 			periph->periph_name, DEV_IDLEN);
1854 	}
1855 
1856 	return(1);
1857 }
1858 
1859 static int
1860 xptedtmatch(struct ccb_dev_match *cdm)
1861 {
1862 	int ret;
1863 
1864 	cdm->num_matches = 0;
1865 
1866 	/*
1867 	 * Check the bus list generation.  If it has changed, the user
1868 	 * needs to reset everything and start over.
1869 	 */
1870 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1871 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
1872 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
1873 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1874 		return(0);
1875 	}
1876 
1877 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1878 	 && (cdm->pos.cookie.bus != NULL))
1879 		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
1880 				     xptedtbusfunc, cdm);
1881 	else
1882 		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
1883 
1884 	/*
1885 	 * If we get back 0, that means that we had to stop before fully
1886 	 * traversing the EDT.  It also means that one of the subroutines
1887 	 * has set the status field to the proper value.  If we get back 1,
1888 	 * we've fully traversed the EDT and copied out any matching entries.
1889 	 */
1890 	if (ret == 1)
1891 		cdm->status = CAM_DEV_MATCH_LAST;
1892 
1893 	return(ret);
1894 }
1895 
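/*
 * Example (illustrative only, not compiled here): a minimal userland
 * sketch of the resume protocol implemented above, loosely adapted from
 * camcontrol(8).  When a pass runs out of match buffer space, the
 * traversal code saves its place in cdm.pos and returns
 * CAM_DEV_MATCH_MORE; reissuing the same CCB resumes the walk where it
 * left off.  The buffer size below is an arbitrary assumption.
 *
 *	union ccb ccb;
 *	int fd, i;
 *
 *	fd = open(XPT_DEVICE, O_RDWR);		(XPT_DEVICE is /dev/xpt0)
 *	bzero(&ccb, sizeof(union ccb));
 *	ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
 *	ccb.cdm.match_buf_len = 100 * sizeof(struct dev_match_result);
 *	ccb.cdm.matches = malloc(ccb.cdm.match_buf_len);
 *	ccb.cdm.num_patterns = 0;	(no patterns: match everything)
 *	ccb.cdm.pattern_buf_len = 0;
 *	do {
 *		if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
 *			err(1, "CAMIOCOMMAND");
 *		for (i = 0; i < ccb.cdm.num_matches; i++)
 *			... consume ccb.cdm.matches[i] ...
 *	} while (ccb.ccb_h.status == CAM_REQ_CMP &&
 *	    ccb.cdm.status == CAM_DEV_MATCH_MORE);
 */
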
1896 static int
1897 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
1898 {
1899 	struct ccb_dev_match *cdm;
1900 
1901 	cdm = (struct ccb_dev_match *)arg;
1902 
1903 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1904 	 && (cdm->pos.cookie.pdrv == pdrv)
1905 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1906 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
1907 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1908 	     (*pdrv)->generation)) {
1909 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1910 		return(0);
1911 	}
1912 
1913 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1914 	 && (cdm->pos.cookie.pdrv == pdrv)
1915 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1916 	 && (cdm->pos.cookie.periph != NULL))
1917 		return(xptpdperiphtraverse(pdrv,
1918 				(struct cam_periph *)cdm->pos.cookie.periph,
1919 				xptplistperiphfunc, arg));
1920 	else
1921 		return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
1922 }
1923 
1924 static int
1925 xptplistperiphfunc(struct cam_periph *periph, void *arg)
1926 {
1927 	struct ccb_dev_match *cdm;
1928 	dev_match_ret retval;
1929 
1930 	cdm = (struct ccb_dev_match *)arg;
1931 
1932 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1933 
1934 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1935 		cdm->status = CAM_DEV_MATCH_ERROR;
1936 		return(0);
1937 	}
1938 
1939 	/*
1940 	 * If the copy flag is set, copy this peripheral out.
1941 	 */
1942 	if (retval & DM_RET_COPY) {
1943 		int spaceleft, j;
1944 
1945 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1946 			sizeof(struct dev_match_result));
1947 
1948 		/*
1949 		 * If we don't have enough space to put in another
1950 		 * match result, save our position and tell the
1951 		 * user there are more devices to check.
1952 		 */
1953 		if (spaceleft < sizeof(struct dev_match_result)) {
1954 			struct periph_driver **pdrv;
1955 
1956 			pdrv = NULL;
1957 			bzero(&cdm->pos, sizeof(cdm->pos));
1958 			cdm->pos.position_type =
1959 				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
1960 				CAM_DEV_POS_PERIPH;
1961 
1962 			/*
1963 			 * This may look a bit nonsensical, but it is
1964 			 * actually quite logical.  There are very few
1965 			 * peripheral drivers, and bloating every peripheral
1966 			 * structure with a pointer back to its parent
1967 			 * peripheral driver linker set entry would cost
1968 			 * more in the long run than doing this quick lookup.
1969 			 */
1970 			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
1971 				if (strcmp((*pdrv)->driver_name,
1972 				    periph->periph_name) == 0)
1973 					break;
1974 			}
1975 
1976 			if (*pdrv == NULL) {
1977 				cdm->status = CAM_DEV_MATCH_ERROR;
1978 				return(0);
1979 			}
1980 
1981 			cdm->pos.cookie.pdrv = pdrv;
1982 			/*
1983 			 * The periph generation slot does double duty, as
1984 			 * does the periph pointer slot.  They are used for
1985 			 * both edt and pdrv lookups and positioning.
1986 			 */
1987 			cdm->pos.cookie.periph = periph;
1988 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1989 				(*pdrv)->generation;
1990 			cdm->status = CAM_DEV_MATCH_MORE;
1991 			return(0);
1992 		}
1993 
1994 		j = cdm->num_matches;
1995 		cdm->num_matches++;
1996 		cdm->matches[j].type = DEV_MATCH_PERIPH;
1997 		cdm->matches[j].result.periph_result.path_id =
1998 			periph->path->bus->path_id;
1999 
2000 		/*
2001 		 * The transport layer peripheral doesn't have a target or
2002 		 * lun.
2003 		 */
2004 		if (periph->path->target)
2005 			cdm->matches[j].result.periph_result.target_id =
2006 				periph->path->target->target_id;
2007 		else
2008 			cdm->matches[j].result.periph_result.target_id = -1;
2009 
2010 		if (periph->path->device)
2011 			cdm->matches[j].result.periph_result.target_lun =
2012 				periph->path->device->lun_id;
2013 		else
2014 			cdm->matches[j].result.periph_result.target_lun = -1;
2015 
2016 		cdm->matches[j].result.periph_result.unit_number =
2017 			periph->unit_number;
2018 		strncpy(cdm->matches[j].result.periph_result.periph_name,
2019 			periph->periph_name, DEV_IDLEN);
2020 	}
2021 
2022 	return(1);
2023 }
2024 
2025 static int
2026 xptperiphlistmatch(struct ccb_dev_match *cdm)
2027 {
2028 	int ret;
2029 
2030 	cdm->num_matches = 0;
2031 
2032 	/*
2033 	 * At the analogous point in the EDT traversal function, we check
2034 	 * the bus list generation to make sure that no busses have been
2035 	 * added or removed since the user last sent an XPT_DEV_MATCH ccb
2036 	 * through.  For this peripheral driver list traversal, however,
2037 	 * we don't have to worry about new peripheral driver types coming
2038 	 * or going; they're in a linker set, and therefore can't change
2039 	 * without a recompile.
2040 	 */
2041 
2042 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2043 	 && (cdm->pos.cookie.pdrv != NULL))
2044 		ret = xptpdrvtraverse(
2045 				(struct periph_driver **)cdm->pos.cookie.pdrv,
2046 				xptplistpdrvfunc, cdm);
2047 	else
2048 		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2049 
2050 	/*
2051 	 * If we get back 0, that means that we had to stop before fully
2052 	 * traversing the peripheral driver tree.  It also means that one of
2053 	 * the subroutines has set the status field to the proper value.  If
2054 	 * we get back 1, we've fully traversed the peripheral driver list
2055 	 * and copied out any matching entries.
2056 	 */
2057 	if (ret == 1)
2058 		cdm->status = CAM_DEV_MATCH_LAST;
2059 
2060 	return(ret);
2061 }
2062 
2063 static int
2064 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2065 {
2066 	struct cam_eb *bus, *next_bus;
2067 	int retval;
2068 
2069 	retval = 1;
2070 
2071 	xpt_lock_buses();
2072 	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
2073 	     bus != NULL;
2074 	     bus = next_bus) {
2075 
2076 		bus->refcount++;
2077 
2078 		/*
2079 		 * XXX The locking here is obviously very complex.  We
2080 		 * should work to simplify it.
2081 		 */
2082 		xpt_unlock_buses();
2083 		CAM_SIM_LOCK(bus->sim);
2084 		retval = tr_func(bus, arg);
2085 		CAM_SIM_UNLOCK(bus->sim);
2086 
2087 		xpt_lock_buses();
2088 		next_bus = TAILQ_NEXT(bus, links);
2089 		xpt_unlock_buses();
2090 
2091 		xpt_release_bus(bus);
2092 
2093 		if (retval == 0)
2094 			return(retval);
2095 		xpt_lock_buses();
2096 	}
2097 	xpt_unlock_buses();
2098 
2099 	return(retval);
2100 }
2101 
2102 static int
2103 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2104 		  xpt_targetfunc_t *tr_func, void *arg)
2105 {
2106 	struct cam_et *target, *next_target;
2107 	int retval;
2108 
2109 	mtx_assert(bus->sim->mtx, MA_OWNED);
2110 	retval = 1;
2111 	for (target = (start_target ? start_target :
2112 		       TAILQ_FIRST(&bus->et_entries));
2113 	     target != NULL; target = next_target) {
2114 
2115 		target->refcount++;
2116 
2117 		retval = tr_func(target, arg);
2118 
2119 		next_target = TAILQ_NEXT(target, links);
2120 
2121 		xpt_release_target(target);
2122 
2123 		if (retval == 0)
2124 			return(retval);
2125 	}
2126 
2127 	return(retval);
2128 }
2129 
2130 static int
2131 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2132 		  xpt_devicefunc_t *tr_func, void *arg)
2133 {
2134 	struct cam_ed *device, *next_device;
2135 	int retval;
2136 
2137 	mtx_assert(target->bus->sim->mtx, MA_OWNED);
2138 	retval = 1;
2139 	for (device = (start_device ? start_device :
2140 		       TAILQ_FIRST(&target->ed_entries));
2141 	     device != NULL;
2142 	     device = next_device) {
2143 
2144 		/*
2145 		 * Hold a reference so the current device does not go away
2146 		 * on us.
2147 		 */
2148 		device->refcount++;
2149 
2150 		retval = tr_func(device, arg);
2151 
2152 		/*
2153 		 * Grab our next pointer before we release the current
2154 		 * device.
2155 		 */
2156 		next_device = TAILQ_NEXT(device, links);
2157 
2158 		xpt_release_device(device);
2159 
2160 		if (retval == 0)
2161 			return(retval);
2162 	}
2163 
2164 	return(retval);
2165 }
2166 
2167 static int
2168 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2169 		  xpt_periphfunc_t *tr_func, void *arg)
2170 {
2171 	struct cam_periph *periph, *next_periph;
2172 	int retval;
2173 
2174 	retval = 1;
2175 
2176 	mtx_assert(device->sim->mtx, MA_OWNED);
2177 	xpt_lock_buses();
2178 	for (periph = (start_periph ? start_periph :
2179 		       SLIST_FIRST(&device->periphs));
2180 	     periph != NULL;
2181 	     periph = next_periph) {
2182 
2184 		/*
2185 		 * In this case, we want to show peripherals that have been
2186 		 * invalidated, but not peripherals that are scheduled to
2187 		 * be freed.  So instead of calling cam_periph_acquire(),
2188 		 * which will fail if the periph has been invalidated, we
2189 		 * just check for the free flag here.  If it is in the
2190 		 * process of being freed, we skip to the next periph.
2191 		 */
2192 		if (periph->flags & CAM_PERIPH_FREE) {
2193 			next_periph = SLIST_NEXT(periph, periph_links);
2194 			continue;
2195 		}
2196 
2197 		/*
2198 		 * Acquire a reference to this periph while we call the
2199 		 * traversal function, so it can't go away.
2200 		 */
2201 		periph->refcount++;
2202 
2203 		retval = tr_func(periph, arg);
2204 
2205 		/*
2206 		 * Grab the next peripheral before we release this one, so
2207 		 * our next pointer is still valid.
2208 		 */
2209 		next_periph = SLIST_NEXT(periph, periph_links);
2210 
2211 		cam_periph_release_locked_buses(periph);
2212 
2213 		if (retval == 0)
2214 			goto bailout_done;
2215 	}
2216 
2217 bailout_done:
2218 
2219 	xpt_unlock_buses();
2220 
2221 	return(retval);
2222 }
2223 
2224 static int
2225 xptpdrvtraverse(struct periph_driver **start_pdrv,
2226 		xpt_pdrvfunc_t *tr_func, void *arg)
2227 {
2228 	struct periph_driver **pdrv;
2229 	int retval;
2230 
2231 	retval = 1;
2232 
2233 	/*
2234 	 * We don't traverse the peripheral driver list like we do the
2235 	 * other lists, because it is a linker set, and therefore cannot be
2236 	 * changed during runtime.  If the peripheral driver list is ever
2237 	 * re-done to be something other than a linker set (i.e. it can
2238 	 * change while the system is running), the list traversal should
2239 	 * be modified to work like the other traversal functions.
2240 	 */
2241 	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2242 	     *pdrv != NULL; pdrv++) {
2243 		retval = tr_func(pdrv, arg);
2244 
2245 		if (retval == 0)
2246 			return(retval);
2247 	}
2248 
2249 	return(retval);
2250 }
2251 
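/*
 * Example: how an entry lands in the periph_drivers linker set that the
 * loop above walks (a sketch adapted from peripheral drivers such as
 * scsi_da.c; the "da" names are that driver's, not something defined
 * here):
 *
 *	static struct periph_driver dadriver =
 *	{
 *		dainit, "da",
 *		TAILQ_HEAD_INITIALIZER(dadriver.units), 0 (generation)
 *	};
 *
 *	PERIPHDRIVER_DECLARE(da, dadriver);
 *
 * PERIPHDRIVER_DECLARE() hooks the structure into the set at link or
 * module-load time, which is why no runtime registration (and no
 * locking) is needed in the traversal above.
 */
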
2252 static int
2253 xptpdperiphtraverse(struct periph_driver **pdrv,
2254 		    struct cam_periph *start_periph,
2255 		    xpt_periphfunc_t *tr_func, void *arg)
2256 {
2257 	struct cam_periph *periph, *next_periph;
2258 	struct cam_sim *sim;
2259 	int retval;
2260 
2261 	retval = 1;
2262 
2263 	xpt_lock_buses();
2264 	for (periph = (start_periph ? start_periph :
2265 	     TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2266 	     periph = next_periph) {
2267 
2269 		/*
2270 		 * In this case, we want to show peripherals that have been
2271 		 * invalidated, but not peripherals that are scheduled to
2272 		 * be freed.  So instead of calling cam_periph_acquire(),
2273 		 * which will fail if the periph has been invalidated, we
2274 		 * just check for the free flag here.  If it is free, we
2275 		 * skip to the next periph.
2276 		 */
2277 		if (periph->flags & CAM_PERIPH_FREE) {
2278 			next_periph = TAILQ_NEXT(periph, unit_links);
2279 			continue;
2280 		}
2281 
2282 		/*
2283 		 * Acquire a reference to this periph while we call the
2284 		 * traversal function, so it can't go away.
2285 		 */
2286 		periph->refcount++;
2287 		sim = periph->sim;
2288 		xpt_unlock_buses();
2289 		CAM_SIM_LOCK(sim);
2290 		xpt_lock_buses();
2291 		retval = tr_func(periph, arg);
2292 
2293 		/*
2294 		 * Grab the next peripheral before we release this one, so
2295 		 * our next pointer is still valid.
2296 		 */
2297 		next_periph = TAILQ_NEXT(periph, unit_links);
2298 
2299 		cam_periph_release_locked_buses(periph);
2300 		CAM_SIM_UNLOCK(sim);
2301 
2302 		if (retval == 0)
2303 			goto bailout_done;
2304 	}
2305 bailout_done:
2306 
2307 	xpt_unlock_buses();
2308 
2309 	return(retval);
2310 }
2311 
2312 static int
2313 xptdefbusfunc(struct cam_eb *bus, void *arg)
2314 {
2315 	struct xpt_traverse_config *tr_config;
2316 
2317 	tr_config = (struct xpt_traverse_config *)arg;
2318 
2319 	if (tr_config->depth == XPT_DEPTH_BUS) {
2320 		xpt_busfunc_t *tr_func;
2321 
2322 		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2323 
2324 		return(tr_func(bus, tr_config->tr_arg));
2325 	} else
2326 		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2327 }
2328 
2329 static int
2330 xptdeftargetfunc(struct cam_et *target, void *arg)
2331 {
2332 	struct xpt_traverse_config *tr_config;
2333 
2334 	tr_config = (struct xpt_traverse_config *)arg;
2335 
2336 	if (tr_config->depth == XPT_DEPTH_TARGET) {
2337 		xpt_targetfunc_t *tr_func;
2338 
2339 		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2340 
2341 		return(tr_func(target, tr_config->tr_arg));
2342 	} else
2343 		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2344 }
2345 
2346 static int
2347 xptdefdevicefunc(struct cam_ed *device, void *arg)
2348 {
2349 	struct xpt_traverse_config *tr_config;
2350 
2351 	tr_config = (struct xpt_traverse_config *)arg;
2352 
2353 	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2354 		xpt_devicefunc_t *tr_func;
2355 
2356 		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2357 
2358 		return(tr_func(device, tr_config->tr_arg));
2359 	} else
2360 		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2361 }
2362 
2363 static int
2364 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2365 {
2366 	struct xpt_traverse_config *tr_config;
2367 	xpt_periphfunc_t *tr_func;
2368 
2369 	tr_config = (struct xpt_traverse_config *)arg;
2370 
2371 	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2372 
2373 	/*
2374 	 * Unlike the other default functions, we don't check for depth
2375 	 * here.  The peripheral driver level is the last level in the EDT,
2376 	 * so if we're here, we should execute the function in question.
2377 	 */
2378 	return(tr_func(periph, tr_config->tr_arg));
2379 }
2380 
2381 /*
2382  * Execute the given function for every bus in the EDT.
2383  */
2384 static int
2385 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2386 {
2387 	struct xpt_traverse_config tr_config;
2388 
2389 	tr_config.depth = XPT_DEPTH_BUS;
2390 	tr_config.tr_func = tr_func;
2391 	tr_config.tr_arg = arg;
2392 
2393 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2394 }
2395 
2396 /*
2397  * Execute the given function for every device in the EDT.
2398  */
2399 static int
2400 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2401 {
2402 	struct xpt_traverse_config tr_config;
2403 
2404 	tr_config.depth = XPT_DEPTH_DEVICE;
2405 	tr_config.tr_func = tr_func;
2406 	tr_config.tr_arg = arg;
2407 
2408 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2409 }
2410 
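/*
 * Example: a minimal callback sketch for use with the helper above.
 * Returning 1 from the callback continues the traversal; returning 0
 * aborts it early and is what xpt_for_all_devices() then returns.  The
 * names here are hypothetical.
 *
 *	static int
 *	xpt_count_dev(struct cam_ed *device, void *arg)
 *	{
 *		int *count = (int *)arg;
 *
 *		if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
 *			(*count)++;
 *		return (1);
 *	}
 *
 *	int count = 0;
 *	xpt_for_all_devices(xpt_count_dev, &count);
 */
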
2411 static int
2412 xptsetasyncfunc(struct cam_ed *device, void *arg)
2413 {
2414 	struct cam_path path;
2415 	struct ccb_getdev cgd;
2416 	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2417 
2418 	/*
2419 	 * Don't report unconfigured devices (Wildcard devs,
2420 	 * devices only for target mode, device instances
2421 	 * that have been invalidated but are waiting for
2422 	 * their last reference count to be released).
2423 	 */
2424 	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2425 		return (1);
2426 
2427 	xpt_compile_path(&path,
2428 			 NULL,
2429 			 device->target->bus->path_id,
2430 			 device->target->target_id,
2431 			 device->lun_id);
2432 	xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
2433 	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2434 	xpt_action((union ccb *)&cgd);
2435 	csa->callback(csa->callback_arg,
2436 			    AC_FOUND_DEVICE,
2437 			    &path, &cgd);
2438 	xpt_release_path(&path);
2439 
2440 	return(1);
2441 }
2442 
2443 static int
2444 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2445 {
2446 	struct cam_path path;
2447 	struct ccb_pathinq cpi;
2448 	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2449 
2450 	xpt_compile_path(&path, /*periph*/NULL,
2451 			 bus->sim->path_id,
2452 			 CAM_TARGET_WILDCARD,
2453 			 CAM_LUN_WILDCARD);
2454 	xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
2455 	cpi.ccb_h.func_code = XPT_PATH_INQ;
2456 	xpt_action((union ccb *)&cpi);
2457 	csa->callback(csa->callback_arg,
2458 			    AC_PATH_REGISTERED,
2459 			    &path, &cpi);
2460 	xpt_release_path(&path);
2461 
2462 	return(1);
2463 }
2464 
2465 void
2466 xpt_action(union ccb *start_ccb)
2467 {
2468 
2469 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2470 
2471 	start_ccb->ccb_h.status = CAM_REQ_INPROG;
2472 	(*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb);
2473 }
2474 
2475 void
2476 xpt_action_default(union ccb *start_ccb)
2477 {
2478 	struct cam_path *path;
2479 
2480 	path = start_ccb->ccb_h.path;
2481 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));
2482 
2483 	switch (start_ccb->ccb_h.func_code) {
2484 	case XPT_SCSI_IO:
2485 	{
2486 		struct cam_ed *device;
2487 
2488 		/*
2489 		 * For the sake of compatibility with SCSI-1
2490 		 * devices that may not understand the identify
2491 		 * message, we include lun information in the
2492 		 * second byte of all commands.  SCSI-1 specifies
2493 		 * that luns are a 3 bit value and reserves only 3
2494 		 * bits for lun information in the CDB.  Later
2495 		 * revisions of the SCSI spec allow for more than 8
2496 		 * luns, but have deprecated lun information in the
2497 		 * CDB.  So, if the lun won't fit, we must omit it.
2498 		 *
2499 		 * Also be aware that during initial probing for devices,
2500 		 * the inquiry information is unknown but initialized to 0.
2501 		 * This means that this code will be exercised while probing
2502 		 * devices with an ANSI revision greater than 2.
2503 		 */
2504 		device = path->device;
2505 		if (device->protocol_version <= SCSI_REV_2
2506 		 && start_ccb->ccb_h.target_lun < 8
2507 		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2508 
2509 			start_ccb->csio.cdb_io.cdb_bytes[1] |=
2510 			    start_ccb->ccb_h.target_lun << 5;
2511 		}
2512 		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2513 	}
2514 	/* FALLTHROUGH */
2515 	case XPT_TARGET_IO:
2516 	case XPT_CONT_TARGET_IO:
2517 		start_ccb->csio.sense_resid = 0;
2518 		start_ccb->csio.resid = 0;
2519 		/* FALLTHROUGH */
2520 	case XPT_ATA_IO:
2521 		if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
2522 			start_ccb->ataio.resid = 0;
2523 		/* FALLTHROUGH */
2524 	case XPT_RESET_DEV:
2525 	case XPT_ENG_EXEC:
2526 	case XPT_SMP_IO:
2527 		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2528 		if (xpt_schedule_devq(path->bus->sim->devq, path->device))
2529 			xpt_run_devq(path->bus->sim->devq);
2530 		break;
2531 	case XPT_CALC_GEOMETRY:
2532 	{
2533 		struct cam_sim *sim;
2534 
2535 		/* Filter out garbage */
2536 		if (start_ccb->ccg.block_size == 0
2537 		 || start_ccb->ccg.volume_size == 0) {
2538 			start_ccb->ccg.cylinders = 0;
2539 			start_ccb->ccg.heads = 0;
2540 			start_ccb->ccg.secs_per_track = 0;
2541 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2542 			break;
2543 		}
2544 #if defined(PC98) || defined(__sparc64__)
2545 		/*
2546 		 * In a PC-98 system, geometry translation depends on
2547 		 * the "real" device geometry obtained from mode page 4.
2548 		 * SCSI geometry translation is performed in the
2549 		 * initialization routine of the SCSI BIOS and the result
2550 		 * stored in host memory.  If the translation is available
2551 		 * in host memory, use it.  If not, rely on the default
2552 		 * translation the device driver performs.
2553 		 * For sparc64, we may need to adjust the geometry of large
2554 		 * disks in order to fit the limitations of the 16-bit
2555 		 * fields of the VTOC8 disk label.
2556 		 */
2557 		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2558 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2559 			break;
2560 		}
2561 #endif
2562 		sim = path->bus->sim;
2563 		(*(sim->sim_action))(sim, start_ccb);
2564 		break;
2565 	}
2566 	case XPT_ABORT:
2567 	{
2568 		union ccb* abort_ccb;
2569 
2570 		abort_ccb = start_ccb->cab.abort_ccb;
2571 		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2572 
2573 			if (abort_ccb->ccb_h.pinfo.index >= 0) {
2574 				struct cam_ccbq *ccbq;
2575 				struct cam_ed *device;
2576 
2577 				device = abort_ccb->ccb_h.path->device;
2578 				ccbq = &device->ccbq;
2579 				cam_ccbq_remove_ccb(ccbq, abort_ccb);
2580 				abort_ccb->ccb_h.status =
2581 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2582 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2583 				xpt_done(abort_ccb);
2584 				start_ccb->ccb_h.status = CAM_REQ_CMP;
2585 				break;
2586 			}
2587 			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2588 			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2589 				/*
2590 				 * We've caught this ccb en route to
2591 				 * the SIM.  Flag it for abort and the
2592 				 * SIM will do so just before starting
2593 				 * real work on the CCB.
2594 				 */
2595 				abort_ccb->ccb_h.status =
2596 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2597 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2598 				start_ccb->ccb_h.status = CAM_REQ_CMP;
2599 				break;
2600 			}
2601 		}
2602 		if (XPT_FC_IS_QUEUED(abort_ccb)
2603 		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2604 			/*
2605 			 * It's already completed but waiting
2606 			 * for our SWI to get to it.
2607 			 */
2608 			start_ccb->ccb_h.status = CAM_UA_ABORT;
2609 			break;
2610 		}
2611 		/*
2612 		 * If we weren't able to take care of the abort request
2613 		 * in the XPT, pass the request down to the SIM for processing.
2614 		 */
2615 	}
2616 	/* FALLTHROUGH */
2617 	case XPT_ACCEPT_TARGET_IO:
2618 	case XPT_EN_LUN:
2619 	case XPT_IMMED_NOTIFY:
2620 	case XPT_NOTIFY_ACK:
2621 	case XPT_RESET_BUS:
2622 	case XPT_IMMEDIATE_NOTIFY:
2623 	case XPT_NOTIFY_ACKNOWLEDGE:
2624 	case XPT_GET_SIM_KNOB:
2625 	case XPT_SET_SIM_KNOB:
2626 	{
2627 		struct cam_sim *sim;
2628 
2629 		sim = path->bus->sim;
2630 		(*(sim->sim_action))(sim, start_ccb);
2631 		break;
2632 	}
2633 	case XPT_PATH_INQ:
2634 	{
2635 		struct cam_sim *sim;
2636 
2637 		sim = path->bus->sim;
2638 		(*(sim->sim_action))(sim, start_ccb);
2639 		break;
2640 	}
2641 	case XPT_PATH_STATS:
2642 		start_ccb->cpis.last_reset = path->bus->last_reset;
2643 		start_ccb->ccb_h.status = CAM_REQ_CMP;
2644 		break;
2645 	case XPT_GDEV_TYPE:
2646 	{
2647 		struct cam_ed *dev;
2648 
2649 		dev = path->device;
2650 		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2651 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2652 		} else {
2653 			struct ccb_getdev *cgd;
2654 
2655 			cgd = &start_ccb->cgd;
2656 			cgd->protocol = dev->protocol;
2657 			cgd->inq_data = dev->inq_data;
2658 			cgd->ident_data = dev->ident_data;
2659 			cgd->inq_flags = dev->inq_flags;
2660 			cgd->ccb_h.status = CAM_REQ_CMP;
2661 			cgd->serial_num_len = dev->serial_num_len;
2662 			if ((dev->serial_num_len > 0)
2663 			 && (dev->serial_num != NULL))
2664 				bcopy(dev->serial_num, cgd->serial_num,
2665 				      dev->serial_num_len);
2666 		}
2667 		break;
2668 	}
2669 	case XPT_GDEV_STATS:
2670 	{
2671 		struct cam_ed *dev;
2672 
2673 		dev = path->device;
2674 		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2675 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2676 		} else {
2677 			struct ccb_getdevstats *cgds;
2678 			struct cam_eb *bus;
2679 			struct cam_et *tar;
2680 
2681 			cgds = &start_ccb->cgds;
2682 			bus = path->bus;
2683 			tar = path->target;
2684 			cgds->dev_openings = dev->ccbq.dev_openings;
2685 			cgds->dev_active = dev->ccbq.dev_active;
2686 			cgds->devq_openings = dev->ccbq.devq_openings;
2687 			cgds->devq_queued = dev->ccbq.queue.entries;
2688 			cgds->held = dev->ccbq.held;
2689 			cgds->last_reset = tar->last_reset;
2690 			cgds->maxtags = dev->maxtags;
2691 			cgds->mintags = dev->mintags;
2692 			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2693 				cgds->last_reset = bus->last_reset;
2694 			cgds->ccb_h.status = CAM_REQ_CMP;
2695 		}
2696 		break;
2697 	}
2698 	case XPT_GDEVLIST:
2699 	{
2700 		struct cam_periph	*nperiph;
2701 		struct periph_list	*periph_head;
2702 		struct ccb_getdevlist	*cgdl;
2703 		u_int			i;
2704 		struct cam_ed		*device;
2705 		int			found;
2706 
2707 
2709 
2710 		/*
2711 		 * Don't want anyone mucking with our data.
2712 		 */
2713 		device = path->device;
2714 		periph_head = &device->periphs;
2715 		cgdl = &start_ccb->cgdl;
2716 
2717 		/*
2718 		 * Check and see if the list has changed since the user
2719 		 * last requested a list member.  If so, tell them that the
2720 		 * list has changed, and therefore they need to start over
2721 		 * from the beginning.
2722 		 */
2723 		if ((cgdl->index != 0) &&
2724 		    (cgdl->generation != device->generation)) {
2725 			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2726 			break;
2727 		}
2728 
2729 		/*
2730 		 * Traverse the list of peripherals and attempt to find
2731 		 * the requested peripheral.
2732 		 */
2733 		for (nperiph = SLIST_FIRST(periph_head), i = 0;
2734 		     (nperiph != NULL) && (i <= cgdl->index);
2735 		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
2736 			if (i == cgdl->index) {
2737 				strncpy(cgdl->periph_name,
2738 					nperiph->periph_name,
2739 					DEV_IDLEN);
2740 				cgdl->unit_number = nperiph->unit_number;
2741 				found = 1;
2742 			}
2743 		}
2744 		if (found == 0) {
2745 			cgdl->status = CAM_GDEVLIST_ERROR;
2746 			break;
2747 		}
2748 
2749 		if (nperiph == NULL)
2750 			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2751 		else
2752 			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2753 
2754 		cgdl->index++;
2755 		cgdl->generation = device->generation;
2756 
2757 		cgdl->ccb_h.status = CAM_REQ_CMP;
2758 		break;
2759 	}
2760 	case XPT_DEV_MATCH:
2761 	{
2762 		dev_pos_type position_type;
2763 		struct ccb_dev_match *cdm;
2764 
2765 		cdm = &start_ccb->cdm;
2766 
2767 		/*
2768 		 * There are two ways of getting at information in the EDT.
2769 		 * The first way is via the primary EDT tree.  It starts
2770 		 * with a list of busses, then a list of targets on a bus,
2771 		 * then devices/luns on a target, and then peripherals on a
2772 		 * device/lun.  The "other" way is by the peripheral driver
2773 		 * lists.  The peripheral driver lists are organized by
2774 		 * peripheral driver (obviously), so it makes sense to
2775 		 * use the peripheral driver list if the user is looking
2776 		 * for something like "da1", or all "da" devices.  If the
2777 		 * user is looking for something on a particular bus/target
2778 		 * or lun, it's generally better to go through the EDT tree.
2779 		 */
2780 
2781 		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2782 			position_type = cdm->pos.position_type;
2783 		else {
2784 			u_int i;
2785 
2786 			position_type = CAM_DEV_POS_NONE;
2787 
2788 			for (i = 0; i < cdm->num_patterns; i++) {
2789 				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2790 				 || (cdm->patterns[i].type == DEV_MATCH_DEVICE)) {
2791 					position_type = CAM_DEV_POS_EDT;
2792 					break;
2793 				}
2794 			}
2795 
2796 			if (cdm->num_patterns == 0)
2797 				position_type = CAM_DEV_POS_EDT;
2798 			else if (position_type == CAM_DEV_POS_NONE)
2799 				position_type = CAM_DEV_POS_PDRV;
2800 		}
2801 
2802 		/*
2803 		 * Note that we drop the SIM lock here, because the EDT
2804 		 * traversal code needs to do its own locking.
2805 		 */
2806 		CAM_SIM_UNLOCK(xpt_path_sim(cdm->ccb_h.path));
2807 		switch(position_type & CAM_DEV_POS_TYPEMASK) {
2808 		case CAM_DEV_POS_EDT:
2809 			xptedtmatch(cdm);
2810 			break;
2811 		case CAM_DEV_POS_PDRV:
2812 			xptperiphlistmatch(cdm);
2813 			break;
2814 		default:
2815 			cdm->status = CAM_DEV_MATCH_ERROR;
2816 			break;
2817 		}
2818 		CAM_SIM_LOCK(xpt_path_sim(cdm->ccb_h.path));
2819 
2820 		if (cdm->status == CAM_DEV_MATCH_ERROR)
2821 			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2822 		else
2823 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2824 
2825 		break;
2826 	}
2827 	case XPT_SASYNC_CB:
2828 	{
2829 		struct ccb_setasync *csa;
2830 		struct async_node *cur_entry;
2831 		struct async_list *async_head;
2832 		u_int32_t added;
2833 
2834 		csa = &start_ccb->csa;
2835 		added = csa->event_enable;
2836 		async_head = &path->device->asyncs;
2837 
2838 		/*
2839 		 * If there is already an entry for us, simply
2840 		 * update it.
2841 		 */
2842 		cur_entry = SLIST_FIRST(async_head);
2843 		while (cur_entry != NULL) {
2844 			if ((cur_entry->callback_arg == csa->callback_arg)
2845 			 && (cur_entry->callback == csa->callback))
2846 				break;
2847 			cur_entry = SLIST_NEXT(cur_entry, links);
2848 		}
2849 
2850 		if (cur_entry != NULL) {
2851 		 	/*
2852 			 * If the request has no flags set,
2853 			 * remove the entry.
2854 			 */
2855 			added &= ~cur_entry->event_enable;
2856 			if (csa->event_enable == 0) {
2857 				SLIST_REMOVE(async_head, cur_entry,
2858 					     async_node, links);
2859 				xpt_release_device(path->device);
2860 				free(cur_entry, M_CAMXPT);
2861 			} else {
2862 				cur_entry->event_enable = csa->event_enable;
2863 			}
2864 			csa->event_enable = added;
2865 		} else {
2866 			cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
2867 					   M_NOWAIT);
2868 			if (cur_entry == NULL) {
2869 				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
2870 				break;
2871 			}
2872 			cur_entry->event_enable = csa->event_enable;
2873 			cur_entry->callback_arg = csa->callback_arg;
2874 			cur_entry->callback = csa->callback;
2875 			SLIST_INSERT_HEAD(async_head, cur_entry, links);
2876 			xpt_acquire_device(path->device);
2877 		}
2878 		start_ccb->ccb_h.status = CAM_REQ_CMP;
2879 		break;
2880 	}
2881 	case XPT_REL_SIMQ:
2882 	{
2883 		struct ccb_relsim *crs;
2884 		struct cam_ed *dev;
2885 
2886 		crs = &start_ccb->crs;
2887 		dev = path->device;
2888 		if (dev == NULL) {
2889 
2890 			crs->ccb_h.status = CAM_DEV_NOT_THERE;
2891 			break;
2892 		}
2893 
2894 		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
2895 
2896 			/* Don't ever go below one opening */
2897 			if (crs->openings > 0) {
2898 				xpt_dev_ccbq_resize(path, crs->openings);
2899 				if (bootverbose) {
2900 					xpt_print(path,
2901 					    "number of openings is now %d\n",
2902 					    crs->openings);
2903 				}
2904 			}
2905 		}
2906 
2907 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
2908 
2909 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
2910 
2911 				/*
2912 				 * Just extend the old timeout and decrement
2913 				 * the freeze count so that a single timeout
2914 				 * is sufficient for releasing the queue.
2915 				 */
2916 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2917 				callout_stop(&dev->callout);
2918 			} else {
2919 
2920 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2921 			}
2922 
2923 			callout_reset(&dev->callout,
2924 			    (crs->release_timeout * hz) / 1000,
2925 			    xpt_release_devq_timeout, dev);
2926 
2927 			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
2928 
2929 		}
2930 
2931 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
2932 
2933 			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
2934 				/*
2935 				 * Decrement the freeze count so that a single
2936 				 * completion is still sufficient to unfreeze
2937 				 * the queue.
2938 				 */
2939 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2940 			} else {
2941 
2942 				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
2943 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2944 			}
2945 		}
2946 
2947 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
2948 
2949 			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
2950 			 || (dev->ccbq.dev_active == 0)) {
2951 
2952 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2953 			} else {
2954 
2955 				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
2956 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2957 			}
2958 		}
2959 
2960 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
2961 			xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
2962 		start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
2963 		start_ccb->ccb_h.status = CAM_REQ_CMP;
2964 		break;
2965 	}
2966 	case XPT_DEBUG: {
2967 		struct cam_path *oldpath;
2968 		struct cam_sim *oldsim;
2969 
2970 		/* Check that all request bits are supported. */
2971 		if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
2972 			start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2973 			break;
2974 		}
2975 
2976 		cam_dflags = CAM_DEBUG_NONE;
2977 		if (cam_dpath != NULL) {
2978 			/* To release the old path we must hold proper lock. */
2979 			oldpath = cam_dpath;
2980 			cam_dpath = NULL;
2981 			oldsim = xpt_path_sim(oldpath);
2982 			CAM_SIM_UNLOCK(xpt_path_sim(start_ccb->ccb_h.path));
2983 			CAM_SIM_LOCK(oldsim);
2984 			xpt_free_path(oldpath);
2985 			CAM_SIM_UNLOCK(oldsim);
2986 			CAM_SIM_LOCK(xpt_path_sim(start_ccb->ccb_h.path));
2987 		}
2988 		if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
2989 			if (xpt_create_path(&cam_dpath, NULL,
2990 					    start_ccb->ccb_h.path_id,
2991 					    start_ccb->ccb_h.target_id,
2992 					    start_ccb->ccb_h.target_lun) !=
2993 					    CAM_REQ_CMP) {
2994 				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2995 			} else {
2996 				cam_dflags = start_ccb->cdbg.flags;
2997 				start_ccb->ccb_h.status = CAM_REQ_CMP;
2998 				xpt_print(cam_dpath, "debugging flags now %x\n",
2999 				    cam_dflags);
3000 			}
3001 		} else
3002 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3003 		break;
3004 	}
3005 	case XPT_NOOP:
3006 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3007 			xpt_freeze_devq(path, 1);
3008 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3009 		break;
3010 	default:
3011 	case XPT_SDEV_TYPE:
3012 	case XPT_TERM_IO:
3013 	case XPT_ENG_INQ:
3014 		/* XXX Implement */
3015 		printf("%s: CCB type %#x not supported\n", __func__,
3016 		       start_ccb->ccb_h.func_code);
3017 		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3018 		if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
3019 			xpt_done(start_ccb);
3020 		}
3021 		break;
3022 	}
3023 }
3024 
3025 void
3026 xpt_polled_action(union ccb *start_ccb)
3027 {
3028 	u_int32_t timeout;
3029 	struct	  cam_sim *sim;
3030 	struct	  cam_devq *devq;
3031 	struct	  cam_ed *dev;
3032 
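	/*
	 * ccb_h.timeout is in milliseconds; each pass through the polling
	 * loops below waits 100us via DELAY(), so scale the timeout by 10
	 * to convert it into a loop-iteration count.
	 */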
3034 	timeout = start_ccb->ccb_h.timeout * 10;
3035 	sim = start_ccb->ccb_h.path->bus->sim;
3036 	devq = sim->devq;
3037 	dev = start_ccb->ccb_h.path->device;
3038 
3039 	mtx_assert(sim->mtx, MA_OWNED);
3040 
3041 	/* Don't use ISR for this SIM while polling. */
3042 	sim->flags |= CAM_SIM_POLLED;
3043 
3044 	/*
3045 	 * Steal an opening so that no other queued requests
3046 	 * can get it before us while we simulate interrupts.
3047 	 */
3048 	dev->ccbq.devq_openings--;
3049 	dev->ccbq.dev_openings--;
3050 
3051 	while(((devq != NULL && devq->send_openings <= 0) ||
3052 	   dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
3053 		DELAY(100);
3054 		(*(sim->sim_poll))(sim);
3055 		camisr_runqueue(&sim->sim_doneq);
3056 	}
3057 
3058 	dev->ccbq.devq_openings++;
3059 	dev->ccbq.dev_openings++;
3060 
3061 	if (timeout != 0) {
3062 		xpt_action(start_ccb);
3063 		while(--timeout > 0) {
3064 			(*(sim->sim_poll))(sim);
3065 			camisr_runqueue(&sim->sim_doneq);
3066 			if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3067 			    != CAM_REQ_INPROG)
3068 				break;
3069 			DELAY(100);
3070 		}
3071 		if (timeout == 0) {
3072 			/*
3073 			 * XXX Is it worth adding a sim_timeout entry
3074 			 * point so we can attempt recovery?  If
3075 			 * this is only used for dumps, I don't think
3076 			 * it is.
3077 			 */
3078 			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3079 		}
3080 	} else {
3081 		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3082 	}
3083 
3084 	/* We will use CAM ISR for this SIM again. */
3085 	sim->flags &= ~CAM_SIM_POLLED;
3086 }
3087 
3088 /*
3089  * Schedule a peripheral driver to receive a ccb when its
3090  * target device has space for more transactions.
3091  */
3092 void
3093 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3094 {
3095 	struct cam_ed *device;
3096 	int runq = 0;
3097 
3098 	mtx_assert(perph->sim->mtx, MA_OWNED);
3099 
3100 	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3101 	device = perph->path->device;
3102 	if (periph_is_queued(perph)) {
3103 		/* Simply reorder based on new priority */
3104 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3105 			  ("   change priority to %d\n", new_priority));
3106 		if (new_priority < perph->pinfo.priority) {
3107 			camq_change_priority(&device->drvq,
3108 					     perph->pinfo.index,
3109 					     new_priority);
3110 			runq = 1;
3111 		}
3112 	} else {
3113 		/* New entry on the queue */
3114 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3115 			  ("   added periph to queue\n"));
3116 		perph->pinfo.priority = new_priority;
3117 		perph->pinfo.generation = ++device->drvq.generation;
3118 		camq_insert(&device->drvq, &perph->pinfo);
3119 		runq = 1;
3120 	}
3121 	if (runq != 0) {
3122 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3123 			  ("   calling xpt_run_dev_allocq\n"));
3124 		xpt_run_dev_allocq(device);
3125 	}
3126 }
3127 
3129 /*
3130  * Schedule a device to run on a given queue.
3131  * If the device was inserted as a new entry on the queue, or if its
3132  * queued priority was improved, return 1, meaning the caller should
3133  * run the device queue.  If we were already queued at an equal or
3134  * better priority, implying someone else has already started the
3135  * queue, return 0 so the caller doesn't attempt to run it again.
3136  */
3137 int
3138 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3139 		 u_int32_t new_priority)
3140 {
3141 	int retval;
3142 	u_int32_t old_priority;
3143 
3144 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3145 
3146 	old_priority = pinfo->priority;
3147 
3148 	/*
3149 	 * Are we already queued?
3150 	 */
3151 	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3152 		/* Simply reorder based on new priority */
3153 		if (new_priority < old_priority) {
3154 			camq_change_priority(queue, pinfo->index,
3155 					     new_priority);
3156 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3157 					("changed priority to %d\n",
3158 					 new_priority));
3159 			retval = 1;
3160 		} else
3161 			retval = 0;
3162 	} else {
3163 		/* New entry on the queue */
3164 		if (new_priority < old_priority)
3165 			pinfo->priority = new_priority;
3166 
3167 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3168 				("Inserting onto queue\n"));
3169 		pinfo->generation = ++queue->generation;
3170 		camq_insert(queue, pinfo);
3171 		retval = 1;
3172 	}
3173 	return (retval);
3174 }
3175 
3176 static void
3177 xpt_run_dev_allocq(struct cam_ed *device)
3178 {
3179 	struct camq	*drvq;
3180 
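	/*
	 * devq_allocating is a recursion guard: periph_start() below may
	 * call xpt_schedule(), which would bring us right back here.
	 * Nested invocations return immediately and let this outermost
	 * one drain the queue.
	 */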
3181 	if (device->ccbq.devq_allocating)
3182 		return;
3183 	device->ccbq.devq_allocating = 1;
3184 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq(%p)\n", device));
3185 	drvq = &device->drvq;
3186 	while ((drvq->entries > 0) &&
3187 	    (device->ccbq.devq_openings > 0 ||
3188 	     CAMQ_GET_PRIO(drvq) <= CAM_PRIORITY_OOB) &&
3189 	    (device->ccbq.queue.qfrozen_cnt == 0)) {
3190 		union	ccb *work_ccb;
3191 		struct	cam_periph *drv;
3192 
3193 		KASSERT(drvq->entries > 0, ("xpt_run_dev_allocq: "
3194 		    "Device on queue without any work to do"));
3195 		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3196 			drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3197 			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3198 				      drv->pinfo.priority);
3199 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3200 					("calling periph start\n"));
3201 			drv->periph_start(drv, work_ccb);
3202 		} else {
3203 			/*
3204 			 * Malloc failure in xpt_get_ccb().
3205 			 * XXX add us to a list to be run from free_ccb
3206 			 * if we don't have any ccbs active on this
3207 			 * device queue, otherwise we may never get run
3208 			 * again.
3209 			 */
3212 			break;
3213 		}
3214 	}
3215 	device->ccbq.devq_allocating = 0;
3216 }
3217 
3218 static void
3219 xpt_run_devq(struct cam_devq *devq)
3220 {
3221 	char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
3222 
3223 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
3224 
3225 	devq->send_queue.qfrozen_cnt++;
3226 	while ((devq->send_queue.entries > 0)
3227 	    && (devq->send_openings > 0)
3228 	    && (devq->send_queue.qfrozen_cnt <= 1)) {
3229 		struct	cam_ed_qinfo *qinfo;
3230 		struct	cam_ed *device;
3231 		union ccb *work_ccb;
3232 		struct	cam_sim *sim;
3233 
3234 		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3235 							   CAMQ_HEAD);
3236 		device = qinfo->device;
3237 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3238 				("running device %p\n", device));
3239 
3240 		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3241 		if (work_ccb == NULL) {
3242 			printf("device on run queue with no ccbs???\n");
3243 			continue;
3244 		}
3245 
3246 		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3247 
3248 			mtx_lock(&xsoftc.xpt_lock);
3249 		 	if (xsoftc.num_highpower <= 0) {
3250 				/*
3251 				 * We got a high power command, but we
3252 				 * don't have any available slots.  Freeze
3253 				 * the device queue until we have a slot
3254 				 * available.
3255 				 */
3256 				xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3257 				STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
3258 						   &work_ccb->ccb_h,
3259 						   xpt_links.stqe);
3260 
3261 				mtx_unlock(&xsoftc.xpt_lock);
3262 				continue;
3263 			} else {
3264 				/*
3265 				 * Consume a high power slot while
3266 				 * this ccb runs.
3267 				 */
3268 				xsoftc.num_highpower--;
3269 			}
3270 			mtx_unlock(&xsoftc.xpt_lock);
3271 		}
3272 		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3273 		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3274 
3275 		devq->send_openings--;
3276 		devq->send_active++;
3277 
3278 		xpt_schedule_devq(devq, device);
3279 
3280 		if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3281 			/*
3282 			 * The client wants to freeze the queue
3283 			 * after this CCB is sent.
3284 			 */
3285 			xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3286 		}
3287 
3288 		/* In Target mode, the peripheral driver knows best... */
3289 		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3290 			if ((device->inq_flags & SID_CmdQue) != 0
3291 			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3292 				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3293 			else
3294 				/*
3295 				 * Clear this in case of a retried CCB that
3296 				 * failed due to a rejected tag.
3297 				 */
3298 				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3299 		}
3300 
3301 		switch (work_ccb->ccb_h.func_code) {
3302 		case XPT_SCSI_IO:
3303 			CAM_DEBUG(work_ccb->ccb_h.path,
3304 			    CAM_DEBUG_CDB,("%s. CDB: %s\n",
3305 			     scsi_op_desc(work_ccb->csio.cdb_io.cdb_bytes[0],
3306 					  &device->inq_data),
3307 			     scsi_cdb_string(work_ccb->csio.cdb_io.cdb_bytes,
3308 					     cdb_str, sizeof(cdb_str))));
3309 			break;
3310 		case XPT_ATA_IO:
3311 			CAM_DEBUG(work_ccb->ccb_h.path,
3312 			    CAM_DEBUG_CDB,("%s. ACB: %s\n",
3313 			     ata_op_string(&work_ccb->ataio.cmd),
3314 			     ata_cmd_string(&work_ccb->ataio.cmd,
3315 					    cdb_str, sizeof(cdb_str))));
3316 			break;
3317 		default:
3318 			break;
3319 		}
3320 
3321 		/*
3322 		 * Device queues can be shared among multiple SIM instances
3323 		 * that reside on different busses, so dispatch this CCB
3324 		 * via the SIM recorded in its own path rather than any
3325 		 * cached elsewhere.
3326 		 */
3327 		sim = work_ccb->ccb_h.path->bus->sim;
3328 		(*(sim->sim_action))(sim, work_ccb);
3329 	}
3330 	devq->send_queue.qfrozen_cnt--;
3331 }
3332 
3333 /*
3334  * This function merges fields from the slave ccb into the master ccb, while
3335  * keeping important fields in the master ccb constant.
3336  */
3337 void
3338 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3339 {
3340 
3341 	/*
3342 	 * Pull fields that are valid for peripheral drivers to set
3343 	 * into the master CCB along with the CCB "payload".
3344 	 */
3345 	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3346 	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3347 	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3348 	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
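	/*
	 * &(&ccb->ccb_h)[1] is "the first byte past the header", so the
	 * bcopy below moves only the function-specific payload of the
	 * union.  Header fields other than the four pulled in above keep
	 * the master's values (path, priority, status, and so on).
	 */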
3349 	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3350 	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3351 }
3352 
3353 void
3354 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3355 {
3356 
3357 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3358 	ccb_h->pinfo.priority = priority;
3359 	ccb_h->path = path;
3360 	ccb_h->path_id = path->bus->path_id;
3361 	if (path->target)
3362 		ccb_h->target_id = path->target->target_id;
3363 	else
3364 		ccb_h->target_id = CAM_TARGET_WILDCARD;
3365 	if (path->device) {
3366 		ccb_h->target_lun = path->device->lun_id;
3367 		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3368 	} else {
3369 		ccb_h->target_lun = CAM_TARGET_WILDCARD;
3370 	}
3371 	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3372 	ccb_h->flags = 0;
3373 }
3374 
3375 /* Path manipulation functions */
3376 cam_status
3377 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3378 		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3379 {
3380 	struct	   cam_path *path;
3381 	cam_status status;
3382 
3383 	path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3384 
3385 	if (path == NULL) {
3386 		status = CAM_RESRC_UNAVAIL;
3387 		return(status);
3388 	}
3389 	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3390 	if (status != CAM_REQ_CMP) {
3391 		free(path, M_CAMPATH);
3392 		path = NULL;
3393 	}
3394 	*new_path_ptr = path;
3395 	return (status);
3396 }
3397 
3398 cam_status
3399 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3400 			 struct cam_periph *periph, path_id_t path_id,
3401 			 target_id_t target_id, lun_id_t lun_id)
3402 {
3403 	struct	   cam_path *path;
3404 	struct	   cam_eb *bus = NULL;
3405 	cam_status status;
3406 
3407 	path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_WAITOK);
3408 
3409 	bus = xpt_find_bus(path_id);
3410 	if (bus != NULL)
3411 		CAM_SIM_LOCK(bus->sim);
3412 	status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
3413 	if (bus != NULL) {
3414 		CAM_SIM_UNLOCK(bus->sim);
3415 		xpt_release_bus(bus);
3416 	}
3417 	if (status != CAM_REQ_CMP) {
3418 		free(path, M_CAMPATH);
3419 		path = NULL;
3420 	}
3421 	*new_path_ptr = path;
3422 	return (status);
3423 }
3424 
3425 cam_status
3426 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3427 		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3428 {
3429 	struct	     cam_eb *bus;
3430 	struct	     cam_et *target;
3431 	struct	     cam_ed *device;
3432 	cam_status   status;
3433 
3434 	status = CAM_REQ_CMP;	/* Completed without error */
3435 	target = NULL;		/* Wildcarded */
3436 	device = NULL;		/* Wildcarded */
3437 
3438 	/*
3439 	 * We will potentially modify the EDT, so block interrupts
3440 	 * that may attempt to create cam paths.
3441 	 */
3442 	bus = xpt_find_bus(path_id);
3443 	if (bus == NULL) {
3444 		status = CAM_PATH_INVALID;
3445 	} else {
3446 		target = xpt_find_target(bus, target_id);
3447 		if (target == NULL) {
3448 			/* Create one */
3449 			struct cam_et *new_target;
3450 
3451 			new_target = xpt_alloc_target(bus, target_id);
3452 			if (new_target == NULL) {
3453 				status = CAM_RESRC_UNAVAIL;
3454 			} else {
3455 				target = new_target;
3456 			}
3457 		}
3458 		if (target != NULL) {
3459 			device = xpt_find_device(target, lun_id);
3460 			if (device == NULL) {
3461 				/* Create one */
3462 				struct cam_ed *new_device;
3463 
3464 				new_device =
3465 				    (*(bus->xport->alloc_device))(bus,
3466 								      target,
3467 								      lun_id);
3468 				if (new_device == NULL) {
3469 					status = CAM_RESRC_UNAVAIL;
3470 				} else {
3471 					device = new_device;
3472 				}
3473 			}
3474 		}
3475 	}
3476 
3477 	/*
3478 	 * Only touch the user's data if we are successful.
3479 	 */
3480 	if (status == CAM_REQ_CMP) {
3481 		new_path->periph = perph;
3482 		new_path->bus = bus;
3483 		new_path->target = target;
3484 		new_path->device = device;
3485 		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3486 	} else {
3487 		if (device != NULL)
3488 			xpt_release_device(device);
3489 		if (target != NULL)
3490 			xpt_release_target(target);
3491 		if (bus != NULL)
3492 			xpt_release_bus(bus);
3493 	}
3494 	return (status);
3495 }
3496 
3497 void
3498 xpt_release_path(struct cam_path *path)
3499 {
3500 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3501 	if (path->device != NULL) {
3502 		xpt_release_device(path->device);
3503 		path->device = NULL;
3504 	}
3505 	if (path->target != NULL) {
3506 		xpt_release_target(path->target);
3507 		path->target = NULL;
3508 	}
3509 	if (path->bus != NULL) {
3510 		xpt_release_bus(path->bus);
3511 		path->bus = NULL;
3512 	}
3513 }
3514 
3515 void
3516 xpt_free_path(struct cam_path *path)
3517 {
3518 
3519 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3520 	xpt_release_path(path);
3521 	free(path, M_CAMPATH);
3522 }
3523 
3524 void
3525 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
3526     uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
3527 {
3528 
3529 	xpt_lock_buses();
3530 	if (bus_ref) {
3531 		if (path->bus)
3532 			*bus_ref = path->bus->refcount;
3533 		else
3534 			*bus_ref = 0;
3535 	}
3536 	if (periph_ref) {
3537 		if (path->periph)
3538 			*periph_ref = path->periph->refcount;
3539 		else
3540 			*periph_ref = 0;
3541 	}
3542 	xpt_unlock_buses();
3543 	if (target_ref) {
3544 		if (path->target)
3545 			*target_ref = path->target->refcount;
3546 		else
3547 			*target_ref = 0;
3548 	}
3549 	if (device_ref) {
3550 		if (path->device)
3551 			*device_ref = path->device->refcount;
3552 		else
3553 			*device_ref = 0;
3554 	}
3555 }
3556 
3557 /*
3558  * Return -1 if the paths do not match, 0 for an exact match, 1 for a
3559  * match with wildcards in path1, 2 for a match with wildcards in path2.
3560  */
3561 int
3562 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3563 {
3564 	int retval = 0;
3565 
3566 	if (path1->bus != path2->bus) {
3567 		if (path1->bus->path_id == CAM_BUS_WILDCARD)
3568 			retval = 1;
3569 		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3570 			retval = 2;
3571 		else
3572 			return (-1);
3573 	}
3574 	if (path1->target != path2->target) {
3575 		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3576 			if (retval == 0)
3577 				retval = 1;
3578 		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3579 			retval = 2;
3580 		else
3581 			return (-1);
3582 	}
3583 	if (path1->device != path2->device) {
3584 		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3585 			if (retval == 0)
3586 				retval = 1;
3587 		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3588 			retval = 2;
3589 		else
3590 			return (-1);
3591 	}
3592 	return (retval);
3593 }
3594 
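/*
 * Example: a fully-specified path compared against a wildcard path, as
 * in async callback filtering (names here are hypothetical):
 *
 *	switch (xpt_path_comp(async_path, dev_path)) {
 *	case -1:	paths refer to different busses/targets/luns
 *	case 0:		exact match
 *	case 1:		match, async_path contains the wildcards
 *	case 2:		match, dev_path contains the wildcards
 *	}
 */
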
3595 void
3596 xpt_print_path(struct cam_path *path)
3597 {
3598 
3599 	if (path == NULL)
3600 		printf("(nopath): ");
3601 	else {
3602 		if (path->periph != NULL)
3603 			printf("(%s%d:", path->periph->periph_name,
3604 			       path->periph->unit_number);
3605 		else
3606 			printf("(noperiph:");
3607 
3608 		if (path->bus != NULL)
3609 			printf("%s%d:%d:", path->bus->sim->sim_name,
3610 			       path->bus->sim->unit_number,
3611 			       path->bus->sim->bus_id);
3612 		else
3613 			printf("nobus:");
3614 
3615 		if (path->target != NULL)
3616 			printf("%d:", path->target->target_id);
3617 		else
3618 			printf("X:");
3619 
3620 		if (path->device != NULL)
3621 			printf("%d): ", path->device->lun_id);
3622 		else
3623 			printf("X): ");
3624 	}
3625 }
3626 
3627 void
3628 xpt_print(struct cam_path *path, const char *fmt, ...)
3629 {
3630 	va_list ap;
3631 	xpt_print_path(path);
3632 	va_start(ap, fmt);
3633 	vprintf(fmt, ap);
3634 	va_end(ap);
3635 }
3636 
3637 int
3638 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3639 {
3640 	struct sbuf sb;
3641 
3642 #ifdef INVARIANTS
3643 	if (path != NULL && path->bus != NULL)
3644 		mtx_assert(path->bus->sim->mtx, MA_OWNED);
3645 #endif
3646 
3647 	sbuf_new(&sb, str, str_len, 0);
3648 
3649 	if (path == NULL)
3650 		sbuf_printf(&sb, "(nopath): ");
3651 	else {
3652 		if (path->periph != NULL)
3653 			sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
3654 				    path->periph->unit_number);
3655 		else
3656 			sbuf_printf(&sb, "(noperiph:");
3657 
3658 		if (path->bus != NULL)
3659 			sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
3660 				    path->bus->sim->unit_number,
3661 				    path->bus->sim->bus_id);
3662 		else
3663 			sbuf_printf(&sb, "nobus:");
3664 
3665 		if (path->target != NULL)
3666 			sbuf_printf(&sb, "%d:", path->target->target_id);
3667 		else
3668 			sbuf_printf(&sb, "X:");
3669 
3670 		if (path->device != NULL)
3671 			sbuf_printf(&sb, "%d): ", path->device->lun_id);
3672 		else
3673 			sbuf_printf(&sb, "X): ");
3674 	}
3675 	sbuf_finish(&sb);
3676 
3677 	return(sbuf_len(&sb));
3678 }
3679 
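/*
 * Sketch of a typical call, assuming the caller holds the SIM lock as
 * asserted above (the buffer size is illustrative):
 *
 *	char pathstr[64];
 *
 *	xpt_path_string(periph->path, pathstr, sizeof(pathstr));
 *	printf("%sdevice is gone\n", pathstr);
 */
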
3680 path_id_t
3681 xpt_path_path_id(struct cam_path *path)
3682 {
3683 	return(path->bus->path_id);
3684 }
3685 
3686 target_id_t
3687 xpt_path_target_id(struct cam_path *path)
3688 {
3689 	if (path->target != NULL)
3690 		return (path->target->target_id);
3691 	else
3692 		return (CAM_TARGET_WILDCARD);
3693 }
3694 
3695 lun_id_t
3696 xpt_path_lun_id(struct cam_path *path)
3697 {
3698 	if (path->device != NULL)
3699 		return (path->device->lun_id);
3700 	else
3701 		return (CAM_LUN_WILDCARD);
3702 }
3703 
3704 struct cam_sim *
3705 xpt_path_sim(struct cam_path *path)
3706 {
3707 
3708 	return (path->bus->sim);
3709 }
3710 
3711 struct cam_periph*
3712 xpt_path_periph(struct cam_path *path)
3713 {
3714 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
3715 
3716 	return (path->periph);
3717 }
3718 
3719 int
3720 xpt_path_legacy_ata_id(struct cam_path *path)
3721 {
3722 	struct cam_eb *bus;
3723 	int bus_id;
3724 
3725 	if (strcmp(path->bus->sim->sim_name, "ata") != 0 &&
3726 	    strcmp(path->bus->sim->sim_name, "ahcich") != 0 &&
3727 	    strcmp(path->bus->sim->sim_name, "mvsch") != 0 &&
3728 	    strcmp(path->bus->sim->sim_name, "siisch") != 0)
3729 		return (-1);
3730 
3731 	if (strcmp(path->bus->sim->sim_name, "ata") == 0 &&
3732 	    path->bus->sim->unit_number < 2) {
3733 		bus_id = path->bus->sim->unit_number;
3734 	} else {
3735 		bus_id = 2;
3736 		xpt_lock_buses();
3737 		TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
3738 			if (bus == path->bus)
3739 				break;
3740 			if ((strcmp(bus->sim->sim_name, "ata") == 0 &&
3741 			     bus->sim->unit_number >= 2) ||
3742 			    strcmp(bus->sim->sim_name, "ahcich") == 0 ||
3743 			    strcmp(bus->sim->sim_name, "mvsch") == 0 ||
3744 			    strcmp(bus->sim->sim_name, "siisch") == 0)
3745 				bus_id++;
3746 		}
3747 		xpt_unlock_buses();
3748 	}
3749 	if (path->target != NULL) {
3750 		if (path->target->target_id < 2)
3751 			return (bus_id * 2 + path->target->target_id);
3752 		else
3753 			return (-1);
3754 	} else
3755 		return (bus_id * 2);
3756 }
3757 
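/*
 * For example (illustrative): with the two legacy channels ata0 and
 * ata1, a device at ata1 target 1 maps to legacy id 1 * 2 + 1 = 3 (the
 * old ad3), while ahcich/mvsch/siisch channels are numbered from
 * bus_id 2 upward in bus list order.
 */
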
3758 /*
3759  * Release a CAM control block for the caller.  Remit the cost of the structure
3760  * to the device referenced by the path.  If this device had no 'credits'
3761  * and peripheral drivers have registered async callbacks for this notification,
3762  * call them now.
3763  */
3764 void
3765 xpt_release_ccb(union ccb *free_ccb)
3766 {
3767 	struct	 cam_path *path;
3768 	struct	 cam_ed *device;
3769 	struct	 cam_eb *bus;
3770 	struct   cam_sim *sim;
3771 
3772 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3773 	path = free_ccb->ccb_h.path;
3774 	device = path->device;
3775 	bus = path->bus;
3776 	sim = bus->sim;
3777 
3778 	mtx_assert(sim->mtx, MA_OWNED);
3779 
3780 	cam_ccbq_release_opening(&device->ccbq);
3781 	if (device->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) {
3782 		device->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
3783 		cam_ccbq_resize(&device->ccbq,
3784 		    device->ccbq.dev_openings + device->ccbq.dev_active);
3785 	}
3786 	if (sim->ccb_count > sim->max_ccbs) {
3787 		xpt_free_ccb(free_ccb);
3788 		sim->ccb_count--;
3789 	} else {
3790 		SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
3791 		    xpt_links.sle);
3792 	}
3793 	xpt_run_dev_allocq(device);
3794 }
3795 
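/*
 * A short sketch of the usual pairing from a peripheral driver's
 * completion callback (the foodone name is hypothetical); the SIM lock
 * is held when such callbacks run:
 *
 *	static void
 *	foodone(struct cam_periph *periph, union ccb *done_ccb)
 *	{
 *		... examine done_ccb->ccb_h.status ...
 *		xpt_release_ccb(done_ccb);
 *	}
 */
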
3796 /* Functions accessed by SIM drivers */
3797 
3798 static struct xpt_xport xport_default = {
3799 	.alloc_device = xpt_alloc_device_default,
3800 	.action = xpt_action_default,
3801 	.async = xpt_dev_async_default,
3802 };
3803 
3804 /*
3805  * A sim structure, listing the SIM entry points and instance
3806  * identification info is passed to xpt_bus_register to hook the SIM
3807  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3808  * for this new bus and places it in the array of busses and assigns
3809  * it a path_id.  The path_id may be influenced by "hard wiring"
3810  * information specified by the user.  Once interrupt services are
3811  * available, the bus will be probed.
3812  */
3813 int32_t
3814 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
3815 {
3816 	struct cam_eb *new_bus;
3817 	struct cam_eb *old_bus;
3818 	struct ccb_pathinq cpi;
3819 	struct cam_path *path;
3820 	cam_status status;
3821 
3822 	mtx_assert(sim->mtx, MA_OWNED);
3823 
3824 	sim->bus_id = bus;
3825 	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
3826 					  M_CAMXPT, M_NOWAIT);
3827 	if (new_bus == NULL) {
3828 		/* Couldn't satisfy request */
3829 		return (CAM_RESRC_UNAVAIL);
3830 	}
3831 	if (strcmp(sim->sim_name, "xpt") != 0) {
3832 		sim->path_id =
3833 		    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
3834 	}
3835 
3836 	TAILQ_INIT(&new_bus->et_entries);
3837 	new_bus->path_id = sim->path_id;
3838 	cam_sim_hold(sim);
3839 	new_bus->sim = sim;
3840 	timevalclear(&new_bus->last_reset);
3841 	new_bus->flags = 0;
3842 	new_bus->refcount = 1;	/* Held until a bus_deregister event */
3843 	new_bus->generation = 0;
3844 
3845 	xpt_lock_buses();
3846 	old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3847 	while (old_bus != NULL
3848 	    && old_bus->path_id < new_bus->path_id)
3849 		old_bus = TAILQ_NEXT(old_bus, links);
3850 	if (old_bus != NULL)
3851 		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
3852 	else
3853 		TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
3854 	xsoftc.bus_generation++;
3855 	xpt_unlock_buses();
3856 
3857 	/*
3858 	 * Set a default transport so that a PATH_INQ can be issued to
3859 	 * the SIM.  This will then allow for probing and attaching of
3860 	 * a more appropriate transport.
3861 	 */
3862 	new_bus->xport = &xport_default;
3863 
3864 	status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
3865 				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3866 	if (status != CAM_REQ_CMP) {
3867 		xpt_release_bus(new_bus);
3869 		return (CAM_RESRC_UNAVAIL);
3870 	}
3871 
3872 	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
3873 	cpi.ccb_h.func_code = XPT_PATH_INQ;
3874 	xpt_action((union ccb *)&cpi);
3875 
3876 	if (cpi.ccb_h.status == CAM_REQ_CMP) {
3877 		switch (cpi.transport) {
3878 		case XPORT_SPI:
3879 		case XPORT_SAS:
3880 		case XPORT_FC:
3881 		case XPORT_USB:
3882 		case XPORT_ISCSI:
3883 		case XPORT_PPB:
3884 			new_bus->xport = scsi_get_xport();
3885 			break;
3886 		case XPORT_ATA:
3887 		case XPORT_SATA:
3888 			new_bus->xport = ata_get_xport();
3889 			break;
3890 		default:
3891 			new_bus->xport = &xport_default;
3892 			break;
3893 		}
3894 	}
3895 
3896 	/* Notify interested parties */
3897 	if (sim->path_id != CAM_XPT_PATH_ID) {
3898 		union	ccb *scan_ccb;
3899 
3900 		xpt_async(AC_PATH_REGISTERED, path, &cpi);
3901 		/* Initiate bus rescan. */
		scan_ccb = xpt_alloc_ccb_nowait();
		if (scan_ccb != NULL) {
			scan_ccb->ccb_h.path = path;
			scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
			scan_ccb->crcn.flags = 0;
			xpt_rescan(scan_ccb);
		} else {
			xpt_print(path,
			    "Can't allocate CCB to scan bus\n");
			xpt_free_path(path);
		}
3907 	} else
3908 		xpt_free_path(path);
3909 	return (CAM_SUCCESS);
3910 }
3911 
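/*
 * A condensed sketch of how a SIM driver typically hooks itself in at
 * attach time (fooaction, foopoll, softc, and MAX_TRANS are hypothetical
 * driver-specific names); note that the SIM lock must be held across
 * xpt_bus_register, as asserted above:
 *
 *	devq = cam_simq_alloc(MAX_TRANS);
 *	sim = cam_sim_alloc(fooaction, foopoll, "foo", softc,
 *	    device_get_unit(dev), &softc->mtx, 1, MAX_TRANS, devq);
 *	mtx_lock(&softc->mtx);
 *	if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS) {
 *		cam_sim_free(sim, TRUE);
 *		mtx_unlock(&softc->mtx);
 *		return (ENXIO);
 *	}
 *	mtx_unlock(&softc->mtx);
 */
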
3912 int32_t
3913 xpt_bus_deregister(path_id_t pathid)
3914 {
3915 	struct cam_path bus_path;
3916 	cam_status status;
3917 
3918 	status = xpt_compile_path(&bus_path, NULL, pathid,
3919 				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3920 	if (status != CAM_REQ_CMP)
3921 		return (status);
3922 
3923 	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
3924 	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
3925 
3926 	/* Release the reference count held while registered. */
3927 	xpt_release_bus(bus_path.bus);
3928 	xpt_release_path(&bus_path);
3929 
3930 	return (CAM_REQ_CMP);
3931 }
3932 
3933 static path_id_t
3934 xptnextfreepathid(void)
3935 {
3936 	struct cam_eb *bus;
3937 	path_id_t pathid;
3938 	const char *strval;
3939 
3940 	pathid = 0;
3941 	xpt_lock_buses();
3942 	bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3943 retry:
3944 	/* Find an unoccupied pathid */
3945 	while (bus != NULL && bus->path_id <= pathid) {
3946 		if (bus->path_id == pathid)
3947 			pathid++;
3948 		bus = TAILQ_NEXT(bus, links);
3949 	}
3950 	xpt_unlock_buses();
3951 
3952 	/*
3953 	 * Ensure that this pathid is not reserved for
3954 	 * a bus that may be registered in the future.
3955 	 */
3956 	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
3957 		++pathid;
3958 		/* Start the search over */
3959 		xpt_lock_buses();
3960 		goto retry;
3961 	}
3962 	return (pathid);
3963 }
3964 
3965 static path_id_t
3966 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
3967 {
3968 	path_id_t pathid;
3969 	int i, dunit, val;
3970 	char buf[32];
3971 	const char *dname;
3972 
3973 	pathid = CAM_XPT_PATH_ID;
3974 	snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
3975 	i = 0;
3976 	while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
3977 		if (strcmp(dname, "scbus")) {
3978 			/* Avoid a bit of foot shooting. */
3979 			continue;
3980 		}
3981 		if (dunit < 0)		/* unwired?! */
3982 			continue;
3983 		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
3984 			if (sim_bus == val) {
3985 				pathid = dunit;
3986 				break;
3987 			}
3988 		} else if (sim_bus == 0) {
3989 			/* Unspecified matches bus 0 */
3990 			pathid = dunit;
3991 			break;
3992 		} else {
3993 			printf("Ambiguous scbus configuration for %s%d "
3994 			       "bus %d, cannot wire down.  The kernel "
3995 			       "config entry for scbus%d should "
3996 			       "specify a controller bus.\n"
3997 			       "Scbus will be assigned dynamically.\n",
3998 			       sim_name, sim_unit, sim_bus, dunit);
3999 			break;
4000 		}
4001 	}
4002 
4003 	if (pathid == CAM_XPT_PATH_ID)
4004 		pathid = xptnextfreepathid();
4005 	return (pathid);
4006 }
4007 
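/*
 * The "hard wiring" consulted above comes from the kernel config or
 * loader hints, e.g. (values illustrative):
 *
 *	hint.scbus.0.at="ahcich0"
 *	hint.scbus.1.at="mps0"
 *	hint.scbus.1.bus="0"
 */
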
4008 static const char *
4009 xpt_async_string(u_int32_t async_code)
4010 {
4011 
4012 	switch (async_code) {
4013 	case AC_BUS_RESET: return ("AC_BUS_RESET");
4014 	case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
4015 	case AC_SCSI_AEN: return ("AC_SCSI_AEN");
4016 	case AC_SENT_BDR: return ("AC_SENT_BDR");
4017 	case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
4018 	case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
4019 	case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
4020 	case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
4021 	case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
4022 	case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
4023 	case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
4024 	case AC_CONTRACT: return ("AC_CONTRACT");
4025 	case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
4026 	case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
4027 	}
4028 	return ("AC_UNKNOWN");
4029 }
4030 
4031 void
4032 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4033 {
4034 	struct cam_eb *bus;
4035 	struct cam_et *target, *next_target;
4036 	struct cam_ed *device, *next_device;
4037 
4038 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
4039 	CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
4040 	    ("xpt_async(%s)\n", xpt_async_string(async_code)));
4041 
4042 	/*
4043 	 * Most async events come from a CAM interrupt context.  In
4044 	 * a few cases, the error recovery code at the peripheral layer,
4045 	 * which may run from our SWI or a process context, may signal
4046 	 * deferred events with a call to xpt_async.
4047 	 */
4048 
4049 	bus = path->bus;
4050 
4051 	if (async_code == AC_BUS_RESET) {
4052 		/* Update our notion of when the last reset occurred */
4053 		microtime(&bus->last_reset);
4054 	}
4055 
4056 	for (target = TAILQ_FIRST(&bus->et_entries);
4057 	     target != NULL;
4058 	     target = next_target) {
4059 
4060 		next_target = TAILQ_NEXT(target, links);
4061 
4062 		if (path->target != target
4063 		 && path->target->target_id != CAM_TARGET_WILDCARD
4064 		 && target->target_id != CAM_TARGET_WILDCARD)
4065 			continue;
4066 
4067 		if (async_code == AC_SENT_BDR) {
4068 			/* Update our notion of when the last reset occurred */
4069 			microtime(&path->target->last_reset);
4070 		}
4071 
4072 		for (device = TAILQ_FIRST(&target->ed_entries);
4073 		     device != NULL;
4074 		     device = next_device) {
4075 
4076 			next_device = TAILQ_NEXT(device, links);
4077 
4078 			if (path->device != device
4079 			 && path->device->lun_id != CAM_LUN_WILDCARD
4080 			 && device->lun_id != CAM_LUN_WILDCARD)
4081 				continue;
4082 			/*
4083 			 * The async callback could free the device.
4084 			 * If it is a broadcast async, it doesn't hold
4085 			 * device reference, so take our own reference.
4086 			 */
4087 			xpt_acquire_device(device);
4088 			(*(bus->xport->async))(async_code, bus,
4089 					       target, device,
4090 					       async_arg);
4091 
4092 			xpt_async_bcast(&device->asyncs, async_code,
4093 					path, async_arg);
4094 			xpt_release_device(device);
4095 		}
4096 	}
4097 
4098 	/*
4099 	 * If this wasn't a fully wildcarded async, tell all
4100 	 * clients that want all async events.
4101 	 */
4102 	if (bus != xpt_periph->path->bus)
4103 		xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4104 				path, async_arg);
4105 }
4106 
4107 static void
4108 xpt_async_bcast(struct async_list *async_head,
4109 		u_int32_t async_code,
4110 		struct cam_path *path, void *async_arg)
4111 {
4112 	struct async_node *cur_entry;
4113 
4114 	cur_entry = SLIST_FIRST(async_head);
4115 	while (cur_entry != NULL) {
4116 		struct async_node *next_entry;
4117 		/*
4118 		 * Grab the next list entry before we call the current
4119 		 * entry's callback.  This is because the callback function
4120 		 * can delete its async callback entry.
4121 		 */
4122 		next_entry = SLIST_NEXT(cur_entry, links);
4123 		if ((cur_entry->event_enable & async_code) != 0)
4124 			cur_entry->callback(cur_entry->callback_arg,
4125 					    async_code, path,
4126 					    async_arg);
4127 		cur_entry = next_entry;
4128 	}
4129 }
4130 
4131 static void
4132 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
4133 		      struct cam_et *target, struct cam_ed *device,
4134 		      void *async_arg)
4135 {
4136 	printf("%s called\n", __func__);
4137 }
4138 
4139 u_int32_t
4140 xpt_freeze_devq(struct cam_path *path, u_int count)
4141 {
4142 	struct cam_ed *dev = path->device;
4143 
4144 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
4145 	dev->ccbq.queue.qfrozen_cnt += count;
4146 	/* Remove frozen device from sendq. */
4147 	if (device_is_queued(dev)) {
4148 		camq_remove(&dev->sim->devq->send_queue,
4149 		    dev->devq_entry.pinfo.index);
4150 	}
4151 	return (dev->ccbq.queue.qfrozen_cnt);
4152 }
4153 
4154 u_int32_t
4155 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4156 {
4157 
4158 	mtx_assert(sim->mtx, MA_OWNED);
4159 	sim->devq->send_queue.qfrozen_cnt += count;
4160 	return (sim->devq->send_queue.qfrozen_cnt);
4161 }
4162 
4163 static void
4164 xpt_release_devq_timeout(void *arg)
4165 {
4166 	struct cam_ed *device;
4167 
4168 	device = (struct cam_ed *)arg;
4169 	xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4170 }
4171 
4172 void
4173 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4174 {
4175 
4176 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
4177 	xpt_release_devq_device(path->device, count, run_queue);
4178 }
4179 
4180 void
4181 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4182 {
4183 
4184 	if (count > dev->ccbq.queue.qfrozen_cnt) {
4185 #ifdef INVARIANTS
4186 		printf("xpt_release_devq(): requested %u > present %u\n",
4187 		    count, dev->ccbq.queue.qfrozen_cnt);
4188 #endif
4189 		count = dev->ccbq.queue.qfrozen_cnt;
4190 	}
4191 	dev->ccbq.queue.qfrozen_cnt -= count;
4192 	if (dev->ccbq.queue.qfrozen_cnt == 0) {
4193 		/*
4194 		 * No longer need to wait for a successful
4195 		 * command completion.
4196 		 */
4197 		dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4198 		/*
4199 		 * Remove any timeouts that might be scheduled
4200 		 * to release this queue.
4201 		 */
4202 		if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4203 			callout_stop(&dev->callout);
4204 			dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4205 		}
4206 		xpt_run_dev_allocq(dev);
4207 		if (run_queue == 0)
4208 			return;
4209 		/*
4210 		 * Now that we are unfrozen schedule the
4211 		 * device so any pending transactions are
4212 		 * run.
4213 		 */
4214 		if (xpt_schedule_devq(dev->sim->devq, dev))
4215 			xpt_run_devq(dev->sim->devq);
4216 	}
4217 }
4218 
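/*
 * Sketch of the common freeze/thaw pattern: error recovery freezes the
 * device queue, arranges for the failed request to be retried, and
 * thaws the queue when it is ready for more work:
 *
 *	xpt_freeze_devq(ccb->ccb_h.path, 1);
 *	... requeue or otherwise retry the CCB ...
 *	xpt_release_devq(ccb->ccb_h.path, 1, TRUE);
 */
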
4219 void
4220 xpt_release_simq(struct cam_sim *sim, int run_queue)
4221 {
4222 	struct	camq *sendq;
4223 
4224 	mtx_assert(sim->mtx, MA_OWNED);
4225 	sendq = &(sim->devq->send_queue);
4226 	if (sendq->qfrozen_cnt == 0) {
4227 #ifdef INVARIANTS
4228 		printf("xpt_release_simq: requested 1 > present %u\n",
4229 		    sendq->qfrozen_cnt);
4230 #endif
4231 	} else
4232 		sendq->qfrozen_cnt--;
4233 	if (sendq->qfrozen_cnt == 0) {
4234 		/*
4235 		 * If there is a timeout scheduled to release this
4236 		 * sim queue, remove it.  The queue frozen count is
4237 		 * already at 0.
4238 		 */
4239 		if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4240 			callout_stop(&sim->callout);
4241 			sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4242 		}
4243 		if (run_queue) {
4244 			/*
4245 			 * Now that we are unfrozen run the send queue.
4246 			 */
4247 			xpt_run_devq(sim->devq);
4248 		}
4249 	}
4250 }
4251 
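/*
 * Sketch of the analogous SIM-side pattern: on a temporary resource
 * shortage a SIM can freeze its queue, fail the request with
 * CAM_REQUEUE_REQ, and thaw the queue once resources return:
 *
 *	xpt_freeze_simq(sim, 1);
 *	ccb->ccb_h.status = CAM_REQUEUE_REQ;
 *	xpt_done(ccb);
 *	...
 *	xpt_release_simq(sim, TRUE);
 */
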
4252 /*
4253  * XXX Appears to be unused.
4254  */
4255 static void
4256 xpt_release_simq_timeout(void *arg)
4257 {
4258 	struct cam_sim *sim;
4259 
4260 	sim = (struct cam_sim *)arg;
4261 	xpt_release_simq(sim, /* run_queue */ TRUE);
4262 }
4263 
4264 void
4265 xpt_done(union ccb *done_ccb)
4266 {
4267 	struct cam_sim *sim;
4268 	int	first;
4269 
4270 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4271 	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4272 		/*
4273 		 * Queue up the request for handling by our SWI handler;
4274 		 * this covers all of the "non-immediate" types of ccbs.
4275 		 */
4276 		sim = done_ccb->ccb_h.path->bus->sim;
4277 		TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
4278 		    sim_links.tqe);
4279 		done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4280 		if ((sim->flags & (CAM_SIM_ON_DONEQ | CAM_SIM_POLLED |
4281 		    CAM_SIM_BATCH)) == 0) {
4282 			mtx_lock(&cam_simq_lock);
4283 			first = TAILQ_EMPTY(&cam_simq);
4284 			TAILQ_INSERT_TAIL(&cam_simq, sim, links);
4285 			mtx_unlock(&cam_simq_lock);
4286 			sim->flags |= CAM_SIM_ON_DONEQ;
4287 			if (first)
4288 				swi_sched(cambio_ih, 0);
4289 		}
4290 	}
4291 }
4292 
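/*
 * A SIM driver's interrupt handler typically completes a request along
 * these lines (sketch; the hardware status mapping is driver-specific):
 *
 *	ccb->csio.scsi_status = SCSI_STATUS_OK;
 *	ccb->ccb_h.status = CAM_REQ_CMP;
 *	xpt_done(ccb);
 */
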
4293 void
4294 xpt_batch_start(struct cam_sim *sim)
4295 {
4296 
4297 	KASSERT((sim->flags & CAM_SIM_BATCH) == 0, ("Batch flag already set"));
4298 	sim->flags |= CAM_SIM_BATCH;
4299 }
4300 
4301 void
4302 xpt_batch_done(struct cam_sim *sim)
4303 {
4304 
4305 	KASSERT((sim->flags & CAM_SIM_BATCH) != 0, ("Batch flag was not set"));
4306 	sim->flags &= ~CAM_SIM_BATCH;
4307 	if (!TAILQ_EMPTY(&sim->sim_doneq) &&
4308 	    (sim->flags & CAM_SIM_ON_DONEQ) == 0)
4309 		camisr_runqueue(&sim->sim_doneq);
4310 }
4311 
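/*
 * Sketch of batching: a SIM that reaps many completions per interrupt
 * can defer SWI scheduling until the whole batch has been queued (the
 * foo_next_done helper is hypothetical):
 *
 *	xpt_batch_start(sim);
 *	while ((ccb = foo_next_done(softc)) != NULL)
 *		xpt_done(ccb);
 *	xpt_batch_done(sim);
 */
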
4312 union ccb *
4313 xpt_alloc_ccb(void)
4314 {
4315 	union ccb *new_ccb;
4316 
4317 	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4318 	return (new_ccb);
4319 }
4320 
4321 union ccb *
4322 xpt_alloc_ccb_nowait(void)
4323 {
4324 	union ccb *new_ccb;
4325 
4326 	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4327 	return (new_ccb);
4328 }
4329 
4330 void
4331 xpt_free_ccb(union ccb *free_ccb)
4332 {
4333 	free(free_ccb, M_CAMCCB);
4334 }
4335 
4338 /* Private XPT functions */
4339 
4340 /*
4341  * Get a CAM control block for the caller.  Charge the structure to the device
4342  * referenced by the path.  If this device has no 'credits', then the
4343  * device already has the maximum number of outstanding operations under way
4344  * and we return NULL.  If we don't have sufficient resources to allocate more
4345  * ccbs, we also return NULL.
4346  */
4347 static union ccb *
4348 xpt_get_ccb(struct cam_ed *device)
4349 {
4350 	union ccb *new_ccb;
4351 	struct cam_sim *sim;
4352 
4353 	sim = device->sim;
4354 	if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
4355 		new_ccb = xpt_alloc_ccb_nowait();
4356 		if (new_ccb == NULL) {
4357 			return (NULL);
4358 		}
4359 		if ((sim->flags & CAM_SIM_MPSAFE) == 0)
4360 			callout_handle_init(&new_ccb->ccb_h.timeout_ch);
4361 		SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
4362 				  xpt_links.sle);
4363 		sim->ccb_count++;
4364 	}
4365 	cam_ccbq_take_opening(&device->ccbq);
4366 	SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
4367 	return (new_ccb);
4368 }
4369 
4370 static void
4371 xpt_release_bus(struct cam_eb *bus)
4372 {
4373 
4374 	xpt_lock_buses();
4375 	KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
4376 	if (--bus->refcount > 0) {
4377 		xpt_unlock_buses();
4378 		return;
4379 	}
4380 	KASSERT(TAILQ_EMPTY(&bus->et_entries),
4381 	    ("refcount is zero, but target list is not empty"));
4382 	TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4383 	xsoftc.bus_generation++;
4384 	xpt_unlock_buses();
4385 	cam_sim_release(bus->sim);
4386 	free(bus, M_CAMXPT);
4387 }
4388 
4389 static struct cam_et *
4390 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4391 {
4392 	struct cam_et *cur_target, *target;
4393 
4394 	mtx_assert(bus->sim->mtx, MA_OWNED);
4395 	target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
4396 					 M_NOWAIT|M_ZERO);
4397 	if (target == NULL)
4398 		return (NULL);
4399 
4400 	TAILQ_INIT(&target->ed_entries);
4401 	target->bus = bus;
4402 	target->target_id = target_id;
4403 	target->refcount = 1;
4404 	target->generation = 0;
4405 	target->luns = NULL;
4406 	timevalclear(&target->last_reset);
4407 	/*
4408 	 * Hold a reference to our parent bus so it
4409 	 * will not go away before we do.
4410 	 */
4411 	xpt_lock_buses();
4412 	bus->refcount++;
4413 	xpt_unlock_buses();
4414 
4415 	/* Insertion sort into our bus's target list */
4416 	cur_target = TAILQ_FIRST(&bus->et_entries);
4417 	while (cur_target != NULL && cur_target->target_id < target_id)
4418 		cur_target = TAILQ_NEXT(cur_target, links);
4419 	if (cur_target != NULL) {
4420 		TAILQ_INSERT_BEFORE(cur_target, target, links);
4421 	} else {
4422 		TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4423 	}
4424 	bus->generation++;
4425 	return (target);
4426 }
4427 
4428 static void
4429 xpt_release_target(struct cam_et *target)
4430 {
4431 
4432 	mtx_assert(target->bus->sim->mtx, MA_OWNED);
4433 	if (--target->refcount > 0)
4434 		return;
4435 	KASSERT(TAILQ_EMPTY(&target->ed_entries),
4436 	    ("refcount is zero, but device list is not empty"));
4437 	TAILQ_REMOVE(&target->bus->et_entries, target, links);
4438 	target->bus->generation++;
4439 	xpt_release_bus(target->bus);
4440 	if (target->luns)
4441 		free(target->luns, M_CAMXPT);
4442 	free(target, M_CAMXPT);
4443 }
4444 
4445 static struct cam_ed *
4446 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
4447 			 lun_id_t lun_id)
4448 {
4449 	struct cam_ed *device;
4450 
4451 	device = xpt_alloc_device(bus, target, lun_id);
4452 	if (device == NULL)
4453 		return (NULL);
4454 
4455 	device->mintags = 1;
4456 	device->maxtags = 1;
4457 	bus->sim->max_ccbs += device->ccbq.devq_openings;
4458 	return (device);
4459 }
4460 
4461 struct cam_ed *
4462 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4463 {
4464 	struct cam_ed	*cur_device, *device;
4465 	struct cam_devq	*devq;
4466 	cam_status status;
4467 
4468 	mtx_assert(target->bus->sim->mtx, MA_OWNED);
4469 	/* Make space for us in the device queue on our bus */
4470 	devq = bus->sim->devq;
4471 	status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
4472 	if (status != CAM_REQ_CMP)
4473 		return (NULL);
4474 
4475 	device = (struct cam_ed *)malloc(sizeof(*device),
4476 					 M_CAMDEV, M_NOWAIT|M_ZERO);
4477 	if (device == NULL)
4478 		return (NULL);
4479 
4480 	cam_init_pinfo(&device->devq_entry.pinfo);
4481 	device->devq_entry.device = device;
4482 	device->target = target;
4483 	device->lun_id = lun_id;
4484 	device->sim = bus->sim;
4485 	/* Initialize our queues */
4486 	if (camq_init(&device->drvq, 0) != 0) {
4487 		free(device, M_CAMDEV);
4488 		return (NULL);
4489 	}
4490 	if (cam_ccbq_init(&device->ccbq,
4491 			  bus->sim->max_dev_openings) != 0) {
4492 		camq_fini(&device->drvq);
4493 		free(device, M_CAMDEV);
4494 		return (NULL);
4495 	}
4496 	SLIST_INIT(&device->asyncs);
4497 	SLIST_INIT(&device->periphs);
4498 	device->generation = 0;
4499 	device->flags = CAM_DEV_UNCONFIGURED;
4500 	device->tag_delay_count = 0;
4501 	device->tag_saved_openings = 0;
4502 	device->refcount = 1;
4503 	callout_init_mtx(&device->callout, bus->sim->mtx, 0);
4504 
4505 	cur_device = TAILQ_FIRST(&target->ed_entries);
4506 	while (cur_device != NULL && cur_device->lun_id < lun_id)
4507 		cur_device = TAILQ_NEXT(cur_device, links);
4508 	if (cur_device != NULL)
4509 		TAILQ_INSERT_BEFORE(cur_device, device, links);
4510 	else
4511 		TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4512 	target->refcount++;
4513 	target->generation++;
4514 	return (device);
4515 }
4516 
4517 void
4518 xpt_acquire_device(struct cam_ed *device)
4519 {
4520 
4521 	mtx_assert(device->sim->mtx, MA_OWNED);
4522 	device->refcount++;
4523 }
4524 
4525 void
4526 xpt_release_device(struct cam_ed *device)
4527 {
4528 	struct cam_devq *devq;
4529 
4530 	mtx_assert(device->sim->mtx, MA_OWNED);
4531 	if (--device->refcount > 0)
4532 		return;
4533 
4534 	KASSERT(SLIST_EMPTY(&device->periphs),
4535 	    ("refcount is zero, but periphs list is not empty"));
4536 	if (device->devq_entry.pinfo.index != CAM_UNQUEUED_INDEX)
4537 		panic("Removing device while still queued for ccbs");
4538 
4539 	if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4540 		callout_stop(&device->callout);
4541 
4542 	TAILQ_REMOVE(&device->target->ed_entries, device, links);
4543 	device->target->generation++;
4544 	device->target->bus->sim->max_ccbs -= device->ccbq.devq_openings;
4545 	/* Release our slot in the devq */
4546 	devq = device->target->bus->sim->devq;
4547 	cam_devq_resize(devq, devq->send_queue.array_size - 1);
4548 	camq_fini(&device->drvq);
4549 	cam_ccbq_fini(&device->ccbq);
4550 	/*
4551 	 * Free allocated memory.  free(9) does nothing if the
4552 	 * supplied pointer is NULL, so it is safe to call without
4553 	 * checking.
4554 	 */
4555 	free(device->supported_vpds, M_CAMXPT);
4556 	free(device->device_id, M_CAMXPT);
4557 	free(device->physpath, M_CAMXPT);
4558 	free(device->rcap_buf, M_CAMXPT);
4559 	free(device->serial_num, M_CAMXPT);
4560 
4561 	xpt_release_target(device->target);
4562 	free(device, M_CAMDEV);
4563 }
4564 
4565 u_int32_t
4566 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4567 {
4568 	int	diff;
4569 	int	result;
4570 	struct	cam_ed *dev;
4571 
4572 	dev = path->device;
4573 
4574 	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
4575 	result = cam_ccbq_resize(&dev->ccbq, newopenings);
4576 	if (result == CAM_REQ_CMP && (diff < 0)) {
4577 		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
4578 	}
4579 	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
4580 	 || (dev->inq_flags & SID_CmdQue) != 0)
4581 		dev->tag_saved_openings = newopenings;
4582 	/* Adjust the global limit */
4583 	dev->sim->max_ccbs += diff;
4584 	return (result);
4585 }
4586 
4587 static struct cam_eb *
4588 xpt_find_bus(path_id_t path_id)
4589 {
4590 	struct cam_eb *bus;
4591 
4592 	xpt_lock_buses();
4593 	for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4594 	     bus != NULL;
4595 	     bus = TAILQ_NEXT(bus, links)) {
4596 		if (bus->path_id == path_id) {
4597 			bus->refcount++;
4598 			break;
4599 		}
4600 	}
4601 	xpt_unlock_buses();
4602 	return (bus);
4603 }
4604 
4605 static struct cam_et *
4606 xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
4607 {
4608 	struct cam_et *target;
4609 
4610 	mtx_assert(bus->sim->mtx, MA_OWNED);
4611 	for (target = TAILQ_FIRST(&bus->et_entries);
4612 	     target != NULL;
4613 	     target = TAILQ_NEXT(target, links)) {
4614 		if (target->target_id == target_id) {
4615 			target->refcount++;
4616 			break;
4617 		}
4618 	}
4619 	return (target);
4620 }
4621 
4622 static struct cam_ed *
4623 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4624 {
4625 	struct cam_ed *device;
4626 
4627 	mtx_assert(target->bus->sim->mtx, MA_OWNED);
4628 	for (device = TAILQ_FIRST(&target->ed_entries);
4629 	     device != NULL;
4630 	     device = TAILQ_NEXT(device, links)) {
4631 		if (device->lun_id == lun_id) {
4632 			device->refcount++;
4633 			break;
4634 		}
4635 	}
4636 	return (device);
4637 }
4638 
4639 void
4640 xpt_start_tags(struct cam_path *path)
4641 {
4642 	struct ccb_relsim crs;
4643 	struct cam_ed *device;
4644 	struct cam_sim *sim;
4645 	int    newopenings;
4646 
4647 	device = path->device;
4648 	sim = path->bus->sim;
4649 	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4650 	xpt_freeze_devq(path, /*count*/1);
4651 	device->inq_flags |= SID_CmdQue;
4652 	if (device->tag_saved_openings != 0)
4653 		newopenings = device->tag_saved_openings;
4654 	else
4655 		newopenings = min(device->maxtags,
4656 				  sim->max_tagged_dev_openings);
4657 	xpt_dev_ccbq_resize(path, newopenings);
4658 	xpt_async(AC_GETDEV_CHANGED, path, NULL);
4659 	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4660 	crs.ccb_h.func_code = XPT_REL_SIMQ;
4661 	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4662 	crs.openings
4663 	    = crs.release_timeout
4664 	    = crs.qfrozen_cnt
4665 	    = 0;
4666 	xpt_action((union ccb *)&crs);
4667 }
4668 
4669 void
4670 xpt_stop_tags(struct cam_path *path)
4671 {
4672 	struct ccb_relsim crs;
4673 	struct cam_ed *device;
4674 	struct cam_sim *sim;
4675 
4676 	device = path->device;
4677 	sim = path->bus->sim;
4678 	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4679 	device->tag_delay_count = 0;
4680 	xpt_freeze_devq(path, /*count*/1);
4681 	device->inq_flags &= ~SID_CmdQue;
4682 	xpt_dev_ccbq_resize(path, sim->max_dev_openings);
4683 	xpt_async(AC_GETDEV_CHANGED, path, NULL);
4684 	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4685 	crs.ccb_h.func_code = XPT_REL_SIMQ;
4686 	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4687 	crs.openings
4688 	    = crs.release_timeout
4689 	    = crs.qfrozen_cnt
4690 	    = 0;
4691 	xpt_action((union ccb *)&crs);
4692 }
4693 
4694 static void
4695 xpt_boot_delay(void *arg)
4696 {
4697 
4698 	xpt_release_boot();
4699 }
4700 
4701 static void
4702 xpt_config(void *arg)
4703 {
4704 	/*
4705 	 * Now that interrupts are enabled, go find our devices
4706 	 */
4707 
4708 	/* Setup debugging path */
4709 	if (cam_dflags != CAM_DEBUG_NONE) {
4710 		if (xpt_create_path_unlocked(&cam_dpath, NULL,
4711 				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
4712 				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
4713 			printf("xpt_config: xpt_create_path() failed for debug"
4714 			       " target %d:%d:%d, debugging disabled\n",
4715 			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
4716 			cam_dflags = CAM_DEBUG_NONE;
4717 		}
4718 	} else
4719 		cam_dpath = NULL;
4720 
4721 	periphdriver_init(1);
4722 	xpt_hold_boot();
4723 	callout_init(&xsoftc.boot_callout, 1);
4724 	callout_reset(&xsoftc.boot_callout, hz * xsoftc.boot_delay / 1000,
4725 	    xpt_boot_delay, NULL);
4726 	/* Fire up rescan thread. */
4727 	if (kproc_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
4728 		printf("xpt_config: failed to create rescan thread.\n");
4729 	}
4730 }
4731 
4732 void
4733 xpt_hold_boot(void)
4734 {
4735 	xpt_lock_buses();
4736 	xsoftc.buses_to_config++;
4737 	xpt_unlock_buses();
4738 }
4739 
4740 void
4741 xpt_release_boot(void)
4742 {
4743 	xpt_lock_buses();
4744 	xsoftc.buses_to_config--;
4745 	if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
4746 		struct	xpt_task *task;
4747 
4748 		xsoftc.buses_config_done = 1;
4749 		xpt_unlock_buses();
4750 		/* Call manually because we don't have any busses */
4751 		task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
4752 		if (task != NULL) {
4753 			TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
4754 			taskqueue_enqueue(taskqueue_thread, &task->task);
4755 		}
4756 	} else
4757 		xpt_unlock_buses();
4758 }
4759 
4760 /*
4761  * If the given device only has one peripheral attached to it, and if that
4762  * peripheral is the passthrough driver, announce it.  This insures that the
4763  * user sees some sort of announcement for every peripheral in their system.
4764  */
4765 static int
4766 xptpassannouncefunc(struct cam_ed *device, void *arg)
4767 {
4768 	struct cam_periph *periph;
4769 	int i;
4770 
4771 	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
4772 	     periph = SLIST_NEXT(periph, periph_links), i++);
4773 
4774 	periph = SLIST_FIRST(&device->periphs);
4775 	if ((i == 1)
4776 	 && (strncmp(periph->periph_name, "pass", 4) == 0))
4777 		xpt_announce_periph(periph, NULL);
4778 
4779 	return(1);
4780 }
4781 
4782 static void
4783 xpt_finishconfig_task(void *context, int pending)
4784 {
4785 
4786 	periphdriver_init(2);
4787 	/*
4788 	 * Check for devices with no "standard" peripheral driver
4789 	 * attached.  For any devices like that, announce the
4790 	 * passthrough driver so the user will see something.
4791 	 */
4792 	if (!bootverbose)
4793 		xpt_for_all_devices(xptpassannouncefunc, NULL);
4794 
4795 	/* Release our hook so that the boot can continue. */
4796 	config_intrhook_disestablish(xsoftc.xpt_config_hook);
4797 	free(xsoftc.xpt_config_hook, M_CAMXPT);
4798 	xsoftc.xpt_config_hook = NULL;
4799 
4800 	free(context, M_CAMXPT);
4801 }
4802 
4803 cam_status
4804 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
4805 		   struct cam_path *path)
4806 {
4807 	struct ccb_setasync csa;
4808 	cam_status status;
4809 	int xptpath = 0;
4810 
4811 	if (path == NULL) {
4812 		mtx_lock(&xsoftc.xpt_lock);
4813 		status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
4814 					 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4815 		if (status != CAM_REQ_CMP) {
4816 			mtx_unlock(&xsoftc.xpt_lock);
4817 			return (status);
4818 		}
4819 		xptpath = 1;
4820 	}
4821 
4822 	xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
4823 	csa.ccb_h.func_code = XPT_SASYNC_CB;
4824 	csa.event_enable = event;
4825 	csa.callback = cbfunc;
4826 	csa.callback_arg = cbarg;
4827 	xpt_action((union ccb *)&csa);
4828 	status = csa.ccb_h.status;
4829 
4830 	if (xptpath) {
4831 		xpt_free_path(path);
4832 		mtx_unlock(&xsoftc.xpt_lock);
4833 	}
4834 
4835 	if ((status == CAM_REQ_CMP) &&
4836 	    (csa.event_enable & AC_FOUND_DEVICE)) {
4837 		/*
4838 		 * Get this peripheral up to date with all
4839 		 * the currently existing devices.
4840 		 */
4841 		xpt_for_all_devices(xptsetasyncfunc, &csa);
4842 	}
4843 	if ((status == CAM_REQ_CMP) &&
4844 	    (csa.event_enable & AC_PATH_REGISTERED)) {
4845 		/*
4846 		 * Get this peripheral up to date with all
4847 		 * the currently existing busses.
4848 		 */
4849 		xpt_for_all_busses(xptsetasyncbusfunc, &csa);
4850 	}
4851 
4852 	return (status);
4853 }
4854 
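/*
 * Sketch of a peripheral driver registering for async events at
 * initialization time (the fooasync callback is hypothetical); passing
 * a NULL path, as handled above, requests wildcard notification:
 *
 *	xpt_register_async(AC_FOUND_DEVICE | AC_LOST_DEVICE,
 *	    fooasync, NULL, NULL);
 */
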
4855 static void
4856 xptaction(struct cam_sim *sim, union ccb *work_ccb)
4857 {
4858 	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
4859 
4860 	switch (work_ccb->ccb_h.func_code) {
4861 	/* Common cases first */
4862 	case XPT_PATH_INQ:		/* Path routing inquiry */
4863 	{
4864 		struct ccb_pathinq *cpi;
4865 
4866 		cpi = &work_ccb->cpi;
4867 		cpi->version_num = 1; /* XXX??? */
4868 		cpi->hba_inquiry = 0;
4869 		cpi->target_sprt = 0;
4870 		cpi->hba_misc = 0;
4871 		cpi->hba_eng_cnt = 0;
4872 		cpi->max_target = 0;
4873 		cpi->max_lun = 0;
4874 		cpi->initiator_id = 0;
4875 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
4876 		strncpy(cpi->hba_vid, "", HBA_IDLEN);
4877 		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
4878 		cpi->unit_number = sim->unit_number;
4879 		cpi->bus_id = sim->bus_id;
4880 		cpi->base_transfer_speed = 0;
4881 		cpi->protocol = PROTO_UNSPECIFIED;
4882 		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
4883 		cpi->transport = XPORT_UNSPECIFIED;
4884 		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
4885 		cpi->ccb_h.status = CAM_REQ_CMP;
4886 		xpt_done(work_ccb);
4887 		break;
4888 	}
4889 	default:
4890 		work_ccb->ccb_h.status = CAM_REQ_INVALID;
4891 		xpt_done(work_ccb);
4892 		break;
4893 	}
4894 }
4895 
4896 /*
4897  * The xpt as a "controller" has no interrupt sources, so polling
4898  * is a no-op.
4899  */
4900 static void
4901 xptpoll(struct cam_sim *sim)
4902 {
4903 }
4904 
4905 void
4906 xpt_lock_buses(void)
4907 {
4908 	mtx_lock(&xsoftc.xpt_topo_lock);
4909 }
4910 
4911 void
4912 xpt_unlock_buses(void)
4913 {
4914 	mtx_unlock(&xsoftc.xpt_topo_lock);
4915 }
4916 
4917 static void
4918 camisr(void *dummy)
4919 {
4920 	cam_simq_t queue;
4921 	struct cam_sim *sim;
4922 
4923 	mtx_lock(&cam_simq_lock);
4924 	TAILQ_INIT(&queue);
4925 	while (!TAILQ_EMPTY(&cam_simq)) {
4926 		TAILQ_CONCAT(&queue, &cam_simq, links);
4927 		mtx_unlock(&cam_simq_lock);
4928 
4929 		while ((sim = TAILQ_FIRST(&queue)) != NULL) {
4930 			TAILQ_REMOVE(&queue, sim, links);
4931 			CAM_SIM_LOCK(sim);
4932 			camisr_runqueue(&sim->sim_doneq);
4933 			sim->flags &= ~CAM_SIM_ON_DONEQ;
4934 			CAM_SIM_UNLOCK(sim);
4935 		}
4936 		mtx_lock(&cam_simq_lock);
4937 	}
4938 	mtx_unlock(&cam_simq_lock);
4939 }
4940 
4941 static void
4942 camisr_runqueue(void *V_queue)
4943 {
4944 	cam_isrq_t *queue = V_queue;
4945 	struct	ccb_hdr *ccb_h;
4946 
4947 	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
4948 		int	runq;
4949 
4950 		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
4951 		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
4952 
4953 		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
4954 			  ("camisr\n"));
4955 
4956 		runq = FALSE;
4957 
4958 		if (ccb_h->flags & CAM_HIGH_POWER) {
4959 			struct highpowerlist	*hphead;
4960 			union ccb		*send_ccb;
4961 
4962 			mtx_lock(&xsoftc.xpt_lock);
4963 			hphead = &xsoftc.highpowerq;
4964 
4965 			send_ccb = (union ccb *)STAILQ_FIRST(hphead);
4966 
4967 			/*
4968 			 * Increment the count since this command is done.
4969 			 */
4970 			xsoftc.num_highpower++;
4971 
4972 			/*
4973 			 * Any high powered commands queued up?
4974 			 */
4975 			if (send_ccb != NULL) {
4976 
4977 				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
4978 				mtx_unlock(&xsoftc.xpt_lock);
4979 
4980 				xpt_release_devq(send_ccb->ccb_h.path,
4981 						 /*count*/1, /*runqueue*/TRUE);
4982 			} else
4983 				mtx_unlock(&xsoftc.xpt_lock);
4984 		}
4985 
4986 		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
4987 			struct cam_ed *dev;
4988 
4989 			dev = ccb_h->path->device;
4990 
4991 			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
4992 			ccb_h->path->bus->sim->devq->send_active--;
4993 			ccb_h->path->bus->sim->devq->send_openings++;
4994 			runq = TRUE;
4995 
4996 			if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
4997 			  && (dev->ccbq.dev_active == 0))) {
4998 				dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
4999 				xpt_release_devq(ccb_h->path, /*count*/1,
5000 						 /*run_queue*/FALSE);
5001 			}
5002 
5003 			if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5004 			  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
5005 				dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
5006 				xpt_release_devq(ccb_h->path, /*count*/1,
5007 						 /*run_queue*/FALSE);
5008 			}
5009 
5010 			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5011 			 && (--dev->tag_delay_count == 0))
5012 				xpt_start_tags(ccb_h->path);
5013 			if (!device_is_queued(dev)) {
5014 				(void)xpt_schedule_devq(
5015 				    ccb_h->path->bus->sim->devq, dev);
5016 			}
5017 		}
5018 
5019 		if (ccb_h->status & CAM_RELEASE_SIMQ) {
5020 			xpt_release_simq(ccb_h->path->bus->sim,
5021 					 /*run_queue*/TRUE);
5022 			ccb_h->status &= ~CAM_RELEASE_SIMQ;
5023 			runq = FALSE;
5024 		}
5025 
5026 		if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5027 		 && (ccb_h->status & CAM_DEV_QFRZN)) {
5028 			xpt_release_devq(ccb_h->path, /*count*/1,
5029 					 /*run_queue*/TRUE);
5030 			ccb_h->status &= ~CAM_DEV_QFRZN;
5031 		} else if (runq) {
5032 			xpt_run_devq(ccb_h->path->bus->sim->devq);
5033 		}
5034 
5035 		/* Call the peripheral driver's callback */
5036 		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
5037 	}
5038 }
5039