xref: /freebsd/sys/cam/cam_xpt.c (revision 25408c853d9ecb2e76b9e38407338f86ecb8a55c)
1 /*-
2  * Implementation of the Common Access Method Transport (XPT) layer.
3  *
4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/bus.h>
35 #include <sys/systm.h>
36 #include <sys/types.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/time.h>
40 #include <sys/conf.h>
41 #include <sys/fcntl.h>
42 #include <sys/interrupt.h>
43 #include <sys/sbuf.h>
44 #include <sys/taskqueue.h>
45 
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/sysctl.h>
49 #include <sys/kthread.h>
50 
51 #include <cam/cam.h>
52 #include <cam/cam_ccb.h>
53 #include <cam/cam_periph.h>
54 #include <cam/cam_queue.h>
55 #include <cam/cam_sim.h>
56 #include <cam/cam_xpt.h>
57 #include <cam/cam_xpt_sim.h>
58 #include <cam/cam_xpt_periph.h>
59 #include <cam/cam_xpt_internal.h>
60 #include <cam/cam_debug.h>
61 
62 #include <cam/scsi/scsi_all.h>
63 #include <cam/scsi/scsi_message.h>
64 #include <cam/scsi/scsi_pass.h>
65 
66 #include <machine/md_var.h>	/* geometry translation */
67 #include <machine/stdarg.h>	/* for xpt_print below */
68 
69 #include "opt_cam.h"
70 
71 /*
72  * This is the maximum number of high-powered commands (e.g. start unit)
73  * that can be outstanding at a particular time.
74  */
75 #ifndef CAM_MAX_HIGHPOWER
76 #define CAM_MAX_HIGHPOWER  4
77 #endif
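/*
 * Build-time note (illustrative, not from the original file): because of
 * the #ifndef guard above, the ceiling can be raised by defining the
 * macro in the compiler flags used for the kernel build, along the
 * lines of -DCAM_MAX_HIGHPOWER=8.
 */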
78 
79 /* Data structures internal to the xpt layer */
80 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
81 MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
82 MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
83 MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");
84 
85 /* Object for deferring XPT actions to a taskqueue */
86 struct xpt_task {
87 	struct task	task;
88 	void		*data1;
89 	uintptr_t	data2;
90 };
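/*
 * Usage sketch for struct xpt_task (mirrors how this file defers work,
 * e.g. xpt_finishconfig_task; "my_handler", "some_ptr" and "some_val"
 * are hypothetical names):
 *
 *	struct xpt_task *task;
 *
 *	task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
 *	if (task != NULL) {
 *		TASK_INIT(&task->task, 0, my_handler, task);
 *		task->data1 = some_ptr;
 *		task->data2 = some_val;
 *		taskqueue_enqueue(taskqueue_thread, &task->task);
 *	}
 */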
91 
92 typedef enum {
93 	XPT_FLAG_OPEN		= 0x01
94 } xpt_flags;
95 
96 struct xpt_softc {
97 	xpt_flags		flags;
98 	u_int32_t		xpt_generation;
99 
100 	/* number of high-powered commands that can go through right now */
101 	STAILQ_HEAD(highpowerlist, ccb_hdr)	highpowerq;
102 	int			num_highpower;
103 
104 	/* queue for handling async rescan requests. */
105 	TAILQ_HEAD(, ccb_hdr) ccb_scanq;
106 	int buses_to_config;
107 	int buses_config_done;
108 
109 	/* Registered busses */
110 	TAILQ_HEAD(,cam_eb)	xpt_busses;
111 	u_int			bus_generation;
112 
113 	struct intr_config_hook	*xpt_config_hook;
114 
115 	int			boot_delay;
116 	struct callout 		boot_callout;
117 
118 	struct mtx		xpt_topo_lock;
119 	struct mtx		xpt_lock;
120 };
121 
122 typedef enum {
123 	DM_RET_COPY		= 0x01,
124 	DM_RET_FLAG_MASK	= 0x0f,
125 	DM_RET_NONE		= 0x00,
126 	DM_RET_STOP		= 0x10,
127 	DM_RET_DESCEND		= 0x20,
128 	DM_RET_ERROR		= 0x30,
129 	DM_RET_ACTION_MASK	= 0xf0
130 } dev_match_ret;
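/*
 * Reading aid for dev_match_ret (restating the masks above): the low
 * nibble (DM_RET_FLAG_MASK) holds independent flags such as DM_RET_COPY,
 * while the high nibble (DM_RET_ACTION_MASK) holds exactly one action.
 * The match functions below therefore OR in flags but compare actions,
 * e.g.:
 *
 *	retval |= DM_RET_COPY;
 *	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
 *		retval |= DM_RET_DESCEND;
 */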
131 
132 typedef enum {
133 	XPT_DEPTH_BUS,
134 	XPT_DEPTH_TARGET,
135 	XPT_DEPTH_DEVICE,
136 	XPT_DEPTH_PERIPH
137 } xpt_traverse_depth;
138 
139 struct xpt_traverse_config {
140 	xpt_traverse_depth	depth;
141 	void			*tr_func;
142 	void			*tr_arg;
143 };
144 
145 typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
146 typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
147 typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
148 typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
149 typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
150 
151 /* Transport layer configuration information */
152 static struct xpt_softc xsoftc;
153 
154 TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
155 SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
156            &xsoftc.boot_delay, 0, "Bus registration wait time");
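/*
 * Tunable usage sketch (illustrative; the delay is believed to be in
 * milliseconds in this revision): hold off the end of bus configuration
 * for slow-to-register controllers by setting, e.g. in loader.conf:
 *
 *	kern.cam.boot_delay="10000"
 */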
157 
158 /* Queues for our software interrupt handler */
159 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
160 typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
161 static cam_simq_t cam_simq;
162 static struct mtx cam_simq_lock;
163 
164 /* Pointers to software interrupt handlers */
165 static void *cambio_ih;
166 
167 struct cam_periph *xpt_periph;
168 
169 static periph_init_t xpt_periph_init;
170 
171 static struct periph_driver xpt_driver =
172 {
173 	xpt_periph_init, "xpt",
174 	TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
175 	CAM_PERIPH_DRV_EARLY
176 };
177 
178 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
179 
180 static d_open_t xptopen;
181 static d_close_t xptclose;
182 static d_ioctl_t xptioctl;
183 
184 static struct cdevsw xpt_cdevsw = {
185 	.d_version =	D_VERSION,
186 	.d_flags =	0,
187 	.d_open =	xptopen,
188 	.d_close =	xptclose,
189 	.d_ioctl =	xptioctl,
190 	.d_name =	"xpt",
191 };
192 
193 /* Storage for debugging data structures */
194 struct cam_path *cam_dpath;
195 u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
196 TUNABLE_INT("kern.cam.dflags", &cam_dflags);
197 SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RW,
198 	&cam_dflags, 0, "Enabled debug flags");
199 u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
200 TUNABLE_INT("kern.cam.debug_delay", &cam_debug_delay);
201 SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RW,
202 	&cam_debug_delay, 0, "Delay in us after each debug message");
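/*
 * Runtime usage sketch for the debug knobs above (illustrative; the
 * mask values come from the CAM_DEBUG_* definitions in cam_debug.h):
 *
 *	sysctl kern.cam.dflags=0x1
 *	sysctl kern.cam.debug_delay=1000
 *
 * cam_dpath, the path being traced, is selected separately through an
 * XPT_DEBUG CCB, e.g. via camcontrol(8)'s "debug" subcommand.
 */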
203 
204 /* Our boot-time initialization hook */
205 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
206 
207 static moduledata_t cam_moduledata = {
208 	"cam",
209 	cam_module_event_handler,
210 	NULL
211 };
212 
213 static int	xpt_init(void *);
214 
215 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
216 MODULE_VERSION(cam, 1);
217 
218 
219 static void		xpt_async_bcast(struct async_list *async_head,
220 					u_int32_t async_code,
221 					struct cam_path *path,
222 					void *async_arg);
223 static path_id_t xptnextfreepathid(void);
224 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
225 static union ccb *xpt_get_ccb(struct cam_ed *device);
226 static void	 xpt_run_dev_allocq(struct cam_eb *bus);
227 static void	 xpt_run_dev_sendq(struct cam_eb *bus);
228 static timeout_t xpt_release_devq_timeout;
229 static void	 xpt_release_simq_timeout(void *arg) __unused;
230 static void	 xpt_release_bus(struct cam_eb *bus);
231 static void	 xpt_release_devq_device(struct cam_ed *dev, cam_rl rl,
232 		    u_int count, int run_queue);
233 static struct cam_et*
234 		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
235 static void	 xpt_release_target(struct cam_et *target);
236 static struct cam_eb*
237 		 xpt_find_bus(path_id_t path_id);
238 static struct cam_et*
239 		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
240 static struct cam_ed*
241 		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
242 static void	 xpt_config(void *arg);
243 static xpt_devicefunc_t xptpassannouncefunc;
244 static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
245 static void	 xptpoll(struct cam_sim *sim);
246 static void	 camisr(void *);
247 static void	 camisr_runqueue(void *);
248 static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
249 				    u_int num_patterns, struct cam_eb *bus);
250 static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
251 				       u_int num_patterns,
252 				       struct cam_ed *device);
253 static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
254 				       u_int num_patterns,
255 				       struct cam_periph *periph);
256 static xpt_busfunc_t	xptedtbusfunc;
257 static xpt_targetfunc_t	xptedttargetfunc;
258 static xpt_devicefunc_t	xptedtdevicefunc;
259 static xpt_periphfunc_t	xptedtperiphfunc;
260 static xpt_pdrvfunc_t	xptplistpdrvfunc;
261 static xpt_periphfunc_t	xptplistperiphfunc;
262 static int		xptedtmatch(struct ccb_dev_match *cdm);
263 static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
264 static int		xptbustraverse(struct cam_eb *start_bus,
265 				       xpt_busfunc_t *tr_func, void *arg);
266 static int		xpttargettraverse(struct cam_eb *bus,
267 					  struct cam_et *start_target,
268 					  xpt_targetfunc_t *tr_func, void *arg);
269 static int		xptdevicetraverse(struct cam_et *target,
270 					  struct cam_ed *start_device,
271 					  xpt_devicefunc_t *tr_func, void *arg);
272 static int		xptperiphtraverse(struct cam_ed *device,
273 					  struct cam_periph *start_periph,
274 					  xpt_periphfunc_t *tr_func, void *arg);
275 static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
276 					xpt_pdrvfunc_t *tr_func, void *arg);
277 static int		xptpdperiphtraverse(struct periph_driver **pdrv,
278 					    struct cam_periph *start_periph,
279 					    xpt_periphfunc_t *tr_func,
280 					    void *arg);
281 static xpt_busfunc_t	xptdefbusfunc;
282 static xpt_targetfunc_t	xptdeftargetfunc;
283 static xpt_devicefunc_t	xptdefdevicefunc;
284 static xpt_periphfunc_t	xptdefperiphfunc;
285 static void		xpt_finishconfig_task(void *context, int pending);
286 static void		xpt_dev_async_default(u_int32_t async_code,
287 					      struct cam_eb *bus,
288 					      struct cam_et *target,
289 					      struct cam_ed *device,
290 					      void *async_arg);
291 static struct cam_ed *	xpt_alloc_device_default(struct cam_eb *bus,
292 						 struct cam_et *target,
293 						 lun_id_t lun_id);
294 static xpt_devicefunc_t	xptsetasyncfunc;
295 static xpt_busfunc_t	xptsetasyncbusfunc;
296 static cam_status	xptregister(struct cam_periph *periph,
297 				    void *arg);
298 static __inline int periph_is_queued(struct cam_periph *periph);
299 static __inline int device_is_alloc_queued(struct cam_ed *device);
300 static __inline int device_is_send_queued(struct cam_ed *device);
301 
302 static __inline int
303 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
304 {
305 	int retval;
306 
307 	if ((dev->drvq.entries > 0) &&
308 	    (dev->ccbq.devq_openings > 0) &&
309 	    (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
310 		CAMQ_GET_PRIO(&dev->drvq))) == 0)) {
311 		/*
312 		 * The priority of a device waiting for CCB resources
313 		 * is that of the highest priority peripheral driver
314 		 * enqueued.
315 		 */
316 		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
317 					  &dev->alloc_ccb_entry.pinfo,
318 					  CAMQ_GET_PRIO(&dev->drvq));
319 	} else {
320 		retval = 0;
321 	}
322 
323 	return (retval);
324 }
325 
326 static __inline int
327 xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
328 {
329 	int	retval;
330 
331 	if ((dev->ccbq.queue.entries > 0) &&
332 	    (dev->ccbq.dev_openings > 0) &&
333 	    (cam_ccbq_frozen_top(&dev->ccbq) == 0)) {
334 		/*
335 		 * The priority of a device waiting for controller
336 		 * resources is that of the highest priority CCB
337 		 * enqueued.
338 		 */
339 		retval =
340 		    xpt_schedule_dev(&bus->sim->devq->send_queue,
341 				     &dev->send_ccb_entry.pinfo,
342 				     CAMQ_GET_PRIO(&dev->ccbq.queue));
343 	} else {
344 		retval = 0;
345 	}
346 	return (retval);
347 }
348 
349 static __inline int
350 periph_is_queued(struct cam_periph *periph)
351 {
352 	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
353 }
354 
355 static __inline int
356 device_is_alloc_queued(struct cam_ed *device)
357 {
358 	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
359 }
360 
361 static __inline int
362 device_is_send_queued(struct cam_ed *device)
363 {
364 	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
365 }
366 
367 static void
368 xpt_periph_init(void)
369 {
370 	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
371 }
372 
373 static void
374 xptdone(struct cam_periph *periph, union ccb *done_ccb)
375 {
376 	/* Caller will release the CCB */
377 	wakeup(&done_ccb->ccb_h.cbfcnp);
378 }
379 
380 static int
381 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
382 {
383 
384 	/*
385 	 * Only allow read-write access.
386 	 */
387 	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
388 		return(EPERM);
389 
390 	/*
391 	 * We don't allow nonblocking access.
392 	 */
393 	if ((flags & O_NONBLOCK) != 0) {
394 		printf("%s: can't do nonblocking access\n", devtoname(dev));
395 		return(ENODEV);
396 	}
397 
398 	/* Mark ourselves open */
399 	mtx_lock(&xsoftc.xpt_lock);
400 	xsoftc.flags |= XPT_FLAG_OPEN;
401 	mtx_unlock(&xsoftc.xpt_lock);
402 
403 	return(0);
404 }
405 
406 static int
407 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
408 {
409 
410 	/* Mark ourselves closed */
411 	mtx_lock(&xsoftc.xpt_lock);
412 	xsoftc.flags &= ~XPT_FLAG_OPEN;
413 	mtx_unlock(&xsoftc.xpt_lock);
414 
415 	return(0);
416 }
417 
418 /*
419  * Don't automatically grab the xpt softc lock here even though this is going
420  * through the xpt device.  The xpt device is really just a back door for
421  * accessing other devices and SIMs, so the right thing to do is to grab
422  * the appropriate SIM lock once the bus/SIM is located.
423  */
424 static int
425 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
426 {
427 	int error;
428 
429 	error = 0;
430 
431 	switch(cmd) {
432 	/*
433 	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
434 	 * to accept CCB types that don't quite make sense to send through a
435 	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
436 	 * in the CAM spec.
437 	 */
438 	case CAMIOCOMMAND: {
439 		union ccb *ccb;
440 		union ccb *inccb;
441 		struct cam_eb *bus;
442 
443 		inccb = (union ccb *)addr;
444 
445 		bus = xpt_find_bus(inccb->ccb_h.path_id);
446 		if (bus == NULL)
447 			return (EINVAL);
448 
449 		switch (inccb->ccb_h.func_code) {
450 		case XPT_SCAN_BUS:
451 		case XPT_RESET_BUS:
452 			if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
453 			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
454 				xpt_release_bus(bus);
455 				return (EINVAL);
456 			}
457 			break;
458 		case XPT_SCAN_TGT:
459 			if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
460 			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
461 				xpt_release_bus(bus);
462 				return (EINVAL);
463 			}
464 			break;
465 		default:
466 			break;
467 		}
468 
469 		switch(inccb->ccb_h.func_code) {
470 		case XPT_SCAN_BUS:
471 		case XPT_RESET_BUS:
472 		case XPT_PATH_INQ:
473 		case XPT_ENG_INQ:
474 		case XPT_SCAN_LUN:
475 		case XPT_SCAN_TGT:
476 
477 			ccb = xpt_alloc_ccb();
478 
479 			CAM_SIM_LOCK(bus->sim);
480 
481 			/*
482 			 * Create a path using the bus, target, and lun the
483 			 * user passed in.
484 			 */
485 			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
486 					    inccb->ccb_h.path_id,
487 					    inccb->ccb_h.target_id,
488 					    inccb->ccb_h.target_lun) !=
489 					    CAM_REQ_CMP){
490 				error = EINVAL;
491 				CAM_SIM_UNLOCK(bus->sim);
492 				xpt_free_ccb(ccb);
493 				break;
494 			}
495 			/* Ensure all of our fields are correct */
496 			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
497 				      inccb->ccb_h.pinfo.priority);
498 			xpt_merge_ccb(ccb, inccb);
499 			ccb->ccb_h.cbfcnp = xptdone;
500 			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
501 			bcopy(ccb, inccb, sizeof(union ccb));
502 			xpt_free_path(ccb->ccb_h.path);
503 			xpt_free_ccb(ccb);
504 			CAM_SIM_UNLOCK(bus->sim);
505 			break;
506 
507 		case XPT_DEBUG: {
508 			union ccb ccb;
509 
510 			/*
511 			 * This is an immediate CCB, so it's okay to
512 			 * allocate it on the stack.
513 			 */
514 
515 			CAM_SIM_LOCK(bus->sim);
516 
517 			/*
518 			 * Create a path using the bus, target, and lun the
519 			 * user passed in.
520 			 */
521 			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
522 					    inccb->ccb_h.path_id,
523 					    inccb->ccb_h.target_id,
524 					    inccb->ccb_h.target_lun) !=
525 					    CAM_REQ_CMP){
526 				error = EINVAL;
527 				CAM_SIM_UNLOCK(bus->sim);
528 				break;
529 			}
530 			/* Ensure all of our fields are correct */
531 			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
532 				      inccb->ccb_h.pinfo.priority);
533 			xpt_merge_ccb(&ccb, inccb);
534 			ccb.ccb_h.cbfcnp = xptdone;
535 			xpt_action(&ccb);
536 			CAM_SIM_UNLOCK(bus->sim);
537 			bcopy(&ccb, inccb, sizeof(union ccb));
538 			xpt_free_path(ccb.ccb_h.path);
539 			break;
540 
541 		}
542 		case XPT_DEV_MATCH: {
543 			struct cam_periph_map_info mapinfo;
544 			struct cam_path *old_path;
545 
546 			/*
547 			 * We can't deal with physical addresses for this
548 			 * type of transaction.
549 			 */
550 			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
551 				error = EINVAL;
552 				break;
553 			}
554 
555 			/*
556 			 * Save this in case the caller had it set to
557 			 * something in particular.
558 			 */
559 			old_path = inccb->ccb_h.path;
560 
561 			/*
562 			 * We really don't need a path for the matching
563 			 * code.  The path is needed because of the
564 			 * debugging statements in xpt_action().  They
565 			 * assume that the CCB has a valid path.
566 			 */
567 			inccb->ccb_h.path = xpt_periph->path;
568 
569 			bzero(&mapinfo, sizeof(mapinfo));
570 
571 			/*
572 			 * Map the pattern and match buffers into kernel
573 			 * virtual address space.
574 			 */
575 			error = cam_periph_mapmem(inccb, &mapinfo);
576 
577 			if (error) {
578 				inccb->ccb_h.path = old_path;
579 				break;
580 			}
581 
582 			/*
583 			 * This is an immediate CCB, we can send it on directly.
584 			 */
585 			xpt_action(inccb);
586 
587 			/*
588 			 * Map the buffers back into user space.
589 			 */
590 			cam_periph_unmapmem(inccb, &mapinfo);
591 
592 			inccb->ccb_h.path = old_path;
593 
594 			error = 0;
595 			break;
596 		}
597 		default:
598 			error = ENOTSUP;
599 			break;
600 		}
601 		xpt_release_bus(bus);
602 		break;
603 	}
604 	/*
605 	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
606 	 * with the peripheral driver name and unit name filled in.  The other
607 	 * fields don't really matter as input.  The passthrough driver name
608 	 * ("pass") and unit number are passed back in the ccb.  The current
609 	 * device generation number, the index into the device peripheral
610 	 * driver list, and the status are also passed back.  Note that
611 	 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
612 	 * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
613 	 * (or rather should be) impossible for the device peripheral driver
614 	 * list to change, since we look at the whole thing in one pass and
615 	 * we do it with lock protection.
616 	 *
617 	 */
618 	case CAMGETPASSTHRU: {
619 		union ccb *ccb;
620 		struct cam_periph *periph;
621 		struct periph_driver **p_drv;
622 		char   *name;
623 		u_int unit;
624 		u_int cur_generation;
625 		int base_periph_found;
626 		int splbreaknum;
627 
628 		ccb = (union ccb *)addr;
629 		unit = ccb->cgdl.unit_number;
630 		name = ccb->cgdl.periph_name;
631 		/*
632 		 * Every 100 devices, we want to drop our lock protection to
633 		 * give the software interrupt handler a chance to run.
634 		 * Most systems won't run into this check, but this should
635 		 * avoid starvation in the software interrupt handler in
636 		 * large systems.
637 		 */
638 		splbreaknum = 100;
639 
640 		ccb = (union ccb *)addr;
641 
642 		base_periph_found = 0;
643 
644 		/*
645 		 * Sanity check -- make sure we don't get a null peripheral
646 		 * driver name.
647 		 */
648 		if (*ccb->cgdl.periph_name == '\0') {
649 			error = EINVAL;
650 			break;
651 		}
652 
653 		/* Keep the list from changing while we traverse it */
654 		mtx_lock(&xsoftc.xpt_topo_lock);
655 ptstartover:
656 		cur_generation = xsoftc.xpt_generation;
657 
658 		/* first find our driver in the list of drivers */
659 		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
660 			if (strcmp((*p_drv)->driver_name, name) == 0)
661 				break;
662 
663 		if (*p_drv == NULL) {
664 			mtx_unlock(&xsoftc.xpt_topo_lock);
665 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
666 			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
667 			*ccb->cgdl.periph_name = '\0';
668 			ccb->cgdl.unit_number = 0;
669 			error = ENOENT;
670 			break;
671 		}
672 
673 		/*
674 		 * Run through every peripheral instance of this driver
675 		 * and check to see whether it matches the unit passed
676 		 * in by the user.  If it does, get out of the loops and
677 		 * find the passthrough driver associated with that
678 		 * peripheral driver.
679 		 */
680 		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
681 		     periph = TAILQ_NEXT(periph, unit_links)) {
682 
683 			if (periph->unit_number == unit) {
684 				break;
685 			} else if (--splbreaknum == 0) {
686 				mtx_unlock(&xsoftc.xpt_topo_lock);
687 				mtx_lock(&xsoftc.xpt_topo_lock);
688 				splbreaknum = 100;
689 				if (cur_generation != xsoftc.xpt_generation)
690 				       goto ptstartover;
691 			}
692 		}
693 		/*
694 		 * If we found the peripheral driver that the user passed
695 		 * in, go through all of the peripheral drivers for that
696 		 * particular device and look for a passthrough driver.
697 		 */
698 		if (periph != NULL) {
699 			struct cam_ed *device;
700 			int i;
701 
702 			base_periph_found = 1;
703 			device = periph->path->device;
704 			for (i = 0, periph = SLIST_FIRST(&device->periphs);
705 			     periph != NULL;
706 			     periph = SLIST_NEXT(periph, periph_links), i++) {
707 				/*
708 				 * Check to see whether we have a
709 				 * passthrough device or not.
710 				 */
711 				if (strcmp(periph->periph_name, "pass") == 0) {
712 					/*
713 					 * Fill in the getdevlist fields.
714 					 */
715 					strcpy(ccb->cgdl.periph_name,
716 					       periph->periph_name);
717 					ccb->cgdl.unit_number =
718 						periph->unit_number;
719 					if (SLIST_NEXT(periph, periph_links))
720 						ccb->cgdl.status =
721 							CAM_GDEVLIST_MORE_DEVS;
722 					else
723 						ccb->cgdl.status =
724 						       CAM_GDEVLIST_LAST_DEVICE;
725 					ccb->cgdl.generation =
726 						device->generation;
727 					ccb->cgdl.index = i;
728 					/*
729 					 * Fill in some CCB header fields
730 					 * that the user may want.
731 					 */
732 					ccb->ccb_h.path_id =
733 						periph->path->bus->path_id;
734 					ccb->ccb_h.target_id =
735 						periph->path->target->target_id;
736 					ccb->ccb_h.target_lun =
737 						periph->path->device->lun_id;
738 					ccb->ccb_h.status = CAM_REQ_CMP;
739 					break;
740 				}
741 			}
742 		}
743 
744 		/*
745 		 * If the periph is null here, one of two things has
746 		 * happened.  The first possibility is that we couldn't
747 		 * find the unit number of the particular peripheral driver
748 		 * that the user is asking about.  e.g. the user asks for
749 		 * the passthrough driver for "da11".  We find the list of
750 		 * "da" peripherals all right, but there is no unit 11.
751 		 * The other possibility is that we went through the list
752 		 * of peripheral drivers attached to the device structure,
753 		 * but didn't find one with the name "pass".  Either way,
754 		 * we return ENOENT, since we couldn't find something.
755 		 */
756 		if (periph == NULL) {
757 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
758 			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
759 			*ccb->cgdl.periph_name = '\0';
760 			ccb->cgdl.unit_number = 0;
761 			error = ENOENT;
762 			/*
763 			 * It is unfortunate that this is even necessary,
764 			 * but there are many, many clueless users out there.
765 			 * If base_periph_found is set, the user is looking
766 			 * for the passthrough driver, but doesn't have one
767 			 * compiled into the kernel.
768 			 */
769 			if (base_periph_found == 1) {
770 				printf("xptioctl: pass driver is not in the "
771 				       "kernel\n");
772 				printf("xptioctl: put \"device pass\" in "
773 				       "your kernel config file\n");
774 			}
775 		}
776 		mtx_unlock(&xsoftc.xpt_topo_lock);
777 		break;
778 		}
779 	default:
780 		error = ENOTTY;
781 		break;
782 	}
783 
784 	return(error);
785 }
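/*
 * Userland sketch for the two ioctls above (roughly what camcontrol(8)
 * does, with includes and most error handling elided; "bus" is the
 * caller's path ID, and "da" unit 0 is an arbitrary example).  First a
 * bus rescan via CAMIOCOMMAND, then a passthrough lookup:
 *
 *	union ccb ccb;
 *	int fd = open("/dev/xpt0", O_RDWR);
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_SCAN_BUS;
 *	ccb.ccb_h.path_id = bus;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ccb.ccb_h.pinfo.priority = 5;	(run at a low priority)
 *	if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
 *		err(1, "CAMIOCOMMAND");
 *
 *	strlcpy(ccb.cgdl.periph_name, "da", sizeof(ccb.cgdl.periph_name));
 *	ccb.cgdl.unit_number = 0;
 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0)
 *		printf("pass device: %s%d\n", ccb.cgdl.periph_name,
 *		    ccb.cgdl.unit_number);
 */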
786 
787 static int
788 cam_module_event_handler(module_t mod, int what, void *arg)
789 {
790 	int error;
791 
792 	switch (what) {
793 	case MOD_LOAD:
794 		if ((error = xpt_init(NULL)) != 0)
795 			return (error);
796 		break;
797 	case MOD_UNLOAD:
798 		return EBUSY;
799 	default:
800 		return EOPNOTSUPP;
801 	}
802 
803 	return 0;
804 }
805 
806 static void
807 xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
808 {
809 
810 	if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
811 		xpt_free_path(done_ccb->ccb_h.path);
812 		xpt_free_ccb(done_ccb);
813 	} else {
814 		done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
815 		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
816 	}
817 	xpt_release_boot();
818 }
819 
820 /* thread to handle bus rescans */
821 static void
822 xpt_scanner_thread(void *dummy)
823 {
824 	union ccb	*ccb;
825 	struct cam_sim	*sim;
826 
827 	xpt_lock_buses();
828 	for (;;) {
829 		if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
830 			msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
831 			       "ccb_scanq", 0);
832 		if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
833 			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
834 			xpt_unlock_buses();
835 
836 			sim = ccb->ccb_h.path->bus->sim;
837 			CAM_SIM_LOCK(sim);
838 			xpt_action(ccb);
839 			CAM_SIM_UNLOCK(sim);
840 
841 			xpt_lock_buses();
842 		}
843 	}
844 }
845 
846 void
847 xpt_rescan(union ccb *ccb)
848 {
849 	struct ccb_hdr *hdr;
850 
851 	/* Prepare request */
852 	if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
853 	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
854 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
855 	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
856 	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
857 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
858 	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
859 	    ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
860 		ccb->ccb_h.func_code = XPT_SCAN_LUN;
861 	else {
862 		xpt_print(ccb->ccb_h.path, "illegal scan path\n");
863 		xpt_free_path(ccb->ccb_h.path);
864 		xpt_free_ccb(ccb);
865 		return;
866 	}
867 	ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
868 	ccb->ccb_h.cbfcnp = xpt_rescan_done;
869 	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
870 	/* Don't make duplicate entries for the same paths. */
871 	xpt_lock_buses();
872 	if (ccb->ccb_h.ppriv_ptr1 == NULL) {
873 		TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
874 			if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
875 				wakeup(&xsoftc.ccb_scanq);
876 				xpt_unlock_buses();
877 				xpt_print(ccb->ccb_h.path, "rescan already queued\n");
878 				xpt_free_path(ccb->ccb_h.path);
879 				xpt_free_ccb(ccb);
880 				return;
881 			}
882 		}
883 	}
884 	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
885 	xsoftc.buses_to_config++;
886 	wakeup(&xsoftc.ccb_scanq);
887 	xpt_unlock_buses();
888 }
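/*
 * Kernel-side usage sketch (the pattern SIM drivers use to request an
 * asynchronous rescan; "sim" is assumed to be the caller's registered
 * struct cam_sim):
 *
 *	union ccb *ccb;
 *
 *	if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
 *		return;
 *	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
 *	    cam_sim_path(sim), CAM_TARGET_WILDCARD,
 *	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 *		xpt_free_ccb(ccb);
 *		return;
 *	}
 *	xpt_rescan(ccb);
 *
 * xpt_rescan() derives XPT_SCAN_BUS/TGT/LUN from the path's wildcards,
 * and because a freshly allocated CCB has a NULL cbfcnp, xpt_rescan_done()
 * frees the path and the CCB once the scan completes.
 */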
889 
890 /* Functions accessed by the peripheral drivers */
891 static int
892 xpt_init(void *dummy)
893 {
894 	struct cam_sim *xpt_sim;
895 	struct cam_path *path;
896 	struct cam_devq *devq;
897 	cam_status status;
898 
899 	TAILQ_INIT(&xsoftc.xpt_busses);
900 	TAILQ_INIT(&cam_simq);
901 	TAILQ_INIT(&xsoftc.ccb_scanq);
902 	STAILQ_INIT(&xsoftc.highpowerq);
903 	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
904 
905 	mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
906 	mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
907 	mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);
908 
909 	/*
910 	 * The xpt layer is, itself, the equivalent of a SIM.
911 	 * Allow 16 ccbs in the ccb pool for it.  This should
912 	 * give decent parallelism when we probe busses and
913 	 * perform other XPT functions.
914 	 */
915 	devq = cam_simq_alloc(16);
916 	xpt_sim = cam_sim_alloc(xptaction,
917 				xptpoll,
918 				"xpt",
919 				/*softc*/NULL,
920 				/*unit*/0,
921 				/*mtx*/&xsoftc.xpt_lock,
922 				/*max_dev_transactions*/0,
923 				/*max_tagged_dev_transactions*/0,
924 				devq);
925 	if (xpt_sim == NULL)
926 		return (ENOMEM);
927 
928 	mtx_lock(&xsoftc.xpt_lock);
929 	if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
930 		mtx_unlock(&xsoftc.xpt_lock);
931 		printf("xpt_init: xpt_bus_register failed with status %#x,"
932 		       " failing attach\n", status);
933 		return (EINVAL);
934 	}
935 
936 	/*
937 	 * Looking at the XPT from the SIM layer, the XPT is
938 	 * the equivalent of a peripheral driver.  Allocate
939 	 * a peripheral driver entry for us.
940 	 */
941 	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
942 				      CAM_TARGET_WILDCARD,
943 				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
944 		mtx_unlock(&xsoftc.xpt_lock);
945 		printf("xpt_init: xpt_create_path failed with status %#x,"
946 		       " failing attach\n", status);
947 		return (EINVAL);
948 	}
949 
950 	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
951 			 path, NULL, 0, xpt_sim);
952 	xpt_free_path(path);
953 	mtx_unlock(&xsoftc.xpt_lock);
954 	/* Install our software interrupt handlers */
955 	swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih);
956 	/*
957 	 * Register a callback for when interrupts are enabled.
958 	 */
959 	xsoftc.xpt_config_hook =
960 	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
961 					      M_CAMXPT, M_NOWAIT | M_ZERO);
962 	if (xsoftc.xpt_config_hook == NULL) {
963 		printf("xpt_init: Cannot malloc config hook "
964 		       "- failing attach\n");
965 		return (ENOMEM);
966 	}
967 	xsoftc.xpt_config_hook->ich_func = xpt_config;
968 	if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
969 		free (xsoftc.xpt_config_hook, M_CAMXPT);
970 		printf("xpt_init: config_intrhook_establish failed "
971 		       "- failing attach\n");
972 	}
973 
974 	return (0);
975 }
976 
977 static cam_status
978 xptregister(struct cam_periph *periph, void *arg)
979 {
980 	struct cam_sim *xpt_sim;
981 
982 	if (periph == NULL) {
983 		printf("xptregister: periph was NULL!!\n");
984 		return(CAM_REQ_CMP_ERR);
985 	}
986 
987 	xpt_sim = (struct cam_sim *)arg;
988 	xpt_sim->softc = periph;
989 	xpt_periph = periph;
990 	periph->softc = NULL;
991 
992 	return(CAM_REQ_CMP);
993 }
994 
995 int32_t
996 xpt_add_periph(struct cam_periph *periph)
997 {
998 	struct cam_ed *device;
999 	int32_t	 status;
1000 	struct periph_list *periph_head;
1001 
1002 	mtx_assert(periph->sim->mtx, MA_OWNED);
1003 
1004 	device = periph->path->device;
1005 
1006 	periph_head = &device->periphs;
1007 
1008 	status = CAM_REQ_CMP;
1009 
1010 	if (device != NULL) {
1011 		/*
1012 		 * Make room for this peripheral
1013 		 * so it will fit in the queue
1014 		 * when it's scheduled to run
1015 		 */
1016 		status = camq_resize(&device->drvq,
1017 				     device->drvq.array_size + 1);
1018 
1019 		device->generation++;
1020 
1021 		SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1022 	}
1023 
1024 	mtx_lock(&xsoftc.xpt_topo_lock);
1025 	xsoftc.xpt_generation++;
1026 	mtx_unlock(&xsoftc.xpt_topo_lock);
1027 
1028 	return (status);
1029 }
1030 
1031 void
1032 xpt_remove_periph(struct cam_periph *periph, int topology_lock_held)
1033 {
1034 	struct cam_ed *device;
1035 
1036 	mtx_assert(periph->sim->mtx, MA_OWNED);
1037 
1038 	device = periph->path->device;
1039 
1040 	if (device != NULL) {
1041 		struct periph_list *periph_head;
1042 
1043 		periph_head = &device->periphs;
1044 
1045 		/* Release the slot for this peripheral */
1046 		camq_resize(&device->drvq, device->drvq.array_size - 1);
1047 
1048 		device->generation++;
1049 
1050 		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1051 	}
1052 
1053 	if (topology_lock_held == 0)
1054 		mtx_lock(&xsoftc.xpt_topo_lock);
1055 
1056 	xsoftc.xpt_generation++;
1057 
1058 	if (topology_lock_held == 0)
1059 		mtx_unlock(&xsoftc.xpt_topo_lock);
1060 }
1061 
1062 
1063 void
1064 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1065 {
1066 	struct	cam_path *path = periph->path;
1067 
1068 	mtx_assert(periph->sim->mtx, MA_OWNED);
1069 
1070 	printf("%s%d at %s%d bus %d scbus%d target %d lun %d\n",
1071 	       periph->periph_name, periph->unit_number,
1072 	       path->bus->sim->sim_name,
1073 	       path->bus->sim->unit_number,
1074 	       path->bus->sim->bus_id,
1075 	       path->bus->path_id,
1076 	       path->target->target_id,
1077 	       path->device->lun_id);
1078 	printf("%s%d: ", periph->periph_name, periph->unit_number);
1079 	if (path->device->protocol == PROTO_SCSI)
1080 		scsi_print_inquiry(&path->device->inq_data);
1081 	else if (path->device->protocol == PROTO_ATA ||
1082 	    path->device->protocol == PROTO_SATAPM)
1083 		ata_print_ident(&path->device->ident_data);
1084 	else if (path->device->protocol == PROTO_SEMB)
1085 		semb_print_ident(
1086 		    (struct sep_identify_data *)&path->device->ident_data);
1087 	else
1088 		printf("Unknown protocol device\n");
1089 	if (bootverbose && path->device->serial_num_len > 0) {
1090 		/* Don't wrap the screen - print only the first 60 chars */
1091 		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1092 		       periph->unit_number, path->device->serial_num);
1093 	}
1094 	/* Announce transport details. */
1095 	(*(path->bus->xport->announce))(periph);
1096 	/* Announce command queueing. */
1097 	if (path->device->inq_flags & SID_CmdQue
1098 	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1099 		printf("%s%d: Command Queueing enabled\n",
1100 		       periph->periph_name, periph->unit_number);
1101 	}
1102 	/* Announce caller's details if they were passed in. */
1103 	if (announce_string != NULL)
1104 		printf("%s%d: %s\n", periph->periph_name,
1105 		       periph->unit_number, announce_string);
1106 }
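/*
 * For reference, the announcement printed above looks like this on the
 * console (sample output; device and controller names will differ):
 *
 *	da0 at ahc0 bus 0 scbus0 target 1 lun 0
 *	da0: <SEAGATE ST39102LW 0004> Fixed Direct Access SCSI-2 device
 *	da0: Command Queueing enabled
 */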
1107 
1108 int
1109 xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
1110 {
1111 	int ret = -1;
1112 	struct ccb_dev_advinfo cdai;
1113 
1114 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
1115 
1116 	memset(&cdai, 0, sizeof(cdai));
1117 	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
1118 	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
1119 	cdai.bufsiz = len;
1120 
1121 	if (!strcmp(attr, "GEOM::ident"))
1122 		cdai.buftype = CDAI_TYPE_SERIAL_NUM;
1123 	else if (!strcmp(attr, "GEOM::physpath"))
1124 		cdai.buftype = CDAI_TYPE_PHYS_PATH;
1125 	else
1126 		goto out;
1127 
1128 	cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
1129 	if (cdai.buf == NULL) {
1130 		ret = ENOMEM;
1131 		goto out;
1132 	}
1133 	xpt_action((union ccb *)&cdai); /* can only be synchronous */
1134 	if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
1135 		cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
1136 	if (cdai.provsiz == 0)
1137 		goto out;
1138 	ret = 0;
1139 	if (strlcpy(buf, cdai.buf, len) >= len)
1140 		ret = EFAULT;
1141 
1142 out:
1143 	if (cdai.buf != NULL)
1144 		free(cdai.buf, M_CAMXPT);
1145 	return ret;
1146 }
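/*
 * Usage sketch (illustrative; "periph" is a peripheral whose SIM lock
 * is held, as the mtx_assert above requires):
 *
 *	char ident[100];
 *
 *	if (xpt_getattr(ident, sizeof(ident), "GEOM::ident",
 *	    periph->path) == 0)
 *		printf("ident: %s\n", ident);
 *
 * Note the mixed return convention: -1 for an unknown attribute or an
 * empty result, 0 on success, or an errno such as ENOMEM or EFAULT.
 */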
1147 
1148 static dev_match_ret
1149 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1150 	    struct cam_eb *bus)
1151 {
1152 	dev_match_ret retval;
1153 	int i;
1154 
1155 	retval = DM_RET_NONE;
1156 
1157 	/*
1158 	 * If we aren't given something to match against, that's an error.
1159 	 */
1160 	if (bus == NULL)
1161 		return(DM_RET_ERROR);
1162 
1163 	/*
1164 	 * If there are no match entries, then this bus matches no
1165 	 * matter what.
1166 	 */
1167 	if ((patterns == NULL) || (num_patterns == 0))
1168 		return(DM_RET_DESCEND | DM_RET_COPY);
1169 
1170 	for (i = 0; i < num_patterns; i++) {
1171 		struct bus_match_pattern *cur_pattern;
1172 
1173 		/*
1174 		 * If the pattern in question isn't for a bus node, we
1175 		 * aren't interested.  However, we do indicate to the
1176 		 * calling routine that we should continue descending the
1177 		 * tree, since the user wants to match against lower-level
1178 		 * EDT elements.
1179 		 */
1180 		if (patterns[i].type != DEV_MATCH_BUS) {
1181 			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1182 				retval |= DM_RET_DESCEND;
1183 			continue;
1184 		}
1185 
1186 		cur_pattern = &patterns[i].pattern.bus_pattern;
1187 
1188 		/*
1189 		 * If they want to match any bus node, we give them this
1190 		 * bus node.
1191 		 */
1192 		if (cur_pattern->flags == BUS_MATCH_ANY) {
1193 			/* set the copy flag */
1194 			retval |= DM_RET_COPY;
1195 
1196 			/*
1197 			 * If we've already decided on an action, go ahead
1198 			 * and return.
1199 			 */
1200 			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1201 				return(retval);
1202 		}
1203 
1204 		/*
1205 		 * Not sure why someone would do this...
1206 		 */
1207 		if (cur_pattern->flags == BUS_MATCH_NONE)
1208 			continue;
1209 
1210 		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1211 		 && (cur_pattern->path_id != bus->path_id))
1212 			continue;
1213 
1214 		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1215 		 && (cur_pattern->bus_id != bus->sim->bus_id))
1216 			continue;
1217 
1218 		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1219 		 && (cur_pattern->unit_number != bus->sim->unit_number))
1220 			continue;
1221 
1222 		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1223 		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1224 			     DEV_IDLEN) != 0))
1225 			continue;
1226 
1227 		/*
1228 		 * If we get to this point, the user definitely wants
1229 		 * information on this bus.  So tell the caller to copy the
1230 		 * data out.
1231 		 */
1232 		retval |= DM_RET_COPY;
1233 
1234 		/*
1235 		 * If the return action has been set to descend, then we
1236 		 * know that we've already seen a non-bus matching
1237 		 * expression, therefore we need to further descend the tree.
1238 		 * This won't change by continuing around the loop, so we
1239 		 * go ahead and return.  If we haven't seen a non-bus
1240 		 * matching expression, we keep going around the loop until
1241 		 * we exhaust the matching expressions.  We'll set the stop
1242 		 * flag once we fall out of the loop.
1243 		 */
1244 		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1245 			return(retval);
1246 	}
1247 
1248 	/*
1249 	 * If the return action hasn't been set to descend yet, that means
1250 	 * we haven't seen anything other than bus matching patterns.  So
1251 	 * tell the caller to stop descending the tree -- the user doesn't
1252 	 * want to match against lower level tree elements.
1253 	 */
1254 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1255 		retval |= DM_RET_STOP;
1256 
1257 	return(retval);
1258 }
1259 
1260 static dev_match_ret
1261 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1262 	       struct cam_ed *device)
1263 {
1264 	dev_match_ret retval;
1265 	int i;
1266 
1267 	retval = DM_RET_NONE;
1268 
1269 	/*
1270 	 * If we aren't given something to match against, that's an error.
1271 	 */
1272 	if (device == NULL)
1273 		return(DM_RET_ERROR);
1274 
1275 	/*
1276 	 * If there are no match entries, then this device matches no
1277 	 * matter what.
1278 	 */
1279 	if ((patterns == NULL) || (num_patterns == 0))
1280 		return(DM_RET_DESCEND | DM_RET_COPY);
1281 
1282 	for (i = 0; i < num_patterns; i++) {
1283 		struct device_match_pattern *cur_pattern;
1284 		struct scsi_vpd_device_id *device_id_page;
1285 
1286 		/*
1287 		 * If the pattern in question isn't for a device node, we
1288 		 * aren't interested.
1289 		 */
1290 		if (patterns[i].type != DEV_MATCH_DEVICE) {
1291 			if ((patterns[i].type == DEV_MATCH_PERIPH)
1292 			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1293 				retval |= DM_RET_DESCEND;
1294 			continue;
1295 		}
1296 
1297 		cur_pattern = &patterns[i].pattern.device_pattern;
1298 
1299 		/* Error out if mutually exclusive options are specified. */
1300 		if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1301 		 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1302 			return(DM_RET_ERROR);
1303 
1304 		/*
1305 		 * If they want to match any device node, we give them any
1306 		 * device node.
1307 		 */
1308 		if (cur_pattern->flags == DEV_MATCH_ANY)
1309 			goto copy_dev_node;
1310 
1311 		/*
1312 		 * Not sure why someone would do this...
1313 		 */
1314 		if (cur_pattern->flags == DEV_MATCH_NONE)
1315 			continue;
1316 
1317 		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1318 		 && (cur_pattern->path_id != device->target->bus->path_id))
1319 			continue;
1320 
1321 		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1322 		 && (cur_pattern->target_id != device->target->target_id))
1323 			continue;
1324 
1325 		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1326 		 && (cur_pattern->target_lun != device->lun_id))
1327 			continue;
1328 
1329 		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1330 		 && (cam_quirkmatch((caddr_t)&device->inq_data,
1331 				    (caddr_t)&cur_pattern->data.inq_pat,
1332 				    1, sizeof(cur_pattern->data.inq_pat),
1333 				    scsi_static_inquiry_match) == NULL))
1334 			continue;
1335 
1336 		device_id_page = (struct scsi_vpd_device_id *)device->device_id;
1337 		if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
1338 		 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
1339 		  || scsi_devid_match((uint8_t *)device_id_page->desc_list,
1340 				      device->device_id_len
1341 				    - SVPD_DEVICE_ID_HDR_LEN,
1342 				      cur_pattern->data.devid_pat.id,
1343 				      cur_pattern->data.devid_pat.id_len) != 0))
1344 			continue;
1345 
1346 copy_dev_node:
1347 		/*
1348 		 * If we get to this point, the user definitely wants
1349 		 * information on this device.  So tell the caller to copy
1350 		 * the data out.
1351 		 */
1352 		retval |= DM_RET_COPY;
1353 
1354 		/*
1355 		 * If the return action has been set to descend, then we
1356 		 * know that we've already seen a peripheral matching
1357 		 * expression, therefore we need to further descend the tree.
1358 		 * This won't change by continuing around the loop, so we
1359 		 * go ahead and return.  If we haven't seen a peripheral
1360 		 * matching expression, we keep going around the loop until
1361 		 * we exhaust the matching expressions.  We'll set the stop
1362 		 * flag once we fall out of the loop.
1363 		 */
1364 		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1365 			return(retval);
1366 	}
1367 
1368 	/*
1369 	 * If the return action hasn't been set to descend yet, that means
1370 	 * we haven't seen any peripheral matching patterns.  So tell the
1371 	 * caller to stop descending the tree -- the user doesn't want to
1372 	 * match against lower level tree elements.
1373 	 */
1374 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1375 		retval |= DM_RET_STOP;
1376 
1377 	return(retval);
1378 }
1379 
1380 /*
1381  * Match a single peripheral against any number of match patterns.
1382  */
1383 static dev_match_ret
1384 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1385 	       struct cam_periph *periph)
1386 {
1387 	dev_match_ret retval;
1388 	int i;
1389 
1390 	/*
1391 	 * If we aren't given something to match against, that's an error.
1392 	 */
1393 	if (periph == NULL)
1394 		return(DM_RET_ERROR);
1395 
1396 	/*
1397 	 * If there are no match entries, then this peripheral matches no
1398 	 * matter what.
1399 	 */
1400 	if ((patterns == NULL) || (num_patterns == 0))
1401 		return(DM_RET_STOP | DM_RET_COPY);
1402 
1403 	/*
1404 	 * There aren't any nodes below a peripheral node, so there's no
1405 	 * reason to descend the tree any further.
1406 	 */
1407 	retval = DM_RET_STOP;
1408 
1409 	for (i = 0; i < num_patterns; i++) {
1410 		struct periph_match_pattern *cur_pattern;
1411 
1412 		/*
1413 		 * If the pattern in question isn't for a peripheral, we
1414 		 * aren't interested.
1415 		 */
1416 		if (patterns[i].type != DEV_MATCH_PERIPH)
1417 			continue;
1418 
1419 		cur_pattern = &patterns[i].pattern.periph_pattern;
1420 
1421 		/*
1422 		 * If they want to match on anything, then we will do so.
1423 		 */
1424 		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1425 			/* set the copy flag */
1426 			retval |= DM_RET_COPY;
1427 
1428 			/*
1429 			 * We've already set the return action to stop,
1430 			 * since there are no nodes below peripherals in
1431 			 * the tree.
1432 			 */
1433 			return(retval);
1434 		}
1435 
1436 		/*
1437 		 * Not sure why someone would do this...
1438 		 */
1439 		if (cur_pattern->flags == PERIPH_MATCH_NONE)
1440 			continue;
1441 
1442 		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1443 		 && (cur_pattern->path_id != periph->path->bus->path_id))
1444 			continue;
1445 
1446 		/*
1447 		 * For the target and lun IDs, we have to make sure the
1448 		 * target and lun pointers aren't NULL.  The xpt peripheral
1449 		 * has a wildcard target and device.
1450 		 */
1451 		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1452 		 && ((periph->path->target == NULL)
1453 		 ||(cur_pattern->target_id != periph->path->target->target_id)))
1454 			continue;
1455 
1456 		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1457 		 && ((periph->path->device == NULL)
1458 		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
1459 			continue;
1460 
1461 		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1462 		 && (cur_pattern->unit_number != periph->unit_number))
1463 			continue;
1464 
1465 		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1466 		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
1467 			     DEV_IDLEN) != 0))
1468 			continue;
1469 
1470 		/*
1471 		 * If we get to this point, the user definitely wants
1472 		 * information on this peripheral.  So tell the caller to
1473 		 * copy the data out.
1474 		 */
1475 		retval |= DM_RET_COPY;
1476 
1477 		/*
1478 		 * The return action has already been set to stop, since
1479 		 * peripherals don't have any nodes below them in the EDT.
1480 		 */
1481 		return(retval);
1482 	}
1483 
1484 	/*
1485 	 * If we get to this point, the peripheral that was passed in
1486 	 * doesn't match any of the patterns.
1487 	 */
1488 	return(retval);
1489 }
1490 
1491 static int
1492 xptedtbusfunc(struct cam_eb *bus, void *arg)
1493 {
1494 	struct ccb_dev_match *cdm;
1495 	dev_match_ret retval;
1496 
1497 	cdm = (struct ccb_dev_match *)arg;
1498 
1499 	/*
1500 	 * If our position is for something deeper in the tree, that means
1501 	 * that we've already seen this node.  So, we keep going down.
1502 	 */
1503 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1504 	 && (cdm->pos.cookie.bus == bus)
1505 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1506 	 && (cdm->pos.cookie.target != NULL))
1507 		retval = DM_RET_DESCEND;
1508 	else
1509 		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1510 
1511 	/*
1512 	 * If we got an error, bail out of the search.
1513 	 */
1514 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1515 		cdm->status = CAM_DEV_MATCH_ERROR;
1516 		return(0);
1517 	}
1518 
1519 	/*
1520 	 * If the copy flag is set, copy this bus out.
1521 	 */
1522 	if (retval & DM_RET_COPY) {
1523 		int spaceleft, j;
1524 
1525 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1526 			sizeof(struct dev_match_result));
1527 
1528 		/*
1529 		 * If we don't have enough space to put in another
1530 		 * match result, save our position and tell the
1531 		 * user there are more devices to check.
1532 		 */
1533 		if (spaceleft < sizeof(struct dev_match_result)) {
1534 			bzero(&cdm->pos, sizeof(cdm->pos));
1535 			cdm->pos.position_type =
1536 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1537 
1538 			cdm->pos.cookie.bus = bus;
1539 			cdm->pos.generations[CAM_BUS_GENERATION]=
1540 				xsoftc.bus_generation;
1541 			cdm->status = CAM_DEV_MATCH_MORE;
1542 			return(0);
1543 		}
1544 		j = cdm->num_matches;
1545 		cdm->num_matches++;
1546 		cdm->matches[j].type = DEV_MATCH_BUS;
1547 		cdm->matches[j].result.bus_result.path_id = bus->path_id;
1548 		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1549 		cdm->matches[j].result.bus_result.unit_number =
1550 			bus->sim->unit_number;
1551 		strncpy(cdm->matches[j].result.bus_result.dev_name,
1552 			bus->sim->sim_name, DEV_IDLEN);
1553 	}
1554 
1555 	/*
1556 	 * If the user is only interested in busses, there's no
1557 	 * reason to descend to the next level in the tree.
1558 	 */
1559 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1560 		return(1);
1561 
1562 	/*
1563 	 * If there is a target generation recorded, check it to
1564 	 * make sure the target list hasn't changed.
1565 	 */
1566 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1567 	 && (bus == cdm->pos.cookie.bus)
1568 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1569 	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
1570 	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
1571 	     bus->generation)) {
1572 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1573 		return(0);
1574 	}
1575 
1576 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1577 	 && (cdm->pos.cookie.bus == bus)
1578 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1579 	 && (cdm->pos.cookie.target != NULL))
1580 		return(xpttargettraverse(bus,
1581 					(struct cam_et *)cdm->pos.cookie.target,
1582 					 xptedttargetfunc, arg));
1583 	else
1584 		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
1585 }
1586 
1587 static int
1588 xptedttargetfunc(struct cam_et *target, void *arg)
1589 {
1590 	struct ccb_dev_match *cdm;
1591 
1592 	cdm = (struct ccb_dev_match *)arg;
1593 
1594 	/*
1595 	 * If there is a device list generation recorded, check it to
1596 	 * make sure the device list hasn't changed.
1597 	 */
1598 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1599 	 && (cdm->pos.cookie.bus == target->bus)
1600 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1601 	 && (cdm->pos.cookie.target == target)
1602 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1603 	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
1604 	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
1605 	     target->generation)) {
1606 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1607 		return(0);
1608 	}
1609 
1610 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1611 	 && (cdm->pos.cookie.bus == target->bus)
1612 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1613 	 && (cdm->pos.cookie.target == target)
1614 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1615 	 && (cdm->pos.cookie.device != NULL))
1616 		return(xptdevicetraverse(target,
1617 					(struct cam_ed *)cdm->pos.cookie.device,
1618 					 xptedtdevicefunc, arg));
1619 	else
1620 		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
1621 }
1622 
1623 static int
1624 xptedtdevicefunc(struct cam_ed *device, void *arg)
1625 {
1626 
1627 	struct ccb_dev_match *cdm;
1628 	dev_match_ret retval;
1629 
1630 	cdm = (struct ccb_dev_match *)arg;
1631 
1632 	/*
1633 	 * If our position is for something deeper in the tree, that means
1634 	 * that we've already seen this node.  So, we keep going down.
1635 	 */
1636 	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1637 	 && (cdm->pos.cookie.device == device)
1638 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1639 	 && (cdm->pos.cookie.periph != NULL))
1640 		retval = DM_RET_DESCEND;
1641 	else
1642 		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
1643 					device);
1644 
1645 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1646 		cdm->status = CAM_DEV_MATCH_ERROR;
1647 		return(0);
1648 	}
1649 
1650 	/*
1651 	 * If the copy flag is set, copy this device out.
1652 	 */
1653 	if (retval & DM_RET_COPY) {
1654 		int spaceleft, j;
1655 
1656 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1657 			sizeof(struct dev_match_result));
1658 
1659 		/*
1660 		 * If we don't have enough space to put in another
1661 		 * match result, save our position and tell the
1662 		 * user there are more devices to check.
1663 		 */
1664 		if (spaceleft < sizeof(struct dev_match_result)) {
1665 			bzero(&cdm->pos, sizeof(cdm->pos));
1666 			cdm->pos.position_type =
1667 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1668 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
1669 
1670 			cdm->pos.cookie.bus = device->target->bus;
1671 			cdm->pos.generations[CAM_BUS_GENERATION]=
1672 				xsoftc.bus_generation;
1673 			cdm->pos.cookie.target = device->target;
1674 			cdm->pos.generations[CAM_TARGET_GENERATION] =
1675 				device->target->bus->generation;
1676 			cdm->pos.cookie.device = device;
1677 			cdm->pos.generations[CAM_DEV_GENERATION] =
1678 				device->target->generation;
1679 			cdm->status = CAM_DEV_MATCH_MORE;
1680 			return(0);
1681 		}
1682 		j = cdm->num_matches;
1683 		cdm->num_matches++;
1684 		cdm->matches[j].type = DEV_MATCH_DEVICE;
1685 		cdm->matches[j].result.device_result.path_id =
1686 			device->target->bus->path_id;
1687 		cdm->matches[j].result.device_result.target_id =
1688 			device->target->target_id;
1689 		cdm->matches[j].result.device_result.target_lun =
1690 			device->lun_id;
1691 		cdm->matches[j].result.device_result.protocol =
1692 			device->protocol;
1693 		bcopy(&device->inq_data,
1694 		      &cdm->matches[j].result.device_result.inq_data,
1695 		      sizeof(struct scsi_inquiry_data));
1696 		bcopy(&device->ident_data,
1697 		      &cdm->matches[j].result.device_result.ident_data,
1698 		      sizeof(struct ata_params));
1699 
1700 		/* Let the user know whether this device is unconfigured */
1701 		if (device->flags & CAM_DEV_UNCONFIGURED)
1702 			cdm->matches[j].result.device_result.flags =
1703 				DEV_RESULT_UNCONFIGURED;
1704 		else
1705 			cdm->matches[j].result.device_result.flags =
1706 				DEV_RESULT_NOFLAG;
1707 	}
1708 
1709 	/*
1710 	 * If the user isn't interested in peripherals, don't descend
1711 	 * the tree any further.
1712 	 */
1713 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1714 		return(1);
1715 
1716 	/*
1717 	 * If there is a peripheral list generation recorded, make sure
1718 	 * it hasn't changed.
1719 	 */
1720 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1721 	 && (device->target->bus == cdm->pos.cookie.bus)
1722 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1723 	 && (device->target == cdm->pos.cookie.target)
1724 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1725 	 && (device == cdm->pos.cookie.device)
1726 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1727 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
1728 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1729 	     device->generation)){
1730 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1731 		return(0);
1732 	}
1733 
1734 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1735 	 && (cdm->pos.cookie.bus == device->target->bus)
1736 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1737 	 && (cdm->pos.cookie.target == device->target)
1738 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1739 	 && (cdm->pos.cookie.device == device)
1740 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1741 	 && (cdm->pos.cookie.periph != NULL))
1742 		return(xptperiphtraverse(device,
1743 				(struct cam_periph *)cdm->pos.cookie.periph,
1744 				xptedtperiphfunc, arg));
1745 	else
1746 		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
1747 }
1748 
1749 static int
1750 xptedtperiphfunc(struct cam_periph *periph, void *arg)
1751 {
1752 	struct ccb_dev_match *cdm;
1753 	dev_match_ret retval;
1754 
1755 	cdm = (struct ccb_dev_match *)arg;
1756 
1757 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1758 
1759 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1760 		cdm->status = CAM_DEV_MATCH_ERROR;
1761 		return(0);
1762 	}
1763 
1764 	/*
1765 	 * If the copy flag is set, copy this peripheral out.
1766 	 */
1767 	if (retval & DM_RET_COPY) {
1768 		int spaceleft, j;
1769 
1770 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1771 			sizeof(struct dev_match_result));
1772 
1773 		/*
1774 		 * If we don't have enough space to put in another
1775 		 * match result, save our position and tell the
1776 		 * user there are more devices to check.
1777 		 */
1778 		if (spaceleft < sizeof(struct dev_match_result)) {
1779 			bzero(&cdm->pos, sizeof(cdm->pos));
1780 			cdm->pos.position_type =
1781 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1782 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1783 				CAM_DEV_POS_PERIPH;
1784 
1785 			cdm->pos.cookie.bus = periph->path->bus;
1786 			cdm->pos.generations[CAM_BUS_GENERATION] =
1787 				xsoftc.bus_generation;
1788 			cdm->pos.cookie.target = periph->path->target;
1789 			cdm->pos.generations[CAM_TARGET_GENERATION] =
1790 				periph->path->bus->generation;
1791 			cdm->pos.cookie.device = periph->path->device;
1792 			cdm->pos.generations[CAM_DEV_GENERATION] =
1793 				periph->path->target->generation;
1794 			cdm->pos.cookie.periph = periph;
1795 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1796 				periph->path->device->generation;
1797 			cdm->status = CAM_DEV_MATCH_MORE;
1798 			return(0);
1799 		}
1800 
1801 		j = cdm->num_matches;
1802 		cdm->num_matches++;
1803 		cdm->matches[j].type = DEV_MATCH_PERIPH;
1804 		cdm->matches[j].result.periph_result.path_id =
1805 			periph->path->bus->path_id;
1806 		cdm->matches[j].result.periph_result.target_id =
1807 			periph->path->target->target_id;
1808 		cdm->matches[j].result.periph_result.target_lun =
1809 			periph->path->device->lun_id;
1810 		cdm->matches[j].result.periph_result.unit_number =
1811 			periph->unit_number;
1812 		strncpy(cdm->matches[j].result.periph_result.periph_name,
1813 			periph->periph_name, DEV_IDLEN);
1814 	}
1815 
1816 	return(1);
1817 }
1818 
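/*
 * Walk the EDT on behalf of an XPT_DEV_MATCH request, resuming from any
 * position recorded in cdm->pos by an earlier, partially completed call.
 * Returns 0 if the walk stopped early (cdm->status has already been set
 * by a helper) and 1 once the whole tree has been visited.
 */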
1819 static int
1820 xptedtmatch(struct ccb_dev_match *cdm)
1821 {
1822 	int ret;
1823 
1824 	cdm->num_matches = 0;
1825 
1826 	/*
1827 	 * Check the bus list generation.  If it has changed, the user
1828 	 * needs to reset everything and start over.
1829 	 */
1830 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1831 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
1832 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
1833 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1834 		return(0);
1835 	}
1836 
1837 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1838 	 && (cdm->pos.cookie.bus != NULL))
1839 		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
1840 				     xptedtbusfunc, cdm);
1841 	else
1842 		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
1843 
1844 	/*
1845 	 * If we get back 0, that means that we had to stop before fully
1846 	 * traversing the EDT.  It also means that one of the subroutines
1847 	 * has set the status field to the proper value.  If we get back 1,
1848 	 * we've fully traversed the EDT and copied out any matching entries.
1849 	 */
1850 	if (ret == 1)
1851 		cdm->status = CAM_DEV_MATCH_LAST;
1852 
1853 	return(ret);
1854 }
1855 
1856 static int
1857 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
1858 {
1859 	struct ccb_dev_match *cdm;
1860 
1861 	cdm = (struct ccb_dev_match *)arg;
1862 
1863 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1864 	 && (cdm->pos.cookie.pdrv == pdrv)
1865 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1866 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
1867 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1868 	     (*pdrv)->generation)) {
1869 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1870 		return(0);
1871 	}
1872 
1873 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1874 	 && (cdm->pos.cookie.pdrv == pdrv)
1875 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1876 	 && (cdm->pos.cookie.periph != NULL))
1877 		return(xptpdperiphtraverse(pdrv,
1878 				(struct cam_periph *)cdm->pos.cookie.periph,
1879 				xptplistperiphfunc, arg));
1880 	else
1881 		return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
1882 }
1883 
1884 static int
1885 xptplistperiphfunc(struct cam_periph *periph, void *arg)
1886 {
1887 	struct ccb_dev_match *cdm;
1888 	dev_match_ret retval;
1889 
1890 	cdm = (struct ccb_dev_match *)arg;
1891 
1892 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1893 
1894 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1895 		cdm->status = CAM_DEV_MATCH_ERROR;
1896 		return(0);
1897 	}
1898 
1899 	/*
1900 	 * If the copy flag is set, copy this peripheral out.
1901 	 */
1902 	if (retval & DM_RET_COPY) {
1903 		int spaceleft, j;
1904 
1905 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1906 			sizeof(struct dev_match_result));
1907 
1908 		/*
1909 		 * If we don't have enough space to put in another
1910 		 * match result, save our position and tell the
1911 		 * user there are more devices to check.
1912 		 */
1913 		if (spaceleft < sizeof(struct dev_match_result)) {
1914 			struct periph_driver **pdrv;
1915 
1916 			pdrv = NULL;
1917 			bzero(&cdm->pos, sizeof(cdm->pos));
1918 			cdm->pos.position_type =
1919 				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
1920 				CAM_DEV_POS_PERIPH;
1921 
1922 			/*
1923 			 * This may look a bit nonsensical, but it is
1924 			 * actually quite logical.  There are very few
1925 			 * peripheral drivers, and bloating every peripheral
1926 			 * structure with a pointer back to its parent
1927 			 * peripheral driver linker set entry would cost
1928 			 * more in the long run than doing this quick lookup.
1929 			 */
1930 			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
1931 				if (strcmp((*pdrv)->driver_name,
1932 				    periph->periph_name) == 0)
1933 					break;
1934 			}
1935 
1936 			if (*pdrv == NULL) {
1937 				cdm->status = CAM_DEV_MATCH_ERROR;
1938 				return(0);
1939 			}
1940 
1941 			cdm->pos.cookie.pdrv = pdrv;
1942 			/*
1943 			 * The periph generation slot does double duty, as
1944 			 * does the periph pointer slot.  They are used for
1945 			 * both edt and pdrv lookups and positioning.
1946 			 */
1947 			cdm->pos.cookie.periph = periph;
1948 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1949 				(*pdrv)->generation;
1950 			cdm->status = CAM_DEV_MATCH_MORE;
1951 			return(0);
1952 		}
1953 
1954 		j = cdm->num_matches;
1955 		cdm->num_matches++;
1956 		cdm->matches[j].type = DEV_MATCH_PERIPH;
1957 		cdm->matches[j].result.periph_result.path_id =
1958 			periph->path->bus->path_id;
1959 
1960 		/*
1961 		 * The transport layer peripheral doesn't have a target or
1962 		 * lun.
1963 		 */
1964 		if (periph->path->target)
1965 			cdm->matches[j].result.periph_result.target_id =
1966 				periph->path->target->target_id;
1967 		else
1968 			cdm->matches[j].result.periph_result.target_id = -1;
1969 
1970 		if (periph->path->device)
1971 			cdm->matches[j].result.periph_result.target_lun =
1972 				periph->path->device->lun_id;
1973 		else
1974 			cdm->matches[j].result.periph_result.target_lun = -1;
1975 
1976 		cdm->matches[j].result.periph_result.unit_number =
1977 			periph->unit_number;
1978 		strncpy(cdm->matches[j].result.periph_result.periph_name,
1979 			periph->periph_name, DEV_IDLEN);
1980 	}
1981 
1982 	return(1);
1983 }
1984 
1985 static int
1986 xptperiphlistmatch(struct ccb_dev_match *cdm)
1987 {
1988 	int ret;
1989 
1990 	cdm->num_matches = 0;
1991 
1992 	/*
1993 	 * At this point in the edt traversal function, we check the bus
1994 	 * list generation to make sure that no busses have been added or
1995 	 * removed since the user last sent an XPT_DEV_MATCH ccb through.
1996 	 * For the peripheral driver list traversal function, however, we
1997 	 * don't have to worry about new peripheral driver types coming or
1998 	 * going; they're in a linker set, and therefore can't change
1999 	 * without a recompile.
2000 	 */
2001 
2002 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2003 	 && (cdm->pos.cookie.pdrv != NULL))
2004 		ret = xptpdrvtraverse(
2005 				(struct periph_driver **)cdm->pos.cookie.pdrv,
2006 				xptplistpdrvfunc, cdm);
2007 	else
2008 		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2009 
2010 	/*
2011 	 * If we get back 0, that means that we had to stop before fully
2012 	 * traversing the peripheral driver tree.  It also means that one of
2013 	 * the subroutines has set the status field to the proper value.  If
2014 	 * we get back 1, we've fully traversed the peripheral driver list
2015 	 * and copied out any matching entries.
2016 	 */
2017 	if (ret == 1)
2018 		cdm->status = CAM_DEV_MATCH_LAST;
2019 
2020 	return(ret);
2021 }
2022 
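/*
 * Generic traversal helpers.  Each tr_func callback returns nonzero to
 * continue the walk and 0 to terminate it; the traversal functions
 * propagate that value, so a 0 return means some callback stopped early.
 * Objects are reference-counted across the callback so they cannot be
 * freed out from under the walker.
 */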
2023 static int
2024 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2025 {
2026 	struct cam_eb *bus, *next_bus;
2027 	int retval;
2028 
2029 	retval = 1;
2030 
2031 	mtx_lock(&xsoftc.xpt_topo_lock);
2032 	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
2033 	     bus != NULL;
2034 	     bus = next_bus) {
2035 
2036 		bus->refcount++;
2037 
2038 		/*
2039 		 * XXX The locking here is obviously very complex.  We
2040 		 * should work to simplify it.
2041 		 */
2042 		mtx_unlock(&xsoftc.xpt_topo_lock);
2043 		CAM_SIM_LOCK(bus->sim);
2044 		retval = tr_func(bus, arg);
2045 		CAM_SIM_UNLOCK(bus->sim);
2046 
2047 		mtx_lock(&xsoftc.xpt_topo_lock);
2048 		next_bus = TAILQ_NEXT(bus, links);
2049 		mtx_unlock(&xsoftc.xpt_topo_lock);
2050 
2051 		xpt_release_bus(bus);
2052 
2053 		if (retval == 0)
2054 			return(retval);
2055 		mtx_lock(&xsoftc.xpt_topo_lock);
2056 	}
2057 	mtx_unlock(&xsoftc.xpt_topo_lock);
2058 
2059 	return(retval);
2060 }
2061 
2062 int
2063 xpt_sim_opened(struct cam_sim *sim)
2064 {
2065 	struct cam_eb *bus;
2066 	struct cam_et *target;
2067 	struct cam_ed *device;
2068 	struct cam_periph *periph;
2069 
2070 	KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
2071 	mtx_assert(sim->mtx, MA_OWNED);
2072 
2073 	mtx_lock(&xsoftc.xpt_topo_lock);
2074 	TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
2075 		if (bus->sim != sim)
2076 			continue;
2077 
2078 		TAILQ_FOREACH(target, &bus->et_entries, links) {
2079 			TAILQ_FOREACH(device, &target->ed_entries, links) {
2080 				SLIST_FOREACH(periph, &device->periphs,
2081 				    periph_links) {
2082 					if (periph->refcount > 0) {
2083 						mtx_unlock(&xsoftc.xpt_topo_lock);
2084 						return (1);
2085 					}
2086 				}
2087 			}
2088 		}
2089 	}
2090 
2091 	mtx_unlock(&xsoftc.xpt_topo_lock);
2092 	return (0);
2093 }
2094 
2095 static int
2096 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2097 		  xpt_targetfunc_t *tr_func, void *arg)
2098 {
2099 	struct cam_et *target, *next_target;
2100 	int retval;
2101 
2102 	retval = 1;
2103 	for (target = (start_target ? start_target :
2104 		       TAILQ_FIRST(&bus->et_entries));
2105 	     target != NULL; target = next_target) {
2106 
2107 		target->refcount++;
2108 
2109 		retval = tr_func(target, arg);
2110 
2111 		next_target = TAILQ_NEXT(target, links);
2112 
2113 		xpt_release_target(target);
2114 
2115 		if (retval == 0)
2116 			return(retval);
2117 	}
2118 
2119 	return(retval);
2120 }
2121 
2122 static int
2123 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2124 		  xpt_devicefunc_t *tr_func, void *arg)
2125 {
2126 	struct cam_ed *device, *next_device;
2127 	int retval;
2128 
2129 	retval = 1;
2130 	for (device = (start_device ? start_device :
2131 		       TAILQ_FIRST(&target->ed_entries));
2132 	     device != NULL;
2133 	     device = next_device) {
2134 
2135 		/*
2136 		 * Hold a reference so the current device does not go away
2137 		 * on us.
2138 		 */
2139 		device->refcount++;
2140 
2141 		retval = tr_func(device, arg);
2142 
2143 		/*
2144 		 * Grab our next pointer before we release the current
2145 		 * device.
2146 		 */
2147 		next_device = TAILQ_NEXT(device, links);
2148 
2149 		xpt_release_device(device);
2150 
2151 		if (retval == 0)
2152 			return(retval);
2153 	}
2154 
2155 	return(retval);
2156 }
2157 
2158 static int
2159 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2160 		  xpt_periphfunc_t *tr_func, void *arg)
2161 {
2162 	struct cam_periph *periph, *next_periph;
2163 	int retval;
2164 
2165 	retval = 1;
2166 
2167 	xpt_lock_buses();
2168 	for (periph = (start_periph ? start_periph :
2169 		       SLIST_FIRST(&device->periphs));
2170 	     periph != NULL;
2171 	     periph = next_periph) {
2172 
2174 		/*
2175 		 * In this case, we want to show peripherals that have been
2176 		 * invalidated, but not peripherals that are scheduled to
2177 		 * be freed.  So instead of calling cam_periph_acquire(),
2178 		 * which will fail if the periph has been invalidated, we
2179 		 * just check for the free flag here.  If it is free, we
2180 		 * skip to the next periph.
2181 		 */
2182 		if (periph->flags & CAM_PERIPH_FREE) {
2183 			next_periph = SLIST_NEXT(periph, periph_links);
2184 			continue;
2185 		}
2186 
2187 		/*
2188 		 * Acquire a reference to this periph while we call the
2189 		 * traversal function, so it can't go away.
2190 		 */
2191 		periph->refcount++;
2192 
2193 		xpt_unlock_buses();
2194 
2195 		retval = tr_func(periph, arg);
2196 
2197 		/*
2198 		 * We need the lock for list traversal.
2199 		 */
2200 		xpt_lock_buses();
2201 
2202 		/*
2203 		 * Grab the next peripheral before we release this one, so
2204 		 * our next pointer is still valid.
2205 		 */
2206 		next_periph = SLIST_NEXT(periph, periph_links);
2207 
2208 		cam_periph_release_locked_buses(periph);
2209 
2210 		if (retval == 0)
2211 			goto bailout_done;
2212 	}
2213 
2214 bailout_done:
2215 
2216 	xpt_unlock_buses();
2217 
2218 	return(retval);
2219 }
2220 
2221 static int
2222 xptpdrvtraverse(struct periph_driver **start_pdrv,
2223 		xpt_pdrvfunc_t *tr_func, void *arg)
2224 {
2225 	struct periph_driver **pdrv;
2226 	int retval;
2227 
2228 	retval = 1;
2229 
2230 	/*
2231 	 * We don't traverse the peripheral driver list like we do the
2232 	 * other lists, because it is a linker set, and therefore cannot be
2233 	 * changed during runtime.  If the peripheral driver list is ever
2234 	 * re-done to be something other than a linker set (i.e. it can
2235 	 * change while the system is running), the list traversal should
2236 	 * be modified to work like the other traversal functions.
2237 	 */
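	/*
	 * Drivers are added to the set with PERIPHDRIVER_DECLARE(); the
	 * "da" driver, for example, declares
	 * "PERIPHDRIVER_DECLARE(da, dadriver);" in its own source file.
	 */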
2238 	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2239 	     *pdrv != NULL; pdrv++) {
2240 		retval = tr_func(pdrv, arg);
2241 
2242 		if (retval == 0)
2243 			return(retval);
2244 	}
2245 
2246 	return(retval);
2247 }
2248 
2249 static int
2250 xptpdperiphtraverse(struct periph_driver **pdrv,
2251 		    struct cam_periph *start_periph,
2252 		    xpt_periphfunc_t *tr_func, void *arg)
2253 {
2254 	struct cam_periph *periph, *next_periph;
2255 	int retval;
2256 
2257 	retval = 1;
2258 
2259 	xpt_lock_buses();
2260 	for (periph = (start_periph ? start_periph :
2261 	     TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2262 	     periph = next_periph) {
2263 
2265 		/*
2266 		 * In this case, we want to show peripherals that have been
2267 		 * invalidated, but not peripherals that are scheduled to
2268 		 * be freed.  So instead of calling cam_periph_acquire(),
2269 		 * which will fail if the periph has been invalidated, we
2270 		 * just check for the free flag here.  If it is free, we
2271 		 * skip to the next periph.
2272 		 */
2273 		if (periph->flags & CAM_PERIPH_FREE) {
2274 			next_periph = TAILQ_NEXT(periph, unit_links);
2275 			continue;
2276 		}
2277 
2278 		/*
2279 		 * Acquire a reference to this periph while we call the
2280 		 * traversal function, so it can't go away.
2281 		 */
2282 		periph->refcount++;
2283 
2284 		/*
2285 		 * XXX KDM we have the topology lock here, but in
2286 		 * xptperiphtraverse(), we drop it before calling the
2287 		 * traversal function.  Which is correct?
2288 		 */
2289 		retval = tr_func(periph, arg);
2290 
2291 		/*
2292 		 * Grab the next peripheral before we release this one, so
2293 		 * our next pointer is still valid.
2294 		 */
2295 		next_periph = TAILQ_NEXT(periph, unit_links);
2296 
2297 		cam_periph_release_locked_buses(periph);
2298 
2299 		if (retval == 0)
2300 			goto bailout_done;
2301 	}
2302 bailout_done:
2303 
2304 	xpt_unlock_buses();
2305 
2306 	return(retval);
2307 }
2308 
2309 static int
2310 xptdefbusfunc(struct cam_eb *bus, void *arg)
2311 {
2312 	struct xpt_traverse_config *tr_config;
2313 
2314 	tr_config = (struct xpt_traverse_config *)arg;
2315 
2316 	if (tr_config->depth == XPT_DEPTH_BUS) {
2317 		xpt_busfunc_t *tr_func;
2318 
2319 		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2320 
2321 		return(tr_func(bus, tr_config->tr_arg));
2322 	} else
2323 		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2324 }
2325 
2326 static int
2327 xptdeftargetfunc(struct cam_et *target, void *arg)
2328 {
2329 	struct xpt_traverse_config *tr_config;
2330 
2331 	tr_config = (struct xpt_traverse_config *)arg;
2332 
2333 	if (tr_config->depth == XPT_DEPTH_TARGET) {
2334 		xpt_targetfunc_t *tr_func;
2335 
2336 		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2337 
2338 		return(tr_func(target, tr_config->tr_arg));
2339 	} else
2340 		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2341 }
2342 
2343 static int
2344 xptdefdevicefunc(struct cam_ed *device, void *arg)
2345 {
2346 	struct xpt_traverse_config *tr_config;
2347 
2348 	tr_config = (struct xpt_traverse_config *)arg;
2349 
2350 	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2351 		xpt_devicefunc_t *tr_func;
2352 
2353 		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2354 
2355 		return(tr_func(device, tr_config->tr_arg));
2356 	} else
2357 		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2358 }
2359 
2360 static int
2361 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2362 {
2363 	struct xpt_traverse_config *tr_config;
2364 	xpt_periphfunc_t *tr_func;
2365 
2366 	tr_config = (struct xpt_traverse_config *)arg;
2367 
2368 	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2369 
2370 	/*
2371 	 * Unlike the other default functions, we don't check for depth
2372 	 * here.  The peripheral driver level is the last level in the EDT,
2373 	 * so if we're here, we should execute the function in question.
2374 	 */
2375 	return(tr_func(periph, tr_config->tr_arg));
2376 }
2377 
2378 /*
2379  * Execute the given function for every bus in the EDT.
2380  */
2381 static int
2382 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2383 {
2384 	struct xpt_traverse_config tr_config;
2385 
2386 	tr_config.depth = XPT_DEPTH_BUS;
2387 	tr_config.tr_func = tr_func;
2388 	tr_config.tr_arg = arg;
2389 
2390 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2391 }
2392 
2393 /*
2394  * Execute the given function for every device in the EDT.
2395  */
2396 static int
2397 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2398 {
2399 	struct xpt_traverse_config tr_config;
2400 
2401 	tr_config.depth = XPT_DEPTH_DEVICE;
2402 	tr_config.tr_func = tr_func;
2403 	tr_config.tr_arg = arg;
2404 
2405 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2406 }
2407 
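/*
 * A callback handed to xpt_for_all_devices() follows the xpt_devicefunc_t
 * contract described above: return 1 to keep walking, 0 to abort the
 * traversal.  xptsetasyncfunc() below is a typical example; it replays
 * AC_FOUND_DEVICE for every configured device.
 */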
2408 static int
2409 xptsetasyncfunc(struct cam_ed *device, void *arg)
2410 {
2411 	struct cam_path path;
2412 	struct ccb_getdev cgd;
2413 	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2414 
2415 	/*
2416 	 * Don't report unconfigured devices (Wildcard devs,
2417 	 * devices only for target mode, device instances
2418 	 * that have been invalidated but are waiting for
2419 	 * their last reference count to be released).
2420 	 */
2421 	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2422 		return (1);
2423 
2424 	xpt_compile_path(&path,
2425 			 NULL,
2426 			 device->target->bus->path_id,
2427 			 device->target->target_id,
2428 			 device->lun_id);
2429 	xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
2430 	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2431 	xpt_action((union ccb *)&cgd);
2432 	csa->callback(csa->callback_arg,
2433 			    AC_FOUND_DEVICE,
2434 			    &path, &cgd);
2435 	xpt_release_path(&path);
2436 
2437 	return(1);
2438 }
2439 
2440 static int
2441 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2442 {
2443 	struct cam_path path;
2444 	struct ccb_pathinq cpi;
2445 	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2446 
2447 	xpt_compile_path(&path, /*periph*/NULL,
2448 			 bus->sim->path_id,
2449 			 CAM_TARGET_WILDCARD,
2450 			 CAM_LUN_WILDCARD);
2451 	xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
2452 	cpi.ccb_h.func_code = XPT_PATH_INQ;
2453 	xpt_action((union ccb *)&cpi);
2454 	csa->callback(csa->callback_arg,
2455 			    AC_PATH_REGISTERED,
2456 			    &path, &cpi);
2457 	xpt_release_path(&path);
2458 
2459 	return(1);
2460 }
2461 
2462 void
2463 xpt_action(union ccb *start_ccb)
2464 {
2465 
2466 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2467 
2468 	start_ccb->ccb_h.status = CAM_REQ_INPROG;
2469 	/* Compatibility for RL-unaware code. */
2470 	if (CAM_PRIORITY_TO_RL(start_ccb->ccb_h.pinfo.priority) == 0)
2471 	    start_ccb->ccb_h.pinfo.priority += CAM_PRIORITY_NORMAL - 1;
2472 	(*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb);
2473 }
2474 
2475 void
2476 xpt_action_default(union ccb *start_ccb)
2477 {
2478 	char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2479 	struct cam_path *path;
2480 
2481 	path = start_ccb->ccb_h.path;
2482 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));
2483 
2484 	switch (start_ccb->ccb_h.func_code) {
2485 	case XPT_SCSI_IO:
2486 	{
2487 		struct cam_ed *device;
2488 
2489 		/*
2490 		 * For the sake of compatibility with SCSI-1
2491 		 * devices that may not understand the identify
2492 		 * message, we include lun information in the
2493 		 * second byte of all commands.  SCSI-1 specifies
2494 		 * that luns are a 3 bit value and reserves only 3
2495 		 * bits for lun information in the CDB.  Later
2496 		 * revisions of the SCSI spec allow for more than 8
2497 		 * luns, but have deprecated lun information in the
2498 		 * CDB.  So, if the lun won't fit, we must omit it.
2499 		 *
2500 		 * Also be aware that during initial probing for devices,
2501 		 * the inquiry information is unknown but initialized to 0.
2502 		 * This means that this code will be exercised while probing
2503 		 * devices with an ANSI revision greater than 2.
2504 		 */
2505 		device = path->device;
2506 		if (device->protocol_version <= SCSI_REV_2
2507 		 && start_ccb->ccb_h.target_lun < 8
2508 		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2509 
2510 			start_ccb->csio.cdb_io.cdb_bytes[1] |=
2511 			    start_ccb->ccb_h.target_lun << 5;
2512 		}
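		/*
		 * For example, target_lun 2 ORs (2 << 5) == 0x40 into
		 * byte 1 of the CDB, the SCSI-1 LUN field (bits 7-5).
		 */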
2513 		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2514 		CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2515 			  scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2516 			  	       &path->device->inq_data),
2517 			  scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2518 					  cdb_str, sizeof(cdb_str))));
2519 	}
2520 	/* FALLTHROUGH */
2521 	case XPT_TARGET_IO:
2522 	case XPT_CONT_TARGET_IO:
2523 		start_ccb->csio.sense_resid = 0;
2524 		start_ccb->csio.resid = 0;
2525 		/* FALLTHROUGH */
2526 	case XPT_ATA_IO:
2527 		if (start_ccb->ccb_h.func_code == XPT_ATA_IO) {
2528 			start_ccb->ataio.resid = 0;
2529 			CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. ACB: %s\n",
2530 			    ata_op_string(&start_ccb->ataio.cmd),
2531 			    ata_cmd_string(&start_ccb->ataio.cmd,
2532 					  cdb_str, sizeof(cdb_str))));
2533 		}
2534 		/* FALLTHROUGH */
2535 	case XPT_RESET_DEV:
2536 	case XPT_ENG_EXEC:
2537 	case XPT_SMP_IO:
2538 	{
2539 		int frozen;
2540 
2541 		frozen = cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2542 		path->device->sim->devq->alloc_openings += frozen;
2543 		if (frozen > 0)
2544 			xpt_run_dev_allocq(path->bus);
2545 		if (xpt_schedule_dev_sendq(path->bus, path->device))
2546 			xpt_run_dev_sendq(path->bus);
2547 		break;
2548 	}
2549 	case XPT_CALC_GEOMETRY:
2550 	{
2551 		struct cam_sim *sim;
2552 
2553 		/* Filter out garbage */
2554 		if (start_ccb->ccg.block_size == 0
2555 		 || start_ccb->ccg.volume_size == 0) {
2556 			start_ccb->ccg.cylinders = 0;
2557 			start_ccb->ccg.heads = 0;
2558 			start_ccb->ccg.secs_per_track = 0;
2559 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2560 			break;
2561 		}
2562 #if defined(PC98) || defined(__sparc64__)
2563 		/*
2564 		 * In a PC-98 system, geometry translation depends on
2565 		 * the "real" device geometry obtained from mode page 4.
2566 		 * SCSI geometry translation is performed in the
2567 		 * initialization routine of the SCSI BIOS and the result
2568 		 * stored in host memory.  If the translation is available
2569 		 * in host memory, use it.  If not, rely on the default
2570 		 * translation the device driver performs.
2571 		 * For sparc64, we may need to adjust the geometry of large
2572 		 * disks in order to fit the limitations of the 16-bit
2573 		 * fields of the VTOC8 disk label.
2574 		 */
2575 		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2576 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2577 			break;
2578 		}
2579 #endif
2580 		sim = path->bus->sim;
2581 		(*(sim->sim_action))(sim, start_ccb);
2582 		break;
2583 	}
2584 	case XPT_ABORT:
2585 	{
2586 		union ccb *abort_ccb;
2587 
2588 		abort_ccb = start_ccb->cab.abort_ccb;
2589 		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2590 
2591 			if (abort_ccb->ccb_h.pinfo.index >= 0) {
2592 				struct cam_ccbq *ccbq;
2593 				struct cam_ed *device;
2594 
2595 				device = abort_ccb->ccb_h.path->device;
2596 				ccbq = &device->ccbq;
2597 				device->sim->devq->alloc_openings -=
2598 				    cam_ccbq_remove_ccb(ccbq, abort_ccb);
2599 				abort_ccb->ccb_h.status =
2600 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2601 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2602 				xpt_done(abort_ccb);
2603 				start_ccb->ccb_h.status = CAM_REQ_CMP;
2604 				break;
2605 			}
2606 			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2607 			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2608 				/*
2609 				 * We've caught this ccb en route to
2610 				 * the SIM.  Flag it for abort and the
2611 				 * SIM will do so just before starting
2612 				 * real work on the CCB.
2613 				 */
2614 				abort_ccb->ccb_h.status =
2615 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2616 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2617 				start_ccb->ccb_h.status = CAM_REQ_CMP;
2618 				break;
2619 			}
2620 		}
2621 		if (XPT_FC_IS_QUEUED(abort_ccb)
2622 		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2623 			/*
2624 			 * It's already completed but waiting
2625 			 * for our SWI to get to it.
2626 			 */
2627 			start_ccb->ccb_h.status = CAM_UA_ABORT;
2628 			break;
2629 		}
2630 		/*
2631 		 * If we weren't able to take care of the abort request
2632 		 * in the XPT, pass the request down to the SIM for processing.
2633 		 */
2634 	}
2635 	/* FALLTHROUGH */
2636 	case XPT_ACCEPT_TARGET_IO:
2637 	case XPT_EN_LUN:
2638 	case XPT_IMMED_NOTIFY:
2639 	case XPT_NOTIFY_ACK:
2640 	case XPT_RESET_BUS:
2641 	case XPT_IMMEDIATE_NOTIFY:
2642 	case XPT_NOTIFY_ACKNOWLEDGE:
2643 	case XPT_GET_SIM_KNOB:
2644 	case XPT_SET_SIM_KNOB:
2645 	{
2646 		struct cam_sim *sim;
2647 
2648 		sim = path->bus->sim;
2649 		(*(sim->sim_action))(sim, start_ccb);
2650 		break;
2651 	}
2652 	case XPT_PATH_INQ:
2653 	{
2654 		struct cam_sim *sim;
2655 
2656 		sim = path->bus->sim;
2657 		(*(sim->sim_action))(sim, start_ccb);
2658 		break;
2659 	}
2660 	case XPT_PATH_STATS:
2661 		start_ccb->cpis.last_reset = path->bus->last_reset;
2662 		start_ccb->ccb_h.status = CAM_REQ_CMP;
2663 		break;
2664 	case XPT_GDEV_TYPE:
2665 	{
2666 		struct cam_ed *dev;
2667 
2668 		dev = path->device;
2669 		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2670 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2671 		} else {
2672 			struct ccb_getdev *cgd;
2673 
2674 			cgd = &start_ccb->cgd;
2675 			cgd->protocol = dev->protocol;
2676 			cgd->inq_data = dev->inq_data;
2677 			cgd->ident_data = dev->ident_data;
2678 			cgd->inq_flags = dev->inq_flags;
2679 			cgd->ccb_h.status = CAM_REQ_CMP;
2680 			cgd->serial_num_len = dev->serial_num_len;
2681 			if ((dev->serial_num_len > 0)
2682 			 && (dev->serial_num != NULL))
2683 				bcopy(dev->serial_num, cgd->serial_num,
2684 				      dev->serial_num_len);
2685 		}
2686 		break;
2687 	}
2688 	case XPT_GDEV_STATS:
2689 	{
2690 		struct cam_ed *dev;
2691 
2692 		dev = path->device;
2693 		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2694 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2695 		} else {
2696 			struct ccb_getdevstats *cgds;
2697 			struct cam_eb *bus;
2698 			struct cam_et *tar;
2699 
2700 			cgds = &start_ccb->cgds;
2701 			bus = path->bus;
2702 			tar = path->target;
2703 			cgds->dev_openings = dev->ccbq.dev_openings;
2704 			cgds->dev_active = dev->ccbq.dev_active;
2705 			cgds->devq_openings = dev->ccbq.devq_openings;
2706 			cgds->devq_queued = dev->ccbq.queue.entries;
2707 			cgds->held = dev->ccbq.held;
2708 			cgds->last_reset = tar->last_reset;
2709 			cgds->maxtags = dev->maxtags;
2710 			cgds->mintags = dev->mintags;
2711 			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2712 				cgds->last_reset = bus->last_reset;
2713 			cgds->ccb_h.status = CAM_REQ_CMP;
2714 		}
2715 		break;
2716 	}
2717 	case XPT_GDEVLIST:
2718 	{
2719 		struct cam_periph	*nperiph;
2720 		struct periph_list	*periph_head;
2721 		struct ccb_getdevlist	*cgdl;
2722 		u_int			i;
2723 		struct cam_ed		*device;
2724 		int			found;
2725 
2726 
2728 
2729 		/*
2730 		 * Don't want anyone mucking with our data.
2731 		 */
2732 		device = path->device;
2733 		periph_head = &device->periphs;
2734 		cgdl = &start_ccb->cgdl;
2735 
2736 		/*
2737 		 * Check and see if the list has changed since the user
2738 		 * last requested a list member.  If so, tell them that the
2739 		 * list has changed, and therefore they need to start over
2740 		 * from the beginning.
2741 		 */
2742 		if ((cgdl->index != 0) &&
2743 		    (cgdl->generation != device->generation)) {
2744 			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2745 			break;
2746 		}
2747 
2748 		/*
2749 		 * Traverse the list of peripherals and attempt to find
2750 		 * the requested peripheral.
2751 		 */
2752 		for (nperiph = SLIST_FIRST(periph_head), i = 0;
2753 		     (nperiph != NULL) && (i <= cgdl->index);
2754 		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
2755 			if (i == cgdl->index) {
2756 				strncpy(cgdl->periph_name,
2757 					nperiph->periph_name,
2758 					DEV_IDLEN);
2759 				cgdl->unit_number = nperiph->unit_number;
2760 				found = 1;
2761 			}
2762 		}
2763 		if (found == 0) {
2764 			cgdl->status = CAM_GDEVLIST_ERROR;
2765 			break;
2766 		}
2767 
2768 		if (nperiph == NULL)
2769 			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2770 		else
2771 			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2772 
2773 		cgdl->index++;
2774 		cgdl->generation = device->generation;
2775 
2776 		cgdl->ccb_h.status = CAM_REQ_CMP;
2777 		break;
2778 	}
2779 	case XPT_DEV_MATCH:
2780 	{
2781 		dev_pos_type position_type;
2782 		struct ccb_dev_match *cdm;
2783 
2784 		cdm = &start_ccb->cdm;
2785 
2786 		/*
2787 		 * There are two ways of getting at information in the EDT.
2788 		 * The first way is via the primary EDT tree.  It starts
2789 		 * with a list of busses, then a list of targets on a bus,
2790 		 * then devices/luns on a target, and then peripherals on a
2791 		 * device/lun.  The "other" way is by the peripheral driver
2792 		 * lists.  The peripheral driver lists are organized by
2793 		 * peripheral driver (obviously), so it makes sense to
2794 		 * use the peripheral driver list if the user is looking
2795 		 * for something like "da1", or all "da" devices.  If the
2796 		 * user is looking for something on a particular bus/target
2797 		 * or lun, it's generally better to go through the EDT tree.
2798 		 */
2799 
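		/*
		 * Userland (camcontrol(8), for instance) typically drives
		 * this in a loop: submit the CCB, consume num_matches
		 * results, and resubmit with cdm->pos preserved for as
		 * long as cdm->status comes back CAM_DEV_MATCH_MORE.
		 */
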
2800 		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2801 			position_type = cdm->pos.position_type;
2802 		else {
2803 			u_int i;
2804 
2805 			position_type = CAM_DEV_POS_NONE;
2806 
2807 			for (i = 0; i < cdm->num_patterns; i++) {
2808 				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2809 				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
2810 					position_type = CAM_DEV_POS_EDT;
2811 					break;
2812 				}
2813 			}
2814 
2815 			if (cdm->num_patterns == 0)
2816 				position_type = CAM_DEV_POS_EDT;
2817 			else if (position_type == CAM_DEV_POS_NONE)
2818 				position_type = CAM_DEV_POS_PDRV;
2819 		}
2820 
2821 		switch (position_type & CAM_DEV_POS_TYPEMASK) {
2822 		case CAM_DEV_POS_EDT:
2823 			xptedtmatch(cdm);
2824 			break;
2825 		case CAM_DEV_POS_PDRV:
2826 			xptperiphlistmatch(cdm);
2827 			break;
2828 		default:
2829 			cdm->status = CAM_DEV_MATCH_ERROR;
2830 			break;
2831 		}
2832 
2833 		if (cdm->status == CAM_DEV_MATCH_ERROR)
2834 			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2835 		else
2836 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2837 
2838 		break;
2839 	}
2840 	case XPT_SASYNC_CB:
2841 	{
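		/*
		 * Register, update, or remove an async event callback on
		 * this path.  Peripheral drivers normally get here through
		 * xpt_register_async() rather than building the CCB by
		 * hand.
		 */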
2842 		struct ccb_setasync *csa;
2843 		struct async_node *cur_entry;
2844 		struct async_list *async_head;
2845 		u_int32_t added;
2846 
2847 		csa = &start_ccb->csa;
2848 		added = csa->event_enable;
2849 		async_head = &path->device->asyncs;
2850 
2851 		/*
2852 		 * If there is already an entry for us, simply
2853 		 * update it.
2854 		 */
2855 		cur_entry = SLIST_FIRST(async_head);
2856 		while (cur_entry != NULL) {
2857 			if ((cur_entry->callback_arg == csa->callback_arg)
2858 			 && (cur_entry->callback == csa->callback))
2859 				break;
2860 			cur_entry = SLIST_NEXT(cur_entry, links);
2861 		}
2862 
2863 		if (cur_entry != NULL) {
2864 		 	/*
2865 			 * If the request has no flags set,
2866 			 * remove the entry.
2867 			 */
2868 			added &= ~cur_entry->event_enable;
2869 			if (csa->event_enable == 0) {
2870 				SLIST_REMOVE(async_head, cur_entry,
2871 					     async_node, links);
2872 				xpt_release_device(path->device);
2873 				free(cur_entry, M_CAMXPT);
2874 			} else {
2875 				cur_entry->event_enable = csa->event_enable;
2876 			}
2877 			csa->event_enable = added;
2878 		} else {
2879 			cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
2880 					   M_NOWAIT);
2881 			if (cur_entry == NULL) {
2882 				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
2883 				break;
2884 			}
2885 			cur_entry->event_enable = csa->event_enable;
2886 			cur_entry->callback_arg = csa->callback_arg;
2887 			cur_entry->callback = csa->callback;
2888 			SLIST_INSERT_HEAD(async_head, cur_entry, links);
2889 			xpt_acquire_device(path->device);
2890 		}
2891 		start_ccb->ccb_h.status = CAM_REQ_CMP;
2892 		break;
2893 	}
2894 	case XPT_REL_SIMQ:
2895 	{
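		/*
		 * Undo a device queue freeze.  As an illustration (values
		 * hypothetical), a driver that just saw a QUEUE FULL might
		 * set RELSIM_ADJUST_OPENINGS | RELSIM_RELEASE_AFTER_TIMEOUT
		 * with a reduced openings count and a release_timeout of a
		 * few hundred milliseconds.
		 */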
2896 		struct ccb_relsim *crs;
2897 		struct cam_ed *dev;
2898 
2899 		crs = &start_ccb->crs;
2900 		dev = path->device;
2901 		if (dev == NULL) {
2902 
2903 			crs->ccb_h.status = CAM_DEV_NOT_THERE;
2904 			break;
2905 		}
2906 
2907 		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
2908 
2909 			/* Don't ever go below one opening */
2910 			if (crs->openings > 0) {
2911 				xpt_dev_ccbq_resize(path, crs->openings);
2912 				if (bootverbose) {
2913 					xpt_print(path,
2914 					    "number of openings is now %d\n",
2915 					    crs->openings);
2916 				}
2917 			}
2918 		}
2919 
2920 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
2921 
2922 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
2923 
2924 				/*
2925 				 * Just extend the old timeout and decrement
2926 				 * the freeze count so that a single timeout
2927 				 * is sufficient for releasing the queue.
2928 				 */
2929 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2930 				callout_stop(&dev->callout);
2931 			} else {
2932 
2933 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2934 			}
2935 
2936 			callout_reset(&dev->callout,
2937 			    (crs->release_timeout * hz) / 1000,
2938 			    xpt_release_devq_timeout, dev);
2939 
2940 			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
2941 
2942 		}
2943 
2944 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
2945 
2946 			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
2947 				/*
2948 				 * Decrement the freeze count so that a single
2949 				 * completion is still sufficient to unfreeze
2950 				 * the queue.
2951 				 */
2952 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2953 			} else {
2954 
2955 				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
2956 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2957 			}
2958 		}
2959 
2960 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
2961 
2962 			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
2963 			 || (dev->ccbq.dev_active == 0)) {
2964 
2965 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2966 			} else {
2967 
2968 				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
2969 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2970 			}
2971 		}
2972 
2973 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
2974 			xpt_release_devq_rl(path, /*runlevel*/
2975 			    (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ?
2976 				crs->release_timeout : 0,
2977 			    /*count*/1, /*run_queue*/TRUE);
2978 		}
2979 		start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt[0];
2980 		start_ccb->ccb_h.status = CAM_REQ_CMP;
2981 		break;
2982 	}
2983 	case XPT_DEBUG: {
2984 		/* Check that all request bits are supported. */
2985 		if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
2986 			start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2987 			break;
2988 		}
2989 
2990 		cam_dflags = start_ccb->cdbg.flags;
2991 		if (cam_dpath != NULL) {
2992 			xpt_free_path(cam_dpath);
2993 			cam_dpath = NULL;
2994 		}
2995 		if (cam_dflags != CAM_DEBUG_NONE) {
2996 			if (xpt_create_path(&cam_dpath, xpt_periph,
2997 					    start_ccb->ccb_h.path_id,
2998 					    start_ccb->ccb_h.target_id,
2999 					    start_ccb->ccb_h.target_lun) !=
3000 					    CAM_REQ_CMP) {
3001 				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3002 				cam_dflags = CAM_DEBUG_NONE;
3003 			} else {
3004 				start_ccb->ccb_h.status = CAM_REQ_CMP;
3005 				xpt_print(cam_dpath, "debugging flags now %x\n",
3006 				    cam_dflags);
3007 			}
3008 		} else {
3009 			cam_dpath = NULL;
3010 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3011 		}
3012 		break;
3013 	}
3014 	case XPT_FREEZE_QUEUE:
3015 	{
3016 		struct ccb_relsim *crs = &start_ccb->crs;
3017 
3018 		xpt_freeze_devq_rl(path, /*runlevel*/
3019 		    (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ?
3020 		    crs->release_timeout : 0, /*count*/1);
3021 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3022 		break;
3023 	}
3024 	case XPT_NOOP:
3025 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3026 			xpt_freeze_devq(path, 1);
3027 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3028 		break;
3029 	default:
3030 	case XPT_SDEV_TYPE:
3031 	case XPT_TERM_IO:
3032 	case XPT_ENG_INQ:
3033 		/* XXX Implement */
3034 		printf("%s: CCB type %#x not supported\n", __func__,
3035 		       start_ccb->ccb_h.func_code);
3036 		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3037 		if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
3038 			xpt_done(start_ccb);
3039 		}
3040 		break;
3041 	}
3042 }
3043 
3044 void
3045 xpt_polled_action(union ccb *start_ccb)
3046 {
3047 	u_int32_t timeout;
3048 	struct	  cam_sim *sim;
3049 	struct	  cam_devq *devq;
3050 	struct	  cam_ed *dev;
3051 
3053 	timeout = start_ccb->ccb_h.timeout * 10;
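	/*
	 * ccb_h.timeout is in milliseconds; each polling pass below delays
	 * for 100us, so multiplying by 10 budgets the requested number of
	 * milliseconds overall.
	 */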
3054 	sim = start_ccb->ccb_h.path->bus->sim;
3055 	devq = sim->devq;
3056 	dev = start_ccb->ccb_h.path->device;
3057 
3058 	mtx_assert(sim->mtx, MA_OWNED);
3059 
3060 	/* Don't use ISR for this SIM while polling. */
3061 	sim->flags |= CAM_SIM_POLLED;
3062 
3063 	/*
3064 	 * Steal an opening so that no other queued requests
3065 	 * can get it before us while we simulate interrupts.
3066 	 */
3067 	dev->ccbq.devq_openings--;
3068 	dev->ccbq.dev_openings--;
3069 
3070 	while (((devq != NULL && devq->send_openings <= 0) ||
3071 	   dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
3072 		DELAY(100);
3073 		(*(sim->sim_poll))(sim);
3074 		camisr_runqueue(&sim->sim_doneq);
3075 	}
3076 
3077 	dev->ccbq.devq_openings++;
3078 	dev->ccbq.dev_openings++;
3079 
3080 	if (timeout != 0) {
3081 		xpt_action(start_ccb);
3082 		while (--timeout > 0) {
3083 			(*(sim->sim_poll))(sim);
3084 			camisr_runqueue(&sim->sim_doneq);
3085 			if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3086 			    != CAM_REQ_INPROG)
3087 				break;
3088 			DELAY(100);
3089 		}
3090 		if (timeout == 0) {
3091 			/*
3092 			 * XXX Is it worth adding a sim_timeout entry
3093 			 * point so we can attempt recovery?  If
3094 			 * this is only used for dumps, I don't think
3095 			 * it is.
3096 			 */
3097 			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3098 		}
3099 	} else {
3100 		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3101 	}
3102 
3103 	/* We will use CAM ISR for this SIM again. */
3104 	sim->flags &= ~CAM_SIM_POLLED;
3105 }
3106 
3107 /*
3108  * Schedule a peripheral driver to receive a ccb when it's
3109  * Schedule a peripheral driver to receive a ccb when its
3110  */
3111 void
3112 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3113 {
3114 	struct cam_ed *device;
3115 	int runq = 0;
3116 
3117 	mtx_assert(perph->sim->mtx, MA_OWNED);
3118 
3119 	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3120 	device = perph->path->device;
3121 	if (periph_is_queued(perph)) {
3122 		/* Simply reorder based on new priority */
3123 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3124 			  ("   change priority to %d\n", new_priority));
3125 		if (new_priority < perph->pinfo.priority) {
3126 			camq_change_priority(&device->drvq,
3127 					     perph->pinfo.index,
3128 					     new_priority);
3129 			runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3130 		}
3131 	} else {
3132 		/* New entry on the queue */
3133 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3134 			  ("   added periph to queue\n"));
3135 		perph->pinfo.priority = new_priority;
3136 		perph->pinfo.generation = ++device->drvq.generation;
3137 		camq_insert(&device->drvq, &perph->pinfo);
3138 		runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3139 	}
3140 	if (runq != 0) {
3141 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3142 			  ("   calling xpt_run_dev_allocq\n"));
3143 		xpt_run_dev_allocq(perph->path->bus);
3144 	}
3145 }
3146 
3148 /*
3149  * Schedule a device to run on a given queue.
3150  * If the device was inserted as a new entry on the queue,
3151  * return 1 meaning the device queue should be run. If we
3152  * were already queued, implying someone else has already
3153  * started the queue, return 0 so the caller doesn't attempt
3154  * to run the queue.
3155  */
3156 int
3157 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3158 		 u_int32_t new_priority)
3159 {
3160 	int retval;
3161 	u_int32_t old_priority;
3162 
3163 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3164 
3165 	old_priority = pinfo->priority;
3166 
3167 	/*
3168 	 * Are we already queued?
3169 	 */
3170 	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3171 		/* Simply reorder based on new priority */
3172 		if (new_priority < old_priority) {
3173 			camq_change_priority(queue, pinfo->index,
3174 					     new_priority);
3175 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3176 					("changed priority to %d\n",
3177 					 new_priority));
3178 			retval = 1;
3179 		} else
3180 			retval = 0;
3181 	} else {
3182 		/* New entry on the queue */
3183 		if (new_priority < old_priority)
3184 			pinfo->priority = new_priority;
3185 
3186 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3187 				("Inserting onto queue\n"));
3188 		pinfo->generation = ++queue->generation;
3189 		camq_insert(queue, pinfo);
3190 		retval = 1;
3191 	}
3192 	return (retval);
3193 }
3194 
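/*
 * Pull the highest-priority peripheral off this bus's alloc queue and hand
 * it a freshly allocated CCB, repeating while openings remain.  The
 * transient bump of qfrozen_cnt[0] keeps a recursive invocation (e.g. from
 * a periph_start routine) from re-running the queue underneath us.
 */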
3195 static void
3196 xpt_run_dev_allocq(struct cam_eb *bus)
3197 {
3198 	struct	cam_devq *devq;
3199 
3200 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3201 	devq = bus->sim->devq;
3202 
3203 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3204 			("   qfrozen_cnt == 0x%x, entries == %d, "
3205 			 "openings == %d, active == %d\n",
3206 			 devq->alloc_queue.qfrozen_cnt[0],
3207 			 devq->alloc_queue.entries,
3208 			 devq->alloc_openings,
3209 			 devq->alloc_active));
3210 
3211 	devq->alloc_queue.qfrozen_cnt[0]++;
3212 	while ((devq->alloc_queue.entries > 0)
3213 	    && (devq->alloc_openings > 0)
3214 	    && (devq->alloc_queue.qfrozen_cnt[0] <= 1)) {
3215 		struct	cam_ed_qinfo *qinfo;
3216 		struct	cam_ed *device;
3217 		union	ccb *work_ccb;
3218 		struct	cam_periph *drv;
3219 		struct	camq *drvq;
3220 
3221 		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3222 							   CAMQ_HEAD);
3223 		device = qinfo->device;
3224 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3225 				("running device %p\n", device));
3226 
3227 		drvq = &device->drvq;
3228 		KASSERT(drvq->entries > 0, ("xpt_run_dev_allocq: "
3229 		    "Device on queue without any work to do"));
3230 		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3231 			devq->alloc_openings--;
3232 			devq->alloc_active++;
3233 			drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3234 			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3235 				      drv->pinfo.priority);
3236 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3237 					("calling periph start\n"));
3238 			drv->periph_start(drv, work_ccb);
3239 		} else {
3240 			/*
3241 			 * Malloc failure in alloc_ccb
3242 			 */
3243 			/*
3244 			 * XXX add us to a list to be run from free_ccb
3245 			 * if we don't have any ccbs active on this
3246 			 * device queue, otherwise we may never get run
3247 			 * again.
3248 			 */
3249 			break;
3250 		}
3251 
3252 		/* We may have more work. Attempt to reschedule. */
3253 		xpt_schedule_dev_allocq(bus, device);
3254 	}
3255 	devq->alloc_queue.qfrozen_cnt[0]--;
3256 }
3257 
3258 static void
3259 xpt_run_dev_sendq(struct cam_eb *bus)
3260 {
3261 	struct	cam_devq *devq;
3262 
3263 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3264 
3265 	devq = bus->sim->devq;
3266 
3267 	devq->send_queue.qfrozen_cnt[0]++;
3268 	while ((devq->send_queue.entries > 0)
3269 	    && (devq->send_openings > 0)
3270 	    && (devq->send_queue.qfrozen_cnt[0] <= 1)) {
3271 		struct	cam_ed_qinfo *qinfo;
3272 		struct	cam_ed *device;
3273 		union ccb *work_ccb;
3274 		struct	cam_sim *sim;
3275 
3276 		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3277 							   CAMQ_HEAD);
3278 		device = qinfo->device;
3279 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3280 				("running device %p\n", device));
3281 
3282 		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3283 		if (work_ccb == NULL) {
3284 			printf("device on run queue with no ccbs???\n");
3285 			continue;
3286 		}
3287 
3288 		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3289 
3290 			mtx_lock(&xsoftc.xpt_lock);
3291 		 	if (xsoftc.num_highpower <= 0) {
3292 				/*
3293 				 * We got a high power command, but we
3294 				 * don't have any available slots.  Freeze
3295 				 * the device queue until we have a slot
3296 				 * available.
3297 				 */
3298 				xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3299 				STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
3300 						   &work_ccb->ccb_h,
3301 						   xpt_links.stqe);
3302 
3303 				mtx_unlock(&xsoftc.xpt_lock);
3304 				continue;
3305 			} else {
3306 				/*
3307 				 * Consume a high power slot while
3308 				 * this ccb runs.
3309 				 */
3310 				xsoftc.num_highpower--;
3311 			}
3312 			mtx_unlock(&xsoftc.xpt_lock);
3313 		}
3314 		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3315 		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3316 
3317 		devq->send_openings--;
3318 		devq->send_active++;
3319 
3320 		xpt_schedule_dev_sendq(bus, device);
3321 
3322 		if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3323 			/*
3324 			 * The client wants to freeze the queue
3325 			 * after this CCB is sent.
3326 			 */
3327 			xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3328 		}
3329 
3330 		/* In Target mode, the peripheral driver knows best... */
3331 		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3332 			if ((device->inq_flags & SID_CmdQue) != 0
3333 			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3334 				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3335 			else
3336 				/*
3337 				 * Clear this in case of a retried CCB that
3338 				 * failed due to a rejected tag.
3339 				 */
3340 				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3341 		}
3342 
3343 		/*
3344 		 * Device queues can be shared among multiple sim instances
3345 		 * that reside on different busses.  Use the SIM in the queued
3346 		 * CCB's path, rather than the one in the bus that was passed
3347 		 * into this function.
3348 		 */
3349 		sim = work_ccb->ccb_h.path->bus->sim;
3350 		(*(sim->sim_action))(sim, work_ccb);
3351 	}
3352 	devq->send_queue.qfrozen_cnt[0]--;
3353 }
3354 
3355 /*
3356  * This function merges fields from the slave ccb into the master ccb, while
3357  * keeping important fields in the master ccb constant.
3358  */
3359 void
3360 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3361 {
3362 
3363 	/*
3364 	 * Pull fields that are valid for peripheral drivers to set
3365 	 * into the master CCB along with the CCB "payload".
3366 	 */
3367 	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3368 	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3369 	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3370 	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
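	/*
	 * &(&ccb->ccb_h)[1] points just past the header, so this copies the
	 * function-specific payload while leaving the master's remaining
	 * header fields (path, pinfo, status, ...) untouched.
	 */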
3371 	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3372 	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3373 }
3374 
3375 void
3376 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3377 {
3378 
3379 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3380 	ccb_h->pinfo.priority = priority;
3381 	ccb_h->path = path;
3382 	ccb_h->path_id = path->bus->path_id;
3383 	if (path->target)
3384 		ccb_h->target_id = path->target->target_id;
3385 	else
3386 		ccb_h->target_id = CAM_TARGET_WILDCARD;
3387 	if (path->device) {
3388 		ccb_h->target_lun = path->device->lun_id;
3389 		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3390 	} else {
3391 		ccb_h->target_lun = CAM_LUN_WILDCARD;
3392 	}
3393 	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3394 	ccb_h->flags = 0;
3395 }
3396 
3397 /* Path manipulation functions */
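/*
 * A minimal usage sketch (not taken from this file): callers pair
 * xpt_create_path() with xpt_free_path() and check for CAM_REQ_CMP:
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id, lun_id) !=
 *	    CAM_REQ_CMP)
 *		return;
 *	...issue CCBs built with xpt_setup_ccb()...
 *	xpt_free_path(path);
 */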
3398 cam_status
3399 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3400 		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3401 {
3402 	struct	   cam_path *path;
3403 	cam_status status;
3404 
3405 	path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3406 
3407 	if (path == NULL) {
3408 		status = CAM_RESRC_UNAVAIL;
3409 		return(status);
3410 	}
3411 	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3412 	if (status != CAM_REQ_CMP) {
3413 		free(path, M_CAMPATH);
3414 		path = NULL;
3415 	}
3416 	*new_path_ptr = path;
3417 	return (status);
3418 }
3419 
3420 cam_status
3421 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3422 			 struct cam_periph *periph, path_id_t path_id,
3423 			 target_id_t target_id, lun_id_t lun_id)
3424 {
3425 	struct	   cam_path *path;
3426 	struct	   cam_eb *bus = NULL;
3427 	cam_status status;
3428 	int	   need_unlock = 0;
3429 
3430 	path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_WAITOK);
3431 
3432 	if (path_id != CAM_BUS_WILDCARD) {
3433 		bus = xpt_find_bus(path_id);
3434 		if (bus != NULL) {
3435 			need_unlock = 1;
3436 			CAM_SIM_LOCK(bus->sim);
3437 		}
3438 	}
3439 	status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
3440 	if (need_unlock) {
3441 		CAM_SIM_UNLOCK(bus->sim);
3442 		xpt_release_bus(bus);
3443 	}
3444 	if (status != CAM_REQ_CMP) {
3445 		free(path, M_CAMPATH);
3446 		path = NULL;
3447 	}
3448 	*new_path_ptr = path;
3449 	return (status);
3450 }
3451 
3452 cam_status
3453 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3454 		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3455 {
3456 	struct	     cam_eb *bus;
3457 	struct	     cam_et *target;
3458 	struct	     cam_ed *device;
3459 	cam_status   status;
3460 
3461 	status = CAM_REQ_CMP;	/* Completed without error */
3462 	target = NULL;		/* Wildcarded */
3463 	device = NULL;		/* Wildcarded */
3464 
3465 	/*
3466 	 * We will potentially modify the EDT, so block interrupts
3467 	 * that may attempt to create cam paths.
3468 	 */
3469 	bus = xpt_find_bus(path_id);
3470 	if (bus == NULL) {
3471 		status = CAM_PATH_INVALID;
3472 	} else {
3473 		target = xpt_find_target(bus, target_id);
3474 		if (target == NULL) {
3475 			/* Create one */
3476 			struct cam_et *new_target;
3477 
3478 			new_target = xpt_alloc_target(bus, target_id);
3479 			if (new_target == NULL) {
3480 				status = CAM_RESRC_UNAVAIL;
3481 			} else {
3482 				target = new_target;
3483 			}
3484 		}
3485 		if (target != NULL) {
3486 			device = xpt_find_device(target, lun_id);
3487 			if (device == NULL) {
3488 				/* Create one */
3489 				struct cam_ed *new_device;
3490 
3491 				new_device =
3492 				    (*(bus->xport->alloc_device))(bus,
3493 								      target,
3494 								      lun_id);
3495 				if (new_device == NULL) {
3496 					status = CAM_RESRC_UNAVAIL;
3497 				} else {
3498 					device = new_device;
3499 				}
3500 			}
3501 		}
3502 	}
3503 
3504 	/*
3505 	 * Only touch the user's data if we are successful.
3506 	 */
3507 	if (status == CAM_REQ_CMP) {
3508 		new_path->periph = perph;
3509 		new_path->bus = bus;
3510 		new_path->target = target;
3511 		new_path->device = device;
3512 		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3513 	} else {
3514 		if (device != NULL)
3515 			xpt_release_device(device);
3516 		if (target != NULL)
3517 			xpt_release_target(target);
3518 		if (bus != NULL)
3519 			xpt_release_bus(bus);
3520 	}
3521 	return (status);
3522 }
3523 
3524 void
3525 xpt_release_path(struct cam_path *path)
3526 {
3527 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3528 	if (path->device != NULL) {
3529 		xpt_release_device(path->device);
3530 		path->device = NULL;
3531 	}
3532 	if (path->target != NULL) {
3533 		xpt_release_target(path->target);
3534 		path->target = NULL;
3535 	}
3536 	if (path->bus != NULL) {
3537 		xpt_release_bus(path->bus);
3538 		path->bus = NULL;
3539 	}
3540 }
3541 
3542 void
3543 xpt_free_path(struct cam_path *path)
3544 {
3545 
3546 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3547 	xpt_release_path(path);
3548 	free(path, M_CAMPATH);
3549 }
3550 
3551 void
3552 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
3553     uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
3554 {
3555 
3556 	mtx_lock(&xsoftc.xpt_topo_lock);
3557 	if (bus_ref) {
3558 		if (path->bus)
3559 			*bus_ref = path->bus->refcount;
3560 		else
3561 			*bus_ref = 0;
3562 	}
3563 	mtx_unlock(&xsoftc.xpt_topo_lock);
3564 	if (periph_ref) {
3565 		if (path->periph)
3566 			*periph_ref = path->periph->refcount;
3567 		else
3568 			*periph_ref = 0;
3569 	}
3570 	if (target_ref) {
3571 		if (path->target)
3572 			*target_ref = path->target->refcount;
3573 		else
3574 			*target_ref = 0;
3575 	}
3576 	if (device_ref) {
3577 		if (path->device)
3578 			*device_ref = path->device->refcount;
3579 		else
3580 			*device_ref = 0;
3581 	}
3582 }
3583 
3584 /*
3585  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3586  * in path1, 2 for match with wildcards in path2.
3587  */
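/*
 * For example, comparing a fully wildcarded path (CAM_BUS_WILDCARD,
 * CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) against any concrete path yields
 * 1; swapping the arguments yields 2; two different concrete paths yield
 * -1.
 */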
3588 int
3589 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3590 {
3591 	int retval = 0;
3592 
3593 	if (path1->bus != path2->bus) {
3594 		if (path1->bus->path_id == CAM_BUS_WILDCARD)
3595 			retval = 1;
3596 		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3597 			retval = 2;
3598 		else
3599 			return (-1);
3600 	}
3601 	if (path1->target != path2->target) {
3602 		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3603 			if (retval == 0)
3604 				retval = 1;
3605 		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3606 			retval = 2;
3607 		else
3608 			return (-1);
3609 	}
3610 	if (path1->device != path2->device) {
3611 		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3612 			if (retval == 0)
3613 				retval = 1;
3614 		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3615 			retval = 2;
3616 		else
3617 			return (-1);
3618 	}
3619 	return (retval);
3620 }
3621 
3622 void
3623 xpt_print_path(struct cam_path *path)
3624 {
3625 
3626 	if (path == NULL)
3627 		printf("(nopath): ");
3628 	else {
3629 		if (path->periph != NULL)
3630 			printf("(%s%d:", path->periph->periph_name,
3631 			       path->periph->unit_number);
3632 		else
3633 			printf("(noperiph:");
3634 
3635 		if (path->bus != NULL)
3636 			printf("%s%d:%d:", path->bus->sim->sim_name,
3637 			       path->bus->sim->unit_number,
3638 			       path->bus->sim->bus_id);
3639 		else
3640 			printf("nobus:");
3641 
3642 		if (path->target != NULL)
3643 			printf("%d:", path->target->target_id);
3644 		else
3645 			printf("X:");
3646 
3647 		if (path->device != NULL)
3648 			printf("%d): ", path->device->lun_id);
3649 		else
3650 			printf("X): ");
3651 	}
3652 }
3653 
3654 void
3655 xpt_print(struct cam_path *path, const char *fmt, ...)
3656 {
3657 	va_list ap;
3658 	xpt_print_path(path);
3659 	va_start(ap, fmt);
3660 	vprintf(fmt, ap);
3661 	va_end(ap);
3662 }
3663 
3664 int
3665 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3666 {
3667 	struct sbuf sb;
3668 
3669 #ifdef INVARIANTS
3670 	if (path != NULL && path->bus != NULL)
3671 		mtx_assert(path->bus->sim->mtx, MA_OWNED);
3672 #endif
3673 
3674 	sbuf_new(&sb, str, str_len, 0);
3675 
3676 	if (path == NULL)
3677 		sbuf_printf(&sb, "(nopath): ");
3678 	else {
3679 		if (path->periph != NULL)
3680 			sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
3681 				    path->periph->unit_number);
3682 		else
3683 			sbuf_printf(&sb, "(noperiph:");
3684 
3685 		if (path->bus != NULL)
3686 			sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
3687 				    path->bus->sim->unit_number,
3688 				    path->bus->sim->bus_id);
3689 		else
3690 			sbuf_printf(&sb, "nobus:");
3691 
3692 		if (path->target != NULL)
3693 			sbuf_printf(&sb, "%d:", path->target->target_id);
3694 		else
3695 			sbuf_printf(&sb, "X:");
3696 
3697 		if (path->device != NULL)
3698 			sbuf_printf(&sb, "%d): ", path->device->lun_id);
3699 		else
3700 			sbuf_printf(&sb, "X): ");
3701 	}
3702 	sbuf_finish(&sb);
3703 
3704 	return(sbuf_len(&sb));
3705 }
3706 
3707 path_id_t
3708 xpt_path_path_id(struct cam_path *path)
3709 {
3710 	return(path->bus->path_id);
3711 }
3712 
3713 target_id_t
3714 xpt_path_target_id(struct cam_path *path)
3715 {
3716 	if (path->target != NULL)
3717 		return (path->target->target_id);
3718 	else
3719 		return (CAM_TARGET_WILDCARD);
3720 }
3721 
3722 lun_id_t
3723 xpt_path_lun_id(struct cam_path *path)
3724 {
3725 	if (path->device != NULL)
3726 		return (path->device->lun_id);
3727 	else
3728 		return (CAM_LUN_WILDCARD);
3729 }
3730 
3731 struct cam_sim *
3732 xpt_path_sim(struct cam_path *path)
3733 {
3734 
3735 	return (path->bus->sim);
3736 }
3737 
3738 struct cam_periph*
3739 xpt_path_periph(struct cam_path *path)
3740 {
3741 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
3742 
3743 	return (path->periph);
3744 }
3745 
3746 int
3747 xpt_path_legacy_ata_id(struct cam_path *path)
3748 {
3749 	struct cam_eb *bus;
3750 	int bus_id;
3751 
3752 	if (strcmp(path->bus->sim->sim_name, "ata") != 0 &&
3753 	    strcmp(path->bus->sim->sim_name, "ahcich") != 0 &&
3754 	    strcmp(path->bus->sim->sim_name, "mvsch") != 0 &&
3755 	    strcmp(path->bus->sim->sim_name, "siisch") != 0)
3756 		return (-1);
3757 
3758 	if (strcmp(path->bus->sim->sim_name, "ata") == 0 &&
3759 	    path->bus->sim->unit_number < 2) {
3760 		bus_id = path->bus->sim->unit_number;
3761 	} else {
3762 		bus_id = 2;
3763 		xpt_lock_buses();
3764 		TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
3765 			if (bus == path->bus)
3766 				break;
3767 			if ((strcmp(bus->sim->sim_name, "ata") == 0 &&
3768 			     bus->sim->unit_number >= 2) ||
3769 			    strcmp(bus->sim->sim_name, "ahcich") == 0 ||
3770 			    strcmp(bus->sim->sim_name, "mvsch") == 0 ||
3771 			    strcmp(bus->sim->sim_name, "siisch") == 0)
3772 				bus_id++;
3773 		}
3774 		xpt_unlock_buses();
3775 	}
3776 	if (path->target != NULL) {
3777 		if (path->target->target_id < 2)
3778 			return (bus_id * 2 + path->target->target_id);
3779 		else
3780 			return (-1);
3781 	} else
3782 		return (bus_id * 2);
3783 }
3784 
3785 /*
3786  * Release a CAM control block for the caller.  Remit the cost of the structure
3787  * to the device referenced by the path.  If this device had no 'credits'
3788  * and peripheral drivers have registered async callbacks for this
3789  * notification, call them now.
3790  */
3791 void
3792 xpt_release_ccb(union ccb *free_ccb)
3793 {
3794 	struct	 cam_path *path;
3795 	struct	 cam_ed *device;
3796 	struct	 cam_eb *bus;
3797 	struct   cam_sim *sim;
3798 
3799 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3800 	path = free_ccb->ccb_h.path;
3801 	device = path->device;
3802 	bus = path->bus;
3803 	sim = bus->sim;
3804 
3805 	mtx_assert(sim->mtx, MA_OWNED);
3806 
3807 	cam_ccbq_release_opening(&device->ccbq);
3808 	if (device->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) {
3809 		device->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
3810 		cam_ccbq_resize(&device->ccbq,
3811 		    device->ccbq.dev_openings + device->ccbq.dev_active);
3812 	}
3813 	if (sim->ccb_count > sim->max_ccbs) {
3814 		xpt_free_ccb(free_ccb);
3815 		sim->ccb_count--;
3816 	} else {
3817 		SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
3818 		    xpt_links.sle);
3819 	}
3820 	if (sim->devq == NULL) {
3821 		return;
3822 	}
3823 	sim->devq->alloc_openings++;
3824 	sim->devq->alloc_active--;
3825 	if (device_is_alloc_queued(device) == 0)
3826 		xpt_schedule_dev_allocq(bus, device);
3827 	xpt_run_dev_allocq(bus);
3828 }
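
/*
 * Hedged usage sketch (the xxdone name is illustrative): a peripheral
 * driver's completion callback typically returns its CCB here, with
 * the SIM lock held, once it has extracted the status it needs:
 *
 *	static void
 *	xxdone(struct cam_periph *periph, union ccb *done_ccb)
 *	{
 *		...record done_ccb->ccb_h.status...
 *		xpt_release_ccb(done_ccb);
 *	}
 */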
3829 
3830 /* Functions accessed by SIM drivers */
3831 
3832 static struct xpt_xport xport_default = {
3833 	.alloc_device = xpt_alloc_device_default,
3834 	.action = xpt_action_default,
3835 	.async = xpt_dev_async_default,
3836 };
3837 
3838 /*
3839  * A sim structure, listing the SIM entry points and instance
3840  * identification info, is passed to xpt_bus_register to hook the SIM
3841  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3842  * for this new bus and places it in the list of busses and assigns
3843  * it a path_id.  The path_id may be influenced by "hard wiring"
3844  * information specified by the user.  Once interrupt services are
3845  * available, the bus will be probed.
3846  */
3847 int32_t
3848 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
3849 {
3850 	struct cam_eb *new_bus;
3851 	struct cam_eb *old_bus;
3852 	struct ccb_pathinq cpi;
3853 	struct cam_path *path;
3854 	cam_status status;
3855 
3856 	mtx_assert(sim->mtx, MA_OWNED);
3857 
3858 	sim->bus_id = bus;
3859 	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
3860 					  M_CAMXPT, M_NOWAIT);
3861 	if (new_bus == NULL) {
3862 		/* Couldn't satisfy request */
3863 		return (CAM_RESRC_UNAVAIL);
3864 	}
3865 	if (strcmp(sim->sim_name, "xpt") != 0) {
3866 		sim->path_id =
3867 		    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
3868 	}
3869 
3870 	TAILQ_INIT(&new_bus->et_entries);
3871 	new_bus->path_id = sim->path_id;
3872 	cam_sim_hold(sim);
3873 	new_bus->sim = sim;
3874 	timevalclear(&new_bus->last_reset);
3875 	new_bus->flags = 0;
3876 	new_bus->refcount = 1;	/* Held until a bus_deregister event */
3877 	new_bus->generation = 0;
3878 
3879 	mtx_lock(&xsoftc.xpt_topo_lock);
3880 	old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3881 	while (old_bus != NULL
3882 	    && old_bus->path_id < new_bus->path_id)
3883 		old_bus = TAILQ_NEXT(old_bus, links);
3884 	if (old_bus != NULL)
3885 		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
3886 	else
3887 		TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
3888 	xsoftc.bus_generation++;
3889 	mtx_unlock(&xsoftc.xpt_topo_lock);
3890 
3891 	/*
3892 	 * Set a default transport so that a PATH_INQ can be issued to
3893 	 * the SIM.  This will then allow for probing and attaching of
3894 	 * a more appropriate transport.
3895 	 */
3896 	new_bus->xport = &xport_default;
3897 
3898 	status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
3899 				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3900 	if (status != CAM_REQ_CMP) {
3901 		xpt_release_bus(new_bus);
3902 		free(path, M_CAMPATH);
3903 		return (CAM_RESRC_UNAVAIL);
3904 	}
3905 
3906 	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
3907 	cpi.ccb_h.func_code = XPT_PATH_INQ;
3908 	xpt_action((union ccb *)&cpi);
3909 
3910 	if (cpi.ccb_h.status == CAM_REQ_CMP) {
3911 		switch (cpi.transport) {
3912 		case XPORT_SPI:
3913 		case XPORT_SAS:
3914 		case XPORT_FC:
3915 		case XPORT_USB:
3916 		case XPORT_ISCSI:
3917 		case XPORT_PPB:
3918 			new_bus->xport = scsi_get_xport();
3919 			break;
3920 		case XPORT_ATA:
3921 		case XPORT_SATA:
3922 			new_bus->xport = ata_get_xport();
3923 			break;
3924 		default:
3925 			new_bus->xport = &xport_default;
3926 			break;
3927 		}
3928 	}
3929 
3930 	/* Notify interested parties */
3931 	if (sim->path_id != CAM_XPT_PATH_ID) {
3932 		union	ccb *scan_ccb;
3933 
3934 		xpt_async(AC_PATH_REGISTERED, path, &cpi);
3935 		/* Initiate bus rescan. */
3936 		scan_ccb = xpt_alloc_ccb_nowait();
3937 		if (scan_ccb != NULL) {
3938 			scan_ccb->ccb_h.path = path;
3939 			scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
3940 			scan_ccb->crcn.flags = 0;
3941 			xpt_rescan(scan_ccb);
3942 		} else {
3943 			xpt_print(path, "Can't allocate CCB to scan bus\n");
3944 			xpt_free_path(path);
3945 		}
3941 	} else
3942 		xpt_free_path(path);
3943 	return (CAM_SUCCESS);
3944 }
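
/*
 * Sketch of the attach-time sequence described above, as a SIM driver
 * might issue it (names and sizes are illustrative; error handling
 * omitted):
 *
 *	struct cam_devq *devq;
 *	struct cam_sim *sim;
 *
 *	devq = cam_simq_alloc(MAX_TRANSACTIONS);
 *	sim = cam_sim_alloc(xxaction, xxpoll, "xx", softc,
 *	    device_get_unit(dev), &softc->mtx, 1, MAX_TAGS, devq);
 *	mtx_lock(&softc->mtx);
 *	(void)xpt_bus_register(sim, dev, 0);
 *	mtx_unlock(&softc->mtx);
 */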
3945 
3946 int32_t
3947 xpt_bus_deregister(path_id_t pathid)
3948 {
3949 	struct cam_path bus_path;
3950 	cam_status status;
3951 
3952 	status = xpt_compile_path(&bus_path, NULL, pathid,
3953 				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3954 	if (status != CAM_REQ_CMP)
3955 		return (status);
3956 
3957 	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
3958 	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
3959 
3960 	/* Release the reference count held while registered. */
3961 	xpt_release_bus(bus_path.bus);
3962 	xpt_release_path(&bus_path);
3963 
3964 	return (CAM_REQ_CMP);
3965 }
3966 
3967 static path_id_t
3968 xptnextfreepathid(void)
3969 {
3970 	struct cam_eb *bus;
3971 	path_id_t pathid;
3972 	const char *strval;
3973 
3974 	pathid = 0;
3975 	mtx_lock(&xsoftc.xpt_topo_lock);
3976 	bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3977 retry:
3978 	/* Find an unoccupied pathid */
3979 	while (bus != NULL && bus->path_id <= pathid) {
3980 		if (bus->path_id == pathid)
3981 			pathid++;
3982 		bus = TAILQ_NEXT(bus, links);
3983 	}
3984 	mtx_unlock(&xsoftc.xpt_topo_lock);
3985 
3986 	/*
3987 	 * Ensure that this pathid is not reserved for
3988 	 * a bus that may be registered in the future.
3989 	 */
3990 	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
3991 		++pathid;
3992 		/* Start the search over */
3993 		mtx_lock(&xsoftc.xpt_topo_lock);
3994 		goto retry;
3995 	}
3996 	return (pathid);
3997 }
3998 
3999 static path_id_t
4000 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4001 {
4002 	path_id_t pathid;
4003 	int i, dunit, val;
4004 	char buf[32];
4005 	const char *dname;
4006 
4007 	pathid = CAM_XPT_PATH_ID;
4008 	snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4009 	i = 0;
4010 	while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4011 		if (strcmp(dname, "scbus")) {
4012 			/* Avoid a bit of foot shooting. */
4013 			continue;
4014 		}
4015 		if (dunit < 0)		/* unwired?! */
4016 			continue;
4017 		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4018 			if (sim_bus == val) {
4019 				pathid = dunit;
4020 				break;
4021 			}
4022 		} else if (sim_bus == 0) {
4023 			/* Unspecified matches bus 0 */
4024 			pathid = dunit;
4025 			break;
4026 		} else {
4027 			printf("Ambiguous scbus configuration for %s%d "
4028 			       "bus %d, cannot wire down.  The kernel "
4029 			       "config entry for scbus%d should "
4030 			       "specify a controller bus.\n"
4031 			       "Scbus will be assigned dynamically.\n",
4032 			       sim_name, sim_unit, sim_bus, dunit);
4033 			break;
4034 		}
4035 	}
4036 
4037 	if (pathid == CAM_XPT_PATH_ID)
4038 		pathid = xptnextfreepathid();
4039 	return (pathid);
4040 }
4041 
4042 static const char *
4043 xpt_async_string(u_int32_t async_code)
4044 {
4045 
4046 	switch (async_code) {
4047 	case AC_BUS_RESET: return ("AC_BUS_RESET");
4048 	case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
4049 	case AC_SCSI_AEN: return ("AC_SCSI_AEN");
4050 	case AC_SENT_BDR: return ("AC_SENT_BDR");
4051 	case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
4052 	case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
4053 	case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
4054 	case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
4055 	case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
4056 	case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
4057 	case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
4058 	case AC_CONTRACT: return ("AC_CONTRACT");
4059 	case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
4060 	case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
4061 	}
4062 	return ("AC_UNKNOWN");
4063 }
4064 
4065 void
4066 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4067 {
4068 	struct cam_eb *bus;
4069 	struct cam_et *target, *next_target;
4070 	struct cam_ed *device, *next_device;
4071 
4072 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
4073 	CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
4074 	    ("xpt_async(%s)\n", xpt_async_string(async_code)));
4075 
4076 	/*
4077 	 * Most async events come from a CAM interrupt context.  In
4078 	 * a few cases, the error recovery code at the peripheral layer,
4079 	 * which may run from our SWI or a process context, may signal
4080 	 * deferred events with a call to xpt_async.
4081 	 */
4082 
4083 	bus = path->bus;
4084 
4085 	if (async_code == AC_BUS_RESET) {
4086 		/* Update our notion of when the last reset occurred */
4087 		microtime(&bus->last_reset);
4088 	}
4089 
4090 	for (target = TAILQ_FIRST(&bus->et_entries);
4091 	     target != NULL;
4092 	     target = next_target) {
4093 
4094 		next_target = TAILQ_NEXT(target, links);
4095 
4096 		if (path->target != target
4097 		 && path->target->target_id != CAM_TARGET_WILDCARD
4098 		 && target->target_id != CAM_TARGET_WILDCARD)
4099 			continue;
4100 
4101 		if (async_code == AC_SENT_BDR) {
4102 			/* Update our notion of when the last reset occurred */
4103 			microtime(&path->target->last_reset);
4104 		}
4105 
4106 		for (device = TAILQ_FIRST(&target->ed_entries);
4107 		     device != NULL;
4108 		     device = next_device) {
4109 
4110 			next_device = TAILQ_NEXT(device, links);
4111 
4112 			if (path->device != device
4113 			 && path->device->lun_id != CAM_LUN_WILDCARD
4114 			 && device->lun_id != CAM_LUN_WILDCARD)
4115 				continue;
4116 			/*
4117 			 * The async callback could free the device.
4118 			 * If it is a broadcast async, it doesn't hold a
4119 			 * device reference, so take our own reference.
4120 			 */
4121 			xpt_acquire_device(device);
4122 			(*(bus->xport->async))(async_code, bus,
4123 					       target, device,
4124 					       async_arg);
4125 
4126 			xpt_async_bcast(&device->asyncs, async_code,
4127 					path, async_arg);
4128 			xpt_release_device(device);
4129 		}
4130 	}
4131 
4132 	/*
4133 	 * If this wasn't a fully wildcarded async, tell all
4134 	 * clients that want all async events.
4135 	 */
4136 	if (bus != xpt_periph->path->bus)
4137 		xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4138 				path, async_arg);
4139 }
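
/*
 * For example (a sketch; real callers live in SIM and peripheral
 * code), a SIM that has just reset its bus reports the event from its
 * interrupt handler, with its lock held, using a path to that bus:
 *
 *	xpt_async(AC_BUS_RESET, bus_path, NULL);
 *
 * The loops above then fan the event out to every matching target
 * and device.
 */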
4140 
4141 static void
4142 xpt_async_bcast(struct async_list *async_head,
4143 		u_int32_t async_code,
4144 		struct cam_path *path, void *async_arg)
4145 {
4146 	struct async_node *cur_entry;
4147 
4148 	cur_entry = SLIST_FIRST(async_head);
4149 	while (cur_entry != NULL) {
4150 		struct async_node *next_entry;
4151 		/*
4152 		 * Grab the next list entry before we call the current
4153 		 * entry's callback.  This is because the callback function
4154 		 * can delete its async callback entry.
4155 		 */
4156 		next_entry = SLIST_NEXT(cur_entry, links);
4157 		if ((cur_entry->event_enable & async_code) != 0)
4158 			cur_entry->callback(cur_entry->callback_arg,
4159 					    async_code, path,
4160 					    async_arg);
4161 		cur_entry = next_entry;
4162 	}
4163 }
4164 
4165 static void
4166 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
4167 		      struct cam_et *target, struct cam_ed *device,
4168 		      void *async_arg)
4169 {
4170 	printf("%s called\n", __func__);
4171 }
4172 
4173 u_int32_t
4174 xpt_freeze_devq_rl(struct cam_path *path, cam_rl rl, u_int count)
4175 {
4176 	struct cam_ed *dev = path->device;
4177 
4178 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
4179 	dev->sim->devq->alloc_openings +=
4180 	    cam_ccbq_freeze(&dev->ccbq, rl, count);
4181 	/* Remove frozen device from allocq. */
4182 	if (device_is_alloc_queued(dev) &&
4183 	    cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
4184 	     CAMQ_GET_PRIO(&dev->drvq)))) {
4185 		camq_remove(&dev->sim->devq->alloc_queue,
4186 		    dev->alloc_ccb_entry.pinfo.index);
4187 	}
4188 	/* Remove frozen device from sendq. */
4189 	if (device_is_send_queued(dev) &&
4190 	    cam_ccbq_frozen_top(&dev->ccbq)) {
4191 		camq_remove(&dev->sim->devq->send_queue,
4192 		    dev->send_ccb_entry.pinfo.index);
4193 	}
4194 	return (dev->ccbq.queue.qfrozen_cnt[rl]);
4195 }
4196 
4197 u_int32_t
4198 xpt_freeze_devq(struct cam_path *path, u_int count)
4199 {
4200 
4201 	return (xpt_freeze_devq_rl(path, 0, count));
4202 }
4203 
4204 u_int32_t
4205 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4206 {
4207 
4208 	mtx_assert(sim->mtx, MA_OWNED);
4209 	sim->devq->send_queue.qfrozen_cnt[0] += count;
4210 	return (sim->devq->send_queue.qfrozen_cnt[0]);
4211 }
4212 
4213 static void
4214 xpt_release_devq_timeout(void *arg)
4215 {
4216 	struct cam_ed *device;
4217 
4218 	device = (struct cam_ed *)arg;
4219 
4220 	xpt_release_devq_device(device, /*rl*/0, /*count*/1, /*run_queue*/TRUE);
4221 }
4222 
4223 void
4224 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4225 {
4226 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
4227 
4228 	xpt_release_devq_device(path->device, /*rl*/0, count, run_queue);
4229 }
4230 
4231 void
4232 xpt_release_devq_rl(struct cam_path *path, cam_rl rl, u_int count, int run_queue)
4233 {
4234 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
4235 
4236 	xpt_release_devq_device(path->device, rl, count, run_queue);
4237 }
4238 
4239 static void
4240 xpt_release_devq_device(struct cam_ed *dev, cam_rl rl, u_int count, int run_queue)
4241 {
4242 
4243 	if (count > dev->ccbq.queue.qfrozen_cnt[rl]) {
4244 #ifdef INVARIANTS
4245 		printf("xpt_release_devq(%d): requested %u > present %u\n",
4246 		    rl, count, dev->ccbq.queue.qfrozen_cnt[rl]);
4247 #endif
4248 		count = dev->ccbq.queue.qfrozen_cnt[rl];
4249 	}
4250 	dev->sim->devq->alloc_openings -=
4251 	    cam_ccbq_release(&dev->ccbq, rl, count);
4252 	if (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
4253 	    CAMQ_GET_PRIO(&dev->drvq))) == 0) {
4254 		if (xpt_schedule_dev_allocq(dev->target->bus, dev))
4255 			xpt_run_dev_allocq(dev->target->bus);
4256 	}
4257 	if (cam_ccbq_frozen_top(&dev->ccbq) == 0) {
4258 		/*
4259 		 * No longer need to wait for a successful
4260 		 * command completion.
4261 		 */
4262 		dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4263 		/*
4264 		 * Remove any timeouts that might be scheduled
4265 		 * to release this queue.
4266 		 */
4267 		if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4268 			callout_stop(&dev->callout);
4269 			dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4270 		}
4271 		if (run_queue == 0)
4272 			return;
4273 		/*
4274 		 * Now that we are unfrozen schedule the
4275 		 * device so any pending transactions are
4276 		 * run.
4277 		 */
4278 		if (xpt_schedule_dev_sendq(dev->target->bus, dev))
4279 			xpt_run_dev_sendq(dev->target->bus);
4280 	}
4281 }
4282 
4283 void
4284 xpt_release_simq(struct cam_sim *sim, int run_queue)
4285 {
4286 	struct	camq *sendq;
4287 
4288 	mtx_assert(sim->mtx, MA_OWNED);
4289 	sendq = &(sim->devq->send_queue);
4290 	if (sendq->qfrozen_cnt[0] <= 0) {
4291 #ifdef INVARIANTS
4292 		printf("xpt_release_simq: requested 1 > present %u\n",
4293 		    sendq->qfrozen_cnt[0]);
4294 #endif
4295 	} else
4296 		sendq->qfrozen_cnt[0]--;
4297 	if (sendq->qfrozen_cnt[0] == 0) {
4298 		/*
4299 		 * If there is a timeout scheduled to release this
4300 		 * sim queue, remove it.  The queue frozen count is
4301 		 * already at 0.
4302 		 */
4303 		if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0) {
4304 			callout_stop(&sim->callout);
4305 			sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4306 		}
4307 		if (run_queue) {
4308 			struct cam_eb *bus;
4309 
4310 			/*
4311 			 * Now that we are unfrozen run the send queue.
4312 			 */
4313 			bus = xpt_find_bus(sim->path_id);
4314 			xpt_run_dev_sendq(bus);
4315 			xpt_release_bus(bus);
4316 		}
4317 	}
4318 }
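
/*
 * xpt_freeze_simq() and xpt_release_simq() pair up to quiesce a
 * controller; a hedged sketch of the usual error-recovery pattern:
 *
 *	xpt_freeze_simq(sim, 1);
 *	...reset or reconfigure the hardware...
 *	xpt_release_simq(sim, TRUE);
 *
 * Passing run_queue == TRUE restarts the send queue so transactions
 * held during the freeze are dispatched again.
 */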
4319 
4320 /*
4321  * XXX Appears to be unused.
4322  */
4323 static void
4324 xpt_release_simq_timeout(void *arg)
4325 {
4326 	struct cam_sim *sim;
4327 
4328 	sim = (struct cam_sim *)arg;
4329 	xpt_release_simq(sim, /* run_queue */ TRUE);
4330 }
4331 
4332 void
4333 xpt_done(union ccb *done_ccb)
4334 {
4335 	struct cam_sim *sim;
4336 	int	first;
4337 
4338 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4339 	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4340 		/*
4341 		 * Queue up "non-immediate" CCBs for handling by
4342 		 * our SWI handler.
4343 		 */
4344 		sim = done_ccb->ccb_h.path->bus->sim;
4345 		TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
4346 		    sim_links.tqe);
4347 		done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4348 		if ((sim->flags & (CAM_SIM_ON_DONEQ | CAM_SIM_POLLED |
4349 		    CAM_SIM_BATCH)) == 0) {
4350 			mtx_lock(&cam_simq_lock);
4351 			first = TAILQ_EMPTY(&cam_simq);
4352 			TAILQ_INSERT_TAIL(&cam_simq, sim, links);
4353 			mtx_unlock(&cam_simq_lock);
4354 			sim->flags |= CAM_SIM_ON_DONEQ;
4355 			if (first)
4356 				swi_sched(cambio_ih, 0);
4357 		}
4358 	}
4359 }
4360 
4361 void
4362 xpt_batch_start(struct cam_sim *sim)
4363 {
4364 
4365 	KASSERT((sim->flags & CAM_SIM_BATCH) == 0, ("Batch flag already set"));
4366 	sim->flags |= CAM_SIM_BATCH;
4367 }
4368 
4369 void
4370 xpt_batch_done(struct cam_sim *sim)
4371 {
4372 
4373 	KASSERT((sim->flags & CAM_SIM_BATCH) != 0, ("Batch flag was not set"));
4374 	sim->flags &= ~CAM_SIM_BATCH;
4375 	if (!TAILQ_EMPTY(&sim->sim_doneq) &&
4376 	    (sim->flags & CAM_SIM_ON_DONEQ) == 0)
4377 		camisr_runqueue(&sim->sim_doneq);
4378 }
4379 
4380 union ccb *
4381 xpt_alloc_ccb(void)
4382 {
4383 	union ccb *new_ccb;
4384 
4385 	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4386 	return (new_ccb);
4387 }
4388 
4389 union ccb *
4390 xpt_alloc_ccb_nowait(void)
4391 {
4392 	union ccb *new_ccb;
4393 
4394 	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4395 	return (new_ccb);
4396 }
4397 
4398 void
4399 xpt_free_ccb(union ccb *free_ccb)
4400 {
4401 	free(free_ccb, M_CAMCCB);
4402 }
4403 
4404 
4405 
4406 /* Private XPT functions */
4407 
4408 /*
4409  * Get a CAM control block for the caller. Charge the structure to the device
4410  * referenced by the path.  If the this device has no 'credits' then the
4411  * device already has the maximum number of outstanding operations under way
4412  * and we return NULL. If we don't have sufficient resources to allocate more
4413  * ccbs, we also return NULL.
4414  */
4415 static union ccb *
4416 xpt_get_ccb(struct cam_ed *device)
4417 {
4418 	union ccb *new_ccb;
4419 	struct cam_sim *sim;
4420 
4421 	sim = device->sim;
4422 	if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
4423 		new_ccb = xpt_alloc_ccb_nowait();
4424 		if (new_ccb == NULL) {
4425 			return (NULL);
4426 		}
4427 		if ((sim->flags & CAM_SIM_MPSAFE) == 0)
4428 			callout_handle_init(&new_ccb->ccb_h.timeout_ch);
4429 		SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
4430 				  xpt_links.sle);
4431 		sim->ccb_count++;
4432 	}
4433 	cam_ccbq_take_opening(&device->ccbq);
4434 	SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
4435 	return (new_ccb);
4436 }
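
/*
 * Callers must treat a NULL return as "try again later"; a sketch of
 * the pattern (not a verbatim caller from this file):
 *
 *	if ((ccb = xpt_get_ccb(device)) == NULL)
 *		return;		(an opening or memory will free up later)
 */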
4437 
4438 static void
4439 xpt_release_bus(struct cam_eb *bus)
4440 {
4441 
4442 	mtx_lock(&xsoftc.xpt_topo_lock);
4443 	KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
4444 	if ((--bus->refcount == 0)
4445 	 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
4446 		TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4447 		xsoftc.bus_generation++;
4448 		mtx_unlock(&xsoftc.xpt_topo_lock);
4449 		cam_sim_release(bus->sim);
4450 		free(bus, M_CAMXPT);
4451 	} else
4452 		mtx_unlock(&xsoftc.xpt_topo_lock);
4453 }
4454 
4455 static struct cam_et *
4456 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4457 {
4458 	struct cam_et *target;
4459 
4460 	target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
4461 					 M_NOWAIT|M_ZERO);
4462 	if (target != NULL) {
4463 		struct cam_et *cur_target;
4464 
4465 		TAILQ_INIT(&target->ed_entries);
4466 		target->bus = bus;
4467 		target->target_id = target_id;
4468 		target->refcount = 1;
4469 		target->generation = 0;
4470 		target->luns = NULL;
4471 		timevalclear(&target->last_reset);
4472 		/*
4473 		 * Hold a reference to our parent bus so it
4474 		 * will not go away before we do.
4475 		 */
4476 		mtx_lock(&xsoftc.xpt_topo_lock);
4477 		bus->refcount++;
4478 		mtx_unlock(&xsoftc.xpt_topo_lock);
4479 
4480 		/* Insertion sort into our bus's target list */
4481 		cur_target = TAILQ_FIRST(&bus->et_entries);
4482 		while (cur_target != NULL && cur_target->target_id < target_id)
4483 			cur_target = TAILQ_NEXT(cur_target, links);
4484 
4485 		if (cur_target != NULL) {
4486 			TAILQ_INSERT_BEFORE(cur_target, target, links);
4487 		} else {
4488 			TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4489 		}
4490 		bus->generation++;
4491 	}
4492 	return (target);
4493 }
4494 
4495 static void
4496 xpt_release_target(struct cam_et *target)
4497 {
4498 
4499 	if (target->refcount == 1) {
4500 		if (TAILQ_FIRST(&target->ed_entries) == NULL) {
4501 			TAILQ_REMOVE(&target->bus->et_entries, target, links);
4502 			target->bus->generation++;
4503 			xpt_release_bus(target->bus);
4504 			if (target->luns)
4505 				free(target->luns, M_CAMXPT);
4506 			free(target, M_CAMXPT);
4507 		}
4508 	} else
4509 		target->refcount--;
4510 }
4511 
4512 static struct cam_ed *
4513 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
4514 			 lun_id_t lun_id)
4515 {
4516 	struct cam_ed *device, *cur_device;
4517 
4518 	device = xpt_alloc_device(bus, target, lun_id);
4519 	if (device == NULL)
4520 		return (NULL);
4521 
4522 	device->mintags = 1;
4523 	device->maxtags = 1;
4524 	bus->sim->max_ccbs += device->ccbq.devq_openings;
4525 	cur_device = TAILQ_FIRST(&target->ed_entries);
4526 	while (cur_device != NULL && cur_device->lun_id < lun_id)
4527 		cur_device = TAILQ_NEXT(cur_device, links);
4528 	if (cur_device != NULL) {
4529 		TAILQ_INSERT_BEFORE(cur_device, device, links);
4530 	} else {
4531 		TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4532 	}
4533 	target->generation++;
4534 
4535 	return (device);
4536 }
4537 
4538 struct cam_ed *
4539 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4540 {
4541 	struct	   cam_ed *device;
4542 	struct	   cam_devq *devq;
4543 	cam_status status;
4544 
4545 	/* Make space for us in the device queue on our bus */
4546 	devq = bus->sim->devq;
4547 	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4548 
4549 	if (status != CAM_REQ_CMP) {
4550 		device = NULL;
4551 	} else {
4552 		device = (struct cam_ed *)malloc(sizeof(*device),
4553 						 M_CAMDEV, M_NOWAIT|M_ZERO);
4554 	}
4555 
4556 	if (device != NULL) {
4557 		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4558 		device->alloc_ccb_entry.device = device;
4559 		cam_init_pinfo(&device->send_ccb_entry.pinfo);
4560 		device->send_ccb_entry.device = device;
4561 		device->target = target;
4562 		device->lun_id = lun_id;
4563 		device->sim = bus->sim;
4564 		/* Initialize our queues */
4565 		if (camq_init(&device->drvq, 0) != 0) {
4566 			free(device, M_CAMDEV);
4567 			return (NULL);
4568 		}
4569 		if (cam_ccbq_init(&device->ccbq,
4570 				  bus->sim->max_dev_openings) != 0) {
4571 			camq_fini(&device->drvq);
4572 			free(device, M_CAMDEV);
4573 			return (NULL);
4574 		}
4575 		SLIST_INIT(&device->asyncs);
4576 		SLIST_INIT(&device->periphs);
4577 		device->generation = 0;
4578 		device->owner = NULL;
4579 		device->flags = CAM_DEV_UNCONFIGURED;
4580 		device->tag_delay_count = 0;
4581 		device->tag_saved_openings = 0;
4582 		device->refcount = 1;
4583 		callout_init_mtx(&device->callout, bus->sim->mtx, 0);
4584 
4585 		/*
4586 		 * Hold a reference to our parent target so it
4587 		 * will not go away before we do.
4588 		 */
4589 		target->refcount++;
4590 
4591 	}
4592 	return (device);
4593 }
4594 
4595 void
4596 xpt_acquire_device(struct cam_ed *device)
4597 {
4598 
4599 	device->refcount++;
4600 }
4601 
4602 void
4603 xpt_release_device(struct cam_ed *device)
4604 {
4605 
4606 	if (device->refcount == 1) {
4607 		struct cam_devq *devq;
4608 
4609 		if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
4610 		 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
4611 			panic("Removing device while still queued for ccbs");
4612 
4613 		if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4614 			callout_stop(&device->callout);
4615 
4616 		TAILQ_REMOVE(&device->target->ed_entries, device, links);
4617 		device->target->generation++;
4618 		device->target->bus->sim->max_ccbs -= device->ccbq.devq_openings;
4619 		/* Release our slot in the devq */
4620 		devq = device->target->bus->sim->devq;
4621 		cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
4622 		camq_fini(&device->drvq);
4623 		cam_ccbq_fini(&device->ccbq);
4624 		/*
4625 		 * Free allocated memory.  free(9) does nothing if the
4626 		 * supplied pointer is NULL, so it is safe to call without
4627 		 * checking.
4628 		 */
4629 		free(device->supported_vpds, M_CAMXPT);
4630 		free(device->device_id, M_CAMXPT);
4631 		free(device->physpath, M_CAMXPT);
4632 		free(device->rcap_buf, M_CAMXPT);
4633 		free(device->serial_num, M_CAMXPT);
4634 
4635 		xpt_release_target(device->target);
4636 		free(device, M_CAMDEV);
4637 	} else
4638 		device->refcount--;
4639 }
4640 
4641 u_int32_t
4642 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4643 {
4644 	int	diff;
4645 	int	result;
4646 	struct	cam_ed *dev;
4647 
4648 	dev = path->device;
4649 
4650 	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
4651 	result = cam_ccbq_resize(&dev->ccbq, newopenings);
4652 	if (result == CAM_REQ_CMP && (diff < 0)) {
4653 		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
4654 	}
4655 	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
4656 	 || (dev->inq_flags & SID_CmdQue) != 0)
4657 		dev->tag_saved_openings = newopenings;
4658 	/* Adjust the global limit */
4659 	dev->sim->max_ccbs += diff;
4660 	return (result);
4661 }
4662 
4663 static struct cam_eb *
4664 xpt_find_bus(path_id_t path_id)
4665 {
4666 	struct cam_eb *bus;
4667 
4668 	mtx_lock(&xsoftc.xpt_topo_lock);
4669 	for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4670 	     bus != NULL;
4671 	     bus = TAILQ_NEXT(bus, links)) {
4672 		if (bus->path_id == path_id) {
4673 			bus->refcount++;
4674 			break;
4675 		}
4676 	}
4677 	mtx_unlock(&xsoftc.xpt_topo_lock);
4678 	return (bus);
4679 }
4680 
4681 static struct cam_et *
4682 xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
4683 {
4684 	struct cam_et *target;
4685 
4686 	for (target = TAILQ_FIRST(&bus->et_entries);
4687 	     target != NULL;
4688 	     target = TAILQ_NEXT(target, links)) {
4689 		if (target->target_id == target_id) {
4690 			target->refcount++;
4691 			break;
4692 		}
4693 	}
4694 	return (target);
4695 }
4696 
4697 static struct cam_ed *
4698 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4699 {
4700 	struct cam_ed *device;
4701 
4702 	for (device = TAILQ_FIRST(&target->ed_entries);
4703 	     device != NULL;
4704 	     device = TAILQ_NEXT(device, links)) {
4705 		if (device->lun_id == lun_id) {
4706 			device->refcount++;
4707 			break;
4708 		}
4709 	}
4710 	return (device);
4711 }
4712 
4713 void
4714 xpt_start_tags(struct cam_path *path)
4715 {
4716 	struct ccb_relsim crs;
4717 	struct cam_ed *device;
4718 	struct cam_sim *sim;
4719 	int    newopenings;
4720 
4721 	device = path->device;
4722 	sim = path->bus->sim;
4723 	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4724 	xpt_freeze_devq(path, /*count*/1);
4725 	device->inq_flags |= SID_CmdQue;
4726 	if (device->tag_saved_openings != 0)
4727 		newopenings = device->tag_saved_openings;
4728 	else
4729 		newopenings = min(device->maxtags,
4730 				  sim->max_tagged_dev_openings);
4731 	xpt_dev_ccbq_resize(path, newopenings);
4732 	xpt_async(AC_GETDEV_CHANGED, path, NULL);
4733 	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4734 	crs.ccb_h.func_code = XPT_REL_SIMQ;
4735 	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4736 	crs.openings
4737 	    = crs.release_timeout
4738 	    = crs.qfrozen_cnt
4739 	    = 0;
4740 	xpt_action((union ccb *)&crs);
4741 }
4742 
4743 void
4744 xpt_stop_tags(struct cam_path *path)
4745 {
4746 	struct ccb_relsim crs;
4747 	struct cam_ed *device;
4748 	struct cam_sim *sim;
4749 
4750 	device = path->device;
4751 	sim = path->bus->sim;
4752 	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4753 	device->tag_delay_count = 0;
4754 	xpt_freeze_devq(path, /*count*/1);
4755 	device->inq_flags &= ~SID_CmdQue;
4756 	xpt_dev_ccbq_resize(path, sim->max_dev_openings);
4757 	xpt_async(AC_GETDEV_CHANGED, path, NULL);
4758 	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4759 	crs.ccb_h.func_code = XPT_REL_SIMQ;
4760 	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4761 	crs.openings
4762 	    = crs.release_timeout
4763 	    = crs.qfrozen_cnt
4764 	    = 0;
4765 	xpt_action((union ccb *)&crs);
4766 }
4767 
4768 static void
4769 xpt_boot_delay(void *arg)
4770 {
4771 
4772 	xpt_release_boot();
4773 }
4774 
4775 static void
4776 xpt_config(void *arg)
4777 {
4778 	/*
4779 	 * Now that interrupts are enabled, go find our devices
4780 	 */
4781 
4782 	/* Setup debugging path */
4783 	if (cam_dflags != CAM_DEBUG_NONE) {
4784 		/*
4785 		 * Locking is specifically omitted here.  No SIMs have
4786 		 * registered yet, so xpt_create_path will only be searching
4787 		 * empty lists of targets and devices.
4788 		 */
4789 		if (xpt_create_path(&cam_dpath, xpt_periph,
4790 				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
4791 				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
4792 			printf("xpt_config: xpt_create_path() failed for debug"
4793 			       " target %d:%d:%d, debugging disabled\n",
4794 			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
4795 			cam_dflags = CAM_DEBUG_NONE;
4796 		}
4797 	} else
4798 		cam_dpath = NULL;
4799 
4800 	periphdriver_init(1);
4801 	xpt_hold_boot();
4802 	callout_init(&xsoftc.boot_callout, 1);
4803 	callout_reset(&xsoftc.boot_callout, hz * xsoftc.boot_delay / 1000,
4804 	    xpt_boot_delay, NULL);
4805 	/* Fire up rescan thread. */
4806 	if (kproc_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
4807 		printf("xpt_config: failed to create rescan thread.\n");
4808 	}
4809 }
4810 
4811 void
4812 xpt_hold_boot(void)
4813 {
4814 	xpt_lock_buses();
4815 	xsoftc.buses_to_config++;
4816 	xpt_unlock_buses();
4817 }
4818 
4819 void
4820 xpt_release_boot(void)
4821 {
4822 	xpt_lock_buses();
4823 	xsoftc.buses_to_config--;
4824 	if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
4825 		struct	xpt_task *task;
4826 
4827 		xsoftc.buses_config_done = 1;
4828 		xpt_unlock_buses();
4829 		/* Call manually because we don't have any busses */
4830 		task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
4831 		if (task != NULL) {
4832 			TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
4833 			taskqueue_enqueue(taskqueue_thread, &task->task);
4834 		}
4835 	} else
4836 		xpt_unlock_buses();
4837 }
4838 
4839 /*
4840  * If the given device only has one peripheral attached to it, and if that
4841  * peripheral is the passthrough driver, announce it.  This ensures that the
4842  * user sees some sort of announcement for every peripheral in their system.
4843  */
4844 static int
4845 xptpassannouncefunc(struct cam_ed *device, void *arg)
4846 {
4847 	struct cam_periph *periph;
4848 	int i;
4849 
4850 	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
4851 	     periph = SLIST_NEXT(periph, periph_links), i++);
4852 
4853 	periph = SLIST_FIRST(&device->periphs);
4854 	if ((i == 1)
4855 	 && (strncmp(periph->periph_name, "pass", 4) == 0))
4856 		xpt_announce_periph(periph, NULL);
4857 
4858 	return(1);
4859 }
4860 
4861 static void
4862 xpt_finishconfig_task(void *context, int pending)
4863 {
4864 
4865 	periphdriver_init(2);
4866 	/*
4867 	 * Check for devices with no "standard" peripheral driver
4868 	 * attached.  For any devices like that, announce the
4869 	 * passthrough driver so the user will see something.
4870 	 */
4871 	if (!bootverbose)
4872 		xpt_for_all_devices(xptpassannouncefunc, NULL);
4873 
4874 	/* Release our hook so that the boot can continue. */
4875 	config_intrhook_disestablish(xsoftc.xpt_config_hook);
4876 	free(xsoftc.xpt_config_hook, M_CAMXPT);
4877 	xsoftc.xpt_config_hook = NULL;
4878 
4879 	free(context, M_CAMXPT);
4880 }
4881 
4882 cam_status
4883 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
4884 		   struct cam_path *path)
4885 {
4886 	struct ccb_setasync csa;
4887 	cam_status status;
4888 	int xptpath = 0;
4889 
4890 	if (path == NULL) {
4891 		mtx_lock(&xsoftc.xpt_lock);
4892 		status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
4893 					 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4894 		if (status != CAM_REQ_CMP) {
4895 			mtx_unlock(&xsoftc.xpt_lock);
4896 			return (status);
4897 		}
4898 		xptpath = 1;
4899 	}
4900 
4901 	xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
4902 	csa.ccb_h.func_code = XPT_SASYNC_CB;
4903 	csa.event_enable = event;
4904 	csa.callback = cbfunc;
4905 	csa.callback_arg = cbarg;
4906 	xpt_action((union ccb *)&csa);
4907 	status = csa.ccb_h.status;
4908 
4909 	if (xptpath) {
4910 		xpt_free_path(path);
4911 		mtx_unlock(&xsoftc.xpt_lock);
4912 	}
4913 
4914 	if ((status == CAM_REQ_CMP) &&
4915 	    (csa.event_enable & AC_FOUND_DEVICE)) {
4916 		/*
4917 		 * Get this peripheral up to date with all
4918 		 * the currently existing devices.
4919 		 */
4920 		xpt_for_all_devices(xptsetasyncfunc, &csa);
4921 	}
4922 	if ((status == CAM_REQ_CMP) &&
4923 	    (csa.event_enable & AC_PATH_REGISTERED)) {
4924 		/*
4925 		 * Get this peripheral up to date with all
4926 		 * the currently existing busses.
4927 		 */
4928 		xpt_for_all_busses(xptsetasyncbusfunc, &csa);
4929 	}
4930 
4931 	return (status);
4932 }
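
/*
 * Hedged example of a typical peripheral-driver initialization (the
 * xx names are illustrative): passing a NULL path registers for the
 * event across all busses:
 *
 *	static void
 *	xxinit(void)
 *	{
 *
 *		if (xpt_register_async(AC_FOUND_DEVICE, xxasync,
 *		    NULL, NULL) != CAM_REQ_CMP)
 *			printf("xx: Failed to attach async callback\n");
 *	}
 */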
4933 
4934 static void
4935 xptaction(struct cam_sim *sim, union ccb *work_ccb)
4936 {
4937 	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
4938 
4939 	switch (work_ccb->ccb_h.func_code) {
4940 	/* Common cases first */
4941 	case XPT_PATH_INQ:		/* Path routing inquiry */
4942 	{
4943 		struct ccb_pathinq *cpi;
4944 
4945 		cpi = &work_ccb->cpi;
4946 		cpi->version_num = 1; /* XXX??? */
4947 		cpi->hba_inquiry = 0;
4948 		cpi->target_sprt = 0;
4949 		cpi->hba_misc = 0;
4950 		cpi->hba_eng_cnt = 0;
4951 		cpi->max_target = 0;
4952 		cpi->max_lun = 0;
4953 		cpi->initiator_id = 0;
4954 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
4955 		strncpy(cpi->hba_vid, "", HBA_IDLEN);
4956 		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
4957 		cpi->unit_number = sim->unit_number;
4958 		cpi->bus_id = sim->bus_id;
4959 		cpi->base_transfer_speed = 0;
4960 		cpi->protocol = PROTO_UNSPECIFIED;
4961 		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
4962 		cpi->transport = XPORT_UNSPECIFIED;
4963 		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
4964 		cpi->ccb_h.status = CAM_REQ_CMP;
4965 		xpt_done(work_ccb);
4966 		break;
4967 	}
4968 	default:
4969 		work_ccb->ccb_h.status = CAM_REQ_INVALID;
4970 		xpt_done(work_ccb);
4971 		break;
4972 	}
4973 }
4974 
4975 /*
4976  * The xpt as a "controller" has no interrupt sources, so polling
4977  * is a no-op.
4978  */
4979 static void
4980 xptpoll(struct cam_sim *sim)
4981 {
4982 }
4983 
4984 void
4985 xpt_lock_buses(void)
4986 {
4987 	mtx_lock(&xsoftc.xpt_topo_lock);
4988 }
4989 
4990 void
4991 xpt_unlock_buses(void)
4992 {
4993 	mtx_unlock(&xsoftc.xpt_topo_lock);
4994 }
4995 
4996 static void
4997 camisr(void *dummy)
4998 {
4999 	cam_simq_t queue;
5000 	struct cam_sim *sim;
5001 
5002 	mtx_lock(&cam_simq_lock);
5003 	TAILQ_INIT(&queue);
5004 	while (!TAILQ_EMPTY(&cam_simq)) {
5005 		TAILQ_CONCAT(&queue, &cam_simq, links);
5006 		mtx_unlock(&cam_simq_lock);
5007 
5008 		while ((sim = TAILQ_FIRST(&queue)) != NULL) {
5009 			TAILQ_REMOVE(&queue, sim, links);
5010 			CAM_SIM_LOCK(sim);
5011 			camisr_runqueue(&sim->sim_doneq);
5012 			sim->flags &= ~CAM_SIM_ON_DONEQ;
5013 			CAM_SIM_UNLOCK(sim);
5014 		}
5015 		mtx_lock(&cam_simq_lock);
5016 	}
5017 	mtx_unlock(&cam_simq_lock);
5018 }
5019 
5020 static void
5021 camisr_runqueue(void *V_queue)
5022 {
5023 	cam_isrq_t *queue = V_queue;
5024 	struct	ccb_hdr *ccb_h;
5025 
5026 	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
5027 		int	runq;
5028 
5029 		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
5030 		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
5031 
5032 		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
5033 			  ("camisr\n"));
5034 
5035 		runq = FALSE;
5036 
5037 		if (ccb_h->flags & CAM_HIGH_POWER) {
5038 			struct highpowerlist	*hphead;
5039 			union ccb		*send_ccb;
5040 
5041 			mtx_lock(&xsoftc.xpt_lock);
5042 			hphead = &xsoftc.highpowerq;
5043 
5044 			send_ccb = (union ccb *)STAILQ_FIRST(hphead);
5045 
5046 			/*
5047 			 * Increment the count since this command is done.
5048 			 */
5049 			xsoftc.num_highpower++;
5050 
5051 			/*
5052 			 * Any high powered commands queued up?
5053 			 */
5054 			if (send_ccb != NULL) {
5055 
5056 				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
5057 				mtx_unlock(&xsoftc.xpt_lock);
5058 
5059 				xpt_release_devq(send_ccb->ccb_h.path,
5060 						 /*count*/1, /*runqueue*/TRUE);
5061 			} else
5062 				mtx_unlock(&xsoftc.xpt_lock);
5063 		}
5064 
5065 		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
5066 			struct cam_ed *dev;
5067 
5068 			dev = ccb_h->path->device;
5069 
5070 			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5071 			ccb_h->path->bus->sim->devq->send_active--;
5072 			ccb_h->path->bus->sim->devq->send_openings++;
5073 			runq = TRUE;
5074 
5075 			if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5076 			  && (dev->ccbq.dev_active == 0))) {
5077 				dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
5078 				xpt_release_devq(ccb_h->path, /*count*/1,
5079 						 /*run_queue*/FALSE);
5080 			}
5081 
5082 			if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5083 			  && (ccb_h->status & CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
5084 				dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
5085 				xpt_release_devq(ccb_h->path, /*count*/1,
5086 						 /*run_queue*/FALSE);
5087 			}
5088 
5089 			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5090 			 && (--dev->tag_delay_count == 0))
5091 				xpt_start_tags(ccb_h->path);
5092 			if (!device_is_send_queued(dev)) {
5093 				(void)xpt_schedule_dev_sendq(ccb_h->path->bus,
5094 							     dev);
5095 			}
5096 		}
5097 
5098 		if (ccb_h->status & CAM_RELEASE_SIMQ) {
5099 			xpt_release_simq(ccb_h->path->bus->sim,
5100 					 /*run_queue*/TRUE);
5101 			ccb_h->status &= ~CAM_RELEASE_SIMQ;
5102 			runq = FALSE;
5103 		}
5104 
5105 		if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5106 		 && (ccb_h->status & CAM_DEV_QFRZN)) {
5107 			xpt_release_devq(ccb_h->path, /*count*/1,
5108 					 /*run_queue*/TRUE);
5109 			ccb_h->status &= ~CAM_DEV_QFRZN;
5110 		} else if (runq) {
5111 			xpt_run_dev_sendq(ccb_h->path->bus);
5112 		}
5113 
5114 		/* Call the peripheral driver's callback */
5115 		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
5116 	}
5117 }
5118