xref: /freebsd/sys/cam/cam_xpt.c (revision 4ed925457ab06e83238a5db33e89ccc94b99a713)
/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/reboot.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>	/* geometry translation */
#endif

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include <machine/stdarg.h>	/* for xpt_print below */
#include "opt_cam.h"

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
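
/*
 * Assuming the kernel build plumbs CAM_MAX_HIGHPOWER through opt_cam.h
 * (included above with the other CAM headers), the #ifndef guard lets a
 * kernel config file raise the default, e.g.:
 *
 *	options CAM_MAX_HIGHPOWER=8
 */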

/* Data structures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");

/* Object for deferring XPT actions to a taskqueue */
struct xpt_task {
	struct task	task;
	void		*data1;
	uintptr_t	data2;
};

typedef enum {
	XPT_FLAG_OPEN		= 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags		flags;
	u_int32_t		xpt_generation;

	/* number of high powered commands that can go through right now */
	STAILQ_HEAD(highpowerlist, ccb_hdr)	highpowerq;
	int			num_highpower;

	/* queue for handling async rescan requests. */
	TAILQ_HEAD(, ccb_hdr) ccb_scanq;
	int buses_to_config;
	int buses_config_done;

	/* Registered busses */
	TAILQ_HEAD(,cam_eb)	xpt_busses;
	u_int			bus_generation;

	struct intr_config_hook	*xpt_config_hook;

	int			boot_delay;
	struct callout 		boot_callout;

	struct mtx		xpt_topo_lock;
	struct mtx		xpt_lock;
};

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
           &xsoftc.boot_delay, 0, "Bus registration wait time");
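/*
 * As a CTLFLAG_RDTUN knob, boot_delay can only be set at boot time, e.g.
 * from /boot/loader.conf (the value below is illustrative):
 *
 *	kern.cam.boot_delay="10000"
 */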
static int	xpt_power_down = 0;
TUNABLE_INT("kern.cam.power_down", &xpt_power_down);
SYSCTL_INT(_kern_cam, OID_AUTO, power_down, CTLFLAG_RW,
           &xpt_power_down, 0, "Power down devices on shutdown");

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
static cam_simq_t cam_simq;
static struct mtx cam_simq_lock;

/* Pointers to software interrupt handlers */
static void *cambio_ih;

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	xptopen,
	.d_close =	xptclose,
	.d_ioctl =	xptioctl,
	.d_name =	"xpt",
};

/* Storage for debugging data structures */
#ifdef	CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static int	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_simq_timeout(void *arg) __unused;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, cam_rl rl,
		    u_int count, int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_et *target);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_shutdown(void *arg, int howto);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static void	 camisr(void *);
static void	 camisr_runqueue(void *);
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static void		xpt_finishconfig_task(void *context, int pending);
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
static void		xpt_dev_async_default(u_int32_t async_code,
					      struct cam_eb *bus,
					      struct cam_et *target,
					      struct cam_ed *device,
					      void *async_arg);
static struct cam_ed *	xpt_alloc_device_default(struct cam_eb *bus,
						 struct cam_et *target,
						 lun_id_t lun_id);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if ((dev->drvq.entries > 0) &&
	    (dev->ccbq.devq_openings > 0) &&
	    (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
		CAMQ_GET_PRIO(&dev->drvq))) == 0)) {
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_PRIO(&dev->drvq));
	} else {
		retval = 0;
	}

	return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int	retval;

	if ((dev->ccbq.queue.entries > 0) &&
	    (dev->ccbq.dev_openings > 0) &&
	    (cam_ccbq_frozen_top(&dev->ccbq) == 0)) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_PRIO(&dev->ccbq.queue));
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static void
xpt_periph_init(void)
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("%s: can't do nonblocking access\n", devtoname(dev));
		return(ENODEV);
	}

	/* Mark ourselves open */
	mtx_lock(&xsoftc.xpt_lock);
	xsoftc.flags |= XPT_FLAG_OPEN;
	mtx_unlock(&xsoftc.xpt_lock);

	return(0);
}

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

	/* Mark ourselves closed */
	mtx_lock(&xsoftc.xpt_lock);
	xsoftc.flags &= ~XPT_FLAG_OPEN;
	mtx_unlock(&xsoftc.xpt_lock);

	return(0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	error = 0;

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
	 * in the CAM spec.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;
		struct cam_eb *bus;

		inccb = (union ccb *)addr;

		bus = xpt_find_bus(inccb->ccb_h.path_id);
		if (bus == NULL) {
			error = EINVAL;
			break;
		}

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:

			ccb = xpt_alloc_ccb();

			CAM_SIM_LOCK(bus->sim);
			/* Ensure passed in target/lun supported on this bus. */
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD) ||
			    (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				if (xpt_create_path(&ccb->ccb_h.path,
					    xpt_periph,
					    inccb->ccb_h.path_id,
					    CAM_TARGET_WILDCARD,
					    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
					error = EINVAL;
					CAM_SIM_UNLOCK(bus->sim);
					xpt_free_ccb(ccb);
					break;
				}
				xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				    inccb->ccb_h.pinfo.priority);
				ccb->ccb_h.func_code = XPT_PATH_INQ;
				xpt_action(ccb);
				xpt_free_path(ccb->ccb_h.path);
				if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD &&
				    inccb->ccb_h.target_id > ccb->cpi.max_target) ||
				    (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD &&
				    inccb->ccb_h.target_lun > ccb->cpi.max_lun)) {
					error = EINVAL;
					CAM_SIM_UNLOCK(bus->sim);
					xpt_free_ccb(ccb);
					break;
				}
			}
			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				CAM_SIM_UNLOCK(bus->sim);
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			CAM_SIM_UNLOCK(bus->sim);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			CAM_SIM_LOCK(bus->sim);

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				CAM_SIM_UNLOCK(bus->sim);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			CAM_SIM_UNLOCK(bus->sim);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		xpt_release_bus(bus);
		break;
	}
	/*
	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit number filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass") and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.  Note
	 * that since we do everything in one pass, unlike the XPT_GDEVLIST
	 * ccb, we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change since we look at the whole thing in one pass, and
	 * we do it with lock protection.
	 */
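	/*
	 * A minimal userland sketch of this ioctl (roughly what libcam does
	 * when looking up a pass(4) instance; the details below are
	 * illustrative, not copied from libcam):
	 *
	 *	union ccb ccb;
	 *	int fd = open("/dev/xpt0", O_RDWR);
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	strlcpy(ccb.cgdl.periph_name, "da",
	 *	    sizeof(ccb.cgdl.periph_name));
	 *	ccb.cgdl.unit_number = 0;
	 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) != -1 &&
	 *	    ccb.ccb_h.status == CAM_REQ_CMP)
	 *		printf("da0 -> %s%d\n", ccb.cgdl.periph_name,
	 *		    ccb.cgdl.unit_number);
	 */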
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		u_int unit;
		u_int cur_generation;
		int base_periph_found;
		int splbreaknum;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to drop our lock protection to
		 * give the software interrupt handler a chance to run.
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		mtx_lock(&xsoftc.xpt_topo_lock);
ptstartover:
		cur_generation = xsoftc.xpt_generation;

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			mtx_unlock(&xsoftc.xpt_topo_lock);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				mtx_unlock(&xsoftc.xpt_topo_lock);
				mtx_lock(&xsoftc.xpt_topo_lock);
				splbreaknum = 100;
				if (cur_generation != xsoftc.xpt_generation)
				       goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass\" in "
				       "your kernel config file\n");
			}
		}
		mtx_unlock(&xsoftc.xpt_topo_lock);
		break;
		}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		if ((error = xpt_init(NULL)) != 0)
			return (error);
		break;
	case MOD_UNLOAD:
		return EBUSY;
	default:
		return EOPNOTSUPP;
	}

	return 0;
}

static void
xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
{

	if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
		xpt_free_path(done_ccb->ccb_h.path);
		xpt_free_ccb(done_ccb);
	} else {
		done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
	}
	xpt_release_boot();
}

/* thread to handle bus rescans */
static void
xpt_scanner_thread(void *dummy)
{
	union ccb	*ccb;
	struct cam_sim	*sim;

	xpt_lock_buses();
	for (;;) {
		if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
			msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
			       "ccb_scanq", 0);
		if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
			xpt_unlock_buses();

			sim = ccb->ccb_h.path->bus->sim;
			CAM_SIM_LOCK(sim);
			xpt_action(ccb);
			CAM_SIM_UNLOCK(sim);

			xpt_lock_buses();
		}
	}
}

void
xpt_rescan(union ccb *ccb)
{
	struct ccb_hdr *hdr;

	/* Prepare request */
	if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_LUN;
	ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
	ccb->ccb_h.cbfcnp = xpt_rescan_done;
	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
	/* Don't make duplicate entries for the same paths. */
	xpt_lock_buses();
	if (ccb->ccb_h.ppriv_ptr1 == NULL) {
		TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
			if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
				wakeup(&xsoftc.ccb_scanq);
				xpt_unlock_buses();
				xpt_print(ccb->ccb_h.path, "rescan already queued\n");
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
				return;
			}
		}
	}
	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	xsoftc.buses_to_config++;
	wakeup(&xsoftc.ccb_scanq);
	xpt_unlock_buses();
}
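
/*
 * Typical usage (a hypothetical sketch, not taken from any particular
 * driver): a controller driver that has detected a topology change builds
 * a wildcard path and hands the CCB to xpt_rescan(), which consumes both
 * the CCB and its path via xpt_rescan_done() above.
 *
 *	union ccb *ccb;
 *
 *	if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
 *		return;
 *	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim),
 *	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 *		xpt_free_ccb(ccb);
 *		return;
 *	}
 *	xpt_rescan(ccb);
 */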

/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;

	TAILQ_INIT(&xsoftc.xpt_busses);
	TAILQ_INIT(&cam_simq);
	TAILQ_INIT(&xsoftc.ccb_scanq);
	STAILQ_INIT(&xsoftc.highpowerq);
	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

	mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
	mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
	mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*mtx*/&xsoftc.xpt_lock,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	if (xpt_sim == NULL)
		return (ENOMEM);

	mtx_lock(&xsoftc.xpt_lock);
	if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
		mtx_unlock(&xsoftc.xpt_lock);
		printf("xpt_init: xpt_bus_register failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		mtx_unlock(&xsoftc.xpt_lock);
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, xpt_sim);
	xpt_free_path(path);
	mtx_unlock(&xsoftc.xpt_lock);
	/* Install our software interrupt handlers */
	swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih);
	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xsoftc.xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_CAMXPT, M_NOWAIT | M_ZERO);
	if (xsoftc.xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return (ENOMEM);
	}
	xsoftc.xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
		free (xsoftc.xpt_config_hook, M_CAMXPT);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	return (0);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	struct cam_sim *xpt_sim;

	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	xpt_sim = (struct cam_sim *)arg;
	xpt_sim->softc = periph;
	xpt_periph = periph;
	periph->softc = NULL;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	mtx_assert(periph->sim->mtx, MA_OWNED);

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);
	}

	mtx_lock(&xsoftc.xpt_topo_lock);
	xsoftc.xpt_generation++;
	mtx_unlock(&xsoftc.xpt_topo_lock);

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	mtx_assert(periph->sim->mtx, MA_OWNED);

	device = periph->path->device;

	if (device != NULL) {
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
	}

	mtx_lock(&xsoftc.xpt_topo_lock);
	xsoftc.xpt_generation++;
	mtx_unlock(&xsoftc.xpt_topo_lock);
}

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	struct	ccb_pathinq cpi;
	struct	ccb_trans_settings cts;
	struct	cam_path *path;
	u_int	speed;
	u_int	freq;
	u_int	mb;

	mtx_assert(periph->sim->mtx, MA_OWNED);

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	printf("%s%d at %s%d bus %d scbus%d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->bus->path_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	if (path->device->protocol == PROTO_SCSI)
		scsi_print_inquiry(&path->device->inq_data);
	else if (path->device->protocol == PROTO_ATA ||
	    path->device->protocol == PROTO_SATAPM)
		ata_print_ident(&path->device->ident_data);
	else
		printf("Unknown protocol device\n");
	if (bootverbose && path->device->serial_num_len > 0) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		return;
	}

	/* Ask the SIM for its base transfer speed */
	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	speed = cpi.base_transfer_speed;
	freq = 0;
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi =
		    &cts.xport_specific.spi;

		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
		  && spi->sync_offset != 0) {
			freq = scsi_calc_syncsrate(spi->sync_period);
			speed = freq;
		}
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
			speed *= (0x01 << spi->bus_width);
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct	ccb_trans_settings_fc *fc =
		    &cts.xport_specific.fc;

		if (fc->valid & CTS_FC_VALID_SPEED)
			speed = fc->bitrate;
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
		struct	ccb_trans_settings_sas *sas =
		    &cts.xport_specific.sas;

		if (sas->valid & CTS_SAS_VALID_SPEED)
			speed = sas->bitrate;
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_ATA) {
		struct	ccb_trans_settings_ata *ata =
		    &cts.xport_specific.ata;

		if (ata->valid & CTS_ATA_VALID_MODE)
			speed = ata_mode2speed(ata->mode);
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SATA) {
		struct	ccb_trans_settings_sata *sata =
		    &cts.xport_specific.sata;

		if (sata->valid & CTS_SATA_VALID_REVISION)
			speed = ata_revision2speed(sata->revision);
	}

	mb = speed / 1000;
	if (mb > 0)
		printf("%s%d: %d.%03dMB/s transfers",
		       periph->periph_name, periph->unit_number,
		       mb, speed % 1000);
	else
		printf("%s%d: %dKB/s transfers", periph->periph_name,
		       periph->unit_number, speed);
	/* Report additional information about SPI connections */
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts.xport_specific.spi;
		if (freq != 0) {
			printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
			       freq % 1000,
			       (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
			     ? " DT" : "",
			       spi->sync_offset);
		}
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
		 && spi->bus_width > 0) {
			if (freq != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << spi->bus_width));
		} else if (freq != 0) {
			printf(")");
		}
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct	ccb_trans_settings_fc *fc;

		fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWNN)
			printf(" WWNN 0x%llx", (long long) fc->wwnn);
		if (fc->valid & CTS_FC_VALID_WWPN)
			printf(" WWPN 0x%llx", (long long) fc->wwpn);
		if (fc->valid & CTS_FC_VALID_PORT)
			printf(" PortID 0x%x", fc->port);
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_ATA) {
		struct ccb_trans_settings_ata *ata =
		    &cts.xport_specific.ata;

		printf(" (");
		if (ata->valid & CTS_ATA_VALID_MODE)
			printf("%s, ", ata_mode2string(ata->mode));
		if ((ata->valid & CTS_ATA_VALID_ATAPI) && ata->atapi != 0)
			printf("ATAPI %dbytes, ", ata->atapi);
		if (ata->valid & CTS_ATA_VALID_BYTECOUNT)
			printf("PIO %dbytes", ata->bytecount);
		printf(")");
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SATA) {
		struct ccb_trans_settings_sata *sata =
		    &cts.xport_specific.sata;

		printf(" (");
		if (sata->valid & CTS_SATA_VALID_REVISION)
			printf("SATA %d.x, ", sata->revision);
		if (sata->valid & CTS_SATA_VALID_MODE)
			printf("%s, ", ata_mode2string(sata->mode));
		if ((sata->valid & CTS_ATA_VALID_ATAPI) && sata->atapi != 0)
			printf("ATAPI %dbytes, ", sata->atapi);
		if (sata->valid & CTS_SATA_VALID_BYTECOUNT)
			printf("PIO %dbytes", sata->bytecount);
		printf(")");
	}
	if (path->device->inq_flags & SID_CmdQue
	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("\n%s%d: Command Queueing enabled",
		       periph->periph_name, periph->unit_number);
	}
	printf("\n");

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in..
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
}
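
/*
 * For reference, the announcement built above comes out on the console
 * roughly like this (device and numbers illustrative, for a SPI disk):
 *
 *	da0 at ahc0 bus 0 scbus0 target 0 lun 0
 *	da0: <SEAGATE ST39102LW 0006> Fixed Direct Access SCSI-2 device
 *	da0: 40.000MB/s transfers (20.000MHz, offset 15, 16bit)
 *	da0: Command Queueing enabled
 */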

static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them this
		 * bus node.
		 */
1290 		if (cur_pattern->flags == BUS_MATCH_ANY) {
1291 			/* set the copy flag */
1292 			retval |= DM_RET_COPY;
1293 
1294 			/*
1295 			 * If we've already decided on an action, go ahead
1296 			 * and return.
1297 			 */
1298 			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1299 				return(retval);
1300 		}
1301 
1302 		/*
1303 		 * Not sure why someone would do this...
1304 		 */
1305 		if (cur_pattern->flags == BUS_MATCH_NONE)
1306 			continue;
1307 
1308 		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1309 		 && (cur_pattern->path_id != bus->path_id))
1310 			continue;
1311 
1312 		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1313 		 && (cur_pattern->bus_id != bus->sim->bus_id))
1314 			continue;
1315 
1316 		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1317 		 && (cur_pattern->unit_number != bus->sim->unit_number))
1318 			continue;
1319 
1320 		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1321 		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1322 			     DEV_IDLEN) != 0))
1323 			continue;
1324 
1325 		/*
1326 		 * If we get to this point, the user definitely wants
1327 		 * information on this bus.  So tell the caller to copy the
1328 		 * data out.
1329 		 */
1330 		retval |= DM_RET_COPY;
1331 
1332 		/*
1333 		 * If the return action has been set to descend, then we
1334 		 * know that we've already seen a non-bus matching
1335 		 * expression, therefore we need to further descend the tree.
1336 		 * This won't change by continuing around the loop, so we
1337 		 * go ahead and return.  If we haven't seen a non-bus
1338 		 * matching expression, we keep going around the loop until
1339 		 * we exhaust the matching expressions.  We'll set the stop
1340 		 * flag once we fall out of the loop.
1341 		 */
1342 		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1343 			return(retval);
1344 	}
1345 
1346 	/*
1347 	 * If the return action hasn't been set to descend yet, that means
1348 	 * we haven't seen anything other than bus matching patterns.  So
1349 	 * tell the caller to stop descending the tree -- the user doesn't
1350 	 * want to match against lower level tree elements.
1351 	 */
1352 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1353 		retval |= DM_RET_STOP;
1354 
1355 	return(retval);
1356 }
1357 
1358 static dev_match_ret
1359 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1360 	       struct cam_ed *device)
1361 {
1362 	dev_match_ret retval;
1363 	int i;
1364 
1365 	retval = DM_RET_NONE;
1366 
1367 	/*
1368 	 * If we aren't given something to match against, that's an error.
1369 	 */
1370 	if (device == NULL)
1371 		return(DM_RET_ERROR);
1372 
1373 	/*
1374 	 * If there are no match entries, then this device matches no
1375 	 * matter what.
1376 	 */
1377 	if ((patterns == NULL) || (num_patterns == 0))
1378 		return(DM_RET_DESCEND | DM_RET_COPY);
1379 
1380 	for (i = 0; i < num_patterns; i++) {
1381 		struct device_match_pattern *cur_pattern;
1382 
1383 		/*
1384 		 * If the pattern in question isn't for a device node, we
1385 		 * aren't interested.
1386 		 */
1387 		if (patterns[i].type != DEV_MATCH_DEVICE) {
1388 			if ((patterns[i].type == DEV_MATCH_PERIPH)
1389 			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1390 				retval |= DM_RET_DESCEND;
1391 			continue;
1392 		}
1393 
1394 		cur_pattern = &patterns[i].pattern.device_pattern;
1395 
1396 		/*
1397 		 * If they want to match any device node, we give them any
1398 		 * device node.
1399 		 */
1400 		if (cur_pattern->flags == DEV_MATCH_ANY) {
1401 			/* set the copy flag */
1402 			retval |= DM_RET_COPY;
1403 
1404 
1405 			/*
1406 			 * If we've already decided on an action, go ahead
1407 			 * and return.
1408 			 */
1409 			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1410 				return(retval);
1411 		}
1412 
1413 		/*
1414 		 * Not sure why someone would do this...
1415 		 */
1416 		if (cur_pattern->flags == DEV_MATCH_NONE)
1417 			continue;
1418 
1419 		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1420 		 && (cur_pattern->path_id != device->target->bus->path_id))
1421 			continue;
1422 
1423 		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1424 		 && (cur_pattern->target_id != device->target->target_id))
1425 			continue;
1426 
1427 		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1428 		 && (cur_pattern->target_lun != device->lun_id))
1429 			continue;
1430 
1431 		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1432 		 && (cam_quirkmatch((caddr_t)&device->inq_data,
1433 				    (caddr_t)&cur_pattern->inq_pat,
1434 				    1, sizeof(cur_pattern->inq_pat),
1435 				    scsi_static_inquiry_match) == NULL))
1436 			continue;
1437 
1438 		/*
1439 		 * If we get to this point, the user definitely wants
1440 		 * information on this device.  So tell the caller to copy
1441 		 * the data out.
1442 		 */
1443 		retval |= DM_RET_COPY;
1444 
1445 		/*
1446 		 * If the return action has been set to descend, then we
1447 		 * know that we've already seen a peripheral matching
1448 		 * expression, therefore we need to further descend the tree.
1449 		 * This won't change by continuing around the loop, so we
1450 		 * go ahead and return.  If we haven't seen a peripheral
1451 		 * matching expression, we keep going around the loop until
1452 		 * we exhaust the matching expressions.  We'll set the stop
1453 		 * flag once we fall out of the loop.
1454 		 */
1455 		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1456 			return(retval);
1457 	}
1458 
1459 	/*
1460 	 * If the return action hasn't been set to descend yet, that means
1461 	 * we haven't seen any peripheral matching patterns.  So tell the
1462 	 * caller to stop descending the tree -- the user doesn't want to
1463 	 * match against lower level tree elements.
1464 	 */
1465 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1466 		retval |= DM_RET_STOP;
1467 
1468 	return(retval);
1469 }
1470 
1471 /*
1472  * Match a single peripheral against any number of match patterns.
1473  */
1474 static dev_match_ret
1475 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1476 	       struct cam_periph *periph)
1477 {
1478 	dev_match_ret retval;
1479 	int i;
1480 
1481 	/*
1482 	 * If we aren't given something to match against, that's an error.
1483 	 */
1484 	if (periph == NULL)
1485 		return(DM_RET_ERROR);
1486 
1487 	/*
1488 	 * If there are no match entries, then this peripheral matches no
1489 	 * matter what.
1490 	 */
1491 	if ((patterns == NULL) || (num_patterns == 0))
1492 		return(DM_RET_STOP | DM_RET_COPY);
1493 
1494 	/*
1495 	 * There aren't any nodes below a peripheral node, so there's no
1496 	 * reason to descend the tree any further.
1497 	 */
1498 	retval = DM_RET_STOP;
1499 
1500 	for (i = 0; i < num_patterns; i++) {
1501 		struct periph_match_pattern *cur_pattern;
1502 
1503 		/*
1504 		 * If the pattern in question isn't for a peripheral, we
1505 		 * aren't interested.
1506 		 */
1507 		if (patterns[i].type != DEV_MATCH_PERIPH)
1508 			continue;
1509 
1510 		cur_pattern = &patterns[i].pattern.periph_pattern;
1511 
1512 		/*
1513 		 * If they want to match on anything, then we will do so.
1514 		 */
1515 		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1516 			/* set the copy flag */
1517 			retval |= DM_RET_COPY;
1518 
1519 			/*
1520 			 * We've already set the return action to stop,
1521 			 * since there are no nodes below peripherals in
1522 			 * the tree.
1523 			 */
1524 			return(retval);
1525 		}
1526 
1527 		/*
1528 		 * Not sure why someone would do this...
1529 		 */
1530 		if (cur_pattern->flags == PERIPH_MATCH_NONE)
1531 			continue;
1532 
1533 		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1534 		 && (cur_pattern->path_id != periph->path->bus->path_id))
1535 			continue;
1536 
1537 		/*
1538 		 * For the target and lun id's, we have to make sure the
1539 		 * target and lun pointers aren't NULL.  The xpt peripheral
1540 		 * has a wildcard target and device.
1541 		 */
1542 		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1543 		 && ((periph->path->target == NULL)
1544 		 ||(cur_pattern->target_id != periph->path->target->target_id)))
1545 			continue;
1546 
1547 		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1548 		 && ((periph->path->device == NULL)
1549 		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
1550 			continue;
1551 
1552 		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1553 		 && (cur_pattern->unit_number != periph->unit_number))
1554 			continue;
1555 
1556 		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1557 		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
1558 			     DEV_IDLEN) != 0))
1559 			continue;
1560 
1561 		/*
1562 		 * If we get to this point, the user definitely wants
1563 		 * information on this peripheral.  So tell the caller to
1564 		 * copy the data out.
1565 		 */
1566 		retval |= DM_RET_COPY;
1567 
1568 		/*
1569 		 * The return action has already been set to stop, since
1570 		 * peripherals don't have any nodes below them in the EDT.
1571 		 */
1572 		return(retval);
1573 	}
1574 
1575 	/*
1576 	 * If we get to this point, the peripheral that was passed in
1577 	 * doesn't match any of the patterns.
1578 	 */
1579 	return(retval);
1580 }
1581 
1582 static int
1583 xptedtbusfunc(struct cam_eb *bus, void *arg)
1584 {
1585 	struct ccb_dev_match *cdm;
1586 	dev_match_ret retval;
1587 
1588 	cdm = (struct ccb_dev_match *)arg;
1589 
1590 	/*
1591 	 * If our position is for something deeper in the tree, that means
1592 	 * that we've already seen this node.  So, we keep going down.
1593 	 */
1594 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1595 	 && (cdm->pos.cookie.bus == bus)
1596 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1597 	 && (cdm->pos.cookie.target != NULL))
1598 		retval = DM_RET_DESCEND;
1599 	else
1600 		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1601 
1602 	/*
1603 	 * If we got an error, bail out of the search.
1604 	 */
1605 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1606 		cdm->status = CAM_DEV_MATCH_ERROR;
1607 		return(0);
1608 	}
1609 
1610 	/*
1611 	 * If the copy flag is set, copy this bus out.
1612 	 */
1613 	if (retval & DM_RET_COPY) {
1614 		int spaceleft, j;
1615 
1616 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1617 			sizeof(struct dev_match_result));
1618 
1619 		/*
1620 		 * If we don't have enough space to put in another
1621 		 * match result, save our position and tell the
1622 		 * user there are more devices to check.
1623 		 */
1624 		if (spaceleft < sizeof(struct dev_match_result)) {
1625 			bzero(&cdm->pos, sizeof(cdm->pos));
1626 			cdm->pos.position_type =
1627 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1628 
1629 			cdm->pos.cookie.bus = bus;
1630 			cdm->pos.generations[CAM_BUS_GENERATION]=
1631 				xsoftc.bus_generation;
1632 			cdm->status = CAM_DEV_MATCH_MORE;
1633 			return(0);
1634 		}
1635 		j = cdm->num_matches;
1636 		cdm->num_matches++;
1637 		cdm->matches[j].type = DEV_MATCH_BUS;
1638 		cdm->matches[j].result.bus_result.path_id = bus->path_id;
1639 		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1640 		cdm->matches[j].result.bus_result.unit_number =
1641 			bus->sim->unit_number;
1642 		strncpy(cdm->matches[j].result.bus_result.dev_name,
1643 			bus->sim->sim_name, DEV_IDLEN);
1644 	}
1645 
1646 	/*
1647 	 * If the user is only interested in busses, there's no
1648 	 * reason to descend to the next level in the tree.
1649 	 */
1650 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1651 		return(1);
1652 
1653 	/*
1654 	 * If there is a target generation recorded, check it to
1655 	 * make sure the target list hasn't changed.
1656 	 */
1657 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1658 	 && (bus == cdm->pos.cookie.bus)
1659 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1660 	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
1661 	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
1662 	     bus->generation)) {
1663 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1664 		return(0);
1665 	}
1666 
1667 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1668 	 && (cdm->pos.cookie.bus == bus)
1669 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1670 	 && (cdm->pos.cookie.target != NULL))
1671 		return(xpttargettraverse(bus,
1672 					(struct cam_et *)cdm->pos.cookie.target,
1673 					 xptedttargetfunc, arg));
1674 	else
1675 		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
1676 }
1677 
1678 static int
1679 xptedttargetfunc(struct cam_et *target, void *arg)
1680 {
1681 	struct ccb_dev_match *cdm;
1682 
1683 	cdm = (struct ccb_dev_match *)arg;
1684 
1685 	/*
1686 	 * If there is a device list generation recorded, check it to
1687 	 * make sure the device list hasn't changed.
1688 	 */
1689 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1690 	 && (cdm->pos.cookie.bus == target->bus)
1691 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1692 	 && (cdm->pos.cookie.target == target)
1693 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1694 	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
1695 	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
1696 	     target->generation)) {
1697 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1698 		return(0);
1699 	}
1700 
1701 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1702 	 && (cdm->pos.cookie.bus == target->bus)
1703 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1704 	 && (cdm->pos.cookie.target == target)
1705 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1706 	 && (cdm->pos.cookie.device != NULL))
1707 		return(xptdevicetraverse(target,
1708 					(struct cam_ed *)cdm->pos.cookie.device,
1709 					 xptedtdevicefunc, arg));
1710 	else
1711 		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
1712 }
1713 
1714 static int
1715 xptedtdevicefunc(struct cam_ed *device, void *arg)
1716 {
1717 
1718 	struct ccb_dev_match *cdm;
1719 	dev_match_ret retval;
1720 
1721 	cdm = (struct ccb_dev_match *)arg;
1722 
1723 	/*
1724 	 * If our position is for something deeper in the tree, that means
1725 	 * that we've already seen this node.  So, we keep going down.
1726 	 */
1727 	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1728 	 && (cdm->pos.cookie.device == device)
1729 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1730 	 && (cdm->pos.cookie.periph != NULL))
1731 		retval = DM_RET_DESCEND;
1732 	else
1733 		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
1734 					device);
1735 
1736 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1737 		cdm->status = CAM_DEV_MATCH_ERROR;
1738 		return(0);
1739 	}
1740 
1741 	/*
1742 	 * If the copy flag is set, copy this device out.
1743 	 */
1744 	if (retval & DM_RET_COPY) {
1745 		int spaceleft, j;
1746 
1747 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1748 			sizeof(struct dev_match_result));
1749 
1750 		/*
1751 		 * If we don't have enough space to put in another
1752 		 * match result, save our position and tell the
1753 		 * user there are more devices to check.
1754 		 */
1755 		if (spaceleft < sizeof(struct dev_match_result)) {
1756 			bzero(&cdm->pos, sizeof(cdm->pos));
1757 			cdm->pos.position_type =
1758 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1759 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
1760 
1761 			cdm->pos.cookie.bus = device->target->bus;
1762 			cdm->pos.generations[CAM_BUS_GENERATION]=
1763 				xsoftc.bus_generation;
1764 			cdm->pos.cookie.target = device->target;
1765 			cdm->pos.generations[CAM_TARGET_GENERATION] =
1766 				device->target->bus->generation;
1767 			cdm->pos.cookie.device = device;
1768 			cdm->pos.generations[CAM_DEV_GENERATION] =
1769 				device->target->generation;
1770 			cdm->status = CAM_DEV_MATCH_MORE;
1771 			return(0);
1772 		}
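		/* There is room; append this device's info to the match buffer. */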
1773 		j = cdm->num_matches;
1774 		cdm->num_matches++;
1775 		cdm->matches[j].type = DEV_MATCH_DEVICE;
1776 		cdm->matches[j].result.device_result.path_id =
1777 			device->target->bus->path_id;
1778 		cdm->matches[j].result.device_result.target_id =
1779 			device->target->target_id;
1780 		cdm->matches[j].result.device_result.target_lun =
1781 			device->lun_id;
1782 		cdm->matches[j].result.device_result.protocol =
1783 			device->protocol;
1784 		bcopy(&device->inq_data,
1785 		      &cdm->matches[j].result.device_result.inq_data,
1786 		      sizeof(struct scsi_inquiry_data));
1787 		bcopy(&device->ident_data,
1788 		      &cdm->matches[j].result.device_result.ident_data,
1789 		      sizeof(struct ata_params));
1790 
1791 		/* Let the user know whether this device is unconfigured */
1792 		if (device->flags & CAM_DEV_UNCONFIGURED)
1793 			cdm->matches[j].result.device_result.flags =
1794 				DEV_RESULT_UNCONFIGURED;
1795 		else
1796 			cdm->matches[j].result.device_result.flags =
1797 				DEV_RESULT_NOFLAG;
1798 	}
1799 
1800 	/*
1801 	 * If the user isn't interested in peripherals, don't descend
1802 	 * the tree any further.
1803 	 */
1804 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1805 		return(1);
1806 
1807 	/*
1808 	 * If there is a peripheral list generation recorded, make sure
1809 	 * it hasn't changed.
1810 	 */
1811 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1812 	 && (device->target->bus == cdm->pos.cookie.bus)
1813 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1814 	 && (device->target == cdm->pos.cookie.target)
1815 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1816 	 && (device == cdm->pos.cookie.device)
1817 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1818 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
1819 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1820 	     device->generation)) {
1821 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1822 		return(0);
1823 	}
1824 
1825 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1826 	 && (cdm->pos.cookie.bus == device->target->bus)
1827 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1828 	 && (cdm->pos.cookie.target == device->target)
1829 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1830 	 && (cdm->pos.cookie.device == device)
1831 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1832 	 && (cdm->pos.cookie.periph != NULL))
1833 		return(xptperiphtraverse(device,
1834 				(struct cam_periph *)cdm->pos.cookie.periph,
1835 				xptedtperiphfunc, arg));
1836 	else
1837 		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
1838 }
1839 
1840 static int
1841 xptedtperiphfunc(struct cam_periph *periph, void *arg)
1842 {
1843 	struct ccb_dev_match *cdm;
1844 	dev_match_ret retval;
1845 
1846 	cdm = (struct ccb_dev_match *)arg;
1847 
1848 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1849 
1850 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1851 		cdm->status = CAM_DEV_MATCH_ERROR;
1852 		return(0);
1853 	}
1854 
1855 	/*
1856 	 * If the copy flag is set, copy this peripheral out.
1857 	 */
1858 	if (retval & DM_RET_COPY) {
1859 		int spaceleft, j;
1860 
1861 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1862 			sizeof(struct dev_match_result));
1863 
1864 		/*
1865 		 * If we don't have enough space to put in another
1866 		 * match result, save our position and tell the
1867 		 * user there are more devices to check.
1868 		 */
1869 		if (spaceleft < sizeof(struct dev_match_result)) {
1870 			bzero(&cdm->pos, sizeof(cdm->pos));
1871 			cdm->pos.position_type =
1872 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1873 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1874 				CAM_DEV_POS_PERIPH;
1875 
1876 			cdm->pos.cookie.bus = periph->path->bus;
1877 			cdm->pos.generations[CAM_BUS_GENERATION] =
1878 				xsoftc.bus_generation;
1879 			cdm->pos.cookie.target = periph->path->target;
1880 			cdm->pos.generations[CAM_TARGET_GENERATION] =
1881 				periph->path->bus->generation;
1882 			cdm->pos.cookie.device = periph->path->device;
1883 			cdm->pos.generations[CAM_DEV_GENERATION] =
1884 				periph->path->target->generation;
1885 			cdm->pos.cookie.periph = periph;
1886 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1887 				periph->path->device->generation;
1888 			cdm->status = CAM_DEV_MATCH_MORE;
1889 			return(0);
1890 		}
1891 
1892 		j = cdm->num_matches;
1893 		cdm->num_matches++;
1894 		cdm->matches[j].type = DEV_MATCH_PERIPH;
1895 		cdm->matches[j].result.periph_result.path_id =
1896 			periph->path->bus->path_id;
1897 		cdm->matches[j].result.periph_result.target_id =
1898 			periph->path->target->target_id;
1899 		cdm->matches[j].result.periph_result.target_lun =
1900 			periph->path->device->lun_id;
1901 		cdm->matches[j].result.periph_result.unit_number =
1902 			periph->unit_number;
1903 		strncpy(cdm->matches[j].result.periph_result.periph_name,
1904 			periph->periph_name, DEV_IDLEN);
1905 	}
1906 
1907 	return(1);
1908 }
1909 
1910 static int
1911 xptedtmatch(struct ccb_dev_match *cdm)
1912 {
1913 	int ret;
1914 
1915 	cdm->num_matches = 0;
1916 
1917 	/*
1918 	 * Check the bus list generation.  If it has changed, the user
1919 	 * needs to reset everything and start over.
1920 	 */
1921 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1922 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
1923 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
1924 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1925 		return(0);
1926 	}
1927 
1928 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1929 	 && (cdm->pos.cookie.bus != NULL))
1930 		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
1931 				     xptedtbusfunc, cdm);
1932 	else
1933 		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
1934 
1935 	/*
1936 	 * If we get back 0, that means that we had to stop before fully
1937 	 * traversing the EDT.  It also means that one of the subroutines
1938 	 * has set the status field to the proper value.  If we get back 1,
1939 	 * we've fully traversed the EDT and copied out any matching entries.
1940 	 */
1941 	if (ret == 1)
1942 		cdm->status = CAM_DEV_MATCH_LAST;
1943 
1944 	return(ret);
1945 }
1946 
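/*
 * Illustrative sketch (not part of the original source): a consumer of
 * XPT_DEV_MATCH resubmits the same CCB while a pass ends with
 * CAM_DEV_MATCH_MORE; the traversal functions above record their position
 * and generation counts in cdm->pos, so the next pass resumes where the
 * previous one stopped.  CAM_DEV_MATCH_LIST_CHANGED tells the caller to
 * clear cdm->pos and start over from the beginning.
 *
 *	do {
 *		xpt_action((union ccb *)cdm);
 *		consume(cdm->matches, cdm->num_matches);
 *	} while (cdm->status == CAM_DEV_MATCH_MORE);
 *
 * consume() is a hypothetical helper standing in for whatever the caller
 * does with the returned dev_match_result entries.
 */
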
1947 static int
1948 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
1949 {
1950 	struct ccb_dev_match *cdm;
1951 
1952 	cdm = (struct ccb_dev_match *)arg;
1953 
1954 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1955 	 && (cdm->pos.cookie.pdrv == pdrv)
1956 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1957 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
1958 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1959 	     (*pdrv)->generation)) {
1960 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1961 		return(0);
1962 	}
1963 
1964 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1965 	 && (cdm->pos.cookie.pdrv == pdrv)
1966 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1967 	 && (cdm->pos.cookie.periph != NULL))
1968 		return(xptpdperiphtraverse(pdrv,
1969 				(struct cam_periph *)cdm->pos.cookie.periph,
1970 				xptplistperiphfunc, arg));
1971 	else
1972 		return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
1973 }
1974 
1975 static int
1976 xptplistperiphfunc(struct cam_periph *periph, void *arg)
1977 {
1978 	struct ccb_dev_match *cdm;
1979 	dev_match_ret retval;
1980 
1981 	cdm = (struct ccb_dev_match *)arg;
1982 
1983 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1984 
1985 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1986 		cdm->status = CAM_DEV_MATCH_ERROR;
1987 		return(0);
1988 	}
1989 
1990 	/*
1991 	 * If the copy flag is set, copy this peripheral out.
1992 	 */
1993 	if (retval & DM_RET_COPY) {
1994 		int spaceleft, j;
1995 
1996 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1997 			sizeof(struct dev_match_result));
1998 
1999 		/*
2000 		 * If we don't have enough space to put in another
2001 		 * match result, save our position and tell the
2002 		 * user there are more devices to check.
2003 		 */
2004 		if (spaceleft < sizeof(struct dev_match_result)) {
2005 			struct periph_driver **pdrv;
2006 
2007 			pdrv = NULL;
2008 			bzero(&cdm->pos, sizeof(cdm->pos));
2009 			cdm->pos.position_type =
2010 				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2011 				CAM_DEV_POS_PERIPH;
2012 
2013 			/*
2014 			 * This may look a bit nonsensical, but it is
2015 			 * actually quite logical.  There are very few
2016 			 * peripheral drivers, and bloating every peripheral
2017 			 * structure with a pointer back to its parent
2018 			 * peripheral driver linker set entry would cost
2019 			 * more in the long run than doing this quick lookup.
2020 			 */
2021 			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2022 				if (strcmp((*pdrv)->driver_name,
2023 				    periph->periph_name) == 0)
2024 					break;
2025 			}
2026 
2027 			if (*pdrv == NULL) {
2028 				cdm->status = CAM_DEV_MATCH_ERROR;
2029 				return(0);
2030 			}
2031 
2032 			cdm->pos.cookie.pdrv = pdrv;
2033 			/*
2034 			 * The periph generation slot does double duty, as
2035 			 * does the periph pointer slot.  They are used for
2036 			 * both edt and pdrv lookups and positioning.
2037 			 */
2038 			cdm->pos.cookie.periph = periph;
2039 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2040 				(*pdrv)->generation;
2041 			cdm->status = CAM_DEV_MATCH_MORE;
2042 			return(0);
2043 		}
2044 
2045 		j = cdm->num_matches;
2046 		cdm->num_matches++;
2047 		cdm->matches[j].type = DEV_MATCH_PERIPH;
2048 		cdm->matches[j].result.periph_result.path_id =
2049 			periph->path->bus->path_id;
2050 
2051 		/*
2052 		 * The transport layer peripheral doesn't have a target or
2053 		 * lun.
2054 		 */
2055 		if (periph->path->target)
2056 			cdm->matches[j].result.periph_result.target_id =
2057 				periph->path->target->target_id;
2058 		else
2059 			cdm->matches[j].result.periph_result.target_id = -1;
2060 
2061 		if (periph->path->device)
2062 			cdm->matches[j].result.periph_result.target_lun =
2063 				periph->path->device->lun_id;
2064 		else
2065 			cdm->matches[j].result.periph_result.target_lun = -1;
2066 
2067 		cdm->matches[j].result.periph_result.unit_number =
2068 			periph->unit_number;
2069 		strncpy(cdm->matches[j].result.periph_result.periph_name,
2070 			periph->periph_name, DEV_IDLEN);
2071 	}
2072 
2073 	return(1);
2074 }
2075 
2076 static int
2077 xptperiphlistmatch(struct ccb_dev_match *cdm)
2078 {
2079 	int ret;
2080 
2081 	cdm->num_matches = 0;
2082 
2083 	/*
2084 	 * At this point in the EDT traversal function, we check the bus
2085 	 * list generation to make sure that no busses have been added or
2086 	 * removed since the user last sent an XPT_DEV_MATCH ccb through.
2087 	 * For the peripheral driver list traversal function, however, we
2088 	 * don't have to worry about new peripheral driver types coming or
2089 	 * going; they're in a linker set, and therefore can't change
2090 	 * without a recompile.
2091 	 */
2092 
2093 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2094 	 && (cdm->pos.cookie.pdrv != NULL))
2095 		ret = xptpdrvtraverse(
2096 				(struct periph_driver **)cdm->pos.cookie.pdrv,
2097 				xptplistpdrvfunc, cdm);
2098 	else
2099 		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2100 
2101 	/*
2102 	 * If we get back 0, that means that we had to stop before fully
2103 	 * traversing the peripheral driver tree.  It also means that one of
2104 	 * the subroutines has set the status field to the proper value.  If
2105 	 * we get back 1, we've fully traversed the peripheral driver lists
2106 	 * and copied out any matching entries.
2107 	 */
2108 	if (ret == 1)
2109 		cdm->status = CAM_DEV_MATCH_LAST;
2110 
2111 	return(ret);
2112 }
2113 
2114 static int
2115 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2116 {
2117 	struct cam_eb *bus, *next_bus;
2118 	int retval;
2119 
2120 	retval = 1;
2121 
2122 	mtx_lock(&xsoftc.xpt_topo_lock);
2123 	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
2124 	     bus != NULL;
2125 	     bus = next_bus) {
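		/*
		 * Remember the next bus before dropping the topology lock;
		 * tr_func() runs with only the SIM lock held, and the bus
		 * list may change underneath us while it does.
		 */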
2126 		next_bus = TAILQ_NEXT(bus, links);
2127 
2128 		mtx_unlock(&xsoftc.xpt_topo_lock);
2129 		CAM_SIM_LOCK(bus->sim);
2130 		retval = tr_func(bus, arg);
2131 		CAM_SIM_UNLOCK(bus->sim);
2132 		if (retval == 0)
2133 			return(retval);
2134 		mtx_lock(&xsoftc.xpt_topo_lock);
2135 	}
2136 	mtx_unlock(&xsoftc.xpt_topo_lock);
2137 
2138 	return(retval);
2139 }
2140 
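/*
 * Return non-zero if any peripheral attached through the given SIM is
 * still referenced (i.e. open), zero otherwise.
 */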
2141 int
2142 xpt_sim_opened(struct cam_sim *sim)
2143 {
2144 	struct cam_eb *bus;
2145 	struct cam_et *target;
2146 	struct cam_ed *device;
2147 	struct cam_periph *periph;
2148 
2149 	KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
2150 	mtx_assert(sim->mtx, MA_OWNED);
2151 
2152 	mtx_lock(&xsoftc.xpt_topo_lock);
2153 	TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
2154 		if (bus->sim != sim)
2155 			continue;
2156 
2157 		TAILQ_FOREACH(target, &bus->et_entries, links) {
2158 			TAILQ_FOREACH(device, &target->ed_entries, links) {
2159 				SLIST_FOREACH(periph, &device->periphs,
2160 				    periph_links) {
2161 					if (periph->refcount > 0) {
2162 						mtx_unlock(&xsoftc.xpt_topo_lock);
2163 						return (1);
2164 					}
2165 				}
2166 			}
2167 		}
2168 	}
2169 
2170 	mtx_unlock(&xsoftc.xpt_topo_lock);
2171 	return (0);
2172 }
2173 
2174 static int
2175 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2176 		  xpt_targetfunc_t *tr_func, void *arg)
2177 {
2178 	struct cam_et *target, *next_target;
2179 	int retval;
2180 
2181 	retval = 1;
2182 	for (target = (start_target ? start_target :
2183 		       TAILQ_FIRST(&bus->et_entries));
2184 	     target != NULL; target = next_target) {
2185 
2186 		next_target = TAILQ_NEXT(target, links);
2187 
2188 		retval = tr_func(target, arg);
2189 
2190 		if (retval == 0)
2191 			return(retval);
2192 	}
2193 
2194 	return(retval);
2195 }
2196 
2197 static int
2198 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2199 		  xpt_devicefunc_t *tr_func, void *arg)
2200 {
2201 	struct cam_ed *device, *next_device;
2202 	int retval;
2203 
2204 	retval = 1;
2205 	for (device = (start_device ? start_device :
2206 		       TAILQ_FIRST(&target->ed_entries));
2207 	     device != NULL;
2208 	     device = next_device) {
2209 
2210 		next_device = TAILQ_NEXT(device, links);
2211 
2212 		retval = tr_func(device, arg);
2213 
2214 		if (retval == 0)
2215 			return(retval);
2216 	}
2217 
2218 	return(retval);
2219 }
2220 
2221 static int
2222 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2223 		  xpt_periphfunc_t *tr_func, void *arg)
2224 {
2225 	struct cam_periph *periph, *next_periph;
2226 	int retval;
2227 
2228 	retval = 1;
2229 
2230 	for (periph = (start_periph ? start_periph :
2231 		       SLIST_FIRST(&device->periphs));
2232 	     periph != NULL;
2233 	     periph = next_periph) {
2234 
2235 		next_periph = SLIST_NEXT(periph, periph_links);
2236 
2237 		retval = tr_func(periph, arg);
2238 		if (retval == 0)
2239 			return(retval);
2240 	}
2241 
2242 	return(retval);
2243 }
2244 
2245 static int
2246 xptpdrvtraverse(struct periph_driver **start_pdrv,
2247 		xpt_pdrvfunc_t *tr_func, void *arg)
2248 {
2249 	struct periph_driver **pdrv;
2250 	int retval;
2251 
2252 	retval = 1;
2253 
2254 	/*
2255 	 * We don't traverse the peripheral driver list like we do the
2256 	 * other lists, because it is a linker set, and therefore cannot be
2257 	 * changed during runtime.  If the peripheral driver list is ever
2258 	 * re-done to be something other than a linker set (i.e. it can
2259 	 * change while the system is running), the list traversal should
2260 	 * be modified to work like the other traversal functions.
2261 	 */
2262 	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2263 	     *pdrv != NULL; pdrv++) {
2264 		retval = tr_func(pdrv, arg);
2265 
2266 		if (retval == 0)
2267 			return(retval);
2268 	}
2269 
2270 	return(retval);
2271 }
2272 
2273 static int
2274 xptpdperiphtraverse(struct periph_driver **pdrv,
2275 		    struct cam_periph *start_periph,
2276 		    xpt_periphfunc_t *tr_func, void *arg)
2277 {
2278 	struct cam_periph *periph, *next_periph;
2279 	int retval;
2280 
2281 	retval = 1;
2282 
2283 	for (periph = (start_periph ? start_periph :
2284 	     TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2285 	     periph = next_periph) {
2286 
2287 		next_periph = TAILQ_NEXT(periph, unit_links);
2288 
2289 		retval = tr_func(periph, arg);
2290 		if (retval == 0)
2291 			return(retval);
2292 	}
2293 	return(retval);
2294 }
2295 
2296 static int
2297 xptdefbusfunc(struct cam_eb *bus, void *arg)
2298 {
2299 	struct xpt_traverse_config *tr_config;
2300 
2301 	tr_config = (struct xpt_traverse_config *)arg;
2302 
2303 	if (tr_config->depth == XPT_DEPTH_BUS) {
2304 		xpt_busfunc_t *tr_func;
2305 
2306 		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2307 
2308 		return(tr_func(bus, tr_config->tr_arg));
2309 	} else
2310 		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2311 }
2312 
2313 static int
2314 xptdeftargetfunc(struct cam_et *target, void *arg)
2315 {
2316 	struct xpt_traverse_config *tr_config;
2317 
2318 	tr_config = (struct xpt_traverse_config *)arg;
2319 
2320 	if (tr_config->depth == XPT_DEPTH_TARGET) {
2321 		xpt_targetfunc_t *tr_func;
2322 
2323 		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2324 
2325 		return(tr_func(target, tr_config->tr_arg));
2326 	} else
2327 		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2328 }
2329 
2330 static int
2331 xptdefdevicefunc(struct cam_ed *device, void *arg)
2332 {
2333 	struct xpt_traverse_config *tr_config;
2334 
2335 	tr_config = (struct xpt_traverse_config *)arg;
2336 
2337 	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2338 		xpt_devicefunc_t *tr_func;
2339 
2340 		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2341 
2342 		return(tr_func(device, tr_config->tr_arg));
2343 	} else
2344 		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2345 }
2346 
2347 static int
2348 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2349 {
2350 	struct xpt_traverse_config *tr_config;
2351 	xpt_periphfunc_t *tr_func;
2352 
2353 	tr_config = (struct xpt_traverse_config *)arg;
2354 
2355 	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2356 
2357 	/*
2358 	 * Unlike the other default functions, we don't check for depth
2359 	 * here.  The peripheral driver level is the last level in the EDT,
2360 	 * so if we're here, we should execute the function in question.
2361 	 */
2362 	return(tr_func(periph, tr_config->tr_arg));
2363 }
2364 
2365 /*
2366  * Execute the given function for every bus in the EDT.
2367  */
2368 static int
2369 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2370 {
2371 	struct xpt_traverse_config tr_config;
2372 
2373 	tr_config.depth = XPT_DEPTH_BUS;
2374 	tr_config.tr_func = tr_func;
2375 	tr_config.tr_arg = arg;
2376 
2377 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2378 }
2379 
2380 /*
2381  * Execute the given function for every device in the EDT.
2382  */
2383 static int
2384 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2385 {
2386 	struct xpt_traverse_config tr_config;
2387 
2388 	tr_config.depth = XPT_DEPTH_DEVICE;
2389 	tr_config.tr_func = tr_func;
2390 	tr_config.tr_arg = arg;
2391 
2392 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2393 }
2394 
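/*
 * Illustrative sketch (not part of the original source): counting every
 * device in the EDT with the wrapper above.  A traversal callback returns
 * non-zero to continue the walk and zero to abort it early.
 *
 *	static int
 *	example_count_device(struct cam_ed *device, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (1);
 *	}
 *
 *	int count = 0;
 *	xpt_for_all_devices(example_count_device, &count);
 *
 * example_count_device() is hypothetical and exists only for this sketch.
 */
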
2395 static int
2396 xptsetasyncfunc(struct cam_ed *device, void *arg)
2397 {
2398 	struct cam_path path;
2399 	struct ccb_getdev cgd;
2400 	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2401 
2402 	/*
2403 	 * Don't report unconfigured devices (Wildcard devs,
2404 	 * devices only for target mode, device instances
2405 	 * that have been invalidated but are waiting for
2406 	 * their last reference count to be released).
2407 	 */
2408 	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2409 		return (1);
2410 
2411 	xpt_compile_path(&path,
2412 			 NULL,
2413 			 device->target->bus->path_id,
2414 			 device->target->target_id,
2415 			 device->lun_id);
2416 	xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
2417 	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2418 	xpt_action((union ccb *)&cgd);
2419 	csa->callback(csa->callback_arg,
2420 			    AC_FOUND_DEVICE,
2421 			    &path, &cgd);
2422 	xpt_release_path(&path);
2423 
2424 	return(1);
2425 }
2426 
2427 static int
2428 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2429 {
2430 	struct cam_path path;
2431 	struct ccb_pathinq cpi;
2432 	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2433 
2434 	xpt_compile_path(&path, /*periph*/NULL,
2435 			 bus->sim->path_id,
2436 			 CAM_TARGET_WILDCARD,
2437 			 CAM_LUN_WILDCARD);
2438 	xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
2439 	cpi.ccb_h.func_code = XPT_PATH_INQ;
2440 	xpt_action((union ccb *)&cpi);
2441 	csa->callback(csa->callback_arg,
2442 			    AC_PATH_REGISTERED,
2443 			    &path, &cpi);
2444 	xpt_release_path(&path);
2445 
2446 	return(1);
2447 }
2448 
2449 void
2450 xpt_action(union ccb *start_ccb)
2451 {
2452 
2453 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2454 
2455 	start_ccb->ccb_h.status = CAM_REQ_INPROG;
2456 	/* Compatibility for RL-unaware code. */
2457 	if (CAM_PRIORITY_TO_RL(start_ccb->ccb_h.pinfo.priority) == 0)
2458 	    start_ccb->ccb_h.pinfo.priority += CAM_PRIORITY_NORMAL - 1;
2459 	(*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb);
2460 }
2461 
2462 void
2463 xpt_action_default(union ccb *start_ccb)
2464 {
2465 
2466 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));
2467 
2469 	switch (start_ccb->ccb_h.func_code) {
2470 	case XPT_SCSI_IO:
2471 	{
2472 		struct cam_ed *device;
2473 #ifdef CAMDEBUG
2474 		char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2475 		struct cam_path *path;
2476 
2477 		path = start_ccb->ccb_h.path;
2478 #endif
2479 
2480 		/*
2481 		 * For the sake of compatibility with SCSI-1
2482 		 * devices that may not understand the identify
2483 		 * message, we include lun information in the
2484 		 * second byte of all commands.  SCSI-1 specifies
2485 		 * that luns are a 3 bit value and reserves only 3
2486 		 * bits for lun information in the CDB.  Later
2487 		 * revisions of the SCSI spec allow for more than 8
2488 		 * luns, but have deprecated lun information in the
2489 		 * CDB.  So, if the lun won't fit, we must omit it.
2490 		 *
2491 		 * Also be aware that during initial probing for devices,
2492 		 * the inquiry information is unknown but initialized to 0.
2493 		 * This means that this code will be exercised while probing
2494 		 * devices with an ANSI revision greater than 2.
2495 		 */
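		/*
		 * For example, target_lun 2 below becomes (2 << 5) == 0x40,
		 * i.e. bits 7-5 of CDB byte 1.
		 */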
2496 		device = start_ccb->ccb_h.path->device;
2497 		if (device->protocol_version <= SCSI_REV_2
2498 		 && start_ccb->ccb_h.target_lun < 8
2499 		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2500 
2501 			start_ccb->csio.cdb_io.cdb_bytes[1] |=
2502 			    start_ccb->ccb_h.target_lun << 5;
2503 		}
2504 		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2505 		CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2506 			  scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2507 			  	       &path->device->inq_data),
2508 			  scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2509 					  cdb_str, sizeof(cdb_str))));
2510 	}
2511 	/* FALLTHROUGH */
2512 	case XPT_TARGET_IO:
2513 	case XPT_CONT_TARGET_IO:
2514 		start_ccb->csio.sense_resid = 0;
2515 		start_ccb->csio.resid = 0;
2516 		/* FALLTHROUGH */
2517 	case XPT_ATA_IO:
2518 		if (start_ccb->ccb_h.func_code == XPT_ATA_IO) {
2519 			start_ccb->ataio.resid = 0;
2520 		}
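	/* FALLTHROUGH */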
2521 	case XPT_RESET_DEV:
2522 	case XPT_ENG_EXEC:
2523 	{
2524 		struct cam_path *path = start_ccb->ccb_h.path;
2525 		int frozen;
2526 
2527 		frozen = cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2528 		path->device->sim->devq->alloc_openings += frozen;
2529 		if (frozen > 0)
2530 			xpt_run_dev_allocq(path->bus);
2531 		if (xpt_schedule_dev_sendq(path->bus, path->device))
2532 			xpt_run_dev_sendq(path->bus);
2533 		break;
2534 	}
2535 	case XPT_CALC_GEOMETRY:
2536 	{
2537 		struct cam_sim *sim;
2538 
2539 		/* Filter out garbage */
2540 		if (start_ccb->ccg.block_size == 0
2541 		 || start_ccb->ccg.volume_size == 0) {
2542 			start_ccb->ccg.cylinders = 0;
2543 			start_ccb->ccg.heads = 0;
2544 			start_ccb->ccg.secs_per_track = 0;
2545 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2546 			break;
2547 		}
2548 #ifdef PC98
2549 		/*
2550 		 * In a PC-98 system, geometry translation depends on
2551 		 * the "real" device geometry obtained from mode page 4.
2552 		 * SCSI geometry translation is performed in the
2553 		 * initialization routine of the SCSI BIOS and the result is
2554 		 * stored in host memory.  If the translation is available
2555 		 * in host memory, use it.  If not, rely on the default
2556 		 * translation the device driver performs.
2557 		 */
2558 		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2559 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2560 			break;
2561 		}
2562 #endif
2563 		sim = start_ccb->ccb_h.path->bus->sim;
2564 		(*(sim->sim_action))(sim, start_ccb);
2565 		break;
2566 	}
2567 	case XPT_ABORT:
2568 	{
2569 		union ccb* abort_ccb;
2570 
2571 		abort_ccb = start_ccb->cab.abort_ccb;
2572 		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2573 
2574 			if (abort_ccb->ccb_h.pinfo.index >= 0) {
2575 				struct cam_ccbq *ccbq;
2576 				struct cam_ed *device;
2577 
2578 				device = abort_ccb->ccb_h.path->device;
2579 				ccbq = &device->ccbq;
2580 				device->sim->devq->alloc_openings -=
2581 				    cam_ccbq_remove_ccb(ccbq, abort_ccb);
2582 				abort_ccb->ccb_h.status =
2583 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2584 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2585 				xpt_done(abort_ccb);
2586 				start_ccb->ccb_h.status = CAM_REQ_CMP;
2587 				break;
2588 			}
2589 			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2590 			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2591 				/*
2592 				 * We've caught this ccb en route to
2593 				 * the SIM.  Flag it for abort and the
2594 				 * SIM will do so just before starting
2595 				 * real work on the CCB.
2596 				 */
2597 				abort_ccb->ccb_h.status =
2598 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2599 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2600 				start_ccb->ccb_h.status = CAM_REQ_CMP;
2601 				break;
2602 			}
2603 		}
2604 		if (XPT_FC_IS_QUEUED(abort_ccb)
2605 		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2606 			/*
2607 			 * It's already completed but waiting
2608 			 * for our SWI to get to it.
2609 			 */
2610 			start_ccb->ccb_h.status = CAM_UA_ABORT;
2611 			break;
2612 		}
2613 		/*
2614 		 * If we weren't able to take care of the abort request
2615 		 * in the XPT, pass the request down to the SIM for processing.
2616 		 */
2617 	}
2618 	/* FALLTHROUGH */
2619 	case XPT_ACCEPT_TARGET_IO:
2620 	case XPT_EN_LUN:
2621 	case XPT_IMMED_NOTIFY:
2622 	case XPT_NOTIFY_ACK:
2623 	case XPT_RESET_BUS:
2624 	case XPT_IMMEDIATE_NOTIFY:
2625 	case XPT_NOTIFY_ACKNOWLEDGE:
2626 	case XPT_GET_SIM_KNOB:
2627 	case XPT_SET_SIM_KNOB:
2628 	{
2629 		struct cam_sim *sim;
2630 
2631 		sim = start_ccb->ccb_h.path->bus->sim;
2632 		(*(sim->sim_action))(sim, start_ccb);
2633 		break;
2634 	}
2635 	case XPT_PATH_INQ:
2636 	{
2637 		struct cam_sim *sim;
2638 
2639 		sim = start_ccb->ccb_h.path->bus->sim;
2640 		(*(sim->sim_action))(sim, start_ccb);
2641 		break;
2642 	}
2643 	case XPT_PATH_STATS:
2644 		start_ccb->cpis.last_reset =
2645 			start_ccb->ccb_h.path->bus->last_reset;
2646 		start_ccb->ccb_h.status = CAM_REQ_CMP;
2647 		break;
2648 	case XPT_GDEV_TYPE:
2649 	{
2650 		struct cam_ed *dev;
2651 
2652 		dev = start_ccb->ccb_h.path->device;
2653 		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2654 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2655 		} else {
2656 			struct ccb_getdev *cgd;
2657 			struct cam_eb *bus;
2658 			struct cam_et *tar;
2659 
2660 			cgd = &start_ccb->cgd;
2661 			bus = cgd->ccb_h.path->bus;
2662 			tar = cgd->ccb_h.path->target;
2663 			cgd->protocol = dev->protocol;
2664 			cgd->inq_data = dev->inq_data;
2665 			cgd->ident_data = dev->ident_data;
2666 			cgd->inq_flags = dev->inq_flags;
2667 			cgd->ccb_h.status = CAM_REQ_CMP;
2668 			cgd->serial_num_len = dev->serial_num_len;
2669 			if ((dev->serial_num_len > 0)
2670 			 && (dev->serial_num != NULL))
2671 				bcopy(dev->serial_num, cgd->serial_num,
2672 				      dev->serial_num_len);
2673 		}
2674 		break;
2675 	}
2676 	case XPT_GDEV_STATS:
2677 	{
2678 		struct cam_ed *dev;
2679 
2680 		dev = start_ccb->ccb_h.path->device;
2681 		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2682 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2683 		} else {
2684 			struct ccb_getdevstats *cgds;
2685 			struct cam_eb *bus;
2686 			struct cam_et *tar;
2687 
2688 			cgds = &start_ccb->cgds;
2689 			bus = cgds->ccb_h.path->bus;
2690 			tar = cgds->ccb_h.path->target;
2691 			cgds->dev_openings = dev->ccbq.dev_openings;
2692 			cgds->dev_active = dev->ccbq.dev_active;
2693 			cgds->devq_openings = dev->ccbq.devq_openings;
2694 			cgds->devq_queued = dev->ccbq.queue.entries;
2695 			cgds->held = dev->ccbq.held;
2696 			cgds->last_reset = tar->last_reset;
2697 			cgds->maxtags = dev->maxtags;
2698 			cgds->mintags = dev->mintags;
2699 			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2700 				cgds->last_reset = bus->last_reset;
2701 			cgds->ccb_h.status = CAM_REQ_CMP;
2702 		}
2703 		break;
2704 	}
2705 	case XPT_GDEVLIST:
2706 	{
2707 		struct cam_periph	*nperiph;
2708 		struct periph_list	*periph_head;
2709 		struct ccb_getdevlist	*cgdl;
2710 		u_int			i;
2711 		struct cam_ed		*device;
2712 		int			found;
2713 
2715 		found = 0;
2716 
2717 		/*
2718 		 * Don't want anyone mucking with our data.
2719 		 */
2720 		device = start_ccb->ccb_h.path->device;
2721 		periph_head = &device->periphs;
2722 		cgdl = &start_ccb->cgdl;
2723 
2724 		/*
2725 		 * Check and see if the list has changed since the user
2726 		 * last requested a list member.  If so, tell them that the
2727 		 * list has changed, and therefore they need to start over
2728 		 * from the beginning.
2729 		 */
2730 		if ((cgdl->index != 0) &&
2731 		    (cgdl->generation != device->generation)) {
2732 			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2733 			break;
2734 		}
2735 
2736 		/*
2737 		 * Traverse the list of peripherals and attempt to find
2738 		 * the requested peripheral.
2739 		 */
2740 		for (nperiph = SLIST_FIRST(periph_head), i = 0;
2741 		     (nperiph != NULL) && (i <= cgdl->index);
2742 		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
2743 			if (i == cgdl->index) {
2744 				strncpy(cgdl->periph_name,
2745 					nperiph->periph_name,
2746 					DEV_IDLEN);
2747 				cgdl->unit_number = nperiph->unit_number;
2748 				found = 1;
2749 			}
2750 		}
2751 		if (found == 0) {
2752 			cgdl->status = CAM_GDEVLIST_ERROR;
2753 			break;
2754 		}
2755 
2756 		if (nperiph == NULL)
2757 			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2758 		else
2759 			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2760 
2761 		cgdl->index++;
2762 		cgdl->generation = device->generation;
2763 
2764 		cgdl->ccb_h.status = CAM_REQ_CMP;
2765 		break;
2766 	}
2767 	case XPT_DEV_MATCH:
2768 	{
2769 		dev_pos_type position_type;
2770 		struct ccb_dev_match *cdm;
2771 
2772 		cdm = &start_ccb->cdm;
2773 
2774 		/*
2775 		 * There are two ways of getting at information in the EDT.
2776 		 * The first way is via the primary EDT tree.  It starts
2777 		 * with a list of busses, then a list of targets on a bus,
2778 		 * then devices/luns on a target, and then peripherals on a
2779 		 * device/lun.  The "other" way is by the peripheral driver
2780 		 * lists.  The peripheral driver lists are organized by
2781 		 * peripheral driver.  (obviously)  So it makes sense to
2782 		 * use the peripheral driver list if the user is looking
2783 		 * for something like "da1", or all "da" devices.  If the
2784 		 * user is looking for something on a particular bus/target
2785 		 * or lun, it's generally better to go through the EDT tree.
2786 		 */
2787 
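		/*
		 * For example, a pattern list holding only a
		 * DEV_MATCH_PERIPH pattern (say, for "da") is served from
		 * the peripheral driver lists, while any DEV_MATCH_BUS or
		 * DEV_MATCH_DEVICE pattern routes the request through the
		 * EDT walk instead.
		 */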
2788 		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2789 			position_type = cdm->pos.position_type;
2790 		else {
2791 			u_int i;
2792 
2793 			position_type = CAM_DEV_POS_NONE;
2794 
2795 			for (i = 0; i < cdm->num_patterns; i++) {
2796 				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2797 				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
2798 					position_type = CAM_DEV_POS_EDT;
2799 					break;
2800 				}
2801 			}
2802 
2803 			if (cdm->num_patterns == 0)
2804 				position_type = CAM_DEV_POS_EDT;
2805 			else if (position_type == CAM_DEV_POS_NONE)
2806 				position_type = CAM_DEV_POS_PDRV;
2807 		}
2808 
2809 		switch (position_type & CAM_DEV_POS_TYPEMASK) {
2810 		case CAM_DEV_POS_EDT:
2811 			xptedtmatch(cdm);
2812 			break;
2813 		case CAM_DEV_POS_PDRV:
2814 			xptperiphlistmatch(cdm);
2815 			break;
2816 		default:
2817 			cdm->status = CAM_DEV_MATCH_ERROR;
2818 			break;
2819 		}
2820 
2821 		if (cdm->status == CAM_DEV_MATCH_ERROR)
2822 			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2823 		else
2824 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2825 
2826 		break;
2827 	}
2828 	case XPT_SASYNC_CB:
2829 	{
2830 		struct ccb_setasync *csa;
2831 		struct async_node *cur_entry;
2832 		struct async_list *async_head;
2833 		u_int32_t added;
2834 
2835 		csa = &start_ccb->csa;
2836 		added = csa->event_enable;
2837 		async_head = &csa->ccb_h.path->device->asyncs;
2838 
2839 		/*
2840 		 * If there is already an entry for us, simply
2841 		 * update it.
2842 		 */
2843 		cur_entry = SLIST_FIRST(async_head);
2844 		while (cur_entry != NULL) {
2845 			if ((cur_entry->callback_arg == csa->callback_arg)
2846 			 && (cur_entry->callback == csa->callback))
2847 				break;
2848 			cur_entry = SLIST_NEXT(cur_entry, links);
2849 		}
2850 
2851 		if (cur_entry != NULL) {
2852 		 	/*
2853 			 * If the request has no flags set,
2854 			 * remove the entry.
2855 			 */
2856 			added &= ~cur_entry->event_enable;
2857 			if (csa->event_enable == 0) {
2858 				SLIST_REMOVE(async_head, cur_entry,
2859 					     async_node, links);
2860 				xpt_release_device(csa->ccb_h.path->device);
2861 				free(cur_entry, M_CAMXPT);
2862 			} else {
2863 				cur_entry->event_enable = csa->event_enable;
2864 			}
2865 			csa->event_enable = added;
2866 		} else {
2867 			cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
2868 					   M_NOWAIT);
2869 			if (cur_entry == NULL) {
2870 				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
2871 				break;
2872 			}
2873 			cur_entry->event_enable = csa->event_enable;
2874 			cur_entry->callback_arg = csa->callback_arg;
2875 			cur_entry->callback = csa->callback;
2876 			SLIST_INSERT_HEAD(async_head, cur_entry, links);
2877 			xpt_acquire_device(csa->ccb_h.path->device);
2878 		}
2879 		start_ccb->ccb_h.status = CAM_REQ_CMP;
2880 		break;
2881 	}
2882 	case XPT_REL_SIMQ:
2883 	{
2884 		struct ccb_relsim *crs;
2885 		struct cam_ed *dev;
2886 
2887 		crs = &start_ccb->crs;
2888 		dev = crs->ccb_h.path->device;
2889 		if (dev == NULL) {
2890 
2891 			crs->ccb_h.status = CAM_DEV_NOT_THERE;
2892 			break;
2893 		}
2894 
2895 		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
2896 
2897  			if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
2898 				/* Don't ever go below one opening */
2899 				if (crs->openings > 0) {
2900 					xpt_dev_ccbq_resize(crs->ccb_h.path,
2901 							    crs->openings);
2902 
2903 					if (bootverbose) {
2904 						xpt_print(crs->ccb_h.path,
2905 						    "tagged openings now %d\n",
2906 						    crs->openings);
2907 					}
2908 				}
2909 			}
2910 		}
2911 
2912 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
2913 
2914 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
2915 
2916 				/*
2917 				 * Just extend the old timeout and decrement
2918 				 * the freeze count so that a single timeout
2919 				 * is sufficient for releasing the queue.
2920 				 */
2921 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2922 				callout_stop(&dev->callout);
2923 			} else {
2924 
2925 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2926 			}
2927 
2928 			callout_reset(&dev->callout,
2929 			    (crs->release_timeout * hz) / 1000,
2930 			    xpt_release_devq_timeout, dev);
2931 
2932 			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
2933 
2934 		}
2935 
2936 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
2937 
2938 			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
2939 				/*
2940 				 * Decrement the freeze count so that a single
2941 				 * completion is still sufficient to unfreeze
2942 				 * the queue.
2943 				 */
2944 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2945 			} else {
2946 
2947 				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
2948 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2949 			}
2950 		}
2951 
2952 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
2953 
2954 			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
2955 			 || (dev->ccbq.dev_active == 0)) {
2956 
2957 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2958 			} else {
2959 
2960 				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
2961 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2962 			}
2963 		}
2964 
2965 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
2966 			xpt_release_devq_rl(crs->ccb_h.path, /*runlevel*/
2967 			    (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ?
2968 				crs->release_timeout : 0,
2969 			    /*count*/1, /*run_queue*/TRUE);
2970 		}
2971 		start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt[0];
2972 		start_ccb->ccb_h.status = CAM_REQ_CMP;
2973 		break;
2974 	}
2975 	case XPT_DEBUG: {
2976 #ifdef CAMDEBUG
2977 #ifdef CAM_DEBUG_DELAY
2978 		cam_debug_delay = CAM_DEBUG_DELAY;
2979 #endif
2980 		cam_dflags = start_ccb->cdbg.flags;
2981 		if (cam_dpath != NULL) {
2982 			xpt_free_path(cam_dpath);
2983 			cam_dpath = NULL;
2984 		}
2985 
2986 		if (cam_dflags != CAM_DEBUG_NONE) {
2987 			if (xpt_create_path(&cam_dpath, xpt_periph,
2988 					    start_ccb->ccb_h.path_id,
2989 					    start_ccb->ccb_h.target_id,
2990 					    start_ccb->ccb_h.target_lun) !=
2991 					    CAM_REQ_CMP) {
2992 				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2993 				cam_dflags = CAM_DEBUG_NONE;
2994 			} else {
2995 				start_ccb->ccb_h.status = CAM_REQ_CMP;
2996 				xpt_print(cam_dpath, "debugging flags now %x\n",
2997 				    cam_dflags);
2998 			}
2999 		} else {
3000 			cam_dpath = NULL;
3001 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3002 		}
3003 #else /* !CAMDEBUG */
3004 		start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3005 #endif /* CAMDEBUG */
3006 		break;
3007 	}
3008 	case XPT_FREEZE_QUEUE:
3009 	{
3010 		struct ccb_relsim *crs = &start_ccb->crs;
3011 
3012 		xpt_freeze_devq_rl(crs->ccb_h.path, /*runlevel*/
3013 		    (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ?
3014 		    crs->release_timeout : 0, /*count*/1);
3015 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3016 		break;
3017 	}
3018 	case XPT_NOOP:
3019 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3020 			xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3021 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3022 		break;
3023 	default:
3024 	case XPT_SDEV_TYPE:
3025 	case XPT_TERM_IO:
3026 	case XPT_ENG_INQ:
3027 		/* XXX Implement */
3028 		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3029 		break;
3030 	}
3031 }
3032 
3033 void
3034 xpt_polled_action(union ccb *start_ccb)
3035 {
3036 	u_int32_t timeout;
3037 	struct	  cam_sim *sim;
3038 	struct	  cam_devq *devq;
3039 	struct	  cam_ed *dev;
3040 
3042 	timeout = start_ccb->ccb_h.timeout;
3043 	sim = start_ccb->ccb_h.path->bus->sim;
3044 	devq = sim->devq;
3045 	dev = start_ccb->ccb_h.path->device;
3046 
3047 	mtx_assert(sim->mtx, MA_OWNED);
3048 
3049 	/*
3050 	 * Steal an opening so that no other queued requests
3051 	 * can get it before us while we simulate interrupts.
3052 	 */
3053 	dev->ccbq.devq_openings--;
3054 	dev->ccbq.dev_openings--;
3055 
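	/*
	 * ccb_h.timeout is expressed in milliseconds, and each polling
	 * iteration below delays for one millisecond, so timeout bounds
	 * the total time spent waiting.
	 */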
3056 	while (((devq != NULL && devq->send_openings <= 0) ||
3057 	    dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
3058 		DELAY(1000);
3059 		(*(sim->sim_poll))(sim);
3060 		camisr_runqueue(&sim->sim_doneq);
3061 	}
3062 
3063 	dev->ccbq.devq_openings++;
3064 	dev->ccbq.dev_openings++;
3065 
3066 	if (timeout != 0) {
3067 		xpt_action(start_ccb);
3068 		while (--timeout > 0) {
3069 			(*(sim->sim_poll))(sim);
3070 			camisr_runqueue(&sim->sim_doneq);
3071 			if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3072 			    != CAM_REQ_INPROG)
3073 				break;
3074 			DELAY(1000);
3075 		}
3076 		if (timeout == 0) {
3077 			/*
3078 			 * XXX Is it worth adding a sim_timeout entry
3079 			 * point so we can attempt recovery?  If
3080 			 * this is only used for dumps, I don't think
3081 			 * it is.
3082 			 */
3083 			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3084 		}
3085 	} else {
3086 		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3087 	}
3088 }
3089 
3090 /*
3091  * Schedule a peripheral driver to receive a ccb when it's
3092  * Schedule a peripheral driver to receive a ccb when its
3093  */
3094 void
3095 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3096 {
3097 	struct cam_ed *device;
3098 	int runq = 0;
3099 
3100 	mtx_assert(perph->sim->mtx, MA_OWNED);
3101 
3102 	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3103 	device = perph->path->device;
3104 	if (periph_is_queued(perph)) {
3105 		/* Simply reorder based on new priority */
3106 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3107 			  ("   change priority to %d\n", new_priority));
3108 		if (new_priority < perph->pinfo.priority) {
3109 			camq_change_priority(&device->drvq,
3110 					     perph->pinfo.index,
3111 					     new_priority);
3112 			runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3113 		}
3114 	} else {
3115 		/* New entry on the queue */
3116 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3117 			  ("   added periph to queue\n"));
3118 		perph->pinfo.priority = new_priority;
3119 		perph->pinfo.generation = ++device->drvq.generation;
3120 		camq_insert(&device->drvq, &perph->pinfo);
3121 		runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3122 	}
3123 	if (runq != 0) {
3124 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3125 			  ("   calling xpt_run_devq\n"));
3126 		xpt_run_dev_allocq(perph->path->bus);
3127 	}
3128 }
3129 
3130 
3131 /*
3132  * Schedule a device to run on a given queue.
3133  * If the device was inserted as a new entry on the queue,
3134  * return 1 meaning the device queue should be run. If we
3135  * were already queued, implying someone else has already
3136  * started the queue, return 0 so the caller doesn't attempt
3137  * to run the queue.
3138  */
3139 int
3140 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3141 		 u_int32_t new_priority)
3142 {
3143 	int retval;
3144 	u_int32_t old_priority;
3145 
3146 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3147 
3148 	old_priority = pinfo->priority;
3149 
3150 	/*
3151 	 * Are we already queued?
3152 	 */
3153 	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3154 		/* Simply reorder based on new priority */
3155 		if (new_priority < old_priority) {
3156 			camq_change_priority(queue, pinfo->index,
3157 					     new_priority);
3158 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3159 					("changed priority to %d\n",
3160 					 new_priority));
3161 			retval = 1;
3162 		} else
3163 			retval = 0;
3164 	} else {
3165 		/* New entry on the queue */
3166 		if (new_priority < old_priority)
3167 			pinfo->priority = new_priority;
3168 
3169 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3170 				("Inserting onto queue\n"));
3171 		pinfo->generation = ++queue->generation;
3172 		camq_insert(queue, pinfo);
3173 		retval = 1;
3174 	}
3175 	return (retval);
3176 }
3177 
3178 static void
3179 xpt_run_dev_allocq(struct cam_eb *bus)
3180 {
3181 	struct	cam_devq *devq;
3182 
3183 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3184 	devq = bus->sim->devq;
3185 
3186 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3187 			("   qfrozen_cnt == 0x%x, entries == %d, "
3188 			 "openings == %d, active == %d\n",
3189 			 devq->alloc_queue.qfrozen_cnt[0],
3190 			 devq->alloc_queue.entries,
3191 			 devq->alloc_openings,
3192 			 devq->alloc_active));
3193 
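	/*
	 * Hold the alloc queue frozen while draining it; a recursive call
	 * (e.g. from a periph_start() routine) then sees qfrozen_cnt > 1
	 * and leaves the work to this instance of the loop.
	 */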
3194 	devq->alloc_queue.qfrozen_cnt[0]++;
3195 	while ((devq->alloc_queue.entries > 0)
3196 	    && (devq->alloc_openings > 0)
3197 	    && (devq->alloc_queue.qfrozen_cnt[0] <= 1)) {
3198 		struct	cam_ed_qinfo *qinfo;
3199 		struct	cam_ed *device;
3200 		union	ccb *work_ccb;
3201 		struct	cam_periph *drv;
3202 		struct	camq *drvq;
3203 
3204 		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3205 							   CAMQ_HEAD);
3206 		device = qinfo->device;
3207 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3208 				("running device %p\n", device));
3209 
3210 		drvq = &device->drvq;
3211 
3212 #ifdef CAMDEBUG
3213 		if (drvq->entries <= 0) {
3214 			panic("xpt_run_dev_allocq: "
3215 			      "Device on queue without any work to do");
3216 		}
3217 #endif
3218 		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3219 			devq->alloc_openings--;
3220 			devq->alloc_active++;
3221 			drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3222 			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3223 				      drv->pinfo.priority);
3224 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3225 					("calling periph start\n"));
3226 			drv->periph_start(drv, work_ccb);
3227 		} else {
3228 			/*
3229 			 * Malloc failure in alloc_ccb
3230 			 */
3231 			/*
3232 			 * XXX add us to a list to be run from free_ccb
3233 			 * if we don't have any ccbs active on this
3234 			 * device queue; otherwise we may never get run
3235 			 * again.
3236 			 */
3237 			break;
3238 		}
3239 
3240 		/* We may have more work. Attempt to reschedule. */
3241 		xpt_schedule_dev_allocq(bus, device);
3242 	}
3243 	devq->alloc_queue.qfrozen_cnt[0]--;
3244 }
3245 
3246 static void
3247 xpt_run_dev_sendq(struct cam_eb *bus)
3248 {
3249 	struct	cam_devq *devq;
3250 
3251 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3252 
3253 	devq = bus->sim->devq;
3254 
3255 	devq->send_queue.qfrozen_cnt[0]++;
3256 	while ((devq->send_queue.entries > 0)
3257 	    && (devq->send_openings > 0)
3258 	    && (devq->send_queue.qfrozen_cnt[0] <= 1)) {
3259 		struct	cam_ed_qinfo *qinfo;
3260 		struct	cam_ed *device;
3261 		union ccb *work_ccb;
3262 		struct	cam_sim *sim;
3263 
3264 		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3265 							   CAMQ_HEAD);
3266 		device = qinfo->device;
3267 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3268 				("running device %p\n", device));
3269 
3270 		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3271 		if (work_ccb == NULL) {
3272 			printf("device on run queue with no ccbs???\n");
3273 			continue;
3274 		}
3275 
3276 		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3277 
3278 			mtx_lock(&xsoftc.xpt_lock);
3279 		 	if (xsoftc.num_highpower <= 0) {
3280 				/*
3281 				 * We got a high power command, but we
3282 				 * don't have any available slots.  Freeze
3283 				 * the device queue until we have a slot
3284 				 * available.
3285 				 */
3286 				xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3287 				STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
3288 						   &work_ccb->ccb_h,
3289 						   xpt_links.stqe);
3290 
3291 				mtx_unlock(&xsoftc.xpt_lock);
3292 				continue;
3293 			} else {
3294 				/*
3295 				 * Consume a high power slot while
3296 				 * this ccb runs.
3297 				 */
3298 				xsoftc.num_highpower--;
3299 			}
3300 			mtx_unlock(&xsoftc.xpt_lock);
3301 		}
3302 		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3303 		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3304 
3305 		devq->send_openings--;
3306 		devq->send_active++;
3307 
3308 		xpt_schedule_dev_sendq(bus, device);
3309 
3310 		if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3311 			/*
3312 			 * The client wants to freeze the queue
3313 			 * after this CCB is sent.
3314 			 */
3315 			xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3316 		}
3317 
3318 		/* In Target mode, the peripheral driver knows best... */
3319 		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3320 			if ((device->inq_flags & SID_CmdQue) != 0
3321 			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3322 				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3323 			else
3324 				/*
3325 				 * Clear this in case of a retried CCB that
3326 				 * failed due to a rejected tag.
3327 				 */
3328 				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3329 		}
3330 
3331 		/*
3332 		 * Device queues can be shared among multiple sim instances
3333 		 * that reside on different busses.  Use the SIM in the queue
3334 		 * CCB's path, rather than the one in the bus that was passed
3335 		 * into this function.
3336 		 */
3337 		sim = work_ccb->ccb_h.path->bus->sim;
3338 		(*(sim->sim_action))(sim, work_ccb);
3339 	}
3340 	devq->send_queue.qfrozen_cnt[0]--;
3341 }
3342 
3343 /*
3344  * This function merges fields from the slave ccb into the master ccb, while
3345  * keeping important fields in the master ccb constant.
3346  */
3347 void
3348 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3349 {
3350 
3351 	/*
3352 	 * Pull fields that are valid for peripheral drivers to set
3353 	 * into the master CCB along with the CCB "payload".
3354 	 */
3355 	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3356 	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3357 	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3358 	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
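	/*
	 * &(&ccb_h)[1] is the first byte past the header, so this copies
	 * the CCB payload wholesale while leaving the rest of the master's
	 * header (path, priority, status) untouched.
	 */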
3359 	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3360 	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3361 }
3362 
3363 void
3364 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3365 {
3366 
3367 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3368 	ccb_h->pinfo.priority = priority;
3369 	ccb_h->path = path;
3370 	ccb_h->path_id = path->bus->path_id;
3371 	if (path->target)
3372 		ccb_h->target_id = path->target->target_id;
3373 	else
3374 		ccb_h->target_id = CAM_TARGET_WILDCARD;
3375 	if (path->device) {
3376 		ccb_h->target_lun = path->device->lun_id;
3377 		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3378 	} else {
3379 		ccb_h->target_lun = CAM_TARGET_WILDCARD;
3380 	}
3381 	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3382 	ccb_h->flags = 0;
3383 }
3384 
3385 /* Path manipulation functions */
3386 cam_status
3387 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3388 		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3389 {
3390 	struct	   cam_path *path;
3391 	cam_status status;
3392 
3393 	path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
3394 
3395 	if (path == NULL) {
3396 		status = CAM_RESRC_UNAVAIL;
3397 		return(status);
3398 	}
3399 	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3400 	if (status != CAM_REQ_CMP) {
3401 		free(path, M_CAMXPT);
3402 		path = NULL;
3403 	}
3404 	*new_path_ptr = path;
3405 	return (status);
3406 }
3407 
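/*
 * Illustrative sketch (not part of the original source): the typical
 * create/use/free life cycle for a path obtained from the function above,
 * assuming the caller already holds the relevant SIM lock (see
 * xpt_create_path_unlocked() below otherwise).  path_id, target_id and
 * lun_id stand in for values the caller already has.
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id, lun_id) !=
 *	    CAM_REQ_CMP)
 *		return;
 *	xpt_print(path, "probing\n");
 *	xpt_free_path(path);
 */
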
3408 cam_status
3409 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3410 			 struct cam_periph *periph, path_id_t path_id,
3411 			 target_id_t target_id, lun_id_t lun_id)
3412 {
3413 	struct	   cam_path *path;
3414 	struct	   cam_eb *bus = NULL;
3415 	cam_status status;
3416 	int	   need_unlock = 0;
3417 
3418 	path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_WAITOK);
3419 
3420 	if (path_id != CAM_BUS_WILDCARD) {
3421 		bus = xpt_find_bus(path_id);
3422 		if (bus != NULL) {
3423 			need_unlock = 1;
3424 			CAM_SIM_LOCK(bus->sim);
3425 		}
3426 	}
3427 	status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
3428 	if (need_unlock)
3429 		CAM_SIM_UNLOCK(bus->sim);
3430 	if (status != CAM_REQ_CMP) {
3431 		free(path, M_CAMXPT);
3432 		path = NULL;
3433 	}
3434 	*new_path_ptr = path;
3435 	return (status);
3436 }
3437 
3438 cam_status
3439 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3440 		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3441 {
3442 	struct	     cam_eb *bus;
3443 	struct	     cam_et *target;
3444 	struct	     cam_ed *device;
3445 	cam_status   status;
3446 
3447 	status = CAM_REQ_CMP;	/* Completed without error */
3448 	target = NULL;		/* Wildcarded */
3449 	device = NULL;		/* Wildcarded */
3450 
3451 	/*
3452 	 * We will potentially modify the EDT, so block interrupts
3453 	 * that may attempt to create cam paths.
3454 	 */
3455 	bus = xpt_find_bus(path_id);
3456 	if (bus == NULL) {
3457 		status = CAM_PATH_INVALID;
3458 	} else {
3459 		target = xpt_find_target(bus, target_id);
3460 		if (target == NULL) {
3461 			/* Create one */
3462 			struct cam_et *new_target;
3463 
3464 			new_target = xpt_alloc_target(bus, target_id);
3465 			if (new_target == NULL) {
3466 				status = CAM_RESRC_UNAVAIL;
3467 			} else {
3468 				target = new_target;
3469 			}
3470 		}
3471 		if (target != NULL) {
3472 			device = xpt_find_device(target, lun_id);
3473 			if (device == NULL) {
3474 				/* Create one */
3475 				struct cam_ed *new_device;
3476 
3477 				new_device =
3478 				    (*(bus->xport->alloc_device))(bus,
3479 								      target,
3480 								      lun_id);
3481 				if (new_device == NULL) {
3482 					status = CAM_RESRC_UNAVAIL;
3483 				} else {
3484 					device = new_device;
3485 				}
3486 			}
3487 		}
3488 	}
3489 
3490 	/*
3491 	 * Only touch the user's data if we are successful.
3492 	 */
3493 	if (status == CAM_REQ_CMP) {
3494 		new_path->periph = perph;
3495 		new_path->bus = bus;
3496 		new_path->target = target;
3497 		new_path->device = device;
3498 		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3499 	} else {
3500 		if (device != NULL)
3501 			xpt_release_device(device);
3502 		if (target != NULL)
3503 			xpt_release_target(target);
3504 		if (bus != NULL)
3505 			xpt_release_bus(bus);
3506 	}
3507 	return (status);
3508 }
3509 
3510 void
3511 xpt_release_path(struct cam_path *path)
3512 {
3513 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3514 	if (path->device != NULL) {
3515 		xpt_release_device(path->device);
3516 		path->device = NULL;
3517 	}
3518 	if (path->target != NULL) {
3519 		xpt_release_target(path->target);
3520 		path->target = NULL;
3521 	}
3522 	if (path->bus != NULL) {
3523 		xpt_release_bus(path->bus);
3524 		path->bus = NULL;
3525 	}
3526 }
3527 
3528 void
3529 xpt_free_path(struct cam_path *path)
3530 {
3531 
3532 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3533 	xpt_release_path(path);
3534 	free(path, M_CAMXPT);
3535 }
3536 
3537 
3538 /*
3539  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3540  * in path1, 2 for match with wildcards in path2.
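 * For example, a fully wildcarded path1 compared against any concrete
 * path2 yields 1.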
3541  */
3542 int
3543 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3544 {
3545 	int retval = 0;
3546 
3547 	if (path1->bus != path2->bus) {
3548 		if (path1->bus->path_id == CAM_BUS_WILDCARD)
3549 			retval = 1;
3550 		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3551 			retval = 2;
3552 		else
3553 			return (-1);
3554 	}
3555 	if (path1->target != path2->target) {
3556 		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3557 			if (retval == 0)
3558 				retval = 1;
3559 		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3560 			retval = 2;
3561 		else
3562 			return (-1);
3563 	}
3564 	if (path1->device != path2->device) {
3565 		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3566 			if (retval == 0)
3567 				retval = 1;
3568 		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3569 			retval = 2;
3570 		else
3571 			return (-1);
3572 	}
3573 	return (retval);
3574 }
3575 
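/*
 * Print the "(periph:sim:bus:target:lun): " prefix for a path, e.g.
 * "(da0:ahc0:0:3:0): " (illustrative values); absent components print
 * as "noperiph", "nobus" or "X".
 */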
3576 void
3577 xpt_print_path(struct cam_path *path)
3578 {
3579 
3580 	if (path == NULL)
3581 		printf("(nopath): ");
3582 	else {
3583 		if (path->periph != NULL)
3584 			printf("(%s%d:", path->periph->periph_name,
3585 			       path->periph->unit_number);
3586 		else
3587 			printf("(noperiph:");
3588 
3589 		if (path->bus != NULL)
3590 			printf("%s%d:%d:", path->bus->sim->sim_name,
3591 			       path->bus->sim->unit_number,
3592 			       path->bus->sim->bus_id);
3593 		else
3594 			printf("nobus:");
3595 
3596 		if (path->target != NULL)
3597 			printf("%d:", path->target->target_id);
3598 		else
3599 			printf("X:");
3600 
3601 		if (path->device != NULL)
3602 			printf("%d): ", path->device->lun_id);
3603 		else
3604 			printf("X): ");
3605 	}
3606 }
3607 
3608 void
3609 xpt_print(struct cam_path *path, const char *fmt, ...)
3610 {
3611 	va_list ap;
3612 	xpt_print_path(path);
3613 	va_start(ap, fmt);
3614 	vprintf(fmt, ap);
3615 	va_end(ap);
3616 }
3617 
3618 int
3619 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3620 {
3621 	struct sbuf sb;
3622 
3623 #ifdef INVARIANTS
3624 	if (path != NULL && path->bus != NULL)
3625 		mtx_assert(path->bus->sim->mtx, MA_OWNED);
3626 #endif
3627 
3628 	sbuf_new(&sb, str, str_len, 0);
3629 
3630 	if (path == NULL)
3631 		sbuf_printf(&sb, "(nopath): ");
3632 	else {
3633 		if (path->periph != NULL)
3634 			sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
3635 				    path->periph->unit_number);
3636 		else
3637 			sbuf_printf(&sb, "(noperiph:");
3638 
3639 		if (path->bus != NULL)
3640 			sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
3641 				    path->bus->sim->unit_number,
3642 				    path->bus->sim->bus_id);
3643 		else
3644 			sbuf_printf(&sb, "nobus:");
3645 
3646 		if (path->target != NULL)
3647 			sbuf_printf(&sb, "%d:", path->target->target_id);
3648 		else
3649 			sbuf_printf(&sb, "X:");
3650 
3651 		if (path->device != NULL)
3652 			sbuf_printf(&sb, "%d): ", path->device->lun_id);
3653 		else
3654 			sbuf_printf(&sb, "X): ");
3655 	}
3656 	sbuf_finish(&sb);
3657 
3658 	return (sbuf_len(&sb));
3659 }
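
/*
 * A minimal usage sketch: formatting the path prefix into a local
 * buffer rather than printing it directly.  The caller must hold the
 * SIM lock (asserted under INVARIANTS above); the buffer size here is
 * an arbitrary example value.
 */
#if 0
	char pathstr[64];

	xpt_path_string(path, pathstr, sizeof(pathstr));
	printf("%sdevice is ready\n", pathstr);
#endif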
3660 
3661 path_id_t
3662 xpt_path_path_id(struct cam_path *path)
3663 {
3664 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
3665 
3666 	return (path->bus->path_id);
3667 }
3668 
3669 target_id_t
3670 xpt_path_target_id(struct cam_path *path)
3671 {
3672 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
3673 
3674 	if (path->target != NULL)
3675 		return (path->target->target_id);
3676 	else
3677 		return (CAM_TARGET_WILDCARD);
3678 }
3679 
3680 lun_id_t
3681 xpt_path_lun_id(struct cam_path *path)
3682 {
3683 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
3684 
3685 	if (path->device != NULL)
3686 		return (path->device->lun_id);
3687 	else
3688 		return (CAM_LUN_WILDCARD);
3689 }
3690 
3691 struct cam_sim *
3692 xpt_path_sim(struct cam_path *path)
3693 {
3694 
3695 	return (path->bus->sim);
3696 }
3697 
3698 struct cam_periph*
3699 xpt_path_periph(struct cam_path *path)
3700 {
3701 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
3702 
3703 	return (path->periph);
3704 }
3705 
3706 /*
3707  * Release a CAM control block for the caller.  Remit the cost of the structure
3708  * to the device referenced by the path.  If this device had no 'credits'
3709  * and peripheral drivers have registered async callbacks for this
3710  * notification, call them now.
3711  */
3712 void
3713 xpt_release_ccb(union ccb *free_ccb)
3714 {
3715 	struct	 cam_path *path;
3716 	struct	 cam_ed *device;
3717 	struct	 cam_eb *bus;
3718 	struct   cam_sim *sim;
3719 
3720 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3721 	path = free_ccb->ccb_h.path;
3722 	device = path->device;
3723 	bus = path->bus;
3724 	sim = bus->sim;
3725 
3726 	mtx_assert(sim->mtx, MA_OWNED);
3727 
3728 	cam_ccbq_release_opening(&device->ccbq);
3729 	if (device->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) {
3730 		device->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
3731 		cam_ccbq_resize(&device->ccbq,
3732 		    device->ccbq.dev_openings + device->ccbq.dev_active);
3733 	}
3734 	if (sim->ccb_count > sim->max_ccbs) {
3735 		xpt_free_ccb(free_ccb);
3736 		sim->ccb_count--;
3737 	} else {
3738 		SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
3739 		    xpt_links.sle);
3740 	}
3741 	if (sim->devq == NULL) {
3742 		return;
3743 	}
3744 	sim->devq->alloc_openings++;
3745 	sim->devq->alloc_active--;
3746 	if (device_is_alloc_queued(device) == 0)
3747 		xpt_schedule_dev_allocq(bus, device);
3748 	xpt_run_dev_allocq(bus);
3749 }
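
/*
 * A minimal usage sketch: a peripheral driver's completion callback
 * returning its CCB here once it has examined the status.  The
 * periph_done_example name is hypothetical.
 */
#if 0
static void
periph_done_example(struct cam_periph *periph, union ccb *done_ccb)
{
	/* ... interpret done_ccb->ccb_h.status ... */
	xpt_release_ccb(done_ccb);
}
#endif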
3750 
3751 /* Functions accessed by SIM drivers */
3752 
3753 static struct xpt_xport xport_default = {
3754 	.alloc_device = xpt_alloc_device_default,
3755 	.action = xpt_action_default,
3756 	.async = xpt_dev_async_default,
3757 };
3758 
3759 /*
3760  * A sim structure, listing the SIM entry points and instance
3761  * identification info is passed to xpt_bus_register to hook the SIM
3762  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3763  * for this new bus and places it in the array of busses and assigns
3764  * it a path_id.  The path_id may be influenced by "hard wiring"
3765  * information specified by the user.  Once interrupt services are
3766  * available, the bus will be probed.
3767  */
3768 int32_t
3769 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
3770 {
3771 	struct cam_eb *new_bus;
3772 	struct cam_eb *old_bus;
3773 	struct ccb_pathinq cpi;
3774 	struct cam_path *path;
3775 	cam_status status;
3776 
3777 	mtx_assert(sim->mtx, MA_OWNED);
3778 
3779 	sim->bus_id = bus;
3780 	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
3781 					  M_CAMXPT, M_NOWAIT);
3782 	if (new_bus == NULL) {
3783 		/* Couldn't satisfy request */
3784 		return (CAM_RESRC_UNAVAIL);
3785 	}
3786 	path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
3787 	if (path == NULL) {
3788 		free(new_bus, M_CAMXPT);
3789 		return (CAM_RESRC_UNAVAIL);
3790 	}
3791 
3792 	if (strcmp(sim->sim_name, "xpt") != 0) {
3793 		sim->path_id =
3794 		    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
3795 	}
3796 
3797 	TAILQ_INIT(&new_bus->et_entries);
3798 	new_bus->path_id = sim->path_id;
3799 	cam_sim_hold(sim);
3800 	new_bus->sim = sim;
3801 	timevalclear(&new_bus->last_reset);
3802 	new_bus->flags = 0;
3803 	new_bus->refcount = 1;	/* Held until a bus_deregister event */
3804 	new_bus->generation = 0;
3805 
3806 	mtx_lock(&xsoftc.xpt_topo_lock);
3807 	old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3808 	while (old_bus != NULL
3809 	    && old_bus->path_id < new_bus->path_id)
3810 		old_bus = TAILQ_NEXT(old_bus, links);
3811 	if (old_bus != NULL)
3812 		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
3813 	else
3814 		TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
3815 	xsoftc.bus_generation++;
3816 	mtx_unlock(&xsoftc.xpt_topo_lock);
3817 
3818 	/*
3819 	 * Set a default transport so that a PATH_INQ can be issued to
3820 	 * the SIM.  This will then allow for probing and attaching of
3821 	 * a more appropriate transport.
3822 	 */
3823 	new_bus->xport = &xport_default;
3824 
3825 	status = xpt_compile_path(path, /*periph*/NULL, sim->path_id,
3826 				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3827 	if (status != CAM_REQ_CMP)
3828 		printf("xpt_compile_path returned %d\n", status);
3829 
3830 	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
3831 	cpi.ccb_h.func_code = XPT_PATH_INQ;
3832 	xpt_action((union ccb *)&cpi);
3833 
3834 	if (cpi.ccb_h.status == CAM_REQ_CMP) {
3835 		switch (cpi.transport) {
3836 		case XPORT_SPI:
3837 		case XPORT_SAS:
3838 		case XPORT_FC:
3839 		case XPORT_USB:
3840 		case XPORT_ISCSI:
3841 		case XPORT_PPB:
3842 			new_bus->xport = scsi_get_xport();
3843 			break;
3844 		case XPORT_ATA:
3845 		case XPORT_SATA:
3846 			new_bus->xport = ata_get_xport();
3847 			break;
3848 		default:
3849 			new_bus->xport = &xport_default;
3850 			break;
3851 		}
3852 	}
3853 
3854 	/* Notify interested parties */
3855 	if (sim->path_id != CAM_XPT_PATH_ID) {
3856 		union	ccb *scan_ccb;
3857 
3858 		xpt_async(AC_PATH_REGISTERED, path, &cpi);
3859 		/* Initiate bus rescan. */
3860 		scan_ccb = xpt_alloc_ccb_nowait();
3861 		scan_ccb->ccb_h.path = path;
3862 		scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
3863 		scan_ccb->crcn.flags = 0;
3864 		xpt_rescan(scan_ccb);
3865 	} else
3866 		xpt_free_path(path);
3867 	return (CAM_SUCCESS);
3868 }
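
/*
 * A sketch of that sequence as a SIM driver's attach routine might
 * perform it.  The example_* names, softc, and queue depths are
 * hypothetical, and error unwinding is elided for brevity.
 */
#if 0
	struct cam_devq *devq;
	struct cam_sim *sim;

	/* Reserve queue space for the devices on this bus. */
	devq = cam_simq_alloc(/*max_sim_transactions*/32);
	sim = cam_sim_alloc(example_action, example_poll, "example",
	    softc, device_get_unit(dev), &softc->mtx,
	    /*max_dev_transactions*/1,
	    /*max_tagged_dev_transactions*/32, devq);
	mtx_lock(&softc->mtx);
	if (xpt_bus_register(sim, dev, /*bus*/0) != CAM_SUCCESS) {
		/* ... tear down sim and devq ... */
	}
	mtx_unlock(&softc->mtx);
#endif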
3869 
3870 int32_t
3871 xpt_bus_deregister(path_id_t pathid)
3872 {
3873 	struct cam_path bus_path;
3874 	cam_status status;
3875 
3876 	status = xpt_compile_path(&bus_path, NULL, pathid,
3877 				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3878 	if (status != CAM_REQ_CMP)
3879 		return (status);
3880 
3881 	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
3882 	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
3883 
3884 	/* Release the reference count held while registered. */
3885 	xpt_release_bus(bus_path.bus);
3886 	xpt_release_path(&bus_path);
3887 
3888 	return (CAM_REQ_CMP);
3889 }
3890 
3891 static path_id_t
3892 xptnextfreepathid(void)
3893 {
3894 	struct cam_eb *bus;
3895 	path_id_t pathid;
3896 	const char *strval;
3897 
3898 	pathid = 0;
3899 	mtx_lock(&xsoftc.xpt_topo_lock);
3900 	bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3901 retry:
3902 	/* Find an unoccupied pathid */
3903 	while (bus != NULL && bus->path_id <= pathid) {
3904 		if (bus->path_id == pathid)
3905 			pathid++;
3906 		bus = TAILQ_NEXT(bus, links);
3907 	}
3908 	mtx_unlock(&xsoftc.xpt_topo_lock);
3909 
3910 	/*
3911 	 * Ensure that this pathid is not reserved for
3912 	 * a bus that may be registered in the future.
3913 	 */
3914 	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
3915 		++pathid;
3916 		/* Start the search over */
3917 		mtx_lock(&xsoftc.xpt_topo_lock);
3918 		goto retry;
3919 	}
3920 	return (pathid);
3921 }
3922 
3923 static path_id_t
3924 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
3925 {
3926 	path_id_t pathid;
3927 	int i, dunit, val;
3928 	char buf[32];
3929 	const char *dname;
3930 
3931 	pathid = CAM_XPT_PATH_ID;
3932 	snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
3933 	i = 0;
3934 	while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
3935 		if (strcmp(dname, "scbus")) {
3936 			/* Avoid a bit of foot shooting. */
3937 			continue;
3938 		}
3939 		if (dunit < 0)		/* unwired?! */
3940 			continue;
3941 		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
3942 			if (sim_bus == val) {
3943 				pathid = dunit;
3944 				break;
3945 			}
3946 		} else if (sim_bus == 0) {
3947 			/* Unspecified matches bus 0 */
3948 			pathid = dunit;
3949 			break;
3950 		} else {
3951 			printf("Ambiguous scbus configuration for %s%d "
3952 			       "bus %d, cannot wire down.  The kernel "
3953 			       "config entry for scbus%d should "
3954 			       "specify a controller bus.\n"
3955 			       "Scbus will be assigned dynamically.\n",
3956 			       sim_name, sim_unit, sim_bus, dunit);
3957 			break;
3958 		}
3959 	}
3960 
3961 	if (pathid == CAM_XPT_PATH_ID)
3962 		pathid = xptnextfreepathid();
3963 	return (pathid);
3964 }
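
/*
 * The "hard wiring" consulted above comes from the kernel hints.  As
 * a sketch (device and unit values hypothetical), these device.hints
 * entries would pin scbus0 to bus 0 of ahc0:
 *
 *	hint.scbus.0.at="ahc0"
 *	hint.scbus.0.bus="0"
 *
 * An "at" entry without a "bus" value matches controller bus 0 only,
 * per the logic in xptpathid() above.
 */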
3965 
3966 void
3967 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
3968 {
3969 	struct cam_eb *bus;
3970 	struct cam_et *target, *next_target;
3971 	struct cam_ed *device, *next_device;
3972 
3973 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
3974 
3975 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
3976 
3977 	/*
3978 	 * Most async events come from a CAM interrupt context.  In
3979 	 * a few cases, the error recovery code at the peripheral layer,
3980 	 * which may run from our SWI or a process context, may signal
3981 	 * deferred events with a call to xpt_async.
3982 	 */
3983 
3984 	bus = path->bus;
3985 
3986 	if (async_code == AC_BUS_RESET) {
3987 		/* Update our notion of when the last reset occurred */
3988 		microtime(&bus->last_reset);
3989 	}
3990 
3991 	for (target = TAILQ_FIRST(&bus->et_entries);
3992 	     target != NULL;
3993 	     target = next_target) {
3994 
3995 		next_target = TAILQ_NEXT(target, links);
3996 
3997 		if (path->target != target
3998 		 && path->target->target_id != CAM_TARGET_WILDCARD
3999 		 && target->target_id != CAM_TARGET_WILDCARD)
4000 			continue;
4001 
4002 		if (async_code == AC_SENT_BDR) {
4003 			/* Update our notion of when the last reset occurred */
4004 			microtime(&path->target->last_reset);
4005 		}
4006 
4007 		for (device = TAILQ_FIRST(&target->ed_entries);
4008 		     device != NULL;
4009 		     device = next_device) {
4010 
4011 			next_device = TAILQ_NEXT(device, links);
4012 
4013 			if (path->device != device
4014 			 && path->device->lun_id != CAM_LUN_WILDCARD
4015 			 && device->lun_id != CAM_LUN_WILDCARD)
4016 				continue;
4017 			/*
4018 			 * The async callback could free the device.
4019 			 * If it is a broadcast async, it doesn't hold
4020 			 * If it is a broadcast async, it doesn't hold a
4021 			 * device reference, so take our own reference.
4022 			xpt_acquire_device(device);
4023 			(*(bus->xport->async))(async_code, bus,
4024 					       target, device,
4025 					       async_arg);
4026 
4027 			xpt_async_bcast(&device->asyncs, async_code,
4028 					path, async_arg);
4029 			xpt_release_device(device);
4030 		}
4031 	}
4032 
4033 	/*
4034 	 * If this wasn't a fully wildcarded async, tell all
4035 	 * clients that want all async events.
4036 	 */
4037 	if (bus != xpt_periph->path->bus)
4038 		xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4039 				path, async_arg);
4040 }
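
/*
 * A minimal usage sketch: announcing a bus reset for a whole bus, as
 * a SIM's recovery code might.  sim_bus_reset_example is hypothetical;
 * the SIM lock is held, as asserted above.
 */
#if 0
static void
sim_bus_reset_example(struct cam_sim *sim)
{
	struct cam_path *path;

	/* Wildcard target and lun cover every device on the bus. */
	if (xpt_create_path(&path, /*periph*/NULL, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)
		return;
	xpt_async(AC_BUS_RESET, path, NULL);
	xpt_free_path(path);
}
#endif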
4041 
4042 static void
4043 xpt_async_bcast(struct async_list *async_head,
4044 		u_int32_t async_code,
4045 		struct cam_path *path, void *async_arg)
4046 {
4047 	struct async_node *cur_entry;
4048 
4049 	cur_entry = SLIST_FIRST(async_head);
4050 	while (cur_entry != NULL) {
4051 		struct async_node *next_entry;
4052 		/*
4053 		 * Grab the next list entry before we call the current
4054 		 * entry's callback.  This is because the callback function
4055 		 * can delete its async callback entry.
4056 		 */
4057 		next_entry = SLIST_NEXT(cur_entry, links);
4058 		if ((cur_entry->event_enable & async_code) != 0)
4059 			cur_entry->callback(cur_entry->callback_arg,
4060 					    async_code, path,
4061 					    async_arg);
4062 		cur_entry = next_entry;
4063 	}
4064 }
4065 
4066 static void
4067 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
4068 		      struct cam_et *target, struct cam_ed *device,
4069 		      void *async_arg)
4070 {
4071 	printf("xpt_dev_async called\n");
4072 }
4073 
4074 u_int32_t
4075 xpt_freeze_devq_rl(struct cam_path *path, cam_rl rl, u_int count)
4076 {
4077 	struct cam_ed *dev = path->device;
4078 
4079 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
4080 	dev->sim->devq->alloc_openings +=
4081 	    cam_ccbq_freeze(&dev->ccbq, rl, count);
4082 	/* Remove frozen device from allocq. */
4083 	if (device_is_alloc_queued(dev) &&
4084 	    cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
4085 	     CAMQ_GET_PRIO(&dev->drvq)))) {
4086 		camq_remove(&dev->sim->devq->alloc_queue,
4087 		    dev->alloc_ccb_entry.pinfo.index);
4088 	}
4089 	/* Remove frozen device from sendq. */
4090 	if (device_is_send_queued(dev) &&
4091 	    cam_ccbq_frozen_top(&dev->ccbq)) {
4092 		camq_remove(&dev->sim->devq->send_queue,
4093 		    dev->send_ccb_entry.pinfo.index);
4094 	}
4095 	return (dev->ccbq.queue.qfrozen_cnt[rl]);
4096 }
4097 
4098 u_int32_t
4099 xpt_freeze_devq(struct cam_path *path, u_int count)
4100 {
4101 
4102 	return (xpt_freeze_devq_rl(path, 0, count));
4103 }
4104 
4105 u_int32_t
4106 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4107 {
4108 
4109 	mtx_assert(sim->mtx, MA_OWNED);
4110 	sim->devq->send_queue.qfrozen_cnt[0] += count;
4111 	return (sim->devq->send_queue.qfrozen_cnt[0]);
4112 }
4113 
4114 static void
4115 xpt_release_devq_timeout(void *arg)
4116 {
4117 	struct cam_ed *device;
4118 
4119 	device = (struct cam_ed *)arg;
4120 
4121 	xpt_release_devq_device(device, /*rl*/0, /*count*/1, /*run_queue*/TRUE);
4122 }
4123 
4124 void
4125 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4126 {
4127 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
4128 
4129 	xpt_release_devq_device(path->device, /*rl*/0, count, run_queue);
4130 }
4131 
4132 void
4133 xpt_release_devq_rl(struct cam_path *path, cam_rl rl, u_int count, int run_queue)
4134 {
4135 	mtx_assert(path->bus->sim->mtx, MA_OWNED);
4136 
4137 	xpt_release_devq_device(path->device, rl, count, run_queue);
4138 }
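
/*
 * A minimal usage sketch of the freeze/release pairing: an error
 * handler holds off new I/O to a device while it recovers.  Every
 * xpt_freeze_devq() must eventually be balanced by a release.
 */
#if 0
	xpt_freeze_devq(path, /*count*/1);
	/* ... issue and complete recovery commands ... */
	xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
#endif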
4139 
4140 static void
4141 xpt_release_devq_device(struct cam_ed *dev, cam_rl rl, u_int count, int run_queue)
4142 {
4143 
4144 	if (count > dev->ccbq.queue.qfrozen_cnt[rl]) {
4145 #ifdef INVARIANTS
4146 		printf("xpt_release_devq(%d): requested %u > present %u\n",
4147 		    rl, count, dev->ccbq.queue.qfrozen_cnt[rl]);
4148 #endif
4149 		count = dev->ccbq.queue.qfrozen_cnt[rl];
4150 	}
4151 	dev->sim->devq->alloc_openings -=
4152 	    cam_ccbq_release(&dev->ccbq, rl, count);
4153 	if (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
4154 	    CAMQ_GET_PRIO(&dev->drvq))) == 0) {
4155 		if (xpt_schedule_dev_allocq(dev->target->bus, dev))
4156 			xpt_run_dev_allocq(dev->target->bus);
4157 	}
4158 	if (cam_ccbq_frozen_top(&dev->ccbq) == 0) {
4159 		/*
4160 		 * No longer need to wait for a successful
4161 		 * command completion.
4162 		 */
4163 		dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4164 		/*
4165 		 * Remove any timeouts that might be scheduled
4166 		 * to release this queue.
4167 		 */
4168 		if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4169 			callout_stop(&dev->callout);
4170 			dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4171 		}
4172 		if (run_queue == 0)
4173 			return;
4174 		/*
4175 		 * Now that we are unfrozen schedule the
4176 		 * device so any pending transactions are
4177 		 * run.
4178 		 */
4179 		if (xpt_schedule_dev_sendq(dev->target->bus, dev))
4180 			xpt_run_dev_sendq(dev->target->bus);
4181 	}
4182 }
4183 
4184 void
4185 xpt_release_simq(struct cam_sim *sim, int run_queue)
4186 {
4187 	struct	camq *sendq;
4188 
4189 	mtx_assert(sim->mtx, MA_OWNED);
4190 	sendq = &(sim->devq->send_queue);
4191 	if (sendq->qfrozen_cnt[0] <= 0) {
4192 #ifdef INVARIANTS
4193 		printf("xpt_release_simq: requested 1 > present %u\n",
4194 		    sendq->qfrozen_cnt[0]);
4195 #endif
4196 	} else
4197 		sendq->qfrozen_cnt[0]--;
4198 	if (sendq->qfrozen_cnt[0] == 0) {
4199 		/*
4200 		 * If there is a timeout scheduled to release this
4201 		 * sim queue, remove it.  The queue frozen count is
4202 		 * already at 0.
4203 		 */
4204 		if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0) {
4205 			callout_stop(&sim->callout);
4206 			sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4207 		}
4208 		if (run_queue) {
4209 			struct cam_eb *bus;
4210 
4211 			/*
4212 			 * Now that we are unfrozen run the send queue.
4213 			 */
4214 			bus = xpt_find_bus(sim->path_id);
4215 			xpt_run_dev_sendq(bus);
4216 			xpt_release_bus(bus);
4217 		}
4218 	}
4219 }
4220 
4221 /*
4222  * XXX Appears to be unused.
4223  */
4224 static void
4225 xpt_release_simq_timeout(void *arg)
4226 {
4227 	struct cam_sim *sim;
4228 
4229 	sim = (struct cam_sim *)arg;
4230 	xpt_release_simq(sim, /* run_queue */ TRUE);
4231 }
4232 
4233 void
4234 xpt_done(union ccb *done_ccb)
4235 {
4236 	struct cam_sim *sim;
4237 	int	first;
4238 
4239 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4240 	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4241 		/*
4242 		 * Queue up any of the "non-immediate" types of ccbs
4243 		 * for handling by our SWI handler.
4244 		 */
4245 		sim = done_ccb->ccb_h.path->bus->sim;
4246 		TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
4247 		    sim_links.tqe);
4248 		done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4249 		if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
4250 			mtx_lock(&cam_simq_lock);
4251 			first = TAILQ_EMPTY(&cam_simq);
4252 			TAILQ_INSERT_TAIL(&cam_simq, sim, links);
4253 			mtx_unlock(&cam_simq_lock);
4254 			sim->flags |= CAM_SIM_ON_DONEQ;
4255 			if (first)
4256 				swi_sched(cambio_ih, 0);
4257 		}
4258 	}
4259 }
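
/*
 * A minimal usage sketch: a SIM completing an I/O from its interrupt
 * handler.  Setting a final status before calling xpt_done() is the
 * SIM's responsibility; the CCB is then queued for the SWI above.
 * sim_complete_example is hypothetical.
 */
#if 0
static void
sim_complete_example(union ccb *ccb)
{
	ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
}
#endif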
4260 
4261 union ccb *
4262 xpt_alloc_ccb(void)
4263 {
4264 	union ccb *new_ccb;
4265 
4266 	new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_WAITOK);
4267 	return (new_ccb);
4268 }
4269 
4270 union ccb *
4271 xpt_alloc_ccb_nowait(void)
4272 {
4273 	union ccb *new_ccb;
4274 
4275 	new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_NOWAIT);
4276 	return (new_ccb);
4277 }
4278 
4279 void
4280 xpt_free_ccb(union ccb *free_ccb)
4281 {
4282 	free(free_ccb, M_CAMXPT);
4283 }
4284 
4285 
4286 
4287 /* Private XPT functions */
4288 
4289 /*
4290  * Get a CAM control block for the caller. Charge the structure to the device
4291  * referenced by the path.  If this device has no 'credits', then the
4292  * device already has the maximum number of outstanding operations under way
4293  * and we return NULL. If we don't have sufficient resources to allocate more
4294  * ccbs, we also return NULL.
4295  */
4296 static union ccb *
4297 xpt_get_ccb(struct cam_ed *device)
4298 {
4299 	union ccb *new_ccb;
4300 	struct cam_sim *sim;
4301 
4302 	sim = device->sim;
4303 	if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
4304 		new_ccb = xpt_alloc_ccb_nowait();
4305 		if (new_ccb == NULL) {
4306 			return (NULL);
4307 		}
4308 		if ((sim->flags & CAM_SIM_MPSAFE) == 0)
4309 			callout_handle_init(&new_ccb->ccb_h.timeout_ch);
4310 		SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
4311 				  xpt_links.sle);
4312 		sim->ccb_count++;
4313 	}
4314 	cam_ccbq_take_opening(&device->ccbq);
4315 	SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
4316 	return (new_ccb);
4317 }
4318 
4319 static void
4320 xpt_release_bus(struct cam_eb *bus)
4321 {
4322 
4323 	if ((--bus->refcount == 0)
4324 	 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
4325 		mtx_lock(&xsoftc.xpt_topo_lock);
4326 		TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4327 		xsoftc.bus_generation++;
4328 		mtx_unlock(&xsoftc.xpt_topo_lock);
4329 		cam_sim_release(bus->sim);
4330 		free(bus, M_CAMXPT);
4331 	}
4332 }
4333 
4334 static struct cam_et *
4335 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4336 {
4337 	struct cam_et *target;
4338 
4339 	target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
4340 	if (target != NULL) {
4341 		struct cam_et *cur_target;
4342 
4343 		TAILQ_INIT(&target->ed_entries);
4344 		target->bus = bus;
4345 		target->target_id = target_id;
4346 		target->refcount = 1;
4347 		target->generation = 0;
4348 		timevalclear(&target->last_reset);
4349 		/*
4350 		 * Hold a reference to our parent bus so it
4351 		 * will not go away before we do.
4352 		 */
4353 		bus->refcount++;
4354 
4355 		/* Insertion sort into our bus's target list */
4356 		cur_target = TAILQ_FIRST(&bus->et_entries);
4357 		while (cur_target != NULL && cur_target->target_id < target_id)
4358 			cur_target = TAILQ_NEXT(cur_target, links);
4359 
4360 		if (cur_target != NULL) {
4361 			TAILQ_INSERT_BEFORE(cur_target, target, links);
4362 		} else {
4363 			TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4364 		}
4365 		bus->generation++;
4366 	}
4367 	return (target);
4368 }
4369 
4370 static void
4371 xpt_release_target(struct cam_et *target)
4372 {
4373 
4374 	if ((--target->refcount == 0)
4375 	 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
4376 		TAILQ_REMOVE(&target->bus->et_entries, target, links);
4377 		target->bus->generation++;
4378 		xpt_release_bus(target->bus);
4379 		free(target, M_CAMXPT);
4380 	}
4381 }
4382 
4383 static struct cam_ed *
4384 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
4385 			 lun_id_t lun_id)
4386 {
4387 	struct cam_ed *device, *cur_device;
4388 
4389 	device = xpt_alloc_device(bus, target, lun_id);
4390 	if (device == NULL)
4391 		return (NULL);
4392 
4393 	device->mintags = 1;
4394 	device->maxtags = 1;
4395 	bus->sim->max_ccbs += device->ccbq.devq_openings;
4396 	cur_device = TAILQ_FIRST(&target->ed_entries);
4397 	while (cur_device != NULL && cur_device->lun_id < lun_id)
4398 		cur_device = TAILQ_NEXT(cur_device, links);
4399 	if (cur_device != NULL) {
4400 		TAILQ_INSERT_BEFORE(cur_device, device, links);
4401 	} else {
4402 		TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4403 	}
4404 	target->generation++;
4405 
4406 	return (device);
4407 }
4408 
4409 struct cam_ed *
4410 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4411 {
4412 	struct	   cam_ed *device;
4413 	struct	   cam_devq *devq;
4414 	cam_status status;
4415 
4416 	/* Make space for us in the device queue on our bus */
4417 	devq = bus->sim->devq;
4418 	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4419 
4420 	if (status != CAM_REQ_CMP) {
4421 		device = NULL;
4422 	} else {
4423 		device = (struct cam_ed *)malloc(sizeof(*device),
4424 						 M_CAMXPT, M_NOWAIT);
4425 	}
4426 
4427 	if (device != NULL) {
4428 		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4429 		device->alloc_ccb_entry.device = device;
4430 		cam_init_pinfo(&device->send_ccb_entry.pinfo);
4431 		device->send_ccb_entry.device = device;
4432 		device->target = target;
4433 		device->lun_id = lun_id;
4434 		device->sim = bus->sim;
4435 		/* Initialize our queues */
4436 		if (camq_init(&device->drvq, 0) != 0) {
4437 			free(device, M_CAMXPT);
4438 			return (NULL);
4439 		}
4440 		if (cam_ccbq_init(&device->ccbq,
4441 				  bus->sim->max_dev_openings) != 0) {
4442 			camq_fini(&device->drvq);
4443 			free(device, M_CAMXPT);
4444 			return (NULL);
4445 		}
4446 		SLIST_INIT(&device->asyncs);
4447 		SLIST_INIT(&device->periphs);
4448 		device->generation = 0;
4449 		device->owner = NULL;
4450 		device->flags = CAM_DEV_UNCONFIGURED;
4451 		device->tag_delay_count = 0;
4452 		device->tag_saved_openings = 0;
4453 		device->refcount = 1;
4454 		callout_init_mtx(&device->callout, bus->sim->mtx, 0);
4455 
4456 		/*
4457 		 * Hold a reference to our parent target so it
4458 		 * will not go away before we do.
4459 		 */
4460 		target->refcount++;
4461 
4462 	}
4463 	return (device);
4464 }
4465 
4466 void
4467 xpt_acquire_device(struct cam_ed *device)
4468 {
4469 
4470 	device->refcount++;
4471 }
4472 
4473 void
4474 xpt_release_device(struct cam_ed *device)
4475 {
4476 
4477 	if (--device->refcount == 0) {
4478 		struct cam_devq *devq;
4479 
4480 		if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
4481 		 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
4482 			panic("Removing device while still queued for ccbs");
4483 
4484 		if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4485 			callout_stop(&device->callout);
4486 
4487 		TAILQ_REMOVE(&device->target->ed_entries, device, links);
4488 		device->target->generation++;
4489 		device->target->bus->sim->max_ccbs -= device->ccbq.devq_openings;
4490 		/* Release our slot in the devq */
4491 		devq = device->target->bus->sim->devq;
4492 		cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
4493 		camq_fini(&device->drvq);
4494 		cam_ccbq_fini(&device->ccbq);
4495 		xpt_release_target(device->target);
4496 		free(device, M_CAMXPT);
4497 	}
4498 }
4499 
4500 u_int32_t
4501 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4502 {
4503 	int	diff;
4504 	int	result;
4505 	struct	cam_ed *dev;
4506 
4507 	dev = path->device;
4508 
4509 	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
4510 	result = cam_ccbq_resize(&dev->ccbq, newopenings);
4511 	if (result == CAM_REQ_CMP && (diff < 0)) {
4512 		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
4513 	}
4514 	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
4515 	 || (dev->inq_flags & SID_CmdQue) != 0)
4516 		dev->tag_saved_openings = newopenings;
4517 	/* Adjust the global limit */
4518 	dev->sim->max_ccbs += diff;
4519 	return (result);
4520 }
4521 
4522 static struct cam_eb *
4523 xpt_find_bus(path_id_t path_id)
4524 {
4525 	struct cam_eb *bus;
4526 
4527 	mtx_lock(&xsoftc.xpt_topo_lock);
4528 	for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4529 	     bus != NULL;
4530 	     bus = TAILQ_NEXT(bus, links)) {
4531 		if (bus->path_id == path_id) {
4532 			bus->refcount++;
4533 			break;
4534 		}
4535 	}
4536 	mtx_unlock(&xsoftc.xpt_topo_lock);
4537 	return (bus);
4538 }
4539 
4540 static struct cam_et *
4541 xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
4542 {
4543 	struct cam_et *target;
4544 
4545 	for (target = TAILQ_FIRST(&bus->et_entries);
4546 	     target != NULL;
4547 	     target = TAILQ_NEXT(target, links)) {
4548 		if (target->target_id == target_id) {
4549 			target->refcount++;
4550 			break;
4551 		}
4552 	}
4553 	return (target);
4554 }
4555 
4556 static struct cam_ed *
4557 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4558 {
4559 	struct cam_ed *device;
4560 
4561 	for (device = TAILQ_FIRST(&target->ed_entries);
4562 	     device != NULL;
4563 	     device = TAILQ_NEXT(device, links)) {
4564 		if (device->lun_id == lun_id) {
4565 			device->refcount++;
4566 			break;
4567 		}
4568 	}
4569 	return (device);
4570 }
4571 
4572 void
4573 xpt_start_tags(struct cam_path *path)
4574 {
4575 	struct ccb_relsim crs;
4576 	struct cam_ed *device;
4577 	struct cam_sim *sim;
4578 	int    newopenings;
4579 
4580 	device = path->device;
4581 	sim = path->bus->sim;
4582 	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4583 	xpt_freeze_devq(path, /*count*/1);
4584 	device->inq_flags |= SID_CmdQue;
4585 	if (device->tag_saved_openings != 0)
4586 		newopenings = device->tag_saved_openings;
4587 	else
4588 		newopenings = min(device->maxtags,
4589 				  sim->max_tagged_dev_openings);
4590 	xpt_dev_ccbq_resize(path, newopenings);
4591 	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4592 	crs.ccb_h.func_code = XPT_REL_SIMQ;
4593 	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4594 	crs.openings
4595 	    = crs.release_timeout
4596 	    = crs.qfrozen_cnt
4597 	    = 0;
4598 	xpt_action((union ccb *)&crs);
4599 }
4600 
4601 void
4602 xpt_stop_tags(struct cam_path *path)
4603 {
4604 	struct ccb_relsim crs;
4605 	struct cam_ed *device;
4606 	struct cam_sim *sim;
4607 
4608 	device = path->device;
4609 	sim = path->bus->sim;
4610 	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4611 	device->tag_delay_count = 0;
4612 	xpt_freeze_devq(path, /*count*/1);
4613 	device->inq_flags &= ~SID_CmdQue;
4614 	xpt_dev_ccbq_resize(path, sim->max_dev_openings);
4615 	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4616 	crs.ccb_h.func_code = XPT_REL_SIMQ;
4617 	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4618 	crs.openings
4619 	    = crs.release_timeout
4620 	    = crs.qfrozen_cnt
4621 	    = 0;
4622 	xpt_action((union ccb *)&crs);
4623 }
4624 
4625 static void
4626 xpt_boot_delay(void *arg)
4627 {
4628 
4629 	xpt_release_boot();
4630 }
4631 
4632 static void
4633 xpt_config(void *arg)
4634 {
4635 	/*
4636 	 * Now that interrupts are enabled, go find our devices
4637 	 */
4638 
4639 #ifdef CAMDEBUG
4640 	/* Setup debugging flags and path */
4641 #ifdef CAM_DEBUG_FLAGS
4642 	cam_dflags = CAM_DEBUG_FLAGS;
4643 #else /* !CAM_DEBUG_FLAGS */
4644 	cam_dflags = CAM_DEBUG_NONE;
4645 #endif /* CAM_DEBUG_FLAGS */
4646 #ifdef CAM_DEBUG_BUS
4647 	if (cam_dflags != CAM_DEBUG_NONE) {
4648 		/*
4649 		 * Locking is specifically omitted here.  No SIMs have
4650 		 * registered yet, so xpt_create_path will only be searching
4651 		 * empty lists of targets and devices.
4652 		 */
4653 		if (xpt_create_path(&cam_dpath, xpt_periph,
4654 				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
4655 				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
4656 			printf("xpt_config: xpt_create_path() failed for debug"
4657 			       " target %d:%d:%d, debugging disabled\n",
4658 			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
4659 			cam_dflags = CAM_DEBUG_NONE;
4660 		}
4661 	} else
4662 		cam_dpath = NULL;
4663 #else /* !CAM_DEBUG_BUS */
4664 	cam_dpath = NULL;
4665 #endif /* CAM_DEBUG_BUS */
4666 #endif /* CAMDEBUG */
4667 
4668 	/* Register our shutdown event handler */
4669 	if ((EVENTHANDLER_REGISTER(shutdown_final, xpt_shutdown,
4670 				   NULL, SHUTDOWN_PRI_FIRST)) == NULL) {
4671 		printf("xpt_config: failed to register shutdown event.\n");
4672 	}
4673 
4674 	periphdriver_init(1);
4675 	xpt_hold_boot();
4676 	callout_init(&xsoftc.boot_callout, 1);
4677 	callout_reset(&xsoftc.boot_callout, hz * xsoftc.boot_delay / 1000,
4678 	    xpt_boot_delay, NULL);
4679 	/* Fire up rescan thread. */
4680 	if (kproc_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
4681 		printf("xpt_config: failed to create rescan thread.\n");
4682 	}
4683 }
4684 
4685 void
4686 xpt_hold_boot(void)
4687 {
4688 	xpt_lock_buses();
4689 	xsoftc.buses_to_config++;
4690 	xpt_unlock_buses();
4691 }
4692 
4693 void
4694 xpt_release_boot(void)
4695 {
4696 	xpt_lock_buses();
4697 	xsoftc.buses_to_config--;
4698 	if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
4699 		struct	xpt_task *task;
4700 
4701 		xsoftc.buses_config_done = 1;
4702 		xpt_unlock_buses();
4703 		/* Call manually because we don't have any busses */
4704 		task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
4705 		if (task != NULL) {
4706 			TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
4707 			taskqueue_enqueue(taskqueue_thread, &task->task);
4708 		}
4709 	} else
4710 		xpt_unlock_buses();
4711 }
4712 
4713 /*
4714  * If the given device only has one peripheral attached to it, and if that
4715  * peripheral is the passthrough driver, announce it.  This ensures that the
4716  * user sees some sort of announcement for every peripheral in their system.
4717  */
4718 static int
4719 xptpassannouncefunc(struct cam_ed *device, void *arg)
4720 {
4721 	struct cam_periph *periph;
4722 	int i;
4723 
4724 	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
4725 	     periph = SLIST_NEXT(periph, periph_links), i++);
4726 
4727 	periph = SLIST_FIRST(&device->periphs);
4728 	if ((i == 1)
4729 	 && (strncmp(periph->periph_name, "pass", 4) == 0))
4730 		xpt_announce_periph(periph, NULL);
4731 
4732 	return (1);
4733 }
4734 
4735 static void
4736 xpt_finishconfig_task(void *context, int pending)
4737 {
4738 
4739 	periphdriver_init(2);
4740 	/*
4741 	 * Check for devices with no "standard" peripheral driver
4742 	 * attached.  For any devices like that, announce the
4743 	 * passthrough driver so the user will see something.
4744 	 */
4745 	xpt_for_all_devices(xptpassannouncefunc, NULL);
4746 
4747 	/* Release our hook so that the boot can continue. */
4748 	config_intrhook_disestablish(xsoftc.xpt_config_hook);
4749 	free(xsoftc.xpt_config_hook, M_CAMXPT);
4750 	xsoftc.xpt_config_hook = NULL;
4751 
4752 	free(context, M_CAMXPT);
4753 }
4754 
4755 /*
4756  * Power down all devices when we are going to power down the system.
4757  */
4758 static void
4759 xpt_shutdown_dev_done(struct cam_periph *periph, union ccb *done_ccb)
4760 {
4761 
4762 	/* No-op. We're polling. */
4763 	return;
4764 }
4765 
4766 static int
4767 xpt_shutdown_dev(struct cam_ed *device, void *arg)
4768 {
4769 	union ccb ccb;
4770 	struct cam_path path;
4771 
4772 	if (device->flags & CAM_DEV_UNCONFIGURED)
4773 		return (1);
4774 
4775 	if (device->protocol == PROTO_ATA) {
4776 		/* Only power down device if it supports power management. */
4777 		if ((device->ident_data.support.command1 &
4778 		    ATA_SUPPORT_POWERMGT) == 0)
4779 			return (1);
4780 	} else if (device->protocol != PROTO_SCSI)
4781 		return (1);
4782 
4783 	xpt_compile_path(&path,
4784 			 NULL,
4785 			 device->target->bus->path_id,
4786 			 device->target->target_id,
4787 			 device->lun_id);
4788 	xpt_setup_ccb(&ccb.ccb_h, &path, CAM_PRIORITY_NORMAL);
4789 	if (device->protocol == PROTO_ATA) {
4790 		cam_fill_ataio(&ccb.ataio,
4791 				    1,
4792 				    xpt_shutdown_dev_done,
4793 				    CAM_DIR_NONE,
4794 				    0,
4795 				    NULL,
4796 				    0,
4797 				    30*1000);
4798 		ata_28bit_cmd(&ccb.ataio, ATA_SLEEP, 0, 0, 0);
4799 	} else {
4800 		scsi_start_stop(&ccb.csio,
4801 				/*retries*/1,
4802 				xpt_shutdown_dev_done,
4803 				MSG_SIMPLE_Q_TAG,
4804 				/*start*/FALSE,
4805 				/*load/eject*/FALSE,
4806 				/*immediate*/TRUE,
4807 				SSD_FULL_SIZE,
4808 				/*timeout*/50*1000);
4809 	}
4810 	xpt_polled_action(&ccb);
4811 
4812 	if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
4813 		xpt_print(&path, "Device power down failed\n");
4814 	if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
4815 		cam_release_devq(ccb.ccb_h.path,
4816 				 /*relsim_flags*/0,
4817 				 /*reduction*/0,
4818 				 /*timeout*/0,
4819 				 /*getcount_only*/0);
4820 	xpt_release_path(&path);
4821 	return (1);
4822 }
4823 
4824 static void
4825 xpt_shutdown(void * arg, int howto)
4826 {
4827 
4828 	if (!xpt_power_down)
4829 		return;
4830 	if ((howto & RB_POWEROFF) == 0)
4831 		return;
4832 
4833 	xpt_for_all_devices(xpt_shutdown_dev, NULL);
4834 }
4835 
4836 cam_status
4837 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
4838 		   struct cam_path *path)
4839 {
4840 	struct ccb_setasync csa;
4841 	cam_status status;
4842 	int xptpath = 0;
4843 
4844 	if (path == NULL) {
4845 		mtx_lock(&xsoftc.xpt_lock);
4846 		status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
4847 					 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4848 		if (status != CAM_REQ_CMP) {
4849 			mtx_unlock(&xsoftc.xpt_lock);
4850 			return (status);
4851 		}
4852 		xptpath = 1;
4853 	}
4854 
4855 	xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
4856 	csa.ccb_h.func_code = XPT_SASYNC_CB;
4857 	csa.event_enable = event;
4858 	csa.callback = cbfunc;
4859 	csa.callback_arg = cbarg;
4860 	xpt_action((union ccb *)&csa);
4861 	status = csa.ccb_h.status;
4862 	if (xptpath) {
4863 		xpt_free_path(path);
4864 		mtx_unlock(&xsoftc.xpt_lock);
4865 
4866 		if ((status == CAM_REQ_CMP) &&
4867 		    (csa.event_enable & AC_FOUND_DEVICE)) {
4868 			/*
4869 			 * Get this peripheral up to date with all
4870 			 * the currently existing devices.
4871 			 */
4872 			xpt_for_all_devices(xptsetasyncfunc, &csa);
4873 		}
4874 		if ((status == CAM_REQ_CMP) &&
4875 		    (csa.event_enable & AC_PATH_REGISTERED)) {
4876 			/*
4877 			 * Get this peripheral up to date with all
4878 			 * the currently existing busses.
4879 			 */
4880 			xpt_for_all_busses(xptsetasyncbusfunc, &csa);
4881 		}
4882 	}
4883 	return (status);
4884 }
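
/*
 * A minimal usage sketch: a peripheral driver registering for device
 * arrival events, in the style of the disk drivers' init routines.
 * The periph_*_example names are hypothetical.
 */
#if 0
static void
periph_async_example(void *callback_arg, u_int32_t code,
		     struct cam_path *path, void *arg)
{
	/* React to AC_FOUND_DEVICE here. */
}

static void
periph_init_example(void)
{
	cam_status status;

	/* A NULL path requests events from all busses. */
	status = xpt_register_async(AC_FOUND_DEVICE, periph_async_example,
				    NULL, NULL);
	if (status != CAM_REQ_CMP)
		printf("periph_init_example: async registration failed\n");
}
#endif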
4885 
4886 static void
4887 xptaction(struct cam_sim *sim, union ccb *work_ccb)
4888 {
4889 	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
4890 
4891 	switch (work_ccb->ccb_h.func_code) {
4892 	/* Common cases first */
4893 	case XPT_PATH_INQ:		/* Path routing inquiry */
4894 	{
4895 		struct ccb_pathinq *cpi;
4896 
4897 		cpi = &work_ccb->cpi;
4898 		cpi->version_num = 1; /* XXX??? */
4899 		cpi->hba_inquiry = 0;
4900 		cpi->target_sprt = 0;
4901 		cpi->hba_misc = 0;
4902 		cpi->hba_eng_cnt = 0;
4903 		cpi->max_target = 0;
4904 		cpi->max_lun = 0;
4905 		cpi->initiator_id = 0;
4906 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
4907 		strncpy(cpi->hba_vid, "", HBA_IDLEN);
4908 		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
4909 		cpi->unit_number = sim->unit_number;
4910 		cpi->bus_id = sim->bus_id;
4911 		cpi->base_transfer_speed = 0;
4912 		cpi->protocol = PROTO_UNSPECIFIED;
4913 		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
4914 		cpi->transport = XPORT_UNSPECIFIED;
4915 		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
4916 		cpi->ccb_h.status = CAM_REQ_CMP;
4917 		xpt_done(work_ccb);
4918 		break;
4919 	}
4920 	default:
4921 		work_ccb->ccb_h.status = CAM_REQ_INVALID;
4922 		xpt_done(work_ccb);
4923 		break;
4924 	}
4925 }
4926 
4927 /*
4928  * The xpt as a "controller" has no interrupt sources, so polling
4929  * is a no-op.
4930  */
4931 static void
4932 xptpoll(struct cam_sim *sim)
4933 {
4934 }
4935 
4936 void
4937 xpt_lock_buses(void)
4938 {
4939 	mtx_lock(&xsoftc.xpt_topo_lock);
4940 }
4941 
4942 void
4943 xpt_unlock_buses(void)
4944 {
4945 	mtx_unlock(&xsoftc.xpt_topo_lock);
4946 }
4947 
4948 static void
4949 camisr(void *dummy)
4950 {
4951 	cam_simq_t queue;
4952 	struct cam_sim *sim;
4953 
4954 	mtx_lock(&cam_simq_lock);
4955 	TAILQ_INIT(&queue);
4956 	while (!TAILQ_EMPTY(&cam_simq)) {
4957 		TAILQ_CONCAT(&queue, &cam_simq, links);
4958 		mtx_unlock(&cam_simq_lock);
4959 
4960 		while ((sim = TAILQ_FIRST(&queue)) != NULL) {
4961 			TAILQ_REMOVE(&queue, sim, links);
4962 			CAM_SIM_LOCK(sim);
4963 			sim->flags &= ~CAM_SIM_ON_DONEQ;
4964 			camisr_runqueue(&sim->sim_doneq);
4965 			CAM_SIM_UNLOCK(sim);
4966 		}
4967 		mtx_lock(&cam_simq_lock);
4968 	}
4969 	mtx_unlock(&cam_simq_lock);
4970 }
4971 
4972 static void
4973 camisr_runqueue(void *V_queue)
4974 {
4975 	cam_isrq_t *queue = V_queue;
4976 	struct	ccb_hdr *ccb_h;
4977 
4978 	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
4979 		int	runq;
4980 
4981 		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
4982 		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
4983 
4984 		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
4985 			  ("camisr\n"));
4986 
4987 		runq = FALSE;
4988 
4989 		if (ccb_h->flags & CAM_HIGH_POWER) {
4990 			struct highpowerlist	*hphead;
4991 			union ccb		*send_ccb;
4992 
4993 			mtx_lock(&xsoftc.xpt_lock);
4994 			hphead = &xsoftc.highpowerq;
4995 
4996 			send_ccb = (union ccb *)STAILQ_FIRST(hphead);
4997 
4998 			/*
4999 			 * This command is done; free up its high power slot.
5000 			 */
5001 			xsoftc.num_highpower++;
5002 
5003 			/*
5004 			 * Any high powered commands queued up?
5005 			 */
5006 			if (send_ccb != NULL) {
5007 
5008 				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
5009 				mtx_unlock(&xsoftc.xpt_lock);
5010 
5011 				xpt_release_devq(send_ccb->ccb_h.path,
5012 						 /*count*/1, /*runqueue*/TRUE);
5013 			} else
5014 				mtx_unlock(&xsoftc.xpt_lock);
5015 		}
5016 
5017 		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
5018 			struct cam_ed *dev;
5019 
5020 			dev = ccb_h->path->device;
5021 
5022 			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5023 			ccb_h->path->bus->sim->devq->send_active--;
5024 			ccb_h->path->bus->sim->devq->send_openings++;
5025 			runq = TRUE;
5026 
5027 			if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5028 			  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
5029 			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5030 			  && (dev->ccbq.dev_active == 0))) {
5031 				xpt_release_devq(ccb_h->path, /*count*/1,
5032 						 /*run_queue*/FALSE);
5033 			}
5034 
5035 			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5036 			 && (--dev->tag_delay_count == 0))
5037 				xpt_start_tags(ccb_h->path);
5038 		}
5039 
5040 		if (ccb_h->status & CAM_RELEASE_SIMQ) {
5041 			xpt_release_simq(ccb_h->path->bus->sim,
5042 					 /*run_queue*/TRUE);
5043 			ccb_h->status &= ~CAM_RELEASE_SIMQ;
5044 			runq = FALSE;
5045 		}
5046 
5047 		if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5048 		 && (ccb_h->status & CAM_DEV_QFRZN)) {
5049 			xpt_release_devq(ccb_h->path, /*count*/1,
5050 					 /*run_queue*/TRUE);
5051 			ccb_h->status &= ~CAM_DEV_QFRZN;
5052 		} else if (runq) {
5053 			xpt_run_dev_sendq(ccb_h->path->bus);
5054 		}
5055 
5056 		/* Call the peripheral driver's callback */
5057 		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
5058 	}
5059 }
5060 
5061