xref: /freebsd/sys/cam/cam_xpt.c (revision d74e86d9e30043893d6b308468008b65640ddcae)
/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      $Id: cam_xpt.c,v 1.4 1998/09/16 13:24:37 gibbs Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>	/* geometry translation */
#endif

#include <machine/clock.h>
#include <machine/ipl.h>

#include <cam/cam.h>
#include <cam/cam_conf.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include "opt_cam.h"
#include "opt_scsi.h"

extern	void	(*ihandlers[32]) __P((void));

/* Datastructures internal to the xpt layer */

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/*
 * This is the number of milliseconds we wait for devices to settle after a
 * SCSI bus reset.
 */
#ifndef SCSI_DELAY
#define SCSI_DELAY 2000
#endif
#if (SCSI_DELAY < 100)
#error "SCSI_DELAY is in milliseconds, not seconds!  Please use a larger value"
#endif

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on each bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached peripheral drivers */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
	struct	scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 *serial_num;
	u_int8_t	 serial_num_len;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED		0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
	u_int32_t	 refcount;
	struct		 callout_handle c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	struct async_list    asyncs;	/* Async callback info for this B/T/L */
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int		     generation;
};

struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};

struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
	u_int8_t mintags;
	u_int8_t maxtags;
};

typedef enum {
	XPT_FLAG_OPEN		= 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags	flags;
	u_int32_t	generation;
#ifdef DEVFS
	void		*xpt_devfs_token;
	void		*ctl_devfs_token;
#endif
};
static const char quantum[] = "QUANTUM";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100?", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550?", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Doesn't understand EVPD serial number requests */
		{
			T_CDROM, SIP_MEDIA_REMOVABLE,
			"TOSHIBA", "CD-ROM XM-3401TA", "1094"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Hack until multiple-luns are supported by
		 * the target mode code.
		 */
		{
			T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			"FreeBSD", "TM-PT", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Really only one LUN */
		{
			T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/64
	},
};
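
/*
 * Return values for the dev_match routines below.  The low nibble holds
 * flag bits (currently just DM_RET_COPY, "copy this node out to the user"),
 * and the high nibble holds the traversal action to take: keep going as-is
 * (NONE), stop descending (STOP), descend to the next EDT level (DESCEND),
 * or abort the search (ERROR).
 */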
typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static cam_isrq_t cam_netq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;	/*
				 * Maximum size of ccb pool.  Modified as
				 * devices are added/removed or have their
				 * opening counts changed.
				 */
static u_int xpt_ccb_count;	/* Current count of allocated ccbs */

static struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

DATA_SET(periphdriver_set, xpt_driver);
DATA_SET(periphdriver_set, probe_driver);

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw =
{
	/*d_open*/	xptopen,
	/*d_close*/	xptclose,
	/*d_read*/	noread,
	/*d_write*/	nowrite,
	/*d_ioctl*/	xptioctl,
	/*d_stop*/	nostop,
	/*d_reset*/	noreset,
	/*d_devtotty*/	nodevtotty,
	/*d_poll*/	NULL,
	/*d_mmap*/	nommap,
	/*d_strategy*/	nostrategy,
	/*d_name*/	"xpt",
	/*d_spare*/	NULL,
	/*d_maj*/	-1,
	/*d_dump*/	nodump,
	/*d_psize*/	nopsize,
	/*d_flags*/	0,
	/*d_maxio*/	0,
	/*b_maj*/	-1
};

static struct intr_config_hook *xpt_config_hook;

/* Registered busses */
TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef	CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */

/* Forward declarations for private functions */
void	xpt_init(void);

static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static int	 xptnextfreebus(path_id_t startbus);
static int	 xptpathid(const char *sim_name, int sim_unit, int sim_bus,
			   path_id_t *nextpath);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static timeout_t xpt_release_simq_timeout;
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t	xptfinishconfigfunc;
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
       void	 swi_camnet(void);
       void	 swi_cambio(void);
static void	 camisr(cam_isrq_t *queue);
#if 0
static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void	 xptasync(struct cam_periph *periph,
			  u_int32_t code, cam_path *path);
#endif
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       int num_patterns, struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
					    void *arg);
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   int async_update);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

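/*
 * Schedule this device on its bus's CCB-allocation run queue if it has
 * device queue openings left, growing its ccbq first if a resize was
 * deferred.  The queue priority comes from the highest-priority
 * peripheral driver waiting in the device's drvq.
 */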
static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  dev->drvq.queue_array[0]->priority);
	} else {
		retval = 0;
	}

	return (retval);
}

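/*
 * Schedule this device on its bus's CCB-send run queue if it still has
 * device openings.  The queue priority comes from the highest-priority
 * CCB pending in the device's ccbq.
 */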
static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int	retval;

	if (dev->ccbq.dev_openings > 0) {
		retval = xpt_schedule_dev(&bus->sim->devq->send_queue,
					  &dev->send_ccb_entry.pinfo,
					  dev->ccbq.queue.queue_array[0]->priority);
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}

static void
xpt_periph_init()
{
	dev_t dev;

	dev = makedev(XPT_CDEV_MAJOR, 0);
	cdevsw_add(&dev, &xpt_cdevsw, NULL);
}

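/*
 * The probe driver needs no initialization at registration time; probe
 * peripherals are created on demand when a bus or lun scan is requested.
 */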
static void
probe_periph_init()
{
}

static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

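/*
 * Open the transport layer device node.  Access must be read-write and
 * blocking, and only unit 0 exists.
 */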
static int
xptopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("xpt%d: can't do nonblocking access\n", unit);
		return(ENODEV);
	}

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptopen: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves open */
	xsoftc.flags |= XPT_FLAG_OPEN;

	return(0);
}

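/*
 * Close the transport layer device node and clear our open flag.
 */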
static int
xptclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptclose: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves closed */
	xsoftc.flags &= ~XPT_FLAG_OPEN;

	return(0);
}

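/*
 * Handle the transport layer ioctls: CAMIOCOMMAND runs a small set of CCB
 * types on a user-supplied path, and CAMGETPASSTHRU looks up the "pass"
 * peripheral instance attached to the same device as a named peripheral.
 */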
static int
xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	int unit, error;

	error = 0;
	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptioctl: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;

		inccb = (union ccb *)addr;

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_LUN:
		case XPT_ENG_INQ:  /* XXX not implemented yet */
		case XPT_ENG_EXEC:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;
		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}
			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error)
				break;

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			error = 0;
			break;
		}
		default:
			error = EINVAL;
			break;
		}
		break;
	}
	/*
	 * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit number filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass") and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.  Note
	 * that since we do everything in one pass, unlike the XPT_GDEVLIST
	 * ccb, we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change since we look at the whole thing in one pass, and
	 * we do it with splsoftcam protection.
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		int unit;
		int cur_generation;
		int splbreaknum;
		int s;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to drop our spl protection to
		 * give the software interrupt handler a chance to run.
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		s = splsoftcam();
ptstartover:
		cur_generation = xsoftc.generation;

		/* first find our driver in the list of drivers */
		for (p_drv = (struct periph_driver **)periphdriver_set.ls_items;
		     *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			splx(s);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				splx(s);
				s = splsoftcam();
				splbreaknum = 100;
				if (cur_generation != xsoftc.generation)
				       goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			device = periph->path->device;
			for (i = 0, periph = device->periphs.slh_first;
			     periph != NULL;
			     periph = periph->periph_links.sle_next, i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (periph->periph_links.sle_next)
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
		}
		splx(s);
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}

/* Functions accessed by the peripheral drivers */
void
xpt_init()
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	cam_status status;

	TAILQ_INIT(&xpt_busses);
	TAILQ_INIT(&cam_bioq);
	TAILQ_INIT(&cam_netq);
	SLIST_INIT(&ccb_freeq);
	STAILQ_INIT(&highpowerq);

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	xpt_sim = (struct cam_sim *)malloc(sizeof(*xpt_sim),
					   M_DEVBUF, M_WAITOK);
	xpt_sim->sim_action = xptaction;
	xpt_sim->sim_name = "xpt";
	xpt_sim->path_id = CAM_XPT_PATH_ID;
	xpt_sim->bus_id = 0;
	xpt_sim->max_tagged_dev_openings = 0;
	xpt_sim->max_dev_openings = 0;
	xpt_sim->devq = cam_simq_alloc(16);
	xpt_max_ccbs = 16;

	xpt_bus_register(xpt_sim, 0);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return;
	}

	cam_periph_alloc(xptregister, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, NULL);
	xpt_free_path(path);

	xpt_sim->softc = xpt_periph;

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_TEMP, M_NOWAIT);
	if (xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return;
	}
	bzero(xpt_config_hook, sizeof(*xpt_config_hook));

	xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xpt_config_hook) != 0) {
		free (xpt_config_hook, M_TEMP);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* Install our software interrupt handlers */
	/* XXX Should call some MI function to do this */
#ifdef __i386__
	ihandlers[SWI_CAMNET] = swi_camnet;
	ihandlers[SWI_CAMBIO] = swi_cambio;
#endif
}

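/*
 * Peripheral registration callback for the xpt "peripheral" allocated in
 * xpt_init(); it simply records the periph in the global xpt_periph.
 */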
static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	periph->softc = NULL;

	xpt_periph = periph;

	return(CAM_REQ_CMP);
}

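/*
 * Attach a peripheral instance to its device: grow the device's driver
 * queue so the new peripheral can be scheduled, link it onto the device's
 * peripheral list, and bump the generation counts.
 */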
int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		int s;

		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		s = splsoftcam();
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

	return (status);
}

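/*
 * Detach a peripheral instance from its device: shrink the device's driver
 * queue, unlink the peripheral, and bump the generation counts.
 */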
void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;

	if (device != NULL) {
		int s;
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		s = splsoftcam();
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;
}

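/*
 * Print the standard attach announcement for a peripheral: its position
 * on the bus, the inquiry data, the negotiated transfer speed and width
 * (fetched via an XPT_GET_TRAN_SETTINGS CCB), and tagged queueing state.
 */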
void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	int s;
	u_int mb;
	struct cam_path *path;
	struct ccb_trans_settings cts;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	s = splsoftcam();
	printf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if ((bootverbose)
	 && (path->device->serial_num_len > 0)) {
		/* Don't wrap the screen  - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if (cts.ccb_h.status == CAM_REQ_CMP) {
		u_int speed;
		u_int freq;

		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		  && cts.sync_offset != 0) {
			freq = scsi_calc_syncsrate(cts.sync_period);
			speed = freq;
		} else {
			freq = 0;
			speed = path->bus->sim->base_transfer_speed;
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
			speed *= (0x01 << cts.bus_width);
		mb = speed / 1000;
		if (mb > 0)
			printf("%s%d: %d.%03dMB/s transfers",
			       periph->periph_name, periph->unit_number,
			       mb, speed % 1000);
		else
			printf("%s%d: %dKB/s transfers", periph->periph_name,
			       periph->unit_number, speed);
		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		 && cts.sync_offset != 0) {
			printf(" (%d.%03dMHz, offset %d", freq / 1000,
			       freq % 1000, cts.sync_offset);
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
		 && cts.bus_width > 0) {
			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			 && cts.sync_offset != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << cts.bus_width));
		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			&& cts.sync_offset != 0) {
			printf(")");
		}
		if (path->device->inq_flags & SID_CmdQue) {
			printf(", Tagged Queueing Enabled");
		}

		printf("\n");
	} else if (path->device->inq_flags & SID_CmdQue) {
		printf("%s%d: Tagged Queueing Enabled\n",
		       periph->periph_name, periph->unit_number);
	}

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in..
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
	splx(s);
}
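/*
 * Match a single bus against any number of match patterns.
 */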
static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, this one qualifies.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

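/*
 * Match a single device against any number of match patterns.
 */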
static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/*
		 * If they want to match any device node, this one qualifies.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->inq_pat,
				    1, sizeof(cur_pattern->inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun id's, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}

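/*
 * Per-bus callback for an EDT traversal: match this bus against the
 * patterns, copy it out if requested, and walk its targets unless the
 * match result says to stop.
 */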
static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strncpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name, DEV_IDLEN);
	}

	/*
	 * If the user is only interested in busses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
	     bus->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		return(xpttargettraverse(bus,
					(struct cam_et *)cdm->pos.cookie.target,
					 xptedttargetfunc, arg));
	else
		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
}

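/*
 * Per-target callback for an EDT traversal: verify the saved device-list
 * generation, then walk this target's devices, resuming from a saved
 * position if one is recorded.
 */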
static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If there is a device list generation recorded, check it to
	 * make sure the device list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
	     target->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device != NULL))
		return(xptdevicetraverse(target,
					(struct cam_ed *)cdm->pos.cookie.device,
					 xptedtdevicefunc, arg));
	else
		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
}

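/*
 * Per-device callback for an EDT traversal: match this device against the
 * patterns, copy it out if requested, and walk its peripherals unless the
 * match result says to stop.
 */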
static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
					device);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this device out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;

			cdm->pos.cookie.bus = device->target->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				bus_generation;
			cdm->pos.cookie.target = device->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				device->target->bus->generation;
			cdm->pos.cookie.device = device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				device->target->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_DEVICE;
		cdm->matches[j].result.device_result.path_id =
			device->target->bus->path_id;
		cdm->matches[j].result.device_result.target_id =
			device->target->target_id;
		cdm->matches[j].result.device_result.target_lun =
			device->lun_id;
		bcopy(&device->inq_data,
		      &cdm->matches[j].result.device_result.inq_data,
		      sizeof(struct scsi_inquiry_data));
	}

	/*
	 * If the user isn't interested in peripherals, don't descend
	 * the tree any further.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a peripheral list generation recorded, make sure
	 * it hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (device->target->bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (device->target == cdm->pos.cookie.target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (device == cdm->pos.cookie.device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
	     device->generation)){
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == device->target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == device->target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		return(xptperiphtraverse(device,
				(struct cam_periph *)cdm->pos.cookie.periph,
				xptedtperiphfunc, arg));
	else
		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
}

1834 static int
1835 xptedtperiphfunc(struct cam_periph *periph, void *arg)
1836 {
1837 	struct ccb_dev_match *cdm;
1838 	dev_match_ret retval;
1839 
1840 	cdm = (struct ccb_dev_match *)arg;
1841 
1842 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1843 
1844 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1845 		cdm->status = CAM_DEV_MATCH_ERROR;
1846 		return(0);
1847 	}
1848 
1849 	/*
1850 	 * If the copy flag is set, copy this peripheral out.
1851 	 */
1852 	if (retval & DM_RET_COPY) {
1853 		int spaceleft, j;
1854 
1855 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1856 			sizeof(struct dev_match_result));
1857 
1858 		/*
1859 		 * If we don't have enough space to put in another
1860 		 * match result, save our position and tell the
1861 		 * user there are more devices to check.
1862 		 */
1863 		if (spaceleft < sizeof(struct dev_match_result)) {
1864 			bzero(&cdm->pos, sizeof(cdm->pos));
1865 			cdm->pos.position_type =
1866 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1867 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1868 				CAM_DEV_POS_PERIPH;
1869 
1870 			cdm->pos.cookie.bus = periph->path->bus;
1871 			cdm->pos.generations[CAM_BUS_GENERATION]=
1872 				bus_generation;
1873 			cdm->pos.cookie.target = periph->path->target;
1874 			cdm->pos.generations[CAM_TARGET_GENERATION] =
1875 				periph->path->bus->generation;
1876 			cdm->pos.cookie.device = periph->path->device;
1877 			cdm->pos.generations[CAM_DEV_GENERATION] =
1878 				periph->path->target->generation;
1879 			cdm->pos.cookie.periph = periph;
1880 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1881 				periph->path->device->generation;
1882 			cdm->status = CAM_DEV_MATCH_MORE;
1883 			return(0);
1884 		}
1885 
1886 		j = cdm->num_matches;
1887 		cdm->num_matches++;
1888 		cdm->matches[j].type = DEV_MATCH_PERIPH;
1889 		cdm->matches[j].result.periph_result.path_id =
1890 			periph->path->bus->path_id;
1891 		cdm->matches[j].result.periph_result.target_id =
1892 			periph->path->target->target_id;
1893 		cdm->matches[j].result.periph_result.target_lun =
1894 			periph->path->device->lun_id;
1895 		cdm->matches[j].result.periph_result.unit_number =
1896 			periph->unit_number;
1897 		strncpy(cdm->matches[j].result.periph_result.periph_name,
1898 			periph->periph_name, DEV_IDLEN);
1899 	}
1900 
1901 	return(1);
1902 }
1903 
1904 static int
1905 xptedtmatch(struct ccb_dev_match *cdm)
1906 {
1907 	int ret;
1908 
1909 	cdm->num_matches = 0;
1910 
1911 	/*
1912 	 * Check the bus list generation.  If it has changed, the user
1913 	 * needs to reset everything and start over.
1914 	 */
1915 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1916 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
1917 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
1918 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1919 		return(0);
1920 	}
1921 
1922 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1923 	 && (cdm->pos.cookie.bus != NULL))
1924 		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
1925 				     xptedtbusfunc, cdm);
1926 	else
1927 		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
1928 
1929 	/*
1930 	 * If we get back 0, that means that we had to stop before fully
1931 	 * traversing the EDT.  It also means that one of the subroutines
1932 	 * has set the status field to the proper value.  If we get back 1,
1933 	 * we've fully traversed the EDT and copied out any matching entries.
1934 	 */
1935 	if (ret == 1)
1936 		cdm->status = CAM_DEV_MATCH_LAST;
1937 
1938 	return(ret);
1939 }
1940 
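/*
 * For illustration, a hedged sketch (an assumption, not code from this
 * file) of how a caller might drive the match engine above: resubmit
 * the XPT_DEV_MATCH ccb, preserving the position saved in cdm->pos,
 * until the status is no longer CAM_DEV_MATCH_MORE.  With num_patterns
 * set to 0 everything matches, so the whole EDT is copied out in
 * buffer-sized chunks; "bufsize" and "match_buf" are placeholders.
 *
 *	struct ccb_dev_match cdm;
 *
 *	bzero(&cdm, sizeof(cdm));
 *	xpt_setup_ccb(&cdm.ccb_h, path, 1);
 *	cdm.ccb_h.func_code = XPT_DEV_MATCH;
 *	cdm.match_buf_len = bufsize;
 *	cdm.matches = match_buf;
 *	cdm.num_patterns = 0;
 *	do {
 *		xpt_action((union ccb *)&cdm);
 *		process the cdm.num_matches entries in match_buf here
 *	} while (cdm.status == CAM_DEV_MATCH_MORE);
 */
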
1941 static int
1942 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
1943 {
1944 	struct ccb_dev_match *cdm;
1945 
1946 	cdm = (struct ccb_dev_match *)arg;
1947 
1948 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1949 	 && (cdm->pos.cookie.pdrv == pdrv)
1950 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1951 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
1952 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1953 	     (*pdrv)->generation)) {
1954 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1955 		return(0);
1956 	}
1957 
1958 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1959 	 && (cdm->pos.cookie.pdrv == pdrv)
1960 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1961 	 && (cdm->pos.cookie.periph != NULL))
1962 		return(xptpdperiphtraverse(pdrv,
1963 				(struct cam_periph *)cdm->pos.cookie.periph,
1964 				xptplistperiphfunc, arg));
1965 	else
1966 		return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
1967 }
1968 
1969 static int
1970 xptplistperiphfunc(struct cam_periph *periph, void *arg)
1971 {
1972 	struct ccb_dev_match *cdm;
1973 	dev_match_ret retval;
1974 
1975 	cdm = (struct ccb_dev_match *)arg;
1976 
1977 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1978 
1979 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1980 		cdm->status = CAM_DEV_MATCH_ERROR;
1981 		return(0);
1982 	}
1983 
1984 	/*
1985 	 * If the copy flag is set, copy this peripheral out.
1986 	 */
1987 	if (retval & DM_RET_COPY) {
1988 		int spaceleft, j;
1989 
1990 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1991 			sizeof(struct dev_match_result));
1992 
1993 		/*
1994 		 * If we don't have enough space to put in another
1995 		 * match result, save our position and tell the
1996 		 * user there are more devices to check.
1997 		 */
1998 		if (spaceleft < sizeof(struct dev_match_result)) {
1999 			struct periph_driver **pdrv;
2000 
2001 			pdrv = NULL;
2002 			bzero(&cdm->pos, sizeof(cdm->pos));
2003 			cdm->pos.position_type =
2004 				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2005 				CAM_DEV_POS_PERIPH;
2006 
2007 			/*
2008 			 * This may look a bit nonsensical, but it is
2009 			 * actually quite logical.  There are very few
2010 			 * peripheral drivers, and bloating every peripheral
2011 			 * structure with a pointer back to its parent
2012 			 * peripheral driver linker set entry would cost
2013 			 * more in the long run than doing this quick lookup.
2014 			 */
2015 			for (pdrv =
2016 			     (struct periph_driver **)periphdriver_set.ls_items;
2017 			     *pdrv != NULL; pdrv++) {
2018 				if (strcmp((*pdrv)->driver_name,
2019 				    periph->periph_name) == 0)
2020 					break;
2021 			}
2022 
2023 			if (*pdrv == NULL) {
2024 				cdm->status = CAM_DEV_MATCH_ERROR;
2025 				return(0);
2026 			}
2027 
2028 			cdm->pos.cookie.pdrv = pdrv;
2029 			/*
2030 			 * The periph generation slot does double duty, as
2031 			 * does the periph pointer slot.  They are used for
2032 			 * both edt and pdrv lookups and positioning.
2033 			 */
2034 			cdm->pos.cookie.periph = periph;
2035 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2036 				(*pdrv)->generation;
2037 			cdm->status = CAM_DEV_MATCH_MORE;
2038 			return(0);
2039 		}
2040 
2041 		j = cdm->num_matches;
2042 		cdm->num_matches++;
2043 		cdm->matches[j].type = DEV_MATCH_PERIPH;
2044 		cdm->matches[j].result.periph_result.path_id =
2045 			periph->path->bus->path_id;
2046 
2047 		/*
2048 		 * The transport layer peripheral doesn't have a target or
2049 		 * lun.
2050 		 */
2051 		if (periph->path->target)
2052 			cdm->matches[j].result.periph_result.target_id =
2053 				periph->path->target->target_id;
2054 		else
2055 			cdm->matches[j].result.periph_result.target_id = -1;
2056 
2057 		if (periph->path->device)
2058 			cdm->matches[j].result.periph_result.target_lun =
2059 				periph->path->device->lun_id;
2060 		else
2061 			cdm->matches[j].result.periph_result.target_lun = -1;
2062 
2063 		cdm->matches[j].result.periph_result.unit_number =
2064 			periph->unit_number;
2065 		strncpy(cdm->matches[j].result.periph_result.periph_name,
2066 			periph->periph_name, DEV_IDLEN);
2067 	}
2068 
2069 	return(1);
2070 }
2071 
2072 static int
2073 xptperiphlistmatch(struct ccb_dev_match *cdm)
2074 {
2075 	int ret;
2076 
2077 	cdm->num_matches = 0;
2078 
2079 	/*
2080 	 * At the corresponding point in the EDT traversal function, we
2081 	 * check the bus list generation to make sure that no busses have
2082 	 * been added or removed since the user last sent an XPT_DEV_MATCH
2083 	 * ccb through.  For the peripheral driver list traversal function,
2084 	 * however, we don't have to worry about new peripheral driver
2085 	 * types coming or going; they're in a linker set, and therefore
2086 	 * can't change without a recompile.
2087 	 */
2088 
2089 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2090 	 && (cdm->pos.cookie.pdrv != NULL))
2091 		ret = xptpdrvtraverse(
2092 			        (struct periph_driver **)cdm->pos.cookie.pdrv,
2093 				xptplistpdrvfunc, cdm);
2094 	else
2095 		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2096 
2097 	/*
2098 	 * If we get back 0, that means that we had to stop before fully
2099 	 * traversing the peripheral driver list.  It also means that one
2100 	 * of the subroutines has set the status field to the proper value.
2101 	 * If we get back 1, we've fully traversed the peripheral driver
2102 	 * list and copied out any matching entries.
2103 	 */
2104 	if (ret == 1)
2105 		cdm->status = CAM_DEV_MATCH_LAST;
2106 
2107 	return(ret);
2108 }
2109 
2110 static int
2111 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2112 {
2113 	struct cam_eb *bus, *next_bus;
2114 	int retval;
2115 
2116 	retval = 1;
2117 
2118 	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2119 	     bus != NULL;
2120 	     bus = next_bus) {
2121 		next_bus = TAILQ_NEXT(bus, links);
2122 
2123 		retval = tr_func(bus, arg);
2124 		if (retval == 0)
2125 			return(retval);
2126 	}
2127 
2128 	return(retval);
2129 }
2130 
2131 static int
2132 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2133 		  xpt_targetfunc_t *tr_func, void *arg)
2134 {
2135 	struct cam_et *target, *next_target;
2136 	int retval;
2137 
2138 	retval = 1;
2139 	for (target = (start_target ? start_target :
2140 		       TAILQ_FIRST(&bus->et_entries));
2141 	     target != NULL; target = next_target) {
2142 
2143 		next_target = TAILQ_NEXT(target, links);
2144 
2145 		retval = tr_func(target, arg);
2146 
2147 		if (retval == 0)
2148 			return(retval);
2149 	}
2150 
2151 	return(retval);
2152 }
2153 
2154 static int
2155 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2156 		  xpt_devicefunc_t *tr_func, void *arg)
2157 {
2158 	struct cam_ed *device, *next_device;
2159 	int retval;
2160 
2161 	retval = 1;
2162 	for (device = (start_device ? start_device :
2163 		       TAILQ_FIRST(&target->ed_entries));
2164 	     device != NULL;
2165 	     device = next_device) {
2166 
2167 		next_device = TAILQ_NEXT(device, links);
2168 
2169 		retval = tr_func(device, arg);
2170 
2171 		if (retval == 0)
2172 			return(retval);
2173 	}
2174 
2175 	return(retval);
2176 }
2177 
2178 static int
2179 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2180 		  xpt_periphfunc_t *tr_func, void *arg)
2181 {
2182 	struct cam_periph *periph, *next_periph;
2183 	int retval;
2184 
2185 	retval = 1;
2186 
2187 	for (periph = (start_periph ? start_periph :
2188 		       SLIST_FIRST(&device->periphs));
2189 	     periph != NULL;
2190 	     periph = next_periph) {
2191 
2192 		next_periph = SLIST_NEXT(periph, periph_links);
2193 
2194 		retval = tr_func(periph, arg);
2195 		if (retval == 0)
2196 			return(retval);
2197 	}
2198 
2199 	return(retval);
2200 }
2201 
2202 static int
2203 xptpdrvtraverse(struct periph_driver **start_pdrv,
2204 		xpt_pdrvfunc_t *tr_func, void *arg)
2205 {
2206 	struct periph_driver **pdrv;
2207 	int retval;
2208 
2209 	retval = 1;
2210 
2211 	/*
2212 	 * We don't traverse the peripheral driver list like we do the
2213 	 * other lists, because it is a linker set, and therefore cannot be
2214 	 * changed during runtime.  If the peripheral driver list is ever
2215 	 * re-done to be something other than a linker set (i.e. it can
2216 	 * change while the system is running), the list traversal should
2217 	 * be modified to work like the other traversal functions.
2218 	 */
2219 	for (pdrv = (start_pdrv ? start_pdrv :
2220 	     (struct periph_driver **)periphdriver_set.ls_items);
2221 	     *pdrv != NULL; pdrv++) {
2222 		retval = tr_func(pdrv, arg);
2223 
2224 		if (retval == 0)
2225 			return(retval);
2226 	}
2227 
2228 	return(retval);
2229 }
2230 
2231 static int
2232 xptpdperiphtraverse(struct periph_driver **pdrv,
2233 		    struct cam_periph *start_periph,
2234 		    xpt_periphfunc_t *tr_func, void *arg)
2235 {
2236 	struct cam_periph *periph, *next_periph;
2237 	int retval;
2238 
2239 	retval = 1;
2240 
2241 	for (periph = (start_periph ? start_periph :
2242 	     TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2243 	     periph = next_periph) {
2244 
2245 		next_periph = TAILQ_NEXT(periph, unit_links);
2246 
2247 		retval = tr_func(periph, arg);
2248 		if (retval == 0)
2249 			return(retval);
2250 	}
2251 	return(retval);
2252 }
2253 
2254 static int
2255 xptdefbusfunc(struct cam_eb *bus, void *arg)
2256 {
2257 	struct xpt_traverse_config *tr_config;
2258 
2259 	tr_config = (struct xpt_traverse_config *)arg;
2260 
2261 	if (tr_config->depth == XPT_DEPTH_BUS) {
2262 		xpt_busfunc_t *tr_func;
2263 
2264 		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2265 
2266 		return(tr_func(bus, tr_config->tr_arg));
2267 	} else
2268 		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2269 }
2270 
2271 static int
2272 xptdeftargetfunc(struct cam_et *target, void *arg)
2273 {
2274 	struct xpt_traverse_config *tr_config;
2275 
2276 	tr_config = (struct xpt_traverse_config *)arg;
2277 
2278 	if (tr_config->depth == XPT_DEPTH_TARGET) {
2279 		xpt_targetfunc_t *tr_func;
2280 
2281 		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2282 
2283 		return(tr_func(target, tr_config->tr_arg));
2284 	} else
2285 		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2286 }
2287 
2288 static int
2289 xptdefdevicefunc(struct cam_ed *device, void *arg)
2290 {
2291 	struct xpt_traverse_config *tr_config;
2292 
2293 	tr_config = (struct xpt_traverse_config *)arg;
2294 
2295 	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2296 		xpt_devicefunc_t *tr_func;
2297 
2298 		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2299 
2300 		return(tr_func(device, tr_config->tr_arg));
2301 	} else
2302 		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2303 }
2304 
2305 static int
2306 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2307 {
2308 	struct xpt_traverse_config *tr_config;
2309 	xpt_periphfunc_t *tr_func;
2310 
2311 	tr_config = (struct xpt_traverse_config *)arg;
2312 
2313 	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2314 
2315 	/*
2316 	 * Unlike the other default functions, we don't check for depth
2317 	 * here.  The peripheral driver level is the last level in the EDT,
2318 	 * so if we're here, we should execute the function in question.
2319 	 */
2320 	return(tr_func(periph, tr_config->tr_arg));
2321 }
2322 
2323 /*
2324  * Execute the given function for every bus in the EDT.
2325  */
2326 static int
2327 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2328 {
2329 	struct xpt_traverse_config tr_config;
2330 
2331 	tr_config.depth = XPT_DEPTH_BUS;
2332 	tr_config.tr_func = tr_func;
2333 	tr_config.tr_arg = arg;
2334 
2335 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2336 }
2337 
2338 /*
2339  * Execute the given function for every target in the EDT.
2340  */
2341 static int
2342 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2343 {
2344 	struct xpt_traverse_config tr_config;
2345 
2346 	tr_config.depth = XPT_DEPTH_TARGET;
2347 	tr_config.tr_func = tr_func;
2348 	tr_config.tr_arg = arg;
2349 
2350 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2351 }
2352 
2353 /*
2354  * Execute the given function for every device in the EDT.
2355  */
2356 static int
2357 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2358 {
2359 	struct xpt_traverse_config tr_config;
2360 
2361 	tr_config.depth = XPT_DEPTH_DEVICE;
2362 	tr_config.tr_func = tr_func;
2363 	tr_config.tr_arg = arg;
2364 
2365 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2366 }
2367 
2368 /*
2369  * Execute the given function for every peripheral in the EDT.
2370  */
2371 static int
2372 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2373 {
2374 	struct xpt_traverse_config tr_config;
2375 
2376 	tr_config.depth = XPT_DEPTH_PERIPH;
2377 	tr_config.tr_func = tr_func;
2378 	tr_config.tr_arg = arg;
2379 
2380 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2381 }
2382 
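/*
 * A minimal usage sketch (illustrative, not from this file) for the
 * xpt_for_all_* wrappers above.  The callback must have the matching
 * xpt_*func_t signature, and its return value follows the tr_func
 * contract of the traversal routines: return 1 to continue the walk,
 * 0 to abort it.
 *
 *	static int
 *	xptcountdevfunc(struct cam_ed *device, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return(1);
 *	}
 *
 *	int count = 0;
 *	xpt_for_all_devices(xptcountdevfunc, &count);
 */
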
2383 static int
2384 xptsetasyncfunc(struct cam_ed *device, void *arg)
2385 {
2386 	struct cam_path path;
2387 	struct ccb_getdev cgd;
2388 	struct async_node *cur_entry;
2389 
2390 	cur_entry = (struct async_node *)arg;
2391 
2392 	xpt_compile_path(&path,
2393 			 NULL,
2394 			 device->target->bus->path_id,
2395 			 device->target->target_id,
2396 			 device->lun_id);
2397 	xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2398 	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2399 	xpt_action((union ccb *)&cgd);
2400 	cur_entry->callback(cur_entry->callback_arg,
2401 			    AC_FOUND_DEVICE,
2402 			    &path, &cgd);
2403 	xpt_release_path(&path);
2404 
2405 	return(1);
2406 }
2407 static int
2408 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2409 {
2410 	struct cam_path path;
2411 	struct ccb_pathinq cpi;
2412 	struct async_node *cur_entry;
2413 
2414 	cur_entry = (struct async_node *)arg;
2415 
2416 	xpt_compile_path(&path, /*periph*/NULL,
2417 			 bus->sim->path_id,
2418 			 CAM_TARGET_WILDCARD,
2419 			 CAM_LUN_WILDCARD);
2420 	xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2421 	cpi.ccb_h.func_code = XPT_PATH_INQ;
2422 	xpt_action((union ccb *)&cpi);
2423 	cur_entry->callback(cur_entry->callback_arg,
2424 			    AC_PATH_REGISTERED,
2425 			    &path, &cpi);
2426 	xpt_release_path(&path);
2427 
2428 	return(1);
2429 }
2430 
2431 void
2432 xpt_action(union ccb *start_ccb)
2433 {
2434 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2435 
2436 	start_ccb->ccb_h.status = CAM_REQ_INPROG;
2437 
2438 	switch (start_ccb->ccb_h.func_code) {
2439 	case XPT_SCSI_IO:
2440 		/*
2441 		 * For the sake of compatibility with SCSI-1
2442 		 * devices that may not understand the identify
2443 		 * message, we include lun information in the
2444 		 * second byte of all commands.  SCSI-1 specifies
2445 		 * that luns are a 3 bit value and reserves only 3
2446 		 * bits for lun information in the CDB.  Later
2447 		 * revisions of the SCSI spec allow for more than 8
2448 		 * luns, but have deprecated lun information in the
2449 		 * CDB.  So, if the lun won't fit, we must omit it.
2450 		 *
2451 		 * Also be aware that during initial probing for devices,
2452 		 * the inquiry information is unknown but initialized to 0.
2453 		 * This means that this code will be exercised while probing
2454 		 * devices with an ANSI revision greater than 2.
2455 		 */
2456 		if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2457 		 && start_ccb->ccb_h.target_lun < 8
2458 		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2459 
2460 			start_ccb->csio.cdb_io.cdb_bytes[1] |=
2461 			    start_ccb->ccb_h.target_lun << 5;
2462 		}
2463 		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2464 		start_ccb->csio.sense_resid = 0;
2465 		start_ccb->csio.resid = 0;
2466 		/* FALLTHROUGH */
2467 	case XPT_TARGET_IO:
2468 	case XPT_CONT_TARGET_IO:
2469 	case XPT_ENG_EXEC:
2470 	{
2471 		struct cam_path *path;
2472 		int s;
2473 		int runq;
2474 
2475 		path = start_ccb->ccb_h.path;
2476 		s = splsoftcam();
2477 
2478 		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2479 		if (path->device->qfrozen_cnt == 0)
2480 			runq = xpt_schedule_dev_sendq(path->bus, path->device);
2481 		else
2482 			runq = 0;
2483 		splx(s);
2484 		if (runq != 0)
2485 			xpt_run_dev_sendq(path->bus);
2486 		break;
2487 	}
2488 	case XPT_SET_TRAN_SETTINGS:
2489 	{
2490 		xpt_set_transfer_settings(&start_ccb->cts,
2491 					  /*async_update*/FALSE);
2492 		break;
2493 	}
2494 	case XPT_CALC_GEOMETRY:
2495 		/* Filter out garbage */
2496 		if (start_ccb->ccg.block_size == 0
2497 		 || start_ccb->ccg.volume_size == 0) {
2498 			start_ccb->ccg.cylinders = 0;
2499 			start_ccb->ccg.heads = 0;
2500 			start_ccb->ccg.secs_per_track = 0;
2501 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2502 			break;
2503 		}
2504 #ifdef PC98
2505 		/*
2506 		 * In a PC-98 system, geometry translation depends on
2507 		 * the "real" device geometry obtained from mode page 4.
2508 		 * SCSI geometry translation is performed in the
2509 		 * initialization routine of the SCSI BIOS and the result
2510 		 * is stored in host memory.  If the translation is available
2511 		 * in host memory, use it.  If not, rely on the default
2512 		 * translation the device driver performs.
2513 		 */
2514 		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2515 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2516 			break;
2517 		}
2518 		/* FALLTHROUGH */
2519 #endif
2520 	case XPT_ACCEPT_TARGET_IO:
2521 	case XPT_EN_LUN:
2522 	case XPT_IMMED_NOTIFY:
2523 	case XPT_NOTIFY_ACK:
2524 	case XPT_GET_TRAN_SETTINGS:
2525 	case XPT_PATH_INQ:
2526 	case XPT_RESET_BUS:
2527 	{
2528 		struct cam_sim *sim;
2529 
2530 		sim = start_ccb->ccb_h.path->bus->sim;
2531 		(*(sim->sim_action))(sim, start_ccb);
2532 		break;
2533 	}
2534 	case XPT_GDEV_TYPE:
2535 		if ((start_ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) != 0) {
2536 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2537 		} else {
2538 			struct ccb_getdev *cgd;
2539 			struct cam_et *tar;
2540 			struct cam_ed *dev;
2541 			int s;
2542 
2543 			s = splsoftcam();
2544 			cgd = &start_ccb->cgd;
2545 			tar = cgd->ccb_h.path->target;
2546 			dev = cgd->ccb_h.path->device;
2547 			cgd->inq_data = dev->inq_data;
2548 			cgd->pd_type = SID_TYPE(&dev->inq_data);
2549 			cgd->dev_openings = dev->ccbq.dev_openings;
2550 			cgd->dev_active = dev->ccbq.dev_active;
2551 			cgd->devq_openings = dev->ccbq.devq_openings;
2552 			cgd->devq_queued = dev->ccbq.queue.entries;
2553 			cgd->held = dev->ccbq.held;
2554 			cgd->maxtags = dev->quirk->maxtags;
2555 			cgd->mintags = dev->quirk->mintags;
2556 			cgd->ccb_h.status = CAM_REQ_CMP;
2557 			cgd->serial_num_len = dev->serial_num_len;
2558 			if ((dev->serial_num_len > 0)
2559 			 && (dev->serial_num != NULL))
2560 				bcopy(dev->serial_num, cgd->serial_num,
2561 				      dev->serial_num_len);
2562 			splx(s);
2563 		}
2564 		break;
2565 	case XPT_GDEVLIST:
2566 	{
2567 		struct cam_periph	*nperiph;
2568 		struct periph_list	*periph_head;
2569 		struct ccb_getdevlist	*cgdl;
2570 		int			i;
2571 		int			s;
2572 		struct cam_ed		*device;
2573 		int			found;
2574 
2575 
2576 		found = 0;
2577 
2578 		/*
2579 		 * Don't want anyone mucking with our data.
2580 		 */
2581 		s = splsoftcam();
2582 		device = start_ccb->ccb_h.path->device;
2583 		periph_head = &device->periphs;
2584 		cgdl = &start_ccb->cgdl;
2585 
2586 		/*
2587 		 * Check and see if the list has changed since the user
2588 		 * last requested a list member.  If so, tell them that the
2589 		 * list has changed, and therefore they need to start over
2590 		 * from the beginning.
2591 		 */
2592 		if ((cgdl->index != 0) &&
2593 		    (cgdl->generation != device->generation)) {
2594 			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2595 			splx(s);
2596 			break;
2597 		}
2598 
2599 		/*
2600 		 * Traverse the list of peripherals and attempt to find
2601 		 * the requested peripheral.
2602 		 */
2603 		for (nperiph = periph_head->slh_first, i = 0;
2604 		     (nperiph != NULL) && (i <= cgdl->index);
2605 		     nperiph = nperiph->periph_links.sle_next, i++) {
2606 			if (i == cgdl->index) {
2607 				strncpy(cgdl->periph_name,
2608 					nperiph->periph_name,
2609 					DEV_IDLEN);
2610 				cgdl->unit_number = nperiph->unit_number;
2611 				found = 1;
2612 			}
2613 		}
2614 		if (found == 0) {
2615 			cgdl->status = CAM_GDEVLIST_ERROR;
2616 			splx(s);
2617 			break;
2618 		}
2619 
2620 		if (nperiph == NULL)
2621 			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2622 		else
2623 			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2624 
2625 		cgdl->index++;
2626 		cgdl->generation = device->generation;
2627 
2628 		splx(s);
2629 		cgdl->ccb_h.status = CAM_REQ_CMP;
2630 		break;
2631 	}
2632 	case XPT_DEV_MATCH:
2633 	{
2634 		int s;
2635 		dev_pos_type position_type;
2636 		struct ccb_dev_match *cdm;
2637 		int ret;
2638 
2639 		cdm = &start_ccb->cdm;
2640 
2641 		/*
2642 		 * Prevent EDT changes while we traverse it.
2643 		 */
2644 		s = splsoftcam();
2645 		/*
2646 		 * There are two ways of getting at information in the EDT.
2647 		 * The first way is via the primary EDT tree.  It starts
2648 		 * with a list of busses, then a list of targets on a bus,
2649 		 * then devices/luns on a target, and then peripherals on a
2650 		 * device/lun.  The "other" way is by the peripheral driver
2651 		 * lists.  The peripheral driver lists are organized,
2652 		 * obviously, by peripheral driver.  So it makes sense to
2653 		 * use the peripheral driver list if the user is looking
2654 		 * for something like "da1", or all "da" devices.  If the
2655 		 * user is looking for something on a particular bus/target
2656 		 * or lun, it's generally better to go through the EDT tree.
2657 		 */
2658 
2659 		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2660 			position_type = cdm->pos.position_type;
2661 		else {
2662 			int i;
2663 
2664 			position_type = CAM_DEV_POS_NONE;
2665 
2666 			for (i = 0; i < cdm->num_patterns; i++) {
2667 				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2668 				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
2669 					position_type = CAM_DEV_POS_EDT;
2670 					break;
2671 				}
2672 			}
2673 
2674 			if (cdm->num_patterns == 0)
2675 				position_type = CAM_DEV_POS_EDT;
2676 			else if (position_type == CAM_DEV_POS_NONE)
2677 				position_type = CAM_DEV_POS_PDRV;
2678 		}
2679 
2680 		switch(position_type & CAM_DEV_POS_TYPEMASK) {
2681 		case CAM_DEV_POS_EDT:
2682 			ret = xptedtmatch(cdm);
2683 			break;
2684 		case CAM_DEV_POS_PDRV:
2685 			ret = xptperiphlistmatch(cdm);
2686 			break;
2687 		default:
2688 			cdm->status = CAM_DEV_MATCH_ERROR;
2689 			break;
2690 		}
2691 
2692 		splx(s);
2693 
2694 		if (cdm->status == CAM_DEV_MATCH_ERROR)
2695 			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2696 		else
2697 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2698 
2699 		break;
2700 	}
2701 	case XPT_SASYNC_CB:
2702 	{
2703 		/*
2704 		 * First off, determine the list we want to
2705 		 * be inserted into.
2706 		 */
2707 		struct ccb_setasync *csa;
2708 		struct async_node *cur_entry;
2709 		struct async_list *async_head;
2710 		u_int32_t added;
2711 		int s;
2712 
2713 		csa = &start_ccb->csa;
2714 		added = csa->event_enable;
2715 		if (csa->ccb_h.path->device != NULL) {
2716 			async_head = &csa->ccb_h.path->device->asyncs;
2717 		} else {
2718 			async_head = &csa->ccb_h.path->bus->asyncs;
2719 		}
2720 
2721 		/*
2722 		 * If there is already an entry for us, simply
2723 		 * update it.
2724 		 */
2725 		s = splsoftcam();
2726 		cur_entry = SLIST_FIRST(async_head);
2727 		while (cur_entry != NULL) {
2728 			if ((cur_entry->callback_arg == csa->callback_arg)
2729 			 && (cur_entry->callback == csa->callback))
2730 				break;
2731 			cur_entry = SLIST_NEXT(cur_entry, links);
2732 		}
2733 
2734 		if (cur_entry != NULL) {
2735 		 	/*
2736 			 * If the request has no flags set,
2737 			 * remove the entry.
2738 			 */
2739 			added &= ~cur_entry->event_enable;
2740 			if (csa->event_enable == 0) {
2741 				SLIST_REMOVE(async_head, cur_entry,
2742 					     async_node, links);
2743 				free(cur_entry, M_DEVBUF);
2744 			} else {
2745 				cur_entry->event_enable = csa->event_enable;
2746 			}
2747 		} else {
2748 			cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF,
2749 					   M_NOWAIT);
2750 			if (cur_entry == NULL) {
2751 				splx(s);
2752 				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
2753 				break;
2754 			}
2755 			cur_entry->callback_arg = csa->callback_arg;
2756 			cur_entry->callback = csa->callback;
2757 			cur_entry->event_enable = csa->event_enable;
2758 			SLIST_INSERT_HEAD(async_head, cur_entry, links);
2759 		}
2760 
2761 		if ((added & AC_FOUND_DEVICE) != 0) {
2762 			/*
2763 			 * Get this peripheral up to date with all
2764 			 * the currently existing devices.
2765 			 */
2766 			xpt_for_all_devices(xptsetasyncfunc, cur_entry);
2767 		}
2768 		if ((added & AC_PATH_REGISTERED) != 0) {
2769 			/*
2770 			 * Get this peripheral up to date with all
2771 			 * the currently existing busses.
2772 			 */
2773 			xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
2774 		}
2775 		splx(s);
2776 		start_ccb->ccb_h.status = CAM_REQ_CMP;
2777 		break;
2778 	}
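	/*
	 * A hedged registration sketch (assumed, not taken from this
	 * file): a peripheral driver signs up for async events by
	 * filling out a ccb_setasync and pushing it through xpt_action.
	 * Re-sending the same callback/callback_arg pair with
	 * event_enable == 0 removes the entry, per the removal branch
	 * above.  "mycallback" and "mysoftc" are placeholders.
	 *
	 *	struct ccb_setasync csa;
	 *
	 *	xpt_setup_ccb(&csa.ccb_h, path, 1);
	 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
	 *	csa.event_enable = AC_FOUND_DEVICE | AC_PATH_REGISTERED;
	 *	csa.callback = mycallback;
	 *	csa.callback_arg = mysoftc;
	 *	xpt_action((union ccb *)&csa);
	 */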
2779 	case XPT_REL_SIMQ:
2780 	{
2781 		struct ccb_relsim *crs;
2782 		struct cam_ed *dev;
2783 		int s;
2784 
2785 		crs = &start_ccb->crs;
2786 		dev = crs->ccb_h.path->device;
2787 		if (dev == NULL) {
2788 
2789 			crs->ccb_h.status = CAM_DEV_NOT_THERE;
2790 			break;
2791 		}
2792 
2793 		s = splcam();
2794 
2795 		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
2796 
2797  			if ((dev->inq_data.flags & SID_CmdQue) != 0) {
2798 				int reduction;
2799 
2800 				/* Don't ever go below one opening */
2801 				if (crs->openings > 0) {
2802 					xpt_dev_ccbq_resize(crs->ccb_h.path,
2803 							    crs->openings);
2804 
2805 					if (bootverbose) {
2806 						xpt_print_path(crs->ccb_h.path);
2807 						printf("tagged openings "
2808 						       "now %d\n",
2809 						       crs->openings);
2810 					}
2811 				}
2812 			}
2813 		}
2814 
2815 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
2816 
2817 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
2818 
2819 				/*
2820 				 * Just extend the old timeout and decrement
2821 				 * the freeze count so that a single timeout
2822 				 * is sufficient for releasing the queue.
2823 				 */
2824 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2825 				untimeout(xpt_release_devq_timeout,
2826 					  dev, dev->c_handle);
2827 			} else {
2828 
2829 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2830 			}
2831 
2832 			dev->c_handle =
2833 				timeout(xpt_release_devq_timeout,
2834 					dev,
2835 					(crs->release_timeout * hz) / 1000);
2836 
2837 			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
2838 
2839 		}
2840 
2841 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
2842 
2843 			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
2844 				/*
2845 				 * Decrement the freeze count so that a single
2846 				 * completion is still sufficient to unfreeze
2847 				 * the queue.
2848 				 */
2849 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2850 			} else {
2851 
2852 				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
2853 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2854 			}
2855 		}
2856 
2857 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
2858 
2859 			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
2860 			 || (dev->ccbq.dev_active == 0)) {
2861 
2862 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2863 			} else {
2864 
2865 				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
2866 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2867 			}
2868 		}
2869 		splx(s);
2870 
2871 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
2872 
2873 			xpt_release_devq(crs->ccb_h.path->device,
2874 					 /*run_queue*/TRUE);
2875 		}
2876 		start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
2877 		start_ccb->ccb_h.status = CAM_REQ_CMP;
2878 		break;
2879 	}
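	/*
	 * A usage sketch (an assumption for illustration; the field
	 * names are the ones consumed by the XPT_REL_SIMQ case above).
	 * release_timeout is in milliseconds, as implied by the
	 * (crs->release_timeout * hz) / 1000 conversion.
	 *
	 *	struct ccb_relsim crs;
	 *
	 *	xpt_setup_ccb(&crs.ccb_h, path, 1);
	 *	crs.ccb_h.func_code = XPT_REL_SIMQ;
	 *	crs.release_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
	 *	crs.release_timeout = 1000;
	 *	crs.openings = 0;
	 *	xpt_action((union ccb *)&crs);
	 */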
2880 	case XPT_SCAN_BUS:
2881 		xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
2882 		break;
2883 	case XPT_SCAN_LUN:
2884 		xpt_scan_lun(start_ccb->ccb_h.path->periph,
2885 			     start_ccb->ccb_h.path, start_ccb->crcn.flags,
2886 			     start_ccb);
2887 		break;
2888 	case XPT_DEBUG: {
2889 #ifdef CAMDEBUG
2890 		int s;
2891 
2892 		s = splcam();
2893 		cam_dflags = start_ccb->cdbg.flags;
2894 		if (cam_dpath != NULL) {
2895 			xpt_free_path(cam_dpath);
2896 			cam_dpath = NULL;
2897 		}
2898 
2899 		if (cam_dflags != CAM_DEBUG_NONE) {
2900 			if (xpt_create_path(&cam_dpath, xpt_periph,
2901 					    start_ccb->ccb_h.path_id,
2902 					    start_ccb->ccb_h.target_id,
2903 					    start_ccb->ccb_h.target_lun) !=
2904 					    CAM_REQ_CMP) {
2905 				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2906 				cam_dflags = CAM_DEBUG_NONE;
2907 			} else
2908 				start_ccb->ccb_h.status = CAM_REQ_CMP;
2909 		} else {
2910 			cam_dpath = NULL;
2911 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2912 		}
2913 		splx(s);
2914 #else /* !CAMDEBUG */
2915 		start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2916 #endif /* CAMDEBUG */
2917 		break;
2918 	}
2919 	case XPT_NOOP:
2920 		start_ccb->ccb_h.status = CAM_REQ_CMP;
2921 		break;
2922 	default:
2923 	case XPT_SDEV_TYPE:
2924 	case XPT_ABORT:
2925 	case XPT_RESET_DEV:
2926 	case XPT_TERM_IO:
2927 	case XPT_ENG_INQ:
2928 		/* XXX Implement */
2929 		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
2930 		break;
2931 	}
2932 }
2933 
2934 void
2935 xpt_polled_action(union ccb *start_ccb)
2936 {
2937 	int	  s;
2938 	u_int32_t timeout;
2939 	struct	  cam_sim *sim;
2940 	struct	  cam_devq *devq;
2941 	struct	  cam_ed *dev;
2942 
2943 	timeout = start_ccb->ccb_h.timeout;
2944 	sim = start_ccb->ccb_h.path->bus->sim;
2945 	devq = sim->devq;
2946 	dev = start_ccb->ccb_h.path->device;
2947 
2948 	s = splcam();
2949 
2950 	/*
2951 	 * Steal an opening so that no other queued requests
2952 	 * can get it before us while we simulate interrupts.
2953 	 */
2954 	dev->ccbq.devq_openings--;
2955 	dev->ccbq.dev_openings--;
2956 
2957 	while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
2958 	   && (--timeout > 0)) {
2959 		DELAY(1000);
2960 		(*(sim->sim_poll))(sim);
2961 		swi_camnet();
2962 		swi_cambio();
2963 	}
2964 
2965 	dev->ccbq.devq_openings++;
2966 	dev->ccbq.dev_openings++;
2967 
2968 	if (timeout != 0) {
2969 		xpt_action(start_ccb);
2970 		while(--timeout > 0) {
2971 			(*(sim->sim_poll))(sim);
2972 			swi_camnet();
2973 			swi_cambio();
2974 			if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
2975 			    != CAM_REQ_INPROG)
2976 				break;
2977 			DELAY(1000);
2978 		}
2979 		if (timeout == 0) {
2980 			/*
2981 			 * XXX Is it worth adding a sim_timeout entry
2982 			 * point so we can attempt recovery?  If
2983 			 * this is only used for dumps, I don't think
2984 			 * it is.
2985 			 */
2986 			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2987 		}
2988 	} else {
2989 		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2990 	}
2991 	splx(s);
2992 }
2993 
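/*
 * xpt_polled_action is intended for contexts where interrupt services
 * are unavailable, such as crash dumps.  A hedged sketch of a caller
 * (the csio setup is assumed, not taken from this file); note that the
 * poll budget comes from ccb_h.timeout, in milliseconds:
 *
 *	struct ccb_scsiio csio;
 *
 *	xpt_setup_ccb(&csio.ccb_h, path, 1);
 *	csio.ccb_h.func_code = XPT_SCSI_IO;
 *	csio.ccb_h.timeout = 10000;
 *	fill in the CDB, data pointers, and flags as usual
 *	xpt_polled_action((union ccb *)&csio);
 *	if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
 *		recover or report the error
 */
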
2994 /*
2995  * Schedule a peripheral driver to receive a ccb when its
2996  * target device has space for more transactions.
2997  */
2998 void
2999 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3000 {
3001 	struct cam_ed *device;
3002 	int s;
3003 	int runq;
3004 
3005 	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3006 	device = perph->path->device;
3007 	s = splsoftcam();
3008 	if (periph_is_queued(perph)) {
3009 		/* Simply reorder based on new priority */
3010 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3011 			  ("   change priority to %d\n", new_priority));
3012 		if (new_priority < perph->pinfo.priority) {
3013 			camq_change_priority(&device->drvq,
3014 					     perph->pinfo.index,
3015 					     new_priority);
3016 		}
3017 		runq = 0;
3018 	} else {
3019 		/* New entry on the queue */
3020 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3021 			  ("   added periph to queue\n"));
3022 		if (device->drvq.generation++ == 0) {
3023 			/* Generation wrap, regen all entries */
3024 			camq_regen(&device->drvq);
3025 		}
3026 		perph->pinfo.priority = new_priority;
3027 		perph->pinfo.generation = device->drvq.generation;
3028 		camq_insert(&device->drvq, &perph->pinfo);
3029 		runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3030 	}
3031 	splx(s);
3032 	if (runq != 0) {
3033 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3034 			  ("   calling xpt_run_devq\n"));
3035 		xpt_run_dev_allocq(perph->path->bus);
3036 	}
3037 }
3038 
3039 
3040 /*
3041  * Schedule a device to run on a given queue.
3042  * If the device was inserted as a new entry on the queue,
3043  * return 1 meaning the device queue should be run. If we
3044  * were already queued, implying someone else has already
3045  * started the queue, return 0 so the caller doesn't attempt
3046  * to run the queue.  Must be run at splsoftcam or splcam
3047  * (splcam encompasses splsoftcam).
3048  */
3049 static int
3050 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3051 		 u_int32_t new_priority)
3052 {
3053 	int retval;
3054 	u_int32_t old_priority;
3055 
3056 	CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("xpt_schedule_dev\n"));
3057 
3058 	old_priority = pinfo->priority;
3059 
3060 	/*
3061 	 * Are we already queued?
3062 	 */
3063 	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3064 		/* Simply reorder based on new priority */
3065 		if (new_priority < old_priority) {
3066 			camq_change_priority(queue, pinfo->index,
3067 					     new_priority);
3068 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
3069 					("changed priority to %d\n",
3070 					 new_priority));
3071 		}
3072 		retval = 0;
3073 	} else {
3074 		/* New entry on the queue */
3075 		if (new_priority < old_priority)
3076 			pinfo->priority = new_priority;
3077 
3078 		CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
3079 				("Inserting onto queue\n"));
3080 		if (queue->generation++ == 0) {
3081 			/* Generation wrap, regen all entries */
3082 			camq_regen(queue);
3083 		}
3084 		pinfo->generation = queue->generation;
3085 		camq_insert(queue, pinfo);
3086 		retval = 1;
3087 	}
3088 	return (retval);
3089 }
3090 
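/*
 * The typical call pattern for the xpt_schedule_dev_*() wrappers built
 * on xpt_schedule_dev mirrors the XPT_SCSI_IO case in xpt_action:
 * queue the work at splsoftcam, and only the caller that sees a
 * nonzero return runs the queue.
 *
 *	s = splsoftcam();
 *	runq = xpt_schedule_dev_sendq(bus, device);
 *	splx(s);
 *	if (runq != 0)
 *		xpt_run_dev_sendq(bus);
 */
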
3091 static void
3092 xpt_run_dev_allocq(struct cam_eb *bus)
3093 {
3094 	struct	cam_devq *devq;
3095 	int	s;
3096 
3097 	CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("xpt_run_dev_allocq\n"));
3098 	devq = bus->sim->devq;
3099 
3100 	CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
3101 			("   qfrozen_cnt == 0x%x, entries == %d, "
3102 			 "openings == %d, active == %d\n",
3103 			 devq->alloc_queue.qfrozen_cnt,
3104 			 devq->alloc_queue.entries,
3105 			 devq->alloc_openings,
3106 			 devq->alloc_active));
3107 
3108 	s = splsoftcam();
3109 	devq->alloc_queue.qfrozen_cnt++;
3110 	while ((devq->alloc_queue.entries > 0)
3111 	    && (devq->alloc_openings > 0)
3112 	    && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3113 		struct	cam_ed_qinfo *qinfo;
3114 		struct	cam_ed *device;
3115 		union	ccb *work_ccb;
3116 		struct	cam_periph *drv;
3117 		struct	camq *drvq;
3118 
3119 		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3120 							   /*position*/0);
3121 		device = qinfo->device;
3122 
3123 		CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
3124 				("running device %p\n", device));
3125 
3126 		drvq = &device->drvq;
3127 
3128 #ifdef CAMDEBUG
3129 		if (drvq->entries <= 0) {
3130 			panic("xpt_run_dev_allocq: "
3131 			      "Device on queue without any work to do");
3132 		}
3133 #endif
3134 		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3135 			devq->alloc_openings--;
3136 			devq->alloc_active++;
3137 			drv = (struct cam_periph*)camq_remove(drvq,
3138 							      /*pos*/0);
3139 			/* Update priority */
3140 			if (drvq->entries > 0) {
3141 				qinfo->pinfo.priority = drvq->queue_array[0]->priority;
3142 			} else {
3143 				qinfo->pinfo.priority = CAM_PRIORITY_NONE;
3144 			}
3145 			splx(s);
3146 			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3147 				      drv->pinfo.priority);
3148 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
3149 					("calling periph start\n"));
3150 			drv->periph_start(drv, work_ccb);
3151 		} else {
3152 			/*
3153 			 * Malloc failure in alloc_ccb
3154 			 */
3155 			/*
3156 			 * XXX add us to a list to be run from free_ccb
3157 			 * if we don't have any ccbs active on this
3158 			 * device queue otherwise we may never get run
3159 			 * again.
3160 			 */
3161 			break;
3162 		}
3163 
3164 		/* Raise IPL for possible insertion and test at top of loop */
3165 		s = splsoftcam();
3166 
3167 		if (drvq->entries > 0) {
3168 			/* We have more work.  Attempt to reschedule */
3169 			xpt_schedule_dev_allocq(bus, device);
3170 		}
3171 	}
3172 	devq->alloc_queue.qfrozen_cnt--;
3173 	splx(s);
3174 }
3175 
3176 static void
3177 xpt_run_dev_sendq(struct cam_eb *bus)
3178 {
3179 	struct	cam_devq *devq;
3180 	int	s;
3181 
3182 	CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("xpt_run_dev_sendq\n"));
3183 
3184 	devq = bus->sim->devq;
3185 
3186 	s = splcam();
3187 	devq->send_queue.qfrozen_cnt++;
3188 	splx(s);
3189 	s = splsoftcam();
3190 	while ((devq->send_queue.entries > 0)
3191 	    && (devq->send_openings > 0)) {
3192 		struct	cam_ed_qinfo *qinfo;
3193 		struct	cam_ed *device;
3194 		union ccb *work_ccb;
3195 		struct	cam_sim *sim;
3196 		int	ospl;
3197 
3198 		ospl = splcam();
3199 		if (devq->send_queue.qfrozen_cnt > 1) {
3200 			splx(ospl);
3201 			break;
3202 		}
3203 
3204 		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3205 							   /*position*/0);
3206 		device = qinfo->device;
3207 
3208 		/*
3209 		 * If the device has been "frozen", don't attempt
3210 		 * to run it.
3211 		 */
3212 		if (device->qfrozen_cnt > 0) {
3213 			splx(ospl);
3214 			continue;
3215 		}
3216 
3217 		CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
3218 				("running device %p\n", device));
3219 
3220 		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, 0);
3221 		if (work_ccb == NULL) {
3222 			printf("device on run queue with no ccbs???\n");
3223 			splx(ospl);
3224 			continue;
3225 		}
3226 
3227 		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3228 
3229 			if (num_highpower <= 0) {
3230 				/*
3231 				 * We got a high power command, but we
3232 				 * don't have any available slots.  Freeze
3233 				 * the device queue until we have a slot
3234 				 * available.
3235 				 */
3236 				device->qfrozen_cnt++;
3237 				STAILQ_INSERT_TAIL(&highpowerq,
3238 						   &work_ccb->ccb_h,
3239 						   xpt_links.stqe);
3240 
3241 				splx(ospl);
3242 				continue;
3243 			} else {
3244 				/*
3245 				 * Consume a high power slot while
3246 				 * this ccb runs.
3247 				 */
3248 				num_highpower--;
3249 			}
3250 		}
3251 		devq->active_dev = device;
3252 		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3253 
3254 		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3255 		splx(ospl);
3256 
3257 		devq->send_openings--;
3258 		devq->send_active++;
3259 
3260 		if (device->ccbq.queue.entries > 0) {
3261 			qinfo->pinfo.priority =
3262 			    device->ccbq.queue.queue_array[0]->priority;
3263 			xpt_schedule_dev_sendq(bus, device);
3264 		} else {
3265 			qinfo->pinfo.priority = CAM_PRIORITY_NONE;
3266 		}
3267 
3268 		if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3269 			/*
3270 			 * The client wants to freeze the queue
3271 			 * after this CCB is sent.
3272 			 */
3273 			ospl = splcam();
3274 			device->qfrozen_cnt++;
3275 			splx(ospl);
3276 		}
3277 
3278 		splx(s);
3279 
3280 		if ((device->inq_flags & SID_CmdQue) != 0)
3281 			work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3282 		else
3283 			/*
3284 			 * Clear this in case of a retried CCB that failed
3285 			 * due to a rejected tag.
3286 			 */
3287 			work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3288 
3289 		/*
3290 		 * Device queues can be shared among multiple sim instances
3291 		 * that reside on different busses.  Use the SIM in the queue
3292 		 * CCB's path, rather than the one in the bus that was passed
3293 		 * into this function.
3294 		 */
3295 		sim = work_ccb->ccb_h.path->bus->sim;
3296 		(*(sim->sim_action))(sim, work_ccb);
3297 
3298 		ospl = splcam();
3299 		devq->active_dev = NULL;
3300 		splx(ospl);
3301 		/* Raise IPL for possible insertion and test at top of loop */
3302 		s = splsoftcam();
3303 	}
3304 	splx(s);
3305 	s = splcam();
3306 	devq->send_queue.qfrozen_cnt--;
3307 	splx(s);
3308 }
3309 
3310 /*
3311  * This function merges the request contents of the slave ccb into the
3312  * master ccb, while keeping important header fields in the master constant.
3313  */
3314 void
3315 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3316 {
3317 	/*
3318 	 * Pull fields that are valid for peripheral drivers to set
3319 	 * into the master CCB along with the CCB "payload".
3320 	 */
3321 	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3322 	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3323 	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3324 	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3325 	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3326 	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3327 }
3328 
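/*
 * A hedged sketch of the intended use (an assumption): a peripheral
 * builds its request in a scratch "slave" ccb and merges it into a
 * "master" ccb it already owns, so the master keeps its path and
 * queueing state while taking on the slave's request.
 *
 *	union ccb slave;
 *
 *	xpt_setup_ccb(&slave.ccb_h, path, priority);
 *	fill in the request fields of the slave here
 *	xpt_merge_ccb(master_ccb, &slave);
 */
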
3329 void
3330 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3331 {
3332 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3333 	ccb_h->pinfo.priority = priority;
3334 	ccb_h->path = path;
3335 	ccb_h->path_id = path->bus->path_id;
3336 	if (path->target)
3337 		ccb_h->target_id = path->target->target_id;
3338 	else
3339 		ccb_h->target_id = CAM_TARGET_WILDCARD;
3340 	if (path->device) {
3341 		if (path->device->ccbq.queue.generation++ == 0) {
3342 			/* Generation wrap, regen all entries */
3343 			cam_ccbq_regen(&path->device->ccbq);
3344 		}
3345 		ccb_h->target_lun = path->device->lun_id;
3346 		ccb_h->pinfo.generation = path->device->ccbq.queue.generation;
3347 	} else {
3348 		ccb_h->target_lun = CAM_LUN_WILDCARD;
3349 	}
3350 	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3351 	ccb_h->flags = 0;
3352 }
3353 
3354 /* Path manipulation functions */
3355 cam_status
3356 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3357 		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3358 {
3359 	struct	   cam_path *path;
3360 	cam_status status;
3361 
3362 	path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT);
3363 
3364 	if (path == NULL) {
3365 		status = CAM_RESRC_UNAVAIL;
3366 		return(status);
3367 	}
3368 	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3369 	if (status != CAM_REQ_CMP) {
3370 		free(path, M_DEVBUF);
3371 		path = NULL;
3372 	}
3373 	*new_path_ptr = path;
3374 	return (status);
3375 }
3376 
3377 static cam_status
3378 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3379 		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3380 {
3381 	struct	     cam_eb *bus;
3382 	struct	     cam_et *target;
3383 	struct	     cam_ed *device;
3384 	cam_status   status;
3385 	int	     s;
3386 
3387 	status = CAM_REQ_CMP;	/* Completed without error */
3388 	target = NULL;		/* Wildcarded */
3389 	device = NULL;		/* Wildcarded */
3390 	s = splsoftcam();
3391 	bus = xpt_find_bus(path_id);
3392 	if (bus == NULL) {
3393 		status = CAM_PATH_INVALID;
3394 	} else if (target_id != CAM_TARGET_WILDCARD) {
3395 		target = xpt_find_target(bus, target_id);
3396 		if (target == NULL) {
3397 			if (path_id == CAM_XPT_PATH_ID) {
3398 				status = CAM_TID_INVALID;
3399 			} else {
3400 				/* Create one */
3401 				struct cam_et *new_target;
3402 
3403 				new_target = xpt_alloc_target(bus, target_id);
3404 				if (new_target == NULL) {
3405 					status = CAM_RESRC_UNAVAIL;
3406 				} else {
3407 					target = new_target;
3408 				}
3409 			}
3410 		}
3411 		if (target != NULL && lun_id != CAM_LUN_WILDCARD) {
3412 			device = xpt_find_device(target, lun_id);
3413 			if (device == NULL) {
3414 				if (path_id == CAM_XPT_PATH_ID) {
3415 					status = CAM_LUN_INVALID;
3416 				} else {
3417 					/* Create one */
3418 					struct cam_ed *new_device;
3419 
3420 					new_device = xpt_alloc_device(bus,
3421 								      target,
3422 								      lun_id);
3423 					if (new_device == NULL) {
3424 						status = CAM_RESRC_UNAVAIL;
3425 					} else {
3426 						device = new_device;
3427 					}
3428 				}
3429 			}
3430 		}
3431 	} else if (lun_id != CAM_LUN_WILDCARD) {
3432 		/*
3433 		 * Specific luns are not allowed if the
3434 		 * target is wildcarded
3435 		 */
3436 		status = CAM_LUN_INVALID;
3437 	}
3438 
3439 	/*
3440 	 * Only touch the user's data if we are successful.
3441 	 */
3442 	if (status == CAM_REQ_CMP) {
3443 		new_path->periph = perph;
3444 		new_path->bus = bus;
3445 		new_path->target = target;
3446 		new_path->device = device;
3447 		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3448 	} else {
3449 		if (device != NULL)
3450 			xpt_release_device(bus, target, device);
3451 		if (target != NULL)
3452 			xpt_release_target(bus, target);
3453 	}
3454 	splx(s);
3455 	return (status);
3456 }
3457 
3458 static void
3459 xpt_release_path(struct cam_path *path)
3460 {
3461 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3462 	if (path->device != NULL)
3463 		xpt_release_device(path->bus, path->target, path->device);
3464 	if (path->target != NULL)
3465 		xpt_release_target(path->bus, path->target);
3466 }
3467 
3468 void
3469 xpt_free_path(struct cam_path *path)
3470 {
3471 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3472 	xpt_release_path(path);
3473 	free(path, M_DEVBUF);
3474 }
3475 
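/*
 * Usage sketch for the path constructors above (illustrative; the
 * XPT_DEBUG handler in xpt_action is an in-tree caller): check the
 * returned cam_status before using the path, and release it with
 * xpt_free_path when done.
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, periph, path_id, target_id,
 *			    lun_id) != CAM_REQ_CMP)
 *		bail out here
 *	use the path
 *	xpt_free_path(path);
 */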
3476 
3477 /*
3478  * Return -1 for failure, 0 for exact match, 1 for match with wildcards.
3479  */
3480 int
3481 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3482 {
3483 	int retval = 0;
3484 
3485 	if (path1->bus != path2->bus) {
3486 		if ((path1->bus == NULL)
3487 		 || (path2->bus == NULL))
3488 			retval = 1;
3489 		else
3490 			return (-1);
3491 	}
3492 	if (path1->target != path2->target) {
3493 		if ((path1->target == NULL)
3494 		 || (path2->target == NULL))
3495 			retval = 1;
3496 		else
3497 			return (-1);
3498 	}
3499 	if (path1->device != path2->device) {
3500 		if ((path1->device == NULL)
3501 		 || (path2->device == NULL))
3502 			retval = 1;
3503 		else
3504 			return (-1);
3505 	}
3506 	return (retval);
3507 }
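
/*
 * Worked example of the return convention above: comparing a fully
 * specified path (bus 0, target 1, lun 0) against the wildcard path
 * (same bus, NULL target, NULL device) yields 1; comparing it against
 * an identical fully specified path yields 0; comparing paths on two
 * different, non-NULL busses yields -1.
 */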
3508 
3509 void
3510 xpt_print_path(struct cam_path *path)
3511 {
3512 	if (path == NULL)
3513 		printf("(nopath): ");
3514 	else {
3515 		if (path->periph != NULL)
3516 			printf("(%s%d:", path->periph->periph_name,
3517 			       path->periph->unit_number);
3518 		else
3519 			printf("(noperiph:");
3520 
3521 		if (path->bus != NULL)
3522 			printf("%s%d:%d:", path->bus->sim->sim_name,
3523 			       path->bus->sim->unit_number,
3524 			       path->bus->sim->bus_id);
3525 		else
3526 			printf("nobus:");
3527 
3528 		if (path->target != NULL)
3529 			printf("%d:", path->target->target_id);
3530 		else
3531 			printf("X:");
3532 
3533 		if (path->device != NULL)
3534 			printf("%d): ", path->device->lun_id);
3535 		else
3536 			printf("X): ");
3537 	}
3538 }
3539 
3540 path_id_t
3541 xpt_path_path_id(struct cam_path *path)
3542 {
3543 	return(path->bus->path_id);
3544 }
3545 
3546 target_id_t
3547 xpt_path_target_id(struct cam_path *path)
3548 {
3549 	if (path->target != NULL)
3550 		return (path->target->target_id);
3551 	else
3552 		return (CAM_TARGET_WILDCARD);
3553 }
3554 
3555 lun_id_t
3556 xpt_path_lun_id(struct cam_path *path)
3557 {
3558 	if (path->device != NULL)
3559 		return (path->device->lun_id);
3560 	else
3561 		return (CAM_LUN_WILDCARD);
3562 }
3563 
3564 struct cam_sim *
3565 xpt_path_sim(struct cam_path *path)
3566 {
3567 	return (path->bus->sim);
3568 }
3569 
3570 struct cam_periph*
3571 xpt_path_periph(struct cam_path *path)
3572 {
3573 	return (path->periph);
3574 }
3575 
3576 /*
3577  * Release a CAM control block for the caller.  Remit the cost of the structure
3578  * to the device referenced by the path.  If this device had no 'credits'
3579  * and peripheral drivers have registered async callbacks for this
3580  * notification, call them now.
3581  */
3582 void
3583 xpt_release_ccb(union ccb *free_ccb)
3584 {
3585 	int	 s;
3586 	struct	 cam_path *path;
3587 	struct	 cam_ed *device;
3588 	struct	 cam_eb *bus;
3589 
3590 	CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("xpt_release_ccb\n"));
3591 	path = free_ccb->ccb_h.path;
3592 	device = path->device;
3593 	bus = path->bus;
3594 	s = splsoftcam();
3595 	cam_ccbq_release_opening(&device->ccbq);
3596 	if (xpt_ccb_count > xpt_max_ccbs) {
3597 		xpt_free_ccb(free_ccb);
3598 		xpt_ccb_count--;
3599 	} else {
3600 		SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
3601 	}
3602 	bus->sim->devq->alloc_openings++;
3603 	bus->sim->devq->alloc_active--;
3604 	/* XXX Turn this into an inline function - xpt_run_device?? */
3605 	if ((device_is_alloc_queued(device) == 0)
3606 	 && (device->drvq.entries > 0)) {
3607 		xpt_schedule_dev_allocq(bus, device);
3608 	}
3609 	splx(s);
3610 	if (dev_allocq_is_runnable(bus->sim->devq))
3611 		xpt_run_dev_allocq(bus);
3612 }
3613 
3614 /* Functions accessed by SIM drivers */
3615 
3616 /*
3617  * A sim structure, listing the SIM entry points and instance
3618  * identification info, is passed to xpt_bus_register to hook the SIM
3619  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3620  * for this new bus, places it on the list of busses, and assigns
3621  * it a path_id.  The path_id may be influenced by "hard wiring"
3622  * information specified by the user.  Once interrupt services are
3623  * available, the bus will be probed.
3624  */
3625 int32_t
3626 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
3627 {
3628 	static path_id_t buscount;
3629 	struct cam_eb *new_bus;
3630 	struct ccb_pathinq cpi;
3631 	int s;
3632 
3633 	sim->bus_id = bus;
3634 	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
3635 					  M_DEVBUF, M_NOWAIT);
3636 	if (new_bus == NULL) {
3637 		/* Couldn't satisfy request */
3638 		return (CAM_RESRC_UNAVAIL);
3639 	}
3640 
3641 	bzero(new_bus, sizeof(*new_bus));
3642 
3643 	if (strcmp(sim->sim_name, "xpt") != 0) {
3644 
3645 		sim->path_id = xptpathid(sim->sim_name, sim->unit_number,
3646 					 sim->bus_id, &buscount);
3647 	}
3648 
3649 	new_bus->path_id = sim->path_id;
3650 	new_bus->sim = sim;
3651 	SLIST_INIT(&new_bus->asyncs);
3652 	TAILQ_INIT(&new_bus->et_entries);
3653 	s = splsoftcam();
3654 	TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
3655 	bus_generation++;
3656 
3657 	/* Notify interested parties */
3658 	if (sim->path_id != CAM_XPT_PATH_ID) {
3659 		struct cam_path path;
3660 
3661 		xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
3662 			         CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3663 		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
3664 		cpi.ccb_h.func_code = XPT_PATH_INQ;
3665 		xpt_action((union ccb *)&cpi);
3666 		xpt_async(AC_PATH_REGISTERED, xpt_periph->path, &cpi);
3667 		xpt_release_path(&path);
3668 	}
3669 	splx(s);
3670 	return (CAM_SUCCESS);
3671 }
3672 
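/*
 * A hedged registration sketch from a SIM's attach routine.  The
 * cam_sim_alloc signature is assumed from cam_sim.h rather than this
 * file, and names beginning with "mysim" are placeholders.
 *
 *	sim = cam_sim_alloc(mysim_action, mysim_poll, "mysim", softc,
 *			    unit, 1, tagged_openings, devq);
 *	if (sim == NULL || xpt_bus_register(sim, 0) != CAM_SUCCESS)
 *		fail the attach
 */
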
3673 static int
3674 xptnextfreebus(path_id_t startbus)
3675 {
3676 	struct cam_sim_config *sim_conf;
3677 
3678 	sim_conf = cam_sinit;
3679 	while (sim_conf->sim_name != NULL) {
3680 
3681 		if (IS_SPECIFIED(sim_conf->pathid)
3682 		 && (startbus == sim_conf->pathid)) {
3683 			++startbus;
3684 			/* Start the search over */
3685 			sim_conf = cam_sinit;
3686 		} else {
3687 			sim_conf++;
3688 		}
3689 	}
3690 	return (startbus);
3691 }
3692 
3693 static int
3694 xptpathid(const char *sim_name, int sim_unit,
3695 	  int sim_bus, path_id_t *nextpath)
3696 {
3697 	struct cam_sim_config *sim_conf;
3698 	path_id_t pathid;
3699 
3700 	pathid = CAM_XPT_PATH_ID;
3701 	for (sim_conf = cam_sinit; sim_conf->sim_name != NULL; sim_conf++) {
3702 
3703 		if (!IS_SPECIFIED(sim_conf->pathid))
3704 			continue;
3705 
3706 		if (!strcmp(sim_name, sim_conf->sim_name)
3707 		 && (sim_unit == sim_conf->sim_unit)) {
3708 
3709 			if (IS_SPECIFIED(sim_conf->sim_bus)) {
3710 				if (sim_bus == sim_conf->sim_bus) {
3711 					pathid = sim_conf->pathid;
3712 					break;
3713 				}
3714 			} else if (sim_bus == 0) {
3715 				/* Unspecified matches bus 0 */
3716 				pathid = sim_conf->pathid;
3717 				break;
3718 			} else {
3719 				printf("Ambiguous scbus configuration for %s%d "
3720 				       "bus %d, cannot wire down.  The kernel "
3721 				       "config entry for scbus%d should "
3722 				       "specify a controller bus.\n"
3723 				       "Scbus will be assigned dynamically.\n",
3724 				       sim_name, sim_unit, sim_bus,
3725 				       sim_conf->pathid);
3726 				break;
3727 			}
3728 		}
3729 	}
3730 
3731 	if (pathid == CAM_XPT_PATH_ID) {
3732 		pathid = xptnextfreebus(*nextpath);
3733 		*nextpath = pathid + 1;
3734 	}
3735 	return (pathid);
3736 }
3737 
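/*
 * Illustrative example only: the style of kernel config "wiring" that
 * xptpathid honors.  The exact config(8) syntax here is hypothetical.
 * An entry naming both a controller and a bus pins that scbus to the
 * given channel; one that omits the bus number on a multi-bus
 * controller triggers the ambiguity warning above, and the scbus is
 * then assigned dynamically.
 *
 *	controller	scbus0	at ahc0 bus 0
 *	controller	scbus1	at bt0
 */
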
3738 int32_t
3739 xpt_bus_deregister(u_int8_t path_id)
3741 {
3742 	/* XXX */
3743 	return (CAM_SUCCESS);
3744 }
3745 
3746 void
3747 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
3748 {
3749 	struct cam_eb *bus;
3750 	struct cam_et *target, *next_target;
3751 	struct cam_ed *device, *next_device;
3752 	int s;
3753 
3754 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
3755 
3756 	s = splsoftcam();
3757 
3758 	bus = path->bus;
3759 
3760 	/*
3761 	 * Freeze the SIM queue for SCSI_DELAY ms to
3762 	 * allow the bus to settle.
3763 	 */
3764 	if (async_code == AC_BUS_RESET) {
3765 		struct cam_sim *sim;
3766 
3767 		sim = bus->sim;
3768 
3769 		/*
3770 		 * If there isn't already another timeout pending, go ahead
3771 		 * and freeze the simq and set the timeout flag.  If there
3772 		 * is another timeout pending, replace it with this
3773 		 * timeout.  There could be two bus reset async broadcasts
3774 		 * sent for some dual-channel controllers.
3775 		 */
3776 		if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) == 0) {
3777 			xpt_freeze_simq(sim, 1);
3778 			sim->flags |= CAM_SIM_REL_TIMEOUT_PENDING;
3779 		} else
3780 			untimeout(xpt_release_simq_timeout, sim, sim->c_handle);
3781 
3782 		sim->c_handle = timeout(xpt_release_simq_timeout,
3783 					sim, (SCSI_DELAY * hz) / 1000);
3784 	}
3785 
3786 	for (target = TAILQ_FIRST(&bus->et_entries);
3787 	     target != NULL;
3788 	     target = next_target) {
3789 
3790 		next_target = TAILQ_NEXT(target, links);
3791 
3792 		if (path->target != target
3793 		 && path->target != NULL)
3794 			continue;
3795 
3796 		if (async_code == AC_TRANSFER_NEG) {
3797 			struct ccb_trans_settings *settings;
3798 
3799 			settings = (struct ccb_trans_settings *)async_arg;
3800 			xpt_set_transfer_settings(settings,
3801 						  /*async_update*/TRUE);
3802 		}
3803 
3804 		for (device = TAILQ_FIRST(&target->ed_entries);
3805 		     device != NULL;
3806 		     device = next_device) {
3807 			cam_status status;
3808 			struct cam_path newpath;
3809 
3810 			next_device = TAILQ_NEXT(device, links);
3811 
3812 			if (path->device != device
3813 			 && path->device != NULL)
3814 				continue;
3815 
3816 			/*
3817 			 * We'll need this path for either one of these two
3818 			 * async callback codes.  Basically, we need to
3819 			 * compile our own path instead of just using the path
3820 			 * the user passes in since the user may well have
3821  * passed in a wildcarded path.  I'm not really
3822 			 * sure why anyone would want to wildcard a path for
3823 			 * either one of these async callbacks, but we need
3824 			 * to be able to handle it if they do.
3825 			 */
3826 			if ((async_code == AC_SENT_BDR)
3827 			 || (async_code == AC_INQ_CHANGED))
3828 				status = xpt_compile_path(&newpath, NULL,
3829 							  bus->path_id,
3830 							  target->target_id,
3831 							  device->lun_id);
3832 			else
3833 				status = CAM_REQ_CMP; /* silence the compiler */
3834 
3835 			/*
3836 			 * If we send a BDR, freeze the device queue for
3837 			 * SCSI_DELAY seconds to allow it to settle down.
3838 			 * SCSI_DELAY ms to allow it to settle down.
3839 			if (async_code == AC_SENT_BDR) {
3840 				if (status == CAM_REQ_CMP) {
3841 					xpt_freeze_devq(&newpath, 1);
3842 					/*
3843 					 * Although this looks bad, it
3844 					 * isn't as bad as it seems.  We're
3845 					 * passing in a stack-allocated path
3846 					 * that we then immediately release
3847 					 * after scheduling a timeout to
3848 					 * release the device queue.  So
3849 					 * the path won't be around when
3850 					 * the timeout fires, right?  Right.
3851 					 * But it doesn't matter, since
3852 					 * xpt_release_devq and its timeout
3853 					 * function both take the device as
3854 					 * an argument.  Theoretically, the
3855 					 * device will still be there when
3856 					 * the timeout fires, even though
3857 					 * the path will be gone.
3858 					 */
3859 					cam_release_devq(
3860 						   &newpath,
3861 						   /*relsim_flags*/
3862 						   RELSIM_RELEASE_AFTER_TIMEOUT,
3863 						   /*reduction*/0,
3864 						   /*timeout*/SCSI_DELAY,
3865 						   /*getcount_only*/0);
3866 					xpt_release_path(&newpath);
3867 				}
3868 			} else if (async_code == AC_INQ_CHANGED) {
3869 				/*
3870 				 * We've sent a start unit command, or
3871 				 * something similar to a device that may
3872 				 * have caused its inquiry data to change.
3873 				 * So we re-scan the device to refresh the
3874 				 * inquiry data for it.
3875 				 */
3876 				if (status == CAM_REQ_CMP) {
3877 					xpt_scan_lun(path->periph, &newpath,
3878 						     CAM_EXPECT_INQ_CHANGE,
3879 						     NULL);
3880 					xpt_release_path(&newpath);
3881 				}
3882 			} else if (async_code == AC_LOST_DEVICE)
3883 				device->flags |= CAM_DEV_UNCONFIGURED;
3884 
3885 			xpt_async_bcast(&device->asyncs,
3886 					async_code,
3887 					path,
3888 					async_arg);
3889 		}
3890 	}
3891 	xpt_async_bcast(&bus->asyncs, async_code,
3892 			path, async_arg);
3893 	splx(s);
3894 }
3895 
3896 static void
3897 xpt_async_bcast(struct async_list *async_head,
3898 		u_int32_t async_code,
3899 		struct cam_path *path, void *async_arg)
3900 {
3901 	struct async_node *cur_entry;
3902 
3903 	cur_entry = SLIST_FIRST(async_head);
3904 	while (cur_entry != NULL) {
3905 		struct async_node *next_entry;
3906 		/*
3907 		 * Grab the next list entry before we call the current
3908 		 * entry's callback.  This is because the callback function
3909 		 * can delete its async callback entry.
3910 		 */
3911 		next_entry = SLIST_NEXT(cur_entry, links);
3912 		if ((cur_entry->event_enable & async_code) != 0)
3913 			cur_entry->callback(cur_entry->callback_arg,
3914 					    async_code, path,
3915 					    async_arg);
3916 		cur_entry = next_entry;
3917 	}
3918 }
3919 
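#if 0
/*
 * Illustrative sketch only (not compiled): how a peripheral driver
 * typically lands on one of these async lists.  It fills out a
 * ccb_setasync and hands it to xpt_action; the XPT then allocates an
 * async_node carrying the callback.  "mydriverasync" and the softc
 * argument are hypothetical names.
 */
static void
mydriver_register_async_sketch(struct cam_periph *periph, void *softc)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE | AC_SENT_BDR;
	csa.callback = mydriverasync;
	csa.callback_arg = softc;
	xpt_action((union ccb *)&csa);
}
#endif
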
3920 u_int32_t
3921 xpt_freeze_devq(struct cam_path *path, u_int count)
3922 {
3923 	int s;
3924 	struct ccb_hdr *ccbh;
3925 
3926 	s = splcam();
3927 	path->device->qfrozen_cnt += count;
3928 
3929 	/*
3930 	 * Mark the last CCB in the queue as needing
3931 	 * to be requeued if the driver hasn't
3932 	 * changed its state yet.  This fixes a race
3933 	 * where a ccb is just about to be queued to
3934 	 * a controller driver when its interrupt routine
3935 	 * freezes the queue.  To completely close the
3936 	 * hole, controller drivers must check to see
3937 	 * if a ccb's status is still CAM_REQ_INPROG
3938 	 * under spl protection just before they queue
3939 	 * the CCB.  See ahc_action/ahc_freeze_devq for
3940 	 * an example.
3941 	 */
3942 	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_list);
3943 	if (ccbh && ccbh->status == CAM_REQ_INPROG)
3944 		ccbh->status = CAM_REQUEUE_REQ;
3945 	splx(s);
3946 	return (path->device->qfrozen_cnt);
3947 }
3948 
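#if 0
/*
 * Illustrative sketch only (not compiled): the check a controller
 * driver's action routine must make, under spl protection, to fully
 * close the freeze race described above.  "mydriver_queue_ccb" is a
 * hypothetical hardware-queueing routine; see ahc_action and
 * ahc_freeze_devq for a real implementation.
 */
static void
mydriver_start_ccb_sketch(union ccb *ccb)
{
	int s;

	s = splcam();
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		/*
		 * The XPT froze the queue and marked this CCB
		 * CAM_REQUEUE_REQ after we dequeued it; hand it
		 * back rather than starting it on the hardware.
		 */
		splx(s);
		xpt_done(ccb);
		return;
	}
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	mydriver_queue_ccb(ccb);
	splx(s);
}
#endif
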
3949 u_int32_t
3950 xpt_freeze_simq(struct cam_sim *sim, u_int count)
3951 {
3952 	sim->devq->send_queue.qfrozen_cnt += count;
3953 	if (sim->devq->active_dev != NULL) {
3954 		struct ccb_hdr *ccbh;
3955 
3956 		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
3957 				  ccb_hdr_list);
3958 		if (ccbh && ccbh->status == CAM_REQ_INPROG)
3959 			ccbh->status = CAM_REQUEUE_REQ;
3960 	}
3961 	return (sim->devq->send_queue.qfrozen_cnt);
3962 }
3963 
3964 static void
3965 xpt_release_devq_timeout(void *arg)
3966 {
3967 	struct cam_ed *device;
3968 
3969 	device = (struct cam_ed *)arg;
3970 
3971 	xpt_release_devq(device, /*run_queue*/TRUE);
3972 }
3973 
3974 void
3975 xpt_release_devq(struct cam_ed *dev, int run_queue)
3976 {
3977 	int	rundevq;
3978 	int	s;
3979 
3980 	rundevq = 0;
3981 	s = splcam();
3982 	if (dev->qfrozen_cnt > 0) {
3983 
3984 		dev->qfrozen_cnt--;
3985 		if (dev->qfrozen_cnt == 0) {
3986 
3987 			/*
3988 			 * No longer need to wait for a successful
3989 			 * command completion.
3990 			 */
3991 			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
3992 
3993 			/*
3994 			 * Remove any timeouts that might be scheduled
3995 			 * to release this queue.
3996 			 */
3997 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3998 				untimeout(xpt_release_devq_timeout, dev,
3999 					  dev->c_handle);
4000 				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4001 			}
4002 
4003 			/*
4004 			 * Now that we are unfrozen, schedule the
4005 			 * device so that any pending transactions
4006 			 * are run.
4007 			 */
4008 			if ((dev->ccbq.queue.entries > 0)
4009 			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4010 			 && (run_queue != 0)) {
4011 				rundevq = 1;
4012 			}
4013 		}
4014 	}
4015 	splx(s);
4016 	if (rundevq != 0)
4017 		xpt_run_dev_sendq(dev->target->bus);
4018 }
4019 
4020 void
4021 xpt_release_simq(struct cam_sim *sim, int run_queue)
4022 {
4023 	int	s;
4024 	struct	camq *sendq;
4025 
4026 	sendq = &(sim->devq->send_queue);
4027 	s = splcam();
4028 	if (sendq->qfrozen_cnt > 0) {
4029 
4030 		sendq->qfrozen_cnt--;
4031 		if (sendq->qfrozen_cnt == 0) {
4032 
4033 			/*
4034 			 * If there is a timeout scheduled to release this
4035 			 * sim queue, remove it.  The queue frozen count is
4036 			 * already at 0.
4037 			 */
4038 			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4039 				untimeout(xpt_release_simq_timeout, sim,
4040 					  sim->c_handle);
4041 				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4042 			}
4043 
4044 			splx(s);
4045 
4046 			if (run_queue) {
4047 				/*
4048 				 * Now that we are unfrozen, run the send queue.
4049 				 */
4050 				xpt_run_dev_sendq(xpt_find_bus(sim->path_id));
4051 			}
4052 		} else
4053 			splx(s);
4054 	} else
4055 		splx(s);
4056 }
4057 
4058 static void
4059 xpt_release_simq_timeout(void *arg)
4060 {
4061 	struct cam_sim *sim;
4062 
4063 	sim = (struct cam_sim *)arg;
4064 	xpt_release_simq(sim, /* run_queue */ TRUE);
4065 }
4066 
4067 void
4068 xpt_done(union ccb *done_ccb)
4069 {
4070 	int s;
4071 
4072 	s = splcam();
4073 
4074 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4075 	switch (done_ccb->ccb_h.func_code) {
4076 	case XPT_SCSI_IO:
4077 	case XPT_ENG_EXEC:
4078 	case XPT_TARGET_IO:
4079 	case XPT_ACCEPT_TARGET_IO:
4080 	case XPT_CONT_TARGET_IO:
4081 	case XPT_SCAN_BUS:
4082 	case XPT_SCAN_LUN:
4083 	{
4084 		/*
4085 		 * Queue up the request for handling by our SWI handler;
4086 		 * these are the "non-immediate" types of ccbs.
4087 		 */
4088 		switch (done_ccb->ccb_h.path->periph->type) {
4089 		case CAM_PERIPH_BIO:
4090 			TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4091 					  sim_links.tqe);
4092 			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4093 			setsoftcambio();
4094 			break;
4095 		case CAM_PERIPH_NET:
4096 			TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
4097 					  sim_links.tqe);
4098 			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4099 			setsoftcamnet();
4100 			break;
4101 		}
4102 		break;
4103 	}
4104 	default:
4105 		break;
4106 	}
4107 	splx(s);
4108 }
4109 
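#if 0
/*
 * Illustrative sketch only (not compiled): a SIM's interrupt handler
 * completes a transaction by filling in the CCB status and handing the
 * CCB back to the XPT.  xpt_done queues it above, and camisr later runs
 * the peripheral driver's callback at software interrupt time.
 * "mydriver_intr_sketch" is a hypothetical name.
 */
static void
mydriver_intr_sketch(union ccb *ccb)
{
	ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
}
#endif
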
4110 union ccb *
4111 xpt_alloc_ccb()
4112 {
4113 	union ccb *new_ccb;
4114 
4115 	new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK);
4116 	return (new_ccb);
4117 }
4118 
4119 void
4120 xpt_free_ccb(union ccb *free_ccb)
4121 {
4122 	free(free_ccb, M_DEVBUF);
4123 }
4124 
4125 
4126 
4127 /* Private XPT functions */
4128 
4129 /*
4130  * Get a CAM control block for the caller. Charge the structure to the device
4131  * referenced by the path.  If this device has no 'credits' then the
4132  * device already has the maximum number of outstanding operations under way
4133  * and we return NULL. If we don't have sufficient resources to allocate more
4134  * ccbs, we also return NULL.
4135  */
4136 static union ccb *
4137 xpt_get_ccb(struct cam_ed *device)
4138 {
4139 	union ccb *new_ccb;
4140 	int s;
4141 
4142 	s = splsoftcam();
4143 	if ((new_ccb = (union ccb *)ccb_freeq.slh_first) == NULL) {
4144 		new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT);
4145 		if (new_ccb == NULL) {
4146 			splx(s);
4147 			return (NULL);
4148 		}
4149 		callout_handle_init(&new_ccb->ccb_h.timeout_ch);
4150 		SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
4151 				  xpt_links.sle);
4152 		xpt_ccb_count++;
4153 	}
4154 	cam_ccbq_take_opening(&device->ccbq);
4155 	SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
4156 	splx(s);
4157 	return (new_ccb);
4158 }
4159 
4160 
4161 static struct cam_et *
4162 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4163 {
4164 	struct cam_et *target;
4165 
4166 	target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT);
4167 	if (target != NULL) {
4168 		struct cam_et *cur_target;
4169 
4170 		target->bus = bus;
4171 		target->target_id = target_id;
4172 		target->refcount = 1;
4173 		TAILQ_INIT(&target->ed_entries);
4174 
4175 		/* Insertion sort into our bus's target list */
4176 		cur_target = TAILQ_FIRST(&bus->et_entries);
4177 		while (cur_target != NULL && cur_target->target_id < target_id)
4178 			cur_target = TAILQ_NEXT(cur_target, links);
4179 
4180 		if (cur_target != NULL) {
4181 			TAILQ_INSERT_BEFORE(cur_target, target, links);
4182 		} else {
4183 			TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4184 			bus->generation++;
4185 		}
4186 	}
4187 	return (target);
4188 }
4189 
4190 void
4191 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4192 {
4193 	if ((--target->refcount == 0)
4194 	 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
4195 		TAILQ_REMOVE(&bus->et_entries, target, links);
4196 		bus->generation++;
4197 		free(target, M_DEVBUF);
4198 	}
4199 }
4200 
4201 static struct cam_ed *
4202 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4203 {
4204 	struct	 cam_ed *device;
4205 	struct 	 cam_devq *devq;
4206 	int32_t	 status;
4207 	int	 s;
4208 
4209 	s = splsoftcam();
4210 	/* Make space for us in the device queue on our bus */
4211 	devq = bus->sim->devq;
4212 	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4213 	splx(s);
4214 
4215 	if (status != CAM_REQ_CMP) {
4216 		device = NULL;
4217 	} else {
4218 		device = (struct cam_ed *)malloc(sizeof(*device),
4219 						 M_DEVBUF, M_NOWAIT);
4220 	}
4221 
4222 	if (device != NULL) {
4223 		struct cam_ed *cur_device;
4224 
4225 		bzero(device, sizeof(*device));
4226 
4227 		SLIST_INIT(&device->asyncs);
4228 		SLIST_INIT(&device->periphs);
4229 		callout_handle_init(&device->c_handle);
4230 		device->refcount = 1;
4231 		device->flags |= CAM_DEV_UNCONFIGURED;
4232 
4233 		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4234 		device->alloc_ccb_entry.device = device;
4235 		cam_init_pinfo(&device->send_ccb_entry.pinfo);
4236 		device->send_ccb_entry.device = device;
4237 
4238 		device->target = target;
4239 
4240 		device->lun_id = lun_id;
4241 
4242 		/* Initialize our queues */
4243 		if (camq_init(&device->drvq, 0) != 0) {
4244 			free(device, M_DEVBUF);
4245 			return (NULL);
4246 		}
4247 
4248 		if (cam_ccbq_init(&device->ccbq,
4249 				  bus->sim->max_dev_openings) != 0) {
4250 			camq_fini(&device->drvq);
4251 			free(device, M_DEVBUF);
4252 			return (NULL);
4253 		}
4254 		s = splsoftcam();
4255 		/*
4256 		 * XXX should be limited by number of CCBs this bus can
4257 		 * do.
4258 		 */
4259 		xpt_max_ccbs += device->ccbq.devq_openings;
4260 		/* Insertion sort into our target's device list */
4261 		cur_device = TAILQ_FIRST(&target->ed_entries);
4262 		while (cur_device != NULL && cur_device->lun_id < lun_id)
4263 			cur_device = TAILQ_NEXT(cur_device, links);
4264 		if (cur_device != NULL) {
4265 			TAILQ_INSERT_BEFORE(cur_device, device, links);
4266 		} else {
4267 			TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4268 			target->generation++;
4269 		}
4270 		splx(s);
4271 	}
4272 	return (device);
4273 }
4274 
4275 static void
4276 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
4277 		   struct cam_ed *device)
4278 {
4279 	int s;
4280 
4281 	if ((--device->refcount == 0)
4282 	 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
4283 		struct cam_devq *devq;
4284 
4285 		s = splsoftcam();
4286 		TAILQ_REMOVE(&target->ed_entries, device, links);
4287 		target->generation++;
4288 		xpt_max_ccbs -= device->ccbq.devq_openings;
4289 		free(device, M_DEVBUF);
4290 		/* Release our slot in the devq */
4291 		devq = bus->sim->devq;
4292 		cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
4293 		splx(s);
4294 	}
4295 }
4296 
4297 static u_int32_t
4298 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4299 {
4300 	int	s;
4301 	int	diff;
4302 	int	result;
4303 	struct	cam_ed *dev;
4304 
4305 	dev = path->device;
4306 	s = splsoftcam();
4307 
4308 	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
4309 	result = cam_ccbq_resize(&dev->ccbq, newopenings);
4310 	if (result == CAM_REQ_CMP && (diff < 0)) {
4311 		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
4312 	}
4313 	/* Adjust the global limit */
4314 	xpt_max_ccbs += diff;
4315 	splx(s);
4316 	return (result);
4317 }
4318 
4319 static struct cam_eb *
4320 xpt_find_bus(path_id_t path_id)
4321 {
4322 	struct cam_eb *bus;
4323 
4324 	for (bus = TAILQ_FIRST(&xpt_busses);
4325 	     bus != NULL;
4326 	     bus = TAILQ_NEXT(bus, links)) {
4327 		if (bus->path_id == path_id)
4328 			break;
4329 	}
4330 	return (bus);
4331 }
4332 
4333 static struct cam_et *
4334 xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
4335 {
4336 	struct cam_et *target;
4337 
4338 	for (target = TAILQ_FIRST(&bus->et_entries);
4339 	     target != NULL;
4340 	     target = TAILQ_NEXT(target, links)) {
4341 		if (target->target_id == target_id) {
4342 			target->refcount++;
4343 			break;
4344 		}
4345 	}
4346 	return (target);
4347 }
4348 
4349 static struct cam_ed *
4350 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4351 {
4352 	struct cam_ed *device;
4353 
4354 	for (device = TAILQ_FIRST(&target->ed_entries);
4355 	     device != NULL;
4356 	     device = TAILQ_NEXT(device, links)) {
4357 		if (device->lun_id == lun_id) {
4358 			device->refcount++;
4359 			break;
4360 		}
4361 	}
4362 	return (device);
4363 }
4364 
4365 typedef struct {
4366 	union	ccb *request_ccb;
4367 	struct 	ccb_pathinq *cpi;
4368 	int	pending_count;
4369 } xpt_scan_bus_info;
4370 
4371 /*
4372  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
4373  * As the scan progresses, xpt_scan_bus is used as the
4374  * completion callback function.
4375  */
4376 static void
4377 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
4378 {
4379 	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4380 		  ("xpt_scan_bus\n"));
4381 	switch (request_ccb->ccb_h.func_code) {
4382 	case XPT_SCAN_BUS:
4383 	{
4384 		xpt_scan_bus_info *scan_info;
4385 		union	ccb *work_ccb;
4386 		struct	cam_path *path;
4387 		u_int	i;
4388 		u_int	max_target;
4389 		u_int	initiator_id;
4390 
4391 		/* Find out the characteristics of the bus */
4392 		work_ccb = xpt_alloc_ccb();
4393 		xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
4394 			      request_ccb->ccb_h.pinfo.priority);
4395 		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
4396 		xpt_action(work_ccb);
4397 		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
4398 			request_ccb->ccb_h.status = work_ccb->ccb_h.status;
4399 			xpt_free_ccb(work_ccb);
4400 			xpt_done(request_ccb);
4401 			return;
4402 		}
4403 
4404 		/* Save some state for use while we probe for devices */
4405 		scan_info = (xpt_scan_bus_info *)
4406 		    malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
4407 		scan_info->request_ccb = request_ccb;
4408 		scan_info->cpi = &work_ccb->cpi;
4409 
4410 		/* Cache on our stack so we can work asynchronously */
4411 		max_target = scan_info->cpi->max_target;
4412 		initiator_id = scan_info->cpi->initiator_id;
4413 
4414 		/*
4415 		 * Don't count the initiator if its ID falls
4416 		 * within the addressable target range.
4417 		 */
4418 		scan_info->pending_count = max_target + 1;
4419 		if (initiator_id <= max_target)
4420 			scan_info->pending_count--;
4421 
4422 		for (i = 0; i <= max_target; i++) {
4423 			cam_status status;
4424 			if (i == initiator_id)
4425 				continue;
4426 
4427 			status = xpt_create_path(&path, xpt_periph,
4428 						 request_ccb->ccb_h.path_id,
4429 						 i, 0);
4430 			if (status != CAM_REQ_CMP) {
4431 				printf("xpt_scan_bus: xpt_create_path failed"
4432 				       " with status %#x, bus scan halted\n",
4433 				       status);
4434 				break;
4435 			}
4436 			work_ccb = xpt_alloc_ccb();
4437 			xpt_setup_ccb(&work_ccb->ccb_h, path,
4438 				      request_ccb->ccb_h.pinfo.priority);
4439 			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
4440 			work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
4441 			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
4442 			work_ccb->crcn.flags = request_ccb->crcn.flags;
4443 #if 0
4444 			printf("xpt_scan_bus: probing %d:%d:%d\n",
4445 				request_ccb->ccb_h.path_id, i, 0);
4446 #endif
4447 			xpt_action(work_ccb);
4448 		}
4449 		break;
4450 	}
4451 	case XPT_SCAN_LUN:
4452 	{
4453 		xpt_scan_bus_info *scan_info;
4454 		path_id_t path_id;
4455 		target_id_t target_id;
4456 		lun_id_t lun_id;
4457 
4458 		/* Reuse the same CCB to query if a device was really found */
4459 		scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
4460 		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
4461 			      request_ccb->ccb_h.pinfo.priority);
4462 		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
4463 
4464 		path_id = request_ccb->ccb_h.path_id;
4465 		target_id = request_ccb->ccb_h.target_id;
4466 		lun_id = request_ccb->ccb_h.target_lun;
4467 		xpt_action(request_ccb);
4468 
4469 #if 0
4470 		printf("xpt_scan_bus: got back probe from %d:%d:%d\n",
4471 			path_id, target_id, lun_id);
4472 #endif
4473 
4474 		if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
4475 			struct cam_ed *device;
4476 			struct cam_et *target;
4477 
4478 			/*
4479 			 * If we already probed lun 0 successfully, or
4480 			 * we have additional configured luns on this
4481 			 * target that might have "gone away", go on to
4482 			 * the next lun.
4483 			 */
4484 			target = request_ccb->ccb_h.path->target;
4485 			device = TAILQ_FIRST(&target->ed_entries);
4486 			if (device != NULL)
4487 				device = TAILQ_NEXT(device, links);
4488 
4489 			if ((lun_id != 0) || (device != NULL)) {
4490 				/* Try the next lun */
4491 				lun_id++;
4492 			}
4493 		} else {
4494 			struct cam_ed *device;
4495 
4496 			device = request_ccb->ccb_h.path->device;
4497 
4498 			if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
4499 				/* Try the next lun */
4500 				lun_id++;
4501 			}
4502 		}
4503 
4504 		xpt_free_path(request_ccb->ccb_h.path);
4505 
4506 		/* Check Bounds */
4507 		if ((lun_id == request_ccb->ccb_h.target_lun)
4508 		 || lun_id > scan_info->cpi->max_lun) {
4509 			/* We're done */
4510 
4511 			xpt_free_ccb(request_ccb);
4512 			scan_info->pending_count--;
4513 			if (scan_info->pending_count == 0) {
4514 				xpt_free_ccb((union ccb *)scan_info->cpi);
4515 				request_ccb = scan_info->request_ccb;
4516 				free(scan_info, M_TEMP);
4517 				request_ccb->ccb_h.status = CAM_REQ_CMP;
4518 				xpt_done(request_ccb);
4519 			}
4520 		} else {
4521 			/* Try the next device */
4522 			struct cam_path *path;
4523 			cam_status status;
4524 
4525 			path = request_ccb->ccb_h.path;
4526 			status = xpt_create_path(&path, xpt_periph,
4527 						 path_id, target_id, lun_id);
4528 			if (status != CAM_REQ_CMP) {
4529 				printf("xpt_scan_bus: xpt_create_path failed "
4530 				       "with status %#x, halting LUN scan\n",
4531 				       status);
4532 				xpt_free_ccb(request_ccb);
4533 				scan_info->pending_count--;
4534 				if (scan_info->pending_count == 0) {
4535 					xpt_free_ccb(
4536 						(union ccb *)scan_info->cpi);
4537 					request_ccb = scan_info->request_ccb;
4538 					free(scan_info, M_TEMP);
4539 					request_ccb->ccb_h.status = CAM_REQ_CMP;
4540 					xpt_done(request_ccb);
4541 					break;
4542 				}
4543 			}
4544 			xpt_setup_ccb(&request_ccb->ccb_h, path,
4545 				      request_ccb->ccb_h.pinfo.priority);
4546 			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
4547 			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
4548 			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
4549 			request_ccb->crcn.flags =
4550 				scan_info->request_ccb->crcn.flags;
4551 #if 0
4552 			xpt_print_path(path);
4553 			printf("xpt_scan bus probing\n");
4554 #endif
4555 			xpt_action(request_ccb);
4556 		}
4557 		break;
4558 	}
4559 	default:
4560 		break;
4561 	}
4562 }
4563 
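#if 0
/*
 * Illustrative sketch only (not compiled): kicking off a full-bus
 * scan.  A wildcard target/lun path on the bus plus an XPT_SCAN_BUS
 * CCB starts the state machine above.  "scan_callback" is a
 * hypothetical completion routine supplied by the caller.
 */
static void
mydriver_rescan_sketch(path_id_t path_id)
{
	struct cam_path *scan_path;
	union ccb *scan_ccb;

	if (xpt_create_path(&scan_path, xpt_periph, path_id,
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP)
		return;
	scan_ccb = xpt_alloc_ccb();
	xpt_setup_ccb(&scan_ccb->ccb_h, scan_path, /*priority*/5);
	scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
	scan_ccb->ccb_h.cbfcnp = scan_callback;
	scan_ccb->crcn.flags = CAM_FLAG_NONE;
	xpt_action(scan_ccb);
}
#endif
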
4564 typedef enum {
4565 	PROBE_TUR,
4566 	PROBE_INQUIRY,
4567 	PROBE_MODE_SENSE,
4568 	PROBE_SERIAL_NUM,
4569 	PROBE_TUR_FOR_NEGOTIATION
4570 } probe_action;
4571 
4572 typedef enum {
4573 	PROBE_INQUIRY_CKSUM	= 0x01,
4574 	PROBE_SERIAL_CKSUM	= 0x02,
4575 	PROBE_NO_ANNOUNCE	= 0x04
4576 } probe_flags;
4577 
4578 typedef struct {
4579 	TAILQ_HEAD(, ccb_hdr) request_ccbs;
4580 	probe_action	action;
4581 	union ccb	saved_ccb;
4582 	probe_flags	flags;
4583 	MD5_CTX		context;
4584 	u_int8_t	digest[16];
4585 } probe_softc;
4586 
4587 static void
4588 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
4589 	     cam_flags flags, union ccb *request_ccb)
4590 {
4591 	u_int32_t unit;
4592 	cam_status status;
4593 	struct cam_path *new_path;
4594 	struct cam_periph *old_periph;
4595 	int s;
4596 
4597 	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4598 		  ("xpt_scan_lun\n"));
4599 
4600 	if (request_ccb == NULL) {
4601 		request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
4602 		if (request_ccb == NULL) {
4603 			xpt_print_path(path);
4604 			printf("xpt_scan_lun: can't allocate CCB, can't "
4605 			       "continue\n");
4606 			return;
4607 		}
4608 		new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
4609 		if (new_path == NULL) {
4610 			xpt_print_path(path);
4611 			printf("xpt_scan_lun: can't allocate path, can't "
4612 			       "continue\n");
4613 			free(request_ccb, M_TEMP);
4614 			return;
4615 		}
4616 		status = xpt_compile_path(new_path, periph, path->bus->path_id,
4617 					  path->target->target_id,
4618 					  path->device->lun_id);
4619 
4620 		if (status != CAM_REQ_CMP) {
4621 			xpt_print_path(path);
4622 			printf("xpt_scan_lun: can't compile path, can't "
4623 			       "continue\n");
4624 			free(request_ccb, M_TEMP);
4625 			free(new_path, M_TEMP);
4626 			return;
4627 		}
4628 		xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
4629 		request_ccb->ccb_h.cbfcnp = xptscandone;
4630 		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
4631 		request_ccb->crcn.flags = flags;
4632 	}
4633 
4634 	s = splsoftcam();
4635 	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
4636 		probe_softc *softc;
4637 
4638 		softc = (probe_softc *)old_periph->softc;
4639 		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
4640 				  periph_links.tqe);
4641 	} else {
4642 		status = cam_periph_alloc(proberegister, probecleanup,
4643 					  probestart, "probe",
4644 					  CAM_PERIPH_BIO,
4645 					  request_ccb->ccb_h.path, NULL, 0,
4646 					  request_ccb);
4647 
4648 		if (status != CAM_REQ_CMP) {
4649 			xpt_print_path(path);
4650 			printf("xpt_scan_lun: cam_alloc_periph returned an "
4651 			       "error, can't continue probe\n");
4652 			request_ccb->ccb_h.status = status;
4653 			xpt_done(request_ccb);
4654 		}
4655 	}
4656 	splx(s);
4657 }
4658 
4659 static void
4660 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
4661 {
4662 	xpt_release_path(done_ccb->ccb_h.path);
4663 	free(done_ccb->ccb_h.path, M_TEMP);
4664 	free(done_ccb, M_TEMP);
4665 }
4666 
4667 static cam_status
4668 proberegister(struct cam_periph *periph, void *arg)
4669 {
4670 	struct ccb_getdev *cgd;
4671 	probe_softc *softc;
4672 	union ccb *ccb;
4673 
4674 	cgd = (struct ccb_getdev *)arg;
4675 	if (periph == NULL) {
4676 		printf("proberegister: periph was NULL!!\n");
4677 		return(CAM_REQ_CMP_ERR);
4678 	}
4679 
4680 	if (cgd == NULL) {
4681 		printf("proberegister: no getdev CCB, can't register device\n");
4682 		return(CAM_REQ_CMP_ERR);
4683 	}
4684 
4685 	softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
4686 
4687 	if (softc == NULL) {
4688 		printf("proberegister: Unable to probe new device. "
4689 		       "Unable to allocate softc\n");
4690 		return(CAM_REQ_CMP_ERR);
4691 	}
4692 	ccb = (union ccb *)cgd;
4693 	TAILQ_INIT(&softc->request_ccbs);
4694 	TAILQ_INSERT_TAIL(&softc->request_ccbs, &ccb->ccb_h, periph_links.tqe);
4695 	softc->flags = 0;
4696 	periph->softc = softc;
4697 	cam_periph_acquire(periph);
4698 	probeschedule(periph);
4699 	return(CAM_REQ_CMP);
4700 }
4701 
4702 static void
4703 probeschedule(struct cam_periph *periph)
4704 {
4705 	union ccb *ccb;
4706 	probe_softc *softc;
4707 
4708 	softc = (probe_softc *)periph->softc;
4709 	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
4710 
4711 	/*
4712 	 * If a device has gone away and another device, or the same one,
4713 	 * is back in the same place, it should have a unit attention
4714 	 * condition pending.  It will not report the unit attention in
4715 	 * response to an inquiry, which may leave invalid transfer
4716 	 * negotiations in effect.  The TUR will reveal the unit attention
4717 	 * condition.  Only send the TUR for lun 0, since some devices
4718 	 * will get confused by commands other than inquiry to non-existent
4719 	 * luns.  If you think a device has gone away start your scan from
4720  * lun 0.  This will ensure that any bogus transfer settings are
4721 	 * invalidated.
4722 	 */
4723 	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED)==0)
4724 	 && (ccb->ccb_h.target_lun == 0))
4725 		softc->action = PROBE_TUR;
4726 	else
4727 		softc->action = PROBE_INQUIRY;
4728 
4729 	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
4730 		softc->flags |= PROBE_NO_ANNOUNCE;
4731 	else
4732 		softc->flags &= ~PROBE_NO_ANNOUNCE;
4733 
4734 	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
4735 }
4736 
4737 static void
4738 probestart(struct cam_periph *periph, union ccb *start_ccb)
4739 {
4740 	/* Probe the device that our peripheral driver points to */
4741 	struct ccb_scsiio *csio;
4742 	probe_softc *softc;
4743 
4744 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
4745 
4746 	softc = (probe_softc *)periph->softc;
4747 	csio = &start_ccb->csio;
4748 
4749 	switch (softc->action) {
4750 	case PROBE_TUR:
4751 	case PROBE_TUR_FOR_NEGOTIATION:
4752 	{
4753 		scsi_test_unit_ready(csio,
4754 				     /*retries*/4,
4755 				     probedone,
4756 				     MSG_SIMPLE_Q_TAG,
4757 				     SSD_FULL_SIZE,
4758 				     /*timeout*/10000);
4759 		break;
4760 	}
4761 	case PROBE_INQUIRY:
4762 	{
4763 		struct scsi_inquiry_data *inq_buf;
4764 
4765 		inq_buf = &periph->path->device->inq_data;
4766 		/*
4767 		 * If the device is currently configured, we calculate an
4768 		 * MD5 checksum of the inquiry data, and if the serial number
4769 		 * length is greater than 0, add the serial number data
4770 		 * into the checksum as well.  Once the inquiry and the
4771 		 * serial number check finish, we attempt to figure out
4772 		 * whether we still have the same device.
4773 		 */
4774 		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
4775 
4776 			MD5Init(&softc->context);
4777 			MD5Update(&softc->context, (unsigned char *)inq_buf,
4778 				  sizeof(struct scsi_inquiry_data));
4779 			softc->flags |= PROBE_INQUIRY_CKSUM;
4780 			if (periph->path->device->serial_num_len > 0) {
4781 				MD5Update(&softc->context,
4782 					  periph->path->device->serial_num,
4783 					  periph->path->device->serial_num_len);
4784 				softc->flags |= PROBE_SERIAL_CKSUM;
4785 			}
4786 			MD5Final(softc->digest, &softc->context);
4787 		}
4788 
4789 		scsi_inquiry(csio,
4790 			     /*retries*/4,
4791 			     probedone,
4792 			     MSG_SIMPLE_Q_TAG,
4793 			     (u_int8_t *)inq_buf,
4794 			     sizeof(*inq_buf),
4795 			     /*evpd*/FALSE,
4796 			     /*page_code*/0,
4797 			     SSD_MIN_SIZE,
4798 			     /*timeout*/5 * 1000);
4799 		break;
4800 	}
4801 	case PROBE_MODE_SENSE:
4802 	{
4803 		void  *mode_buf;
4804 		int    mode_buf_len;
4805 
4806 		mode_buf_len = sizeof(struct scsi_mode_header_6)
4807 			     + sizeof(struct scsi_mode_blk_desc)
4808 			     + sizeof(struct scsi_control_page);
4809 		mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
4810 		if (mode_buf != NULL) {
4811 			scsi_mode_sense(csio,
4812 					/*retries*/4,
4813 					probedone,
4814 					MSG_SIMPLE_Q_TAG,
4815 					/*dbd*/FALSE,
4816 					SMS_PAGE_CTRL_CURRENT,
4817 					SMS_CONTROL_MODE_PAGE,
4818 					mode_buf,
4819 					mode_buf_len,
4820 					SSD_FULL_SIZE,
4821 					/*timeout*/5000);
4822 			break;
4823 		}
4824 		xpt_print_path(periph->path);
4825 		printf("Unable to mode sense control page - malloc failure\n");
4826 		softc->action = PROBE_SERIAL_NUM;
4827 		/* FALLTHROUGH */
4828 	}
4829 	case PROBE_SERIAL_NUM:
4830 	{
4831 		struct scsi_vpd_unit_serial_number *serial_buf;
4832 		struct cam_ed* device;
4833 
4834 		serial_buf = NULL;
4835 		device = periph->path->device;
4836 		device->serial_num = NULL;
4837 		device->serial_num_len = 0;
4838 
4839 		if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
4840 			serial_buf = (struct scsi_vpd_unit_serial_number *)
4841 				malloc(sizeof(*serial_buf), M_TEMP, M_NOWAIT);
4842 
4843 		if (serial_buf != NULL) {
4844 			bzero(serial_buf, sizeof(*serial_buf));
4845 			scsi_inquiry(csio,
4846 				     /*retries*/4,
4847 				     probedone,
4848 				     MSG_SIMPLE_Q_TAG,
4849 				     (u_int8_t *)serial_buf,
4850 				     sizeof(*serial_buf),
4851 				     /*evpd*/TRUE,
4852 				     SVPD_UNIT_SERIAL_NUMBER,
4853 				     SSD_MIN_SIZE,
4854 				     /*timeout*/5 * 1000);
4855 			break;
4856 		}
4857 		/*
4858 		 * We'll have to do without; let our probedone
4859 		 * routine finish up for us.
4860 		 */
4861 		start_ccb->csio.data_ptr = NULL;
4862 		probedone(periph, start_ccb);
4863 		return;
4864 	}
4865 	}
4866 	xpt_action(start_ccb);
4867 }
4868 
4869 static void
4870 probedone(struct cam_periph *periph, union ccb *done_ccb)
4871 {
4872 	probe_softc *softc;
4873 	struct cam_path *path;
4874 	u_int32_t  priority;
4875 
4876 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
4877 
4878 	softc = (probe_softc *)periph->softc;
4879 	path = done_ccb->ccb_h.path;
4880 	priority = done_ccb->ccb_h.pinfo.priority;
4881 
4882 	switch (softc->action) {
4883 	case PROBE_TUR:
4884 	{
4885 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
4886 
4887 			if (cam_periph_error(done_ccb, 0,
4888 					     SF_NO_PRINT, NULL) == ERESTART)
4889 				return;
4890 			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
4891 				/* Don't wedge the queue */
4892 				xpt_release_devq(done_ccb->ccb_h.path->device,
4893 						 /*run_queue*/TRUE);
4894 		}
4895 		softc->action = PROBE_INQUIRY;
4896 		xpt_release_ccb(done_ccb);
4897 		xpt_schedule(periph, priority);
4898 		return;
4899 	}
4900 	case PROBE_INQUIRY:
4901 	{
4902 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4903 			struct scsi_inquiry_data *inq_buf;
4904 			u_int8_t periph_qual;
4905 			u_int8_t periph_dtype;
4906 
4907 			inq_buf = &path->device->inq_data;
4908 
4909 			periph_qual = SID_QUAL(inq_buf);
4910 			periph_dtype = SID_TYPE(inq_buf);
4911 			if (periph_dtype != T_NODEVICE) {
4912 				switch(periph_qual) {
4913 				case SID_QUAL_LU_CONNECTED:
4914 				{
4915 					xpt_find_quirk(path->device);
4916 
4917 					if ((inq_buf->flags & SID_CmdQue) != 0)
4918 						softc->action =
4919 						    PROBE_MODE_SENSE;
4920 					else
4921 						softc->action =
4922 						    PROBE_SERIAL_NUM;
4923 
4924 					path->device->flags &=
4925 						~CAM_DEV_UNCONFIGURED;
4926 
4927 					xpt_release_ccb(done_ccb);
4928 					xpt_schedule(periph, priority);
4929 					return;
4930 				}
4931 				default:
4932 					break;
4933 				}
4934 			}
4935 		} else if (cam_periph_error(done_ccb, 0,
4936 					    done_ccb->ccb_h.target_lun > 0
4937 					    ? SF_RETRY_UA|SF_QUIET_IR
4938 					    : SF_RETRY_UA,
4939 					    &softc->saved_ccb) == ERESTART) {
4940 			return;
4941 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4942 			/* Don't wedge the queue */
4943 			xpt_release_devq(done_ccb->ccb_h.path->device,
4944 					 /*run_queue*/TRUE);
4945 		}
4946 		/*
4947 		 * If we get to this point, we got an error status back
4948 		 * from the inquiry and the error status doesn't require
4949 		 * automatically retrying the command.  Therefore, the
4950 		 * inquiry failed.  If we had inquiry information before
4951 		 * for this device, but this latest inquiry command failed,
4952 		 * the device has probably gone away.  If this device isn't
4953 		 * already marked unconfigured, notify the peripheral
4954 		 * drivers that this device is no more.
4955 		 */
4956 		if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
4957 			/* Send the async notification. */
4958 			xpt_async(AC_LOST_DEVICE, path, NULL);
4959 
4960 		xpt_release_ccb(done_ccb);
4961 		break;
4962 	}
4963 	case PROBE_MODE_SENSE:
4964 	{
4965 		struct ccb_scsiio *csio;
4966 		struct scsi_mode_header_6 *mode_hdr;
4967 
4968 		csio = &done_ccb->csio;
4969 		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
4970 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4971 			struct scsi_control_page *page;
4972 			u_int8_t *offset;
4973 
4974 			offset = ((u_int8_t *)&mode_hdr[1])
4975 			    + mode_hdr->blk_desc_len;
4976 			page = (struct scsi_control_page *)offset;
4977 			path->device->queue_flags = page->queue_flags;
4978 		} else if (cam_periph_error(done_ccb, 0,
4979 					    SF_RETRY_UA|SF_NO_PRINT,
4980 					    &softc->saved_ccb) == ERESTART) {
4981 			return;
4982 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4983 			/* Don't wedge the queue */
4984 			xpt_release_devq(done_ccb->ccb_h.path->device,
4985 					 /*run_queue*/TRUE);
4986 		}
4987 		xpt_release_ccb(done_ccb);
4988 		free(mode_hdr, M_TEMP);
4989 		softc->action = PROBE_SERIAL_NUM;
4990 		xpt_schedule(periph, priority);
4991 		return;
4992 	}
4993 	case PROBE_SERIAL_NUM:
4994 	{
4995 		struct ccb_scsiio *csio;
4996 		struct scsi_vpd_unit_serial_number *serial_buf;
4997 		u_int32_t  priority;
4998 		int changed;
4999 		int have_serialnum;
5000 
5001 		changed = 1;
5002 		have_serialnum = 0;
5003 		csio = &done_ccb->csio;
5004 		priority = done_ccb->ccb_h.pinfo.priority;
5005 		serial_buf =
5006 		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
5007 
5008 		/* Clean up from previous instance of this device */
5009 		if (path->device->serial_num != NULL) {
5010 			free(path->device->serial_num, M_DEVBUF);
5011 			path->device->serial_num = NULL;
5012 			path->device->serial_num_len = 0;
5013 		}
5014 
5015 		if (serial_buf == NULL) {
5016 			/*
5017 			 * Don't process the command as it was never sent
5018 			 */
5019 		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
5020 		      && (serial_buf->length > 0)) {
5021 
5022 			have_serialnum = 1;
5023 			path->device->serial_num =
5024 				(u_int8_t *)malloc((serial_buf->length + 1),
5025 						   M_DEVBUF, M_NOWAIT);
5026 			if (path->device->serial_num != NULL) {
5027 				bcopy(serial_buf->serial_num,
5028 				      path->device->serial_num,
5029 				      serial_buf->length);
5030 				path->device->serial_num_len =
5031 				    serial_buf->length;
5032 				path->device->serial_num[serial_buf->length]
5033 				    = '\0';
5034 			}
5035 		} else if (cam_periph_error(done_ccb, 0,
5036 					    SF_RETRY_UA|SF_NO_PRINT,
5037 					    &softc->saved_ccb) == ERESTART) {
5038 			return;
5039 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5040 			/* Don't wedge the queue */
5041 			xpt_release_devq(done_ccb->ccb_h.path->device,
5042 					 /*run_queue*/TRUE);
5043 		}
5044 
5045 		/*
5046 		 * Let's see if we have seen this device before.
5047 		 */
5048 		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
5049 			MD5_CTX context;
5050 			u_int8_t digest[16];
5051 
5052 			MD5Init(&context);
5053 
5054 			MD5Update(&context,
5055 				  (unsigned char *)&path->device->inq_data,
5056 				  sizeof(struct scsi_inquiry_data));
5057 
5058 			if (have_serialnum)
5059 				MD5Update(&context, serial_buf->serial_num,
5060 					  serial_buf->length);
5061 
5062 			MD5Final(digest, &context);
5063 			if (bcmp(softc->digest, digest, 16) == 0)
5064 				changed = 0;
5065 
5066 			/*
5067 			 * XXX Do we need to do a TUR in order to ensure
5068 			 *     that the device really hasn't changed???
5069 			 */
5070 			if ((changed != 0)
5071 			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
5072 				xpt_async(AC_LOST_DEVICE, path, NULL);
5073 		}
5074 		if (serial_buf != NULL)
5075 			free(serial_buf, M_TEMP);
5076 
5077 		if (changed != 0) {
5078 			/*
5079 			 * We now have all the information necessary
5080 			 * to safely perform transfer negotiations.
5081 			 * Controllers don't perform any negotiation
5082 			 * or tagged queuing until after the first
5083 			 * XPT_SET_TRAN_SETTINGS ccb is received.  So,
5084 			 * on a new device, just retrieve the user
5085 			 * settings, and set them as the current
5086 			 * settings to set the device up.
5087 			 */
5088 			done_ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5089 			done_ccb->cts.flags = CCB_TRANS_USER_SETTINGS;
5090 			xpt_action(done_ccb);
5091 			done_ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5092 			done_ccb->cts.flags &= ~CCB_TRANS_USER_SETTINGS;
5093 			done_ccb->cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
5094 			xpt_action(done_ccb);
5095 			xpt_release_ccb(done_ccb);
5096 
5097 			/*
5098 			 * Perform a TUR to allow the controller to
5099 			 * perform any necessary transfer negotiation.
5100 			 */
5101 			softc->action = PROBE_TUR_FOR_NEGOTIATION;
5102 			xpt_schedule(periph, priority);
5103 			return;
5104 		}
5105 		xpt_release_ccb(done_ccb);
5106 		break;
5107 	}
5108 	case PROBE_TUR_FOR_NEGOTIATION:
5109 		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5110 			/* Don't wedge the queue */
5111 			xpt_release_devq(done_ccb->ccb_h.path->device,
5112 					 /*run_queue*/TRUE);
5113 		}
5114 
5115 		path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5116 
5117 		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
5118 			/* Inform the XPT that a new device has been found */
5119 			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5120 			xpt_action(done_ccb);
5121 
5122 			xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb);
5123 		}
5124 		xpt_release_ccb(done_ccb);
5125 		break;
5126 	}
5127 	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5128 	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
5129 	done_ccb->ccb_h.status = CAM_REQ_CMP;
5130 	xpt_done(done_ccb);
5131 	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
5132 		cam_periph_invalidate(periph);
5133 		cam_periph_release(periph);
5134 	} else {
5135 		probeschedule(periph);
5136 	}
5137 }
5138 
5139 static void
5140 probecleanup(struct cam_periph *periph)
5141 {
5142 	free(periph->softc, M_TEMP);
5143 }
5144 
5145 static void
5146 xpt_find_quirk(struct cam_ed *device)
5147 {
5148 	caddr_t	match;
5149 
5150 	match = cam_quirkmatch((caddr_t)&device->inq_data,
5151 			       (caddr_t)xpt_quirk_table,
5152 			       sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
5153 			       sizeof(*xpt_quirk_table), scsi_inquiry_match);
5154 
5155 	if (match == NULL)
5156 		panic("xpt_find_quirk: device didn't match wildcard entry!!");
5157 
5158 	device->quirk = (struct xpt_quirk_entry *)match;
5159 }
5160 
5161 static void
5162 xpt_set_transfer_settings(struct ccb_trans_settings *cts, int async_update)
5163 {
5164 	struct	cam_ed *device;
5165 	struct	cam_sim *sim;
5166 	int	qfrozen;
5167 
5168 	device = cts->ccb_h.path->device;
5169 	sim = cts->ccb_h.path->bus->sim;
5170 	if (async_update == FALSE) {
5171 		struct	scsi_inquiry_data *inq_data;
5172 		struct	ccb_pathinq cpi;
5173 
5174 		/*
5175 		 * Perform sanity checking against what the
5176 		 * controller and device can do.
5177 		 */
5178 		xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
5179 		cpi.ccb_h.func_code = XPT_PATH_INQ;
5180 		xpt_action((union ccb *)&cpi);
5181 
5182 		inq_data = &device->inq_data;
5183 		if ((inq_data->flags & SID_Sync) == 0
5184 		 || (cpi.hba_inquiry & PI_SDTR_ABLE) == 0) {
5185 			/* Force async */
5186 			cts->sync_period = 0;
5187 			cts->sync_offset = 0;
5188 		}
5189 
5190 		switch (cts->bus_width) {
5191 		case MSG_EXT_WDTR_BUS_32_BIT:
5192 			if ((inq_data->flags & SID_WBus32) != 0
5193 			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
5194 				break;
5195 			/* Fall Through to 16-bit */
5196 		case MSG_EXT_WDTR_BUS_16_BIT:
5197 			if ((inq_data->flags & SID_WBus16) != 0
5198 			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
5199 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
5200 				break;
5201 			}
5202 			/* Fall Through to 8-bit */
5203 		default: /* New bus width?? */
5204 		case MSG_EXT_WDTR_BUS_8_BIT:
5205 			/* All targets can do this */
5206 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
5207 			break;
5208 		}
5209 
5210 		if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
5211 			/*
5212 			 * Can't tag queue without disconnection.
5213 			 */
5214 			cts->flags &= ~CCB_TRANS_TAG_ENB;
5215 		}
5216 
5217 		if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
5218 		 || (inq_data->flags & SID_CmdQue) == 0
5219 		 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
5220 		 || (device->quirk->mintags == 0)) {
5221 			/*
5222 			 * Can't tag on hardware that doesn't support tags,
5223 			 * doesn't have them enabled, or has broken tag support.
5224 			 */
5225 			cts->flags &= ~CCB_TRANS_TAG_ENB;
5226 		}
5227 	}
5228 
5229 	/*
5230 	 * If we are transitioning from tags to no-tags or
5231 	 * vice-versa, we need to carefully freeze and restart
5232 	 * the queue so that we don't overlap tagged and non-tagged
5233 	 * commands.
5234 	 */
5235 	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0
5236 	 && (((cts->flags & CCB_TRANS_TAG_ENB) != 0
5237 	   && (device->inq_flags & SID_CmdQue) == 0)
5238 	  || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
5239 	   && (device->inq_flags & SID_CmdQue) != 0))) {
5240 		int newopenings;
5241 
5242 		xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
5243 		qfrozen = TRUE;
5244 
5245 		if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
5246 			newopenings = min(device->quirk->maxtags,
5247 					  sim->max_tagged_dev_openings);
5248 	  		device->inq_flags |= SID_CmdQue;
5249 		} else {
5250 			newopenings = sim->max_dev_openings;
5251 	  		device->inq_flags &= ~SID_CmdQue;
5252 		}
5253 		xpt_dev_ccbq_resize(cts->ccb_h.path, newopenings);
5254 	} else {
5255 		qfrozen = FALSE;
5256 	}
5257 
5258 	if (async_update == FALSE)
5259 		(*(sim->sim_action))(sim, (union ccb *)cts);
5260 
5261 	if (qfrozen) {
5262 		struct ccb_relsim crs;
5263 
5264 		xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
5265 			      /*priority*/1);
5266 		crs.ccb_h.func_code = XPT_REL_SIMQ;
5267 		crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5268 		crs.openings
5269 		    = crs.release_timeout
5270 		    = crs.qfrozen_cnt
5271 		    = 0;
5272 		xpt_action((union ccb *)&crs);
5273 	}
5274 }
5275 
5276 static int busses_to_config;
5277 
5278 static int
5279 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
5280 {
5281 	if (bus->path_id != CAM_XPT_PATH_ID)
5282 		busses_to_config++;
5283 
5284 	return(1);
5285 }
5286 
5287 static int
5288 xptconfigfunc(struct cam_eb *bus, void *arg)
5289 {
5290 	struct	cam_path *path;
5291 	union	ccb *work_ccb;
5292 
5293 	if (bus->path_id != CAM_XPT_PATH_ID) {
5294 		cam_status status;
5295 
5296 		work_ccb = xpt_alloc_ccb();
5297 		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
5298 					      CAM_TARGET_WILDCARD,
5299 					      CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
5300 			printf("xptconfigfunc: xpt_create_path failed with "
5301 			       "status %#x for bus %d\n", status, bus->path_id);
5302 			printf("xptconfigfunc: halting bus configuration\n");
5303 			xpt_free_ccb(work_ccb);
5304 			return(0);
5305 		}
5306 		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
5307 		work_ccb->ccb_h.func_code = XPT_RESET_BUS;
5308 		work_ccb->ccb_h.cbfcnp = NULL;
5309 		CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
5310 			  ("Resetting Bus\n"));
5311 		xpt_action(work_ccb);
5312 		xpt_finishconfig(xpt_periph, work_ccb);
5313 	}
5314 
5315 	return(1);
5316 
5317 }
5318 
5319 static void
5320 xpt_config(void *arg)
5321 {
5322 	/* Now that interrupts are enabled, go find our devices */
5323 	struct cam_eb *bus;
5324 
5325 #ifdef CAMDEBUG
5326 	/* Setup debugging flags and path */
5327 #ifdef CAM_DEBUG_FLAGS
5328 	cam_dflags = CAM_DEBUG_FLAGS;
5329 #else /* !CAM_DEBUG_FLAGS */
5330 	cam_dflags = CAM_DEBUG_NONE;
5331 #endif /* CAM_DEBUG_FLAGS */
5332 #ifdef CAM_DEBUG_BUS
5333 	if (cam_dflags != CAM_DEBUG_NONE) {
5334 		if (xpt_create_path(&cam_dpath, xpt_periph,
5335 				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
5336 				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
5337 			printf("xpt_config: xpt_create_path() failed for debug"
5338 			       " target %d:%d:%d, debugging disabled\n",
5339 			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
5340 			cam_dflags = CAM_DEBUG_NONE;
5341 		}
5342 	} else
5343 		cam_dpath = NULL;
5344 #else /* !CAM_DEBUG_BUS */
5345 	cam_dpath = NULL;
5346 #endif /* CAM_DEBUG_BUS */
5347 #endif /* CAMDEBUG */
5348 
5349 	/* Scan all installed busses */
5350 	xpt_for_all_busses(xptconfigbuscountfunc, NULL);
5351 
5352 	xpt_for_all_busses(xptconfigfunc, NULL);
5353 
5354 	/* Call xpt_finishconfig once in case we didn't have any busses */
5355 	xpt_finishconfig(xpt_periph, NULL);
5356 }
5357 
5358 static int
5359 xptfinishconfigfunc(struct cam_ed *device, void *arg)
5360 {
5361 	union ccb *done_ccb;
5362 	cam_status status;
5363 
5364 	done_ccb = (union ccb *)arg;
5365 
5366 	if ((status = xpt_create_path(&done_ccb->ccb_h.path,
5367 				      xpt_periph, device->target->bus->path_id,
5368 				      device->target->target_id,
5369 				      device->lun_id)) != CAM_REQ_CMP) {
5370 		printf("xptfinishconfig: xpt_create_path failed with status"
5371 		       " %#x, halting bus configuration\n", status);
5372 		return(0);
5373 	}
5374 
5375 	xpt_setup_ccb(&done_ccb->ccb_h,
5376 		      done_ccb->ccb_h.path,
5377 		      /*priority*/1);
5378 
5379 	done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5380 	xpt_action(done_ccb);
5381 	xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path, done_ccb);
5382 
5383 	return(1);
5384 }
5385 
5386 /*
5387  * If the given device only has one peripheral attached to it, and if that
5388  * peripheral is the passthrough driver, announce it.  This ensures that the
5389  * user sees some sort of announcement for every peripheral in their system.
5390  */
5391 static int
5392 xptpassannouncefunc(struct cam_ed *device, void *arg)
5393 {
5394 	struct cam_periph *periph;
5395 	int i;
5396 
5397 	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
5398 	     periph = SLIST_NEXT(periph, periph_links), i++);
5399 
5400 	periph = SLIST_FIRST(&device->periphs);
5401 	if ((i == 1)
5402 	 && (strncmp(periph->periph_name, "pass", 4) == 0))
5403 		xpt_announce_periph(periph, NULL);
5404 
5405 	return(1);
5406 }
5407 
5408 static void
5409 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
5410 {
5411 	struct	periph_driver **p_drv;
5412 	struct	cam_eb *bus;
5413 	struct	cam_et *target;
5414 	struct	cam_ed *dev;
5415 	struct	cam_periph  *nperiph;
5416 	struct	periph_list *periph_head;
5417 	int	i;
5418 
5419 	if (done_ccb != NULL) {
5420 		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5421 			  ("xpt_finishconfig\n"));
5422 		switch(done_ccb->ccb_h.func_code) {
5423 		case XPT_RESET_BUS:
5424 			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
5425 				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
5426 				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
5427 				xpt_action(done_ccb);
5428 				return;
5429 			}
5430 			/* FALLTHROUGH */
5431 		case XPT_SCAN_BUS:
5432 			xpt_free_path(done_ccb->ccb_h.path);
5433 			busses_to_config--;
5434 			break;
5435 		default:
5436 			break;
5437 		}
5438 	}
5439 
5440 	if (busses_to_config == 0) {
5441 		/* Register all the peripheral drivers */
5442 		/* XXX This will have to change when we have LKMs */
5443 		p_drv = (struct periph_driver **)periphdriver_set.ls_items;
5444 		for (i = 0; p_drv[i] != NULL; i++) {
5445 			(*p_drv[i]->init)();
5446 		}
5447 
5448 		/*
5449 		 * Iterate through our devices, announcing
5450 		 * them in probed bus order.
5451 		 */
5452 		xpt_for_all_devices(xptfinishconfigfunc, done_ccb);
5453 
5454 		/*
5455 		 * Check for devices with no "standard" peripheral driver
5456 		 * attached.  For any devices like that, announce the
5457 		 * passthrough driver so the user will see something.
5458 		 */
5459 		xpt_for_all_devices(xptpassannouncefunc, NULL);
5460 
5461 		/* Release our hook so that the boot can continue. */
5462 		config_intrhook_disestablish(xpt_config_hook);
5463 	}
5464 	if (done_ccb != NULL)
5465 		xpt_free_ccb(done_ccb);
5466 }
5467 
5468 static void
5469 xptaction(struct cam_sim *sim, union ccb *work_ccb)
5470 {
5471 	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
5472 
5473 	switch (work_ccb->ccb_h.func_code) {
5474 	/* Common cases first */
5475 	case XPT_PATH_INQ:		/* Path routing inquiry */
5476 	{
5477 		struct ccb_pathinq *cpi;
5478 
5479 		cpi = &work_ccb->cpi;
5480 		cpi->version_num = 1; /* XXX??? */
5481 		cpi->hba_inquiry = 0;
5482 		cpi->target_sprt = 0;
5483 		cpi->hba_misc = 0;
5484 		cpi->hba_eng_cnt = 0;
5485 		cpi->max_target = 0;
5486 		cpi->max_lun = 0;
5487 		cpi->initiator_id = 0;
5488 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5489 		strncpy(cpi->hba_vid, "", HBA_IDLEN);
5490 		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
5491 		cpi->unit_number = sim->unit_number;
5492 		cpi->bus_id = sim->bus_id;
5493 		cpi->ccb_h.status = CAM_REQ_CMP;
5494 		xpt_done(work_ccb);
5495 		break;
5496 	}
5497 	default:
5498 		work_ccb->ccb_h.status = CAM_REQ_INVALID;
5499 		xpt_done(work_ccb);
5500 		break;
5501 	}
5502 }
5503 
5504 /*
5505  * Should only be called by the machine interrupt dispatch routines,
5506  * so put these prototypes here instead of in the header.
5507  *
5508  * XXX we should really have a way to dynamically register SWI handlers.
5509  */
5510 
5511 void
5512 swi_camnet()
5513 {
5514 	camisr(&cam_netq);
5515 }
5516 
5517 void
5518 swi_cambio()
5519 {
5520 	camisr(&cam_bioq);
5521 }
5522 
5523 static void
5524 camisr(cam_isrq_t *queue)
5525 {
5526 	int	s;
5527 	struct	ccb_hdr *ccb_h;
5528 
5529 	s = splcam();
5530 	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
5531 		int	runq;
5532 
5533 		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
5534 		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
5535 		splx(s);
5536 
5537 		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
5538 			  ("camisr\n"));
5539 
5540 		runq = FALSE;
5541 
5542 		if (ccb_h->flags & CAM_HIGH_POWER) {
5543 			struct highpowerlist	*hphead;
5544 			struct cam_ed		*device;
5545 			union ccb		*send_ccb;
5546 
5547 			hphead = &highpowerq;
5548 
5549 			send_ccb = (union ccb *)STAILQ_FIRST(hphead);
5550 
5551 			/*
5552 			 * Increment the count since this command is done.
5553 			 */
5554 			num_highpower++;
5555 
5556 			/*
5557 			 * Any high powered commands queued up?
5558 			 */
5559 			if (send_ccb != NULL) {
5560 				device = send_ccb->ccb_h.path->device;
5561 
5562 				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
5563 
5564 				xpt_release_devq(send_ccb->ccb_h.path->device,
5565 						 TRUE);
5566 			}
5567 		}
5568 		if ((ccb_h->func_code != XPT_ACCEPT_TARGET_IO)
5569 		 && (ccb_h->func_code != XPT_SCAN_LUN)
5570 		 && (ccb_h->func_code != XPT_SCAN_BUS)) {
5571 			struct cam_ed *dev;
5572 
5573 			dev = ccb_h->path->device;
5574 
5575 			s = splcam();
5576 			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5577 
5578 			ccb_h->path->bus->sim->devq->send_active--;
5579 			ccb_h->path->bus->sim->devq->send_openings++;
5580 			splx(s);
5581 
5582 			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5583 			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5584 			  && (dev->ccbq.dev_active == 0))) {
5585 
5586 				xpt_release_devq(ccb_h->path->device,
5587 						 /*run_queue*/TRUE);
5588 			}
5589 
5590 			if ((dev->ccbq.queue.entries > 0)
5591 			 && (dev->qfrozen_cnt == 0)
5592 			 && (device_is_send_queued(dev) == 0)) {
5593 				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
5594 							      dev);
5595 			}
5596 		}
5597 
5598 		if (ccb_h->status & CAM_RELEASE_SIMQ) {
5599 			xpt_release_simq(ccb_h->path->bus->sim,
5600 					 /*run_queue*/TRUE);
5601 		} else if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5602 			&& (ccb_h->status & CAM_DEV_QFRZN)) {
5603 			xpt_release_devq(ccb_h->path->device,
5604 					 /*run_queue*/TRUE);
5605 			ccb_h->status &= ~CAM_DEV_QFRZN;
5606 		} else if (runq) {
5607 			xpt_run_dev_sendq(ccb_h->path->bus);
5608 		}
5609 
5610 		/* Call the peripheral driver's callback */
5611 		(*ccb_h->cbfcnp)(ccb_h->path->periph,
5612 				 (union ccb *)ccb_h);
5613 
5614 		/* Raise IPL for the while-loop test */
5615 		s = splcam();
5616 	}
5617 	splx(s);
5618 }
5619