xref: /freebsd/sys/cam/cam_xpt.c (revision b601c69bdbe8755d26570261d7fd4c02ee4eff74)
/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/bus.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>	/* geometry translation */
#endif

#include <machine/clock.h>
#include <machine/ipl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include "opt_cam.h"

/* Datastructures internal to the xpt layer */

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};
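/*
 * Illustrative sketch (added; not part of the original file): a peripheral
 * driver typically hooks itself onto one of these async lists by issuing
 * an XPT_SASYNC_CB CCB, roughly:
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = mycallback;	// hypothetical handler
 *	csa.callback_arg = mysoftc;	// hypothetical softc
 *	xpt_action((union ccb *)&csa);
 *
 * xpt_action() then allocates an async_node like the one above and links
 * it onto the appropriate list.
 */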

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;
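/*
 * Descriptive note (added): num_highpower behaves like a counting
 * semaphore.  A CCB flagged CAM_HIGH_POWER consumes a slot when it is
 * dispatched; once the count reaches zero, further high powered CCBs are
 * parked on highpowerq until a completion releases a slot.
 */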

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
	struct	scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 serial_num_len;
	u_int8_t	 *serial_num;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define	CAM_DEV_UNCONFIGURED		0x01
#define	CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define	CAM_DEV_REL_ON_COMPLETE		0x04
#define	CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define	CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define	CAM_DEV_TAG_AFTER_COUNT		0x20
#define	CAM_DEV_INQUIRY_DATA_VALID	0x40
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 refcount;
	struct		 callout_handle c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
	struct		timeval last_reset;
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	struct timeval	     last_reset;
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int32_t	     refcount;
	u_int		     generation;
};

struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};
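/*
 * Descriptive note (added): the three structures above form the EDT
 * hierarchy, and a cam_path is simply a cursor into it:
 *
 *	cam_eb (bus) --> cam_et (target) --> cam_ed (device/LUN)
 *
 * A fully specified path pins one node at each level, plus optionally an
 * attached peripheral, which is why xpt_compile_path takes a
 * path_id/target_id/lun_id triple.
 */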

struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
#define	CAM_QUIRK_HILUNS	0x04
	u_int mintags;
	u_int maxtags;
};
#define	CAM_SCSI2_MAXLUN	8
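/*
 * Illustrative sketch (added): inq_pat is matched against a device's
 * INQUIRY data with shell-style globbing on the vendor, product, and
 * revision strings.  For example, a hypothetical entry
 *
 *	{ T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "XP39100*", "*" }
 *
 * matches any fixed direct-access Quantum drive whose product id starts
 * with "XP39100", at any firmware revision.  See the table below.
 */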

typedef enum {
	XPT_FLAG_OPEN		= 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags	flags;
	u_int32_t	generation;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 *
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
		/*
		 * The Seagate Medalist Pro drives have very poor write
		 * performance with anything more than 2 tags.
		 *
		 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
		 * Drive:  <SEAGATE ST36530N 1444>
		 *
		 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
		 * Drive:  <SEAGATE ST34520W 1281>
		 *
		 * No one has actually reported that the 9G version
		 * (ST39140*) of the Medalist Pro has the same problem, but
		 * we're assuming that it does because the 4G and 6.5G
		 * versions of the drive are broken.
		 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * PR:  kern/10398
		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 * in PR kern/9535
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by:  Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These Hitachi drives don't like multi-lun probing.
		 * The PR submitter has a DK319H, but says that the Linux
		 * kernel has a similar work-around for the DK312 and DK314,
		 * so all DK31* drives are quirked here.
		 * PR:            misc/18793
		 * Submitted by:  Paul Haddad <paul@pth.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
			"96X2*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* TeraSolutions special settings for TRC-22 RAID */
		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
		/*quirks*/0, /*mintags*/55, /*maxtags*/255
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};

static const int xpt_quirk_table_size =
	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
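/*
 * Descriptive note (added): xpt_find_quirk() walks this table with
 * cam_quirkmatch(), which returns the first entry whose pattern matches,
 * so more specific entries must precede the catch-all "*"/"*"/"*" entry
 * at the end.
 */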

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;
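/*
 * Descriptive note (added): dev_match_ret packs two fields into one byte.
 * The low nibble (DM_RET_FLAG_MASK) holds flags such as DM_RET_COPY, and
 * the high nibble (DM_RET_ACTION_MASK) holds exactly one action.  For
 * example, (DM_RET_DESCEND | DM_RET_COPY) means "copy this node out and
 * keep walking down the EDT"; actions are tested with
 * (retval & DM_RET_ACTION_MASK) == DM_RET_xxx.
 */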

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
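/*
 * Illustrative sketch (added): tr_func is stored as a void pointer and
 * cast to the typedef matching the configured depth, e.g. a device level
 * walk might be set up as:
 *
 *	struct xpt_traverse_config tr_config;
 *
 *	tr_config.depth = XPT_DEPTH_DEVICE;
 *	tr_config.tr_func = xptdefdevicefunc;
 *	tr_config.tr_arg = my_arg;	// hypothetical argument
 *
 * The traversal routines below invoke the callback on every node at that
 * depth; a callback returns nonzero to continue and zero to halt the walk.
 */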

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static cam_isrq_t cam_netq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;	/*
				 * Maximum size of ccb pool.  Modified as
				 * devices are added/removed or have their
				 * opening counts changed.
				 */
static u_int xpt_ccb_count;	/* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

DATA_SET(periphdriver_set, xpt_driver);
DATA_SET(periphdriver_set, probe_driver);

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
	/* open */	xptopen,
	/* close */	xptclose,
	/* read */	noread,
	/* write */	nowrite,
	/* ioctl */	xptioctl,
	/* poll */	nopoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"xpt",
	/* maj */	XPT_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* bmaj */	-1
};

static struct intr_config_hook *xpt_config_hook;

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef	CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
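/*
 * Example kernel configuration fragment (added; illustrative, with
 * placeholder bus/target/lun values for the path you want traced):
 *
 *	options	CAMDEBUG
 *	options	CAM_DEBUG_BUS=0
 *	options	CAM_DEBUG_TARGET=1
 *	options	CAM_DEBUG_LUN=0
 *	options	CAM_DEBUG_FLAGS="CAM_DEBUG_INFO|CAM_DEBUG_CDB"
 */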

/* Our boot-time initialization hook */
static void	xpt_init(void *);
SYSINIT(cam, SI_SUB_CONFIGURE, SI_ORDER_SECOND, xpt_init, NULL);

static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static timeout_t xpt_release_simq_timeout;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
					 int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static swihand_t swi_camnet;
static swihand_t swi_cambio;
static void	 camisr(cam_isrq_t *queue);
#if 0
static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void	 xptasync(struct cam_periph *periph,
			  u_int32_t code, cam_path *path);
#endif
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       int num_patterns, struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
					    void *arg);
#endif
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
#ifdef notusedyet
static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
					    void *arg);
#endif
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}
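/*
 * Descriptive note (added): both schedulers below implement priority
 * inheritance.  A device competes in its SIM's run queue at the priority
 * of the most urgent work queued against it: the head of drvq (waiting
 * peripheral drivers) for CCB allocation, or the head of ccbq.queue
 * (pending CCBs) for dispatch to the controller.
 */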

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int	retval;

	if (dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}

static void
xpt_periph_init()
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init()
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("xpt%d: can't do nonblocking access\n", unit);
		return(ENODEV);
	}

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptopen: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves open */
	xsoftc.flags |= XPT_FLAG_OPEN;

	return(0);
}

static int
xptclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptclose: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves closed */
	xsoftc.flags &= ~XPT_FLAG_OPEN;

	return(0);
}

static int
xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	int unit, error;

	error = 0;
	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptioctl: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;

		inccb = (union ccb *)addr;

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_LUN:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = EINVAL;
			break;
		}
		break;
	}
	/*
	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit number filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass") and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.  Note
	 * that since we do everything in one pass, unlike the XPT_GDEVLIST
	 * ccb, we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change since we look at the whole thing in one pass, and
	 * we do it with splcam protection.
	 */
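	/*
	 * Illustrative userland sketch (added; error handling omitted and
	 * the driver/unit names are placeholders):
	 *
	 *	union ccb ccb;
	 *	int fd = open("/dev/xpt0", O_RDWR);
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
	 *	strcpy(ccb.cgdl.periph_name, "da");
	 *	ccb.cgdl.unit_number = 1;
	 *	ioctl(fd, CAMGETPASSTHRU, &ccb);
	 *	if (ccb.ccb_h.status == CAM_REQ_CMP)
	 *		printf("%s%d\n", ccb.cgdl.periph_name,
	 *		       ccb.cgdl.unit_number);
	 */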
1095 		union ccb *ccb;
1096 		struct cam_periph *periph;
1097 		struct periph_driver **p_drv;
1098 		char   *name;
1099 		int unit;
1100 		int cur_generation;
1101 		int base_periph_found;
1102 		int splbreaknum;
1103 		int s;
1104 
1105 		ccb = (union ccb *)addr;
1106 		unit = ccb->cgdl.unit_number;
1107 		name = ccb->cgdl.periph_name;
1108 		/*
1109 		 * Every 100 devices, we want to drop our spl protection to
1110 		 * give the software interrupt handler a chance to run.
1111 		 * Most systems won't run into this check, but this should
1112 		 * avoid starvation in the software interrupt handler in
1113 		 * large systems.
1114 		 */
1115 		splbreaknum = 100;
1116 
1117 		ccb = (union ccb *)addr;
1118 
1119 		base_periph_found = 0;
1120 
1121 		/*
1122 		 * Sanity check -- make sure we don't get a null peripheral
1123 		 * driver name.
1124 		 */
1125 		if (*ccb->cgdl.periph_name == '\0') {
1126 			error = EINVAL;
1127 			break;
1128 		}
1129 
1130 		/* Keep the list from changing while we traverse it */
1131 		s = splcam();
1132 ptstartover:
1133 		cur_generation = xsoftc.generation;
1134 
1135 		/* first find our driver in the list of drivers */
1136 		for (p_drv = (struct periph_driver **)periphdriver_set.ls_items;
1137 		     *p_drv != NULL; p_drv++)
1138 			if (strcmp((*p_drv)->driver_name, name) == 0)
1139 				break;
1140 
1141 		if (*p_drv == NULL) {
1142 			splx(s);
1143 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1144 			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1145 			*ccb->cgdl.periph_name = '\0';
1146 			ccb->cgdl.unit_number = 0;
1147 			error = ENOENT;
1148 			break;
1149 		}
1150 
1151 		/*
1152 		 * Run through every peripheral instance of this driver
1153 		 * and check to see whether it matches the unit passed
1154 		 * in by the user.  If it does, get out of the loops and
1155 		 * find the passthrough driver associated with that
1156 		 * peripheral driver.
1157 		 */
1158 		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
1159 		     periph = TAILQ_NEXT(periph, unit_links)) {
1160 
1161 			if (periph->unit_number == unit) {
1162 				break;
1163 			} else if (--splbreaknum == 0) {
1164 				splx(s);
1165 				s = splcam();
1166 				splbreaknum = 100;
1167 				if (cur_generation != xsoftc.generation)
1168 				       goto ptstartover;
1169 			}
1170 		}
1171 		/*
1172 		 * If we found the peripheral driver that the user passed
1173 		 * in, go through all of the peripheral drivers for that
1174 		 * particular device and look for a passthrough driver.
1175 		 */
1176 		if (periph != NULL) {
1177 			struct cam_ed *device;
1178 			int i;
1179 
1180 			base_periph_found = 1;
1181 			device = periph->path->device;
1182 			for (i = 0, periph = device->periphs.slh_first;
1183 			     periph != NULL;
1184 			     periph = periph->periph_links.sle_next, i++) {
1185 				/*
1186 				 * Check to see whether we have a
1187 				 * passthrough device or not.
1188 				 */
1189 				if (strcmp(periph->periph_name, "pass") == 0) {
1190 					/*
1191 					 * Fill in the getdevlist fields.
1192 					 */
1193 					strcpy(ccb->cgdl.periph_name,
1194 					       periph->periph_name);
1195 					ccb->cgdl.unit_number =
1196 						periph->unit_number;
1197 					if (periph->periph_links.sle_next)
1198 						ccb->cgdl.status =
1199 							CAM_GDEVLIST_MORE_DEVS;
1200 					else
1201 						ccb->cgdl.status =
1202 						       CAM_GDEVLIST_LAST_DEVICE;
1203 					ccb->cgdl.generation =
1204 						device->generation;
1205 					ccb->cgdl.index = i;
1206 					/*
1207 					 * Fill in some CCB header fields
1208 					 * that the user may want.
1209 					 */
1210 					ccb->ccb_h.path_id =
1211 						periph->path->bus->path_id;
1212 					ccb->ccb_h.target_id =
1213 						periph->path->target->target_id;
1214 					ccb->ccb_h.target_lun =
1215 						periph->path->device->lun_id;
1216 					ccb->ccb_h.status = CAM_REQ_CMP;
1217 					break;
1218 				}
1219 			}
1220 		}
1221 
1222 		/*
1223 		 * If the periph is null here, one of two things has
1224 		 * happened.  The first possibility is that we couldn't
1225 		 * find the unit number of the particular peripheral driver
1226 		 * that the user is asking about.  e.g. the user asks for
1227 		 * the passthrough driver for "da11".  We find the list of
1228 		 * "da" peripherals all right, but there is no unit 11.
1229 		 * The other possibility is that we went through the list
1230 		 * of peripheral drivers attached to the device structure,
1231 		 * but didn't find one with the name "pass".  Either way,
1232 		 * we return ENOENT, since we couldn't find something.
1233 		 */
1234 		if (periph == NULL) {
1235 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1236 			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1237 			*ccb->cgdl.periph_name = '\0';
1238 			ccb->cgdl.unit_number = 0;
1239 			error = ENOENT;
1240 			/*
1241 			 * It is unfortunate that this is even necessary,
1242 			 * but there are many, many clueless users out there.
1243 			 * If this is true, the user is looking for the
1244 			 * passthrough driver, but doesn't have one in his
1245 			 * kernel.
1246 			 */
1247 			if (base_periph_found == 1) {
1248 				printf("xptioctl: pass driver is not in the "
1249 				       "kernel\n");
1250 				printf("xptioctl: put \"device pass0\" in "
1251 				       "your kernel config file\n");
1252 			}
1253 		}
1254 		splx(s);
1255 		break;
1256 		}
1257 	default:
1258 		error = ENOTTY;
1259 		break;
1260 	}
1261 
1262 	return(error);
1263 }

/* Functions accessed by the peripheral drivers */
static void
xpt_init(dummy)
	void *dummy;
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;

	TAILQ_INIT(&xpt_busses);
	TAILQ_INIT(&cam_bioq);
	TAILQ_INIT(&cam_netq);
	SLIST_INIT(&ccb_freeq);
	STAILQ_INIT(&highpowerq);

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	xpt_max_ccbs = 16;

	xpt_bus_register(xpt_sim, /*bus #*/0);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return;
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, NULL);
	xpt_free_path(path);

	xpt_sim->softc = xpt_periph;

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_TEMP, M_NOWAIT);
	if (xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return;
	}
	bzero(xpt_config_hook, sizeof(*xpt_config_hook));

	xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xpt_config_hook) != 0) {
		free (xpt_config_hook, M_TEMP);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* Install our software interrupt handlers */
	register_swi(SWI_CAMNET, swi_camnet);
	register_swi(SWI_CAMBIO, swi_cambio);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	periph->softc = NULL;

	xpt_periph = periph;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		int s;

		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		s = splsoftcam();
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;

	if (device != NULL) {
		int s;
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		s = splsoftcam();
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

}

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	int s;
	u_int mb;
	struct cam_path *path;
	struct ccb_trans_settings cts;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	s = splsoftcam();
	printf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if ((bootverbose)
	 && (path->device->serial_num_len > 0)) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if (cts.ccb_h.status == CAM_REQ_CMP) {
		u_int speed;
		u_int freq;

		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		  && cts.sync_offset != 0) {
			freq = scsi_calc_syncsrate(cts.sync_period);
			speed = freq;
		} else {
			struct ccb_pathinq cpi;

			/* Ask the SIM for its base transfer speed */
			xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
			cpi.ccb_h.func_code = XPT_PATH_INQ;
			xpt_action((union ccb *)&cpi);

			speed = cpi.base_transfer_speed;
			freq = 0;
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
			speed *= (0x01 << cts.bus_width);
		mb = speed / 1000;
		if (mb > 0)
			printf("%s%d: %d.%03dMB/s transfers",
			       periph->periph_name, periph->unit_number,
			       mb, speed % 1000);
		else
			printf("%s%d: %dKB/s transfers", periph->periph_name,
			       periph->unit_number, speed);
		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		 && cts.sync_offset != 0) {
			printf(" (%d.%03dMHz, offset %d", freq / 1000,
			       freq % 1000, cts.sync_offset);
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
		 && cts.bus_width > 0) {
			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			 && cts.sync_offset != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << cts.bus_width));
		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			&& cts.sync_offset != 0) {
			printf(")");
		}

		if (path->device->inq_flags & SID_CmdQue
		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
			printf(", Tagged Queueing Enabled");
		}

		printf("\n");
	} else if (path->device->inq_flags & SID_CmdQue
		|| path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("%s%d: Tagged Queueing Enabled\n",
		       periph->periph_name, periph->unit_number);
	}

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in.
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
	splx(s);
}
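/*
 * Worked example (added; numbers are illustrative): for a wide Ultra2
 * target, scsi_calc_syncsrate() would report freq = 40000 (kHz), so
 * speed = 40000 KB/s on the 8-bit data path.  With bus_width = 1 the
 * (0x01 << cts.bus_width) factor doubles that to 80000, and the code
 * above prints "80.000MB/s transfers (40.000MHz, offset N, 16bit)".
 */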


static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them this
		 * bus node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
1639 
1640 static dev_match_ret
1641 xptdevicematch(struct dev_match_pattern *patterns, int num_patterns,
1642 	       struct cam_ed *device)
1643 {
1644 	dev_match_ret retval;
1645 	int i;
1646 
1647 	retval = DM_RET_NONE;
1648 
1649 	/*
1650 	 * If we aren't given something to match against, that's an error.
1651 	 */
1652 	if (device == NULL)
1653 		return(DM_RET_ERROR);
1654 
1655 	/*
1656 	 * If there are no match entries, then this device matches no
1657 	 * matter what.
1658 	 */
1659 	if ((patterns == NULL) || (patterns == 0))
1660 		return(DM_RET_DESCEND | DM_RET_COPY);
1661 
1662 	for (i = 0; i < num_patterns; i++) {
1663 		struct device_match_pattern *cur_pattern;
1664 
1665 		/*
1666 		 * If the pattern in question isn't for a device node, we
1667 		 * aren't interested.
1668 		 */
1669 		if (patterns[i].type != DEV_MATCH_DEVICE) {
1670 			if ((patterns[i].type == DEV_MATCH_PERIPH)
1671 			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1672 				retval |= DM_RET_DESCEND;
1673 			continue;
1674 		}
1675 
1676 		cur_pattern = &patterns[i].pattern.device_pattern;
1677 
1678 		/*
1679 		 * If they want to match any device node, we give them any
1680 		 * device node.
1681 		 */
1682 		if (cur_pattern->flags == DEV_MATCH_ANY) {
1683 			/* set the copy flag */
1684 			retval |= DM_RET_COPY;
1685 
1686 
1687 			/*
1688 			 * If we've already decided on an action, go ahead
1689 			 * and return.
1690 			 */
1691 			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1692 				return(retval);
1693 		}
1694 
1695 		/*
1696 		 * Not sure why someone would do this...
1697 		 */
1698 		if (cur_pattern->flags == DEV_MATCH_NONE)
1699 			continue;
1700 
1701 		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1702 		 && (cur_pattern->path_id != device->target->bus->path_id))
1703 			continue;
1704 
1705 		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1706 		 && (cur_pattern->target_id != device->target->target_id))
1707 			continue;
1708 
1709 		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1710 		 && (cur_pattern->target_lun != device->lun_id))
1711 			continue;
1712 
1713 		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1714 		 && (cam_quirkmatch((caddr_t)&device->inq_data,
1715 				    (caddr_t)&cur_pattern->inq_pat,
1716 				    1, sizeof(cur_pattern->inq_pat),
1717 				    scsi_static_inquiry_match) == NULL))
1718 			continue;
1719 
1720 		/*
1721 		 * If we get to this point, the user definitely wants
1722 		 * information on this device.  So tell the caller to copy
1723 		 * the data out.
1724 		 */
1725 		retval |= DM_RET_COPY;
1726 
1727 		/*
1728 		 * If the return action has been set to descend, then we
1729 		 * know that we've already seen a peripheral matching
1730 		 * expression, therefore we need to further descend the tree.
1731 		 * This won't change by continuing around the loop, so we
1732 		 * go ahead and return.  If we haven't seen a peripheral
1733 		 * matching expression, we keep going around the loop until
1734 		 * we exhaust the matching expressions.  We'll set the stop
1735 		 * flag once we fall out of the loop.
1736 		 */
1737 		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1738 			return(retval);
1739 	}
1740 
1741 	/*
1742 	 * If the return action hasn't been set to descend yet, that means
1743 	 * we haven't seen any peripheral matching patterns.  So tell the
1744 	 * caller to stop descending the tree -- the user doesn't want to
1745 	 * match against lower level tree elements.
1746 	 */
1747 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1748 		retval |= DM_RET_STOP;
1749 
1750 	return(retval);
1751 }
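
/*
 * A minimal sketch (hypothetical names, not compiled) of a pattern the
 * routine above acts on: one entry matching any device at lun 0.
 *
 *	struct dev_match_pattern pat;
 *
 *	bzero(&pat, sizeof(pat));
 *	pat.type = DEV_MATCH_DEVICE;
 *	pat.pattern.device_pattern.flags = DEV_MATCH_LUN;
 *	pat.pattern.device_pattern.target_lun = 0;
 *
 * Passed in via the patterns array, this sets DM_RET_COPY in the
 * return value for every device whose lun_id is 0.
 */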
1752 
1753 /*
1754  * Match a single peripheral against any number of match patterns.
1755  */
1756 static dev_match_ret
1757 xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
1758 	       struct cam_periph *periph)
1759 {
1760 	dev_match_ret retval;
1761 	int i;
1762 
1763 	/*
1764 	 * If we aren't given something to match against, that's an error.
1765 	 */
1766 	if (periph == NULL)
1767 		return(DM_RET_ERROR);
1768 
1769 	/*
1770 	 * If there are no match entries, then this peripheral matches no
1771 	 * matter what.
1772 	 */
1773 	if ((patterns == NULL) || (num_patterns == 0))
1774 		return(DM_RET_STOP | DM_RET_COPY);
1775 
1776 	/*
1777 	 * There aren't any nodes below a peripheral node, so there's no
1778 	 * reason to descend the tree any further.
1779 	 */
1780 	retval = DM_RET_STOP;
1781 
1782 	for (i = 0; i < num_patterns; i++) {
1783 		struct periph_match_pattern *cur_pattern;
1784 
1785 		/*
1786 		 * If the pattern in question isn't for a peripheral, we
1787 		 * aren't interested.
1788 		 */
1789 		if (patterns[i].type != DEV_MATCH_PERIPH)
1790 			continue;
1791 
1792 		cur_pattern = &patterns[i].pattern.periph_pattern;
1793 
1794 		/*
1795 		 * If they want to match on anything, then we will do so.
1796 		 */
1797 		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1798 			/* set the copy flag */
1799 			retval |= DM_RET_COPY;
1800 
1801 			/*
1802 			 * We've already set the return action to stop,
1803 			 * since there are no nodes below peripherals in
1804 			 * the tree.
1805 			 */
1806 			return(retval);
1807 		}
1808 
1809 		/*
1810 		 * Not sure why someone would do this...
1811 		 */
1812 		if (cur_pattern->flags == PERIPH_MATCH_NONE)
1813 			continue;
1814 
1815 		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1816 		 && (cur_pattern->path_id != periph->path->bus->path_id))
1817 			continue;
1818 
1819 		/*
1820 		 * For the target and lun id's, we have to make sure the
1821 		 * target and lun pointers aren't NULL.  The xpt peripheral
1822 		 * has a wildcard target and device.
1823 		 */
1824 		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1825 		 && ((periph->path->target == NULL)
1826 		 ||(cur_pattern->target_id != periph->path->target->target_id)))
1827 			continue;
1828 
1829 		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1830 		 && ((periph->path->device == NULL)
1831 		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
1832 			continue;
1833 
1834 		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1835 		 && (cur_pattern->unit_number != periph->unit_number))
1836 			continue;
1837 
1838 		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1839 		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
1840 			     DEV_IDLEN) != 0))
1841 			continue;
1842 
1843 		/*
1844 		 * If we get to this point, the user definitely wants
1845 		 * information on this peripheral.  So tell the caller to
1846 		 * copy the data out.
1847 		 */
1848 		retval |= DM_RET_COPY;
1849 
1850 		/*
1851 		 * The return action has already been set to stop, since
1852 		 * peripherals don't have any nodes below them in the EDT.
1853 		 */
1854 		return(retval);
1855 	}
1856 
1857 	/*
1858 	 * If we get to this point, the peripheral that was passed in
1859 	 * doesn't match any of the patterns.
1860 	 */
1861 	return(retval);
1862 }
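
/*
 * Sketch of a peripheral pattern (values are illustrative): match
 * every unit of the "da" driver by name, wherever it sits in the EDT.
 *
 *	struct dev_match_pattern pat;
 *
 *	bzero(&pat, sizeof(pat));
 *	pat.type = DEV_MATCH_PERIPH;
 *	pat.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
 *	strncpy(pat.pattern.periph_pattern.periph_name, "da", DEV_IDLEN);
 *
 * Or'ing in PERIPH_MATCH_UNIT and setting unit_number would narrow
 * this to a single instance such as "da1".
 */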
1863 
1864 static int
1865 xptedtbusfunc(struct cam_eb *bus, void *arg)
1866 {
1867 	struct ccb_dev_match *cdm;
1868 	dev_match_ret retval;
1869 
1870 	cdm = (struct ccb_dev_match *)arg;
1871 
1872 	/*
1873 	 * If our position is for something deeper in the tree, that means
1874 	 * that we've already seen this node.  So, we keep going down.
1875 	 */
1876 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1877 	 && (cdm->pos.cookie.bus == bus)
1878 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1879 	 && (cdm->pos.cookie.target != NULL))
1880 		retval = DM_RET_DESCEND;
1881 	else
1882 		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1883 
1884 	/*
1885 	 * If we got an error, bail out of the search.
1886 	 */
1887 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1888 		cdm->status = CAM_DEV_MATCH_ERROR;
1889 		return(0);
1890 	}
1891 
1892 	/*
1893 	 * If the copy flag is set, copy this bus out.
1894 	 */
1895 	if (retval & DM_RET_COPY) {
1896 		int spaceleft, j;
1897 
1898 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1899 			sizeof(struct dev_match_result));
1900 
1901 		/*
1902 		 * If we don't have enough space to put in another
1903 		 * match result, save our position and tell the
1904 		 * user there are more devices to check.
1905 		 */
1906 		if (spaceleft < sizeof(struct dev_match_result)) {
1907 			bzero(&cdm->pos, sizeof(cdm->pos));
1908 			cdm->pos.position_type =
1909 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1910 
1911 			cdm->pos.cookie.bus = bus;
1912 			cdm->pos.generations[CAM_BUS_GENERATION] =
1913 				bus_generation;
1914 			cdm->status = CAM_DEV_MATCH_MORE;
1915 			return(0);
1916 		}
1917 		j = cdm->num_matches;
1918 		cdm->num_matches++;
1919 		cdm->matches[j].type = DEV_MATCH_BUS;
1920 		cdm->matches[j].result.bus_result.path_id = bus->path_id;
1921 		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1922 		cdm->matches[j].result.bus_result.unit_number =
1923 			bus->sim->unit_number;
1924 		strncpy(cdm->matches[j].result.bus_result.dev_name,
1925 			bus->sim->sim_name, DEV_IDLEN);
1926 	}
1927 
1928 	/*
1929 	 * If the user is only interested in busses, there's no
1930 	 * reason to descend to the next level in the tree.
1931 	 */
1932 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1933 		return(1);
1934 
1935 	/*
1936 	 * If there is a target generation recorded, check it to
1937 	 * make sure the target list hasn't changed.
1938 	 */
1939 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1940 	 && (bus == cdm->pos.cookie.bus)
1941 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1942 	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
1943 	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
1944 	     bus->generation)) {
1945 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1946 		return(0);
1947 	}
1948 
1949 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1950 	 && (cdm->pos.cookie.bus == bus)
1951 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1952 	 && (cdm->pos.cookie.target != NULL))
1953 		return(xpttargettraverse(bus,
1954 					(struct cam_et *)cdm->pos.cookie.target,
1955 					 xptedttargetfunc, arg));
1956 	else
1957 		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
1958 }
1959 
1960 static int
1961 xptedttargetfunc(struct cam_et *target, void *arg)
1962 {
1963 	struct ccb_dev_match *cdm;
1964 
1965 	cdm = (struct ccb_dev_match *)arg;
1966 
1967 	/*
1968 	 * If there is a device list generation recorded, check it to
1969 	 * make sure the device list hasn't changed.
1970 	 */
1971 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1972 	 && (cdm->pos.cookie.bus == target->bus)
1973 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1974 	 && (cdm->pos.cookie.target == target)
1975 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1976 	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
1977 	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
1978 	     target->generation)) {
1979 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1980 		return(0);
1981 	}
1982 
1983 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1984 	 && (cdm->pos.cookie.bus == target->bus)
1985 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1986 	 && (cdm->pos.cookie.target == target)
1987 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1988 	 && (cdm->pos.cookie.device != NULL))
1989 		return(xptdevicetraverse(target,
1990 					(struct cam_ed *)cdm->pos.cookie.device,
1991 					 xptedtdevicefunc, arg));
1992 	else
1993 		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
1994 }
1995 
1996 static int
1997 xptedtdevicefunc(struct cam_ed *device, void *arg)
1998 {
1999 
2000 	struct ccb_dev_match *cdm;
2001 	dev_match_ret retval;
2002 
2003 	cdm = (struct ccb_dev_match *)arg;
2004 
2005 	/*
2006 	 * If our position is for something deeper in the tree, that means
2007 	 * that we've already seen this node.  So, we keep going down.
2008 	 */
2009 	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2010 	 && (cdm->pos.cookie.device == device)
2011 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2012 	 && (cdm->pos.cookie.periph != NULL))
2013 		retval = DM_RET_DESCEND;
2014 	else
2015 		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2016 					device);
2017 
2018 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2019 		cdm->status = CAM_DEV_MATCH_ERROR;
2020 		return(0);
2021 	}
2022 
2023 	/*
2024 	 * If the copy flag is set, copy this device out.
2025 	 */
2026 	if (retval & DM_RET_COPY) {
2027 		int spaceleft, j;
2028 
2029 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2030 			sizeof(struct dev_match_result));
2031 
2032 		/*
2033 		 * If we don't have enough space to put in another
2034 		 * match result, save our position and tell the
2035 		 * user there are more devices to check.
2036 		 */
2037 		if (spaceleft < sizeof(struct dev_match_result)) {
2038 			bzero(&cdm->pos, sizeof(cdm->pos));
2039 			cdm->pos.position_type =
2040 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2041 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2042 
2043 			cdm->pos.cookie.bus = device->target->bus;
2044 			cdm->pos.generations[CAM_BUS_GENERATION] =
2045 				bus_generation;
2046 			cdm->pos.cookie.target = device->target;
2047 			cdm->pos.generations[CAM_TARGET_GENERATION] =
2048 				device->target->bus->generation;
2049 			cdm->pos.cookie.device = device;
2050 			cdm->pos.generations[CAM_DEV_GENERATION] =
2051 				device->target->generation;
2052 			cdm->status = CAM_DEV_MATCH_MORE;
2053 			return(0);
2054 		}
2055 		j = cdm->num_matches;
2056 		cdm->num_matches++;
2057 		cdm->matches[j].type = DEV_MATCH_DEVICE;
2058 		cdm->matches[j].result.device_result.path_id =
2059 			device->target->bus->path_id;
2060 		cdm->matches[j].result.device_result.target_id =
2061 			device->target->target_id;
2062 		cdm->matches[j].result.device_result.target_lun =
2063 			device->lun_id;
2064 		bcopy(&device->inq_data,
2065 		      &cdm->matches[j].result.device_result.inq_data,
2066 		      sizeof(struct scsi_inquiry_data));
2067 
2068 		/* Let the user know whether this device is unconfigured */
2069 		if (device->flags & CAM_DEV_UNCONFIGURED)
2070 			cdm->matches[j].result.device_result.flags =
2071 				DEV_RESULT_UNCONFIGURED;
2072 		else
2073 			cdm->matches[j].result.device_result.flags =
2074 				DEV_RESULT_NOFLAG;
2075 	}
2076 
2077 	/*
2078 	 * If the user isn't interested in peripherals, don't descend
2079 	 * the tree any further.
2080 	 */
2081 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2082 		return(1);
2083 
2084 	/*
2085 	 * If there is a peripheral list generation recorded, make sure
2086 	 * it hasn't changed.
2087 	 */
2088 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2089 	 && (device->target->bus == cdm->pos.cookie.bus)
2090 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2091 	 && (device->target == cdm->pos.cookie.target)
2092 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2093 	 && (device == cdm->pos.cookie.device)
2094 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2095 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2096 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2097 	     device->generation)) {
2098 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2099 		return(0);
2100 	}
2101 
2102 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2103 	 && (cdm->pos.cookie.bus == device->target->bus)
2104 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2105 	 && (cdm->pos.cookie.target == device->target)
2106 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2107 	 && (cdm->pos.cookie.device == device)
2108 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2109 	 && (cdm->pos.cookie.periph != NULL))
2110 		return(xptperiphtraverse(device,
2111 				(struct cam_periph *)cdm->pos.cookie.periph,
2112 				xptedtperiphfunc, arg));
2113 	else
2114 		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2115 }
2116 
2117 static int
2118 xptedtperiphfunc(struct cam_periph *periph, void *arg)
2119 {
2120 	struct ccb_dev_match *cdm;
2121 	dev_match_ret retval;
2122 
2123 	cdm = (struct ccb_dev_match *)arg;
2124 
2125 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2126 
2127 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2128 		cdm->status = CAM_DEV_MATCH_ERROR;
2129 		return(0);
2130 	}
2131 
2132 	/*
2133 	 * If the copy flag is set, copy this peripheral out.
2134 	 */
2135 	if (retval & DM_RET_COPY) {
2136 		int spaceleft, j;
2137 
2138 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2139 			sizeof(struct dev_match_result));
2140 
2141 		/*
2142 		 * If we don't have enough space to put in another
2143 		 * match result, save our position and tell the
2144 		 * user there are more devices to check.
2145 		 */
2146 		if (spaceleft < sizeof(struct dev_match_result)) {
2147 			bzero(&cdm->pos, sizeof(cdm->pos));
2148 			cdm->pos.position_type =
2149 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2150 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2151 				CAM_DEV_POS_PERIPH;
2152 
2153 			cdm->pos.cookie.bus = periph->path->bus;
2154 				cdm->pos.generations[CAM_BUS_GENERATION] =
2155 				bus_generation;
2156 			cdm->pos.cookie.target = periph->path->target;
2157 			cdm->pos.generations[CAM_TARGET_GENERATION] =
2158 				periph->path->bus->generation;
2159 			cdm->pos.cookie.device = periph->path->device;
2160 			cdm->pos.generations[CAM_DEV_GENERATION] =
2161 				periph->path->target->generation;
2162 			cdm->pos.cookie.periph = periph;
2163 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2164 				periph->path->device->generation;
2165 			cdm->status = CAM_DEV_MATCH_MORE;
2166 			return(0);
2167 		}
2168 
2169 		j = cdm->num_matches;
2170 		cdm->num_matches++;
2171 		cdm->matches[j].type = DEV_MATCH_PERIPH;
2172 		cdm->matches[j].result.periph_result.path_id =
2173 			periph->path->bus->path_id;
2174 		cdm->matches[j].result.periph_result.target_id =
2175 			periph->path->target->target_id;
2176 		cdm->matches[j].result.periph_result.target_lun =
2177 			periph->path->device->lun_id;
2178 		cdm->matches[j].result.periph_result.unit_number =
2179 			periph->unit_number;
2180 		strncpy(cdm->matches[j].result.periph_result.periph_name,
2181 			periph->periph_name, DEV_IDLEN);
2182 	}
2183 
2184 	return(1);
2185 }
2186 
2187 static int
2188 xptedtmatch(struct ccb_dev_match *cdm)
2189 {
2190 	int ret;
2191 
2192 	cdm->num_matches = 0;
2193 
2194 	/*
2195 	 * Check the bus list generation.  If it has changed, the user
2196 	 * needs to reset everything and start over.
2197 	 */
2198 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2199 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2200 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2201 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2202 		return(0);
2203 	}
2204 
2205 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2206 	 && (cdm->pos.cookie.bus != NULL))
2207 		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2208 				     xptedtbusfunc, cdm);
2209 	else
2210 		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2211 
2212 	/*
2213 	 * If we get back 0, that means that we had to stop before fully
2214 	 * traversing the EDT.  It also means that one of the subroutines
2215 	 * has set the status field to the proper value.  If we get back 1,
2216 	 * we've fully traversed the EDT and copied out any matching entries.
2217 	 */
2218 	if (ret == 1)
2219 		cdm->status = CAM_DEV_MATCH_LAST;
2220 
2221 	return(ret);
2222 }
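
/*
 * The CAM_DEV_MATCH_MORE/CAM_DEV_MATCH_LAST protocol above is driven
 * from userland roughly like this (a sketch of what camcontrol(8)
 * does; the buffer size and error handling are illustrative):
 *
 *	union ccb ccb;
 *	int fd;
 *
 *	fd = open("/dev/xpt0", O_RDWR);
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
 *	ccb.cdm.match_buf_len = 100 * sizeof(struct dev_match_result);
 *	ccb.cdm.matches = malloc(ccb.cdm.match_buf_len);
 *	ccb.cdm.num_patterns = 0;		(match everything)
 *	do {
 *		ioctl(fd, CAMIOCOMMAND, &ccb);
 *		(consume ccb.cdm.num_matches results; the position
 *		 cookies saved above make the next call resume)
 *	} while (ccb.cdm.status == CAM_DEV_MATCH_MORE);
 *
 * A generation mismatch shows up as CAM_DEV_MATCH_LIST_CHANGED, in
 * which case the caller must clear ccb.cdm.pos and start over.
 */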
2223 
2224 static int
2225 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2226 {
2227 	struct ccb_dev_match *cdm;
2228 
2229 	cdm = (struct ccb_dev_match *)arg;
2230 
2231 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2232 	 && (cdm->pos.cookie.pdrv == pdrv)
2233 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2234 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2235 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2236 	     (*pdrv)->generation)) {
2237 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2238 		return(0);
2239 	}
2240 
2241 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2242 	 && (cdm->pos.cookie.pdrv == pdrv)
2243 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2244 	 && (cdm->pos.cookie.periph != NULL))
2245 		return(xptpdperiphtraverse(pdrv,
2246 				(struct cam_periph *)cdm->pos.cookie.periph,
2247 				xptplistperiphfunc, arg));
2248 	else
2249 		return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
2250 }
2251 
2252 static int
2253 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2254 {
2255 	struct ccb_dev_match *cdm;
2256 	dev_match_ret retval;
2257 
2258 	cdm = (struct ccb_dev_match *)arg;
2259 
2260 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2261 
2262 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2263 		cdm->status = CAM_DEV_MATCH_ERROR;
2264 		return(0);
2265 	}
2266 
2267 	/*
2268 	 * If the copy flag is set, copy this peripheral out.
2269 	 */
2270 	if (retval & DM_RET_COPY) {
2271 		int spaceleft, j;
2272 
2273 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2274 			sizeof(struct dev_match_result));
2275 
2276 		/*
2277 		 * If we don't have enough space to put in another
2278 		 * match result, save our position and tell the
2279 		 * user there are more devices to check.
2280 		 */
2281 		if (spaceleft < sizeof(struct dev_match_result)) {
2282 			struct periph_driver **pdrv;
2283 
2284 			pdrv = NULL;
2285 			bzero(&cdm->pos, sizeof(cdm->pos));
2286 			cdm->pos.position_type =
2287 				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2288 				CAM_DEV_POS_PERIPH;
2289 
2290 			/*
2291 			 * This may look a bit nonsensical, but it is
2292 			 * actually quite logical.  There are very few
2293 			 * peripheral drivers, and bloating every peripheral
2294 			 * structure with a pointer back to its parent
2295 			 * peripheral driver linker set entry would cost
2296 			 * more in the long run than doing this quick lookup.
2297 			 */
2298 			for (pdrv =
2299 			     (struct periph_driver **)periphdriver_set.ls_items;
2300 			     *pdrv != NULL; pdrv++) {
2301 				if (strcmp((*pdrv)->driver_name,
2302 				    periph->periph_name) == 0)
2303 					break;
2304 			}
2305 
2306 			if (*pdrv == NULL) {
2307 				cdm->status = CAM_DEV_MATCH_ERROR;
2308 				return(0);
2309 			}
2310 
2311 			cdm->pos.cookie.pdrv = pdrv;
2312 			/*
2313 			 * The periph generation slot does double duty, as
2314 			 * does the periph pointer slot.  They are used for
2315 			 * both edt and pdrv lookups and positioning.
2316 			 */
2317 			cdm->pos.cookie.periph = periph;
2318 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2319 				(*pdrv)->generation;
2320 			cdm->status = CAM_DEV_MATCH_MORE;
2321 			return(0);
2322 		}
2323 
2324 		j = cdm->num_matches;
2325 		cdm->num_matches++;
2326 		cdm->matches[j].type = DEV_MATCH_PERIPH;
2327 		cdm->matches[j].result.periph_result.path_id =
2328 			periph->path->bus->path_id;
2329 
2330 		/*
2331 		 * The transport layer peripheral doesn't have a target or
2332 		 * lun.
2333 		 */
2334 		if (periph->path->target)
2335 			cdm->matches[j].result.periph_result.target_id =
2336 				periph->path->target->target_id;
2337 		else
2338 			cdm->matches[j].result.periph_result.target_id = -1;
2339 
2340 		if (periph->path->device)
2341 			cdm->matches[j].result.periph_result.target_lun =
2342 				periph->path->device->lun_id;
2343 		else
2344 			cdm->matches[j].result.periph_result.target_lun = -1;
2345 
2346 		cdm->matches[j].result.periph_result.unit_number =
2347 			periph->unit_number;
2348 		strncpy(cdm->matches[j].result.periph_result.periph_name,
2349 			periph->periph_name, DEV_IDLEN);
2350 	}
2351 
2352 	return(1);
2353 }
2354 
2355 static int
2356 xptperiphlistmatch(struct ccb_dev_match *cdm)
2357 {
2358 	int ret;
2359 
2360 	cdm->num_matches = 0;
2361 
2362 	/*
2363 	 * At the equivalent point in the EDT traversal function, we check
2364 	 * the bus list generation to make sure that no busses have been
2365 	 * added or removed since the user last sent an XPT_DEV_MATCH ccb.
2366 	 * For the peripheral driver list traversal function, however, we
2367 	 * don't have to worry about new peripheral driver types coming or
2368 	 * going; they're in a linker set, and therefore can't change
2369 	 * without a recompile.
2370 	 */
2371 
2372 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2373 	 && (cdm->pos.cookie.pdrv != NULL))
2374 		ret = xptpdrvtraverse(
2375 				(struct periph_driver **)cdm->pos.cookie.pdrv,
2376 				xptplistpdrvfunc, cdm);
2377 	else
2378 		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2379 
2380 	/*
2381 	 * If we get back 0, that means that we had to stop before fully
2382 	 * traversing the peripheral driver tree.  It also means that one of
2383 	 * the subroutines has set the status field to the proper value.  If
2384 	 * we get back 1, we've fully traversed the peripheral driver
2385 	 * list and copied out any matching entries.
2386 	 */
2387 	if (ret == 1)
2388 		cdm->status = CAM_DEV_MATCH_LAST;
2389 
2390 	return(ret);
2391 }
2392 
2393 static int
2394 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2395 {
2396 	struct cam_eb *bus, *next_bus;
2397 	int retval;
2398 
2399 	retval = 1;
2400 
2401 	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2402 	     bus != NULL;
2403 	     bus = next_bus) {
2404 		next_bus = TAILQ_NEXT(bus, links);
2405 
2406 		retval = tr_func(bus, arg);
2407 		if (retval == 0)
2408 			return(retval);
2409 	}
2410 
2411 	return(retval);
2412 }
2413 
2414 static int
2415 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2416 		  xpt_targetfunc_t *tr_func, void *arg)
2417 {
2418 	struct cam_et *target, *next_target;
2419 	int retval;
2420 
2421 	retval = 1;
2422 	for (target = (start_target ? start_target :
2423 		       TAILQ_FIRST(&bus->et_entries));
2424 	     target != NULL; target = next_target) {
2425 
2426 		next_target = TAILQ_NEXT(target, links);
2427 
2428 		retval = tr_func(target, arg);
2429 
2430 		if (retval == 0)
2431 			return(retval);
2432 	}
2433 
2434 	return(retval);
2435 }
2436 
2437 static int
2438 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2439 		  xpt_devicefunc_t *tr_func, void *arg)
2440 {
2441 	struct cam_ed *device, *next_device;
2442 	int retval;
2443 
2444 	retval = 1;
2445 	for (device = (start_device ? start_device :
2446 		       TAILQ_FIRST(&target->ed_entries));
2447 	     device != NULL;
2448 	     device = next_device) {
2449 
2450 		next_device = TAILQ_NEXT(device, links);
2451 
2452 		retval = tr_func(device, arg);
2453 
2454 		if (retval == 0)
2455 			return(retval);
2456 	}
2457 
2458 	return(retval);
2459 }
2460 
2461 static int
2462 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2463 		  xpt_periphfunc_t *tr_func, void *arg)
2464 {
2465 	struct cam_periph *periph, *next_periph;
2466 	int retval;
2467 
2468 	retval = 1;
2469 
2470 	for (periph = (start_periph ? start_periph :
2471 		       SLIST_FIRST(&device->periphs));
2472 	     periph != NULL;
2473 	     periph = next_periph) {
2474 
2475 		next_periph = SLIST_NEXT(periph, periph_links);
2476 
2477 		retval = tr_func(periph, arg);
2478 		if (retval == 0)
2479 			return(retval);
2480 	}
2481 
2482 	return(retval);
2483 }
2484 
2485 static int
2486 xptpdrvtraverse(struct periph_driver **start_pdrv,
2487 		xpt_pdrvfunc_t *tr_func, void *arg)
2488 {
2489 	struct periph_driver **pdrv;
2490 	int retval;
2491 
2492 	retval = 1;
2493 
2494 	/*
2495 	 * We don't traverse the peripheral driver list like we do the
2496 	 * other lists, because it is a linker set, and therefore cannot be
2497 	 * changed during runtime.  If the peripheral driver list is ever
2498 	 * re-done to be something other than a linker set (i.e. it can
2499 	 * change while the system is running), the list traversal should
2500 	 * be modified to work like the other traversal functions.
2501 	 */
2502 	for (pdrv = (start_pdrv ? start_pdrv :
2503 	     (struct periph_driver **)periphdriver_set.ls_items);
2504 	     *pdrv != NULL; pdrv++) {
2505 		retval = tr_func(pdrv, arg);
2506 
2507 		if (retval == 0)
2508 			return(retval);
2509 	}
2510 
2511 	return(retval);
2512 }
2513 
2514 static int
2515 xptpdperiphtraverse(struct periph_driver **pdrv,
2516 		    struct cam_periph *start_periph,
2517 		    xpt_periphfunc_t *tr_func, void *arg)
2518 {
2519 	struct cam_periph *periph, *next_periph;
2520 	int retval;
2521 
2522 	retval = 1;
2523 
2524 	for (periph = (start_periph ? start_periph :
2525 	     TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2526 	     periph = next_periph) {
2527 
2528 		next_periph = TAILQ_NEXT(periph, unit_links);
2529 
2530 		retval = tr_func(periph, arg);
2531 		if (retval == 0)
2532 			return(retval);
2533 	}
2534 	return(retval);
2535 }
2536 
2537 static int
2538 xptdefbusfunc(struct cam_eb *bus, void *arg)
2539 {
2540 	struct xpt_traverse_config *tr_config;
2541 
2542 	tr_config = (struct xpt_traverse_config *)arg;
2543 
2544 	if (tr_config->depth == XPT_DEPTH_BUS) {
2545 		xpt_busfunc_t *tr_func;
2546 
2547 		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2548 
2549 		return(tr_func(bus, tr_config->tr_arg));
2550 	} else
2551 		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2552 }
2553 
2554 static int
2555 xptdeftargetfunc(struct cam_et *target, void *arg)
2556 {
2557 	struct xpt_traverse_config *tr_config;
2558 
2559 	tr_config = (struct xpt_traverse_config *)arg;
2560 
2561 	if (tr_config->depth == XPT_DEPTH_TARGET) {
2562 		xpt_targetfunc_t *tr_func;
2563 
2564 		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2565 
2566 		return(tr_func(target, tr_config->tr_arg));
2567 	} else
2568 		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2569 }
2570 
2571 static int
2572 xptdefdevicefunc(struct cam_ed *device, void *arg)
2573 {
2574 	struct xpt_traverse_config *tr_config;
2575 
2576 	tr_config = (struct xpt_traverse_config *)arg;
2577 
2578 	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2579 		xpt_devicefunc_t *tr_func;
2580 
2581 		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2582 
2583 		return(tr_func(device, tr_config->tr_arg));
2584 	} else
2585 		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2586 }
2587 
2588 static int
2589 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2590 {
2591 	struct xpt_traverse_config *tr_config;
2592 	xpt_periphfunc_t *tr_func;
2593 
2594 	tr_config = (struct xpt_traverse_config *)arg;
2595 
2596 	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2597 
2598 	/*
2599 	 * Unlike the other default functions, we don't check for depth
2600 	 * here.  The peripheral driver level is the last level in the EDT,
2601 	 * so if we're here, we should execute the function in question.
2602 	 */
2603 	return(tr_func(periph, tr_config->tr_arg));
2604 }
2605 
2606 /*
2607  * Execute the given function for every bus in the EDT.
2608  */
2609 static int
2610 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2611 {
2612 	struct xpt_traverse_config tr_config;
2613 
2614 	tr_config.depth = XPT_DEPTH_BUS;
2615 	tr_config.tr_func = tr_func;
2616 	tr_config.tr_arg = arg;
2617 
2618 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2619 }
2620 
2621 #ifdef notusedyet
2622 /*
2623  * Execute the given function for every target in the EDT.
2624  */
2625 static int
2626 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2627 {
2628 	struct xpt_traverse_config tr_config;
2629 
2630 	tr_config.depth = XPT_DEPTH_TARGET;
2631 	tr_config.tr_func = tr_func;
2632 	tr_config.tr_arg = arg;
2633 
2634 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2635 }
2636 #endif /* notusedyet */
2637 
2638 /*
2639  * Execute the given function for every device in the EDT.
2640  */
2641 static int
2642 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2643 {
2644 	struct xpt_traverse_config tr_config;
2645 
2646 	tr_config.depth = XPT_DEPTH_DEVICE;
2647 	tr_config.tr_func = tr_func;
2648 	tr_config.tr_arg = arg;
2649 
2650 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2651 }
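
/*
 * The traversal contract, by example (hypothetical callback): tr_func
 * returns 1 to keep walking and 0 to stop the traversal early.
 *
 *	static int
 *	xptcountdevsfunc(struct cam_ed *device, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return(1);
 *	}
 *
 *	int count = 0;
 *	xpt_for_all_devices(xptcountdevsfunc, &count);
 *
 * This counts every device in the EDT, including unconfigured ones;
 * callers that care should test CAM_DEV_UNCONFIGURED as
 * xptsetasyncfunc() below does.
 */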
2652 
2653 #ifdef notusedyet
2654 /*
2655  * Execute the given function for every peripheral in the EDT.
2656  */
2657 static int
2658 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2659 {
2660 	struct xpt_traverse_config tr_config;
2661 
2662 	tr_config.depth = XPT_DEPTH_PERIPH;
2663 	tr_config.tr_func = tr_func;
2664 	tr_config.tr_arg = arg;
2665 
2666 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2667 }
2668 #endif /* notusedyet */
2669 
2670 static int
2671 xptsetasyncfunc(struct cam_ed *device, void *arg)
2672 {
2673 	struct cam_path path;
2674 	struct ccb_getdev cgd;
2675 	struct async_node *cur_entry;
2676 
2677 	cur_entry = (struct async_node *)arg;
2678 
2679 	/*
2680 	 * Don't report unconfigured devices (Wildcard devs,
2681 	 * devices only for target mode, device instances
2682 	 * that have been invalidated but are waiting for
2683 	 * their last reference count to be released).
2684 	 */
2685 	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2686 		return (1);
2687 
2688 	xpt_compile_path(&path,
2689 			 NULL,
2690 			 device->target->bus->path_id,
2691 			 device->target->target_id,
2692 			 device->lun_id);
2693 	xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2694 	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2695 	xpt_action((union ccb *)&cgd);
2696 	cur_entry->callback(cur_entry->callback_arg,
2697 			    AC_FOUND_DEVICE,
2698 			    &path, &cgd);
2699 	xpt_release_path(&path);
2700 
2701 	return(1);
2702 }
2703 
2704 static int
2705 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2706 {
2707 	struct cam_path path;
2708 	struct ccb_pathinq cpi;
2709 	struct async_node *cur_entry;
2710 
2711 	cur_entry = (struct async_node *)arg;
2712 
2713 	xpt_compile_path(&path, /*periph*/NULL,
2714 			 bus->sim->path_id,
2715 			 CAM_TARGET_WILDCARD,
2716 			 CAM_LUN_WILDCARD);
2717 	xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2718 	cpi.ccb_h.func_code = XPT_PATH_INQ;
2719 	xpt_action((union ccb *)&cpi);
2720 	cur_entry->callback(cur_entry->callback_arg,
2721 			    AC_PATH_REGISTERED,
2722 			    &path, &cpi);
2723 	xpt_release_path(&path);
2724 
2725 	return(1);
2726 }
2727 
2728 void
2729 xpt_action(union ccb *start_ccb)
2730 {
2731 	int iopl;
2732 
2733 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2734 
2735 	start_ccb->ccb_h.status = CAM_REQ_INPROG;
2736 
2737 	iopl = splsoftcam();
2738 	switch (start_ccb->ccb_h.func_code) {
2739 	case XPT_SCSI_IO:
2740 	{
2741 #ifdef CAMDEBUG
2742 		char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2743 		struct cam_path *path;
2744 
2745 		path = start_ccb->ccb_h.path;
2746 #endif
2747 
2748 		/*
2749 		 * For the sake of compatibility with SCSI-1
2750 		 * devices that may not understand the identify
2751 		 * message, we include lun information in the
2752 		 * second byte of all commands.  SCSI-1 specifies
2753 		 * that luns are a 3 bit value and reserves only 3
2754 		 * bits for lun information in the CDB.  Later
2755 		 * revisions of the SCSI spec allow for more than 8
2756 		 * luns, but have deprecated lun information in the
2757 		 * CDB.  So, if the lun won't fit, we must omit it.
2758 		 *
2759 		 * Also be aware that during initial probing for devices,
2760 		 * the inquiry information is unknown but initialized to 0.
2761 		 * This means that this code will be exercised while probing
2762 		 * devices with an ANSI revision greater than 2.
2763 		 */
2764 		if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2765 		 && start_ccb->ccb_h.target_lun < 8
2766 		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2767 
2768 			start_ccb->csio.cdb_io.cdb_bytes[1] |=
2769 			    start_ccb->ccb_h.target_lun << 5;
2770 		}
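		/*
		 * For example, lun 2 on a SCSI-1 target turns CDB byte 1
		 * from 0x00 into 0x40 (2 << 5); luns 8 and above simply
		 * never receive the OR above.
		 */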
2771 		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2772 		CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2773 			  scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2774 			  	       &path->device->inq_data),
2775 			  scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2776 					  cdb_str, sizeof(cdb_str))));
2777 		/* FALLTHROUGH */
2778 	}
2779 	case XPT_TARGET_IO:
2780 	case XPT_CONT_TARGET_IO:
2781 		start_ccb->csio.sense_resid = 0;
2782 		start_ccb->csio.resid = 0;
2783 		/* FALLTHROUGH */
2784 	case XPT_RESET_DEV:
2785 	case XPT_ENG_EXEC:
2786 	{
2787 		struct cam_path *path;
2788 		int s;
2789 		int runq;
2790 
2791 		path = start_ccb->ccb_h.path;
2792 		s = splsoftcam();
2793 
2794 		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2795 		if (path->device->qfrozen_cnt == 0)
2796 			runq = xpt_schedule_dev_sendq(path->bus, path->device);
2797 		else
2798 			runq = 0;
2799 		splx(s);
2800 		if (runq != 0)
2801 			xpt_run_dev_sendq(path->bus);
2802 		break;
2803 	}
2804 	case XPT_SET_TRAN_SETTINGS:
2805 	{
2806 		xpt_set_transfer_settings(&start_ccb->cts,
2807 					  start_ccb->ccb_h.path->device,
2808 					  /*async_update*/FALSE);
2809 		break;
2810 	}
2811 	case XPT_CALC_GEOMETRY:
2812 	{
2813 		struct cam_sim *sim;
2814 
2815 		/* Filter out garbage */
2816 		if (start_ccb->ccg.block_size == 0
2817 		 || start_ccb->ccg.volume_size == 0) {
2818 			start_ccb->ccg.cylinders = 0;
2819 			start_ccb->ccg.heads = 0;
2820 			start_ccb->ccg.secs_per_track = 0;
2821 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2822 			break;
2823 		}
2824 #ifdef PC98
2825 		/*
2826 		 * In a PC-98 system, geometry translation depends on
2827 		 * the "real" device geometry obtained from mode page 4.
2828 		 * SCSI geometry translation is performed in the
2829 		 * initialization routine of the SCSI BIOS and the result
2830 		 * stored in host memory.  If the translation is available
2831 		 * in host memory, use it.  If not, rely on the default
2832 		 * translation the device driver performs.
2833 		 */
2834 		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2835 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2836 			break;
2837 		}
2838 #endif
2839 		sim = start_ccb->ccb_h.path->bus->sim;
2840 		(*(sim->sim_action))(sim, start_ccb);
2841 		break;
2842 	}
2843 	case XPT_ABORT:
2844 	{
2845 		union ccb* abort_ccb;
2846 		int s;
2847 
2848 		abort_ccb = start_ccb->cab.abort_ccb;
2849 		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2850 
2851 			if (abort_ccb->ccb_h.pinfo.index >= 0) {
2852 				struct cam_ccbq *ccbq;
2853 
2854 				ccbq = &abort_ccb->ccb_h.path->device->ccbq;
2855 				cam_ccbq_remove_ccb(ccbq, abort_ccb);
2856 				abort_ccb->ccb_h.status =
2857 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2858 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2859 				s = splcam();
2860 				xpt_done(abort_ccb);
2861 				splx(s);
2862 				start_ccb->ccb_h.status = CAM_REQ_CMP;
2863 				break;
2864 			}
2865 			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2866 			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2867 				/*
2868 				 * We've caught this ccb en route to
2869 				 * the SIM.  Flag it for abort and the
2870 				 * SIM will do so just before starting
2871 				 * real work on the CCB.
2872 				 */
2873 				abort_ccb->ccb_h.status =
2874 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2875 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2876 				start_ccb->ccb_h.status = CAM_REQ_CMP;
2877 				break;
2878 			}
2879 		}
2880 		if (XPT_FC_IS_QUEUED(abort_ccb)
2881 		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2882 			/*
2883 			 * It's already completed but waiting
2884 			 * for our SWI to get to it.
2885 			 */
2886 			start_ccb->ccb_h.status = CAM_UA_ABORT;
2887 			break;
2888 		}
2889 		/*
2890 		 * If we weren't able to take care of the abort request
2891 		 * in the XPT, pass the request down to the SIM for processing.
2892 		 */
2893 		/* FALLTHROUGH */
2894 	}
2895 	case XPT_ACCEPT_TARGET_IO:
2896 	case XPT_EN_LUN:
2897 	case XPT_IMMED_NOTIFY:
2898 	case XPT_NOTIFY_ACK:
2899 	case XPT_GET_TRAN_SETTINGS:
2900 	case XPT_RESET_BUS:
2901 	{
2902 		struct cam_sim *sim;
2903 
2904 		sim = start_ccb->ccb_h.path->bus->sim;
2905 		(*(sim->sim_action))(sim, start_ccb);
2906 		break;
2907 	}
2908 	case XPT_PATH_INQ:
2909 	{
2910 		struct cam_sim *sim;
2911 
2912 		sim = start_ccb->ccb_h.path->bus->sim;
2913 		(*(sim->sim_action))(sim, start_ccb);
2914 		break;
2915 	}
2916 	case XPT_PATH_STATS:
2917 		start_ccb->cpis.last_reset =
2918 			start_ccb->ccb_h.path->bus->last_reset;
2919 		start_ccb->ccb_h.status = CAM_REQ_CMP;
2920 		break;
2921 	case XPT_GDEV_TYPE:
2922 	{
2923 		struct cam_ed *dev;
2924 		int s;
2925 
2926 		dev = start_ccb->ccb_h.path->device;
2927 		s = splcam();
2928 		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2929 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2930 		} else {
2931 			struct ccb_getdev *cgd;
2932 			struct cam_eb *bus;
2933 			struct cam_et *tar;
2934 
2935 			cgd = &start_ccb->cgd;
2936 			bus = cgd->ccb_h.path->bus;
2937 			tar = cgd->ccb_h.path->target;
2938 			cgd->inq_data = dev->inq_data;
2939 			cgd->ccb_h.status = CAM_REQ_CMP;
2940 			cgd->serial_num_len = dev->serial_num_len;
2941 			if ((dev->serial_num_len > 0)
2942 			 && (dev->serial_num != NULL))
2943 				bcopy(dev->serial_num, cgd->serial_num,
2944 				      dev->serial_num_len);
2945 		}
2946 		splx(s);
2947 		break;
2948 	}
2949 	case XPT_GDEV_STATS:
2950 	{
2951 		struct cam_ed *dev;
2952 		int s;
2953 
2954 		dev = start_ccb->ccb_h.path->device;
2955 		s = splcam();
2956 		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2957 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2958 		} else {
2959 			struct ccb_getdevstats *cgds;
2960 			struct cam_eb *bus;
2961 			struct cam_et *tar;
2962 
2963 			cgds = &start_ccb->cgds;
2964 			bus = cgds->ccb_h.path->bus;
2965 			tar = cgds->ccb_h.path->target;
2966 			cgds->dev_openings = dev->ccbq.dev_openings;
2967 			cgds->dev_active = dev->ccbq.dev_active;
2968 			cgds->devq_openings = dev->ccbq.devq_openings;
2969 			cgds->devq_queued = dev->ccbq.queue.entries;
2970 			cgds->held = dev->ccbq.held;
2971 			cgds->last_reset = tar->last_reset;
2972 			cgds->maxtags = dev->quirk->maxtags;
2973 			cgds->mintags = dev->quirk->mintags;
2974 			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2975 				cgds->last_reset = bus->last_reset;
2976 			cgds->ccb_h.status = CAM_REQ_CMP;
2977 		}
2978 		splx(s);
2979 		break;
2980 	}
2981 	case XPT_GDEVLIST:
2982 	{
2983 		struct cam_periph	*nperiph;
2984 		struct periph_list	*periph_head;
2985 		struct ccb_getdevlist	*cgdl;
2986 		int			i;
2987 		int			s;
2988 		struct cam_ed		*device;
2989 		int			found;
2990 
2991 
2992 		found = 0;
2993 
2994 		/*
2995 		 * Don't want anyone mucking with our data.
2996 		 */
2997 		s = splcam();
2998 		device = start_ccb->ccb_h.path->device;
2999 		periph_head = &device->periphs;
3000 		cgdl = &start_ccb->cgdl;
3001 
3002 		/*
3003 		 * Check and see if the list has changed since the user
3004 		 * last requested a list member.  If so, tell them that the
3005 		 * list has changed, and therefore they need to start over
3006 		 * from the beginning.
3007 		 */
3008 		if ((cgdl->index != 0) &&
3009 		    (cgdl->generation != device->generation)) {
3010 			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3011 			splx(s);
3012 			break;
3013 		}
3014 
3015 		/*
3016 		 * Traverse the list of peripherals and attempt to find
3017 		 * the requested peripheral.
3018 		 */
3019 		for (nperiph = SLIST_FIRST(periph_head), i = 0;
3020 		     (nperiph != NULL) && (i <= cgdl->index);
3021 		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3022 			if (i == cgdl->index) {
3023 				strncpy(cgdl->periph_name,
3024 					nperiph->periph_name,
3025 					DEV_IDLEN);
3026 				cgdl->unit_number = nperiph->unit_number;
3027 				found = 1;
3028 			}
3029 		}
3030 		if (found == 0) {
3031 			cgdl->status = CAM_GDEVLIST_ERROR;
3032 			splx(s);
3033 			break;
3034 		}
3035 
3036 		if (nperiph == NULL)
3037 			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3038 		else
3039 			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3040 
3041 		cgdl->index++;
3042 		cgdl->generation = device->generation;
3043 
3044 		splx(s);
3045 		cgdl->ccb_h.status = CAM_REQ_CMP;
3046 		break;
3047 	}
3048 	case XPT_DEV_MATCH:
3049 	{
3050 		int s;
3051 		dev_pos_type position_type;
3052 		struct ccb_dev_match *cdm;
3053 		int ret;
3054 
3055 		cdm = &start_ccb->cdm;
3056 
3057 		/*
3058 		 * Prevent EDT changes while we traverse it.
3059 		 */
3060 		s = splcam();
3061 		/*
3062 		 * There are two ways of getting at information in the EDT.
3063 		 * The first way is via the primary EDT tree.  It starts
3064 		 * with a list of busses, then a list of targets on a bus,
3065 		 * then devices/luns on a target, and then peripherals on a
3066 		 * device/lun.  The "other" way is by the peripheral driver
3067 		 * lists.  The peripheral driver lists are organized by
3068 		 * peripheral driver (obviously), so it makes sense to
3069 		 * use the peripheral driver list if the user is looking
3070 		 * for something like "da1", or all "da" devices.  If the
3071 		 * user is looking for something on a particular bus/target
3072 		 * or lun, it's generally better to go through the EDT tree.
3073 		 */
3074 
3075 		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3076 			position_type = cdm->pos.position_type;
3077 		else {
3078 			int i;
3079 
3080 			position_type = CAM_DEV_POS_NONE;
3081 
3082 			for (i = 0; i < cdm->num_patterns; i++) {
3083 				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3084 				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3085 					position_type = CAM_DEV_POS_EDT;
3086 					break;
3087 				}
3088 			}
3089 
3090 			if (cdm->num_patterns == 0)
3091 				position_type = CAM_DEV_POS_EDT;
3092 			else if (position_type == CAM_DEV_POS_NONE)
3093 				position_type = CAM_DEV_POS_PDRV;
3094 		}
3095 
3096 		switch(position_type & CAM_DEV_POS_TYPEMASK) {
3097 		case CAM_DEV_POS_EDT:
3098 			ret = xptedtmatch(cdm);
3099 			break;
3100 		case CAM_DEV_POS_PDRV:
3101 			ret = xptperiphlistmatch(cdm);
3102 			break;
3103 		default:
3104 			cdm->status = CAM_DEV_MATCH_ERROR;
3105 			break;
3106 		}
3107 
3108 		splx(s);
3109 
3110 		if (cdm->status == CAM_DEV_MATCH_ERROR)
3111 			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3112 		else
3113 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3114 
3115 		break;
3116 	}
3117 	case XPT_SASYNC_CB:
3118 	{
3119 		struct ccb_setasync *csa;
3120 		struct async_node *cur_entry;
3121 		struct async_list *async_head;
3122 		u_int32_t added;
3123 		int s;
3124 
3125 		csa = &start_ccb->csa;
3126 		added = csa->event_enable;
3127 		async_head = &csa->ccb_h.path->device->asyncs;
3128 
3129 		/*
3130 		 * If there is already an entry for us, simply
3131 		 * update it.
3132 		 */
3133 		s = splcam();
3134 		cur_entry = SLIST_FIRST(async_head);
3135 		while (cur_entry != NULL) {
3136 			if ((cur_entry->callback_arg == csa->callback_arg)
3137 			 && (cur_entry->callback == csa->callback))
3138 				break;
3139 			cur_entry = SLIST_NEXT(cur_entry, links);
3140 		}
3141 
3142 		if (cur_entry != NULL) {
3143 			/*
3144 			 * If the request has no flags set,
3145 			 * remove the entry.
3146 			 */
3147 			added &= ~cur_entry->event_enable;
3148 			if (csa->event_enable == 0) {
3149 				SLIST_REMOVE(async_head, cur_entry,
3150 					     async_node, links);
3151 				csa->ccb_h.path->device->refcount--;
3152 				free(cur_entry, M_DEVBUF);
3153 			} else {
3154 				cur_entry->event_enable = csa->event_enable;
3155 			}
3156 		} else {
3157 			cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF,
3158 					   M_NOWAIT);
3159 			if (cur_entry == NULL) {
3160 				splx(s);
3161 				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3162 				break;
3163 			}
3164 			cur_entry->event_enable = csa->event_enable;
3165 			cur_entry->callback_arg = csa->callback_arg;
3166 			cur_entry->callback = csa->callback;
3167 			SLIST_INSERT_HEAD(async_head, cur_entry, links);
3168 			csa->ccb_h.path->device->refcount++;
3169 		}
3170 
3171 		if ((added & AC_FOUND_DEVICE) != 0) {
3172 			/*
3173 			 * Get this peripheral up to date with all
3174 			 * the currently existing devices.
3175 			 */
3176 			xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3177 		}
3178 		if ((added & AC_PATH_REGISTERED) != 0) {
3179 			/*
3180 			 * Get this peripheral up to date with all
3181 			 * the currently existing busses.
3182 			 */
3183 			xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3184 		}
3185 		splx(s);
3186 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3187 		break;
3188 	}
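	/*
	 * Peripheral drivers hook into the XPT_SASYNC_CB handling above
	 * with a sequence like the following sketch (the callback name
	 * is hypothetical):
	 *
	 *	struct ccb_setasync csa;
	 *
	 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
	 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
	 *	csa.event_enable = AC_FOUND_DEVICE;
	 *	csa.callback = myasync;
	 *	csa.callback_arg = NULL;
	 *	xpt_action((union ccb *)&csa);
	 *
	 * Sending the same ccb again with event_enable == 0 removes the
	 * entry, as handled above.
	 */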
3189 	case XPT_REL_SIMQ:
3190 	{
3191 		struct ccb_relsim *crs;
3192 		struct cam_ed *dev;
3193 		int s;
3194 
3195 		crs = &start_ccb->crs;
3196 		dev = crs->ccb_h.path->device;
3197 		if (dev == NULL) {
3198 
3199 			crs->ccb_h.status = CAM_DEV_NOT_THERE;
3200 			break;
3201 		}
3202 
3203 		s = splcam();
3204 
3205 		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3206 
3207 			if ((dev->inq_data.flags & SID_CmdQue) != 0) {
3208 
3209 				/* Don't ever go below one opening */
3210 				if (crs->openings > 0) {
3211 					xpt_dev_ccbq_resize(crs->ccb_h.path,
3212 							    crs->openings);
3213 
3214 					if (bootverbose) {
3215 						xpt_print_path(crs->ccb_h.path);
3216 						printf("tagged openings "
3217 						       "now %d\n",
3218 						       crs->openings);
3219 					}
3220 				}
3221 			}
3222 		}
3223 
3224 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3225 
3226 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3227 
3228 				/*
3229 				 * Just extend the old timeout and decrement
3230 				 * the freeze count so that a single timeout
3231 				 * is sufficient for releasing the queue.
3232 				 */
3233 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3234 				untimeout(xpt_release_devq_timeout,
3235 					  dev, dev->c_handle);
3236 			} else {
3237 
3238 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3239 			}
3240 
3241 			dev->c_handle =
3242 				timeout(xpt_release_devq_timeout,
3243 					dev,
3244 					(crs->release_timeout * hz) / 1000);
3245 
3246 			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3247 
3248 		}
3249 
3250 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3251 
3252 			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3253 				/*
3254 				 * Decrement the freeze count so that a single
3255 				 * completion is still sufficient to unfreeze
3256 				 * the queue.
3257 				 */
3258 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3259 			} else {
3260 
3261 				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3262 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3263 			}
3264 		}
3265 
3266 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3267 
3268 			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3269 			 || (dev->ccbq.dev_active == 0)) {
3270 
3271 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3272 			} else {
3273 
3274 				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3275 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3276 			}
3277 		}
3278 		splx(s);
3279 
3280 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3281 
3282 			xpt_release_devq(crs->ccb_h.path, /*count*/1,
3283 					 /*run_queue*/TRUE);
3284 		}
3285 		start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3286 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3287 		break;
3288 	}
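	/*
	 * A typical use of the XPT_REL_SIMQ handling above (a sketch;
	 * the timeout value is illustrative): a peripheral that froze a
	 * device queue on an error can have it thaw automatically half
	 * a second later:
	 *
	 *	struct ccb_relsim crs;
	 *
	 *	xpt_setup_ccb(&crs.ccb_h, path, 1);
	 *	crs.ccb_h.func_code = XPT_REL_SIMQ;
	 *	crs.release_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
	 *	crs.release_timeout = 500;
	 *	xpt_action((union ccb *)&crs);
	 *
	 * release_timeout is in milliseconds, per the hz scaling above.
	 */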
3289 	case XPT_SCAN_BUS:
3290 		xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3291 		break;
3292 	case XPT_SCAN_LUN:
3293 		xpt_scan_lun(start_ccb->ccb_h.path->periph,
3294 			     start_ccb->ccb_h.path, start_ccb->crcn.flags,
3295 			     start_ccb);
3296 		break;
3297 	case XPT_DEBUG: {
3298 #ifdef CAMDEBUG
3299 		int s;
3300 
3301 		s = splcam();
3302 #ifdef CAM_DEBUG_DELAY
3303 		cam_debug_delay = CAM_DEBUG_DELAY;
3304 #endif
3305 		cam_dflags = start_ccb->cdbg.flags;
3306 		if (cam_dpath != NULL) {
3307 			xpt_free_path(cam_dpath);
3308 			cam_dpath = NULL;
3309 		}
3310 
3311 		if (cam_dflags != CAM_DEBUG_NONE) {
3312 			if (xpt_create_path(&cam_dpath, xpt_periph,
3313 					    start_ccb->ccb_h.path_id,
3314 					    start_ccb->ccb_h.target_id,
3315 					    start_ccb->ccb_h.target_lun) !=
3316 					    CAM_REQ_CMP) {
3317 				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3318 				cam_dflags = CAM_DEBUG_NONE;
3319 			} else {
3320 				start_ccb->ccb_h.status = CAM_REQ_CMP;
3321 				xpt_print_path(cam_dpath);
3322 				printf("debugging flags now %x\n", cam_dflags);
3323 			}
3324 		} else {
3325 			cam_dpath = NULL;
3326 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3327 		}
3328 		splx(s);
3329 #else /* !CAMDEBUG */
3330 		start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3331 #endif /* CAMDEBUG */
3332 		break;
3333 	}
3334 	case XPT_NOOP:
3335 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3336 			xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3337 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3338 		break;
3339 	default:
3340 	case XPT_SDEV_TYPE:
3341 	case XPT_TERM_IO:
3342 	case XPT_ENG_INQ:
3343 		/* XXX Implement */
3344 		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3345 		break;
3346 	}
3347 	splx(iopl);
3348 }
3349 
3350 void
3351 xpt_polled_action(union ccb *start_ccb)
3352 {
3353 	int	  s;
3354 	u_int32_t timeout;
3355 	struct	  cam_sim *sim;
3356 	struct	  cam_devq *devq;
3357 	struct	  cam_ed *dev;
3358 
3359 	timeout = start_ccb->ccb_h.timeout;
3360 	sim = start_ccb->ccb_h.path->bus->sim;
3361 	devq = sim->devq;
3362 	dev = start_ccb->ccb_h.path->device;
3363 
3364 	s = splcam();
3365 
3366 	/*
3367 	 * Steal an opening so that no other queued requests
3368 	 * can get it before us while we simulate interrupts.
3369 	 */
3370 	dev->ccbq.devq_openings--;
3371 	dev->ccbq.dev_openings--;
3372 
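	/*
	 * Each iteration below delays for roughly one millisecond
	 * (DELAY(1000)), so ccb_h.timeout is consumed in approximately
	 * millisecond units while we wait for an opening.
	 */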
3373 	while ((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
3374 	   && (--timeout > 0)) {
3375 		DELAY(1000);
3376 		(*(sim->sim_poll))(sim);
3377 		swi_camnet();
3378 		swi_cambio();
3379 	}
3380 
3381 	dev->ccbq.devq_openings++;
3382 	dev->ccbq.dev_openings++;
3383 
3384 	if (timeout != 0) {
3385 		xpt_action(start_ccb);
3386 		while (--timeout > 0) {
3387 			(*(sim->sim_poll))(sim);
3388 			swi_camnet();
3389 			swi_cambio();
3390 			if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3391 			    != CAM_REQ_INPROG)
3392 				break;
3393 			DELAY(1000);
3394 		}
3395 		if (timeout == 0) {
3396 			/*
3397 			 * XXX Is it worth adding a sim_timeout entry
3398 			 * point so we can attempt recovery?  If
3399 			 * this is only used for dumps, I don't think
3400 			 * it is.
3401 			 */
3402 			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3403 		}
3404 	} else {
3405 		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3406 	}
3407 	splx(s);
3408 }
3409 
3410 /*
3411  * Schedule a peripheral driver to receive a ccb when its
3412  * target device has space for more transactions.
3413  */
3414 void
3415 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3416 {
3417 	struct cam_ed *device;
3418 	int s;
3419 	int runq;
3420 
3421 	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3422 	device = perph->path->device;
3423 	s = splsoftcam();
3424 	if (periph_is_queued(perph)) {
3425 		/* Simply reorder based on new priority */
3426 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3427 			  ("   change priority to %d\n", new_priority));
3428 		if (new_priority < perph->pinfo.priority) {
3429 			camq_change_priority(&device->drvq,
3430 					     perph->pinfo.index,
3431 					     new_priority);
3432 		}
3433 		runq = 0;
3434 	} else {
3435 		/* New entry on the queue */
3436 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3437 			  ("   added periph to queue\n"));
3438 		perph->pinfo.priority = new_priority;
3439 		perph->pinfo.generation = ++device->drvq.generation;
3440 		camq_insert(&device->drvq, &perph->pinfo);
3441 		runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3442 	}
3443 	splx(s);
3444 	if (runq != 0) {
3445 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3446 			  ("   calling xpt_run_dev_allocq\n"));
3447 		xpt_run_dev_allocq(perph->path->bus);
3448 	}
3449 }
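
/*
 * Typical flow (a sketch): a peripheral driver's strategy routine calls
 * xpt_schedule(periph, priority) rather than allocating a CCB itself;
 * once the device can allocate one, xpt_run_dev_allocq() below hands it
 * to the driver through its periph_start() entry point.
 */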
3450 
3451 
3452 /*
3453  * Schedule a device to run on a given queue.
3454  * If the device was inserted as a new entry on the queue,
3455  * return 1 meaning the device queue should be run. If we
3456  * were already queued, implying someone else has already
3457  * started the queue, return 0 so the caller doesn't attempt
3458  * to run the queue.  Must be run at splsoftcam or higher
3459  * (splcam will do, since it encompasses splsoftcam).
3460  */
3461 static int
3462 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3463 		 u_int32_t new_priority)
3464 {
3465 	int retval;
3466 	u_int32_t old_priority;
3467 
3468 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3469 
3470 	old_priority = pinfo->priority;
3471 
3472 	/*
3473 	 * Are we already queued?
3474 	 */
3475 	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3476 		/* Simply reorder based on new priority */
3477 		if (new_priority < old_priority) {
3478 			camq_change_priority(queue, pinfo->index,
3479 					     new_priority);
3480 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3481 					("changed priority to %d\n",
3482 					 new_priority));
3483 		}
3484 		retval = 0;
3485 	} else {
3486 		/* New entry on the queue */
3487 		if (new_priority < old_priority)
3488 			pinfo->priority = new_priority;
3489 
3490 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3491 				("Inserting onto queue\n"));
3492 		pinfo->generation = ++queue->generation;
3493 		camq_insert(queue, pinfo);
3494 		retval = 1;
3495 	}
3496 	return (retval);
3497 }
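
/*
 * Callers follow the pattern from xpt_action()'s XPT_ENG_EXEC case
 * above, so that only the caller that actually inserted the device
 * runs the queue:
 *
 *	s = splsoftcam();
 *	runq = xpt_schedule_dev_sendq(path->bus, path->device);
 *	splx(s);
 *	if (runq != 0)
 *		xpt_run_dev_sendq(path->bus);
 */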
3498 
3499 static void
3500 xpt_run_dev_allocq(struct cam_eb *bus)
3501 {
3502 	struct	cam_devq *devq;
3503 	int	s;
3504 
3505 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3506 	devq = bus->sim->devq;
3507 
3508 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3509 			("   qfrozen_cnt == 0x%x, entries == %d, "
3510 			 "openings == %d, active == %d\n",
3511 			 devq->alloc_queue.qfrozen_cnt,
3512 			 devq->alloc_queue.entries,
3513 			 devq->alloc_openings,
3514 			 devq->alloc_active));
3515 
3516 	s = splsoftcam();
3517 	devq->alloc_queue.qfrozen_cnt++;
3518 	while ((devq->alloc_queue.entries > 0)
3519 	    && (devq->alloc_openings > 0)
3520 	    && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3521 		struct	cam_ed_qinfo *qinfo;
3522 		struct	cam_ed *device;
3523 		union	ccb *work_ccb;
3524 		struct	cam_periph *drv;
3525 		struct	camq *drvq;
3526 
3527 		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3528 							   CAMQ_HEAD);
3529 		device = qinfo->device;
3530 
3531 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3532 				("running device %p\n", device));
3533 
3534 		drvq = &device->drvq;
3535 
3536 #ifdef CAMDEBUG
3537 		if (drvq->entries <= 0) {
3538 			panic("xpt_run_dev_allocq: "
3539 			      "Device on queue without any work to do");
3540 		}
3541 #endif
3542 		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3543 			devq->alloc_openings--;
3544 			devq->alloc_active++;
3545 			drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3546 			splx(s);
3547 			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3548 				      drv->pinfo.priority);
3549 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3550 					("calling periph start\n"));
3551 			drv->periph_start(drv, work_ccb);
3552 		} else {
3553 			/*
3554 			 * Malloc failure in alloc_ccb
3555 			 */
3556 			/*
3557 			 * XXX add us to a list to be run from free_ccb
3558 			 * if we don't have any ccbs active on this
3559 			 * device queue otherwise we may never get run
3560 			 * again.
3561 			 */
3562 			break;
3563 		}
3564 
3565 		/* Raise IPL for possible insertion and test at top of loop */
3566 		s = splsoftcam();
3567 
3568 		if (drvq->entries > 0) {
3569 			/* We have more work.  Attempt to reschedule */
3570 			xpt_schedule_dev_allocq(bus, device);
3571 		}
3572 	}
3573 	devq->alloc_queue.qfrozen_cnt--;
3574 	splx(s);
3575 }
3576 
3577 static void
3578 xpt_run_dev_sendq(struct cam_eb *bus)
3579 {
3580 	struct	cam_devq *devq;
3581 	int	s;
3582 
3583 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3584 
3585 	devq = bus->sim->devq;
3586 
3587 	s = splcam();
3588 	devq->send_queue.qfrozen_cnt++;
3589 	splx(s);
3590 	s = splsoftcam();
3591 	while ((devq->send_queue.entries > 0)
3592 	    && (devq->send_openings > 0)) {
3593 		struct	cam_ed_qinfo *qinfo;
3594 		struct	cam_ed *device;
3595 		union ccb *work_ccb;
3596 		struct	cam_sim *sim;
3597 		int	ospl;
3598 
3599 		ospl = splcam();
3600 		if (devq->send_queue.qfrozen_cnt > 1) {
3601 			splx(ospl);
3602 			break;
3603 		}
3604 
3605 		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3606 							   CAMQ_HEAD);
3607 		device = qinfo->device;
3608 
3609 		/*
3610 		 * If the device has been "frozen", don't attempt
3611 		 * to run it.
3612 		 */
3613 		if (device->qfrozen_cnt > 0) {
3614 			splx(ospl);
3615 			continue;
3616 		}
3617 
3618 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3619 				("running device %p\n", device));
3620 
3621 		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3622 		if (work_ccb == NULL) {
3623 			printf("device on run queue with no ccbs???\n");
3624 			splx(ospl);
3625 			continue;
3626 		}
3627 
3628 		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3629 
3630 			if (num_highpower <= 0) {
3631 				/*
3632 				 * We got a high power command, but we
3633 				 * don't have any available slots.  Freeze
3634 				 * the device queue until we have a slot
3635 				 * available.
3636 				 */
3637 				device->qfrozen_cnt++;
3638 				STAILQ_INSERT_TAIL(&highpowerq,
3639 						   &work_ccb->ccb_h,
3640 						   xpt_links.stqe);
3641 
3642 				splx(ospl);
3643 				continue;
3644 			} else {
3645 				/*
3646 				 * Consume a high power slot while
3647 				 * this ccb runs.
3648 				 */
3649 				num_highpower--;
3650 			}
3651 		}
3652 		devq->active_dev = device;
3653 		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3654 
3655 		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3656 		splx(ospl);
3657 
3658 		devq->send_openings--;
3659 		devq->send_active++;
3660 
3661 		if (device->ccbq.queue.entries > 0)
3662 			xpt_schedule_dev_sendq(bus, device);
3663 
3664 		if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3665 			/*
3666 			 * The client wants to freeze the queue
3667 			 * after this CCB is sent.
3668 			 */
3669 			ospl = splcam();
3670 			device->qfrozen_cnt++;
3671 			splx(ospl);
3672 		}
3673 
3674 		splx(s);
3675 
3676 		/* In Target mode, the peripheral driver knows best... */
3677 		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3678 			if ((device->inq_flags & SID_CmdQue) != 0
3679 			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3680 				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3681 			else
3682 				/*
3683 				 * Clear this in case of a retried CCB that
3684 				 * failed due to a rejected tag.
3685 				 */
3686 				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3687 		}
3688 
3689 		/*
3690 		 * Device queues can be shared among multiple sim instances
3691 		 * that reside on different busses.  Use the SIM in the queue
3692 		 * CCB's path, rather than the one in the bus that was passed
3693 		 * into this function.
3694 		 */
3695 		sim = work_ccb->ccb_h.path->bus->sim;
3696 		(*(sim->sim_action))(sim, work_ccb);
3697 
3698 		ospl = splcam();
3699 		devq->active_dev = NULL;
3700 		splx(ospl);
3701 		/* Raise IPL for possible insertion and test at top of loop */
3702 		s = splsoftcam();
3703 	}
3704 	splx(s);
3705 	s = splcam();
3706 	devq->send_queue.qfrozen_cnt--;
3707 	splx(s);
3708 }
3709 
3710 /*
3711  * This function merges fields from the slave ccb into the master ccb,
3712  * while keeping important header fields in the master ccb constant.
3713  */
3714 void
3715 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3716 {
3717 	/*
3718 	 * Pull fields that are valid for peripheral drivers to set
3719 	 * into the master CCB along with the CCB "payload".
3720 	 */
3721 	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3722 	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3723 	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3724 	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3725 	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3726 	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3727 }
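
/*
 * Usage sketch (hypothetical caller, not part of this file's logic): a
 * driver holding a transport-owned "master" ccb can graft the payload of
 * a locally prepared "slave" ccb onto it without disturbing the master's
 * path and queue bookkeeping.  slave_ccb is assumed to have been filled
 * out by the caller.
 */
#if 0
	union ccb *master;

	master = xpt_alloc_ccb();
	xpt_setup_ccb(&master->ccb_h, path, /*priority*/1);
	xpt_merge_ccb(master, &slave_ccb);	/* copies payload, keeps path */
	xpt_action(master);
#endif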
3728 
3729 void
3730 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3731 {
3732 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3733 	ccb_h->pinfo.priority = priority;
3734 	ccb_h->path = path;
3735 	ccb_h->path_id = path->bus->path_id;
3736 	if (path->target)
3737 		ccb_h->target_id = path->target->target_id;
3738 	else
3739 		ccb_h->target_id = CAM_TARGET_WILDCARD;
3740 	if (path->device) {
3741 		ccb_h->target_lun = path->device->lun_id;
3742 		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3743 	} else {
3744 		ccb_h->target_lun = CAM_TARGET_WILDCARD;
3745 	}
3746 	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3747 	ccb_h->flags = 0;
3748 }
3749 
3750 /* Path manipulation functions */
3751 cam_status
3752 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3753 		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3754 {
3755 	struct	   cam_path *path;
3756 	cam_status status;
3757 
3758 	path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT);
3759 
3760 	if (path == NULL) {
3761 		status = CAM_RESRC_UNAVAIL;
3762 		return(status);
3763 	}
3764 	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3765 	if (status != CAM_REQ_CMP) {
3766 		free(path, M_DEVBUF);
3767 		path = NULL;
3768 	}
3769 	*new_path_ptr = path;
3770 	return (status);
3771 }
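
/*
 * Minimal sketch of the create/free pairing (path_id is assumed to be a
 * valid registered bus id):
 */
#if 0
	struct cam_path *path;

	if (xpt_create_path(&path, /*periph*/NULL, path_id,
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	    == CAM_REQ_CMP) {
		xpt_print_path(path);
		printf("wildcard path created\n");
		xpt_free_path(path);
	}
#endif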
3772 
3773 static cam_status
3774 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3775 		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3776 {
3777 	struct	     cam_eb *bus;
3778 	struct	     cam_et *target;
3779 	struct	     cam_ed *device;
3780 	cam_status   status;
3781 	int	     s;
3782 
3783 	status = CAM_REQ_CMP;	/* Completed without error */
3784 	target = NULL;		/* Wildcarded */
3785 	device = NULL;		/* Wildcarded */
3786 
3787 	/*
3788 	 * We will potentially modify the EDT, so block interrupts
3789 	 * that may attempt to create cam paths.
3790 	 */
3791 	s = splcam();
3792 	bus = xpt_find_bus(path_id);
3793 	if (bus == NULL) {
3794 		status = CAM_PATH_INVALID;
3795 	} else {
3796 		target = xpt_find_target(bus, target_id);
3797 		if (target == NULL) {
3798 			/* Create one */
3799 			struct cam_et *new_target;
3800 
3801 			new_target = xpt_alloc_target(bus, target_id);
3802 			if (new_target == NULL) {
3803 				status = CAM_RESRC_UNAVAIL;
3804 			} else {
3805 				target = new_target;
3806 			}
3807 		}
3808 		if (target != NULL) {
3809 			device = xpt_find_device(target, lun_id);
3810 			if (device == NULL) {
3811 				/* Create one */
3812 				struct cam_ed *new_device;
3813 
3814 				new_device = xpt_alloc_device(bus,
3815 							      target,
3816 							      lun_id);
3817 				if (new_device == NULL) {
3818 					status = CAM_RESRC_UNAVAIL;
3819 				} else {
3820 					device = new_device;
3821 				}
3822 			}
3823 		}
3824 	}
3825 	splx(s);
3826 
3827 	/*
3828 	 * Only touch the user's data if we are successful.
3829 	 */
3830 	if (status == CAM_REQ_CMP) {
3831 		new_path->periph = perph;
3832 		new_path->bus = bus;
3833 		new_path->target = target;
3834 		new_path->device = device;
3835 		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3836 	} else {
3837 		if (device != NULL)
3838 			xpt_release_device(bus, target, device);
3839 		if (target != NULL)
3840 			xpt_release_target(bus, target);
3841 		if (bus != NULL)
3842 			xpt_release_bus(bus);
3843 	}
3844 	return (status);
3845 }
3846 
3847 static void
3848 xpt_release_path(struct cam_path *path)
3849 {
3850 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3851 	if (path->device != NULL) {
3852 		xpt_release_device(path->bus, path->target, path->device);
3853 		path->device = NULL;
3854 	}
3855 	if (path->target != NULL) {
3856 		xpt_release_target(path->bus, path->target);
3857 		path->target = NULL;
3858 	}
3859 	if (path->bus != NULL) {
3860 		xpt_release_bus(path->bus);
3861 		path->bus = NULL;
3862 	}
3863 }
3864 
3865 void
3866 xpt_free_path(struct cam_path *path)
3867 {
3868 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3869 	xpt_release_path(path);
3870 	free(path, M_DEVBUF);
3871 }
3872 
3873 
3874 /*
3875  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3876  * in path1, 2 for match with wildcards in path2.
3877  */
3878 int
3879 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3880 {
3881 	int retval = 0;
3882 
3883 	if (path1->bus != path2->bus) {
3884 		if (path1->bus->path_id == CAM_BUS_WILDCARD)
3885 			retval = 1;
3886 		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3887 			retval = 2;
3888 		else
3889 			return (-1);
3890 	}
3891 	if (path1->target != path2->target) {
3892 		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3893 			if (retval == 0)
3894 				retval = 1;
3895 		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3896 			retval = 2;
3897 		else
3898 			return (-1);
3899 	}
3900 	if (path1->device != path2->device) {
3901 		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3902 			if (retval == 0)
3903 				retval = 1;
3904 		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3905 			retval = 2;
3906 		else
3907 			return (-1);
3908 	}
3909 	return (retval);
3910 }
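
/*
 * Sketch of interpreting the return value when matching an async event's
 * path against a peripheral's path (both variable names hypothetical):
 */
#if 0
	switch (xpt_path_comp(periph_path, event_path)) {
	case -1:
		/* Paths refer to unrelated devices; ignore the event. */
		break;
	case 0:		/* exact match */
	case 1:		/* wildcards in periph_path cover event_path */
	case 2:		/* wildcards in event_path cover periph_path */
		/* Deliver the event. */
		break;
	}
#endif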
3911 
3912 void
3913 xpt_print_path(struct cam_path *path)
3914 {
3915 	if (path == NULL)
3916 		printf("(nopath): ");
3917 	else {
3918 		if (path->periph != NULL)
3919 			printf("(%s%d:", path->periph->periph_name,
3920 			       path->periph->unit_number);
3921 		else
3922 			printf("(noperiph:");
3923 
3924 		if (path->bus != NULL)
3925 			printf("%s%d:%d:", path->bus->sim->sim_name,
3926 			       path->bus->sim->unit_number,
3927 			       path->bus->sim->bus_id);
3928 		else
3929 			printf("nobus:");
3930 
3931 		if (path->target != NULL)
3932 			printf("%d:", path->target->target_id);
3933 		else
3934 			printf("X:");
3935 
3936 		if (path->device != NULL)
3937 			printf("%d): ", path->device->lun_id);
3938 		else
3939 			printf("X): ");
3940 	}
3941 }
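
/*
 * For a fully specified path the output takes the form
 * "(da0:ahc0:0:3:0): " -- peripheral name and unit, SIM name, unit and
 * bus id, target id, and lun id (device names here are illustrative).
 */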
3942 
3943 path_id_t
3944 xpt_path_path_id(struct cam_path *path)
3945 {
3946 	return(path->bus->path_id);
3947 }
3948 
3949 target_id_t
3950 xpt_path_target_id(struct cam_path *path)
3951 {
3952 	if (path->target != NULL)
3953 		return (path->target->target_id);
3954 	else
3955 		return (CAM_TARGET_WILDCARD);
3956 }
3957 
3958 lun_id_t
3959 xpt_path_lun_id(struct cam_path *path)
3960 {
3961 	if (path->device != NULL)
3962 		return (path->device->lun_id);
3963 	else
3964 		return (CAM_LUN_WILDCARD);
3965 }
3966 
3967 struct cam_sim *
3968 xpt_path_sim(struct cam_path *path)
3969 {
3970 	return (path->bus->sim);
3971 }
3972 
3973 struct cam_periph*
3974 xpt_path_periph(struct cam_path *path)
3975 {
3976 	return (path->periph);
3977 }
3978 
3979 /*
3980  * Release a CAM control block for the caller.  Remit the cost of the structure
3981  * to the device referenced by the path.  If this device had no 'credits'
3982  * and peripheral drivers have registered async callbacks for this
3983  * notification, call them now.
3984  */
3985 void
3986 xpt_release_ccb(union ccb *free_ccb)
3987 {
3988 	int	 s;
3989 	struct	 cam_path *path;
3990 	struct	 cam_ed *device;
3991 	struct	 cam_eb *bus;
3992 
3993 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3994 	path = free_ccb->ccb_h.path;
3995 	device = path->device;
3996 	bus = path->bus;
3997 	s = splsoftcam();
3998 	cam_ccbq_release_opening(&device->ccbq);
3999 	if (xpt_ccb_count > xpt_max_ccbs) {
4000 		xpt_free_ccb(free_ccb);
4001 		xpt_ccb_count--;
4002 	} else {
4003 		SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
4004 	}
4005 	bus->sim->devq->alloc_openings++;
4006 	bus->sim->devq->alloc_active--;
4007 	/* XXX Turn this into an inline function - xpt_run_device?? */
4008 	if ((device_is_alloc_queued(device) == 0)
4009 	 && (device->drvq.entries > 0)) {
4010 		xpt_schedule_dev_allocq(bus, device);
4011 	}
4012 	splx(s);
4013 	if (dev_allocq_is_runnable(bus->sim->devq))
4014 		xpt_run_dev_allocq(bus);
4015 }
4016 
4017 /* Functions accessed by SIM drivers */
4018 
4019 /*
4020  * A sim structure, listing the SIM entry points and instance
4021  * identification info, is passed to xpt_bus_register to hook the SIM
4022  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
4023  * for this new bus, places it in the list of busses, and assigns
4024  * it a path_id.  The path_id may be influenced by "hard wiring"
4025  * information specified by the user.  Once interrupt services are
4026  * available, the bus will be probed.
4027  */
4028 int32_t
4029 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4030 {
4031 	struct cam_eb *new_bus;
4032 	struct cam_eb *old_bus;
4033 	struct ccb_pathinq cpi;
4034 	int s;
4035 
4036 	sim->bus_id = bus;
4037 	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4038 					  M_DEVBUF, M_NOWAIT);
4039 	if (new_bus == NULL) {
4040 		/* Couldn't satisfy request */
4041 		return (CAM_RESRC_UNAVAIL);
4042 	}
4043 
4044 	if (strcmp(sim->sim_name, "xpt") != 0) {
4045 
4046 		sim->path_id =
4047 		    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4048 	}
4049 
4050 	TAILQ_INIT(&new_bus->et_entries);
4051 	new_bus->path_id = sim->path_id;
4052 	new_bus->sim = sim;
4053 	timevalclear(&new_bus->last_reset);
4054 	new_bus->flags = 0;
4055 	new_bus->refcount = 1;	/* Held until a bus_deregister event */
4056 	new_bus->generation = 0;
4057 	s = splcam();
4058 	old_bus = TAILQ_FIRST(&xpt_busses);
4059 	while (old_bus != NULL
4060 	    && old_bus->path_id < new_bus->path_id)
4061 		old_bus = TAILQ_NEXT(old_bus, links);
4062 	if (old_bus != NULL)
4063 		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4064 	else
4065 		TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
4066 	bus_generation++;
4067 	splx(s);
4068 
4069 	/* Notify interested parties */
4070 	if (sim->path_id != CAM_XPT_PATH_ID) {
4071 		struct cam_path path;
4072 
4073 		xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4074 			         CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4075 		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4076 		cpi.ccb_h.func_code = XPT_PATH_INQ;
4077 		xpt_action((union ccb *)&cpi);
4078 		xpt_async(AC_PATH_REGISTERED, xpt_periph->path, &cpi);
4079 		xpt_release_path(&path);
4080 	}
4081 	return (CAM_SUCCESS);
4082 }
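
/*
 * Registration sketch for a hypothetical SIM driver's attach routine;
 * the queue sizes and driver names below are illustrative only:
 */
#if 0
	struct cam_devq *devq;
	struct cam_sim *sim;

	devq = cam_simq_alloc(/*max_sim_transactions*/64);
	if (devq == NULL)
		return (ENOMEM);
	sim = cam_sim_alloc(drv_action, drv_poll, "drv", softc, unit,
			    /*max_dev_transactions*/1,
			    /*max_tagged_dev_transactions*/64, devq);
	if (sim == NULL || xpt_bus_register(sim, /*bus*/0) != CAM_SUCCESS)
		return (ENXIO);
#endif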
4083 
4084 int32_t
4085 xpt_bus_deregister(path_id_t pathid)
4086 {
4087 	struct cam_path bus_path;
4088 	cam_status status;
4089 
4090 	status = xpt_compile_path(&bus_path, NULL, pathid,
4091 				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4092 	if (status != CAM_REQ_CMP)
4093 		return (status);
4094 
4095 	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4096 	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4097 
4098 	/* Release the reference count held while registered. */
4099 	xpt_release_bus(bus_path.bus);
4100 	xpt_release_path(&bus_path);
4101 
4102 	return (CAM_REQ_CMP);
4103 }
4104 
4105 static path_id_t
4106 xptnextfreepathid(void)
4107 {
4108 	struct cam_eb *bus;
4109 	path_id_t pathid;
4110 	char *strval;
4111 
4112 	pathid = 0;
4113 	bus = TAILQ_FIRST(&xpt_busses);
4114 retry:
4115 	/* Find an unoccupied pathid */
4116 	while (bus != NULL
4117 	    && bus->path_id <= pathid) {
4118 		if (bus->path_id == pathid)
4119 			pathid++;
4120 		bus = TAILQ_NEXT(bus, links);
4121 	}
4122 
4123 	/*
4124 	 * Ensure that this pathid is not reserved for
4125 	 * a bus that may be registered in the future.
4126 	 */
4127 	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4128 		++pathid;
4129 		/* Start the search over */
4130 		goto retry;
4131 	}
4132 	return (pathid);
4133 }
4134 
4135 static path_id_t
4136 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4137 {
4138 	path_id_t pathid;
4139 	int i, dunit, val;
4140 	char buf[32], *strval;
4141 
4142 	pathid = CAM_XPT_PATH_ID;
4143 	snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4144 	i = -1;
4145 	while ((i = resource_locate(i, "scbus")) != -1) {
4146 		dunit = resource_query_unit(i);
4147 		if (dunit < 0)		/* unwired?! */
4148 			continue;
4149 		if (resource_string_value("scbus", dunit, "at", &strval) != 0)
4150 			continue;
4151 		if (strcmp(buf, strval) != 0)
4152 			continue;
4153 		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4154 			if (sim_bus == val) {
4155 				pathid = dunit;
4156 				break;
4157 			}
4158 		} else if (sim_bus == 0) {
4159 			/* Unspecified matches bus 0 */
4160 			pathid = dunit;
4161 			break;
4162 		} else {
4163 			printf("Ambiguous scbus configuration for %s%d "
4164 			       "bus %d, cannot wire down.  The kernel "
4165 			       "config entry for scbus%d should "
4166 			       "specify a controller bus.\n"
4167 			       "Scbus will be assigned dynamically.\n",
4168 			       sim_name, sim_unit, sim_bus, dunit);
4169 			break;
4170 		}
4171 	}
4172 
4173 	if (pathid == CAM_XPT_PATH_ID)
4174 		pathid = xptnextfreepathid();
4175 	return (pathid);
4176 }
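
/*
 * The "scbus" resources consulted above come from kernel config "hard
 * wiring" along the lines of (controller name hypothetical; exact syntax
 * depends on the config(8) version):
 *
 *	controller	scbus0	at ahc0 bus 0
 *
 * which makes resource_string_value("scbus", 0, "at", &strval) return
 * "ahc0", reserving path id 0 for that controller's bus 0.
 */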
4177 
4178 void
4179 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4180 {
4181 	struct cam_eb *bus;
4182 	struct cam_et *target, *next_target;
4183 	struct cam_ed *device, *next_device;
4184 	int s;
4185 
4186 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4187 
4188 	/*
4189 	 * Most async events come from a CAM interrupt context.  In
4190 	 * a few cases, the error recovery code at the peripheral layer,
4191 	 * which may run from our SWI or a process context, may signal
4192 	 * deferred events with a call to xpt_async. Ensure async
4193 	 * notifications are serialized by blocking cam interrupts.
4194 	 */
4195 	s = splcam();
4196 
4197 	bus = path->bus;
4198 
4199 	if (async_code == AC_BUS_RESET) {
4200 		int s;
4201 
4202 		s = splclock();
4203 		/* Update our notion of when the last reset occurred */
4204 		microtime(&bus->last_reset);
4205 		splx(s);
4206 	}
4207 
4208 	for (target = TAILQ_FIRST(&bus->et_entries);
4209 	     target != NULL;
4210 	     target = next_target) {
4211 
4212 		next_target = TAILQ_NEXT(target, links);
4213 
4214 		if (path->target != target
4215 		 && path->target->target_id != CAM_TARGET_WILDCARD)
4216 			continue;
4217 
4218 		if (async_code == AC_SENT_BDR) {
4219 			int s;
4220 
4221 			/* Update our notion of when the last reset occurred */
4222 			s = splclock();
4223 			microtime(&path->target->last_reset);
4224 			splx(s);
4225 		}
4226 
4227 		for (device = TAILQ_FIRST(&target->ed_entries);
4228 		     device != NULL;
4229 		     device = next_device) {
4230 			cam_status status;
4231 			struct cam_path newpath;
4232 
4233 			next_device = TAILQ_NEXT(device, links);
4234 
4235 			if (path->device != device
4236 			 && path->device->lun_id != CAM_LUN_WILDCARD)
4237 				continue;
4238 
4239 			/*
4240 			 * We need our own path with wildcards expanded to
4241 			 * handle certain types of events.
4242 			 */
4243 			if ((async_code == AC_SENT_BDR)
4244 			 || (async_code == AC_BUS_RESET)
4245 			 || (async_code == AC_INQ_CHANGED))
4246 				status = xpt_compile_path(&newpath, NULL,
4247 							  bus->path_id,
4248 							  target->target_id,
4249 							  device->lun_id);
4250 			else
4251 				status = CAM_REQ_CMP_ERR;
4252 
4253 			if (status == CAM_REQ_CMP) {
4254 
4255 				/*
4256 				 * Allow transfer negotiation to occur in a
4257 				 * tag free environment.
4258 				 */
4259 				if (async_code == AC_SENT_BDR
4260 				  || async_code == AC_BUS_RESET)
4261 					xpt_toggle_tags(&newpath);
4262 
4263 				if (async_code == AC_INQ_CHANGED) {
4264 					/*
4265 					 * We've sent a start unit command, or
4266 					 * something similar to a device that
4267 					 * may have caused its inquiry data to
4268 					 * change. So we re-scan the device to
4269 					 * refresh the inquiry data for it.
4270 					 */
4271 					xpt_scan_lun(newpath.periph, &newpath,
4272 						     CAM_EXPECT_INQ_CHANGE,
4273 						     NULL);
4274 				}
4275 				xpt_release_path(&newpath);
4276 			} else if (async_code == AC_LOST_DEVICE) {
4277 				device->flags |= CAM_DEV_UNCONFIGURED;
4278 			} else if (async_code == AC_TRANSFER_NEG) {
4279 				struct ccb_trans_settings *settings;
4280 
4281 				settings =
4282 				    (struct ccb_trans_settings *)async_arg;
4283 				xpt_set_transfer_settings(settings, device,
4284 							  /*async_update*/TRUE);
4285 			}
4286 
4287 			xpt_async_bcast(&device->asyncs,
4288 					async_code,
4289 					path,
4290 					async_arg);
4291 		}
4292 	}
4293 
4294 	/*
4295 	 * If this wasn't a fully wildcarded async, tell all
4296 	 * clients that want all async events.
4297 	 */
4298 	if (bus != xpt_periph->path->bus)
4299 		xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4300 				path, async_arg);
4301 	splx(s);
4302 }
4303 
4304 static void
4305 xpt_async_bcast(struct async_list *async_head,
4306 		u_int32_t async_code,
4307 		struct cam_path *path, void *async_arg)
4308 {
4309 	struct async_node *cur_entry;
4310 
4311 	cur_entry = SLIST_FIRST(async_head);
4312 	while (cur_entry != NULL) {
4313 		struct async_node *next_entry;
4314 		/*
4315 		 * Grab the next list entry before we call the current
4316 		 * entry's callback.  This is because the callback function
4317 		 * can delete its async callback entry.
4318 		 */
4319 		next_entry = SLIST_NEXT(cur_entry, links);
4320 		if ((cur_entry->event_enable & async_code) != 0)
4321 			cur_entry->callback(cur_entry->callback_arg,
4322 					    async_code, path,
4323 					    async_arg);
4324 		cur_entry = next_entry;
4325 	}
4326 }
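
/*
 * Sketch of how a peripheral driver typically lands on one of these
 * async lists: it submits an XPT_SASYNC_CB ccb (the handler name
 * drvasync is hypothetical):
 */
#if 0
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
	csa.callback = drvasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);
#endif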
4327 
4328 u_int32_t
4329 xpt_freeze_devq(struct cam_path *path, u_int count)
4330 {
4331 	int s;
4332 	struct ccb_hdr *ccbh;
4333 
4334 	s = splcam();
4335 	path->device->qfrozen_cnt += count;
4336 
4337 	/*
4338 	 * Mark the last CCB in the queue as needing
4339 	 * changed its state yet.  This fixes a race
4340 	 * where a ccb is just about to be queued to
4341 	 * a controller driver when its interrupt routine
4342 	 * freezes the queue.  To completely close the
4343 	 * hole, controller drivers must check to see
4344 	 * hole, controller drives must check to see
4345 	 * if a ccb's status is still CAM_REQ_INPROG
4346 	 * under spl protection just before they queue
4347 	 * the CCB.  See ahc_action/ahc_freeze_devq for
4348 	 * an example.
4349 	 */
4350 	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4351 	if (ccbh && ccbh->status == CAM_REQ_INPROG)
4352 		ccbh->status = CAM_REQUEUE_REQ;
4353 	splx(s);
4354 	return (path->device->qfrozen_cnt);
4355 }
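
/*
 * Sketch of the controller-side check described above (modeled on the
 * ahc example; performed just before handing the CCB to the hardware):
 */
#if 0
	s = splcam();
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		/* Frozen behind our back; hand the CCB back for requeue. */
		splx(s);
		xpt_done(ccb);
		return;
	}
	/* ... commit the CCB to the controller ... */
	splx(s);
#endif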
4356 
4357 u_int32_t
4358 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4359 {
4360 	sim->devq->send_queue.qfrozen_cnt += count;
4361 	if (sim->devq->active_dev != NULL) {
4362 		struct ccb_hdr *ccbh;
4363 
4364 		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4365 				  ccb_hdr_tailq);
4366 		if (ccbh && ccbh->status == CAM_REQ_INPROG)
4367 			ccbh->status = CAM_REQUEUE_REQ;
4368 	}
4369 	return (sim->devq->send_queue.qfrozen_cnt);
4370 }
4371 
4372 static void
4373 xpt_release_devq_timeout(void *arg)
4374 {
4375 	struct cam_ed *device;
4376 
4377 	device = (struct cam_ed *)arg;
4378 
4379 	xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4380 }
4381 
4382 void
4383 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4384 {
4385 	xpt_release_devq_device(path->device, count, run_queue);
4386 }
4387 
4388 static void
4389 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4390 {
4391 	int	rundevq;
4392 	int	s0, s1;
4393 
4394 	rundevq = 0;
4395 	s0 = splsoftcam();
4396 	s1 = splcam();
4397 	if (dev->qfrozen_cnt > 0) {
4398 
4399 		count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4400 		dev->qfrozen_cnt -= count;
4401 		if (dev->qfrozen_cnt == 0) {
4402 
4403 			/*
4404 			 * No longer need to wait for a successful
4405 			 * command completion.
4406 			 */
4407 			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4408 
4409 			/*
4410 			 * Remove any timeouts that might be scheduled
4411 			 * to release this queue.
4412 			 */
4413 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4414 				untimeout(xpt_release_devq_timeout, dev,
4415 					  dev->c_handle);
4416 				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4417 			}
4418 
4419 			/*
4420 			 * Now that we are unfrozen schedule the
4421 			 * device so any pending transactions are
4422 			 * run.
4423 			 */
4424 			if ((dev->ccbq.queue.entries > 0)
4425 			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4426 			 && (run_queue != 0)) {
4427 				rundevq = 1;
4428 			}
4429 		}
4430 	}
4431 	splx(s1);
4432 	if (rundevq != 0)
4433 		xpt_run_dev_sendq(dev->target->bus);
4434 	splx(s0);
4435 }
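
/*
 * Typical freeze/release pairing during recovery (a sketch; the real
 * callers live in the error recovery and peripheral code):
 */
#if 0
	xpt_freeze_devq(path, /*count*/1);
	/* ... perform ordered recovery actions ... */
	xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
#endif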
4436 
4437 void
4438 xpt_release_simq(struct cam_sim *sim, int run_queue)
4439 {
4440 	int	s;
4441 	struct	camq *sendq;
4442 
4443 	sendq = &(sim->devq->send_queue);
4444 	s = splcam();
4445 	if (sendq->qfrozen_cnt > 0) {
4446 
4447 		sendq->qfrozen_cnt--;
4448 		if (sendq->qfrozen_cnt == 0) {
4449 			struct cam_eb *bus;
4450 
4451 			/*
4452 			 * If there is a timeout scheduled to release this
4453 			 * sim queue, remove it.  The queue frozen count is
4454 			 * already at 0.
4455 			 */
4456 			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4457 				untimeout(xpt_release_simq_timeout, sim,
4458 					  sim->c_handle);
4459 				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4460 			}
4461 			bus = xpt_find_bus(sim->path_id);
4462 			splx(s);
4463 
4464 			if (run_queue) {
4465 				/*
4466 				 * Now that we are unfrozen run the send queue.
4467 				 */
4468 				xpt_run_dev_sendq(bus);
4469 			}
4470 			xpt_release_bus(bus);
4471 		} else
4472 			splx(s);
4473 	} else
4474 		splx(s);
4475 }
4476 
4477 static void
4478 xpt_release_simq_timeout(void *arg)
4479 {
4480 	struct cam_sim *sim;
4481 
4482 	sim = (struct cam_sim *)arg;
4483 	xpt_release_simq(sim, /* run_queue */ TRUE);
4484 }
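
/*
 * Sketch of the matching freeze side, from a SIM that has exhausted
 * controller resources (the predicate name is hypothetical):
 */
#if 0
	if (drv_out_of_resources(softc)) {
		xpt_freeze_simq(sim, /*count*/1);
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}
	/* ... later, once resources are available again ... */
	xpt_release_simq(sim, /*run_queue*/TRUE);
#endif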
4485 
4486 void
4487 xpt_done(union ccb *done_ccb)
4488 {
4489 	int s;
4490 
4491 	s = splcam();
4492 
4493 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4494 	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4495 		/*
4496 		 * Queue up the request for handling by our SWI handler.
4497 		 * This applies to any of the "non-immediate" types of ccbs.
4498 		 */
4499 		switch (done_ccb->ccb_h.path->periph->type) {
4500 		case CAM_PERIPH_BIO:
4501 			TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4502 					  sim_links.tqe);
4503 			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4504 			setsoftcambio();
4505 			break;
4506 		case CAM_PERIPH_NET:
4507 			TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
4508 					  sim_links.tqe);
4509 			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4510 			setsoftcamnet();
4511 			break;
4512 		}
4513 	}
4514 	splx(s);
4515 }
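
/*
 * A completed CCB re-enters the transport through xpt_done.  A minimal
 * SIM-side completion sketch:
 */
#if 0
	/* From a SIM's interrupt handler: */
	ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
#endif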
4516 
4517 union ccb *
4518 xpt_alloc_ccb()
4519 {
4520 	union ccb *new_ccb;
4521 
4522 	new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK);
4523 	return (new_ccb);
4524 }
4525 
4526 void
4527 xpt_free_ccb(union ccb *free_ccb)
4528 {
4529 	free(free_ccb, M_DEVBUF);
4530 }
4531 
4532 
4533 
4534 /* Private XPT functions */
4535 
4536 /*
4537  * Get a CAM control block for the caller. Charge the structure to the device
4538  * referenced by the path.  If this device has no 'credits', then the
4539  * device already has the maximum number of outstanding operations under way
4540  * and we return NULL. If we don't have sufficient resources to allocate more
4541  * ccbs, we also return NULL.
4542  */
4543 static union ccb *
4544 xpt_get_ccb(struct cam_ed *device)
4545 {
4546 	union ccb *new_ccb;
4547 	int s;
4548 
4549 	s = splsoftcam();
4550 	if ((new_ccb = (union ccb *)ccb_freeq.slh_first) == NULL) {
4551 		new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT);
4552 		if (new_ccb == NULL) {
4553 			splx(s);
4554 			return (NULL);
4555 		}
4556 		callout_handle_init(&new_ccb->ccb_h.timeout_ch);
4557 		SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
4558 				  xpt_links.sle);
4559 		xpt_ccb_count++;
4560 	}
4561 	cam_ccbq_take_opening(&device->ccbq);
4562 	SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
4563 	splx(s);
4564 	return (new_ccb);
4565 }
4566 
4567 static void
4568 xpt_release_bus(struct cam_eb *bus)
4569 {
4570 	int s;
4571 
4572 	s = splcam();
4573 	if ((--bus->refcount == 0)
4574 	 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
4575 		TAILQ_REMOVE(&xpt_busses, bus, links);
4576 		bus_generation++;
4577 		splx(s);
4578 		free(bus, M_DEVBUF);
4579 	} else
4580 		splx(s);
4581 }
4582 
4583 static struct cam_et *
4584 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4585 {
4586 	struct cam_et *target;
4587 
4588 	target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT);
4589 	if (target != NULL) {
4590 		struct cam_et *cur_target;
4591 
4592 		TAILQ_INIT(&target->ed_entries);
4593 		target->bus = bus;
4594 		target->target_id = target_id;
4595 		target->refcount = 1;
4596 		target->generation = 0;
4597 		timevalclear(&target->last_reset);
4598 		/*
4599 		 * Hold a reference to our parent bus so it
4600 		 * will not go away before we do.
4601 		 */
4602 		bus->refcount++;
4603 
4604 		/* Insertion sort into our bus's target list */
4605 		cur_target = TAILQ_FIRST(&bus->et_entries);
4606 		while (cur_target != NULL && cur_target->target_id < target_id)
4607 			cur_target = TAILQ_NEXT(cur_target, links);
4608 
4609 		if (cur_target != NULL) {
4610 			TAILQ_INSERT_BEFORE(cur_target, target, links);
4611 		} else {
4612 			TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4613 		}
4614 		bus->generation++;
4615 	}
4616 	return (target);
4617 }
4618 
4619 static void
4620 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4621 {
4622 	int s;
4623 
4624 	s = splcam();
4625 	if ((--target->refcount == 0)
4626 	 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
4627 		TAILQ_REMOVE(&bus->et_entries, target, links);
4628 		bus->generation++;
4629 		splx(s);
4630 		free(target, M_DEVBUF);
4631 		xpt_release_bus(bus);
4632 	} else
4633 		splx(s);
4634 }
4635 
4636 static struct cam_ed *
4637 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4638 {
4639 	struct	   cam_ed *device;
4640 	struct	   cam_devq *devq;
4641 	cam_status status;
4642 
4643 	/* Make space for us in the device queue on our bus */
4644 	devq = bus->sim->devq;
4645 	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4646 
4647 	if (status != CAM_REQ_CMP) {
4648 		device = NULL;
4649 	} else {
4650 		device = (struct cam_ed *)malloc(sizeof(*device),
4651 						 M_DEVBUF, M_NOWAIT);
4652 	}
4653 
4654 	if (device != NULL) {
4655 		struct cam_ed *cur_device;
4656 
4657 		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4658 		device->alloc_ccb_entry.device = device;
4659 		cam_init_pinfo(&device->send_ccb_entry.pinfo);
4660 		device->send_ccb_entry.device = device;
4661 		device->target = target;
4662 		device->lun_id = lun_id;
4663 		/* Initialize our queues */
4664 		if (camq_init(&device->drvq, 0) != 0) {
4665 			free(device, M_DEVBUF);
4666 			return (NULL);
4667 		}
4668 		if (cam_ccbq_init(&device->ccbq,
4669 				  bus->sim->max_dev_openings) != 0) {
4670 			camq_fini(&device->drvq);
4671 			free(device, M_DEVBUF);
4672 			return (NULL);
4673 		}
4674 		SLIST_INIT(&device->asyncs);
4675 		SLIST_INIT(&device->periphs);
4676 		device->generation = 0;
4677 		device->owner = NULL;
4678 		/*
4679 		 * Take the default quirk entry until we have inquiry
4680 		 * data and can determine a better quirk to use.
4681 		 */
4682 		device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
4683 		bzero(&device->inq_data, sizeof(device->inq_data));
4684 		device->inq_flags = 0;
4685 		device->queue_flags = 0;
4686 		device->serial_num = NULL;
4687 		device->serial_num_len = 0;
4688 		device->qfrozen_cnt = 0;
4689 		device->flags = CAM_DEV_UNCONFIGURED;
4690 		device->tag_delay_count = 0;
4691 		device->refcount = 1;
4692 		callout_handle_init(&device->c_handle);
4693 
4694 		/*
4695 		 * Hold a reference to our parent target so it
4696 		 * will not go away before we do.
4697 		 */
4698 		target->refcount++;
4699 
4700 		/*
4701 		 * XXX should be limited by number of CCBs this bus can
4702 		 * do.
4703 		 */
4704 		xpt_max_ccbs += device->ccbq.devq_openings;
4705 		/* Insertion sort into our target's device list */
4706 		cur_device = TAILQ_FIRST(&target->ed_entries);
4707 		while (cur_device != NULL && cur_device->lun_id < lun_id)
4708 			cur_device = TAILQ_NEXT(cur_device, links);
4709 		if (cur_device != NULL) {
4710 			TAILQ_INSERT_BEFORE(cur_device, device, links);
4711 		} else {
4712 			TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4713 		}
4714 		target->generation++;
4715 	}
4716 	return (device);
4717 }
4718 
4719 static void
4720 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
4721 		   struct cam_ed *device)
4722 {
4723 	int s;
4724 
4725 	s = splcam();
4726 	if ((--device->refcount == 0)
4727 	 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
4728 		struct cam_devq *devq;
4729 
4730 		if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
4731 		 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
4732 			panic("Removing device while still queued for ccbs");
4733 
4734 		if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4735 				untimeout(xpt_release_devq_timeout, device,
4736 					  device->c_handle);
4737 
4738 		TAILQ_REMOVE(&target->ed_entries, device, links);
4739 		target->generation++;
4740 		xpt_max_ccbs -= device->ccbq.devq_openings;
4741 		/* Release our slot in the devq */
4742 		devq = bus->sim->devq;
4743 		cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
4744 		splx(s);
4745 		free(device, M_DEVBUF);
4746 		xpt_release_target(bus, target);
4747 	} else
4748 		splx(s);
4749 }
4750 
4751 static u_int32_t
4752 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4753 {
4754 	int	s;
4755 	int	diff;
4756 	int	result;
4757 	struct	cam_ed *dev;
4758 
4759 	dev = path->device;
4760 	s = splsoftcam();
4761 
4762 	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
4763 	result = cam_ccbq_resize(&dev->ccbq, newopenings);
4764 	if (result == CAM_REQ_CMP && (diff < 0)) {
4765 		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
4766 	}
4767 	/* Adjust the global limit */
4768 	xpt_max_ccbs += diff;
4769 	splx(s);
4770 	return (result);
4771 }
4772 
4773 static struct cam_eb *
4774 xpt_find_bus(path_id_t path_id)
4775 {
4776 	struct cam_eb *bus;
4777 
4778 	for (bus = TAILQ_FIRST(&xpt_busses);
4779 	     bus != NULL;
4780 	     bus = TAILQ_NEXT(bus, links)) {
4781 		if (bus->path_id == path_id) {
4782 			bus->refcount++;
4783 			break;
4784 		}
4785 	}
4786 	return (bus);
4787 }
4788 
4789 static struct cam_et *
4790 xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
4791 {
4792 	struct cam_et *target;
4793 
4794 	for (target = TAILQ_FIRST(&bus->et_entries);
4795 	     target != NULL;
4796 	     target = TAILQ_NEXT(target, links)) {
4797 		if (target->target_id == target_id) {
4798 			target->refcount++;
4799 			break;
4800 		}
4801 	}
4802 	return (target);
4803 }
4804 
4805 static struct cam_ed *
4806 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4807 {
4808 	struct cam_ed *device;
4809 
4810 	for (device = TAILQ_FIRST(&target->ed_entries);
4811 	     device != NULL;
4812 	     device = TAILQ_NEXT(device, links)) {
4813 		if (device->lun_id == lun_id) {
4814 			device->refcount++;
4815 			break;
4816 		}
4817 	}
4818 	return (device);
4819 }
4820 
4821 typedef struct {
4822 	union	ccb *request_ccb;
4823 	struct 	ccb_pathinq *cpi;
4824 	int	pending_count;
4825 } xpt_scan_bus_info;
4826 
4827 /*
4828  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
4829  * As the scan progresses, xpt_scan_bus is used as the
4830  * completion callback function.
4831  */
4832 static void
4833 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
4834 {
4835 	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4836 		  ("xpt_scan_bus\n"));
4837 	switch (request_ccb->ccb_h.func_code) {
4838 	case XPT_SCAN_BUS:
4839 	{
4840 		xpt_scan_bus_info *scan_info;
4841 		union	ccb *work_ccb;
4842 		struct	cam_path *path;
4843 		u_int	i;
4844 		u_int	max_target;
4845 		u_int	initiator_id;
4846 
4847 		/* Find out the characteristics of the bus */
4848 		work_ccb = xpt_alloc_ccb();
4849 		xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
4850 			      request_ccb->ccb_h.pinfo.priority);
4851 		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
4852 		xpt_action(work_ccb);
4853 		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
4854 			request_ccb->ccb_h.status = work_ccb->ccb_h.status;
4855 			xpt_free_ccb(work_ccb);
4856 			xpt_done(request_ccb);
4857 			return;
4858 		}
4859 
4860 		if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
4861 			/*
4862 			 * Can't scan the bus on an adapter that
4863 			 * cannot perform the initiator role.
4864 			 */
4865 			request_ccb->ccb_h.status = CAM_REQ_CMP;
4866 			xpt_free_ccb(work_ccb);
4867 			xpt_done(request_ccb);
4868 			return;
4869 		}
4870 
4871 		/* Save some state for use while we probe for devices */
4872 		scan_info = (xpt_scan_bus_info *)
4873 		    malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
4874 		scan_info->request_ccb = request_ccb;
4875 		scan_info->cpi = &work_ccb->cpi;
4876 
4877 		/* Cache on our stack so we can work asynchronously */
4878 		max_target = scan_info->cpi->max_target;
4879 		initiator_id = scan_info->cpi->initiator_id;
4880 
4881 		/*
4882 		 * Don't count the initiator if the
4883 		 * initiator is addressable.
4884 		 */
4885 		scan_info->pending_count = max_target + 1;
4886 		if (initiator_id <= max_target)
4887 			scan_info->pending_count--;
4888 
4889 		for (i = 0; i <= max_target; i++) {
4890 			cam_status status;
4891 			if (i == initiator_id)
4892 				continue;
4893 
4894 			status = xpt_create_path(&path, xpt_periph,
4895 						 request_ccb->ccb_h.path_id,
4896 						 i, 0);
4897 			if (status != CAM_REQ_CMP) {
4898 				printf("xpt_scan_bus: xpt_create_path failed"
4899 				       " with status %#x, bus scan halted\n",
4900 				       status);
4901 				break;
4902 			}
4903 			work_ccb = xpt_alloc_ccb();
4904 			xpt_setup_ccb(&work_ccb->ccb_h, path,
4905 				      request_ccb->ccb_h.pinfo.priority);
4906 			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
4907 			work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
4908 			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
4909 			work_ccb->crcn.flags = request_ccb->crcn.flags;
4910 #if 0
4911 			printf("xpt_scan_bus: probing %d:%d:%d\n",
4912 				request_ccb->ccb_h.path_id, i, 0);
4913 #endif
4914 			xpt_action(work_ccb);
4915 		}
4916 		break;
4917 	}
4918 	case XPT_SCAN_LUN:
4919 	{
4920 		xpt_scan_bus_info *scan_info;
4921 		path_id_t path_id;
4922 		target_id_t target_id;
4923 		lun_id_t lun_id;
4924 
4925 		/* Reuse the same CCB to query if a device was really found */
4926 		scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
4927 		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
4928 			      request_ccb->ccb_h.pinfo.priority);
4929 		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
4930 
4931 		path_id = request_ccb->ccb_h.path_id;
4932 		target_id = request_ccb->ccb_h.target_id;
4933 		lun_id = request_ccb->ccb_h.target_lun;
4934 		xpt_action(request_ccb);
4935 
4936 #if 0
4937 		printf("xpt_scan_bus: got back probe from %d:%d:%d\n",
4938 			path_id, target_id, lun_id);
4939 #endif
4940 
4941 		if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
4942 			struct cam_ed *device;
4943 			struct cam_et *target;
4944 			int s, phl;
4945 
4946 			/*
4947 			 * If we already probed lun 0 successfully, or
4948 			 * we have additional configured luns on this
4949 			 * target that might have "gone away", go onto
4950 			 * the next lun.
4951 			 */
4952 			target = request_ccb->ccb_h.path->target;
4953 			/*
4954 			 * We may touch devices that we don't
4955 			 * hold references to, so ensure they
4956 			 * don't disappear out from under us.
4957 			 * The target above is referenced by the
4958 			 * path in the request ccb.
4959 			 */
4960 			phl = 0;
4961 			s = splcam();
4962 			device = TAILQ_FIRST(&target->ed_entries);
4963 			if (device != NULL) {
4964 				phl = device->quirk->quirks & CAM_QUIRK_HILUNS;
4965 				if (device->lun_id == 0)
4966 					device = TAILQ_NEXT(device, links);
4967 			}
4968 			splx(s);
4969 			if ((lun_id != 0) || (device != NULL)) {
4970 				if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
4971 					lun_id++;
4972 			}
4973 		} else {
4974 			struct cam_ed *device;
4975 
4976 			device = request_ccb->ccb_h.path->device;
4977 
4978 			if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
4979 				/* Try the next lun */
4980 				if (lun_id < (CAM_SCSI2_MAXLUN-1) ||
4981 				    (device->quirk->quirks & CAM_QUIRK_HILUNS))
4982 					lun_id++;
4983 			}
4984 		}
4985 
4986 		xpt_free_path(request_ccb->ccb_h.path);
4987 
4988 		/* Check Bounds */
4989 		if ((lun_id == request_ccb->ccb_h.target_lun)
4990 		 || lun_id > scan_info->cpi->max_lun) {
4991 			/* We're done */
4992 
4993 			xpt_free_ccb(request_ccb);
4994 			scan_info->pending_count--;
4995 			if (scan_info->pending_count == 0) {
4996 				xpt_free_ccb((union ccb *)scan_info->cpi);
4997 				request_ccb = scan_info->request_ccb;
4998 				free(scan_info, M_TEMP);
4999 				request_ccb->ccb_h.status = CAM_REQ_CMP;
5000 				xpt_done(request_ccb);
5001 			}
5002 		} else {
5003 			/* Try the next device */
5004 			struct cam_path *path;
5005 			cam_status status;
5006 
5007 			path = request_ccb->ccb_h.path;
5008 			status = xpt_create_path(&path, xpt_periph,
5009 						 path_id, target_id, lun_id);
5010 			if (status != CAM_REQ_CMP) {
5011 				printf("xpt_scan_bus: xpt_create_path failed "
5012 				       "with status %#x, halting LUN scan\n",
5013 			 	       status);
5014 				xpt_free_ccb(request_ccb);
5015 				scan_info->pending_count--;
5016 				if (scan_info->pending_count == 0) {
5017 					xpt_free_ccb(
5018 						(union ccb *)scan_info->cpi);
5019 					request_ccb = scan_info->request_ccb;
5020 					free(scan_info, M_TEMP);
5021 					request_ccb->ccb_h.status = CAM_REQ_CMP;
5022 					xpt_done(request_ccb);
5023 					break;
5024 				}
5025 			}
5026 			xpt_setup_ccb(&request_ccb->ccb_h, path,
5027 				      request_ccb->ccb_h.pinfo.priority);
5028 			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5029 			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5030 			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5031 			request_ccb->crcn.flags =
5032 				scan_info->request_ccb->crcn.flags;
5033 #if 0
5034 			xpt_print_path(path);
5035 			printf("xpt_scan bus probing\n");
5036 #endif
5037 			xpt_action(request_ccb);
5038 		}
5039 		break;
5040 	}
5041 	default:
5042 		break;
5043 	}
5044 }
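
/*
 * Kicking off the state machine above: a sketch of how a full bus scan
 * is requested (the completion callback name scan_done is hypothetical):
 */
#if 0
	union ccb *ccb;
	struct cam_path *path;

	if (xpt_create_path(&path, xpt_periph, path_id,
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	    != CAM_REQ_CMP)
		return;
	ccb = xpt_alloc_ccb();
	xpt_setup_ccb(&ccb->ccb_h, path, /*priority*/5);
	ccb->ccb_h.func_code = XPT_SCAN_BUS;
	ccb->ccb_h.cbfcnp = scan_done;
	ccb->crcn.flags = CAM_FLAG_NONE;
	xpt_action(ccb);
#endif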
5045 
5046 typedef enum {
5047 	PROBE_TUR,
5048 	PROBE_INQUIRY,
5049 	PROBE_FULL_INQUIRY,
5050 	PROBE_MODE_SENSE,
5051 	PROBE_SERIAL_NUM,
5052 	PROBE_TUR_FOR_NEGOTIATION
5053 } probe_action;
5054 
5055 typedef enum {
5056 	PROBE_INQUIRY_CKSUM	= 0x01,
5057 	PROBE_SERIAL_CKSUM	= 0x02,
5058 	PROBE_NO_ANNOUNCE	= 0x04
5059 } probe_flags;
5060 
5061 typedef struct {
5062 	TAILQ_HEAD(, ccb_hdr) request_ccbs;
5063 	probe_action	action;
5064 	union ccb	saved_ccb;
5065 	probe_flags	flags;
5066 	MD5_CTX		context;
5067 	u_int8_t	digest[16];
5068 } probe_softc;
5069 
5070 static void
5071 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5072 	     cam_flags flags, union ccb *request_ccb)
5073 {
5074 	struct ccb_pathinq cpi;
5075 	cam_status status;
5076 	struct cam_path *new_path;
5077 	struct cam_periph *old_periph;
5078 	int s;
5079 
5080 	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5081 		  ("xpt_scan_lun\n"));
5082 
5083 	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5084 	cpi.ccb_h.func_code = XPT_PATH_INQ;
5085 	xpt_action((union ccb *)&cpi);
5086 
5087 	if (cpi.ccb_h.status != CAM_REQ_CMP) {
5088 		if (request_ccb != NULL) {
5089 			request_ccb->ccb_h.status = cpi.ccb_h.status;
5090 			xpt_done(request_ccb);
5091 		}
5092 		return;
5093 	}
5094 
5095 	if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5096 		/*
5097 		 * Can't scan the bus on an adapter that
5098 		 * cannot perform the initiator role.
5099 		 */
5100 		if (request_ccb != NULL) {
5101 			request_ccb->ccb_h.status = CAM_REQ_CMP;
5102 			xpt_done(request_ccb);
5103 		}
5104 		return;
5105 	}
5106 
5107 	if (request_ccb == NULL) {
5108 		request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
5109 		if (request_ccb == NULL) {
5110 			xpt_print_path(path);
5111 			printf("xpt_scan_lun: can't allocate CCB, can't "
5112 			       "continue\n");
5113 			return;
5114 		}
5115 		new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
5116 		if (new_path == NULL) {
5117 			xpt_print_path(path);
5118 			printf("xpt_scan_lun: can't allocate path, can't "
5119 			       "continue\n");
5120 			free(request_ccb, M_TEMP);
5121 			return;
5122 		}
5123 		status = xpt_compile_path(new_path, xpt_periph,
5124 					  path->bus->path_id,
5125 					  path->target->target_id,
5126 					  path->device->lun_id);
5127 
5128 		if (status != CAM_REQ_CMP) {
5129 			xpt_print_path(path);
5130 			printf("xpt_scan_lun: can't compile path, can't "
5131 			       "continue\n");
5132 			free(request_ccb, M_TEMP);
5133 			free(new_path, M_TEMP);
5134 			return;
5135 		}
5136 		xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5137 		request_ccb->ccb_h.cbfcnp = xptscandone;
5138 		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5139 		request_ccb->crcn.flags = flags;
5140 	}
5141 
5142 	s = splsoftcam();
5143 	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5144 		probe_softc *softc;
5145 
5146 		softc = (probe_softc *)old_periph->softc;
5147 		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5148 				  periph_links.tqe);
5149 	} else {
5150 		status = cam_periph_alloc(proberegister, NULL, probecleanup,
5151 					  probestart, "probe",
5152 					  CAM_PERIPH_BIO,
5153 					  request_ccb->ccb_h.path, NULL, 0,
5154 					  request_ccb);
5155 
5156 		if (status != CAM_REQ_CMP) {
5157 			xpt_print_path(path);
5158 			printf("xpt_scan_lun: cam_periph_alloc returned an "
5159 			       "error, can't continue probe\n");
5160 			request_ccb->ccb_h.status = status;
5161 			xpt_done(request_ccb);
5162 		}
5163 	}
5164 	splx(s);
5165 }
5166 
5167 static void
5168 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5169 {
5170 	xpt_release_path(done_ccb->ccb_h.path);
5171 	free(done_ccb->ccb_h.path, M_TEMP);
5172 	free(done_ccb, M_TEMP);
5173 }
5174 
5175 static cam_status
5176 proberegister(struct cam_periph *periph, void *arg)
5177 {
5178 	union ccb *request_ccb;	/* CCB representing the probe request */
5179 	probe_softc *softc;
5180 
5181 	request_ccb = (union ccb *)arg;
5182 	if (periph == NULL) {
5183 		printf("proberegister: periph was NULL!!\n");
5184 		return(CAM_REQ_CMP_ERR);
5185 	}
5186 
5187 	if (request_ccb == NULL) {
5188 		printf("proberegister: no probe CCB, "
5189 		       "can't register device\n");
5190 		return(CAM_REQ_CMP_ERR);
5191 	}
5192 
5193 	softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
5194 
5195 	if (softc == NULL) {
5196 		printf("proberegister: Unable to probe new device. "
5197 		       "Unable to allocate softc\n");
5198 		return(CAM_REQ_CMP_ERR);
5199 	}
5200 	TAILQ_INIT(&softc->request_ccbs);
5201 	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5202 			  periph_links.tqe);
5203 	softc->flags = 0;
5204 	periph->softc = softc;
5205 	cam_periph_acquire(periph);
5206 	/*
5207 	 * Ensure we've waited at least a bus settle
5208 	 * delay before attempting to probe the device.
5209 	 * For HBAs that don't do bus resets, this won't make a difference.
5210 	 */
5211 	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5212 				      SCSI_DELAY);
5213 	probeschedule(periph);
5214 	return(CAM_REQ_CMP);
5215 }
5216 
5217 static void
5218 probeschedule(struct cam_periph *periph)
5219 {
5220 	struct ccb_pathinq cpi;
5221 	union ccb *ccb;
5222 	probe_softc *softc;
5223 
5224 	softc = (probe_softc *)periph->softc;
5225 	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5226 
5227 	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5228 	cpi.ccb_h.func_code = XPT_PATH_INQ;
5229 	xpt_action((union ccb *)&cpi);
5230 
5231 	/*
5232 	 * If a device has gone away and another device, or the same one,
5233 	 * is back in the same place, it should have a unit attention
5234 	 * condition pending.  It will not report the unit attention in
5235 	 * response to an inquiry, which may leave invalid transfer
5236 	 * negotiations in effect.  The TUR will reveal the unit attention
5237 	 * condition.  Only send the TUR for lun 0, since some devices
5238 	 * will get confused by commands other than inquiry to non-existent
5239 	 * luns.  If you think a device has gone away start your scan from
5240 	 * luns.  If you think a device has gone away, start your scan from
5241 	 * lun 0.  This will ensure that any bogus transfer settings are
5242 	 *
5243 	 * If we haven't seen the device before and the controller supports
5244 	 * some kind of transfer negotiation, negotiate with the first
5245 	 * sent command if no bus reset was performed at startup.  This
5246 	 * ensures that the device is not confused by transfer negotiation
5247 	 * settings left over by loader or BIOS action.
5248 	 */
5249 	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5250 	 && (ccb->ccb_h.target_lun == 0)) {
5251 		softc->action = PROBE_TUR;
5252 	} else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5253 	      && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5254 		proberequestdefaultnegotiation(periph);
5255 		softc->action = PROBE_INQUIRY;
5256 	} else {
5257 		softc->action = PROBE_INQUIRY;
5258 	}
5259 
5260 	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5261 		softc->flags |= PROBE_NO_ANNOUNCE;
5262 	else
5263 		softc->flags &= ~PROBE_NO_ANNOUNCE;
5264 
5265 	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5266 }
5267 
5268 static void
5269 probestart(struct cam_periph *periph, union ccb *start_ccb)
5270 {
5271 	/* Probe the device that our peripheral driver points to */
5272 	struct ccb_scsiio *csio;
5273 	probe_softc *softc;
5274 
5275 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5276 
5277 	softc = (probe_softc *)periph->softc;
5278 	csio = &start_ccb->csio;
5279 
5280 	switch (softc->action) {
5281 	case PROBE_TUR:
5282 	case PROBE_TUR_FOR_NEGOTIATION:
5283 	{
5284 		scsi_test_unit_ready(csio,
5285 				     /*retries*/4,
5286 				     probedone,
5287 				     MSG_SIMPLE_Q_TAG,
5288 				     SSD_FULL_SIZE,
5289 				     /*timeout*/60000);
5290 		break;
5291 	}
5292 	case PROBE_INQUIRY:
5293 	case PROBE_FULL_INQUIRY:
5294 	{
5295 		u_int inquiry_len;
5296 		struct scsi_inquiry_data *inq_buf;
5297 
5298 		inq_buf = &periph->path->device->inq_data;
5299 		/*
5300 		 * If the device is currently configured, we calculate an
5301 		 * MD5 checksum of the inquiry data, and if the serial number
5302 		 * length is greater than 0, add the serial number data
5303 		 * into the checksum as well.  Once the inquiry and the
5304 		 * serial number check finish, we attempt to figure out
5305 		 * whether we still have the same device.
5306 		 */
5307 		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5308 
5309 			MD5Init(&softc->context);
5310 			MD5Update(&softc->context, (unsigned char *)inq_buf,
5311 				  sizeof(struct scsi_inquiry_data));
5312 			softc->flags |= PROBE_INQUIRY_CKSUM;
5313 			if (periph->path->device->serial_num_len > 0) {
5314 				MD5Update(&softc->context,
5315 					  periph->path->device->serial_num,
5316 					  periph->path->device->serial_num_len);
5317 				softc->flags |= PROBE_SERIAL_CKSUM;
5318 			}
5319 			MD5Final(softc->digest, &softc->context);
5320 		}
5321 
5322 		if (softc->action == PROBE_INQUIRY)
5323 			inquiry_len = SHORT_INQUIRY_LENGTH;
5324 		else
5325 			inquiry_len = inq_buf->additional_length + 4;
5326 
5327 		scsi_inquiry(csio,
5328 			     /*retries*/4,
5329 			     probedone,
5330 			     MSG_SIMPLE_Q_TAG,
5331 			     (u_int8_t *)inq_buf,
5332 			     inquiry_len,
5333 			     /*evpd*/FALSE,
5334 			     /*page_code*/0,
5335 			     SSD_MIN_SIZE,
5336 			     /*timeout*/60 * 1000);
5337 		break;
5338 	}
5339 	case PROBE_MODE_SENSE:
5340 	{
5341 		void  *mode_buf;
5342 		int    mode_buf_len;
5343 
5344 		mode_buf_len = sizeof(struct scsi_mode_header_6)
5345 			     + sizeof(struct scsi_mode_blk_desc)
5346 			     + sizeof(struct scsi_control_page);
5347 		mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
5348 		if (mode_buf != NULL) {
5349 			scsi_mode_sense(csio,
5350 					/*retries*/4,
5351 					probedone,
5352 					MSG_SIMPLE_Q_TAG,
5353 					/*dbd*/FALSE,
5354 					SMS_PAGE_CTRL_CURRENT,
5355 					SMS_CONTROL_MODE_PAGE,
5356 					mode_buf,
5357 					mode_buf_len,
5358 					SSD_FULL_SIZE,
5359 					/*timeout*/60000);
5360 			break;
5361 		}
5362 		xpt_print_path(periph->path);
5363 		printf("Unable to mode sense control page - malloc failure\n");
5364 		softc->action = PROBE_SERIAL_NUM;
5365 		/* FALLTHROUGH */
5366 	}
5367 	case PROBE_SERIAL_NUM:
5368 	{
5369 		struct scsi_vpd_unit_serial_number *serial_buf;
5370 		struct cam_ed* device;
5371 
5372 		serial_buf = NULL;
5373 		device = periph->path->device;
5374 		device->serial_num = NULL;
5375 		device->serial_num_len = 0;
5376 
5377 		if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
5378 			serial_buf = (struct scsi_vpd_unit_serial_number *)
5379 				malloc(sizeof(*serial_buf), M_TEMP, M_NOWAIT);
5380 
5381 		if (serial_buf != NULL) {
5382 			bzero(serial_buf, sizeof(*serial_buf));
5383 			scsi_inquiry(csio,
5384 				     /*retries*/4,
5385 				     probedone,
5386 				     MSG_SIMPLE_Q_TAG,
5387 				     (u_int8_t *)serial_buf,
5388 				     sizeof(*serial_buf),
5389 				     /*evpd*/TRUE,
5390 				     SVPD_UNIT_SERIAL_NUMBER,
5391 				     SSD_MIN_SIZE,
5392 				     /*timeout*/60 * 1000);
5393 			break;
5394 		}
5395 		/*
5396 		 * We'll have to do without; let our probedone
5397 		 * routine finish up for us.
5398 		 */
5399 		start_ccb->csio.data_ptr = NULL;
5400 		probedone(periph, start_ccb);
5401 		return;
5402 	}
5403 	}
5404 	xpt_action(start_ccb);
5405 }
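
/*
 * The digest computed above is later compared against a fresh digest of
 * the newly fetched inquiry/serial data to decide whether the same
 * device is still present.  A sketch of that comparison, assuming the
 * MD5 interface from <sys/md5.h> (variables as in probedone):
 */
#if 0
	MD5_CTX context;
	u_int8_t digest[16];

	MD5Init(&context);
	MD5Update(&context, (unsigned char *)inq_buf,
		  sizeof(struct scsi_inquiry_data));
	MD5Final(digest, &context);
	changed = (bcmp(softc->digest, digest, 16) != 0);
#endif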
5406 
5407 static void
5408 proberequestdefaultnegotiation(struct cam_periph *periph)
5409 {
5410 	struct ccb_trans_settings cts;
5411 
5412 	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5413 	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5414 	cts.flags = CCB_TRANS_USER_SETTINGS;
5415 	xpt_action((union ccb *)&cts);
5416 	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5417 	cts.flags &= ~CCB_TRANS_USER_SETTINGS;
5418 	cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
5419 	xpt_action((union ccb *)&cts);
5420 }
5421 
5422 static void
5423 probedone(struct cam_periph *periph, union ccb *done_ccb)
5424 {
5425 	probe_softc *softc;
5426 	struct cam_path *path;
5427 	u_int32_t  priority;
5428 
5429 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5430 
5431 	softc = (probe_softc *)periph->softc;
5432 	path = done_ccb->ccb_h.path;
5433 	priority = done_ccb->ccb_h.pinfo.priority;
5434 
5435 	switch (softc->action) {
5436 	case PROBE_TUR:
5437 	{
5438 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5439 
5440 			if (cam_periph_error(done_ccb, 0,
5441 					     SF_NO_PRINT, NULL) == ERESTART)
5442 				return;
5443 			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5444 				/* Don't wedge the queue */
5445 				xpt_release_devq(done_ccb->ccb_h.path,
5446 						 /*count*/1,
5447 						 /*run_queue*/TRUE);
5448 		}
5449 		softc->action = PROBE_INQUIRY;
5450 		xpt_release_ccb(done_ccb);
5451 		xpt_schedule(periph, priority);
5452 		return;
5453 	}
5454 	case PROBE_INQUIRY:
5455 	case PROBE_FULL_INQUIRY:
5456 	{
5457 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5458 			struct scsi_inquiry_data *inq_buf;
5459 			u_int8_t periph_qual;
5460 			u_int8_t periph_dtype;
5461 
5462 			path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
5463 			inq_buf = &path->device->inq_data;
5464 
5465 			periph_qual = SID_QUAL(inq_buf);
5466 			periph_dtype = SID_TYPE(inq_buf);
5467 
5468 			if (periph_dtype != T_NODEVICE) {
5469 				switch(periph_qual) {
5470 				case SID_QUAL_LU_CONNECTED:
5471 				{
5472 					u_int8_t alen;
5473 
5474 					/*
5475 					 * We conservatively request only
5476 					 * SHORT_INQUIRY_LENGTH bytes of inquiry
5477 					 * information during our first try
5478 					 * at sending an INQUIRY. If the device
5479 					 * has more information to give,
5480 					 * perform a second request specifying
5481 					 * the amount of information the device
5482 					 * is willing to give.
5483 					 */
5484 					alen = inq_buf->additional_length;
5485 					if (softc->action == PROBE_INQUIRY
5486 					 && alen > (SHORT_INQUIRY_LENGTH - 4)) {
5487 						softc->action =
5488 						    PROBE_FULL_INQUIRY;
5489 						xpt_release_ccb(done_ccb);
5490 						xpt_schedule(periph, priority);
5491 						return;
5492 					}
5493 
5494 					xpt_find_quirk(path->device);
5495 
5496 					if ((inq_buf->flags & SID_CmdQue) != 0)
5497 						softc->action =
5498 						    PROBE_MODE_SENSE;
5499 					else
5500 						softc->action =
5501 						    PROBE_SERIAL_NUM;
5502 
5503 					path->device->flags &=
5504 						~CAM_DEV_UNCONFIGURED;
5505 
5506 					xpt_release_ccb(done_ccb);
5507 					xpt_schedule(periph, priority);
5508 					return;
5509 				}
5510 				default:
5511 					break;
5512 				}
5513 			}
5514 		} else if (cam_periph_error(done_ccb, 0,
5515 					    done_ccb->ccb_h.target_lun > 0
5516 					    ? SF_RETRY_UA|SF_QUIET_IR
5517 					    : SF_RETRY_UA,
5518 					    &softc->saved_ccb) == ERESTART) {
5519 			return;
5520 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5521 			/* Don't wedge the queue */
5522 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5523 					 /*run_queue*/TRUE);
5524 		}
5525 		/*
5526 		 * If we get to this point, we got an error status back
5527 		 * from the inquiry and the error status doesn't require
5528 		 * automatically retrying the command.  Therefore, the
5529 		 * inquiry failed.  If we had inquiry information before
5530 		 * for this device, but this latest inquiry command failed,
5531 		 * the device has probably gone away.  If this device isn't
5532 		 * already marked unconfigured, notify the peripheral
5533 		 * drivers that this device is no more.
5534 		 */
5535 		if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5536 			/* Send the async notification. */
5537 			xpt_async(AC_LOST_DEVICE, path, NULL);
5538 
5539 		xpt_release_ccb(done_ccb);
5540 		break;
5541 	}
5542 	case PROBE_MODE_SENSE:
5543 	{
5544 		struct ccb_scsiio *csio;
5545 		struct scsi_mode_header_6 *mode_hdr;
5546 
5547 		csio = &done_ccb->csio;
5548 		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
5549 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5550 			struct scsi_control_page *page;
5551 			u_int8_t *offset;
5552 
5553 			offset = ((u_int8_t *)&mode_hdr[1])
5554 			    + mode_hdr->blk_desc_len;
5555 			page = (struct scsi_control_page *)offset;
5556 			path->device->queue_flags = page->queue_flags;
5557 		} else if (cam_periph_error(done_ccb, 0,
5558 					    SF_RETRY_UA|SF_NO_PRINT,
5559 					    &softc->saved_ccb) == ERESTART) {
5560 			return;
5561 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5562 			/* Don't wedge the queue */
5563 			xpt_release_devq(done_ccb->ccb_h.path,
5564 					 /*count*/1, /*run_queue*/TRUE);
5565 		}
5566 		xpt_release_ccb(done_ccb);
5567 		free(mode_hdr, M_TEMP);
5568 		softc->action = PROBE_SERIAL_NUM;
5569 		xpt_schedule(periph, priority);
5570 		return;
5571 	}
5572 	case PROBE_SERIAL_NUM:
5573 	{
5574 		struct ccb_scsiio *csio;
5575 		struct scsi_vpd_unit_serial_number *serial_buf;
5576 		u_int32_t  priority;
5577 		int changed;
5578 		int have_serialnum;
5579 
5580 		changed = 1;
5581 		have_serialnum = 0;
5582 		csio = &done_ccb->csio;
5583 		priority = done_ccb->ccb_h.pinfo.priority;
5584 		serial_buf =
5585 		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
5586 
5587 		/* Clean up from previous instance of this device */
5588 		if (path->device->serial_num != NULL) {
5589 			free(path->device->serial_num, M_DEVBUF);
5590 			path->device->serial_num = NULL;
5591 			path->device->serial_num_len = 0;
5592 		}
5593 
5594 		if (serial_buf == NULL) {
5595 			/*
5596 			 * Don't process the command as it was never sent
5597 			 */
5598 		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
5599 			&& (serial_buf->length > 0)) {
5600 
5601 			have_serialnum = 1;
5602 			path->device->serial_num =
5603 				(u_int8_t *)malloc((serial_buf->length + 1),
5604 						   M_DEVBUF, M_NOWAIT);
5605 			if (path->device->serial_num != NULL) {
5606 				bcopy(serial_buf->serial_num,
5607 				      path->device->serial_num,
5608 				      serial_buf->length);
5609 				path->device->serial_num_len =
5610 				    serial_buf->length;
5611 				path->device->serial_num[serial_buf->length]
5612 				    = '\0';
5613 			}
5614 		} else if (cam_periph_error(done_ccb, 0,
5615 					    SF_RETRY_UA|SF_NO_PRINT,
5616 					    &softc->saved_ccb) == ERESTART) {
5617 			return;
5618 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5619 			/* Don't wedge the queue */
5620 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5621 					 /*run_queue*/TRUE);
5622 		}
5623 
5624 		/*
5625 		 * Let's see if we have seen this device before.
5626 		 */
5627 		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
5628 			MD5_CTX context;
5629 			u_int8_t digest[16];
5630 
5631 			MD5Init(&context);
5632 
5633 			MD5Update(&context,
5634 				  (unsigned char *)&path->device->inq_data,
5635 				  sizeof(struct scsi_inquiry_data));
5636 
5637 			if (have_serialnum)
5638 				MD5Update(&context, serial_buf->serial_num,
5639 					  serial_buf->length);
5640 
5641 			MD5Final(digest, &context);
5642 			if (bcmp(softc->digest, digest, 16) == 0)
5643 				changed = 0;
5644 
5645 			/*
5646 			 * XXX Do we need to do a TUR in order to ensure
5647 			 *     that the device really hasn't changed???
5648 			 */
5649 			if ((changed != 0)
5650 			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
5651 				xpt_async(AC_LOST_DEVICE, path, NULL);
5652 		}
5653 		if (serial_buf != NULL)
5654 			free(serial_buf, M_TEMP);
5655 
5656 		if (changed != 0) {
5657 			/*
5658 			 * Now we have all the necessary
5659 			 * information to safely perform transfer
5660 			 * negotiation.  Controllers don't perform
5661 			 * any negotiation or tagged queuing until
5662 			 * after the first XPT_SET_TRAN_SETTINGS ccb is
5663 			 * received.  So, on a new device, just retrieve
5664 			 * the user settings, and set them as the current
5665 			 * settings to set the device up.
5666 			 */
5667 			proberequestdefaultnegotiation(periph);
5668 			xpt_release_ccb(done_ccb);
5669 
5670 			/*
5671 			 * Perform a TUR to allow the controller to
5672 			 * perform any necessary transfer negotiation.
5673 			 */
5674 			softc->action = PROBE_TUR_FOR_NEGOTIATION;
5675 			xpt_schedule(periph, priority);
5676 			return;
5677 		}
5678 		xpt_release_ccb(done_ccb);
5679 		break;
5680 	}
5681 	case PROBE_TUR_FOR_NEGOTIATION:
5682 		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5683 			/* Don't wedge the queue */
5684 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5685 					 /*run_queue*/TRUE);
5686 		}
5687 
5688 		path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5689 
5690 		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
5691 			/* Inform the XPT that a new device has been found */
5692 			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5693 			xpt_action(done_ccb);
5694 
5695 			xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb);
5696 		}
5697 		xpt_release_ccb(done_ccb);
5698 		break;
5699 	}
5700 	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5701 	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
5702 	done_ccb->ccb_h.status = CAM_REQ_CMP;
5703 	xpt_done(done_ccb);
5704 	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
5705 		cam_periph_invalidate(periph);
5706 		cam_periph_release(periph);
5707 	} else {
5708 		probeschedule(periph);
5709 	}
5710 }
5711 
5712 static void
5713 probecleanup(struct cam_periph *periph)
5714 {
5715 	free(periph->softc, M_TEMP);
5716 }
5717 
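/*
 * Match the device's inquiry data against the static quirk table.
 * cam_quirkmatch() is expected to fall through to the table's
 * terminating wildcard entry, so a NULL return can only mean the
 * table is malformed.  For reference, an entry pairs an inquiry
 * pattern with quirk flags and tag limits, roughly (a sketch only;
 * see xpt_quirk_table earlier in this file for real entries):
 *
 *	{
 *		{ T_DIRECT, SIP_MEDIA_FIXED, "VENDOR", "PRODUCT*", "*" },
 *		0, 24, 32	(quirks, mintags, maxtags)
 *	},
 */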
5718 static void
5719 xpt_find_quirk(struct cam_ed *device)
5720 {
5721 	caddr_t	match;
5722 
5723 	match = cam_quirkmatch((caddr_t)&device->inq_data,
5724 			       (caddr_t)xpt_quirk_table,
5725 			       sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
5726 			       sizeof(*xpt_quirk_table), scsi_inquiry_match);
5727 
5728 	if (match == NULL)
5729 		panic("xpt_find_quirk: device didn't match wildcard entry!!");
5730 
5731 	device->quirk = (struct xpt_quirk_entry *)match;
5732 }
5733 
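/*
 * Validate and apply transfer settings for a device.  For immediate
 * (non-async) updates, fill in any fields the caller left invalid
 * from the current settings, clamp the request against controller
 * (XPT_PATH_INQ) and device (inquiry data) capabilities, and handle
 * the queue freeze/resize dance needed when tagged queuing changes
 * state, before finally handing the ccb to the SIM.
 */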
5734 static void
5735 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
5736 			  int async_update)
5737 {
5738 	struct	cam_sim *sim;
5739 	int	qfrozen;
5740 
5741 	sim = cts->ccb_h.path->bus->sim;
5742 	if (async_update == FALSE) {
5743 		struct	scsi_inquiry_data *inq_data;
5744 		struct	ccb_pathinq cpi;
5745 		struct	ccb_trans_settings cur_cts;
5746 
5747 		if (device == NULL) {
5748 			cts->ccb_h.status = CAM_PATH_INVALID;
5749 			xpt_done((union ccb *)cts);
5750 			return;
5751 		}
5752 
5753 		/*
5754 		 * Perform sanity checking against what the
5755 		 * controller and device can do.
5756 		 */
5757 		xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
5758 		cpi.ccb_h.func_code = XPT_PATH_INQ;
5759 		xpt_action((union ccb *)&cpi);
5760 		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
5761 		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5762 		cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
5763 		xpt_action((union ccb *)&cur_cts);
5764 		inq_data = &device->inq_data;
5765 
5766 		/* Fill in any gaps in what the user gave us */
5767 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
5768 			cts->sync_period = cur_cts.sync_period;
5769 		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
5770 			cts->sync_offset = cur_cts.sync_offset;
5771 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
5772 			cts->bus_width = cur_cts.bus_width;
5773 		if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
5774 			cts->flags &= ~CCB_TRANS_DISC_ENB;
5775 			cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
5776 		}
5777 		if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
5778 			cts->flags &= ~CCB_TRANS_TAG_ENB;
5779 			cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
5780 		}
5781 		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
5782 		  && (inq_data->flags & SID_Sync) == 0)
5783 		 || (cpi.hba_inquiry & PI_SDTR_ABLE) == 0) {
5784 			/* Force async */
5785 			cts->sync_period = 0;
5786 			cts->sync_offset = 0;
5787 		}
5788 
5789 		/*
5790 		 * Don't allow DT transmission rates if the
5791 		 * device does not support it.
5792 		 */
5793 		if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
5794 		 && (inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
5795 		 && cts->sync_period <= 0x9)
5796 			cts->sync_period = 0xa;
5797 
5798 		switch (cts->bus_width) {
5799 		case MSG_EXT_WDTR_BUS_32_BIT:
5800 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
5801 			  || (inq_data->flags & SID_WBus32) != 0)
5802 			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
5803 				break;
5804 			/* Fall Through to 16-bit */
5805 		case MSG_EXT_WDTR_BUS_16_BIT:
5806 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
5807 			  || (inq_data->flags & SID_WBus16) != 0)
5808 			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
5809 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
5810 				break;
5811 			}
5812 			/* Fall Through to 8-bit */
5813 		default: /* New bus width?? */
5814 		case MSG_EXT_WDTR_BUS_8_BIT:
5815 			/* All targets can do this */
5816 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
5817 			break;
5818 		}
5819 
5820 		if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
5821 			/*
5822 			 * Can't tag queue without disconnection.
5823 			 */
5824 			cts->flags &= ~CCB_TRANS_TAG_ENB;
5825 			cts->valid |= CCB_TRANS_TQ_VALID;
5826 		}
5827 
5828 		if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
5829 		 || (inq_data->flags & SID_CmdQue) == 0
5830 		 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
5831 		 || (device->quirk->mintags == 0)) {
5832 			/*
5833 			 * Can't tag on hardware that doesn't support tags,
5834 			 * doesn't have them enabled, or has broken tag support.
5835 			 */
5836 			cts->flags &= ~CCB_TRANS_TAG_ENB;
5837 		}
5838 	}
5839 
5840 	qfrozen = FALSE;
5841 	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0
5842 	 && (async_update == FALSE)) {
5843 		int device_tagenb;
5844 
5845 		/*
5846 		 * If we are transitioning from tags to no-tags or
5847 		 * vice-versa, we need to carefully freeze and restart
5848 		 * the queue so that we don't overlap tagged and non-tagged
5849 		 * commands.  We also temporarily stop tags if there is
5850 		 * a change in transfer negotiation settings to allow
5851 		 * "tag-less" negotiation.
5852 		 */
5853 		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5854 		 || (device->inq_flags & SID_CmdQue) != 0)
5855 			device_tagenb = TRUE;
5856 		else
5857 			device_tagenb = FALSE;
5858 
5859 		if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
5860 		  && device_tagenb == FALSE)
5861 		 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
5862 		  && device_tagenb == TRUE)) {
5863 
5864 			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
5865 				/*
5866 				 * Delay change to use tags until after a
5867 				 * few commands have gone to this device so
5868 				 * the controller has time to perform transfer
5869 				 * negotiations without tagged messages getting
5870 				 * in the way.
5871 				 */
5872 				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
5873 				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
5874 			} else {
5875 				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
5876 				qfrozen = TRUE;
5877 				device->inq_flags &= ~SID_CmdQue;
5878 				xpt_dev_ccbq_resize(cts->ccb_h.path,
5879 						    sim->max_dev_openings);
5880 				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5881 				device->tag_delay_count = 0;
5882 			}
5883 		}
5884 	}
5885 
5886 	if (async_update == FALSE) {
5887 		/*
5888 		 * If we are currently performing tagged transactions to
5889 		 * this device and want to change its negotiation parameters,
5890 		 * go non-tagged for a bit to give the controller a chance to
5891 		 * negotiate unhampered by tag messages.
5892 		 */
5893 		if ((device->inq_flags & SID_CmdQue) != 0
5894 		 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
5895 				   CCB_TRANS_SYNC_OFFSET_VALID|
5896 				   CCB_TRANS_BUS_WIDTH_VALID)) != 0)
5897 			xpt_toggle_tags(cts->ccb_h.path);
5898 
5899 		(*(sim->sim_action))(sim, (union ccb *)cts);
5900 	}
5901 
5902 	if (qfrozen) {
5903 		struct ccb_relsim crs;
5904 
5905 		xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
5906 			      /*priority*/1);
5907 		crs.ccb_h.func_code = XPT_REL_SIMQ;
5908 		crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5909 		crs.openings
5910 		    = crs.release_timeout
5911 		    = crs.qfrozen_cnt
5912 		    = 0;
5913 		xpt_action((union ccb *)&crs);
5914 	}
5915 }
5916 
5917 static void
5918 xpt_toggle_tags(struct cam_path *path)
5919 {
5920 	struct cam_ed *dev;
5921 
5922 	/*
5923 	 * Give controllers a chance to renegotiate
5924 	 * before starting tag operations.  We
5925 	 * "toggle" tagged queuing off then on
5926 	 * which causes the tag enable command delay
5927 	 * counter to come into effect.
5928 	 */
5929 	dev = path->device;
5930 	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5931 	 || ((dev->inq_flags & SID_CmdQue) != 0
5932 	  && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
5933 		struct ccb_trans_settings cts;
5934 
5935 		xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
5936 		cts.flags = 0;
5937 		cts.valid = CCB_TRANS_TQ_VALID;
5938 		xpt_set_transfer_settings(&cts, path->device,
5939 					  /*async_update*/TRUE);
5940 		cts.flags = CCB_TRANS_TAG_ENB;
5941 		xpt_set_transfer_settings(&cts, path->device,
5942 					  /*async_update*/TRUE);
5943 	}
5944 }
5945 
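/*
 * Enable tagged queuing on a device once its tag delay count expires:
 * freeze the device queue, grow the ccb queue to the lesser of the
 * quirk entry's maxtags and the SIM's tagged openings, and arrange
 * for the freeze to be released once outstanding commands drain.
 */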
5946 static void
5947 xpt_start_tags(struct cam_path *path)
5948 {
5949 	struct ccb_relsim crs;
5950 	struct cam_ed *device;
5951 	struct cam_sim *sim;
5952 	int    newopenings;
5953 
5954 	device = path->device;
5955 	sim = path->bus->sim;
5956 	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5957 	xpt_freeze_devq(path, /*count*/1);
5958 	device->inq_flags |= SID_CmdQue;
5959 	newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings);
5960 	xpt_dev_ccbq_resize(path, newopenings);
5961 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
5962 	crs.ccb_h.func_code = XPT_REL_SIMQ;
5963 	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5964 	crs.openings
5965 	    = crs.release_timeout
5966 	    = crs.qfrozen_cnt
5967 	    = 0;
5968 	xpt_action((union ccb *)&crs);
5969 }
5970 
5971 static int busses_to_config;
5972 static int busses_to_reset;
5973 
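/*
 * First configuration pass: count the busses that need configuring,
 * and of those, the busses that both tolerate a reset (PIM_NOBUSRESET
 * clear) and can negotiate transfers.  The latter determine whether
 * xpt_config() prints the bus settle delay message.
 */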
5974 static int
5975 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
5976 {
5977 	if (bus->path_id != CAM_XPT_PATH_ID) {
5978 		struct cam_path path;
5979 		struct ccb_pathinq cpi;
5980 		int can_negotiate;
5981 
5982 		busses_to_config++;
5983 		xpt_compile_path(&path, NULL, bus->path_id,
5984 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
5985 		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
5986 		cpi.ccb_h.func_code = XPT_PATH_INQ;
5987 		xpt_action((union ccb *)&cpi);
5988 		can_negotiate = cpi.hba_inquiry;
5989 		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
5990 		if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
5991 		 && can_negotiate)
5992 			busses_to_reset++;
5993 		xpt_release_path(&path);
5994 	}
5995 
5996 	return(1);
5997 }
5998 
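/*
 * Second configuration pass: for each real bus, perform a path
 * inquiry and, when the controller can negotiate and tolerates it,
 * issue a bus reset.  Either way the work ccb funnels into
 * xpt_finishconfig() looking like a completed XPT_RESET_BUS.
 */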
5999 static int
6000 xptconfigfunc(struct cam_eb *bus, void *arg)
6001 {
6002 	struct	cam_path *path;
6003 	union	ccb *work_ccb;
6004 
6005 	if (bus->path_id != CAM_XPT_PATH_ID) {
6006 		cam_status status;
6007 		int can_negotiate;
6008 
6009 		work_ccb = xpt_alloc_ccb();
6010 		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6011 					      CAM_TARGET_WILDCARD,
6012 					      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
6013 			printf("xptconfigfunc: xpt_create_path failed with "
6014 			       "status %#x for bus %d\n", status, bus->path_id);
6015 			printf("xptconfigfunc: halting bus configuration\n");
6016 			xpt_free_ccb(work_ccb);
6017 			busses_to_config--;
6018 			xpt_finishconfig(xpt_periph, NULL);
6019 			return(0);
6020 		}
6021 		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6022 		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
6023 		xpt_action(work_ccb);
6024 		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
6025 			printf("xptconfigfunc: CPI failed on bus %d "
6026 			       "with status %d\n", bus->path_id,
6027 			       work_ccb->ccb_h.status);
6028 			xpt_finishconfig(xpt_periph, work_ccb);
6029 			return(1);
6030 		}
6031 
6032 		can_negotiate = work_ccb->cpi.hba_inquiry;
6033 		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6034 		if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
6035 		 && (can_negotiate != 0)) {
6036 			xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6037 			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6038 			work_ccb->ccb_h.cbfcnp = NULL;
6039 			CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
6040 				  ("Resetting Bus\n"));
6041 			xpt_action(work_ccb);
6042 			xpt_finishconfig(xpt_periph, work_ccb);
6043 		} else {
6044 			/* Act as though we performed a successful BUS RESET */
6045 			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6046 			xpt_finishconfig(xpt_periph, work_ccb);
6047 		}
6048 	}
6049 
6050 	return(1);
6051 }
6052 
6053 static void
6054 xpt_config(void *arg)
6055 {
6056 	/* Now that interrupts are enabled, go find our devices */
6057 
6058 #ifdef CAMDEBUG
6059 	/* Setup debugging flags and path */
6060 #ifdef CAM_DEBUG_FLAGS
6061 	cam_dflags = CAM_DEBUG_FLAGS;
6062 #else /* !CAM_DEBUG_FLAGS */
6063 	cam_dflags = CAM_DEBUG_NONE;
6064 #endif /* CAM_DEBUG_FLAGS */
6065 #ifdef CAM_DEBUG_BUS
6066 	if (cam_dflags != CAM_DEBUG_NONE) {
6067 		if (xpt_create_path(&cam_dpath, xpt_periph,
6068 				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
6069 				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
6070 			printf("xpt_config: xpt_create_path() failed for debug"
6071 			       " target %d:%d:%d, debugging disabled\n",
6072 			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
6073 			cam_dflags = CAM_DEBUG_NONE;
6074 		}
6075 	} else
6076 		cam_dpath = NULL;
6077 #else /* !CAM_DEBUG_BUS */
6078 	cam_dpath = NULL;
6079 #endif /* CAM_DEBUG_BUS */
6080 #endif /* CAMDEBUG */
6081 
6082 	/*
6083 	 * Scan all installed busses.
6084 	 */
6085 	xpt_for_all_busses(xptconfigbuscountfunc, NULL);
6086 
6087 	if (busses_to_config == 0) {
6088 		/* Call manually because we don't have any busses */
6089 		xpt_finishconfig(xpt_periph, NULL);
6090 	} else  {
6091 		if (busses_to_reset > 0 && SCSI_DELAY >= 2000) {
6092 			printf("Waiting %d seconds for SCSI "
6093 			       "devices to settle\n", SCSI_DELAY/1000);
6094 		}
6095 		xpt_for_all_busses(xptconfigfunc, NULL);
6096 	}
6097 }
6098 
6099 /*
6100  * If the given device only has one peripheral attached to it, and if that
6101  * peripheral is the passthrough driver, announce it.  This ensures that the
6102  * user sees some sort of announcement for every peripheral in their system.
6103  */
6104 static int
6105 xptpassannouncefunc(struct cam_ed *device, void *arg)
6106 {
6107 	struct cam_periph *periph;
6108 	int i;
6109 
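	/* Count the peripheral instances attached to this device. */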
6110 	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
6111 	     periph = SLIST_NEXT(periph, periph_links), i++);
6112 
6113 	periph = SLIST_FIRST(&device->periphs);
6114 	if ((i == 1)
6115 	 && (strncmp(periph->periph_name, "pass", 4) == 0))
6116 		xpt_announce_periph(periph, NULL);
6117 
6118 	return(1);
6119 }
6120 
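/*
 * Completion chain for initial bus configuration: a completed bus
 * reset is converted into a bus scan, and a completed scan retires
 * the bus.  When the last bus retires, register the peripheral
 * drivers, announce pass-only devices, and release the boot-time
 * config_intrhook.
 */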
6121 static void
6122 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
6123 {
6124 	struct	periph_driver **p_drv;
6125 	int	i;
6126 
6127 	if (done_ccb != NULL) {
6128 		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
6129 			  ("xpt_finishconfig\n"));
6130 		switch(done_ccb->ccb_h.func_code) {
6131 		case XPT_RESET_BUS:
6132 			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
6133 				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
6134 				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
6135 				xpt_action(done_ccb);
6136 				return;
6137 			}
6138 			/* FALLTHROUGH */
6139 		case XPT_SCAN_BUS:
6140 		default:
6141 			xpt_free_path(done_ccb->ccb_h.path);
6142 			busses_to_config--;
6143 			break;
6144 		}
6145 	}
6146 
6147 	if (busses_to_config == 0) {
6148 		/* Register all the peripheral drivers */
6149 		/* XXX This will have to change when we have loadable modules */
6150 		p_drv = (struct periph_driver **)periphdriver_set.ls_items;
6151 		for (i = 0; p_drv[i] != NULL; i++) {
6152 			(*p_drv[i]->init)();
6153 		}
6154 
6155 		/*
6156 		 * Check for devices with no "standard" peripheral driver
6157 		 * attached.  For any devices like that, announce the
6158 		 * passthrough driver so the user will see something.
6159 		 */
6160 		xpt_for_all_devices(xptpassannouncefunc, NULL);
6161 
6162 		/* Release our hook so that the boot can continue. */
6163 		config_intrhook_disestablish(xpt_config_hook);
6164 		free(xpt_config_hook, M_TEMP);
6165 		xpt_config_hook = NULL;
6166 	}
6167 	if (done_ccb != NULL)
6168 		xpt_free_ccb(done_ccb);
6169 }
6170 
6171 static void
6172 xptaction(struct cam_sim *sim, union ccb *work_ccb)
6173 {
6174 	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
6175 
6176 	switch (work_ccb->ccb_h.func_code) {
6177 	/* Common cases first */
6178 	case XPT_PATH_INQ:		/* Path routing inquiry */
6179 	{
6180 		struct ccb_pathinq *cpi;
6181 
6182 		cpi = &work_ccb->cpi;
6183 		cpi->version_num = 1; /* XXX??? */
6184 		cpi->hba_inquiry = 0;
6185 		cpi->target_sprt = 0;
6186 		cpi->hba_misc = 0;
6187 		cpi->hba_eng_cnt = 0;
6188 		cpi->max_target = 0;
6189 		cpi->max_lun = 0;
6190 		cpi->initiator_id = 0;
6191 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
6192 		strncpy(cpi->hba_vid, "", HBA_IDLEN);
6193 		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
6194 		cpi->unit_number = sim->unit_number;
6195 		cpi->bus_id = sim->bus_id;
6196 		cpi->base_transfer_speed = 0;
6197 		cpi->ccb_h.status = CAM_REQ_CMP;
6198 		xpt_done(work_ccb);
6199 		break;
6200 	}
6201 	default:
6202 		work_ccb->ccb_h.status = CAM_REQ_INVALID;
6203 		xpt_done(work_ccb);
6204 		break;
6205 	}
6206 }
6207 
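/*
 * For reference, clients interrogate a path with the mirror image of
 * the XPT_PATH_INQ service above.  A minimal sketch, following the
 * same pattern xptconfigbuscountfunc() uses on an existing cam_path:
 *
 *	struct ccb_pathinq cpi;
 *
 *	xpt_setup_ccb(&cpi.ccb_h, path, 1);
 *	cpi.ccb_h.func_code = XPT_PATH_INQ;
 *	xpt_action((union ccb *)&cpi);
 *	if (cpi.ccb_h.status == CAM_REQ_CMP)
 *		consult cpi.hba_inquiry, cpi.max_target, ...
 */
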
6208 /*
6209  * The xpt as a "controller" has no interrupt sources, so polling
6210  * is a no-op.
6211  */
6212 static void
6213 xptpoll(struct cam_sim *sim)
6214 {
6215 }
6216 
6217 /*
6218  * These should only be called by the machine interrupt dispatch
6219  * routines, so their prototypes live here instead of in a header.
6220  */
6221 
6222 static void
6223 swi_camnet(void)
6224 {
6225 	camisr(&cam_netq);
6226 }
6227 
6228 static void
6229 swi_cambio(void)
6230 {
6231 	camisr(&cam_bioq);
6232 }
6233 
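/*
 * Drain a completion queue at splcam.  For each finished ccb: recycle
 * its high power slot if it held one, update device and SIM queue
 * accounting, restart any device or SIM queues the completion lets
 * thaw, and finally invoke the peripheral's completion callback.
 */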
6234 static void
6235 camisr(cam_isrq_t *queue)
6236 {
6237 	int	s;
6238 	struct	ccb_hdr *ccb_h;
6239 
6240 	s = splcam();
6241 	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
6242 		int	runq;
6243 
6244 		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
6245 		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
6246 		splx(s);
6247 
6248 		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
6249 			  ("camisr\n"));
6250 
6251 		runq = FALSE;
6252 
6253 		if (ccb_h->flags & CAM_HIGH_POWER) {
6254 			struct highpowerlist	*hphead;
6255 			struct cam_ed		*device;
6256 			union ccb		*send_ccb;
6257 
6258 			hphead = &highpowerq;
6259 
6260 			send_ccb = (union ccb *)STAILQ_FIRST(hphead);
6261 
6262 			/*
6263 			 * This command is done, so free up its high power slot.
6264 			 */
6265 			num_highpower++;
6266 
6267 			/*
6268 			 * Any high powered commands queued up?
6269 			 */
6270 			if (send_ccb != NULL) {
6271 				device = send_ccb->ccb_h.path->device;
6272 
6273 				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
6274 
6275 				xpt_release_devq(send_ccb->ccb_h.path,
6276 						 /*count*/1, /*runqueue*/TRUE);
6277 			}
6278 		}
6279 		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
6280 			struct cam_ed *dev;
6281 
6282 			dev = ccb_h->path->device;
6283 
6284 			s = splcam();
6285 			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
6286 
6287 			ccb_h->path->bus->sim->devq->send_active--;
6288 			ccb_h->path->bus->sim->devq->send_openings++;
6289 			splx(s);
6290 
6291 			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
6292 			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
6293 			  && (dev->ccbq.dev_active == 0))) {
6294 
6295 				xpt_release_devq(ccb_h->path, /*count*/1,
6296 						 /*run_queue*/TRUE);
6297 			}
6298 
6299 			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6300 			 && (--dev->tag_delay_count == 0))
6301 				xpt_start_tags(ccb_h->path);
6302 
6303 			if ((dev->ccbq.queue.entries > 0)
6304 			 && (dev->qfrozen_cnt == 0)
6305 			 && (device_is_send_queued(dev) == 0)) {
6306 				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
6307 							      dev);
6308 			}
6309 		}
6310 
6311 		if (ccb_h->status & CAM_RELEASE_SIMQ) {
6312 			xpt_release_simq(ccb_h->path->bus->sim,
6313 					 /*run_queue*/TRUE);
6314 			ccb_h->status &= ~CAM_RELEASE_SIMQ;
6315 			runq = FALSE;
6316 		}
6317 
6318 		if ((ccb_h->flags & CAM_DEV_QFRZDIS)
6319 		 && (ccb_h->status & CAM_DEV_QFRZN)) {
6320 			xpt_release_devq(ccb_h->path, /*count*/1,
6321 					 /*run_queue*/TRUE);
6322 			ccb_h->status &= ~CAM_DEV_QFRZN;
6323 		} else if (runq) {
6324 			xpt_run_dev_sendq(ccb_h->path->bus);
6325 		}
6326 
6327 		/* Call the peripheral driver's callback */
6328 		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
6329 
6330 		/* Raise IPL for the while-loop test */
6331 		s = splcam();
6332 	}
6333 	splx(s);
6334 }
6335