xref: /freebsd/sys/cam/cam_xpt.c (revision 3fe92528afe8313fecf48822dde74bad5e380f48)
1 /*-
2  * Implementation of the Common Access Method Transport (XPT) layer.
3  *
4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/bus.h>
35 #include <sys/systm.h>
36 #include <sys/types.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/time.h>
40 #include <sys/conf.h>
41 #include <sys/fcntl.h>
42 #include <sys/md5.h>
43 #include <sys/interrupt.h>
44 #include <sys/sbuf.h>
45 
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/sysctl.h>
49 
50 #ifdef PC98
51 #include <pc98/pc98/pc98_machdep.h>	/* geometry translation */
52 #endif
53 
54 #include <cam/cam.h>
55 #include <cam/cam_ccb.h>
56 #include <cam/cam_periph.h>
57 #include <cam/cam_sim.h>
58 #include <cam/cam_xpt.h>
59 #include <cam/cam_xpt_sim.h>
60 #include <cam/cam_xpt_periph.h>
61 #include <cam/cam_debug.h>
62 
63 #include <cam/scsi/scsi_all.h>
64 #include <cam/scsi/scsi_message.h>
65 #include <cam/scsi/scsi_pass.h>
66 #include "opt_cam.h"
67 
68 /* Datastructures internal to the xpt layer */
69 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
70 
71 /*
72  * Definition of an async handler callback block.  These are used to add
73  * SIMs and peripherals to the async callback lists.
74  */
75 struct async_node {
76 	SLIST_ENTRY(async_node)	links;
77 	u_int32_t	event_enable;	/* Async Event enables */
78 	void		(*callback)(void *arg, u_int32_t code,
79 				    struct cam_path *path, void *args);
80 	void		*callback_arg;
81 };
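#if 0
/*
 * Hypothetical sketch (not part of this file): the shape of a callback
 * as stored in an async_node.  Consumers register one via an
 * XPT_SASYNC_CB CCB; "example_async_cb" and its printf are illustrative
 * only.
 */
static void
example_async_cb(void *arg, u_int32_t code, struct cam_path *path,
		 void *async_arg)
{
	if (code == AC_LOST_DEVICE)
		printf("example: device on this path went away\n");
}
#endif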
82 
83 SLIST_HEAD(async_list, async_node);
84 SLIST_HEAD(periph_list, cam_periph);
85 static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
86 
87 /*
88  * This is the maximum number of high powered commands (e.g. start unit)
89  * that can be outstanding at a particular time.
90  */
91 #ifndef CAM_MAX_HIGHPOWER
92 #define CAM_MAX_HIGHPOWER  4
93 #endif
94 
95 /* number of high powered commands that can go through right now */
96 static int num_highpower = CAM_MAX_HIGHPOWER;
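/*
 * Illustrative override (assumption: the option is fed through to the
 * build, e.g. "options CAM_MAX_HIGHPOWER=8" in a kernel config file or
 * an equivalent -DCAM_MAX_HIGHPOWER=8 compile flag); the #ifndef guard
 * above only supplies the default of 4.
 */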
97 
98 /*
99  * Structure for queueing a device in a run queue.
100  * There is one run queue for allocating new ccbs,
101  * and another for sending ccbs to the controller.
102  */
103 struct cam_ed_qinfo {
104 	cam_pinfo pinfo;
105 	struct	  cam_ed *device;
106 };
107 
108 /*
109  * The CAM EDT (Existing Device Table) contains the device information for
110  * all devices for all busses in the system.  The table contains a
111  * cam_ed structure for each device on each bus.
112  */
113 struct cam_ed {
114 	TAILQ_ENTRY(cam_ed) links;
115 	struct	cam_ed_qinfo alloc_ccb_entry;
116 	struct	cam_ed_qinfo send_ccb_entry;
117 	struct	cam_et	 *target;
118 	lun_id_t	 lun_id;
119 	struct	camq drvq;		/*
120 					 * Queue of type drivers wanting to do
121 					 * work on this device.
122 					 */
123 	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
124 	struct	async_list asyncs;	/* Async callback info for this B/T/L */
125 	struct	periph_list periphs;	/* All attached devices */
126 	u_int	generation;		/* Generation number */
127 	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
128 	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
129 					/* Storage for the inquiry data */
130 #ifdef CAM_NEW_TRAN_CODE
131 	cam_proto	 protocol;
132 	u_int		 protocol_version;
133 	cam_xport	 transport;
134 	u_int		 transport_version;
135 #endif /* CAM_NEW_TRAN_CODE */
136 	struct		 scsi_inquiry_data inq_data;
137 	u_int8_t	 inq_flags;	/*
138 					 * Current settings for inquiry flags.
139 					 * This allows us to override settings
140 					 * like disconnection and tagged
141 					 * queuing for a device.
142 					 */
143 	u_int8_t	 queue_flags;	/* Queue flags from the control page */
144 	u_int8_t	 serial_num_len;
145 	u_int8_t	*serial_num;
146 	u_int32_t	 qfrozen_cnt;
147 	u_int32_t	 flags;
148 #define CAM_DEV_UNCONFIGURED	 	0x01
149 #define CAM_DEV_REL_TIMEOUT_PENDING	0x02
150 #define CAM_DEV_REL_ON_COMPLETE		0x04
151 #define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
152 #define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
153 #define CAM_DEV_TAG_AFTER_COUNT		0x20
154 #define CAM_DEV_INQUIRY_DATA_VALID	0x40
155 	u_int32_t	 tag_delay_count;
156 #define	CAM_TAG_DELAY_COUNT		5
157 	u_int32_t	 tag_saved_openings;
158 	u_int32_t	 refcount;
159 	struct		 callout_handle c_handle;
160 };
161 
162 /*
163  * Each target is represented by an ET (Existing Target).  These
164  * entries are created when a target is successfully probed with an
165  * identify, and removed when a device fails to respond after a number
166  * of retries, or a bus rescan finds the device missing.
167  */
168 struct cam_et {
169 	TAILQ_HEAD(, cam_ed) ed_entries;
170 	TAILQ_ENTRY(cam_et) links;
171 	struct	cam_eb	*bus;
172 	target_id_t	target_id;
173 	u_int32_t	refcount;
174 	u_int		generation;
175 	struct		timeval last_reset;
176 };
177 
178 /*
179  * Each bus is represented by an EB (Existing Bus).  These entries
180  * are created by calls to xpt_bus_register and deleted by calls to
181  * xpt_bus_deregister.
182  */
183 struct cam_eb {
184 	TAILQ_HEAD(, cam_et) et_entries;
185 	TAILQ_ENTRY(cam_eb)  links;
186 	path_id_t	     path_id;
187 	struct cam_sim	     *sim;
188 	struct timeval	     last_reset;
189 	u_int32_t	     flags;
190 #define	CAM_EB_RUNQ_SCHEDULED	0x01
191 	u_int32_t	     refcount;
192 	u_int		     generation;
193 };
194 
195 struct cam_path {
196 	struct cam_periph *periph;
197 	struct cam_eb	  *bus;
198 	struct cam_et	  *target;
199 	struct cam_ed	  *device;
200 };
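#if 0
/*
 * Illustrative only: a cam_path pins one node at each level of the EDT
 * (bus -> target -> device), so a holder can name the full B/T/L nexus.
 * "example_print_nexus" is a hypothetical helper.
 */
static void
example_print_nexus(struct cam_path *path)
{
	printf("bus %u target %u lun %u\n",
	       (unsigned)path->bus->path_id,
	       (unsigned)path->target->target_id,
	       (unsigned)path->device->lun_id);
}
#endif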
201 
202 struct xpt_quirk_entry {
203 	struct scsi_inquiry_pattern inq_pat;
204 	u_int8_t quirks;
205 #define	CAM_QUIRK_NOLUNS	0x01
206 #define	CAM_QUIRK_NOSERIAL	0x02
207 #define	CAM_QUIRK_HILUNS	0x04
208 #define	CAM_QUIRK_NOHILUNS	0x08
209 	u_int mintags;
210 	u_int maxtags;
211 };
212 
213 static int cam_srch_hi = 0;
214 TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
215 static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
216 SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
217     sysctl_cam_search_luns, "I",
218     "allow search above LUN 7 for SCSI3 and greater devices");
219 
220 #define	CAM_SCSI2_MAXLUN	8
221 /*
222  * If we're not quirked to search only the first 8 luns (NOHILUNS),
223  * and we are either quirked to search above lun 8 (HILUNS),
224  * or we're > SCSI-2 and we've enabled hilun searching,
225  * or we're > SCSI-2 and the last lun probed was a success,
226  * we can look for luns above lun 8.
227  */
228 #define	CAN_SRCH_HI_SPARSE(dv)				\
229   (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) 	\
230   && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)		\
231   || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))
232 
233 #define	CAN_SRCH_HI_DENSE(dv)				\
234   (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) 	\
235   && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)		\
236   || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
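#if 0
/*
 * Hypothetical sketch of how a LUN scan might consult the two macros
 * above (the real policy lives in the scan code later in this file):
 * past LUN 7, dense searching applies while the previous LUN answered,
 * sparse searching otherwise.  "example_try_next_lun" is illustrative
 * only.
 */
static int
example_try_next_lun(struct cam_ed *dv, lun_id_t lun, int got_lun)
{
	if (lun + 1 < CAM_SCSI2_MAXLUN)
		return (1);
	return (got_lun ? CAN_SRCH_HI_DENSE(dv) : CAN_SRCH_HI_SPARSE(dv));
}
#endif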
237 
238 typedef enum {
239 	XPT_FLAG_OPEN		= 0x01
240 } xpt_flags;
241 
242 struct xpt_softc {
243 	xpt_flags	flags;
244 	u_int32_t	generation;
245 };
246 
247 static const char quantum[] = "QUANTUM";
248 static const char sony[] = "SONY";
249 static const char west_digital[] = "WDIGTL";
250 static const char samsung[] = "SAMSUNG";
251 static const char seagate[] = "SEAGATE";
252 static const char microp[] = "MICROP";
253 
254 static struct xpt_quirk_entry xpt_quirk_table[] =
255 {
256 	{
257 		/* Reports QUEUE FULL for temporary resource shortages */
258 		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
259 		/*quirks*/0, /*mintags*/24, /*maxtags*/32
260 	},
261 	{
262 		/* Reports QUEUE FULL for temporary resource shortages */
263 		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
264 		/*quirks*/0, /*mintags*/24, /*maxtags*/32
265 	},
266 	{
267 		/* Reports QUEUE FULL for temporary resource shortages */
268 		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
269 		/*quirks*/0, /*mintags*/24, /*maxtags*/32
270 	},
271 	{
272 		/* Broken tagged queuing drive */
273 		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
274 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
275 	},
276 	{
277 		/* Broken tagged queuing drive */
278 		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
279 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
280 	},
281 	{
282 		/* Broken tagged queuing drive */
283 		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
284 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
285 	},
286 	{
287 		/*
288 		 * Unfortunately, the Quantum Atlas III has the same
289 		 * problem as the Atlas II drives above.
290 		 * Reported by: "Johan Granlund" <johan@granlund.nu>
291 		 *
292 		 * For future reference, the drive with the problem was:
293 		 * QUANTUM QM39100TD-SW N1B0
294 		 *
295 		 * It's possible that Quantum will fix the problem in later
296 		 * firmware revisions.  If that happens, the quirk entry
297 		 * will need to be made specific to the firmware revisions
298 		 * with the problem.
299 		 *
300 		 */
301 		/* Reports QUEUE FULL for temporary resource shortages */
302 		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
303 		/*quirks*/0, /*mintags*/24, /*maxtags*/32
304 	},
305 	{
306 		/*
307 		 * 18 Gig Atlas III, same problem as the 9G version.
308 		 * Reported by: Andre Albsmeier
309 		 *		<andre.albsmeier@mchp.siemens.de>
310 		 *
311 		 * For future reference, the drive with the problem was:
312 		 * QUANTUM QM318000TD-S N491
313 		 */
314 		/* Reports QUEUE FULL for temporary resource shortages */
315 		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
316 		/*quirks*/0, /*mintags*/24, /*maxtags*/32
317 	},
318 	{
319 		/*
320 		 * Broken tagged queuing drive
321 		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
322 		 *         and: Martin Renters <martin@tdc.on.ca>
323 		 */
324 		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
325 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
326 	},
327 		/*
328 		 * The Seagate Medalist Pro drives have very poor write
329 		 * performance with anything more than 2 tags.
330 		 *
331 		 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
332 		 * Drive:  <SEAGATE ST36530N 1444>
333 		 *
334 		 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
335 		 * Drive:  <SEAGATE ST34520W 1281>
336 		 *
337 		 * No one has actually reported that the 9G version
338 		 * (ST39140*) of the Medalist Pro has the same problem, but
339 		 * we're assuming that it does because the 4G and 6.5G
340 		 * versions of the drive are broken.
341 		 */
342 	{
343 		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
344 		/*quirks*/0, /*mintags*/2, /*maxtags*/2
345 	},
346 	{
347 		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
348 		/*quirks*/0, /*mintags*/2, /*maxtags*/2
349 	},
350 	{
351 		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
352 		/*quirks*/0, /*mintags*/2, /*maxtags*/2
353 	},
354 	{
355 		/*
356 		 * Slow when tagged queueing is enabled.  Write performance
357 		 * steadily drops off with more and more concurrent
358 		 * transactions.  Best sequential write performance with
359 		 * tagged queueing turned off and write caching turned on.
360 		 *
361 		 * PR:  kern/10398
362 		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
363 		 * Drive:  DCAS-34330 w/ "S65A" firmware.
364 		 *
365 		 * The drive with the problem had the "S65A" firmware
366 		 * revision, and has also been reported (by Stephen J.
367 		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
368 		 * firmware revision.
369 		 *
370 		 * Although no one has reported problems with the 2 gig
371 		 * version of the DCAS drive, the assumption is that it
372 		 * has the same problems as the 4 gig version.  Therefore
373 		 * this quirk entry disables tagged queueing for all
374 		 * DCAS drives.
375 		 */
376 		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
377 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
378 	},
379 	{
380 		/* Broken tagged queuing drive */
381 		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
382 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
383 	},
384 	{
385 		/* Broken tagged queuing drive */
386 		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
387 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
388 	},
389 	{
390 		/* This does not support other than LUN 0 */
391 		{ T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
392 		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
393 	},
394 	{
395 		/*
396 		 * Broken tagged queuing drive.
397 		 * Submitted by:
398 		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
399 		 * in PR kern/9535
400 		 */
401 		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
402 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
403 	},
404         {
405 		/*
406 		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
407 		 * 8MB/sec.)
408 		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
409 		 * Best performance with these drives is achieved with
410 		 * tagged queueing turned off, and write caching turned on.
411 		 */
412 		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
413 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
414         },
415         {
416 		/*
417 		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
418 		 * 8MB/sec.)
419 		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
420 		 * Best performance with these drives is achieved with
421 		 * tagged queueing turned off, and write caching turned on.
422 		 */
423 		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
424 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
425         },
426 	{
427 		/*
428 		 * Doesn't handle queue full condition correctly,
429 		 * so we need to limit maxtags to what the device
430 		 * can handle instead of determining this automatically.
431 		 */
432 		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
433 		/*quirks*/0, /*mintags*/2, /*maxtags*/32
434 	},
435 	{
436 		/* Really only one LUN */
437 		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
438 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
439 	},
440 	{
441 		/* I can't believe we need a quirk for DPT volumes. */
442 		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
443 		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
444 		/*mintags*/0, /*maxtags*/255
445 	},
446 	{
447 		/*
448 		 * Many Sony CDROM drives don't like multi-LUN probing.
449 		 */
450 		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
451 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
452 	},
453 	{
454 		/*
455 		 * This drive doesn't like multiple LUN probing.
456 		 * Submitted by:  Parag Patel <parag@cgt.com>
457 		 */
458 		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
459 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
460 	},
461 	{
462 		{ T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
463 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
464 	},
465 	{
466 		/*
467 		 * The 8200 doesn't like multi-lun probing, and probably
468 		 * doesn't like serial number requests either.
469 		 */
470 		{
471 			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
472 			"EXB-8200*", "*"
473 		},
474 		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
475 	},
476 	{
477 		/*
478 		 * Let's try the same as above, but for a drive that says
479 		 * it's an IPL-6860 but is actually an EXB 8200.
480 		 */
481 		{
482 			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
483 			"IPL-6860*", "*"
484 		},
485 		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
486 	},
487 	{
488 		/*
489 		 * These Hitachi drives don't like multi-lun probing.
490 		 * The PR submitter has a DK319H, but says that the Linux
491 		 * kernel has a similar work-around for the DK312 and DK314,
492 		 * so all DK31* drives are quirked here.
493 		 * PR:            misc/18793
494 		 * Submitted by:  Paul Haddad <paul@pth.com>
495 		 */
496 		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
497 		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
498 	},
499 	{
500 		/*
501 		 * The Hitachi CJ series with J8A8 firmware apparently has
502 		 * problems with tagged commands.
503 		 * PR: 23536
504 		 * Reported by: amagai@nue.org
505 		 */
506 		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
507 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
508 	},
509 	{
510 		/*
511 		 * These are the large storage arrays.
512 		 * Submitted by:  William Carrel <william.carrel@infospace.com>
513 		 */
514 		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
515 		CAM_QUIRK_HILUNS, 2, 1024
516 	},
517 	{
518 		/*
519 		 * This old revision of the TDC3600 is also SCSI-1, and
520 		 * hangs upon serial number probing.
521 		 */
522 		{
523 			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
524 			" TDC 3600", "U07:"
525 		},
526 		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
527 	},
528 	{
529 		/*
530 		 * Maxtor Personal Storage 3000XT (Firewire)
531 		 * hangs upon serial number probing.
532 		 */
533 		{
534 			T_DIRECT, SIP_MEDIA_FIXED, "Maxtor",
535 			"1394 storage", "*"
536 		},
537 		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
538 	},
539 	{
540 		/*
541 		 * Would respond to all LUNs if asked for.
542 		 */
543 		{
544 			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
545 			"CP150", "*"
546 		},
547 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
548 	},
549 	{
550 		/*
551 		 * Would respond to all LUNs if asked for.
552 		 */
553 		{
554 			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
555 			"96X2*", "*"
556 		},
557 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
558 	},
559 	{
560 		/* Submitted by: Matthew Dodd <winter@jurai.net> */
561 		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
562 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
563 	},
564 	{
565 		/* Submitted by: Matthew Dodd <winter@jurai.net> */
566 		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
567 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
568 	},
569 	{
570 		/* TeraSolutions special settings for TRC-22 RAID */
571 		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
572 		  /*quirks*/0, /*mintags*/55, /*maxtags*/255
573 	},
574 	{
575 		/* Veritas Storage Appliance */
576 		{ T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
577 		  CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
578 	},
579 	{
580 		/*
581 		 * Would respond to all LUNs.  Device type and removable
582 		 * flag are jumper-selectable.
583 		 */
584 		{ T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
585 		  "Tahiti 1", "*"
586 		},
587 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
588 	},
589 	{
590 		/* EasyRAID E5A (aka Areca ARC-6010) */
591 		{ T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
592 		  CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
593 	},
594 	{
595 		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
596 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
597 	},
598 	{
599 		/* Default tagged queuing parameters for all devices */
600 		{
601 		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
602 		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
603 		},
604 		/*quirks*/0, /*mintags*/2, /*maxtags*/255
605 	},
606 };
607 
608 static const int xpt_quirk_table_size =
609 	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
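#if 0
/*
 * Sketch of how the table is consumed (the in-file consumer is
 * xpt_find_quirk(), declared further down; "example_lookup_quirk" is a
 * hypothetical stand-in): cam_quirkmatch() runs scsi_inquiry_match()
 * over each entry's inq_pat, and the trailing wildcard entry guarantees
 * a match, so every device ends up with some quirk pointer.
 */
static struct xpt_quirk_entry *
example_lookup_quirk(struct cam_ed *device)
{
	caddr_t match;

	match = cam_quirkmatch((caddr_t)&device->inq_data,
			       (caddr_t)xpt_quirk_table,
			       xpt_quirk_table_size,
			       sizeof(*xpt_quirk_table),
			       scsi_inquiry_match);
	return ((struct xpt_quirk_entry *)match);
}
#endif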
610 
611 typedef enum {
612 	DM_RET_COPY		= 0x01,
613 	DM_RET_FLAG_MASK	= 0x0f,
614 	DM_RET_NONE		= 0x00,
615 	DM_RET_STOP		= 0x10,
616 	DM_RET_DESCEND		= 0x20,
617 	DM_RET_ERROR		= 0x30,
618 	DM_RET_ACTION_MASK	= 0xf0
619 } dev_match_ret;
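#if 0
/*
 * Illustrative decoding of a dev_match_ret (hypothetical helper): the
 * match routines below return one action from DM_RET_ACTION_MASK plus
 * optional flags such as DM_RET_COPY.
 */
static int
example_should_copy(dev_match_ret ret)
{
	if ((ret & DM_RET_ACTION_MASK) == DM_RET_ERROR)
		return (0);
	return (((ret & DM_RET_FLAG_MASK) & DM_RET_COPY) != 0);
}
#endif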
620 
621 typedef enum {
622 	XPT_DEPTH_BUS,
623 	XPT_DEPTH_TARGET,
624 	XPT_DEPTH_DEVICE,
625 	XPT_DEPTH_PERIPH
626 } xpt_traverse_depth;
627 
628 struct xpt_traverse_config {
629 	xpt_traverse_depth	depth;
630 	void			*tr_func;
631 	void			*tr_arg;
632 };
633 
634 typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
635 typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
636 typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
637 typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
638 typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
639 
640 /* Transport layer configuration information */
641 static struct xpt_softc xsoftc;
642 
643 /* Queues for our software interrupt handler */
644 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
645 static cam_isrq_t cam_bioq;
646 static struct mtx cam_bioq_lock;
647 
648 /* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
649 static SLIST_HEAD(,ccb_hdr) ccb_freeq;
650 static u_int xpt_max_ccbs;	/*
651 				 * Maximum size of ccb pool.  Modified as
652 				 * devices are added/removed or have their
653 				 * opening counts changed.
654 				 */
655 static u_int xpt_ccb_count;	/* Current count of allocated ccbs */
656 
657 struct cam_periph *xpt_periph;
658 
659 static periph_init_t xpt_periph_init;
660 
661 static periph_init_t probe_periph_init;
662 
663 static struct periph_driver xpt_driver =
664 {
665 	xpt_periph_init, "xpt",
666 	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
667 };
668 
669 static struct periph_driver probe_driver =
670 {
671 	probe_periph_init, "probe",
672 	TAILQ_HEAD_INITIALIZER(probe_driver.units)
673 };
674 
675 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
676 PERIPHDRIVER_DECLARE(probe, probe_driver);
677 
678 
679 static d_open_t xptopen;
680 static d_close_t xptclose;
681 static d_ioctl_t xptioctl;
682 
683 static struct cdevsw xpt_cdevsw = {
684 	.d_version =	D_VERSION,
685 	.d_flags =	D_NEEDGIANT,
686 	.d_open =	xptopen,
687 	.d_close =	xptclose,
688 	.d_ioctl =	xptioctl,
689 	.d_name =	"xpt",
690 };
691 
692 static struct intr_config_hook *xpt_config_hook;
693 
694 static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
695 static void dead_sim_poll(struct cam_sim *sim);
696 
697 /* Dummy SIM that is used when the real one has gone. */
698 static struct cam_sim cam_dead_sim = {
699 	.sim_action =	dead_sim_action,
700 	.sim_poll =	dead_sim_poll,
701 	.sim_name =	"dead_sim",
702 };
703 
704 #define SIM_DEAD(sim)	((sim) == &cam_dead_sim)
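/*
 * Note (assumption; the handler bodies appear later in this file): CCBs
 * routed to cam_dead_sim are failed immediately, so work queued against
 * a bus whose real SIM was deregistered drains rather than hangs.
 */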
705 
706 /* Registered busses */
707 static TAILQ_HEAD(,cam_eb) xpt_busses;
708 static u_int bus_generation;
709 
710 /* Storage for debugging datastructures */
711 #ifdef	CAMDEBUG
712 struct cam_path *cam_dpath;
713 u_int32_t cam_dflags;
714 u_int32_t cam_debug_delay;
715 #endif
716 
717 /* Pointers to software interrupt handlers */
718 static void *cambio_ih;
719 
720 #if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
721 #error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
722 #endif
723 
724 /*
725  * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
726  * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
727  * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
728  */
729 #if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
730     || defined(CAM_DEBUG_LUN)
731 #ifdef CAMDEBUG
732 #if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
733     || !defined(CAM_DEBUG_LUN)
734 #error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
735         and CAM_DEBUG_LUN"
736 #endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
737 #else /* !CAMDEBUG */
738 #error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
739 #endif /* CAMDEBUG */
740 #endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
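/*
 * Example kernel config fragment (illustrative) that satisfies the
 * checks above -- bus, target, and lun must be given together:
 *
 *	options CAMDEBUG
 *	options CAM_DEBUG_BUS=0
 *	options CAM_DEBUG_TARGET=1
 *	options CAM_DEBUG_LUN=0
 *	options CAM_DEBUG_FLAGS=(CAM_DEBUG_INFO|CAM_DEBUG_CDB)
 */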
741 
742 /* Our boot-time initialization hook */
743 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
744 
745 static moduledata_t cam_moduledata = {
746 	"cam",
747 	cam_module_event_handler,
748 	NULL
749 };
750 
751 static void	xpt_init(void *);
752 
753 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
754 MODULE_VERSION(cam, 1);
755 
756 
757 static cam_status	xpt_compile_path(struct cam_path *new_path,
758 					 struct cam_periph *perph,
759 					 path_id_t path_id,
760 					 target_id_t target_id,
761 					 lun_id_t lun_id);
762 
763 static void		xpt_release_path(struct cam_path *path);
764 
765 static void		xpt_async_bcast(struct async_list *async_head,
766 					u_int32_t async_code,
767 					struct cam_path *path,
768 					void *async_arg);
769 static void		xpt_dev_async(u_int32_t async_code,
770 				      struct cam_eb *bus,
771 				      struct cam_et *target,
772 				      struct cam_ed *device,
773 				      void *async_arg);
774 static path_id_t xptnextfreepathid(void);
775 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
776 static union ccb *xpt_get_ccb(struct cam_ed *device);
777 static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
778 				  u_int32_t new_priority);
779 static void	 xpt_run_dev_allocq(struct cam_eb *bus);
780 static void	 xpt_run_dev_sendq(struct cam_eb *bus);
781 static timeout_t xpt_release_devq_timeout;
782 static timeout_t xpt_release_simq_timeout;
783 static void	 xpt_release_bus(struct cam_eb *bus);
784 static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
785 					 int run_queue);
786 static struct cam_et*
787 		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
788 static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
789 static struct cam_ed*
790 		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
791 				  lun_id_t lun_id);
792 static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
793 				    struct cam_ed *device);
794 static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
795 static struct cam_eb*
796 		 xpt_find_bus(path_id_t path_id);
797 static struct cam_et*
798 		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
799 static struct cam_ed*
800 		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
801 static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
802 static void	 xpt_scan_lun(struct cam_periph *periph,
803 			      struct cam_path *path, cam_flags flags,
804 			      union ccb *ccb);
805 static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
806 static xpt_busfunc_t	xptconfigbuscountfunc;
807 static xpt_busfunc_t	xptconfigfunc;
808 static void	 xpt_config(void *arg);
809 static xpt_devicefunc_t xptpassannouncefunc;
810 static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
811 static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
812 static void	 xptpoll(struct cam_sim *sim);
813 static void	 camisr(void *);
814 #if 0
815 static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
816 static void	 xptasync(struct cam_periph *periph,
817 			  u_int32_t code, cam_path *path);
818 #endif
819 static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
820 				    u_int num_patterns, struct cam_eb *bus);
821 static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
822 				       u_int num_patterns,
823 				       struct cam_ed *device);
824 static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
825 				       u_int num_patterns,
826 				       struct cam_periph *periph);
827 static xpt_busfunc_t	xptedtbusfunc;
828 static xpt_targetfunc_t	xptedttargetfunc;
829 static xpt_devicefunc_t	xptedtdevicefunc;
830 static xpt_periphfunc_t	xptedtperiphfunc;
831 static xpt_pdrvfunc_t	xptplistpdrvfunc;
832 static xpt_periphfunc_t	xptplistperiphfunc;
833 static int		xptedtmatch(struct ccb_dev_match *cdm);
834 static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
835 static int		xptbustraverse(struct cam_eb *start_bus,
836 				       xpt_busfunc_t *tr_func, void *arg);
837 static int		xpttargettraverse(struct cam_eb *bus,
838 					  struct cam_et *start_target,
839 					  xpt_targetfunc_t *tr_func, void *arg);
840 static int		xptdevicetraverse(struct cam_et *target,
841 					  struct cam_ed *start_device,
842 					  xpt_devicefunc_t *tr_func, void *arg);
843 static int		xptperiphtraverse(struct cam_ed *device,
844 					  struct cam_periph *start_periph,
845 					  xpt_periphfunc_t *tr_func, void *arg);
846 static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
847 					xpt_pdrvfunc_t *tr_func, void *arg);
848 static int		xptpdperiphtraverse(struct periph_driver **pdrv,
849 					    struct cam_periph *start_periph,
850 					    xpt_periphfunc_t *tr_func,
851 					    void *arg);
852 static xpt_busfunc_t	xptdefbusfunc;
853 static xpt_targetfunc_t	xptdeftargetfunc;
854 static xpt_devicefunc_t	xptdefdevicefunc;
855 static xpt_periphfunc_t	xptdefperiphfunc;
856 static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
857 #ifdef notusedyet
858 static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
859 					    void *arg);
860 #endif
861 static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
862 					    void *arg);
863 #ifdef notusedyet
864 static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
865 					    void *arg);
866 #endif
867 static xpt_devicefunc_t	xptsetasyncfunc;
868 static xpt_busfunc_t	xptsetasyncbusfunc;
869 static cam_status	xptregister(struct cam_periph *periph,
870 				    void *arg);
871 static cam_status	proberegister(struct cam_periph *periph,
872 				      void *arg);
873 static void	 probeschedule(struct cam_periph *probe_periph);
874 static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
875 static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
876 static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
877 static void	 probecleanup(struct cam_periph *periph);
878 static void	 xpt_find_quirk(struct cam_ed *device);
879 #ifdef CAM_NEW_TRAN_CODE
880 static void	 xpt_devise_transport(struct cam_path *path);
881 #endif /* CAM_NEW_TRAN_CODE */
882 static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
883 					   struct cam_ed *device,
884 					   int async_update);
885 static void	 xpt_toggle_tags(struct cam_path *path);
886 static void	 xpt_start_tags(struct cam_path *path);
887 static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
888 					    struct cam_ed *dev);
889 static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
890 					   struct cam_ed *dev);
891 static __inline int periph_is_queued(struct cam_periph *periph);
892 static __inline int device_is_alloc_queued(struct cam_ed *device);
893 static __inline int device_is_send_queued(struct cam_ed *device);
894 static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
895 
896 static __inline int
897 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
898 {
899 	int retval;
900 
901 	if (dev->ccbq.devq_openings > 0) {
902 		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
903 			cam_ccbq_resize(&dev->ccbq,
904 					dev->ccbq.dev_openings
905 					+ dev->ccbq.dev_active);
906 			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
907 		}
908 		/*
909 		 * The priority of a device waiting for CCB resources
910 		 * is that of the highest priority peripheral driver
911 		 * enqueued.
912 		 */
913 		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
914 					  &dev->alloc_ccb_entry.pinfo,
915 					  CAMQ_GET_HEAD(&dev->drvq)->priority);
916 	} else {
917 		retval = 0;
918 	}
919 
920 	return (retval);
921 }
922 
923 static __inline int
924 xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
925 {
926 	int	retval;
927 
928 	if (dev->ccbq.dev_openings > 0) {
929 		/*
930 		 * The priority of a device waiting for controller
931 		 * resources is that of the highest priority CCB
932 		 * enqueued.
933 		 */
934 		retval =
935 		    xpt_schedule_dev(&bus->sim->devq->send_queue,
936 				     &dev->send_ccb_entry.pinfo,
937 				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
938 	} else {
939 		retval = 0;
940 	}
941 	return (retval);
942 }
943 
944 static __inline int
945 periph_is_queued(struct cam_periph *periph)
946 {
947 	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
948 }
949 
950 static __inline int
951 device_is_alloc_queued(struct cam_ed *device)
952 {
953 	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
954 }
955 
956 static __inline int
957 device_is_send_queued(struct cam_ed *device)
958 {
959 	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
960 }
961 
962 static __inline int
963 dev_allocq_is_runnable(struct cam_devq *devq)
964 {
965 	/*
966 	 * Have work to do.
967 	 * Have space to do more work.
968 	 * Allowed to do work.
969 	 */
970 	return ((devq->alloc_queue.qfrozen_cnt == 0)
971 	     && (devq->alloc_queue.entries > 0)
972 	     && (devq->alloc_openings > 0));
973 }
974 
975 static void
976 xpt_periph_init()
977 {
978 	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
979 }
980 
981 static void
982 probe_periph_init()
983 {
984 }
985 
986 
987 static void
988 xptdone(struct cam_periph *periph, union ccb *done_ccb)
989 {
990 	/* Caller will release the CCB */
991 	wakeup(&done_ccb->ccb_h.cbfcnp);
992 }
993 
994 static int
995 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
996 {
997 	int unit;
998 
999 	unit = minor(dev) & 0xff;
1000 
1001 	/*
1002 	 * Only allow read-write access.
1003 	 */
1004 	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
1005 		return(EPERM);
1006 
1007 	/*
1008 	 * We don't allow nonblocking access.
1009 	 */
1010 	if ((flags & O_NONBLOCK) != 0) {
1011 		printf("xpt%d: can't do nonblocking access\n", unit);
1012 		return(ENODEV);
1013 	}
1014 
1015 	/*
1016 	 * We only have one transport layer right now.  If someone accesses
1017 	 * us via something other than minor number 0, point out their
1018 	 * mistake.
1019 	 */
1020 	if (unit != 0) {
1021 		printf("xptopen: got invalid xpt unit %d\n", unit);
1022 		return(ENXIO);
1023 	}
1024 
1025 	/* Mark ourselves open */
1026 	xsoftc.flags |= XPT_FLAG_OPEN;
1027 
1028 	return(0);
1029 }
1030 
1031 static int
1032 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
1033 {
1034 	int unit;
1035 
1036 	unit = minor(dev) & 0xff;
1037 
1038 	/*
1039 	 * We only have one transport layer right now.  If someone accesses
1040 	 * us via something other than minor number 0, point out their
1041 	 * mistake.
1042 	 */
1043 	if (unit != 0) {
1044 		printf("xptclose: got invalid xpt unit %d\n", unit);
1045 		return(ENXIO);
1046 	}
1047 
1048 	/* Mark ourselves closed */
1049 	xsoftc.flags &= ~XPT_FLAG_OPEN;
1050 
1051 	return(0);
1052 }
1053 
1054 static int
1055 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
1056 {
1057 	int unit, error;
1058 
1059 	error = 0;
1060 	unit = minor(dev) & 0xff;
1061 
1062 	/*
1063 	 * We only have one transport layer right now.  If someone accesses
1064 	 * us via something other than minor number 0, point out their
1065 	 * mistake.
1066 	 */
1067 	if (unit != 0) {
1068 		printf("xptioctl: got invalid xpt unit %d\n", unit);
1069 		return(ENXIO);
1070 	}
1071 
1072 	switch(cmd) {
1073 	/*
1074 	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
1075 	 * to accept CCB types that don't quite make sense to send through a
1076 	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
1077 	 * in the CAM spec.
1078 	 */
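	/*
	 * Illustrative userland fragment (hypothetical; camcontrol(8)
	 * issues CAMIOCOMMAND in roughly this way for a bus rescan):
	 *
	 *	union ccb ccb;
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	ccb.ccb_h.func_code = XPT_SCAN_BUS;
	 *	ccb.ccb_h.path_id = 0;
	 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
	 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
	 *	ccb.ccb_h.pinfo.priority = 5;
	 *	if (ioctl(xpt_fd, CAMIOCOMMAND, &ccb) == -1)
	 *		err(1, "CAMIOCOMMAND");
	 */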
1079 	case CAMIOCOMMAND: {
1080 		union ccb *ccb;
1081 		union ccb *inccb;
1082 
1083 		inccb = (union ccb *)addr;
1084 
1085 		switch(inccb->ccb_h.func_code) {
1086 		case XPT_SCAN_BUS:
1087 		case XPT_RESET_BUS:
1088 			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
1089 			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
1090 				error = EINVAL;
1091 				break;
1092 			}
1093 			/* FALLTHROUGH */
1094 		case XPT_PATH_INQ:
1095 		case XPT_ENG_INQ:
1096 		case XPT_SCAN_LUN:
1097 
1098 			ccb = xpt_alloc_ccb();
1099 
1100 			/*
1101 			 * Create a path using the bus, target, and lun the
1102 			 * user passed in.
1103 			 */
1104 			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
1105 					    inccb->ccb_h.path_id,
1106 					    inccb->ccb_h.target_id,
1107 					    inccb->ccb_h.target_lun) !=
1108 					    CAM_REQ_CMP){
1109 				error = EINVAL;
1110 				xpt_free_ccb(ccb);
1111 				break;
1112 			}
1113 			/* Ensure all of our fields are correct */
1114 			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
1115 				      inccb->ccb_h.pinfo.priority);
1116 			xpt_merge_ccb(ccb, inccb);
1117 			ccb->ccb_h.cbfcnp = xptdone;
1118 			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
1119 			bcopy(ccb, inccb, sizeof(union ccb));
1120 			xpt_free_path(ccb->ccb_h.path);
1121 			xpt_free_ccb(ccb);
1122 			break;
1123 
1124 		case XPT_DEBUG: {
1125 			union ccb ccb;
1126 
1127 			/*
1128 			 * This is an immediate CCB, so it's okay to
1129 			 * allocate it on the stack.
1130 			 */
1131 
1132 			/*
1133 			 * Create a path using the bus, target, and lun the
1134 			 * user passed in.
1135 			 */
1136 			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
1137 					    inccb->ccb_h.path_id,
1138 					    inccb->ccb_h.target_id,
1139 					    inccb->ccb_h.target_lun) !=
1140 					    CAM_REQ_CMP){
1141 				error = EINVAL;
1142 				break;
1143 			}
1144 			/* Ensure all of our fields are correct */
1145 			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
1146 				      inccb->ccb_h.pinfo.priority);
1147 			xpt_merge_ccb(&ccb, inccb);
1148 			ccb.ccb_h.cbfcnp = xptdone;
1149 			xpt_action(&ccb);
1150 			bcopy(&ccb, inccb, sizeof(union ccb));
1151 			xpt_free_path(ccb.ccb_h.path);
1152 			break;
1153 
1154 		}
1155 		case XPT_DEV_MATCH: {
1156 			struct cam_periph_map_info mapinfo;
1157 			struct cam_path *old_path;
1158 
1159 			/*
1160 			 * We can't deal with physical addresses for this
1161 			 * type of transaction.
1162 			 */
1163 			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
1164 				error = EINVAL;
1165 				break;
1166 			}
1167 
1168 			/*
1169 			 * Save this in case the caller had it set to
1170 			 * something in particular.
1171 			 */
1172 			old_path = inccb->ccb_h.path;
1173 
1174 			/*
1175 			 * We really don't need a path for the matching
1176 			 * code.  The path is needed because of the
1177 			 * debugging statements in xpt_action().  They
1178 			 * assume that the CCB has a valid path.
1179 			 */
1180 			inccb->ccb_h.path = xpt_periph->path;
1181 
1182 			bzero(&mapinfo, sizeof(mapinfo));
1183 
1184 			/*
1185 			 * Map the pattern and match buffers into kernel
1186 			 * virtual address space.
1187 			 */
1188 			error = cam_periph_mapmem(inccb, &mapinfo);
1189 
1190 			if (error) {
1191 				inccb->ccb_h.path = old_path;
1192 				break;
1193 			}
1194 
1195 			/*
1196 			 * This is an immediate CCB, we can send it on directly.
1197 			 */
1198 			xpt_action(inccb);
1199 
1200 			/*
1201 			 * Map the buffers back into user space.
1202 			 */
1203 			cam_periph_unmapmem(inccb, &mapinfo);
1204 
1205 			inccb->ccb_h.path = old_path;
1206 
1207 			error = 0;
1208 			break;
1209 		}
1210 		default:
1211 			error = ENOTSUP;
1212 			break;
1213 		}
1214 		break;
1215 	}
1216 	/*
1217 	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
1218 	 * with the peripheral driver name and unit number filled in.  The other
1219 	 * fields don't really matter as input.  The passthrough driver name
1220 	 * ("pass"), and unit number are passed back in the ccb.  The current
1221 	 * device generation number, and the index into the device peripheral
1222 	 * driver list, and the status are also passed back.  Note that
1223 	 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
1224 	 * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
1225 	 * (or rather should be) impossible for the device peripheral driver
1226 	 * list to change since we look at the whole thing in one pass, and
1227 	 * we do it with splcam protection.
1228 	 *
1229 	 */
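	/*
	 * Hedged userland sketch of driving this ioctl (roughly what
	 * libcam's cam_open_device() does; the fragment is illustrative
	 * and would need <fcntl.h>, <sys/ioctl.h>, <err.h>, <stdio.h>,
	 * <strings.h>, <cam/cam.h>, and <cam/cam_ccb.h>):
	 *
	 *	union ccb ccb;
	 *	int fd = open("/dev/xpt0", O_RDWR);
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
	 *	strlcpy(ccb.cgdl.periph_name, "da",
	 *	    sizeof(ccb.cgdl.periph_name));
	 *	ccb.cgdl.unit_number = 0;
	 *	if (fd == -1 || ioctl(fd, CAMGETPASSTHRU, &ccb) == -1)
	 *		err(1, "CAMGETPASSTHRU");
	 *	printf("%s%d\n", ccb.cgdl.periph_name,
	 *	    ccb.cgdl.unit_number);
	 */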
1230 	case CAMGETPASSTHRU: {
1231 		union ccb *ccb;
1232 		struct cam_periph *periph;
1233 		struct periph_driver **p_drv;
1234 		char   *name;
1235 		u_int unit;
1236 		u_int cur_generation;
1237 		int base_periph_found;
1238 		int splbreaknum;
1239 		int s;
1240 
1241 		ccb = (union ccb *)addr;
1242 		unit = ccb->cgdl.unit_number;
1243 		name = ccb->cgdl.periph_name;
1244 		/*
1245 		 * Every 100 devices, we want to drop our spl protection to
1246 		 * give the software interrupt handler a chance to run.
1247 		 * Most systems won't run into this check, but this should
1248 		 * avoid starvation in the software interrupt handler in
1249 		 * large systems.
1250 		 */
1251 		splbreaknum = 100;
1252 
1253 		ccb = (union ccb *)addr;
1254 
1255 		base_periph_found = 0;
1256 
1257 		/*
1258 		 * Sanity check -- make sure we don't get a null peripheral
1259 		 * driver name.
1260 		 */
1261 		if (*ccb->cgdl.periph_name == '\0') {
1262 			error = EINVAL;
1263 			break;
1264 		}
1265 
1266 		/* Keep the list from changing while we traverse it */
1267 		s = splcam();
1268 ptstartover:
1269 		cur_generation = xsoftc.generation;
1270 
1271 		/* first find our driver in the list of drivers */
1272 		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
1273 			if (strcmp((*p_drv)->driver_name, name) == 0)
1274 				break;
1275 
1276 		if (*p_drv == NULL) {
1277 			splx(s);
1278 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1279 			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1280 			*ccb->cgdl.periph_name = '\0';
1281 			ccb->cgdl.unit_number = 0;
1282 			error = ENOENT;
1283 			break;
1284 		}
1285 
1286 		/*
1287 		 * Run through every peripheral instance of this driver
1288 		 * and check to see whether it matches the unit passed
1289 		 * in by the user.  If it does, get out of the loops and
1290 		 * find the passthrough driver associated with that
1291 		 * peripheral driver.
1292 		 */
1293 		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
1294 		     periph = TAILQ_NEXT(periph, unit_links)) {
1295 
1296 			if (periph->unit_number == unit) {
1297 				break;
1298 			} else if (--splbreaknum == 0) {
1299 				splx(s);
1300 				s = splcam();
1301 				splbreaknum = 100;
1302 				if (cur_generation != xsoftc.generation)
1303 				       goto ptstartover;
1304 			}
1305 		}
1306 		/*
1307 		 * If we found the peripheral driver that the user passed
1308 		 * in, go through all of the peripheral drivers for that
1309 		 * particular device and look for a passthrough driver.
1310 		 */
1311 		if (periph != NULL) {
1312 			struct cam_ed *device;
1313 			int i;
1314 
1315 			base_periph_found = 1;
1316 			device = periph->path->device;
1317 			for (i = 0, periph = SLIST_FIRST(&device->periphs);
1318 			     periph != NULL;
1319 			     periph = SLIST_NEXT(periph, periph_links), i++) {
1320 				/*
1321 				 * Check to see whether we have a
1322 				 * passthrough device or not.
1323 				 */
1324 				if (strcmp(periph->periph_name, "pass") == 0) {
1325 					/*
1326 					 * Fill in the getdevlist fields.
1327 					 */
1328 					strcpy(ccb->cgdl.periph_name,
1329 					       periph->periph_name);
1330 					ccb->cgdl.unit_number =
1331 						periph->unit_number;
1332 					if (SLIST_NEXT(periph, periph_links))
1333 						ccb->cgdl.status =
1334 							CAM_GDEVLIST_MORE_DEVS;
1335 					else
1336 						ccb->cgdl.status =
1337 						       CAM_GDEVLIST_LAST_DEVICE;
1338 					ccb->cgdl.generation =
1339 						device->generation;
1340 					ccb->cgdl.index = i;
1341 					/*
1342 					 * Fill in some CCB header fields
1343 					 * that the user may want.
1344 					 */
1345 					ccb->ccb_h.path_id =
1346 						periph->path->bus->path_id;
1347 					ccb->ccb_h.target_id =
1348 						periph->path->target->target_id;
1349 					ccb->ccb_h.target_lun =
1350 						periph->path->device->lun_id;
1351 					ccb->ccb_h.status = CAM_REQ_CMP;
1352 					break;
1353 				}
1354 			}
1355 		}
1356 
1357 		/*
1358 		 * If the periph is null here, one of two things has
1359 		 * happened.  The first possibility is that we couldn't
1360 		 * find the unit number of the particular peripheral driver
1361 		 * that the user is asking about.  e.g. the user asks for
1362 		 * the passthrough driver for "da11".  We find the list of
1363 		 * "da" peripherals all right, but there is no unit 11.
1364 		 * The other possibility is that we went through the list
1365 		 * of peripheral drivers attached to the device structure,
1366 		 * but didn't find one with the name "pass".  Either way,
1367 		 * we return ENOENT, since we couldn't find something.
1368 		 */
1369 		if (periph == NULL) {
1370 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1371 			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1372 			*ccb->cgdl.periph_name = '\0';
1373 			ccb->cgdl.unit_number = 0;
1374 			error = ENOENT;
1375 			/*
1376 			 * It is unfortunate that this is even necessary,
1377 			 * but there are many, many clueless users out there.
1378 			 * If this is true, the user is looking for the
1379 			 * passthrough driver, but doesn't have one in his
1380 			 * kernel.
1381 			 */
1382 			if (base_periph_found == 1) {
1383 				printf("xptioctl: pass driver is not in the "
1384 				       "kernel\n");
1385 				printf("xptioctl: put \"device pass0\" in "
1386 				       "your kernel config file\n");
1387 			}
1388 		}
1389 		splx(s);
1390 		break;
1391 		}
1392 	default:
1393 		error = ENOTTY;
1394 		break;
1395 	}
1396 
1397 	return(error);
1398 }
1399 
1400 static int
1401 cam_module_event_handler(module_t mod, int what, void *arg)
1402 {
1403 	if (what == MOD_LOAD) {
1404 		xpt_init(NULL);
1405 	} else if (what == MOD_UNLOAD) {
1406 		return EBUSY;
1407 	} else {
1408 		return EOPNOTSUPP;
1409 	}
1410 
1411 	return 0;
1412 }
1413 
1414 /* Functions accessed by the peripheral drivers */
1415 static void
1416 xpt_init(dummy)
1417 	void *dummy;
1418 {
1419 	struct cam_sim *xpt_sim;
1420 	struct cam_path *path;
1421 	struct cam_devq *devq;
1422 	cam_status status;
1423 
1424 	TAILQ_INIT(&xpt_busses);
1425 	TAILQ_INIT(&cam_bioq);
1426 	SLIST_INIT(&ccb_freeq);
1427 	STAILQ_INIT(&highpowerq);
1428 
1429 	mtx_init(&cam_bioq_lock, "CAM BIOQ lock", NULL, MTX_DEF);
1430 
1431 	/*
1432 	 * The xpt layer is, itself, the equivalent of a SIM.
1433 	 * Allow 16 ccbs in the ccb pool for it.  This should
1434 	 * give decent parallelism when we probe busses and
1435 	 * perform other XPT functions.
1436 	 */
1437 	devq = cam_simq_alloc(16);
1438 	xpt_sim = cam_sim_alloc(xptaction,
1439 				xptpoll,
1440 				"xpt",
1441 				/*softc*/NULL,
1442 				/*unit*/0,
1443 				/*max_dev_transactions*/0,
1444 				/*max_tagged_dev_transactions*/0,
1445 				devq);
1446 	xpt_max_ccbs = 16;
1447 
1448 	xpt_bus_register(xpt_sim, /*bus #*/0);
1449 
1450 	/*
1451 	 * Looking at the XPT from the SIM layer, the XPT is
1452 	 * the equivalent of a peripheral driver.  Allocate
1453 	 * a peripheral driver entry for us.
1454 	 */
1455 	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1456 				      CAM_TARGET_WILDCARD,
1457 				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1458 		printf("xpt_init: xpt_create_path failed with status %#x,"
1459 		       " failing attach\n", status);
1460 		return;
1461 	}
1462 
1463 	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1464 			 path, NULL, 0, NULL);
1465 	xpt_free_path(path);
1466 
1467 	xpt_sim->softc = xpt_periph;
1468 
1469 	/*
1470 	 * Register a callback for when interrupts are enabled.
1471 	 */
1472 	xpt_config_hook =
1473 	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
1474 					      M_TEMP, M_NOWAIT | M_ZERO);
1475 	if (xpt_config_hook == NULL) {
1476 		printf("xpt_init: Cannot malloc config hook "
1477 		       "- failing attach\n");
1478 		return;
1479 	}
1480 
1481 	xpt_config_hook->ich_func = xpt_config;
1482 	if (config_intrhook_establish(xpt_config_hook) != 0) {
1483 		free (xpt_config_hook, M_TEMP);
1484 		printf("xpt_init: config_intrhook_establish failed "
1485 		       "- failing attach\n");
1486 	}
1487 
1488 	/* Install our software interrupt handlers */
1489 	swi_add(NULL, "cambio", camisr, &cam_bioq, SWI_CAMBIO, 0, &cambio_ih);
1490 }
1491 
1492 static cam_status
1493 xptregister(struct cam_periph *periph, void *arg)
1494 {
1495 	if (periph == NULL) {
1496 		printf("xptregister: periph was NULL!!\n");
1497 		return(CAM_REQ_CMP_ERR);
1498 	}
1499 
1500 	periph->softc = NULL;
1501 
1502 	xpt_periph = periph;
1503 
1504 	return(CAM_REQ_CMP);
1505 }
1506 
1507 int32_t
1508 xpt_add_periph(struct cam_periph *periph)
1509 {
1510 	struct cam_ed *device;
1511 	int32_t	 status;
1512 	struct periph_list *periph_head;
1513 
1514 	GIANT_REQUIRED;
1515 
1516 	device = periph->path->device;
1517 
1518 	periph_head = &device->periphs;
1519 
1520 	status = CAM_REQ_CMP;
1521 
1522 	if (device != NULL) {
1523 		int s;
1524 
1525 		/*
1526 		 * Make room for this peripheral
1527 		 * so it will fit in the queue
1528 		 * when it's scheduled to run
1529 		 */
1530 		s = splsoftcam();
1531 		status = camq_resize(&device->drvq,
1532 				     device->drvq.array_size + 1);
1533 
1534 		device->generation++;
1535 
1536 		SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1537 
1538 		splx(s);
1539 	}
1540 
1541 	xsoftc.generation++;
1542 
1543 	return (status);
1544 }
1545 
1546 void
1547 xpt_remove_periph(struct cam_periph *periph)
1548 {
1549 	struct cam_ed *device;
1550 
1551 	GIANT_REQUIRED;
1552 
1553 	device = periph->path->device;
1554 
1555 	if (device != NULL) {
1556 		int s;
1557 		struct periph_list *periph_head;
1558 
1559 		periph_head = &device->periphs;
1560 
1561 		/* Release the slot for this peripheral */
1562 		s = splsoftcam();
1563 		camq_resize(&device->drvq, device->drvq.array_size - 1);
1564 
1565 		device->generation++;
1566 
1567 		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1568 
1569 		splx(s);
1570 	}
1571 
1572 	xsoftc.generation++;
1573 
1574 }
1575 
1576 #ifdef CAM_NEW_TRAN_CODE
1577 
1578 void
1579 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1580 {
1581 	struct	ccb_pathinq cpi;
1582 	struct	ccb_trans_settings cts;
1583 	struct	cam_path *path;
1584 	u_int	speed;
1585 	u_int	freq;
1586 	u_int	mb;
1587 	int	s;
1588 
1589 	GIANT_REQUIRED;
1590 
1591 	path = periph->path;
1592 	/*
1593 	 * To ensure that this is printed in one piece,
1594 	 * mask out CAM interrupts.
1595 	 */
1596 	s = splsoftcam();
1597 	printf("%s%d at %s%d bus %d target %d lun %d\n",
1598 	       periph->periph_name, periph->unit_number,
1599 	       path->bus->sim->sim_name,
1600 	       path->bus->sim->unit_number,
1601 	       path->bus->sim->bus_id,
1602 	       path->target->target_id,
1603 	       path->device->lun_id);
1604 	printf("%s%d: ", periph->periph_name, periph->unit_number);
1605 	scsi_print_inquiry(&path->device->inq_data);
1606 	if (bootverbose && path->device->serial_num_len > 0) {
1607 		/* Don't wrap the screen  - print only the first 60 chars */
1608 		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1609 		       periph->unit_number, path->device->serial_num);
1610 	}
1611 	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1612 	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1613 	cts.type = CTS_TYPE_CURRENT_SETTINGS;
1614 	xpt_action((union ccb*)&cts);
1615 
1616 	/* Ask the SIM for its base transfer speed */
1617 	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1618 	cpi.ccb_h.func_code = XPT_PATH_INQ;
1619 	xpt_action((union ccb *)&cpi);
1620 
1621 	speed = cpi.base_transfer_speed;
1622 	freq = 0;
1623 	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1624 		struct	ccb_trans_settings_spi *spi;
1625 
1626 		spi = &cts.xport_specific.spi;
1627 		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
1628 		  && spi->sync_offset != 0) {
1629 			freq = scsi_calc_syncsrate(spi->sync_period);
1630 			speed = freq;
1631 		}
1632 
1633 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
1634 			speed *= (0x01 << spi->bus_width);
1635 	}
1636 
1637 	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1638 		struct	ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
1639 		if (fc->valid & CTS_FC_VALID_SPEED) {
1640 			speed = fc->bitrate;
1641 		}
1642 	}
1643 
1644 	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
1645 		struct	ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
1646 		if (sas->valid & CTS_SAS_VALID_SPEED) {
1647 			speed = sas->bitrate;
1648 		}
1649 	}
1650 
1651 	mb = speed / 1000;
1652 	if (mb > 0)
1653 		printf("%s%d: %d.%03dMB/s transfers",
1654 		       periph->periph_name, periph->unit_number,
1655 		       mb, speed % 1000);
1656 	else
1657 		printf("%s%d: %dKB/s transfers", periph->periph_name,
1658 		       periph->unit_number, speed);
1659 	/* Report additional information about SPI connections */
1660 	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1661 		struct	ccb_trans_settings_spi *spi;
1662 
1663 		spi = &cts.xport_specific.spi;
1664 		if (freq != 0) {
1665 			printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
1666 			       freq % 1000,
1667 			       (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
1668 			     ? " DT" : "",
1669 			       spi->sync_offset);
1670 		}
1671 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
1672 		 && spi->bus_width > 0) {
1673 			if (freq != 0) {
1674 				printf(", ");
1675 			} else {
1676 				printf(" (");
1677 			}
1678 			printf("%dbit)", 8 * (0x01 << spi->bus_width));
1679 		} else if (freq != 0) {
1680 			printf(")");
1681 		}
1682 	}
1683 	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1684 		struct	ccb_trans_settings_fc *fc;
1685 
1686 		fc = &cts.xport_specific.fc;
1687 		if (fc->valid & CTS_FC_VALID_WWNN)
1688 			printf(" WWNN 0x%llx", (long long) fc->wwnn);
1689 		if (fc->valid & CTS_FC_VALID_WWPN)
1690 			printf(" WWPN 0x%llx", (long long) fc->wwpn);
1691 		if (fc->valid & CTS_FC_VALID_PORT)
1692 			printf(" PortID 0x%x", fc->port);
1693 	}
1694 
1695 	if (path->device->inq_flags & SID_CmdQue
1696 	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1697 		printf("\n%s%d: Tagged Queueing Enabled",
1698 		       periph->periph_name, periph->unit_number);
1699 	}
1700 	printf("\n");
1701 
1702 	/*
1703 	 * We only want to print the caller's announce string if they've
1704 	 * passed one in..
1705 	 */
1706 	if (announce_string != NULL)
1707 		printf("%s%d: %s\n", periph->periph_name,
1708 		       periph->unit_number, announce_string);
1709 	splx(s);
1710 }
1711 #else /* CAM_NEW_TRAN_CODE */
1712 void
1713 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1714 {
1715 	int s;
1716 	u_int mb;
1717 	struct cam_path *path;
1718 	struct ccb_trans_settings cts;
1719 
1720 	GIANT_REQUIRED;
1721 
1722 	path = periph->path;
1723 	/*
1724 	 * To ensure that this is printed in one piece,
1725 	 * mask out CAM interrupts.
1726 	 */
1727 	s = splsoftcam();
1728 	printf("%s%d at %s%d bus %d target %d lun %d\n",
1729 	       periph->periph_name, periph->unit_number,
1730 	       path->bus->sim->sim_name,
1731 	       path->bus->sim->unit_number,
1732 	       path->bus->sim->bus_id,
1733 	       path->target->target_id,
1734 	       path->device->lun_id);
1735 	printf("%s%d: ", periph->periph_name, periph->unit_number);
1736 	scsi_print_inquiry(&path->device->inq_data);
1737 	if ((bootverbose)
1738 	 && (path->device->serial_num_len > 0)) {
1739 		/* Don't wrap the screen  - print only the first 60 chars */
1740 		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1741 		       periph->unit_number, path->device->serial_num);
1742 	}
1743 	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1744 	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1745 	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1746 	xpt_action((union ccb*)&cts);
1747 	if (cts.ccb_h.status == CAM_REQ_CMP) {
1748 		u_int speed;
1749 		u_int freq;
1750 
1751 		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1752 		  && cts.sync_offset != 0) {
1753 			freq = scsi_calc_syncsrate(cts.sync_period);
1754 			speed = freq;
1755 		} else {
1756 			struct ccb_pathinq cpi;
1757 
1758 			/* Ask the SIM for its base transfer speed */
1759 			xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1760 			cpi.ccb_h.func_code = XPT_PATH_INQ;
1761 			xpt_action((union ccb *)&cpi);
1762 
1763 			speed = cpi.base_transfer_speed;
1764 			freq = 0;
1765 		}
1766 		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
1767 			speed *= (0x01 << cts.bus_width);
1768 		mb = speed / 1000;
1769 		if (mb > 0)
1770 			printf("%s%d: %d.%03dMB/s transfers",
1771 			       periph->periph_name, periph->unit_number,
1772 			       mb, speed % 1000);
1773 		else
1774 			printf("%s%d: %dKB/s transfers", periph->periph_name,
1775 			       periph->unit_number, speed);
1776 		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1777 		 && cts.sync_offset != 0) {
1778 			printf(" (%d.%03dMHz, offset %d", freq / 1000,
1779 			       freq % 1000, cts.sync_offset);
1780 		}
1781 		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
1782 		 && cts.bus_width > 0) {
1783 			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1784 			 && cts.sync_offset != 0) {
1785 				printf(", ");
1786 			} else {
1787 				printf(" (");
1788 			}
1789 			printf("%dbit)", 8 * (0x01 << cts.bus_width));
1790 		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1791 			&& cts.sync_offset != 0) {
1792 			printf(")");
1793 		}
1794 
1795 		if (path->device->inq_flags & SID_CmdQue
1796 		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1797 			printf(", Tagged Queueing Enabled");
1798 		}
1799 
1800 		printf("\n");
1801 	} else if (path->device->inq_flags & SID_CmdQue
1802 		|| path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1803 		printf("%s%d: Tagged Queueing Enabled\n",
1804 		       periph->periph_name, periph->unit_number);
1805 	}
1806 
1807 	/*
1808 	 * We only want to print the caller's announce string if they've
1809 	 * passed one in.
1810 	 */
1811 	if (announce_string != NULL)
1812 		printf("%s%d: %s\n", periph->periph_name,
1813 		       periph->unit_number, announce_string);
1814 	splx(s);
1815 }
1816 
1817 #endif /* CAM_NEW_TRAN_CODE */
1818 
1819 static dev_match_ret
1820 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1821 	    struct cam_eb *bus)
1822 {
1823 	dev_match_ret retval;
1824 	int i;
1825 
1826 	retval = DM_RET_NONE;
1827 
1828 	/*
1829 	 * If we aren't given something to match against, that's an error.
1830 	 */
1831 	if (bus == NULL)
1832 		return(DM_RET_ERROR);
1833 
1834 	/*
1835 	 * If there are no match entries, then this bus matches no
1836 	 * matter what.
1837 	 */
1838 	if ((patterns == NULL) || (num_patterns == 0))
1839 		return(DM_RET_DESCEND | DM_RET_COPY);
1840 
1841 	for (i = 0; i < num_patterns; i++) {
1842 		struct bus_match_pattern *cur_pattern;
1843 
1844 		/*
1845 		 * If the pattern in question isn't for a bus node, we
1846 		 * aren't interested.  However, we do indicate to the
1847 		 * calling routine that we should continue descending the
1848 		 * tree, since the user wants to match against lower-level
1849 		 * EDT elements.
1850 		 */
1851 		if (patterns[i].type != DEV_MATCH_BUS) {
1852 			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1853 				retval |= DM_RET_DESCEND;
1854 			continue;
1855 		}
1856 
1857 		cur_pattern = &patterns[i].pattern.bus_pattern;
1858 
1859 		/*
1860 		 * If they want to match any bus node, we give them this
1861 		 * bus node.
1862 		 */
1863 		if (cur_pattern->flags == BUS_MATCH_ANY) {
1864 			/* set the copy flag */
1865 			retval |= DM_RET_COPY;
1866 
1867 			/*
1868 			 * If we've already decided on an action, go ahead
1869 			 * and return.
1870 			 */
1871 			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1872 				return(retval);
1873 		}
1874 
1875 		/*
1876 		 * A BUS_MATCH_NONE pattern matches nothing, so skip it.
1877 		 */
1878 		if (cur_pattern->flags == BUS_MATCH_NONE)
1879 			continue;
1880 
1881 		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1882 		 && (cur_pattern->path_id != bus->path_id))
1883 			continue;
1884 
1885 		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1886 		 && (cur_pattern->bus_id != bus->sim->bus_id))
1887 			continue;
1888 
1889 		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1890 		 && (cur_pattern->unit_number != bus->sim->unit_number))
1891 			continue;
1892 
1893 		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1894 		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1895 			     DEV_IDLEN) != 0))
1896 			continue;
1897 
1898 		/*
1899 		 * If we get to this point, the user definitely wants
1900 		 * information on this bus.  So tell the caller to copy the
1901 		 * data out.
1902 		 */
1903 		retval |= DM_RET_COPY;
1904 
1905 		/*
1906 		 * If the return action has been set to descend, then we
1907 		 * know that we've already seen a non-bus matching
1908 		 * expression, therefore we need to further descend the tree.
1909 		 * This won't change by continuing around the loop, so we
1910 		 * go ahead and return.  If we haven't seen a non-bus
1911 		 * matching expression, we keep going around the loop until
1912 		 * we exhaust the matching expressions.  We'll set the stop
1913 		 * flag once we fall out of the loop.
1914 		 */
1915 		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1916 			return(retval);
1917 	}
1918 
1919 	/*
1920 	 * If the return action hasn't been set to descend yet, that means
1921 	 * we haven't seen anything other than bus matching patterns.  So
1922 	 * tell the caller to stop descending the tree -- the user doesn't
1923 	 * want to match against lower level tree elements.
1924 	 */
1925 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1926 		retval |= DM_RET_STOP;
1927 
1928 	return(retval);
1929 }
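
/*
 * Illustrative sketch (not part of this file): how a caller might fill
 * in a single bus pattern for the matching code above and interpret
 * the dev_match_ret flags handed back.  The structure, field, and flag
 * names are the real CAM definitions used in xptbusmatch(); the
 * surrounding logic and the "ahc" name are hypothetical.
 *
 *	struct dev_match_pattern pat;
 *	dev_match_ret ret;
 *
 *	bzero(&pat, sizeof(pat));
 *	pat.type = DEV_MATCH_BUS;
 *	pat.pattern.bus_pattern.flags = BUS_MATCH_NAME;
 *	strncpy(pat.pattern.bus_pattern.dev_name, "ahc", DEV_IDLEN);
 *
 *	ret = xptbusmatch(&pat, 1, bus);
 *	if (ret & DM_RET_COPY)
 *		copy this bus out to the user;
 *	if ((ret & DM_RET_ACTION_MASK) == DM_RET_STOP)
 *		do not descend to the targets below it;
 */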
1930 
1931 static dev_match_ret
1932 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1933 	       struct cam_ed *device)
1934 {
1935 	dev_match_ret retval;
1936 	int i;
1937 
1938 	retval = DM_RET_NONE;
1939 
1940 	/*
1941 	 * If we aren't given something to match against, that's an error.
1942 	 */
1943 	if (device == NULL)
1944 		return(DM_RET_ERROR);
1945 
1946 	/*
1947 	 * If there are no match entries, then this device matches no
1948 	 * matter what.
1949 	 */
1950 	if ((patterns == NULL) || (num_patterns == 0))
1951 		return(DM_RET_DESCEND | DM_RET_COPY);
1952 
1953 	for (i = 0; i < num_patterns; i++) {
1954 		struct device_match_pattern *cur_pattern;
1955 
1956 		/*
1957 		 * If the pattern in question isn't for a device node, we
1958 		 * aren't interested.  However, for a peripheral pattern we
1959 		 * do tell the caller to keep descending the tree.
1959 		 */
1960 		if (patterns[i].type != DEV_MATCH_DEVICE) {
1961 			if ((patterns[i].type == DEV_MATCH_PERIPH)
1962 			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1963 				retval |= DM_RET_DESCEND;
1964 			continue;
1965 		}
1966 
1967 		cur_pattern = &patterns[i].pattern.device_pattern;
1968 
1969 		/*
1970 		 * If they want to match any device node, we give them any
1971 		 * device node.
1972 		 */
1973 		if (cur_pattern->flags == DEV_MATCH_ANY) {
1974 			/* set the copy flag */
1975 			retval |= DM_RET_COPY;
1976 
1977 
1978 			/*
1979 			 * If we've already decided on an action, go ahead
1980 			 * and return.
1981 			 */
1982 			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1983 				return(retval);
1984 		}
1985 
1986 		/*
1987 		 * A DEV_MATCH_NONE pattern matches nothing, so skip it.
1988 		 */
1989 		if (cur_pattern->flags == DEV_MATCH_NONE)
1990 			continue;
1991 
1992 		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1993 		 && (cur_pattern->path_id != device->target->bus->path_id))
1994 			continue;
1995 
1996 		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1997 		 && (cur_pattern->target_id != device->target->target_id))
1998 			continue;
1999 
2000 		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
2001 		 && (cur_pattern->target_lun != device->lun_id))
2002 			continue;
2003 
2004 		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
2005 		 && (cam_quirkmatch((caddr_t)&device->inq_data,
2006 				    (caddr_t)&cur_pattern->inq_pat,
2007 				    1, sizeof(cur_pattern->inq_pat),
2008 				    scsi_static_inquiry_match) == NULL))
2009 			continue;
2010 
2011 		/*
2012 		 * If we get to this point, the user definitely wants
2013 		 * information on this device.  So tell the caller to copy
2014 		 * the data out.
2015 		 */
2016 		retval |= DM_RET_COPY;
2017 
2018 		/*
2019 		 * If the return action has been set to descend, then we
2020 		 * know that we've already seen a peripheral matching
2021 		 * expression, therefore we need to further descend the tree.
2022 		 * This won't change by continuing around the loop, so we
2023 		 * go ahead and return.  If we haven't seen a peripheral
2024 		 * matching expression, we keep going around the loop until
2025 		 * we exhaust the matching expressions.  We'll set the stop
2026 		 * flag once we fall out of the loop.
2027 		 */
2028 		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
2029 			return(retval);
2030 	}
2031 
2032 	/*
2033 	 * If the return action hasn't been set to descend yet, that means
2034 	 * we haven't seen any peripheral matching patterns.  So tell the
2035 	 * caller to stop descending the tree -- the user doesn't want to
2036 	 * match against lower level tree elements.
2037 	 */
2038 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
2039 		retval |= DM_RET_STOP;
2040 
2041 	return(retval);
2042 }
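
/*
 * Illustrative sketch (hypothetical caller): an inquiry-based device
 * pattern for xptdevicematch().  The inq_pat field is a
 * scsi_static_inquiry_pattern, so "*" acts as a wildcard, exactly as
 * in the quirk tables handled by cam_quirkmatch().
 *
 *	struct dev_match_pattern pat;
 *
 *	bzero(&pat, sizeof(pat));
 *	pat.type = DEV_MATCH_DEVICE;
 *	pat.pattern.device_pattern.flags = DEV_MATCH_INQUIRY;
 *	pat.pattern.device_pattern.inq_pat.type = T_DIRECT;
 *	pat.pattern.device_pattern.inq_pat.media_type = SIP_MEDIA_FIXED;
 *	strcpy(pat.pattern.device_pattern.inq_pat.vendor, "*");
 *	strcpy(pat.pattern.device_pattern.inq_pat.product, "*");
 *	strcpy(pat.pattern.device_pattern.inq_pat.revision, "*");
 */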
2043 
2044 /*
2045  * Match a single peripheral against any number of match patterns.
2046  */
2047 static dev_match_ret
2048 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
2049 	       struct cam_periph *periph)
2050 {
2051 	dev_match_ret retval;
2052 	int i;
2053 
2054 	/*
2055 	 * If we aren't given something to match against, that's an error.
2056 	 */
2057 	if (periph == NULL)
2058 		return(DM_RET_ERROR);
2059 
2060 	/*
2061 	 * If there are no match entries, then this peripheral matches no
2062 	 * matter what.
2063 	 */
2064 	if ((patterns == NULL) || (num_patterns == 0))
2065 		return(DM_RET_STOP | DM_RET_COPY);
2066 
2067 	/*
2068 	 * There aren't any nodes below a peripheral node, so there's no
2069 	 * reason to descend the tree any further.
2070 	 */
2071 	retval = DM_RET_STOP;
2072 
2073 	for (i = 0; i < num_patterns; i++) {
2074 		struct periph_match_pattern *cur_pattern;
2075 
2076 		/*
2077 		 * If the pattern in question isn't for a peripheral, we
2078 		 * aren't interested.
2079 		 */
2080 		if (patterns[i].type != DEV_MATCH_PERIPH)
2081 			continue;
2082 
2083 		cur_pattern = &patterns[i].pattern.periph_pattern;
2084 
2085 		/*
2086 		 * If they want to match on anything, then we will do so.
2087 		 */
2088 		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
2089 			/* set the copy flag */
2090 			retval |= DM_RET_COPY;
2091 
2092 			/*
2093 			 * We've already set the return action to stop,
2094 			 * since there are no nodes below peripherals in
2095 			 * the tree.
2096 			 */
2097 			return(retval);
2098 		}
2099 
2100 		/*
2101 		 * A PERIPH_MATCH_NONE pattern matches nothing, so skip it.
2102 		 */
2103 		if (cur_pattern->flags == PERIPH_MATCH_NONE)
2104 			continue;
2105 
2106 		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
2107 		 && (cur_pattern->path_id != periph->path->bus->path_id))
2108 			continue;
2109 
2110 		/*
2111 		 * For the target and lun IDs, we have to make sure the
2112 		 * target and lun pointers aren't NULL.  The xpt peripheral
2113 		 * has a wildcard target and device.
2114 		 */
2115 		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2116 		 && ((periph->path->target == NULL)
2117 		 ||(cur_pattern->target_id != periph->path->target->target_id)))
2118 			continue;
2119 
2120 		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2121 		 && ((periph->path->device == NULL)
2122 		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
2123 			continue;
2124 
2125 		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2126 		 && (cur_pattern->unit_number != periph->unit_number))
2127 			continue;
2128 
2129 		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2130 		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
2131 			     DEV_IDLEN) != 0))
2132 			continue;
2133 
2134 		/*
2135 		 * If we get to this point, the user definitely wants
2136 		 * information on this peripheral.  So tell the caller to
2137 		 * copy the data out.
2138 		 */
2139 		retval |= DM_RET_COPY;
2140 
2141 		/*
2142 		 * The return action has already been set to stop, since
2143 		 * peripherals don't have any nodes below them in the EDT.
2144 		 */
2145 		return(retval);
2146 	}
2147 
2148 	/*
2149 	 * If we get to this point, the peripheral that was passed in
2150 	 * doesn't match any of the patterns.
2151 	 */
2152 	return(retval);
2153 }
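
/*
 * Illustrative sketch (hypothetical caller): a name-and-unit pattern
 * for xptperiphmatch(), equivalent to asking about "da1".
 *
 *	struct dev_match_pattern pat;
 *
 *	bzero(&pat, sizeof(pat));
 *	pat.type = DEV_MATCH_PERIPH;
 *	pat.pattern.periph_pattern.flags =
 *	    PERIPH_MATCH_NAME | PERIPH_MATCH_UNIT;
 *	strncpy(pat.pattern.periph_pattern.periph_name, "da", DEV_IDLEN);
 *	pat.pattern.periph_pattern.unit_number = 1;
 */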
2154 
2155 static int
2156 xptedtbusfunc(struct cam_eb *bus, void *arg)
2157 {
2158 	struct ccb_dev_match *cdm;
2159 	dev_match_ret retval;
2160 
2161 	cdm = (struct ccb_dev_match *)arg;
2162 
2163 	/*
2164 	 * If our position is for something deeper in the tree, that means
2165 	 * that we've already seen this node.  So, we keep going down.
2166 	 */
2167 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2168 	 && (cdm->pos.cookie.bus == bus)
2169 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2170 	 && (cdm->pos.cookie.target != NULL))
2171 		retval = DM_RET_DESCEND;
2172 	else
2173 		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2174 
2175 	/*
2176 	 * If we got an error, bail out of the search.
2177 	 */
2178 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2179 		cdm->status = CAM_DEV_MATCH_ERROR;
2180 		return(0);
2181 	}
2182 
2183 	/*
2184 	 * If the copy flag is set, copy this bus out.
2185 	 */
2186 	if (retval & DM_RET_COPY) {
2187 		int spaceleft, j;
2188 
2189 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2190 			sizeof(struct dev_match_result));
2191 
2192 		/*
2193 		 * If we don't have enough space to put in another
2194 		 * match result, save our position and tell the
2195 		 * user there are more devices to check.
2196 		 */
2197 		if (spaceleft < sizeof(struct dev_match_result)) {
2198 			bzero(&cdm->pos, sizeof(cdm->pos));
2199 			cdm->pos.position_type =
2200 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2201 
2202 			cdm->pos.cookie.bus = bus;
2203 			cdm->pos.generations[CAM_BUS_GENERATION]=
2204 				bus_generation;
2205 			cdm->status = CAM_DEV_MATCH_MORE;
2206 			return(0);
2207 		}
2208 		j = cdm->num_matches;
2209 		cdm->num_matches++;
2210 		cdm->matches[j].type = DEV_MATCH_BUS;
2211 		cdm->matches[j].result.bus_result.path_id = bus->path_id;
2212 		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2213 		cdm->matches[j].result.bus_result.unit_number =
2214 			bus->sim->unit_number;
2215 		strncpy(cdm->matches[j].result.bus_result.dev_name,
2216 			bus->sim->sim_name, DEV_IDLEN);
2217 	}
2218 
2219 	/*
2220 	 * If the user is only interested in busses, there's no
2221 	 * reason to descend to the next level in the tree.
2222 	 */
2223 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2224 		return(1);
2225 
2226 	/*
2227 	 * If there is a target generation recorded, check it to
2228 	 * make sure the target list hasn't changed.
2229 	 */
2230 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2231 	 && (bus == cdm->pos.cookie.bus)
2232 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2233 	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2234 	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2235 	     bus->generation)) {
2236 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2237 		return(0);
2238 	}
2239 
2240 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2241 	 && (cdm->pos.cookie.bus == bus)
2242 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2243 	 && (cdm->pos.cookie.target != NULL))
2244 		return(xpttargettraverse(bus,
2245 					(struct cam_et *)cdm->pos.cookie.target,
2246 					 xptedttargetfunc, arg));
2247 	else
2248 		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2249 }
2250 
2251 static int
2252 xptedttargetfunc(struct cam_et *target, void *arg)
2253 {
2254 	struct ccb_dev_match *cdm;
2255 
2256 	cdm = (struct ccb_dev_match *)arg;
2257 
2258 	/*
2259 	 * If there is a device list generation recorded, check it to
2260 	 * make sure the device list hasn't changed.
2261 	 */
2262 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2263 	 && (cdm->pos.cookie.bus == target->bus)
2264 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2265 	 && (cdm->pos.cookie.target == target)
2266 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2267 	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2268 	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2269 	     target->generation)) {
2270 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2271 		return(0);
2272 	}
2273 
2274 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2275 	 && (cdm->pos.cookie.bus == target->bus)
2276 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2277 	 && (cdm->pos.cookie.target == target)
2278 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2279 	 && (cdm->pos.cookie.device != NULL))
2280 		return(xptdevicetraverse(target,
2281 					(struct cam_ed *)cdm->pos.cookie.device,
2282 					 xptedtdevicefunc, arg));
2283 	else
2284 		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2285 }
2286 
2287 static int
2288 xptedtdevicefunc(struct cam_ed *device, void *arg)
2289 {
2290 
2291 	struct ccb_dev_match *cdm;
2292 	dev_match_ret retval;
2293 
2294 	cdm = (struct ccb_dev_match *)arg;
2295 
2296 	/*
2297 	 * If our position is for something deeper in the tree, that means
2298 	 * that we've already seen this node.  So, we keep going down.
2299 	 */
2300 	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2301 	 && (cdm->pos.cookie.device == device)
2302 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2303 	 && (cdm->pos.cookie.periph != NULL))
2304 		retval = DM_RET_DESCEND;
2305 	else
2306 		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2307 					device);
2308 
2309 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2310 		cdm->status = CAM_DEV_MATCH_ERROR;
2311 		return(0);
2312 	}
2313 
2314 	/*
2315 	 * If the copy flag is set, copy this device out.
2316 	 */
2317 	if (retval & DM_RET_COPY) {
2318 		int spaceleft, j;
2319 
2320 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2321 			sizeof(struct dev_match_result));
2322 
2323 		/*
2324 		 * If we don't have enough space to put in another
2325 		 * match result, save our position and tell the
2326 		 * user there are more devices to check.
2327 		 */
2328 		if (spaceleft < sizeof(struct dev_match_result)) {
2329 			bzero(&cdm->pos, sizeof(cdm->pos));
2330 			cdm->pos.position_type =
2331 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2332 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2333 
2334 			cdm->pos.cookie.bus = device->target->bus;
2335 			cdm->pos.generations[CAM_BUS_GENERATION]=
2336 				bus_generation;
2337 			cdm->pos.cookie.target = device->target;
2338 			cdm->pos.generations[CAM_TARGET_GENERATION] =
2339 				device->target->bus->generation;
2340 			cdm->pos.cookie.device = device;
2341 			cdm->pos.generations[CAM_DEV_GENERATION] =
2342 				device->target->generation;
2343 			cdm->status = CAM_DEV_MATCH_MORE;
2344 			return(0);
2345 		}
2346 		j = cdm->num_matches;
2347 		cdm->num_matches++;
2348 		cdm->matches[j].type = DEV_MATCH_DEVICE;
2349 		cdm->matches[j].result.device_result.path_id =
2350 			device->target->bus->path_id;
2351 		cdm->matches[j].result.device_result.target_id =
2352 			device->target->target_id;
2353 		cdm->matches[j].result.device_result.target_lun =
2354 			device->lun_id;
2355 		bcopy(&device->inq_data,
2356 		      &cdm->matches[j].result.device_result.inq_data,
2357 		      sizeof(struct scsi_inquiry_data));
2358 
2359 		/* Let the user know whether this device is unconfigured */
2360 		if (device->flags & CAM_DEV_UNCONFIGURED)
2361 			cdm->matches[j].result.device_result.flags =
2362 				DEV_RESULT_UNCONFIGURED;
2363 		else
2364 			cdm->matches[j].result.device_result.flags =
2365 				DEV_RESULT_NOFLAG;
2366 	}
2367 
2368 	/*
2369 	 * If the user isn't interested in peripherals, don't descend
2370 	 * the tree any further.
2371 	 */
2372 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2373 		return(1);
2374 
2375 	/*
2376 	 * If there is a peripheral list generation recorded, make sure
2377 	 * it hasn't changed.
2378 	 */
2379 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2380 	 && (device->target->bus == cdm->pos.cookie.bus)
2381 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2382 	 && (device->target == cdm->pos.cookie.target)
2383 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2384 	 && (device == cdm->pos.cookie.device)
2385 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2386 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2387 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2388 	     device->generation)) {
2389 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2390 		return(0);
2391 	}
2392 
2393 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2394 	 && (cdm->pos.cookie.bus == device->target->bus)
2395 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2396 	 && (cdm->pos.cookie.target == device->target)
2397 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2398 	 && (cdm->pos.cookie.device == device)
2399 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2400 	 && (cdm->pos.cookie.periph != NULL))
2401 		return(xptperiphtraverse(device,
2402 				(struct cam_periph *)cdm->pos.cookie.periph,
2403 				xptedtperiphfunc, arg));
2404 	else
2405 		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2406 }
2407 
2408 static int
2409 xptedtperiphfunc(struct cam_periph *periph, void *arg)
2410 {
2411 	struct ccb_dev_match *cdm;
2412 	dev_match_ret retval;
2413 
2414 	cdm = (struct ccb_dev_match *)arg;
2415 
2416 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2417 
2418 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2419 		cdm->status = CAM_DEV_MATCH_ERROR;
2420 		return(0);
2421 	}
2422 
2423 	/*
2424 	 * If the copy flag is set, copy this peripheral out.
2425 	 */
2426 	if (retval & DM_RET_COPY) {
2427 		int spaceleft, j;
2428 
2429 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2430 			sizeof(struct dev_match_result));
2431 
2432 		/*
2433 		 * If we don't have enough space to put in another
2434 		 * match result, save our position and tell the
2435 		 * user there are more devices to check.
2436 		 */
2437 		if (spaceleft < sizeof(struct dev_match_result)) {
2438 			bzero(&cdm->pos, sizeof(cdm->pos));
2439 			cdm->pos.position_type =
2440 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2441 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2442 				CAM_DEV_POS_PERIPH;
2443 
2444 			cdm->pos.cookie.bus = periph->path->bus;
2445 			cdm->pos.generations[CAM_BUS_GENERATION]=
2446 				bus_generation;
2447 			cdm->pos.cookie.target = periph->path->target;
2448 			cdm->pos.generations[CAM_TARGET_GENERATION] =
2449 				periph->path->bus->generation;
2450 			cdm->pos.cookie.device = periph->path->device;
2451 			cdm->pos.generations[CAM_DEV_GENERATION] =
2452 				periph->path->target->generation;
2453 			cdm->pos.cookie.periph = periph;
2454 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2455 				periph->path->device->generation;
2456 			cdm->status = CAM_DEV_MATCH_MORE;
2457 			return(0);
2458 		}
2459 
2460 		j = cdm->num_matches;
2461 		cdm->num_matches++;
2462 		cdm->matches[j].type = DEV_MATCH_PERIPH;
2463 		cdm->matches[j].result.periph_result.path_id =
2464 			periph->path->bus->path_id;
2465 		cdm->matches[j].result.periph_result.target_id =
2466 			periph->path->target->target_id;
2467 		cdm->matches[j].result.periph_result.target_lun =
2468 			periph->path->device->lun_id;
2469 		cdm->matches[j].result.periph_result.unit_number =
2470 			periph->unit_number;
2471 		strncpy(cdm->matches[j].result.periph_result.periph_name,
2472 			periph->periph_name, DEV_IDLEN);
2473 	}
2474 
2475 	return(1);
2476 }
2477 
2478 static int
2479 xptedtmatch(struct ccb_dev_match *cdm)
2480 {
2481 	int ret;
2482 
2483 	cdm->num_matches = 0;
2484 
2485 	/*
2486 	 * Check the bus list generation.  If it has changed, the user
2487 	 * needs to reset everything and start over.
2488 	 */
2489 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2490 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2491 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2492 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2493 		return(0);
2494 	}
2495 
2496 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2497 	 && (cdm->pos.cookie.bus != NULL))
2498 		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2499 				     xptedtbusfunc, cdm);
2500 	else
2501 		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2502 
2503 	/*
2504 	 * If we get back 0, that means that we had to stop before fully
2505 	 * traversing the EDT.  It also means that one of the subroutines
2506 	 * has set the status field to the proper value.  If we get back 1,
2507 	 * we've fully traversed the EDT and copied out any matching entries.
2508 	 */
2509 	if (ret == 1)
2510 		cdm->status = CAM_DEV_MATCH_LAST;
2511 
2512 	return(ret);
2513 }
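
/*
 * Illustrative sketch (informational): the resume protocol the
 * functions above implement.  When the match buffer fills, the walk
 * records its place in cdm->pos and sets CAM_DEV_MATCH_MORE; the
 * caller consumes the matches and resubmits the same CCB, leaving
 * cdm->pos intact, until CAM_DEV_MATCH_LAST comes back.
 *
 *	do {
 *		xptedtmatch(cdm);
 *		if (cdm->status == CAM_DEV_MATCH_ERROR ||
 *		    cdm->status == CAM_DEV_MATCH_LIST_CHANGED)
 *			break;
 *		consume cdm->matches[0 .. cdm->num_matches - 1];
 *	} while (cdm->status == CAM_DEV_MATCH_MORE);
 */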
2514 
2515 static int
2516 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2517 {
2518 	struct ccb_dev_match *cdm;
2519 
2520 	cdm = (struct ccb_dev_match *)arg;
2521 
2522 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2523 	 && (cdm->pos.cookie.pdrv == pdrv)
2524 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2525 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2526 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2527 	     (*pdrv)->generation)) {
2528 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2529 		return(0);
2530 	}
2531 
2532 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2533 	 && (cdm->pos.cookie.pdrv == pdrv)
2534 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2535 	 && (cdm->pos.cookie.periph != NULL))
2536 		return(xptpdperiphtraverse(pdrv,
2537 				(struct cam_periph *)cdm->pos.cookie.periph,
2538 				xptplistperiphfunc, arg));
2539 	else
2540 		return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
2541 }
2542 
2543 static int
2544 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2545 {
2546 	struct ccb_dev_match *cdm;
2547 	dev_match_ret retval;
2548 
2549 	cdm = (struct ccb_dev_match *)arg;
2550 
2551 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2552 
2553 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2554 		cdm->status = CAM_DEV_MATCH_ERROR;
2555 		return(0);
2556 	}
2557 
2558 	/*
2559 	 * If the copy flag is set, copy this peripheral out.
2560 	 */
2561 	if (retval & DM_RET_COPY) {
2562 		int spaceleft, j;
2563 
2564 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2565 			sizeof(struct dev_match_result));
2566 
2567 		/*
2568 		 * If we don't have enough space to put in another
2569 		 * match result, save our position and tell the
2570 		 * user there are more devices to check.
2571 		 */
2572 		if (spaceleft < sizeof(struct dev_match_result)) {
2573 			struct periph_driver **pdrv;
2574 
2575 			pdrv = NULL;
2576 			bzero(&cdm->pos, sizeof(cdm->pos));
2577 			cdm->pos.position_type =
2578 				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2579 				CAM_DEV_POS_PERIPH;
2580 
2581 			/*
2582 			 * This may look a bit nonsensical, but it is
2583 			 * actually quite logical.  There are very few
2584 			 * peripheral drivers, and bloating every peripheral
2585 			 * structure with a pointer back to its parent
2586 			 * peripheral driver linker set entry would cost
2587 			 * more in the long run than doing this quick lookup.
2588 			 */
2589 			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2590 				if (strcmp((*pdrv)->driver_name,
2591 				    periph->periph_name) == 0)
2592 					break;
2593 			}
2594 
2595 			if (*pdrv == NULL) {
2596 				cdm->status = CAM_DEV_MATCH_ERROR;
2597 				return(0);
2598 			}
2599 
2600 			cdm->pos.cookie.pdrv = pdrv;
2601 			/*
2602 			 * The periph generation slot does double duty, as
2603 			 * does the periph pointer slot.  They are used for
2604 			 * both EDT and pdrv lookups and positioning.
2605 			 */
2606 			cdm->pos.cookie.periph = periph;
2607 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2608 				(*pdrv)->generation;
2609 			cdm->status = CAM_DEV_MATCH_MORE;
2610 			return(0);
2611 		}
2612 
2613 		j = cdm->num_matches;
2614 		cdm->num_matches++;
2615 		cdm->matches[j].type = DEV_MATCH_PERIPH;
2616 		cdm->matches[j].result.periph_result.path_id =
2617 			periph->path->bus->path_id;
2618 
2619 		/*
2620 		 * The transport layer peripheral doesn't have a target or
2621 		 * lun.
2622 		 */
2623 		if (periph->path->target)
2624 			cdm->matches[j].result.periph_result.target_id =
2625 				periph->path->target->target_id;
2626 		else
2627 			cdm->matches[j].result.periph_result.target_id = -1;
2628 
2629 		if (periph->path->device)
2630 			cdm->matches[j].result.periph_result.target_lun =
2631 				periph->path->device->lun_id;
2632 		else
2633 			cdm->matches[j].result.periph_result.target_lun = -1;
2634 
2635 		cdm->matches[j].result.periph_result.unit_number =
2636 			periph->unit_number;
2637 		strncpy(cdm->matches[j].result.periph_result.periph_name,
2638 			periph->periph_name, DEV_IDLEN);
2639 	}
2640 
2641 	return(1);
2642 }
2643 
2644 static int
2645 xptperiphlistmatch(struct ccb_dev_match *cdm)
2646 {
2647 	int ret;
2648 
2649 	cdm->num_matches = 0;
2650 
2651 	/*
2652 	 * At this point in the EDT traversal function, we check the bus
2653 	 * list generation to make sure that no busses have been added or
2654 	 * removed since the user last sent an XPT_DEV_MATCH CCB through.
2655 	 * For the peripheral driver list traversal function, however, we
2656 	 * don't have to worry about new peripheral driver types coming or
2657 	 * going; they're in a linker set, and therefore can't change
2658 	 * without a recompile.
2659 	 */
2660 
2661 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2662 	 && (cdm->pos.cookie.pdrv != NULL))
2663 		ret = xptpdrvtraverse(
2664 				(struct periph_driver **)cdm->pos.cookie.pdrv,
2665 				xptplistpdrvfunc, cdm);
2666 	else
2667 		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2668 
2669 	/*
2670 	 * If we get back 0, that means that we had to stop before fully
2671 	 * traversing the peripheral driver tree.  It also means that one of
2672 	 * the subroutines has set the status field to the proper value.  If
2673 	 * we get back 1, we've fully traversed the peripheral driver list
2674 	 * and copied out any matching entries.
2675 	 */
2676 	if (ret == 1)
2677 		cdm->status = CAM_DEV_MATCH_LAST;
2678 
2679 	return(ret);
2680 }
2681 
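/*
 * The traversal functions below share a single convention: walk the
 * list in question starting from the given element, or from the head
 * of the list when NULL is passed, and invoke tr_func on each element.
 * The next pointer is sampled before each callback, and a 0 return
 * from tr_func aborts the walk; that 0 (or the final 1) is what the
 * traversal function returns to its caller.
 */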
2682 static int
2683 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2684 {
2685 	struct cam_eb *bus, *next_bus;
2686 	int retval;
2687 
2688 	retval = 1;
2689 
2690 	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2691 	     bus != NULL;
2692 	     bus = next_bus) {
2693 		next_bus = TAILQ_NEXT(bus, links);
2694 
2695 		retval = tr_func(bus, arg);
2696 		if (retval == 0)
2697 			return(retval);
2698 	}
2699 
2700 	return(retval);
2701 }
2702 
2703 static int
2704 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2705 		  xpt_targetfunc_t *tr_func, void *arg)
2706 {
2707 	struct cam_et *target, *next_target;
2708 	int retval;
2709 
2710 	retval = 1;
2711 	for (target = (start_target ? start_target :
2712 		       TAILQ_FIRST(&bus->et_entries));
2713 	     target != NULL; target = next_target) {
2714 
2715 		next_target = TAILQ_NEXT(target, links);
2716 
2717 		retval = tr_func(target, arg);
2718 
2719 		if (retval == 0)
2720 			return(retval);
2721 	}
2722 
2723 	return(retval);
2724 }
2725 
2726 static int
2727 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2728 		  xpt_devicefunc_t *tr_func, void *arg)
2729 {
2730 	struct cam_ed *device, *next_device;
2731 	int retval;
2732 
2733 	retval = 1;
2734 	for (device = (start_device ? start_device :
2735 		       TAILQ_FIRST(&target->ed_entries));
2736 	     device != NULL;
2737 	     device = next_device) {
2738 
2739 		next_device = TAILQ_NEXT(device, links);
2740 
2741 		retval = tr_func(device, arg);
2742 
2743 		if (retval == 0)
2744 			return(retval);
2745 	}
2746 
2747 	return(retval);
2748 }
2749 
2750 static int
2751 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2752 		  xpt_periphfunc_t *tr_func, void *arg)
2753 {
2754 	struct cam_periph *periph, *next_periph;
2755 	int retval;
2756 
2757 	retval = 1;
2758 
2759 	for (periph = (start_periph ? start_periph :
2760 		       SLIST_FIRST(&device->periphs));
2761 	     periph != NULL;
2762 	     periph = next_periph) {
2763 
2764 		next_periph = SLIST_NEXT(periph, periph_links);
2765 
2766 		retval = tr_func(periph, arg);
2767 		if (retval == 0)
2768 			return(retval);
2769 	}
2770 
2771 	return(retval);
2772 }
2773 
2774 static int
2775 xptpdrvtraverse(struct periph_driver **start_pdrv,
2776 		xpt_pdrvfunc_t *tr_func, void *arg)
2777 {
2778 	struct periph_driver **pdrv;
2779 	int retval;
2780 
2781 	retval = 1;
2782 
2783 	/*
2784 	 * We don't traverse the peripheral driver list like we do the
2785 	 * other lists, because it is a linker set, and therefore cannot be
2786 	 * changed during runtime.  If the peripheral driver list is ever
2787 	 * re-done to be something other than a linker set (i.e. it can
2788 	 * change while the system is running), the list traversal should
2789 	 * be modified to work like the other traversal functions.
2790 	 */
2791 	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2792 	     *pdrv != NULL; pdrv++) {
2793 		retval = tr_func(pdrv, arg);
2794 
2795 		if (retval == 0)
2796 			return(retval);
2797 	}
2798 
2799 	return(retval);
2800 }
2801 
2802 static int
2803 xptpdperiphtraverse(struct periph_driver **pdrv,
2804 		    struct cam_periph *start_periph,
2805 		    xpt_periphfunc_t *tr_func, void *arg)
2806 {
2807 	struct cam_periph *periph, *next_periph;
2808 	int retval;
2809 
2810 	retval = 1;
2811 
2812 	for (periph = (start_periph ? start_periph :
2813 	     TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2814 	     periph = next_periph) {
2815 
2816 		next_periph = TAILQ_NEXT(periph, unit_links);
2817 
2818 		retval = tr_func(periph, arg);
2819 		if (retval == 0)
2820 			return(retval);
2821 	}
2822 	return(retval);
2823 }
2824 
2825 static int
2826 xptdefbusfunc(struct cam_eb *bus, void *arg)
2827 {
2828 	struct xpt_traverse_config *tr_config;
2829 
2830 	tr_config = (struct xpt_traverse_config *)arg;
2831 
2832 	if (tr_config->depth == XPT_DEPTH_BUS) {
2833 		xpt_busfunc_t *tr_func;
2834 
2835 		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2836 
2837 		return(tr_func(bus, tr_config->tr_arg));
2838 	} else
2839 		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2840 }
2841 
2842 static int
2843 xptdeftargetfunc(struct cam_et *target, void *arg)
2844 {
2845 	struct xpt_traverse_config *tr_config;
2846 
2847 	tr_config = (struct xpt_traverse_config *)arg;
2848 
2849 	if (tr_config->depth == XPT_DEPTH_TARGET) {
2850 		xpt_targetfunc_t *tr_func;
2851 
2852 		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2853 
2854 		return(tr_func(target, tr_config->tr_arg));
2855 	} else
2856 		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2857 }
2858 
2859 static int
2860 xptdefdevicefunc(struct cam_ed *device, void *arg)
2861 {
2862 	struct xpt_traverse_config *tr_config;
2863 
2864 	tr_config = (struct xpt_traverse_config *)arg;
2865 
2866 	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2867 		xpt_devicefunc_t *tr_func;
2868 
2869 		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2870 
2871 		return(tr_func(device, tr_config->tr_arg));
2872 	} else
2873 		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2874 }
2875 
2876 static int
2877 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2878 {
2879 	struct xpt_traverse_config *tr_config;
2880 	xpt_periphfunc_t *tr_func;
2881 
2882 	tr_config = (struct xpt_traverse_config *)arg;
2883 
2884 	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2885 
2886 	/*
2887 	 * Unlike the other default functions, we don't check for depth
2888 	 * here.  The peripheral driver level is the last level in the EDT,
2889 	 * so if we're here, we should execute the function in question.
2890 	 */
2891 	return(tr_func(periph, tr_config->tr_arg));
2892 }
2893 
2894 /*
2895  * Execute the given function for every bus in the EDT.
2896  */
2897 static int
2898 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2899 {
2900 	struct xpt_traverse_config tr_config;
2901 
2902 	tr_config.depth = XPT_DEPTH_BUS;
2903 	tr_config.tr_func = tr_func;
2904 	tr_config.tr_arg = arg;
2905 
2906 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2907 }
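
/*
 * Illustrative sketch (hypothetical callback, not in this file): a
 * caller-supplied xpt_busfunc_t that counts the busses in the EDT.
 * Returning 1 keeps the traversal going; returning 0 would abort it.
 *
 *	static int
 *	xptcountbusfunc(struct cam_eb *bus, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return(1);
 *	}
 *
 *	int nbusses = 0;
 *	xpt_for_all_busses(xptcountbusfunc, &nbusses);
 */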
2908 
2909 #ifdef notusedyet
2910 /*
2911  * Execute the given function for every target in the EDT.
2912  */
2913 static int
2914 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2915 {
2916 	struct xpt_traverse_config tr_config;
2917 
2918 	tr_config.depth = XPT_DEPTH_TARGET;
2919 	tr_config.tr_func = tr_func;
2920 	tr_config.tr_arg = arg;
2921 
2922 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2923 }
2924 #endif /* notusedyet */
2925 
2926 /*
2927  * Execute the given function for every device in the EDT.
2928  */
2929 static int
2930 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2931 {
2932 	struct xpt_traverse_config tr_config;
2933 
2934 	tr_config.depth = XPT_DEPTH_DEVICE;
2935 	tr_config.tr_func = tr_func;
2936 	tr_config.tr_arg = arg;
2937 
2938 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2939 }
2940 
2941 #ifdef notusedyet
2942 /*
2943  * Execute the given function for every peripheral in the EDT.
2944  */
2945 static int
2946 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2947 {
2948 	struct xpt_traverse_config tr_config;
2949 
2950 	tr_config.depth = XPT_DEPTH_PERIPH;
2951 	tr_config.tr_func = tr_func;
2952 	tr_config.tr_arg = arg;
2953 
2954 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2955 }
2956 #endif /* notusedyet */
2957 
2958 static int
2959 xptsetasyncfunc(struct cam_ed *device, void *arg)
2960 {
2961 	struct cam_path path;
2962 	struct ccb_getdev cgd;
2963 	struct async_node *cur_entry;
2964 
2965 	cur_entry = (struct async_node *)arg;
2966 
2967 	/*
2968 	 * Don't report unconfigured devices (Wildcard devs,
2969 	 * devices only for target mode, device instances
2970 	 * that have been invalidated but are waiting for
2971 	 * their last reference count to be released).
2972 	 */
2973 	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2974 		return (1);
2975 
2976 	xpt_compile_path(&path,
2977 			 NULL,
2978 			 device->target->bus->path_id,
2979 			 device->target->target_id,
2980 			 device->lun_id);
2981 	xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2982 	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2983 	xpt_action((union ccb *)&cgd);
2984 	cur_entry->callback(cur_entry->callback_arg,
2985 			    AC_FOUND_DEVICE,
2986 			    &path, &cgd);
2987 	xpt_release_path(&path);
2988 
2989 	return(1);
2990 }
2991 
2992 static int
2993 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2994 {
2995 	struct cam_path path;
2996 	struct ccb_pathinq cpi;
2997 	struct async_node *cur_entry;
2998 
2999 	cur_entry = (struct async_node *)arg;
3000 
3001 	xpt_compile_path(&path, /*periph*/NULL,
3002 			 bus->sim->path_id,
3003 			 CAM_TARGET_WILDCARD,
3004 			 CAM_LUN_WILDCARD);
3005 	xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
3006 	cpi.ccb_h.func_code = XPT_PATH_INQ;
3007 	xpt_action((union ccb *)&cpi);
3008 	cur_entry->callback(cur_entry->callback_arg,
3009 			    AC_PATH_REGISTERED,
3010 			    &path, &cpi);
3011 	xpt_release_path(&path);
3012 
3013 	return(1);
3014 }
3015 
3016 void
3017 xpt_action(union ccb *start_ccb)
3018 {
3019 	int iopl;
3020 
3021 	GIANT_REQUIRED;
3022 
3023 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
3024 
3025 	start_ccb->ccb_h.status = CAM_REQ_INPROG;
3026 
3027 	iopl = splsoftcam();
3028 	switch (start_ccb->ccb_h.func_code) {
3029 	case XPT_SCSI_IO:
3030 	{
3031 #ifdef CAM_NEW_TRAN_CODE
3032 		struct cam_ed *device;
3033 #endif /* CAM_NEW_TRAN_CODE */
3034 #ifdef CAMDEBUG
3035 		char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
3036 		struct cam_path *path;
3037 
3038 		path = start_ccb->ccb_h.path;
3039 #endif
3040 
3041 		/*
3042 		 * For the sake of compatibility with SCSI-1
3043 		 * devices that may not understand the identify
3044 		 * message, we include lun information in the
3045 		 * second byte of all commands.  SCSI-1 specifies
3046 		 * that luns are a 3-bit value and reserves only 3
3047 		 * bits for lun information in the CDB.  Later
3048 		 * revisions of the SCSI spec allow for more than 8
3049 		 * luns, but have deprecated lun information in the
3050 		 * CDB.  So, if the lun won't fit, we must omit it.
3051 		 *
3052 		 * Also be aware that during initial probing for devices,
3053 		 * the inquiry information is unknown but initialized to 0.
3054 		 * This means that this code will be exercised while probing
3055 		 * devices with an ANSI revision greater than 2.
3056 		 */
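		/*
		 * Worked example (informational): for target_lun 3 the
		 * assignment below ORs 3 << 5 == 0x60 into CDB byte 1,
		 * setting the three lun bits (7-5) while leaving the low
		 * five bits of the byte untouched.
		 */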
3057 #ifdef CAM_NEW_TRAN_CODE
3058 		device = start_ccb->ccb_h.path->device;
3059 		if (device->protocol_version <= SCSI_REV_2
3060 #else /* CAM_NEW_TRAN_CODE */
3061 		if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
3062 #endif /* CAM_NEW_TRAN_CODE */
3063 		 && start_ccb->ccb_h.target_lun < 8
3064 		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
3065 
3066 			start_ccb->csio.cdb_io.cdb_bytes[1] |=
3067 			    start_ccb->ccb_h.target_lun << 5;
3068 		}
3069 		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
3070 		CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
3071 			  scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
3072 			  	       &path->device->inq_data),
3073 			  scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
3074 					  cdb_str, sizeof(cdb_str))));
3075 	}
3076 	/* FALLTHROUGH */
3077 	case XPT_TARGET_IO:
3078 	case XPT_CONT_TARGET_IO:
3079 		start_ccb->csio.sense_resid = 0;
3080 		start_ccb->csio.resid = 0;
3081 		/* FALLTHROUGH */
3082 	case XPT_RESET_DEV:
3083 	case XPT_ENG_EXEC:
3084 	{
3085 		struct cam_path *path;
3086 		struct cam_sim *sim;
3087 		int s;
3088 		int runq;
3089 
3090 		path = start_ccb->ccb_h.path;
3091 		s = splsoftcam();
3092 
3093 		sim = path->bus->sim;
3094 		if (SIM_DEAD(sim)) {
3095 			/* The SIM has gone; just execute the CCB directly. */
3096 			cam_ccbq_send_ccb(&path->device->ccbq, start_ccb);
3097 			(*(sim->sim_action))(sim, start_ccb);
3098 			splx(s);
3099 			break;
3100 		}
3101 
3102 		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
3103 		if (path->device->qfrozen_cnt == 0)
3104 			runq = xpt_schedule_dev_sendq(path->bus, path->device);
3105 		else
3106 			runq = 0;
3107 		splx(s);
3108 		if (runq != 0)
3109 			xpt_run_dev_sendq(path->bus);
3110 		break;
3111 	}
3112 	case XPT_SET_TRAN_SETTINGS:
3113 	{
3114 		xpt_set_transfer_settings(&start_ccb->cts,
3115 					  start_ccb->ccb_h.path->device,
3116 					  /*async_update*/FALSE);
3117 		break;
3118 	}
3119 	case XPT_CALC_GEOMETRY:
3120 	{
3121 		struct cam_sim *sim;
3122 
3123 		/* Filter out garbage */
3124 		if (start_ccb->ccg.block_size == 0
3125 		 || start_ccb->ccg.volume_size == 0) {
3126 			start_ccb->ccg.cylinders = 0;
3127 			start_ccb->ccg.heads = 0;
3128 			start_ccb->ccg.secs_per_track = 0;
3129 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3130 			break;
3131 		}
3132 #ifdef PC98
3133 		/*
3134 		 * In a PC-98 system, geometry translation depends on
3135 		 * the "real" device geometry obtained from mode page 4.
3136 		 * SCSI geometry translation is performed in the
3137 		 * initialization routine of the SCSI BIOS and the result is
3138 		 * stored in host memory.  If the translation is available
3139 		 * in host memory, use it.  If not, rely on the default
3140 		 * translation the device driver performs.
3141 		 */
3142 		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
3143 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3144 			break;
3145 		}
3146 #endif
3147 		sim = start_ccb->ccb_h.path->bus->sim;
3148 		(*(sim->sim_action))(sim, start_ccb);
3149 		break;
3150 	}
3151 	case XPT_ABORT:
3152 	{
3153 		union ccb* abort_ccb;
3154 		int s;
3155 
3156 		abort_ccb = start_ccb->cab.abort_ccb;
3157 		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3158 
3159 			if (abort_ccb->ccb_h.pinfo.index >= 0) {
3160 				struct cam_ccbq *ccbq;
3161 
3162 				ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3163 				cam_ccbq_remove_ccb(ccbq, abort_ccb);
3164 				abort_ccb->ccb_h.status =
3165 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3166 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3167 				s = splcam();
3168 				xpt_done(abort_ccb);
3169 				splx(s);
3170 				start_ccb->ccb_h.status = CAM_REQ_CMP;
3171 				break;
3172 			}
3173 			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3174 			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3175 				/*
3176 				 * We've caught this ccb en route to
3177 				 * the SIM.  Flag it for abort and the
3178 				 * SIM will do so just before starting
3179 				 * real work on the CCB.
3180 				 */
3181 				abort_ccb->ccb_h.status =
3182 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3183 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3184 				start_ccb->ccb_h.status = CAM_REQ_CMP;
3185 				break;
3186 			}
3187 		}
3188 		if (XPT_FC_IS_QUEUED(abort_ccb)
3189 		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3190 			/*
3191 			 * It's already completed but waiting
3192 			 * for our SWI to get to it.
3193 			 */
3194 			start_ccb->ccb_h.status = CAM_UA_ABORT;
3195 			break;
3196 		}
3197 		/*
3198 		 * If we weren't able to take care of the abort request
3199 		 * in the XPT, pass the request down to the SIM for processing.
3200 		 */
3201 	}
3202 	/* FALLTHROUGH */
3203 	case XPT_ACCEPT_TARGET_IO:
3204 	case XPT_EN_LUN:
3205 	case XPT_IMMED_NOTIFY:
3206 	case XPT_NOTIFY_ACK:
3207 	case XPT_GET_TRAN_SETTINGS:
3208 	case XPT_RESET_BUS:
3209 	{
3210 		struct cam_sim *sim;
3211 
3212 		sim = start_ccb->ccb_h.path->bus->sim;
3213 		(*(sim->sim_action))(sim, start_ccb);
3214 		break;
3215 	}
3216 	case XPT_PATH_INQ:
3217 	{
3218 		struct cam_sim *sim;
3219 
3220 		sim = start_ccb->ccb_h.path->bus->sim;
3221 		(*(sim->sim_action))(sim, start_ccb);
3222 		break;
3223 	}
3224 	case XPT_PATH_STATS:
3225 		start_ccb->cpis.last_reset =
3226 			start_ccb->ccb_h.path->bus->last_reset;
3227 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3228 		break;
3229 	case XPT_GDEV_TYPE:
3230 	{
3231 		struct cam_ed *dev;
3232 		int s;
3233 
3234 		dev = start_ccb->ccb_h.path->device;
3235 		s = splcam();
3236 		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3237 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3238 		} else {
3239 			struct ccb_getdev *cgd;
3240 			struct cam_eb *bus;
3241 			struct cam_et *tar;
3242 
3243 			cgd = &start_ccb->cgd;
3244 			bus = cgd->ccb_h.path->bus;
3245 			tar = cgd->ccb_h.path->target;
3246 			cgd->inq_data = dev->inq_data;
3247 			cgd->ccb_h.status = CAM_REQ_CMP;
3248 			cgd->serial_num_len = dev->serial_num_len;
3249 			if ((dev->serial_num_len > 0)
3250 			 && (dev->serial_num != NULL))
3251 				bcopy(dev->serial_num, cgd->serial_num,
3252 				      dev->serial_num_len);
3253 		}
3254 		splx(s);
3255 		break;
3256 	}
3257 	case XPT_GDEV_STATS:
3258 	{
3259 		struct cam_ed *dev;
3260 		int s;
3261 
3262 		dev = start_ccb->ccb_h.path->device;
3263 		s = splcam();
3264 		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3265 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3266 		} else {
3267 			struct ccb_getdevstats *cgds;
3268 			struct cam_eb *bus;
3269 			struct cam_et *tar;
3270 
3271 			cgds = &start_ccb->cgds;
3272 			bus = cgds->ccb_h.path->bus;
3273 			tar = cgds->ccb_h.path->target;
3274 			cgds->dev_openings = dev->ccbq.dev_openings;
3275 			cgds->dev_active = dev->ccbq.dev_active;
3276 			cgds->devq_openings = dev->ccbq.devq_openings;
3277 			cgds->devq_queued = dev->ccbq.queue.entries;
3278 			cgds->held = dev->ccbq.held;
3279 			cgds->last_reset = tar->last_reset;
3280 			cgds->maxtags = dev->quirk->maxtags;
3281 			cgds->mintags = dev->quirk->mintags;
3282 			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3283 				cgds->last_reset = bus->last_reset;
3284 			cgds->ccb_h.status = CAM_REQ_CMP;
3285 		}
3286 		splx(s);
3287 		break;
3288 	}
3289 	case XPT_GDEVLIST:
3290 	{
3291 		struct cam_periph	*nperiph;
3292 		struct periph_list	*periph_head;
3293 		struct ccb_getdevlist	*cgdl;
3294 		u_int			i;
3295 		int			s;
3296 		struct cam_ed		*device;
3297 		int			found;
3298 
3299 
3300 		found = 0;
3301 
3302 		/*
3303 		 * Don't want anyone mucking with our data.
3304 		 */
3305 		s = splcam();
3306 		device = start_ccb->ccb_h.path->device;
3307 		periph_head = &device->periphs;
3308 		cgdl = &start_ccb->cgdl;
3309 
3310 		/*
3311 		 * Check and see if the list has changed since the user
3312 		 * last requested a list member.  If so, tell them that the
3313 		 * list has changed, and therefore they need to start over
3314 		 * from the beginning.
3315 		 */
3316 		if ((cgdl->index != 0) &&
3317 		    (cgdl->generation != device->generation)) {
3318 			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3319 			splx(s);
3320 			break;
3321 		}
3322 
3323 		/*
3324 		 * Traverse the list of peripherals and attempt to find
3325 		 * the requested peripheral.
3326 		 */
3327 		for (nperiph = SLIST_FIRST(periph_head), i = 0;
3328 		     (nperiph != NULL) && (i <= cgdl->index);
3329 		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3330 			if (i == cgdl->index) {
3331 				strncpy(cgdl->periph_name,
3332 					nperiph->periph_name,
3333 					DEV_IDLEN);
3334 				cgdl->unit_number = nperiph->unit_number;
3335 				found = 1;
3336 			}
3337 		}
3338 		if (found == 0) {
3339 			cgdl->status = CAM_GDEVLIST_ERROR;
3340 			splx(s);
3341 			break;
3342 		}
3343 
3344 		if (nperiph == NULL)
3345 			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3346 		else
3347 			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3348 
3349 		cgdl->index++;
3350 		cgdl->generation = device->generation;
3351 
3352 		splx(s);
3353 		cgdl->ccb_h.status = CAM_REQ_CMP;
3354 		break;
3355 	}
3356 	case XPT_DEV_MATCH:
3357 	{
3358 		int s;
3359 		dev_pos_type position_type;
3360 		struct ccb_dev_match *cdm;
3361 
3362 		cdm = &start_ccb->cdm;
3363 
3364 		/*
3365 		 * Prevent EDT changes while we traverse it.
3366 		 */
3367 		s = splcam();
3368 		/*
3369 		 * There are two ways of getting at information in the EDT.
3370 		 * The first way is via the primary EDT tree.  It starts
3371 		 * with a list of busses, then a list of targets on a bus,
3372 		 * then devices/luns on a target, and then peripherals on a
3373 		 * device/lun.  The "other" way is by the peripheral driver
3374 		 * lists.  The peripheral driver lists are, naturally,
3375 		 * organized by peripheral driver.  So it makes sense to
3376 		 * use the peripheral driver list if the user is looking
3377 		 * for something like "da1", or all "da" devices.  If the
3378 		 * user is looking for something on a particular bus/target
3379 		 * or lun, it's generally better to go through the EDT tree.
3380 		 */
3381 
3382 		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3383 			position_type = cdm->pos.position_type;
3384 		else {
3385 			u_int i;
3386 
3387 			position_type = CAM_DEV_POS_NONE;
3388 
3389 			for (i = 0; i < cdm->num_patterns; i++) {
3390 				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3391 				 || (cdm->patterns[i].type == DEV_MATCH_DEVICE)) {
3392 					position_type = CAM_DEV_POS_EDT;
3393 					break;
3394 				}
3395 			}
3396 
3397 			if (cdm->num_patterns == 0)
3398 				position_type = CAM_DEV_POS_EDT;
3399 			else if (position_type == CAM_DEV_POS_NONE)
3400 				position_type = CAM_DEV_POS_PDRV;
3401 		}
3402 
3403 		switch(position_type & CAM_DEV_POS_TYPEMASK) {
3404 		case CAM_DEV_POS_EDT:
3405 			xptedtmatch(cdm);
3406 			break;
3407 		case CAM_DEV_POS_PDRV:
3408 			xptperiphlistmatch(cdm);
3409 			break;
3410 		default:
3411 			cdm->status = CAM_DEV_MATCH_ERROR;
3412 			break;
3413 		}
3414 
3415 		splx(s);
3416 
3417 		if (cdm->status == CAM_DEV_MATCH_ERROR)
3418 			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3419 		else
3420 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3421 
3422 		break;
3423 	}
3424 	case XPT_SASYNC_CB:
3425 	{
3426 		struct ccb_setasync *csa;
3427 		struct async_node *cur_entry;
3428 		struct async_list *async_head;
3429 		u_int32_t added;
3430 		int s;
3431 
3432 		csa = &start_ccb->csa;
3433 		added = csa->event_enable;
3434 		async_head = &csa->ccb_h.path->device->asyncs;
3435 
3436 		/*
3437 		 * If there is already an entry for us, simply
3438 		 * update it.
3439 		 */
3440 		s = splcam();
3441 		cur_entry = SLIST_FIRST(async_head);
3442 		while (cur_entry != NULL) {
3443 			if ((cur_entry->callback_arg == csa->callback_arg)
3444 			 && (cur_entry->callback == csa->callback))
3445 				break;
3446 			cur_entry = SLIST_NEXT(cur_entry, links);
3447 		}
3448 
3449 		if (cur_entry != NULL) {
3450 		 	/*
3451 			 * If the request has no flags set,
3452 			 * remove the entry.
3453 			 */
3454 			added &= ~cur_entry->event_enable;
3455 			if (csa->event_enable == 0) {
3456 				SLIST_REMOVE(async_head, cur_entry,
3457 					     async_node, links);
3458 				csa->ccb_h.path->device->refcount--;
3459 				free(cur_entry, M_CAMXPT);
3460 			} else {
3461 				cur_entry->event_enable = csa->event_enable;
3462 			}
3463 		} else {
3464 			cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
3465 					   M_NOWAIT);
3466 			if (cur_entry == NULL) {
3467 				splx(s);
3468 				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3469 				break;
3470 			}
3471 			cur_entry->event_enable = csa->event_enable;
3472 			cur_entry->callback_arg = csa->callback_arg;
3473 			cur_entry->callback = csa->callback;
3474 			SLIST_INSERT_HEAD(async_head, cur_entry, links);
3475 			csa->ccb_h.path->device->refcount++;
3476 		}
3477 
3478 		if ((added & AC_FOUND_DEVICE) != 0) {
3479 			/*
3480 			 * Get this peripheral up to date with all
3481 			 * the currently existing devices.
3482 			 */
3483 			xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3484 		}
3485 		if ((added & AC_PATH_REGISTERED) != 0) {
3486 			/*
3487 			 * Get this peripheral up to date with all
3488 			 * the currently existing busses.
3489 			 */
3490 			xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3491 		}
3492 		splx(s);
3493 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3494 		break;
3495 	}
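
	/*
	 * Illustrative sketch (hypothetical caller): how a peripheral
	 * driver typically registers for the async events handled above.
	 * The callback name is made up; the fields and flags are the
	 * real ones consumed by the XPT_SASYNC_CB case.
	 *
	 *	struct ccb_setasync csa;
	 *
	 *	xpt_setup_ccb(&csa.ccb_h, periph->path, 1);
	 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
	 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
	 *	csa.callback = myasynccb;
	 *	csa.callback_arg = periph;
	 *	xpt_action((union ccb *)&csa);
	 *
	 * Passing event_enable == 0 later removes the registration, as
	 * the code above shows.
	 */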
3496 	case XPT_REL_SIMQ:
3497 	{
3498 		struct ccb_relsim *crs;
3499 		struct cam_ed *dev;
3500 		int s;
3501 
3502 		crs = &start_ccb->crs;
3503 		dev = crs->ccb_h.path->device;
3504 		if (dev == NULL) {
3505 
3506 			crs->ccb_h.status = CAM_DEV_NOT_THERE;
3507 			break;
3508 		}
3509 
3510 		s = splcam();
3511 
3512 		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3513 
3514  			if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
3515 				/* Don't ever go below one opening */
3516 				if (crs->openings > 0) {
3517 					xpt_dev_ccbq_resize(crs->ccb_h.path,
3518 							    crs->openings);
3519 
3520 					if (bootverbose) {
3521 						xpt_print_path(crs->ccb_h.path);
3522 						printf("tagged openings "
3523 						       "now %d\n",
3524 						       crs->openings);
3525 					}
3526 				}
3527 			}
3528 		}
3529 
3530 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3531 
3532 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3533 
3534 				/*
3535 				 * Just extend the old timeout and decrement
3536 				 * the freeze count so that a single timeout
3537 				 * is sufficient for releasing the queue.
3538 				 */
3539 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3540 				untimeout(xpt_release_devq_timeout,
3541 					  dev, dev->c_handle);
3542 			} else {
3543 
3544 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3545 			}
3546 
3547 			dev->c_handle =
3548 				timeout(xpt_release_devq_timeout,
3549 					dev,
3550 					(crs->release_timeout * hz) / 1000);
3551 
3552 			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3553 
3554 		}
3555 
3556 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3557 
3558 			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3559 				/*
3560 				 * Decrement the freeze count so that a single
3561 				 * completion is still sufficient to unfreeze
3562 				 * the queue.
3563 				 */
3564 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3565 			} else {
3566 
3567 				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3568 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3569 			}
3570 		}
3571 
3572 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3573 
3574 			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3575 			 || (dev->ccbq.dev_active == 0)) {
3576 
3577 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3578 			} else {
3579 
3580 				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3581 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3582 			}
3583 		}
3584 		splx(s);
3585 
3586 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3587 
3588 			xpt_release_devq(crs->ccb_h.path, /*count*/1,
3589 					 /*run_queue*/TRUE);
3590 		}
3591 		start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3592 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3593 		break;
3594 	}
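
	/*
	 * For reference, a peripheral driver uses XPT_REL_SIMQ to adjust a
	 * device's opening count.  A minimal sketch; the priority and the
	 * newopenings variable are illustrative:
	 *
	 *	struct ccb_relsim crs;
	 *
	 *	xpt_setup_ccb(&crs.ccb_h, periph->path, 1);
	 *	crs.ccb_h.func_code = XPT_REL_SIMQ;
	 *	crs.release_flags = RELSIM_ADJUST_OPENINGS;
	 *	crs.openings = newopenings;
	 *	crs.release_timeout = 0;
	 *	crs.qfrozen_cnt = 0;
	 *	xpt_action((union ccb *)&crs);
	 */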
3595 	case XPT_SCAN_BUS:
3596 		xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3597 		break;
3598 	case XPT_SCAN_LUN:
3599 		xpt_scan_lun(start_ccb->ccb_h.path->periph,
3600 			     start_ccb->ccb_h.path, start_ccb->crcn.flags,
3601 			     start_ccb);
3602 		break;
3603 	case XPT_DEBUG: {
3604 #ifdef CAMDEBUG
3605 		int s;
3606 
3607 		s = splcam();
3608 #ifdef CAM_DEBUG_DELAY
3609 		cam_debug_delay = CAM_DEBUG_DELAY;
3610 #endif
3611 		cam_dflags = start_ccb->cdbg.flags;
3612 		if (cam_dpath != NULL) {
3613 			xpt_free_path(cam_dpath);
3614 			cam_dpath = NULL;
3615 		}
3616 
3617 		if (cam_dflags != CAM_DEBUG_NONE) {
3618 			if (xpt_create_path(&cam_dpath, xpt_periph,
3619 					    start_ccb->ccb_h.path_id,
3620 					    start_ccb->ccb_h.target_id,
3621 					    start_ccb->ccb_h.target_lun) !=
3622 					    CAM_REQ_CMP) {
3623 				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3624 				cam_dflags = CAM_DEBUG_NONE;
3625 			} else {
3626 				start_ccb->ccb_h.status = CAM_REQ_CMP;
3627 				xpt_print_path(cam_dpath);
3628 				printf("debugging flags now %x\n", cam_dflags);
3629 			}
3630 		} else {
3631 			cam_dpath = NULL;
3632 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3633 		}
3634 		splx(s);
3635 #else /* !CAMDEBUG */
3636 		start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3637 #endif /* CAMDEBUG */
3638 		break;
3639 	}
3640 	case XPT_NOOP:
3641 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3642 			xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3643 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3644 		break;
3645 	default:
3646 	case XPT_SDEV_TYPE:
3647 	case XPT_TERM_IO:
3648 	case XPT_ENG_INQ:
3649 		/* XXX Implement */
3650 		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3651 		break;
3652 	}
3653 	splx(iopl);
3654 }
3655 
3656 void
3657 xpt_polled_action(union ccb *start_ccb)
3658 {
3659 	int	  s;
3660 	u_int32_t timeout;
3661 	struct	  cam_sim *sim;
3662 	struct	  cam_devq *devq;
3663 	struct	  cam_ed *dev;
3664 
3665 	GIANT_REQUIRED;
3666 
3667 	timeout = start_ccb->ccb_h.timeout;
3668 	sim = start_ccb->ccb_h.path->bus->sim;
3669 	devq = sim->devq;
3670 	dev = start_ccb->ccb_h.path->device;
3671 
3672 	s = splcam();
3673 
3674 	/*
3675 	 * Steal an opening so that no other queued requests
3676 	 * can get it before us while we simulate interrupts.
3677 	 */
3678 	dev->ccbq.devq_openings--;
3679 	dev->ccbq.dev_openings--;
3680 
3681 	while (((devq != NULL && devq->send_openings <= 0) ||
3682 	   dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
3683 		DELAY(1000);
3684 		(*(sim->sim_poll))(sim);
3685 		camisr(&cam_bioq);
3686 	}
3687 
3688 	dev->ccbq.devq_openings++;
3689 	dev->ccbq.dev_openings++;
3690 
3691 	if (timeout != 0) {
3692 		xpt_action(start_ccb);
3693 		while (--timeout > 0) {
3694 			(*(sim->sim_poll))(sim);
3695 			camisr(&cam_bioq);
3696 			if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3697 			    != CAM_REQ_INPROG)
3698 				break;
3699 			DELAY(1000);
3700 		}
3701 		if (timeout == 0) {
3702 			/*
3703 			 * XXX Is it worth adding a sim_timeout entry
3704 			 * point so we can attempt recovery?  If
3705 			 * this is only used for dumps, I don't think
3706 			 * it is.
3707 			 */
3708 			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3709 		}
3710 	} else {
3711 		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3712 	}
3713 	splx(s);
3714 }
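
/*
 * A minimal sketch of a polled submission, modeled on a disk driver's dump
 * path; the completion routine "mydone", priority, and timeout values are
 * illustrative:
 *
 *	struct ccb_scsiio csio;
 *
 *	xpt_setup_ccb(&csio.ccb_h, periph->path, 1);
 *	scsi_synchronize_cache(&csio, 1, mydone, MSG_SIMPLE_Q_TAG,
 *			       0, 0, SSD_FULL_SIZE, 60 * 1000);
 *	xpt_polled_action((union ccb *)&csio);
 *	if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
 *		printf("polled command failed\n");
 */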
3715 
3716 /*
3717  * Schedule a peripheral driver to receive a ccb when its
3718  * target device has space for more transactions.
3719  */
3720 void
3721 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3722 {
3723 	struct cam_ed *device;
3724 	union ccb *work_ccb;
3725 	int s;
3726 	int runq;
3727 
3728 	GIANT_REQUIRED;
3729 
3730 	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3731 	device = perph->path->device;
3732 	s = splsoftcam();
3733 	if (periph_is_queued(perph)) {
3734 		/* Simply reorder based on new priority */
3735 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3736 			  ("   change priority to %d\n", new_priority));
3737 		if (new_priority < perph->pinfo.priority) {
3738 			camq_change_priority(&device->drvq,
3739 					     perph->pinfo.index,
3740 					     new_priority);
3741 		}
3742 		runq = 0;
3743 	} else if (SIM_DEAD(perph->path->bus->sim)) {
3744 		/* The SIM is gone so just call periph_start directly. */
3745 		work_ccb = xpt_get_ccb(perph->path->device);
3746 		splx(s);
3747 		if (work_ccb == NULL)
3748 			return; /* XXX */
3749 		xpt_setup_ccb(&work_ccb->ccb_h, perph->path, new_priority);
3750 		perph->pinfo.priority = new_priority;
3751 		perph->periph_start(perph, work_ccb);
3752 		return;
3753 	} else {
3754 		/* New entry on the queue */
3755 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3756 			  ("   added periph to queue\n"));
3757 		perph->pinfo.priority = new_priority;
3758 		perph->pinfo.generation = ++device->drvq.generation;
3759 		camq_insert(&device->drvq, &perph->pinfo);
3760 		runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3761 	}
3762 	splx(s);
3763 	if (runq != 0) {
3764 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3765 			  ("   calling xpt_run_devq\n"));
3766 		xpt_run_dev_allocq(perph->path->bus);
3767 	}
3768 }
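
/*
 * A peripheral driver typically calls xpt_schedule() from its strategy
 * routine after queueing new work; the XPT later hands the driver a CCB
 * through its periph_start entry point.  A minimal sketch, with an
 * illustrative priority:
 *
 *	bioq_disksort(&softc->bio_queue, bp);
 *	xpt_schedule(periph, 1);
 *
 * periph_start then dequeues a bio and fills in the CCB it was handed.
 */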
3769 
3770 
3771 /*
3772  * Schedule a device to run on a given queue.
3773  * If the device was inserted as a new entry on the queue,
3774  * return 1 meaning the device queue should be run. If we
3775  * were already queued, implying someone else has already
3776  * started the queue, return 0 so the caller doesn't attempt
3777  * to run the queue.  Must be run at splsoftcam or splcam
3778  * (splcam encompasses splsoftcam).
3779  */
3780 static int
3781 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3782 		 u_int32_t new_priority)
3783 {
3784 	int retval;
3785 	u_int32_t old_priority;
3786 
3787 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3788 
3789 	old_priority = pinfo->priority;
3790 
3791 	/*
3792 	 * Are we already queued?
3793 	 */
3794 	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3795 		/* Simply reorder based on new priority */
3796 		if (new_priority < old_priority) {
3797 			camq_change_priority(queue, pinfo->index,
3798 					     new_priority);
3799 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3800 					("changed priority to %d\n",
3801 					 new_priority));
3802 		}
3803 		retval = 0;
3804 	} else {
3805 		/* New entry on the queue */
3806 		if (new_priority < old_priority)
3807 			pinfo->priority = new_priority;
3808 
3809 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3810 				("Inserting onto queue\n"));
3811 		pinfo->generation = ++queue->generation;
3812 		camq_insert(queue, pinfo);
3813 		retval = 1;
3814 	}
3815 	return (retval);
3816 }
3817 
3818 static void
3819 xpt_run_dev_allocq(struct cam_eb *bus)
3820 {
3821 	struct	cam_devq *devq;
3822 	int	s;
3823 
3824 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3825 	devq = bus->sim->devq;
3826 
3827 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3828 			("   qfrozen_cnt == 0x%x, entries == %d, "
3829 			 "openings == %d, active == %d\n",
3830 			 devq->alloc_queue.qfrozen_cnt,
3831 			 devq->alloc_queue.entries,
3832 			 devq->alloc_openings,
3833 			 devq->alloc_active));
3834 
3835 	s = splsoftcam();
3836 	devq->alloc_queue.qfrozen_cnt++;
3837 	while ((devq->alloc_queue.entries > 0)
3838 	    && (devq->alloc_openings > 0)
3839 	    && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3840 		struct	cam_ed_qinfo *qinfo;
3841 		struct	cam_ed *device;
3842 		union	ccb *work_ccb;
3843 		struct	cam_periph *drv;
3844 		struct	camq *drvq;
3845 
3846 		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3847 							   CAMQ_HEAD);
3848 		device = qinfo->device;
3849 
3850 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3851 				("running device %p\n", device));
3852 
3853 		drvq = &device->drvq;
3854 
3855 #ifdef CAMDEBUG
3856 		if (drvq->entries <= 0) {
3857 			panic("xpt_run_dev_allocq: "
3858 			      "Device on queue without any work to do");
3859 		}
3860 #endif
3861 		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3862 			devq->alloc_openings--;
3863 			devq->alloc_active++;
3864 			drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3865 			splx(s);
3866 			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3867 				      drv->pinfo.priority);
3868 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3869 					("calling periph start\n"));
3870 			drv->periph_start(drv, work_ccb);
3871 		} else {
3872 			/*
3873 			 * Malloc failure in alloc_ccb.
3874 			 *
3875 			 * XXX add us to a list to be run from free_ccb
3876 			 * if we don't have any ccbs active on this
3877 			 * device queue; otherwise we may never get run
3878 			 * again.
3879 			 */
3881 			break;
3882 		}
3883 
3884 		/* Raise IPL for possible insertion and test at top of loop */
3885 		s = splsoftcam();
3886 
3887 		if (drvq->entries > 0) {
3888 			/* We have more work.  Attempt to reschedule */
3889 			xpt_schedule_dev_allocq(bus, device);
3890 		}
3891 	}
3892 	devq->alloc_queue.qfrozen_cnt--;
3893 	splx(s);
3894 }
3895 
3896 static void
3897 xpt_run_dev_sendq(struct cam_eb *bus)
3898 {
3899 	struct	cam_devq *devq;
3900 	int	s;
3901 
3902 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3903 
3904 	devq = bus->sim->devq;
3905 
3906 	s = splcam();
3907 	devq->send_queue.qfrozen_cnt++;
3908 	splx(s);
3909 	s = splsoftcam();
3910 	while ((devq->send_queue.entries > 0)
3911 	    && (devq->send_openings > 0)) {
3912 		struct	cam_ed_qinfo *qinfo;
3913 		struct	cam_ed *device;
3914 		union ccb *work_ccb;
3915 		struct	cam_sim *sim;
3916 		int	ospl;
3917 
3918 		ospl = splcam();
3919 		if (devq->send_queue.qfrozen_cnt > 1) {
3920 			splx(ospl);
3921 			break;
3922 		}
3923 
3924 		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3925 							   CAMQ_HEAD);
3926 		device = qinfo->device;
3927 
3928 		/*
3929 		 * If the device has been "frozen", don't attempt
3930 		 * to run it.
3931 		 */
3932 		if (device->qfrozen_cnt > 0) {
3933 			splx(ospl);
3934 			continue;
3935 		}
3936 
3937 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3938 				("running device %p\n", device));
3939 
3940 		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3941 		if (work_ccb == NULL) {
3942 			printf("device on run queue with no ccbs???\n");
3943 			splx(ospl);
3944 			continue;
3945 		}
3946 
3947 		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3948 
3949 		 	if (num_highpower <= 0) {
3950 				/*
3951 				 * We got a high power command, but we
3952 				 * don't have any available slots.  Freeze
3953 				 * the device queue until we have a slot
3954 				 * available.
3955 				 */
3956 				device->qfrozen_cnt++;
3957 				STAILQ_INSERT_TAIL(&highpowerq,
3958 						   &work_ccb->ccb_h,
3959 						   xpt_links.stqe);
3960 
3961 				splx(ospl);
3962 				continue;
3963 			} else {
3964 				/*
3965 				 * Consume a high power slot while
3966 				 * this ccb runs.
3967 				 */
3968 				num_highpower--;
3969 			}
3970 		}
3971 		devq->active_dev = device;
3972 		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3973 
3974 		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3975 		splx(ospl);
3976 
3977 		devq->send_openings--;
3978 		devq->send_active++;
3979 
3980 		if (device->ccbq.queue.entries > 0)
3981 			xpt_schedule_dev_sendq(bus, device);
3982 
3983 		if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3984 			/*
3985 			 * The client wants to freeze the queue
3986 			 * after this CCB is sent.
3987 			 */
3988 			ospl = splcam();
3989 			device->qfrozen_cnt++;
3990 			splx(ospl);
3991 		}
3992 
3993 		splx(s);
3994 
3995 		/* In Target mode, the peripheral driver knows best... */
3996 		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3997 			if ((device->inq_flags & SID_CmdQue) != 0
3998 			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3999 				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
4000 			else
4001 				/*
4002 				 * Clear this in case of a retried CCB that
4003 				 * failed due to a rejected tag.
4004 				 */
4005 				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
4006 		}
4007 
4008 		/*
4009 		 * Device queues can be shared among multiple sim instances
4010 		 * that reside on different busses.  Use the SIM in the queue
4011 		 * CCB's path, rather than the one in the bus that was passed
4012 		 * into this function.
4013 		 */
4014 		sim = work_ccb->ccb_h.path->bus->sim;
4015 		(*(sim->sim_action))(sim, work_ccb);
4016 
4017 		ospl = splcam();
4018 		devq->active_dev = NULL;
4019 		splx(ospl);
4020 		/* Raise IPL for possible insertion and test at top of loop */
4021 		s = splsoftcam();
4022 	}
4023 	splx(s);
4024 	s = splcam();
4025 	devq->send_queue.qfrozen_cnt--;
4026 	splx(s);
4027 }
4028 
4029 /*
4030  * This function merges the payload and the peripheral-settable header
4031  * fields from the slave ccb into the master ccb, while keeping the
4032  * remaining header fields in the master ccb constant.
4032  */
4033 void
4034 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
4035 {
4036 	GIANT_REQUIRED;
4037 
4038 	/*
4039 	 * Pull fields that are valid for peripheral drivers to set
4040 	 * into the master CCB along with the CCB "payload".
4041 	 */
4042 	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
4043 	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
4044 	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
4045 	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
4046 	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
4047 	      sizeof(union ccb) - sizeof(struct ccb_hdr));
4048 }
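
/*
 * A minimal sketch of the intended use: build a fresh request in a scratch
 * CCB and graft it onto a CCB the XPT already owns, preserving the master's
 * path and queue bookkeeping.  The completion routine "mydone", priority,
 * and timeout are illustrative:
 *
 *	union ccb scratch;
 *
 *	xpt_setup_ccb(&scratch.ccb_h, owned_ccb->ccb_h.path, 1);
 *	scsi_test_unit_ready(&scratch.csio, 1, mydone,
 *			     MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, 5000);
 *	xpt_merge_ccb(owned_ccb, &scratch);
 *	xpt_action(owned_ccb);
 */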
4049 
4050 void
4051 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
4052 {
4053 	GIANT_REQUIRED;
4054 
4055 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
4056 	ccb_h->pinfo.priority = priority;
4057 	ccb_h->path = path;
4058 	ccb_h->path_id = path->bus->path_id;
4059 	if (path->target)
4060 		ccb_h->target_id = path->target->target_id;
4061 	else
4062 		ccb_h->target_id = CAM_TARGET_WILDCARD;
4063 	if (path->device) {
4064 		ccb_h->target_lun = path->device->lun_id;
4065 		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
4066 	} else {
4067 		ccb_h->target_lun = CAM_TARGET_WILDCARD;
4068 	}
4069 	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
4070 	ccb_h->flags = 0;
4071 }
4072 
4073 /* Path manipulation functions */
4074 cam_status
4075 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
4076 		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
4077 {
4078 	struct	   cam_path *path;
4079 	cam_status status;
4080 
4081 	GIANT_REQUIRED;
4082 
4083 	path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
4084 
4085 	if (path == NULL) {
4086 		status = CAM_RESRC_UNAVAIL;
4087 		return(status);
4088 	}
4089 	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
4090 	if (status != CAM_REQ_CMP) {
4091 		free(path, M_CAMXPT);
4092 		path = NULL;
4093 	}
4094 	*new_path_ptr = path;
4095 	return (status);
4096 }
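
/*
 * Callers pair xpt_create_path() with xpt_free_path() once they are done
 * with the path.  A minimal sketch:
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id,
 *			    lun_id) != CAM_REQ_CMP)
 *		return;
 *	xpt_print_path(path);
 *	printf("device attached\n");
 *	xpt_free_path(path);
 */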
4097 
4098 static cam_status
4099 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
4100 		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
4101 {
4102 	struct	     cam_eb *bus;
4103 	struct	     cam_et *target;
4104 	struct	     cam_ed *device;
4105 	cam_status   status;
4106 	int	     s;
4107 
4108 	status = CAM_REQ_CMP;	/* Completed without error */
4109 	target = NULL;		/* Wildcarded */
4110 	device = NULL;		/* Wildcarded */
4111 
4112 	/*
4113 	 * We will potentially modify the EDT, so block interrupts
4114 	 * that may attempt to create cam paths.
4115 	 */
4116 	s = splcam();
4117 	bus = xpt_find_bus(path_id);
4118 	if (bus == NULL) {
4119 		status = CAM_PATH_INVALID;
4120 	} else {
4121 		target = xpt_find_target(bus, target_id);
4122 		if (target == NULL) {
4123 			/* Create one */
4124 			struct cam_et *new_target;
4125 
4126 			new_target = xpt_alloc_target(bus, target_id);
4127 			if (new_target == NULL) {
4128 				status = CAM_RESRC_UNAVAIL;
4129 			} else {
4130 				target = new_target;
4131 			}
4132 		}
4133 		if (target != NULL) {
4134 			device = xpt_find_device(target, lun_id);
4135 			if (device == NULL) {
4136 				/* Create one */
4137 				struct cam_ed *new_device;
4138 
4139 				new_device = xpt_alloc_device(bus,
4140 							      target,
4141 							      lun_id);
4142 				if (new_device == NULL) {
4143 					status = CAM_RESRC_UNAVAIL;
4144 				} else {
4145 					device = new_device;
4146 				}
4147 			}
4148 		}
4149 	}
4150 	splx(s);
4151 
4152 	/*
4153 	 * Only touch the user's data if we are successful.
4154 	 */
4155 	if (status == CAM_REQ_CMP) {
4156 		new_path->periph = perph;
4157 		new_path->bus = bus;
4158 		new_path->target = target;
4159 		new_path->device = device;
4160 		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
4161 	} else {
4162 		if (device != NULL)
4163 			xpt_release_device(bus, target, device);
4164 		if (target != NULL)
4165 			xpt_release_target(bus, target);
4166 		if (bus != NULL)
4167 			xpt_release_bus(bus);
4168 	}
4169 	return (status);
4170 }
4171 
4172 static void
4173 xpt_release_path(struct cam_path *path)
4174 {
4175 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
4176 	if (path->device != NULL) {
4177 		xpt_release_device(path->bus, path->target, path->device);
4178 		path->device = NULL;
4179 	}
4180 	if (path->target != NULL) {
4181 		xpt_release_target(path->bus, path->target);
4182 		path->target = NULL;
4183 	}
4184 	if (path->bus != NULL) {
4185 		xpt_release_bus(path->bus);
4186 		path->bus = NULL;
4187 	}
4188 }
4189 
4190 void
4191 xpt_free_path(struct cam_path *path)
4192 {
4193 	GIANT_REQUIRED;
4194 
4195 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
4196 	xpt_release_path(path);
4197 	free(path, M_CAMXPT);
4198 }
4199 
4200 
4201 /*
4202  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
4203  * in path1, 2 for match with wildcards in path2.
4204  */
4205 int
4206 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
4207 {
4208 	int retval = 0;
4209 
4210 	GIANT_REQUIRED;
4211 
4212 	if (path1->bus != path2->bus) {
4213 		if (path1->bus->path_id == CAM_BUS_WILDCARD)
4214 			retval = 1;
4215 		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
4216 			retval = 2;
4217 		else
4218 			return (-1);
4219 	}
4220 	if (path1->target != path2->target) {
4221 		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
4222 			if (retval == 0)
4223 				retval = 1;
4224 		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4225 			retval = 2;
4226 		else
4227 			return (-1);
4228 	}
4229 	if (path1->device != path2->device) {
4230 		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4231 			if (retval == 0)
4232 				retval = 1;
4233 		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4234 			retval = 2;
4235 		else
4236 			return (-1);
4237 	}
4238 	return (retval);
4239 }
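
/*
 * Callers generally only care whether two paths overlap at all.  A minimal
 * sketch, as an async handler might use it to decide whether an event
 * applies to its own device:
 *
 *	if (xpt_path_comp(periph->path, event_path) < 0)
 *		return;
 *
 * A non-negative return (0, 1, or 2) means an exact or wildcard match, so
 * the event should be handled.
 */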
4240 
4241 void
4242 xpt_print_path(struct cam_path *path)
4243 {
4244 	GIANT_REQUIRED;
4245 
4246 	if (path == NULL)
4247 		printf("(nopath): ");
4248 	else {
4249 		if (path->periph != NULL)
4250 			printf("(%s%d:", path->periph->periph_name,
4251 			       path->periph->unit_number);
4252 		else
4253 			printf("(noperiph:");
4254 
4255 		if (path->bus != NULL)
4256 			printf("%s%d:%d:", path->bus->sim->sim_name,
4257 			       path->bus->sim->unit_number,
4258 			       path->bus->sim->bus_id);
4259 		else
4260 			printf("nobus:");
4261 
4262 		if (path->target != NULL)
4263 			printf("%d:", path->target->target_id);
4264 		else
4265 			printf("X:");
4266 
4267 		if (path->device != NULL)
4268 			printf("%d): ", path->device->lun_id);
4269 		else
4270 			printf("X): ");
4271 	}
4272 }
4273 
4274 int
4275 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4276 {
4277 	struct sbuf sb;
4278 
4279 	GIANT_REQUIRED;
4280 
4281 	sbuf_new(&sb, str, str_len, 0);
4282 
4283 	if (path == NULL)
4284 		sbuf_printf(&sb, "(nopath): ");
4285 	else {
4286 		if (path->periph != NULL)
4287 			sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4288 				    path->periph->unit_number);
4289 		else
4290 			sbuf_printf(&sb, "(noperiph:");
4291 
4292 		if (path->bus != NULL)
4293 			sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4294 				    path->bus->sim->unit_number,
4295 				    path->bus->sim->bus_id);
4296 		else
4297 			sbuf_printf(&sb, "nobus:");
4298 
4299 		if (path->target != NULL)
4300 			sbuf_printf(&sb, "%d:", path->target->target_id);
4301 		else
4302 			sbuf_printf(&sb, "X:");
4303 
4304 		if (path->device != NULL)
4305 			sbuf_printf(&sb, "%d): ", path->device->lun_id);
4306 		else
4307 			sbuf_printf(&sb, "X): ");
4308 	}
4309 	sbuf_finish(&sb);
4310 
4311 	return(sbuf_len(&sb));
4312 }
4313 
4314 path_id_t
4315 xpt_path_path_id(struct cam_path *path)
4316 {
4317 	GIANT_REQUIRED;
4318 
4319 	return(path->bus->path_id);
4320 }
4321 
4322 target_id_t
4323 xpt_path_target_id(struct cam_path *path)
4324 {
4325 	GIANT_REQUIRED;
4326 
4327 	if (path->target != NULL)
4328 		return (path->target->target_id);
4329 	else
4330 		return (CAM_TARGET_WILDCARD);
4331 }
4332 
4333 lun_id_t
4334 xpt_path_lun_id(struct cam_path *path)
4335 {
4336 	GIANT_REQUIRED;
4337 
4338 	if (path->device != NULL)
4339 		return (path->device->lun_id);
4340 	else
4341 		return (CAM_LUN_WILDCARD);
4342 }
4343 
4344 struct cam_sim *
4345 xpt_path_sim(struct cam_path *path)
4346 {
4347 	GIANT_REQUIRED;
4348 
4349 	return (path->bus->sim);
4350 }
4351 
4352 struct cam_periph*
4353 xpt_path_periph(struct cam_path *path)
4354 {
4355 	GIANT_REQUIRED;
4356 
4357 	return (path->periph);
4358 }
4359 
4360 /*
4361  * Release a CAM control block for the caller.  Remit the cost of the structure
4362  * to the device referenced by the path.  If this device had no 'credits'
4363  * and peripheral drivers have registered async callbacks for this
4364  * notification, call them now.
4365  */
4366 void
4367 xpt_release_ccb(union ccb *free_ccb)
4368 {
4369 	int	 s;
4370 	struct	 cam_path *path;
4371 	struct	 cam_ed *device;
4372 	struct	 cam_eb *bus;
4373 
4374 	GIANT_REQUIRED;
4375 
4376 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4377 	path = free_ccb->ccb_h.path;
4378 	device = path->device;
4379 	bus = path->bus;
4380 	s = splsoftcam();
4381 	cam_ccbq_release_opening(&device->ccbq);
4382 	if (xpt_ccb_count > xpt_max_ccbs) {
4383 		xpt_free_ccb(free_ccb);
4384 		xpt_ccb_count--;
4385 	} else {
4386 		SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
4387 	}
4388 	if (bus->sim->devq == NULL) {
4389 		splx(s);
4390 		return;
4391 	}
4392 	bus->sim->devq->alloc_openings++;
4393 	bus->sim->devq->alloc_active--;
4394 	/* XXX Turn this into an inline function - xpt_run_device?? */
4395 	if ((device_is_alloc_queued(device) == 0)
4396 	 && (device->drvq.entries > 0)) {
4397 		xpt_schedule_dev_allocq(bus, device);
4398 	}
4399 	splx(s);
4400 	if (dev_allocq_is_runnable(bus->sim->devq))
4401 		xpt_run_dev_allocq(bus);
4402 }
4403 
4404 /* Functions accessed by SIM drivers */
4405 
4406 /*
4407  * A sim structure, listing the SIM entry points and instance
4408  * identification info, is passed to xpt_bus_register to hook the SIM
4409  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
4410  * for this new bus and places it in the array of busses and assigns
4411  * it a path_id.  The path_id may be influenced by "hard wiring"
4412  * information specified by the user.  Once interrupt services are
4413  * availible, the bus will be probed.
4414  */
4415 int32_t
4416 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4417 {
4418 	struct cam_eb *new_bus;
4419 	struct cam_eb *old_bus;
4420 	struct ccb_pathinq cpi;
4421 	int s;
4422 
4423 	GIANT_REQUIRED;
4424 
4425 	sim->bus_id = bus;
4426 	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4427 					  M_CAMXPT, M_NOWAIT);
4428 	if (new_bus == NULL) {
4429 		/* Couldn't satisfy request */
4430 		return (CAM_RESRC_UNAVAIL);
4431 	}
4432 
4433 	if (strcmp(sim->sim_name, "xpt") != 0) {
4434 
4435 		sim->path_id =
4436 		    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4437 	}
4438 
4439 	TAILQ_INIT(&new_bus->et_entries);
4440 	new_bus->path_id = sim->path_id;
4441 	new_bus->sim = sim;
4442 	timevalclear(&new_bus->last_reset);
4443 	new_bus->flags = 0;
4444 	new_bus->refcount = 1;	/* Held until a bus_deregister event */
4445 	new_bus->generation = 0;
4446 	s = splcam();
4447 	old_bus = TAILQ_FIRST(&xpt_busses);
4448 	while (old_bus != NULL
4449 	    && old_bus->path_id < new_bus->path_id)
4450 		old_bus = TAILQ_NEXT(old_bus, links);
4451 	if (old_bus != NULL)
4452 		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4453 	else
4454 		TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
4455 	bus_generation++;
4456 	splx(s);
4457 
4458 	/* Notify interested parties */
4459 	if (sim->path_id != CAM_XPT_PATH_ID) {
4460 		struct cam_path path;
4461 
4462 		xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4463 			         CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4464 		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4465 		cpi.ccb_h.func_code = XPT_PATH_INQ;
4466 		xpt_action((union ccb *)&cpi);
4467 		xpt_async(AC_PATH_REGISTERED, &path, &cpi);
4468 		xpt_release_path(&path);
4469 	}
4470 	return (CAM_SUCCESS);
4471 }
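
/*
 * A minimal sketch of a SIM attach sequence under this (Giant-locked) API;
 * the action/poll routine names and the opening counts are illustrative:
 *
 *	struct cam_devq *devq;
 *	struct cam_sim *sim;
 *
 *	devq = cam_simq_alloc(MAX_TRANSACTIONS);
 *	if (devq == NULL)
 *		return (ENOMEM);
 *	sim = cam_sim_alloc(mysim_action, mysim_poll, "mysim", softc,
 *			    unit, 1, MAX_TAGGED, devq);
 *	if (sim == NULL || xpt_bus_register(sim, 0) != CAM_SUCCESS)
 *		goto fail;
 */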
4472 
4473 int32_t
4474 xpt_bus_deregister(path_id_t pathid)
4475 {
4476 	struct cam_path bus_path;
4477 	struct cam_ed *device;
4478 	struct cam_ed_qinfo *qinfo;
4479 	struct cam_devq *devq;
4480 	struct cam_periph *periph;
4481 	struct cam_sim *ccbsim;
4482 	union ccb *work_ccb;
4483 	cam_status status;
4484 
4485 	GIANT_REQUIRED;
4486 
4487 	status = xpt_compile_path(&bus_path, NULL, pathid,
4488 				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4489 	if (status != CAM_REQ_CMP)
4490 		return (status);
4491 
4492 	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4493 	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4494 
4495 	/* The SIM may be gone, so use a dummy SIM for any stray operations. */
4496 	devq = bus_path.bus->sim->devq;
4497 	bus_path.bus->sim = &cam_dead_sim;
4498 
4499 	/* Execute any pending operations now. */
4500 	while ((qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
4501 	    CAMQ_HEAD)) != NULL ||
4502 	    (qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
4503 	    CAMQ_HEAD)) != NULL) {
4504 		do {
4505 			device = qinfo->device;
4506 			work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
4507 			if (work_ccb != NULL) {
4508 				devq->active_dev = device;
4509 				cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
4510 				cam_ccbq_send_ccb(&device->ccbq, work_ccb);
4511 				ccbsim = work_ccb->ccb_h.path->bus->sim;
4512 				(*(ccbsim->sim_action))(ccbsim, work_ccb);
4513 			}
4514 
4515 			periph = (struct cam_periph *)camq_remove(&device->drvq,
4516 			    CAMQ_HEAD);
4517 			if (periph != NULL)
4518 				xpt_schedule(periph, periph->pinfo.priority);
4519 		} while (work_ccb != NULL || periph != NULL);
4520 	}
4521 
4522 	/* Make sure all completed CCBs are processed. */
4523 	while (!TAILQ_EMPTY(&cam_bioq)) {
4524 		camisr(&cam_bioq);
4525 
4526 		/* Repeat the asyncs for the benefit of any new devices. */
4527 		xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4528 		xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4529 	}
4530 
4531 	/* Release the reference count held while registered. */
4532 	xpt_release_bus(bus_path.bus);
4533 	xpt_release_path(&bus_path);
4534 
4535 	/* Recheck for more completed CCBs. */
4536 	while (!TAILQ_EMPTY(&cam_bioq))
4537 		camisr(&cam_bioq);
4538 
4539 	return (CAM_REQ_CMP);
4540 }
4541 
4542 static path_id_t
4543 xptnextfreepathid(void)
4544 {
4545 	struct cam_eb *bus;
4546 	path_id_t pathid;
4547 	const char *strval;
4548 
4549 	pathid = 0;
4550 	bus = TAILQ_FIRST(&xpt_busses);
4551 retry:
4552 	/* Find an unoccupied pathid */
4553 	while (bus != NULL
4554 	    && bus->path_id <= pathid) {
4555 		if (bus->path_id == pathid)
4556 			pathid++;
4557 		bus = TAILQ_NEXT(bus, links);
4558 	}
4559 
4560 	/*
4561 	 * Ensure that this pathid is not reserved for
4562 	 * a bus that may be registered in the future.
4563 	 */
4564 	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4565 		++pathid;
4566 		/* Start the search over */
4567 		goto retry;
4568 	}
4569 	return (pathid);
4570 }
4571 
4572 static path_id_t
4573 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4574 {
4575 	path_id_t pathid;
4576 	int i, dunit, val;
4577 	char buf[32];
4578 	const char *dname;
4579 
4580 	pathid = CAM_XPT_PATH_ID;
4581 	snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4582 	i = 0;
4583 	while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4584 		if (strcmp(dname, "scbus")) {
4585 			/* Avoid a bit of foot shooting. */
4586 			continue;
4587 		}
4588 		if (dunit < 0)		/* unwired?! */
4589 			continue;
4590 		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4591 			if (sim_bus == val) {
4592 				pathid = dunit;
4593 				break;
4594 			}
4595 		} else if (sim_bus == 0) {
4596 			/* Unspecified matches bus 0 */
4597 			pathid = dunit;
4598 			break;
4599 		} else {
4600 			printf("Ambiguous scbus configuration for %s%d "
4601 			       "bus %d, cannot wire down.  The kernel "
4602 			       "config entry for scbus%d should "
4603 			       "specify a controller bus.\n"
4604 			       "Scbus will be assigned dynamically.\n",
4605 			       sim_name, sim_unit, sim_bus, dunit);
4606 			break;
4607 		}
4608 	}
4609 
4610 	if (pathid == CAM_XPT_PATH_ID)
4611 		pathid = xptnextfreepathid();
4612 	return (pathid);
4613 }
4614 
4615 void
4616 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4617 {
4618 	struct cam_eb *bus;
4619 	struct cam_et *target, *next_target;
4620 	struct cam_ed *device, *next_device;
4621 	int s;
4622 
4623 	GIANT_REQUIRED;
4624 
4625 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4626 
4627 	/*
4628 	 * Most async events come from a CAM interrupt context.  In
4629 	 * a few cases, the error recovery code at the peripheral layer,
4630 	 * which may run from our SWI or a process context, may signal
4631 	 * deferred events with a call to xpt_async. Ensure async
4632 	 * notifications are serialized by blocking cam interrupts.
4633 	 */
4634 	s = splcam();
4635 
4636 	bus = path->bus;
4637 
4638 	if (async_code == AC_BUS_RESET) {
4639 		int s;
4640 
4641 		s = splclock();
4642 		/* Update our notion of when the last reset occurred */
4643 		microtime(&bus->last_reset);
4644 		splx(s);
4645 	}
4646 
4647 	for (target = TAILQ_FIRST(&bus->et_entries);
4648 	     target != NULL;
4649 	     target = next_target) {
4650 
4651 		next_target = TAILQ_NEXT(target, links);
4652 
4653 		if (path->target != target
4654 		 && path->target->target_id != CAM_TARGET_WILDCARD
4655 		 && target->target_id != CAM_TARGET_WILDCARD)
4656 			continue;
4657 
4658 		if (async_code == AC_SENT_BDR) {
4659 			int s;
4660 
4661 			/* Update our notion of when the last reset occurred */
4662 			s = splclock();
4663 			microtime(&path->target->last_reset);
4664 			splx(s);
4665 		}
4666 
4667 		for (device = TAILQ_FIRST(&target->ed_entries);
4668 		     device != NULL;
4669 		     device = next_device) {
4670 
4671 			next_device = TAILQ_NEXT(device, links);
4672 
4673 			if (path->device != device
4674 			 && path->device->lun_id != CAM_LUN_WILDCARD
4675 			 && device->lun_id != CAM_LUN_WILDCARD)
4676 				continue;
4677 
4678 			xpt_dev_async(async_code, bus, target,
4679 				      device, async_arg);
4680 
4681 			xpt_async_bcast(&device->asyncs, async_code,
4682 					path, async_arg);
4683 		}
4684 	}
4685 
4686 	/*
4687 	 * If this wasn't a fully wildcarded async, tell all
4688 	 * clients that want all async events.
4689 	 */
4690 	if (bus != xpt_periph->path->bus)
4691 		xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4692 				path, async_arg);
4693 	splx(s);
4694 }
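
/*
 * A minimal sketch of the common SIM-side use: broadcast a bus reset on a
 * path that wildcards the whole bus:
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
 *			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) ==
 *	    CAM_REQ_CMP) {
 *		xpt_async(AC_BUS_RESET, path, NULL);
 *		xpt_free_path(path);
 *	}
 */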
4695 
4696 static void
4697 xpt_async_bcast(struct async_list *async_head,
4698 		u_int32_t async_code,
4699 		struct cam_path *path, void *async_arg)
4700 {
4701 	struct async_node *cur_entry;
4702 
4703 	cur_entry = SLIST_FIRST(async_head);
4704 	while (cur_entry != NULL) {
4705 		struct async_node *next_entry;
4706 		/*
4707 		 * Grab the next list entry before we call the current
4708 		 * entry's callback.  This is because the callback function
4709 		 * can delete its async callback entry.
4710 		 */
4711 		next_entry = SLIST_NEXT(cur_entry, links);
4712 		if ((cur_entry->event_enable & async_code) != 0)
4713 			cur_entry->callback(cur_entry->callback_arg,
4714 					    async_code, path,
4715 					    async_arg);
4716 		cur_entry = next_entry;
4717 	}
4718 }
4719 
4720 /*
4721  * Handle any per-device event notifications that require action by the XPT.
4722  */
4723 static void
4724 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4725 	      struct cam_ed *device, void *async_arg)
4726 {
4727 	cam_status status;
4728 	struct cam_path newpath;
4729 
4730 	/*
4731 	 * We only need to handle events for real devices.
4732 	 */
4733 	if (target->target_id == CAM_TARGET_WILDCARD
4734 	 || device->lun_id == CAM_LUN_WILDCARD)
4735 		return;
4736 
4737 	/*
4738 	 * We need our own path with wildcards expanded to
4739 	 * handle certain types of events.
4740 	 */
4741 	if ((async_code == AC_SENT_BDR)
4742 	 || (async_code == AC_BUS_RESET)
4743 	 || (async_code == AC_INQ_CHANGED))
4744 		status = xpt_compile_path(&newpath, NULL,
4745 					  bus->path_id,
4746 					  target->target_id,
4747 					  device->lun_id);
4748 	else
4749 		status = CAM_REQ_CMP_ERR;
4750 
4751 	if (status == CAM_REQ_CMP) {
4752 
4753 		/*
4754 		 * Allow transfer negotiation to occur in a
4755 		 * tag free environment.
4756 		 */
4757 		if (async_code == AC_SENT_BDR
4758 		 || async_code == AC_BUS_RESET)
4759 			xpt_toggle_tags(&newpath);
4760 
4761 		if (async_code == AC_INQ_CHANGED) {
4762 			/*
4763 			 * We've sent a start unit command, or
4764 			 * something similar, to a device that
4765 			 * may have caused its inquiry data to
4766 			 * change. So we re-scan the device to
4767 			 * refresh the inquiry data for it.
4768 			 */
4769 			xpt_scan_lun(newpath.periph, &newpath,
4770 				     CAM_EXPECT_INQ_CHANGE, NULL);
4771 		}
4772 		xpt_release_path(&newpath);
4773 	} else if (async_code == AC_LOST_DEVICE) {
4774 		device->flags |= CAM_DEV_UNCONFIGURED;
4775 	} else if (async_code == AC_TRANSFER_NEG) {
4776 		struct ccb_trans_settings *settings;
4777 
4778 		settings = (struct ccb_trans_settings *)async_arg;
4779 		xpt_set_transfer_settings(settings, device,
4780 					  /*async_update*/TRUE);
4781 	}
4782 }
4783 
4784 u_int32_t
4785 xpt_freeze_devq(struct cam_path *path, u_int count)
4786 {
4787 	int s;
4788 	struct ccb_hdr *ccbh;
4789 
4790 	GIANT_REQUIRED;
4791 
4792 	s = splcam();
4793 	path->device->qfrozen_cnt += count;
4794 
4795 	/*
4796 	 * Mark the last CCB in the queue as needing
4797 	 * to be requeued if the driver hasn't
4798 	 * changed its state yet.  This fixes a race
4799 	 * where a ccb is just about to be queued to
4800 	 * a controller driver when its interrupt routine
4801 	 * freezes the queue.  To completely close the
4802 	 * hole, controller drivers must check to see
4803 	 * if a ccb's status is still CAM_REQ_INPROG
4804 	 * under spl protection just before they queue
4805 	 * the CCB.  See ahc_action/ahc_freeze_devq for
4806 	 * an example.
4807 	 */
4808 	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4809 	if (ccbh && ccbh->status == CAM_REQ_INPROG)
4810 		ccbh->status = CAM_REQUEUE_REQ;
4811 	splx(s);
4812 	return (path->device->qfrozen_cnt);
4813 }
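
/*
 * A minimal sketch of the driver-side check described above, done just
 * before a CCB is committed to the hardware; the exact disposition of a
 * requeued CCB is driver-specific:
 *
 *	s = splcam();
 *	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
 *		splx(s);
 *		xpt_done(ccb);
 *		return;
 *	}
 *	(commit the CCB to the controller)
 *	splx(s);
 */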
4814 
4815 u_int32_t
4816 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4817 {
4818 	GIANT_REQUIRED;
4819 
4820 	sim->devq->send_queue.qfrozen_cnt += count;
4821 	if (sim->devq->active_dev != NULL) {
4822 		struct ccb_hdr *ccbh;
4823 
4824 		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4825 				  ccb_hdr_tailq);
4826 		if (ccbh && ccbh->status == CAM_REQ_INPROG)
4827 			ccbh->status = CAM_REQUEUE_REQ;
4828 	}
4829 	return (sim->devq->send_queue.qfrozen_cnt);
4830 }
4831 
4832 static void
4833 xpt_release_devq_timeout(void *arg)
4834 {
4835 	struct cam_ed *device;
4836 
4837 	device = (struct cam_ed *)arg;
4838 
4839 	xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4840 }
4841 
4842 void
4843 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4844 {
4845 	GIANT_REQUIRED;
4846 
4847 	xpt_release_devq_device(path->device, count, run_queue);
4848 }
4849 
4850 static void
4851 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4852 {
4853 	int	rundevq;
4854 	int	s0, s1;
4855 
4856 	rundevq = 0;
4857 	s0 = splsoftcam();
4858 	s1 = splcam();
4859 	if (dev->qfrozen_cnt > 0) {
4860 
4861 		count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4862 		dev->qfrozen_cnt -= count;
4863 		if (dev->qfrozen_cnt == 0) {
4864 
4865 			/*
4866 			 * No longer need to wait for a successful
4867 			 * command completion.
4868 			 */
4869 			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4870 
4871 			/*
4872 			 * Remove any timeouts that might be scheduled
4873 			 * to release this queue.
4874 			 */
4875 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4876 				untimeout(xpt_release_devq_timeout, dev,
4877 					  dev->c_handle);
4878 				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4879 			}
4880 
4881 			/*
4882 			 * Now that we are unfrozen, schedule the
4883 			 * device so any pending transactions are
4884 			 * run.
4885 			 */
4886 			if ((dev->ccbq.queue.entries > 0)
4887 			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4888 			 && (run_queue != 0)) {
4889 				rundevq = 1;
4890 			}
4891 		}
4892 	}
4893 	splx(s1);
4894 	if (rundevq != 0)
4895 		xpt_run_dev_sendq(dev->target->bus);
4896 	splx(s0);
4897 }
4898 
4899 void
4900 xpt_release_simq(struct cam_sim *sim, int run_queue)
4901 {
4902 	int	s;
4903 	struct	camq *sendq;
4904 
4905 	GIANT_REQUIRED;
4906 
4907 	sendq = &(sim->devq->send_queue);
4908 	s = splcam();
4909 	if (sendq->qfrozen_cnt > 0) {
4910 
4911 		sendq->qfrozen_cnt--;
4912 		if (sendq->qfrozen_cnt == 0) {
4913 			struct cam_eb *bus;
4914 
4915 			/*
4916 			 * If there is a timeout scheduled to release this
4917 			 * sim queue, remove it.  The queue frozen count is
4918 			 * already at 0.
4919 			 */
4920 			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4921 				untimeout(xpt_release_simq_timeout, sim,
4922 					  sim->c_handle);
4923 				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4924 			}
4925 			bus = xpt_find_bus(sim->path_id);
4926 			splx(s);
4927 
4928 			if (run_queue) {
4929 				/*
4930 				 * Now that we are unfrozen, run the send queue.
4931 				 */
4932 				xpt_run_dev_sendq(bus);
4933 			}
4934 			xpt_release_bus(bus);
4935 		} else
4936 			splx(s);
4937 	} else
4938 		splx(s);
4939 }
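
/*
 * SIMs typically pair xpt_freeze_simq() and xpt_release_simq() around a
 * temporary resource shortage: freeze the queue, hand the CCB back for
 * requeueing, and release the queue once resources return.  A minimal
 * sketch:
 *
 *	xpt_freeze_simq(sim, 1);
 *	ccb->ccb_h.status = CAM_REQUEUE_REQ;
 *	xpt_done(ccb);
 *
 * and later, from the resource-available handler:
 *
 *	xpt_release_simq(sim, TRUE);
 */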
4940 
4941 static void
4942 xpt_release_simq_timeout(void *arg)
4943 {
4944 	struct cam_sim *sim;
4945 
4946 	sim = (struct cam_sim *)arg;
4947 	xpt_release_simq(sim, /* run_queue */ TRUE);
4948 }
4949 
4950 void
4951 xpt_done(union ccb *done_ccb)
4952 {
4953 	int s;
4954 
4955 	s = splcam();
4956 
4957 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4958 	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4959 		/*
4960 		 * Queue up any of the "non-immediate" type of ccbs
4961 		 * for handling by our SWI handler.
4962 		 */
4963 		switch (done_ccb->ccb_h.path->periph->type) {
4964 		case CAM_PERIPH_BIO:
4965 			mtx_lock(&cam_bioq_lock);
4966 			TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4967 					  sim_links.tqe);
4968 			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4969 			mtx_unlock(&cam_bioq_lock);
4970 			swi_sched(cambio_ih, 0);
4971 			break;
4972 		default:
4973 			panic("unknown periph type %d",
4974 			    done_ccb->ccb_h.path->periph->type);
4975 		}
4976 	}
4977 	splx(s);
4978 }
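
/*
 * A minimal sketch of a SIM completion path: fill in the result, then hand
 * the CCB back for SWI processing:
 *
 *	ccb->csio.resid = 0;
 *	ccb->ccb_h.status = CAM_REQ_CMP;
 *	xpt_done(ccb);
 */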
4979 
4980 union ccb *
4981 xpt_alloc_ccb(void)
4982 {
4983 	union ccb *new_ccb;
4984 
4985 	GIANT_REQUIRED;
4986 
4987 	new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_WAITOK);
4988 	return (new_ccb);
4989 }
4990 
4991 union ccb *
4992 xpt_alloc_ccb_nowait(void)
4993 {
4994 	union ccb *new_ccb;
4995 
4996 	GIANT_REQUIRED;
4997 
4998 	new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_NOWAIT);
4999 	return (new_ccb);
5000 }
5001 
5002 void
5003 xpt_free_ccb(union ccb *free_ccb)
5004 {
5005 	free(free_ccb, M_CAMXPT);
5006 }
5007 
5008 
5009 
5010 /* Private XPT functions */
5011 
5012 /*
5013  * Get a CAM control block for the caller. Charge the structure to the device
5014  * referenced by the path.  If this device has no 'credits' then the
5015  * device already has the maximum number of outstanding operations under way
5016  * and we return NULL. If we don't have sufficient resources to allocate more
5017  * ccbs, we also return NULL.
5018  */
5019 static union ccb *
5020 xpt_get_ccb(struct cam_ed *device)
5021 {
5022 	union ccb *new_ccb;
5023 	int s;
5024 
5025 	s = splsoftcam();
5026 	if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
5027 		new_ccb = xpt_alloc_ccb_nowait();
5028 		if (new_ccb == NULL) {
5029 			splx(s);
5030 			return (NULL);
5031 		}
5032 		callout_handle_init(&new_ccb->ccb_h.timeout_ch);
5033 		SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
5034 				  xpt_links.sle);
5035 		xpt_ccb_count++;
5036 	}
5037 	cam_ccbq_take_opening(&device->ccbq);
5038 	SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
5039 	splx(s);
5040 	return (new_ccb);
5041 }
5042 
5043 static void
5044 xpt_release_bus(struct cam_eb *bus)
5045 {
5046 	int s;
5047 
5048 	s = splcam();
5049 	if ((--bus->refcount == 0)
5050 	 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
5051 		TAILQ_REMOVE(&xpt_busses, bus, links);
5052 		bus_generation++;
5053 		splx(s);
5054 		free(bus, M_CAMXPT);
5055 	} else
5056 		splx(s);
5057 }
5058 
5059 static struct cam_et *
5060 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
5061 {
5062 	struct cam_et *target;
5063 
5064 	target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
5065 	if (target != NULL) {
5066 		struct cam_et *cur_target;
5067 
5068 		TAILQ_INIT(&target->ed_entries);
5069 		target->bus = bus;
5070 		target->target_id = target_id;
5071 		target->refcount = 1;
5072 		target->generation = 0;
5073 		timevalclear(&target->last_reset);
5074 		/*
5075 		 * Hold a reference to our parent bus so it
5076 		 * will not go away before we do.
5077 		 */
5078 		bus->refcount++;
5079 
5080 		/* Insertion sort into our bus's target list */
5081 		cur_target = TAILQ_FIRST(&bus->et_entries);
5082 		while (cur_target != NULL && cur_target->target_id < target_id)
5083 			cur_target = TAILQ_NEXT(cur_target, links);
5084 
5085 		if (cur_target != NULL) {
5086 			TAILQ_INSERT_BEFORE(cur_target, target, links);
5087 		} else {
5088 			TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
5089 		}
5090 		bus->generation++;
5091 	}
5092 	return (target);
5093 }
5094 
5095 static void
5096 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
5097 {
5098 	int s;
5099 
5100 	s = splcam();
5101 	if ((--target->refcount == 0)
5102 	 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
5103 		TAILQ_REMOVE(&bus->et_entries, target, links);
5104 		bus->generation++;
5105 		splx(s);
5106 		free(target, M_CAMXPT);
5107 		xpt_release_bus(bus);
5108 	} else
5109 		splx(s);
5110 }
5111 
5112 static struct cam_ed *
5113 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
5114 {
5115 #ifdef CAM_NEW_TRAN_CODE
5116 	struct	   cam_path path;
5117 #endif /* CAM_NEW_TRAN_CODE */
5118 	struct	   cam_ed *device;
5119 	struct	   cam_devq *devq;
5120 	cam_status status;
5121 
5122 	if (SIM_DEAD(bus->sim))
5123 		return (NULL);
5124 
5125 	/* Make space for us in the device queue on our bus */
5126 	devq = bus->sim->devq;
5127 	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
5128 
5129 	if (status != CAM_REQ_CMP) {
5130 		device = NULL;
5131 	} else {
5132 		device = (struct cam_ed *)malloc(sizeof(*device),
5133 						 M_CAMXPT, M_NOWAIT);
5134 	}
5135 
5136 	if (device != NULL) {
5137 		struct cam_ed *cur_device;
5138 
5139 		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
5140 		device->alloc_ccb_entry.device = device;
5141 		cam_init_pinfo(&device->send_ccb_entry.pinfo);
5142 		device->send_ccb_entry.device = device;
5143 		device->target = target;
5144 		device->lun_id = lun_id;
5145 		/* Initialize our queues */
5146 		if (camq_init(&device->drvq, 0) != 0) {
5147 			free(device, M_CAMXPT);
5148 			return (NULL);
5149 		}
5150 		if (cam_ccbq_init(&device->ccbq,
5151 				  bus->sim->max_dev_openings) != 0) {
5152 			camq_fini(&device->drvq);
5153 			free(device, M_CAMXPT);
5154 			return (NULL);
5155 		}
5156 		SLIST_INIT(&device->asyncs);
5157 		SLIST_INIT(&device->periphs);
5158 		device->generation = 0;
5159 		device->owner = NULL;
5160 		/*
5161 		 * Take the default quirk entry until we have inquiry
5162 		 * data and can determine a better quirk to use.
5163 		 */
5164 		device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
5165 		bzero(&device->inq_data, sizeof(device->inq_data));
5166 		device->inq_flags = 0;
5167 		device->queue_flags = 0;
5168 		device->serial_num = NULL;
5169 		device->serial_num_len = 0;
5170 		device->qfrozen_cnt = 0;
5171 		device->flags = CAM_DEV_UNCONFIGURED;
5172 		device->tag_delay_count = 0;
5173 		device->tag_saved_openings = 0;
5174 		device->refcount = 1;
5175 		callout_handle_init(&device->c_handle);
5176 
5177 		/*
5178 		 * Hold a reference to our parent target so it
5179 		 * will not go away before we do.
5180 		 */
5181 		target->refcount++;
5182 
5183 		/*
5184 		 * XXX should be limited by number of CCBs this bus can
5185 		 * do.
5186 		 */
5187 		xpt_max_ccbs += device->ccbq.devq_openings;
5188 		/* Insertion sort into our target's device list */
5189 		cur_device = TAILQ_FIRST(&target->ed_entries);
5190 		while (cur_device != NULL && cur_device->lun_id < lun_id)
5191 			cur_device = TAILQ_NEXT(cur_device, links);
5192 		if (cur_device != NULL) {
5193 			TAILQ_INSERT_BEFORE(cur_device, device, links);
5194 		} else {
5195 			TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
5196 		}
5197 		target->generation++;
5198 #ifdef CAM_NEW_TRAN_CODE
5199 		if (lun_id != CAM_LUN_WILDCARD) {
5200 			xpt_compile_path(&path,
5201 					 NULL,
5202 					 bus->path_id,
5203 					 target->target_id,
5204 					 lun_id);
5205 			xpt_devise_transport(&path);
5206 			xpt_release_path(&path);
5207 		}
5208 #endif /* CAM_NEW_TRAN_CODE */
5209 	}
5210 	return (device);
5211 }
5212 
5213 static void
5214 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
5215 		   struct cam_ed *device)
5216 {
5217 	int s;
5218 
5219 	s = splcam();
5220 	if ((--device->refcount == 0)
5221 	 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
5222 		struct cam_devq *devq;
5223 
5224 		if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
5225 		 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
5226 			panic("Removing device while still queued for ccbs");
5227 
5228 		if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
5229 				untimeout(xpt_release_devq_timeout, device,
5230 					  device->c_handle);
5231 
5232 		TAILQ_REMOVE(&target->ed_entries, device, links);
5233 		target->generation++;
5234 		xpt_max_ccbs -= device->ccbq.devq_openings;
5235 		if (!SIM_DEAD(bus->sim)) {
5236 			/* Release our slot in the devq */
5237 			devq = bus->sim->devq;
5238 			cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
5239 		}
5240 		splx(s);
5241 		camq_fini(&device->drvq);
5242 		camq_fini(&device->ccbq.queue);
5243 		free(device, M_CAMXPT);
5244 		xpt_release_target(bus, target);
5245 	} else
5246 		splx(s);
5247 }
5248 
5249 static u_int32_t
5250 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
5251 {
5252 	int	s;
5253 	int	diff;
5254 	int	result;
5255 	struct	cam_ed *dev;
5256 
5257 	dev = path->device;
5258 	s = splsoftcam();
5259 
5260 	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
5261 	result = cam_ccbq_resize(&dev->ccbq, newopenings);
5262 	if (result == CAM_REQ_CMP && (diff < 0)) {
5263 		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
5264 	}
5265 	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5266 	 || (dev->inq_flags & SID_CmdQue) != 0)
5267 		dev->tag_saved_openings = newopenings;
5268 	/* Adjust the global limit */
5269 	xpt_max_ccbs += diff;
5270 	splx(s);
5271 	return (result);
5272 }
5273 
5274 static struct cam_eb *
5275 xpt_find_bus(path_id_t path_id)
5276 {
5277 	struct cam_eb *bus;
5278 
5279 	for (bus = TAILQ_FIRST(&xpt_busses);
5280 	     bus != NULL;
5281 	     bus = TAILQ_NEXT(bus, links)) {
5282 		if (bus->path_id == path_id) {
5283 			bus->refcount++;
5284 			break;
5285 		}
5286 	}
5287 	return (bus);
5288 }
5289 
5290 static struct cam_et *
5291 xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
5292 {
5293 	struct cam_et *target;
5294 
5295 	for (target = TAILQ_FIRST(&bus->et_entries);
5296 	     target != NULL;
5297 	     target = TAILQ_NEXT(target, links)) {
5298 		if (target->target_id == target_id) {
5299 			target->refcount++;
5300 			break;
5301 		}
5302 	}
5303 	return (target);
5304 }
5305 
5306 static struct cam_ed *
5307 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5308 {
5309 	struct cam_ed *device;
5310 
5311 	for (device = TAILQ_FIRST(&target->ed_entries);
5312 	     device != NULL;
5313 	     device = TAILQ_NEXT(device, links)) {
5314 		if (device->lun_id == lun_id) {
5315 			device->refcount++;
5316 			break;
5317 		}
5318 	}
5319 	return (device);
5320 }
5321 
5322 typedef struct {
5323 	union	ccb *request_ccb;
5324 	struct 	ccb_pathinq *cpi;
5325 	int	counter;
5326 } xpt_scan_bus_info;
5327 
5328 /*
5329  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
5330  * As the scan progresses, xpt_scan_bus is used as the
5331  * completion callback function.
5332  */
5333 static void
5334 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
5335 {
5336 	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5337 		  ("xpt_scan_bus\n"));
5338 	switch (request_ccb->ccb_h.func_code) {
5339 	case XPT_SCAN_BUS:
5340 	{
5341 		xpt_scan_bus_info *scan_info;
5342 		union	ccb *work_ccb;
5343 		struct	cam_path *path;
5344 		u_int	i;
5345 		u_int	max_target;
5346 		u_int	initiator_id;
5347 
5348 		/* Find out the characteristics of the bus */
5349 		work_ccb = xpt_alloc_ccb();
5350 		xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
5351 			      request_ccb->ccb_h.pinfo.priority);
5352 		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5353 		xpt_action(work_ccb);
5354 		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5355 			request_ccb->ccb_h.status = work_ccb->ccb_h.status;
5356 			xpt_free_ccb(work_ccb);
5357 			xpt_done(request_ccb);
5358 			return;
5359 		}
5360 
5361 		if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5362 			/*
5363 			 * Can't scan the bus on an adapter that
5364 			 * cannot perform the initiator role.
5365 			 */
5366 			request_ccb->ccb_h.status = CAM_REQ_CMP;
5367 			xpt_free_ccb(work_ccb);
5368 			xpt_done(request_ccb);
5369 			return;
5370 		}
5371 
5372 		/* Save some state for use while we probe for devices */
5373 		scan_info = (xpt_scan_bus_info *)
5374 		    malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
5375 		scan_info->request_ccb = request_ccb;
5376 		scan_info->cpi = &work_ccb->cpi;
5377 
5378 		/* Cache on our stack so we can work asynchronously */
5379 		max_target = scan_info->cpi->max_target;
5380 		initiator_id = scan_info->cpi->initiator_id;
5381 
5382 
5383 		/*
5384 		 * We can scan all targets in parallel, or do it sequentially.
5385 		 */
5386 		if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5387 			max_target = 0;
5388 			scan_info->counter = 0;
5389 		} else {
5390 			scan_info->counter = scan_info->cpi->max_target + 1;
5391 			if (scan_info->cpi->initiator_id < scan_info->counter) {
5392 				scan_info->counter--;
5393 			}
5394 		}
5395 
5396 		for (i = 0; i <= max_target; i++) {
5397 			cam_status status;
5398 			if (i == initiator_id)
5399 				continue;
5400 
5401 			status = xpt_create_path(&path, xpt_periph,
5402 						 request_ccb->ccb_h.path_id,
5403 						 i, 0);
5404 			if (status != CAM_REQ_CMP) {
5405 				printf("xpt_scan_bus: xpt_create_path failed"
5406 				       " with status %#x, bus scan halted\n",
5407 				       status);
5408 				free(scan_info, M_TEMP);
5409 				request_ccb->ccb_h.status = status;
5410 				xpt_free_ccb(work_ccb);
5411 				xpt_done(request_ccb);
5412 				break;
5413 			}
5414 			work_ccb = xpt_alloc_ccb();
5415 			xpt_setup_ccb(&work_ccb->ccb_h, path,
5416 				      request_ccb->ccb_h.pinfo.priority);
5417 			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5418 			work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5419 			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5420 			work_ccb->crcn.flags = request_ccb->crcn.flags;
5421 			xpt_action(work_ccb);
5422 		}
5423 		break;
5424 	}
5425 	case XPT_SCAN_LUN:
5426 	{
5427 		cam_status status;
5428 		struct cam_path *path;
5429 		xpt_scan_bus_info *scan_info;
5430 		path_id_t path_id;
5431 		target_id_t target_id;
5432 		lun_id_t lun_id;
5433 
5434 		/* Reuse the same CCB to query if a device was really found */
5435 		scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5436 		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5437 			      request_ccb->ccb_h.pinfo.priority);
5438 		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5439 
5440 		path_id = request_ccb->ccb_h.path_id;
5441 		target_id = request_ccb->ccb_h.target_id;
5442 		lun_id = request_ccb->ccb_h.target_lun;
5443 		xpt_action(request_ccb);
5444 
5445 		if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5446 			struct cam_ed *device;
5447 			struct cam_et *target;
5448 			int s, phl;
5449 
5450 			/*
5451 			 * If we already probed lun 0 successfully, or
5452 			 * we have additional configured luns on this
5453 			 * target that might have "gone away", go on to
5454 			 * the next lun.
5455 			 */
5456 			target = request_ccb->ccb_h.path->target;
5457 			/*
5458 			 * We may touch devices that we don't
5459 			 * hold references to, so ensure they
5460 			 * don't disappear out from under us.
5461 			 * The target above is referenced by the
5462 			 * path in the request ccb.
5463 			 */
5464 			phl = 0;
5465 			s = splcam();
5466 			device = TAILQ_FIRST(&target->ed_entries);
5467 			if (device != NULL) {
5468 				phl = CAN_SRCH_HI_SPARSE(device);
5469 				if (device->lun_id == 0)
5470 					device = TAILQ_NEXT(device, links);
5471 			}
5472 			splx(s);
5473 			if ((lun_id != 0) || (device != NULL)) {
5474 				if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5475 					lun_id++;
5476 			}
5477 		} else {
5478 			struct cam_ed *device;
5479 
5480 			device = request_ccb->ccb_h.path->device;
5481 
5482 			if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5483 				/* Try the next lun */
5484 				if (lun_id < (CAM_SCSI2_MAXLUN-1)
5485 				  || CAN_SRCH_HI_DENSE(device))
5486 					lun_id++;
5487 			}
5488 		}
5489 
5490 		/*
5491 		 * Free the current request path- we're done with it.
5492 		 */
5493 		xpt_free_path(request_ccb->ccb_h.path);
5494 
5495 		/*
5496 		 * Check to see if we should scan any further luns.
5497 		 */
5498 		if (lun_id == request_ccb->ccb_h.target_lun
5499 		 || lun_id > scan_info->cpi->max_lun) {
5500 			int done;
5501 
5502  hop_again:
5503 			done = 0;
5504 			if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5505 				scan_info->counter++;
5506 				if (scan_info->counter ==
5507 				    scan_info->cpi->initiator_id) {
5508 					scan_info->counter++;
5509 				}
5510 				if (scan_info->counter >=
5511 				    scan_info->cpi->max_target+1) {
5512 					done = 1;
5513 				}
5514 			} else {
5515 				scan_info->counter--;
5516 				if (scan_info->counter == 0) {
5517 					done = 1;
5518 				}
5519 			}
5520 			if (done) {
5521 				xpt_free_ccb(request_ccb);
5522 				xpt_free_ccb((union ccb *)scan_info->cpi);
5523 				request_ccb = scan_info->request_ccb;
5524 				free(scan_info, M_TEMP);
5525 				request_ccb->ccb_h.status = CAM_REQ_CMP;
5526 				xpt_done(request_ccb);
5527 				break;
5528 			}
5529 
5530 			if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
5531 				break;
5532 			}
5533 			status = xpt_create_path(&path, xpt_periph,
5534 			    scan_info->request_ccb->ccb_h.path_id,
5535 			    scan_info->counter, 0);
5536 			if (status != CAM_REQ_CMP) {
5537 				printf("xpt_scan_bus: xpt_create_path failed"
5538 				    " with status %#x, bus scan halted\n",
5539 			       	    status);
5540 				xpt_free_ccb(request_ccb);
5541 				xpt_free_ccb((union ccb *)scan_info->cpi);
5542 				request_ccb = scan_info->request_ccb;
5543 				free(scan_info, M_TEMP);
5544 				request_ccb->ccb_h.status = status;
5545 				xpt_done(request_ccb);
5546 				break;
5547 			}
5548 			xpt_setup_ccb(&request_ccb->ccb_h, path,
5549 			    request_ccb->ccb_h.pinfo.priority);
5550 			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5551 			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5552 			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5553 			request_ccb->crcn.flags =
5554 			    scan_info->request_ccb->crcn.flags;
5555 		} else {
5556 			status = xpt_create_path(&path, xpt_periph,
5557 						 path_id, target_id, lun_id);
5558 			if (status != CAM_REQ_CMP) {
5559 				printf("xpt_scan_bus: xpt_create_path failed "
5560 				       "with status %#x, halting LUN scan\n",
5561 			 	       status);
5562 				goto hop_again;
5563 			}
5564 			xpt_setup_ccb(&request_ccb->ccb_h, path,
5565 				      request_ccb->ccb_h.pinfo.priority);
5566 			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5567 			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5568 			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5569 			request_ccb->crcn.flags =
5570 				scan_info->request_ccb->crcn.flags;
5571 		}
5572 		xpt_action(request_ccb);
5573 		break;
5574 	}
5575 	default:
5576 		break;
5577 	}
5578 }
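/*
 * Illustrative sketch (kept out of compilation): the LUN-stepping rule
 * the XPT_SCAN_LUN case above applies.  SCSI-2 limits a target to
 * CAM_SCSI2_MAXLUN (8) luns; only devices quirked for sparse or dense
 * high-lun support are probed past that.  The helper name is
 * hypothetical.
 */
#if 0
static lun_id_t
next_lun(lun_id_t lun_id, int search_high_luns)
{
	if (lun_id < (CAM_SCSI2_MAXLUN - 1) || search_high_luns)
		return (lun_id + 1);
	/* An unchanged lun signals the caller that the scan is done. */
	return (lun_id);
}
#endif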
5579 
5580 typedef enum {
5581 	PROBE_TUR,
5582 	PROBE_INQUIRY,
5583 	PROBE_FULL_INQUIRY,
5584 	PROBE_MODE_SENSE,
5585 	PROBE_SERIAL_NUM,
5586 	PROBE_TUR_FOR_NEGOTIATION
5587 } probe_action;
5588 
5589 typedef enum {
5590 	PROBE_INQUIRY_CKSUM	= 0x01,
5591 	PROBE_SERIAL_CKSUM	= 0x02,
5592 	PROBE_NO_ANNOUNCE	= 0x04
5593 } probe_flags;
5594 
5595 typedef struct {
5596 	TAILQ_HEAD(, ccb_hdr) request_ccbs;
5597 	probe_action	action;
5598 	union ccb	saved_ccb;
5599 	probe_flags	flags;
5600 	MD5_CTX		context;
5601 	u_int8_t	digest[16];
5602 } probe_softc;
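/*
 * Illustrative sketch (kept out of compilation): how the MD5 digest
 * stored in probe_softc is rebuilt and compared to decide whether the
 * same device is still present.  The MD5 routines are the sys/md5.h
 * ones used by probestart()/probedone() below; the parameter names are
 * hypothetical.
 */
#if 0
static int
probe_device_unchanged(const u_int8_t saved_digest[16],
		       struct scsi_inquiry_data *inq_data,
		       const u_int8_t *serial_num, u_int serial_num_len)
{
	MD5_CTX context;
	u_int8_t digest[16];

	MD5Init(&context);
	MD5Update(&context, (unsigned char *)inq_data,
		  sizeof(struct scsi_inquiry_data));
	if (serial_num_len > 0)
		MD5Update(&context, serial_num, serial_num_len);
	MD5Final(digest, &context);
	return (bcmp(saved_digest, digest, 16) == 0);
}
#endif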
5603 
5604 static void
5605 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5606 	     cam_flags flags, union ccb *request_ccb)
5607 {
5608 	struct ccb_pathinq cpi;
5609 	cam_status status;
5610 	struct cam_path *new_path;
5611 	struct cam_periph *old_periph;
5612 	int s;
5613 
5614 	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5615 		  ("xpt_scan_lun\n"));
5616 
5617 	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5618 	cpi.ccb_h.func_code = XPT_PATH_INQ;
5619 	xpt_action((union ccb *)&cpi);
5620 
5621 	if (cpi.ccb_h.status != CAM_REQ_CMP) {
5622 		if (request_ccb != NULL) {
5623 			request_ccb->ccb_h.status = cpi.ccb_h.status;
5624 			xpt_done(request_ccb);
5625 		}
5626 		return;
5627 	}
5628 
5629 	if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5630 		/*
5631 		 * Can't scan the bus on an adapter that
5632 		 * cannot perform the initiator role.
5633 		 */
5634 		if (request_ccb != NULL) {
5635 			request_ccb->ccb_h.status = CAM_REQ_CMP;
5636 			xpt_done(request_ccb);
5637 		}
5638 		return;
5639 	}
5640 
5641 	if (request_ccb == NULL) {
5642 		request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
5643 		if (request_ccb == NULL) {
5644 			xpt_print_path(path);
5645 			printf("xpt_scan_lun: can't allocate CCB, can't "
5646 			       "continue\n");
5647 			return;
5648 		}
5649 		new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
5650 		if (new_path == NULL) {
5651 			xpt_print_path(path);
5652 			printf("xpt_scan_lun: can't allocate path, can't "
5653 			       "continue\n");
5654 			free(request_ccb, M_TEMP);
5655 			return;
5656 		}
5657 		status = xpt_compile_path(new_path, xpt_periph,
5658 					  path->bus->path_id,
5659 					  path->target->target_id,
5660 					  path->device->lun_id);
5661 
5662 		if (status != CAM_REQ_CMP) {
5663 			xpt_print_path(path);
5664 			printf("xpt_scan_lun: can't compile path, can't "
5665 			       "continue\n");
5666 			free(request_ccb, M_TEMP);
5667 			free(new_path, M_TEMP);
5668 			return;
5669 		}
5670 		xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5671 		request_ccb->ccb_h.cbfcnp = xptscandone;
5672 		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5673 		request_ccb->crcn.flags = flags;
5674 	}
5675 
5676 	s = splsoftcam();
5677 	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5678 		probe_softc *softc;
5679 
5680 		softc = (probe_softc *)old_periph->softc;
5681 		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5682 				  periph_links.tqe);
5683 	} else {
5684 		status = cam_periph_alloc(proberegister, NULL, probecleanup,
5685 					  probestart, "probe",
5686 					  CAM_PERIPH_BIO,
5687 					  request_ccb->ccb_h.path, NULL, 0,
5688 					  request_ccb);
5689 
5690 		if (status != CAM_REQ_CMP) {
5691 			xpt_print_path(path);
5692 			printf("xpt_scan_lun: cam_alloc_periph returned an "
5693 			       "error, can't continue probe\n");
5694 			request_ccb->ccb_h.status = status;
5695 			xpt_done(request_ccb);
5696 		}
5697 	}
5698 	splx(s);
5699 }
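/*
 * Illustrative sketch (kept out of compilation): the shape of a
 * caller-built rescan request for a single lun before it reaches
 * xpt_scan_lun() via xpt_action().  The bus/target/lun numbers, the
 * priority, and the completion callback are hypothetical; a real
 * caller must supply a cbfcnp that frees the CCB and the path.
 */
#if 0
	union ccb *ccb;
	struct cam_path *path;

	if (xpt_create_path(&path, xpt_periph, /*bus*/0, /*target*/1,
			    /*lun*/0) == CAM_REQ_CMP) {
		ccb = xpt_alloc_ccb();
		xpt_setup_ccb(&ccb->ccb_h, path, /*priority*/5);
		ccb->ccb_h.func_code = XPT_SCAN_LUN;
		ccb->ccb_h.cbfcnp = my_rescan_done;	/* hypothetical */
		ccb->crcn.flags = CAM_FLAG_NONE;
		xpt_action(ccb);
	}
#endif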
5700 
5701 static void
5702 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5703 {
5704 	xpt_release_path(done_ccb->ccb_h.path);
5705 	free(done_ccb->ccb_h.path, M_TEMP);
5706 	free(done_ccb, M_TEMP);
5707 }
5708 
5709 static cam_status
5710 proberegister(struct cam_periph *periph, void *arg)
5711 {
5712 	union ccb *request_ccb;	/* CCB representing the probe request */
5713 	probe_softc *softc;
5714 
5715 	request_ccb = (union ccb *)arg;
5716 	if (periph == NULL) {
5717 		printf("proberegister: periph was NULL!!\n");
5718 		return(CAM_REQ_CMP_ERR);
5719 	}
5720 
5721 	if (request_ccb == NULL) {
5722 		printf("proberegister: no probe CCB, "
5723 		       "can't register device\n");
5724 		return(CAM_REQ_CMP_ERR);
5725 	}
5726 
5727 	softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
5728 
5729 	if (softc == NULL) {
5730 		printf("proberegister: Unable to probe new device. "
5731 		       "Unable to allocate softc\n");
5732 		return(CAM_REQ_CMP_ERR);
5733 	}
5734 	TAILQ_INIT(&softc->request_ccbs);
5735 	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5736 			  periph_links.tqe);
5737 	softc->flags = 0;
5738 	periph->softc = softc;
5739 	cam_periph_acquire(periph);
5740 	/*
5741 	 * Ensure we've waited at least a bus settle
5742 	 * delay before attempting to probe the device.
5743 	 * For HBAs that don't do bus resets, this won't make a difference.
5744 	 */
5745 	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5746 				      scsi_delay);
5747 	probeschedule(periph);
5748 	return(CAM_REQ_CMP);
5749 }
5750 
5751 static void
5752 probeschedule(struct cam_periph *periph)
5753 {
5754 	struct ccb_pathinq cpi;
5755 	union ccb *ccb;
5756 	probe_softc *softc;
5757 
5758 	softc = (probe_softc *)periph->softc;
5759 	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5760 
5761 	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5762 	cpi.ccb_h.func_code = XPT_PATH_INQ;
5763 	xpt_action((union ccb *)&cpi);
5764 
5765 	/*
5766 	 * If a device has gone away and another device, or the same one,
5767 	 * is back in the same place, it should have a unit attention
5768 	 * condition pending.  It will not report the unit attention in
5769 	 * response to an inquiry, which may leave invalid transfer
5770 	 * negotiations in effect.  The TUR will reveal the unit attention
5771 	 * condition.  Only send the TUR for lun 0, since some devices
5772 	 * will get confused by commands other than inquiry to non-existent
5773 	 * luns.  If you think a device has gone away, start your scan from
5774 	 * lun 0.  This will ensure that any bogus transfer settings are
5775 	 * invalidated.
5776 	 *
5777 	 * If we haven't seen the device before and the controller supports
5778 	 * some kind of transfer negotiation, negotiate before the first
5779 	 * command is sent if no bus reset was performed at startup.  This
5780 	 * ensures that the device is not confused by transfer negotiation
5781 	 * settings left over from loader or BIOS action.
5782 	 */
5783 	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5784 	 && (ccb->ccb_h.target_lun == 0)) {
5785 		softc->action = PROBE_TUR;
5786 	} else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5787 	      && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5788 		proberequestdefaultnegotiation(periph);
5789 		softc->action = PROBE_INQUIRY;
5790 	} else {
5791 		softc->action = PROBE_INQUIRY;
5792 	}
5793 
5794 	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5795 		softc->flags |= PROBE_NO_ANNOUNCE;
5796 	else
5797 		softc->flags &= ~PROBE_NO_ANNOUNCE;
5798 
5799 	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5800 }
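/*
 * Condensed restatement (kept out of compilation) of the initial-action
 * choice above.  The parameters are hypothetical stand-ins for the CCB,
 * path, and path-inquiry state that probeschedule() consults.
 */
#if 0
static probe_action
initial_probe_action(int configured, lun_id_t lun,
		     u_int hba_inquiry, u_int hba_misc)
{
	if (configured && lun == 0)
		return (PROBE_TUR);	/* reveal pending unit attentions */
	if ((hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
	 && (hba_misc & PIM_NOBUSRESET) != 0) {
		/* ...request default negotiation before inquiring... */
	}
	return (PROBE_INQUIRY);
}
#endif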
5801 
5802 static void
5803 probestart(struct cam_periph *periph, union ccb *start_ccb)
5804 {
5805 	/* Probe the device that our peripheral driver points to */
5806 	struct ccb_scsiio *csio;
5807 	probe_softc *softc;
5808 
5809 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5810 
5811 	softc = (probe_softc *)periph->softc;
5812 	csio = &start_ccb->csio;
5813 
5814 	switch (softc->action) {
5815 	case PROBE_TUR:
5816 	case PROBE_TUR_FOR_NEGOTIATION:
5817 	{
5818 		scsi_test_unit_ready(csio,
5819 				     /*retries*/4,
5820 				     probedone,
5821 				     MSG_SIMPLE_Q_TAG,
5822 				     SSD_FULL_SIZE,
5823 				     /*timeout*/60000);
5824 		break;
5825 	}
5826 	case PROBE_INQUIRY:
5827 	case PROBE_FULL_INQUIRY:
5828 	{
5829 		u_int inquiry_len;
5830 		struct scsi_inquiry_data *inq_buf;
5831 
5832 		inq_buf = &periph->path->device->inq_data;
5833 		/*
5834 		 * If the device is currently configured, we calculate an
5835 		 * MD5 checksum of the inquiry data, and if the serial number
5836 		 * length is greater than 0, add the serial number data
5837 		 * into the checksum as well.  Once the inquiry and the
5838 		 * serial number check finish, we attempt to figure out
5839 		 * whether we still have the same device.
5840 		 */
5841 		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5842 
5843 			MD5Init(&softc->context);
5844 			MD5Update(&softc->context, (unsigned char *)inq_buf,
5845 				  sizeof(struct scsi_inquiry_data));
5846 			softc->flags |= PROBE_INQUIRY_CKSUM;
5847 			if (periph->path->device->serial_num_len > 0) {
5848 				MD5Update(&softc->context,
5849 					  periph->path->device->serial_num,
5850 					  periph->path->device->serial_num_len);
5851 				softc->flags |= PROBE_SERIAL_CKSUM;
5852 			}
5853 			MD5Final(softc->digest, &softc->context);
5854 		}
5855 
5856 		if (softc->action == PROBE_INQUIRY)
5857 			inquiry_len = SHORT_INQUIRY_LENGTH;
5858 		else
5859 			inquiry_len = inq_buf->additional_length
5860 				    + offsetof(struct scsi_inquiry_data,
5861                                                additional_length) + 1;
5862 
5863 		/*
5864 		 * Some parallel SCSI devices fail to send an
5865 		 * ignore wide residue message when dealing with
5866 		 * odd length inquiry requests.  Round up to be
5867 		 * safe.
5868 		 */
5869 		inquiry_len = roundup2(inquiry_len, 2);
5870 
5871 		scsi_inquiry(csio,
5872 			     /*retries*/4,
5873 			     probedone,
5874 			     MSG_SIMPLE_Q_TAG,
5875 			     (u_int8_t *)inq_buf,
5876 			     inquiry_len,
5877 			     /*evpd*/FALSE,
5878 			     /*page_code*/0,
5879 			     SSD_MIN_SIZE,
5880 			     /*timeout*/60 * 1000);
5881 		break;
5882 	}
5883 	case PROBE_MODE_SENSE:
5884 	{
5885 		void  *mode_buf;
5886 		int    mode_buf_len;
5887 
5888 		mode_buf_len = sizeof(struct scsi_mode_header_6)
5889 			     + sizeof(struct scsi_mode_blk_desc)
5890 			     + sizeof(struct scsi_control_page);
5891 		mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
5892 		if (mode_buf != NULL) {
5893 			scsi_mode_sense(csio,
5894 					/*retries*/4,
5895 					probedone,
5896 					MSG_SIMPLE_Q_TAG,
5897 					/*dbd*/FALSE,
5898 					SMS_PAGE_CTRL_CURRENT,
5899 					SMS_CONTROL_MODE_PAGE,
5900 					mode_buf,
5901 					mode_buf_len,
5902 					SSD_FULL_SIZE,
5903 					/*timeout*/60000);
5904 			break;
5905 		}
5906 		xpt_print_path(periph->path);
5907 		printf("Unable to mode sense control page - malloc failure\n");
5908 		softc->action = PROBE_SERIAL_NUM;
5909 	}
5910 	/* FALLTHROUGH */
5911 	case PROBE_SERIAL_NUM:
5912 	{
5913 		struct scsi_vpd_unit_serial_number *serial_buf;
5914 		struct cam_ed* device;
5915 
5916 		serial_buf = NULL;
5917 		device = periph->path->device;
5918 		device->serial_num = NULL;
5919 		device->serial_num_len = 0;
5920 
5921 		if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
5922 			serial_buf = (struct scsi_vpd_unit_serial_number *)
5923 				malloc(sizeof(*serial_buf), M_TEMP,
5924 					M_NOWAIT | M_ZERO);
5925 
5926 		if (serial_buf != NULL) {
5927 			scsi_inquiry(csio,
5928 				     /*retries*/4,
5929 				     probedone,
5930 				     MSG_SIMPLE_Q_TAG,
5931 				     (u_int8_t *)serial_buf,
5932 				     sizeof(*serial_buf),
5933 				     /*evpd*/TRUE,
5934 				     SVPD_UNIT_SERIAL_NUMBER,
5935 				     SSD_MIN_SIZE,
5936 				     /*timeout*/60 * 1000);
5937 			break;
5938 		}
5939 		/*
5940 		 * We'll have to do without, let our probedone
5941 		 * routine finish up for us.
5942 		 */
5943 		start_ccb->csio.data_ptr = NULL;
5944 		probedone(periph, start_ccb);
5945 		return;
5946 	}
5947 	}
5948 	xpt_action(start_ccb);
5949 }
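/*
 * Worked example (kept out of compilation) of the full-inquiry length
 * computed above: additional_length counts the bytes that follow its
 * own field, so the total is that field's offset + 1 + the reported
 * additional_length, rounded up to an even size for parallel SCSI.
 * The 0x1f response value is a hypothetical example.
 */
#if 0
	u_int len;

	/* additional_length lives at offset 4; device reported 0x1f. */
	len = 0x1f + offsetof(struct scsi_inquiry_data,
			      additional_length) + 1;
	len = roundup2(len, 2);		/* 31 + 4 + 1 = 36, already even */
#endif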
5950 
5951 static void
5952 proberequestdefaultnegotiation(struct cam_periph *periph)
5953 {
5954 	struct ccb_trans_settings cts;
5955 
5956 	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5957 	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5958 #ifdef CAM_NEW_TRAN_CODE
5959 	cts.type = CTS_TYPE_USER_SETTINGS;
5960 #else /* CAM_NEW_TRAN_CODE */
5961 	cts.flags = CCB_TRANS_USER_SETTINGS;
5962 #endif /* CAM_NEW_TRAN_CODE */
5963 	xpt_action((union ccb *)&cts);
5964 	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5965 #ifdef CAM_NEW_TRAN_CODE
5966 	cts.type = CTS_TYPE_CURRENT_SETTINGS;
5967 #else /* CAM_NEW_TRAN_CODE */
5968 	cts.flags &= ~CCB_TRANS_USER_SETTINGS;
5969 	cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
5970 #endif /* CAM_NEW_TRAN_CODE */
5971 	xpt_action((union ccb *)&cts);
5972 }
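/*
 * Net effect of the routine above: read the user/default transfer
 * settings, then immediately write them back as the current settings,
 * so the SIM renegotiates from a known state before the first real
 * command reaches the device.
 */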
5973 
5974 static void
5975 probedone(struct cam_periph *periph, union ccb *done_ccb)
5976 {
5977 	probe_softc *softc;
5978 	struct cam_path *path;
5979 	u_int32_t  priority;
5980 
5981 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5982 
5983 	softc = (probe_softc *)periph->softc;
5984 	path = done_ccb->ccb_h.path;
5985 	priority = done_ccb->ccb_h.pinfo.priority;
5986 
5987 	switch (softc->action) {
5988 	case PROBE_TUR:
5989 	{
5990 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5991 
5992 			if (cam_periph_error(done_ccb, 0,
5993 					     SF_NO_PRINT, NULL) == ERESTART)
5994 				return;
5995 			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5996 				/* Don't wedge the queue */
5997 				xpt_release_devq(done_ccb->ccb_h.path,
5998 						 /*count*/1,
5999 						 /*run_queue*/TRUE);
6000 		}
6001 		softc->action = PROBE_INQUIRY;
6002 		xpt_release_ccb(done_ccb);
6003 		xpt_schedule(periph, priority);
6004 		return;
6005 	}
6006 	case PROBE_INQUIRY:
6007 	case PROBE_FULL_INQUIRY:
6008 	{
6009 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6010 			struct scsi_inquiry_data *inq_buf;
6011 			u_int8_t periph_qual;
6012 
6013 			path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
6014 			inq_buf = &path->device->inq_data;
6015 
6016 			periph_qual = SID_QUAL(inq_buf);
6017 
6018 			switch(periph_qual) {
6019 			case SID_QUAL_LU_CONNECTED:
6020 			{
6021 				u_int8_t len;
6022 
6023 				/*
6024 				 * We conservatively request only
6025 				 * SHORT_INQUIRY_LENGTH bytes of inquiry
6026 				 * information during our first try
6027 				 * at sending an INQUIRY. If the device
6028 				 * has more information to give,
6029 				 * perform a second request specifying
6030 				 * the amount of information the device
6031 				 * is willing to give.
6032 				 */
6033 				len = inq_buf->additional_length
6034 				    + offsetof(struct scsi_inquiry_data,
6035                                                additional_length) + 1;
6036 				if (softc->action == PROBE_INQUIRY
6037 				 && len > SHORT_INQUIRY_LENGTH) {
6038 					softc->action = PROBE_FULL_INQUIRY;
6039 					xpt_release_ccb(done_ccb);
6040 					xpt_schedule(periph, priority);
6041 					return;
6042 				}
6043 
6044 				xpt_find_quirk(path->device);
6045 
6046 #ifdef CAM_NEW_TRAN_CODE
6047 				xpt_devise_transport(path);
6048 #endif /* CAM_NEW_TRAN_CODE */
6049 				if (INQ_DATA_TQ_ENABLED(inq_buf))
6050 					softc->action = PROBE_MODE_SENSE;
6051 				else
6052 					softc->action = PROBE_SERIAL_NUM;
6053 
6054 				path->device->flags &= ~CAM_DEV_UNCONFIGURED;
6055 
6056 				xpt_release_ccb(done_ccb);
6057 				xpt_schedule(periph, priority);
6058 				return;
6059 			}
6060 			default:
6061 				break;
6062 			}
6063 		} else if (cam_periph_error(done_ccb, 0,
6064 					    done_ccb->ccb_h.target_lun > 0
6065 					    ? SF_RETRY_UA|SF_QUIET_IR
6066 					    : SF_RETRY_UA,
6067 					    &softc->saved_ccb) == ERESTART) {
6068 			return;
6069 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6070 			/* Don't wedge the queue */
6071 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6072 					 /*run_queue*/TRUE);
6073 		}
6074 		/*
6075 		 * If we get to this point, we got an error status back
6076 		 * from the inquiry and the error status doesn't require
6077 		 * automatically retrying the command.  Therefore, the
6078 		 * inquiry failed.  If we had inquiry information before
6079 		 * for this device, but this latest inquiry command failed,
6080 		 * the device has probably gone away.  If this device isn't
6081 		 * already marked unconfigured, notify the peripheral
6082 		 * drivers that this device is no more.
6083 		 */
6084 		if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
6085 			/* Send the async notification. */
6086 			xpt_async(AC_LOST_DEVICE, path, NULL);
6087 
6088 		xpt_release_ccb(done_ccb);
6089 		break;
6090 	}
6091 	case PROBE_MODE_SENSE:
6092 	{
6093 		struct ccb_scsiio *csio;
6094 		struct scsi_mode_header_6 *mode_hdr;
6095 
6096 		csio = &done_ccb->csio;
6097 		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
6098 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6099 			struct scsi_control_page *page;
6100 			u_int8_t *offset;
6101 
6102 			offset = ((u_int8_t *)&mode_hdr[1])
6103 			    + mode_hdr->blk_desc_len;
6104 			page = (struct scsi_control_page *)offset;
6105 			path->device->queue_flags = page->queue_flags;
6106 		} else if (cam_periph_error(done_ccb, 0,
6107 					    SF_RETRY_UA|SF_NO_PRINT,
6108 					    &softc->saved_ccb) == ERESTART) {
6109 			return;
6110 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6111 			/* Don't wedge the queue */
6112 			xpt_release_devq(done_ccb->ccb_h.path,
6113 					 /*count*/1, /*run_queue*/TRUE);
6114 		}
6115 		xpt_release_ccb(done_ccb);
6116 		free(mode_hdr, M_TEMP);
6117 		softc->action = PROBE_SERIAL_NUM;
6118 		xpt_schedule(periph, priority);
6119 		return;
6120 	}
6121 	case PROBE_SERIAL_NUM:
6122 	{
6123 		struct ccb_scsiio *csio;
6124 		struct scsi_vpd_unit_serial_number *serial_buf;
6125 		u_int32_t  priority;
6126 		int changed;
6127 		int have_serialnum;
6128 
6129 		changed = 1;
6130 		have_serialnum = 0;
6131 		csio = &done_ccb->csio;
6132 		priority = done_ccb->ccb_h.pinfo.priority;
6133 		serial_buf =
6134 		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
6135 
6136 		/* Clean up from previous instance of this device */
6137 		if (path->device->serial_num != NULL) {
6138 			free(path->device->serial_num, M_CAMXPT);
6139 			path->device->serial_num = NULL;
6140 			path->device->serial_num_len = 0;
6141 		}
6142 
6143 		if (serial_buf == NULL) {
6144 			/*
6145 			 * Don't process the command, as it was never sent
6146 			 */
6147 		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
6148 			&& (serial_buf->length > 0)) {
6149 
6150 			have_serialnum = 1;
6151 			path->device->serial_num =
6152 				(u_int8_t *)malloc((serial_buf->length + 1),
6153 						   M_CAMXPT, M_NOWAIT);
6154 			if (path->device->serial_num != NULL) {
6155 				bcopy(serial_buf->serial_num,
6156 				      path->device->serial_num,
6157 				      serial_buf->length);
6158 				path->device->serial_num_len =
6159 				    serial_buf->length;
6160 				path->device->serial_num[serial_buf->length]
6161 				    = '\0';
6162 			}
6163 		} else if (cam_periph_error(done_ccb, 0,
6164 					    SF_RETRY_UA|SF_NO_PRINT,
6165 					    &softc->saved_ccb) == ERESTART) {
6166 			return;
6167 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6168 			/* Don't wedge the queue */
6169 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6170 					 /*run_queue*/TRUE);
6171 		}
6172 
6173 		/*
6174 		 * Let's see if we have seen this device before.
6175 		 */
6176 		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
6177 			MD5_CTX context;
6178 			u_int8_t digest[16];
6179 
6180 			MD5Init(&context);
6181 
6182 			MD5Update(&context,
6183 				  (unsigned char *)&path->device->inq_data,
6184 				  sizeof(struct scsi_inquiry_data));
6185 
6186 			if (have_serialnum)
6187 				MD5Update(&context, serial_buf->serial_num,
6188 					  serial_buf->length);
6189 
6190 			MD5Final(digest, &context);
6191 			if (bcmp(softc->digest, digest, 16) == 0)
6192 				changed = 0;
6193 
6194 			/*
6195 			 * XXX Do we need to do a TUR in order to ensure
6196 			 *     that the device really hasn't changed???
6197 			 */
6198 			if ((changed != 0)
6199 			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
6200 				xpt_async(AC_LOST_DEVICE, path, NULL);
6201 		}
6202 		if (serial_buf != NULL)
6203 			free(serial_buf, M_TEMP);
6204 
6205 		if (changed != 0) {
6206 			/*
6207 			 * Now that we have all the necessary
6208 			 * information to safely perform transfer
6209 			 * negotiations... Controllers don't perform
6210 			 * any negotiation or tagged queuing until
6211 			 * after the first XPT_SET_TRAN_SETTINGS ccb is
6212 			 * received.  So, on a new device, just retrieve
6213 			 * the user settings, and set them as the current
6214 			 * settings to set the device up.
6215 			 */
6216 			proberequestdefaultnegotiation(periph);
6217 			xpt_release_ccb(done_ccb);
6218 
6219 			/*
6220 			 * Perform a TUR to allow the controller to
6221 			 * perform any necessary transfer negotiation.
6222 			 */
6223 			softc->action = PROBE_TUR_FOR_NEGOTIATION;
6224 			xpt_schedule(periph, priority);
6225 			return;
6226 		}
6227 		xpt_release_ccb(done_ccb);
6228 		break;
6229 	}
6230 	case PROBE_TUR_FOR_NEGOTIATION:
6231 		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6232 			/* Don't wedge the queue */
6233 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6234 					 /*run_queue*/TRUE);
6235 		}
6236 
6237 		path->device->flags &= ~CAM_DEV_UNCONFIGURED;
6238 
6239 		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6240 			/* Inform the XPT that a new device has been found */
6241 			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6242 			xpt_action(done_ccb);
6243 
6244 			xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
6245 				  done_ccb);
6246 		}
6247 		xpt_release_ccb(done_ccb);
6248 		break;
6249 	}
6250 	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
6251 	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
6252 	done_ccb->ccb_h.status = CAM_REQ_CMP;
6253 	xpt_done(done_ccb);
6254 	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
6255 		cam_periph_invalidate(periph);
6256 		cam_periph_release(periph);
6257 	} else {
6258 		probeschedule(periph);
6259 	}
6260 }
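/*
 * Illustrative sketch (kept out of compilation): the counted-string
 * copy PROBE_SERIAL_NUM performs above.  VPD serial numbers are not
 * NUL-terminated on the wire, so one extra byte is allocated for the
 * terminator.
 */
#if 0
	u_int8_t *num;

	num = malloc(serial_buf->length + 1, M_CAMXPT, M_NOWAIT);
	if (num != NULL) {
		bcopy(serial_buf->serial_num, num, serial_buf->length);
		num[serial_buf->length] = '\0';
	}
#endif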
6261 
6262 static void
6263 probecleanup(struct cam_periph *periph)
6264 {
6265 	free(periph->softc, M_TEMP);
6266 }
6267 
6268 static void
6269 xpt_find_quirk(struct cam_ed *device)
6270 {
6271 	caddr_t	match;
6272 
6273 	match = cam_quirkmatch((caddr_t)&device->inq_data,
6274 			       (caddr_t)xpt_quirk_table,
6275 			       sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
6276 			       sizeof(*xpt_quirk_table), scsi_inquiry_match);
6277 
6278 	if (match == NULL)
6279 		panic("xpt_find_quirk: device didn't match wildcard entry!!");
6280 
6281 	device->quirk = (struct xpt_quirk_entry *)match;
6282 }
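/*
 * Illustrative sketch (kept out of compilation): the shape of an
 * xpt_quirk_table entry that cam_quirkmatch() compares against the
 * inquiry data.  The vendor/product strings below are hypothetical;
 * the real table ends with a match-anything wildcard entry, which is
 * why a failed match panics above.
 */
#if 0
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, "HYPOVEND", "HYPODISK*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
#endif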
6283 
6284 static int
6285 sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
6286 {
6287 	int error, val;
6288 
6289 	val = cam_srch_hi;
6290 	error = sysctl_handle_int(oidp, &val, sizeof(val), req);
6291 	if (error != 0 || req->newptr == NULL)
6292 		return (error);
6293 	if (val == 0 || val == 1) {
6294 		cam_srch_hi = val;
6295 		return (0);
6296 	} else {
6297 		return (EINVAL);
6298 	}
6299 }
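/*
 * Usage sketch: the handler above accepts only 0 or 1, e.g.
 *
 *	sysctl kern.cam.cam_srch_hi=1
 *
 * (the exact OID depends on the SYSCTL_PROC declaration elsewhere in
 * this file; the name shown here is an assumption).
 */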
6300 
6301 #ifdef CAM_NEW_TRAN_CODE
6302 
6303 static void
6304 xpt_devise_transport(struct cam_path *path)
6305 {
6306 	struct ccb_pathinq cpi;
6307 	struct ccb_trans_settings cts;
6308 	struct scsi_inquiry_data *inq_buf;
6309 
6310 	/* Get transport information from the SIM */
6311 	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
6312 	cpi.ccb_h.func_code = XPT_PATH_INQ;
6313 	xpt_action((union ccb *)&cpi);
6314 
6315 	inq_buf = NULL;
6316 	if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
6317 		inq_buf = &path->device->inq_data;
6318 	path->device->protocol = PROTO_SCSI;
6319 	path->device->protocol_version =
6320 	    inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
6321 	path->device->transport = cpi.transport;
6322 	path->device->transport_version = cpi.transport_version;
6323 
6324 	/*
6325 	 * Any device not using SPI3 features should
6326 	 * be considered SPI2 or lower.
6327 	 */
6328 	if (inq_buf != NULL) {
6329 		if (path->device->transport == XPORT_SPI
6330 		 && (inq_buf->spi3data & SID_SPI_MASK) == 0
6331 		 && path->device->transport_version > 2)
6332 			path->device->transport_version = 2;
6333 	} else {
6334 		struct cam_ed* otherdev;
6335 
6336 		for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
6337 		     otherdev != NULL;
6338 		     otherdev = TAILQ_NEXT(otherdev, links)) {
6339 			if (otherdev != path->device)
6340 				break;
6341 		}
6342 
6343 		if (otherdev != NULL) {
6344 			/*
6345 			 * Initially assume the same versioning as
6346 			 * prior luns for this target.
6347 			 */
6348 			path->device->protocol_version =
6349 			    otherdev->protocol_version;
6350 			path->device->transport_version =
6351 			    otherdev->transport_version;
6352 		} else {
6353 			/* Until we know better, opt for safety */
6354 			path->device->protocol_version = 2;
6355 			if (path->device->transport == XPORT_SPI)
6356 				path->device->transport_version = 2;
6357 			else
6358 				path->device->transport_version = 0;
6359 		}
6360 	}
6361 
6362 	/*
6363 	 * XXX
6364 	 * For a device compliant with SPC-2 we should be able
6365 	 * to determine the transport version supported by
6366 	 * scrutinizing the version descriptors in the
6367 	 * inquiry buffer.
6368 	 */
6369 
6370 	/* Tell the controller what we think */
6371 	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6372 	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6373 	cts.type = CTS_TYPE_CURRENT_SETTINGS;
6374 	cts.transport = path->device->transport;
6375 	cts.transport_version = path->device->transport_version;
6376 	cts.protocol = path->device->protocol;
6377 	cts.protocol_version = path->device->protocol_version;
6378 	cts.proto_specific.valid = 0;
6379 	cts.xport_specific.valid = 0;
6380 	xpt_action((union ccb *)&cts);
6381 }
6382 
6383 static void
6384 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6385 			  int async_update)
6386 {
6387 	struct	ccb_pathinq cpi;
6388 	struct	ccb_trans_settings cur_cts;
6389 	struct	ccb_trans_settings_scsi *scsi;
6390 	struct	ccb_trans_settings_scsi *cur_scsi;
6391 	struct	cam_sim *sim;
6392 	struct	scsi_inquiry_data *inq_data;
6393 
6394 	if (device == NULL) {
6395 		cts->ccb_h.status = CAM_PATH_INVALID;
6396 		xpt_done((union ccb *)cts);
6397 		return;
6398 	}
6399 
6400 	if (cts->protocol == PROTO_UNKNOWN
6401 	 || cts->protocol == PROTO_UNSPECIFIED) {
6402 		cts->protocol = device->protocol;
6403 		cts->protocol_version = device->protocol_version;
6404 	}
6405 
6406 	if (cts->protocol_version == PROTO_VERSION_UNKNOWN
6407 	 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
6408 		cts->protocol_version = device->protocol_version;
6409 
6410 	if (cts->protocol != device->protocol) {
6411 		xpt_print_path(cts->ccb_h.path);
6412 		printf("Uninitialized Protocol %x:%x?\n",
6413 		       cts->protocol, device->protocol);
6414 		cts->protocol = device->protocol;
6415 	}
6416 
6417 	if (cts->protocol_version > device->protocol_version) {
6418 		if (bootverbose) {
6419 			xpt_print_path(cts->ccb_h.path);
6420 			printf("Down revving Protocol Version from %d to %d?\n",
6421 			       cts->protocol_version, device->protocol_version);
6422 		}
6423 		cts->protocol_version = device->protocol_version;
6424 	}
6425 
6426 	if (cts->transport == XPORT_UNKNOWN
6427 	 || cts->transport == XPORT_UNSPECIFIED) {
6428 		cts->transport = device->transport;
6429 		cts->transport_version = device->transport_version;
6430 	}
6431 
6432 	if (cts->transport_version == XPORT_VERSION_UNKNOWN
6433 	 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
6434 		cts->transport_version = device->transport_version;
6435 
6436 	if (cts->transport != device->transport) {
6437 		xpt_print_path(cts->ccb_h.path);
6438 		printf("Uninitialized Transport %x:%x?\n",
6439 		       cts->transport, device->transport);
6440 		cts->transport = device->transport;
6441 	}
6442 
6443 	if (cts->transport_version > device->transport_version) {
6444 		if (bootverbose) {
6445 			xpt_print_path(cts->ccb_h.path);
6446 			printf("Down revving Transport Version from %d to %d?\n",
6447 			       cts->transport_version,
6448 			       device->transport_version);
6449 		}
6450 		cts->transport_version = device->transport_version;
6451 	}
6452 
6453 	sim = cts->ccb_h.path->bus->sim;
6454 
6455 	/*
6456 	 * Nothing more of interest to do unless
6457 	 * this is a device connected via the
6458 	 * SCSI protocol.
6459 	 */
6460 	if (cts->protocol != PROTO_SCSI) {
6461 		if (async_update == FALSE)
6462 			(*(sim->sim_action))(sim, (union ccb *)cts);
6463 		return;
6464 	}
6465 
6466 	inq_data = &device->inq_data;
6467 	scsi = &cts->proto_specific.scsi;
6468 	xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6469 	cpi.ccb_h.func_code = XPT_PATH_INQ;
6470 	xpt_action((union ccb *)&cpi);
6471 
6472 	/* SCSI specific sanity checking */
6473 	if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6474 	 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
6475 	 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6476 	 || (device->quirk->mintags == 0)) {
6477 		/*
6478 		 * Can't tag on hardware that doesn't support tags,
6479 		 * doesn't have it enabled, or has broken tag support.
6480 		 */
6481 		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6482 	}
6483 
6484 	if (async_update == FALSE) {
6485 		/*
6486 		 * Perform sanity checking against what the
6487 		 * controller and device can do.
6488 		 */
6489 		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6490 		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6491 		cur_cts.type = cts->type;
6492 		xpt_action((union ccb *)&cur_cts);
6493 
6494 		cur_scsi = &cur_cts.proto_specific.scsi;
6495 		if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
6496 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6497 			scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
6498 		}
6499 		if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
6500 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6501 	}
6502 
6503 	/* SPI specific sanity checking */
6504 	if (cts->transport == XPORT_SPI && async_update == FALSE) {
6505 		u_int spi3caps;
6506 		struct ccb_trans_settings_spi *spi;
6507 		struct ccb_trans_settings_spi *cur_spi;
6508 
6509 		spi = &cts->xport_specific.spi;
6510 
6511 		cur_spi = &cur_cts.xport_specific.spi;
6512 
6513 		/* Fill in any gaps in what the user gave us */
6514 		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6515 			spi->sync_period = cur_spi->sync_period;
6516 		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6517 			spi->sync_period = 0;
6518 		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6519 			spi->sync_offset = cur_spi->sync_offset;
6520 		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6521 			spi->sync_offset = 0;
6522 		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6523 			spi->ppr_options = cur_spi->ppr_options;
6524 		if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6525 			spi->ppr_options = 0;
6526 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6527 			spi->bus_width = cur_spi->bus_width;
6528 		if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6529 			spi->bus_width = 0;
6530 		if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
6531 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6532 			spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
6533 		}
6534 		if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
6535 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6536 		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6537 		  && (inq_data->flags & SID_Sync) == 0
6538 		  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6539 		 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6540 		 || (cur_spi->sync_offset == 0)
6541 		 || (cur_spi->sync_period == 0)) {
6542 			/* Force async */
6543 			spi->sync_period = 0;
6544 			spi->sync_offset = 0;
6545 		}
6546 
6547 		switch (spi->bus_width) {
6548 		case MSG_EXT_WDTR_BUS_32_BIT:
6549 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6550 			  || (inq_data->flags & SID_WBus32) != 0
6551 			  || cts->type == CTS_TYPE_USER_SETTINGS)
6552 			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6553 				break;
6554 			/* FALLTHROUGH to 16-bit */
6555 		case MSG_EXT_WDTR_BUS_16_BIT:
6556 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6557 			  || (inq_data->flags & SID_WBus16) != 0
6558 			  || cts->type == CTS_TYPE_USER_SETTINGS)
6559 			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6560 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6561 				break;
6562 			}
6563 			/* FALLTHROUGH to 8-bit */
6564 		default: /* New bus width?? */
6565 		case MSG_EXT_WDTR_BUS_8_BIT:
6566 			/* All targets can do this */
6567 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6568 			break;
6569 		}
6570 
6571 		spi3caps = cpi.xport_specific.spi.ppr_options;
6572 		if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6573 		 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6574 			spi3caps &= inq_data->spi3data;
6575 
6576 		if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
6577 			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
6578 
6579 		if ((spi3caps & SID_SPI_IUS) == 0)
6580 			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
6581 
6582 		if ((spi3caps & SID_SPI_QAS) == 0)
6583 			spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
6584 
6585 		/* No SPI Transfer settings are allowed unless we are wide */
6586 		if (spi->bus_width == 0)
6587 			spi->ppr_options = 0;
6588 
6589 		if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
6590 			/*
6591 			 * Can't tag queue without disconnection.
6592 			 */
6593 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6594 			scsi->valid |= CTS_SCSI_VALID_TQ;
6595 		}
6596 
6597 		/*
6598 		 * If we are currently performing tagged transactions to
6599 		 * this device and want to change its negotiation parameters,
6600 		 * go non-tagged for a bit to give the controller a chance to
6601 		 * negotiate unhampered by tag messages.
6602 		 */
6603 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6604 		 && (device->inq_flags & SID_CmdQue) != 0
6605 		 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6606 		 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
6607 				   CTS_SPI_VALID_SYNC_OFFSET|
6608 				   CTS_SPI_VALID_BUS_WIDTH)) != 0)
6609 			xpt_toggle_tags(cts->ccb_h.path);
6610 	}
6611 
6612 	if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6613 	 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
6614 		int device_tagenb;
6615 
6616 		/*
6617 		 * If we are transitioning from tags to no-tags or
6618 		 * vice-versa, we need to carefully freeze and restart
6619 		 * the queue so that we don't overlap tagged and non-tagged
6620 		 * commands.  We also temporarily stop tags if there is
6621 		 * a change in transfer negotiation settings to allow
6622 		 * "tag-less" negotiation.
6623 		 */
6624 		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6625 		 || (device->inq_flags & SID_CmdQue) != 0)
6626 			device_tagenb = TRUE;
6627 		else
6628 			device_tagenb = FALSE;
6629 
6630 		if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6631 		  && device_tagenb == FALSE)
6632 		 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
6633 		  && device_tagenb == TRUE)) {
6634 
6635 			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
6636 				/*
6637 				 * Delay change to use tags until after a
6638 				 * few commands have gone to this device so
6639 				 * the controller has time to perform transfer
6640 				 * negotiations without tagged messages getting
6641 				 * in the way.
6642 				 */
6643 				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6644 				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6645 			} else {
6646 				struct ccb_relsim crs;
6647 
6648 				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6649 				device->inq_flags &= ~SID_CmdQue;
6650 				xpt_dev_ccbq_resize(cts->ccb_h.path,
6651 						    sim->max_dev_openings);
6652 				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6653 				device->tag_delay_count = 0;
6654 
6655 				xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6656 					      /*priority*/1);
6657 				crs.ccb_h.func_code = XPT_REL_SIMQ;
6658 				crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6659 				crs.openings
6660 				    = crs.release_timeout
6661 				    = crs.qfrozen_cnt
6662 				    = 0;
6663 				xpt_action((union ccb *)&crs);
6664 			}
6665 		}
6666 	}
6667 	if (async_update == FALSE)
6668 		(*(sim->sim_action))(sim, (union ccb *)cts);
6669 }
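/*
 * Illustrative sketch (kept out of compilation): the two-sided
 * "fill in any gaps" pattern used repeatedly above.  A field the
 * caller left unspecified inherits the current value; a field the SIM
 * cannot report is forced to a safe default.
 */
#if 0
	if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
		spi->sync_period = cur_spi->sync_period;	/* inherit */
	if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
		spi->sync_period = 0;				/* force async */
#endif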
6670 
6671 #else /* CAM_NEW_TRAN_CODE */
6672 
6673 static void
6674 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6675 			  int async_update)
6676 {
6677 	struct	cam_sim *sim;
6678 	int	qfrozen;
6679 
6680 	sim = cts->ccb_h.path->bus->sim;
6681 	if (async_update == FALSE) {
6682 		struct	scsi_inquiry_data *inq_data;
6683 		struct	ccb_pathinq cpi;
6684 		struct	ccb_trans_settings cur_cts;
6685 
6686 		if (device == NULL) {
6687 			cts->ccb_h.status = CAM_PATH_INVALID;
6688 			xpt_done((union ccb *)cts);
6689 			return;
6690 		}
6691 
6692 		/*
6693 		 * Perform sanity checking against what the
6694 		 * controller and device can do.
6695 		 */
6696 		xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6697 		cpi.ccb_h.func_code = XPT_PATH_INQ;
6698 		xpt_action((union ccb *)&cpi);
6699 		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6700 		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6701 		cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
6702 		xpt_action((union ccb *)&cur_cts);
6703 		inq_data = &device->inq_data;
6704 
6705 		/* Fill in any gaps in what the user gave us */
6706 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
6707 			cts->sync_period = cur_cts.sync_period;
6708 		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
6709 			cts->sync_offset = cur_cts.sync_offset;
6710 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
6711 			cts->bus_width = cur_cts.bus_width;
6712 		if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
6713 			cts->flags &= ~CCB_TRANS_DISC_ENB;
6714 			cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
6715 		}
6716 		if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
6717 			cts->flags &= ~CCB_TRANS_TAG_ENB;
6718 			cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
6719 		}
6720 
6721 		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6722 		  && (inq_data->flags & SID_Sync) == 0)
6723 		 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6724 		 || (cts->sync_offset == 0)
6725 		 || (cts->sync_period == 0)) {
6726 			/* Force async */
6727 			cts->sync_period = 0;
6728 			cts->sync_offset = 0;
6729 		} else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6730 			&& (inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
6731 			&& cts->sync_period <= 0x9) {
6732 			/*
6733 			 * Don't allow DT transmission rates if the
6734 			 * device does not support it.
6735 			 */
6736 			cts->sync_period = 0xa;
6737 		}
6738 
6739 		switch (cts->bus_width) {
6740 		case MSG_EXT_WDTR_BUS_32_BIT:
6741 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6742 			  || (inq_data->flags & SID_WBus32) != 0)
6743 			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6744 				break;
6745 			/* FALLTHROUGH to 16-bit */
6746 		case MSG_EXT_WDTR_BUS_16_BIT:
6747 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6748 			  || (inq_data->flags & SID_WBus16) != 0)
6749 			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6750 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6751 				break;
6752 			}
6753 			/* FALLTHROUGH to 8-bit */
6754 		default: /* New bus width?? */
6755 		case MSG_EXT_WDTR_BUS_8_BIT:
6756 			/* All targets can do this */
6757 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6758 			break;
6759 		}
6760 
6761 		if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
6762 			/*
6763 			 * Can't tag queue without disconnection.
6764 			 */
6765 			cts->flags &= ~CCB_TRANS_TAG_ENB;
6766 			cts->valid |= CCB_TRANS_TQ_VALID;
6767 		}
6768 
6769 		if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6770 	 	 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
6771 		 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6772 		 || (device->quirk->mintags == 0)) {
6773 			/*
6774 			 * Can't tag on hardware that doesn't support tags,
6775 			 * doesn't have it enabled, or has broken tag support.
6776 			 */
6777 			cts->flags &= ~CCB_TRANS_TAG_ENB;
6778 		}
6779 	}
6780 
6781 	qfrozen = FALSE;
6782 	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
6783 		int device_tagenb;
6784 
6785 		/*
6786 		 * If we are transitioning from tags to no-tags or
6787 		 * vice-versa, we need to carefully freeze and restart
6788 		 * the queue so that we don't overlap tagged and non-tagged
6789 		 * commands.  We also temporarily stop tags if there is
6790 		 * a change in transfer negotiation settings to allow
6791 		 * "tag-less" negotiation.
6792 		 */
6793 		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6794 		 || (device->inq_flags & SID_CmdQue) != 0)
6795 			device_tagenb = TRUE;
6796 		else
6797 			device_tagenb = FALSE;
6798 
6799 		if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
6800 		  && device_tagenb == FALSE)
6801 		 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
6802 		  && device_tagenb == TRUE)) {
6803 
6804 			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
6805 				/*
6806 				 * Delay change to use tags until after a
6807 				 * few commands have gone to this device so
6808 				 * the controller has time to perform transfer
6809 				 * negotiations without tagged messages getting
6810 				 * in the way.
6811 				 */
6812 				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6813 				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6814 			} else {
6815 				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6816 				qfrozen = TRUE;
6817 				device->inq_flags &= ~SID_CmdQue;
6818 				xpt_dev_ccbq_resize(cts->ccb_h.path,
6819 						    sim->max_dev_openings);
6820 				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6821 				device->tag_delay_count = 0;
6822 			}
6823 		}
6824 	}
6825 
6826 	if (async_update == FALSE) {
6827 		/*
6828 		 * If we are currently performing tagged transactions to
6829 		 * this device and want to change its negotiation parameters,
6830 		 * go non-tagged for a bit to give the controller a chance to
6831 		 * negotiate unhampered by tag messages.
6832 		 */
6833 		if ((device->inq_flags & SID_CmdQue) != 0
6834 		 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
6835 				   CCB_TRANS_SYNC_OFFSET_VALID|
6836 				   CCB_TRANS_BUS_WIDTH_VALID)) != 0)
6837 			xpt_toggle_tags(cts->ccb_h.path);
6838 
6839 		(*(sim->sim_action))(sim, (union ccb *)cts);
6840 	}
6841 
6842 	if (qfrozen) {
6843 		struct ccb_relsim crs;
6844 
6845 		xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6846 			      /*priority*/1);
6847 		crs.ccb_h.func_code = XPT_REL_SIMQ;
6848 		crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6849 		crs.openings
6850 		    = crs.release_timeout
6851 		    = crs.qfrozen_cnt
6852 		    = 0;
6853 		xpt_action((union ccb *)&crs);
6854 	}
6855 }
6856 
6857 
6858 #endif /* CAM_NEW_TRAN_CODE */
6859 
6860 static void
6861 xpt_toggle_tags(struct cam_path *path)
6862 {
6863 	struct cam_ed *dev;
6864 
6865 	/*
6866 	 * Give controllers a chance to renegotiate
6867 	 * before starting tag operations.  We
6868 	 * "toggle" tagged queuing off then on
6869 	 * "toggle" tagged queuing off then on,
6870 	 * counter to come into effect.
6871 	 */
6872 	dev = path->device;
6873 	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6874 	 || ((dev->inq_flags & SID_CmdQue) != 0
6875  	  && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
6876 		struct ccb_trans_settings cts;
6877 
6878 		xpt_setup_ccb(&cts.ccb_h, path, 1);
6879 #ifdef CAM_NEW_TRAN_CODE
6880 		cts.protocol = PROTO_SCSI;
6881 		cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
6882 		cts.transport = XPORT_UNSPECIFIED;
6883 		cts.transport_version = XPORT_VERSION_UNSPECIFIED;
6884 		cts.proto_specific.scsi.flags = 0;
6885 		cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
6886 #else /* CAM_NEW_TRAN_CODE */
6887 		cts.flags = 0;
6888 		cts.valid = CCB_TRANS_TQ_VALID;
6889 #endif /* CAM_NEW_TRAN_CODE */
6890 		xpt_set_transfer_settings(&cts, path->device,
6891 					  /*async_update*/TRUE);
6892 #ifdef CAM_NEW_TRAN_CODE
6893 		cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
6894 #else /* CAM_NEW_TRAN_CODE */
6895 		cts.flags = CCB_TRANS_TAG_ENB;
6896 #endif /* CAM_NEW_TRAN_CODE */
6897 		xpt_set_transfer_settings(&cts, path->device,
6898 					  /*async_update*/TRUE);
6899 	}
6900 }
6901 
6902 static void
6903 xpt_start_tags(struct cam_path *path)
6904 {
6905 	struct ccb_relsim crs;
6906 	struct cam_ed *device;
6907 	struct cam_sim *sim;
6908 	int    newopenings;
6909 
6910 	device = path->device;
6911 	sim = path->bus->sim;
6912 	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6913 	xpt_freeze_devq(path, /*count*/1);
6914 	device->inq_flags |= SID_CmdQue;
6915 	if (device->tag_saved_openings != 0)
6916 		newopenings = device->tag_saved_openings;
6917 	else
6918 		newopenings = min(device->quirk->maxtags,
6919 				  sim->max_tagged_dev_openings);
6920 	xpt_dev_ccbq_resize(path, newopenings);
6921 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
6922 	crs.ccb_h.func_code = XPT_REL_SIMQ;
6923 	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6924 	crs.openings
6925 	    = crs.release_timeout
6926 	    = crs.qfrozen_cnt
6927 	    = 0;
6928 	xpt_action((union ccb *)&crs);
6929 }
6930 
6931 static int busses_to_config;
6932 static int busses_to_reset;
6933 
6934 static int
6935 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
6936 {
6937 	if (bus->path_id != CAM_XPT_PATH_ID) {
6938 		struct cam_path path;
6939 		struct ccb_pathinq cpi;
6940 		int can_negotiate;
6941 
6942 		busses_to_config++;
6943 		xpt_compile_path(&path, NULL, bus->path_id,
6944 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
6945 		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
6946 		cpi.ccb_h.func_code = XPT_PATH_INQ;
6947 		xpt_action((union ccb *)&cpi);
6948 		can_negotiate = cpi.hba_inquiry;
6949 		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6950 		if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
6951 		 && can_negotiate)
6952 			busses_to_reset++;
6953 		xpt_release_path(&path);
6954 	}
6955 
6956 	return(1);
6957 }
6958 
6959 static int
6960 xptconfigfunc(struct cam_eb *bus, void *arg)
6961 {
6962 	struct	cam_path *path;
6963 	union	ccb *work_ccb;
6964 
6965 	if (bus->path_id != CAM_XPT_PATH_ID) {
6966 		cam_status status;
6967 		int can_negotiate;
6968 
6969 		work_ccb = xpt_alloc_ccb();
6970 		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6971 					      CAM_TARGET_WILDCARD,
6972 					      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
6973 			printf("xptconfigfunc: xpt_create_path failed with "
6974 			       "status %#x for bus %d\n", status, bus->path_id);
6975 			printf("xptconfigfunc: halting bus configuration\n");
6976 			xpt_free_ccb(work_ccb);
6977 			busses_to_config--;
6978 			xpt_finishconfig(xpt_periph, NULL);
6979 			return(0);
6980 		}
6981 		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6982 		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
6983 		xpt_action(work_ccb);
6984 		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
6985 			printf("xptconfigfunc: CPI failed on bus %d "
6986 			       "with status %d\n", bus->path_id,
6987 			       work_ccb->ccb_h.status);
6988 			xpt_finishconfig(xpt_periph, work_ccb);
6989 			return(1);
6990 		}
6991 
6992 		can_negotiate = work_ccb->cpi.hba_inquiry;
6993 		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6994 		if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
6995 		 && (can_negotiate != 0)) {
6996 			xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6997 			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6998 			work_ccb->ccb_h.cbfcnp = NULL;
6999 			CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
7000 				  ("Resetting Bus\n"));
7001 			xpt_action(work_ccb);
7002 			xpt_finishconfig(xpt_periph, work_ccb);
7003 		} else {
7004 			/* Act as though we performed a successful BUS RESET */
7005 			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
7006 			xpt_finishconfig(xpt_periph, work_ccb);
7007 		}
7008 	}
7009 
7010 	return(1);
7011 }
7012 
7013 static void
7014 xpt_config(void *arg)
7015 {
7016 	/*
7017 	 * Now that interrupts are enabled, go find our devices
7018 	 */
7019 
7020 #ifdef CAMDEBUG
7021 	/* Setup debugging flags and path */
7022 #ifdef CAM_DEBUG_FLAGS
7023 	cam_dflags = CAM_DEBUG_FLAGS;
7024 #else /* !CAM_DEBUG_FLAGS */
7025 	cam_dflags = CAM_DEBUG_NONE;
7026 #endif /* CAM_DEBUG_FLAGS */
7027 #ifdef CAM_DEBUG_BUS
7028 	if (cam_dflags != CAM_DEBUG_NONE) {
7029 		if (xpt_create_path(&cam_dpath, xpt_periph,
7030 				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
7031 				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
7032 			printf("xpt_config: xpt_create_path() failed for debug"
7033 			       " target %d:%d:%d, debugging disabled\n",
7034 			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
7035 			cam_dflags = CAM_DEBUG_NONE;
7036 		}
7037 	} else
7038 		cam_dpath = NULL;
7039 #else /* !CAM_DEBUG_BUS */
7040 	cam_dpath = NULL;
7041 #endif /* CAM_DEBUG_BUS */
7042 #endif /* CAMDEBUG */
7043 
7044 	/*
7045 	 * Scan all installed busses.
7046 	 */
7047 	xpt_for_all_busses(xptconfigbuscountfunc, NULL);
7048 
7049 	if (busses_to_config == 0) {
7050 		/* Call manually because we don't have any busses */
7051 		xpt_finishconfig(xpt_periph, NULL);
7052 	} else  {
7053 		if (busses_to_reset > 0 && scsi_delay >= 2000) {
7054 			printf("Waiting %d seconds for SCSI "
7055 			       "devices to settle\n", scsi_delay/1000);
7056 		}
7057 		xpt_for_all_busses(xptconfigfunc, NULL);
7058 	}
7059 }
7060 
7061 /*
7062  * If the given device only has one peripheral attached to it, and if that
7063  * peripheral is the passthrough driver, announce it.  This ensures that the
7064  * user sees some sort of announcement for every peripheral in their system.
7065  */
7066 static int
7067 xptpassannouncefunc(struct cam_ed *device, void *arg)
7068 {
7069 	struct cam_periph *periph;
7070 	int i;
7071 
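	/* Count the peripheral instances attached to this device. */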
7072 	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
7073 	     periph = SLIST_NEXT(periph, periph_links), i++);
7074 
7075 	periph = SLIST_FIRST(&device->periphs);
7076 	if ((i == 1)
7077 	 && (strncmp(periph->periph_name, "pass", 4) == 0))
7078 		xpt_announce_periph(periph, NULL);
7079 
7080 	return(1);
7081 }
7082 
7083 static void
7084 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
7085 {
7086 	struct	periph_driver **p_drv;
7087 	int	i;
7088 
7089 	if (done_ccb != NULL) {
7090 		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
7091 			  ("xpt_finishconfig\n"));
7092 		switch(done_ccb->ccb_h.func_code) {
7093 		case XPT_RESET_BUS:
7094 			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
7095 				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
7096 				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
7097 				done_ccb->crcn.flags = 0;
7098 				xpt_action(done_ccb);
7099 				return;
7100 			}
7101 			/* FALLTHROUGH */
7102 		case XPT_SCAN_BUS:
7103 		default:
7104 			xpt_free_path(done_ccb->ccb_h.path);
7105 			busses_to_config--;
7106 			break;
7107 		}
7108 	}
7109 
7110 	if (busses_to_config == 0) {
7111 		/* Register all the peripheral drivers */
7112 		/* XXX This will have to change when we have loadable modules */
7113 		p_drv = periph_drivers;
7114 		for (i = 0; p_drv[i] != NULL; i++) {
7115 			(*p_drv[i]->init)();
7116 		}
7117 
7118 		/*
7119 		 * Check for devices with no "standard" peripheral driver
7120 		 * attached.  For any devices like that, announce the
7121 		 * passthrough driver so the user will see something.
7122 		 */
7123 		xpt_for_all_devices(xptpassannouncefunc, NULL);
7124 
7125 		/* Release our hook so that the boot can continue. */
7126 		config_intrhook_disestablish(xpt_config_hook);
7127 		free(xpt_config_hook, M_TEMP);
7128 		xpt_config_hook = NULL;
7129 	}
7130 	if (done_ccb != NULL)
7131 		xpt_free_ccb(done_ccb);
7132 }
7133 
7134 static void
7135 xptaction(struct cam_sim *sim, union ccb *work_ccb)
7136 {
7137 	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
7138 
7139 	switch (work_ccb->ccb_h.func_code) {
7140 	/* Common cases first */
7141 	case XPT_PATH_INQ:		/* Path routing inquiry */
7142 	{
7143 		struct ccb_pathinq *cpi;
7144 
7145 		cpi = &work_ccb->cpi;
7146 		cpi->version_num = 1; /* XXX??? */
7147 		cpi->hba_inquiry = 0;
7148 		cpi->target_sprt = 0;
7149 		cpi->hba_misc = 0;
7150 		cpi->hba_eng_cnt = 0;
7151 		cpi->max_target = 0;
7152 		cpi->max_lun = 0;
7153 		cpi->initiator_id = 0;
7154 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
7155 		strncpy(cpi->hba_vid, "", HBA_IDLEN);
7156 		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
7157 		cpi->unit_number = sim->unit_number;
7158 		cpi->bus_id = sim->bus_id;
7159 		cpi->base_transfer_speed = 0;
7160 #ifdef CAM_NEW_TRAN_CODE
7161 		cpi->protocol = PROTO_UNSPECIFIED;
7162 		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
7163 		cpi->transport = XPORT_UNSPECIFIED;
7164 		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
7165 #endif /* CAM_NEW_TRAN_CODE */
7166 		cpi->ccb_h.status = CAM_REQ_CMP;
7167 		xpt_done(work_ccb);
7168 		break;
7169 	}
7170 	default:
7171 		work_ccb->ccb_h.status = CAM_REQ_INVALID;
7172 		xpt_done(work_ccb);
7173 		break;
7174 	}
7175 }
7176 
7177 /*
7178  * The xpt as a "controller" has no interrupt sources, so polling
7179  * is a no-op.
7180  */
7181 static void
7182 xptpoll(struct cam_sim *sim)
7183 {
7184 }
7185 
7186 static void
7187 camisr(void *V_queue)
7188 {
7189 	cam_isrq_t *oqueue = V_queue;
7190 	cam_isrq_t queue;
7191 	int	s;
7192 	struct	ccb_hdr *ccb_h;
7193 
7194 	/*
7195 	 * Transfer the ccb_bioq list to a temporary list so we can operate
7196 	 * on it without needing to lock/unlock on every loop.  The concat
7197 	 * function will re-init the real list for us.
7198 	 */
7199 	s = splcam();
7200 	mtx_lock(&cam_bioq_lock);
7201 	TAILQ_INIT(&queue);
7202 	TAILQ_CONCAT(&queue, oqueue, sim_links.tqe);
7203 	mtx_unlock(&cam_bioq_lock);
7204 
7205 	while ((ccb_h = TAILQ_FIRST(&queue)) != NULL) {
7206 		int	runq;
7207 
7208 		TAILQ_REMOVE(&queue, ccb_h, sim_links.tqe);
7209 		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
7210 		splx(s);
7211 
7212 		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
7213 			  ("camisr\n"));
7214 
7215 		runq = FALSE;
7216 
7217 		if (ccb_h->flags & CAM_HIGH_POWER) {
7218 			struct highpowerlist	*hphead;
7219 			union ccb		*send_ccb;
7220 
7221 			hphead = &highpowerq;
7222 
7223 			send_ccb = (union ccb *)STAILQ_FIRST(hphead);
7224 
7225 			/*
7226 			 * Increment the count since this command is done.
7227 			 */
7228 			num_highpower++;
7229 
7230 			/*
7231 			 * Any high powered commands queued up?
7232 			 */
7233 			if (send_ccb != NULL) {
7234 
7235 				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
7236 
7237 				xpt_release_devq(send_ccb->ccb_h.path,
7238 						 /*count*/1, /*runqueue*/TRUE);
7239 			}
7240 		}
7241 		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
7242 			struct cam_ed *dev;
7243 
7244 			dev = ccb_h->path->device;
7245 
7246 			s = splcam();
7247 			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
7248 
7249 			if (!SIM_DEAD(ccb_h->path->bus->sim)) {
7250 				ccb_h->path->bus->sim->devq->send_active--;
7251 				ccb_h->path->bus->sim->devq->send_openings++;
7252 			}
7253 			splx(s);
7254 
7255 			if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
7256 			  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
7257 			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
7258 			  && (dev->ccbq.dev_active == 0))) {
7259 
7260 				xpt_release_devq(ccb_h->path, /*count*/1,
7261 						 /*run_queue*/TRUE);
7262 			}
7263 
7264 			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
7265 			 && (--dev->tag_delay_count == 0))
7266 				xpt_start_tags(ccb_h->path);
7267 
7268 			if ((dev->ccbq.queue.entries > 0)
7269 			 && (dev->qfrozen_cnt == 0)
7270 			 && (device_is_send_queued(dev) == 0)) {
7271 				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
7272 							      dev);
7273 			}
7274 		}
7275 
7276 		if (ccb_h->status & CAM_RELEASE_SIMQ) {
7277 			xpt_release_simq(ccb_h->path->bus->sim,
7278 					 /*run_queue*/TRUE);
7279 			ccb_h->status &= ~CAM_RELEASE_SIMQ;
7280 			runq = FALSE;
7281 		}
7282 
7283 		if ((ccb_h->flags & CAM_DEV_QFRZDIS)
7284 		 && (ccb_h->status & CAM_DEV_QFRZN)) {
7285 			xpt_release_devq(ccb_h->path, /*count*/1,
7286 					 /*run_queue*/TRUE);
7287 			ccb_h->status &= ~CAM_DEV_QFRZN;
7288 		} else if (runq) {
7289 			xpt_run_dev_sendq(ccb_h->path->bus);
7290 		}
7291 
7292 		/* Call the peripheral driver's callback */
7293 		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
7294 
7295 		/* Raise IPL for the while-loop test */
7296 		s = splcam();
7297 	}
7298 	splx(s);
7299 }
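/*
 * Illustrative sketch (kept out of compilation): the lock-once drain
 * pattern camisr() uses above, under hypothetical names.  The shared
 * queue is spliced onto a local head while the lock is held, then
 * processed without it; TAILQ_CONCAT() re-initializes the shared head.
 */
#if 0
struct work_item {
	TAILQ_ENTRY(work_item)	links;
};
TAILQ_HEAD(workq, work_item);

static void
drain(struct workq *shared, struct mtx *lock)
{
	struct workq local;
	struct work_item *w;

	TAILQ_INIT(&local);
	mtx_lock(lock);
	TAILQ_CONCAT(&local, shared, links);
	mtx_unlock(lock);

	while ((w = TAILQ_FIRST(&local)) != NULL) {
		TAILQ_REMOVE(&local, w, links);
		process(w);	/* hypothetical per-item handler */
	}
}
#endif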
7300 
7301 static void
7302 dead_sim_action(struct cam_sim *sim, union ccb *ccb)
7303 {
7304 
7305 	ccb->ccb_h.status = CAM_DEV_NOT_THERE;
7306 	xpt_done(ccb);
7307 }
7308 
7309 static void
7310 dead_sim_poll(struct cam_sim *sim)
7311 {
7312 }
7313