/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>	/* geometry translation */
#endif

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include "opt_cam.h"

/* Data structures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};
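
/*
 * Illustrative sketch only (not part of this file): a callback with the
 * shape expected above.  The name "mydriver_async" and its handling of
 * AC_LOST_DEVICE are hypothetical; consumers typically register such a
 * callback by passing an XPT_SASYNC_CB CCB to xpt_action().
 *
 *	static void
 *	mydriver_async(void *callback_arg, u_int32_t code,
 *		       struct cam_path *path, void *arg)
 *	{
 *		struct mydriver_softc *softc = callback_arg;
 *
 *		switch (code) {
 *		case AC_LOST_DEVICE:
 *			// release resources tied to this B/T/L
 *			mydriver_cleanup(softc, path);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */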

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
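
/*
 * Sketch: because of the #ifndef guard above, the limit can be raised at
 * build time, e.g. with
 *	options CAM_MAX_HIGHPOWER=8
 * in the kernel configuration file (assuming the option is wired into
 * opt_cam.h), or by defining it on the compiler command line.
 */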

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
#ifdef CAM_NEW_TRAN_CODE
	cam_proto	 protocol;
	u_int		 protocol_version;
	cam_xport	 transport;
	u_int		 transport_version;
#endif /* CAM_NEW_TRAN_CODE */
	struct		 scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 serial_num_len;
	u_int8_t	*serial_num;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED	 	0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define CAM_DEV_TAG_AFTER_COUNT		0x20
#define CAM_DEV_INQUIRY_DATA_VALID	0x40
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 tag_saved_openings;
	u_int32_t	 refcount;
	struct		 callout_handle c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
	struct		timeval last_reset;
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	struct timeval	     last_reset;
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int32_t	     refcount;
	u_int		     generation;
};

struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};

struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
#define	CAM_QUIRK_HILUNS	0x04
#define	CAM_QUIRK_NOHILUNS	0x08
	u_int mintags;
	u_int maxtags;
};

static int cam_srch_hi = 0;
TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
    sysctl_cam_search_luns, "I",
    "allow search above LUN 7 for SCSI3 and greater devices");
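
/*
 * Example: the knob above can be set at boot in /boot/loader.conf
 *	kern.cam.cam_srch_hi=1
 * or at run time with sysctl(8):
 *	sysctl kern.cam.cam_srch_hi=1
 */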

#define	CAM_SCSI2_MAXLUN	8
/*
 * If we're not quirked to search <= the first 8 luns
 * and we are either quirked to search above lun 8,
 * or we're > SCSI-2 and we've enabled hilun searching,
 * or we're > SCSI-2 and the last lun was a success,
 * we can look for luns above lun 8.
 */
#define	CAN_SRCH_HI_SPARSE(dv)				\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) 	\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)		\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))

#define	CAN_SRCH_HI_DENSE(dv)				\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) 	\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)		\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
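
/*
 * Usage sketch (hypothetical, not the actual probe logic): a LUN scan
 * loop could stop at the SCSI-2 boundary unless one of the macros above
 * grants permission to go on:
 *
 *	if (lun_id == CAM_SCSI2_MAXLUN - 1 && !CAN_SRCH_HI_DENSE(device))
 *		done = 1;	// stay within LUNs 0-7
 */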

typedef enum {
	XPT_FLAG_OPEN		= 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags	flags;
	u_int32_t	generation;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 *
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
		/*
		 * The Seagate Medalist Pro drives have very poor write
		 * performance with anything more than 2 tags.
		 *
		 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
		 * Drive:  <SEAGATE ST36530N 1444>
		 *
		 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
		 * Drive:  <SEAGATE ST34520W 1281>
		 *
		 * No one has actually reported that the 9G version
		 * (ST39140*) of the Medalist Pro has the same problem, but
		 * we're assuming that it does because the 4G and 6.5G
		 * versions of the drive are broken.
		 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * PR:  kern/10398
		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Does not support other than LUN 0 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 * in PR kern/9535
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
        {
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by:  Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		{ T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Let's try the same as above, but for a drive that says
		 * it's an IPL-6860 but is actually an EXB 8200.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"IPL-6860*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These Hitachi drives don't like multi-lun probing.
		 * The PR submitter has a DK319H, but says that the Linux
		 * kernel has a similar work-around for the DK312 and DK314,
		 * so all DK31* drives are quirked here.
		 * PR:            misc/18793
		 * Submitted by:  Paul Haddad <paul@pth.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * The Hitachi CJ series with J8A8 firmware apparently has
		 * problems with tagged commands.
		 * PR: 23536
		 * Reported by: amagai@nue.org
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These are the large storage arrays.
		 * Submitted by:  William Carrel <william.carrel@infospace.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
		CAM_QUIRK_HILUNS, 2, 1024
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Maxtor Personal Storage 3000XT (Firewire)
		 * hangs upon serial number probing.
		 */
		{
			T_DIRECT, SIP_MEDIA_FIXED, "Maxtor",
			"1394 storage", "*"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
			"96X2*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* TeraSolutions special settings for TRC-22 RAID */
		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
		  /*quirks*/0, /*mintags*/55, /*maxtags*/255
	},
	{
		/* Veritas Storage Appliance */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
		  CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
	},
	{
		/*
		 * Would respond to all LUNs.  Device type and removable
		 * flag are jumper-selectable.
		 */
		{ T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
		  "Tahiti 1", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* EasyRAID E5A aka. areca ARC-6010 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
		  CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};
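
/*
 * Sketch: a new quirk would be inserted above the catch-all default
 * entry.  Patterns are matched first-to-last against the INQUIRY
 * vendor/product/revision strings with shell-style '*' wildcards; the
 * vendor/product names below are made up for illustration.
 *
 *	{
 *		{ T_DIRECT, SIP_MEDIA_FIXED, "ACME", "ROTODISK*", "*" },
 *		CAM_QUIRK_NOLUNS, 0, 0
 *	},
 */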

static const int xpt_quirk_table_size =
	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;
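
/*
 * The low nibble carries flags and the high nibble an action, so a
 * match result is decoded with the masks above, e.g. (sketch):
 *
 *	if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 *		... descend into this node's children ...
 *	if ((retval & DM_RET_COPY) != 0)
 *		... copy the node out to the user's match buffer ...
 */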

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static struct mtx cam_bioq_lock;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;	/*
				 * Maximum size of ccb pool.  Modified as
				 * devices are added/removed or have their
				 * opening counts changed.
				 */
static u_int xpt_ccb_count;	/* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);


static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	xptopen,
	.d_close =	xptclose,
	.d_ioctl =	xptioctl,
	.d_name =	"xpt",
};

static struct intr_config_hook *xpt_config_hook;

static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
static void dead_sim_poll(struct cam_sim *sim);

/* Dummy SIM that is used when the real one has gone. */
static struct cam_sim cam_dead_sim = {
	.sim_action =	dead_sim_action,
	.sim_poll =	dead_sim_poll,
	.sim_name =	"dead_sim",
};

#define SIM_DEAD(sim)	((sim) == &cam_dead_sim)
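
/*
 * Usage sketch (hypothetical): code paths that may run after a
 * controller departs guard themselves before touching the SIM, e.g.
 *
 *	if (SIM_DEAD(bus->sim))
 *		return;		// early exit; the hardware is gone
 */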

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging data structures */
#ifdef	CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

/* Pointers to software interrupt handlers */
static void *cambio_ih;

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
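
/*
 * Example kernel configuration satisfying the rules above (a sketch;
 * the bus/target/lun values are illustrative):
 *
 *	options CAMDEBUG
 *	options CAM_DEBUG_BUS=0
 *	options CAM_DEBUG_TARGET=1
 *	options CAM_DEBUG_LUN=0
 *	options CAM_DEBUG_FLAGS=CAM_DEBUG_INFO
 */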

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static void	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);


static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static void		xpt_dev_async(u_int32_t async_code,
				      struct cam_eb *bus,
				      struct cam_et *target,
				      struct cam_ed *device,
				      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static timeout_t xpt_release_simq_timeout;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
					 int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static void	 camisr(void *);
#if 0
static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void	 xptasync(struct cam_periph *periph,
			  u_int32_t code, cam_path *path);
#endif
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
					    void *arg);
#endif
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
#ifdef notusedyet
static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
					    void *arg);
#endif
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
#ifdef CAM_NEW_TRAN_CODE
static void	 xpt_devise_transport(struct cam_path *path);
#endif /* CAM_NEW_TRAN_CODE */
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int	retval;

	if (dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}

static void
xpt_periph_init()
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init()
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("xpt%d: can't do nonblocking access\n", unit);
		return(ENODEV);
	}

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptopen: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves open */
	xsoftc.flags |= XPT_FLAG_OPEN;

	return(0);
}

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptclose: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves closed */
	xsoftc.flags &= ~XPT_FLAG_OPEN;

	return(0);
}

static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int unit, error;

	error = 0;
	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptioctl: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
	 * in the CAM spec.
	 */
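	/*
	 * Userland sketch (hypothetical; error handling omitted): issuing
	 * an XPT_PATH_INQ through this ioctl could look like
	 *
	 *	union ccb ccb;
	 *	int fd = open("/dev/xpt0", O_RDWR);
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	ccb.ccb_h.func_code = XPT_PATH_INQ;
	 *	ccb.ccb_h.path_id = 0;
	 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
	 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
	 *	ioctl(fd, CAMIOCOMMAND, &ccb);
	 */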
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;

		inccb = (union ccb *)addr;

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		break;
	}
	/*
	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit number filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass") and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.  Note that
	 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
	 * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change since we look at the whole thing in one pass, and
	 * we do it with splcam protection.
	 *
	 */
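	/*
	 * Userland sketch (hypothetical): asking which pass(4) instance
	 * sits on top of "da0" could look like
	 *
	 *	union ccb ccb;
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	strlcpy(ccb.cgdl.periph_name, "da",
	 *	    sizeof(ccb.cgdl.periph_name));
	 *	ccb.cgdl.unit_number = 0;
	 *	ioctl(fd, CAMGETPASSTHRU, &ccb);
	 *	// on success, ccb.cgdl.periph_name is "pass" and
	 *	// ccb.cgdl.unit_number holds the pass unit number
	 */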
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		u_int unit;
		u_int cur_generation;
		int base_periph_found;
		int splbreaknum;
		int s;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to drop our spl protection to
		 * give the software interrupt handler a chance to run.
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)addr;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		s = splcam();
ptstartover:
		cur_generation = xsoftc.generation;

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			splx(s);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				splx(s);
				s = splcam();
				splbreaknum = 100;
				if (cur_generation != xsoftc.generation)
				       goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass0\" in "
				       "your kernel config file\n");
			}
		}
		splx(s);
		break;
		}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	if (what == MOD_LOAD) {
		xpt_init(NULL);
	} else if (what == MOD_UNLOAD) {
		return EBUSY;
	} else {
		return EOPNOTSUPP;
	}

	return 0;
}

/* Functions accessed by the peripheral drivers */
static void
xpt_init(dummy)
	void *dummy;
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;

	TAILQ_INIT(&xpt_busses);
	TAILQ_INIT(&cam_bioq);
	SLIST_INIT(&ccb_freeq);
	STAILQ_INIT(&highpowerq);

	mtx_init(&cam_bioq_lock, "CAM BIOQ lock", NULL, MTX_DEF);

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	xpt_max_ccbs = 16;

	xpt_bus_register(xpt_sim, /*bus #*/0);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return;
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, NULL);
	xpt_free_path(path);

	xpt_sim->softc = xpt_periph;

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_TEMP, M_NOWAIT | M_ZERO);
	if (xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return;
	}

	xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xpt_config_hook) != 0) {
		free (xpt_config_hook, M_TEMP);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* Install our software interrupt handlers */
	swi_add(NULL, "cambio", camisr, &cam_bioq, SWI_CAMBIO, 0, &cambio_ih);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	periph->softc = NULL;

	xpt_periph = periph;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	GIANT_REQUIRED;

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		int s;

		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		s = splsoftcam();
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	GIANT_REQUIRED;

	device = periph->path->device;

	if (device != NULL) {
		int s;
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		s = splsoftcam();
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

}

#ifdef CAM_NEW_TRAN_CODE

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	struct	ccb_pathinq cpi;
	struct	ccb_trans_settings cts;
	struct	cam_path *path;
	u_int	speed;
	u_int	freq;
	u_int	mb;
	int	s;

	GIANT_REQUIRED;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	s = splsoftcam();
	printf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if (bootverbose && path->device->serial_num_len > 0) {
		/* Don't wrap the screen  - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);

	/* Ask the SIM for its base transfer speed */
	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	speed = cpi.base_transfer_speed;
	freq = 0;
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts.xport_specific.spi;
		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
		  && spi->sync_offset != 0) {
			freq = scsi_calc_syncsrate(spi->sync_period);
			speed = freq;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
			speed *= (0x01 << spi->bus_width);
	}

	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct	ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_SPEED) {
			speed = fc->bitrate;
		}
	}

	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
		struct	ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
		if (sas->valid & CTS_SAS_VALID_SPEED) {
			speed = sas->bitrate;
		}
	}

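	/*
	 * Example: speed is carried in KB/s here, so an Ultra160 device
	 * reporting 160000 prints as "160.000MB/s transfers" below.
	 */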
	mb = speed / 1000;
	if (mb > 0)
		printf("%s%d: %d.%03dMB/s transfers",
		       periph->periph_name, periph->unit_number,
		       mb, speed % 1000);
	else
		printf("%s%d: %dKB/s transfers", periph->periph_name,
		       periph->unit_number, speed);
	/* Report additional information about SPI connections */
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts.xport_specific.spi;
		if (freq != 0) {
			printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
			       freq % 1000,
			       (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
			     ? " DT" : "",
			       spi->sync_offset);
		}
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
		 && spi->bus_width > 0) {
			if (freq != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << spi->bus_width));
		} else if (freq != 0) {
			printf(")");
		}
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct	ccb_trans_settings_fc *fc;

		fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWNN)
			printf(" WWNN 0x%llx", (long long) fc->wwnn);
		if (fc->valid & CTS_FC_VALID_WWPN)
			printf(" WWPN 0x%llx", (long long) fc->wwpn);
		if (fc->valid & CTS_FC_VALID_PORT)
			printf(" PortID 0x%x", fc->port);
	}

	if (path->device->inq_flags & SID_CmdQue
	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("\n%s%d: Tagged Queueing Enabled",
		       periph->periph_name, periph->unit_number);
	}
	printf("\n");

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in.
1701 	 */
1702 	if (announce_string != NULL)
1703 		printf("%s%d: %s\n", periph->periph_name,
1704 		       periph->unit_number, announce_string);
1705 	splx(s);
1706 }
1707 #else /* CAM_NEW_TRAN_CODE */
1708 void
1709 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1710 {
1711 	int s;
1712 	u_int mb;
1713 	struct cam_path *path;
1714 	struct ccb_trans_settings cts;
1715 
1716 	GIANT_REQUIRED;
1717 
1718 	path = periph->path;
1719 	/*
1720 	 * To ensure that this is printed in one piece,
1721 	 * mask out CAM interrupts.
1722 	 */
1723 	s = splsoftcam();
1724 	printf("%s%d at %s%d bus %d target %d lun %d\n",
1725 	       periph->periph_name, periph->unit_number,
1726 	       path->bus->sim->sim_name,
1727 	       path->bus->sim->unit_number,
1728 	       path->bus->sim->bus_id,
1729 	       path->target->target_id,
1730 	       path->device->lun_id);
1731 	printf("%s%d: ", periph->periph_name, periph->unit_number);
1732 	scsi_print_inquiry(&path->device->inq_data);
1733 	if ((bootverbose)
1734 	 && (path->device->serial_num_len > 0)) {
1735 		/* Don't wrap the screen  - print only the first 60 chars */
1736 		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1737 		       periph->unit_number, path->device->serial_num);
1738 	}
1739 	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1740 	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1741 	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1742 	xpt_action((union ccb*)&cts);
1743 	if (cts.ccb_h.status == CAM_REQ_CMP) {
1744 		u_int speed;
1745 		u_int freq;
1746 
1747 		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1748 		  && cts.sync_offset != 0) {
1749 			freq = scsi_calc_syncsrate(cts.sync_period);
1750 			speed = freq;
1751 		} else {
1752 			struct ccb_pathinq cpi;
1753 
1754 			/* Ask the SIM for its base transfer speed */
1755 			xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1756 			cpi.ccb_h.func_code = XPT_PATH_INQ;
1757 			xpt_action((union ccb *)&cpi);
1758 
1759 			speed = cpi.base_transfer_speed;
1760 			freq = 0;
1761 		}
1762 		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
1763 			speed *= (0x01 << cts.bus_width);
1764 		mb = speed / 1000;
1765 		if (mb > 0)
1766 			printf("%s%d: %d.%03dMB/s transfers",
1767 			       periph->periph_name, periph->unit_number,
1768 			       mb, speed % 1000);
1769 		else
1770 			printf("%s%d: %dKB/s transfers", periph->periph_name,
1771 			       periph->unit_number, speed);
1772 		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1773 		 && cts.sync_offset != 0) {
1774 			printf(" (%d.%03dMHz, offset %d", freq / 1000,
1775 			       freq % 1000, cts.sync_offset);
1776 		}
1777 		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
1778 		 && cts.bus_width > 0) {
1779 			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1780 			 && cts.sync_offset != 0) {
1781 				printf(", ");
1782 			} else {
1783 				printf(" (");
1784 			}
1785 			printf("%dbit)", 8 * (0x01 << cts.bus_width));
1786 		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1787 			&& cts.sync_offset != 0) {
1788 			printf(")");
1789 		}
1790 
1791 		if (path->device->inq_flags & SID_CmdQue
1792 		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1793 			printf(", Tagged Queueing Enabled");
1794 		}
1795 
1796 		printf("\n");
1797 	} else if (path->device->inq_flags & SID_CmdQue
1798 		|| path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1799 		printf("%s%d: Tagged Queueing Enabled\n",
1800 		       periph->periph_name, periph->unit_number);
1801 	}
1802 
1803 	/*
1804 	 * We only want to print the caller's announce string if they've
1805 	 * passed one in.
1806 	 */
1807 	if (announce_string != NULL)
1808 		printf("%s%d: %s\n", periph->periph_name,
1809 		       periph->unit_number, announce_string);
1810 	splx(s);
1811 }
1812 
1813 #endif /* CAM_NEW_TRAN_CODE */
1814 
1815 static dev_match_ret
1816 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1817 	    struct cam_eb *bus)
1818 {
1819 	dev_match_ret retval;
1820 	int i;
1821 
1822 	retval = DM_RET_NONE;
1823 
1824 	/*
1825 	 * If we aren't given something to match against, that's an error.
1826 	 */
1827 	if (bus == NULL)
1828 		return(DM_RET_ERROR);
1829 
1830 	/*
1831 	 * If there are no match entries, then this bus matches no
1832 	 * matter what.
1833 	 */
1834 	if ((patterns == NULL) || (num_patterns == 0))
1835 		return(DM_RET_DESCEND | DM_RET_COPY);
1836 
1837 	for (i = 0; i < num_patterns; i++) {
1838 		struct bus_match_pattern *cur_pattern;
1839 
1840 		/*
1841 		 * If the pattern in question isn't for a bus node, we
1842 		 * aren't interested.  However, we do indicate to the
1843 		 * calling routine that we should continue descending the
1844 		 * tree, since the user wants to match against lower-level
1845 		 * EDT elements.
1846 		 */
1847 		if (patterns[i].type != DEV_MATCH_BUS) {
1848 			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1849 				retval |= DM_RET_DESCEND;
1850 			continue;
1851 		}
1852 
1853 		cur_pattern = &patterns[i].pattern.bus_pattern;
1854 
1855 		/*
1856 		 * If they want to match any bus node, we give them any
1857 		 * device node.
1858 		 */
1859 		if (cur_pattern->flags == BUS_MATCH_ANY) {
1860 			/* set the copy flag */
1861 			retval |= DM_RET_COPY;
1862 
1863 			/*
1864 			 * If we've already decided on an action, go ahead
1865 			 * and return.
1866 			 */
1867 			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1868 				return(retval);
1869 		}
1870 
1871 		/*
1872 		 * Not sure why someone would do this...
1873 		 */
1874 		if (cur_pattern->flags == BUS_MATCH_NONE)
1875 			continue;
1876 
1877 		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1878 		 && (cur_pattern->path_id != bus->path_id))
1879 			continue;
1880 
1881 		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1882 		 && (cur_pattern->bus_id != bus->sim->bus_id))
1883 			continue;
1884 
1885 		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1886 		 && (cur_pattern->unit_number != bus->sim->unit_number))
1887 			continue;
1888 
1889 		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1890 		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1891 			     DEV_IDLEN) != 0))
1892 			continue;
1893 
1894 		/*
1895 		 * If we get to this point, the user definitely wants
1896 		 * information on this bus.  So tell the caller to copy the
1897 		 * data out.
1898 		 */
1899 		retval |= DM_RET_COPY;
1900 
1901 		/*
1902 		 * If the return action has been set to descend, then we
1903 		 * know that we've already seen a non-bus matching
1904 		 * expression, therefore we need to further descend the tree.
1905 		 * This won't change by continuing around the loop, so we
1906 		 * go ahead and return.  If we haven't seen a non-bus
1907 		 * matching expression, we keep going around the loop until
1908 		 * we exhaust the matching expressions.  We'll set the stop
1909 		 * flag once we fall out of the loop.
1910 		 */
1911 		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1912 			return(retval);
1913 	}
1914 
1915 	/*
1916 	 * If the return action hasn't been set to descend yet, that means
1917 	 * we haven't seen anything other than bus matching patterns.  So
1918 	 * tell the caller to stop descending the tree -- the user doesn't
1919 	 * want to match against lower level tree elements.
1920 	 */
1921 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1922 		retval |= DM_RET_STOP;
1923 
1924 	return(retval);
1925 }
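
/*
 * An illustrative composition of the DM_RET flags returned above (not code
 * in this file): given one device pattern and one bus pattern that matches
 * this bus, the loop records DM_RET_DESCEND for the non-bus pattern and
 * adds DM_RET_COPY when the bus pattern hits, ultimately returning
 * DM_RET_DESCEND | DM_RET_COPY -- "copy this bus out and keep walking its
 * targets".
 */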
1926 
1927 static dev_match_ret
1928 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1929 	       struct cam_ed *device)
1930 {
1931 	dev_match_ret retval;
1932 	int i;
1933 
1934 	retval = DM_RET_NONE;
1935 
1936 	/*
1937 	 * If we aren't given something to match against, that's an error.
1938 	 */
1939 	if (device == NULL)
1940 		return(DM_RET_ERROR);
1941 
1942 	/*
1943 	 * If there are no match entries, then this device matches no
1944 	 * matter what.
1945 	 */
1946 	if ((patterns == NULL) || (num_patterns == 0))
1947 		return(DM_RET_DESCEND | DM_RET_COPY);
1948 
1949 	for (i = 0; i < num_patterns; i++) {
1950 		struct device_match_pattern *cur_pattern;
1951 
1952 		/*
1953 		 * If the pattern in question isn't for a device node, we
1954 		 * aren't interested.
1955 		 */
1956 		if (patterns[i].type != DEV_MATCH_DEVICE) {
1957 			if ((patterns[i].type == DEV_MATCH_PERIPH)
1958 			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1959 				retval |= DM_RET_DESCEND;
1960 			continue;
1961 		}
1962 
1963 		cur_pattern = &patterns[i].pattern.device_pattern;
1964 
1965 		/*
1966 		 * If they want to match any device node, we give them any
1967 		 * device node.
1968 		 */
1969 		if (cur_pattern->flags == DEV_MATCH_ANY) {
1970 			/* set the copy flag */
1971 			retval |= DM_RET_COPY;
1972 
1973 
1974 			/*
1975 			 * If we've already decided on an action, go ahead
1976 			 * and return.
1977 			 */
1978 			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1979 				return(retval);
1980 		}
1981 
1982 		/*
1983 		 * Not sure why someone would do this...
1984 		 */
1985 		if (cur_pattern->flags == DEV_MATCH_NONE)
1986 			continue;
1987 
1988 		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1989 		 && (cur_pattern->path_id != device->target->bus->path_id))
1990 			continue;
1991 
1992 		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1993 		 && (cur_pattern->target_id != device->target->target_id))
1994 			continue;
1995 
1996 		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1997 		 && (cur_pattern->target_lun != device->lun_id))
1998 			continue;
1999 
2000 		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
2001 		 && (cam_quirkmatch((caddr_t)&device->inq_data,
2002 				    (caddr_t)&cur_pattern->inq_pat,
2003 				    1, sizeof(cur_pattern->inq_pat),
2004 				    scsi_static_inquiry_match) == NULL))
2005 			continue;
2006 
2007 		/*
2008 		 * If we get to this point, the user definitely wants
2009 		 * information on this device.  So tell the caller to copy
2010 		 * the data out.
2011 		 */
2012 		retval |= DM_RET_COPY;
2013 
2014 		/*
2015 		 * If the return action has been set to descend, then we
2016 		 * know that we've already seen a peripheral matching
2017 		 * expression, therefore we need to further descend the tree.
2018 		 * This won't change by continuing around the loop, so we
2019 		 * go ahead and return.  If we haven't seen a peripheral
2020 		 * matching expression, we keep going around the loop until
2021 		 * we exhaust the matching expressions.  We'll set the stop
2022 		 * flag once we fall out of the loop.
2023 		 */
2024 		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
2025 			return(retval);
2026 	}
2027 
2028 	/*
2029 	 * If the return action hasn't been set to descend yet, that means
2030 	 * we haven't seen any peripheral matching patterns.  So tell the
2031 	 * caller to stop descending the tree -- the user doesn't want to
2032 	 * match against lower level tree elements.
2033 	 */
2034 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
2035 		retval |= DM_RET_STOP;
2036 
2037 	return(retval);
2038 }
2039 
2040 /*
2041  * Match a single peripheral against any number of match patterns.
2042  */
2043 static dev_match_ret
2044 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
2045 	       struct cam_periph *periph)
2046 {
2047 	dev_match_ret retval;
2048 	int i;
2049 
2050 	/*
2051 	 * If we aren't given something to match against, that's an error.
2052 	 */
2053 	if (periph == NULL)
2054 		return(DM_RET_ERROR);
2055 
2056 	/*
2057 	 * If there are no match entries, then this peripheral matches no
2058 	 * matter what.
2059 	 */
2060 	if ((patterns == NULL) || (num_patterns == 0))
2061 		return(DM_RET_STOP | DM_RET_COPY);
2062 
2063 	/*
2064 	 * There aren't any nodes below a peripheral node, so there's no
2065 	 * reason to descend the tree any further.
2066 	 */
2067 	retval = DM_RET_STOP;
2068 
2069 	for (i = 0; i < num_patterns; i++) {
2070 		struct periph_match_pattern *cur_pattern;
2071 
2072 		/*
2073 		 * If the pattern in question isn't for a peripheral, we
2074 		 * aren't interested.
2075 		 */
2076 		if (patterns[i].type != DEV_MATCH_PERIPH)
2077 			continue;
2078 
2079 		cur_pattern = &patterns[i].pattern.periph_pattern;
2080 
2081 		/*
2082 		 * If they want to match on anything, then we will do so.
2083 		 */
2084 		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
2085 			/* set the copy flag */
2086 			retval |= DM_RET_COPY;
2087 
2088 			/*
2089 			 * We've already set the return action to stop,
2090 			 * since there are no nodes below peripherals in
2091 			 * the tree.
2092 			 */
2093 			return(retval);
2094 		}
2095 
2096 		/*
2097 		 * Not sure why someone would do this...
2098 		 */
2099 		if (cur_pattern->flags == PERIPH_MATCH_NONE)
2100 			continue;
2101 
2102 		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
2103 		 && (cur_pattern->path_id != periph->path->bus->path_id))
2104 			continue;
2105 
2106 		/*
2107 		 * For the target and lun id's, we have to make sure the
2108 		 * target and lun pointers aren't NULL.  The xpt peripheral
2109 		 * has a wildcard target and device.
2110 		 */
2111 		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2112 		 && ((periph->path->target == NULL)
2113 		 || (cur_pattern->target_id != periph->path->target->target_id)))
2114 			continue;
2115 
2116 		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2117 		 && ((periph->path->device == NULL)
2118 		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
2119 			continue;
2120 
2121 		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2122 		 && (cur_pattern->unit_number != periph->unit_number))
2123 			continue;
2124 
2125 		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2126 		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
2127 			     DEV_IDLEN) != 0))
2128 			continue;
2129 
2130 		/*
2131 		 * If we get to this point, the user definitely wants
2132 		 * information on this peripheral.  So tell the caller to
2133 		 * copy the data out.
2134 		 */
2135 		retval |= DM_RET_COPY;
2136 
2137 		/*
2138 		 * The return action has already been set to stop, since
2139 		 * peripherals don't have any nodes below them in the EDT.
2140 		 */
2141 		return(retval);
2142 	}
2143 
2144 	/*
2145 	 * If we get to this point, the peripheral that was passed in
2146 	 * doesn't match any of the patterns.
2147 	 */
2148 	return(retval);
2149 }
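
/*
 * A minimal sketch (not part of this file) of a pattern the matcher above
 * would accept for every "da" peripheral:
 *
 *	struct dev_match_pattern pat;
 *
 *	bzero(&pat, sizeof(pat));
 *	pat.type = DEV_MATCH_PERIPH;
 *	pat.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
 *	strncpy(pat.pattern.periph_pattern.periph_name, "da", DEV_IDLEN);
 */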
2150 
2151 static int
2152 xptedtbusfunc(struct cam_eb *bus, void *arg)
2153 {
2154 	struct ccb_dev_match *cdm;
2155 	dev_match_ret retval;
2156 
2157 	cdm = (struct ccb_dev_match *)arg;
2158 
2159 	/*
2160 	 * If our position is for something deeper in the tree, that means
2161 	 * that we've already seen this node.  So, we keep going down.
2162 	 */
2163 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2164 	 && (cdm->pos.cookie.bus == bus)
2165 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2166 	 && (cdm->pos.cookie.target != NULL))
2167 		retval = DM_RET_DESCEND;
2168 	else
2169 		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2170 
2171 	/*
2172 	 * If we got an error, bail out of the search.
2173 	 */
2174 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2175 		cdm->status = CAM_DEV_MATCH_ERROR;
2176 		return(0);
2177 	}
2178 
2179 	/*
2180 	 * If the copy flag is set, copy this bus out.
2181 	 */
2182 	if (retval & DM_RET_COPY) {
2183 		int spaceleft, j;
2184 
2185 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2186 			sizeof(struct dev_match_result));
2187 
2188 		/*
2189 		 * If we don't have enough space to put in another
2190 		 * match result, save our position and tell the
2191 		 * user there are more devices to check.
2192 		 */
2193 		if (spaceleft < sizeof(struct dev_match_result)) {
2194 			bzero(&cdm->pos, sizeof(cdm->pos));
2195 			cdm->pos.position_type =
2196 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2197 
2198 			cdm->pos.cookie.bus = bus;
2199 			cdm->pos.generations[CAM_BUS_GENERATION] =
2200 				bus_generation;
2201 			cdm->status = CAM_DEV_MATCH_MORE;
2202 			return(0);
2203 		}
2204 		j = cdm->num_matches;
2205 		cdm->num_matches++;
2206 		cdm->matches[j].type = DEV_MATCH_BUS;
2207 		cdm->matches[j].result.bus_result.path_id = bus->path_id;
2208 		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2209 		cdm->matches[j].result.bus_result.unit_number =
2210 			bus->sim->unit_number;
2211 		strncpy(cdm->matches[j].result.bus_result.dev_name,
2212 			bus->sim->sim_name, DEV_IDLEN);
2213 	}
2214 
2215 	/*
2216 	 * If the user is only interested in busses, there's no
2217 	 * reason to descend to the next level in the tree.
2218 	 */
2219 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2220 		return(1);
2221 
2222 	/*
2223 	 * If there is a target generation recorded, check it to
2224 	 * make sure the target list hasn't changed.
2225 	 */
2226 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2227 	 && (bus == cdm->pos.cookie.bus)
2228 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2229 	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2230 	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2231 	     bus->generation)) {
2232 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2233 		return(0);
2234 	}
2235 
2236 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2237 	 && (cdm->pos.cookie.bus == bus)
2238 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2239 	 && (cdm->pos.cookie.target != NULL))
2240 		return(xpttargettraverse(bus,
2241 					(struct cam_et *)cdm->pos.cookie.target,
2242 					 xptedttargetfunc, arg));
2243 	else
2244 		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2245 }
2246 
2247 static int
2248 xptedttargetfunc(struct cam_et *target, void *arg)
2249 {
2250 	struct ccb_dev_match *cdm;
2251 
2252 	cdm = (struct ccb_dev_match *)arg;
2253 
2254 	/*
2255 	 * If there is a device list generation recorded, check it to
2256 	 * make sure the device list hasn't changed.
2257 	 */
2258 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2259 	 && (cdm->pos.cookie.bus == target->bus)
2260 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2261 	 && (cdm->pos.cookie.target == target)
2262 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2263 	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2264 	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2265 	     target->generation)) {
2266 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2267 		return(0);
2268 	}
2269 
2270 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2271 	 && (cdm->pos.cookie.bus == target->bus)
2272 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2273 	 && (cdm->pos.cookie.target == target)
2274 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2275 	 && (cdm->pos.cookie.device != NULL))
2276 		return(xptdevicetraverse(target,
2277 					(struct cam_ed *)cdm->pos.cookie.device,
2278 					 xptedtdevicefunc, arg));
2279 	else
2280 		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2281 }
2282 
2283 static int
2284 xptedtdevicefunc(struct cam_ed *device, void *arg)
2285 {
2286 
2287 	struct ccb_dev_match *cdm;
2288 	dev_match_ret retval;
2289 
2290 	cdm = (struct ccb_dev_match *)arg;
2291 
2292 	/*
2293 	 * If our position is for something deeper in the tree, that means
2294 	 * that we've already seen this node.  So, we keep going down.
2295 	 */
2296 	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2297 	 && (cdm->pos.cookie.device == device)
2298 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2299 	 && (cdm->pos.cookie.periph != NULL))
2300 		retval = DM_RET_DESCEND;
2301 	else
2302 		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2303 					device);
2304 
2305 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2306 		cdm->status = CAM_DEV_MATCH_ERROR;
2307 		return(0);
2308 	}
2309 
2310 	/*
2311 	 * If the copy flag is set, copy this device out.
2312 	 */
2313 	if (retval & DM_RET_COPY) {
2314 		int spaceleft, j;
2315 
2316 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2317 			sizeof(struct dev_match_result));
2318 
2319 		/*
2320 		 * If we don't have enough space to put in another
2321 		 * match result, save our position and tell the
2322 		 * user there are more devices to check.
2323 		 */
2324 		if (spaceleft < sizeof(struct dev_match_result)) {
2325 			bzero(&cdm->pos, sizeof(cdm->pos));
2326 			cdm->pos.position_type =
2327 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2328 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2329 
2330 			cdm->pos.cookie.bus = device->target->bus;
2331 			cdm->pos.generations[CAM_BUS_GENERATION] =
2332 				bus_generation;
2333 			cdm->pos.cookie.target = device->target;
2334 			cdm->pos.generations[CAM_TARGET_GENERATION] =
2335 				device->target->bus->generation;
2336 			cdm->pos.cookie.device = device;
2337 			cdm->pos.generations[CAM_DEV_GENERATION] =
2338 				device->target->generation;
2339 			cdm->status = CAM_DEV_MATCH_MORE;
2340 			return(0);
2341 		}
2342 		j = cdm->num_matches;
2343 		cdm->num_matches++;
2344 		cdm->matches[j].type = DEV_MATCH_DEVICE;
2345 		cdm->matches[j].result.device_result.path_id =
2346 			device->target->bus->path_id;
2347 		cdm->matches[j].result.device_result.target_id =
2348 			device->target->target_id;
2349 		cdm->matches[j].result.device_result.target_lun =
2350 			device->lun_id;
2351 		bcopy(&device->inq_data,
2352 		      &cdm->matches[j].result.device_result.inq_data,
2353 		      sizeof(struct scsi_inquiry_data));
2354 
2355 		/* Let the user know whether this device is unconfigured */
2356 		if (device->flags & CAM_DEV_UNCONFIGURED)
2357 			cdm->matches[j].result.device_result.flags =
2358 				DEV_RESULT_UNCONFIGURED;
2359 		else
2360 			cdm->matches[j].result.device_result.flags =
2361 				DEV_RESULT_NOFLAG;
2362 	}
2363 
2364 	/*
2365 	 * If the user isn't interested in peripherals, don't descend
2366 	 * the tree any further.
2367 	 */
2368 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2369 		return(1);
2370 
2371 	/*
2372 	 * If there is a peripheral list generation recorded, make sure
2373 	 * it hasn't changed.
2374 	 */
2375 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2376 	 && (device->target->bus == cdm->pos.cookie.bus)
2377 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2378 	 && (device->target == cdm->pos.cookie.target)
2379 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2380 	 && (device == cdm->pos.cookie.device)
2381 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2382 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2383 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2384 	     device->generation)) {
2385 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2386 		return(0);
2387 	}
2388 
2389 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2390 	 && (cdm->pos.cookie.bus == device->target->bus)
2391 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2392 	 && (cdm->pos.cookie.target == device->target)
2393 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2394 	 && (cdm->pos.cookie.device == device)
2395 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2396 	 && (cdm->pos.cookie.periph != NULL))
2397 		return(xptperiphtraverse(device,
2398 				(struct cam_periph *)cdm->pos.cookie.periph,
2399 				xptedtperiphfunc, arg));
2400 	else
2401 		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2402 }
2403 
2404 static int
2405 xptedtperiphfunc(struct cam_periph *periph, void *arg)
2406 {
2407 	struct ccb_dev_match *cdm;
2408 	dev_match_ret retval;
2409 
2410 	cdm = (struct ccb_dev_match *)arg;
2411 
2412 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2413 
2414 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2415 		cdm->status = CAM_DEV_MATCH_ERROR;
2416 		return(0);
2417 	}
2418 
2419 	/*
2420 	 * If the copy flag is set, copy this peripheral out.
2421 	 */
2422 	if (retval & DM_RET_COPY) {
2423 		int spaceleft, j;
2424 
2425 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2426 			sizeof(struct dev_match_result));
2427 
2428 		/*
2429 		 * If we don't have enough space to put in another
2430 		 * match result, save our position and tell the
2431 		 * user there are more devices to check.
2432 		 */
2433 		if (spaceleft < sizeof(struct dev_match_result)) {
2434 			bzero(&cdm->pos, sizeof(cdm->pos));
2435 			cdm->pos.position_type =
2436 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2437 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2438 				CAM_DEV_POS_PERIPH;
2439 
2440 			cdm->pos.cookie.bus = periph->path->bus;
2441 			cdm->pos.generations[CAM_BUS_GENERATION] =
2442 				bus_generation;
2443 			cdm->pos.cookie.target = periph->path->target;
2444 			cdm->pos.generations[CAM_TARGET_GENERATION] =
2445 				periph->path->bus->generation;
2446 			cdm->pos.cookie.device = periph->path->device;
2447 			cdm->pos.generations[CAM_DEV_GENERATION] =
2448 				periph->path->target->generation;
2449 			cdm->pos.cookie.periph = periph;
2450 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2451 				periph->path->device->generation;
2452 			cdm->status = CAM_DEV_MATCH_MORE;
2453 			return(0);
2454 		}
2455 
2456 		j = cdm->num_matches;
2457 		cdm->num_matches++;
2458 		cdm->matches[j].type = DEV_MATCH_PERIPH;
2459 		cdm->matches[j].result.periph_result.path_id =
2460 			periph->path->bus->path_id;
2461 		cdm->matches[j].result.periph_result.target_id =
2462 			periph->path->target->target_id;
2463 		cdm->matches[j].result.periph_result.target_lun =
2464 			periph->path->device->lun_id;
2465 		cdm->matches[j].result.periph_result.unit_number =
2466 			periph->unit_number;
2467 		strncpy(cdm->matches[j].result.periph_result.periph_name,
2468 			periph->periph_name, DEV_IDLEN);
2469 	}
2470 
2471 	return(1);
2472 }
2473 
2474 static int
2475 xptedtmatch(struct ccb_dev_match *cdm)
2476 {
2477 	int ret;
2478 
2479 	cdm->num_matches = 0;
2480 
2481 	/*
2482 	 * Check the bus list generation.  If it has changed, the user
2483 	 * needs to reset everything and start over.
2484 	 */
2485 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2486 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2487 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2488 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2489 		return(0);
2490 	}
2491 
2492 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2493 	 && (cdm->pos.cookie.bus != NULL))
2494 		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2495 				     xptedtbusfunc, cdm);
2496 	else
2497 		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2498 
2499 	/*
2500 	 * If we get back 0, that means that we had to stop before fully
2501 	 * traversing the EDT.  It also means that one of the subroutines
2502 	 * has set the status field to the proper value.  If we get back 1,
2503 	 * we've fully traversed the EDT and copied out any matching entries.
2504 	 */
2505 	if (ret == 1)
2506 		cdm->status = CAM_DEV_MATCH_LAST;
2507 
2508 	return(ret);
2509 }
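
/*
 * The CAM_DEV_MATCH_MORE protocol above is what lets a bounded user buffer
 * walk an arbitrarily large EDT.  A userland caller (roughly what
 * camcontrol(8) does; sketch only, error handling omitted) resumes the
 * traversal by reissuing the ccb with the saved position left intact:
 *
 *	union ccb ccb;
 *	int fd = open("/dev/xpt0", O_RDWR);
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
 *	ccb.cdm.match_buf_len = bufsize;
 *	ccb.cdm.matches = (struct dev_match_result *)malloc(bufsize);
 *	do {
 *		ioctl(fd, CAMIOCOMMAND, &ccb);
 *		... consume ccb.cdm.num_matches results ...
 *	} while (ccb.ccb_h.status == CAM_REQ_CMP
 *	      && ccb.cdm.status == CAM_DEV_MATCH_MORE);
 */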
2510 
2511 static int
2512 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2513 {
2514 	struct ccb_dev_match *cdm;
2515 
2516 	cdm = (struct ccb_dev_match *)arg;
2517 
2518 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2519 	 && (cdm->pos.cookie.pdrv == pdrv)
2520 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2521 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2522 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2523 	     (*pdrv)->generation)) {
2524 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2525 		return(0);
2526 	}
2527 
2528 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2529 	 && (cdm->pos.cookie.pdrv == pdrv)
2530 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2531 	 && (cdm->pos.cookie.periph != NULL))
2532 		return(xptpdperiphtraverse(pdrv,
2533 				(struct cam_periph *)cdm->pos.cookie.periph,
2534 				xptplistperiphfunc, arg));
2535 	else
2536 		return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
2537 }
2538 
2539 static int
2540 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2541 {
2542 	struct ccb_dev_match *cdm;
2543 	dev_match_ret retval;
2544 
2545 	cdm = (struct ccb_dev_match *)arg;
2546 
2547 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2548 
2549 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2550 		cdm->status = CAM_DEV_MATCH_ERROR;
2551 		return(0);
2552 	}
2553 
2554 	/*
2555 	 * If the copy flag is set, copy this peripheral out.
2556 	 */
2557 	if (retval & DM_RET_COPY) {
2558 		int spaceleft, j;
2559 
2560 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2561 			sizeof(struct dev_match_result));
2562 
2563 		/*
2564 		 * If we don't have enough space to put in another
2565 		 * match result, save our position and tell the
2566 		 * user there are more devices to check.
2567 		 */
2568 		if (spaceleft < sizeof(struct dev_match_result)) {
2569 			struct periph_driver **pdrv;
2570 
2571 			pdrv = NULL;
2572 			bzero(&cdm->pos, sizeof(cdm->pos));
2573 			cdm->pos.position_type =
2574 				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2575 				CAM_DEV_POS_PERIPH;
2576 
2577 			/*
2578 			 * This may look a bit nonsensical, but it is
2579 			 * actually quite logical.  There are very few
2580 			 * peripheral drivers, and bloating every peripheral
2581 			 * structure with a pointer back to its parent
2582 			 * peripheral driver linker set entry would cost
2583 			 * more in the long run than doing this quick lookup.
2584 			 */
2585 			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2586 				if (strcmp((*pdrv)->driver_name,
2587 				    periph->periph_name) == 0)
2588 					break;
2589 			}
2590 
2591 			if (*pdrv == NULL) {
2592 				cdm->status = CAM_DEV_MATCH_ERROR;
2593 				return(0);
2594 			}
2595 
2596 			cdm->pos.cookie.pdrv = pdrv;
2597 			/*
2598 			 * The periph generation slot does double duty, as
2599 			 * does the periph pointer slot.  They are used for
2600 			 * both edt and pdrv lookups and positioning.
2601 			 */
2602 			cdm->pos.cookie.periph = periph;
2603 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2604 				(*pdrv)->generation;
2605 			cdm->status = CAM_DEV_MATCH_MORE;
2606 			return(0);
2607 		}
2608 
2609 		j = cdm->num_matches;
2610 		cdm->num_matches++;
2611 		cdm->matches[j].type = DEV_MATCH_PERIPH;
2612 		cdm->matches[j].result.periph_result.path_id =
2613 			periph->path->bus->path_id;
2614 
2615 		/*
2616 		 * The transport layer peripheral doesn't have a target or
2617 		 * lun.
2618 		 */
2619 		if (periph->path->target)
2620 			cdm->matches[j].result.periph_result.target_id =
2621 				periph->path->target->target_id;
2622 		else
2623 			cdm->matches[j].result.periph_result.target_id = -1;
2624 
2625 		if (periph->path->device)
2626 			cdm->matches[j].result.periph_result.target_lun =
2627 				periph->path->device->lun_id;
2628 		else
2629 			cdm->matches[j].result.periph_result.target_lun = -1;
2630 
2631 		cdm->matches[j].result.periph_result.unit_number =
2632 			periph->unit_number;
2633 		strncpy(cdm->matches[j].result.periph_result.periph_name,
2634 			periph->periph_name, DEV_IDLEN);
2635 	}
2636 
2637 	return(1);
2638 }
2639 
2640 static int
2641 xptperiphlistmatch(struct ccb_dev_match *cdm)
2642 {
2643 	int ret;
2644 
2645 	cdm->num_matches = 0;
2646 
2647 	/*
2648 	 * At the equivalent point in the EDT traversal function, we
2649 	 * check the bus list generation to make sure that no busses have
2650 	 * been added or removed since the user last sent an XPT_DEV_MATCH
2651 	 * ccb through.  For the peripheral driver list traversal function,
2652 	 * however, we don't have to worry about new peripheral driver
2653 	 * types coming or going; they're in a linker set, and therefore
2654 	 * can't change without a recompile.
2655 	 */
2656 
2657 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2658 	 && (cdm->pos.cookie.pdrv != NULL))
2659 		ret = xptpdrvtraverse(
2660 				(struct periph_driver **)cdm->pos.cookie.pdrv,
2661 				xptplistpdrvfunc, cdm);
2662 	else
2663 		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2664 
2665 	/*
2666 	 * If we get back 0, that means that we had to stop before fully
2667 	 * traversing the peripheral driver tree.  It also means that one of
2668 	 * the subroutines has set the status field to the proper value.  If
2669 	 * we get back 1, we've fully traversed the EDT and copied out any
2670 	 * matching entries.
2671 	 */
2672 	if (ret == 1)
2673 		cdm->status = CAM_DEV_MATCH_LAST;
2674 
2675 	return(ret);
2676 }
2677 
2678 static int
2679 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2680 {
2681 	struct cam_eb *bus, *next_bus;
2682 	int retval;
2683 
2684 	retval = 1;
2685 
2686 	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2687 	     bus != NULL;
2688 	     bus = next_bus) {
2689 		next_bus = TAILQ_NEXT(bus, links);
2690 
2691 		retval = tr_func(bus, arg);
2692 		if (retval == 0)
2693 			return(retval);
2694 	}
2695 
2696 	return(retval);
2697 }
2698 
2699 static int
2700 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2701 		  xpt_targetfunc_t *tr_func, void *arg)
2702 {
2703 	struct cam_et *target, *next_target;
2704 	int retval;
2705 
2706 	retval = 1;
2707 	for (target = (start_target ? start_target :
2708 		       TAILQ_FIRST(&bus->et_entries));
2709 	     target != NULL; target = next_target) {
2710 
2711 		next_target = TAILQ_NEXT(target, links);
2712 
2713 		retval = tr_func(target, arg);
2714 
2715 		if (retval == 0)
2716 			return(retval);
2717 	}
2718 
2719 	return(retval);
2720 }
2721 
2722 static int
2723 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2724 		  xpt_devicefunc_t *tr_func, void *arg)
2725 {
2726 	struct cam_ed *device, *next_device;
2727 	int retval;
2728 
2729 	retval = 1;
2730 	for (device = (start_device ? start_device :
2731 		       TAILQ_FIRST(&target->ed_entries));
2732 	     device != NULL;
2733 	     device = next_device) {
2734 
2735 		next_device = TAILQ_NEXT(device, links);
2736 
2737 		retval = tr_func(device, arg);
2738 
2739 		if (retval == 0)
2740 			return(retval);
2741 	}
2742 
2743 	return(retval);
2744 }
2745 
2746 static int
2747 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2748 		  xpt_periphfunc_t *tr_func, void *arg)
2749 {
2750 	struct cam_periph *periph, *next_periph;
2751 	int retval;
2752 
2753 	retval = 1;
2754 
2755 	for (periph = (start_periph ? start_periph :
2756 		       SLIST_FIRST(&device->periphs));
2757 	     periph != NULL;
2758 	     periph = next_periph) {
2759 
2760 		next_periph = SLIST_NEXT(periph, periph_links);
2761 
2762 		retval = tr_func(periph, arg);
2763 		if (retval == 0)
2764 			return(retval);
2765 	}
2766 
2767 	return(retval);
2768 }
2769 
2770 static int
2771 xptpdrvtraverse(struct periph_driver **start_pdrv,
2772 		xpt_pdrvfunc_t *tr_func, void *arg)
2773 {
2774 	struct periph_driver **pdrv;
2775 	int retval;
2776 
2777 	retval = 1;
2778 
2779 	/*
2780 	 * We don't traverse the peripheral driver list like we do the
2781 	 * other lists, because it is a linker set, and therefore cannot be
2782 	 * changed during runtime.  If the peripheral driver list is ever
2783 	 * re-done to be something other than a linker set (i.e. it can
2784 	 * change while the system is running), the list traversal should
2785 	 * be modified to work like the other traversal functions.
2786 	 */
2787 	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2788 	     *pdrv != NULL; pdrv++) {
2789 		retval = tr_func(pdrv, arg);
2790 
2791 		if (retval == 0)
2792 			return(retval);
2793 	}
2794 
2795 	return(retval);
2796 }
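
/*
 * For reference, entries land in the periph_drivers linker set via
 * PERIPHDRIVER_DECLARE() in cam_periph.h; a hypothetical driver "foo"
 * would be declared roughly as follows (the trailing 0 initializes the
 * generation count):
 *
 *	static struct periph_driver foodriver = {
 *		fooinit, "foo",
 *		TAILQ_HEAD_INITIALIZER(foodriver.units), 0
 *	};
 *	PERIPHDRIVER_DECLARE(foo, foodriver);
 */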
2797 
2798 static int
2799 xptpdperiphtraverse(struct periph_driver **pdrv,
2800 		    struct cam_periph *start_periph,
2801 		    xpt_periphfunc_t *tr_func, void *arg)
2802 {
2803 	struct cam_periph *periph, *next_periph;
2804 	int retval;
2805 
2806 	retval = 1;
2807 
2808 	for (periph = (start_periph ? start_periph :
2809 	     TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2810 	     periph = next_periph) {
2811 
2812 		next_periph = TAILQ_NEXT(periph, unit_links);
2813 
2814 		retval = tr_func(periph, arg);
2815 		if (retval == 0)
2816 			return(retval);
2817 	}
2818 	return(retval);
2819 }
2820 
2821 static int
2822 xptdefbusfunc(struct cam_eb *bus, void *arg)
2823 {
2824 	struct xpt_traverse_config *tr_config;
2825 
2826 	tr_config = (struct xpt_traverse_config *)arg;
2827 
2828 	if (tr_config->depth == XPT_DEPTH_BUS) {
2829 		xpt_busfunc_t *tr_func;
2830 
2831 		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2832 
2833 		return(tr_func(bus, tr_config->tr_arg));
2834 	} else
2835 		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2836 }
2837 
2838 static int
2839 xptdeftargetfunc(struct cam_et *target, void *arg)
2840 {
2841 	struct xpt_traverse_config *tr_config;
2842 
2843 	tr_config = (struct xpt_traverse_config *)arg;
2844 
2845 	if (tr_config->depth == XPT_DEPTH_TARGET) {
2846 		xpt_targetfunc_t *tr_func;
2847 
2848 		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2849 
2850 		return(tr_func(target, tr_config->tr_arg));
2851 	} else
2852 		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2853 }
2854 
2855 static int
2856 xptdefdevicefunc(struct cam_ed *device, void *arg)
2857 {
2858 	struct xpt_traverse_config *tr_config;
2859 
2860 	tr_config = (struct xpt_traverse_config *)arg;
2861 
2862 	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2863 		xpt_devicefunc_t *tr_func;
2864 
2865 		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2866 
2867 		return(tr_func(device, tr_config->tr_arg));
2868 	} else
2869 		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2870 }
2871 
2872 static int
2873 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2874 {
2875 	struct xpt_traverse_config *tr_config;
2876 	xpt_periphfunc_t *tr_func;
2877 
2878 	tr_config = (struct xpt_traverse_config *)arg;
2879 
2880 	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2881 
2882 	/*
2883 	 * Unlike the other default functions, we don't check for depth
2884 	 * here.  The peripheral driver level is the last level in the EDT,
2885 	 * so if we're here, we should execute the function in question.
2886 	 */
2887 	return(tr_func(periph, tr_config->tr_arg));
2888 }
2889 
2890 /*
2891  * Execute the given function for every bus in the EDT.
2892  */
2893 static int
2894 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2895 {
2896 	struct xpt_traverse_config tr_config;
2897 
2898 	tr_config.depth = XPT_DEPTH_BUS;
2899 	tr_config.tr_func = tr_func;
2900 	tr_config.tr_arg = arg;
2901 
2902 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2903 }
2904 
2905 #ifdef notusedyet
2906 /*
2907  * Execute the given function for every target in the EDT.
2908  */
2909 static int
2910 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2911 {
2912 	struct xpt_traverse_config tr_config;
2913 
2914 	tr_config.depth = XPT_DEPTH_TARGET;
2915 	tr_config.tr_func = tr_func;
2916 	tr_config.tr_arg = arg;
2917 
2918 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2919 }
2920 #endif /* notusedyet */
2921 
2922 /*
2923  * Execute the given function for every device in the EDT.
2924  */
2925 static int
2926 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2927 {
2928 	struct xpt_traverse_config tr_config;
2929 
2930 	tr_config.depth = XPT_DEPTH_DEVICE;
2931 	tr_config.tr_func = tr_func;
2932 	tr_config.tr_arg = arg;
2933 
2934 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2935 }
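
/*
 * Usage sketch (hypothetical caller): count the configured devices in the
 * EDT.  Per the traversal convention above, the callback returns 1 to
 * continue and 0 to stop early.
 *
 *	static int
 *	xptcountdevfunc(struct cam_ed *device, void *arg)
 *	{
 *		if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
 *			(*(int *)arg)++;
 *		return (1);
 *	}
 *
 *	int count = 0;
 *	xpt_for_all_devices(xptcountdevfunc, &count);
 */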
2936 
2937 #ifdef notusedyet
2938 /*
2939  * Execute the given function for every peripheral in the EDT.
2940  */
2941 static int
2942 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2943 {
2944 	struct xpt_traverse_config tr_config;
2945 
2946 	tr_config.depth = XPT_DEPTH_PERIPH;
2947 	tr_config.tr_func = tr_func;
2948 	tr_config.tr_arg = arg;
2949 
2950 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2951 }
2952 #endif /* notusedyet */
2953 
2954 static int
2955 xptsetasyncfunc(struct cam_ed *device, void *arg)
2956 {
2957 	struct cam_path path;
2958 	struct ccb_getdev cgd;
2959 	struct async_node *cur_entry;
2960 
2961 	cur_entry = (struct async_node *)arg;
2962 
2963 	/*
2964 	 * Don't report unconfigured devices (Wildcard devs,
2965 	 * devices only for target mode, device instances
2966 	 * that have been invalidated but are waiting for
2967 	 * their last reference count to be released).
2968 	 */
2969 	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2970 		return (1);
2971 
2972 	xpt_compile_path(&path,
2973 			 NULL,
2974 			 device->target->bus->path_id,
2975 			 device->target->target_id,
2976 			 device->lun_id);
2977 	xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2978 	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2979 	xpt_action((union ccb *)&cgd);
2980 	cur_entry->callback(cur_entry->callback_arg,
2981 			    AC_FOUND_DEVICE,
2982 			    &path, &cgd);
2983 	xpt_release_path(&path);
2984 
2985 	return(1);
2986 }
2987 
2988 static int
2989 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2990 {
2991 	struct cam_path path;
2992 	struct ccb_pathinq cpi;
2993 	struct async_node *cur_entry;
2994 
2995 	cur_entry = (struct async_node *)arg;
2996 
2997 	xpt_compile_path(&path, /*periph*/NULL,
2998 			 bus->sim->path_id,
2999 			 CAM_TARGET_WILDCARD,
3000 			 CAM_LUN_WILDCARD);
3001 	xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
3002 	cpi.ccb_h.func_code = XPT_PATH_INQ;
3003 	xpt_action((union ccb *)&cpi);
3004 	cur_entry->callback(cur_entry->callback_arg,
3005 			    AC_PATH_REGISTERED,
3006 			    &path, &cpi);
3007 	xpt_release_path(&path);
3008 
3009 	return(1);
3010 }
3011 
3012 void
3013 xpt_action(union ccb *start_ccb)
3014 {
3015 	int iopl;
3016 
3017 	GIANT_REQUIRED;
3018 
3019 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
3020 
3021 	start_ccb->ccb_h.status = CAM_REQ_INPROG;
3022 
3023 	iopl = splsoftcam();
3024 	switch (start_ccb->ccb_h.func_code) {
3025 	case XPT_SCSI_IO:
3026 	{
3027 #ifdef CAM_NEW_TRAN_CODE
3028 		struct cam_ed *device;
3029 #endif /* CAM_NEW_TRAN_CODE */
3030 #ifdef CAMDEBUG
3031 		char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
3032 		struct cam_path *path;
3033 
3034 		path = start_ccb->ccb_h.path;
3035 #endif
3036 
3037 		/*
3038 		 * For the sake of compatibility with SCSI-1
3039 		 * devices that may not understand the identify
3040 		 * message, we include lun information in the
3041 		 * second byte of all commands.  SCSI-1 specifies
3042 		 * that luns are a 3 bit value and reserves only 3
3043 		 * bits for lun information in the CDB.  Later
3044 		 * revisions of the SCSI spec allow for more than 8
3045 		 * luns, but have deprecated lun information in the
3046 		 * CDB.  So, if the lun won't fit, we must omit.
3047 		 *
3048 		 * Also be aware that during initial probing for devices,
3049 		 * the inquiry information is unknown but initialized to 0.
3050 		 * This means that this code will be exercised while probing
3051 		 * devices with an ANSI revision greater than 2.
3052 		 */
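		/*
		 * For example (illustrative): lun 3 below turns a CDB
		 * beginning { 0x08, 0x00, ... } into { 0x08, 0x60, ... },
		 * since 3 << 5 == 0x60.
		 */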
3053 #ifdef CAM_NEW_TRAN_CODE
3054 		device = start_ccb->ccb_h.path->device;
3055 		if (device->protocol_version <= SCSI_REV_2
3056 #else /* CAM_NEW_TRAN_CODE */
3057 		if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
3058 #endif /* CAM_NEW_TRAN_CODE */
3059 		 && start_ccb->ccb_h.target_lun < 8
3060 		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
3061 
3062 			start_ccb->csio.cdb_io.cdb_bytes[1] |=
3063 			    start_ccb->ccb_h.target_lun << 5;
3064 		}
3065 		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
3066 		CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
3067 			  scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
3068 			  	       &path->device->inq_data),
3069 			  scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
3070 					  cdb_str, sizeof(cdb_str))));
3071 	}
3072 	/* FALLTHROUGH */
3073 	case XPT_TARGET_IO:
3074 	case XPT_CONT_TARGET_IO:
3075 		start_ccb->csio.sense_resid = 0;
3076 		start_ccb->csio.resid = 0;
3077 		/* FALLTHROUGH */
3078 	case XPT_RESET_DEV:
3079 	case XPT_ENG_EXEC:
3080 	{
3081 		struct cam_path *path;
3082 		struct cam_sim *sim;
3083 		int s;
3084 		int runq;
3085 
3086 		path = start_ccb->ccb_h.path;
3087 		s = splsoftcam();
3088 
3089 		sim = path->bus->sim;
3090 		if (SIM_DEAD(sim)) {
3091 			/* The SIM has gone; just execute the CCB directly. */
3092 			cam_ccbq_send_ccb(&path->device->ccbq, start_ccb);
3093 			(*(sim->sim_action))(sim, start_ccb);
3094 			splx(s);
3095 			break;
3096 		}
3097 
3098 		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
3099 		if (path->device->qfrozen_cnt == 0)
3100 			runq = xpt_schedule_dev_sendq(path->bus, path->device);
3101 		else
3102 			runq = 0;
3103 		splx(s);
3104 		if (runq != 0)
3105 			xpt_run_dev_sendq(path->bus);
3106 		break;
3107 	}
3108 	case XPT_SET_TRAN_SETTINGS:
3109 	{
3110 		xpt_set_transfer_settings(&start_ccb->cts,
3111 					  start_ccb->ccb_h.path->device,
3112 					  /*async_update*/FALSE);
3113 		break;
3114 	}
3115 	case XPT_CALC_GEOMETRY:
3116 	{
3117 		struct cam_sim *sim;
3118 
3119 		/* Filter out garbage */
3120 		if (start_ccb->ccg.block_size == 0
3121 		 || start_ccb->ccg.volume_size == 0) {
3122 			start_ccb->ccg.cylinders = 0;
3123 			start_ccb->ccg.heads = 0;
3124 			start_ccb->ccg.secs_per_track = 0;
3125 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3126 			break;
3127 		}
3128 #ifdef PC98
3129 		/*
3130 		 * In a PC-98 system, geometry translation depends on
3131 		 * the "real" device geometry obtained from mode page 4.
3132 		 * SCSI geometry translation is performed in the
3133 		 * initialization routine of the SCSI BIOS and the result is
3134 		 * stored in host memory.  If the translation is available
3135 		 * in host memory, use it.  If not, rely on the default
3136 		 * translation the device driver performs.
3137 		 */
3138 		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
3139 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3140 			break;
3141 		}
3142 #endif
3143 		sim = start_ccb->ccb_h.path->bus->sim;
3144 		(*(sim->sim_action))(sim, start_ccb);
3145 		break;
3146 	}
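
	/*
	 * For reference, a SIM's XPT_CALC_GEOMETRY handler commonly applies
	 * an extended translation along these lines (a sketch only; real
	 * SIMs may consult controller firmware or BIOS settings instead):
	 *
	 *	ccg->heads = 255;
	 *	ccg->secs_per_track = 63;
	 *	ccg->cylinders = ccg->volume_size / (255 * 63);
	 *	ccg->ccb_h.status = CAM_REQ_CMP;
	 */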
3147 	case XPT_ABORT:
3148 	{
3149 		union ccb* abort_ccb;
3150 		int s;
3151 
3152 		abort_ccb = start_ccb->cab.abort_ccb;
3153 		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3154 
3155 			if (abort_ccb->ccb_h.pinfo.index >= 0) {
3156 				struct cam_ccbq *ccbq;
3157 
3158 				ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3159 				cam_ccbq_remove_ccb(ccbq, abort_ccb);
3160 				abort_ccb->ccb_h.status =
3161 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3162 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3163 				s = splcam();
3164 				xpt_done(abort_ccb);
3165 				splx(s);
3166 				start_ccb->ccb_h.status = CAM_REQ_CMP;
3167 				break;
3168 			}
3169 			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3170 			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3171 				/*
3172 				 * We've caught this ccb en route to
3173 				 * the SIM.  Flag it for abort and the
3174 				 * SIM will do so just before starting
3175 				 * real work on the CCB.
3176 				 */
3177 				abort_ccb->ccb_h.status =
3178 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3179 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3180 				start_ccb->ccb_h.status = CAM_REQ_CMP;
3181 				break;
3182 			}
3183 		}
3184 		if (XPT_FC_IS_QUEUED(abort_ccb)
3185 		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3186 			/*
3187 			 * It's already completed but waiting
3188 			 * for our SWI to get to it.
3189 			 */
3190 			start_ccb->ccb_h.status = CAM_UA_ABORT;
3191 			break;
3192 		}
3193 		/*
3194 		 * If we weren't able to take care of the abort request
3195 		 * in the XPT, pass the request down to the SIM for processing.
3196 		 */
3197 	}
3198 	/* FALLTHROUGH */
3199 	case XPT_ACCEPT_TARGET_IO:
3200 	case XPT_EN_LUN:
3201 	case XPT_IMMED_NOTIFY:
3202 	case XPT_NOTIFY_ACK:
3203 	case XPT_GET_TRAN_SETTINGS:
3204 	case XPT_RESET_BUS:
3205 	{
3206 		struct cam_sim *sim;
3207 
3208 		sim = start_ccb->ccb_h.path->bus->sim;
3209 		(*(sim->sim_action))(sim, start_ccb);
3210 		break;
3211 	}
3212 	case XPT_PATH_INQ:
3213 	{
3214 		struct cam_sim *sim;
3215 
3216 		sim = start_ccb->ccb_h.path->bus->sim;
3217 		(*(sim->sim_action))(sim, start_ccb);
3218 		break;
3219 	}
3220 	case XPT_PATH_STATS:
3221 		start_ccb->cpis.last_reset =
3222 			start_ccb->ccb_h.path->bus->last_reset;
3223 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3224 		break;
3225 	case XPT_GDEV_TYPE:
3226 	{
3227 		struct cam_ed *dev;
3228 		int s;
3229 
3230 		dev = start_ccb->ccb_h.path->device;
3231 		s = splcam();
3232 		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3233 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3234 		} else {
3235 			struct ccb_getdev *cgd;
3236 			struct cam_eb *bus;
3237 			struct cam_et *tar;
3238 
3239 			cgd = &start_ccb->cgd;
3240 			bus = cgd->ccb_h.path->bus;
3241 			tar = cgd->ccb_h.path->target;
3242 			cgd->inq_data = dev->inq_data;
3243 			cgd->ccb_h.status = CAM_REQ_CMP;
3244 			cgd->serial_num_len = dev->serial_num_len;
3245 			if ((dev->serial_num_len > 0)
3246 			 && (dev->serial_num != NULL))
3247 				bcopy(dev->serial_num, cgd->serial_num,
3248 				      dev->serial_num_len);
3249 		}
3250 		splx(s);
3251 		break;
3252 	}
3253 	case XPT_GDEV_STATS:
3254 	{
3255 		struct cam_ed *dev;
3256 		int s;
3257 
3258 		dev = start_ccb->ccb_h.path->device;
3259 		s = splcam();
3260 		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3261 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3262 		} else {
3263 			struct ccb_getdevstats *cgds;
3264 			struct cam_eb *bus;
3265 			struct cam_et *tar;
3266 
3267 			cgds = &start_ccb->cgds;
3268 			bus = cgds->ccb_h.path->bus;
3269 			tar = cgds->ccb_h.path->target;
3270 			cgds->dev_openings = dev->ccbq.dev_openings;
3271 			cgds->dev_active = dev->ccbq.dev_active;
3272 			cgds->devq_openings = dev->ccbq.devq_openings;
3273 			cgds->devq_queued = dev->ccbq.queue.entries;
3274 			cgds->held = dev->ccbq.held;
3275 			cgds->last_reset = tar->last_reset;
3276 			cgds->maxtags = dev->quirk->maxtags;
3277 			cgds->mintags = dev->quirk->mintags;
3278 			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3279 				cgds->last_reset = bus->last_reset;
3280 			cgds->ccb_h.status = CAM_REQ_CMP;
3281 		}
3282 		splx(s);
3283 		break;
3284 	}
3285 	case XPT_GDEVLIST:
3286 	{
3287 		struct cam_periph	*nperiph;
3288 		struct periph_list	*periph_head;
3289 		struct ccb_getdevlist	*cgdl;
3290 		u_int			i;
3291 		int			s;
3292 		struct cam_ed		*device;
3293 		int			found;
3294 
3295 
3296 		found = 0;
3297 
3298 		/*
3299 		 * Don't want anyone mucking with our data.
3300 		 */
3301 		s = splcam();
3302 		device = start_ccb->ccb_h.path->device;
3303 		periph_head = &device->periphs;
3304 		cgdl = &start_ccb->cgdl;
3305 
3306 		/*
3307 		 * Check and see if the list has changed since the user
3308 		 * last requested a list member.  If so, tell them that the
3309 		 * list has changed, and therefore they need to start over
3310 		 * from the beginning.
3311 		 */
3312 		if ((cgdl->index != 0) &&
3313 		    (cgdl->generation != device->generation)) {
3314 			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3315 			splx(s);
3316 			break;
3317 		}
3318 
3319 		/*
3320 		 * Traverse the list of peripherals and attempt to find
3321 		 * the requested peripheral.
3322 		 */
3323 		for (nperiph = SLIST_FIRST(periph_head), i = 0;
3324 		     (nperiph != NULL) && (i <= cgdl->index);
3325 		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3326 			if (i == cgdl->index) {
3327 				strncpy(cgdl->periph_name,
3328 					nperiph->periph_name,
3329 					DEV_IDLEN);
3330 				cgdl->unit_number = nperiph->unit_number;
3331 				found = 1;
3332 			}
3333 		}
3334 		if (found == 0) {
3335 			cgdl->status = CAM_GDEVLIST_ERROR;
3336 			splx(s);
3337 			break;
3338 		}
3339 
3340 		if (nperiph == NULL)
3341 			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3342 		else
3343 			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3344 
3345 		cgdl->index++;
3346 		cgdl->generation = device->generation;
3347 
3348 		splx(s);
3349 		cgdl->ccb_h.status = CAM_REQ_CMP;
3350 		break;
3351 	}
3352 	case XPT_DEV_MATCH:
3353 	{
3354 		int s;
3355 		dev_pos_type position_type;
3356 		struct ccb_dev_match *cdm;
3357 
3358 		cdm = &start_ccb->cdm;
3359 
3360 		/*
3361 		 * Prevent EDT changes while we traverse it.
3362 		 */
3363 		s = splcam();
3364 		/*
3365 		 * There are two ways of getting at information in the EDT.
3366 		 * The first way is via the primary EDT tree.  It starts
3367 		 * with a list of busses, then a list of targets on a bus,
3368 		 * then devices/luns on a target, and then peripherals on a
3369 		 * device/lun.  The "other" way is by the peripheral driver
3370 		 * lists.  The peripheral driver lists are organized by
3371 		 * peripheral driver.  (obviously)  So it makes sense to
3372 		 * use the peripheral driver list if the user is looking
3373 		 * for something like "da1", or all "da" devices.  If the
3374 		 * user is looking for something on a particular bus/target
3375 		 * or lun, it's generally better to go through the EDT tree.
3376 		 */
3377 
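		/*
		 * Concretely (illustrative): a single DEV_MATCH_PERIPH
		 * pattern with PERIPH_MATCH_NAME set to "da" steers us to
		 * the peripheral driver lists, while any bus or device
		 * pattern (or no patterns at all) selects the EDT
		 * traversal below.
		 */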
3378 		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3379 			position_type = cdm->pos.position_type;
3380 		else {
3381 			u_int i;
3382 
3383 			position_type = CAM_DEV_POS_NONE;
3384 
3385 			for (i = 0; i < cdm->num_patterns; i++) {
3386 				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3387 				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3388 					position_type = CAM_DEV_POS_EDT;
3389 					break;
3390 				}
3391 			}
3392 
3393 			if (cdm->num_patterns == 0)
3394 				position_type = CAM_DEV_POS_EDT;
3395 			else if (position_type == CAM_DEV_POS_NONE)
3396 				position_type = CAM_DEV_POS_PDRV;
3397 		}
3398 
3399 		switch (position_type & CAM_DEV_POS_TYPEMASK) {
3400 		case CAM_DEV_POS_EDT:
3401 			xptedtmatch(cdm);
3402 			break;
3403 		case CAM_DEV_POS_PDRV:
3404 			xptperiphlistmatch(cdm);
3405 			break;
3406 		default:
3407 			cdm->status = CAM_DEV_MATCH_ERROR;
3408 			break;
3409 		}
3410 
3411 		splx(s);
3412 
3413 		if (cdm->status == CAM_DEV_MATCH_ERROR)
3414 			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3415 		else
3416 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3417 
3418 		break;
3419 	}
3420 	case XPT_SASYNC_CB:
3421 	{
3422 		struct ccb_setasync *csa;
3423 		struct async_node *cur_entry;
3424 		struct async_list *async_head;
3425 		u_int32_t added;
3426 		int s;
3427 
3428 		csa = &start_ccb->csa;
3429 		added = csa->event_enable;
3430 		async_head = &csa->ccb_h.path->device->asyncs;
3431 
3432 		/*
3433 		 * If there is already an entry for us, simply
3434 		 * update it.
3435 		 */
3436 		s = splcam();
3437 		cur_entry = SLIST_FIRST(async_head);
3438 		while (cur_entry != NULL) {
3439 			if ((cur_entry->callback_arg == csa->callback_arg)
3440 			 && (cur_entry->callback == csa->callback))
3441 				break;
3442 			cur_entry = SLIST_NEXT(cur_entry, links);
3443 		}
3444 
3445 		if (cur_entry != NULL) {
3446 			/*
3447 			 * If the request has no flags set,
3448 			 * remove the entry.
3449 			 */
3450 			added &= ~cur_entry->event_enable;
3451 			if (csa->event_enable == 0) {
3452 				SLIST_REMOVE(async_head, cur_entry,
3453 					     async_node, links);
3454 				csa->ccb_h.path->device->refcount--;
3455 				free(cur_entry, M_CAMXPT);
3456 			} else {
3457 				cur_entry->event_enable = csa->event_enable;
3458 			}
3459 		} else {
3460 			cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
3461 					   M_NOWAIT);
3462 			if (cur_entry == NULL) {
3463 				splx(s);
3464 				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3465 				break;
3466 			}
3467 			cur_entry->event_enable = csa->event_enable;
3468 			cur_entry->callback_arg = csa->callback_arg;
3469 			cur_entry->callback = csa->callback;
3470 			SLIST_INSERT_HEAD(async_head, cur_entry, links);
3471 			csa->ccb_h.path->device->refcount++;
3472 		}
3473 
3474 		if ((added & AC_FOUND_DEVICE) != 0) {
3475 			/*
3476 			 * Get this peripheral up to date with all
3477 			 * the currently existing devices.
3478 			 */
3479 			xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3480 		}
3481 		if ((added & AC_PATH_REGISTERED) != 0) {
3482 			/*
3483 			 * Get this peripheral up to date with all
3484 			 * the currently existing busses.
3485 			 */
3486 			xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3487 		}
3488 		splx(s);
3489 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3490 		break;
3491 	}
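
	/*
	 * For reference, the XPT_SASYNC_CB handler above is typically
	 * driven from a peripheral driver's init routine, roughly as
	 * follows (sketch, with a hypothetical callback "fooasync"):
	 *
	 *	struct ccb_setasync csa;
	 *
	 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
	 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
	 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
	 *	csa.callback = fooasync;
	 *	csa.callback_arg = NULL;
	 *	xpt_action((union ccb *)&csa);
	 */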
3492 	case XPT_REL_SIMQ:
3493 	{
3494 		struct ccb_relsim *crs;
3495 		struct cam_ed *dev;
3496 		int s;
3497 
3498 		crs = &start_ccb->crs;
3499 		dev = crs->ccb_h.path->device;
3500 		if (dev == NULL) {
3501 
3502 			crs->ccb_h.status = CAM_DEV_NOT_THERE;
3503 			break;
3504 		}
3505 
3506 		s = splcam();
3507 
3508 		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3509 
3510 			if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
3511 				/* Don't ever go below one opening */
3512 				if (crs->openings > 0) {
3513 					xpt_dev_ccbq_resize(crs->ccb_h.path,
3514 							    crs->openings);
3515 
3516 					if (bootverbose) {
3517 						xpt_print_path(crs->ccb_h.path);
3518 						printf("tagged openings "
3519 						       "now %d\n",
3520 						       crs->openings);
3521 					}
3522 				}
3523 			}
3524 		}
3525 
3526 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3527 
3528 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3529 
3530 				/*
3531 				 * Just extend the old timeout and decrement
3532 				 * the freeze count so that a single timeout
3533 				 * is sufficient for releasing the queue.
3534 				 */
3535 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3536 				untimeout(xpt_release_devq_timeout,
3537 					  dev, dev->c_handle);
3538 			} else {
3539 
3540 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3541 			}
3542 
3543 			dev->c_handle =
3544 				timeout(xpt_release_devq_timeout,
3545 					dev,
3546 					(crs->release_timeout * hz) / 1000);
3547 
3548 			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3549 
3550 		}
3551 
3552 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3553 
3554 			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3555 				/*
3556 				 * Decrement the freeze count so that a single
3557 				 * completion is still sufficient to unfreeze
3558 				 * the queue.
3559 				 */
3560 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3561 			} else {
3562 
3563 				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3564 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3565 			}
3566 		}
3567 
3568 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3569 
3570 			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3571 			 || (dev->ccbq.dev_active == 0)) {
3572 
3573 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3574 			} else {
3575 
3576 				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3577 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3578 			}
3579 		}
3580 		splx(s);
3581 
3582 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3583 
3584 			xpt_release_devq(crs->ccb_h.path, /*count*/1,
3585 					 /*run_queue*/TRUE);
3586 		}
3587 		start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3588 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3589 		break;
3590 	}
3591 	case XPT_SCAN_BUS:
3592 		xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3593 		break;
3594 	case XPT_SCAN_LUN:
3595 		xpt_scan_lun(start_ccb->ccb_h.path->periph,
3596 			     start_ccb->ccb_h.path, start_ccb->crcn.flags,
3597 			     start_ccb);
3598 		break;
3599 	case XPT_DEBUG: {
3600 #ifdef CAMDEBUG
3601 		int s;
3602 
3603 		s = splcam();
3604 #ifdef CAM_DEBUG_DELAY
3605 		cam_debug_delay = CAM_DEBUG_DELAY;
3606 #endif
3607 		cam_dflags = start_ccb->cdbg.flags;
3608 		if (cam_dpath != NULL) {
3609 			xpt_free_path(cam_dpath);
3610 			cam_dpath = NULL;
3611 		}
3612 
3613 		if (cam_dflags != CAM_DEBUG_NONE) {
3614 			if (xpt_create_path(&cam_dpath, xpt_periph,
3615 					    start_ccb->ccb_h.path_id,
3616 					    start_ccb->ccb_h.target_id,
3617 					    start_ccb->ccb_h.target_lun) !=
3618 					    CAM_REQ_CMP) {
3619 				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3620 				cam_dflags = CAM_DEBUG_NONE;
3621 			} else {
3622 				start_ccb->ccb_h.status = CAM_REQ_CMP;
3623 				xpt_print_path(cam_dpath);
3624 				printf("debugging flags now %x\n", cam_dflags);
3625 			}
3626 		} else {
3627 			cam_dpath = NULL;
3628 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3629 		}
3630 		splx(s);
3631 #else /* !CAMDEBUG */
3632 		start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3633 #endif /* CAMDEBUG */
3634 		break;
3635 	}
3636 	case XPT_NOOP:
3637 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3638 			xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3639 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3640 		break;
3641 	default:
3642 	case XPT_SDEV_TYPE:
3643 	case XPT_TERM_IO:
3644 	case XPT_ENG_INQ:
3645 		/* XXX Implement */
3646 		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3647 		break;
3648 	}
3649 	splx(iopl);
3650 }
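
/*
 * Example (a hedged sketch, not part of the original file): how a
 * peripheral driver might use the XPT_REL_SIMQ handler above to thaw
 * a frozen device queue after a 500ms delay.  "periph" is assumed to
 * be a struct cam_periph the caller already holds; release_timeout is
 * in milliseconds and is converted to ticks above via
 * (timeout * hz) / 1000.
 */
static void
example_release_devq_later(struct cam_periph *periph)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, periph->path, /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
	crs.release_timeout = 500;	/* ms */
	crs.openings = 0;
	crs.qfrozen_cnt = 0;
	xpt_action((union ccb *)&crs);
}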
3651 
3652 void
3653 xpt_polled_action(union ccb *start_ccb)
3654 {
3655 	int	  s;
3656 	u_int32_t timeout;
3657 	struct	  cam_sim *sim;
3658 	struct	  cam_devq *devq;
3659 	struct	  cam_ed *dev;
3660 
3661 	GIANT_REQUIRED;
3662 
3663 	timeout = start_ccb->ccb_h.timeout;
3664 	sim = start_ccb->ccb_h.path->bus->sim;
3665 	devq = sim->devq;
3666 	dev = start_ccb->ccb_h.path->device;
3667 
3668 	s = splcam();
3669 
3670 	/*
3671 	 * Steal an opening so that no other queued requests
3672 	 * can get it before us while we simulate interrupts.
3673 	 */
3674 	dev->ccbq.devq_openings--;
3675 	dev->ccbq.dev_openings--;
3676 
3677 	while (((devq != NULL && devq->send_openings <= 0) ||
3678 	   dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
3679 		DELAY(1000);
3680 		(*(sim->sim_poll))(sim);
3681 		camisr(&cam_bioq);
3682 	}
3683 
3684 	dev->ccbq.devq_openings++;
3685 	dev->ccbq.dev_openings++;
3686 
3687 	if (timeout != 0) {
3688 		xpt_action(start_ccb);
3689 		while (--timeout > 0) {
3690 			(*(sim->sim_poll))(sim);
3691 			camisr(&cam_bioq);
3692 			if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3693 			    != CAM_REQ_INPROG)
3694 				break;
3695 			DELAY(1000);
3696 		}
3697 		if (timeout == 0) {
3698 			/*
3699 			 * XXX Is it worth adding a sim_timeout entry
3700 			 * point so we can attempt recovery?  If
3701 			 * this is only used for dumps, I don't think
3702 			 * it is.
3703 			 */
3704 			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3705 		}
3706 	} else {
3707 		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3708 	}
3709 	splx(s);
3710 }
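
/*
 * Example (a hedged sketch, not part of the original file): issuing a
 * polled TEST UNIT READY through xpt_polled_action(), as a crash-dump
 * path might when interrupts are unavailable.  "path" is a hypothetical
 * pre-built path; the CCB timeout is consumed above in 1ms DELAY()
 * steps while the SIM is polled.
 */
static int
example_polled_tur(struct cam_path *path)
{
	union ccb ccb;

	xpt_setup_ccb(&ccb.ccb_h, path, /*priority*/1);
	scsi_test_unit_ready(&ccb.csio,
			     /*retries*/0,
			     /*cbfcnp*/NULL,
			     MSG_SIMPLE_Q_TAG,
			     SSD_FULL_SIZE,
			     /*timeout*/5000);
	xpt_polled_action(&ccb);
	return ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP);
}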
3711 
3712 /*
3713  * Schedule a peripheral driver to receive a ccb when its
3714  * target device has space for more transactions.
3715  */
3716 void
3717 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3718 {
3719 	struct cam_ed *device;
3720 	union ccb *work_ccb;
3721 	int s;
3722 	int runq;
3723 
3724 	GIANT_REQUIRED;
3725 
3726 	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3727 	device = perph->path->device;
3728 	s = splsoftcam();
3729 	if (periph_is_queued(perph)) {
3730 		/* Simply reorder based on new priority */
3731 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3732 			  ("   change priority to %d\n", new_priority));
3733 		if (new_priority < perph->pinfo.priority) {
3734 			camq_change_priority(&device->drvq,
3735 					     perph->pinfo.index,
3736 					     new_priority);
3737 		}
3738 		runq = 0;
3739 	} else if (SIM_DEAD(perph->path->bus->sim)) {
3740 		/* The SIM is gone so just call periph_start directly. */
3741 		work_ccb = xpt_get_ccb(perph->path->device);
3742 		splx(s);
3743 		if (work_ccb == NULL)
3744 			return; /* XXX */
3745 		xpt_setup_ccb(&work_ccb->ccb_h, perph->path, new_priority);
3746 		perph->pinfo.priority = new_priority;
3747 		perph->periph_start(perph, work_ccb);
3748 		return;
3749 	} else {
3750 		/* New entry on the queue */
3751 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3752 			  ("   added periph to queue\n"));
3753 		perph->pinfo.priority = new_priority;
3754 		perph->pinfo.generation = ++device->drvq.generation;
3755 		camq_insert(&device->drvq, &perph->pinfo);
3756 		runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3757 	}
3758 	splx(s);
3759 	if (runq != 0) {
3760 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3761 			  ("   calling xpt_run_devq\n"));
3762 		xpt_run_dev_allocq(perph->path->bus);
3763 	}
3764 }
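
/*
 * Example (a hedged sketch, not part of the original file): the calling
 * pattern xpt_schedule() is built around.  A peripheral driver queues
 * work in its own softc, asks to be scheduled, and is later handed a
 * CCB through its periph_start entry point.  The exampledriver_* names
 * are hypothetical.
 */
static void
exampledriver_kick(struct cam_periph *periph)
{
	/* Request a CCB; periph_start runs once one is available. */
	xpt_schedule(periph, /*new_priority*/1);
}

static void
exampledriver_start(struct cam_periph *periph, union ccb *start_ccb)
{
	/* Fill in start_ccb (e.g. a SCSI I/O) and hand it to the SIM. */
	xpt_action(start_ccb);
}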
3765 
3766 
3767 /*
3768  * Schedule a device to run on a given queue.
3769  * If the device was inserted as a new entry on the queue,
3770  * return 1 meaning the device queue should be run. If we
3771  * were already queued, implying someone else has already
3772  * started the queue, return 0 so the caller doesn't attempt
3773  * to run the queue.  Must be run at splsoftcam or splcam
3774  * (since splcam encompasses splsoftcam).
3775  */
3776 static int
3777 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3778 		 u_int32_t new_priority)
3779 {
3780 	int retval;
3781 	u_int32_t old_priority;
3782 
3783 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3784 
3785 	old_priority = pinfo->priority;
3786 
3787 	/*
3788 	 * Are we already queued?
3789 	 */
3790 	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3791 		/* Simply reorder based on new priority */
3792 		if (new_priority < old_priority) {
3793 			camq_change_priority(queue, pinfo->index,
3794 					     new_priority);
3795 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3796 					("changed priority to %d\n",
3797 					 new_priority));
3798 		}
3799 		retval = 0;
3800 	} else {
3801 		/* New entry on the queue */
3802 		if (new_priority < old_priority)
3803 			pinfo->priority = new_priority;
3804 
3805 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3806 				("Inserting onto queue\n"));
3807 		pinfo->generation = ++queue->generation;
3808 		camq_insert(queue, pinfo);
3809 		retval = 1;
3810 	}
3811 	return (retval);
3812 }
3813 
3814 static void
3815 xpt_run_dev_allocq(struct cam_eb *bus)
3816 {
3817 	struct	cam_devq *devq;
3818 	int	s;
3819 
3820 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3821 	devq = bus->sim->devq;
3822 
3823 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3824 			("   qfrozen_cnt == 0x%x, entries == %d, "
3825 			 "openings == %d, active == %d\n",
3826 			 devq->alloc_queue.qfrozen_cnt,
3827 			 devq->alloc_queue.entries,
3828 			 devq->alloc_openings,
3829 			 devq->alloc_active));
3830 
3831 	s = splsoftcam();
3832 	devq->alloc_queue.qfrozen_cnt++;
3833 	while ((devq->alloc_queue.entries > 0)
3834 	    && (devq->alloc_openings > 0)
3835 	    && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3836 		struct	cam_ed_qinfo *qinfo;
3837 		struct	cam_ed *device;
3838 		union	ccb *work_ccb;
3839 		struct	cam_periph *drv;
3840 		struct	camq *drvq;
3841 
3842 		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3843 							   CAMQ_HEAD);
3844 		device = qinfo->device;
3845 
3846 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3847 				("running device %p\n", device));
3848 
3849 		drvq = &device->drvq;
3850 
3851 #ifdef CAMDEBUG
3852 		if (drvq->entries <= 0) {
3853 			panic("xpt_run_dev_allocq: "
3854 			      "Device on queue without any work to do");
3855 		}
3856 #endif
3857 		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3858 			devq->alloc_openings--;
3859 			devq->alloc_active++;
3860 			drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3861 			splx(s);
3862 			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3863 				      drv->pinfo.priority);
3864 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3865 					("calling periph start\n"));
3866 			drv->periph_start(drv, work_ccb);
3867 		} else {
3868 			/*
3869 			 * Malloc failure in alloc_ccb
3870 			 */
3871 			/*
3872 			 * XXX add us to a list to be run from free_ccb
3873 			 * if we don't have any ccbs active on this
3874 			 * device queue; otherwise we may never get run
3875 			 * again.
3876 			 */
3877 			break;
3878 		}
3879 
3880 		/* Raise IPL for possible insertion and test at top of loop */
3881 		s = splsoftcam();
3882 
3883 		if (drvq->entries > 0) {
3884 			/* We have more work.  Attempt to reschedule */
3885 			xpt_schedule_dev_allocq(bus, device);
3886 		}
3887 	}
3888 	devq->alloc_queue.qfrozen_cnt--;
3889 	splx(s);
3890 }
3891 
3892 static void
3893 xpt_run_dev_sendq(struct cam_eb *bus)
3894 {
3895 	struct	cam_devq *devq;
3896 	int	s;
3897 
3898 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3899 
3900 	devq = bus->sim->devq;
3901 
3902 	s = splcam();
3903 	devq->send_queue.qfrozen_cnt++;
3904 	splx(s);
3905 	s = splsoftcam();
3906 	while ((devq->send_queue.entries > 0)
3907 	    && (devq->send_openings > 0)) {
3908 		struct	cam_ed_qinfo *qinfo;
3909 		struct	cam_ed *device;
3910 		union ccb *work_ccb;
3911 		struct	cam_sim *sim;
3912 		int	ospl;
3913 
3914 		ospl = splcam();
3915 		if (devq->send_queue.qfrozen_cnt > 1) {
3916 			splx(ospl);
3917 			break;
3918 		}
3919 
3920 		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3921 							   CAMQ_HEAD);
3922 		device = qinfo->device;
3923 
3924 		/*
3925 		 * If the device has been "frozen", don't attempt
3926 		 * to run it.
3927 		 */
3928 		if (device->qfrozen_cnt > 0) {
3929 			splx(ospl);
3930 			continue;
3931 		}
3932 
3933 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3934 				("running device %p\n", device));
3935 
3936 		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3937 		if (work_ccb == NULL) {
3938 			printf("device on run queue with no ccbs???\n");
3939 			splx(ospl);
3940 			continue;
3941 		}
3942 
3943 		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3944 
3945 			if (num_highpower <= 0) {
3946 				/*
3947 				 * We got a high power command, but we
3948 				 * don't have any available slots.  Freeze
3949 				 * the device queue until we have a slot
3950 				 * available.
3951 				 */
3952 				device->qfrozen_cnt++;
3953 				STAILQ_INSERT_TAIL(&highpowerq,
3954 						   &work_ccb->ccb_h,
3955 						   xpt_links.stqe);
3956 
3957 				splx(ospl);
3958 				continue;
3959 			} else {
3960 				/*
3961 				 * Consume a high power slot while
3962 				 * this ccb runs.
3963 				 */
3964 				num_highpower--;
3965 			}
3966 		}
3967 		devq->active_dev = device;
3968 		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3969 
3970 		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3971 		splx(ospl);
3972 
3973 		devq->send_openings--;
3974 		devq->send_active++;
3975 
3976 		if (device->ccbq.queue.entries > 0)
3977 			xpt_schedule_dev_sendq(bus, device);
3978 
3979 		if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3980 			/*
3981 			 * The client wants to freeze the queue
3982 			 * after this CCB is sent.
3983 			 */
3984 			ospl = splcam();
3985 			device->qfrozen_cnt++;
3986 			splx(ospl);
3987 		}
3988 
3989 		splx(s);
3990 
3991 		/* In Target mode, the peripheral driver knows best... */
3992 		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3993 			if ((device->inq_flags & SID_CmdQue) != 0
3994 			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3995 				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3996 			else
3997 				/*
3998 				 * Clear this in case of a retried CCB that
3999 				 * failed due to a rejected tag.
4000 				 */
4001 				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
4002 		}
4003 
4004 		/*
4005 		 * Device queues can be shared among multiple sim instances
4006 		 * that reside on different busses.  Use the SIM in the queue
4007 		 * CCB's path, rather than the one in the bus that was passed
4008 		 * into this function.
4009 		 */
4010 		sim = work_ccb->ccb_h.path->bus->sim;
4011 		(*(sim->sim_action))(sim, work_ccb);
4012 
4013 		ospl = splcam();
4014 		devq->active_dev = NULL;
4015 		splx(ospl);
4016 		/* Raise IPL for possible insertion and test at top of loop */
4017 		s = splsoftcam();
4018 	}
4019 	splx(s);
4020 	s = splcam();
4021 	devq->send_queue.qfrozen_cnt--;
4022 	splx(s);
4023 }
4024 
4025 /*
4026  * This function merges fields from the slave ccb into the master ccb,
4027  * while keeping important fields in the master ccb constant.
4028  */
4029 void
4030 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
4031 {
4032 	GIANT_REQUIRED;
4033 
4034 	/*
4035 	 * Pull fields that are valid for peripheral drivers to set
4036 	 * into the master CCB along with the CCB "payload".
4037 	 */
4038 	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
4039 	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
4040 	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
4041 	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
4042 	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
4043 	      sizeof(union ccb) - sizeof(struct ccb_hdr));
4044 }
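
/*
 * Example (a hedged sketch, not part of the original file): typical
 * xpt_merge_ccb() use.  A caller-built CCB ("slave") is merged into a
 * CCB obtained through the XPT ("master"), so the master keeps its
 * header bookkeeping (path, pinfo, etc.) while taking on the slave's
 * function code, flags, timeout, retry count, and payload.
 */
static void
example_merge_and_send(union ccb *master_ccb, union ccb *user_ccb)
{
	xpt_merge_ccb(master_ccb, user_ccb);
	xpt_action(master_ccb);
}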
4045 
4046 void
4047 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
4048 {
4049 	GIANT_REQUIRED;
4050 
4051 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
4052 	ccb_h->pinfo.priority = priority;
4053 	ccb_h->path = path;
4054 	ccb_h->path_id = path->bus->path_id;
4055 	if (path->target)
4056 		ccb_h->target_id = path->target->target_id;
4057 	else
4058 		ccb_h->target_id = CAM_TARGET_WILDCARD;
4059 	if (path->device) {
4060 		ccb_h->target_lun = path->device->lun_id;
4061 		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
4062 	} else {
4063 		ccb_h->target_lun = CAM_TARGET_WILDCARD;
4064 	}
4065 	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
4066 	ccb_h->flags = 0;
4067 }
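
/*
 * Example (a hedged sketch, not part of the original file): the
 * xpt_setup_ccb()/xpt_action() pattern for an "immediate" CCB.
 * XPT_GDEV_TYPE is serviced synchronously, so the status can be
 * inspected as soon as xpt_action() returns (compare the reuse of a
 * request CCB this way in xpt_scan_bus() below).
 */
static int
example_get_dev_type(struct cam_path *path, struct ccb_getdev *cgd)
{
	xpt_setup_ccb(&cgd->ccb_h, path, /*priority*/1);
	cgd->ccb_h.func_code = XPT_GDEV_TYPE;
	xpt_action((union ccb *)cgd);
	return (cgd->ccb_h.status == CAM_REQ_CMP);
}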
4068 
4069 /* Path manipulation functions */
4070 cam_status
4071 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
4072 		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
4073 {
4074 	struct	   cam_path *path;
4075 	cam_status status;
4076 
4077 	GIANT_REQUIRED;
4078 
4079 	path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
4080 
4081 	if (path == NULL) {
4082 		status = CAM_RESRC_UNAVAIL;
4083 		return(status);
4084 	}
4085 	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
4086 	if (status != CAM_REQ_CMP) {
4087 		free(path, M_CAMXPT);
4088 		path = NULL;
4089 	}
4090 	*new_path_ptr = path;
4091 	return (status);
4092 }
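
/*
 * Example (a hedged sketch, not part of the original file): creating
 * and releasing a path.  The bus/target/lun triple here is
 * hypothetical; real callers take the ids from a CCB or from
 * configuration.
 */
static cam_status
example_with_path(void)
{
	struct cam_path *path;
	cam_status status;

	status = xpt_create_path(&path, /*periph*/NULL,
				 /*path_id*/0, /*target_id*/0, /*lun_id*/0);
	if (status != CAM_REQ_CMP)
		return (status);	/* no path was allocated */
	/* ... use the path, e.g. with xpt_setup_ccb() ... */
	xpt_free_path(path);
	return (CAM_REQ_CMP);
}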
4093 
4094 static cam_status
4095 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
4096 		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
4097 {
4098 	struct	     cam_eb *bus;
4099 	struct	     cam_et *target;
4100 	struct	     cam_ed *device;
4101 	cam_status   status;
4102 	int	     s;
4103 
4104 	status = CAM_REQ_CMP;	/* Completed without error */
4105 	target = NULL;		/* Wildcarded */
4106 	device = NULL;		/* Wildcarded */
4107 
4108 	/*
4109 	 * We will potentially modify the EDT, so block interrupts
4110 	 * that may attempt to create cam paths.
4111 	 */
4112 	s = splcam();
4113 	bus = xpt_find_bus(path_id);
4114 	if (bus == NULL) {
4115 		status = CAM_PATH_INVALID;
4116 	} else {
4117 		target = xpt_find_target(bus, target_id);
4118 		if (target == NULL) {
4119 			/* Create one */
4120 			struct cam_et *new_target;
4121 
4122 			new_target = xpt_alloc_target(bus, target_id);
4123 			if (new_target == NULL) {
4124 				status = CAM_RESRC_UNAVAIL;
4125 			} else {
4126 				target = new_target;
4127 			}
4128 		}
4129 		if (target != NULL) {
4130 			device = xpt_find_device(target, lun_id);
4131 			if (device == NULL) {
4132 				/* Create one */
4133 				struct cam_ed *new_device;
4134 
4135 				new_device = xpt_alloc_device(bus,
4136 							      target,
4137 							      lun_id);
4138 				if (new_device == NULL) {
4139 					status = CAM_RESRC_UNAVAIL;
4140 				} else {
4141 					device = new_device;
4142 				}
4143 			}
4144 		}
4145 	}
4146 	splx(s);
4147 
4148 	/*
4149 	 * Only touch the user's data if we are successful.
4150 	 */
4151 	if (status == CAM_REQ_CMP) {
4152 		new_path->periph = perph;
4153 		new_path->bus = bus;
4154 		new_path->target = target;
4155 		new_path->device = device;
4156 		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
4157 	} else {
4158 		if (device != NULL)
4159 			xpt_release_device(bus, target, device);
4160 		if (target != NULL)
4161 			xpt_release_target(bus, target);
4162 		if (bus != NULL)
4163 			xpt_release_bus(bus);
4164 	}
4165 	return (status);
4166 }
4167 
4168 static void
4169 xpt_release_path(struct cam_path *path)
4170 {
4171 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
4172 	if (path->device != NULL) {
4173 		xpt_release_device(path->bus, path->target, path->device);
4174 		path->device = NULL;
4175 	}
4176 	if (path->target != NULL) {
4177 		xpt_release_target(path->bus, path->target);
4178 		path->target = NULL;
4179 	}
4180 	if (path->bus != NULL) {
4181 		xpt_release_bus(path->bus);
4182 		path->bus = NULL;
4183 	}
4184 }
4185 
4186 void
4187 xpt_free_path(struct cam_path *path)
4188 {
4189 	GIANT_REQUIRED;
4190 
4191 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
4192 	xpt_release_path(path);
4193 	free(path, M_CAMXPT);
4194 }
4195 
4196 
4197 /*
4198  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
4199  * in path1, 2 for match with wildcards in path2.
4200  */
4201 int
4202 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
4203 {
4204 	int retval = 0;
4205 
4206 	GIANT_REQUIRED;
4207 
4208 	if (path1->bus != path2->bus) {
4209 		if (path1->bus->path_id == CAM_BUS_WILDCARD)
4210 			retval = 1;
4211 		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
4212 			retval = 2;
4213 		else
4214 			return (-1);
4215 	}
4216 	if (path1->target != path2->target) {
4217 		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
4218 			if (retval == 0)
4219 				retval = 1;
4220 		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4221 			retval = 2;
4222 		else
4223 			return (-1);
4224 	}
4225 	if (path1->device != path2->device) {
4226 		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4227 			if (retval == 0)
4228 				retval = 1;
4229 		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4230 			retval = 2;
4231 		else
4232 			return (-1);
4233 	}
4234 	return (retval);
4235 }
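
/*
 * Example (a hedged sketch, not part of the original file):
 * interpreting xpt_path_comp() results when matching a callback's
 * registered path against an event's path, per the return convention
 * documented above.
 */
static int
example_path_matches(struct cam_path *cb_path, struct cam_path *ev_path)
{
	switch (xpt_path_comp(cb_path, ev_path)) {
	case 0:		/* exact match */
	case 1:		/* match via wildcards in cb_path */
	case 2:		/* match via wildcards in ev_path */
		return (1);
	default:	/* -1: no match */
		return (0);
	}
}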
4236 
4237 void
4238 xpt_print_path(struct cam_path *path)
4239 {
4240 	GIANT_REQUIRED;
4241 
4242 	if (path == NULL)
4243 		printf("(nopath): ");
4244 	else {
4245 		if (path->periph != NULL)
4246 			printf("(%s%d:", path->periph->periph_name,
4247 			       path->periph->unit_number);
4248 		else
4249 			printf("(noperiph:");
4250 
4251 		if (path->bus != NULL)
4252 			printf("%s%d:%d:", path->bus->sim->sim_name,
4253 			       path->bus->sim->unit_number,
4254 			       path->bus->sim->bus_id);
4255 		else
4256 			printf("nobus:");
4257 
4258 		if (path->target != NULL)
4259 			printf("%d:", path->target->target_id);
4260 		else
4261 			printf("X:");
4262 
4263 		if (path->device != NULL)
4264 			printf("%d): ", path->device->lun_id);
4265 		else
4266 			printf("X): ");
4267 	}
4268 }
4269 
4270 int
4271 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4272 {
4273 	struct sbuf sb;
4274 
4275 	GIANT_REQUIRED;
4276 
4277 	sbuf_new(&sb, str, str_len, 0);
4278 
4279 	if (path == NULL)
4280 		sbuf_printf(&sb, "(nopath): ");
4281 	else {
4282 		if (path->periph != NULL)
4283 			sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4284 				    path->periph->unit_number);
4285 		else
4286 			sbuf_printf(&sb, "(noperiph:");
4287 
4288 		if (path->bus != NULL)
4289 			sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4290 				    path->bus->sim->unit_number,
4291 				    path->bus->sim->bus_id);
4292 		else
4293 			sbuf_printf(&sb, "nobus:");
4294 
4295 		if (path->target != NULL)
4296 			sbuf_printf(&sb, "%d:", path->target->target_id);
4297 		else
4298 			sbuf_printf(&sb, "X:");
4299 
4300 		if (path->device != NULL)
4301 			sbuf_printf(&sb, "%d): ", path->device->lun_id);
4302 		else
4303 			sbuf_printf(&sb, "X): ");
4304 	}
4305 	sbuf_finish(&sb);
4306 
4307 	return(sbuf_len(&sb));
4308 }
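
/*
 * Example (a hedged sketch, not part of the original file): formatting
 * a path prefix into a caller-supplied buffer with xpt_path_string();
 * the output matches xpt_print_path()'s "(periph:sim:bus:target:lun): "
 * form above.
 */
static void
example_log_path(struct cam_path *path)
{
	char str[64];

	xpt_path_string(path, str, sizeof(str));
	printf("%sexample message\n", str);
}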
4309 
4310 path_id_t
4311 xpt_path_path_id(struct cam_path *path)
4312 {
4313 	GIANT_REQUIRED;
4314 
4315 	return(path->bus->path_id);
4316 }
4317 
4318 target_id_t
4319 xpt_path_target_id(struct cam_path *path)
4320 {
4321 	GIANT_REQUIRED;
4322 
4323 	if (path->target != NULL)
4324 		return (path->target->target_id);
4325 	else
4326 		return (CAM_TARGET_WILDCARD);
4327 }
4328 
4329 lun_id_t
4330 xpt_path_lun_id(struct cam_path *path)
4331 {
4332 	GIANT_REQUIRED;
4333 
4334 	if (path->device != NULL)
4335 		return (path->device->lun_id);
4336 	else
4337 		return (CAM_LUN_WILDCARD);
4338 }
4339 
4340 struct cam_sim *
4341 xpt_path_sim(struct cam_path *path)
4342 {
4343 	GIANT_REQUIRED;
4344 
4345 	return (path->bus->sim);
4346 }
4347 
4348 struct cam_periph*
4349 xpt_path_periph(struct cam_path *path)
4350 {
4351 	GIANT_REQUIRED;
4352 
4353 	return (path->periph);
4354 }
4355 
4356 /*
4357  * Release a CAM control block for the caller.  Remit the cost of the structure
4358  * to the device referenced by the path.  If this device had no 'credits'
4359  * and peripheral drivers have registered async callbacks for this
4360  * notification, call them now.
4361  */
4362 void
4363 xpt_release_ccb(union ccb *free_ccb)
4364 {
4365 	int	 s;
4366 	struct	 cam_path *path;
4367 	struct	 cam_ed *device;
4368 	struct	 cam_eb *bus;
4369 
4370 	GIANT_REQUIRED;
4371 
4372 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4373 	path = free_ccb->ccb_h.path;
4374 	device = path->device;
4375 	bus = path->bus;
4376 	s = splsoftcam();
4377 	cam_ccbq_release_opening(&device->ccbq);
4378 	if (xpt_ccb_count > xpt_max_ccbs) {
4379 		xpt_free_ccb(free_ccb);
4380 		xpt_ccb_count--;
4381 	} else {
4382 		SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
4383 	}
4384 	if (bus->sim->devq == NULL) {
4385 		splx(s);
4386 		return;
4387 	}
4388 	bus->sim->devq->alloc_openings++;
4389 	bus->sim->devq->alloc_active--;
4390 	/* XXX Turn this into an inline function - xpt_run_device?? */
4391 	if ((device_is_alloc_queued(device) == 0)
4392 	 && (device->drvq.entries > 0)) {
4393 		xpt_schedule_dev_allocq(bus, device);
4394 	}
4395 	splx(s);
4396 	if (dev_allocq_is_runnable(bus->sim->devq))
4397 		xpt_run_dev_allocq(bus);
4398 }
4399 
4400 /* Functions accessed by SIM drivers */
4401 
4402 /*
4403  * A sim structure, listing the SIM entry points and instance
4404  * identification info, is passed to xpt_bus_register to hook the SIM
4405  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
4406  * for this new bus, places it in the array of busses, and assigns
4407  * it a path_id.  The path_id may be influenced by "hard wiring"
4408  * information specified by the user.  Once interrupt services are
4409  * available, the bus will be probed.
4410  */
4411 int32_t
4412 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4413 {
4414 	struct cam_eb *new_bus;
4415 	struct cam_eb *old_bus;
4416 	struct ccb_pathinq cpi;
4417 	int s;
4418 
4419 	GIANT_REQUIRED;
4420 
4421 	sim->bus_id = bus;
4422 	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4423 					  M_CAMXPT, M_NOWAIT);
4424 	if (new_bus == NULL) {
4425 		/* Couldn't satisfy request */
4426 		return (CAM_RESRC_UNAVAIL);
4427 	}
4428 
4429 	if (strcmp(sim->sim_name, "xpt") != 0) {
4430 
4431 		sim->path_id =
4432 		    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4433 	}
4434 
4435 	TAILQ_INIT(&new_bus->et_entries);
4436 	new_bus->path_id = sim->path_id;
4437 	new_bus->sim = sim;
4438 	timevalclear(&new_bus->last_reset);
4439 	new_bus->flags = 0;
4440 	new_bus->refcount = 1;	/* Held until a bus_deregister event */
4441 	new_bus->generation = 0;
4442 	s = splcam();
4443 	old_bus = TAILQ_FIRST(&xpt_busses);
4444 	while (old_bus != NULL
4445 	    && old_bus->path_id < new_bus->path_id)
4446 		old_bus = TAILQ_NEXT(old_bus, links);
4447 	if (old_bus != NULL)
4448 		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4449 	else
4450 		TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
4451 	bus_generation++;
4452 	splx(s);
4453 
4454 	/* Notify interested parties */
4455 	if (sim->path_id != CAM_XPT_PATH_ID) {
4456 		struct cam_path path;
4457 
4458 		xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4459 			         CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4460 		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4461 		cpi.ccb_h.func_code = XPT_PATH_INQ;
4462 		xpt_action((union ccb *)&cpi);
4463 		xpt_async(AC_PATH_REGISTERED, &path, &cpi);
4464 		xpt_release_path(&path);
4465 	}
4466 	return (CAM_SUCCESS);
4467 }
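
/*
 * Example (a hedged sketch, not part of the original file): the usual
 * HBA attach-time sequence around xpt_bus_register(), using the
 * cam_simq_alloc()/cam_sim_alloc() helpers from cam_sim.h.  The action
 * and poll routines and the softc are hypothetical.
 */
static void example_hba_action(struct cam_sim *sim, union ccb *ccb);
static void example_hba_poll(struct cam_sim *sim);

static int
example_hba_attach(void *softc, int max_transactions, int unit)
{
	struct cam_devq *devq;
	struct cam_sim *sim;

	devq = cam_simq_alloc(max_transactions);
	if (devq == NULL)
		return (ENOMEM);
	sim = cam_sim_alloc(example_hba_action, example_hba_poll,
			    "example", softc, unit,
			    /*max_dev_transactions*/1,
			    /*max_tagged_dev_transactions*/max_transactions,
			    devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		return (ENOMEM);
	}
	if (xpt_bus_register(sim, /*bus*/0) != CAM_SUCCESS) {
		cam_sim_free(sim, /*free_devq*/TRUE);
		return (ENXIO);
	}
	return (0);
}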
4468 
4469 int32_t
4470 xpt_bus_deregister(path_id_t pathid)
4471 {
4472 	struct cam_path bus_path;
4473 	struct cam_ed *device;
4474 	struct cam_ed_qinfo *qinfo;
4475 	struct cam_devq *devq;
4476 	struct cam_periph *periph;
4477 	struct cam_sim *ccbsim;
4478 	union ccb *work_ccb;
4479 	cam_status status;
4480 
4481 	GIANT_REQUIRED;
4482 
4483 	status = xpt_compile_path(&bus_path, NULL, pathid,
4484 				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4485 	if (status != CAM_REQ_CMP)
4486 		return (status);
4487 
4488 	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4489 	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4490 
4491 	/* The SIM may be gone, so use a dummy SIM for any stray operations. */
4492 	devq = bus_path.bus->sim->devq;
4493 	bus_path.bus->sim = &cam_dead_sim;
4494 
4495 	/* Execute any pending operations now. */
4496 	while ((qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
4497 	    CAMQ_HEAD)) != NULL ||
4498 	    (qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
4499 	    CAMQ_HEAD)) != NULL) {
4500 		do {
4501 			device = qinfo->device;
4502 			work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
4503 			if (work_ccb != NULL) {
4504 				devq->active_dev = device;
4505 				cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
4506 				cam_ccbq_send_ccb(&device->ccbq, work_ccb);
4507 				ccbsim = work_ccb->ccb_h.path->bus->sim;
4508 				(*(ccbsim->sim_action))(ccbsim, work_ccb);
4509 			}
4510 
4511 			periph = (struct cam_periph *)camq_remove(&device->drvq,
4512 			    CAMQ_HEAD);
4513 			if (periph != NULL)
4514 				xpt_schedule(periph, periph->pinfo.priority);
4515 		} while (work_ccb != NULL || periph != NULL);
4516 	}
4517 
4518 	/* Make sure all completed CCBs are processed. */
4519 	while (!TAILQ_EMPTY(&cam_bioq)) {
4520 		camisr(&cam_bioq);
4521 
4522 		/* Repeat the asyncs for the benefit of any new devices. */
4523 		xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4524 		xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4525 	}
4526 
4527 	/* Release the reference count held while registered. */
4528 	xpt_release_bus(bus_path.bus);
4529 	xpt_release_path(&bus_path);
4530 
4531 	/* Recheck for more completed CCBs. */
4532 	while (!TAILQ_EMPTY(&cam_bioq))
4533 		camisr(&cam_bioq);
4534 
4535 	return (CAM_REQ_CMP);
4536 }
4537 
4538 static path_id_t
4539 xptnextfreepathid(void)
4540 {
4541 	struct cam_eb *bus;
4542 	path_id_t pathid;
4543 	const char *strval;
4544 
4545 	pathid = 0;
4546 	bus = TAILQ_FIRST(&xpt_busses);
4547 retry:
4548 	/* Find an unoccupied pathid */
4549 	while (bus != NULL
4550 	    && bus->path_id <= pathid) {
4551 		if (bus->path_id == pathid)
4552 			pathid++;
4553 		bus = TAILQ_NEXT(bus, links);
4554 	}
4555 
4556 	/*
4557 	 * Ensure that this pathid is not reserved for
4558 	 * a bus that may be registered in the future.
4559 	 */
4560 	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4561 		++pathid;
4562 		/* Start the search over */
4563 		goto retry;
4564 	}
4565 	return (pathid);
4566 }
4567 
4568 static path_id_t
4569 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4570 {
4571 	path_id_t pathid;
4572 	int i, dunit, val;
4573 	char buf[32];
4574 	const char *dname;
4575 
4576 	pathid = CAM_XPT_PATH_ID;
4577 	snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4578 	i = 0;
4579 	while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4580 		if (strcmp(dname, "scbus")) {
4581 			/* Avoid a bit of foot shooting. */
4582 			continue;
4583 		}
4584 		if (dunit < 0)		/* unwired?! */
4585 			continue;
4586 		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4587 			if (sim_bus == val) {
4588 				pathid = dunit;
4589 				break;
4590 			}
4591 		} else if (sim_bus == 0) {
4592 			/* Unspecified matches bus 0 */
4593 			pathid = dunit;
4594 			break;
4595 		} else {
4596 			printf("Ambiguous scbus configuration for %s%d "
4597 			       "bus %d, cannot wire down.  The kernel "
4598 			       "config entry for scbus%d should "
4599 			       "specify a controller bus.\n"
4600 			       "Scbus will be assigned dynamically.\n",
4601 			       sim_name, sim_unit, sim_bus, dunit);
4602 			break;
4603 		}
4604 	}
4605 
4606 	if (pathid == CAM_XPT_PATH_ID)
4607 		pathid = xptnextfreepathid();
4608 	return (pathid);
4609 }
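
/*
 * Example (a hedged sketch, not part of the original file): the "hard
 * wiring" consulted above comes from hints of the following form (e.g.
 * in /boot/device.hints), pinning SCSI bus 0 of ahc0 to scbus2:
 *
 *	hint.scbus.2.at="ahc0"
 *	hint.scbus.2.bus="0"
 *
 * resource_find_match() walks the "at" entries and resource_int_value()
 * reads the "bus" value, as done in xptpathid() above.
 */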
4610 
4611 void
4612 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4613 {
4614 	struct cam_eb *bus;
4615 	struct cam_et *target, *next_target;
4616 	struct cam_ed *device, *next_device;
4617 	int s;
4618 
4619 	GIANT_REQUIRED;
4620 
4621 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4622 
4623 	/*
4624 	 * Most async events come from a CAM interrupt context.  In
4625 	 * a few cases, the error recovery code at the peripheral layer,
4626 	 * which may run from our SWI or a process context, may signal
4627 	 * deferred events with a call to xpt_async. Ensure async
4628 	 * notifications are serialized by blocking cam interrupts.
4629 	 */
4630 	s = splcam();
4631 
4632 	bus = path->bus;
4633 
4634 	if (async_code == AC_BUS_RESET) {
4635 		int s;
4636 
4637 		s = splclock();
4638 		/* Update our notion of when the last reset occurred */
4639 		microtime(&bus->last_reset);
4640 		splx(s);
4641 	}
4642 
4643 	for (target = TAILQ_FIRST(&bus->et_entries);
4644 	     target != NULL;
4645 	     target = next_target) {
4646 
4647 		next_target = TAILQ_NEXT(target, links);
4648 
4649 		if (path->target != target
4650 		 && path->target->target_id != CAM_TARGET_WILDCARD
4651 		 && target->target_id != CAM_TARGET_WILDCARD)
4652 			continue;
4653 
4654 		if (async_code == AC_SENT_BDR) {
4655 			int s;
4656 
4657 			/* Update our notion of when the last reset occurred */
4658 			s = splclock();
4659 			microtime(&path->target->last_reset);
4660 			splx(s);
4661 		}
4662 
4663 		for (device = TAILQ_FIRST(&target->ed_entries);
4664 		     device != NULL;
4665 		     device = next_device) {
4666 
4667 			next_device = TAILQ_NEXT(device, links);
4668 
4669 			if (path->device != device
4670 			 && path->device->lun_id != CAM_LUN_WILDCARD
4671 			 && device->lun_id != CAM_LUN_WILDCARD)
4672 				continue;
4673 
4674 			xpt_dev_async(async_code, bus, target,
4675 				      device, async_arg);
4676 
4677 			xpt_async_bcast(&device->asyncs, async_code,
4678 					path, async_arg);
4679 		}
4680 	}
4681 
4682 	/*
4683 	 * If this wasn't a fully wildcarded async, tell all
4684 	 * clients that want all async events.
4685 	 */
4686 	if (bus != xpt_periph->path->bus)
4687 		xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4688 				path, async_arg);
4689 	splx(s);
4690 }
4691 
4692 static void
4693 xpt_async_bcast(struct async_list *async_head,
4694 		u_int32_t async_code,
4695 		struct cam_path *path, void *async_arg)
4696 {
4697 	struct async_node *cur_entry;
4698 
4699 	cur_entry = SLIST_FIRST(async_head);
4700 	while (cur_entry != NULL) {
4701 		struct async_node *next_entry;
4702 		/*
4703 		 * Grab the next list entry before we call the current
4704 		 * entry's callback.  This is because the callback function
4705 		 * can delete its async callback entry.
4706 		 */
4707 		next_entry = SLIST_NEXT(cur_entry, links);
4708 		if ((cur_entry->event_enable & async_code) != 0)
4709 			cur_entry->callback(cur_entry->callback_arg,
4710 					    async_code, path,
4711 					    async_arg);
4712 		cur_entry = next_entry;
4713 	}
4714 }
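
/*
 * Example (a hedged sketch, not part of the original file): registering
 * for the async events delivered above, via the XPT_SASYNC_CB handler
 * in xpt_action().  example_async_cb is a hypothetical handler matching
 * the async_node callback signature.
 */
static void	example_async_cb(void *arg, u_int32_t code,
				 struct cam_path *path, void *args);

static void
example_register_async(struct cam_path *path, void *arg)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE | AC_BUS_RESET;
	csa.callback = example_async_cb;
	csa.callback_arg = arg;
	xpt_action((union ccb *)&csa);
	/* Re-issuing with event_enable == 0 removes the registration. */
}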
4715 
4716 /*
4717  * Handle any per-device event notifications that require action by the XPT.
4718  */
4719 static void
4720 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4721 	      struct cam_ed *device, void *async_arg)
4722 {
4723 	cam_status status;
4724 	struct cam_path newpath;
4725 
4726 	/*
4727 	 * We only need to handle events for real devices.
4728 	 */
4729 	if (target->target_id == CAM_TARGET_WILDCARD
4730 	 || device->lun_id == CAM_LUN_WILDCARD)
4731 		return;
4732 
4733 	/*
4734 	 * We need our own path with wildcards expanded to
4735 	 * handle certain types of events.
4736 	 */
4737 	if ((async_code == AC_SENT_BDR)
4738 	 || (async_code == AC_BUS_RESET)
4739 	 || (async_code == AC_INQ_CHANGED))
4740 		status = xpt_compile_path(&newpath, NULL,
4741 					  bus->path_id,
4742 					  target->target_id,
4743 					  device->lun_id);
4744 	else
4745 		status = CAM_REQ_CMP_ERR;
4746 
4747 	if (status == CAM_REQ_CMP) {
4748 
4749 		/*
4750 		 * Allow transfer negotiation to occur in a
4751 		 * tag free environment.
4752 		 */
4753 		if (async_code == AC_SENT_BDR
4754 		 || async_code == AC_BUS_RESET)
4755 			xpt_toggle_tags(&newpath);
4756 
4757 		if (async_code == AC_INQ_CHANGED) {
4758 			/*
4759 			 * We've sent a start unit command, or
4760 			 * something similar, to a device that
4761 			 * may have caused its inquiry data to
4762 			 * change.  So we re-scan the device to
4763 			 * refresh the inquiry data for it.
4764 			 */
4765 			xpt_scan_lun(newpath.periph, &newpath,
4766 				     CAM_EXPECT_INQ_CHANGE, NULL);
4767 		}
4768 		xpt_release_path(&newpath);
4769 	} else if (async_code == AC_LOST_DEVICE) {
4770 		device->flags |= CAM_DEV_UNCONFIGURED;
4771 	} else if (async_code == AC_TRANSFER_NEG) {
4772 		struct ccb_trans_settings *settings;
4773 
4774 		settings = (struct ccb_trans_settings *)async_arg;
4775 		xpt_set_transfer_settings(settings, device,
4776 					  /*async_update*/TRUE);
4777 	}
4778 }
4779 
4780 u_int32_t
4781 xpt_freeze_devq(struct cam_path *path, u_int count)
4782 {
4783 	int s;
4784 	struct ccb_hdr *ccbh;
4785 
4786 	GIANT_REQUIRED;
4787 
4788 	s = splcam();
4789 	path->device->qfrozen_cnt += count;
4790 
4791 	/*
4792 	 * Mark the last CCB in the queue as needing
4793 	 * to be requeued if the driver hasn't
4794 	 * changed its state yet.  This fixes a race
4795 	 * where a ccb is just about to be queued to
4796 	 * a controller driver when its interrupt routine
4797 	 * freezes the queue.  To completely close the
4798 	 * hole, controller drivers must check to see
4799 	 * if a ccb's status is still CAM_REQ_INPROG
4800 	 * under spl protection just before they queue
4801 	 * the CCB.  See ahc_action/ahc_freeze_devq for
4802 	 * an example.
4803 	 */
4804 	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4805 	if (ccbh && ccbh->status == CAM_REQ_INPROG)
4806 		ccbh->status = CAM_REQUEUE_REQ;
4807 	splx(s);
4808 	return (path->device->qfrozen_cnt);
4809 }
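
/*
 * Example (a hedged sketch, not part of the original file): the
 * freeze/release pairing.  Error recovery freezes the device queue,
 * does its work, and thaws the queue with a matching release.
 */
static void
example_pause_device(struct cam_path *path)
{
	xpt_freeze_devq(path, /*count*/1);
	/* ... recover while no new CCBs reach the SIM ... */
	xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
}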
4810 
4811 u_int32_t
4812 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4813 {
4814 	GIANT_REQUIRED;
4815 
4816 	sim->devq->send_queue.qfrozen_cnt += count;
4817 	if (sim->devq->active_dev != NULL) {
4818 		struct ccb_hdr *ccbh;
4819 
4820 		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4821 				  ccb_hdr_tailq);
4822 		if (ccbh && ccbh->status == CAM_REQ_INPROG)
4823 			ccbh->status = CAM_REQUEUE_REQ;
4824 	}
4825 	return (sim->devq->send_queue.qfrozen_cnt);
4826 }
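
/*
 * Example (a hedged sketch, not part of the original file): how a SIM
 * commonly reacts to a controller resource shortage.  It freezes its
 * queue, then requeues the CCB with CAM_RELEASE_SIMQ set so the XPT
 * thaws the queue on its behalf once the completion is processed.
 */
static void
example_sim_resource_shortage(struct cam_sim *sim, union ccb *ccb)
{
	xpt_freeze_simq(sim, /*count*/1);
	ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
	xpt_done(ccb);
}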
4827 
4828 static void
4829 xpt_release_devq_timeout(void *arg)
4830 {
4831 	struct cam_ed *device;
4832 
4833 	device = (struct cam_ed *)arg;
4834 
4835 	xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4836 }
4837 
4838 void
4839 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4840 {
4841 	GIANT_REQUIRED;
4842 
4843 	xpt_release_devq_device(path->device, count, run_queue);
4844 }
4845 
4846 static void
4847 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4848 {
4849 	int	rundevq;
4850 	int	s0, s1;
4851 
4852 	rundevq = 0;
4853 	s0 = splsoftcam();
4854 	s1 = splcam();
4855 	if (dev->qfrozen_cnt > 0) {
4856 
4857 		count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4858 		dev->qfrozen_cnt -= count;
4859 		if (dev->qfrozen_cnt == 0) {
4860 
4861 			/*
4862 			 * No longer need to wait for a successful
4863 			 * command completion.
4864 			 */
4865 			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4866 
4867 			/*
4868 			 * Remove any timeouts that might be scheduled
4869 			 * to release this queue.
4870 			 */
4871 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4872 				untimeout(xpt_release_devq_timeout, dev,
4873 					  dev->c_handle);
4874 				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4875 			}
4876 
4877 			/*
4878 			 * Now that we are unfrozen schedule the
4879 			 * device so any pending transactions are
4880 			 * run.
4881 			 */
4882 			if ((dev->ccbq.queue.entries > 0)
4883 			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4884 			 && (run_queue != 0)) {
4885 				rundevq = 1;
4886 			}
4887 		}
4888 	}
4889 	splx(s1);
4890 	if (rundevq != 0)
4891 		xpt_run_dev_sendq(dev->target->bus);
4892 	splx(s0);
4893 }
4894 
4895 void
4896 xpt_release_simq(struct cam_sim *sim, int run_queue)
4897 {
4898 	int	s;
4899 	struct	camq *sendq;
4900 
4901 	GIANT_REQUIRED;
4902 
4903 	sendq = &(sim->devq->send_queue);
4904 	s = splcam();
4905 	if (sendq->qfrozen_cnt > 0) {
4906 
4907 		sendq->qfrozen_cnt--;
4908 		if (sendq->qfrozen_cnt == 0) {
4909 			struct cam_eb *bus;
4910 
4911 			/*
4912 			 * If there is a timeout scheduled to release this
4913 			 * sim queue, remove it.  The queue frozen count is
4914 			 * already at 0.
4915 			 */
4916 			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4917 				untimeout(xpt_release_simq_timeout, sim,
4918 					  sim->c_handle);
4919 				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4920 			}
4921 			bus = xpt_find_bus(sim->path_id);
4922 			splx(s);
4923 
4924 			if (run_queue) {
4925 				/*
4926 				 * Now that we are unfrozen run the send queue.
4927 				 */
4928 				xpt_run_dev_sendq(bus);
4929 			}
4930 			xpt_release_bus(bus);
4931 		} else
4932 			splx(s);
4933 	} else
4934 		splx(s);
4935 }
4936 
4937 static void
4938 xpt_release_simq_timeout(void *arg)
4939 {
4940 	struct cam_sim *sim;
4941 
4942 	sim = (struct cam_sim *)arg;
4943 	xpt_release_simq(sim, /* run_queue */ TRUE);
4944 }
4945 
4946 void
4947 xpt_done(union ccb *done_ccb)
4948 {
4949 	int s;
4950 
4951 	s = splcam();
4952 
4953 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4954 	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4955 		/*
4956 		 * Queue up the request for handling by our SWI handler;
4957 		 * this applies to any of the "non-immediate" types of ccbs.
4958 		 */
4959 		switch (done_ccb->ccb_h.path->periph->type) {
4960 		case CAM_PERIPH_BIO:
4961 			mtx_lock(&cam_bioq_lock);
4962 			TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4963 					  sim_links.tqe);
4964 			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4965 			mtx_unlock(&cam_bioq_lock);
4966 			swi_sched(cambio_ih, 0);
4967 			break;
4968 		default:
4969 			panic("unknown periph type %d",
4970 			    done_ccb->ccb_h.path->periph->type);
4971 		}
4972 	}
4973 	splx(s);
4974 }
4975 
4976 union ccb *
4977 xpt_alloc_ccb(void)
4978 {
4979 	union ccb *new_ccb;
4980 
4981 	GIANT_REQUIRED;
4982 
4983 	new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_WAITOK);
4984 	return (new_ccb);
4985 }
4986 
4987 union ccb *
4988 xpt_alloc_ccb_nowait(void)
4989 {
4990 	union ccb *new_ccb;
4991 
4992 	GIANT_REQUIRED;
4993 
4994 	new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_NOWAIT);
4995 	return (new_ccb);
4996 }
4997 
4998 void
4999 xpt_free_ccb(union ccb *free_ccb)
5000 {
5001 	free(free_ccb, M_CAMXPT);
5002 }
5003 
5004 
5005 
5006 /* Private XPT functions */
5007 
5008 /*
5009  * Get a CAM control block for the caller. Charge the structure to the device
5010  * referenced by the path.  If this device has no 'credits' then the
5011  * device already has the maximum number of outstanding operations under way
5012  * and we return NULL. If we don't have sufficient resources to allocate more
5013  * ccbs, we also return NULL.
5014  */
5015 static union ccb *
5016 xpt_get_ccb(struct cam_ed *device)
5017 {
5018 	union ccb *new_ccb;
5019 	int s;
5020 
5021 	s = splsoftcam();
5022 	if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
5023 		new_ccb = xpt_alloc_ccb_nowait();
5024 		if (new_ccb == NULL) {
5025 			splx(s);
5026 			return (NULL);
5027 		}
5028 		callout_handle_init(&new_ccb->ccb_h.timeout_ch);
5029 		SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
5030 				  xpt_links.sle);
5031 		xpt_ccb_count++;
5032 	}
5033 	cam_ccbq_take_opening(&device->ccbq);
5034 	SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
5035 	splx(s);
5036 	return (new_ccb);
5037 }
5038 
5039 static void
5040 xpt_release_bus(struct cam_eb *bus)
5041 {
5042 	int s;
5043 
5044 	s = splcam();
5045 	if ((--bus->refcount == 0)
5046 	 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
5047 		TAILQ_REMOVE(&xpt_busses, bus, links);
5048 		bus_generation++;
5049 		splx(s);
5050 		free(bus, M_CAMXPT);
5051 	} else
5052 		splx(s);
5053 }
5054 
5055 static struct cam_et *
5056 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
5057 {
5058 	struct cam_et *target;
5059 
5060 	target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
5061 	if (target != NULL) {
5062 		struct cam_et *cur_target;
5063 
5064 		TAILQ_INIT(&target->ed_entries);
5065 		target->bus = bus;
5066 		target->target_id = target_id;
5067 		target->refcount = 1;
5068 		target->generation = 0;
5069 		timevalclear(&target->last_reset);
5070 		/*
5071 		 * Hold a reference to our parent bus so it
5072 		 * will not go away before we do.
5073 		 */
5074 		bus->refcount++;
5075 
5076 		/* Insertion sort into our bus's target list */
5077 		cur_target = TAILQ_FIRST(&bus->et_entries);
5078 		while (cur_target != NULL && cur_target->target_id < target_id)
5079 			cur_target = TAILQ_NEXT(cur_target, links);
5080 
5081 		if (cur_target != NULL) {
5082 			TAILQ_INSERT_BEFORE(cur_target, target, links);
5083 		} else {
5084 			TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
5085 		}
5086 		bus->generation++;
5087 	}
5088 	return (target);
5089 }
5090 
5091 static void
5092 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
5093 {
5094 	int s;
5095 
5096 	s = splcam();
5097 	if ((--target->refcount == 0)
5098 	 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
5099 		TAILQ_REMOVE(&bus->et_entries, target, links);
5100 		bus->generation++;
5101 		splx(s);
5102 		free(target, M_CAMXPT);
5103 		xpt_release_bus(bus);
5104 	} else
5105 		splx(s);
5106 }
5107 
5108 static struct cam_ed *
5109 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
5110 {
5111 #ifdef CAM_NEW_TRAN_CODE
5112 	struct	   cam_path path;
5113 #endif /* CAM_NEW_TRAN_CODE */
5114 	struct	   cam_ed *device;
5115 	struct	   cam_devq *devq;
5116 	cam_status status;
5117 
5118 	if (SIM_DEAD(bus->sim))
5119 		return (NULL);
5120 
5121 	/* Make space for us in the device queue on our bus */
5122 	devq = bus->sim->devq;
5123 	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
5124 
5125 	if (status != CAM_REQ_CMP) {
5126 		device = NULL;
5127 	} else {
5128 		device = (struct cam_ed *)malloc(sizeof(*device),
5129 						 M_CAMXPT, M_NOWAIT);
5130 	}
5131 
5132 	if (device != NULL) {
5133 		struct cam_ed *cur_device;
5134 
5135 		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
5136 		device->alloc_ccb_entry.device = device;
5137 		cam_init_pinfo(&device->send_ccb_entry.pinfo);
5138 		device->send_ccb_entry.device = device;
5139 		device->target = target;
5140 		device->lun_id = lun_id;
5141 		/* Initialize our queues */
5142 		if (camq_init(&device->drvq, 0) != 0) {
5143 			free(device, M_CAMXPT);
5144 			return (NULL);
5145 		}
5146 		if (cam_ccbq_init(&device->ccbq,
5147 				  bus->sim->max_dev_openings) != 0) {
5148 			camq_fini(&device->drvq);
5149 			free(device, M_CAMXPT);
5150 			return (NULL);
5151 		}
5152 		SLIST_INIT(&device->asyncs);
5153 		SLIST_INIT(&device->periphs);
5154 		device->generation = 0;
5155 		device->owner = NULL;
5156 		/*
5157 		 * Take the default quirk entry until we have inquiry
5158 		 * data and can determine a better quirk to use.
5159 		 */
5160 		device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
5161 		bzero(&device->inq_data, sizeof(device->inq_data));
5162 		device->inq_flags = 0;
5163 		device->queue_flags = 0;
5164 		device->serial_num = NULL;
5165 		device->serial_num_len = 0;
5166 		device->qfrozen_cnt = 0;
5167 		device->flags = CAM_DEV_UNCONFIGURED;
5168 		device->tag_delay_count = 0;
5169 		device->tag_saved_openings = 0;
5170 		device->refcount = 1;
5171 		callout_handle_init(&device->c_handle);
5172 
5173 		/*
5174 		 * Hold a reference to our parent target so it
5175 		 * will not go away before we do.
5176 		 */
5177 		target->refcount++;
5178 
5179 		/*
5180 		 * XXX should be limited by number of CCBs this bus can
5181 		 * do.
5182 		 */
5183 		xpt_max_ccbs += device->ccbq.devq_openings;
5184 		/* Insertion sort into our target's device list */
5185 		cur_device = TAILQ_FIRST(&target->ed_entries);
5186 		while (cur_device != NULL && cur_device->lun_id < lun_id)
5187 			cur_device = TAILQ_NEXT(cur_device, links);
5188 		if (cur_device != NULL) {
5189 			TAILQ_INSERT_BEFORE(cur_device, device, links);
5190 		} else {
5191 			TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
5192 		}
5193 		target->generation++;
5194 #ifdef CAM_NEW_TRAN_CODE
5195 		if (lun_id != CAM_LUN_WILDCARD) {
5196 			xpt_compile_path(&path,
5197 					 NULL,
5198 					 bus->path_id,
5199 					 target->target_id,
5200 					 lun_id);
5201 			xpt_devise_transport(&path);
5202 			xpt_release_path(&path);
5203 		}
5204 #endif /* CAM_NEW_TRAN_CODE */
5205 	}
5206 	return (device);
5207 }
5208 
5209 static void
5210 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
5211 		   struct cam_ed *device)
5212 {
5213 	int s;
5214 
5215 	s = splcam();
5216 	if ((--device->refcount == 0)
5217 	 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
5218 		struct cam_devq *devq;
5219 
5220 		if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
5221 		 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
5222 			panic("Removing device while still queued for ccbs");
5223 
5224 		if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
5225 				untimeout(xpt_release_devq_timeout, device,
5226 					  device->c_handle);
5227 
5228 		TAILQ_REMOVE(&target->ed_entries, device,links);
5229 		target->generation++;
5230 		xpt_max_ccbs -= device->ccbq.devq_openings;
5231 		if (!SIM_DEAD(bus->sim)) {
5232 			/* Release our slot in the devq */
5233 			devq = bus->sim->devq;
5234 			cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
5235 		}
5236 		splx(s);
5237 		camq_fini(&device->drvq);
5238 		camq_fini(&device->ccbq.queue);
5239 		free(device, M_CAMXPT);
5240 		xpt_release_target(bus, target);
5241 	} else
5242 		splx(s);
5243 }
5244 
5245 static u_int32_t
5246 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
5247 {
5248 	int	s;
5249 	int	diff;
5250 	int	result;
5251 	struct	cam_ed *dev;
5252 
5253 	dev = path->device;
5254 	s = splsoftcam();
5255 
5256 	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
5257 	result = cam_ccbq_resize(&dev->ccbq, newopenings);
5258 	if (result == CAM_REQ_CMP && (diff < 0)) {
5259 		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
5260 	}
5261 	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5262 	 || (dev->inq_flags & SID_CmdQue) != 0)
5263 		dev->tag_saved_openings = newopenings;
5264 	/* Adjust the global limit */
5265 	xpt_max_ccbs += diff;
5266 	splx(s);
5267 	return (result);
5268 }
5269 
5270 static struct cam_eb *
5271 xpt_find_bus(path_id_t path_id)
5272 {
5273 	struct cam_eb *bus;
5274 
5275 	for (bus = TAILQ_FIRST(&xpt_busses);
5276 	     bus != NULL;
5277 	     bus = TAILQ_NEXT(bus, links)) {
5278 		if (bus->path_id == path_id) {
5279 			bus->refcount++;
5280 			break;
5281 		}
5282 	}
5283 	return (bus);
5284 }
5285 
5286 static struct cam_et *
5287 xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
5288 {
5289 	struct cam_et *target;
5290 
5291 	for (target = TAILQ_FIRST(&bus->et_entries);
5292 	     target != NULL;
5293 	     target = TAILQ_NEXT(target, links)) {
5294 		if (target->target_id == target_id) {
5295 			target->refcount++;
5296 			break;
5297 		}
5298 	}
5299 	return (target);
5300 }
5301 
5302 static struct cam_ed *
5303 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5304 {
5305 	struct cam_ed *device;
5306 
5307 	for (device = TAILQ_FIRST(&target->ed_entries);
5308 	     device != NULL;
5309 	     device = TAILQ_NEXT(device, links)) {
5310 		if (device->lun_id == lun_id) {
5311 			device->refcount++;
5312 			break;
5313 		}
5314 	}
5315 	return (device);
5316 }
5317 
5318 typedef struct {
5319 	union	ccb *request_ccb;
5320 	struct 	ccb_pathinq *cpi;
5321 	int	counter;
5322 } xpt_scan_bus_info;
5323 
5324 /*
5325  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
5326  * As the scan progresses, xpt_scan_bus is used as the
5327  * completion callback function.
5328  */
5329 static void
5330 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
5331 {
5332 	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5333 		  ("xpt_scan_bus\n"));
5334 	switch (request_ccb->ccb_h.func_code) {
5335 	case XPT_SCAN_BUS:
5336 	{
5337 		xpt_scan_bus_info *scan_info;
5338 		union	ccb *work_ccb;
5339 		struct	cam_path *path;
5340 		u_int	i;
5341 		u_int	max_target;
5342 		u_int	initiator_id;
5343 
5344 		/* Find out the characteristics of the bus */
5345 		work_ccb = xpt_alloc_ccb();
5346 		xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
5347 			      request_ccb->ccb_h.pinfo.priority);
5348 		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5349 		xpt_action(work_ccb);
5350 		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5351 			request_ccb->ccb_h.status = work_ccb->ccb_h.status;
5352 			xpt_free_ccb(work_ccb);
5353 			xpt_done(request_ccb);
5354 			return;
5355 		}
5356 
5357 		if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5358 			/*
5359 			 * Can't scan the bus on an adapter that
5360 			 * cannot perform the initiator role.
5361 			 */
5362 			request_ccb->ccb_h.status = CAM_REQ_CMP;
5363 			xpt_free_ccb(work_ccb);
5364 			xpt_done(request_ccb);
5365 			return;
5366 		}
5367 
5368 		/* Save some state for use while we probe for devices */
5369 		scan_info = (xpt_scan_bus_info *)
5370 		    malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
5371 		scan_info->request_ccb = request_ccb;
5372 		scan_info->cpi = &work_ccb->cpi;
5373 
5374 		/* Cache on our stack so we can work asynchronously */
5375 		max_target = scan_info->cpi->max_target;
5376 		initiator_id = scan_info->cpi->initiator_id;
5377 
5378 
5379 		/*
5380 		 * We can scan all targets in parallel, or do it sequentially.
5381 		 */
5382 		if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5383 			max_target = 0;
5384 			scan_info->counter = 0;
5385 		} else {
5386 			scan_info->counter = scan_info->cpi->max_target + 1;
5387 			if (scan_info->cpi->initiator_id < scan_info->counter) {
5388 				scan_info->counter--;
5389 			}
5390 		}
5391 
5392 		for (i = 0; i <= max_target; i++) {
5393 			cam_status status;
5394 			if (i == initiator_id)
5395 				continue;
5396 
5397 			status = xpt_create_path(&path, xpt_periph,
5398 						 request_ccb->ccb_h.path_id,
5399 						 i, 0);
5400 			if (status != CAM_REQ_CMP) {
5401 				printf("xpt_scan_bus: xpt_create_path failed"
5402 				       " with status %#x, bus scan halted\n",
5403 				       status);
5404 				free(scan_info, M_TEMP);
5405 				request_ccb->ccb_h.status = status;
5406 				xpt_free_ccb(work_ccb);
5407 				xpt_done(request_ccb);
5408 				break;
5409 			}
5410 			work_ccb = xpt_alloc_ccb();
5411 			xpt_setup_ccb(&work_ccb->ccb_h, path,
5412 				      request_ccb->ccb_h.pinfo.priority);
5413 			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5414 			work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5415 			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5416 			work_ccb->crcn.flags = request_ccb->crcn.flags;
5417 			xpt_action(work_ccb);
5418 		}
5419 		break;
5420 	}
5421 	case XPT_SCAN_LUN:
5422 	{
5423 		cam_status status;
5424 		struct cam_path *path;
5425 		xpt_scan_bus_info *scan_info;
5426 		path_id_t path_id;
5427 		target_id_t target_id;
5428 		lun_id_t lun_id;
5429 
5430 		/* Reuse the same CCB to query if a device was really found */
5431 		scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5432 		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5433 			      request_ccb->ccb_h.pinfo.priority);
5434 		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5435 
5436 		path_id = request_ccb->ccb_h.path_id;
5437 		target_id = request_ccb->ccb_h.target_id;
5438 		lun_id = request_ccb->ccb_h.target_lun;
5439 		xpt_action(request_ccb);
5440 
5441 		if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5442 			struct cam_ed *device;
5443 			struct cam_et *target;
5444 			int s, phl;
5445 
5446 			/*
5447 			 * If we already probed lun 0 successfully, or
5448 			 * we have additional configured luns on this
5449 			 * target that might have "gone away", go on to
5450 			 * the next lun.
5451 			 */
5452 			target = request_ccb->ccb_h.path->target;
5453 			/*
5454 			 * We may touch devices that we don't
5455 			 * hold references to, so ensure they
5456 			 * don't disappear out from under us.
5457 			 * The target above is referenced by the
5458 			 * path in the request ccb.
5459 			 */
5460 			phl = 0;
5461 			s = splcam();
5462 			device = TAILQ_FIRST(&target->ed_entries);
5463 			if (device != NULL) {
5464 				phl = CAN_SRCH_HI_SPARSE(device);
5465 				if (device->lun_id == 0)
5466 					device = TAILQ_NEXT(device, links);
5467 			}
5468 			splx(s);
5469 			if ((lun_id != 0) || (device != NULL)) {
5470 				if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5471 					lun_id++;
5472 			}
5473 		} else {
5474 			struct cam_ed *device;
5475 
5476 			device = request_ccb->ccb_h.path->device;
5477 
5478 			if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5479 				/* Try the next lun */
5480 				if (lun_id < (CAM_SCSI2_MAXLUN-1)
5481 				  || CAN_SRCH_HI_DENSE(device))
5482 					lun_id++;
5483 			}
5484 		}
5485 
5486 		/*
5487 		 * Free the current request path; we're done with it.
5488 		 */
5489 		xpt_free_path(request_ccb->ccb_h.path);
5490 
5491 		/*
5492 		 * Check to see if we should scan any further luns.
5493 		 */
5494 		if (lun_id == request_ccb->ccb_h.target_lun
5495                  || lun_id > scan_info->cpi->max_lun) {
5496 			int done;
5497 
5498  hop_again:
5499 			done = 0;
5500 			if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5501 				scan_info->counter++;
5502 				if (scan_info->counter ==
5503 				    scan_info->cpi->initiator_id) {
5504 					scan_info->counter++;
5505 				}
5506 				if (scan_info->counter >=
5507 				    scan_info->cpi->max_target+1) {
5508 					done = 1;
5509 				}
5510 			} else {
5511 				scan_info->counter--;
5512 				if (scan_info->counter == 0) {
5513 					done = 1;
5514 				}
5515 			}
5516 			if (done) {
5517 				xpt_free_ccb(request_ccb);
5518 				xpt_free_ccb((union ccb *)scan_info->cpi);
5519 				request_ccb = scan_info->request_ccb;
5520 				free(scan_info, M_TEMP);
5521 				request_ccb->ccb_h.status = CAM_REQ_CMP;
5522 				xpt_done(request_ccb);
5523 				break;
5524 			}
5525 
5526 			if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
5527 				break;
5528 			}
5529 			status = xpt_create_path(&path, xpt_periph,
5530 			    scan_info->request_ccb->ccb_h.path_id,
5531 			    scan_info->counter, 0);
5532 			if (status != CAM_REQ_CMP) {
5533 				printf("xpt_scan_bus: xpt_create_path failed"
5534 				    " with status %#x, bus scan halted\n",
5535 				    status);
5536 				xpt_free_ccb(request_ccb);
5537 				xpt_free_ccb((union ccb *)scan_info->cpi);
5538 				request_ccb = scan_info->request_ccb;
5539 				free(scan_info, M_TEMP);
5540 				request_ccb->ccb_h.status = status;
5541 				xpt_done(request_ccb);
5542 				break;
5543 			}
5544 			xpt_setup_ccb(&request_ccb->ccb_h, path,
5545 			    request_ccb->ccb_h.pinfo.priority);
5546 			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5547 			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5548 			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5549 			request_ccb->crcn.flags =
5550 			    scan_info->request_ccb->crcn.flags;
5551 		} else {
5552 			status = xpt_create_path(&path, xpt_periph,
5553 						 path_id, target_id, lun_id);
5554 			if (status != CAM_REQ_CMP) {
5555 				printf("xpt_scan_bus: xpt_create_path failed "
5556 				       "with status %#x, halting LUN scan\n",
5557 				       status);
5558 				goto hop_again;
5559 			}
5560 			xpt_setup_ccb(&request_ccb->ccb_h, path,
5561 				      request_ccb->ccb_h.pinfo.priority);
5562 			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5563 			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5564 			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5565 			request_ccb->crcn.flags =
5566 				scan_info->request_ccb->crcn.flags;
5567 		}
5568 		xpt_action(request_ccb);
5569 		break;
5570 	}
5571 	default:
5572 		break;
5573 	}
5574 }
5575 
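/*
 * States for the probe peripheral's state machine.  probeschedule() picks
 * the starting state (a TUR for a previously configured lun 0, otherwise
 * an inquiry), and probedone() advances through the remaining states:
 * a full inquiry if the device has more data to give, a mode sense of the
 * control page for tag queueing devices, a serial number VPD inquiry, and
 * a final TUR to let the controller renegotiate transfers.
 */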
5576 typedef enum {
5577 	PROBE_TUR,
5578 	PROBE_INQUIRY,
5579 	PROBE_FULL_INQUIRY,
5580 	PROBE_MODE_SENSE,
5581 	PROBE_SERIAL_NUM,
5582 	PROBE_TUR_FOR_NEGOTIATION
5583 } probe_action;
5584 
5585 typedef enum {
5586 	PROBE_INQUIRY_CKSUM	= 0x01,
5587 	PROBE_SERIAL_CKSUM	= 0x02,
5588 	PROBE_NO_ANNOUNCE	= 0x04
5589 } probe_flags;
5590 
5591 typedef struct {
5592 	TAILQ_HEAD(, ccb_hdr) request_ccbs;
5593 	probe_action	action;
5594 	union ccb	saved_ccb;
5595 	probe_flags	flags;
5596 	MD5_CTX		context;
5597 	u_int8_t	digest[16];
5598 } probe_softc;
5599 
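/*
 * Begin a probe of a single lun.  If a probe periph is already attached
 * to the path, queue the request on it; otherwise allocate a new one.
 * When called without a request CCB (an internally generated rescan),
 * allocate a CCB and path that xptscandone() will free on completion.
 */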
5600 static void
5601 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5602 	     cam_flags flags, union ccb *request_ccb)
5603 {
5604 	struct ccb_pathinq cpi;
5605 	cam_status status;
5606 	struct cam_path *new_path;
5607 	struct cam_periph *old_periph;
5608 	int s;
5609 
5610 	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5611 		  ("xpt_scan_lun\n"));
5612 
5613 	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5614 	cpi.ccb_h.func_code = XPT_PATH_INQ;
5615 	xpt_action((union ccb *)&cpi);
5616 
5617 	if (cpi.ccb_h.status != CAM_REQ_CMP) {
5618 		if (request_ccb != NULL) {
5619 			request_ccb->ccb_h.status = cpi.ccb_h.status;
5620 			xpt_done(request_ccb);
5621 		}
5622 		return;
5623 	}
5624 
5625 	if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5626 		/*
5627 		 * Can't scan the bus on an adapter that
5628 		 * cannot perform the initiator role.
5629 		 */
5630 		if (request_ccb != NULL) {
5631 			request_ccb->ccb_h.status = CAM_REQ_CMP;
5632 			xpt_done(request_ccb);
5633 		}
5634 		return;
5635 	}
5636 
5637 	if (request_ccb == NULL) {
5638 		request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
5639 		if (request_ccb == NULL) {
5640 			xpt_print_path(path);
5641 			printf("xpt_scan_lun: can't allocate CCB, can't "
5642 			       "continue\n");
5643 			return;
5644 		}
5645 		new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
5646 		if (new_path == NULL) {
5647 			xpt_print_path(path);
5648 			printf("xpt_scan_lun: can't allocate path, can't "
5649 			       "continue\n");
5650 			free(request_ccb, M_TEMP);
5651 			return;
5652 		}
5653 		status = xpt_compile_path(new_path, xpt_periph,
5654 					  path->bus->path_id,
5655 					  path->target->target_id,
5656 					  path->device->lun_id);
5657 
5658 		if (status != CAM_REQ_CMP) {
5659 			xpt_print_path(path);
5660 			printf("xpt_scan_lun: can't compile path, can't "
5661 			       "continue\n");
5662 			free(request_ccb, M_TEMP);
5663 			free(new_path, M_TEMP);
5664 			return;
5665 		}
5666 		xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5667 		request_ccb->ccb_h.cbfcnp = xptscandone;
5668 		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5669 		request_ccb->crcn.flags = flags;
5670 	}
5671 
5672 	s = splsoftcam();
5673 	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5674 		probe_softc *softc;
5675 
5676 		softc = (probe_softc *)old_periph->softc;
5677 		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5678 				  periph_links.tqe);
5679 	} else {
5680 		status = cam_periph_alloc(proberegister, NULL, probecleanup,
5681 					  probestart, "probe",
5682 					  CAM_PERIPH_BIO,
5683 					  request_ccb->ccb_h.path, NULL, 0,
5684 					  request_ccb);
5685 
5686 		if (status != CAM_REQ_CMP) {
5687 			xpt_print_path(path);
5688 			printf("xpt_scan_lun: cam_alloc_periph returned an "
5689 			       "error, can't continue probe\n");
5690 			request_ccb->ccb_h.status = status;
5691 			xpt_done(request_ccb);
5692 		}
5693 	}
5694 	splx(s);
5695 }
5696 
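/*
 * Completion handler for the CCB and path that xpt_scan_lun() allocates
 * when it is invoked without a request CCB; release and free them.
 */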
5697 static void
5698 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5699 {
5700 	xpt_release_path(done_ccb->ccb_h.path);
5701 	free(done_ccb->ccb_h.path, M_TEMP);
5702 	free(done_ccb, M_TEMP);
5703 }
5704 
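/*
 * Register a probe periph instance: allocate the softc, queue the request
 * CCB, and hold off the first command until any bus settle delay from a
 * recent reset has elapsed.
 */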
5705 static cam_status
5706 proberegister(struct cam_periph *periph, void *arg)
5707 {
5708 	union ccb *request_ccb;	/* CCB representing the probe request */
5709 	probe_softc *softc;
5710 
5711 	request_ccb = (union ccb *)arg;
5712 	if (periph == NULL) {
5713 		printf("proberegister: periph was NULL!!\n");
5714 		return(CAM_REQ_CMP_ERR);
5715 	}
5716 
5717 	if (request_ccb == NULL) {
5718 		printf("proberegister: no probe CCB, "
5719 		       "can't register device\n");
5720 		return(CAM_REQ_CMP_ERR);
5721 	}
5722 
5723 	softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
5724 
5725 	if (softc == NULL) {
5726 		printf("proberegister: Unable to probe new device. "
5727 		       "Unable to allocate softc\n");
5728 		return(CAM_REQ_CMP_ERR);
5729 	}
5730 	TAILQ_INIT(&softc->request_ccbs);
5731 	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5732 			  periph_links.tqe);
5733 	softc->flags = 0;
5734 	periph->softc = softc;
5735 	cam_periph_acquire(periph);
5736 	/*
5737 	 * Ensure we've waited at least a bus settle
5738 	 * delay before attempting to probe the device.
5739 	 * For HBAs that don't do bus resets, this won't make a difference.
5740 	 */
5741 	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5742 				      scsi_delay);
5743 	probeschedule(periph);
5744 	return(CAM_REQ_CMP);
5745 }
5746 
5747 static void
5748 probeschedule(struct cam_periph *periph)
5749 {
5750 	struct ccb_pathinq cpi;
5751 	union ccb *ccb;
5752 	probe_softc *softc;
5753 
5754 	softc = (probe_softc *)periph->softc;
5755 	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5756 
5757 	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5758 	cpi.ccb_h.func_code = XPT_PATH_INQ;
5759 	xpt_action((union ccb *)&cpi);
5760 
5761 	/*
5762 	 * If a device has gone away and another device, or the same one,
5763 	 * is back in the same place, it should have a unit attention
5764 	 * condition pending.  It will not report the unit attention in
5765 	 * response to an inquiry, which may leave invalid transfer
5766 	 * negotiations in effect.  The TUR will reveal the unit attention
5767 	 * condition.  Only send the TUR for lun 0, since some devices
5768 	 * will get confused by commands other than inquiry to non-existent
5769 	 * luns.  If you think a device has gone away start your scan from
5770 	 * luns.  If you think a device has gone away, start your scan from
5771 	 * lun 0.  This will ensure that any bogus transfer settings are
5772 	 *
5773 	 * If we haven't seen the device before and the controller supports
5774 	 * some kind of transfer negotiation, negotiate with the first
5775 	 * sent command if no bus reset was performed at startup.  This
5776 	 * ensures that the device is not confused by transfer negotiation
5777 	 * settings left over by loader or BIOS action.
5778 	 */
5779 	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5780 	 && (ccb->ccb_h.target_lun == 0)) {
5781 		softc->action = PROBE_TUR;
5782 	} else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5783 	      && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5784 		proberequestdefaultnegotiation(periph);
5785 		softc->action = PROBE_INQUIRY;
5786 	} else {
5787 		softc->action = PROBE_INQUIRY;
5788 	}
5789 
5790 	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5791 		softc->flags |= PROBE_NO_ANNOUNCE;
5792 	else
5793 		softc->flags &= ~PROBE_NO_ANNOUNCE;
5794 
5795 	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5796 }
5797 
5798 static void
5799 probestart(struct cam_periph *periph, union ccb *start_ccb)
5800 {
5801 	/* Probe the device that our peripheral driver points to */
5802 	struct ccb_scsiio *csio;
5803 	probe_softc *softc;
5804 
5805 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5806 
5807 	softc = (probe_softc *)periph->softc;
5808 	csio = &start_ccb->csio;
5809 
5810 	switch (softc->action) {
5811 	case PROBE_TUR:
5812 	case PROBE_TUR_FOR_NEGOTIATION:
5813 	{
5814 		scsi_test_unit_ready(csio,
5815 				     /*retries*/4,
5816 				     probedone,
5817 				     MSG_SIMPLE_Q_TAG,
5818 				     SSD_FULL_SIZE,
5819 				     /*timeout*/60000);
5820 		break;
5821 	}
5822 	case PROBE_INQUIRY:
5823 	case PROBE_FULL_INQUIRY:
5824 	{
5825 		u_int inquiry_len;
5826 		struct scsi_inquiry_data *inq_buf;
5827 
5828 		inq_buf = &periph->path->device->inq_data;
5829 		/*
5830 		 * If the device is currently configured, we calculate an
5831 		 * MD5 checksum of the inquiry data, and if the serial number
5832 		 * length is greater than 0, add the serial number data
5833 		 * into the checksum as well.  Once the inquiry and the
5834 		 * serial number check finish, we attempt to figure out
5835 		 * whether we still have the same device.
5836 		 */
5837 		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5838 
5839 			MD5Init(&softc->context);
5840 			MD5Update(&softc->context, (unsigned char *)inq_buf,
5841 				  sizeof(struct scsi_inquiry_data));
5842 			softc->flags |= PROBE_INQUIRY_CKSUM;
5843 			if (periph->path->device->serial_num_len > 0) {
5844 				MD5Update(&softc->context,
5845 					  periph->path->device->serial_num,
5846 					  periph->path->device->serial_num_len);
5847 				softc->flags |= PROBE_SERIAL_CKSUM;
5848 			}
5849 			MD5Final(softc->digest, &softc->context);
5850 		}
5851 
5852 		if (softc->action == PROBE_INQUIRY)
5853 			inquiry_len = SHORT_INQUIRY_LENGTH;
5854 		else
5855 			inquiry_len = inq_buf->additional_length
5856 				    + offsetof(struct scsi_inquiry_data,
5857                                                additional_length) + 1;
5858 
5859 		/*
5860 		 * Some parallel SCSI devices fail to send an
5861 		 * ignore wide residue message when dealing with
5862 		 * odd length inquiry requests.  Round up to be
5863 		 * safe.
5864 		 */
5865 		inquiry_len = roundup2(inquiry_len, 2);
5866 
5867 		scsi_inquiry(csio,
5868 			     /*retries*/4,
5869 			     probedone,
5870 			     MSG_SIMPLE_Q_TAG,
5871 			     (u_int8_t *)inq_buf,
5872 			     inquiry_len,
5873 			     /*evpd*/FALSE,
5874 			     /*page_code*/0,
5875 			     SSD_MIN_SIZE,
5876 			     /*timeout*/60 * 1000);
5877 		break;
5878 	}
5879 	case PROBE_MODE_SENSE:
5880 	{
5881 		void  *mode_buf;
5882 		int    mode_buf_len;
5883 
5884 		mode_buf_len = sizeof(struct scsi_mode_header_6)
5885 			     + sizeof(struct scsi_mode_blk_desc)
5886 			     + sizeof(struct scsi_control_page);
5887 		mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
5888 		if (mode_buf != NULL) {
5889 			scsi_mode_sense(csio,
5890 					/*retries*/4,
5891 					probedone,
5892 					MSG_SIMPLE_Q_TAG,
5893 					/*dbd*/FALSE,
5894 					SMS_PAGE_CTRL_CURRENT,
5895 					SMS_CONTROL_MODE_PAGE,
5896 					mode_buf,
5897 					mode_buf_len,
5898 					SSD_FULL_SIZE,
5899 					/*timeout*/60000);
5900 			break;
5901 		}
5902 		xpt_print_path(periph->path);
5903 		printf("Unable to mode sense control page - malloc failure\n");
5904 		softc->action = PROBE_SERIAL_NUM;
5905 	}
5906 	/* FALLTHROUGH */
5907 	case PROBE_SERIAL_NUM:
5908 	{
5909 		struct scsi_vpd_unit_serial_number *serial_buf;
5910 		struct cam_ed* device;
5911 
5912 		serial_buf = NULL;
5913 		device = periph->path->device;
5914 		device->serial_num = NULL;
5915 		device->serial_num_len = 0;
5916 
5917 		if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
5918 			serial_buf = (struct scsi_vpd_unit_serial_number *)
5919 				malloc(sizeof(*serial_buf), M_TEMP,
5920 					M_NOWAIT | M_ZERO);
5921 
5922 		if (serial_buf != NULL) {
5923 			scsi_inquiry(csio,
5924 				     /*retries*/4,
5925 				     probedone,
5926 				     MSG_SIMPLE_Q_TAG,
5927 				     (u_int8_t *)serial_buf,
5928 				     sizeof(*serial_buf),
5929 				     /*evpd*/TRUE,
5930 				     SVPD_UNIT_SERIAL_NUMBER,
5931 				     SSD_MIN_SIZE,
5932 				     /*timeout*/60 * 1000);
5933 			break;
5934 		}
5935 		/*
5936 		 * We'll have to do without, let our probedone
5937 		 * routine finish up for us.
5938 		 */
5939 		start_ccb->csio.data_ptr = NULL;
5940 		probedone(periph, start_ccb);
5941 		return;
5942 	}
5943 	}
5944 	xpt_action(start_ccb);
5945 }
5946 
5947 static void
5948 proberequestdefaultnegotiation(struct cam_periph *periph)
5949 {
5950 	struct ccb_trans_settings cts;
5951 
5952 	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5953 	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5954 #ifdef CAM_NEW_TRAN_CODE
5955 	cts.type = CTS_TYPE_USER_SETTINGS;
5956 #else /* CAM_NEW_TRAN_CODE */
5957 	cts.flags = CCB_TRANS_USER_SETTINGS;
5958 #endif /* CAM_NEW_TRAN_CODE */
5959 	xpt_action((union ccb *)&cts);
5960 	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5961 #ifdef CAM_NEW_TRAN_CODE
5962 	cts.type = CTS_TYPE_CURRENT_SETTINGS;
5963 #else /* CAM_NEW_TRAN_CODE */
5964 	cts.flags &= ~CCB_TRANS_USER_SETTINGS;
5965 	cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
5966 #endif /* CAM_NEW_TRAN_CODE */
5967 	xpt_action((union ccb *)&cts);
5968 }
5969 
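/*
 * Handle completion of each probe step and advance the state machine.
 * Once a request CCB has been fully serviced, complete it and either
 * start the next queued request or tear down the probe periph.
 */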
5970 static void
5971 probedone(struct cam_periph *periph, union ccb *done_ccb)
5972 {
5973 	probe_softc *softc;
5974 	struct cam_path *path;
5975 	u_int32_t  priority;
5976 
5977 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5978 
5979 	softc = (probe_softc *)periph->softc;
5980 	path = done_ccb->ccb_h.path;
5981 	priority = done_ccb->ccb_h.pinfo.priority;
5982 
5983 	switch (softc->action) {
5984 	case PROBE_TUR:
5985 	{
5986 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5987 
5988 			if (cam_periph_error(done_ccb, 0,
5989 					     SF_NO_PRINT, NULL) == ERESTART)
5990 				return;
5991 			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5992 				/* Don't wedge the queue */
5993 				xpt_release_devq(done_ccb->ccb_h.path,
5994 						 /*count*/1,
5995 						 /*run_queue*/TRUE);
5996 		}
5997 		softc->action = PROBE_INQUIRY;
5998 		xpt_release_ccb(done_ccb);
5999 		xpt_schedule(periph, priority);
6000 		return;
6001 	}
6002 	case PROBE_INQUIRY:
6003 	case PROBE_FULL_INQUIRY:
6004 	{
6005 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6006 			struct scsi_inquiry_data *inq_buf;
6007 			u_int8_t periph_qual;
6008 
6009 			path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
6010 			inq_buf = &path->device->inq_data;
6011 
6012 			periph_qual = SID_QUAL(inq_buf);
6013 
6014 			switch(periph_qual) {
6015 			case SID_QUAL_LU_CONNECTED:
6016 			{
6017 				u_int8_t len;
6018 
6019 				/*
6020 				 * We conservatively request only
6021 				 * SHORT_INQUIRY_LENGTH bytes of inquiry
6022 				 * information during our first try
6023 				 * at sending an INQUIRY. If the device
6024 				 * has more information to give,
6025 				 * perform a second request specifying
6026 				 * the amount of information the device
6027 				 * is willing to give.
6028 				 */
6029 				len = inq_buf->additional_length
6030 				    + offsetof(struct scsi_inquiry_data,
6031                                                additional_length) + 1;
6032 				if (softc->action == PROBE_INQUIRY
6033 				 && len > SHORT_INQUIRY_LENGTH) {
6034 					softc->action = PROBE_FULL_INQUIRY;
6035 					xpt_release_ccb(done_ccb);
6036 					xpt_schedule(periph, priority);
6037 					return;
6038 				}
6039 
6040 				xpt_find_quirk(path->device);
6041 
6042 #ifdef CAM_NEW_TRAN_CODE
6043 				xpt_devise_transport(path);
6044 #endif /* CAM_NEW_TRAN_CODE */
6045 				if (INQ_DATA_TQ_ENABLED(inq_buf))
6046 					softc->action = PROBE_MODE_SENSE;
6047 				else
6048 					softc->action = PROBE_SERIAL_NUM;
6049 
6050 				path->device->flags &= ~CAM_DEV_UNCONFIGURED;
6051 
6052 				xpt_release_ccb(done_ccb);
6053 				xpt_schedule(periph, priority);
6054 				return;
6055 			}
6056 			default:
6057 				break;
6058 			}
6059 		} else if (cam_periph_error(done_ccb, 0,
6060 					    done_ccb->ccb_h.target_lun > 0
6061 					    ? SF_RETRY_UA|SF_QUIET_IR
6062 					    : SF_RETRY_UA,
6063 					    &softc->saved_ccb) == ERESTART) {
6064 			return;
6065 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6066 			/* Don't wedge the queue */
6067 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6068 					 /*run_queue*/TRUE);
6069 		}
6070 		/*
6071 		 * If we get to this point, we got an error status back
6072 		 * from the inquiry and the error status doesn't require
6073 		 * automatically retrying the command.  Therefore, the
6074 		 * inquiry failed.  If we had inquiry information before
6075 		 * for this device, but this latest inquiry command failed,
6076 		 * the device has probably gone away.  If this device isn't
6077 		 * already marked unconfigured, notify the peripheral
6078 		 * drivers that this device is no more.
6079 		 */
6080 		if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
6081 			/* Send the async notification. */
6082 			xpt_async(AC_LOST_DEVICE, path, NULL);
6083 
6084 		xpt_release_ccb(done_ccb);
6085 		break;
6086 	}
6087 	case PROBE_MODE_SENSE:
6088 	{
6089 		struct ccb_scsiio *csio;
6090 		struct scsi_mode_header_6 *mode_hdr;
6091 
6092 		csio = &done_ccb->csio;
6093 		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
6094 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6095 			struct scsi_control_page *page;
6096 			u_int8_t *offset;
6097 
6098 			offset = ((u_int8_t *)&mode_hdr[1])
6099 			    + mode_hdr->blk_desc_len;
6100 			page = (struct scsi_control_page *)offset;
6101 			path->device->queue_flags = page->queue_flags;
6102 		} else if (cam_periph_error(done_ccb, 0,
6103 					    SF_RETRY_UA|SF_NO_PRINT,
6104 					    &softc->saved_ccb) == ERESTART) {
6105 			return;
6106 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6107 			/* Don't wedge the queue */
6108 			xpt_release_devq(done_ccb->ccb_h.path,
6109 					 /*count*/1, /*run_queue*/TRUE);
6110 		}
6111 		xpt_release_ccb(done_ccb);
6112 		free(mode_hdr, M_TEMP);
6113 		softc->action = PROBE_SERIAL_NUM;
6114 		xpt_schedule(periph, priority);
6115 		return;
6116 	}
6117 	case PROBE_SERIAL_NUM:
6118 	{
6119 		struct ccb_scsiio *csio;
6120 		struct scsi_vpd_unit_serial_number *serial_buf;
6121 		u_int32_t  priority;
6122 		int changed;
6123 		int have_serialnum;
6124 
6125 		changed = 1;
6126 		have_serialnum = 0;
6127 		csio = &done_ccb->csio;
6128 		priority = done_ccb->ccb_h.pinfo.priority;
6129 		serial_buf =
6130 		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
6131 
6132 		/* Clean up from previous instance of this device */
6133 		if (path->device->serial_num != NULL) {
6134 			free(path->device->serial_num, M_CAMXPT);
6135 			path->device->serial_num = NULL;
6136 			path->device->serial_num_len = 0;
6137 		}
6138 
6139 		if (serial_buf == NULL) {
6140 			/*
6141 			 * Don't process the command as it was never sent
6142 			 */
6143 		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
6144 			&& (serial_buf->length > 0)) {
6145 
6146 			have_serialnum = 1;
6147 			path->device->serial_num =
6148 				(u_int8_t *)malloc((serial_buf->length + 1),
6149 						   M_CAMXPT, M_NOWAIT);
6150 			if (path->device->serial_num != NULL) {
6151 				bcopy(serial_buf->serial_num,
6152 				      path->device->serial_num,
6153 				      serial_buf->length);
6154 				path->device->serial_num_len =
6155 				    serial_buf->length;
6156 				path->device->serial_num[serial_buf->length]
6157 				    = '\0';
6158 			}
6159 		} else if (cam_periph_error(done_ccb, 0,
6160 					    SF_RETRY_UA|SF_NO_PRINT,
6161 					    &softc->saved_ccb) == ERESTART) {
6162 			return;
6163 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6164 			/* Don't wedge the queue */
6165 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6166 					 /*run_queue*/TRUE);
6167 		}
6168 
6169 		/*
6170 		 * Let's see if we have seen this device before.
6171 		 */
6172 		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
6173 			MD5_CTX context;
6174 			u_int8_t digest[16];
6175 
6176 			MD5Init(&context);
6177 
6178 			MD5Update(&context,
6179 				  (unsigned char *)&path->device->inq_data,
6180 				  sizeof(struct scsi_inquiry_data));
6181 
6182 			if (have_serialnum)
6183 				MD5Update(&context, serial_buf->serial_num,
6184 					  serial_buf->length);
6185 
6186 			MD5Final(digest, &context);
6187 			if (bcmp(softc->digest, digest, 16) == 0)
6188 				changed = 0;
6189 
6190 			/*
6191 			 * XXX Do we need to do a TUR in order to ensure
6192 			 *     that the device really hasn't changed???
6193 			 */
6194 			if ((changed != 0)
6195 			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
6196 				xpt_async(AC_LOST_DEVICE, path, NULL);
6197 		}
6198 		if (serial_buf != NULL)
6199 			free(serial_buf, M_TEMP);
6200 
6201 		if (changed != 0) {
6202 			/*
6203 			 * We now have all the information necessary to
6204 			 * safely perform transfer negotiations.
6205 			 * Controllers don't perform any negotiation or
6206 			 * tagged queuing until after the first
6207 			 * XPT_SET_TRAN_SETTINGS ccb is received.  So, on
6208 			 * a new device, just retrieve the user settings,
6209 			 * and set them as the current settings to set
6210 			 * the device up.
6211 			 */
6212 			proberequestdefaultnegotiation(periph);
6213 			xpt_release_ccb(done_ccb);
6214 
6215 			/*
6216 			 * Perform a TUR to allow the controller to
6217 			 * perform any necessary transfer negotiation.
6218 			 */
6219 			softc->action = PROBE_TUR_FOR_NEGOTIATION;
6220 			xpt_schedule(periph, priority);
6221 			return;
6222 		}
6223 		xpt_release_ccb(done_ccb);
6224 		break;
6225 	}
6226 	case PROBE_TUR_FOR_NEGOTIATION:
6227 		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6228 			/* Don't wedge the queue */
6229 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6230 					 /*run_queue*/TRUE);
6231 		}
6232 
6233 		path->device->flags &= ~CAM_DEV_UNCONFIGURED;
6234 
6235 		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6236 			/* Inform the XPT that a new device has been found */
6237 			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6238 			xpt_action(done_ccb);
6239 
6240 			xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
6241 				  done_ccb);
6242 		}
6243 		xpt_release_ccb(done_ccb);
6244 		break;
6245 	}
6246 	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
6247 	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
6248 	done_ccb->ccb_h.status = CAM_REQ_CMP;
6249 	xpt_done(done_ccb);
6250 	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
6251 		cam_periph_invalidate(periph);
6252 		cam_periph_release(periph);
6253 	} else {
6254 		probeschedule(periph);
6255 	}
6256 }
6257 
6258 static void
6259 probecleanup(struct cam_periph *periph)
6260 {
6261 	free(periph->softc, M_TEMP);
6262 }
6263 
6264 static void
6265 xpt_find_quirk(struct cam_ed *device)
6266 {
6267 	caddr_t	match;
6268 
6269 	match = cam_quirkmatch((caddr_t)&device->inq_data,
6270 			       (caddr_t)xpt_quirk_table,
6271 			       sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
6272 			       sizeof(*xpt_quirk_table), scsi_inquiry_match);
6273 
6274 	if (match == NULL)
6275 		panic("xpt_find_quirk: device didn't match wildcard entry!!");
6276 
6277 	device->quirk = (struct xpt_quirk_entry *)match;
6278 }
6279 
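/*
 * Sysctl handler backing the cam_srch_hi flag; only the values 0 and 1
 * are accepted.
 */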
6280 static int
6281 sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
6282 {
6283 	int error, val;
6284 
6285 	val = cam_srch_hi;
6286 	error = sysctl_handle_int(oidp, &val, sizeof(val), req);
6287 	if (error != 0 || req->newptr == NULL)
6288 		return (error);
6289 	if (val == 0 || val == 1) {
6290 		cam_srch_hi = val;
6291 		return (0);
6292 	} else {
6293 		return (EINVAL);
6294 	}
6295 }
6296 
6297 #ifdef CAM_NEW_TRAN_CODE
6298 
6299 static void
6300 xpt_devise_transport(struct cam_path *path)
6301 {
6302 	struct ccb_pathinq cpi;
6303 	struct ccb_trans_settings cts;
6304 	struct scsi_inquiry_data *inq_buf;
6305 
6306 	/* Get transport information from the SIM */
6307 	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
6308 	cpi.ccb_h.func_code = XPT_PATH_INQ;
6309 	xpt_action((union ccb *)&cpi);
6310 
6311 	inq_buf = NULL;
6312 	if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
6313 		inq_buf = &path->device->inq_data;
6314 	path->device->protocol = PROTO_SCSI;
6315 	path->device->protocol_version =
6316 	    inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
6317 	path->device->transport = cpi.transport;
6318 	path->device->transport_version = cpi.transport_version;
6319 
6320 	/*
6321 	 * Any device not using SPI3 features should
6322 	 * be considered SPI2 or lower.
6323 	 */
6324 	if (inq_buf != NULL) {
6325 		if (path->device->transport == XPORT_SPI
6326 		 && (inq_buf->spi3data & SID_SPI_MASK) == 0
6327 		 && path->device->transport_version > 2)
6328 			path->device->transport_version = 2;
6329 	} else {
6330 		struct cam_ed* otherdev;
6331 
6332 		for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
6333 		     otherdev != NULL;
6334 		     otherdev = TAILQ_NEXT(otherdev, links)) {
6335 			if (otherdev != path->device)
6336 				break;
6337 		}
6338 
6339 		if (otherdev != NULL) {
6340 			/*
6341 			 * Initially assume the same versioning as
6342 			 * prior luns for this target.
6343 			 */
6344 			path->device->protocol_version =
6345 			    otherdev->protocol_version;
6346 			path->device->transport_version =
6347 			    otherdev->transport_version;
6348 		} else {
6349 			/* Until we know better, opt for safety */
6350 			path->device->protocol_version = 2;
6351 			if (path->device->transport == XPORT_SPI)
6352 				path->device->transport_version = 2;
6353 			else
6354 				path->device->transport_version = 0;
6355 		}
6356 	}
6357 
6358 	/*
6359 	 * XXX
6360 	 * For a device compliant with SPC-2 we should be able
6361 	 * to determine the transport version supported by
6362 	 * scrutinizing the version descriptors in the
6363 	 * inquiry buffer.
6364 	 */
6365 
6366 	/* Tell the controller what we think */
6367 	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6368 	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6369 	cts.type = CTS_TYPE_CURRENT_SETTINGS;
6370 	cts.transport = path->device->transport;
6371 	cts.transport_version = path->device->transport_version;
6372 	cts.protocol = path->device->protocol;
6373 	cts.protocol_version = path->device->protocol_version;
6374 	cts.proto_specific.valid = 0;
6375 	cts.xport_specific.valid = 0;
6376 	xpt_action((union ccb *)&cts);
6377 }
6378 
6379 static void
6380 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6381 			  int async_update)
6382 {
6383 	struct	ccb_pathinq cpi;
6384 	struct	ccb_trans_settings cur_cts;
6385 	struct	ccb_trans_settings_scsi *scsi;
6386 	struct	ccb_trans_settings_scsi *cur_scsi;
6387 	struct	cam_sim *sim;
6388 	struct	scsi_inquiry_data *inq_data;
6389 
6390 	if (device == NULL) {
6391 		cts->ccb_h.status = CAM_PATH_INVALID;
6392 		xpt_done((union ccb *)cts);
6393 		return;
6394 	}
6395 
6396 	if (cts->protocol == PROTO_UNKNOWN
6397 	 || cts->protocol == PROTO_UNSPECIFIED) {
6398 		cts->protocol = device->protocol;
6399 		cts->protocol_version = device->protocol_version;
6400 	}
6401 
6402 	if (cts->protocol_version == PROTO_VERSION_UNKNOWN
6403 	 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
6404 		cts->protocol_version = device->protocol_version;
6405 
6406 	if (cts->protocol != device->protocol) {
6407 		xpt_print_path(cts->ccb_h.path);
6408 		printf("Uninitialized Protocol %x:%x?\n",
6409 		       cts->protocol, device->protocol);
6410 		cts->protocol = device->protocol;
6411 	}
6412 
6413 	if (cts->protocol_version > device->protocol_version) {
6414 		if (bootverbose) {
6415 			xpt_print_path(cts->ccb_h.path);
6416 			printf("Down revving Protocol Version from %d to %d?\n",
6417 			       cts->protocol_version, device->protocol_version);
6418 		}
6419 		cts->protocol_version = device->protocol_version;
6420 	}
6421 
6422 	if (cts->transport == XPORT_UNKNOWN
6423 	 || cts->transport == XPORT_UNSPECIFIED) {
6424 		cts->transport = device->transport;
6425 		cts->transport_version = device->transport_version;
6426 	}
6427 
6428 	if (cts->transport_version == XPORT_VERSION_UNKNOWN
6429 	 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
6430 		cts->transport_version = device->transport_version;
6431 
6432 	if (cts->transport != device->transport) {
6433 		xpt_print_path(cts->ccb_h.path);
6434 		printf("Uninitialized Transport %x:%x?\n",
6435 		       cts->transport, device->transport);
6436 		cts->transport = device->transport;
6437 	}
6438 
6439 	if (cts->transport_version > device->transport_version) {
6440 		if (bootverbose) {
6441 			xpt_print_path(cts->ccb_h.path);
6442 			printf("Down revving Transport Version from %d to %d?\n",
6443 			       cts->transport_version,
6444 			       device->transport_version);
6445 		}
6446 		cts->transport_version = device->transport_version;
6447 	}
6448 
6449 	sim = cts->ccb_h.path->bus->sim;
6450 
6451 	/*
6452 	 * Nothing more of interest to do unless
6453 	 * this is a device connected via the
6454 	 * SCSI protocol.
6455 	 */
6456 	if (cts->protocol != PROTO_SCSI) {
6457 		if (async_update == FALSE)
6458 			(*(sim->sim_action))(sim, (union ccb *)cts);
6459 		return;
6460 	}
6461 
6462 	inq_data = &device->inq_data;
6463 	scsi = &cts->proto_specific.scsi;
6464 	xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6465 	cpi.ccb_h.func_code = XPT_PATH_INQ;
6466 	xpt_action((union ccb *)&cpi);
6467 
6468 	/* SCSI specific sanity checking */
6469 	if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6470 	 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
6471 	 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6472 	 || (device->quirk->mintags == 0)) {
6473 		/*
6474 		 * Can't tag on hardware that doesn't support tags,
6475 		 * doesn't have it enabled, or has broken tag support.
6476 		 */
6477 		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6478 	}
6479 
6480 	if (async_update == FALSE) {
6481 		/*
6482 		 * Perform sanity checking against what the
6483 		 * controller and device can do.
6484 		 */
6485 		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6486 		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6487 		cur_cts.type = cts->type;
6488 		xpt_action((union ccb *)&cur_cts);
6489 
6490 		cur_scsi = &cur_cts.proto_specific.scsi;
6491 		if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
6492 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6493 			scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
6494 		}
6495 		if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
6496 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6497 	}
6498 
6499 	/* SPI specific sanity checking */
6500 	if (cts->transport == XPORT_SPI && async_update == FALSE) {
6501 		u_int spi3caps;
6502 		struct ccb_trans_settings_spi *spi;
6503 		struct ccb_trans_settings_spi *cur_spi;
6504 
6505 		spi = &cts->xport_specific.spi;
6506 
6507 		cur_spi = &cur_cts.xport_specific.spi;
6508 
6509 		/* Fill in any gaps in what the user gave us */
6510 		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6511 			spi->sync_period = cur_spi->sync_period;
6512 		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6513 			spi->sync_period = 0;
6514 		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6515 			spi->sync_offset = cur_spi->sync_offset;
6516 		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6517 			spi->sync_offset = 0;
6518 		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6519 			spi->ppr_options = cur_spi->ppr_options;
6520 		if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6521 			spi->ppr_options = 0;
6522 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6523 			spi->bus_width = cur_spi->bus_width;
6524 		if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6525 			spi->bus_width = 0;
6526 		if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
6527 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6528 			spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
6529 		}
6530 		if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
6531 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6532 		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6533 		  && (inq_data->flags & SID_Sync) == 0
6534 		  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6535 		 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6536 		 || (cur_spi->sync_offset == 0)
6537 		 || (cur_spi->sync_period == 0)) {
6538 			/* Force async */
6539 			spi->sync_period = 0;
6540 			spi->sync_offset = 0;
6541 		}
6542 
6543 		switch (spi->bus_width) {
6544 		case MSG_EXT_WDTR_BUS_32_BIT:
6545 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6546 			  || (inq_data->flags & SID_WBus32) != 0
6547 			  || cts->type == CTS_TYPE_USER_SETTINGS)
6548 			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6549 				break;
6550 			/* Fall Through to 16-bit */
6551 		case MSG_EXT_WDTR_BUS_16_BIT:
6552 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6553 			  || (inq_data->flags & SID_WBus16) != 0
6554 			  || cts->type == CTS_TYPE_USER_SETTINGS)
6555 			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6556 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6557 				break;
6558 			}
6559 			/* Fall Through to 8-bit */
6560 		default: /* New bus width?? */
6561 		case MSG_EXT_WDTR_BUS_8_BIT:
6562 			/* All targets can do this */
6563 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6564 			break;
6565 		}
6566 
6567 		spi3caps = cpi.xport_specific.spi.ppr_options;
6568 		if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6569 		 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6570 			spi3caps &= inq_data->spi3data;
6571 
6572 		if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
6573 			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
6574 
6575 		if ((spi3caps & SID_SPI_IUS) == 0)
6576 			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
6577 
6578 		if ((spi3caps & SID_SPI_QAS) == 0)
6579 			spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
6580 
6581 		/* No SPI Transfer settings are allowed unless we are wide */
6582 		if (spi->bus_width == 0)
6583 			spi->ppr_options = 0;
6584 
6585 		if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
6586 			/*
6587 			 * Can't tag queue without disconnection.
6588 			 */
6589 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6590 			scsi->valid |= CTS_SCSI_VALID_TQ;
6591 		}
6592 
6593 		/*
6594 		 * If we are currently performing tagged transactions to
6595 		 * this device and want to change its negotiation parameters,
6596 		 * go non-tagged for a bit to give the controller a chance to
6597 		 * negotiate unhampered by tag messages.
6598 		 */
6599 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6600 		 && (device->inq_flags & SID_CmdQue) != 0
6601 		 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6602 		 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
6603 				   CTS_SPI_VALID_SYNC_OFFSET|
6604 				   CTS_SPI_VALID_BUS_WIDTH)) != 0)
6605 			xpt_toggle_tags(cts->ccb_h.path);
6606 	}
6607 
6608 	if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6609 	 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
6610 		int device_tagenb;
6611 
6612 		/*
6613 		 * If we are transitioning from tags to no-tags or
6614 		 * vice-versa, we need to carefully freeze and restart
6615 		 * the queue so that we don't overlap tagged and non-tagged
6616 		 * commands.  We also temporarily stop tags if there is
6617 		 * a change in transfer negotiation settings to allow
6618 		 * "tag-less" negotiation.
6619 		 */
6620 		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6621 		 || (device->inq_flags & SID_CmdQue) != 0)
6622 			device_tagenb = TRUE;
6623 		else
6624 			device_tagenb = FALSE;
6625 
6626 		if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6627 		  && device_tagenb == FALSE)
6628 		 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
6629 		  && device_tagenb == TRUE)) {
6630 
6631 			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
6632 				/*
6633 				 * Delay change to use tags until after a
6634 				 * few commands have gone to this device so
6635 				 * the controller has time to perform transfer
6636 				 * negotiations without tagged messages getting
6637 				 * in the way.
6638 				 */
6639 				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6640 				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6641 			} else {
6642 				struct ccb_relsim crs;
6643 
6644 				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6645 				device->inq_flags &= ~SID_CmdQue;
6646 				xpt_dev_ccbq_resize(cts->ccb_h.path,
6647 						    sim->max_dev_openings);
6648 				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6649 				device->tag_delay_count = 0;
6650 
6651 				xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6652 					      /*priority*/1);
6653 				crs.ccb_h.func_code = XPT_REL_SIMQ;
6654 				crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6655 				crs.openings
6656 				    = crs.release_timeout
6657 				    = crs.qfrozen_cnt
6658 				    = 0;
6659 				xpt_action((union ccb *)&crs);
6660 			}
6661 		}
6662 	}
6663 	if (async_update == FALSE)
6664 		(*(sim->sim_action))(sim, (union ccb *)cts);
6665 }
6666 
6667 #else /* CAM_NEW_TRAN_CODE */
6668 
6669 static void
6670 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6671 			  int async_update)
6672 {
6673 	struct	cam_sim *sim;
6674 	int	qfrozen;
6675 
6676 	sim = cts->ccb_h.path->bus->sim;
6677 	if (async_update == FALSE) {
6678 		struct	scsi_inquiry_data *inq_data;
6679 		struct	ccb_pathinq cpi;
6680 		struct	ccb_trans_settings cur_cts;
6681 
6682 		if (device == NULL) {
6683 			cts->ccb_h.status = CAM_PATH_INVALID;
6684 			xpt_done((union ccb *)cts);
6685 			return;
6686 		}
6687 
6688 		/*
6689 		 * Perform sanity checking against what the
6690 		 * controller and device can do.
6691 		 */
6692 		xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6693 		cpi.ccb_h.func_code = XPT_PATH_INQ;
6694 		xpt_action((union ccb *)&cpi);
6695 		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6696 		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6697 		cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
6698 		xpt_action((union ccb *)&cur_cts);
6699 		inq_data = &device->inq_data;
6700 
6701 		/* Fill in any gaps in what the user gave us */
6702 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
6703 			cts->sync_period = cur_cts.sync_period;
6704 		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
6705 			cts->sync_offset = cur_cts.sync_offset;
6706 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
6707 			cts->bus_width = cur_cts.bus_width;
6708 		if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
6709 			cts->flags &= ~CCB_TRANS_DISC_ENB;
6710 			cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
6711 		}
6712 		if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
6713 			cts->flags &= ~CCB_TRANS_TAG_ENB;
6714 			cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
6715 		}
6716 
6717 		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6718 		  && (inq_data->flags & SID_Sync) == 0)
6719 		 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6720 		 || (cts->sync_offset == 0)
6721 		 || (cts->sync_period == 0)) {
6722 			/* Force async */
6723 			cts->sync_period = 0;
6724 			cts->sync_offset = 0;
6725 		} else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6726 			&& (inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
6727 			&& cts->sync_period <= 0x9) {
6728 			/*
6729 			 * Don't allow DT transmission rates if the
6730 			 * device does not support it.
6731 			 */
6732 			cts->sync_period = 0xa;
6733 		}
6734 
6735 		switch (cts->bus_width) {
6736 		case MSG_EXT_WDTR_BUS_32_BIT:
6737 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6738 			  || (inq_data->flags & SID_WBus32) != 0)
6739 			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6740 				break;
6741 			/* FALLTHROUGH to 16-bit */
6742 		case MSG_EXT_WDTR_BUS_16_BIT:
6743 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6744 			  || (inq_data->flags & SID_WBus16) != 0)
6745 			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6746 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6747 				break;
6748 			}
6749 			/* FALLTHROUGH to 8-bit */
6750 		default: /* New bus width?? */
6751 		case MSG_EXT_WDTR_BUS_8_BIT:
6752 			/* All targets can do this */
6753 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6754 			break;
6755 		}
6756 
6757 		if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
6758 			/*
6759 			 * Can't tag queue without disconnection.
6760 			 */
6761 			cts->flags &= ~CCB_TRANS_TAG_ENB;
6762 			cts->valid |= CCB_TRANS_TQ_VALID;
6763 		}
6764 
6765 		if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6766 		 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
6767 		 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6768 		 || (device->quirk->mintags == 0)) {
6769 			/*
6770 			 * Can't tag on hardware that doesn't support tags,
6771 			 * doesn't have it enabled, or has broken tag support.
6772 			 */
6773 			cts->flags &= ~CCB_TRANS_TAG_ENB;
6774 		}
6775 	}
6776 
6777 	qfrozen = FALSE;
6778 	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
6779 		int device_tagenb;
6780 
6781 		/*
6782 		 * If we are transitioning from tags to no-tags or
6783 		 * vice-versa, we need to carefully freeze and restart
6784 		 * the queue so that we don't overlap tagged and non-tagged
6785 		 * commands.  We also temporarily stop tags if there is
6786 		 * a change in transfer negotiation settings to allow
6787 		 * "tag-less" negotiation.
6788 		 */
6789 		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6790 		 || (device->inq_flags & SID_CmdQue) != 0)
6791 			device_tagenb = TRUE;
6792 		else
6793 			device_tagenb = FALSE;
6794 
6795 		if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
6796 		  && device_tagenb == FALSE)
6797 		 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
6798 		  && device_tagenb == TRUE)) {
6799 
6800 			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
6801 				/*
6802 				 * Delay change to use tags until after a
6803 				 * few commands have gone to this device so
6804 				 * the controller has time to perform transfer
6805 				 * negotiations without tagged messages getting
6806 				 * in the way.
6807 				 */
6808 				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6809 				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6810 			} else {
6811 				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6812 				qfrozen = TRUE;
6813 				device->inq_flags &= ~SID_CmdQue;
6814 				xpt_dev_ccbq_resize(cts->ccb_h.path,
6815 						    sim->max_dev_openings);
6816 				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6817 				device->tag_delay_count = 0;
6818 			}
6819 		}
6820 	}
6821 
6822 	if (async_update == FALSE) {
6823 		/*
6824 		 * If we are currently performing tagged transactions to
6825 		 * this device and want to change its negotiation parameters,
6826 		 * go non-tagged for a bit to give the controller a chance to
6827 		 * negotiate unhampered by tag messages.
6828 		 */
6829 		if ((device->inq_flags & SID_CmdQue) != 0
6830 		 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
6831 				   CCB_TRANS_SYNC_OFFSET_VALID|
6832 				   CCB_TRANS_BUS_WIDTH_VALID)) != 0)
6833 			xpt_toggle_tags(cts->ccb_h.path);
6834 
6835 		(*(sim->sim_action))(sim, (union ccb *)cts);
6836 	}
6837 
6838 	if (qfrozen) {
6839 		struct ccb_relsim crs;
6840 
6841 		xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6842 			      /*priority*/1);
6843 		crs.ccb_h.func_code = XPT_REL_SIMQ;
6844 		crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6845 		crs.openings
6846 		    = crs.release_timeout
6847 		    = crs.qfrozen_cnt
6848 		    = 0;
6849 		xpt_action((union ccb *)&crs);
6850 	}
6851 }
6852 
6853 
6854 #endif /* CAM_NEW_TRAN_CODE */
6855 
6856 static void
6857 xpt_toggle_tags(struct cam_path *path)
6858 {
6859 	struct cam_ed *dev;
6860 
6861 	/*
6862 	 * Give controllers a chance to renegotiate
6863 	 * before starting tag operations.  We
6864 	 * "toggle" tagged queuing off then on
6865 	 * which causes the tag enable command delay
6866 	 * counter to come into effect.
6867 	 */
6868 	dev = path->device;
6869 	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6870 	 || ((dev->inq_flags & SID_CmdQue) != 0
6871 	  && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
6872 		struct ccb_trans_settings cts;
6873 
6874 		xpt_setup_ccb(&cts.ccb_h, path, 1);
6875 #ifdef CAM_NEW_TRAN_CODE
6876 		cts.protocol = PROTO_SCSI;
6877 		cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
6878 		cts.transport = XPORT_UNSPECIFIED;
6879 		cts.transport_version = XPORT_VERSION_UNSPECIFIED;
6880 		cts.proto_specific.scsi.flags = 0;
6881 		cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
6882 #else /* CAM_NEW_TRAN_CODE */
6883 		cts.flags = 0;
6884 		cts.valid = CCB_TRANS_TQ_VALID;
6885 #endif /* CAM_NEW_TRAN_CODE */
6886 		xpt_set_transfer_settings(&cts, path->device,
6887 					  /*async_update*/TRUE);
6888 #ifdef CAM_NEW_TRAN_CODE
6889 		cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
6890 #else /* CAM_NEW_TRAN_CODE */
6891 		cts.flags = CCB_TRANS_TAG_ENB;
6892 #endif /* CAM_NEW_TRAN_CODE */
6893 		xpt_set_transfer_settings(&cts, path->device,
6894 					  /*async_update*/TRUE);
6895 	}
6896 }
6897 
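/*
 * Turn tagged queueing on for a device: freeze its queue, resize the CCB
 * queue to the tagged opening count (preferring any previously saved
 * value), and release the queue again once it drains.
 */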
6898 static void
6899 xpt_start_tags(struct cam_path *path)
6900 {
6901 	struct ccb_relsim crs;
6902 	struct cam_ed *device;
6903 	struct cam_sim *sim;
6904 	int    newopenings;
6905 
6906 	device = path->device;
6907 	sim = path->bus->sim;
6908 	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6909 	xpt_freeze_devq(path, /*count*/1);
6910 	device->inq_flags |= SID_CmdQue;
6911 	if (device->tag_saved_openings != 0)
6912 		newopenings = device->tag_saved_openings;
6913 	else
6914 		newopenings = min(device->quirk->maxtags,
6915 				  sim->max_tagged_dev_openings);
6916 	xpt_dev_ccbq_resize(path, newopenings);
6917 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
6918 	crs.ccb_h.func_code = XPT_REL_SIMQ;
6919 	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6920 	crs.openings
6921 	    = crs.release_timeout
6922 	    = crs.qfrozen_cnt
6923 	    = 0;
6924 	xpt_action((union ccb *)&crs);
6925 }
6926 
6927 static int busses_to_config;
6928 static int busses_to_reset;
6929 
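/*
 * Count the busses awaiting configuration and, of those, the busses that
 * will be reset first because the HBA both allows bus resets and can
 * negotiate transfer settings.
 */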
6930 static int
6931 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
6932 {
6933 	if (bus->path_id != CAM_XPT_PATH_ID) {
6934 		struct cam_path path;
6935 		struct ccb_pathinq cpi;
6936 		int can_negotiate;
6937 
6938 		busses_to_config++;
6939 		xpt_compile_path(&path, NULL, bus->path_id,
6940 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
6941 		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
6942 		cpi.ccb_h.func_code = XPT_PATH_INQ;
6943 		xpt_action((union ccb *)&cpi);
6944 		can_negotiate = cpi.hba_inquiry;
6945 		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6946 		if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
6947 		 && can_negotiate)
6948 			busses_to_reset++;
6949 		xpt_release_path(&path);
6950 	}
6951 
6952 	return(1);
6953 }
6954 
6955 static int
6956 xptconfigfunc(struct cam_eb *bus, void *arg)
6957 {
6958 	struct	cam_path *path;
6959 	union	ccb *work_ccb;
6960 
6961 	if (bus->path_id != CAM_XPT_PATH_ID) {
6962 		cam_status status;
6963 		int can_negotiate;
6964 
6965 		work_ccb = xpt_alloc_ccb();
6966 		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6967 					      CAM_TARGET_WILDCARD,
6968 					      CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
6969 			printf("xptconfigfunc: xpt_create_path failed with "
6970 			       "status %#x for bus %d\n", status, bus->path_id);
6971 			printf("xptconfigfunc: halting bus configuration\n");
6972 			xpt_free_ccb(work_ccb);
6973 			busses_to_config--;
6974 			xpt_finishconfig(xpt_periph, NULL);
6975 			return(0);
6976 		}
6977 		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6978 		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
6979 		xpt_action(work_ccb);
6980 		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
6981 			printf("xptconfigfunc: CPI failed on bus %d "
6982 			       "with status %d\n", bus->path_id,
6983 			       work_ccb->ccb_h.status);
6984 			xpt_finishconfig(xpt_periph, work_ccb);
6985 			return(1);
6986 		}
6987 
6988 		can_negotiate = work_ccb->cpi.hba_inquiry;
6989 		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6990 		if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
6991 		 && (can_negotiate != 0)) {
6992 			xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6993 			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6994 			work_ccb->ccb_h.cbfcnp = NULL;
6995 			CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
6996 				  ("Resetting Bus\n"));
6997 			xpt_action(work_ccb);
6998 			xpt_finishconfig(xpt_periph, work_ccb);
6999 		} else {
7000 			/* Act as though we performed a successful BUS RESET */
7001 			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
7002 			xpt_finishconfig(xpt_periph, work_ccb);
7003 		}
7004 	}
7005 
7006 	return(1);
7007 }
7008 
7009 static void
7010 xpt_config(void *arg)
7011 {
7012 	/*
7013 	 * Now that interrupts are enabled, go find our devices
7014 	 */
7015 
7016 #ifdef CAMDEBUG
7017 	/* Setup debugging flags and path */
7018 #ifdef CAM_DEBUG_FLAGS
7019 	cam_dflags = CAM_DEBUG_FLAGS;
7020 #else /* !CAM_DEBUG_FLAGS */
7021 	cam_dflags = CAM_DEBUG_NONE;
7022 #endif /* CAM_DEBUG_FLAGS */
7023 #ifdef CAM_DEBUG_BUS
7024 	if (cam_dflags != CAM_DEBUG_NONE) {
7025 		if (xpt_create_path(&cam_dpath, xpt_periph,
7026 				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
7027 				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
7028 			printf("xpt_config: xpt_create_path() failed for debug"
7029 			       " target %d:%d:%d, debugging disabled\n",
7030 			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
7031 			cam_dflags = CAM_DEBUG_NONE;
7032 		}
7033 	} else
7034 		cam_dpath = NULL;
7035 #else /* !CAM_DEBUG_BUS */
7036 	cam_dpath = NULL;
7037 #endif /* CAM_DEBUG_BUS */
7038 #endif /* CAMDEBUG */
7039 
7040 	/*
7041 	 * Scan all installed busses.
7042 	 */
7043 	xpt_for_all_busses(xptconfigbuscountfunc, NULL);
7044 
7045 	if (busses_to_config == 0) {
7046 		/* Call manually because we don't have any busses */
7047 		xpt_finishconfig(xpt_periph, NULL);
7048 	} else  {
7049 		if (busses_to_reset > 0 && scsi_delay >= 2000) {
7050 			printf("Waiting %d seconds for SCSI "
7051 			       "devices to settle\n", scsi_delay/1000);
7052 		}
7053 		xpt_for_all_busses(xptconfigfunc, NULL);
7054 	}
7055 }
7056 
7057 /*
7058  * If the given device only has one peripheral attached to it, and if that
7059  * peripheral is the passthrough driver, announce it.  This ensures that the
7060  * user sees some sort of announcement for every peripheral in their system.
7061  */
7062 static int
7063 xptpassannouncefunc(struct cam_ed *device, void *arg)
7064 {
7065 	struct cam_periph *periph;
7066 	int i;
7067 
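	/* Count the peripherals attached to this device. */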
7068 	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
7069 	     periph = SLIST_NEXT(periph, periph_links), i++);
7070 
7071 	periph = SLIST_FIRST(&device->periphs);
7072 	if ((i == 1)
7073 	 && (strncmp(periph->periph_name, "pass", 4) == 0))
7074 		xpt_announce_periph(periph, NULL);
7075 
7076 	return(1);
7077 }
7078 
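/*
 * Completion handler for the boot-time bus reset and scan CCBs.  Once the
 * last bus has been configured, register the peripheral drivers, announce
 * any pass-only devices, and release the config intrhook so boot can
 * continue.
 */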
7079 static void
7080 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
7081 {
7082 	struct	periph_driver **p_drv;
7083 	int	i;
7084 
7085 	if (done_ccb != NULL) {
7086 		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
7087 			  ("xpt_finishconfig\n"));
7088 		switch(done_ccb->ccb_h.func_code) {
7089 		case XPT_RESET_BUS:
7090 			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
7091 				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
7092 				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
7093 				done_ccb->crcn.flags = 0;
7094 				xpt_action(done_ccb);
7095 				return;
7096 			}
7097 			/* FALLTHROUGH */
7098 		case XPT_SCAN_BUS:
7099 		default:
7100 			xpt_free_path(done_ccb->ccb_h.path);
7101 			busses_to_config--;
7102 			break;
7103 		}
7104 	}
7105 
7106 	if (busses_to_config == 0) {
7107 		/* Register all the peripheral drivers */
7108 		/* XXX This will have to change when we have loadable modules */
7109 		p_drv = periph_drivers;
7110 		for (i = 0; p_drv[i] != NULL; i++) {
7111 			(*p_drv[i]->init)();
7112 		}
7113 
7114 		/*
7115 		 * Check for devices with no "standard" peripheral driver
7116 		 * attached.  For any devices like that, announce the
7117 		 * passthrough driver so the user will see something.
7118 		 */
7119 		xpt_for_all_devices(xptpassannouncefunc, NULL);
7120 
7121 		/* Release our hook so that the boot can continue. */
7122 		config_intrhook_disestablish(xpt_config_hook);
7123 		free(xpt_config_hook, M_TEMP);
7124 		xpt_config_hook = NULL;
7125 	}
7126 	if (done_ccb != NULL)
7127 		xpt_free_ccb(done_ccb);
7128 }
7129 
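/*
 * Action routine for the xpt's own SIM; it services only XPT_PATH_INQ
 * and rejects everything else.
 */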
7130 static void
7131 xptaction(struct cam_sim *sim, union ccb *work_ccb)
7132 {
7133 	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
7134 
7135 	switch (work_ccb->ccb_h.func_code) {
7136 	/* Common cases first */
7137 	case XPT_PATH_INQ:		/* Path routing inquiry */
7138 	{
7139 		struct ccb_pathinq *cpi;
7140 
7141 		cpi = &work_ccb->cpi;
7142 		cpi->version_num = 1; /* XXX??? */
7143 		cpi->hba_inquiry = 0;
7144 		cpi->target_sprt = 0;
7145 		cpi->hba_misc = 0;
7146 		cpi->hba_eng_cnt = 0;
7147 		cpi->max_target = 0;
7148 		cpi->max_lun = 0;
7149 		cpi->initiator_id = 0;
7150 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
7151 		strncpy(cpi->hba_vid, "", HBA_IDLEN);
7152 		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
7153 		cpi->unit_number = sim->unit_number;
7154 		cpi->bus_id = sim->bus_id;
7155 		cpi->base_transfer_speed = 0;
7156 #ifdef CAM_NEW_TRAN_CODE
7157 		cpi->protocol = PROTO_UNSPECIFIED;
7158 		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
7159 		cpi->transport = XPORT_UNSPECIFIED;
7160 		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
7161 #endif /* CAM_NEW_TRAN_CODE */
7162 		cpi->ccb_h.status = CAM_REQ_CMP;
7163 		xpt_done(work_ccb);
7164 		break;
7165 	}
7166 	default:
7167 		work_ccb->ccb_h.status = CAM_REQ_INVALID;
7168 		xpt_done(work_ccb);
7169 		break;
7170 	}
7171 }
7172 
7173 /*
7174  * The xpt as a "controller" has no interrupt sources, so polling
7175  * is a no-op.
7176  */
7177 static void
7178 xptpoll(struct cam_sim *sim)
7179 {
7180 }
7181 
7182 static void
7183 camisr(void *V_queue)
7184 {
7185 	cam_isrq_t *oqueue = V_queue;
7186 	cam_isrq_t queue;
7187 	int	s;
7188 	struct	ccb_hdr *ccb_h;
7189 
7190 	/*
7191 	 * Transfer the ccb_bioq list to a temporary list so we can operate
7192 	 * on it without needing to lock/unlock on every loop.  The concat
7193 	 * function will re-init the real list for us.
7194 	 */
7195 	s = splcam();
7196 	mtx_lock(&cam_bioq_lock);
7197 	TAILQ_INIT(&queue);
7198 	TAILQ_CONCAT(&queue, oqueue, sim_links.tqe);
7199 	mtx_unlock(&cam_bioq_lock);
7200 
7201 	while ((ccb_h = TAILQ_FIRST(&queue)) != NULL) {
7202 		int	runq;
7203 
7204 		TAILQ_REMOVE(&queue, ccb_h, sim_links.tqe);
7205 		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
7206 		splx(s);
7207 
7208 		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
7209 			  ("camisr\n"));
7210 
7211 		runq = FALSE;
7212 
7213 		if (ccb_h->flags & CAM_HIGH_POWER) {
7214 			struct highpowerlist	*hphead;
7215 			union ccb		*send_ccb;
7216 
7217 			hphead = &highpowerq;
7218 
7219 			send_ccb = (union ccb *)STAILQ_FIRST(hphead);
7220 
7221 			/*
7222 			 * Increment the count since this command is done.
7223 			 */
7224 			num_highpower++;
7225 
7226 			/*
7227 			 * Any high powered commands queued up?
7228 			 */
7229 			if (send_ccb != NULL) {
7230 
7231 				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
7232 
7233 				xpt_release_devq(send_ccb->ccb_h.path,
7234 						 /*count*/1, /*runqueue*/TRUE);
7235 			}
7236 		}
7237 		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
7238 			struct cam_ed *dev;
7239 
7240 			dev = ccb_h->path->device;
7241 
7242 			s = splcam();
7243 			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
7244 
7245 			if (!SIM_DEAD(ccb_h->path->bus->sim)) {
7246 				ccb_h->path->bus->sim->devq->send_active--;
7247 				ccb_h->path->bus->sim->devq->send_openings++;
7248 			}
7249 			splx(s);
7250 
7251 			if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
7252 			  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
7253 			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
7254 			  && (dev->ccbq.dev_active == 0))) {
7255 
7256 				xpt_release_devq(ccb_h->path, /*count*/1,
7257 						 /*run_queue*/TRUE);
7258 			}
7259 
7260 			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
7261 			 && (--dev->tag_delay_count == 0))
7262 				xpt_start_tags(ccb_h->path);
7263 
7264 			if ((dev->ccbq.queue.entries > 0)
7265 			 && (dev->qfrozen_cnt == 0)
7266 			 && (device_is_send_queued(dev) == 0)) {
7267 				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
7268 							      dev);
7269 			}
7270 		}
7271 
7272 		if (ccb_h->status & CAM_RELEASE_SIMQ) {
7273 			xpt_release_simq(ccb_h->path->bus->sim,
7274 					 /*run_queue*/TRUE);
7275 			ccb_h->status &= ~CAM_RELEASE_SIMQ;
7276 			runq = FALSE;
7277 		}
7278 
7279 		if ((ccb_h->flags & CAM_DEV_QFRZDIS)
7280 		 && (ccb_h->status & CAM_DEV_QFRZN)) {
7281 			xpt_release_devq(ccb_h->path, /*count*/1,
7282 					 /*run_queue*/TRUE);
7283 			ccb_h->status &= ~CAM_DEV_QFRZN;
7284 		} else if (runq) {
7285 			xpt_run_dev_sendq(ccb_h->path->bus);
7286 		}
7287 
7288 		/* Call the peripheral driver's callback */
7289 		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
7290 
7291 		/* Raise IPL for the while loop test */
7292 		s = splcam();
7293 	}
7294 	splx(s);
7295 }
7296 
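/*
 * Action and poll routines for a SIM that has gone away (see the SIM_DEAD
 * check in camisr()).  Any CCB directed at a dead SIM completes
 * immediately with CAM_DEV_NOT_THERE.
 */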
7297 static void
7298 dead_sim_action(struct cam_sim *sim, union ccb *ccb)
7299 {
7300 
7301 	ccb->ccb_h.status = CAM_DEV_NOT_THERE;
7302 	xpt_done(ccb);
7303 }
7304 
7305 static void
7306 dead_sim_poll(struct cam_sim *sim)
7307 {
7308 }
7309