xref: /freebsd/sys/cam/cam_xpt.c (revision 6af83ee0d2941d18880b6aaa2b4facd1d30c6106)
1 /*-
2  * Implementation of the Common Access Method Transport (XPT) layer.
3  *
4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/bus.h>
35 #include <sys/systm.h>
36 #include <sys/types.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/time.h>
40 #include <sys/conf.h>
41 #include <sys/fcntl.h>
42 #include <sys/md5.h>
43 #include <sys/interrupt.h>
44 #include <sys/sbuf.h>
45 
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 
49 #ifdef PC98
50 #include <pc98/pc98/pc98_machdep.h>	/* geometry translation */
51 #endif
52 
53 #include <cam/cam.h>
54 #include <cam/cam_ccb.h>
55 #include <cam/cam_periph.h>
56 #include <cam/cam_sim.h>
57 #include <cam/cam_xpt.h>
58 #include <cam/cam_xpt_sim.h>
59 #include <cam/cam_xpt_periph.h>
60 #include <cam/cam_debug.h>
61 
62 #include <cam/scsi/scsi_all.h>
63 #include <cam/scsi/scsi_message.h>
64 #include <cam/scsi/scsi_pass.h>
65 #include "opt_cam.h"
66 
67 /* Datastructures internal to the xpt layer */
68 
69 /*
70  * Definition of an async handler callback block.  These are used to add
71  * SIMs and peripherals to the async callback lists.
72  */
73 struct async_node {
74 	SLIST_ENTRY(async_node)	links;
75 	u_int32_t	event_enable;	/* Async Event enables */
76 	void		(*callback)(void *arg, u_int32_t code,
77 				    struct cam_path *path, void *args);
78 	void		*callback_arg;
79 };
80 
81 SLIST_HEAD(async_list, async_node);
82 SLIST_HEAD(periph_list, cam_periph);
83 static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
84 
85 /*
86  * This is the maximum number of high powered commands (e.g. start unit)
87  * that can be outstanding at a particular time.
88  */
89 #ifndef CAM_MAX_HIGHPOWER
90 #define CAM_MAX_HIGHPOWER  4
91 #endif
92 
93 /* number of high powered commands that can go through right now */
94 static int num_highpower = CAM_MAX_HIGHPOWER;
95 
96 /*
97  * Structure for queueing a device in a run queue.
98  * There is one run queue for allocating new ccbs,
99  * and another for sending ccbs to the controller.
100  */
101 struct cam_ed_qinfo {
102 	cam_pinfo pinfo;
103 	struct	  cam_ed *device;
104 };
105 
106 /*
107  * The CAM EDT (Existing Device Table) contains the device information for
108  * all devices for all busses in the system.  The table contains a
109  * cam_ed structure for each device on each bus.
110  */
111 struct cam_ed {
112 	TAILQ_ENTRY(cam_ed) links;
113 	struct	cam_ed_qinfo alloc_ccb_entry;
114 	struct	cam_ed_qinfo send_ccb_entry;
115 	struct	cam_et	 *target;
116 	lun_id_t	 lun_id;
117 	struct	camq drvq;		/*
118 					 * Queue of peripheral drivers wanting to
119 					 * work on this device.
120 					 */
121 	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
122 	struct	async_list asyncs;	/* Async callback info for this B/T/L */
123 	struct	periph_list periphs;	/* All attached devices */
124 	u_int	generation;		/* Generation number */
125 	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
126 	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
127 					/* Storage for the inquiry data */
128 #ifdef CAM_NEW_TRAN_CODE
129 	cam_proto	 protocol;
130 	u_int		 protocol_version;
131 	cam_xport	 transport;
132 	u_int		 transport_version;
133 #endif /* CAM_NEW_TRAN_CODE */
134 	struct		 scsi_inquiry_data inq_data;
135 	u_int8_t	 inq_flags;	/*
136 					 * Current settings for inquiry flags.
137 					 * This allows us to override settings
138 					 * like disconnection and tagged
139 					 * queuing for a device.
140 					 */
141 	u_int8_t	 queue_flags;	/* Queue flags from the control page */
142 	u_int8_t	 serial_num_len;
143 	u_int8_t	*serial_num;
144 	u_int32_t	 qfrozen_cnt;
145 	u_int32_t	 flags;
146 #define CAM_DEV_UNCONFIGURED	 	0x01
147 #define CAM_DEV_REL_TIMEOUT_PENDING	0x02
148 #define CAM_DEV_REL_ON_COMPLETE		0x04
149 #define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
150 #define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
151 #define CAM_DEV_TAG_AFTER_COUNT		0x20
152 #define CAM_DEV_INQUIRY_DATA_VALID	0x40
153 	u_int32_t	 tag_delay_count;
154 #define	CAM_TAG_DELAY_COUNT		5
155 	u_int32_t	 refcount;
156 	struct		 callout_handle c_handle;
157 };
158 
159 /*
160  * Each target is represented by an ET (Existing Target).  These
161  * entries are created when a target is successfully probed with an
162  * identify, and removed when a device fails to respond after a number
163  * of retries, or a bus rescan finds the device missing.
164  */
165 struct cam_et {
166 	TAILQ_HEAD(, cam_ed) ed_entries;
167 	TAILQ_ENTRY(cam_et) links;
168 	struct	cam_eb	*bus;
169 	target_id_t	target_id;
170 	u_int32_t	refcount;
171 	u_int		generation;
172 	struct		timeval last_reset;
173 };
174 
175 /*
176  * Each bus is represented by an EB (Existing Bus).  These entries
177  * are created by calls to xpt_bus_register and deleted by calls to
178  * xpt_bus_deregister.
179  */
180 struct cam_eb {
181 	TAILQ_HEAD(, cam_et) et_entries;
182 	TAILQ_ENTRY(cam_eb)  links;
183 	path_id_t	     path_id;
184 	struct cam_sim	     *sim;
185 	struct timeval	     last_reset;
186 	u_int32_t	     flags;
187 #define	CAM_EB_RUNQ_SCHEDULED	0x01
188 	u_int32_t	     refcount;
189 	u_int		     generation;
190 };
191 
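/*
 * A cam_path is a reference to a particular bus/target/lun nexus and,
 * optionally, to the peripheral driver instance attached to it.  CCBs
 * are addressed to a device through one of these.
 */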
192 struct cam_path {
193 	struct cam_periph *periph;
194 	struct cam_eb	  *bus;
195 	struct cam_et	  *target;
196 	struct cam_ed	  *device;
197 };
198 
199 struct xpt_quirk_entry {
200 	struct scsi_inquiry_pattern inq_pat;
201 	u_int8_t quirks;
202 #define	CAM_QUIRK_NOLUNS	0x01
203 #define	CAM_QUIRK_NOSERIAL	0x02
204 #define	CAM_QUIRK_HILUNS	0x04
205 #define	CAM_QUIRK_NOHILUNS	0x08
206 	u_int mintags;
207 	u_int maxtags;
208 };
209 #define	CAM_SCSI2_MAXLUN	8
210 /*
211  * If we're not quirked to search <= the first 8 luns
212  * and we are either quirked to search above lun 8,
213  * or we're > SCSI-2, we can look for luns above lun 8.
214  */
215 #define	CAN_SRCH_HI(dv)					\
216   (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) 	\
217   && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)		\
218   || SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2))
219 
220 typedef enum {
221 	XPT_FLAG_OPEN		= 0x01
222 } xpt_flags;
223 
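/*
 * Global state for the transport layer: whether the xpt device node is
 * open, and a generation count bumped whenever a peripheral is added to
 * or removed from the system.
 */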
224 struct xpt_softc {
225 	xpt_flags	flags;
226 	u_int32_t	generation;
227 };
228 
229 static const char quantum[] = "QUANTUM";
230 static const char sony[] = "SONY";
231 static const char west_digital[] = "WDIGTL";
232 static const char samsung[] = "SAMSUNG";
233 static const char seagate[] = "SEAGATE";
234 static const char microp[] = "MICROP";
235 
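/*
 * The quirk table is matched against a device's inquiry data in order;
 * the first matching entry is used, so the catch-all default entry at
 * the end of the table must remain last.
 */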
236 static struct xpt_quirk_entry xpt_quirk_table[] =
237 {
238 	{
239 		/* Reports QUEUE FULL for temporary resource shortages */
240 		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
241 		/*quirks*/0, /*mintags*/24, /*maxtags*/32
242 	},
243 	{
244 		/* Reports QUEUE FULL for temporary resource shortages */
245 		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
246 		/*quirks*/0, /*mintags*/24, /*maxtags*/32
247 	},
248 	{
249 		/* Reports QUEUE FULL for temporary resource shortages */
250 		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
251 		/*quirks*/0, /*mintags*/24, /*maxtags*/32
252 	},
253 	{
254 		/* Broken tagged queuing drive */
255 		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
256 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
257 	},
258 	{
259 		/* Broken tagged queuing drive */
260 		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
261 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
262 	},
263 	{
264 		/* Broken tagged queuing drive */
265 		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
266 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
267 	},
268 	{
269 		/*
270 		 * Unfortunately, the Quantum Atlas III has the same
271 		 * problem as the Atlas II drives above.
272 		 * Reported by: "Johan Granlund" <johan@granlund.nu>
273 		 *
274 		 * For future reference, the drive with the problem was:
275 		 * QUANTUM QM39100TD-SW N1B0
276 		 *
277 		 * It's possible that Quantum will fix the problem in later
278 		 * firmware revisions.  If that happens, the quirk entry
279 		 * will need to be made specific to the firmware revisions
280 		 * with the problem.
281 		 *
282 		 */
283 		/* Reports QUEUE FULL for temporary resource shortages */
284 		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
285 		/*quirks*/0, /*mintags*/24, /*maxtags*/32
286 	},
287 	{
288 		/*
289 		 * 18 Gig Atlas III, same problem as the 9G version.
290 		 * Reported by: Andre Albsmeier
291 		 *		<andre.albsmeier@mchp.siemens.de>
292 		 *
293 		 * For future reference, the drive with the problem was:
294 		 * QUANTUM QM318000TD-S N491
295 		 */
296 		/* Reports QUEUE FULL for temporary resource shortages */
297 		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
298 		/*quirks*/0, /*mintags*/24, /*maxtags*/32
299 	},
300 	{
301 		/*
302 		 * Broken tagged queuing drive
303 		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
304 		 *         and: Martin Renters <martin@tdc.on.ca>
305 		 */
306 		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
307 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
308 	},
309 		/*
310 		 * The Seagate Medalist Pro drives have very poor write
311 		 * performance with anything more than 2 tags.
312 		 *
313 		 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
314 		 * Drive:  <SEAGATE ST36530N 1444>
315 		 *
316 		 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
317 		 * Drive:  <SEAGATE ST34520W 1281>
318 		 *
319 		 * No one has actually reported that the 9G version
320 		 * (ST39140*) of the Medalist Pro has the same problem, but
321 		 * we're assuming that it does because the 4G and 6.5G
322 		 * versions of the drive are broken.
323 		 */
324 	{
325 		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
326 		/*quirks*/0, /*mintags*/2, /*maxtags*/2
327 	},
328 	{
329 		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
330 		/*quirks*/0, /*mintags*/2, /*maxtags*/2
331 	},
332 	{
333 		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
334 		/*quirks*/0, /*mintags*/2, /*maxtags*/2
335 	},
336 	{
337 		/*
338 		 * Slow when tagged queueing is enabled.  Write performance
339 		 * steadily drops off with more and more concurrent
340 		 * transactions.  Best sequential write performance with
341 		 * tagged queueing turned off and write caching turned on.
342 		 *
343 		 * PR:  kern/10398
344 		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
345 		 * Drive:  DCAS-34330 w/ "S65A" firmware.
346 		 *
347 		 * The drive with the problem had the "S65A" firmware
348 		 * revision, and has also been reported (by Stephen J.
349 		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
350 		 * firmware revision.
351 		 *
352 		 * Although no one has reported problems with the 2 gig
353 		 * version of the DCAS drive, the assumption is that it
354 		 * has the same problems as the 4 gig version.  Therefore
355 		 * this quirk entry disables tagged queueing for all
356 		 * DCAS drives.
357 		 */
358 		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
359 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
360 	},
361 	{
362 		/* Broken tagged queuing drive */
363 		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
364 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
365 	},
366 	{
367 		/* Broken tagged queuing drive */
368 		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
369 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
370 	},
371 	{
372 		/*
373 		 * Broken tagged queuing drive.
374 		 * Submitted by:
375 		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
376 		 * in PR kern/9535
377 		 */
378 		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
379 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
380 	},
381         {
382 		/*
383 		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
384 		 * 8MB/sec.)
385 		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
386 		 * Best performance with these drives is achieved with
387 		 * tagged queueing turned off, and write caching turned on.
388 		 */
389 		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
390 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
391         },
392         {
393 		/*
394 		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
395 		 * 8MB/sec.)
396 		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
397 		 * Best performance with these drives is achieved with
398 		 * tagged queueing turned off, and write caching turned on.
399 		 */
400 		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
401 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
402         },
403 	{
404 		/*
405 		 * Doesn't handle queue full condition correctly,
406 		 * so we need to limit maxtags to what the device
407 		 * can handle instead of determining this automatically.
408 		 */
409 		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
410 		/*quirks*/0, /*mintags*/2, /*maxtags*/32
411 	},
412 	{
413 		/* Really only one LUN */
414 		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
415 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
416 	},
417 	{
418 		/* I can't believe we need a quirk for DPT volumes. */
419 		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
420 		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
421 		/*mintags*/0, /*maxtags*/255
422 	},
423 	{
424 		/*
425 		 * Many Sony CDROM drives don't like multi-LUN probing.
426 		 */
427 		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
428 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
429 	},
430 	{
431 		/*
432 		 * This drive doesn't like multiple LUN probing.
433 		 * Submitted by:  Parag Patel <parag@cgt.com>
434 		 */
435 		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
436 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
437 	},
438 	{
439 		{ T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
440 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
441 	},
442 	{
443 		/*
444 		 * The 8200 doesn't like multi-lun probing, and probably
445 		 * doesn't like serial number requests either.
446 		 */
447 		{
448 			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
449 			"EXB-8200*", "*"
450 		},
451 		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
452 	},
453 	{
454 		/*
455 		 * Let's try the same as above, but for a drive that says
456 		 * it's an IPL-6860 but is actually an EXB 8200.
457 		 */
458 		{
459 			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
460 			"IPL-6860*", "*"
461 		},
462 		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
463 	},
464 	{
465 		/*
466 		 * These Hitachi drives don't like multi-lun probing.
467 		 * The PR submitter has a DK319H, but says that the Linux
468 		 * kernel has a similar work-around for the DK312 and DK314,
469 		 * so all DK31* drives are quirked here.
470 		 * PR:            misc/18793
471 		 * Submitted by:  Paul Haddad <paul@pth.com>
472 		 */
473 		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
474 		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
475 	},
476 	{
477 		/*
478 		 * The Hitachi CJ series with J8A8 firmware apparently has
479 		 * problems with tagged commands.
480 		 * PR: 23536
481 		 * Reported by: amagai@nue.org
482 		 */
483 		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
484 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
485 	},
486 	{
487 		/*
488 		 * These are the large storage arrays.
489 		 * Submitted by:  William Carrel <william.carrel@infospace.com>
490 		 */
491 		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
492 		CAM_QUIRK_HILUNS, 2, 1024
493 	},
494 	{
495 		/*
496 		 * This old revision of the TDC3600 is also SCSI-1, and
497 		 * hangs upon serial number probing.
498 		 */
499 		{
500 			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
501 			" TDC 3600", "U07:"
502 		},
503 		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
504 	},
505 	{
506 		/*
507 		 * Maxtor Personal Storage 3000XT (Firewire)
508 		 * hangs upon serial number probing.
509 		 */
510 		{
511 			T_DIRECT, SIP_MEDIA_FIXED, "Maxtor",
512 			"1394 storage", "*"
513 		},
514 		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
515 	},
516 	{
517 		/*
518 		 * Would respond to all LUNs if asked.
519 		 */
520 		{
521 			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
522 			"CP150", "*"
523 		},
524 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
525 	},
526 	{
527 		/*
528 		 * Would respond to all LUNs if asked.
529 		 */
530 		{
531 			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
532 			"96X2*", "*"
533 		},
534 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
535 	},
536 	{
537 		/* Submitted by: Matthew Dodd <winter@jurai.net> */
538 		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
539 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
540 	},
541 	{
542 		/* Submitted by: Matthew Dodd <winter@jurai.net> */
543 		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
544 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
545 	},
546 	{
547 		/* TeraSolutions special settings for TRC-22 RAID */
548 		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
549 		  /*quirks*/0, /*mintags*/55, /*maxtags*/255
550 	},
551 	{
552 		/* Veritas Storage Appliance */
553 		{ T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
554 		  CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
555 	},
556 	{
557 		/*
558 		 * Would respond to all LUNs.  Device type and removable
559 		 * flag are jumper-selectable.
560 		 */
561 		{ T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
562 		  "Tahiti 1", "*"
563 		},
564 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
565 	},
566 	{
567 		/* EasyRAID E5A, a.k.a. Areca ARC-6010 */
568 		{ T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
569 		  CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
570 	},
571 	{
572 		/* Default tagged queuing parameters for all devices */
573 		{
574 		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
575 		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
576 		},
577 		/*quirks*/0, /*mintags*/2, /*maxtags*/255
578 	},
579 };
580 
581 static const int xpt_quirk_table_size =
582 	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
583 
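/*
 * Return values for the device matching functions.  The low nibble
 * holds flags (e.g. copy this node out to the user) and the high nibble
 * holds the traversal action to take (stop, descend, error).
 */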
584 typedef enum {
585 	DM_RET_COPY		= 0x01,
586 	DM_RET_FLAG_MASK	= 0x0f,
587 	DM_RET_NONE		= 0x00,
588 	DM_RET_STOP		= 0x10,
589 	DM_RET_DESCEND		= 0x20,
590 	DM_RET_ERROR		= 0x30,
591 	DM_RET_ACTION_MASK	= 0xf0
592 } dev_match_ret;
593 
594 typedef enum {
595 	XPT_DEPTH_BUS,
596 	XPT_DEPTH_TARGET,
597 	XPT_DEPTH_DEVICE,
598 	XPT_DEPTH_PERIPH
599 } xpt_traverse_depth;
600 
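/*
 * Describes an EDT traversal: the depth at which to stop descending and
 * the callback (with its argument) to invoke on nodes at that depth.
 */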
601 struct xpt_traverse_config {
602 	xpt_traverse_depth	depth;
603 	void			*tr_func;
604 	void			*tr_arg;
605 };
606 
607 typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
608 typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
609 typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
610 typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
611 typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
612 
613 /* Transport layer configuration information */
614 static struct xpt_softc xsoftc;
615 
616 /* Queues for our software interrupt handler */
617 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
618 static cam_isrq_t cam_bioq;
619 static struct mtx cam_bioq_lock;
620 
621 /* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
622 static SLIST_HEAD(,ccb_hdr) ccb_freeq;
623 static u_int xpt_max_ccbs;	/*
624 				 * Maximum size of ccb pool.  Modified as
625 				 * devices are added/removed or have their
626 				 * opening counts changed.
627 				 */
628 static u_int xpt_ccb_count;	/* Current count of allocated ccbs */
629 
630 struct cam_periph *xpt_periph;
631 
632 static periph_init_t xpt_periph_init;
633 
634 static periph_init_t probe_periph_init;
635 
636 static struct periph_driver xpt_driver =
637 {
638 	xpt_periph_init, "xpt",
639 	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
640 };
641 
642 static struct periph_driver probe_driver =
643 {
644 	probe_periph_init, "probe",
645 	TAILQ_HEAD_INITIALIZER(probe_driver.units)
646 };
647 
648 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
649 PERIPHDRIVER_DECLARE(probe, probe_driver);
650 
651 
652 static d_open_t xptopen;
653 static d_close_t xptclose;
654 static d_ioctl_t xptioctl;
655 
656 static struct cdevsw xpt_cdevsw = {
657 	.d_version =	D_VERSION,
658 	.d_flags =	D_NEEDGIANT,
659 	.d_open =	xptopen,
660 	.d_close =	xptclose,
661 	.d_ioctl =	xptioctl,
662 	.d_name =	"xpt",
663 };
664 
665 static struct intr_config_hook *xpt_config_hook;
666 
667 /* Registered busses */
668 static TAILQ_HEAD(,cam_eb) xpt_busses;
669 static u_int bus_generation;
670 
671 /* Storage for debugging datastructures */
672 #ifdef	CAMDEBUG
673 struct cam_path *cam_dpath;
674 u_int32_t cam_dflags;
675 u_int32_t cam_debug_delay;
676 #endif
677 
678 /* Pointers to software interrupt handlers */
679 static void *cambio_ih;
680 
681 #if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
682 #error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
683 #endif
684 
685 /*
686  * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
687  * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
688  * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
689  */
690 #if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
691     || defined(CAM_DEBUG_LUN)
692 #ifdef CAMDEBUG
693 #if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
694     || !defined(CAM_DEBUG_LUN)
695 #error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
696         and CAM_DEBUG_LUN"
697 #endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
698 #else /* !CAMDEBUG */
699 #error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
700 #endif /* CAMDEBUG */
701 #endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
702 
703 /* Our boot-time initialization hook */
704 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
705 
706 static moduledata_t cam_moduledata = {
707 	"cam",
708 	cam_module_event_handler,
709 	NULL
710 };
711 
712 static void	xpt_init(void *);
713 
714 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
715 MODULE_VERSION(cam, 1);
716 
717 
718 static cam_status	xpt_compile_path(struct cam_path *new_path,
719 					 struct cam_periph *perph,
720 					 path_id_t path_id,
721 					 target_id_t target_id,
722 					 lun_id_t lun_id);
723 
724 static void		xpt_release_path(struct cam_path *path);
725 
726 static void		xpt_async_bcast(struct async_list *async_head,
727 					u_int32_t async_code,
728 					struct cam_path *path,
729 					void *async_arg);
730 static void		xpt_dev_async(u_int32_t async_code,
731 				      struct cam_eb *bus,
732 				      struct cam_et *target,
733 				      struct cam_ed *device,
734 				      void *async_arg);
735 static path_id_t xptnextfreepathid(void);
736 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
737 static union ccb *xpt_get_ccb(struct cam_ed *device);
738 static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
739 				  u_int32_t new_priority);
740 static void	 xpt_run_dev_allocq(struct cam_eb *bus);
741 static void	 xpt_run_dev_sendq(struct cam_eb *bus);
742 static timeout_t xpt_release_devq_timeout;
743 static timeout_t xpt_release_simq_timeout;
744 static void	 xpt_release_bus(struct cam_eb *bus);
745 static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
746 					 int run_queue);
747 static struct cam_et*
748 		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
749 static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
750 static struct cam_ed*
751 		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
752 				  lun_id_t lun_id);
753 static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
754 				    struct cam_ed *device);
755 static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
756 static struct cam_eb*
757 		 xpt_find_bus(path_id_t path_id);
758 static struct cam_et*
759 		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
760 static struct cam_ed*
761 		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
762 static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
763 static void	 xpt_scan_lun(struct cam_periph *periph,
764 			      struct cam_path *path, cam_flags flags,
765 			      union ccb *ccb);
766 static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
767 static xpt_busfunc_t	xptconfigbuscountfunc;
768 static xpt_busfunc_t	xptconfigfunc;
769 static void	 xpt_config(void *arg);
770 static xpt_devicefunc_t xptpassannouncefunc;
771 static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
772 static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
773 static void	 xptpoll(struct cam_sim *sim);
774 static void	 camisr(void *);
775 #if 0
776 static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
777 static void	 xptasync(struct cam_periph *periph,
778 			  u_int32_t code, cam_path *path);
779 #endif
780 static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
781 				    u_int num_patterns, struct cam_eb *bus);
782 static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
783 				       u_int num_patterns,
784 				       struct cam_ed *device);
785 static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
786 				       u_int num_patterns,
787 				       struct cam_periph *periph);
788 static xpt_busfunc_t	xptedtbusfunc;
789 static xpt_targetfunc_t	xptedttargetfunc;
790 static xpt_devicefunc_t	xptedtdevicefunc;
791 static xpt_periphfunc_t	xptedtperiphfunc;
792 static xpt_pdrvfunc_t	xptplistpdrvfunc;
793 static xpt_periphfunc_t	xptplistperiphfunc;
794 static int		xptedtmatch(struct ccb_dev_match *cdm);
795 static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
796 static int		xptbustraverse(struct cam_eb *start_bus,
797 				       xpt_busfunc_t *tr_func, void *arg);
798 static int		xpttargettraverse(struct cam_eb *bus,
799 					  struct cam_et *start_target,
800 					  xpt_targetfunc_t *tr_func, void *arg);
801 static int		xptdevicetraverse(struct cam_et *target,
802 					  struct cam_ed *start_device,
803 					  xpt_devicefunc_t *tr_func, void *arg);
804 static int		xptperiphtraverse(struct cam_ed *device,
805 					  struct cam_periph *start_periph,
806 					  xpt_periphfunc_t *tr_func, void *arg);
807 static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
808 					xpt_pdrvfunc_t *tr_func, void *arg);
809 static int		xptpdperiphtraverse(struct periph_driver **pdrv,
810 					    struct cam_periph *start_periph,
811 					    xpt_periphfunc_t *tr_func,
812 					    void *arg);
813 static xpt_busfunc_t	xptdefbusfunc;
814 static xpt_targetfunc_t	xptdeftargetfunc;
815 static xpt_devicefunc_t	xptdefdevicefunc;
816 static xpt_periphfunc_t	xptdefperiphfunc;
817 static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
818 #ifdef notusedyet
819 static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
820 					    void *arg);
821 #endif
822 static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
823 					    void *arg);
824 #ifdef notusedyet
825 static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
826 					    void *arg);
827 #endif
828 static xpt_devicefunc_t	xptsetasyncfunc;
829 static xpt_busfunc_t	xptsetasyncbusfunc;
830 static cam_status	xptregister(struct cam_periph *periph,
831 				    void *arg);
832 static cam_status	proberegister(struct cam_periph *periph,
833 				      void *arg);
834 static void	 probeschedule(struct cam_periph *probe_periph);
835 static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
836 static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
837 static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
838 static void	 probecleanup(struct cam_periph *periph);
839 static void	 xpt_find_quirk(struct cam_ed *device);
840 #ifdef CAM_NEW_TRAN_CODE
841 static void	 xpt_devise_transport(struct cam_path *path);
842 #endif /* CAM_NEW_TRAN_CODE */
843 static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
844 					   struct cam_ed *device,
845 					   int async_update);
846 static void	 xpt_toggle_tags(struct cam_path *path);
847 static void	 xpt_start_tags(struct cam_path *path);
848 static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
849 					    struct cam_ed *dev);
850 static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
851 					   struct cam_ed *dev);
852 static __inline int periph_is_queued(struct cam_periph *periph);
853 static __inline int device_is_alloc_queued(struct cam_ed *device);
854 static __inline int device_is_send_queued(struct cam_ed *device);
855 static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
856 
857 static __inline int
858 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
859 {
860 	int retval;
861 
862 	if (dev->ccbq.devq_openings > 0) {
863 		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
864 			cam_ccbq_resize(&dev->ccbq,
865 					dev->ccbq.dev_openings
866 					+ dev->ccbq.dev_active);
867 			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
868 		}
869 		/*
870 		 * The priority of a device waiting for CCB resources
871 		 * is that of the highest priority peripheral driver
872 		 * enqueued.
873 		 */
874 		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
875 					  &dev->alloc_ccb_entry.pinfo,
876 					  CAMQ_GET_HEAD(&dev->drvq)->priority);
877 	} else {
878 		retval = 0;
879 	}
880 
881 	return (retval);
882 }
883 
884 static __inline int
885 xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
886 {
887 	int	retval;
888 
889 	if (dev->ccbq.dev_openings > 0) {
890 		/*
891 		 * The priority of a device waiting for controller
892 		 * resources is that of the highest priority CCB
893 		 * enqueued.
894 		 */
895 		retval =
896 		    xpt_schedule_dev(&bus->sim->devq->send_queue,
897 				     &dev->send_ccb_entry.pinfo,
898 				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
899 	} else {
900 		retval = 0;
901 	}
902 	return (retval);
903 }
904 
905 static __inline int
906 periph_is_queued(struct cam_periph *periph)
907 {
908 	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
909 }
910 
911 static __inline int
912 device_is_alloc_queued(struct cam_ed *device)
913 {
914 	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
915 }
916 
917 static __inline int
918 device_is_send_queued(struct cam_ed *device)
919 {
920 	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
921 }
922 
923 static __inline int
924 dev_allocq_is_runnable(struct cam_devq *devq)
925 {
926 	/*
927 	 * Have work to do.
928 	 * Have space to do more work.
929 	 * Allowed to do work.
930 	 */
931 	return ((devq->alloc_queue.qfrozen_cnt == 0)
932 	     && (devq->alloc_queue.entries > 0)
933 	     && (devq->alloc_openings > 0));
934 }
935 
936 static void
937 xpt_periph_init(void)
938 {
939 	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
940 }
941 
942 static void
943 probe_periph_init(void)
944 {
945 }
946 
947 
948 static void
949 xptdone(struct cam_periph *periph, union ccb *done_ccb)
950 {
951 	/* Caller will release the CCB */
952 	wakeup(&done_ccb->ccb_h.cbfcnp);
953 }
954 
955 static int
956 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
957 {
958 	int unit;
959 
960 	unit = minor(dev) & 0xff;
961 
962 	/*
963 	 * Only allow read-write access.
964 	 */
965 	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
966 		return(EPERM);
967 
968 	/*
969 	 * We don't allow nonblocking access.
970 	 */
971 	if ((flags & O_NONBLOCK) != 0) {
972 		printf("xpt%d: can't do nonblocking access\n", unit);
973 		return(ENODEV);
974 	}
975 
976 	/*
977 	 * We only have one transport layer right now.  If someone accesses
978 	 * us via something other than minor number 0, point out their
979 	 * mistake.
980 	 */
981 	if (unit != 0) {
982 		printf("xptopen: got invalid xpt unit %d\n", unit);
983 		return(ENXIO);
984 	}
985 
986 	/* Mark ourselves open */
987 	xsoftc.flags |= XPT_FLAG_OPEN;
988 
989 	return(0);
990 }
991 
992 static int
993 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
994 {
995 	int unit;
996 
997 	unit = minor(dev) & 0xff;
998 
999 	/*
1000 	 * We only have one transport layer right now.  If someone accesses
1001 	 * us via something other than minor number 0, point out their
1002 	 * mistake.
1003 	 */
1004 	if (unit != 0) {
1005 		printf("xptclose: got invalid xpt unit %d\n", unit);
1006 		return(ENXIO);
1007 	}
1008 
1009 	/* Mark ourselves closed */
1010 	xsoftc.flags &= ~XPT_FLAG_OPEN;
1011 
1012 	return(0);
1013 }
1014 
1015 static int
1016 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
1017 {
1018 	int unit, error;
1019 
1020 	error = 0;
1021 	unit = minor(dev) & 0xff;
1022 
1023 	/*
1024 	 * We only have one transport layer right now.  If someone accesses
1025 	 * us via something other than minor number 0, point out their
1026 	 * mistake.
1027 	 */
1028 	if (unit != 0) {
1029 		printf("xptioctl: got invalid xpt unit %d\n", unit);
1030 		return(ENXIO);
1031 	}
1032 
1033 	switch(cmd) {
1034 	/*
1035 	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
1036 	 * to accept CCB types that don't quite make sense to send through a
1037 	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
1038 	 * in the CAM spec.
1039 	 */
1040 	case CAMIOCOMMAND: {
1041 		union ccb *ccb;
1042 		union ccb *inccb;
1043 
1044 		inccb = (union ccb *)addr;
1045 
1046 		switch(inccb->ccb_h.func_code) {
1047 		case XPT_SCAN_BUS:
1048 		case XPT_RESET_BUS:
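			/*
			 * Bus-wide operations must be addressed to the
			 * wildcard target and lun; reject anything more
			 * specific.
			 */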
1049 			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
1050 			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
1051 				error = EINVAL;
1052 				break;
1053 			}
1054 			/* FALLTHROUGH */
1055 		case XPT_PATH_INQ:
1056 		case XPT_ENG_INQ:
1057 		case XPT_SCAN_LUN:
1058 
1059 			ccb = xpt_alloc_ccb();
1060 
1061 			/*
1062 			 * Create a path using the bus, target, and lun the
1063 			 * user passed in.
1064 			 */
1065 			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
1066 					    inccb->ccb_h.path_id,
1067 					    inccb->ccb_h.target_id,
1068 					    inccb->ccb_h.target_lun) !=
1069 					    CAM_REQ_CMP){
1070 				error = EINVAL;
1071 				xpt_free_ccb(ccb);
1072 				break;
1073 			}
1074 			/* Ensure all of our fields are correct */
1075 			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
1076 				      inccb->ccb_h.pinfo.priority);
1077 			xpt_merge_ccb(ccb, inccb);
1078 			ccb->ccb_h.cbfcnp = xptdone;
1079 			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
1080 			bcopy(ccb, inccb, sizeof(union ccb));
1081 			xpt_free_path(ccb->ccb_h.path);
1082 			xpt_free_ccb(ccb);
1083 			break;
1084 
1085 		case XPT_DEBUG: {
1086 			union ccb ccb;
1087 
1088 			/*
1089 			 * This is an immediate CCB, so it's okay to
1090 			 * allocate it on the stack.
1091 			 */
1092 
1093 			/*
1094 			 * Create a path using the bus, target, and lun the
1095 			 * user passed in.
1096 			 */
1097 			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
1098 					    inccb->ccb_h.path_id,
1099 					    inccb->ccb_h.target_id,
1100 					    inccb->ccb_h.target_lun) !=
1101 					    CAM_REQ_CMP){
1102 				error = EINVAL;
1103 				break;
1104 			}
1105 			/* Ensure all of our fields are correct */
1106 			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
1107 				      inccb->ccb_h.pinfo.priority);
1108 			xpt_merge_ccb(&ccb, inccb);
1109 			ccb.ccb_h.cbfcnp = xptdone;
1110 			xpt_action(&ccb);
1111 			bcopy(&ccb, inccb, sizeof(union ccb));
1112 			xpt_free_path(ccb.ccb_h.path);
1113 			break;
1114 
1115 		}
1116 		case XPT_DEV_MATCH: {
1117 			struct cam_periph_map_info mapinfo;
1118 			struct cam_path *old_path;
1119 
1120 			/*
1121 			 * We can't deal with physical addresses for this
1122 			 * type of transaction.
1123 			 */
1124 			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
1125 				error = EINVAL;
1126 				break;
1127 			}
1128 
1129 			/*
1130 			 * Save this in case the caller had it set to
1131 			 * something in particular.
1132 			 */
1133 			old_path = inccb->ccb_h.path;
1134 
1135 			/*
1136 			 * We really don't need a path for the matching
1137 			 * code.  The path is needed because of the
1138 			 * debugging statements in xpt_action().  They
1139 			 * assume that the CCB has a valid path.
1140 			 */
1141 			inccb->ccb_h.path = xpt_periph->path;
1142 
1143 			bzero(&mapinfo, sizeof(mapinfo));
1144 
1145 			/*
1146 			 * Map the pattern and match buffers into kernel
1147 			 * virtual address space.
1148 			 */
1149 			error = cam_periph_mapmem(inccb, &mapinfo);
1150 
1151 			if (error) {
1152 				inccb->ccb_h.path = old_path;
1153 				break;
1154 			}
1155 
1156 			/*
1157 			 * This is an immediate CCB, we can send it on directly.
1158 			 */
1159 			xpt_action(inccb);
1160 
1161 			/*
1162 			 * Map the buffers back into user space.
1163 			 */
1164 			cam_periph_unmapmem(inccb, &mapinfo);
1165 
1166 			inccb->ccb_h.path = old_path;
1167 
1168 			error = 0;
1169 			break;
1170 		}
1171 		default:
1172 			error = ENOTSUP;
1173 			break;
1174 		}
1175 		break;
1176 	}
1177 	/*
1178 	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
1179 	 * with the peripheral driver name and unit number filled in.  The other
1180 	 * fields don't really matter as input.  The passthrough driver name
1181 	 * ("pass") and unit number are passed back in the ccb.  The current
1182 	 * device generation number, the index into the device peripheral
1183 	 * driver list, and the status are also passed back.  Note that
1184 	 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
1185 	 * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
1186 	 * (or rather should be) impossible for the device peripheral driver
1187 	 * list to change since we look at the whole thing in one pass, and
1188 	 * we do it with splcam protection.
1189 	 *
1190 	 */
1191 	case CAMGETPASSTHRU: {
1192 		union ccb *ccb;
1193 		struct cam_periph *periph;
1194 		struct periph_driver **p_drv;
1195 		char   *name;
1196 		u_int unit;
1197 		u_int cur_generation;
1198 		int base_periph_found;
1199 		int splbreaknum;
1200 		int s;
1201 
1202 		ccb = (union ccb *)addr;
1203 		unit = ccb->cgdl.unit_number;
1204 		name = ccb->cgdl.periph_name;
1205 		/*
1206 		 * Every 100 devices, we want to drop our spl protection to
1207 		 * give the software interrupt handler a chance to run.
1208 		 * Most systems won't run into this check, but this should
1209 		 * avoid starvation in the software interrupt handler in
1210 		 * large systems.
1211 		 */
1212 		splbreaknum = 100;
1213 
1216 		base_periph_found = 0;
1217 
1218 		/*
1219 		 * Sanity check -- make sure we don't get a null peripheral
1220 		 * driver name.
1221 		 */
1222 		if (*ccb->cgdl.periph_name == '\0') {
1223 			error = EINVAL;
1224 			break;
1225 		}
1226 
1227 		/* Keep the list from changing while we traverse it */
1228 		s = splcam();
1229 ptstartover:
1230 		cur_generation = xsoftc.generation;
1231 
1232 		/* first find our driver in the list of drivers */
1233 		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
1234 			if (strcmp((*p_drv)->driver_name, name) == 0)
1235 				break;
1236 
1237 		if (*p_drv == NULL) {
1238 			splx(s);
1239 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1240 			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1241 			*ccb->cgdl.periph_name = '\0';
1242 			ccb->cgdl.unit_number = 0;
1243 			error = ENOENT;
1244 			break;
1245 		}
1246 
1247 		/*
1248 		 * Run through every peripheral instance of this driver
1249 		 * and check to see whether it matches the unit passed
1250 		 * in by the user.  If it does, get out of the loops and
1251 		 * find the passthrough driver associated with that
1252 		 * peripheral driver.
1253 		 */
1254 		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
1255 		     periph = TAILQ_NEXT(periph, unit_links)) {
1256 
1257 			if (periph->unit_number == unit) {
1258 				break;
1259 			} else if (--splbreaknum == 0) {
1260 				splx(s);
1261 				s = splcam();
1262 				splbreaknum = 100;
1263 				if (cur_generation != xsoftc.generation)
1264 				       goto ptstartover;
1265 			}
1266 		}
1267 		/*
1268 		 * If we found the peripheral driver that the user passed
1269 		 * in, go through all of the peripheral drivers for that
1270 		 * particular device and look for a passthrough driver.
1271 		 */
1272 		if (periph != NULL) {
1273 			struct cam_ed *device;
1274 			int i;
1275 
1276 			base_periph_found = 1;
1277 			device = periph->path->device;
1278 			for (i = 0, periph = SLIST_FIRST(&device->periphs);
1279 			     periph != NULL;
1280 			     periph = SLIST_NEXT(periph, periph_links), i++) {
1281 				/*
1282 				 * Check to see whether we have a
1283 				 * passthrough device or not.
1284 				 */
1285 				if (strcmp(periph->periph_name, "pass") == 0) {
1286 					/*
1287 					 * Fill in the getdevlist fields.
1288 					 */
1289 					strcpy(ccb->cgdl.periph_name,
1290 					       periph->periph_name);
1291 					ccb->cgdl.unit_number =
1292 						periph->unit_number;
1293 					if (SLIST_NEXT(periph, periph_links))
1294 						ccb->cgdl.status =
1295 							CAM_GDEVLIST_MORE_DEVS;
1296 					else
1297 						ccb->cgdl.status =
1298 						       CAM_GDEVLIST_LAST_DEVICE;
1299 					ccb->cgdl.generation =
1300 						device->generation;
1301 					ccb->cgdl.index = i;
1302 					/*
1303 					 * Fill in some CCB header fields
1304 					 * that the user may want.
1305 					 */
1306 					ccb->ccb_h.path_id =
1307 						periph->path->bus->path_id;
1308 					ccb->ccb_h.target_id =
1309 						periph->path->target->target_id;
1310 					ccb->ccb_h.target_lun =
1311 						periph->path->device->lun_id;
1312 					ccb->ccb_h.status = CAM_REQ_CMP;
1313 					break;
1314 				}
1315 			}
1316 		}
1317 
1318 		/*
1319 		 * If the periph is null here, one of two things has
1320 		 * happened.  The first possibility is that we couldn't
1321 		 * find the unit number of the particular peripheral driver
1322 	 * that the user is asking about, e.g. the user asks for
1323 		 * the passthrough driver for "da11".  We find the list of
1324 		 * "da" peripherals all right, but there is no unit 11.
1325 		 * The other possibility is that we went through the list
1326 		 * of peripheral drivers attached to the device structure,
1327 		 * but didn't find one with the name "pass".  Either way,
1328 		 * we return ENOENT, since we couldn't find something.
1329 		 */
1330 		if (periph == NULL) {
1331 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1332 			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1333 			*ccb->cgdl.periph_name = '\0';
1334 			ccb->cgdl.unit_number = 0;
1335 			error = ENOENT;
1336 			/*
1337 			 * It is unfortunate that this is even necessary,
1338 			 * but there are many, many clueless users out there.
1339 			 * If this is true, the user is looking for the
1340 			 * passthrough driver, but doesn't have one in his
1341 			 * kernel.
1342 			 */
1343 			if (base_periph_found == 1) {
1344 				printf("xptioctl: pass driver is not in the "
1345 				       "kernel\n");
1346 				printf("xptioctl: put \"device pass0\" in "
1347 				       "your kernel config file\n");
1348 			}
1349 		}
1350 		splx(s);
1351 		break;
1352 		}
1353 	default:
1354 		error = ENOTTY;
1355 		break;
1356 	}
1357 
1358 	return(error);
1359 }
1360 
1361 static int
1362 cam_module_event_handler(module_t mod, int what, void *arg)
1363 {
1364 	if (what == MOD_LOAD) {
1365 		xpt_init(NULL);
1366 	} else if (what == MOD_UNLOAD) {
1367 		return EBUSY;
1368 	} else {
1369 		return EOPNOTSUPP;
1370 	}
1371 
1372 	return 0;
1373 }
1374 
1375 /* Functions accessed by the peripheral drivers */
1376 static void
1377 xpt_init(void *dummy)
1379 {
1380 	struct cam_sim *xpt_sim;
1381 	struct cam_path *path;
1382 	struct cam_devq *devq;
1383 	cam_status status;
1384 
1385 	TAILQ_INIT(&xpt_busses);
1386 	TAILQ_INIT(&cam_bioq);
1387 	SLIST_INIT(&ccb_freeq);
1388 	STAILQ_INIT(&highpowerq);
1389 
1390 	mtx_init(&cam_bioq_lock, "CAM BIOQ lock", NULL, MTX_DEF);
1391 
1392 	/*
1393 	 * The xpt layer is, itself, the equivalent of a SIM.
1394 	 * Allow 16 ccbs in the ccb pool for it.  This should
1395 	 * give decent parallelism when we probe busses and
1396 	 * perform other XPT functions.
1397 	 */
1398 	devq = cam_simq_alloc(16);
1399 	xpt_sim = cam_sim_alloc(xptaction,
1400 				xptpoll,
1401 				"xpt",
1402 				/*softc*/NULL,
1403 				/*unit*/0,
1404 				/*max_dev_transactions*/0,
1405 				/*max_tagged_dev_transactions*/0,
1406 				devq);
1407 	xpt_max_ccbs = 16;
1408 
1409 	xpt_bus_register(xpt_sim, /*bus #*/0);
1410 
1411 	/*
1412 	 * Looking at the XPT from the SIM layer, the XPT is
1413 	 * the equivalent of a peripheral driver.  Allocate
1414 	 * a peripheral driver entry for us.
1415 	 */
1416 	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1417 				      CAM_TARGET_WILDCARD,
1418 				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1419 		printf("xpt_init: xpt_create_path failed with status %#x,"
1420 		       " failing attach\n", status);
1421 		return;
1422 	}
1423 
1424 	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1425 			 path, NULL, 0, NULL);
1426 	xpt_free_path(path);
1427 
1428 	xpt_sim->softc = xpt_periph;
1429 
1430 	/*
1431 	 * Register a callback for when interrupts are enabled.
1432 	 */
1433 	xpt_config_hook =
1434 	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
1435 					      M_TEMP, M_NOWAIT | M_ZERO);
1436 	if (xpt_config_hook == NULL) {
1437 		printf("xpt_init: Cannot malloc config hook "
1438 		       "- failing attach\n");
1439 		return;
1440 	}
1441 
1442 	xpt_config_hook->ich_func = xpt_config;
1443 	if (config_intrhook_establish(xpt_config_hook) != 0) {
1444 		free(xpt_config_hook, M_TEMP);
1445 		printf("xpt_init: config_intrhook_establish failed "
1446 		       "- failing attach\n");
		return;
1447 	}
1448 
1449 	/* Install our software interrupt handlers */
1450 	swi_add(NULL, "cambio", camisr, &cam_bioq, SWI_CAMBIO, 0, &cambio_ih);
1451 }
1452 
1453 static cam_status
1454 xptregister(struct cam_periph *periph, void *arg)
1455 {
1456 	if (periph == NULL) {
1457 		printf("xptregister: periph was NULL!!\n");
1458 		return(CAM_REQ_CMP_ERR);
1459 	}
1460 
1461 	periph->softc = NULL;
1462 
1463 	xpt_periph = periph;
1464 
1465 	return(CAM_REQ_CMP);
1466 }
1467 
1468 int32_t
1469 xpt_add_periph(struct cam_periph *periph)
1470 {
1471 	struct cam_ed *device;
1472 	int32_t	 status;
1473 	struct periph_list *periph_head;
1474 
1475 	GIANT_REQUIRED;
1476 
1477 	device = periph->path->device;
1478 
1479 	periph_head = &device->periphs;
1480 
1481 	status = CAM_REQ_CMP;
1482 
1483 	if (device != NULL) {
1484 		int s;
1485 
1486 		/*
1487 		 * Make room for this peripheral
1488 		 * so it will fit in the queue
1489 		 * when it's scheduled to run
1490 		 */
1491 		s = splsoftcam();
1492 		status = camq_resize(&device->drvq,
1493 				     device->drvq.array_size + 1);
1494 
1495 		device->generation++;
1496 
1497 		SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1498 
1499 		splx(s);
1500 	}
1501 
1502 	xsoftc.generation++;
1503 
1504 	return (status);
1505 }
1506 
1507 void
1508 xpt_remove_periph(struct cam_periph *periph)
1509 {
1510 	struct cam_ed *device;
1511 
1512 	GIANT_REQUIRED;
1513 
1514 	device = periph->path->device;
1515 
1516 	if (device != NULL) {
1517 		int s;
1518 		struct periph_list *periph_head;
1519 
1520 		periph_head = &device->periphs;
1521 
1522 		/* Release the slot for this peripheral */
1523 		s = splsoftcam();
1524 		camq_resize(&device->drvq, device->drvq.array_size - 1);
1525 
1526 		device->generation++;
1527 
1528 		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1529 
1530 		splx(s);
1531 	}
1532 
1533 	xsoftc.generation++;
1534 
1535 }
1536 
1537 #ifdef CAM_NEW_TRAN_CODE
1538 
1539 void
1540 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1541 {
1542 	struct	ccb_pathinq cpi;
1543 	struct	ccb_trans_settings cts;
1544 	struct	cam_path *path;
1545 	u_int	speed;
1546 	u_int	freq;
1547 	u_int	mb;
1548 	int	s;
1549 
1550 	GIANT_REQUIRED;
1551 
1552 	path = periph->path;
1553 	/*
1554 	 * To ensure that this is printed in one piece,
1555 	 * mask out CAM interrupts.
1556 	 */
1557 	s = splsoftcam();
1558 	printf("%s%d at %s%d bus %d target %d lun %d\n",
1559 	       periph->periph_name, periph->unit_number,
1560 	       path->bus->sim->sim_name,
1561 	       path->bus->sim->unit_number,
1562 	       path->bus->sim->bus_id,
1563 	       path->target->target_id,
1564 	       path->device->lun_id);
1565 	printf("%s%d: ", periph->periph_name, periph->unit_number);
1566 	scsi_print_inquiry(&path->device->inq_data);
1567 	if (bootverbose && path->device->serial_num_len > 0) {
1568 		/* Don't wrap the screen  - print only the first 60 chars */
1569 		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1570 		       periph->unit_number, path->device->serial_num);
1571 	}
1572 	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1573 	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1574 	cts.type = CTS_TYPE_CURRENT_SETTINGS;
1575 	xpt_action((union ccb*)&cts);
1576 
1577 	/* Ask the SIM for its base transfer speed */
1578 	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1579 	cpi.ccb_h.func_code = XPT_PATH_INQ;
1580 	xpt_action((union ccb *)&cpi);
1581 
1582 	speed = cpi.base_transfer_speed;
1583 	freq = 0;
1584 	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1585 		struct	ccb_trans_settings_spi *spi;
1586 
1587 		spi = &cts.xport_specific.spi;
1588 		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
1589 		  && spi->sync_offset != 0) {
1590 			freq = scsi_calc_syncsrate(spi->sync_period);
1591 			speed = freq;
1592 		}
1593 
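		/* bus_width is encoded as the log2 of the width in bytes. */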
1594 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
1595 			speed *= (0x01 << spi->bus_width);
1596 	}
1597 
1598 	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1599 		struct	ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
1600 		if (fc->valid & CTS_FC_VALID_SPEED) {
1601 			speed = fc->bitrate;
1602 		}
1603 	}
1604 
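	/* speed is in KB/s throughout; convert to MB/s for display. */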
1605 	mb = speed / 1000;
1606 	if (mb > 0)
1607 		printf("%s%d: %d.%03dMB/s transfers",
1608 		       periph->periph_name, periph->unit_number,
1609 		       mb, speed % 1000);
1610 	else
1611 		printf("%s%d: %dKB/s transfers", periph->periph_name,
1612 		       periph->unit_number, speed);
1613 	/* Report additional information about SPI connections */
1614 	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1615 		struct	ccb_trans_settings_spi *spi;
1616 
1617 		spi = &cts.xport_specific.spi;
1618 		if (freq != 0) {
1619 			printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
1620 			       freq % 1000,
1621 			       (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
1622 			     ? " DT" : "",
1623 			       spi->sync_offset);
1624 		}
1625 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
1626 		 && spi->bus_width > 0) {
1627 			if (freq != 0) {
1628 				printf(", ");
1629 			} else {
1630 				printf(" (");
1631 			}
1632 			printf("%dbit)", 8 * (0x01 << spi->bus_width));
1633 		} else if (freq != 0) {
1634 			printf(")");
1635 		}
1636 	}
1637 	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1638 		struct	ccb_trans_settings_fc *fc;
1639 
1640 		fc = &cts.xport_specific.fc;
1641 		if (fc->valid & CTS_FC_VALID_WWNN)
1642 			printf(" WWNN 0x%llx", (long long) fc->wwnn);
1643 		if (fc->valid & CTS_FC_VALID_WWPN)
1644 			printf(" WWPN 0x%llx", (long long) fc->wwpn);
1645 		if (fc->valid & CTS_FC_VALID_PORT)
1646 			printf(" PortID 0x%x", fc->port);
1647 	}
1648 
1649 	if (path->device->inq_flags & SID_CmdQue
1650 	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1651 		printf("\n%s%d: Tagged Queueing Enabled",
1652 		       periph->periph_name, periph->unit_number);
1653 	}
1654 	printf("\n");
1655 
1656 	/*
1657 	 * We only want to print the caller's announce string if they've
1658 	 * passed one in..
1659 	 */
1660 	if (announce_string != NULL)
1661 		printf("%s%d: %s\n", periph->periph_name,
1662 		       periph->unit_number, announce_string);
1663 	splx(s);
1664 }
1665 #else /* CAM_NEW_TRAN_CODE */
1666 void
1667 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1668 {
1669 	int s;
1670 	u_int mb;
1671 	struct cam_path *path;
1672 	struct ccb_trans_settings cts;
1673 
1674 	GIANT_REQUIRED;
1675 
1676 	path = periph->path;
1677 	/*
1678 	 * To ensure that this is printed in one piece,
1679 	 * mask out CAM interrupts.
1680 	 */
1681 	s = splsoftcam();
1682 	printf("%s%d at %s%d bus %d target %d lun %d\n",
1683 	       periph->periph_name, periph->unit_number,
1684 	       path->bus->sim->sim_name,
1685 	       path->bus->sim->unit_number,
1686 	       path->bus->sim->bus_id,
1687 	       path->target->target_id,
1688 	       path->device->lun_id);
1689 	printf("%s%d: ", periph->periph_name, periph->unit_number);
1690 	scsi_print_inquiry(&path->device->inq_data);
1691 	if ((bootverbose)
1692 	 && (path->device->serial_num_len > 0)) {
1693 		/* Don't wrap the screen  - print only the first 60 chars */
1694 		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1695 		       periph->unit_number, path->device->serial_num);
1696 	}
1697 	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1698 	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1699 	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1700 	xpt_action((union ccb*)&cts);
1701 	if (cts.ccb_h.status == CAM_REQ_CMP) {
1702 		u_int speed;
1703 		u_int freq;
1704 
1705 		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1706 		  && cts.sync_offset != 0) {
1707 			freq = scsi_calc_syncsrate(cts.sync_period);
1708 			speed = freq;
1709 		} else {
1710 			struct ccb_pathinq cpi;
1711 
1712 			/* Ask the SIM for its base transfer speed */
1713 			xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1714 			cpi.ccb_h.func_code = XPT_PATH_INQ;
1715 			xpt_action((union ccb *)&cpi);
1716 
1717 			speed = cpi.base_transfer_speed;
1718 			freq = 0;
1719 		}
1720 		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
1721 			speed *= (0x01 << cts.bus_width);
1722 		mb = speed / 1000;
1723 		if (mb > 0)
1724 			printf("%s%d: %d.%03dMB/s transfers",
1725 			       periph->periph_name, periph->unit_number,
1726 			       mb, speed % 1000);
1727 		else
1728 			printf("%s%d: %dKB/s transfers", periph->periph_name,
1729 			       periph->unit_number, speed);
1730 		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1731 		 && cts.sync_offset != 0) {
1732 			printf(" (%d.%03dMHz, offset %d", freq / 1000,
1733 			       freq % 1000, cts.sync_offset);
1734 		}
1735 		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
1736 		 && cts.bus_width > 0) {
1737 			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1738 			 && cts.sync_offset != 0) {
1739 				printf(", ");
1740 			} else {
1741 				printf(" (");
1742 			}
1743 			printf("%dbit)", 8 * (0x01 << cts.bus_width));
1744 		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1745 			&& cts.sync_offset != 0) {
1746 			printf(")");
1747 		}
1748 
1749 		if (path->device->inq_flags & SID_CmdQue
1750 		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1751 			printf(", Tagged Queueing Enabled");
1752 		}
1753 
1754 		printf("\n");
1755 	} else if (path->device->inq_flags & SID_CmdQue
1756 	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1757 		printf("%s%d: Tagged Queueing Enabled\n",
1758 		       periph->periph_name, periph->unit_number);
1759 	}
1760 
1761 	/*
1762 	 * We only want to print the caller's announce string if they've
1763 	 * passed one in.
1764 	 */
1765 	if (announce_string != NULL)
1766 		printf("%s%d: %s\n", periph->periph_name,
1767 		       periph->unit_number, announce_string);
1768 	splx(s);
1769 }
1770 
1771 #endif /* CAM_NEW_TRAN_CODE */
1772 
1773 static dev_match_ret
1774 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1775 	    struct cam_eb *bus)
1776 {
1777 	dev_match_ret retval;
1778 	int i;
1779 
1780 	retval = DM_RET_NONE;
1781 
1782 	/*
1783 	 * If we aren't given something to match against, that's an error.
1784 	 */
1785 	if (bus == NULL)
1786 		return(DM_RET_ERROR);
1787 
1788 	/*
1789 	 * If there are no match entries, then this bus matches no
1790 	 * matter what.
1791 	 */
1792 	if ((patterns == NULL) || (num_patterns == 0))
1793 		return(DM_RET_DESCEND | DM_RET_COPY);
1794 
1795 	for (i = 0; i < num_patterns; i++) {
1796 		struct bus_match_pattern *cur_pattern;
1797 
1798 		/*
1799 		 * If the pattern in question isn't for a bus node, we
1800 		 * aren't interested.  However, we do indicate to the
1801 		 * calling routine that we should continue descending the
1802 		 * tree, since the user wants to match against lower-level
1803 		 * EDT elements.
1804 		 */
1805 		if (patterns[i].type != DEV_MATCH_BUS) {
1806 			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1807 				retval |= DM_RET_DESCEND;
1808 			continue;
1809 		}
1810 
1811 		cur_pattern = &patterns[i].pattern.bus_pattern;
1812 
1813 		/*
1814 		 * If they want to match any bus node, we give them any
1815 		 * bus node.
1816 		 */
1817 		if (cur_pattern->flags == BUS_MATCH_ANY) {
1818 			/* set the copy flag */
1819 			retval |= DM_RET_COPY;
1820 
1821 			/*
1822 			 * If we've already decided on an action, go ahead
1823 			 * and return.
1824 			 */
1825 			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1826 				return(retval);
1827 		}
1828 
1829 		/*
1830 		 * Not sure why someone would do this...
1831 		 */
1832 		if (cur_pattern->flags == BUS_MATCH_NONE)
1833 			continue;
1834 
1835 		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1836 		 && (cur_pattern->path_id != bus->path_id))
1837 			continue;
1838 
1839 		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1840 		 && (cur_pattern->bus_id != bus->sim->bus_id))
1841 			continue;
1842 
1843 		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1844 		 && (cur_pattern->unit_number != bus->sim->unit_number))
1845 			continue;
1846 
1847 		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1848 		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1849 			     DEV_IDLEN) != 0))
1850 			continue;
1851 
1852 		/*
1853 		 * If we get to this point, the user definitely wants
1854 		 * information on this bus.  So tell the caller to copy the
1855 		 * data out.
1856 		 */
1857 		retval |= DM_RET_COPY;
1858 
1859 		/*
1860 		 * If the return action has been set to descend, then we
1861 		 * know that we've already seen a non-bus matching
1862 		 * expression, therefore we need to further descend the tree.
1863 		 * This won't change by continuing around the loop, so we
1864 		 * go ahead and return.  If we haven't seen a non-bus
1865 		 * matching expression, we keep going around the loop until
1866 		 * we exhaust the matching expressions.  We'll set the stop
1867 		 * flag once we fall out of the loop.
1868 		 */
1869 		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1870 			return(retval);
1871 	}
1872 
1873 	/*
1874 	 * If the return action hasn't been set to descend yet, that means
1875 	 * we haven't seen anything other than bus matching patterns.  So
1876 	 * tell the caller to stop descending the tree -- the user doesn't
1877 	 * want to match against lower level tree elements.
1878 	 */
1879 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1880 		retval |= DM_RET_STOP;
1881 
1882 	return(retval);
1883 }
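
/*
 * Editor's note: a minimal sketch (not compiled) of a caller-built
 * pattern that exercises the BUS_MATCH_NAME arm above; the SIM name
 * "ahc" is only an example.
 */
#if 0
	struct dev_match_pattern pat;

	bzero(&pat, sizeof(pat));
	pat.type = DEV_MATCH_BUS;
	pat.pattern.bus_pattern.flags = BUS_MATCH_NAME;
	strncpy(pat.pattern.bus_pattern.dev_name, "ahc", DEV_IDLEN);
	/*
	 * xptbusmatch(&pat, 1, bus) then returns DM_RET_COPY|DM_RET_STOP
	 * for a bus whose SIM is named "ahc", and DM_RET_STOP otherwise.
	 */
#endif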
1884 
1885 static dev_match_ret
1886 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1887 	       struct cam_ed *device)
1888 {
1889 	dev_match_ret retval;
1890 	int i;
1891 
1892 	retval = DM_RET_NONE;
1893 
1894 	/*
1895 	 * If we aren't given something to match against, that's an error.
1896 	 */
1897 	if (device == NULL)
1898 		return(DM_RET_ERROR);
1899 
1900 	/*
1901 	 * If there are no match entries, then this device matches no
1902 	 * matter what.
1903 	 */
1904 	if ((patterns == NULL) || (num_patterns == 0))
1905 		return(DM_RET_DESCEND | DM_RET_COPY);
1906 
1907 	for (i = 0; i < num_patterns; i++) {
1908 		struct device_match_pattern *cur_pattern;
1909 
1910 		/*
1911 		 * If the pattern in question isn't for a device node, we
1912 		 * aren't interested.
1913 		 */
1914 		if (patterns[i].type != DEV_MATCH_DEVICE) {
1915 			if ((patterns[i].type == DEV_MATCH_PERIPH)
1916 			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1917 				retval |= DM_RET_DESCEND;
1918 			continue;
1919 		}
1920 
1921 		cur_pattern = &patterns[i].pattern.device_pattern;
1922 
1923 		/*
1924 		 * If they want to match any device node, we give them any
1925 		 * device node.
1926 		 */
1927 		if (cur_pattern->flags == DEV_MATCH_ANY) {
1928 			/* set the copy flag */
1929 			retval |= DM_RET_COPY;
1930 
1931 
1932 			/*
1933 			 * If we've already decided on an action, go ahead
1934 			 * and return.
1935 			 */
1936 			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1937 				return(retval);
1938 		}
1939 
1940 		/*
1941 		 * Not sure why someone would do this...
1942 		 */
1943 		if (cur_pattern->flags == DEV_MATCH_NONE)
1944 			continue;
1945 
1946 		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1947 		 && (cur_pattern->path_id != device->target->bus->path_id))
1948 			continue;
1949 
1950 		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1951 		 && (cur_pattern->target_id != device->target->target_id))
1952 			continue;
1953 
1954 		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1955 		 && (cur_pattern->target_lun != device->lun_id))
1956 			continue;
1957 
1958 		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1959 		 && (cam_quirkmatch((caddr_t)&device->inq_data,
1960 				    (caddr_t)&cur_pattern->inq_pat,
1961 				    1, sizeof(cur_pattern->inq_pat),
1962 				    scsi_static_inquiry_match) == NULL))
1963 			continue;
1964 
1965 		/*
1966 		 * If we get to this point, the user definitely wants
1967 		 * information on this device.  So tell the caller to copy
1968 		 * the data out.
1969 		 */
1970 		retval |= DM_RET_COPY;
1971 
1972 		/*
1973 		 * If the return action has been set to descend, then we
1974 		 * know that we've already seen a peripheral matching
1975 		 * expression, therefore we need to further descend the tree.
1976 		 * This won't change by continuing around the loop, so we
1977 		 * go ahead and return.  If we haven't seen a peripheral
1978 		 * matching expression, we keep going around the loop until
1979 		 * we exhaust the matching expressions.  We'll set the stop
1980 		 * flag once we fall out of the loop.
1981 		 */
1982 		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1983 			return(retval);
1984 	}
1985 
1986 	/*
1987 	 * If the return action hasn't been set to descend yet, that means
1988 	 * we haven't seen any peripheral matching patterns.  So tell the
1989 	 * caller to stop descending the tree -- the user doesn't want to
1990 	 * match against lower level tree elements.
1991 	 */
1992 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1993 		retval |= DM_RET_STOP;
1994 
1995 	return(retval);
1996 }
1997 
1998 /*
1999  * Match a single peripheral against any number of match patterns.
2000  */
2001 static dev_match_ret
2002 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
2003 	       struct cam_periph *periph)
2004 {
2005 	dev_match_ret retval;
2006 	int i;
2007 
2008 	/*
2009 	 * If we aren't given something to match against, that's an error.
2010 	 */
2011 	if (periph == NULL)
2012 		return(DM_RET_ERROR);
2013 
2014 	/*
2015 	 * If there are no match entries, then this peripheral matches no
2016 	 * matter what.
2017 	 */
2018 	if ((patterns == NULL) || (num_patterns == 0))
2019 		return(DM_RET_STOP | DM_RET_COPY);
2020 
2021 	/*
2022 	 * There aren't any nodes below a peripheral node, so there's no
2023 	 * reason to descend the tree any further.
2024 	 */
2025 	retval = DM_RET_STOP;
2026 
2027 	for (i = 0; i < num_patterns; i++) {
2028 		struct periph_match_pattern *cur_pattern;
2029 
2030 		/*
2031 		 * If the pattern in question isn't for a peripheral, we
2032 		 * aren't interested.
2033 		 */
2034 		if (patterns[i].type != DEV_MATCH_PERIPH)
2035 			continue;
2036 
2037 		cur_pattern = &patterns[i].pattern.periph_pattern;
2038 
2039 		/*
2040 		 * If they want to match on anything, then we will do so.
2041 		 */
2042 		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
2043 			/* set the copy flag */
2044 			retval |= DM_RET_COPY;
2045 
2046 			/*
2047 			 * We've already set the return action to stop,
2048 			 * since there are no nodes below peripherals in
2049 			 * the tree.
2050 			 */
2051 			return(retval);
2052 		}
2053 
2054 		/*
2055 		 * Not sure why someone would do this...
2056 		 */
2057 		if (cur_pattern->flags == PERIPH_MATCH_NONE)
2058 			continue;
2059 
2060 		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
2061 		 && (cur_pattern->path_id != periph->path->bus->path_id))
2062 			continue;
2063 
2064 		/*
2065 		 * For the target and lun ids, we have to make sure the
2066 		 * target and lun pointers aren't NULL.  The xpt peripheral
2067 		 * has a wildcard target and device.
2068 		 */
2069 		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2070 		 && ((periph->path->target == NULL)
2071 		 ||(cur_pattern->target_id != periph->path->target->target_id)))
2072 			continue;
2073 
2074 		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2075 		 && ((periph->path->device == NULL)
2076 		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
2077 			continue;
2078 
2079 		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2080 		 && (cur_pattern->unit_number != periph->unit_number))
2081 			continue;
2082 
2083 		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2084 		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
2085 			     DEV_IDLEN) != 0))
2086 			continue;
2087 
2088 		/*
2089 		 * If we get to this point, the user definitely wants
2090 		 * information on this peripheral.  So tell the caller to
2091 		 * copy the data out.
2092 		 */
2093 		retval |= DM_RET_COPY;
2094 
2095 		/*
2096 		 * The return action has already been set to stop, since
2097 		 * peripherals don't have any nodes below them in the EDT.
2098 		 */
2099 		return(retval);
2100 	}
2101 
2102 	/*
2103 	 * If we get to this point, the peripheral that was passed in
2104 	 * doesn't match any of the patterns.
2105 	 */
2106 	return(retval);
2107 }
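
/*
 * Editor's note: by analogy with the bus sketch above, a pattern for
 * the single peripheral "da1" would combine the name and unit flags
 * (hedged illustration only):
 *
 *	pat.type = DEV_MATCH_PERIPH;
 *	pat.pattern.periph_pattern.flags =
 *	    PERIPH_MATCH_NAME | PERIPH_MATCH_UNIT;
 *	strncpy(pat.pattern.periph_pattern.periph_name, "da", DEV_IDLEN);
 *	pat.pattern.periph_pattern.unit_number = 1;
 */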
2108 
2109 static int
2110 xptedtbusfunc(struct cam_eb *bus, void *arg)
2111 {
2112 	struct ccb_dev_match *cdm;
2113 	dev_match_ret retval;
2114 
2115 	cdm = (struct ccb_dev_match *)arg;
2116 
2117 	/*
2118 	 * If our position is for something deeper in the tree, that means
2119 	 * that we've already seen this node.  So, we keep going down.
2120 	 */
2121 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2122 	 && (cdm->pos.cookie.bus == bus)
2123 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2124 	 && (cdm->pos.cookie.target != NULL))
2125 		retval = DM_RET_DESCEND;
2126 	else
2127 		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2128 
2129 	/*
2130 	 * If we got an error, bail out of the search.
2131 	 */
2132 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2133 		cdm->status = CAM_DEV_MATCH_ERROR;
2134 		return(0);
2135 	}
2136 
2137 	/*
2138 	 * If the copy flag is set, copy this bus out.
2139 	 */
2140 	if (retval & DM_RET_COPY) {
2141 		int spaceleft, j;
2142 
2143 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2144 			sizeof(struct dev_match_result));
2145 
2146 		/*
2147 		 * If we don't have enough space to put in another
2148 		 * match result, save our position and tell the
2149 		 * user there are more devices to check.
2150 		 */
2151 		if (spaceleft < sizeof(struct dev_match_result)) {
2152 			bzero(&cdm->pos, sizeof(cdm->pos));
2153 			cdm->pos.position_type =
2154 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2155 
2156 			cdm->pos.cookie.bus = bus;
2157 			cdm->pos.generations[CAM_BUS_GENERATION]=
2158 				bus_generation;
2159 			cdm->status = CAM_DEV_MATCH_MORE;
2160 			return(0);
2161 		}
2162 		j = cdm->num_matches;
2163 		cdm->num_matches++;
2164 		cdm->matches[j].type = DEV_MATCH_BUS;
2165 		cdm->matches[j].result.bus_result.path_id = bus->path_id;
2166 		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2167 		cdm->matches[j].result.bus_result.unit_number =
2168 			bus->sim->unit_number;
2169 		strncpy(cdm->matches[j].result.bus_result.dev_name,
2170 			bus->sim->sim_name, DEV_IDLEN);
2171 	}
2172 
2173 	/*
2174 	 * If the user is only interested in busses, there's no
2175 	 * reason to descend to the next level in the tree.
2176 	 */
2177 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2178 		return(1);
2179 
2180 	/*
2181 	 * If there is a target generation recorded, check it to
2182 	 * make sure the target list hasn't changed.
2183 	 */
2184 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2185 	 && (bus == cdm->pos.cookie.bus)
2186 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2187 	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2188 	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2189 	     bus->generation)) {
2190 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2191 		return(0);
2192 	}
2193 
2194 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2195 	 && (cdm->pos.cookie.bus == bus)
2196 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2197 	 && (cdm->pos.cookie.target != NULL))
2198 		return(xpttargettraverse(bus,
2199 					(struct cam_et *)cdm->pos.cookie.target,
2200 					 xptedttargetfunc, arg));
2201 	else
2202 		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2203 }
2204 
2205 static int
2206 xptedttargetfunc(struct cam_et *target, void *arg)
2207 {
2208 	struct ccb_dev_match *cdm;
2209 
2210 	cdm = (struct ccb_dev_match *)arg;
2211 
2212 	/*
2213 	 * If there is a device list generation recorded, check it to
2214 	 * make sure the device list hasn't changed.
2215 	 */
2216 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2217 	 && (cdm->pos.cookie.bus == target->bus)
2218 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2219 	 && (cdm->pos.cookie.target == target)
2220 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2221 	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2222 	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2223 	     target->generation)) {
2224 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2225 		return(0);
2226 	}
2227 
2228 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2229 	 && (cdm->pos.cookie.bus == target->bus)
2230 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2231 	 && (cdm->pos.cookie.target == target)
2232 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2233 	 && (cdm->pos.cookie.device != NULL))
2234 		return(xptdevicetraverse(target,
2235 					(struct cam_ed *)cdm->pos.cookie.device,
2236 					 xptedtdevicefunc, arg));
2237 	else
2238 		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2239 }
2240 
2241 static int
2242 xptedtdevicefunc(struct cam_ed *device, void *arg)
2243 {
2244 
2245 	struct ccb_dev_match *cdm;
2246 	dev_match_ret retval;
2247 
2248 	cdm = (struct ccb_dev_match *)arg;
2249 
2250 	/*
2251 	 * If our position is for something deeper in the tree, that means
2252 	 * that we've already seen this node.  So, we keep going down.
2253 	 */
2254 	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2255 	 && (cdm->pos.cookie.device == device)
2256 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2257 	 && (cdm->pos.cookie.periph != NULL))
2258 		retval = DM_RET_DESCEND;
2259 	else
2260 		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2261 					device);
2262 
2263 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2264 		cdm->status = CAM_DEV_MATCH_ERROR;
2265 		return(0);
2266 	}
2267 
2268 	/*
2269 	 * If the copy flag is set, copy this device out.
2270 	 */
2271 	if (retval & DM_RET_COPY) {
2272 		int spaceleft, j;
2273 
2274 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2275 			sizeof(struct dev_match_result));
2276 
2277 		/*
2278 		 * If we don't have enough space to put in another
2279 		 * match result, save our position and tell the
2280 		 * user there are more devices to check.
2281 		 */
2282 		if (spaceleft < sizeof(struct dev_match_result)) {
2283 			bzero(&cdm->pos, sizeof(cdm->pos));
2284 			cdm->pos.position_type =
2285 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2286 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2287 
2288 			cdm->pos.cookie.bus = device->target->bus;
2289 			cdm->pos.generations[CAM_BUS_GENERATION]=
2290 				bus_generation;
2291 			cdm->pos.cookie.target = device->target;
2292 			cdm->pos.generations[CAM_TARGET_GENERATION] =
2293 				device->target->bus->generation;
2294 			cdm->pos.cookie.device = device;
2295 			cdm->pos.generations[CAM_DEV_GENERATION] =
2296 				device->target->generation;
2297 			cdm->status = CAM_DEV_MATCH_MORE;
2298 			return(0);
2299 		}
2300 		j = cdm->num_matches;
2301 		cdm->num_matches++;
2302 		cdm->matches[j].type = DEV_MATCH_DEVICE;
2303 		cdm->matches[j].result.device_result.path_id =
2304 			device->target->bus->path_id;
2305 		cdm->matches[j].result.device_result.target_id =
2306 			device->target->target_id;
2307 		cdm->matches[j].result.device_result.target_lun =
2308 			device->lun_id;
2309 		bcopy(&device->inq_data,
2310 		      &cdm->matches[j].result.device_result.inq_data,
2311 		      sizeof(struct scsi_inquiry_data));
2312 
2313 		/* Let the user know whether this device is unconfigured */
2314 		if (device->flags & CAM_DEV_UNCONFIGURED)
2315 			cdm->matches[j].result.device_result.flags =
2316 				DEV_RESULT_UNCONFIGURED;
2317 		else
2318 			cdm->matches[j].result.device_result.flags =
2319 				DEV_RESULT_NOFLAG;
2320 	}
2321 
2322 	/*
2323 	 * If the user isn't interested in peripherals, don't descend
2324 	 * the tree any further.
2325 	 */
2326 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2327 		return(1);
2328 
2329 	/*
2330 	 * If there is a peripheral list generation recorded, make sure
2331 	 * it hasn't changed.
2332 	 */
2333 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2334 	 && (device->target->bus == cdm->pos.cookie.bus)
2335 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2336 	 && (device->target == cdm->pos.cookie.target)
2337 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2338 	 && (device == cdm->pos.cookie.device)
2339 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2340 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2341 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2342 	     device->generation)) {
2343 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2344 		return(0);
2345 	}
2346 
2347 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2348 	 && (cdm->pos.cookie.bus == device->target->bus)
2349 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2350 	 && (cdm->pos.cookie.target == device->target)
2351 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2352 	 && (cdm->pos.cookie.device == device)
2353 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2354 	 && (cdm->pos.cookie.periph != NULL))
2355 		return(xptperiphtraverse(device,
2356 				(struct cam_periph *)cdm->pos.cookie.periph,
2357 				xptedtperiphfunc, arg));
2358 	else
2359 		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2360 }
2361 
2362 static int
2363 xptedtperiphfunc(struct cam_periph *periph, void *arg)
2364 {
2365 	struct ccb_dev_match *cdm;
2366 	dev_match_ret retval;
2367 
2368 	cdm = (struct ccb_dev_match *)arg;
2369 
2370 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2371 
2372 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2373 		cdm->status = CAM_DEV_MATCH_ERROR;
2374 		return(0);
2375 	}
2376 
2377 	/*
2378 	 * If the copy flag is set, copy this peripheral out.
2379 	 */
2380 	if (retval & DM_RET_COPY) {
2381 		int spaceleft, j;
2382 
2383 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2384 			sizeof(struct dev_match_result));
2385 
2386 		/*
2387 		 * If we don't have enough space to put in another
2388 		 * match result, save our position and tell the
2389 		 * user there are more devices to check.
2390 		 */
2391 		if (spaceleft < sizeof(struct dev_match_result)) {
2392 			bzero(&cdm->pos, sizeof(cdm->pos));
2393 			cdm->pos.position_type =
2394 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2395 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2396 				CAM_DEV_POS_PERIPH;
2397 
2398 			cdm->pos.cookie.bus = periph->path->bus;
2399 			cdm->pos.generations[CAM_BUS_GENERATION]=
2400 				bus_generation;
2401 			cdm->pos.cookie.target = periph->path->target;
2402 			cdm->pos.generations[CAM_TARGET_GENERATION] =
2403 				periph->path->bus->generation;
2404 			cdm->pos.cookie.device = periph->path->device;
2405 			cdm->pos.generations[CAM_DEV_GENERATION] =
2406 				periph->path->target->generation;
2407 			cdm->pos.cookie.periph = periph;
2408 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2409 				periph->path->device->generation;
2410 			cdm->status = CAM_DEV_MATCH_MORE;
2411 			return(0);
2412 		}
2413 
2414 		j = cdm->num_matches;
2415 		cdm->num_matches++;
2416 		cdm->matches[j].type = DEV_MATCH_PERIPH;
2417 		cdm->matches[j].result.periph_result.path_id =
2418 			periph->path->bus->path_id;
2419 		cdm->matches[j].result.periph_result.target_id =
2420 			periph->path->target->target_id;
2421 		cdm->matches[j].result.periph_result.target_lun =
2422 			periph->path->device->lun_id;
2423 		cdm->matches[j].result.periph_result.unit_number =
2424 			periph->unit_number;
2425 		strncpy(cdm->matches[j].result.periph_result.periph_name,
2426 			periph->periph_name, DEV_IDLEN);
2427 	}
2428 
2429 	return(1);
2430 }
2431 
2432 static int
2433 xptedtmatch(struct ccb_dev_match *cdm)
2434 {
2435 	int ret;
2436 
2437 	cdm->num_matches = 0;
2438 
2439 	/*
2440 	 * Check the bus list generation.  If it has changed, the user
2441 	 * needs to reset everything and start over.
2442 	 */
2443 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2444 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2445 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2446 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2447 		return(0);
2448 	}
2449 
2450 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2451 	 && (cdm->pos.cookie.bus != NULL))
2452 		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2453 				     xptedtbusfunc, cdm);
2454 	else
2455 		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2456 
2457 	/*
2458 	 * If we get back 0, that means that we had to stop before fully
2459 	 * traversing the EDT.  It also means that one of the subroutines
2460 	 * has set the status field to the proper value.  If we get back 1,
2461 	 * we've fully traversed the EDT and copied out any matching entries.
2462 	 */
2463 	if (ret == 1)
2464 		cdm->status = CAM_DEV_MATCH_LAST;
2465 
2466 	return(ret);
2467 }
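
/*
 * Editor's note: a hedged userland-side sketch of the resume protocol
 * that the position saving above supports (cf. camcontrol(8)).  The
 * names "xptfd" (an open /dev/xpt), "matchbuf" and "bufsize" are
 * assumptions; CCB header setup and error handling are elided.
 */
#if 0
	union ccb ccb;

	bzero(&ccb, sizeof(ccb));
	ccb.ccb_h.func_code = XPT_DEV_MATCH;
	ccb.cdm.match_buf_len = bufsize;
	ccb.cdm.matches = matchbuf;	/* caller-supplied result buffer */
	ccb.cdm.num_patterns = 0;	/* no patterns: match everything */
	ccb.cdm.patterns = NULL;
	do {
		if (ioctl(xptfd, CAMIOCOMMAND, &ccb) == -1)
			break;
		/* consume ccb.cdm.num_matches entries from matchbuf; */
		/* ccb.cdm.pos carries the position for the next pass. */
	} while (ccb.cdm.status == CAM_DEV_MATCH_MORE);
#endif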
2468 
2469 static int
2470 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2471 {
2472 	struct ccb_dev_match *cdm;
2473 
2474 	cdm = (struct ccb_dev_match *)arg;
2475 
2476 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2477 	 && (cdm->pos.cookie.pdrv == pdrv)
2478 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2479 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2480 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2481 	     (*pdrv)->generation)) {
2482 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2483 		return(0);
2484 	}
2485 
2486 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2487 	 && (cdm->pos.cookie.pdrv == pdrv)
2488 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2489 	 && (cdm->pos.cookie.periph != NULL))
2490 		return(xptpdperiphtraverse(pdrv,
2491 				(struct cam_periph *)cdm->pos.cookie.periph,
2492 				xptplistperiphfunc, arg));
2493 	else
2494 		return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
2495 }
2496 
2497 static int
2498 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2499 {
2500 	struct ccb_dev_match *cdm;
2501 	dev_match_ret retval;
2502 
2503 	cdm = (struct ccb_dev_match *)arg;
2504 
2505 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2506 
2507 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2508 		cdm->status = CAM_DEV_MATCH_ERROR;
2509 		return(0);
2510 	}
2511 
2512 	/*
2513 	 * If the copy flag is set, copy this peripheral out.
2514 	 */
2515 	if (retval & DM_RET_COPY) {
2516 		int spaceleft, j;
2517 
2518 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2519 			sizeof(struct dev_match_result));
2520 
2521 		/*
2522 		 * If we don't have enough space to put in another
2523 		 * match result, save our position and tell the
2524 		 * user there are more devices to check.
2525 		 */
2526 		if (spaceleft < sizeof(struct dev_match_result)) {
2527 			struct periph_driver **pdrv;
2528 
2529 			pdrv = NULL;
2530 			bzero(&cdm->pos, sizeof(cdm->pos));
2531 			cdm->pos.position_type =
2532 				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2533 				CAM_DEV_POS_PERIPH;
2534 
2535 			/*
2536 			 * This may look a bit nonsensical, but it is
2537 			 * actually quite logical.  There are very few
2538 			 * peripheral drivers, and bloating every peripheral
2539 			 * structure with a pointer back to its parent
2540 			 * peripheral driver linker set entry would cost
2541 			 * more in the long run than doing this quick lookup.
2542 			 */
2543 			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2544 				if (strcmp((*pdrv)->driver_name,
2545 				    periph->periph_name) == 0)
2546 					break;
2547 			}
2548 
2549 			if (*pdrv == NULL) {
2550 				cdm->status = CAM_DEV_MATCH_ERROR;
2551 				return(0);
2552 			}
2553 
2554 			cdm->pos.cookie.pdrv = pdrv;
2555 			/*
2556 			 * The periph generation slot does double duty, as
2557 			 * does the periph pointer slot.  They are used for
2558 			 * both edt and pdrv lookups and positioning.
2559 			 */
2560 			cdm->pos.cookie.periph = periph;
2561 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2562 				(*pdrv)->generation;
2563 			cdm->status = CAM_DEV_MATCH_MORE;
2564 			return(0);
2565 		}
2566 
2567 		j = cdm->num_matches;
2568 		cdm->num_matches++;
2569 		cdm->matches[j].type = DEV_MATCH_PERIPH;
2570 		cdm->matches[j].result.periph_result.path_id =
2571 			periph->path->bus->path_id;
2572 
2573 		/*
2574 		 * The transport layer peripheral doesn't have a target or
2575 		 * lun.
2576 		 */
2577 		if (periph->path->target)
2578 			cdm->matches[j].result.periph_result.target_id =
2579 				periph->path->target->target_id;
2580 		else
2581 			cdm->matches[j].result.periph_result.target_id = -1;
2582 
2583 		if (periph->path->device)
2584 			cdm->matches[j].result.periph_result.target_lun =
2585 				periph->path->device->lun_id;
2586 		else
2587 			cdm->matches[j].result.periph_result.target_lun = -1;
2588 
2589 		cdm->matches[j].result.periph_result.unit_number =
2590 			periph->unit_number;
2591 		strncpy(cdm->matches[j].result.periph_result.periph_name,
2592 			periph->periph_name, DEV_IDLEN);
2593 	}
2594 
2595 	return(1);
2596 }
2597 
2598 static int
2599 xptperiphlistmatch(struct ccb_dev_match *cdm)
2600 {
2601 	int ret;
2602 
2603 	cdm->num_matches = 0;
2604 
2605 	/*
2606 	 * At this point in the edt traversal function, we check the bus
2607 	 * list generation to make sure that no busses have been added or
2608 	 * removed since the user last sent a XPT_DEV_MATCH ccb through.
2609 	 * For the peripheral driver list traversal function, however, we
2610 	 * don't have to worry about new peripheral driver types coming or
2611 	 * going; they're in a linker set, and therefore can't change
2612 	 * without a recompile.
2613 	 */
2614 
2615 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2616 	 && (cdm->pos.cookie.pdrv != NULL))
2617 		ret = xptpdrvtraverse(
2618 				(struct periph_driver **)cdm->pos.cookie.pdrv,
2619 				xptplistpdrvfunc, cdm);
2620 	else
2621 		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2622 
2623 	/*
2624 	 * If we get back 0, that means that we had to stop before fully
2625 	 * traversing the peripheral driver tree.  It also means that one of
2626 	 * the subroutines has set the status field to the proper value.  If
2627  * we get back 1, we've fully traversed the peripheral driver list
2628  * and copied out any matching entries.
2629 	 */
2630 	if (ret == 1)
2631 		cdm->status = CAM_DEV_MATCH_LAST;
2632 
2633 	return(ret);
2634 }
2635 
2636 static int
2637 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2638 {
2639 	struct cam_eb *bus, *next_bus;
2640 	int retval;
2641 
2642 	retval = 1;
2643 
2644 	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2645 	     bus != NULL;
2646 	     bus = next_bus) {
2647 		next_bus = TAILQ_NEXT(bus, links);
2648 
2649 		retval = tr_func(bus, arg);
2650 		if (retval == 0)
2651 			return(retval);
2652 	}
2653 
2654 	return(retval);
2655 }
2656 
2657 static int
2658 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2659 		  xpt_targetfunc_t *tr_func, void *arg)
2660 {
2661 	struct cam_et *target, *next_target;
2662 	int retval;
2663 
2664 	retval = 1;
2665 	for (target = (start_target ? start_target :
2666 		       TAILQ_FIRST(&bus->et_entries));
2667 	     target != NULL; target = next_target) {
2668 
2669 		next_target = TAILQ_NEXT(target, links);
2670 
2671 		retval = tr_func(target, arg);
2672 
2673 		if (retval == 0)
2674 			return(retval);
2675 	}
2676 
2677 	return(retval);
2678 }
2679 
2680 static int
2681 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2682 		  xpt_devicefunc_t *tr_func, void *arg)
2683 {
2684 	struct cam_ed *device, *next_device;
2685 	int retval;
2686 
2687 	retval = 1;
2688 	for (device = (start_device ? start_device :
2689 		       TAILQ_FIRST(&target->ed_entries));
2690 	     device != NULL;
2691 	     device = next_device) {
2692 
2693 		next_device = TAILQ_NEXT(device, links);
2694 
2695 		retval = tr_func(device, arg);
2696 
2697 		if (retval == 0)
2698 			return(retval);
2699 	}
2700 
2701 	return(retval);
2702 }
2703 
2704 static int
2705 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2706 		  xpt_periphfunc_t *tr_func, void *arg)
2707 {
2708 	struct cam_periph *periph, *next_periph;
2709 	int retval;
2710 
2711 	retval = 1;
2712 
2713 	for (periph = (start_periph ? start_periph :
2714 		       SLIST_FIRST(&device->periphs));
2715 	     periph != NULL;
2716 	     periph = next_periph) {
2717 
2718 		next_periph = SLIST_NEXT(periph, periph_links);
2719 
2720 		retval = tr_func(periph, arg);
2721 		if (retval == 0)
2722 			return(retval);
2723 	}
2724 
2725 	return(retval);
2726 }
2727 
2728 static int
2729 xptpdrvtraverse(struct periph_driver **start_pdrv,
2730 		xpt_pdrvfunc_t *tr_func, void *arg)
2731 {
2732 	struct periph_driver **pdrv;
2733 	int retval;
2734 
2735 	retval = 1;
2736 
2737 	/*
2738 	 * We don't traverse the peripheral driver list like we do the
2739 	 * other lists, because it is a linker set, and therefore cannot be
2740 	 * changed during runtime.  If the peripheral driver list is ever
2741 	 * re-done to be something other than a linker set (i.e. it can
2742 	 * change while the system is running), the list traversal should
2743 	 * be modified to work like the other traversal functions.
2744 	 */
2745 	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2746 	     *pdrv != NULL; pdrv++) {
2747 		retval = tr_func(pdrv, arg);
2748 
2749 		if (retval == 0)
2750 			return(retval);
2751 	}
2752 
2753 	return(retval);
2754 }
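
/*
 * Editor's note: entries land in that linker set via each peripheral
 * driver's PERIPHDRIVER_DECLARE() in its own source file; a hedged
 * sketch of the shape, modeled on the "da" driver:
 *
 *	static struct periph_driver dadriver = {
 *		dainit, "da",
 *		TAILQ_HEAD_INITIALIZER(dadriver.units), 0
 *	};
 *	PERIPHDRIVER_DECLARE(da, dadriver);
 */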
2755 
2756 static int
2757 xptpdperiphtraverse(struct periph_driver **pdrv,
2758 		    struct cam_periph *start_periph,
2759 		    xpt_periphfunc_t *tr_func, void *arg)
2760 {
2761 	struct cam_periph *periph, *next_periph;
2762 	int retval;
2763 
2764 	retval = 1;
2765 
2766 	for (periph = (start_periph ? start_periph :
2767 	     TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2768 	     periph = next_periph) {
2769 
2770 		next_periph = TAILQ_NEXT(periph, unit_links);
2771 
2772 		retval = tr_func(periph, arg);
2773 		if (retval == 0)
2774 			return(retval);
2775 	}
2776 	return(retval);
2777 }
2778 
2779 static int
2780 xptdefbusfunc(struct cam_eb *bus, void *arg)
2781 {
2782 	struct xpt_traverse_config *tr_config;
2783 
2784 	tr_config = (struct xpt_traverse_config *)arg;
2785 
2786 	if (tr_config->depth == XPT_DEPTH_BUS) {
2787 		xpt_busfunc_t *tr_func;
2788 
2789 		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2790 
2791 		return(tr_func(bus, tr_config->tr_arg));
2792 	} else
2793 		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2794 }
2795 
2796 static int
2797 xptdeftargetfunc(struct cam_et *target, void *arg)
2798 {
2799 	struct xpt_traverse_config *tr_config;
2800 
2801 	tr_config = (struct xpt_traverse_config *)arg;
2802 
2803 	if (tr_config->depth == XPT_DEPTH_TARGET) {
2804 		xpt_targetfunc_t *tr_func;
2805 
2806 		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2807 
2808 		return(tr_func(target, tr_config->tr_arg));
2809 	} else
2810 		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2811 }
2812 
2813 static int
2814 xptdefdevicefunc(struct cam_ed *device, void *arg)
2815 {
2816 	struct xpt_traverse_config *tr_config;
2817 
2818 	tr_config = (struct xpt_traverse_config *)arg;
2819 
2820 	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2821 		xpt_devicefunc_t *tr_func;
2822 
2823 		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2824 
2825 		return(tr_func(device, tr_config->tr_arg));
2826 	} else
2827 		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2828 }
2829 
2830 static int
2831 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2832 {
2833 	struct xpt_traverse_config *tr_config;
2834 	xpt_periphfunc_t *tr_func;
2835 
2836 	tr_config = (struct xpt_traverse_config *)arg;
2837 
2838 	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2839 
2840 	/*
2841 	 * Unlike the other default functions, we don't check for depth
2842 	 * here.  The peripheral driver level is the last level in the EDT,
2843 	 * so if we're here, we should execute the function in question.
2844 	 */
2845 	return(tr_func(periph, tr_config->tr_arg));
2846 }
2847 
2848 /*
2849  * Execute the given function for every bus in the EDT.
2850  */
2851 static int
2852 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2853 {
2854 	struct xpt_traverse_config tr_config;
2855 
2856 	tr_config.depth = XPT_DEPTH_BUS;
2857 	tr_config.tr_func = tr_func;
2858 	tr_config.tr_arg = arg;
2859 
2860 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2861 }
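
/*
 * Editor's note: a minimal sketch of the callback contract shared by
 * all of the traversal wrappers -- return 1 to keep walking, 0 to
 * abort the walk.  The counting callback here is hypothetical.
 */
#if 0
static int
xptcountbusfunc(struct cam_eb *bus, void *arg)
{
	int *count;

	count = (int *)arg;
	(*count)++;
	return(1);	/* non-zero means continue the traversal */
}

	/* ...and then, from code running at the proper spl: */
	int num_busses = 0;

	xpt_for_all_busses(xptcountbusfunc, &num_busses);
#endif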
2862 
2863 #ifdef notusedyet
2864 /*
2865  * Execute the given function for every target in the EDT.
2866  */
2867 static int
2868 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2869 {
2870 	struct xpt_traverse_config tr_config;
2871 
2872 	tr_config.depth = XPT_DEPTH_TARGET;
2873 	tr_config.tr_func = tr_func;
2874 	tr_config.tr_arg = arg;
2875 
2876 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2877 }
2878 #endif /* notusedyet */
2879 
2880 /*
2881  * Execute the given function for every device in the EDT.
2882  */
2883 static int
2884 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2885 {
2886 	struct xpt_traverse_config tr_config;
2887 
2888 	tr_config.depth = XPT_DEPTH_DEVICE;
2889 	tr_config.tr_func = tr_func;
2890 	tr_config.tr_arg = arg;
2891 
2892 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2893 }
2894 
2895 #ifdef notusedyet
2896 /*
2897  * Execute the given function for every peripheral in the EDT.
2898  */
2899 static int
2900 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2901 {
2902 	struct xpt_traverse_config tr_config;
2903 
2904 	tr_config.depth = XPT_DEPTH_PERIPH;
2905 	tr_config.tr_func = tr_func;
2906 	tr_config.tr_arg = arg;
2907 
2908 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2909 }
2910 #endif /* notusedyet */
2911 
2912 static int
2913 xptsetasyncfunc(struct cam_ed *device, void *arg)
2914 {
2915 	struct cam_path path;
2916 	struct ccb_getdev cgd;
2917 	struct async_node *cur_entry;
2918 
2919 	cur_entry = (struct async_node *)arg;
2920 
2921 	/*
2922 	 * Don't report unconfigured devices (Wildcard devs,
2923 	 * devices only for target mode, device instances
2924 	 * that have been invalidated but are waiting for
2925 	 * their last reference count to be released).
2926 	 */
2927 	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2928 		return (1);
2929 
2930 	xpt_compile_path(&path,
2931 			 NULL,
2932 			 device->target->bus->path_id,
2933 			 device->target->target_id,
2934 			 device->lun_id);
2935 	xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2936 	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2937 	xpt_action((union ccb *)&cgd);
2938 	cur_entry->callback(cur_entry->callback_arg,
2939 			    AC_FOUND_DEVICE,
2940 			    &path, &cgd);
2941 	xpt_release_path(&path);
2942 
2943 	return(1);
2944 }
2945 
2946 static int
2947 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2948 {
2949 	struct cam_path path;
2950 	struct ccb_pathinq cpi;
2951 	struct async_node *cur_entry;
2952 
2953 	cur_entry = (struct async_node *)arg;
2954 
2955 	xpt_compile_path(&path, /*periph*/NULL,
2956 			 bus->sim->path_id,
2957 			 CAM_TARGET_WILDCARD,
2958 			 CAM_LUN_WILDCARD);
2959 	xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2960 	cpi.ccb_h.func_code = XPT_PATH_INQ;
2961 	xpt_action((union ccb *)&cpi);
2962 	cur_entry->callback(cur_entry->callback_arg,
2963 			    AC_PATH_REGISTERED,
2964 			    &path, &cpi);
2965 	xpt_release_path(&path);
2966 
2967 	return(1);
2968 }
2969 
2970 void
2971 xpt_action(union ccb *start_ccb)
2972 {
2973 	int iopl;
2974 
2975 	GIANT_REQUIRED;
2976 
2977 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2978 
2979 	start_ccb->ccb_h.status = CAM_REQ_INPROG;
2980 
2981 	iopl = splsoftcam();
2982 	switch (start_ccb->ccb_h.func_code) {
2983 	case XPT_SCSI_IO:
2984 	{
2985 #ifdef CAM_NEW_TRAN_CODE
2986 		struct cam_ed *device;
2987 #endif /* CAM_NEW_TRAN_CODE */
2988 #ifdef CAMDEBUG
2989 		char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2990 		struct cam_path *path;
2991 
2992 		path = start_ccb->ccb_h.path;
2993 #endif
2994 
2995 		/*
2996 		 * For the sake of compatibility with SCSI-1
2997 		 * devices that may not understand the identify
2998 		 * message, we include lun information in the
2999 		 * second byte of all commands.  SCSI-1 specifies
3000 		 * that luns are a 3 bit value and reserves only 3
3001 		 * that the lun is a 3 bit value and reserves only 3
3002 		 * revisions of the SCSI spec allow for more than 8
3003 		 * luns, but have deprecated lun information in the
3004 		 * CDB.  So, if the lun won't fit, we must omit it.
3005 		 *
3006 		 * Also be aware that during initial probing for devices,
3007 		 * the inquiry information is unknown but initialized to 0.
3008 		 * This means that this code will be exercised while probing
3009 		 * devices with an ANSI revision greater than 2.
3010 		 */
3011 #ifdef CAM_NEW_TRAN_CODE
3012 		device = start_ccb->ccb_h.path->device;
3013 		if (device->protocol_version <= SCSI_REV_2
3014 #else /* CAM_NEW_TRAN_CODE */
3015 		if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
3016 #endif /* CAM_NEW_TRAN_CODE */
3017 		 && start_ccb->ccb_h.target_lun < 8
3018 		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
3019 
3020 			start_ccb->csio.cdb_io.cdb_bytes[1] |=
3021 			    start_ccb->ccb_h.target_lun << 5;
3022 		}
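		/*
		 * Editor's note: e.g. for target_lun 3 on such a device the
		 * OR above sets bits 7-5 of CDB byte 1 to (3 << 5) == 0x60,
		 * the SCSI-1 lun field.
		 */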
3023 		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
3024 		CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
3025 			  scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
3026 			  	       &path->device->inq_data),
3027 			  scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
3028 					  cdb_str, sizeof(cdb_str))));
3029 	}
3030 	/* FALLTHROUGH */
3031 	case XPT_TARGET_IO:
3032 	case XPT_CONT_TARGET_IO:
3033 		start_ccb->csio.sense_resid = 0;
3034 		start_ccb->csio.resid = 0;
3035 		/* FALLTHROUGH */
3036 	case XPT_RESET_DEV:
3037 	case XPT_ENG_EXEC:
3038 	{
3039 		struct cam_path *path;
3040 		int s;
3041 		int runq;
3042 
3043 		path = start_ccb->ccb_h.path;
3044 		s = splsoftcam();
3045 
3046 		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
3047 		if (path->device->qfrozen_cnt == 0)
3048 			runq = xpt_schedule_dev_sendq(path->bus, path->device);
3049 		else
3050 			runq = 0;
3051 		splx(s);
3052 		if (runq != 0)
3053 			xpt_run_dev_sendq(path->bus);
3054 		break;
3055 	}
3056 	case XPT_SET_TRAN_SETTINGS:
3057 	{
3058 		xpt_set_transfer_settings(&start_ccb->cts,
3059 					  start_ccb->ccb_h.path->device,
3060 					  /*async_update*/FALSE);
3061 		break;
3062 	}
3063 	case XPT_CALC_GEOMETRY:
3064 	{
3065 		struct cam_sim *sim;
3066 
3067 		/* Filter out garbage */
3068 		if (start_ccb->ccg.block_size == 0
3069 		 || start_ccb->ccg.volume_size == 0) {
3070 			start_ccb->ccg.cylinders = 0;
3071 			start_ccb->ccg.heads = 0;
3072 			start_ccb->ccg.secs_per_track = 0;
3073 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3074 			break;
3075 		}
3076 #ifdef PC98
3077 		/*
3078 		 * In a PC-98 system, geometry translation depends on
3079 		 * the "real" device geometry obtained from mode page 4.
3080 		 * SCSI geometry translation is performed in the
3081 		 * initialization routine of the SCSI BIOS and the result
3082 		 * stored in host memory.  If the translation is available
3083 		 * in host memory, use it.  If not, rely on the default
3084 		 * translation the device driver performs.
3085 		 */
3086 		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
3087 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3088 			break;
3089 		}
3090 #endif
3091 		sim = start_ccb->ccb_h.path->bus->sim;
3092 		(*(sim->sim_action))(sim, start_ccb);
3093 		break;
3094 	}
3095 	case XPT_ABORT:
3096 	{
3097 		union ccb* abort_ccb;
3098 		int s;
3099 
3100 		abort_ccb = start_ccb->cab.abort_ccb;
3101 		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3102 
3103 			if (abort_ccb->ccb_h.pinfo.index >= 0) {
3104 				struct cam_ccbq *ccbq;
3105 
3106 				ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3107 				cam_ccbq_remove_ccb(ccbq, abort_ccb);
3108 				abort_ccb->ccb_h.status =
3109 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3110 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3111 				s = splcam();
3112 				xpt_done(abort_ccb);
3113 				splx(s);
3114 				start_ccb->ccb_h.status = CAM_REQ_CMP;
3115 				break;
3116 			}
3117 			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3118 			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3119 				/*
3120 				 * We've caught this ccb en route to
3121 				 * the SIM.  Flag it for abort and the
3122 				 * SIM will do so just before starting
3123 				 * real work on the CCB.
3124 				 */
3125 				abort_ccb->ccb_h.status =
3126 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3127 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3128 				start_ccb->ccb_h.status = CAM_REQ_CMP;
3129 				break;
3130 			}
3131 		}
3132 		if (XPT_FC_IS_QUEUED(abort_ccb)
3133 		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3134 			/*
3135 			 * It's already completed but waiting
3136 			 * for our SWI to get to it.
3137 			 */
3138 			start_ccb->ccb_h.status = CAM_UA_ABORT;
3139 			break;
3140 		}
3141 		/*
3142 		 * If we weren't able to take care of the abort request
3143 		 * in the XPT, pass the request down to the SIM for processing.
3144 		 */
3145 	}
3146 	/* FALLTHROUGH */
3147 	case XPT_ACCEPT_TARGET_IO:
3148 	case XPT_EN_LUN:
3149 	case XPT_IMMED_NOTIFY:
3150 	case XPT_NOTIFY_ACK:
3151 	case XPT_GET_TRAN_SETTINGS:
3152 	case XPT_RESET_BUS:
3153 	{
3154 		struct cam_sim *sim;
3155 
3156 		sim = start_ccb->ccb_h.path->bus->sim;
3157 		(*(sim->sim_action))(sim, start_ccb);
3158 		break;
3159 	}
3160 	case XPT_PATH_INQ:
3161 	{
3162 		struct cam_sim *sim;
3163 
3164 		sim = start_ccb->ccb_h.path->bus->sim;
3165 		(*(sim->sim_action))(sim, start_ccb);
3166 		break;
3167 	}
3168 	case XPT_PATH_STATS:
3169 		start_ccb->cpis.last_reset =
3170 			start_ccb->ccb_h.path->bus->last_reset;
3171 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3172 		break;
3173 	case XPT_GDEV_TYPE:
3174 	{
3175 		struct cam_ed *dev;
3176 		int s;
3177 
3178 		dev = start_ccb->ccb_h.path->device;
3179 		s = splcam();
3180 		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3181 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3182 		} else {
3183 			struct ccb_getdev *cgd;
3184 			struct cam_eb *bus;
3185 			struct cam_et *tar;
3186 
3187 			cgd = &start_ccb->cgd;
3188 			bus = cgd->ccb_h.path->bus;
3189 			tar = cgd->ccb_h.path->target;
3190 			cgd->inq_data = dev->inq_data;
3191 			cgd->ccb_h.status = CAM_REQ_CMP;
3192 			cgd->serial_num_len = dev->serial_num_len;
3193 			if ((dev->serial_num_len > 0)
3194 			 && (dev->serial_num != NULL))
3195 				bcopy(dev->serial_num, cgd->serial_num,
3196 				      dev->serial_num_len);
3197 		}
3198 		splx(s);
3199 		break;
3200 	}
3201 	case XPT_GDEV_STATS:
3202 	{
3203 		struct cam_ed *dev;
3204 		int s;
3205 
3206 		dev = start_ccb->ccb_h.path->device;
3207 		s = splcam();
3208 		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3209 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3210 		} else {
3211 			struct ccb_getdevstats *cgds;
3212 			struct cam_eb *bus;
3213 			struct cam_et *tar;
3214 
3215 			cgds = &start_ccb->cgds;
3216 			bus = cgds->ccb_h.path->bus;
3217 			tar = cgds->ccb_h.path->target;
3218 			cgds->dev_openings = dev->ccbq.dev_openings;
3219 			cgds->dev_active = dev->ccbq.dev_active;
3220 			cgds->devq_openings = dev->ccbq.devq_openings;
3221 			cgds->devq_queued = dev->ccbq.queue.entries;
3222 			cgds->held = dev->ccbq.held;
3223 			cgds->last_reset = tar->last_reset;
3224 			cgds->maxtags = dev->quirk->maxtags;
3225 			cgds->mintags = dev->quirk->mintags;
3226 			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3227 				cgds->last_reset = bus->last_reset;
3228 			cgds->ccb_h.status = CAM_REQ_CMP;
3229 		}
3230 		splx(s);
3231 		break;
3232 	}
3233 	case XPT_GDEVLIST:
3234 	{
3235 		struct cam_periph	*nperiph;
3236 		struct periph_list	*periph_head;
3237 		struct ccb_getdevlist	*cgdl;
3238 		u_int			i;
3239 		int			s;
3240 		struct cam_ed		*device;
3241 		int			found;
3242 
3243 
3244 		found = 0;
3245 
3246 		/*
3247 		 * Don't want anyone mucking with our data.
3248 		 */
3249 		s = splcam();
3250 		device = start_ccb->ccb_h.path->device;
3251 		periph_head = &device->periphs;
3252 		cgdl = &start_ccb->cgdl;
3253 
3254 		/*
3255 		 * Check and see if the list has changed since the user
3256 		 * last requested a list member.  If so, tell them that the
3257 		 * list has changed, and therefore they need to start over
3258 		 * from the beginning.
3259 		 */
3260 		if ((cgdl->index != 0) &&
3261 		    (cgdl->generation != device->generation)) {
3262 			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3263 			splx(s);
3264 			break;
3265 		}
3266 
3267 		/*
3268 		 * Traverse the list of peripherals and attempt to find
3269 		 * the requested peripheral.
3270 		 */
3271 		for (nperiph = SLIST_FIRST(periph_head), i = 0;
3272 		     (nperiph != NULL) && (i <= cgdl->index);
3273 		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3274 			if (i == cgdl->index) {
3275 				strncpy(cgdl->periph_name,
3276 					nperiph->periph_name,
3277 					DEV_IDLEN);
3278 				cgdl->unit_number = nperiph->unit_number;
3279 				found = 1;
3280 			}
3281 		}
3282 		if (found == 0) {
3283 			cgdl->status = CAM_GDEVLIST_ERROR;
3284 			splx(s);
3285 			break;
3286 		}
3287 
3288 		if (nperiph == NULL)
3289 			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3290 		else
3291 			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3292 
3293 		cgdl->index++;
3294 		cgdl->generation = device->generation;
3295 
3296 		splx(s);
3297 		cgdl->ccb_h.status = CAM_REQ_CMP;
3298 		break;
3299 	}
3300 	case XPT_DEV_MATCH:
3301 	{
3302 		int s;
3303 		dev_pos_type position_type;
3304 		struct ccb_dev_match *cdm;
3305 
3306 		cdm = &start_ccb->cdm;
3307 
3308 		/*
3309 		 * Prevent EDT changes while we traverse it.
3310 		 */
3311 		s = splcam();
3312 		/*
3313 		 * There are two ways of getting at information in the EDT.
3314 		 * The first way is via the primary EDT tree.  It starts
3315 		 * with a list of busses, then a list of targets on a bus,
3316 		 * then devices/luns on a target, and then peripherals on a
3317 		 * device/lun.  The "other" way is by the peripheral driver
3318 		 * lists.  The peripheral driver lists are organized by
3319 		 * peripheral driver (obviously), so it makes sense to
3320 		 * use the peripheral driver list if the user is looking
3321 		 * for something like "da1", or all "da" devices.  If the
3322 		 * user is looking for something on a particular bus/target
3323 		 * or lun, it's generally better to go through the EDT tree.
3324 		 */
3325 
3326 		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3327 			position_type = cdm->pos.position_type;
3328 		else {
3329 			u_int i;
3330 
3331 			position_type = CAM_DEV_POS_NONE;
3332 
3333 			for (i = 0; i < cdm->num_patterns; i++) {
3334 				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3335 				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3336 					position_type = CAM_DEV_POS_EDT;
3337 					break;
3338 				}
3339 			}
3340 
3341 			if (cdm->num_patterns == 0)
3342 				position_type = CAM_DEV_POS_EDT;
3343 			else if (position_type == CAM_DEV_POS_NONE)
3344 				position_type = CAM_DEV_POS_PDRV;
3345 		}
3346 
3347 		switch (position_type & CAM_DEV_POS_TYPEMASK) {
3348 		case CAM_DEV_POS_EDT:
3349 			xptedtmatch(cdm);
3350 			break;
3351 		case CAM_DEV_POS_PDRV:
3352 			xptperiphlistmatch(cdm);
3353 			break;
3354 		default:
3355 			cdm->status = CAM_DEV_MATCH_ERROR;
3356 			break;
3357 		}
3358 
3359 		splx(s);
3360 
3361 		if (cdm->status == CAM_DEV_MATCH_ERROR)
3362 			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3363 		else
3364 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3365 
3366 		break;
3367 	}
3368 	case XPT_SASYNC_CB:
3369 	{
3370 		struct ccb_setasync *csa;
3371 		struct async_node *cur_entry;
3372 		struct async_list *async_head;
3373 		u_int32_t added;
3374 		int s;
3375 
3376 		csa = &start_ccb->csa;
3377 		added = csa->event_enable;
3378 		async_head = &csa->ccb_h.path->device->asyncs;
3379 
3380 		/*
3381 		 * If there is already an entry for us, simply
3382 		 * update it.
3383 		 */
3384 		s = splcam();
3385 		cur_entry = SLIST_FIRST(async_head);
3386 		while (cur_entry != NULL) {
3387 			if ((cur_entry->callback_arg == csa->callback_arg)
3388 			 && (cur_entry->callback == csa->callback))
3389 				break;
3390 			cur_entry = SLIST_NEXT(cur_entry, links);
3391 		}
3392 
3393 		if (cur_entry != NULL) {
3394 		 	/*
3395 			 * If the request has no flags set,
3396 			 * remove the entry.
3397 			 */
3398 			added &= ~cur_entry->event_enable;
3399 			if (csa->event_enable == 0) {
3400 				SLIST_REMOVE(async_head, cur_entry,
3401 					     async_node, links);
3402 				csa->ccb_h.path->device->refcount--;
3403 				free(cur_entry, M_DEVBUF);
3404 			} else {
3405 				cur_entry->event_enable = csa->event_enable;
3406 			}
3407 		} else {
3408 			cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF,
3409 					   M_NOWAIT);
3410 			if (cur_entry == NULL) {
3411 				splx(s);
3412 				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3413 				break;
3414 			}
3415 			cur_entry->event_enable = csa->event_enable;
3416 			cur_entry->callback_arg = csa->callback_arg;
3417 			cur_entry->callback = csa->callback;
3418 			SLIST_INSERT_HEAD(async_head, cur_entry, links);
3419 			csa->ccb_h.path->device->refcount++;
3420 		}
3421 
3422 		if ((added & AC_FOUND_DEVICE) != 0) {
3423 			/*
3424 			 * Get this peripheral up to date with all
3425 			 * the currently existing devices.
3426 			 */
3427 			xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3428 		}
3429 		if ((added & AC_PATH_REGISTERED) != 0) {
3430 			/*
3431 			 * Get this peripheral up to date with all
3432 			 * the currently existing busses.
3433 			 */
3434 			xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3435 		}
3436 		splx(s);
3437 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3438 		break;
3439 	}
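	/*
	 * Editor's note: a typical registration, modeled on what periph
	 * drivers do at init time (hedged sketch; "daasync" stands in
	 * for the driver's handler):
	 *
	 *	xpt_setup_ccb(&csa.ccb_h, xpt_periph->path, 5);
	 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
	 *	csa.event_enable = AC_FOUND_DEVICE;
	 *	csa.callback = daasync;
	 *	csa.callback_arg = NULL;
	 *	xpt_action((union ccb *)&csa);
	 */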
3440 	case XPT_REL_SIMQ:
3441 	{
3442 		struct ccb_relsim *crs;
3443 		struct cam_ed *dev;
3444 		int s;
3445 
3446 		crs = &start_ccb->crs;
3447 		dev = crs->ccb_h.path->device;
3448 		if (dev == NULL) {
3449 
3450 			crs->ccb_h.status = CAM_DEV_NOT_THERE;
3451 			break;
3452 		}
3453 
3454 		s = splcam();
3455 
3456 		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3457 
3458  			if ((dev->inq_data.flags & SID_CmdQue) != 0) {
3459 
3460 				/* Don't ever go below one opening */
3461 				if (crs->openings > 0) {
3462 					xpt_dev_ccbq_resize(crs->ccb_h.path,
3463 							    crs->openings);
3464 
3465 					if (bootverbose) {
3466 						xpt_print_path(crs->ccb_h.path);
3467 						printf("tagged openings "
3468 						       "now %d\n",
3469 						       crs->openings);
3470 					}
3471 				}
3472 			}
3473 		}
3474 
3475 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3476 
3477 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3478 
3479 				/*
3480 				 * Just extend the old timeout and decrement
3481 				 * the freeze count so that a single timeout
3482 				 * is sufficient for releasing the queue.
3483 				 */
3484 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3485 				untimeout(xpt_release_devq_timeout,
3486 					  dev, dev->c_handle);
3487 			} else {
3488 
3489 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3490 			}
3491 
3492 			dev->c_handle =
3493 				timeout(xpt_release_devq_timeout,
3494 					dev,
3495 					(crs->release_timeout * hz) / 1000);
3496 
3497 			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3498 
3499 		}
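			/*
			 * Editor's note: release_timeout is in
			 * milliseconds; the conversion above yields clock
			 * ticks, e.g. 250ms with hz=100 is
			 * (250 * 100) / 1000 == 25 ticks.
			 */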
3500 
3501 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3502 
3503 			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3504 				/*
3505 				 * Decrement the freeze count so that a single
3506 				 * completion is still sufficient to unfreeze
3507 				 * the queue.
3508 				 */
3509 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3510 			} else {
3511 
3512 				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3513 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3514 			}
3515 		}
3516 
3517 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3518 
3519 			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3520 			 || (dev->ccbq.dev_active == 0)) {
3521 
3522 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3523 			} else {
3524 
3525 				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3526 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3527 			}
3528 		}
3529 		splx(s);
3530 
3531 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3532 
3533 			xpt_release_devq(crs->ccb_h.path, /*count*/1,
3534 					 /*run_queue*/TRUE);
3535 		}
3536 		start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3537 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3538 		break;
3539 	}
3540 	case XPT_SCAN_BUS:
3541 		xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3542 		break;
3543 	case XPT_SCAN_LUN:
3544 		xpt_scan_lun(start_ccb->ccb_h.path->periph,
3545 			     start_ccb->ccb_h.path, start_ccb->crcn.flags,
3546 			     start_ccb);
3547 		break;
3548 	case XPT_DEBUG: {
3549 #ifdef CAMDEBUG
3550 		int s;
3551 
3552 		s = splcam();
3553 #ifdef CAM_DEBUG_DELAY
3554 		cam_debug_delay = CAM_DEBUG_DELAY;
3555 #endif
3556 		cam_dflags = start_ccb->cdbg.flags;
3557 		if (cam_dpath != NULL) {
3558 			xpt_free_path(cam_dpath);
3559 			cam_dpath = NULL;
3560 		}
3561 
3562 		if (cam_dflags != CAM_DEBUG_NONE) {
3563 			if (xpt_create_path(&cam_dpath, xpt_periph,
3564 					    start_ccb->ccb_h.path_id,
3565 					    start_ccb->ccb_h.target_id,
3566 					    start_ccb->ccb_h.target_lun) !=
3567 					    CAM_REQ_CMP) {
3568 				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3569 				cam_dflags = CAM_DEBUG_NONE;
3570 			} else {
3571 				start_ccb->ccb_h.status = CAM_REQ_CMP;
3572 				xpt_print_path(cam_dpath);
3573 				printf("debugging flags now %x\n", cam_dflags);
3574 			}
3575 		} else {
3576 			cam_dpath = NULL;
3577 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3578 		}
3579 		splx(s);
3580 #else /* !CAMDEBUG */
3581 		start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3582 #endif /* CAMDEBUG */
3583 		break;
3584 	}
3585 	case XPT_NOOP:
3586 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3587 			xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3588 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3589 		break;
3590 	default:
3591 	case XPT_SDEV_TYPE:
3592 	case XPT_TERM_IO:
3593 	case XPT_ENG_INQ:
3594 		/* XXX Implement */
3595 		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3596 		break;
3597 	}
3598 	splx(iopl);
3599 }
3600 
3601 void
3602 xpt_polled_action(union ccb *start_ccb)
3603 {
3604 	int	  s;
3605 	u_int32_t timeout;
3606 	struct	  cam_sim *sim;
3607 	struct	  cam_devq *devq;
3608 	struct	  cam_ed *dev;
3609 
3610 	GIANT_REQUIRED;
3611 
3612 	timeout = start_ccb->ccb_h.timeout;
3613 	sim = start_ccb->ccb_h.path->bus->sim;
3614 	devq = sim->devq;
3615 	dev = start_ccb->ccb_h.path->device;
3616 
3617 	s = splcam();
3618 
3619 	/*
3620 	 * Steal an opening so that no other queued requests
3621 	 * can get it before us while we simulate interrupts.
3622 	 */
3623 	dev->ccbq.devq_openings--;
3624 	dev->ccbq.dev_openings--;
3625 
3626 	while ((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
3627 	   && (--timeout > 0)) {
3628 		DELAY(1000);
3629 		(*(sim->sim_poll))(sim);
3630 		camisr(&cam_bioq);
3631 	}
3632 
3633 	dev->ccbq.devq_openings++;
3634 	dev->ccbq.dev_openings++;
3635 
3636 	if (timeout != 0) {
3637 		xpt_action(start_ccb);
3638 		while (--timeout > 0) {
3639 			(*(sim->sim_poll))(sim);
3640 			camisr(&cam_bioq);
3641 			if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3642 			    != CAM_REQ_INPROG)
3643 				break;
3644 			DELAY(1000);
3645 		}
3646 		if (timeout == 0) {
3647 			/*
3648 			 * XXX Is it worth adding a sim_timeout entry
3649 			 * point so we can attempt recovery?  If
3650 			 * this is only used for dumps, I don't think
3651 			 * it is.
3652 			 */
3653 			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3654 		}
3655 	} else {
3656 		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3657 	}
3658 	splx(s);
3659 }
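
/*
 * Sketch of polled use, modeled on a disk driver's dump path; the
 * command, retry count, and timeout below are illustrative only:
 *
 *	struct ccb_scsiio csio;
 *
 *	xpt_setup_ccb(&csio.ccb_h, periph->path, 1);
 *	scsi_synchronize_cache(&csio, 1, NULL, MSG_SIMPLE_Q_TAG,
 *			       0, 0, SSD_FULL_SIZE, 60000);
 *	xpt_polled_action((union ccb *)&csio);
 *	if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
 *		printf("cache synchronization failed\n");
 */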
3660 
3661 /*
3662  * Schedule a peripheral driver to receive a ccb when its
3663  * target device has space for more transactions.
3664  */
3665 void
3666 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3667 {
3668 	struct cam_ed *device;
3669 	int s;
3670 	int runq;
3671 
3672 	GIANT_REQUIRED;
3673 
3674 	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3675 	device = perph->path->device;
3676 	s = splsoftcam();
3677 	if (periph_is_queued(perph)) {
3678 		/* Simply reorder based on new priority */
3679 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3680 			  ("   change priority to %d\n", new_priority));
3681 		if (new_priority < perph->pinfo.priority) {
3682 			camq_change_priority(&device->drvq,
3683 					     perph->pinfo.index,
3684 					     new_priority);
3685 		}
3686 		runq = 0;
3687 	} else {
3688 		/* New entry on the queue */
3689 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3690 			  ("   added periph to queue\n"));
3691 		perph->pinfo.priority = new_priority;
3692 		perph->pinfo.generation = ++device->drvq.generation;
3693 		camq_insert(&device->drvq, &perph->pinfo);
3694 		runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3695 	}
3696 	splx(s);
3697 	if (runq != 0) {
3698 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3699 			  ("   calling xpt_run_devq\n"));
3700 		xpt_run_dev_allocq(perph->path->bus);
3701 	}
3702 }
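
/*
 * Illustrative sketch (not part of this file): a peripheral driver
 * typically calls xpt_schedule() after queueing work of its own and
 * is later handed a CCB through its periph_start entry point.  The
 * "foo" driver, its softc, and foo_lookup_softc() are hypothetical:
 *
 *	static void
 *	foostrategy(struct bio *bp)
 *	{
 *		struct foo_softc *softc = foo_lookup_softc(bp);
 *
 *		bioq_insert_tail(&softc->bio_queue, bp);
 *		xpt_schedule(softc->periph, 1);
 *	}
 */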
3703 
3704 
3705 /*
3706  * Schedule a device to run on a given queue.
3707  * If the device was inserted as a new entry on the queue,
3708  * return 1 meaning the device queue should be run. If we
3709  * were already queued, implying someone else has already
3710  * started the queue, return 0 so the caller doesn't attempt
3711  * to run the queue.  Must be called at splsoftcam
3712  * (or splcam, since that encompasses splsoftcam).
3713  */
3714 static int
3715 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3716 		 u_int32_t new_priority)
3717 {
3718 	int retval;
3719 	u_int32_t old_priority;
3720 
3721 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3722 
3723 	old_priority = pinfo->priority;
3724 
3725 	/*
3726 	 * Are we already queued?
3727 	 */
3728 	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3729 		/* Simply reorder based on new priority */
3730 		if (new_priority < old_priority) {
3731 			camq_change_priority(queue, pinfo->index,
3732 					     new_priority);
3733 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3734 					("changed priority to %d\n",
3735 					 new_priority));
3736 		}
3737 		retval = 0;
3738 	} else {
3739 		/* New entry on the queue */
3740 		if (new_priority < old_priority)
3741 			pinfo->priority = new_priority;
3742 
3743 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3744 				("Inserting onto queue\n"));
3745 		pinfo->generation = ++queue->generation;
3746 		camq_insert(queue, pinfo);
3747 		retval = 1;
3748 	}
3749 	return (retval);
3750 }
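
/*
 * Expected caller pattern (a sketch; it mirrors xpt_schedule() above):
 * insert at splsoftcam, then run the queue only when the insert
 * reported a new entry.
 *
 *	s = splsoftcam();
 *	runq = xpt_schedule_dev_allocq(bus, device);
 *	splx(s);
 *	if (runq != 0)
 *		xpt_run_dev_allocq(bus);
 */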
3751 
3752 static void
3753 xpt_run_dev_allocq(struct cam_eb *bus)
3754 {
3755 	struct	cam_devq *devq;
3756 	int	s;
3757 
3758 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3759 	devq = bus->sim->devq;
3760 
3761 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3762 			("   qfrozen_cnt == 0x%x, entries == %d, "
3763 			 "openings == %d, active == %d\n",
3764 			 devq->alloc_queue.qfrozen_cnt,
3765 			 devq->alloc_queue.entries,
3766 			 devq->alloc_openings,
3767 			 devq->alloc_active));
3768 
3769 	s = splsoftcam();
3770 	devq->alloc_queue.qfrozen_cnt++;
3771 	while ((devq->alloc_queue.entries > 0)
3772 	    && (devq->alloc_openings > 0)
3773 	    && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3774 		struct	cam_ed_qinfo *qinfo;
3775 		struct	cam_ed *device;
3776 		union	ccb *work_ccb;
3777 		struct	cam_periph *drv;
3778 		struct	camq *drvq;
3779 
3780 		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3781 							   CAMQ_HEAD);
3782 		device = qinfo->device;
3783 
3784 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3785 				("running device %p\n", device));
3786 
3787 		drvq = &device->drvq;
3788 
3789 #ifdef CAMDEBUG
3790 		if (drvq->entries <= 0) {
3791 			panic("xpt_run_dev_allocq: "
3792 			      "Device on queue without any work to do");
3793 		}
3794 #endif
3795 		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3796 			devq->alloc_openings--;
3797 			devq->alloc_active++;
3798 			drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3799 			splx(s);
3800 			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3801 				      drv->pinfo.priority);
3802 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3803 					("calling periph start\n"));
3804 			drv->periph_start(drv, work_ccb);
3805 		} else {
3806 			/*
3807 			 * Malloc failure in xpt_get_ccb.
3808 			 */
3809 			/*
3810 			 * XXX add us to a list to be run from free_ccb
3811 			 * if we don't have any ccbs active on this
3812 			 * device queue; otherwise we may never get run
3813 			 * again.
3814 			 */
3815 			break;
3816 		}
3817 
3818 		/* Raise IPL for possible insertion and test at top of loop */
3819 		s = splsoftcam();
3820 
3821 		if (drvq->entries > 0) {
3822 			/* We have more work.  Attempt to reschedule */
3823 			xpt_schedule_dev_allocq(bus, device);
3824 		}
3825 	}
3826 	devq->alloc_queue.qfrozen_cnt--;
3827 	splx(s);
3828 }
3829 
3830 static void
3831 xpt_run_dev_sendq(struct cam_eb *bus)
3832 {
3833 	struct	cam_devq *devq;
3834 	int	s;
3835 
3836 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3837 
3838 	devq = bus->sim->devq;
3839 
3840 	s = splcam();
3841 	devq->send_queue.qfrozen_cnt++;
3842 	splx(s);
3843 	s = splsoftcam();
3844 	while ((devq->send_queue.entries > 0)
3845 	    && (devq->send_openings > 0)) {
3846 		struct	cam_ed_qinfo *qinfo;
3847 		struct	cam_ed *device;
3848 		union ccb *work_ccb;
3849 		struct	cam_sim *sim;
3850 		int	ospl;
3851 
3852 		ospl = splcam();
3853 		if (devq->send_queue.qfrozen_cnt > 1) {
3854 			splx(ospl);
3855 			break;
3856 		}
3857 
3858 		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3859 							   CAMQ_HEAD);
3860 		device = qinfo->device;
3861 
3862 		/*
3863 		 * If the device has been "frozen", don't attempt
3864 		 * to run it.
3865 		 */
3866 		if (device->qfrozen_cnt > 0) {
3867 			splx(ospl);
3868 			continue;
3869 		}
3870 
3871 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3872 				("running device %p\n", device));
3873 
3874 		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3875 		if (work_ccb == NULL) {
3876 			printf("device on run queue with no ccbs???\n");
3877 			splx(ospl);
3878 			continue;
3879 		}
3880 
3881 		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3882 
3883 			if (num_highpower <= 0) {
3884 				/*
3885 				 * We got a high power command, but we
3886 				 * don't have any available slots.  Freeze
3887 				 * the device queue until we have a slot
3888 				 * available.
3889 				 */
3890 				device->qfrozen_cnt++;
3891 				STAILQ_INSERT_TAIL(&highpowerq,
3892 						   &work_ccb->ccb_h,
3893 						   xpt_links.stqe);
3894 
3895 				splx(ospl);
3896 				continue;
3897 			} else {
3898 				/*
3899 				 * Consume a high power slot while
3900 				 * this ccb runs.
3901 				 */
3902 				num_highpower--;
3903 			}
3904 		}
3905 		devq->active_dev = device;
3906 		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3907 
3908 		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3909 		splx(ospl);
3910 
3911 		devq->send_openings--;
3912 		devq->send_active++;
3913 
3914 		if (device->ccbq.queue.entries > 0)
3915 			xpt_schedule_dev_sendq(bus, device);
3916 
3917 		if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3918 			/*
3919 			 * The client wants to freeze the queue
3920 			 * after this CCB is sent.
3921 			 */
3922 			ospl = splcam();
3923 			device->qfrozen_cnt++;
3924 			splx(ospl);
3925 		}
3926 
3927 		splx(s);
3928 
3929 		/* In Target mode, the peripheral driver knows best... */
3930 		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3931 			if ((device->inq_flags & SID_CmdQue) != 0
3932 			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3933 				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3934 			else
3935 				/*
3936 				 * Clear this in case of a retried CCB that
3937 				 * failed due to a rejected tag.
3938 				 */
3939 				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3940 		}
3941 
3942 		/*
3943 		 * Device queues can be shared among multiple sim instances
3944 		 * that reside on different busses.  Use the SIM in the queue
3945 		 * CCB's path, rather than the one in the bus that was passed
3946 		 * into this function.
3947 		 */
3948 		sim = work_ccb->ccb_h.path->bus->sim;
3949 		(*(sim->sim_action))(sim, work_ccb);
3950 
3951 		ospl = splcam();
3952 		devq->active_dev = NULL;
3953 		splx(ospl);
3954 		/* Raise IPL for possible insertion and test at top of loop */
3955 		s = splsoftcam();
3956 	}
3957 	splx(s);
3958 	s = splcam();
3959 	devq->send_queue.qfrozen_cnt--;
3960 	splx(s);
3961 }
3962 
3963 /*
3964  * This function merges stuff from the slave ccb into the master ccb, while
3965  * keeping important fields in the master ccb constant.
3966  */
3967 void
3968 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3969 {
3970 	GIANT_REQUIRED;
3971 
3972 	/*
3973 	 * Pull fields that are valid for peripheral drivers to set
3974 	 * into the master CCB along with the CCB "payload".
3975 	 */
3976 	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3977 	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3978 	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3979 	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3980 	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3981 	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3982 }
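
/*
 * Illustrative use (hypothetical): recovery code can build a fresh
 * request in a scratch CCB and graft it onto a CCB the XPT already
 * knows about, keeping the master's header linkage intact.  The
 * donefn callback is assumed:
 *
 *	union ccb scratch;
 *
 *	xpt_setup_ccb(&scratch.ccb_h, done_ccb->ccb_h.path,
 *		      done_ccb->ccb_h.pinfo.priority);
 *	scsi_test_unit_ready(&scratch.csio, 2, donefn,
 *			     MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, 5000);
 *	xpt_merge_ccb(done_ccb, &scratch);
 */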
3983 
3984 void
3985 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3986 {
3987 	GIANT_REQUIRED;
3988 
3989 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3990 	ccb_h->pinfo.priority = priority;
3991 	ccb_h->path = path;
3992 	ccb_h->path_id = path->bus->path_id;
3993 	if (path->target)
3994 		ccb_h->target_id = path->target->target_id;
3995 	else
3996 		ccb_h->target_id = CAM_TARGET_WILDCARD;
3997 	if (path->device) {
3998 		ccb_h->target_lun = path->device->lun_id;
3999 		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
4000 	} else {
4001 		ccb_h->target_lun = CAM_TARGET_WILDCARD;
4002 	}
4003 	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
4004 	ccb_h->flags = 0;
4005 }
4006 
4007 /* Path manipulation functions */
4008 cam_status
4009 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
4010 		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
4011 {
4012 	struct	   cam_path *path;
4013 	cam_status status;
4014 
4015 	GIANT_REQUIRED;
4016 
4017 	path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT);
4018 
4019 	if (path == NULL) {
4020 		status = CAM_RESRC_UNAVAIL;
4021 		return(status);
4022 	}
4023 	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
4024 	if (status != CAM_REQ_CMP) {
4025 		free(path, M_DEVBUF);
4026 		path = NULL;
4027 	}
4028 	*new_path_ptr = path;
4029 	return (status);
4030 }
4031 
4032 static cam_status
4033 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
4034 		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
4035 {
4036 	struct	     cam_eb *bus;
4037 	struct	     cam_et *target;
4038 	struct	     cam_ed *device;
4039 	cam_status   status;
4040 	int	     s;
4041 
4042 	status = CAM_REQ_CMP;	/* Completed without error */
4043 	target = NULL;		/* Wildcarded */
4044 	device = NULL;		/* Wildcarded */
4045 
4046 	/*
4047 	 * We will potentially modify the EDT, so block interrupts
4048 	 * that may attempt to create cam paths.
4049 	 */
4050 	s = splcam();
4051 	bus = xpt_find_bus(path_id);
4052 	if (bus == NULL) {
4053 		status = CAM_PATH_INVALID;
4054 	} else {
4055 		target = xpt_find_target(bus, target_id);
4056 		if (target == NULL) {
4057 			/* Create one */
4058 			struct cam_et *new_target;
4059 
4060 			new_target = xpt_alloc_target(bus, target_id);
4061 			if (new_target == NULL) {
4062 				status = CAM_RESRC_UNAVAIL;
4063 			} else {
4064 				target = new_target;
4065 			}
4066 		}
4067 		if (target != NULL) {
4068 			device = xpt_find_device(target, lun_id);
4069 			if (device == NULL) {
4070 				/* Create one */
4071 				struct cam_ed *new_device;
4072 
4073 				new_device = xpt_alloc_device(bus,
4074 							      target,
4075 							      lun_id);
4076 				if (new_device == NULL) {
4077 					status = CAM_RESRC_UNAVAIL;
4078 				} else {
4079 					device = new_device;
4080 				}
4081 			}
4082 		}
4083 	}
4084 	splx(s);
4085 
4086 	/*
4087 	 * Only touch the user's data if we are successful.
4088 	 */
4089 	if (status == CAM_REQ_CMP) {
4090 		new_path->periph = perph;
4091 		new_path->bus = bus;
4092 		new_path->target = target;
4093 		new_path->device = device;
4094 		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
4095 	} else {
4096 		if (device != NULL)
4097 			xpt_release_device(bus, target, device);
4098 		if (target != NULL)
4099 			xpt_release_target(bus, target);
4100 		if (bus != NULL)
4101 			xpt_release_bus(bus);
4102 	}
4103 	return (status);
4104 }
4105 
4106 static void
4107 xpt_release_path(struct cam_path *path)
4108 {
4109 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
4110 	if (path->device != NULL) {
4111 		xpt_release_device(path->bus, path->target, path->device);
4112 		path->device = NULL;
4113 	}
4114 	if (path->target != NULL) {
4115 		xpt_release_target(path->bus, path->target);
4116 		path->target = NULL;
4117 	}
4118 	if (path->bus != NULL) {
4119 		xpt_release_bus(path->bus);
4120 		path->bus = NULL;
4121 	}
4122 }
4123 
4124 void
4125 xpt_free_path(struct cam_path *path)
4126 {
4127 	GIANT_REQUIRED;
4128 
4129 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
4130 	xpt_release_path(path);
4131 	free(path, M_DEVBUF);
4132 }
4133 
4134 
4135 /*
4136  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
4137  * in path1, 2 for match with wildcards in path2.
4138  */
4139 int
4140 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
4141 {
4142 	int retval = 0;
4143 
4144 	GIANT_REQUIRED;
4145 
4146 	if (path1->bus != path2->bus) {
4147 		if (path1->bus->path_id == CAM_BUS_WILDCARD)
4148 			retval = 1;
4149 		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
4150 			retval = 2;
4151 		else
4152 			return (-1);
4153 	}
4154 	if (path1->target != path2->target) {
4155 		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
4156 			if (retval == 0)
4157 				retval = 1;
4158 		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4159 			retval = 2;
4160 		else
4161 			return (-1);
4162 	}
4163 	if (path1->device != path2->device) {
4164 		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4165 			if (retval == 0)
4166 				retval = 1;
4167 		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4168 			retval = 2;
4169 		else
4170 			return (-1);
4171 	}
4172 	return (retval);
4173 }
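
/*
 * Usage sketch: async consumers commonly treat any non-negative
 * return as "these paths overlap":
 *
 *	if (xpt_path_comp(periph->path, async_path) >= 0)
 *		foo_handle_async(periph);
 *
 * where foo_handle_async() is a hypothetical handler.
 */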
4174 
4175 void
4176 xpt_print_path(struct cam_path *path)
4177 {
4178 	GIANT_REQUIRED;
4179 
4180 	if (path == NULL)
4181 		printf("(nopath): ");
4182 	else {
4183 		if (path->periph != NULL)
4184 			printf("(%s%d:", path->periph->periph_name,
4185 			       path->periph->unit_number);
4186 		else
4187 			printf("(noperiph:");
4188 
4189 		if (path->bus != NULL)
4190 			printf("%s%d:%d:", path->bus->sim->sim_name,
4191 			       path->bus->sim->unit_number,
4192 			       path->bus->sim->bus_id);
4193 		else
4194 			printf("nobus:");
4195 
4196 		if (path->target != NULL)
4197 			printf("%d:", path->target->target_id);
4198 		else
4199 			printf("X:");
4200 
4201 		if (path->device != NULL)
4202 			printf("%d): ", path->device->lun_id);
4203 		else
4204 			printf("X): ");
4205 	}
4206 }
4207 
4208 int
4209 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4210 {
4211 	struct sbuf sb;
4212 
4213 	GIANT_REQUIRED;
4214 
4215 	sbuf_new(&sb, str, str_len, 0);
4216 
4217 	if (path == NULL)
4218 		sbuf_printf(&sb, "(nopath): ");
4219 	else {
4220 		if (path->periph != NULL)
4221 			sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4222 				    path->periph->unit_number);
4223 		else
4224 			sbuf_printf(&sb, "(noperiph:");
4225 
4226 		if (path->bus != NULL)
4227 			sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4228 				    path->bus->sim->unit_number,
4229 				    path->bus->sim->bus_id);
4230 		else
4231 			sbuf_printf(&sb, "nobus:");
4232 
4233 		if (path->target != NULL)
4234 			sbuf_printf(&sb, "%d:", path->target->target_id);
4235 		else
4236 			sbuf_printf(&sb, "X:");
4237 
4238 		if (path->device != NULL)
4239 			sbuf_printf(&sb, "%d): ", path->device->lun_id);
4240 		else
4241 			sbuf_printf(&sb, "X): ");
4242 	}
4243 	sbuf_finish(&sb);
4244 
4245 	return(sbuf_len(&sb));
4246 }
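
/*
 * Usage sketch: format a path into a caller-supplied buffer; the
 * buffer size here is an arbitrary example:
 *
 *	char pathstr[64];
 *
 *	xpt_path_string(path, pathstr, sizeof(pathstr));
 *	printf("%scommand timed out\n", pathstr);
 */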
4247 
4248 path_id_t
4249 xpt_path_path_id(struct cam_path *path)
4250 {
4251 	GIANT_REQUIRED;
4252 
4253 	return(path->bus->path_id);
4254 }
4255 
4256 target_id_t
4257 xpt_path_target_id(struct cam_path *path)
4258 {
4259 	GIANT_REQUIRED;
4260 
4261 	if (path->target != NULL)
4262 		return (path->target->target_id);
4263 	else
4264 		return (CAM_TARGET_WILDCARD);
4265 }
4266 
4267 lun_id_t
4268 xpt_path_lun_id(struct cam_path *path)
4269 {
4270 	GIANT_REQUIRED;
4271 
4272 	if (path->device != NULL)
4273 		return (path->device->lun_id);
4274 	else
4275 		return (CAM_LUN_WILDCARD);
4276 }
4277 
4278 struct cam_sim *
4279 xpt_path_sim(struct cam_path *path)
4280 {
4281 	GIANT_REQUIRED;
4282 
4283 	return (path->bus->sim);
4284 }
4285 
4286 struct cam_periph*
4287 xpt_path_periph(struct cam_path *path)
4288 {
4289 	GIANT_REQUIRED;
4290 
4291 	return (path->periph);
4292 }
4293 
4294 /*
4295  * Release a CAM control block for the caller.  Remit the cost of the structure
4296  * to the device referenced by the path.  If this device had no 'credits'
4297  * and peripheral drivers have registered async callbacks for this notification,
4298  * call them now.
4299  */
4300 void
4301 xpt_release_ccb(union ccb *free_ccb)
4302 {
4303 	int	 s;
4304 	struct	 cam_path *path;
4305 	struct	 cam_ed *device;
4306 	struct	 cam_eb *bus;
4307 
4308 	GIANT_REQUIRED;
4309 
4310 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4311 	path = free_ccb->ccb_h.path;
4312 	device = path->device;
4313 	bus = path->bus;
4314 	s = splsoftcam();
4315 	cam_ccbq_release_opening(&device->ccbq);
4316 	if (xpt_ccb_count > xpt_max_ccbs) {
4317 		xpt_free_ccb(free_ccb);
4318 		xpt_ccb_count--;
4319 	} else {
4320 		SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
4321 	}
4322 	bus->sim->devq->alloc_openings++;
4323 	bus->sim->devq->alloc_active--;
4324 	/* XXX Turn this into an inline function - xpt_run_device?? */
4325 	if ((device_is_alloc_queued(device) == 0)
4326 	 && (device->drvq.entries > 0)) {
4327 		xpt_schedule_dev_allocq(bus, device);
4328 	}
4329 	splx(s);
4330 	if (dev_allocq_is_runnable(bus->sim->devq))
4331 		xpt_run_dev_allocq(bus);
4332 }
4333 
4334 /* Functions accessed by SIM drivers */
4335 
4336 /*
4337  * A sim structure, listing the SIM entry points and instance
4338  * identification info, is passed to xpt_bus_register to hook the SIM
4339  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
4340  * for this new bus and places it in the list of busses and assigns
4341  * it a path_id.  The path_id may be influenced by "hard wiring"
4342  * information specified by the user.  Once interrupt services are
4343  * available, the bus will be probed.
4344  */
4345 int32_t
4346 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4347 {
4348 	struct cam_eb *new_bus;
4349 	struct cam_eb *old_bus;
4350 	struct ccb_pathinq cpi;
4351 	int s;
4352 
4353 	GIANT_REQUIRED;
4354 
4355 	sim->bus_id = bus;
4356 	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4357 					  M_DEVBUF, M_NOWAIT);
4358 	if (new_bus == NULL) {
4359 		/* Couldn't satisfy request */
4360 		return (CAM_RESRC_UNAVAIL);
4361 	}
4362 
4363 	if (strcmp(sim->sim_name, "xpt") != 0) {
4364 
4365 		sim->path_id =
4366 		    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4367 	}
4368 
4369 	TAILQ_INIT(&new_bus->et_entries);
4370 	new_bus->path_id = sim->path_id;
4371 	new_bus->sim = sim;
4372 	timevalclear(&new_bus->last_reset);
4373 	new_bus->flags = 0;
4374 	new_bus->refcount = 1;	/* Held until a bus_deregister event */
4375 	new_bus->generation = 0;
4376 	s = splcam();
4377 	old_bus = TAILQ_FIRST(&xpt_busses);
4378 	while (old_bus != NULL
4379 	    && old_bus->path_id < new_bus->path_id)
4380 		old_bus = TAILQ_NEXT(old_bus, links);
4381 	if (old_bus != NULL)
4382 		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4383 	else
4384 		TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
4385 	bus_generation++;
4386 	splx(s);
4387 
4388 	/* Notify interested parties */
4389 	if (sim->path_id != CAM_XPT_PATH_ID) {
4390 		struct cam_path path;
4391 
4392 		xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4393 			         CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4394 		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4395 		cpi.ccb_h.func_code = XPT_PATH_INQ;
4396 		xpt_action((union ccb *)&cpi);
4397 		xpt_async(AC_PATH_REGISTERED, &path, &cpi);
4398 		xpt_release_path(&path);
4399 	}
4400 	return (CAM_SUCCESS);
4401 }
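
/*
 * Registration sketch for a hypothetical "foo" SIM (5.x-era API;
 * the names and limits are examples only):
 *
 *	struct cam_devq *devq;
 *	struct cam_sim *sim;
 *
 *	devq = cam_simq_alloc(FOO_MAX_TRANSACTIONS);
 *	if (devq == NULL)
 *		return (ENOMEM);
 *	sim = cam_sim_alloc(foo_action, foo_poll, "foo", softc,
 *			    device_get_unit(dev), 1,
 *			    FOO_MAX_TAGGED, devq);
 *	if (sim == NULL) {
 *		cam_simq_free(devq);
 *		return (ENOMEM);
 *	}
 *	if (xpt_bus_register(sim, 0) != CAM_SUCCESS) {
 *		cam_sim_free(sim, TRUE);
 *		return (ENXIO);
 *	}
 *
 * The matching teardown is xpt_bus_deregister(cam_sim_path(sim))
 * followed by cam_sim_free().
 */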
4402 
4403 int32_t
4404 xpt_bus_deregister(path_id_t pathid)
4405 {
4406 	struct cam_path bus_path;
4407 	cam_status status;
4408 
4409 	GIANT_REQUIRED;
4410 
4411 	status = xpt_compile_path(&bus_path, NULL, pathid,
4412 				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4413 	if (status != CAM_REQ_CMP)
4414 		return (status);
4415 
4416 	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4417 	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4418 
4419 	/* Release the reference count held while registered. */
4420 	xpt_release_bus(bus_path.bus);
4421 	xpt_release_path(&bus_path);
4422 
4423 	return (CAM_REQ_CMP);
4424 }
4425 
4426 static path_id_t
4427 xptnextfreepathid(void)
4428 {
4429 	struct cam_eb *bus;
4430 	path_id_t pathid;
4431 	const char *strval;
4432 
4433 	pathid = 0;
4434 	bus = TAILQ_FIRST(&xpt_busses);
4435 retry:
4436 	/* Find an unoccupied pathid */
4437 	while (bus != NULL
4438 	    && bus->path_id <= pathid) {
4439 		if (bus->path_id == pathid)
4440 			pathid++;
4441 		bus = TAILQ_NEXT(bus, links);
4442 	}
4443 
4444 	/*
4445 	 * Ensure that this pathid is not reserved for
4446 	 * a bus that may be registered in the future.
4447 	 */
4448 	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4449 		++pathid;
4450 		/* Start the search over */
4451 		goto retry;
4452 	}
4453 	return (pathid);
4454 }
4455 
4456 static path_id_t
4457 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4458 {
4459 	path_id_t pathid;
4460 	int i, dunit, val;
4461 	char buf[32];
4462 	const char *dname;
4463 
4464 	pathid = CAM_XPT_PATH_ID;
4465 	snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4466 	i = 0;
4467 	while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4468 		if (strcmp(dname, "scbus")) {
4469 			/* Avoid a bit of foot shooting. */
4470 			continue;
4471 		}
4472 		if (dunit < 0)		/* unwired?! */
4473 			continue;
4474 		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4475 			if (sim_bus == val) {
4476 				pathid = dunit;
4477 				break;
4478 			}
4479 		} else if (sim_bus == 0) {
4480 			/* Unspecified matches bus 0 */
4481 			pathid = dunit;
4482 			break;
4483 		} else {
4484 			printf("Ambiguous scbus configuration for %s%d "
4485 			       "bus %d, cannot wire down.  The kernel "
4486 			       "config entry for scbus%d should "
4487 			       "specify a controller bus.\n"
4488 			       "Scbus will be assigned dynamically.\n",
4489 			       sim_name, sim_unit, sim_bus, dunit);
4490 			break;
4491 		}
4492 	}
4493 
4494 	if (pathid == CAM_XPT_PATH_ID)
4495 		pathid = xptnextfreepathid();
4496 	return (pathid);
4497 }
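
/*
 * "Hard wiring" sketch: the resource hints consulted above correspond
 * to entries like the following in /boot/device.hints, which would pin
 * bus 0 of ahc0 to scbus2 (the device names are an example only):
 *
 *	hint.scbus.2.at="ahc0"
 *	hint.scbus.2.bus="0"
 */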
4498 
4499 void
4500 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4501 {
4502 	struct cam_eb *bus;
4503 	struct cam_et *target, *next_target;
4504 	struct cam_ed *device, *next_device;
4505 	int s;
4506 
4507 	GIANT_REQUIRED;
4508 
4509 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4510 
4511 	/*
4512 	 * Most async events come from a CAM interrupt context.  In
4513 	 * a few cases, the error recovery code at the peripheral layer,
4514 	 * which may run from our SWI or a process context, may signal
4515 	 * deferred events with a call to xpt_async. Ensure async
4516 	 * notifications are serialized by blocking cam interrupts.
4517 	 */
4518 	s = splcam();
4519 
4520 	bus = path->bus;
4521 
4522 	if (async_code == AC_BUS_RESET) {
4523 		int s;
4524 
4525 		s = splclock();
4526 		/* Update our notion of when the last reset occurred */
4527 		microtime(&bus->last_reset);
4528 		splx(s);
4529 	}
4530 
4531 	for (target = TAILQ_FIRST(&bus->et_entries);
4532 	     target != NULL;
4533 	     target = next_target) {
4534 
4535 		next_target = TAILQ_NEXT(target, links);
4536 
4537 		if (path->target != target
4538 		 && path->target->target_id != CAM_TARGET_WILDCARD
4539 		 && target->target_id != CAM_TARGET_WILDCARD)
4540 			continue;
4541 
4542 		if (async_code == AC_SENT_BDR) {
4543 			int s;
4544 
4545 			/* Update our notion of when the last reset occurred */
4546 			s = splclock();
4547 			microtime(&path->target->last_reset);
4548 			splx(s);
4549 		}
4550 
4551 		for (device = TAILQ_FIRST(&target->ed_entries);
4552 		     device != NULL;
4553 		     device = next_device) {
4554 
4555 			next_device = TAILQ_NEXT(device, links);
4556 
4557 			if (path->device != device
4558 			 && path->device->lun_id != CAM_LUN_WILDCARD
4559 			 && device->lun_id != CAM_LUN_WILDCARD)
4560 				continue;
4561 
4562 			xpt_dev_async(async_code, bus, target,
4563 				      device, async_arg);
4564 
4565 			xpt_async_bcast(&device->asyncs, async_code,
4566 					path, async_arg);
4567 		}
4568 	}
4569 
4570 	/*
4571 	 * If this wasn't a fully wildcarded async, tell all
4572 	 * clients that want all async events.
4573 	 */
4574 	if (bus != xpt_periph->path->bus)
4575 		xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4576 				path, async_arg);
4577 	splx(s);
4578 }
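
/*
 * Sketch of how the per-device async lists consumed here are
 * populated: a peripheral driver registers its callback with an
 * XPT_SASYNC_CB CCB (foo_async and softc are hypothetical):
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, periph->path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_LOST_DEVICE | AC_SENT_BDR;
 *	csa.callback = foo_async;
 *	csa.callback_arg = softc;
 *	xpt_action((union ccb *)&csa);
 */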
4579 
4580 static void
4581 xpt_async_bcast(struct async_list *async_head,
4582 		u_int32_t async_code,
4583 		struct cam_path *path, void *async_arg)
4584 {
4585 	struct async_node *cur_entry;
4586 
4587 	cur_entry = SLIST_FIRST(async_head);
4588 	while (cur_entry != NULL) {
4589 		struct async_node *next_entry;
4590 		/*
4591 		 * Grab the next list entry before we call the current
4592 		 * entry's callback.  This is because the callback function
4593 		 * can delete its async callback entry.
4594 		 */
4595 		next_entry = SLIST_NEXT(cur_entry, links);
4596 		if ((cur_entry->event_enable & async_code) != 0)
4597 			cur_entry->callback(cur_entry->callback_arg,
4598 					    async_code, path,
4599 					    async_arg);
4600 		cur_entry = next_entry;
4601 	}
4602 }
4603 
4604 /*
4605  * Handle any per-device event notifications that require action by the XPT.
4606  */
4607 static void
4608 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4609 	      struct cam_ed *device, void *async_arg)
4610 {
4611 	cam_status status;
4612 	struct cam_path newpath;
4613 
4614 	/*
4615 	 * We only need to handle events for real devices.
4616 	 */
4617 	if (target->target_id == CAM_TARGET_WILDCARD
4618 	 || device->lun_id == CAM_LUN_WILDCARD)
4619 		return;
4620 
4621 	/*
4622 	 * We need our own path with wildcards expanded to
4623 	 * handle certain types of events.
4624 	 */
4625 	if ((async_code == AC_SENT_BDR)
4626 	 || (async_code == AC_BUS_RESET)
4627 	 || (async_code == AC_INQ_CHANGED))
4628 		status = xpt_compile_path(&newpath, NULL,
4629 					  bus->path_id,
4630 					  target->target_id,
4631 					  device->lun_id);
4632 	else
4633 		status = CAM_REQ_CMP_ERR;
4634 
4635 	if (status == CAM_REQ_CMP) {
4636 
4637 		/*
4638 		 * Allow transfer negotiation to occur in a
4639 		 * tag free environment.
4640 		 */
4641 		if (async_code == AC_SENT_BDR
4642 		 || async_code == AC_BUS_RESET)
4643 			xpt_toggle_tags(&newpath);
4644 
4645 		if (async_code == AC_INQ_CHANGED) {
4646 			/*
4647 			 * We've sent a start unit command, or
4648 			 * something similar, to a device that
4649 			 * may have caused its inquiry data to
4650 			 * change. So we re-scan the device to
4651 			 * refresh the inquiry data for it.
4652 			 */
4653 			xpt_scan_lun(newpath.periph, &newpath,
4654 				     CAM_EXPECT_INQ_CHANGE, NULL);
4655 		}
4656 		xpt_release_path(&newpath);
4657 	} else if (async_code == AC_LOST_DEVICE) {
4658 		device->flags |= CAM_DEV_UNCONFIGURED;
4659 	} else if (async_code == AC_TRANSFER_NEG) {
4660 		struct ccb_trans_settings *settings;
4661 
4662 		settings = (struct ccb_trans_settings *)async_arg;
4663 		xpt_set_transfer_settings(settings, device,
4664 					  /*async_update*/TRUE);
4665 	}
4666 }
4667 
4668 u_int32_t
4669 xpt_freeze_devq(struct cam_path *path, u_int count)
4670 {
4671 	int s;
4672 	struct ccb_hdr *ccbh;
4673 
4674 	GIANT_REQUIRED;
4675 
4676 	s = splcam();
4677 	path->device->qfrozen_cnt += count;
4678 
4679 	/*
4680 	 * Mark the last CCB in the queue as needing
4681 	 * to be requeued if the driver hasn't
4682 	 * changed it's state yet.  This fixes a race
4683 	 * where a ccb is just about to be queued to
4684 	 * a controller driver when it's interrupt routine
4685 	 * freezes the queue.  To completly close the
4686 	 * hole, controller drives must check to see
4687 	 * if a ccb's status is still CAM_REQ_INPROG
4688 	 * under spl protection just before they queue
4689 	 * the CCB.  See ahc_action/ahc_freeze_devq for
4690 	 * an example.
4691 	 */
4692 	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4693 	if (ccbh && ccbh->status == CAM_REQ_INPROG)
4694 		ccbh->status = CAM_REQUEUE_REQ;
4695 	splx(s);
4696 	return (path->device->qfrozen_cnt);
4697 }
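
/*
 * Sketch of the controller-driver side of the race check described
 * above, modeled loosely on ahc but simplified and hypothetical:
 *
 *	s = splcam();
 *	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
 *		splx(s);
 *		xpt_done(ccb);	(the XPT already marked it for requeue)
 *		return;
 *	}
 *	(hand the CCB to the hardware)
 *	splx(s);
 */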
4698 
4699 u_int32_t
4700 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4701 {
4702 	GIANT_REQUIRED;
4703 
4704 	sim->devq->send_queue.qfrozen_cnt += count;
4705 	if (sim->devq->active_dev != NULL) {
4706 		struct ccb_hdr *ccbh;
4707 
4708 		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4709 				  ccb_hdr_tailq);
4710 		if (ccbh && ccbh->status == CAM_REQ_INPROG)
4711 			ccbh->status = CAM_REQUEUE_REQ;
4712 	}
4713 	return (sim->devq->send_queue.qfrozen_cnt);
4714 }
4715 
4716 static void
4717 xpt_release_devq_timeout(void *arg)
4718 {
4719 	struct cam_ed *device;
4720 
4721 	device = (struct cam_ed *)arg;
4722 
4723 	xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4724 }
4725 
4726 void
4727 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4728 {
4729 	GIANT_REQUIRED;
4730 
4731 	xpt_release_devq_device(path->device, count, run_queue);
4732 }
4733 
4734 static void
4735 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4736 {
4737 	int	rundevq;
4738 	int	s0, s1;
4739 
4740 	rundevq = 0;
4741 	s0 = splsoftcam();
4742 	s1 = splcam();
4743 	if (dev->qfrozen_cnt > 0) {
4744 
4745 		count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4746 		dev->qfrozen_cnt -= count;
4747 		if (dev->qfrozen_cnt == 0) {
4748 
4749 			/*
4750 			 * No longer need to wait for a successful
4751 			 * command completion.
4752 			 */
4753 			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4754 
4755 			/*
4756 			 * Remove any timeouts that might be scheduled
4757 			 * to release this queue.
4758 			 */
4759 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4760 				untimeout(xpt_release_devq_timeout, dev,
4761 					  dev->c_handle);
4762 				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4763 			}
4764 
4765 			/*
4766 			 * Now that we are unfrozen schedule the
4767 			 * device so any pending transactions are
4768 			 * run.
4769 			 */
4770 			if ((dev->ccbq.queue.entries > 0)
4771 			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4772 			 && (run_queue != 0)) {
4773 				rundevq = 1;
4774 			}
4775 		}
4776 	}
4777 	splx(s1);
4778 	if (rundevq != 0)
4779 		xpt_run_dev_sendq(dev->target->bus);
4780 	splx(s0);
4781 }
4782 
4783 void
4784 xpt_release_simq(struct cam_sim *sim, int run_queue)
4785 {
4786 	int	s;
4787 	struct	camq *sendq;
4788 
4789 	GIANT_REQUIRED;
4790 
4791 	sendq = &(sim->devq->send_queue);
4792 	s = splcam();
4793 	if (sendq->qfrozen_cnt > 0) {
4794 
4795 		sendq->qfrozen_cnt--;
4796 		if (sendq->qfrozen_cnt == 0) {
4797 			struct cam_eb *bus;
4798 
4799 			/*
4800 			 * If there is a timeout scheduled to release this
4801 			 * sim queue, remove it.  The queue frozen count is
4802 			 * already at 0.
4803 			 */
4804 			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0) {
4805 				untimeout(xpt_release_simq_timeout, sim,
4806 					  sim->c_handle);
4807 				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4808 			}
4809 			bus = xpt_find_bus(sim->path_id);
4810 			splx(s);
4811 
4812 			if (run_queue) {
4813 				/*
4814 				 * Now that we are unfrozen run the send queue.
4815 				 */
4816 				xpt_run_dev_sendq(bus);
4817 			}
4818 			xpt_release_bus(bus);
4819 		} else
4820 			splx(s);
4821 	} else
4822 		splx(s);
4823 }
4824 
4825 static void
4826 xpt_release_simq_timeout(void *arg)
4827 {
4828 	struct cam_sim *sim;
4829 
4830 	sim = (struct cam_sim *)arg;
4831 	xpt_release_simq(sim, /* run_queue */ TRUE);
4832 }
4833 
4834 void
4835 xpt_done(union ccb *done_ccb)
4836 {
4837 	int s;
4838 
4839 	s = splcam();
4840 
4841 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4842 	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4843 		/*
4844 		 * Queue up the request for handling by our SWI handler;
4845 		 * this covers the "non-immediate" types of ccbs.
4846 		 */
4847 		switch (done_ccb->ccb_h.path->periph->type) {
4848 		case CAM_PERIPH_BIO:
4849 			mtx_lock(&cam_bioq_lock);
4850 			TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4851 					  sim_links.tqe);
4852 			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4853 			mtx_unlock(&cam_bioq_lock);
4854 			swi_sched(cambio_ih, 0);
4855 			break;
4856 		default:
4857 			panic("unknown periph type %d",
4858 			    done_ccb->ccb_h.path->periph->type);
4859 		}
4860 	}
4861 	splx(s);
4862 }
4863 
4864 union ccb *
4865 xpt_alloc_ccb(void)
4866 {
4867 	union ccb *new_ccb;
4868 
4869 	GIANT_REQUIRED;
4870 
4871 	new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK);
4872 	return (new_ccb);
4873 }
4874 
4875 void
4876 xpt_free_ccb(union ccb *free_ccb)
4877 {
4878 	free(free_ccb, M_DEVBUF);
4879 }
4880 
4881 
4882 
4883 /* Private XPT functions */
4884 
4885 /*
4886  * Get a CAM control block for the caller. Charge the structure to the device
4887  * referenced by the path.  If this device has no 'credits' then the
4888  * device already has the maximum number of outstanding operations under way
4889  * and we return NULL. If we don't have sufficient resources to allocate more
4890  * ccbs, we also return NULL.
4891  */
4892 static union ccb *
4893 xpt_get_ccb(struct cam_ed *device)
4894 {
4895 	union ccb *new_ccb;
4896 	int s;
4897 
4898 	s = splsoftcam();
4899 	if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
4900 		new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT);
4901 		if (new_ccb == NULL) {
4902 			splx(s);
4903 			return (NULL);
4904 		}
4905 		callout_handle_init(&new_ccb->ccb_h.timeout_ch);
4906 		SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
4907 				  xpt_links.sle);
4908 		xpt_ccb_count++;
4909 	}
4910 	cam_ccbq_take_opening(&device->ccbq);
4911 	SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
4912 	splx(s);
4913 	return (new_ccb);
4914 }
4915 
4916 static void
4917 xpt_release_bus(struct cam_eb *bus)
4918 {
4919 	int s;
4920 
4921 	s = splcam();
4922 	if ((--bus->refcount == 0)
4923 	 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
4924 		TAILQ_REMOVE(&xpt_busses, bus, links);
4925 		bus_generation++;
4926 		splx(s);
4927 		free(bus, M_DEVBUF);
4928 	} else
4929 		splx(s);
4930 }
4931 
4932 static struct cam_et *
4933 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4934 {
4935 	struct cam_et *target;
4936 
4937 	target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT);
4938 	if (target != NULL) {
4939 		struct cam_et *cur_target;
4940 
4941 		TAILQ_INIT(&target->ed_entries);
4942 		target->bus = bus;
4943 		target->target_id = target_id;
4944 		target->refcount = 1;
4945 		target->generation = 0;
4946 		timevalclear(&target->last_reset);
4947 		/*
4948 		 * Hold a reference to our parent bus so it
4949 		 * will not go away before we do.
4950 		 */
4951 		bus->refcount++;
4952 
4953 		/* Insertion sort into our bus's target list */
4954 		cur_target = TAILQ_FIRST(&bus->et_entries);
4955 		while (cur_target != NULL && cur_target->target_id < target_id)
4956 			cur_target = TAILQ_NEXT(cur_target, links);
4957 
4958 		if (cur_target != NULL) {
4959 			TAILQ_INSERT_BEFORE(cur_target, target, links);
4960 		} else {
4961 			TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4962 		}
4963 		bus->generation++;
4964 	}
4965 	return (target);
4966 }
4967 
4968 static void
4969 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4970 {
4971 	int s;
4972 
4973 	s = splcam();
4974 	if ((--target->refcount == 0)
4975 	 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
4976 		TAILQ_REMOVE(&bus->et_entries, target, links);
4977 		bus->generation++;
4978 		splx(s);
4979 		free(target, M_DEVBUF);
4980 		xpt_release_bus(bus);
4981 	} else
4982 		splx(s);
4983 }
4984 
4985 static struct cam_ed *
4986 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4987 {
4988 #ifdef CAM_NEW_TRAN_CODE
4989 	struct	   cam_path path;
4990 #endif /* CAM_NEW_TRAN_CODE */
4991 	struct	   cam_ed *device;
4992 	struct	   cam_devq *devq;
4993 	cam_status status;
4994 
4995 	/* Make space for us in the device queue on our bus */
4996 	devq = bus->sim->devq;
4997 	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4998 
4999 	if (status != CAM_REQ_CMP) {
5000 		device = NULL;
5001 	} else {
5002 		device = (struct cam_ed *)malloc(sizeof(*device),
5003 						 M_DEVBUF, M_NOWAIT);
5004 	}
5005 
5006 	if (device != NULL) {
5007 		struct cam_ed *cur_device;
5008 
5009 		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
5010 		device->alloc_ccb_entry.device = device;
5011 		cam_init_pinfo(&device->send_ccb_entry.pinfo);
5012 		device->send_ccb_entry.device = device;
5013 		device->target = target;
5014 		device->lun_id = lun_id;
5015 		/* Initialize our queues */
5016 		if (camq_init(&device->drvq, 0) != 0) {
5017 			free(device, M_DEVBUF);
5018 			return (NULL);
5019 		}
5020 		if (cam_ccbq_init(&device->ccbq,
5021 				  bus->sim->max_dev_openings) != 0) {
5022 			camq_fini(&device->drvq);
5023 			free(device, M_DEVBUF);
5024 			return (NULL);
5025 		}
5026 		SLIST_INIT(&device->asyncs);
5027 		SLIST_INIT(&device->periphs);
5028 		device->generation = 0;
5029 		device->owner = NULL;
5030 		/*
5031 		 * Take the default quirk entry until we have inquiry
5032 		 * data and can determine a better quirk to use.
5033 		 */
5034 		device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
5035 		bzero(&device->inq_data, sizeof(device->inq_data));
5036 		device->inq_flags = 0;
5037 		device->queue_flags = 0;
5038 		device->serial_num = NULL;
5039 		device->serial_num_len = 0;
5040 		device->qfrozen_cnt = 0;
5041 		device->flags = CAM_DEV_UNCONFIGURED;
5042 		device->tag_delay_count = 0;
5043 		device->refcount = 1;
5044 		callout_handle_init(&device->c_handle);
5045 
5046 		/*
5047 		 * Hold a reference to our parent target so it
5048 		 * will not go away before we do.
5049 		 */
5050 		target->refcount++;
5051 
5052 		/*
5053 		 * XXX should be limited by number of CCBs this bus can
5054 		 * do.
5055 		 */
5056 		xpt_max_ccbs += device->ccbq.devq_openings;
5057 		/* Insertion sort into our target's device list */
5058 		cur_device = TAILQ_FIRST(&target->ed_entries);
5059 		while (cur_device != NULL && cur_device->lun_id < lun_id)
5060 			cur_device = TAILQ_NEXT(cur_device, links);
5061 		if (cur_device != NULL) {
5062 			TAILQ_INSERT_BEFORE(cur_device, device, links);
5063 		} else {
5064 			TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
5065 		}
5066 		target->generation++;
5067 #ifdef CAM_NEW_TRAN_CODE
5068 		if (lun_id != CAM_LUN_WILDCARD) {
5069 			xpt_compile_path(&path,
5070 					 NULL,
5071 					 bus->path_id,
5072 					 target->target_id,
5073 					 lun_id);
5074 			xpt_devise_transport(&path);
5075 			xpt_release_path(&path);
5076 		}
5077 #endif /* CAM_NEW_TRAN_CODE */
5078 	}
5079 	return (device);
5080 }
5081 
5082 static void
5083 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
5084 		   struct cam_ed *device)
5085 {
5086 	int s;
5087 
5088 	s = splcam();
5089 	if ((--device->refcount == 0)
5090 	 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
5091 		struct cam_devq *devq;
5092 
5093 		if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
5094 		 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
5095 			panic("Removing device while still queued for ccbs");
5096 
5097 		if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
5098 			untimeout(xpt_release_devq_timeout, device,
5099 				  device->c_handle);
5100 
5101 		TAILQ_REMOVE(&target->ed_entries, device, links);
5102 		target->generation++;
5103 		xpt_max_ccbs -= device->ccbq.devq_openings;
5104 		/* Release our slot in the devq */
5105 		devq = bus->sim->devq;
5106 		cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
5107 		splx(s);
5108 		free(device, M_DEVBUF);
5109 		xpt_release_target(bus, target);
5110 	} else
5111 		splx(s);
5112 }
5113 
5114 static u_int32_t
5115 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
5116 {
5117 	int	s;
5118 	int	diff;
5119 	int	result;
5120 	struct	cam_ed *dev;
5121 
5122 	dev = path->device;
5123 	s = splsoftcam();
5124 
5125 	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
5126 	result = cam_ccbq_resize(&dev->ccbq, newopenings);
5127 	if (result == CAM_REQ_CMP && (diff < 0)) {
5128 		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
5129 	}
5130 	/* Adjust the global limit */
5131 	xpt_max_ccbs += diff;
5132 	splx(s);
5133 	return (result);
5134 }
5135 
5136 static struct cam_eb *
5137 xpt_find_bus(path_id_t path_id)
5138 {
5139 	struct cam_eb *bus;
5140 
5141 	for (bus = TAILQ_FIRST(&xpt_busses);
5142 	     bus != NULL;
5143 	     bus = TAILQ_NEXT(bus, links)) {
5144 		if (bus->path_id == path_id) {
5145 			bus->refcount++;
5146 			break;
5147 		}
5148 	}
5149 	return (bus);
5150 }
5151 
5152 static struct cam_et *
5153 xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
5154 {
5155 	struct cam_et *target;
5156 
5157 	for (target = TAILQ_FIRST(&bus->et_entries);
5158 	     target != NULL;
5159 	     target = TAILQ_NEXT(target, links)) {
5160 		if (target->target_id == target_id) {
5161 			target->refcount++;
5162 			break;
5163 		}
5164 	}
5165 	return (target);
5166 }
5167 
5168 static struct cam_ed *
5169 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5170 {
5171 	struct cam_ed *device;
5172 
5173 	for (device = TAILQ_FIRST(&target->ed_entries);
5174 	     device != NULL;
5175 	     device = TAILQ_NEXT(device, links)) {
5176 		if (device->lun_id == lun_id) {
5177 			device->refcount++;
5178 			break;
5179 		}
5180 	}
5181 	return (device);
5182 }
5183 
5184 typedef struct {
5185 	union	ccb *request_ccb;
5186 	struct 	ccb_pathinq *cpi;
5187 	int	pending_count;
5188 } xpt_scan_bus_info;
5189 
5190 /*
5191  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
5192  * As the scan progresses, xpt_scan_bus is used as the
5193  * completion callback function.
5194  */
5195 static void
5196 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
5197 {
5198 	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5199 		  ("xpt_scan_bus\n"));
5200 	switch (request_ccb->ccb_h.func_code) {
5201 	case XPT_SCAN_BUS:
5202 	{
5203 		xpt_scan_bus_info *scan_info;
5204 		union	ccb *work_ccb;
5205 		struct	cam_path *path;
5206 		u_int	i;
5207 		u_int	max_target;
5208 		u_int	initiator_id;
5209 
5210 		/* Find out the characteristics of the bus */
5211 		work_ccb = xpt_alloc_ccb();
5212 		xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
5213 			      request_ccb->ccb_h.pinfo.priority);
5214 		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5215 		xpt_action(work_ccb);
5216 		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5217 			request_ccb->ccb_h.status = work_ccb->ccb_h.status;
5218 			xpt_free_ccb(work_ccb);
5219 			xpt_done(request_ccb);
5220 			return;
5221 		}
5222 
5223 		if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5224 			/*
5225 			 * Can't scan the bus on an adapter that
5226 			 * cannot perform the initiator role.
5227 			 */
5228 			request_ccb->ccb_h.status = CAM_REQ_CMP;
5229 			xpt_free_ccb(work_ccb);
5230 			xpt_done(request_ccb);
5231 			return;
5232 		}
5233 
5234 		/* Save some state for use while we probe for devices */
5235 		scan_info = (xpt_scan_bus_info *)
5236 		    malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
5237 		scan_info->request_ccb = request_ccb;
5238 		scan_info->cpi = &work_ccb->cpi;
5239 
5240 		/* Cache on our stack so we can work asynchronously */
5241 		max_target = scan_info->cpi->max_target;
5242 		initiator_id = scan_info->cpi->initiator_id;
5243 
5244 		/*
5245 		 * Don't count the initiator if the
5246 		 * initiator is addressable.
5247 		 */
5248 		scan_info->pending_count = max_target + 1;
5249 		if (initiator_id <= max_target)
5250 			scan_info->pending_count--;
5251 
5252 		for (i = 0; i <= max_target; i++) {
5253 			cam_status status;
5254 			if (i == initiator_id)
5255 				continue;
5256 
5257 			status = xpt_create_path(&path, xpt_periph,
5258 						 request_ccb->ccb_h.path_id,
5259 						 i, 0);
5260 			if (status != CAM_REQ_CMP) {
5261 				printf("xpt_scan_bus: xpt_create_path failed"
5262 				       " with status %#x, bus scan halted\n",
5263 				       status);
5264 				break;
5265 			}
5266 			work_ccb = xpt_alloc_ccb();
5267 			xpt_setup_ccb(&work_ccb->ccb_h, path,
5268 				      request_ccb->ccb_h.pinfo.priority);
5269 			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5270 			work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5271 			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5272 			work_ccb->crcn.flags = request_ccb->crcn.flags;
5273 			xpt_action(work_ccb);
5274 		}
5275 		break;
5276 	}
5277 	case XPT_SCAN_LUN:
5278 	{
5279 		xpt_scan_bus_info *scan_info;
5280 		path_id_t path_id;
5281 		target_id_t target_id;
5282 		lun_id_t lun_id;
5283 
5284 		/* Reuse the same CCB to query if a device was really found */
5285 		scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5286 		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5287 			      request_ccb->ccb_h.pinfo.priority);
5288 		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5289 
5290 		path_id = request_ccb->ccb_h.path_id;
5291 		target_id = request_ccb->ccb_h.target_id;
5292 		lun_id = request_ccb->ccb_h.target_lun;
5293 		xpt_action(request_ccb);
5294 
5295 		if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5296 			struct cam_ed *device;
5297 			struct cam_et *target;
5298 			int s, phl;
5299 
5300 			/*
5301 			 * If we already probed lun 0 successfully, or
5302 			 * we have additional configured luns on this
5303 			 * target that might have "gone away", go onto
5304 			 * the next lun.
5305 			 */
5306 			target = request_ccb->ccb_h.path->target;
5307 			/*
5308 			 * We may touch devices that we don't
5309 			 * hold references to, so ensure they
5310 			 * don't disappear out from under us.
5311 			 * The target above is referenced by the
5312 			 * path in the request ccb.
5313 			 */
5314 			phl = 0;
5315 			s = splcam();
5316 			device = TAILQ_FIRST(&target->ed_entries);
5317 			if (device != NULL) {
5318 				phl = CAN_SRCH_HI(device);
5319 				if (device->lun_id == 0)
5320 					device = TAILQ_NEXT(device, links);
5321 			}
5322 			splx(s);
5323 			if ((lun_id != 0) || (device != NULL)) {
5324 				if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5325 					lun_id++;
5326 			}
5327 		} else {
5328 			struct cam_ed *device;
5329 
5330 			device = request_ccb->ccb_h.path->device;
5331 
5332 			if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5333 				/* Try the next lun */
5334 				if (lun_id < (CAM_SCSI2_MAXLUN-1)
5335 				  || CAN_SRCH_HI(device))
5336 					lun_id++;
5337 			}
5338 		}
5339 
5340 		xpt_free_path(request_ccb->ccb_h.path);
5341 
5342 		/* Check Bounds */
5343 		if ((lun_id == request_ccb->ccb_h.target_lun)
5344 		 || lun_id > scan_info->cpi->max_lun) {
5345 			/* We're done */
5346 
5347 			xpt_free_ccb(request_ccb);
5348 			scan_info->pending_count--;
5349 			if (scan_info->pending_count == 0) {
5350 				xpt_free_ccb((union ccb *)scan_info->cpi);
5351 				request_ccb = scan_info->request_ccb;
5352 				free(scan_info, M_TEMP);
5353 				request_ccb->ccb_h.status = CAM_REQ_CMP;
5354 				xpt_done(request_ccb);
5355 			}
5356 		} else {
5357 			/* Try the next device */
5358 			struct cam_path *path;
5359 			cam_status status;
5360 
5361 			path = request_ccb->ccb_h.path;
5362 			status = xpt_create_path(&path, xpt_periph,
5363 						 path_id, target_id, lun_id);
5364 			if (status != CAM_REQ_CMP) {
5365 				printf("xpt_scan_bus: xpt_create_path failed "
5366 				       "with status %#x, halting LUN scan\n",
5367 			 	       status);
5368 				xpt_free_ccb(request_ccb);
5369 				scan_info->pending_count--;
5370 				if (scan_info->pending_count == 0) {
5371 					xpt_free_ccb(
5372 						(union ccb *)scan_info->cpi);
5373 					request_ccb = scan_info->request_ccb;
5374 					free(scan_info, M_TEMP);
5375 					request_ccb->ccb_h.status = CAM_REQ_CMP;
5376 					xpt_done(request_ccb);
5377 					break;
5378 				}
5379 			}
5380 			xpt_setup_ccb(&request_ccb->ccb_h, path,
5381 				      request_ccb->ccb_h.pinfo.priority);
5382 			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5383 			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5384 			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5385 			request_ccb->crcn.flags =
5386 				scan_info->request_ccb->crcn.flags;
5387 			xpt_action(request_ccb);
5388 		}
5389 		break;
5390 	}
5391 	default:
5392 		break;
5393 	}
5394 }
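
/*
 * Sketch of how a full bus scan is kicked off; this mirrors the
 * boot-time scan CAM itself performs, with a hypothetical completion
 * callback:
 *
 *	struct cam_path *path;
 *	union ccb *ccb;
 *
 *	if (xpt_create_path(&path, xpt_periph, bus_path_id,
 *			    CAM_TARGET_WILDCARD,
 *			    CAM_LUN_WILDCARD) != CAM_REQ_CMP)
 *		return;
 *	ccb = xpt_alloc_ccb();
 *	xpt_setup_ccb(&ccb->ccb_h, path, 5);
 *	ccb->ccb_h.func_code = XPT_SCAN_BUS;
 *	ccb->ccb_h.cbfcnp = foo_scan_done;
 *	ccb->crcn.flags = CAM_FLAG_NONE;
 *	xpt_action(ccb);
 */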
5395 
5396 typedef enum {
5397 	PROBE_TUR,
5398 	PROBE_INQUIRY,
5399 	PROBE_FULL_INQUIRY,
5400 	PROBE_MODE_SENSE,
5401 	PROBE_SERIAL_NUM,
5402 	PROBE_TUR_FOR_NEGOTIATION
5403 } probe_action;
5404 
5405 typedef enum {
5406 	PROBE_INQUIRY_CKSUM	= 0x01,
5407 	PROBE_SERIAL_CKSUM	= 0x02,
5408 	PROBE_NO_ANNOUNCE	= 0x04
5409 } probe_flags;
5410 
5411 typedef struct {
5412 	TAILQ_HEAD(, ccb_hdr) request_ccbs;
5413 	probe_action	action;
5414 	union ccb	saved_ccb;
5415 	probe_flags	flags;
5416 	MD5_CTX		context;
5417 	u_int8_t	digest[16];
5418 } probe_softc;
5419 
5420 static void
5421 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5422 	     cam_flags flags, union ccb *request_ccb)
5423 {
5424 	struct ccb_pathinq cpi;
5425 	cam_status status;
5426 	struct cam_path *new_path;
5427 	struct cam_periph *old_periph;
5428 	int s;
5429 
5430 	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5431 		  ("xpt_scan_lun\n"));
5432 
5433 	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5434 	cpi.ccb_h.func_code = XPT_PATH_INQ;
5435 	xpt_action((union ccb *)&cpi);
5436 
5437 	if (cpi.ccb_h.status != CAM_REQ_CMP) {
5438 		if (request_ccb != NULL) {
5439 			request_ccb->ccb_h.status = cpi.ccb_h.status;
5440 			xpt_done(request_ccb);
5441 		}
5442 		return;
5443 	}
5444 
5445 	if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5446 		/*
5447 		 * Can't scan the bus on an adapter that
5448 		 * cannot perform the initiator role.
5449 		 */
5450 		if (request_ccb != NULL) {
5451 			request_ccb->ccb_h.status = CAM_REQ_CMP;
5452 			xpt_done(request_ccb);
5453 		}
5454 		return;
5455 	}
5456 
5457 	if (request_ccb == NULL) {
5458 		request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
5459 		if (request_ccb == NULL) {
5460 			xpt_print_path(path);
5461 			printf("xpt_scan_lun: can't allocate CCB, can't "
5462 			       "continue\n");
5463 			return;
5464 		}
5465 		new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
5466 		if (new_path == NULL) {
5467 			xpt_print_path(path);
5468 			printf("xpt_scan_lun: can't allocate path, can't "
5469 			       "continue\n");
5470 			free(request_ccb, M_TEMP);
5471 			return;
5472 		}
5473 		status = xpt_compile_path(new_path, xpt_periph,
5474 					  path->bus->path_id,
5475 					  path->target->target_id,
5476 					  path->device->lun_id);
5477 
5478 		if (status != CAM_REQ_CMP) {
5479 			xpt_print_path(path);
5480 			printf("xpt_scan_lun: can't compile path, can't "
5481 			       "continue\n");
5482 			free(request_ccb, M_TEMP);
5483 			free(new_path, M_TEMP);
5484 			return;
5485 		}
5486 		xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5487 		request_ccb->ccb_h.cbfcnp = xptscandone;
5488 		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5489 		request_ccb->crcn.flags = flags;
5490 	}
5491 
5492 	s = splsoftcam();
5493 	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5494 		probe_softc *softc;
5495 
5496 		softc = (probe_softc *)old_periph->softc;
5497 		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5498 				  periph_links.tqe);
5499 	} else {
5500 		status = cam_periph_alloc(proberegister, NULL, probecleanup,
5501 					  probestart, "probe",
5502 					  CAM_PERIPH_BIO,
5503 					  request_ccb->ccb_h.path, NULL, 0,
5504 					  request_ccb);
5505 
5506 		if (status != CAM_REQ_CMP) {
5507 			xpt_print_path(path);
5508 			printf("xpt_scan_lun: cam_alloc_periph returned an "
5509 			       "error, can't continue probe\n");
5510 			request_ccb->ccb_h.status = status;
5511 			xpt_done(request_ccb);
5512 		}
5513 	}
5514 	splx(s);
5515 }
5516 
5517 static void
5518 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5519 {
5520 	xpt_release_path(done_ccb->ccb_h.path);
5521 	free(done_ccb->ccb_h.path, M_TEMP);
5522 	free(done_ccb, M_TEMP);
5523 }
5524 
5525 static cam_status
5526 proberegister(struct cam_periph *periph, void *arg)
5527 {
5528 	union ccb *request_ccb;	/* CCB representing the probe request */
5529 	probe_softc *softc;
5530 
5531 	request_ccb = (union ccb *)arg;
5532 	if (periph == NULL) {
5533 		printf("proberegister: periph was NULL!!\n");
5534 		return(CAM_REQ_CMP_ERR);
5535 	}
5536 
5537 	if (request_ccb == NULL) {
5538 		printf("proberegister: no probe CCB, "
5539 		       "can't register device\n");
5540 		return(CAM_REQ_CMP_ERR);
5541 	}
5542 
5543 	softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
5544 
5545 	if (softc == NULL) {
5546 		printf("proberegister: Unable to probe new device. "
5547 		       "Unable to allocate softc\n");
5548 		return(CAM_REQ_CMP_ERR);
5549 	}
5550 	TAILQ_INIT(&softc->request_ccbs);
5551 	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5552 			  periph_links.tqe);
5553 	softc->flags = 0;
5554 	periph->softc = softc;
5555 	cam_periph_acquire(periph);
5556 	/*
5557 	 * Ensure we've waited at least a bus settle
5558 	 * delay before attempting to probe the device.
5559 	 * For HBAs that don't do bus resets, this won't make a difference.
5560 	 */
5561 	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5562 				      scsi_delay);
5563 	probeschedule(periph);
5564 	return(CAM_REQ_CMP);
5565 }
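/*
 * Editor's note: the cam_periph_acquire() above is balanced by the
 * cam_periph_invalidate()/cam_periph_release() pair at the bottom of
 * probedone() once request_ccbs drains.  Scan requests that arrive while
 * a probe periph already exists are appended to request_ccbs by
 * xpt_scan_lun() instead of allocating a second periph, so a single
 * probe instance services them all in turn.
 */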
5566 
5567 static void
5568 probeschedule(struct cam_periph *periph)
5569 {
5570 	struct ccb_pathinq cpi;
5571 	union ccb *ccb;
5572 	probe_softc *softc;
5573 
5574 	softc = (probe_softc *)periph->softc;
5575 	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5576 
5577 	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5578 	cpi.ccb_h.func_code = XPT_PATH_INQ;
5579 	xpt_action((union ccb *)&cpi);
5580 
5581 	/*
5582 	 * If a device has gone away and another device, or the same one,
5583 	 * is back in the same place, it should have a unit attention
5584 	 * condition pending.  It will not report the unit attention in
5585 	 * response to an inquiry, which may leave invalid transfer
5586 	 * negotiations in effect.  The TUR will reveal the unit attention
5587 	 * condition.  Only send the TUR for lun 0, since some devices
5588 	 * will get confused by commands other than inquiry to non-existent
5589 	 * luns.  If you think a device has gone away, start your scan from
5590 	 * lun 0.  This will ensure that any bogus transfer settings are
5591 	 * invalidated.
5592 	 *
5593 	 * If we haven't seen the device before and the controller supports
5594 	 * some kind of transfer negotiation, negotiate with the first
5595 	 * sent command if no bus reset was performed at startup.  This
5596 	 * ensures that the device is not confused by transfer negotiation
5597 	 * settings left over by loader or BIOS action.
5598 	 */
5599 	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5600 	 && (ccb->ccb_h.target_lun == 0)) {
5601 		softc->action = PROBE_TUR;
5602 	} else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5603 	      && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5604 		proberequestdefaultnegotiation(periph);
5605 		softc->action = PROBE_INQUIRY;
5606 	} else {
5607 		softc->action = PROBE_INQUIRY;
5608 	}
5609 
5610 	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5611 		softc->flags |= PROBE_NO_ANNOUNCE;
5612 	else
5613 		softc->flags &= ~PROBE_NO_ANNOUNCE;
5614 
5615 	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5616 }
5617 
5618 static void
5619 probestart(struct cam_periph *periph, union ccb *start_ccb)
5620 {
5621 	/* Probe the device that our peripheral driver points to */
5622 	struct ccb_scsiio *csio;
5623 	probe_softc *softc;
5624 
5625 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5626 
5627 	softc = (probe_softc *)periph->softc;
5628 	csio = &start_ccb->csio;
5629 
5630 	switch (softc->action) {
5631 	case PROBE_TUR:
5632 	case PROBE_TUR_FOR_NEGOTIATION:
5633 	{
5634 		scsi_test_unit_ready(csio,
5635 				     /*retries*/4,
5636 				     probedone,
5637 				     MSG_SIMPLE_Q_TAG,
5638 				     SSD_FULL_SIZE,
5639 				     /*timeout*/60000);
5640 		break;
5641 	}
5642 	case PROBE_INQUIRY:
5643 	case PROBE_FULL_INQUIRY:
5644 	{
5645 		u_int inquiry_len;
5646 		struct scsi_inquiry_data *inq_buf;
5647 
5648 		inq_buf = &periph->path->device->inq_data;
5649 		/*
5650 		 * If the device is currently configured, we calculate an
5651 		 * MD5 checksum of the inquiry data, and if the serial number
5652 		 * length is greater than 0, add the serial number data
5653 		 * into the checksum as well.  Once the inquiry and the
5654 		 * serial number check finish, we attempt to figure out
5655 		 * whether we still have the same device.
5656 		 */
5657 		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5658 
5659 			MD5Init(&softc->context);
5660 			MD5Update(&softc->context, (unsigned char *)inq_buf,
5661 				  sizeof(struct scsi_inquiry_data));
5662 			softc->flags |= PROBE_INQUIRY_CKSUM;
5663 			if (periph->path->device->serial_num_len > 0) {
5664 				MD5Update(&softc->context,
5665 					  periph->path->device->serial_num,
5666 					  periph->path->device->serial_num_len);
5667 				softc->flags |= PROBE_SERIAL_CKSUM;
5668 			}
5669 			MD5Final(softc->digest, &softc->context);
5670 		}
5671 
5672 		if (softc->action == PROBE_INQUIRY)
5673 			inquiry_len = SHORT_INQUIRY_LENGTH;
5674 		else
5675 			inquiry_len = inq_buf->additional_length
5676 				    + offsetof(struct scsi_inquiry_data,
5677                                                additional_length) + 1;
5678 
5679 		/*
5680 		 * Some parallel SCSI devices fail to send an
5681 		 * ignore wide residue message when dealing with
5682 		 * odd length inquiry requests.  Round up to be
5683 		 * safe.
5684 		 */
5685 		inquiry_len = roundup2(inquiry_len, 2);
5686 
5687 		scsi_inquiry(csio,
5688 			     /*retries*/4,
5689 			     probedone,
5690 			     MSG_SIMPLE_Q_TAG,
5691 			     (u_int8_t *)inq_buf,
5692 			     inquiry_len,
5693 			     /*evpd*/FALSE,
5694 			     /*page_code*/0,
5695 			     SSD_MIN_SIZE,
5696 			     /*timeout*/60 * 1000);
5697 		break;
5698 	}
5699 	case PROBE_MODE_SENSE:
5700 	{
5701 		void  *mode_buf;
5702 		int    mode_buf_len;
5703 
5704 		mode_buf_len = sizeof(struct scsi_mode_header_6)
5705 			     + sizeof(struct scsi_mode_blk_desc)
5706 			     + sizeof(struct scsi_control_page);
5707 		mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
5708 		if (mode_buf != NULL) {
5709 			scsi_mode_sense(csio,
5710 					/*retries*/4,
5711 					probedone,
5712 					MSG_SIMPLE_Q_TAG,
5713 					/*dbd*/FALSE,
5714 					SMS_PAGE_CTRL_CURRENT,
5715 					SMS_CONTROL_MODE_PAGE,
5716 					mode_buf,
5717 					mode_buf_len,
5718 					SSD_FULL_SIZE,
5719 					/*timeout*/60000);
5720 			break;
5721 		}
5722 		xpt_print_path(periph->path);
5723 		printf("Unable to mode sense control page - malloc failure\n");
5724 		softc->action = PROBE_SERIAL_NUM;
5725 	}
5726 	/* FALLTHROUGH */
5727 	case PROBE_SERIAL_NUM:
5728 	{
5729 		struct scsi_vpd_unit_serial_number *serial_buf;
5730 		struct cam_ed *device;
5731 
5732 		serial_buf = NULL;
5733 		device = periph->path->device;
5734 		device->serial_num = NULL;
5735 		device->serial_num_len = 0;
5736 
5737 		if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
5738 			serial_buf = (struct scsi_vpd_unit_serial_number *)
5739 				malloc(sizeof(*serial_buf), M_TEMP,
5740 					M_NOWAIT | M_ZERO);
5741 
5742 		if (serial_buf != NULL) {
5743 			scsi_inquiry(csio,
5744 				     /*retries*/4,
5745 				     probedone,
5746 				     MSG_SIMPLE_Q_TAG,
5747 				     (u_int8_t *)serial_buf,
5748 				     sizeof(*serial_buf),
5749 				     /*evpd*/TRUE,
5750 				     SVPD_UNIT_SERIAL_NUMBER,
5751 				     SSD_MIN_SIZE,
5752 				     /*timeout*/60 * 1000);
5753 			break;
5754 		}
5755 		/*
5756 		 * We'll have to do without; let our probedone
5757 		 * routine finish up for us.
5758 		 */
5759 		start_ccb->csio.data_ptr = NULL;
5760 		probedone(periph, start_ccb);
5761 		return;
5762 	}
5763 	}
5764 	xpt_action(start_ccb);
5765 }
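/*
 * Editor's note: a self-contained sketch of the change-detection scheme
 * used by the PROBE_INQUIRY case above and checked in probedone(): hash
 * the inquiry data (plus the serial number, when one exists) before a
 * reprobe, recompute afterwards, and treat a digest mismatch as "a
 * different device is now at this location".  The helper name
 * example_device_digest is hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static void
example_device_digest(struct scsi_inquiry_data *inq, u_int8_t *serial,
		      u_int serial_len, u_int8_t digest[16])
{
	MD5_CTX ctx;

	MD5Init(&ctx);
	MD5Update(&ctx, (unsigned char *)inq,
		  sizeof(struct scsi_inquiry_data));
	if (serial_len > 0)
		MD5Update(&ctx, serial, serial_len);
	MD5Final(digest, &ctx);
}

/*
 * Usage: compute a digest before and after the reprobe; the device is
 * considered unchanged only if bcmp(old, new, 16) == 0, exactly as the
 * PROBE_SERIAL_NUM case of probedone() decides below.
 */
#endif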
5766 
5767 static void
5768 proberequestdefaultnegotiation(struct cam_periph *periph)
5769 {
5770 	struct ccb_trans_settings cts;
5771 
5772 	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5773 	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5774 #ifdef CAM_NEW_TRAN_CODE
5775 	cts.type = CTS_TYPE_USER_SETTINGS;
5776 #else /* CAM_NEW_TRAN_CODE */
5777 	cts.flags = CCB_TRANS_USER_SETTINGS;
5778 #endif /* CAM_NEW_TRAN_CODE */
5779 	xpt_action((union ccb *)&cts);
5780 	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5781 #ifdef CAM_NEW_TRAN_CODE
5782 	cts.type = CTS_TYPE_CURRENT_SETTINGS;
5783 #else /* CAM_NEW_TRAN_CODE */
5784 	cts.flags &= ~CCB_TRANS_USER_SETTINGS;
5785 	cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
5786 #endif /* CAM_NEW_TRAN_CODE */
5787 	xpt_action((union ccb *)&cts);
5788 }
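/*
 * Editor's note: the routine above deliberately reuses one
 * ccb_trans_settings.  The first xpt_action() fetches the user (default)
 * settings; retargeting the same CCB at the current settings then lets
 * the second xpt_action() write those values back as the active
 * negotiation parameters.  This is how a freshly discovered device gets
 * sane initial transfer settings.
 */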
5789 
5790 static void
5791 probedone(struct cam_periph *periph, union ccb *done_ccb)
5792 {
5793 	probe_softc *softc;
5794 	struct cam_path *path;
5795 	u_int32_t  priority;
5796 
5797 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5798 
5799 	softc = (probe_softc *)periph->softc;
5800 	path = done_ccb->ccb_h.path;
5801 	priority = done_ccb->ccb_h.pinfo.priority;
5802 
5803 	switch (softc->action) {
5804 	case PROBE_TUR:
5805 	{
5806 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5807 
5808 			if (cam_periph_error(done_ccb, 0,
5809 					     SF_NO_PRINT, NULL) == ERESTART)
5810 				return;
5811 			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5812 				/* Don't wedge the queue */
5813 				xpt_release_devq(done_ccb->ccb_h.path,
5814 						 /*count*/1,
5815 						 /*run_queue*/TRUE);
5816 		}
5817 		softc->action = PROBE_INQUIRY;
5818 		xpt_release_ccb(done_ccb);
5819 		xpt_schedule(periph, priority);
5820 		return;
5821 	}
5822 	case PROBE_INQUIRY:
5823 	case PROBE_FULL_INQUIRY:
5824 	{
5825 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5826 			struct scsi_inquiry_data *inq_buf;
5827 			u_int8_t periph_qual;
5828 
5829 			path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
5830 			inq_buf = &path->device->inq_data;
5831 
5832 			periph_qual = SID_QUAL(inq_buf);
5833 
5834 			switch(periph_qual) {
5835 			case SID_QUAL_LU_CONNECTED:
5836 			{
5837 				u_int8_t len;
5838 
5839 				/*
5840 				 * We conservatively request only
5841 				 * SHORT_INQUIRY_LENGTH bytes of inquiry
5842 				 * information during our first try
5843 				 * at sending an INQUIRY. If the device
5844 				 * has more information to give,
5845 				 * perform a second request specifying
5846 				 * the amount of information the device
5847 				 * is willing to give.
5848 				 */
5849 				len = inq_buf->additional_length
5850 				    + offsetof(struct scsi_inquiry_data,
5851                                                additional_length) + 1;
5852 				if (softc->action == PROBE_INQUIRY
5853 				 && len > SHORT_INQUIRY_LENGTH) {
5854 					softc->action = PROBE_FULL_INQUIRY;
5855 					xpt_release_ccb(done_ccb);
5856 					xpt_schedule(periph, priority);
5857 					return;
5858 				}
5859 
5860 				xpt_find_quirk(path->device);
5861 
5862 #ifdef CAM_NEW_TRAN_CODE
5863 				xpt_devise_transport(path);
5864 #endif /* CAM_NEW_TRAN_CODE */
5865 				if ((inq_buf->flags & SID_CmdQue) != 0)
5866 					softc->action = PROBE_MODE_SENSE;
5867 				else
5868 					softc->action = PROBE_SERIAL_NUM;
5869 
5870 				path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5871 
5872 				xpt_release_ccb(done_ccb);
5873 				xpt_schedule(periph, priority);
5874 				return;
5875 			}
5876 			default:
5877 				break;
5878 			}
5879 		} else if (cam_periph_error(done_ccb, 0,
5880 					    done_ccb->ccb_h.target_lun > 0
5881 					    ? SF_RETRY_UA|SF_QUIET_IR
5882 					    : SF_RETRY_UA,
5883 					    &softc->saved_ccb) == ERESTART) {
5884 			return;
5885 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5886 			/* Don't wedge the queue */
5887 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5888 					 /*run_queue*/TRUE);
5889 		}
5890 		/*
5891 		 * If we get to this point, we got an error status back
5892 		 * from the inquiry and the error status doesn't require
5893 		 * automatically retrying the command.  Therefore, the
5894 		 * inquiry failed.  If we had inquiry information before
5895 		 * for this device, but this latest inquiry command failed,
5896 		 * the device has probably gone away.  If this device isn't
5897 		 * already marked unconfigured, notify the peripheral
5898 		 * drivers that this device is no more.
5899 		 */
5900 		if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5901 			/* Send the async notification. */
5902 			xpt_async(AC_LOST_DEVICE, path, NULL);
5903 
5904 		xpt_release_ccb(done_ccb);
5905 		break;
5906 	}
5907 	case PROBE_MODE_SENSE:
5908 	{
5909 		struct ccb_scsiio *csio;
5910 		struct scsi_mode_header_6 *mode_hdr;
5911 
5912 		csio = &done_ccb->csio;
5913 		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
5914 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5915 			struct scsi_control_page *page;
5916 			u_int8_t *offset;
5917 
5918 			offset = ((u_int8_t *)&mode_hdr[1])
5919 			    + mode_hdr->blk_desc_len;
5920 			page = (struct scsi_control_page *)offset;
5921 			path->device->queue_flags = page->queue_flags;
5922 		} else if (cam_periph_error(done_ccb, 0,
5923 					    SF_RETRY_UA|SF_NO_PRINT,
5924 					    &softc->saved_ccb) == ERESTART) {
5925 			return;
5926 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5927 			/* Don't wedge the queue */
5928 			xpt_release_devq(done_ccb->ccb_h.path,
5929 					 /*count*/1, /*run_queue*/TRUE);
5930 		}
5931 		xpt_release_ccb(done_ccb);
5932 		free(mode_hdr, M_TEMP);
5933 		softc->action = PROBE_SERIAL_NUM;
5934 		xpt_schedule(periph, priority);
5935 		return;
5936 	}
5937 	case PROBE_SERIAL_NUM:
5938 	{
5939 		struct ccb_scsiio *csio;
5940 		struct scsi_vpd_unit_serial_number *serial_buf;
5941 		u_int32_t  priority;
5942 		int changed;
5943 		int have_serialnum;
5944 
5945 		changed = 1;
5946 		have_serialnum = 0;
5947 		csio = &done_ccb->csio;
5948 		priority = done_ccb->ccb_h.pinfo.priority;
5949 		serial_buf =
5950 		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
5951 
5952 		/* Clean up from previous instance of this device */
5953 		if (path->device->serial_num != NULL) {
5954 			free(path->device->serial_num, M_DEVBUF);
5955 			path->device->serial_num = NULL;
5956 			path->device->serial_num_len = 0;
5957 		}
5958 
5959 		if (serial_buf == NULL) {
5960 			/*
5961 			 * Don't process the command as it was never sent
5962 			 */
5963 		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
5964 			&& (serial_buf->length > 0)) {
5965 
5966 			have_serialnum = 1;
5967 			path->device->serial_num =
5968 				(u_int8_t *)malloc((serial_buf->length + 1),
5969 						   M_DEVBUF, M_NOWAIT);
5970 			if (path->device->serial_num != NULL) {
5971 				bcopy(serial_buf->serial_num,
5972 				      path->device->serial_num,
5973 				      serial_buf->length);
5974 				path->device->serial_num_len =
5975 				    serial_buf->length;
5976 				path->device->serial_num[serial_buf->length]
5977 				    = '\0';
5978 			}
5979 		} else if (cam_periph_error(done_ccb, 0,
5980 					    SF_RETRY_UA|SF_NO_PRINT,
5981 					    &softc->saved_ccb) == ERESTART) {
5982 			return;
5983 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5984 			/* Don't wedge the queue */
5985 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5986 					 /*run_queue*/TRUE);
5987 		}
5988 
5989 		/*
5990 		 * Let's see if we have seen this device before.
5991 		 */
5992 		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
5993 			MD5_CTX context;
5994 			u_int8_t digest[16];
5995 
5996 			MD5Init(&context);
5997 
5998 			MD5Update(&context,
5999 				  (unsigned char *)&path->device->inq_data,
6000 				  sizeof(struct scsi_inquiry_data));
6001 
6002 			if (have_serialnum)
6003 				MD5Update(&context, serial_buf->serial_num,
6004 					  serial_buf->length);
6005 
6006 			MD5Final(digest, &context);
6007 			if (bcmp(softc->digest, digest, 16) == 0)
6008 				changed = 0;
6009 
6010 			/*
6011 			 * XXX Do we need to do a TUR in order to ensure
6012 			 *     that the device really hasn't changed???
6013 			 */
6014 			if ((changed != 0)
6015 			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
6016 				xpt_async(AC_LOST_DEVICE, path, NULL);
6017 		}
6018 		if (serial_buf != NULL)
6019 			free(serial_buf, M_TEMP);
6020 
6021 		if (changed != 0) {
6022 			/*
6023 			 * Now we have all the information necessary to
6024 			 * safely perform transfer negotiations.
6025 			 * Controllers don't perform any negotiation or
6026 			 * tagged queuing until after the first
6027 			 * XPT_SET_TRAN_SETTINGS ccb is received.  So, on
6028 			 * a new device, just retrieve the user settings
6029 			 * and apply them as the current settings to set
6030 			 * the device up.
6031 			 */
6032 			proberequestdefaultnegotiation(periph);
6033 			xpt_release_ccb(done_ccb);
6034 
6035 			/*
6036 			 * Perform a TUR to allow the controller to
6037 			 * perform any necessary transfer negotiation.
6038 			 */
6039 			softc->action = PROBE_TUR_FOR_NEGOTIATION;
6040 			xpt_schedule(periph, priority);
6041 			return;
6042 		}
6043 		xpt_release_ccb(done_ccb);
6044 		break;
6045 	}
6046 	case PROBE_TUR_FOR_NEGOTIATION:
6047 		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6048 			/* Don't wedge the queue */
6049 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6050 					 /*run_queue*/TRUE);
6051 		}
6052 
6053 		path->device->flags &= ~CAM_DEV_UNCONFIGURED;
6054 
6055 		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6056 			/* Inform the XPT that a new device has been found */
6057 			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6058 			xpt_action(done_ccb);
6059 
6060 			xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb);
6061 		}
6062 		xpt_release_ccb(done_ccb);
6063 		break;
6064 	}
6065 	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
6066 	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
6067 	done_ccb->ccb_h.status = CAM_REQ_CMP;
6068 	xpt_done(done_ccb);
6069 	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
6070 		cam_periph_invalidate(periph);
6071 		cam_periph_release(periph);
6072 	} else {
6073 		probeschedule(periph);
6074 	}
6075 }
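/*
 * Editor's summary of the probe state machine driven by probeschedule(),
 * probestart(), and probedone() above (transitions taken from the code):
 *
 *   entry:  PROBE_TUR      (previously configured device, lun 0)
 *           PROBE_INQUIRY  (otherwise; preceded by a default-negotiation
 *                           request when the HBA never resets the bus)
 *
 *   PROBE_TUR ----------------> PROBE_INQUIRY
 *   PROBE_INQUIRY ------------> PROBE_FULL_INQUIRY  (device offers more data)
 *   PROBE_{FULL_,}INQUIRY ----> PROBE_MODE_SENSE    (SID_CmdQue devices)
 *                          `--> PROBE_SERIAL_NUM    (all others)
 *   PROBE_MODE_SENSE ---------> PROBE_SERIAL_NUM
 *   PROBE_SERIAL_NUM ---------> PROBE_TUR_FOR_NEGOTIATION  (device changed)
 *   PROBE_TUR_FOR_NEGOTIATION > finished
 *
 * Whenever a state finishes the probe, the head of request_ccbs is
 * completed and either the next queued request is scheduled or the probe
 * periph is invalidated and released.
 */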
6076 
6077 static void
6078 probecleanup(struct cam_periph *periph)
6079 {
6080 	free(periph->softc, M_TEMP);
6081 }
6082 
6083 static void
6084 xpt_find_quirk(struct cam_ed *device)
6085 {
6086 	caddr_t	match;
6087 
6088 	match = cam_quirkmatch((caddr_t)&device->inq_data,
6089 			       (caddr_t)xpt_quirk_table,
6090 			       sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
6091 			       sizeof(*xpt_quirk_table), scsi_inquiry_match);
6092 
6093 	if (match == NULL)
6094 		panic("xpt_find_quirk: device didn't match wildcard entry!!");
6095 
6096 	device->quirk = (struct xpt_quirk_entry *)match;
6097 }
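/*
 * Editor's note: a representative xpt_quirk_table entry in the shape the
 * lookup above expects: an inquiry pattern followed by quirk flags and
 * tag limits.  The vendor/product strings are made up; the real table
 * (defined earlier in this file) ends in a wildcard entry, which is why
 * a failed match is treated as a panic.
 */
#if 0	/* illustrative only, not compiled */
static struct xpt_quirk_entry example_quirk = {
	/* Hypothetical disk whose unit serial number VPD page is broken */
	{ T_DIRECT, SIP_MEDIA_FIXED, "ACME", "DISK*", "*" },
	CAM_QUIRK_NOSERIAL, /*mintags*/2, /*maxtags*/32
};
#endif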
6098 
6099 #ifdef CAM_NEW_TRAN_CODE
6100 
6101 static void
6102 xpt_devise_transport(struct cam_path *path)
6103 {
6104 	struct ccb_pathinq cpi;
6105 	struct ccb_trans_settings cts;
6106 	struct scsi_inquiry_data *inq_buf;
6107 
6108 	/* Get transport information from the SIM */
6109 	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
6110 	cpi.ccb_h.func_code = XPT_PATH_INQ;
6111 	xpt_action((union ccb *)&cpi);
6112 
6113 	inq_buf = NULL;
6114 	if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
6115 		inq_buf = &path->device->inq_data;
6116 	path->device->protocol = PROTO_SCSI;
6117 	path->device->protocol_version =
6118 	    inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
6119 	path->device->transport = cpi.transport;
6120 	path->device->transport_version = cpi.transport_version;
6121 
6122 	/*
6123 	 * Any device not using SPI3 features should
6124 	 * be considered SPI2 or lower.
6125 	 */
6126 	if (inq_buf != NULL) {
6127 		if (path->device->transport == XPORT_SPI
6128 		 && (inq_buf->spi3data & SID_SPI_MASK) == 0
6129 		 && path->device->transport_version > 2)
6130 			path->device->transport_version = 2;
6131 	} else {
6132 		struct cam_ed *otherdev;
6133 
6134 		for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
6135 		     otherdev != NULL;
6136 		     otherdev = TAILQ_NEXT(otherdev, links)) {
6137 			if (otherdev != path->device)
6138 				break;
6139 		}
6140 
6141 		if (otherdev != NULL) {
6142 			/*
6143 			 * Initially assume the same versioning as
6144 			 * prior luns for this target.
6145 			 */
6146 			path->device->protocol_version =
6147 			    otherdev->protocol_version;
6148 			path->device->transport_version =
6149 			    otherdev->transport_version;
6150 		} else {
6151 			/* Until we know better, opt for safety */
6152 			path->device->protocol_version = 2;
6153 			if (path->device->transport == XPORT_SPI)
6154 				path->device->transport_version = 2;
6155 			else
6156 				path->device->transport_version = 0;
6157 		}
6158 	}
6159 
6160 	/*
6161 	 * XXX
6162 	 * For a device compliant with SPC-2 we should be able
6163 	 * to determine the transport version supported by
6164 	 * scrutinizing the version descriptors in the
6165 	 * inquiry buffer.
6166 	 */
6167 
6168 	/* Tell the controller what we think */
6169 	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6170 	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6171 	cts.type = CTS_TYPE_CURRENT_SETTINGS;
6172 	cts.transport = path->device->transport;
6173 	cts.transport_version = path->device->transport_version;
6174 	cts.protocol = path->device->protocol;
6175 	cts.protocol_version = path->device->protocol_version;
6176 	cts.proto_specific.valid = 0;
6177 	cts.xport_specific.valid = 0;
6178 	xpt_action((union ccb *)&cts);
6179 }
6180 
6181 static void
6182 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6183 			  int async_update)
6184 {
6185 	struct	ccb_pathinq cpi;
6186 	struct	ccb_trans_settings cur_cts;
6187 	struct	ccb_trans_settings_scsi *scsi;
6188 	struct	ccb_trans_settings_scsi *cur_scsi;
6189 	struct	cam_sim *sim;
6190 	struct	scsi_inquiry_data *inq_data;
6191 
6192 	if (device == NULL) {
6193 		cts->ccb_h.status = CAM_PATH_INVALID;
6194 		xpt_done((union ccb *)cts);
6195 		return;
6196 	}
6197 
6198 	if (cts->protocol == PROTO_UNKNOWN
6199 	 || cts->protocol == PROTO_UNSPECIFIED) {
6200 		cts->protocol = device->protocol;
6201 		cts->protocol_version = device->protocol_version;
6202 	}
6203 
6204 	if (cts->protocol_version == PROTO_VERSION_UNKNOWN
6205 	 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
6206 		cts->protocol_version = device->protocol_version;
6207 
6208 	if (cts->protocol != device->protocol) {
6209 		xpt_print_path(cts->ccb_h.path);
6210 		printf("Uninitialized Protocol %x:%x?\n",
6211 		       cts->protocol, device->protocol);
6212 		cts->protocol = device->protocol;
6213 	}
6214 
6215 	if (cts->protocol_version > device->protocol_version) {
6216 		if (bootverbose) {
6217 			xpt_print_path(cts->ccb_h.path);
6218 			printf("Down revving Protocol Version from %d to %d?\n",
6219 			       cts->protocol_version, device->protocol_version);
6220 		}
6221 		cts->protocol_version = device->protocol_version;
6222 	}
6223 
6224 	if (cts->transport == XPORT_UNKNOWN
6225 	 || cts->transport == XPORT_UNSPECIFIED) {
6226 		cts->transport = device->transport;
6227 		cts->transport_version = device->transport_version;
6228 	}
6229 
6230 	if (cts->transport_version == XPORT_VERSION_UNKNOWN
6231 	 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
6232 		cts->transport_version = device->transport_version;
6233 
6234 	if (cts->transport != device->transport) {
6235 		xpt_print_path(cts->ccb_h.path);
6236 		printf("Uninitialized Transport %x:%x?\n",
6237 		       cts->transport, device->transport);
6238 		cts->transport = device->transport;
6239 	}
6240 
6241 	if (cts->transport_version > device->transport_version) {
6242 		if (bootverbose) {
6243 			xpt_print_path(cts->ccb_h.path);
6244 			printf("Down revving Transport Version from %d to %d?\n",
6245 			       cts->transport_version,
6246 			       device->transport_version);
6247 		}
6248 		cts->transport_version = device->transport_version;
6249 	}
6250 
6251 	sim = cts->ccb_h.path->bus->sim;
6252 
6253 	/*
6254 	 * Nothing more of interest to do unless
6255 	 * this is a device connected via the
6256 	 * SCSI protocol.
6257 	 */
6258 	if (cts->protocol != PROTO_SCSI) {
6259 		if (async_update == FALSE)
6260 			(*(sim->sim_action))(sim, (union ccb *)cts);
6261 		return;
6262 	}
6263 
6264 	inq_data = &device->inq_data;
6265 	scsi = &cts->proto_specific.scsi;
6266 	xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6267 	cpi.ccb_h.func_code = XPT_PATH_INQ;
6268 	xpt_action((union ccb *)&cpi);
6269 
6270 	/* SCSI specific sanity checking */
6271 	if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6272 	 || (inq_data->flags & SID_CmdQue) == 0
6273 	 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6274 	 || (device->quirk->mintags == 0)) {
6275 		/*
6276 		 * Can't tag on hardware that doesn't support tags,
6277 		 * doesn't have it enabled, or has broken tag support.
6278 		 */
6279 		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6280 	}
6281 
6282 	if (async_update == FALSE) {
6283 		/*
6284 		 * Perform sanity checking against what the
6285 		 * controller and device can do.
6286 		 */
6287 		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6288 		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6289 		cur_cts.type = cts->type;
6290 		xpt_action((union ccb *)&cur_cts);
6291 
6292 		cur_scsi = &cur_cts.proto_specific.scsi;
6293 		if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
6294 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6295 			scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
6296 		}
6297 		if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
6298 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6299 	}
6300 
6301 	/* SPI specific sanity checking */
6302 	if (cts->transport == XPORT_SPI && async_update == FALSE) {
6303 		u_int spi3caps;
6304 		struct ccb_trans_settings_spi *spi;
6305 		struct ccb_trans_settings_spi *cur_spi;
6306 
6307 		spi = &cts->xport_specific.spi;
6308 
6309 		cur_spi = &cur_cts.xport_specific.spi;
6310 
6311 		/* Fill in any gaps in what the user gave us */
6312 		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6313 			spi->sync_period = cur_spi->sync_period;
6314 		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6315 			spi->sync_period = 0;
6316 		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6317 			spi->sync_offset = cur_spi->sync_offset;
6318 		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6319 			spi->sync_offset = 0;
6320 		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6321 			spi->ppr_options = cur_spi->ppr_options;
6322 		if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6323 			spi->ppr_options = 0;
6324 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6325 			spi->bus_width = cur_spi->bus_width;
6326 		if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6327 			spi->bus_width = 0;
6328 		if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
6329 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6330 			spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
6331 		}
6332 		if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
6333 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6334 		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6335 		  && (inq_data->flags & SID_Sync) == 0
6336 		  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6337 		 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6338 		 || (cur_spi->sync_offset == 0)
6339 		 || (cur_spi->sync_period == 0)) {
6340 			/* Force async */
6341 			spi->sync_period = 0;
6342 			spi->sync_offset = 0;
6343 		}
6344 
6345 		switch (spi->bus_width) {
6346 		case MSG_EXT_WDTR_BUS_32_BIT:
6347 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6348 			  || (inq_data->flags & SID_WBus32) != 0
6349 			  || cts->type == CTS_TYPE_USER_SETTINGS)
6350 			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6351 				break;
6352 			/* Fall Through to 16-bit */
6353 		case MSG_EXT_WDTR_BUS_16_BIT:
6354 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6355 			  || (inq_data->flags & SID_WBus16) != 0
6356 			  || cts->type == CTS_TYPE_USER_SETTINGS)
6357 			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6358 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6359 				break;
6360 			}
6361 			/* Fall Through to 8-bit */
6362 		default: /* New bus width?? */
6363 		case MSG_EXT_WDTR_BUS_8_BIT:
6364 			/* All targets can do this */
6365 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6366 			break;
6367 		}
6368 
6369 		spi3caps = cpi.xport_specific.spi.ppr_options;
6370 		if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6371 		 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6372 			spi3caps &= inq_data->spi3data;
6373 
6374 		if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
6375 			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
6376 
6377 		if ((spi3caps & SID_SPI_IUS) == 0)
6378 			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
6379 
6380 		if ((spi3caps & SID_SPI_QAS) == 0)
6381 			spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
6382 
6383 		/* No PPR options are allowed unless we are wide */
6384 		if (spi->bus_width == 0)
6385 			spi->ppr_options = 0;
6386 
6387 		if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
6388 			/*
6389 			 * Can't tag queue without disconnection.
6390 			 */
6391 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6392 			scsi->valid |= CTS_SCSI_VALID_TQ;
6393 		}
6394 
6395 		/*
6396 		 * If we are currently performing tagged transactions to
6397 		 * this device and want to change its negotiation parameters,
6398 		 * go non-tagged for a bit to give the controller a chance to
6399 		 * negotiate unhampered by tag messages.
6400 		 */
6401 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6402 		 && (device->inq_flags & SID_CmdQue) != 0
6403 		 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6404 		 && (spi->valid & (CTS_SPI_VALID_SYNC_RATE|
6405 				   CTS_SPI_VALID_SYNC_OFFSET|
6406 				   CTS_SPI_VALID_BUS_WIDTH)) != 0)
6407 			xpt_toggle_tags(cts->ccb_h.path);
6408 	}
6409 
6410 	if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6411 	 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
6412 		int device_tagenb;
6413 
6414 		/*
6415 		 * If we are transitioning from tags to no-tags or
6416 		 * vice-versa, we need to carefully freeze and restart
6417 		 * the queue so that we don't overlap tagged and non-tagged
6418 		 * commands.  We also temporarily stop tags if there is
6419 		 * a change in transfer negotiation settings to allow
6420 		 * "tag-less" negotiation.
6421 		 */
6422 		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6423 		 || (device->inq_flags & SID_CmdQue) != 0)
6424 			device_tagenb = TRUE;
6425 		else
6426 			device_tagenb = FALSE;
6427 
6428 		if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6429 		  && device_tagenb == FALSE)
6430 		 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
6431 		  && device_tagenb == TRUE)) {
6432 
6433 			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
6434 				/*
6435 				 * Delay change to use tags until after a
6436 				 * few commands have gone to this device so
6437 				 * the controller has time to perform transfer
6438 				 * negotiations without tagged messages getting
6439 				 * in the way.
6440 				 */
6441 				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6442 				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6443 			} else {
6444 				struct ccb_relsim crs;
6445 
6446 				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6447 				device->inq_flags &= ~SID_CmdQue;
6448 				xpt_dev_ccbq_resize(cts->ccb_h.path,
6449 						    sim->max_dev_openings);
6450 				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6451 				device->tag_delay_count = 0;
6452 
6453 				xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6454 					      /*priority*/1);
6455 				crs.ccb_h.func_code = XPT_REL_SIMQ;
6456 				crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6457 				crs.openings
6458 				    = crs.release_timeout
6459 				    = crs.qfrozen_cnt
6460 				    = 0;
6461 				xpt_action((union ccb *)&crs);
6462 			}
6463 		}
6464 	}
6465 	if (async_update == FALSE)
6466 		(*(sim->sim_action))(sim, (union ccb *)cts);
6467 }
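/*
 * Editor's note: a minimal sketch (new transport code) of the kind of
 * XPT_SET_TRAN_SETTINGS request that flows into the checks above, here
 * forcing async, narrow transfers on a SPI path.  Fields left
 * unspecified, and anything not flagged in the valid masks, are filled
 * in from the device's current state by the code above.  The function
 * name is hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static void
example_force_async_narrow(struct cam_path *path)
{
	struct ccb_trans_settings cts;

	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cts.protocol = PROTO_UNSPECIFIED;
	cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
	cts.transport = XPORT_UNSPECIFIED;
	cts.transport_version = XPORT_VERSION_UNSPECIFIED;
	cts.proto_specific.valid = 0;
	cts.xport_specific.spi.valid = CTS_SPI_VALID_SYNC_RATE
				     | CTS_SPI_VALID_SYNC_OFFSET
				     | CTS_SPI_VALID_BUS_WIDTH;
	cts.xport_specific.spi.sync_period = 0;	/* async */
	cts.xport_specific.spi.sync_offset = 0;
	cts.xport_specific.spi.bus_width = MSG_EXT_WDTR_BUS_8_BIT;
	xpt_action((union ccb *)&cts);
}
#endif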
6468 
6469 #else /* CAM_NEW_TRAN_CODE */
6470 
6471 static void
6472 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6473 			  int async_update)
6474 {
6475 	struct	cam_sim *sim;
6476 	int	qfrozen;
6477 
6478 	sim = cts->ccb_h.path->bus->sim;
6479 	if (async_update == FALSE) {
6480 		struct	scsi_inquiry_data *inq_data;
6481 		struct	ccb_pathinq cpi;
6482 		struct	ccb_trans_settings cur_cts;
6483 
6484 		if (device == NULL) {
6485 			cts->ccb_h.status = CAM_PATH_INVALID;
6486 			xpt_done((union ccb *)cts);
6487 			return;
6488 		}
6489 
6490 		/*
6491 		 * Perform sanity checking against what the
6492 		 * controller and device can do.
6493 		 */
6494 		xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6495 		cpi.ccb_h.func_code = XPT_PATH_INQ;
6496 		xpt_action((union ccb *)&cpi);
6497 		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6498 		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6499 		cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
6500 		xpt_action((union ccb *)&cur_cts);
6501 		inq_data = &device->inq_data;
6502 
6503 		/* Fill in any gaps in what the user gave us */
6504 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
6505 			cts->sync_period = cur_cts.sync_period;
6506 		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
6507 			cts->sync_offset = cur_cts.sync_offset;
6508 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
6509 			cts->bus_width = cur_cts.bus_width;
6510 		if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
6511 			cts->flags &= ~CCB_TRANS_DISC_ENB;
6512 			cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
6513 		}
6514 		if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
6515 			cts->flags &= ~CCB_TRANS_TAG_ENB;
6516 			cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
6517 		}
6518 
6519 		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6520 		  && (inq_data->flags & SID_Sync) == 0)
6521 		 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6522 		 || (cts->sync_offset == 0)
6523 		 || (cts->sync_period == 0)) {
6524 			/* Force async */
6525 			cts->sync_period = 0;
6526 			cts->sync_offset = 0;
6527 		} else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6528 			&& (inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
6529 			&& cts->sync_period <= 0x9) {
6530 			/*
6531 			 * Don't allow DT transmission rates if the
6532 			 * device does not support it.
6533 			 */
6534 			cts->sync_period = 0xa;
6535 		}
6536 
6537 		switch (cts->bus_width) {
6538 		case MSG_EXT_WDTR_BUS_32_BIT:
6539 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6540 			  || (inq_data->flags & SID_WBus32) != 0)
6541 			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6542 				break;
6543 			/* FALLTHROUGH to 16-bit */
6544 		case MSG_EXT_WDTR_BUS_16_BIT:
6545 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6546 			  || (inq_data->flags & SID_WBus16) != 0)
6547 			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6548 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6549 				break;
6550 			}
6551 			/* FALLTHROUGH to 8-bit */
6552 		default: /* New bus width?? */
6553 		case MSG_EXT_WDTR_BUS_8_BIT:
6554 			/* All targets can do this */
6555 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6556 			break;
6557 		}
6558 
6559 		if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
6560 			/*
6561 			 * Can't tag queue without disconnection.
6562 			 */
6563 			cts->flags &= ~CCB_TRANS_TAG_ENB;
6564 			cts->valid |= CCB_TRANS_TQ_VALID;
6565 		}
6566 
6567 		if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6568 		 || (inq_data->flags & SID_CmdQue) == 0
6569 		 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6570 		 || (device->quirk->mintags == 0)) {
6571 			/*
6572 			 * Can't tag on hardware that doesn't support tags,
6573 			 * doesn't have it enabled, or has broken tag support.
6574 			 */
6575 			cts->flags &= ~CCB_TRANS_TAG_ENB;
6576 		}
6577 	}
6578 
6579 	qfrozen = FALSE;
6580 	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
6581 		int device_tagenb;
6582 
6583 		/*
6584 		 * If we are transitioning from tags to no-tags or
6585 		 * vice-versa, we need to carefully freeze and restart
6586 		 * the queue so that we don't overlap tagged and non-tagged
6587 		 * commands.  We also temporarily stop tags if there is
6588 		 * a change in transfer negotiation settings to allow
6589 		 * "tag-less" negotiation.
6590 		 */
6591 		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6592 		 || (device->inq_flags & SID_CmdQue) != 0)
6593 			device_tagenb = TRUE;
6594 		else
6595 			device_tagenb = FALSE;
6596 
6597 		if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
6598 		  && device_tagenb == FALSE)
6599 		 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
6600 		  && device_tagenb == TRUE)) {
6601 
6602 			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
6603 				/*
6604 				 * Delay change to use tags until after a
6605 				 * few commands have gone to this device so
6606 				 * the controller has time to perform transfer
6607 				 * negotiations without tagged messages getting
6608 				 * in the way.
6609 				 */
6610 				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6611 				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6612 			} else {
6613 				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6614 				qfrozen = TRUE;
6615 				device->inq_flags &= ~SID_CmdQue;
6616 				xpt_dev_ccbq_resize(cts->ccb_h.path,
6617 						    sim->max_dev_openings);
6618 				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6619 				device->tag_delay_count = 0;
6620 			}
6621 		}
6622 	}
6623 
6624 	if (async_update == FALSE) {
6625 		/*
6626 		 * If we are currently performing tagged transactions to
6627 		 * this device and want to change its negotiation parameters,
6628 		 * go non-tagged for a bit to give the controller a chance to
6629 		 * negotiate unhampered by tag messages.
6630 		 */
6631 		if ((device->inq_flags & SID_CmdQue) != 0
6632 		 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
6633 				   CCB_TRANS_SYNC_OFFSET_VALID|
6634 				   CCB_TRANS_BUS_WIDTH_VALID)) != 0)
6635 			xpt_toggle_tags(cts->ccb_h.path);
6636 
6637 		(*(sim->sim_action))(sim, (union ccb *)cts);
6638 	}
6639 
6640 	if (qfrozen) {
6641 		struct ccb_relsim crs;
6642 
6643 		xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6644 			      /*priority*/1);
6645 		crs.ccb_h.func_code = XPT_REL_SIMQ;
6646 		crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6647 		crs.openings
6648 		    = crs.release_timeout
6649 		    = crs.qfrozen_cnt
6650 		    = 0;
6651 		xpt_action((union ccb *)&crs);
6652 	}
6653 }
6654 
6656 #endif /* CAM_NEW_TRAN_CODE */
6657 
6658 static void
6659 xpt_toggle_tags(struct cam_path *path)
6660 {
6661 	struct cam_ed *dev;
6662 
6663 	/*
6664 	 * Give controllers a chance to renegotiate
6665 	 * before starting tag operations.  We
6666 	 * "toggle" tagged queuing off then on,
6667 	 * which causes the tag enable command delay
6668 	 * counter to come into effect.
6669 	 */
6670 	dev = path->device;
6671 	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6672 	 || ((dev->inq_flags & SID_CmdQue) != 0
6673  	  && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
6674 		struct ccb_trans_settings cts;
6675 
6676 		xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6677 #ifdef CAM_NEW_TRAN_CODE
6678 		cts.protocol = PROTO_SCSI;
6679 		cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
6680 		cts.transport = XPORT_UNSPECIFIED;
6681 		cts.transport_version = XPORT_VERSION_UNSPECIFIED;
6682 		cts.proto_specific.scsi.flags = 0;
6683 		cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
6684 #else /* CAM_NEW_TRAN_CODE */
6685 		cts.flags = 0;
6686 		cts.valid = CCB_TRANS_TQ_VALID;
6687 #endif /* CAM_NEW_TRAN_CODE */
6688 		xpt_set_transfer_settings(&cts, path->device,
6689 					  /*async_update*/TRUE);
6690 #ifdef CAM_NEW_TRAN_CODE
6691 		cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
6692 #else /* CAM_NEW_TRAN_CODE */
6693 		cts.flags = CCB_TRANS_TAG_ENB;
6694 #endif /* CAM_NEW_TRAN_CODE */
6695 		xpt_set_transfer_settings(&cts, path->device,
6696 					  /*async_update*/TRUE);
6697 	}
6698 }
6699 
6700 static void
6701 xpt_start_tags(struct cam_path *path)
6702 {
6703 	struct ccb_relsim crs;
6704 	struct cam_ed *device;
6705 	struct cam_sim *sim;
6706 	int    newopenings;
6707 
6708 	device = path->device;
6709 	sim = path->bus->sim;
6710 	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6711 	xpt_freeze_devq(path, /*count*/1);
6712 	device->inq_flags |= SID_CmdQue;
6713 	newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings);
6714 	xpt_dev_ccbq_resize(path, newopenings);
6715 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
6716 	crs.ccb_h.func_code = XPT_REL_SIMQ;
6717 	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6718 	crs.openings
6719 	    = crs.release_timeout
6720 	    = crs.qfrozen_cnt
6721 	    = 0;
6722 	xpt_action((union ccb *)&crs);
6723 }
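/*
 * Editor's note: the ordering in xpt_start_tags() matters.  The device
 * queue is frozen before SID_CmdQue is set and the CCB queue resized, so
 * no command can be dispatched while the tag state and opening count
 * disagree, and the XPT_REL_SIMQ request with RELSIM_RELEASE_AFTER_QEMPTY
 * thaws things only after the commands already outstanding have drained.
 */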
6724 
6725 static int busses_to_config;
6726 static int busses_to_reset;
6727 
6728 static int
6729 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
6730 {
6731 	if (bus->path_id != CAM_XPT_PATH_ID) {
6732 		struct cam_path path;
6733 		struct ccb_pathinq cpi;
6734 		int can_negotiate;
6735 
6736 		busses_to_config++;
6737 		xpt_compile_path(&path, NULL, bus->path_id,
6738 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
6739 		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
6740 		cpi.ccb_h.func_code = XPT_PATH_INQ;
6741 		xpt_action((union ccb *)&cpi);
6742 		can_negotiate = cpi.hba_inquiry;
6743 		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6744 		if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
6745 		 && can_negotiate)
6746 			busses_to_reset++;
6747 		xpt_release_path(&path);
6748 	}
6749 
6750 	return(1);
6751 }
6752 
6753 static int
6754 xptconfigfunc(struct cam_eb *bus, void *arg)
6755 {
6756 	struct	cam_path *path;
6757 	union	ccb *work_ccb;
6758 
6759 	if (bus->path_id != CAM_XPT_PATH_ID) {
6760 		cam_status status;
6761 		int can_negotiate;
6762 
6763 		work_ccb = xpt_alloc_ccb();
6764 		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6765 					      CAM_TARGET_WILDCARD,
6766 					      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
6767 			printf("xptconfigfunc: xpt_create_path failed with "
6768 			       "status %#x for bus %d\n", status, bus->path_id);
6769 			printf("xptconfigfunc: halting bus configuration\n");
6770 			xpt_free_ccb(work_ccb);
6771 			busses_to_config--;
6772 			xpt_finishconfig(xpt_periph, NULL);
6773 			return(0);
6774 		}
6775 		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6776 		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
6777 		xpt_action(work_ccb);
6778 		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
6779 			printf("xptconfigfunc: CPI failed on bus %d "
6780 			       "with status %d\n", bus->path_id,
6781 			       work_ccb->ccb_h.status);
6782 			xpt_finishconfig(xpt_periph, work_ccb);
6783 			return(1);
6784 		}
6785 
6786 		can_negotiate = work_ccb->cpi.hba_inquiry;
6787 		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6788 		if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
6789 		 && (can_negotiate != 0)) {
6790 			xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6791 			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6792 			work_ccb->ccb_h.cbfcnp = NULL;
6793 			CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
6794 				  ("Resetting Bus\n"));
6795 			xpt_action(work_ccb);
6796 			xpt_finishconfig(xpt_periph, work_ccb);
6797 		} else {
6798 			/* Act as though we performed a successful BUS RESET */
6799 			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6800 			xpt_finishconfig(xpt_periph, work_ccb);
6801 		}
6802 	}
6803 
6804 	return(1);
6805 }
6806 
6807 static void
6808 xpt_config(void *arg)
6809 {
6810 	/*
6811 	 * Now that interrupts are enabled, go find our devices
6812 	 */
6813 
6814 #ifdef CAMDEBUG
6815 	/* Setup debugging flags and path */
6816 #ifdef CAM_DEBUG_FLAGS
6817 	cam_dflags = CAM_DEBUG_FLAGS;
6818 #else /* !CAM_DEBUG_FLAGS */
6819 	cam_dflags = CAM_DEBUG_NONE;
6820 #endif /* CAM_DEBUG_FLAGS */
6821 #ifdef CAM_DEBUG_BUS
6822 	if (cam_dflags != CAM_DEBUG_NONE) {
6823 		if (xpt_create_path(&cam_dpath, xpt_periph,
6824 				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
6825 				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
6826 			printf("xpt_config: xpt_create_path() failed for debug"
6827 			       " target %d:%d:%d, debugging disabled\n",
6828 			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
6829 			cam_dflags = CAM_DEBUG_NONE;
6830 		}
6831 	} else
6832 		cam_dpath = NULL;
6833 #else /* !CAM_DEBUG_BUS */
6834 	cam_dpath = NULL;
6835 #endif /* CAM_DEBUG_BUS */
6836 #endif /* CAMDEBUG */
6837 
6838 	/*
6839 	 * Scan all installed busses.
6840 	 */
6841 	xpt_for_all_busses(xptconfigbuscountfunc, NULL);
6842 
6843 	if (busses_to_config == 0) {
6844 		/* Call manually because we don't have any busses */
6845 		xpt_finishconfig(xpt_periph, NULL);
6846 	} else  {
6847 		if (busses_to_reset > 0 && scsi_delay >= 2000) {
6848 			printf("Waiting %d seconds for SCSI "
6849 			       "devices to settle\n", scsi_delay/1000);
6850 		}
6851 		xpt_for_all_busses(xptconfigfunc, NULL);
6852 	}
6853 }
6854 
6855 /*
6856  * If the given device only has one peripheral attached to it, and if that
6857  * peripheral is the passthrough driver, announce it.  This ensures that the
6858  * user sees some sort of announcement for every peripheral in their system.
6859  */
6860 static int
6861 xptpassannouncefunc(struct cam_ed *device, void *arg)
6862 {
6863 	struct cam_periph *periph;
6864 	int i;
6865 
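	/* Count the peripheral instances attached to this device. */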
6866 	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
6867 	     periph = SLIST_NEXT(periph, periph_links), i++);
6868 
6869 	periph = SLIST_FIRST(&device->periphs);
6870 	if ((i == 1)
6871 	 && (strncmp(periph->periph_name, "pass", 4) == 0))
6872 		xpt_announce_periph(periph, NULL);
6873 
6874 	return(1);
6875 }
6876 
6877 static void
6878 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
6879 {
6880 	struct	periph_driver **p_drv;
6881 	int	i;
6882 
6883 	if (done_ccb != NULL) {
6884 		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
6885 			  ("xpt_finishconfig\n"));
6886 		switch(done_ccb->ccb_h.func_code) {
6887 		case XPT_RESET_BUS:
6888 			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
6889 				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
6890 				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
6891 				xpt_action(done_ccb);
6892 				return;
6893 			}
6894 			/* FALLTHROUGH */
6895 		case XPT_SCAN_BUS:
6896 		default:
6897 			xpt_free_path(done_ccb->ccb_h.path);
6898 			busses_to_config--;
6899 			break;
6900 		}
6901 	}
6902 
6903 	if (busses_to_config == 0) {
6904 		/* Register all the peripheral drivers */
6905 		/* XXX This will have to change when we have loadable modules */
6906 		p_drv = periph_drivers;
6907 		for (i = 0; p_drv[i] != NULL; i++) {
6908 			(*p_drv[i]->init)();
6909 		}
6910 
6911 		/*
6912 		 * Check for devices with no "standard" peripheral driver
6913 		 * attached.  For any devices like that, announce the
6914 		 * passthrough driver so the user will see something.
6915 		 */
6916 		xpt_for_all_devices(xptpassannouncefunc, NULL);
6917 
6918 		/* Release our hook so that the boot can continue. */
6919 		config_intrhook_disestablish(xpt_config_hook);
6920 		free(xpt_config_hook, M_TEMP);
6921 		xpt_config_hook = NULL;
6922 	}
6923 	if (done_ccb != NULL)
6924 		xpt_free_ccb(done_ccb);
6925 }
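/*
 * Editor's note: the xpt_config_hook released above is a standard
 * config_intrhook, the mechanism that defers xpt_config() until
 * interrupts are enabled.  A sketch of the establishing side, which in
 * this file happens at initialization time (the example_* names are
 * illustrative):
 */
#if 0	/* illustrative only, not compiled */
static struct intr_config_hook *example_hook;

static void
example_defer_config(void)
{
	example_hook = malloc(sizeof(struct intr_config_hook), M_TEMP,
			      M_NOWAIT | M_ZERO);
	if (example_hook == NULL)
		return;
	example_hook->ich_func = xpt_config;	/* runs once interrupts are on */
	example_hook->ich_arg = NULL;
	if (config_intrhook_establish(example_hook) != 0) {
		free(example_hook, M_TEMP);
		example_hook = NULL;
	}
}
#endif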
6926 
6927 static void
6928 xptaction(struct cam_sim *sim, union ccb *work_ccb)
6929 {
6930 	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
6931 
6932 	switch (work_ccb->ccb_h.func_code) {
6933 	/* Common cases first */
6934 	case XPT_PATH_INQ:		/* Path routing inquiry */
6935 	{
6936 		struct ccb_pathinq *cpi;
6937 
6938 		cpi = &work_ccb->cpi;
6939 		cpi->version_num = 1; /* XXX??? */
6940 		cpi->hba_inquiry = 0;
6941 		cpi->target_sprt = 0;
6942 		cpi->hba_misc = 0;
6943 		cpi->hba_eng_cnt = 0;
6944 		cpi->max_target = 0;
6945 		cpi->max_lun = 0;
6946 		cpi->initiator_id = 0;
6947 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
6948 		strncpy(cpi->hba_vid, "", HBA_IDLEN);
6949 		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
6950 		cpi->unit_number = sim->unit_number;
6951 		cpi->bus_id = sim->bus_id;
6952 		cpi->base_transfer_speed = 0;
6953 #ifdef CAM_NEW_TRAN_CODE
6954 		cpi->protocol = PROTO_UNSPECIFIED;
6955 		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
6956 		cpi->transport = XPORT_UNSPECIFIED;
6957 		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
6958 #endif /* CAM_NEW_TRAN_CODE */
6959 		cpi->ccb_h.status = CAM_REQ_CMP;
6960 		xpt_done(work_ccb);
6961 		break;
6962 	}
6963 	default:
6964 		work_ccb->ccb_h.status = CAM_REQ_INVALID;
6965 		xpt_done(work_ccb);
6966 		break;
6967 	}
6968 }
6969 
6970 /*
6971  * The xpt as a "controller" has no interrupt sources, so polling
6972  * is a no-op.
6973  */
6974 static void
6975 xptpoll(struct cam_sim *sim)
6976 {
6977 }
6978 
6979 static void
6980 camisr(void *V_queue)
6981 {
6982 	cam_isrq_t *oqueue = V_queue;
6983 	cam_isrq_t queue;
6984 	int	s;
6985 	struct	ccb_hdr *ccb_h;
6986 
6987 	/*
6988 	 * Transfer the ccb_bioq list to a temporary list so we can operate
6989 	 * on it without needing to lock/unlock on every loop.  The concat
6990 	 * function will re-init the real list for us.
6991 	 */
6992 	s = splcam();
6993 	mtx_lock(&cam_bioq_lock);
6994 	TAILQ_INIT(&queue);
6995 	TAILQ_CONCAT(&queue, oqueue, sim_links.tqe);
6996 	mtx_unlock(&cam_bioq_lock);
6997 
6998 	while ((ccb_h = TAILQ_FIRST(&queue)) != NULL) {
6999 		int	runq;
7000 
7001 		TAILQ_REMOVE(&queue, ccb_h, sim_links.tqe);
7002 		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
7003 		splx(s);
7004 
7005 		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
7006 			  ("camisr\n"));
7007 
7008 		runq = FALSE;
7009 
7010 		if (ccb_h->flags & CAM_HIGH_POWER) {
7011 			struct highpowerlist	*hphead;
7012 			union ccb		*send_ccb;
7013 
7014 			hphead = &highpowerq;
7015 
7016 			send_ccb = (union ccb *)STAILQ_FIRST(hphead);
7017 
7018 			/*
7019 			 * Increment the count since this command is done.
7020 			 */
7021 			num_highpower++;
7022 
7023 			/*
7024 			 * Any high powered commands queued up?
7025 			 */
7026 			if (send_ccb != NULL) {
7027 
7028 				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
7029 
7030 				xpt_release_devq(send_ccb->ccb_h.path,
7031 						 /*count*/1, /*run_queue*/TRUE);
7032 			}
7033 		}
7034 		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
7035 			struct cam_ed *dev;
7036 
7037 			dev = ccb_h->path->device;
7038 
7039 			s = splcam();
7040 			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
7041 
7042 			ccb_h->path->bus->sim->devq->send_active--;
7043 			ccb_h->path->bus->sim->devq->send_openings++;
7044 			splx(s);
7045 
7046 			if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
7047 			  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
7048 			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
7049 			  && (dev->ccbq.dev_active == 0))) {
7050 
7051 				xpt_release_devq(ccb_h->path, /*count*/1,
7052 						 /*run_queue*/TRUE);
7053 			}
7054 
7055 			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
7056 			 && (--dev->tag_delay_count == 0))
7057 				xpt_start_tags(ccb_h->path);
7058 
7059 			if ((dev->ccbq.queue.entries > 0)
7060 			 && (dev->qfrozen_cnt == 0)
7061 			 && (device_is_send_queued(dev) == 0)) {
7062 				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
7063 							      dev);
7064 			}
7065 		}
7066 
7067 		if (ccb_h->status & CAM_RELEASE_SIMQ) {
7068 			xpt_release_simq(ccb_h->path->bus->sim,
7069 					 /*run_queue*/TRUE);
7070 			ccb_h->status &= ~CAM_RELEASE_SIMQ;
7071 			runq = FALSE;
7072 		}
7073 
7074 		if ((ccb_h->flags & CAM_DEV_QFRZDIS)
7075 		 && (ccb_h->status & CAM_DEV_QFRZN)) {
7076 			xpt_release_devq(ccb_h->path, /*count*/1,
7077 					 /*run_queue*/TRUE);
7078 			ccb_h->status &= ~CAM_DEV_QFRZN;
7079 		} else if (runq) {
7080 			xpt_run_dev_sendq(ccb_h->path->bus);
7081 		}
7082 
7083 		/* Call the peripheral driver's callback */
7084 		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
7085 
7086 		/* Raise IPL for the while-loop test */
7087 		s = splcam();
7088 	}
7089 	splx(s);
7090 }
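/*
 * Editor's note: camisr() is the consumer half of CCB completion.  The
 * producer half, xpt_done() (defined earlier in this file), queues each
 * finished CCB and schedules this software interrupt, roughly as sketched
 * below for the CAM_PERIPH_BIO case.  cam_bioq, cam_bioq_lock, and the
 * SWI cookie cambio_ih are this file's own; the sketch is simplified and
 * omits the spl protection of the real routine.
 */
#if 0	/* illustrative only, not compiled */
static void
example_done(union ccb *done_ccb)
{
	mtx_lock(&cam_bioq_lock);
	TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h, sim_links.tqe);
	done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
	mtx_unlock(&cam_bioq_lock);
	swi_sched(cambio_ih, 0);	/* eventually runs camisr(&cam_bioq) */
}
#endif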
7091