xref: /freebsd/sys/cam/cam_xpt.c (revision eacae6dc66aa881c102f11e2003174eea7e8af74)
1  /*-
2   * Implementation of the Common Access Method Transport (XPT) layer.
3   *
4   * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5   * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6   * All rights reserved.
7   *
8   * Redistribution and use in source and binary forms, with or without
9   * modification, are permitted provided that the following conditions
10   * are met:
11   * 1. Redistributions of source code must retain the above copyright
12   *    notice, this list of conditions, and the following disclaimer,
13   *    without modification, immediately at the beginning of the file.
14   * 2. The name of the author may not be used to endorse or promote products
15   *    derived from this software without specific prior written permission.
16   *
17   * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18   * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19   * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20   * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21   * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22   * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23   * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24   * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25   * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26   * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27   * SUCH DAMAGE.
28   */
29  
30  #include <sys/cdefs.h>
31  __FBSDID("$FreeBSD$");
32  
33  #include <sys/param.h>
34  #include <sys/bus.h>
35  #include <sys/systm.h>
36  #include <sys/types.h>
37  #include <sys/malloc.h>
38  #include <sys/kernel.h>
39  #include <sys/time.h>
40  #include <sys/conf.h>
41  #include <sys/fcntl.h>
42  #include <sys/interrupt.h>
43  #include <sys/proc.h>
44  #include <sys/sbuf.h>
45  #include <sys/smp.h>
46  #include <sys/taskqueue.h>
47  
48  #include <sys/lock.h>
49  #include <sys/mutex.h>
50  #include <sys/sysctl.h>
51  #include <sys/kthread.h>
52  
53  #include <cam/cam.h>
54  #include <cam/cam_ccb.h>
55  #include <cam/cam_periph.h>
56  #include <cam/cam_queue.h>
57  #include <cam/cam_sim.h>
58  #include <cam/cam_xpt.h>
59  #include <cam/cam_xpt_sim.h>
60  #include <cam/cam_xpt_periph.h>
61  #include <cam/cam_xpt_internal.h>
62  #include <cam/cam_debug.h>
63  #include <cam/cam_compat.h>
64  
65  #include <cam/scsi/scsi_all.h>
66  #include <cam/scsi/scsi_message.h>
67  #include <cam/scsi/scsi_pass.h>
68  
69  #include <machine/md_var.h>	/* geometry translation */
70  #include <machine/stdarg.h>	/* for xpt_print below */
71  
72  #include "opt_cam.h"
73  
74  /*
75   * This is the maximum number of high-powered commands (e.g. start unit)
76   * that can be outstanding at a particular time.
77   */
78  #ifndef CAM_MAX_HIGHPOWER
79  #define CAM_MAX_HIGHPOWER  4
80  #endif
81  
82  /* Data structures internal to the xpt layer */
83  MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
84  MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
85  MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
86  MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");
87  
88  /* Object for deferring XPT actions to a taskqueue */
89  struct xpt_task {
90  	struct task	task;
91  	void		*data1;
92  	uintptr_t	data2;
93  };
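
/*
 * Illustrative sketch (not part of the original file) of the deferral
 * pattern this structure supports, modeled on xpt_finishconfig_task()
 * declared later in this file; the allocation flags and use of the
 * global taskqueue_thread queue are assumptions:
 *
 *	struct xpt_task *task;
 *
 *	task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
 *	if (task != NULL) {
 *		TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
 *		taskqueue_enqueue(taskqueue_thread, &task->task);
 *	}
 */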
94  
95  struct xpt_softc {
96  	uint32_t		xpt_generation;
97  
98  	/* number of high powered commands that can go through right now */
99  	struct mtx		xpt_highpower_lock;
100  	STAILQ_HEAD(highpowerlist, cam_ed)	highpowerq;
101  	int			num_highpower;
102  
103  	/* queue for handling async rescan requests. */
104  	TAILQ_HEAD(, ccb_hdr) ccb_scanq;
105  	int buses_to_config;
106  	int buses_config_done;
107  
108  	/* Registered busses */
109  	TAILQ_HEAD(,cam_eb)	xpt_busses;
110  	u_int			bus_generation;
111  
112  	struct intr_config_hook	*xpt_config_hook;
113  
114  	int			boot_delay;
115  	struct callout 		boot_callout;
116  
117  	struct mtx		xpt_topo_lock;
118  	struct mtx		xpt_lock;
119  	struct taskqueue	*xpt_taskq;
120  };
121  
122  typedef enum {
123  	DM_RET_COPY		= 0x01,
124  	DM_RET_FLAG_MASK	= 0x0f,
125  	DM_RET_NONE		= 0x00,
126  	DM_RET_STOP		= 0x10,
127  	DM_RET_DESCEND		= 0x20,
128  	DM_RET_ERROR		= 0x30,
129  	DM_RET_ACTION_MASK	= 0xf0
130  } dev_match_ret;
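
/*
 * A dev_match_ret packs a copy flag (low nibble, DM_RET_FLAG_MASK) and
 * a traversal action (high nibble, DM_RET_ACTION_MASK).  The EDT
 * traversal callbacks below decode it along these lines (sketch only):
 *
 *	retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 *	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR)
 *		cdm->status = CAM_DEV_MATCH_ERROR;
 *	if (retval & DM_RET_COPY)
 *		cdm->num_matches++;
 *	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
 *		stop = 1;
 */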
131  
132  typedef enum {
133  	XPT_DEPTH_BUS,
134  	XPT_DEPTH_TARGET,
135  	XPT_DEPTH_DEVICE,
136  	XPT_DEPTH_PERIPH
137  } xpt_traverse_depth;
138  
139  struct xpt_traverse_config {
140  	xpt_traverse_depth	depth;
141  	void			*tr_func;
142  	void			*tr_arg;
143  };
144  
145  typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
146  typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
147  typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
148  typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
149  typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
150  
151  /* Transport layer configuration information */
152  static struct xpt_softc xsoftc;
153  
154  MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);
155  
156  SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
157             &xsoftc.boot_delay, 0, "Bus registration wait time");
158  SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
159  	    &xsoftc.xpt_generation, 0, "CAM peripheral generation count");
160  
161  struct cam_doneq {
162  	struct mtx_padalign	cam_doneq_mtx;
163  	STAILQ_HEAD(, ccb_hdr)	cam_doneq;
164  	int			cam_doneq_sleep;
165  };
166  
167  static struct cam_doneq cam_doneqs[MAXCPU];
168  static int cam_num_doneqs;
169  static struct proc *cam_proc;
170  
171  SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
172             &cam_num_doneqs, 0, "Number of completion queues/threads");
173  
174  struct cam_periph *xpt_periph;
175  
176  static periph_init_t xpt_periph_init;
177  
178  static struct periph_driver xpt_driver =
179  {
180  	xpt_periph_init, "xpt",
181  	TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
182  	CAM_PERIPH_DRV_EARLY
183  };
184  
185  PERIPHDRIVER_DECLARE(xpt, xpt_driver);
186  
187  static d_open_t xptopen;
188  static d_close_t xptclose;
189  static d_ioctl_t xptioctl;
190  static d_ioctl_t xptdoioctl;
191  
192  static struct cdevsw xpt_cdevsw = {
193  	.d_version =	D_VERSION,
194  	.d_flags =	0,
195  	.d_open =	xptopen,
196  	.d_close =	xptclose,
197  	.d_ioctl =	xptioctl,
198  	.d_name =	"xpt",
199  };
200  
201  /* Storage for debugging data structures */
202  struct cam_path *cam_dpath;
203  u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
204  SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
205  	&cam_dflags, 0, "Enabled debug flags");
206  u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
207  SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
208  	&cam_debug_delay, 0, "Delay in us after each debug message");
209  
210  /* Our boot-time initialization hook */
211  static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
212  
213  static moduledata_t cam_moduledata = {
214  	"cam",
215  	cam_module_event_handler,
216  	NULL
217  };
218  
219  static int	xpt_init(void *);
220  
221  DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
222  MODULE_VERSION(cam, 1);
223  
224  
225  static void		xpt_async_bcast(struct async_list *async_head,
226  					u_int32_t async_code,
227  					struct cam_path *path,
228  					void *async_arg);
229  static path_id_t xptnextfreepathid(void);
230  static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
231  static union ccb *xpt_get_ccb(struct cam_periph *periph);
232  static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
233  static void	 xpt_run_allocq(struct cam_periph *periph, int sleep);
234  static void	 xpt_run_allocq_task(void *context, int pending);
235  static void	 xpt_run_devq(struct cam_devq *devq);
236  static timeout_t xpt_release_devq_timeout;
237  static void	 xpt_release_simq_timeout(void *arg) __unused;
238  static void	 xpt_acquire_bus(struct cam_eb *bus);
239  static void	 xpt_release_bus(struct cam_eb *bus);
240  static uint32_t	 xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
241  static int	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
242  		    int run_queue);
243  static struct cam_et*
244  		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
245  static void	 xpt_acquire_target(struct cam_et *target);
246  static void	 xpt_release_target(struct cam_et *target);
247  static struct cam_eb*
248  		 xpt_find_bus(path_id_t path_id);
249  static struct cam_et*
250  		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
251  static struct cam_ed*
252  		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
253  static void	 xpt_config(void *arg);
254  static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
255  				 u_int32_t new_priority);
256  static xpt_devicefunc_t xptpassannouncefunc;
257  static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
258  static void	 xptpoll(struct cam_sim *sim);
259  static void	 camisr_runqueue(void);
260  static void	 xpt_done_process(struct ccb_hdr *ccb_h);
261  static void	 xpt_done_td(void *);
262  static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
263  				    u_int num_patterns, struct cam_eb *bus);
264  static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
265  				       u_int num_patterns,
266  				       struct cam_ed *device);
267  static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
268  				       u_int num_patterns,
269  				       struct cam_periph *periph);
270  static xpt_busfunc_t	xptedtbusfunc;
271  static xpt_targetfunc_t	xptedttargetfunc;
272  static xpt_devicefunc_t	xptedtdevicefunc;
273  static xpt_periphfunc_t	xptedtperiphfunc;
274  static xpt_pdrvfunc_t	xptplistpdrvfunc;
275  static xpt_periphfunc_t	xptplistperiphfunc;
276  static int		xptedtmatch(struct ccb_dev_match *cdm);
277  static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
278  static int		xptbustraverse(struct cam_eb *start_bus,
279  				       xpt_busfunc_t *tr_func, void *arg);
280  static int		xpttargettraverse(struct cam_eb *bus,
281  					  struct cam_et *start_target,
282  					  xpt_targetfunc_t *tr_func, void *arg);
283  static int		xptdevicetraverse(struct cam_et *target,
284  					  struct cam_ed *start_device,
285  					  xpt_devicefunc_t *tr_func, void *arg);
286  static int		xptperiphtraverse(struct cam_ed *device,
287  					  struct cam_periph *start_periph,
288  					  xpt_periphfunc_t *tr_func, void *arg);
289  static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
290  					xpt_pdrvfunc_t *tr_func, void *arg);
291  static int		xptpdperiphtraverse(struct periph_driver **pdrv,
292  					    struct cam_periph *start_periph,
293  					    xpt_periphfunc_t *tr_func,
294  					    void *arg);
295  static xpt_busfunc_t	xptdefbusfunc;
296  static xpt_targetfunc_t	xptdeftargetfunc;
297  static xpt_devicefunc_t	xptdefdevicefunc;
298  static xpt_periphfunc_t	xptdefperiphfunc;
299  static void		xpt_finishconfig_task(void *context, int pending);
300  static void		xpt_dev_async_default(u_int32_t async_code,
301  					      struct cam_eb *bus,
302  					      struct cam_et *target,
303  					      struct cam_ed *device,
304  					      void *async_arg);
305  static struct cam_ed *	xpt_alloc_device_default(struct cam_eb *bus,
306  						 struct cam_et *target,
307  						 lun_id_t lun_id);
308  static xpt_devicefunc_t	xptsetasyncfunc;
309  static xpt_busfunc_t	xptsetasyncbusfunc;
310  static cam_status	xptregister(struct cam_periph *periph,
311  				    void *arg);
312  static __inline int device_is_queued(struct cam_ed *device);
313  
314  static __inline int
315  xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
316  {
317  	int	retval;
318  
319  	mtx_assert(&devq->send_mtx, MA_OWNED);
320  	if ((dev->ccbq.queue.entries > 0) &&
321  	    (dev->ccbq.dev_openings > 0) &&
322  	    (dev->ccbq.queue.qfrozen_cnt == 0)) {
323  		/*
324  		 * The priority of a device waiting for controller
325  		 * resources is that of the highest priority CCB
326  		 * enqueued.
327  		 */
328  		retval =
329  		    xpt_schedule_dev(&devq->send_queue,
330  				     &dev->devq_entry,
331  				     CAMQ_GET_PRIO(&dev->ccbq.queue));
332  	} else {
333  		retval = 0;
334  	}
335  	return (retval);
336  }
337  
338  static __inline int
339  device_is_queued(struct cam_ed *device)
340  {
341  	return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
342  }
343  
344  static void
345  xpt_periph_init(void)
346  {
347  	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
348  }
349  
350  static int
351  xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
352  {
353  
354  	/*
355  	 * Only allow read-write access.
356  	 */
357  	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
358  		return(EPERM);
359  
360  	/*
361  	 * We don't allow nonblocking access.
362  	 */
363  	if ((flags & O_NONBLOCK) != 0) {
364  		printf("%s: can't do nonblocking access\n", devtoname(dev));
365  		return(ENODEV);
366  	}
367  
368  	return(0);
369  }
370  
371  static int
372  xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
373  {
374  
375  	return(0);
376  }
377  
378  /*
379   * Don't automatically grab the xpt softc lock here even though this is going
380   * through the xpt device.  The xpt device is really just a back door for
381   * accessing other devices and SIMs, so the right thing to do is to grab
382   * the appropriate SIM lock once the bus/SIM is located.
383   */
384  static int
385  xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
386  {
387  	int error;
388  
389  	if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
390  		error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
391  	}
392  	return (error);
393  }
394  
395  static int
396  xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
397  {
398  	int error;
399  
400  	error = 0;
401  
402  	switch(cmd) {
403  	/*
404  	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
405  	 * to accept CCB types that don't quite make sense to send through a
406  	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
407  	 * in the CAM spec.
408  	 */
409  	case CAMIOCOMMAND: {
410  		union ccb *ccb;
411  		union ccb *inccb;
412  		struct cam_eb *bus;
413  
414  		inccb = (union ccb *)addr;
415  
416  		bus = xpt_find_bus(inccb->ccb_h.path_id);
417  		if (bus == NULL)
418  			return (EINVAL);
419  
420  		switch (inccb->ccb_h.func_code) {
421  		case XPT_SCAN_BUS:
422  		case XPT_RESET_BUS:
423  			if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
424  			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
425  				xpt_release_bus(bus);
426  				return (EINVAL);
427  			}
428  			break;
429  		case XPT_SCAN_TGT:
430  			if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
431  			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
432  				xpt_release_bus(bus);
433  				return (EINVAL);
434  			}
435  			break;
436  		default:
437  			break;
438  		}
439  
440  		switch(inccb->ccb_h.func_code) {
441  		case XPT_SCAN_BUS:
442  		case XPT_RESET_BUS:
443  		case XPT_PATH_INQ:
444  		case XPT_ENG_INQ:
445  		case XPT_SCAN_LUN:
446  		case XPT_SCAN_TGT:
447  
448  			ccb = xpt_alloc_ccb();
449  
450  			/*
451  			 * Create a path using the bus, target, and lun the
452  			 * user passed in.
453  			 */
454  			if (xpt_create_path(&ccb->ccb_h.path, NULL,
455  					    inccb->ccb_h.path_id,
456  					    inccb->ccb_h.target_id,
457  					    inccb->ccb_h.target_lun) !=
458  					    CAM_REQ_CMP){
459  				error = EINVAL;
460  				xpt_free_ccb(ccb);
461  				break;
462  			}
463  			/* Ensure all of our fields are correct */
464  			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
465  				      inccb->ccb_h.pinfo.priority);
466  			xpt_merge_ccb(ccb, inccb);
467  			xpt_path_lock(ccb->ccb_h.path);
468  			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
469  			xpt_path_unlock(ccb->ccb_h.path);
470  			bcopy(ccb, inccb, sizeof(union ccb));
471  			xpt_free_path(ccb->ccb_h.path);
472  			xpt_free_ccb(ccb);
473  			break;
474  
475  		case XPT_DEBUG: {
476  			union ccb ccb;
477  
478  			/*
479  			 * This is an immediate CCB, so it's okay to
480  			 * allocate it on the stack.
481  			 */
482  
483  			/*
484  			 * Create a path using the bus, target, and lun the
485  			 * user passed in.
486  			 */
487  			if (xpt_create_path(&ccb.ccb_h.path, NULL,
488  					    inccb->ccb_h.path_id,
489  					    inccb->ccb_h.target_id,
490  					    inccb->ccb_h.target_lun) !=
491  					    CAM_REQ_CMP){
492  				error = EINVAL;
493  				break;
494  			}
495  			/* Ensure all of our fields are correct */
496  			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
497  				      inccb->ccb_h.pinfo.priority);
498  			xpt_merge_ccb(&ccb, inccb);
499  			xpt_action(&ccb);
500  			bcopy(&ccb, inccb, sizeof(union ccb));
501  			xpt_free_path(ccb.ccb_h.path);
502  			break;
503  
504  		}
505  		case XPT_DEV_MATCH: {
506  			struct cam_periph_map_info mapinfo;
507  			struct cam_path *old_path;
508  
509  			/*
510  			 * We can't deal with physical addresses for this
511  			 * type of transaction.
512  			 */
513  			if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
514  			    CAM_DATA_VADDR) {
515  				error = EINVAL;
516  				break;
517  			}
518  
519  			/*
520  			 * Save this in case the caller had it set to
521  			 * something in particular.
522  			 */
523  			old_path = inccb->ccb_h.path;
524  
525  			/*
526  			 * We really don't need a path for the matching
527  			 * code.  The path is needed because of the
528  			 * debugging statements in xpt_action().  They
529  			 * assume that the CCB has a valid path.
530  			 */
531  			inccb->ccb_h.path = xpt_periph->path;
532  
533  			bzero(&mapinfo, sizeof(mapinfo));
534  
535  			/*
536  			 * Map the pattern and match buffers into kernel
537  			 * virtual address space.
538  			 */
539  			error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS);
540  
541  			if (error) {
542  				inccb->ccb_h.path = old_path;
543  				break;
544  			}
545  
546  			/*
547  			 * This is an immediate CCB, we can send it on directly.
548  			 */
549  			xpt_action(inccb);
550  
551  			/*
552  			 * Map the buffers back into user space.
553  			 */
554  			cam_periph_unmapmem(inccb, &mapinfo);
555  
556  			inccb->ccb_h.path = old_path;
557  
558  			error = 0;
559  			break;
560  		}
561  		default:
562  			error = ENOTSUP;
563  			break;
564  		}
565  		xpt_release_bus(bus);
566  		break;
567  	}
568  	/*
569  	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
570  	 * with the peripheral driver name and unit name filled in.  The other
571  	 * fields don't really matter as input.  The passthrough driver name
572  	 * ("pass") and unit number are passed back in the ccb.  The current
573  	 * device generation number, the index into the device peripheral
574  	 * driver list, and the status are also passed back.  Note that
575  	 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
576  	 * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
577  	 * (or rather should be) impossible for the device peripheral driver
578  	 * list to change since we look at the whole thing in one pass, and
579  	 * we do it with lock protection.
580  	 *
581  	 */
582  	case CAMGETPASSTHRU: {
583  		union ccb *ccb;
584  		struct cam_periph *periph;
585  		struct periph_driver **p_drv;
586  		char   *name;
587  		u_int unit;
588  		int base_periph_found;
589  
590  		ccb = (union ccb *)addr;
591  		unit = ccb->cgdl.unit_number;
592  		name = ccb->cgdl.periph_name;
593  		base_periph_found = 0;
594  
595  		/*
596  		 * Sanity check -- make sure we don't get a null peripheral
597  		 * driver name.
598  		 */
599  		if (*ccb->cgdl.periph_name == '\0') {
600  			error = EINVAL;
601  			break;
602  		}
603  
604  		/* Keep the list from changing while we traverse it */
605  		xpt_lock_buses();
606  
607  		/* first find our driver in the list of drivers */
608  		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
609  			if (strcmp((*p_drv)->driver_name, name) == 0)
610  				break;
611  
612  		if (*p_drv == NULL) {
613  			xpt_unlock_buses();
614  			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
615  			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
616  			*ccb->cgdl.periph_name = '\0';
617  			ccb->cgdl.unit_number = 0;
618  			error = ENOENT;
619  			break;
620  		}
621  
622  		/*
623  		 * Run through every peripheral instance of this driver
624  		 * and check to see whether it matches the unit passed
625  		 * in by the user.  If it does, get out of the loops and
626  		 * find the passthrough driver associated with that
627  		 * peripheral driver.
628  		 */
629  		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
630  		     periph = TAILQ_NEXT(periph, unit_links)) {
631  
632  			if (periph->unit_number == unit)
633  				break;
634  		}
635  		/*
636  		 * If we found the peripheral driver that the user passed
637  		 * in, go through all of the peripheral drivers for that
638  		 * particular device and look for a passthrough driver.
639  		 */
640  		if (periph != NULL) {
641  			struct cam_ed *device;
642  			int i;
643  
644  			base_periph_found = 1;
645  			device = periph->path->device;
646  			for (i = 0, periph = SLIST_FIRST(&device->periphs);
647  			     periph != NULL;
648  			     periph = SLIST_NEXT(periph, periph_links), i++) {
649  				/*
650  				 * Check to see whether we have a
651  				 * passthrough device or not.
652  				 */
653  				if (strcmp(periph->periph_name, "pass") == 0) {
654  					/*
655  					 * Fill in the getdevlist fields.
656  					 */
657  					strcpy(ccb->cgdl.periph_name,
658  					       periph->periph_name);
659  					ccb->cgdl.unit_number =
660  						periph->unit_number;
661  					if (SLIST_NEXT(periph, periph_links))
662  						ccb->cgdl.status =
663  							CAM_GDEVLIST_MORE_DEVS;
664  					else
665  						ccb->cgdl.status =
666  						       CAM_GDEVLIST_LAST_DEVICE;
667  					ccb->cgdl.generation =
668  						device->generation;
669  					ccb->cgdl.index = i;
670  					/*
671  					 * Fill in some CCB header fields
672  					 * that the user may want.
673  					 */
674  					ccb->ccb_h.path_id =
675  						periph->path->bus->path_id;
676  					ccb->ccb_h.target_id =
677  						periph->path->target->target_id;
678  					ccb->ccb_h.target_lun =
679  						periph->path->device->lun_id;
680  					ccb->ccb_h.status = CAM_REQ_CMP;
681  					break;
682  				}
683  			}
684  		}
685  
686  		/*
687  		 * If the periph is null here, one of two things has
688  		 * happened.  The first possibility is that we couldn't
689  		 * find the unit number of the particular peripheral driver
690  		 * that the user is asking about.  e.g. the user asks for
691  		 * the passthrough driver for "da11".  We find the list of
692  		 * "da" peripherals all right, but there is no unit 11.
693  		 * The other possibility is that we went through the list
694  		 * of peripheral drivers attached to the device structure,
695  		 * but didn't find one with the name "pass".  Either way,
696  		 * we return ENOENT, since we couldn't find something.
697  		 */
698  		if (periph == NULL) {
699  			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
700  			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
701  			*ccb->cgdl.periph_name = '\0';
702  			ccb->cgdl.unit_number = 0;
703  			error = ENOENT;
704  			/*
705  			 * It is unfortunate that this is even necessary,
706  			 * but there are many, many clueless users out there.
707  			 * If so, the user is looking for the
708  			 * passthrough driver, but doesn't have one in
709  			 * their kernel.
710  			 */
711  			if (base_periph_found == 1) {
712  				printf("xptioctl: pass driver is not in the "
713  				       "kernel\n");
714  				printf("xptioctl: put \"device pass\" in "
715  				       "your kernel config file\n");
716  			}
717  		}
718  		xpt_unlock_buses();
719  		break;
720  		}
721  	default:
722  		error = ENOTTY;
723  		break;
724  	}
725  
726  	return(error);
727  }
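
/*
 * Minimal userland sketch of the CAMGETPASSTHRU ioctl handled above,
 * assuming the /dev/xpt0 node created by xpt_periph_init() and an
 * illustrative peripheral "da0" (error handling abbreviated):
 *
 *	union ccb ccb;
 *	int fd;
 *
 *	fd = open("/dev/xpt0", O_RDWR);
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
 *	strlcpy(ccb.cgdl.periph_name, "da", sizeof(ccb.cgdl.periph_name));
 *	ccb.cgdl.unit_number = 0;
 *	if (fd >= 0 && ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
 *	    ccb.ccb_h.status == CAM_REQ_CMP)
 *		printf("passthrough device is %s%d\n",
 *		    ccb.cgdl.periph_name, ccb.cgdl.unit_number);
 */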
728  
729  static int
730  cam_module_event_handler(module_t mod, int what, void *arg)
731  {
732  	int error;
733  
734  	switch (what) {
735  	case MOD_LOAD:
736  		if ((error = xpt_init(NULL)) != 0)
737  			return (error);
738  		break;
739  	case MOD_UNLOAD:
740  		return EBUSY;
741  	default:
742  		return EOPNOTSUPP;
743  	}
744  
745  	return 0;
746  }
747  
748  static void
749  xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
750  {
751  
752  	if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
753  		xpt_free_path(done_ccb->ccb_h.path);
754  		xpt_free_ccb(done_ccb);
755  	} else {
756  		done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
757  		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
758  	}
759  	xpt_release_boot();
760  }
761  
762  /* thread to handle bus rescans */
763  static void
764  xpt_scanner_thread(void *dummy)
765  {
766  	union ccb	*ccb;
767  	struct cam_path	 path;
768  
769  	xpt_lock_buses();
770  	for (;;) {
771  		if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
772  			msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
773  			       "-", 0);
774  		if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
775  			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
776  			xpt_unlock_buses();
777  
778  			/*
779  			 * Since the lock can be dropped inside xpt_action()
780  			 * and the path freed by the completion callback even
781  			 * before we return, take our own copy of the path.
782  			 */
783  			xpt_copy_path(&path, ccb->ccb_h.path);
784  			xpt_path_lock(&path);
785  			xpt_action(ccb);
786  			xpt_path_unlock(&path);
787  			xpt_release_path(&path);
788  
789  			xpt_lock_buses();
790  		}
791  	}
792  }
793  
794  void
795  xpt_rescan(union ccb *ccb)
796  {
797  	struct ccb_hdr *hdr;
798  
799  	/* Prepare request */
800  	if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
801  	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
802  		ccb->ccb_h.func_code = XPT_SCAN_BUS;
803  	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
804  	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
805  		ccb->ccb_h.func_code = XPT_SCAN_TGT;
806  	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
807  	    ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
808  		ccb->ccb_h.func_code = XPT_SCAN_LUN;
809  	else {
810  		xpt_print(ccb->ccb_h.path, "illegal scan path\n");
811  		xpt_free_path(ccb->ccb_h.path);
812  		xpt_free_ccb(ccb);
813  		return;
814  	}
815  	ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
816  	ccb->ccb_h.cbfcnp = xpt_rescan_done;
817  	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
818  	/* Don't make duplicate entries for the same paths. */
819  	xpt_lock_buses();
820  	if (ccb->ccb_h.ppriv_ptr1 == NULL) {
821  		TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
822  			if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
823  				wakeup(&xsoftc.ccb_scanq);
824  				xpt_unlock_buses();
825  				xpt_print(ccb->ccb_h.path, "rescan already queued\n");
826  				xpt_free_path(ccb->ccb_h.path);
827  				xpt_free_ccb(ccb);
828  				return;
829  			}
830  		}
831  	}
832  	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
833  	xsoftc.buses_to_config++;
834  	wakeup(&xsoftc.ccb_scanq);
835  	xpt_unlock_buses();
836  }
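
/*
 * Sketch of a typical caller handing a full-bus rescan to the scanner
 * thread above (path_id stands in for the caller's bus).  Both the
 * path and the CCB are consumed; because the CCB is zeroed, cbfcnp is
 * NULL and xpt_rescan_done() frees them on completion:
 *
 *	union ccb *ccb;
 *
 *	if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
 *		return;
 *	if (xpt_create_path(&ccb->ccb_h.path, NULL, path_id,
 *	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 *		xpt_free_ccb(ccb);
 *		return;
 *	}
 *	xpt_rescan(ccb);
 */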
837  
838  /* Functions accessed by the peripheral drivers */
839  static int
840  xpt_init(void *dummy)
841  {
842  	struct cam_sim *xpt_sim;
843  	struct cam_path *path;
844  	struct cam_devq *devq;
845  	cam_status status;
846  	int error, i;
847  
848  	TAILQ_INIT(&xsoftc.xpt_busses);
849  	TAILQ_INIT(&xsoftc.ccb_scanq);
850  	STAILQ_INIT(&xsoftc.highpowerq);
851  	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
852  
853  	mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
854  	mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
855  	xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
856  	    taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);
857  
858  #ifdef CAM_BOOT_DELAY
859  	/*
860  	 * Override this value at compile time to assist our users
861  	 * who don't use loader(8) to boot a kernel.
862  	 */
863  	xsoftc.boot_delay = CAM_BOOT_DELAY;
864  #endif
865  	/*
866  	 * The xpt layer is, itself, the equivalent of a SIM.
867  	 * Allow 16 ccbs in the ccb pool for it.  This should
868  	 * give decent parallelism when we probe busses and
869  	 * perform other XPT functions.
870  	 */
871  	devq = cam_simq_alloc(16);
872  	xpt_sim = cam_sim_alloc(xptaction,
873  				xptpoll,
874  				"xpt",
875  				/*softc*/NULL,
876  				/*unit*/0,
877  				/*mtx*/&xsoftc.xpt_lock,
878  				/*max_dev_transactions*/0,
879  				/*max_tagged_dev_transactions*/0,
880  				devq);
881  	if (xpt_sim == NULL)
882  		return (ENOMEM);
883  
884  	mtx_lock(&xsoftc.xpt_lock);
885  	if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
886  		mtx_unlock(&xsoftc.xpt_lock);
887  		printf("xpt_init: xpt_bus_register failed with status %#x,"
888  		       " failing attach\n", status);
889  		return (EINVAL);
890  	}
891  	mtx_unlock(&xsoftc.xpt_lock);
892  
893  	/*
894  	 * Looking at the XPT from the SIM layer, the XPT is
895  	 * the equivalent of a peripheral driver.  Allocate
896  	 * a peripheral driver entry for us.
897  	 */
898  	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
899  				      CAM_TARGET_WILDCARD,
900  				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
901  		printf("xpt_init: xpt_create_path failed with status %#x,"
902  		       " failing attach\n", status);
903  		return (EINVAL);
904  	}
905  	xpt_path_lock(path);
906  	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
907  			 path, NULL, 0, xpt_sim);
908  	xpt_path_unlock(path);
909  	xpt_free_path(path);
910  
911  	if (cam_num_doneqs < 1)
912  		cam_num_doneqs = 1 + mp_ncpus / 6;
913  	else if (cam_num_doneqs > MAXCPU)
914  		cam_num_doneqs = MAXCPU;
915  	for (i = 0; i < cam_num_doneqs; i++) {
916  		mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
917  		    MTX_DEF);
918  		STAILQ_INIT(&cam_doneqs[i].cam_doneq);
919  		error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
920  		    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
921  		if (error != 0) {
922  			cam_num_doneqs = i;
923  			break;
924  		}
925  	}
926  	if (cam_num_doneqs < 1) {
927  		printf("xpt_init: Cannot init completion queues "
928  		       "- failing attach\n");
929  		return (ENOMEM);
930  	}
931  	/*
932  	 * Register a callback for when interrupts are enabled.
933  	 */
934  	xsoftc.xpt_config_hook =
935  	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
936  					      M_CAMXPT, M_NOWAIT | M_ZERO);
937  	if (xsoftc.xpt_config_hook == NULL) {
938  		printf("xpt_init: Cannot malloc config hook "
939  		       "- failing attach\n");
940  		return (ENOMEM);
941  	}
942  	xsoftc.xpt_config_hook->ich_func = xpt_config;
943  	if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
944  		free(xsoftc.xpt_config_hook, M_CAMXPT);
945  		printf("xpt_init: config_intrhook_establish failed "
946  		       "- failing attach\n");
		return (ENOMEM);
947  	}
948  
949  	return (0);
950  }
951  
952  static cam_status
953  xptregister(struct cam_periph *periph, void *arg)
954  {
955  	struct cam_sim *xpt_sim;
956  
957  	if (periph == NULL) {
958  		printf("xptregister: periph was NULL!!\n");
959  		return(CAM_REQ_CMP_ERR);
960  	}
961  
962  	xpt_sim = (struct cam_sim *)arg;
963  	xpt_sim->softc = periph;
964  	xpt_periph = periph;
965  	periph->softc = NULL;
966  
967  	return(CAM_REQ_CMP);
968  }
969  
970  int32_t
971  xpt_add_periph(struct cam_periph *periph)
972  {
973  	struct cam_ed *device;
974  	int32_t	 status;
975  
976  	TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
977  	device = periph->path->device;
978  	status = CAM_REQ_CMP;
979  	if (device != NULL) {
980  		mtx_lock(&device->target->bus->eb_mtx);
981  		device->generation++;
982  		SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
983  		mtx_unlock(&device->target->bus->eb_mtx);
984  		atomic_add_32(&xsoftc.xpt_generation, 1);
985  	}
986  
987  	return (status);
988  }
989  
990  void
991  xpt_remove_periph(struct cam_periph *periph)
992  {
993  	struct cam_ed *device;
994  
995  	device = periph->path->device;
996  	if (device != NULL) {
997  		mtx_lock(&device->target->bus->eb_mtx);
998  		device->generation++;
999  		SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
1000  		mtx_unlock(&device->target->bus->eb_mtx);
1001  		atomic_add_32(&xsoftc.xpt_generation, 1);
1002  	}
1003  }
1004  
1005  
1006  void
1007  xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1008  {
1009  	struct	cam_path *path = periph->path;
1010  
1011  	cam_periph_assert(periph, MA_OWNED);
1012  	periph->flags |= CAM_PERIPH_ANNOUNCED;
1013  
1014  	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1015  	       periph->periph_name, periph->unit_number,
1016  	       path->bus->sim->sim_name,
1017  	       path->bus->sim->unit_number,
1018  	       path->bus->sim->bus_id,
1019  	       path->bus->path_id,
1020  	       path->target->target_id,
1021  	       (uintmax_t)path->device->lun_id);
1022  	printf("%s%d: ", periph->periph_name, periph->unit_number);
1023  	if (path->device->protocol == PROTO_SCSI)
1024  		scsi_print_inquiry(&path->device->inq_data);
1025  	else if (path->device->protocol == PROTO_ATA ||
1026  	    path->device->protocol == PROTO_SATAPM)
1027  		ata_print_ident(&path->device->ident_data);
1028  	else if (path->device->protocol == PROTO_SEMB)
1029  		semb_print_ident(
1030  		    (struct sep_identify_data *)&path->device->ident_data);
1031  	else
1032  		printf("Unknown protocol device\n");
1033  	if (path->device->serial_num_len > 0) {
1034  		/* Don't wrap the screen - print only the first 60 chars */
1035  		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1036  		       periph->unit_number, path->device->serial_num);
1037  	}
1038  	/* Announce transport details. */
1039  	(*(path->bus->xport->announce))(periph);
1040  	/* Announce command queueing. */
1041  	if (path->device->inq_flags & SID_CmdQue
1042  	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1043  		printf("%s%d: Command Queueing enabled\n",
1044  		       periph->periph_name, periph->unit_number);
1045  	}
1046  	/* Announce the caller's details if any were passed in. */
1047  	if (announce_string != NULL)
1048  		printf("%s%d: %s\n", periph->periph_name,
1049  		       periph->unit_number, announce_string);
1050  }
1051  
1052  void
1053  xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
1054  {
1055  	if (quirks != 0) {
1056  		printf("%s%d: quirks=0x%b\n", periph->periph_name,
1057  		    periph->unit_number, quirks, bit_string);
1058  	}
1059  }
1060  
1061  void
1062  xpt_denounce_periph(struct cam_periph *periph)
1063  {
1064  	struct	cam_path *path = periph->path;
1065  
1066  	cam_periph_assert(periph, MA_OWNED);
1067  	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1068  	       periph->periph_name, periph->unit_number,
1069  	       path->bus->sim->sim_name,
1070  	       path->bus->sim->unit_number,
1071  	       path->bus->sim->bus_id,
1072  	       path->bus->path_id,
1073  	       path->target->target_id,
1074  	       (uintmax_t)path->device->lun_id);
1075  	printf("%s%d: ", periph->periph_name, periph->unit_number);
1076  	if (path->device->protocol == PROTO_SCSI)
1077  		scsi_print_inquiry_short(&path->device->inq_data);
1078  	else if (path->device->protocol == PROTO_ATA ||
1079  	    path->device->protocol == PROTO_SATAPM)
1080  		ata_print_ident_short(&path->device->ident_data);
1081  	else if (path->device->protocol == PROTO_SEMB)
1082  		semb_print_ident_short(
1083  		    (struct sep_identify_data *)&path->device->ident_data);
1084  	else
1085  		printf("Unknown protocol device");
1086  	if (path->device->serial_num_len > 0)
1087  		printf(" s/n %.60s", path->device->serial_num);
1088  	printf(" detached\n");
1089  }
1090  
1091  
1092  int
1093  xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
1094  {
1095  	int ret = -1, l;
1096  	struct ccb_dev_advinfo cdai;
1097  	struct scsi_vpd_id_descriptor *idd;
1098  
1099  	xpt_path_assert(path, MA_OWNED);
1100  
1101  	memset(&cdai, 0, sizeof(cdai));
1102  	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
1103  	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
1104  	cdai.bufsiz = len;
1105  
1106  	if (!strcmp(attr, "GEOM::ident"))
1107  		cdai.buftype = CDAI_TYPE_SERIAL_NUM;
1108  	else if (!strcmp(attr, "GEOM::physpath"))
1109  		cdai.buftype = CDAI_TYPE_PHYS_PATH;
1110  	else if (strcmp(attr, "GEOM::lunid") == 0 ||
1111  		 strcmp(attr, "GEOM::lunname") == 0) {
1112  		cdai.buftype = CDAI_TYPE_SCSI_DEVID;
1113  		cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
1114  	} else
1115  		goto out;
1116  
1117  	cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
1118  	if (cdai.buf == NULL) {
1119  		ret = ENOMEM;
1120  		goto out;
1121  	}
1122  	xpt_action((union ccb *)&cdai); /* can only be synchronous */
1123  	if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
1124  		cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
1125  	if (cdai.provsiz == 0)
1126  		goto out;
1127  	if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) {
1128  		if (strcmp(attr, "GEOM::lunid") == 0) {
1129  			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
1130  			    cdai.provsiz, scsi_devid_is_lun_naa);
1131  			if (idd == NULL)
1132  				idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
1133  				    cdai.provsiz, scsi_devid_is_lun_eui64);
1134  		} else
1135  			idd = NULL;
1136  		if (idd == NULL)
1137  			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
1138  			    cdai.provsiz, scsi_devid_is_lun_t10);
1139  		if (idd == NULL)
1140  			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
1141  			    cdai.provsiz, scsi_devid_is_lun_name);
1142  		if (idd == NULL)
1143  			goto out;
1144  		ret = 0;
1145  		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) {
1146  			if (idd->length < len) {
1147  				for (l = 0; l < idd->length; l++)
1148  					buf[l] = idd->identifier[l] ?
1149  					    idd->identifier[l] : ' ';
1150  				buf[l] = 0;
1151  			} else
1152  				ret = EFAULT;
1153  		} else if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) {
1154  			l = strnlen(idd->identifier, idd->length);
1155  			if (l < len) {
1156  				bcopy(idd->identifier, buf, l);
1157  				buf[l] = 0;
1158  			} else
1159  				ret = EFAULT;
1160  		} else {
1161  			if (idd->length * 2 < len) {
1162  				for (l = 0; l < idd->length; l++)
1163  					sprintf(buf + l * 2, "%02x",
1164  					    idd->identifier[l]);
1165  			} else
1166  				ret = EFAULT;
1167  		}
1168  	} else {
1169  		ret = 0;
1170  		if (strlcpy(buf, cdai.buf, len) >= len)
1171  			ret = EFAULT;
1172  	}
1173  
1174  out:
1175  	if (cdai.buf != NULL)
1176  		free(cdai.buf, M_CAMXPT);
1177  	return ret;
1178  }
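
/*
 * Illustrative use of xpt_getattr(), as GEOM attribute queries do
 * (hypothetical periph and buffer; the path must be locked, per the
 * assertion above):
 *
 *	char ident[100];
 *
 *	cam_periph_lock(periph);
 *	if (xpt_getattr(ident, sizeof(ident), "GEOM::ident",
 *	    periph->path) == 0)
 *		printf("ident is %s\n", ident);
 *	cam_periph_unlock(periph);
 */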
1179  
1180  static dev_match_ret
1181  xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1182  	    struct cam_eb *bus)
1183  {
1184  	dev_match_ret retval;
1185  	int i;
1186  
1187  	retval = DM_RET_NONE;
1188  
1189  	/*
1190  	 * If we aren't given something to match against, that's an error.
1191  	 */
1192  	if (bus == NULL)
1193  		return(DM_RET_ERROR);
1194  
1195  	/*
1196  	 * If there are no match entries, then this bus matches no
1197  	 * matter what.
1198  	 */
1199  	if ((patterns == NULL) || (num_patterns == 0))
1200  		return(DM_RET_DESCEND | DM_RET_COPY);
1201  
1202  	for (i = 0; i < num_patterns; i++) {
1203  		struct bus_match_pattern *cur_pattern;
1204  
1205  		/*
1206  		 * If the pattern in question isn't for a bus node, we
1207  		 * aren't interested.  However, we do indicate to the
1208  		 * calling routine that we should continue descending the
1209  		 * tree, since the user wants to match against lower-level
1210  		 * EDT elements.
1211  		 */
1212  		if (patterns[i].type != DEV_MATCH_BUS) {
1213  			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1214  				retval |= DM_RET_DESCEND;
1215  			continue;
1216  		}
1217  
1218  		cur_pattern = &patterns[i].pattern.bus_pattern;
1219  
1220  		/*
1221  		 * If they want to match any bus node, we give them this
1222  		 * bus node.
1223  		 */
1224  		if (cur_pattern->flags == BUS_MATCH_ANY) {
1225  			/* set the copy flag */
1226  			retval |= DM_RET_COPY;
1227  
1228  			/*
1229  			 * If we've already decided on an action, go ahead
1230  			 * and return.
1231  			 */
1232  			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1233  				return(retval);
1234  		}
1235  
1236  		/*
1237  		 * Not sure why someone would do this...
1238  		 */
1239  		if (cur_pattern->flags == BUS_MATCH_NONE)
1240  			continue;
1241  
1242  		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1243  		 && (cur_pattern->path_id != bus->path_id))
1244  			continue;
1245  
1246  		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1247  		 && (cur_pattern->bus_id != bus->sim->bus_id))
1248  			continue;
1249  
1250  		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1251  		 && (cur_pattern->unit_number != bus->sim->unit_number))
1252  			continue;
1253  
1254  		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1255  		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1256  			     DEV_IDLEN) != 0))
1257  			continue;
1258  
1259  		/*
1260  		 * If we get to this point, the user definitely wants
1261  		 * information on this bus.  So tell the caller to copy the
1262  		 * data out.
1263  		 */
1264  		retval |= DM_RET_COPY;
1265  
1266  		/*
1267  		 * If the return action has been set to descend, then we
1268  		 * know that we've already seen a non-bus matching
1269  		 * expression, therefore we need to further descend the tree.
1270  		 * This won't change by continuing around the loop, so we
1271  		 * go ahead and return.  If we haven't seen a non-bus
1272  		 * matching expression, we keep going around the loop until
1273  		 * we exhaust the matching expressions.  We'll set the stop
1274  		 * flag once we fall out of the loop.
1275  		 */
1276  		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1277  			return(retval);
1278  	}
1279  
1280  	/*
1281  	 * If the return action hasn't been set to descend yet, that means
1282  	 * we haven't seen anything other than bus matching patterns.  So
1283  	 * tell the caller to stop descending the tree -- the user doesn't
1284  	 * want to match against lower level tree elements.
1285  	 */
1286  	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1287  		retval |= DM_RET_STOP;
1288  
1289  	return(retval);
1290  }
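
/*
 * Example of a pattern the matcher above accepts: match every bus
 * provided by a SIM named "ahcich", any unit.  This is a sketch of
 * what a userland XPT_DEV_MATCH caller would pass in; the SIM name is
 * illustrative:
 *
 *	struct dev_match_pattern p;
 *
 *	bzero(&p, sizeof(p));
 *	p.type = DEV_MATCH_BUS;
 *	p.pattern.bus_pattern.flags = BUS_MATCH_NAME;
 *	strlcpy(p.pattern.bus_pattern.dev_name, "ahcich",
 *	    sizeof(p.pattern.bus_pattern.dev_name));
 */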
1291  
1292  static dev_match_ret
1293  xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1294  	       struct cam_ed *device)
1295  {
1296  	dev_match_ret retval;
1297  	int i;
1298  
1299  	retval = DM_RET_NONE;
1300  
1301  	/*
1302  	 * If we aren't given something to match against, that's an error.
1303  	 */
1304  	if (device == NULL)
1305  		return(DM_RET_ERROR);
1306  
1307  	/*
1308  	 * If there are no match entries, then this device matches no
1309  	 * matter what.
1310  	 */
1311  	if ((patterns == NULL) || (num_patterns == 0))
1312  		return(DM_RET_DESCEND | DM_RET_COPY);
1313  
1314  	for (i = 0; i < num_patterns; i++) {
1315  		struct device_match_pattern *cur_pattern;
1316  		struct scsi_vpd_device_id *device_id_page;
1317  
1318  		/*
1319  		 * If the pattern in question isn't for a device node, we
1320  		 * aren't interested.
1321  		 */
1322  		if (patterns[i].type != DEV_MATCH_DEVICE) {
1323  			if ((patterns[i].type == DEV_MATCH_PERIPH)
1324  			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1325  				retval |= DM_RET_DESCEND;
1326  			continue;
1327  		}
1328  
1329  		cur_pattern = &patterns[i].pattern.device_pattern;
1330  
1331  		/* Error out if mutually exclusive options are specified. */
1332  		if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1333  		 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1334  			return(DM_RET_ERROR);
1335  
1336  		/*
1337  		 * If they want to match any device node, we give them any
1338  		 * device node.
1339  		 */
1340  		if (cur_pattern->flags == DEV_MATCH_ANY)
1341  			goto copy_dev_node;
1342  
1343  		/*
1344  		 * Not sure why someone would do this...
1345  		 */
1346  		if (cur_pattern->flags == DEV_MATCH_NONE)
1347  			continue;
1348  
1349  		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1350  		 && (cur_pattern->path_id != device->target->bus->path_id))
1351  			continue;
1352  
1353  		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1354  		 && (cur_pattern->target_id != device->target->target_id))
1355  			continue;
1356  
1357  		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1358  		 && (cur_pattern->target_lun != device->lun_id))
1359  			continue;
1360  
1361  		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1362  		 && (cam_quirkmatch((caddr_t)&device->inq_data,
1363  				    (caddr_t)&cur_pattern->data.inq_pat,
1364  				    1, sizeof(cur_pattern->data.inq_pat),
1365  				    scsi_static_inquiry_match) == NULL))
1366  			continue;
1367  
1368  		device_id_page = (struct scsi_vpd_device_id *)device->device_id;
1369  		if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
1370  		 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
1371  		  || scsi_devid_match((uint8_t *)device_id_page->desc_list,
1372  				      device->device_id_len
1373  				    - SVPD_DEVICE_ID_HDR_LEN,
1374  				      cur_pattern->data.devid_pat.id,
1375  				      cur_pattern->data.devid_pat.id_len) != 0))
1376  			continue;
1377  
1378  copy_dev_node:
1379  		/*
1380  		 * If we get to this point, the user definitely wants
1381  		 * information on this device.  So tell the caller to copy
1382  		 * the data out.
1383  		 */
1384  		retval |= DM_RET_COPY;
1385  
1386  		/*
1387  		 * If the return action has been set to descend, then we
1388  		 * know that we've already seen a peripheral matching
1389  		 * expression, therefore we need to further descend the tree.
1390  		 * This won't change by continuing around the loop, so we
1391  		 * go ahead and return.  If we haven't seen a peripheral
1392  		 * matching expression, we keep going around the loop until
1393  		 * we exhaust the matching expressions.  We'll set the stop
1394  		 * flag once we fall out of the loop.
1395  		 */
1396  		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1397  			return(retval);
1398  	}
1399  
1400  	/*
1401  	 * If the return action hasn't been set to descend yet, that means
1402  	 * we haven't seen any peripheral matching patterns.  So tell the
1403  	 * caller to stop descending the tree -- the user doesn't want to
1404  	 * match against lower level tree elements.
1405  	 */
1406  	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1407  		retval |= DM_RET_STOP;
1408  
1409  	return(retval);
1410  }
1411  
1412  /*
1413   * Match a single peripheral against any number of match patterns.
1414   */
1415  static dev_match_ret
1416  xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1417  	       struct cam_periph *periph)
1418  {
1419  	dev_match_ret retval;
1420  	int i;
1421  
1422  	/*
1423  	 * If we aren't given something to match against, that's an error.
1424  	 */
1425  	if (periph == NULL)
1426  		return(DM_RET_ERROR);
1427  
1428  	/*
1429  	 * If there are no match entries, then this peripheral matches no
1430  	 * matter what.
1431  	 */
1432  	if ((patterns == NULL) || (num_patterns == 0))
1433  		return(DM_RET_STOP | DM_RET_COPY);
1434  
1435  	/*
1436  	 * There aren't any nodes below a peripheral node, so there's no
1437  	 * reason to descend the tree any further.
1438  	 */
1439  	retval = DM_RET_STOP;
1440  
1441  	for (i = 0; i < num_patterns; i++) {
1442  		struct periph_match_pattern *cur_pattern;
1443  
1444  		/*
1445  		 * If the pattern in question isn't for a peripheral, we
1446  		 * aren't interested.
1447  		 */
1448  		if (patterns[i].type != DEV_MATCH_PERIPH)
1449  			continue;
1450  
1451  		cur_pattern = &patterns[i].pattern.periph_pattern;
1452  
1453  		/*
1454  		 * If they want to match on anything, then we will do so.
1455  		 */
1456  		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1457  			/* set the copy flag */
1458  			retval |= DM_RET_COPY;
1459  
1460  			/*
1461  			 * We've already set the return action to stop,
1462  			 * since there are no nodes below peripherals in
1463  			 * the tree.
1464  			 */
1465  			return(retval);
1466  		}
1467  
1468  		/*
1469  		 * Not sure why someone would do this...
1470  		 */
1471  		if (cur_pattern->flags == PERIPH_MATCH_NONE)
1472  			continue;
1473  
1474  		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1475  		 && (cur_pattern->path_id != periph->path->bus->path_id))
1476  			continue;
1477  
1478  		/*
1479  		 * For the target and lun IDs, we have to make sure the
1480  		 * target and lun pointers aren't NULL.  The xpt peripheral
1481  		 * has a wildcard target and device.
1482  		 */
1483  		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1484  		 && ((periph->path->target == NULL)
1485  		 ||(cur_pattern->target_id != periph->path->target->target_id)))
1486  			continue;
1487  
1488  		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1489  		 && ((periph->path->device == NULL)
1490  		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
1491  			continue;
1492  
1493  		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1494  		 && (cur_pattern->unit_number != periph->unit_number))
1495  			continue;
1496  
1497  		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1498  		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
1499  			     DEV_IDLEN) != 0))
1500  			continue;
1501  
1502  		/*
1503  		 * If we get to this point, the user definitely wants
1504  		 * information on this peripheral.  So tell the caller to
1505  		 * copy the data out.
1506  		 */
1507  		retval |= DM_RET_COPY;
1508  
1509  		/*
1510  		 * The return action has already been set to stop, since
1511  		 * peripherals don't have any nodes below them in the EDT.
1512  		 */
1513  		return(retval);
1514  	}
1515  
1516  	/*
1517  	 * If we get to this point, the peripheral that was passed in
1518  	 * doesn't match any of the patterns.
1519  	 */
1520  	return(retval);
1521  }
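
/*
 * Companion sketch for the peripheral matcher above: a pattern that
 * matches only "da0" (illustrative values again):
 *
 *	struct dev_match_pattern p;
 *
 *	bzero(&p, sizeof(p));
 *	p.type = DEV_MATCH_PERIPH;
 *	p.pattern.periph_pattern.flags =
 *	    PERIPH_MATCH_NAME | PERIPH_MATCH_UNIT;
 *	strlcpy(p.pattern.periph_pattern.periph_name, "da",
 *	    sizeof(p.pattern.periph_pattern.periph_name));
 *	p.pattern.periph_pattern.unit_number = 0;
 */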
1522  
1523  static int
1524  xptedtbusfunc(struct cam_eb *bus, void *arg)
1525  {
1526  	struct ccb_dev_match *cdm;
1527  	struct cam_et *target;
1528  	dev_match_ret retval;
1529  
1530  	cdm = (struct ccb_dev_match *)arg;
1531  
1532  	/*
1533  	 * If our position is for something deeper in the tree, that means
1534  	 * that we've already seen this node.  So, we keep going down.
1535  	 */
1536  	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1537  	 && (cdm->pos.cookie.bus == bus)
1538  	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1539  	 && (cdm->pos.cookie.target != NULL))
1540  		retval = DM_RET_DESCEND;
1541  	else
1542  		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1543  
1544  	/*
1545  	 * If we got an error, bail out of the search.
1546  	 */
1547  	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1548  		cdm->status = CAM_DEV_MATCH_ERROR;
1549  		return(0);
1550  	}
1551  
1552  	/*
1553  	 * If the copy flag is set, copy this bus out.
1554  	 */
1555  	if (retval & DM_RET_COPY) {
1556  		int spaceleft, j;
1557  
1558  		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1559  			sizeof(struct dev_match_result));
1560  
1561  		/*
1562  		 * If we don't have enough space to put in another
1563  		 * match result, save our position and tell the
1564  		 * user there are more devices to check.
1565  		 */
1566  		if (spaceleft < sizeof(struct dev_match_result)) {
1567  			bzero(&cdm->pos, sizeof(cdm->pos));
1568  			cdm->pos.position_type =
1569  				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1570  
1571  			cdm->pos.cookie.bus = bus;
1572  			cdm->pos.generations[CAM_BUS_GENERATION] =
1573  				xsoftc.bus_generation;
1574  			cdm->status = CAM_DEV_MATCH_MORE;
1575  			return(0);
1576  		}
1577  		j = cdm->num_matches;
1578  		cdm->num_matches++;
1579  		cdm->matches[j].type = DEV_MATCH_BUS;
1580  		cdm->matches[j].result.bus_result.path_id = bus->path_id;
1581  		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1582  		cdm->matches[j].result.bus_result.unit_number =
1583  			bus->sim->unit_number;
1584  		strncpy(cdm->matches[j].result.bus_result.dev_name,
1585  			bus->sim->sim_name, DEV_IDLEN);
1586  	}
1587  
1588  	/*
1589  	 * If the user is only interested in busses, there's no
1590  	 * reason to descend to the next level in the tree.
1591  	 */
1592  	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1593  		return(1);
1594  
1595  	/*
1596  	 * If there is a target generation recorded, check it to
1597  	 * make sure the target list hasn't changed.
1598  	 */
1599  	mtx_lock(&bus->eb_mtx);
1600  	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1601  	 && (cdm->pos.cookie.bus == bus)
1602  	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1603  	 && (cdm->pos.cookie.target != NULL)) {
1604  		if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
1605  		    bus->generation)) {
1606  			mtx_unlock(&bus->eb_mtx);
1607  			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1608  			return (0);
1609  		}
1610  		target = (struct cam_et *)cdm->pos.cookie.target;
1611  		target->refcount++;
1612  	} else
1613  		target = NULL;
1614  	mtx_unlock(&bus->eb_mtx);
1615  
1616  	return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
1617  }
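
/*
 * When the result buffer fills, the function above saves cdm->pos and
 * returns CAM_DEV_MATCH_MORE; a userland consumer re-issues the
 * XPT_DEV_MATCH CCB with pos intact until it sees a final status.
 * Sketch, with setup and error handling elided:
 *
 *	do {
 *		if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
 *			break;
 *		process ccb.cdm.matches[0 .. ccb.cdm.num_matches - 1];
 *	} while ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
 *	    ccb.cdm.status == CAM_DEV_MATCH_MORE);
 */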
1618  
1619  static int
1620  xptedttargetfunc(struct cam_et *target, void *arg)
1621  {
1622  	struct ccb_dev_match *cdm;
1623  	struct cam_eb *bus;
1624  	struct cam_ed *device;
1625  
1626  	cdm = (struct ccb_dev_match *)arg;
1627  	bus = target->bus;
1628  
1629  	/*
1630  	 * If there is a device list generation recorded, check it to
1631  	 * make sure the device list hasn't changed.
1632  	 */
1633  	mtx_lock(&bus->eb_mtx);
1634  	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1635  	 && (cdm->pos.cookie.bus == bus)
1636  	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1637  	 && (cdm->pos.cookie.target == target)
1638  	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1639  	 && (cdm->pos.cookie.device != NULL)) {
1640  		if (cdm->pos.generations[CAM_DEV_GENERATION] !=
1641  		    target->generation) {
1642  			mtx_unlock(&bus->eb_mtx);
1643  			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1644  			return(0);
1645  		}
1646  		device = (struct cam_ed *)cdm->pos.cookie.device;
1647  		device->refcount++;
1648  	} else
1649  		device = NULL;
1650  	mtx_unlock(&bus->eb_mtx);
1651  
1652  	return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
1653  }
1654  
1655  static int
1656  xptedtdevicefunc(struct cam_ed *device, void *arg)
1657  {
1658  	struct cam_eb *bus;
1659  	struct cam_periph *periph;
1660  	struct ccb_dev_match *cdm;
1661  	dev_match_ret retval;
1662  
1663  	cdm = (struct ccb_dev_match *)arg;
1664  	bus = device->target->bus;
1665  
1666  	/*
1667  	 * If our position is for something deeper in the tree, that means
1668  	 * that we've already seen this node.  So, we keep going down.
1669  	 */
1670  	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1671  	 && (cdm->pos.cookie.device == device)
1672  	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1673  	 && (cdm->pos.cookie.periph != NULL))
1674  		retval = DM_RET_DESCEND;
1675  	else
1676  		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
1677  					device);
1678  
1679  	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1680  		cdm->status = CAM_DEV_MATCH_ERROR;
1681  		return(0);
1682  	}
1683  
1684  	/*
1685  	 * If the copy flag is set, copy this device out.
1686  	 */
1687  	if (retval & DM_RET_COPY) {
1688  		int spaceleft, j;
1689  
1690  		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1691  			sizeof(struct dev_match_result));
1692  
1693  		/*
1694  		 * If we don't have enough space to put in another
1695  		 * match result, save our position and tell the
1696  		 * user there are more devices to check.
1697  		 */
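      		/*
      		 * The position saved here is handed back to us in
      		 * cdm->pos on the next XPT_DEV_MATCH call, and the
      		 * generation numbers recorded with it are what the
      		 * checks at each level of the traversal use to detect
      		 * a topology change in the interim.
      		 */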
1698  		if (spaceleft < sizeof(struct dev_match_result)) {
1699  			bzero(&cdm->pos, sizeof(cdm->pos));
1700  			cdm->pos.position_type =
1701  				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1702  				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
1703  
1704  			cdm->pos.cookie.bus = device->target->bus;
1705  			cdm->pos.generations[CAM_BUS_GENERATION]=
1706  				xsoftc.bus_generation;
1707  			cdm->pos.cookie.target = device->target;
1708  			cdm->pos.generations[CAM_TARGET_GENERATION] =
1709  				device->target->bus->generation;
1710  			cdm->pos.cookie.device = device;
1711  			cdm->pos.generations[CAM_DEV_GENERATION] =
1712  				device->target->generation;
1713  			cdm->status = CAM_DEV_MATCH_MORE;
1714  			return(0);
1715  		}
1716  		j = cdm->num_matches;
1717  		cdm->num_matches++;
1718  		cdm->matches[j].type = DEV_MATCH_DEVICE;
1719  		cdm->matches[j].result.device_result.path_id =
1720  			device->target->bus->path_id;
1721  		cdm->matches[j].result.device_result.target_id =
1722  			device->target->target_id;
1723  		cdm->matches[j].result.device_result.target_lun =
1724  			device->lun_id;
1725  		cdm->matches[j].result.device_result.protocol =
1726  			device->protocol;
1727  		bcopy(&device->inq_data,
1728  		      &cdm->matches[j].result.device_result.inq_data,
1729  		      sizeof(struct scsi_inquiry_data));
1730  		bcopy(&device->ident_data,
1731  		      &cdm->matches[j].result.device_result.ident_data,
1732  		      sizeof(struct ata_params));
1733  
1734  		/* Let the user know whether this device is unconfigured */
1735  		if (device->flags & CAM_DEV_UNCONFIGURED)
1736  			cdm->matches[j].result.device_result.flags =
1737  				DEV_RESULT_UNCONFIGURED;
1738  		else
1739  			cdm->matches[j].result.device_result.flags =
1740  				DEV_RESULT_NOFLAG;
1741  	}
1742  
1743  	/*
1744  	 * If the user isn't interested in peripherals, don't descend
1745  	 * the tree any further.
1746  	 */
1747  	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1748  		return(1);
1749  
1750  	/*
1751  	 * If there is a peripheral list generation recorded, make sure
1752  	 * it hasn't changed.
1753  	 */
1754  	xpt_lock_buses();
1755  	mtx_lock(&bus->eb_mtx);
1756  	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1757  	 && (cdm->pos.cookie.bus == bus)
1758  	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1759  	 && (cdm->pos.cookie.target == device->target)
1760  	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1761  	 && (cdm->pos.cookie.device == device)
1762  	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1763  	 && (cdm->pos.cookie.periph != NULL)) {
1764  		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1765  		    device->generation) {
1766  			mtx_unlock(&bus->eb_mtx);
1767  			xpt_unlock_buses();
1768  			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1769  			return(0);
1770  		}
1771  		periph = (struct cam_periph *)cdm->pos.cookie.periph;
1772  		periph->refcount++;
1773  	} else
1774  		periph = NULL;
1775  	mtx_unlock(&bus->eb_mtx);
1776  	xpt_unlock_buses();
1777  
1778  	return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
1779  }
1780  
1781  static int
1782  xptedtperiphfunc(struct cam_periph *periph, void *arg)
1783  {
1784  	struct ccb_dev_match *cdm;
1785  	dev_match_ret retval;
1786  
1787  	cdm = (struct ccb_dev_match *)arg;
1788  
1789  	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1790  
1791  	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1792  		cdm->status = CAM_DEV_MATCH_ERROR;
1793  		return(0);
1794  	}
1795  
1796  	/*
1797  	 * If the copy flag is set, copy this peripheral out.
1798  	 */
1799  	if (retval & DM_RET_COPY) {
1800  		int spaceleft, j;
1801  
1802  		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1803  			sizeof(struct dev_match_result));
1804  
1805  		/*
1806  		 * If we don't have enough space to put in another
1807  		 * match result, save our position and tell the
1808  		 * user there are more devices to check.
1809  		 */
1810  		if (spaceleft < sizeof(struct dev_match_result)) {
1811  			bzero(&cdm->pos, sizeof(cdm->pos));
1812  			cdm->pos.position_type =
1813  				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1814  				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1815  				CAM_DEV_POS_PERIPH;
1816  
1817  			cdm->pos.cookie.bus = periph->path->bus;
1818  			cdm->pos.generations[CAM_BUS_GENERATION]=
1819  				xsoftc.bus_generation;
1820  			cdm->pos.cookie.target = periph->path->target;
1821  			cdm->pos.generations[CAM_TARGET_GENERATION] =
1822  				periph->path->bus->generation;
1823  			cdm->pos.cookie.device = periph->path->device;
1824  			cdm->pos.generations[CAM_DEV_GENERATION] =
1825  				periph->path->target->generation;
1826  			cdm->pos.cookie.periph = periph;
1827  			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1828  				periph->path->device->generation;
1829  			cdm->status = CAM_DEV_MATCH_MORE;
1830  			return(0);
1831  		}
1832  
1833  		j = cdm->num_matches;
1834  		cdm->num_matches++;
1835  		cdm->matches[j].type = DEV_MATCH_PERIPH;
1836  		cdm->matches[j].result.periph_result.path_id =
1837  			periph->path->bus->path_id;
1838  		cdm->matches[j].result.periph_result.target_id =
1839  			periph->path->target->target_id;
1840  		cdm->matches[j].result.periph_result.target_lun =
1841  			periph->path->device->lun_id;
1842  		cdm->matches[j].result.periph_result.unit_number =
1843  			periph->unit_number;
1844  		strncpy(cdm->matches[j].result.periph_result.periph_name,
1845  			periph->periph_name, DEV_IDLEN);
1846  	}
1847  
1848  	return(1);
1849  }
1850  
1851  static int
1852  xptedtmatch(struct ccb_dev_match *cdm)
1853  {
1854  	struct cam_eb *bus;
1855  	int ret;
1856  
1857  	cdm->num_matches = 0;
1858  
1859  	/*
1860  	 * Check the bus list generation.  If it has changed, the user
1861  	 * needs to reset everything and start over.
1862  	 */
1863  	xpt_lock_buses();
1864  	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1865  	 && (cdm->pos.cookie.bus != NULL)) {
1866  		if (cdm->pos.generations[CAM_BUS_GENERATION] !=
1867  		    xsoftc.bus_generation) {
1868  			xpt_unlock_buses();
1869  			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1870  			return(0);
1871  		}
1872  		bus = (struct cam_eb *)cdm->pos.cookie.bus;
1873  		bus->refcount++;
1874  	} else
1875  		bus = NULL;
1876  	xpt_unlock_buses();
1877  
1878  	ret = xptbustraverse(bus, xptedtbusfunc, cdm);
1879  
1880  	/*
1881  	 * If we get back 0, that means that we had to stop before fully
1882  	 * traversing the EDT.  It also means that one of the subroutines
1883  	 * has set the status field to the proper value.  If we get back 1,
1884  	 * we've fully traversed the EDT and copied out any matching entries.
1885  	 */
1886  	if (ret == 1)
1887  		cdm->status = CAM_DEV_MATCH_LAST;
1888  
1889  	return(ret);
1890  }
1891  
1892  static int
1893  xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
1894  {
1895  	struct cam_periph *periph;
1896  	struct ccb_dev_match *cdm;
1897  
1898  	cdm = (struct ccb_dev_match *)arg;
1899  
1900  	xpt_lock_buses();
1901  	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1902  	 && (cdm->pos.cookie.pdrv == pdrv)
1903  	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1904  	 && (cdm->pos.cookie.periph != NULL)) {
1905  		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1906  		    (*pdrv)->generation) {
1907  			xpt_unlock_buses();
1908  			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1909  			return(0);
1910  		}
1911  		periph = (struct cam_periph *)cdm->pos.cookie.periph;
1912  		periph->refcount++;
1913  	} else
1914  		periph = NULL;
1915  	xpt_unlock_buses();
1916  
1917  	return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
1918  }
1919  
1920  static int
1921  xptplistperiphfunc(struct cam_periph *periph, void *arg)
1922  {
1923  	struct ccb_dev_match *cdm;
1924  	dev_match_ret retval;
1925  
1926  	cdm = (struct ccb_dev_match *)arg;
1927  
1928  	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1929  
1930  	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1931  		cdm->status = CAM_DEV_MATCH_ERROR;
1932  		return(0);
1933  	}
1934  
1935  	/*
1936  	 * If the copy flag is set, copy this peripheral out.
1937  	 */
1938  	if (retval & DM_RET_COPY) {
1939  		int spaceleft, j;
1940  
1941  		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1942  			sizeof(struct dev_match_result));
1943  
1944  		/*
1945  		 * If we don't have enough space to put in another
1946  		 * match result, save our position and tell the
1947  		 * user there are more devices to check.
1948  		 */
1949  		if (spaceleft < sizeof(struct dev_match_result)) {
1950  			struct periph_driver **pdrv;
1951  
1952  			pdrv = NULL;
1953  			bzero(&cdm->pos, sizeof(cdm->pos));
1954  			cdm->pos.position_type =
1955  				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
1956  				CAM_DEV_POS_PERIPH;
1957  
1958  			/*
1959  			 * This may look a bit nonsensical, but it is
1960  			 * actually quite logical.  There are very few
1961  			 * peripheral drivers, and bloating every peripheral
1962  			 * structure with a pointer back to its parent
1963  			 * peripheral driver linker set entry would cost
1964  			 * more in the long run than doing this quick lookup.
1965  			 */
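      			/*
      			 * For reference, peripheral drivers enter this
      			 * linker set through PERIPHDRIVER_DECLARE(); a
      			 * driver such as da(4) registers itself with:
      			 *
      			 *	PERIPHDRIVER_DECLARE(da, dadriver);
      			 *
      			 * which is what makes this name-based lookup work.
      			 */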
1966  			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
1967  				if (strcmp((*pdrv)->driver_name,
1968  				    periph->periph_name) == 0)
1969  					break;
1970  			}
1971  
1972  			if (*pdrv == NULL) {
1973  				cdm->status = CAM_DEV_MATCH_ERROR;
1974  				return(0);
1975  			}
1976  
1977  			cdm->pos.cookie.pdrv = pdrv;
1978  			/*
1979  			 * The periph generation slot does double duty, as
1980  			 * does the periph pointer slot.  They are used for
1981  			 * both edt and pdrv lookups and positioning.
1982  			 */
1983  			cdm->pos.cookie.periph = periph;
1984  			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1985  				(*pdrv)->generation;
1986  			cdm->status = CAM_DEV_MATCH_MORE;
1987  			return(0);
1988  		}
1989  
1990  		j = cdm->num_matches;
1991  		cdm->num_matches++;
1992  		cdm->matches[j].type = DEV_MATCH_PERIPH;
1993  		cdm->matches[j].result.periph_result.path_id =
1994  			periph->path->bus->path_id;
1995  
1996  		/*
1997  		 * The transport layer peripheral doesn't have a target or
1998  		 * lun.
1999  		 */
2000  		if (periph->path->target)
2001  			cdm->matches[j].result.periph_result.target_id =
2002  				periph->path->target->target_id;
2003  		else
2004  			cdm->matches[j].result.periph_result.target_id =
2005  				CAM_TARGET_WILDCARD;
2006  
2007  		if (periph->path->device)
2008  			cdm->matches[j].result.periph_result.target_lun =
2009  				periph->path->device->lun_id;
2010  		else
2011  			cdm->matches[j].result.periph_result.target_lun =
2012  				CAM_LUN_WILDCARD;
2013  
2014  		cdm->matches[j].result.periph_result.unit_number =
2015  			periph->unit_number;
2016  		strncpy(cdm->matches[j].result.periph_result.periph_name,
2017  			periph->periph_name, DEV_IDLEN);
2018  	}
2019  
2020  	return(1);
2021  }
2022  
2023  static int
2024  xptperiphlistmatch(struct ccb_dev_match *cdm)
2025  {
2026  	int ret;
2027  
2028  	cdm->num_matches = 0;
2029  
2030  	/*
2031  	 * At the equivalent point in the EDT traversal function, we check
2032  	 * the bus list generation to make sure that no busses have been
2033  	 * added or removed since the user last sent a XPT_DEV_MATCH ccb
2034  	 * through.  For this peripheral driver list traversal, however,
2035  	 * we don't have to worry about new peripheral driver types coming
2036  	 * or going; they're in a linker set, and therefore can't change
2037  	 * without a recompile.
2038  	 */
2039  
2040  	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2041  	 && (cdm->pos.cookie.pdrv != NULL))
2042  		ret = xptpdrvtraverse(
2043  				(struct periph_driver **)cdm->pos.cookie.pdrv,
2044  				xptplistpdrvfunc, cdm);
2045  	else
2046  		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2047  
2048  	/*
2049  	 * If we get back 0, that means that we had to stop before fully
2050  	 * traversing the peripheral driver tree.  It also means that one of
2051  	 * the subroutines has set the status field to the proper value.  If
2052  	 * we get back 1, we've fully traversed the peripheral driver tree
2053  	 * and copied out any matching entries.
2054  	 */
2055  	if (ret == 1)
2056  		cdm->status = CAM_DEV_MATCH_LAST;
2057  
2058  	return(ret);
2059  }
2060  
2061  static int
2062  xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2063  {
2064  	struct cam_eb *bus, *next_bus;
2065  	int retval;
2066  
2067  	retval = 1;
2068  	if (start_bus)
2069  		bus = start_bus;
2070  	else {
2071  		xpt_lock_buses();
2072  		bus = TAILQ_FIRST(&xsoftc.xpt_busses);
2073  		if (bus == NULL) {
2074  			xpt_unlock_buses();
2075  			return (retval);
2076  		}
2077  		bus->refcount++;
2078  		xpt_unlock_buses();
2079  	}
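      	/*
      	 * Each pass through this loop holds a reference on the current
      	 * bus.  We take a reference on the next bus before dropping the
      	 * current one so that our position in the list stays valid even
      	 * if tr_func sleeps or the current bus is unregistered.
      	 */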
2080  	for (; bus != NULL; bus = next_bus) {
2081  		retval = tr_func(bus, arg);
2082  		if (retval == 0) {
2083  			xpt_release_bus(bus);
2084  			break;
2085  		}
2086  		xpt_lock_buses();
2087  		next_bus = TAILQ_NEXT(bus, links);
2088  		if (next_bus)
2089  			next_bus->refcount++;
2090  		xpt_unlock_buses();
2091  		xpt_release_bus(bus);
2092  	}
2093  	return(retval);
2094  }
2095  
2096  static int
2097  xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2098  		  xpt_targetfunc_t *tr_func, void *arg)
2099  {
2100  	struct cam_et *target, *next_target;
2101  	int retval;
2102  
2103  	retval = 1;
2104  	if (start_target)
2105  		target = start_target;
2106  	else {
2107  		mtx_lock(&bus->eb_mtx);
2108  		target = TAILQ_FIRST(&bus->et_entries);
2109  		if (target == NULL) {
2110  			mtx_unlock(&bus->eb_mtx);
2111  			return (retval);
2112  		}
2113  		target->refcount++;
2114  		mtx_unlock(&bus->eb_mtx);
2115  	}
2116  	for (; target != NULL; target = next_target) {
2117  		retval = tr_func(target, arg);
2118  		if (retval == 0) {
2119  			xpt_release_target(target);
2120  			break;
2121  		}
2122  		mtx_lock(&bus->eb_mtx);
2123  		next_target = TAILQ_NEXT(target, links);
2124  		if (next_target)
2125  			next_target->refcount++;
2126  		mtx_unlock(&bus->eb_mtx);
2127  		xpt_release_target(target);
2128  	}
2129  	return(retval);
2130  }
2131  
2132  static int
2133  xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2134  		  xpt_devicefunc_t *tr_func, void *arg)
2135  {
2136  	struct cam_eb *bus;
2137  	struct cam_ed *device, *next_device;
2138  	int retval;
2139  
2140  	retval = 1;
2141  	bus = target->bus;
2142  	if (start_device)
2143  		device = start_device;
2144  	else {
2145  		mtx_lock(&bus->eb_mtx);
2146  		device = TAILQ_FIRST(&target->ed_entries);
2147  		if (device == NULL) {
2148  			mtx_unlock(&bus->eb_mtx);
2149  			return (retval);
2150  		}
2151  		device->refcount++;
2152  		mtx_unlock(&bus->eb_mtx);
2153  	}
2154  	for (; device != NULL; device = next_device) {
2155  		mtx_lock(&device->device_mtx);
2156  		retval = tr_func(device, arg);
2157  		mtx_unlock(&device->device_mtx);
2158  		if (retval == 0) {
2159  			xpt_release_device(device);
2160  			break;
2161  		}
2162  		mtx_lock(&bus->eb_mtx);
2163  		next_device = TAILQ_NEXT(device, links);
2164  		if (next_device)
2165  			next_device->refcount++;
2166  		mtx_unlock(&bus->eb_mtx);
2167  		xpt_release_device(device);
2168  	}
2169  	return(retval);
2170  }
2171  
2172  static int
2173  xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2174  		  xpt_periphfunc_t *tr_func, void *arg)
2175  {
2176  	struct cam_eb *bus;
2177  	struct cam_periph *periph, *next_periph;
2178  	int retval;
2179  
2180  	retval = 1;
2181  
2182  	bus = device->target->bus;
2183  	if (start_periph)
2184  		periph = start_periph;
2185  	else {
2186  		xpt_lock_buses();
2187  		mtx_lock(&bus->eb_mtx);
2188  		periph = SLIST_FIRST(&device->periphs);
2189  		while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2190  			periph = SLIST_NEXT(periph, periph_links);
2191  		if (periph == NULL) {
2192  			mtx_unlock(&bus->eb_mtx);
2193  			xpt_unlock_buses();
2194  			return (retval);
2195  		}
2196  		periph->refcount++;
2197  		mtx_unlock(&bus->eb_mtx);
2198  		xpt_unlock_buses();
2199  	}
2200  	for (; periph != NULL; periph = next_periph) {
2201  		retval = tr_func(periph, arg);
2202  		if (retval == 0) {
2203  			cam_periph_release_locked(periph);
2204  			break;
2205  		}
2206  		xpt_lock_buses();
2207  		mtx_lock(&bus->eb_mtx);
2208  		next_periph = SLIST_NEXT(periph, periph_links);
2209  		while (next_periph != NULL &&
2210  		    (next_periph->flags & CAM_PERIPH_FREE) != 0)
2211  			next_periph = SLIST_NEXT(next_periph, periph_links);
2212  		if (next_periph)
2213  			next_periph->refcount++;
2214  		mtx_unlock(&bus->eb_mtx);
2215  		xpt_unlock_buses();
2216  		cam_periph_release_locked(periph);
2217  	}
2218  	return(retval);
2219  }
2220  
2221  static int
2222  xptpdrvtraverse(struct periph_driver **start_pdrv,
2223  		xpt_pdrvfunc_t *tr_func, void *arg)
2224  {
2225  	struct periph_driver **pdrv;
2226  	int retval;
2227  
2228  	retval = 1;
2229  
2230  	/*
2231  	 * We don't traverse the peripheral driver list like we do the
2232  	 * other lists, because it is a linker set, and therefore cannot be
2233  	 * changed during runtime.  If the peripheral driver list is ever
2234  	 * re-done to be something other than a linker set (i.e. it can
2235  	 * change while the system is running), the list traversal should
2236  	 * be modified to work like the other traversal functions.
2237  	 */
2238  	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2239  	     *pdrv != NULL; pdrv++) {
2240  		retval = tr_func(pdrv, arg);
2241  
2242  		if (retval == 0)
2243  			return(retval);
2244  	}
2245  
2246  	return(retval);
2247  }
2248  
2249  static int
2250  xptpdperiphtraverse(struct periph_driver **pdrv,
2251  		    struct cam_periph *start_periph,
2252  		    xpt_periphfunc_t *tr_func, void *arg)
2253  {
2254  	struct cam_periph *periph, *next_periph;
2255  	int retval;
2256  
2257  	retval = 1;
2258  
2259  	if (start_periph)
2260  		periph = start_periph;
2261  	else {
2262  		xpt_lock_buses();
2263  		periph = TAILQ_FIRST(&(*pdrv)->units);
2264  		while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2265  			periph = TAILQ_NEXT(periph, unit_links);
2266  		if (periph == NULL) {
2267  			xpt_unlock_buses();
2268  			return (retval);
2269  		}
2270  		periph->refcount++;
2271  		xpt_unlock_buses();
2272  	}
2273  	for (; periph != NULL; periph = next_periph) {
2274  		cam_periph_lock(periph);
2275  		retval = tr_func(periph, arg);
2276  		cam_periph_unlock(periph);
2277  		if (retval == 0) {
2278  			cam_periph_release(periph);
2279  			break;
2280  		}
2281  		xpt_lock_buses();
2282  		next_periph = TAILQ_NEXT(periph, unit_links);
2283  		while (next_periph != NULL &&
2284  		    (next_periph->flags & CAM_PERIPH_FREE) != 0)
2285  			next_periph = TAILQ_NEXT(next_periph, unit_links);
2286  		if (next_periph)
2287  			next_periph->refcount++;
2288  		xpt_unlock_buses();
2289  		cam_periph_release(periph);
2290  	}
2291  	return(retval);
2292  }
2293  
2294  static int
2295  xptdefbusfunc(struct cam_eb *bus, void *arg)
2296  {
2297  	struct xpt_traverse_config *tr_config;
2298  
2299  	tr_config = (struct xpt_traverse_config *)arg;
2300  
2301  	if (tr_config->depth == XPT_DEPTH_BUS) {
2302  		xpt_busfunc_t *tr_func;
2303  
2304  		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2305  
2306  		return(tr_func(bus, tr_config->tr_arg));
2307  	} else
2308  		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2309  }
2310  
2311  static int
2312  xptdeftargetfunc(struct cam_et *target, void *arg)
2313  {
2314  	struct xpt_traverse_config *tr_config;
2315  
2316  	tr_config = (struct xpt_traverse_config *)arg;
2317  
2318  	if (tr_config->depth == XPT_DEPTH_TARGET) {
2319  		xpt_targetfunc_t *tr_func;
2320  
2321  		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2322  
2323  		return(tr_func(target, tr_config->tr_arg));
2324  	} else
2325  		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2326  }
2327  
2328  static int
2329  xptdefdevicefunc(struct cam_ed *device, void *arg)
2330  {
2331  	struct xpt_traverse_config *tr_config;
2332  
2333  	tr_config = (struct xpt_traverse_config *)arg;
2334  
2335  	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2336  		xpt_devicefunc_t *tr_func;
2337  
2338  		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2339  
2340  		return(tr_func(device, tr_config->tr_arg));
2341  	} else
2342  		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2343  }
2344  
2345  static int
2346  xptdefperiphfunc(struct cam_periph *periph, void *arg)
2347  {
2348  	struct xpt_traverse_config *tr_config;
2349  	xpt_periphfunc_t *tr_func;
2350  
2351  	tr_config = (struct xpt_traverse_config *)arg;
2352  
2353  	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2354  
2355  	/*
2356  	 * Unlike the other default functions, we don't check for depth
2357  	 * here.  The peripheral driver level is the last level in the EDT,
2358  	 * so if we're here, we should execute the function in question.
2359  	 */
2360  	return(tr_func(periph, tr_config->tr_arg));
2361  }
2362  
2363  /*
2364   * Execute the given function for every bus in the EDT.
2365   */
2366  static int
2367  xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2368  {
2369  	struct xpt_traverse_config tr_config;
2370  
2371  	tr_config.depth = XPT_DEPTH_BUS;
2372  	tr_config.tr_func = tr_func;
2373  	tr_config.tr_arg = arg;
2374  
2375  	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2376  }
2377  
2378  /*
2379   * Execute the given function for every device in the EDT.
2380   */
2381  static int
2382  xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2383  {
2384  	struct xpt_traverse_config tr_config;
2385  
2386  	tr_config.depth = XPT_DEPTH_DEVICE;
2387  	tr_config.tr_func = tr_func;
2388  	tr_config.tr_arg = arg;
2389  
2390  	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2391  }
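
      /*
       * A minimal sketch of a traversal callback for the functions above;
       * the callback name and counter are hypothetical.  A callback returns
       * nonzero to continue the traversal and zero to stop it:
       *
       *	static int
       *	xptcountdevsfunc(struct cam_ed *device, void *arg)
       *	{
       *
       *		(*(int *)arg)++;
       *		return (1);
       *	}
       *
       *	int count = 0;
       *	xpt_for_all_devices(xptcountdevsfunc, &count);
       */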
2392  
2393  static int
2394  xptsetasyncfunc(struct cam_ed *device, void *arg)
2395  {
2396  	struct cam_path path;
2397  	struct ccb_getdev cgd;
2398  	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2399  
2400  	/*
2401  	 * Don't report unconfigured devices (Wildcard devs,
2402  	 * devices only for target mode, device instances
2403  	 * that have been invalidated but are waiting for
2404  	 * their last reference count to be released).
2405  	 */
2406  	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2407  		return (1);
2408  
2409  	xpt_compile_path(&path,
2410  			 NULL,
2411  			 device->target->bus->path_id,
2412  			 device->target->target_id,
2413  			 device->lun_id);
2414  	xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
2415  	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2416  	xpt_action((union ccb *)&cgd);
2417  	csa->callback(csa->callback_arg,
2418  			    AC_FOUND_DEVICE,
2419  			    &path, &cgd);
2420  	xpt_release_path(&path);
2421  
2422  	return(1);
2423  }
2424  
2425  static int
2426  xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2427  {
2428  	struct cam_path path;
2429  	struct ccb_pathinq cpi;
2430  	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2431  
2432  	xpt_compile_path(&path, /*periph*/NULL,
2433  			 bus->path_id,
2434  			 CAM_TARGET_WILDCARD,
2435  			 CAM_LUN_WILDCARD);
2436  	xpt_path_lock(&path);
2437  	xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
2438  	cpi.ccb_h.func_code = XPT_PATH_INQ;
2439  	xpt_action((union ccb *)&cpi);
2440  	csa->callback(csa->callback_arg,
2441  			    AC_PATH_REGISTERED,
2442  			    &path, &cpi);
2443  	xpt_path_unlock(&path);
2444  	xpt_release_path(&path);
2445  
2446  	return(1);
2447  }
2448  
2449  void
2450  xpt_action(union ccb *start_ccb)
2451  {
2452  
2453  	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2454  
2455  	start_ccb->ccb_h.status = CAM_REQ_INPROG;
2456  	(*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb);
2457  }
2458  
2459  void
2460  xpt_action_default(union ccb *start_ccb)
2461  {
2462  	struct cam_path *path;
2463  	struct cam_sim *sim;
2464  	int lock;
2465  
2466  	path = start_ccb->ccb_h.path;
2467  	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));
2468  
2469  	switch (start_ccb->ccb_h.func_code) {
2470  	case XPT_SCSI_IO:
2471  	{
2472  		struct cam_ed *device;
2473  
2474  		/*
2475  		 * For the sake of compatibility with SCSI-1
2476  		 * devices that may not understand the identify
2477  		 * message, we include lun information in the
2478  		 * second byte of all commands.  SCSI-1 specifies
2479  		 * that luns are a 3 bit value and reserves only 3
2480  		 * bits for lun information in the CDB.  Later
2481  		 * revisions of the SCSI spec allow for more than 8
2482  		 * luns, but have deprecated lun information in the
2483  		 * CDB.  So, if the lun won't fit, we must omit it.
2484  		 *
2485  		 * Also be aware that during initial probing for devices,
2486  		 * the inquiry information is unknown but initialized to 0.
2487  		 * This means that this code will be exercised while probing
2488  		 * even devices whose ANSI revision is ultimately greater than 2.
2489  		 */
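      		/*
      		 * For example, a request to lun 3 on such a device ORs
      		 * 3 << 5 == 0x60 into byte 1 of the CDB below.
      		 */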
2490  		device = path->device;
2491  		if (device->protocol_version <= SCSI_REV_2
2492  		 && start_ccb->ccb_h.target_lun < 8
2493  		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2494  
2495  			start_ccb->csio.cdb_io.cdb_bytes[1] |=
2496  			    start_ccb->ccb_h.target_lun << 5;
2497  		}
2498  		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2499  	}
2500  	/* FALLTHROUGH */
2501  	case XPT_TARGET_IO:
2502  	case XPT_CONT_TARGET_IO:
2503  		start_ccb->csio.sense_resid = 0;
2504  		start_ccb->csio.resid = 0;
2505  		/* FALLTHROUGH */
2506  	case XPT_ATA_IO:
2507  		if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
2508  			start_ccb->ataio.resid = 0;
2509  		/* FALLTHROUGH */
2510  	case XPT_RESET_DEV:
2511  	case XPT_ENG_EXEC:
2512  	case XPT_SMP_IO:
2513  	{
2514  		struct cam_devq *devq;
2515  
2516  		devq = path->bus->sim->devq;
2517  		mtx_lock(&devq->send_mtx);
2518  		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2519  		if (xpt_schedule_devq(devq, path->device) != 0)
2520  			xpt_run_devq(devq);
2521  		mtx_unlock(&devq->send_mtx);
2522  		break;
2523  	}
2524  	case XPT_CALC_GEOMETRY:
2525  		/* Filter out garbage */
2526  		if (start_ccb->ccg.block_size == 0
2527  		 || start_ccb->ccg.volume_size == 0) {
2528  			start_ccb->ccg.cylinders = 0;
2529  			start_ccb->ccg.heads = 0;
2530  			start_ccb->ccg.secs_per_track = 0;
2531  			start_ccb->ccb_h.status = CAM_REQ_CMP;
2532  			break;
2533  		}
2534  #if defined(PC98) || defined(__sparc64__)
2535  		/*
2536  		 * In a PC-98 system, geometry translation depends on
2537  		 * the "real" device geometry obtained from mode page 4.
2538  		 * SCSI geometry translation is performed in the
2539  		 * initialization routine of the SCSI BIOS and the result
2540  		 * stored in host memory.  If the translation is available
2541  		 * in host memory, use it.  If not, rely on the default
2542  		 * translation the device driver performs.
2543  		 * For sparc64, we may need to adjust the geometry of large
2544  		 * disks in order to fit the limitations of the 16-bit
2545  		 * fields of the VTOC8 disk label.
2546  		 */
2547  		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2548  			start_ccb->ccb_h.status = CAM_REQ_CMP;
2549  			break;
2550  		}
2551  #endif
2552  		goto call_sim;
2553  	case XPT_ABORT:
2554  	{
2555  		union ccb* abort_ccb;
2556  
2557  		abort_ccb = start_ccb->cab.abort_ccb;
2558  		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2559  
2560  			if (abort_ccb->ccb_h.pinfo.index >= 0) {
2561  				struct cam_ccbq *ccbq;
2562  				struct cam_ed *device;
2563  
2564  				device = abort_ccb->ccb_h.path->device;
2565  				ccbq = &device->ccbq;
2566  				cam_ccbq_remove_ccb(ccbq, abort_ccb);
2567  				abort_ccb->ccb_h.status =
2568  				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2569  				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2570  				xpt_done(abort_ccb);
2571  				start_ccb->ccb_h.status = CAM_REQ_CMP;
2572  				break;
2573  			}
2574  			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2575  			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2576  				/*
2577  				 * We've caught this ccb en route to
2578  				 * the SIM.  Flag it for abort and the
2579  				 * SIM will do so just before starting
2580  				 * real work on the CCB.
2581  				 */
2582  				abort_ccb->ccb_h.status =
2583  				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2584  				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2585  				start_ccb->ccb_h.status = CAM_REQ_CMP;
2586  				break;
2587  			}
2588  		}
2589  		if (XPT_FC_IS_QUEUED(abort_ccb)
2590  		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2591  			/*
2592  			 * It's already completed but waiting
2593  			 * for our SWI to get to it.
2594  			 */
2595  			start_ccb->ccb_h.status = CAM_UA_ABORT;
2596  			break;
2597  		}
2598  		/*
2599  		 * If we weren't able to take care of the abort request
2600  		 * in the XPT, pass the request down to the SIM for processing.
2601  		 */
2602  	}
2603  	/* FALLTHROUGH */
2604  	case XPT_ACCEPT_TARGET_IO:
2605  	case XPT_EN_LUN:
2606  	case XPT_IMMED_NOTIFY:
2607  	case XPT_NOTIFY_ACK:
2608  	case XPT_RESET_BUS:
2609  	case XPT_IMMEDIATE_NOTIFY:
2610  	case XPT_NOTIFY_ACKNOWLEDGE:
2611  	case XPT_GET_SIM_KNOB:
2612  	case XPT_SET_SIM_KNOB:
2613  	case XPT_GET_TRAN_SETTINGS:
2614  	case XPT_SET_TRAN_SETTINGS:
2615  	case XPT_PATH_INQ:
2616  call_sim:
2617  		sim = path->bus->sim;
2618  		lock = (mtx_owned(sim->mtx) == 0);
2619  		if (lock)
2620  			CAM_SIM_LOCK(sim);
2621  		(*(sim->sim_action))(sim, start_ccb);
2622  		if (lock)
2623  			CAM_SIM_UNLOCK(sim);
2624  		break;
2625  	case XPT_PATH_STATS:
2626  		start_ccb->cpis.last_reset = path->bus->last_reset;
2627  		start_ccb->ccb_h.status = CAM_REQ_CMP;
2628  		break;
2629  	case XPT_GDEV_TYPE:
2630  	{
2631  		struct cam_ed *dev;
2632  
2633  		dev = path->device;
2634  		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2635  			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2636  		} else {
2637  			struct ccb_getdev *cgd;
2638  
2639  			cgd = &start_ccb->cgd;
2640  			cgd->protocol = dev->protocol;
2641  			cgd->inq_data = dev->inq_data;
2642  			cgd->ident_data = dev->ident_data;
2643  			cgd->inq_flags = dev->inq_flags;
2644  			cgd->ccb_h.status = CAM_REQ_CMP;
2645  			cgd->serial_num_len = dev->serial_num_len;
2646  			if ((dev->serial_num_len > 0)
2647  			 && (dev->serial_num != NULL))
2648  				bcopy(dev->serial_num, cgd->serial_num,
2649  				      dev->serial_num_len);
2650  		}
2651  		break;
2652  	}
2653  	case XPT_GDEV_STATS:
2654  	{
2655  		struct cam_ed *dev;
2656  
2657  		dev = path->device;
2658  		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2659  			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2660  		} else {
2661  			struct ccb_getdevstats *cgds;
2662  			struct cam_eb *bus;
2663  			struct cam_et *tar;
2664  			struct cam_devq *devq;
2665  
2666  			cgds = &start_ccb->cgds;
2667  			bus = path->bus;
2668  			tar = path->target;
2669  			devq = bus->sim->devq;
2670  			mtx_lock(&devq->send_mtx);
2671  			cgds->dev_openings = dev->ccbq.dev_openings;
2672  			cgds->dev_active = dev->ccbq.dev_active;
2673  			cgds->allocated = dev->ccbq.allocated;
2674  			cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
2675  			cgds->held = cgds->allocated - cgds->dev_active -
2676  			    cgds->queued;
2677  			cgds->last_reset = tar->last_reset;
2678  			cgds->maxtags = dev->maxtags;
2679  			cgds->mintags = dev->mintags;
2680  			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2681  				cgds->last_reset = bus->last_reset;
2682  			mtx_unlock(&devq->send_mtx);
2683  			cgds->ccb_h.status = CAM_REQ_CMP;
2684  		}
2685  		break;
2686  	}
2687  	case XPT_GDEVLIST:
2688  	{
2689  		struct cam_periph	*nperiph;
2690  		struct periph_list	*periph_head;
2691  		struct ccb_getdevlist	*cgdl;
2692  		u_int			i;
2693  		struct cam_ed		*device;
2694  		int			found;
2695  
2697  		found = 0;
2698  
2699  		/*
2700  		 * Don't want anyone mucking with our data.
2701  		 */
2702  		device = path->device;
2703  		periph_head = &device->periphs;
2704  		cgdl = &start_ccb->cgdl;
2705  
2706  		/*
2707  		 * Check and see if the list has changed since the user
2708  		 * last requested a list member.  If so, tell them that the
2709  		 * list has changed, and therefore they need to start over
2710  		 * from the beginning.
2711  		 */
2712  		if ((cgdl->index != 0) &&
2713  		    (cgdl->generation != device->generation)) {
2714  			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2715  			break;
2716  		}
2717  
2718  		/*
2719  		 * Traverse the list of peripherals and attempt to find
2720  		 * the requested peripheral.
2721  		 */
2722  		for (nperiph = SLIST_FIRST(periph_head), i = 0;
2723  		     (nperiph != NULL) && (i <= cgdl->index);
2724  		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
2725  			if (i == cgdl->index) {
2726  				strncpy(cgdl->periph_name,
2727  					nperiph->periph_name,
2728  					DEV_IDLEN);
2729  				cgdl->unit_number = nperiph->unit_number;
2730  				found = 1;
2731  			}
2732  		}
2733  		if (found == 0) {
2734  			cgdl->status = CAM_GDEVLIST_ERROR;
2735  			break;
2736  		}
2737  
2738  		if (nperiph == NULL)
2739  			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2740  		else
2741  			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2742  
2743  		cgdl->index++;
2744  		cgdl->generation = device->generation;
2745  
2746  		cgdl->ccb_h.status = CAM_REQ_CMP;
2747  		break;
2748  	}
2749  	case XPT_DEV_MATCH:
2750  	{
2751  		dev_pos_type position_type;
2752  		struct ccb_dev_match *cdm;
2753  
2754  		cdm = &start_ccb->cdm;
2755  
2756  		/*
2757  		 * There are two ways of getting at information in the EDT.
2758  		 * The first way is via the primary EDT tree.  It starts
2759  		 * with a list of busses, then a list of targets on a bus,
2760  		 * then devices/luns on a target, and then peripherals on a
2761  		 * device/lun.  The "other" way is by the peripheral driver
2762  		 * lists.  The peripheral driver lists are organized by
2763  		 * peripheral driver.  (obviously)  So it makes sense to
2764  		 * use the peripheral driver list if the user is looking
2765  		 * for something like "da1", or all "da" devices.  If the
2766  		 * user is looking for something on a particular bus/target
2767  		 * or lun, it's generally better to go through the EDT tree.
2768  		 */
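      		/*
      		 * A minimal sketch, using the pattern definitions from
      		 * cam_ccb.h, of a match pattern that would take the
      		 * peripheral driver list route by selecting all "da"
      		 * peripherals by name:
      		 *
      		 *	struct dev_match_pattern p;
      		 *
      		 *	p.type = DEV_MATCH_PERIPH;
      		 *	strlcpy(p.pattern.periph_pattern.periph_name, "da",
      		 *	    sizeof(p.pattern.periph_pattern.periph_name));
      		 *	p.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
      		 */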
2769  
2770  		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2771  			position_type = cdm->pos.position_type;
2772  		else {
2773  			u_int i;
2774  
2775  			position_type = CAM_DEV_POS_NONE;
2776  
2777  			for (i = 0; i < cdm->num_patterns; i++) {
2778  				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2779  				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
2780  					position_type = CAM_DEV_POS_EDT;
2781  					break;
2782  				}
2783  			}
2784  
2785  			if (cdm->num_patterns == 0)
2786  				position_type = CAM_DEV_POS_EDT;
2787  			else if (position_type == CAM_DEV_POS_NONE)
2788  				position_type = CAM_DEV_POS_PDRV;
2789  		}
2790  
2791  		switch(position_type & CAM_DEV_POS_TYPEMASK) {
2792  		case CAM_DEV_POS_EDT:
2793  			xptedtmatch(cdm);
2794  			break;
2795  		case CAM_DEV_POS_PDRV:
2796  			xptperiphlistmatch(cdm);
2797  			break;
2798  		default:
2799  			cdm->status = CAM_DEV_MATCH_ERROR;
2800  			break;
2801  		}
2802  
2803  		if (cdm->status == CAM_DEV_MATCH_ERROR)
2804  			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2805  		else
2806  			start_ccb->ccb_h.status = CAM_REQ_CMP;
2807  
2808  		break;
2809  	}
2810  	case XPT_SASYNC_CB:
2811  	{
2812  		struct ccb_setasync *csa;
2813  		struct async_node *cur_entry;
2814  		struct async_list *async_head;
2815  		u_int32_t added;
2816  
2817  		csa = &start_ccb->csa;
2818  		added = csa->event_enable;
2819  		async_head = &path->device->asyncs;
2820  
2821  		/*
2822  		 * If there is already an entry for us, simply
2823  		 * update it.
2824  		 */
2825  		cur_entry = SLIST_FIRST(async_head);
2826  		while (cur_entry != NULL) {
2827  			if ((cur_entry->callback_arg == csa->callback_arg)
2828  			 && (cur_entry->callback == csa->callback))
2829  				break;
2830  			cur_entry = SLIST_NEXT(cur_entry, links);
2831  		}
2832  
2833  		if (cur_entry != NULL) {
2834  		 	/*
2835  			 * If the request has no flags set,
2836  			 * remove the entry.
2837  			 */
2838  			added &= ~cur_entry->event_enable;
2839  			if (csa->event_enable == 0) {
2840  				SLIST_REMOVE(async_head, cur_entry,
2841  					     async_node, links);
2842  				xpt_release_device(path->device);
2843  				free(cur_entry, M_CAMXPT);
2844  			} else {
2845  				cur_entry->event_enable = csa->event_enable;
2846  			}
2847  			csa->event_enable = added;
2848  		} else {
2849  			cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
2850  					   M_NOWAIT);
2851  			if (cur_entry == NULL) {
2852  				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
2853  				break;
2854  			}
2855  			cur_entry->event_enable = csa->event_enable;
2856  			cur_entry->event_lock =
2857  			    mtx_owned(path->bus->sim->mtx) ? 1 : 0;
2858  			cur_entry->callback_arg = csa->callback_arg;
2859  			cur_entry->callback = csa->callback;
2860  			SLIST_INSERT_HEAD(async_head, cur_entry, links);
2861  			xpt_acquire_device(path->device);
2862  		}
2863  		start_ccb->ccb_h.status = CAM_REQ_CMP;
2864  		break;
2865  	}
2866  	case XPT_REL_SIMQ:
2867  	{
2868  		struct ccb_relsim *crs;
2869  		struct cam_ed *dev;
2870  
2871  		crs = &start_ccb->crs;
2872  		dev = path->device;
2873  		if (dev == NULL) {
2874  
2875  			crs->ccb_h.status = CAM_DEV_NOT_THERE;
2876  			break;
2877  		}
2878  
2879  		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
2880  
2881  			/* Don't ever go below one opening */
2882  			if (crs->openings > 0) {
2883  				xpt_dev_ccbq_resize(path, crs->openings);
2884  				if (bootverbose) {
2885  					xpt_print(path,
2886  					    "number of openings is now %d\n",
2887  					    crs->openings);
2888  				}
2889  			}
2890  		}
2891  
2892  		mtx_lock(&dev->sim->devq->send_mtx);
2893  		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
2894  
2895  			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
2896  
2897  				/*
2898  				 * Just extend the old timeout and decrement
2899  				 * the freeze count so that a single timeout
2900  				 * is sufficient for releasing the queue.
2901  				 */
2902  				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2903  				callout_stop(&dev->callout);
2904  			} else {
2905  
2906  				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2907  			}
2908  
2909  			callout_reset_sbt(&dev->callout,
2910  			    SBT_1MS * crs->release_timeout, 0,
2911  			    xpt_release_devq_timeout, dev, 0);
2912  
2913  			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
2914  
2915  		}
2916  
2917  		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
2918  
2919  			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
2920  				/*
2921  				 * Decrement the freeze count so that a single
2922  				 * completion is still sufficient to unfreeze
2923  				 * the queue.
2924  				 */
2925  				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2926  			} else {
2927  
2928  				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
2929  				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2930  			}
2931  		}
2932  
2933  		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
2934  
2935  			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
2936  			 || (dev->ccbq.dev_active == 0)) {
2937  
2938  				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2939  			} else {
2940  
2941  				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
2942  				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2943  			}
2944  		}
2945  		mtx_unlock(&dev->sim->devq->send_mtx);
2946  
2947  		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
2948  			xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
2949  		start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
2950  		start_ccb->ccb_h.status = CAM_REQ_CMP;
2951  		break;
2952  	}
2953  	case XPT_DEBUG: {
2954  		struct cam_path *oldpath;
2955  
2956  		/* Check that all request bits are supported. */
2957  		if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
2958  			start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2959  			break;
2960  		}
2961  
2962  		cam_dflags = CAM_DEBUG_NONE;
2963  		if (cam_dpath != NULL) {
2964  			oldpath = cam_dpath;
2965  			cam_dpath = NULL;
2966  			xpt_free_path(oldpath);
2967  		}
2968  		if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
2969  			if (xpt_create_path(&cam_dpath, NULL,
2970  					    start_ccb->ccb_h.path_id,
2971  					    start_ccb->ccb_h.target_id,
2972  					    start_ccb->ccb_h.target_lun) !=
2973  					    CAM_REQ_CMP) {
2974  				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2975  			} else {
2976  				cam_dflags = start_ccb->cdbg.flags;
2977  				start_ccb->ccb_h.status = CAM_REQ_CMP;
2978  				xpt_print(cam_dpath, "debugging flags now %x\n",
2979  				    cam_dflags);
2980  			}
2981  		} else
2982  			start_ccb->ccb_h.status = CAM_REQ_CMP;
2983  		break;
2984  	}
2985  	case XPT_NOOP:
2986  		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
2987  			xpt_freeze_devq(path, 1);
2988  		start_ccb->ccb_h.status = CAM_REQ_CMP;
2989  		break;
2990  	default:
2991  	case XPT_SDEV_TYPE:
2992  	case XPT_TERM_IO:
2993  	case XPT_ENG_INQ:
2994  		/* XXX Implement */
2995  		printf("%s: CCB type %#x not supported\n", __func__,
2996  		       start_ccb->ccb_h.func_code);
2997  		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
2998  		if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
2999  			xpt_done(start_ccb);
3000  		}
3001  		break;
3002  	}
3003  }
3004  
3005  void
3006  xpt_polled_action(union ccb *start_ccb)
3007  {
3008  	u_int32_t timeout;
3009  	struct	  cam_sim *sim;
3010  	struct	  cam_devq *devq;
3011  	struct	  cam_ed *dev;
3012  
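      	/*
      	 * ccb_h.timeout is specified in milliseconds; the polling loops
      	 * below DELAY() for 100us per iteration, so scale the timeout
      	 * to 100us ticks here.
      	 */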
3013  	timeout = start_ccb->ccb_h.timeout * 10;
3014  	sim = start_ccb->ccb_h.path->bus->sim;
3015  	devq = sim->devq;
3016  	dev = start_ccb->ccb_h.path->device;
3017  
3018  	mtx_unlock(&dev->device_mtx);
3019  
3020  	/*
3021  	 * Steal an opening so that no other queued requests
3022  	 * can get it before us while we simulate interrupts.
3023  	 */
3024  	mtx_lock(&devq->send_mtx);
3025  	dev->ccbq.dev_openings--;
3026  	while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
3027  	    (--timeout > 0)) {
3028  		mtx_unlock(&devq->send_mtx);
3029  		DELAY(100);
3030  		CAM_SIM_LOCK(sim);
3031  		(*(sim->sim_poll))(sim);
3032  		CAM_SIM_UNLOCK(sim);
3033  		camisr_runqueue();
3034  		mtx_lock(&devq->send_mtx);
3035  	}
3036  	dev->ccbq.dev_openings++;
3037  	mtx_unlock(&devq->send_mtx);
3038  
3039  	if (timeout != 0) {
3040  		xpt_action(start_ccb);
3041  		while(--timeout > 0) {
3042  			CAM_SIM_LOCK(sim);
3043  			(*(sim->sim_poll))(sim);
3044  			CAM_SIM_UNLOCK(sim);
3045  			camisr_runqueue();
3046  			if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3047  			    != CAM_REQ_INPROG)
3048  				break;
3049  			DELAY(100);
3050  		}
3051  		if (timeout == 0) {
3052  			/*
3053  			 * XXX Is it worth adding a sim_timeout entry
3054  			 * point so we can attempt recovery?  If
3055  			 * this is only used for dumps, I don't think
3056  			 * it is.
3057  			 */
3058  			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3059  		}
3060  	} else {
3061  		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3062  	}
3063  
3064  	mtx_lock(&dev->device_mtx);
3065  }
3066  
3067  /*
3068   * Schedule a peripheral driver to receive a ccb when its
3069   * target device has space for more transactions.
3070   */
3071  void
3072  xpt_schedule(struct cam_periph *periph, u_int32_t new_priority)
3073  {
3074  
3075  	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3076  	cam_periph_assert(periph, MA_OWNED);
3077  	if (new_priority < periph->scheduled_priority) {
3078  		periph->scheduled_priority = new_priority;
3079  		xpt_run_allocq(periph, 0);
3080  	}
3081  }
3082  
3083  
3084  /*
3085   * Schedule a device to run on a given queue.
3086   * If the device was inserted as a new entry on the queue, or its
3087   * priority was raised while it was already queued, return 1 meaning
3088   * the device queue should be run.  If the device was already queued
3089   * at an equal or higher priority, someone else has already started
3090   * the queue, so return 0 and the caller won't attempt to run it.
3091   */
3092  static int
3093  xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3094  		 u_int32_t new_priority)
3095  {
3096  	int retval;
3097  	u_int32_t old_priority;
3098  
3099  	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3100  
3101  	old_priority = pinfo->priority;
3102  
3103  	/*
3104  	 * Are we already queued?
3105  	 */
3106  	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3107  		/* Simply reorder based on new priority */
3108  		if (new_priority < old_priority) {
3109  			camq_change_priority(queue, pinfo->index,
3110  					     new_priority);
3111  			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3112  					("changed priority to %d\n",
3113  					 new_priority));
3114  			retval = 1;
3115  		} else
3116  			retval = 0;
3117  	} else {
3118  		/* New entry on the queue */
3119  		if (new_priority < old_priority)
3120  			pinfo->priority = new_priority;
3121  
3122  		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3123  				("Inserting onto queue\n"));
3124  		pinfo->generation = ++queue->generation;
3125  		camq_insert(queue, pinfo);
3126  		retval = 1;
3127  	}
3128  	return (retval);
3129  }
3130  
3131  static void
3132  xpt_run_allocq_task(void *context, int pending)
3133  {
3134  	struct cam_periph *periph = context;
3135  
3136  	cam_periph_lock(periph);
3137  	periph->flags &= ~CAM_PERIPH_RUN_TASK;
3138  	xpt_run_allocq(periph, 1);
3139  	cam_periph_unlock(periph);
3140  	cam_periph_release(periph);
3141  }
3142  
3143  static void
3144  xpt_run_allocq(struct cam_periph *periph, int sleep)
3145  {
3146  	struct cam_ed	*device;
3147  	union ccb	*ccb;
3148  	uint32_t	 prio;
3149  
3150  	cam_periph_assert(periph, MA_OWNED);
3151  	if (periph->periph_allocating)
3152  		return;
3153  	periph->periph_allocating = 1;
3154  	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
3155  	device = periph->path->device;
3156  	ccb = NULL;
3157  restart:
3158  	while ((prio = min(periph->scheduled_priority,
3159  	    periph->immediate_priority)) != CAM_PRIORITY_NONE &&
3160  	    (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
3161  	     device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
3162  
3163  		if (ccb == NULL &&
3164  		    (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
3165  			if (sleep) {
3166  				ccb = xpt_get_ccb(periph);
3167  				goto restart;
3168  			}
3169  			if (periph->flags & CAM_PERIPH_RUN_TASK)
3170  				break;
3171  			cam_periph_doacquire(periph);
3172  			periph->flags |= CAM_PERIPH_RUN_TASK;
3173  			taskqueue_enqueue(xsoftc.xpt_taskq,
3174  			    &periph->periph_run_task);
3175  			break;
3176  		}
3177  		xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
3178  		if (prio == periph->immediate_priority) {
3179  			periph->immediate_priority = CAM_PRIORITY_NONE;
3180  			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3181  					("waking cam_periph_getccb()\n"));
3182  			SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
3183  					  periph_links.sle);
3184  			wakeup(&periph->ccb_list);
3185  		} else {
3186  			periph->scheduled_priority = CAM_PRIORITY_NONE;
3187  			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3188  					("calling periph_start()\n"));
3189  			periph->periph_start(periph, ccb);
3190  		}
3191  		ccb = NULL;
3192  	}
3193  	if (ccb != NULL)
3194  		xpt_release_ccb(ccb);
3195  	periph->periph_allocating = 0;
3196  }
3197  
3198  static void
3199  xpt_run_devq(struct cam_devq *devq)
3200  {
3201  	char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
3202  	int lock;
3203  
3204  	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
3205  
3206  	devq->send_queue.qfrozen_cnt++;
3207  	while ((devq->send_queue.entries > 0)
3208  	    && (devq->send_openings > 0)
3209  	    && (devq->send_queue.qfrozen_cnt <= 1)) {
3210  		struct	cam_ed *device;
3211  		union ccb *work_ccb;
3212  		struct	cam_sim *sim;
3213  
3214  		device = (struct cam_ed *)camq_remove(&devq->send_queue,
3215  							   CAMQ_HEAD);
3216  		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3217  				("running device %p\n", device));
3218  
3219  		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3220  		if (work_ccb == NULL) {
3221  			printf("device on run queue with no ccbs???\n");
3222  			continue;
3223  		}
3224  
3225  		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3226  
3227  			mtx_lock(&xsoftc.xpt_highpower_lock);
3228  		 	if (xsoftc.num_highpower <= 0) {
3229  				/*
3230  				 * We got a high power command, but we
3231  				 * don't have any available slots.  Freeze
3232  				 * the device queue until we have a slot
3233  				 * available.
3234  				 */
3235  				xpt_freeze_devq_device(device, 1);
3236  				STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
3237  						   highpowerq_entry);
3238  
3239  				mtx_unlock(&xsoftc.xpt_highpower_lock);
3240  				continue;
3241  			} else {
3242  				/*
3243  				 * Consume a high power slot while
3244  				 * this ccb runs.
3245  				 */
3246  				xsoftc.num_highpower--;
3247  			}
3248  			mtx_unlock(&xsoftc.xpt_highpower_lock);
3249  		}
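      		/*
      		 * A high power slot consumed here is handed back, and a
      		 * device frozen above is thawed, when the CCB completes
      		 * in the xpt_done() path.
      		 */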
3250  		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3251  		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3252  		devq->send_openings--;
3253  		devq->send_active++;
3254  		xpt_schedule_devq(devq, device);
3255  		mtx_unlock(&devq->send_mtx);
3256  
3257  		if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3258  			/*
3259  			 * The client wants to freeze the queue
3260  			 * after this CCB is sent.
3261  			 */
3262  			xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3263  		}
3264  
3265  		/* In Target mode, the peripheral driver knows best... */
3266  		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3267  			if ((device->inq_flags & SID_CmdQue) != 0
3268  			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3269  				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3270  			else
3271  				/*
3272  				 * Clear this in case of a retried CCB that
3273  				 * failed due to a rejected tag.
3274  				 */
3275  				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3276  		}
3277  
3278  		switch (work_ccb->ccb_h.func_code) {
3279  		case XPT_SCSI_IO:
3280  			CAM_DEBUG(work_ccb->ccb_h.path,
3281  			    CAM_DEBUG_CDB,("%s. CDB: %s\n",
3282  			     scsi_op_desc(work_ccb->csio.cdb_io.cdb_bytes[0],
3283  					  &device->inq_data),
3284  			     scsi_cdb_string(work_ccb->csio.cdb_io.cdb_bytes,
3285  					     cdb_str, sizeof(cdb_str))));
3286  			break;
3287  		case XPT_ATA_IO:
3288  			CAM_DEBUG(work_ccb->ccb_h.path,
3289  			    CAM_DEBUG_CDB,("%s. ACB: %s\n",
3290  			     ata_op_string(&work_ccb->ataio.cmd),
3291  			     ata_cmd_string(&work_ccb->ataio.cmd,
3292  					    cdb_str, sizeof(cdb_str))));
3293  			break;
3294  		default:
3295  			break;
3296  		}
3297  
3298  		/*
3299  		 * Device queues can be shared among multiple SIM instances
3300  		 * that reside on different busses.  Use the SIM from the
3301  		 * queued device, rather than the one from the calling bus.
3302  		 */
3303  		sim = device->sim;
3304  		lock = (mtx_owned(sim->mtx) == 0);
3305  		if (lock)
3306  			CAM_SIM_LOCK(sim);
3307  		(*(sim->sim_action))(sim, work_ccb);
3308  		if (lock)
3309  			CAM_SIM_UNLOCK(sim);
3310  		mtx_lock(&devq->send_mtx);
3311  	}
3312  	devq->send_queue.qfrozen_cnt--;
3313  }
3314  
3315  /*
3316   * This function merges the request contents of the slave ccb into the
3317   * master ccb, while keeping important fields in the master ccb constant.
3318   */
3319  void
3320  xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3321  {
3322  
3323  	/*
3324  	 * Pull fields that are valid for peripheral drivers to set
3325  	 * into the master CCB along with the CCB "payload".
3326  	 */
3327  	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3328  	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3329  	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3330  	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
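      	/* Copy everything past the header: the payload of the CCB. */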
3331  	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3332  	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3333  }
3334  
3335  void
3336  xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path,
3337  		    u_int32_t priority, u_int32_t flags)
3338  {
3339  
3340  	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3341  	ccb_h->pinfo.priority = priority;
3342  	ccb_h->path = path;
3343  	ccb_h->path_id = path->bus->path_id;
3344  	if (path->target)
3345  		ccb_h->target_id = path->target->target_id;
3346  	else
3347  		ccb_h->target_id = CAM_TARGET_WILDCARD;
3348  	if (path->device) {
3349  		ccb_h->target_lun = path->device->lun_id;
3350  		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3351  	} else {
3352  		ccb_h->target_lun = CAM_LUN_WILDCARD;
3353  	}
3354  	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3355  	ccb_h->flags = flags;
3356  	ccb_h->xflags = 0;
3357  }
3358  
3359  void
3360  xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3361  {
3362  	xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0);
3363  }
3364  
3365  /* Path manipulation functions */
3366  cam_status
3367  xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3368  		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3369  {
3370  	struct	   cam_path *path;
3371  	cam_status status;
3372  
3373  	path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3374  
3375  	if (path == NULL) {
3376  		status = CAM_RESRC_UNAVAIL;
3377  		return(status);
3378  	}
3379  	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3380  	if (status != CAM_REQ_CMP) {
3381  		free(path, M_CAMPATH);
3382  		path = NULL;
3383  	}
3384  	*new_path_ptr = path;
3385  	return (status);
3386  }
3387  
3388  cam_status
3389  xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3390  			 struct cam_periph *periph, path_id_t path_id,
3391  			 target_id_t target_id, lun_id_t lun_id)
3392  {
3393  
3394  	return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
3395  	    lun_id));
3396  }
3397  
3398  cam_status
3399  xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3400  		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3401  {
3402  	struct	     cam_eb *bus;
3403  	struct	     cam_et *target;
3404  	struct	     cam_ed *device;
3405  	cam_status   status;
3406  
3407  	status = CAM_REQ_CMP;	/* Completed without error */
3408  	target = NULL;		/* Wildcarded */
3409  	device = NULL;		/* Wildcarded */
3410  
3411  	/*
3412  	 * We will potentially modify the EDT, so take the relevant locks
3413  	 * to keep other threads from creating cam paths while we do.
3414  	 */
3415  	bus = xpt_find_bus(path_id);
3416  	if (bus == NULL) {
3417  		status = CAM_PATH_INVALID;
3418  	} else {
3419  		xpt_lock_buses();
3420  		mtx_lock(&bus->eb_mtx);
3421  		target = xpt_find_target(bus, target_id);
3422  		if (target == NULL) {
3423  			/* Create one */
3424  			struct cam_et *new_target;
3425  
3426  			new_target = xpt_alloc_target(bus, target_id);
3427  			if (new_target == NULL) {
3428  				status = CAM_RESRC_UNAVAIL;
3429  			} else {
3430  				target = new_target;
3431  			}
3432  		}
3433  		xpt_unlock_buses();
3434  		if (target != NULL) {
3435  			device = xpt_find_device(target, lun_id);
3436  			if (device == NULL) {
3437  				/* Create one */
3438  				struct cam_ed *new_device;
3439  
3440  				new_device =
3441  				    (*(bus->xport->alloc_device))(bus,
3442  								      target,
3443  								      lun_id);
3444  				if (new_device == NULL) {
3445  					status = CAM_RESRC_UNAVAIL;
3446  				} else {
3447  					device = new_device;
3448  				}
3449  			}
3450  		}
3451  		mtx_unlock(&bus->eb_mtx);
3452  	}
3453  
3454  	/*
3455  	 * Only touch the user's data if we are successful.
3456  	 */
3457  	if (status == CAM_REQ_CMP) {
3458  		new_path->periph = perph;
3459  		new_path->bus = bus;
3460  		new_path->target = target;
3461  		new_path->device = device;
3462  		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3463  	} else {
3464  		if (device != NULL)
3465  			xpt_release_device(device);
3466  		if (target != NULL)
3467  			xpt_release_target(target);
3468  		if (bus != NULL)
3469  			xpt_release_bus(bus);
3470  	}
3471  	return (status);
3472  }
3473  
3474  cam_status
3475  xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
3476  {
3477  	struct	   cam_path *new_path;
3478  
3479  	new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3480  	if (new_path == NULL)
3481  		return(CAM_RESRC_UNAVAIL);
3482  	xpt_copy_path(new_path, path);
3483  	*new_path_ptr = new_path;
3484  	return (CAM_REQ_CMP);
3485  }
3486  
3487  void
3488  xpt_copy_path(struct cam_path *new_path, struct cam_path *path)
3489  {
3490  
3491  	*new_path = *path;
3492  	if (path->bus != NULL)
3493  		xpt_acquire_bus(path->bus);
3494  	if (path->target != NULL)
3495  		xpt_acquire_target(path->target);
3496  	if (path->device != NULL)
3497  		xpt_acquire_device(path->device);
3498  }
3499  
3500  void
3501  xpt_release_path(struct cam_path *path)
3502  {
3503  	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3504  	if (path->device != NULL) {
3505  		xpt_release_device(path->device);
3506  		path->device = NULL;
3507  	}
3508  	if (path->target != NULL) {
3509  		xpt_release_target(path->target);
3510  		path->target = NULL;
3511  	}
3512  	if (path->bus != NULL) {
3513  		xpt_release_bus(path->bus);
3514  		path->bus = NULL;
3515  	}
3516  }
3517  
3518  void
3519  xpt_free_path(struct cam_path *path)
3520  {
3521  
3522  	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3523  	xpt_release_path(path);
3524  	free(path, M_CAMPATH);
3525  }
3526  
3527  void
3528  xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
3529      uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
3530  {
3531  
3532  	xpt_lock_buses();
3533  	if (bus_ref) {
3534  		if (path->bus)
3535  			*bus_ref = path->bus->refcount;
3536  		else
3537  			*bus_ref = 0;
3538  	}
3539  	if (periph_ref) {
3540  		if (path->periph)
3541  			*periph_ref = path->periph->refcount;
3542  		else
3543  			*periph_ref = 0;
3544  	}
3545  	xpt_unlock_buses();
3546  	if (target_ref) {
3547  		if (path->target)
3548  			*target_ref = path->target->refcount;
3549  		else
3550  			*target_ref = 0;
3551  	}
3552  	if (device_ref) {
3553  		if (path->device)
3554  			*device_ref = path->device->refcount;
3555  		else
3556  			*device_ref = 0;
3557  	}
3558  }
3559  
3560  /*
3561   * Return -1 if the paths do not match, 0 for an exact match, 1 for a
3562   * match with wildcards in path1, and 2 for a match with wildcards in path2.
3563   */
3564  int
3565  xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3566  {
3567  	int retval = 0;
3568  
3569  	if (path1->bus != path2->bus) {
3570  		if (path1->bus->path_id == CAM_BUS_WILDCARD)
3571  			retval = 1;
3572  		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3573  			retval = 2;
3574  		else
3575  			return (-1);
3576  	}
3577  	if (path1->target != path2->target) {
3578  		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3579  			if (retval == 0)
3580  				retval = 1;
3581  		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3582  			retval = 2;
3583  		else
3584  			return (-1);
3585  	}
3586  	if (path1->device != path2->device) {
3587  		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3588  			if (retval == 0)
3589  				retval = 1;
3590  		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3591  			retval = 2;
3592  		else
3593  			return (-1);
3594  	}
3595  	return (retval);
3596  }
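/*
 * Example: a minimal sketch of interpreting xpt_path_comp() results
 * when filtering notifications (hypothetical caller, not part of this
 * file).
 */
#if 0
	switch (xpt_path_comp(periph->path, ccb->ccb_h.path)) {
	case -1:
		/* Different devices; ignore the notification. */
		break;
	case 0:		/* Exact match. */
	case 1:		/* Wildcards in our path matched. */
	case 2:		/* Wildcards in the other path matched. */
		/* ... deliver the notification ... */
		break;
	}
#endif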
3597  
3598  int
3599  xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
3600  {
3601  	int retval = 0;
3602  
3603  	if (path->bus != dev->target->bus) {
3604  		if (path->bus->path_id == CAM_BUS_WILDCARD)
3605  			retval = 1;
3606  		else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
3607  			retval = 2;
3608  		else
3609  			return (-1);
3610  	}
3611  	if (path->target != dev->target) {
3612  		if (path->target->target_id == CAM_TARGET_WILDCARD) {
3613  			if (retval == 0)
3614  				retval = 1;
3615  		} else if (dev->target->target_id == CAM_TARGET_WILDCARD)
3616  			retval = 2;
3617  		else
3618  			return (-1);
3619  	}
3620  	if (path->device != dev) {
3621  		if (path->device->lun_id == CAM_LUN_WILDCARD) {
3622  			if (retval == 0)
3623  				retval = 1;
3624  		} else if (dev->lun_id == CAM_LUN_WILDCARD)
3625  			retval = 2;
3626  		else
3627  			return (-1);
3628  	}
3629  	return (retval);
3630  }
3631  
3632  void
3633  xpt_print_path(struct cam_path *path)
3634  {
3635  
3636  	if (path == NULL)
3637  		printf("(nopath): ");
3638  	else {
3639  		if (path->periph != NULL)
3640  			printf("(%s%d:", path->periph->periph_name,
3641  			       path->periph->unit_number);
3642  		else
3643  			printf("(noperiph:");
3644  
3645  		if (path->bus != NULL)
3646  			printf("%s%d:%d:", path->bus->sim->sim_name,
3647  			       path->bus->sim->unit_number,
3648  			       path->bus->sim->bus_id);
3649  		else
3650  			printf("nobus:");
3651  
3652  		if (path->target != NULL)
3653  			printf("%d:", path->target->target_id);
3654  		else
3655  			printf("X:");
3656  
3657  		if (path->device != NULL)
3658  			printf("%jx): ", (uintmax_t)path->device->lun_id);
3659  		else
3660  			printf("X): ");
3661  	}
3662  }
3663  
3664  void
3665  xpt_print_device(struct cam_ed *device)
3666  {
3667  
3668  	if (device == NULL)
3669  		printf("(nopath): ");
3670  	else {
3671  		printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name,
3672  		       device->sim->unit_number,
3673  		       device->sim->bus_id,
3674  		       device->target->target_id,
3675  		       (uintmax_t)device->lun_id);
3676  	}
3677  }
3678  
3679  void
3680  xpt_print(struct cam_path *path, const char *fmt, ...)
3681  {
3682  	va_list ap;
3683  	xpt_print_path(path);
3684  	va_start(ap, fmt);
3685  	vprintf(fmt, ap);
3686  	va_end(ap);
3687  }
3688  
3689  int
3690  xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3691  {
3692  	struct sbuf sb;
3693  
3694  	sbuf_new(&sb, str, str_len, 0);
3695  
3696  	if (path == NULL)
3697  		sbuf_printf(&sb, "(nopath): ");
3698  	else {
3699  		if (path->periph != NULL)
3700  			sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
3701  				    path->periph->unit_number);
3702  		else
3703  			sbuf_printf(&sb, "(noperiph:");
3704  
3705  		if (path->bus != NULL)
3706  			sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
3707  				    path->bus->sim->unit_number,
3708  				    path->bus->sim->bus_id);
3709  		else
3710  			sbuf_printf(&sb, "nobus:");
3711  
3712  		if (path->target != NULL)
3713  			sbuf_printf(&sb, "%d:", path->target->target_id);
3714  		else
3715  			sbuf_printf(&sb, "X:");
3716  
3717  		if (path->device != NULL)
3718  			sbuf_printf(&sb, "%jx): ",
3719  			    (uintmax_t)path->device->lun_id);
3720  		else
3721  			sbuf_printf(&sb, "X): ");
3722  	}
3723  	sbuf_finish(&sb);
3724  
3725  	return(sbuf_len(&sb));
3726  }
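/*
 * Example: a minimal sketch of rendering a path prefix into a
 * caller-supplied buffer (hypothetical caller, not part of this file).
 */
#if 0
	char buf[64];

	xpt_path_string(path, buf, sizeof(buf));
	printf("%sdevice is ready\n", buf);
#endif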
3727  
3728  path_id_t
3729  xpt_path_path_id(struct cam_path *path)
3730  {
3731  	return(path->bus->path_id);
3732  }
3733  
3734  target_id_t
3735  xpt_path_target_id(struct cam_path *path)
3736  {
3737  	if (path->target != NULL)
3738  		return (path->target->target_id);
3739  	else
3740  		return (CAM_TARGET_WILDCARD);
3741  }
3742  
3743  lun_id_t
3744  xpt_path_lun_id(struct cam_path *path)
3745  {
3746  	if (path->device != NULL)
3747  		return (path->device->lun_id);
3748  	else
3749  		return (CAM_LUN_WILDCARD);
3750  }
3751  
3752  struct cam_sim *
3753  xpt_path_sim(struct cam_path *path)
3754  {
3755  
3756  	return (path->bus->sim);
3757  }
3758  
3759  struct cam_periph*
3760  xpt_path_periph(struct cam_path *path)
3761  {
3762  
3763  	return (path->periph);
3764  }
3765  
3766  /*
3767   * Release a CAM control block for the caller.  Remit the cost of the structure
3768   * to the device referenced by the path.  If this device had no 'credits'
3769   * and peripheral drivers have registered async callbacks for this
3770   * notification, call them now.
3771   */
3772  void
3773  xpt_release_ccb(union ccb *free_ccb)
3774  {
3775  	struct	 cam_ed *device;
3776  	struct	 cam_periph *periph;
3777  
3778  	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3779  	xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
3780  	device = free_ccb->ccb_h.path->device;
3781  	periph = free_ccb->ccb_h.path->periph;
3782  
3783  	xpt_free_ccb(free_ccb);
3784  	periph->periph_allocated--;
3785  	cam_ccbq_release_opening(&device->ccbq);
3786  	xpt_run_allocq(periph, 0);
3787  }
3788  
3789  /* Functions accessed by SIM drivers */
3790  
3791  static struct xpt_xport xport_default = {
3792  	.alloc_device = xpt_alloc_device_default,
3793  	.action = xpt_action_default,
3794  	.async = xpt_dev_async_default,
3795  };
3796  
3797  /*
3798   * A sim structure, listing the SIM entry points and instance
3799   * identification info, is passed to xpt_bus_register to hook the SIM
3800   * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3801   * for this new bus, inserts it into the sorted list of busses, and assigns
3802   * it a path_id.  The path_id may be influenced by "hard wiring"
3803   * information specified by the user.  Once interrupt services are
3804   * available, the bus will be probed.
3805   */
3806  int32_t
3807  xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
3808  {
3809  	struct cam_eb *new_bus;
3810  	struct cam_eb *old_bus;
3811  	struct ccb_pathinq cpi;
3812  	struct cam_path *path;
3813  	cam_status status;
3814  
3815  	mtx_assert(sim->mtx, MA_OWNED);
3816  
3817  	sim->bus_id = bus;
3818  	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
3819  					  M_CAMXPT, M_NOWAIT|M_ZERO);
3820  	if (new_bus == NULL) {
3821  		/* Couldn't satisfy request */
3822  		return (CAM_RESRC_UNAVAIL);
3823  	}
3824  
3825  	mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
3826  	TAILQ_INIT(&new_bus->et_entries);
3827  	cam_sim_hold(sim);
3828  	new_bus->sim = sim;
3829  	timevalclear(&new_bus->last_reset);
3830  	new_bus->flags = 0;
3831  	new_bus->refcount = 1;	/* Held until a bus_deregister event */
3832  	new_bus->generation = 0;
3833  
3834  	xpt_lock_buses();
3835  	sim->path_id = new_bus->path_id =
3836  	    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
3837  	old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3838  	while (old_bus != NULL
3839  	    && old_bus->path_id < new_bus->path_id)
3840  		old_bus = TAILQ_NEXT(old_bus, links);
3841  	if (old_bus != NULL)
3842  		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
3843  	else
3844  		TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
3845  	xsoftc.bus_generation++;
3846  	xpt_unlock_buses();
3847  
3848  	/*
3849  	 * Set a default transport so that a PATH_INQ can be issued to
3850  	 * the SIM.  This will then allow for probing and attaching of
3851  	 * a more appropriate transport.
3852  	 */
3853  	new_bus->xport = &xport_default;
3854  
3855  	status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
3856  				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3857  	if (status != CAM_REQ_CMP) {
3858  		xpt_release_bus(new_bus);
3860  		return (CAM_RESRC_UNAVAIL);
3861  	}
3862  
3863  	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
3864  	cpi.ccb_h.func_code = XPT_PATH_INQ;
3865  	xpt_action((union ccb *)&cpi);
3866  
3867  	if (cpi.ccb_h.status == CAM_REQ_CMP) {
3868  		switch (cpi.transport) {
3869  		case XPORT_SPI:
3870  		case XPORT_SAS:
3871  		case XPORT_FC:
3872  		case XPORT_USB:
3873  		case XPORT_ISCSI:
3874  		case XPORT_SRP:
3875  		case XPORT_PPB:
3876  			new_bus->xport = scsi_get_xport();
3877  			break;
3878  		case XPORT_ATA:
3879  		case XPORT_SATA:
3880  			new_bus->xport = ata_get_xport();
3881  			break;
3882  		default:
3883  			new_bus->xport = &xport_default;
3884  			break;
3885  		}
3886  	}
3887  
3888  	/* Notify interested parties */
3889  	if (sim->path_id != CAM_XPT_PATH_ID) {
3891  		xpt_async(AC_PATH_REGISTERED, path, &cpi);
3892  		if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
3893  			union	ccb *scan_ccb;
3894  
3895  			/* Initiate bus rescan. */
3896  			scan_ccb = xpt_alloc_ccb_nowait();
3897  			if (scan_ccb != NULL) {
3898  				scan_ccb->ccb_h.path = path;
3899  				scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
3900  				scan_ccb->crcn.flags = 0;
3901  				xpt_rescan(scan_ccb);
3902  			} else {
3903  				xpt_print(path,
3904  					  "Can't allocate CCB to scan bus\n");
3905  				xpt_free_path(path);
3906  			}
3907  		} else
3908  			xpt_free_path(path);
3909  	} else
3910  		xpt_free_path(path);
3911  	return (CAM_SUCCESS);
3912  }
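/*
 * Example: a minimal sketch of a SIM attach routine using the above
 * (hypothetical driver; "example_softc", "example_action" and
 * "example_poll" are invented names, not part of this file).
 */
#if 0
static int
example_attach(device_t dev, struct example_softc *sc)
{
	struct cam_devq *devq;

	if ((devq = cam_simq_alloc(/*max openings*/32)) == NULL)
		return (ENOMEM);
	sc->sim = cam_sim_alloc(example_action, example_poll, "example",
	    sc, device_get_unit(dev), &sc->mtx, /*max_dev_transactions*/32,
	    /*max_tagged_dev_transactions*/32, devq);
	if (sc->sim == NULL) {
		cam_simq_free(devq);
		return (ENOMEM);
	}
	mtx_lock(&sc->mtx);
	if (xpt_bus_register(sc->sim, dev, /*bus*/0) != CAM_SUCCESS) {
		cam_sim_free(sc->sim, /*free_devq*/TRUE);
		mtx_unlock(&sc->mtx);
		return (ENXIO);
	}
	mtx_unlock(&sc->mtx);
	return (0);
}
#endif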
3913  
3914  int32_t
3915  xpt_bus_deregister(path_id_t pathid)
3916  {
3917  	struct cam_path bus_path;
3918  	cam_status status;
3919  
3920  	status = xpt_compile_path(&bus_path, NULL, pathid,
3921  				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3922  	if (status != CAM_REQ_CMP)
3923  		return (status);
3924  
3925  	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
3926  	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
3927  
3928  	/* Release the reference count held while registered. */
3929  	xpt_release_bus(bus_path.bus);
3930  	xpt_release_path(&bus_path);
3931  
3932  	return (CAM_REQ_CMP);
3933  }
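/*
 * Example: the matching teardown in a SIM detach routine (a sketch;
 * "sc" is the hypothetical softc from the attach example above).
 */
#if 0
	mtx_lock(&sc->mtx);
	xpt_bus_deregister(cam_sim_path(sc->sim));
	cam_sim_free(sc->sim, /*free_devq*/TRUE);
	mtx_unlock(&sc->mtx);
#endif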
3934  
3935  static path_id_t
3936  xptnextfreepathid(void)
3937  {
3938  	struct cam_eb *bus;
3939  	path_id_t pathid;
3940  	const char *strval;
3941  
3942  	mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
3943  	pathid = 0;
3944  	bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3945  retry:
3946  	/* Find an unoccupied pathid */
3947  	while (bus != NULL && bus->path_id <= pathid) {
3948  		if (bus->path_id == pathid)
3949  			pathid++;
3950  		bus = TAILQ_NEXT(bus, links);
3951  	}
3952  
3953  	/*
3954  	 * Ensure that this pathid is not reserved for
3955  	 * a bus that may be registered in the future.
3956  	 */
3957  	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
3958  		++pathid;
3959  		/* Start the search over */
3960  		goto retry;
3961  	}
3962  	return (pathid);
3963  }
3964  
3965  static path_id_t
3966  xptpathid(const char *sim_name, int sim_unit, int sim_bus)
3967  {
3968  	path_id_t pathid;
3969  	int i, dunit, val;
3970  	char buf[32];
3971  	const char *dname;
3972  
3973  	pathid = CAM_XPT_PATH_ID;
3974  	snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
3975  	if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
3976  		return (pathid);
3977  	i = 0;
3978  	while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
3979  		if (strcmp(dname, "scbus")) {
3980  			/* Avoid a bit of foot shooting. */
3981  			/* Skip hints that wire something other than scbus. */
3982  		}
3983  		if (dunit < 0)		/* unwired?! */
3984  			continue;
3985  		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
3986  			if (sim_bus == val) {
3987  				pathid = dunit;
3988  				break;
3989  			}
3990  		} else if (sim_bus == 0) {
3991  			/* Unspecified matches bus 0 */
3992  			pathid = dunit;
3993  			break;
3994  		} else {
3995  			printf("Ambiguous scbus configuration for %s%d "
3996  			       "bus %d, cannot wire down.  The kernel "
3997  			       "config entry for scbus%d should "
3998  			       "specify a controller bus.\n"
3999  			       "Scbus will be assigned dynamically.\n",
4000  			       sim_name, sim_unit, sim_bus, dunit);
4001  			break;
4002  		}
4003  	}
4004  
4005  	if (pathid == CAM_XPT_PATH_ID)
4006  		pathid = xptnextfreepathid();
4007  	return (pathid);
4008  }
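/*
 * Example: loader hints of the form parsed above, wiring scbus2 to
 * bus 0 of a hypothetical ahc0 controller (a sketch of
 * /boot/device.hints syntax, not part of this file):
 *
 *	hint.scbus.2.at="ahc0"
 *	hint.scbus.2.bus="0"
 *
 * With these hints, xptpathid("ahc", 0, 0) returns path_id 2.
 */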
4009  
4010  static const char *
4011  xpt_async_string(u_int32_t async_code)
4012  {
4013  
4014  	switch (async_code) {
4015  	case AC_BUS_RESET: return ("AC_BUS_RESET");
4016  	case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
4017  	case AC_SCSI_AEN: return ("AC_SCSI_AEN");
4018  	case AC_SENT_BDR: return ("AC_SENT_BDR");
4019  	case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
4020  	case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
4021  	case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
4022  	case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
4023  	case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
4024  	case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
4025  	case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
4026  	case AC_CONTRACT: return ("AC_CONTRACT");
4027  	case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
4028  	case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
4029  	}
4030  	return ("AC_UNKNOWN");
4031  }
4032  
4033  static int
4034  xpt_async_size(u_int32_t async_code)
4035  {
4036  
4037  	switch (async_code) {
4038  	case AC_BUS_RESET: return (0);
4039  	case AC_UNSOL_RESEL: return (0);
4040  	case AC_SCSI_AEN: return (0);
4041  	case AC_SENT_BDR: return (0);
4042  	case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
4043  	case AC_PATH_DEREGISTERED: return (0);
4044  	case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
4045  	case AC_LOST_DEVICE: return (0);
4046  	case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
4047  	case AC_INQ_CHANGED: return (0);
4048  	case AC_GETDEV_CHANGED: return (0);
4049  	case AC_CONTRACT: return (sizeof(struct ac_contract));
4050  	case AC_ADVINFO_CHANGED: return (-1);
4051  	case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
4052  	}
4053  	return (0);
4054  }
4055  
4056  static int
4057  xpt_async_process_dev(struct cam_ed *device, void *arg)
4058  {
4059  	union ccb *ccb = arg;
4060  	struct cam_path *path = ccb->ccb_h.path;
4061  	void *async_arg = ccb->casync.async_arg_ptr;
4062  	u_int32_t async_code = ccb->casync.async_code;
4063  	int relock;
4064  
4065  	if (path->device != device
4066  	 && path->device->lun_id != CAM_LUN_WILDCARD
4067  	 && device->lun_id != CAM_LUN_WILDCARD)
4068  		return (1);
4069  
4070  	/*
4071  	 * The async callback could free the device.
4072  	 * If it is a broadcast async, it doesn't hold a
4073  	 * device reference, so take our own reference.
4074  	 */
4075  	xpt_acquire_device(device);
4076  
4077  	/*
4078  	 * If an async for a specific device is to be delivered to
4079  	 * the wildcard client, take the specific device's lock.
4080  	 * XXX: We may need a way for the client to specify it.
4081  	 */
4082  	if ((device->lun_id == CAM_LUN_WILDCARD &&
4083  	     path->device->lun_id != CAM_LUN_WILDCARD) ||
4084  	    (device->target->target_id == CAM_TARGET_WILDCARD &&
4085  	     path->target->target_id != CAM_TARGET_WILDCARD) ||
4086  	    (device->target->bus->path_id == CAM_BUS_WILDCARD &&
4087  	     path->target->bus->path_id != CAM_BUS_WILDCARD)) {
4088  		mtx_unlock(&device->device_mtx);
4089  		xpt_path_lock(path);
4090  		relock = 1;
4091  	} else
4092  		relock = 0;
4093  
4094  	(*(device->target->bus->xport->async))(async_code,
4095  	    device->target->bus, device->target, device, async_arg);
4096  	xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
4097  
4098  	if (relock) {
4099  		xpt_path_unlock(path);
4100  		mtx_lock(&device->device_mtx);
4101  	}
4102  	xpt_release_device(device);
4103  	return (1);
4104  }
4105  
4106  static int
4107  xpt_async_process_tgt(struct cam_et *target, void *arg)
4108  {
4109  	union ccb *ccb = arg;
4110  	struct cam_path *path = ccb->ccb_h.path;
4111  
4112  	if (path->target != target
4113  	 && path->target->target_id != CAM_TARGET_WILDCARD
4114  	 && target->target_id != CAM_TARGET_WILDCARD)
4115  		return (1);
4116  
4117  	if (ccb->casync.async_code == AC_SENT_BDR) {
4118  		/* Update our notion of when the last reset occurred */
4119  		microtime(&target->last_reset);
4120  	}
4121  
4122  	return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
4123  }
4124  
4125  static void
4126  xpt_async_process(struct cam_periph *periph, union ccb *ccb)
4127  {
4128  	struct cam_eb *bus;
4129  	struct cam_path *path;
4130  	void *async_arg;
4131  	u_int32_t async_code;
4132  
4133  	path = ccb->ccb_h.path;
4134  	async_code = ccb->casync.async_code;
4135  	async_arg = ccb->casync.async_arg_ptr;
4136  	CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
4137  	    ("xpt_async(%s)\n", xpt_async_string(async_code)));
4138  	bus = path->bus;
4139  
4140  	if (async_code == AC_BUS_RESET) {
4141  		/* Update our notion of when the last reset occurred */
4142  		microtime(&bus->last_reset);
4143  	}
4144  
4145  	xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
4146  
4147  	/*
4148  	 * If this wasn't a fully wildcarded async, tell all
4149  	 * clients that want all async events.
4150  	 */
4151  	if (bus != xpt_periph->path->bus) {
4152  		xpt_path_lock(xpt_periph->path);
4153  		xpt_async_process_dev(xpt_periph->path->device, ccb);
4154  		xpt_path_unlock(xpt_periph->path);
4155  	}
4156  
4157  	if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4158  		xpt_release_devq(path, 1, TRUE);
4159  	else
4160  		xpt_release_simq(path->bus->sim, TRUE);
4161  	if (ccb->casync.async_arg_size > 0)
4162  		free(async_arg, M_CAMXPT);
4163  	xpt_free_path(path);
4164  	xpt_free_ccb(ccb);
4165  }
4166  
4167  static void
4168  xpt_async_bcast(struct async_list *async_head,
4169  		u_int32_t async_code,
4170  		struct cam_path *path, void *async_arg)
4171  {
4172  	struct async_node *cur_entry;
4173  	int lock;
4174  
4175  	cur_entry = SLIST_FIRST(async_head);
4176  	while (cur_entry != NULL) {
4177  		struct async_node *next_entry;
4178  		/*
4179  		 * Grab the next list entry before we call the current
4180  		 * entry's callback.  This is because the callback function
4181  		 * can delete its async callback entry.
4182  		 */
4183  		next_entry = SLIST_NEXT(cur_entry, links);
4184  		if ((cur_entry->event_enable & async_code) != 0) {
4185  			lock = cur_entry->event_lock;
4186  			if (lock)
4187  				CAM_SIM_LOCK(path->device->sim);
4188  			cur_entry->callback(cur_entry->callback_arg,
4189  					    async_code, path,
4190  					    async_arg);
4191  			if (lock)
4192  				CAM_SIM_UNLOCK(path->device->sim);
4193  		}
4194  		cur_entry = next_entry;
4195  	}
4196  }
4197  
4198  void
4199  xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4200  {
4201  	union ccb *ccb;
4202  	int size;
4203  
4204  	ccb = xpt_alloc_ccb_nowait();
4205  	if (ccb == NULL) {
4206  		xpt_print(path, "Can't allocate CCB to send %s\n",
4207  		    xpt_async_string(async_code));
4208  		return;
4209  	}
4210  
4211  	if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) {
4212  		xpt_print(path, "Can't allocate path to send %s\n",
4213  		    xpt_async_string(async_code));
4214  		xpt_free_ccb(ccb);
4215  		return;
4216  	}
4217  	ccb->ccb_h.path->periph = NULL;
4218  	ccb->ccb_h.func_code = XPT_ASYNC;
4219  	ccb->ccb_h.cbfcnp = xpt_async_process;
4220  	ccb->ccb_h.flags |= CAM_UNLOCKED;
4221  	ccb->casync.async_code = async_code;
4222  	ccb->casync.async_arg_size = 0;
4223  	size = xpt_async_size(async_code);
4224  	if (size > 0 && async_arg != NULL) {
4225  		ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
4226  		if (ccb->casync.async_arg_ptr == NULL) {
4227  			xpt_print(path, "Can't allocate argument to send %s\n",
4228  			    xpt_async_string(async_code));
4229  			xpt_free_path(ccb->ccb_h.path);
4230  			xpt_free_ccb(ccb);
4231  			return;
4232  		}
4233  		memcpy(ccb->casync.async_arg_ptr, async_arg, size);
4234  		ccb->casync.async_arg_size = size;
4235  	} else if (size < 0) {
4236  		ccb->casync.async_arg_ptr = async_arg;
4237  		ccb->casync.async_arg_size = size;
4238  	}
4239  	if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4240  		xpt_freeze_devq(path, 1);
4241  	else
4242  		xpt_freeze_simq(path->bus->sim, 1);
4243  	xpt_done(ccb);
4244  }
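/*
 * Example: a minimal sketch of a SIM announcing a lost device
 * (hypothetical caller, not part of this file).  Freeing the path
 * right away is safe because xpt_async() clones it internally.
 */
#if 0
	struct cam_path *path;

	if (xpt_create_path(&path, /*periph*/NULL, cam_sim_path(sim),
	    target_id, CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
		xpt_async(AC_LOST_DEVICE, path, NULL);
		xpt_free_path(path);
	}
#endif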
4245  
4246  static void
4247  xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
4248  		      struct cam_et *target, struct cam_ed *device,
4249  		      void *async_arg)
4250  {
4251  
4252  	/*
4253  	 * We only need to handle events for real devices.
4254  	 */
4255  	if (target->target_id == CAM_TARGET_WILDCARD
4256  	 || device->lun_id == CAM_LUN_WILDCARD)
4257  		return;
4258  
4259  	printf("%s called\n", __func__);
4260  }
4261  
4262  static uint32_t
4263  xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
4264  {
4265  	struct cam_devq	*devq;
4266  	uint32_t freeze;
4267  
4268  	devq = dev->sim->devq;
4269  	mtx_assert(&devq->send_mtx, MA_OWNED);
4270  	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4271  	    ("xpt_freeze_devq_device(%d) %u->%u\n", count,
4272  	    dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
4273  	freeze = (dev->ccbq.queue.qfrozen_cnt += count);
4274  	/* Remove frozen device from sendq. */
4275  	if (device_is_queued(dev))
4276  		camq_remove(&devq->send_queue, dev->devq_entry.index);
4277  	return (freeze);
4278  }
4279  
4280  u_int32_t
4281  xpt_freeze_devq(struct cam_path *path, u_int count)
4282  {
4283  	struct cam_ed	*dev = path->device;
4284  	struct cam_devq	*devq;
4285  	uint32_t	 freeze;
4286  
4287  	devq = dev->sim->devq;
4288  	mtx_lock(&devq->send_mtx);
4289  	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
4290  	freeze = xpt_freeze_devq_device(dev, count);
4291  	mtx_unlock(&devq->send_mtx);
4292  	return (freeze);
4293  }
4294  
4295  u_int32_t
4296  xpt_freeze_simq(struct cam_sim *sim, u_int count)
4297  {
4298  	struct cam_devq	*devq;
4299  	uint32_t	 freeze;
4300  
4301  	devq = sim->devq;
4302  	mtx_lock(&devq->send_mtx);
4303  	freeze = (devq->send_queue.qfrozen_cnt += count);
4304  	mtx_unlock(&devq->send_mtx);
4305  	return (freeze);
4306  }
4307  
4308  static void
4309  xpt_release_devq_timeout(void *arg)
4310  {
4311  	struct cam_ed *dev;
4312  	struct cam_devq *devq;
4313  
4314  	dev = (struct cam_ed *)arg;
4315  	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
4316  	devq = dev->sim->devq;
4317  	mtx_assert(&devq->send_mtx, MA_OWNED);
4318  	if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
4319  		xpt_run_devq(devq);
4320  }
4321  
4322  void
4323  xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4324  {
4325  	struct cam_ed *dev;
4326  	struct cam_devq *devq;
4327  
4328  	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
4329  	    count, run_queue));
4330  	dev = path->device;
4331  	devq = dev->sim->devq;
4332  	mtx_lock(&devq->send_mtx);
4333  	if (xpt_release_devq_device(dev, count, run_queue))
4334  		xpt_run_devq(dev->sim->devq);
4335  	mtx_unlock(&devq->send_mtx);
4336  }
4337  
4338  static int
4339  xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4340  {
4341  
4342  	mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
4343  	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4344  	    ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
4345  	    dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
4346  	if (count > dev->ccbq.queue.qfrozen_cnt) {
4347  #ifdef INVARIANTS
4348  		printf("xpt_release_devq(): requested %u > present %u\n",
4349  		    count, dev->ccbq.queue.qfrozen_cnt);
4350  #endif
4351  		count = dev->ccbq.queue.qfrozen_cnt;
4352  	}
4353  	dev->ccbq.queue.qfrozen_cnt -= count;
4354  	if (dev->ccbq.queue.qfrozen_cnt == 0) {
4355  		/*
4356  		 * No longer need to wait for a successful
4357  		 * command completion.
4358  		 */
4359  		dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4360  		/*
4361  		 * Remove any timeouts that might be scheduled
4362  		 * to release this queue.
4363  		 */
4364  		if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4365  			callout_stop(&dev->callout);
4366  			dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4367  		}
4368  		/*
4369  		 * Now that we are unfrozen, schedule the
4370  		 * device so that any pending transactions
4371  		 * are run.
4372  		 */
4373  		xpt_schedule_devq(dev->sim->devq, dev);
4374  	} else
4375  		run_queue = 0;
4376  	return (run_queue);
4377  }
4378  
4379  void
4380  xpt_release_simq(struct cam_sim *sim, int run_queue)
4381  {
4382  	struct cam_devq	*devq;
4383  
4384  	devq = sim->devq;
4385  	mtx_lock(&devq->send_mtx);
4386  	if (devq->send_queue.qfrozen_cnt <= 0) {
4387  #ifdef INVARIANTS
4388  		printf("xpt_release_simq: requested 1 > present %u\n",
4389  		    devq->send_queue.qfrozen_cnt);
4390  #endif
4391  	} else
4392  		devq->send_queue.qfrozen_cnt--;
4393  	if (devq->send_queue.qfrozen_cnt == 0) {
4394  		/*
4395  		 * If there is a timeout scheduled to release this
4396  		 * sim queue, remove it.  The queue frozen count is
4397  		 * already at 0.
4398  		 */
4399  		if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4400  			callout_stop(&sim->callout);
4401  			sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4402  		}
4403  		if (run_queue) {
4404  			/*
4405  			 * Now that we are unfrozen run the send queue.
4406  			 */
4407  			xpt_run_devq(sim->devq);
4408  		}
4409  	}
4410  	mtx_unlock(&devq->send_mtx);
4411  }
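/*
 * Example: a minimal sketch of pausing a SIM's queue around a
 * controller reinitialization (hypothetical caller, not part of this
 * file).  Each xpt_freeze_simq() must be balanced by one
 * xpt_release_simq().
 */
#if 0
	xpt_freeze_simq(sim, /*count*/1);
	/* ... reset and reinitialize the hardware ... */
	xpt_release_simq(sim, /*run_queue*/TRUE);
#endif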
4412  
4413  /*
4414   * XXX Appears to be unused.
4415   */
4416  static void
4417  xpt_release_simq_timeout(void *arg)
4418  {
4419  	struct cam_sim *sim;
4420  
4421  	sim = (struct cam_sim *)arg;
4422  	xpt_release_simq(sim, /* run_queue */ TRUE);
4423  }
4424  
4425  void
4426  xpt_done(union ccb *done_ccb)
4427  {
4428  	struct cam_doneq *queue;
4429  	int	run, hash;
4430  
4431  	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4432  	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4433  		return;
4434  
4435  	hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
4436  	    done_ccb->ccb_h.target_lun) % cam_num_doneqs;
4437  	queue = &cam_doneqs[hash];
4438  	mtx_lock(&queue->cam_doneq_mtx);
4439  	run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
4440  	STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
4441  	done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4442  	mtx_unlock(&queue->cam_doneq_mtx);
4443  	if (run)
4444  		wakeup(&queue->cam_doneq);
4445  }
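/*
 * Example: a minimal sketch of a SIM completing a CCB from its
 * interrupt handler (hypothetical caller; "hw_error" is an invented
 * flag, not part of this file).
 */
#if 0
	if (hw_error) {
		/* Freeze the devq so recovery can run before new I/O. */
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR | CAM_DEV_QFRZN;
	} else
		ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
#endif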
4446  
4447  void
4448  xpt_done_direct(union ccb *done_ccb)
4449  {
4450  
4451  	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done_direct\n"));
4452  	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4453  		return;
4454  
4455  	xpt_done_process(&done_ccb->ccb_h);
4456  }
4457  
4458  union ccb *
4459  xpt_alloc_ccb(void)
4460  {
4461  	union ccb *new_ccb;
4462  
4463  	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4464  	return (new_ccb);
4465  }
4466  
4467  union ccb *
4468  xpt_alloc_ccb_nowait(void)
4469  {
4470  	union ccb *new_ccb;
4471  
4472  	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4473  	return (new_ccb);
4474  }
4475  
4476  void
4477  xpt_free_ccb(union ccb *free_ccb)
4478  {
4479  	free(free_ccb, M_CAMCCB);
4480  }
4481  
4484  /* Private XPT functions */
4485  
4486  /*
4487   * Get a CAM control block for the caller. Charge the structure to the device
4488   * referenced by the path.  If we don't have sufficient resources to allocate
4489   * more ccbs, we return NULL.
4490   */
4491  static union ccb *
4492  xpt_get_ccb_nowait(struct cam_periph *periph)
4493  {
4494  	union ccb *new_ccb;
4495  
4496  	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4497  	if (new_ccb == NULL)
4498  		return (NULL);
4499  	periph->periph_allocated++;
4500  	cam_ccbq_take_opening(&periph->path->device->ccbq);
4501  	return (new_ccb);
4502  }
4503  
4504  static union ccb *
4505  xpt_get_ccb(struct cam_periph *periph)
4506  {
4507  	union ccb *new_ccb;
4508  
4509  	cam_periph_unlock(periph);
4510  	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4511  	cam_periph_lock(periph);
4512  	periph->periph_allocated++;
4513  	cam_ccbq_take_opening(&periph->path->device->ccbq);
4514  	return (new_ccb);
4515  }
4516  
4517  union ccb *
4518  cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
4519  {
4520  	struct ccb_hdr *ccb_h;
4521  
4522  	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
4523  	cam_periph_assert(periph, MA_OWNED);
4524  	while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
4525  	    ccb_h->pinfo.priority != priority) {
4526  		if (priority < periph->immediate_priority) {
4527  			periph->immediate_priority = priority;
4528  			xpt_run_allocq(periph, 0);
4529  		} else
4530  			cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
4531  			    "cgticb", 0);
4532  	}
4533  	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
4534  	return ((union ccb *)ccb_h);
4535  }
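/*
 * Example: a minimal sketch of a peripheral driver obtaining, using,
 * and returning a CCB (hypothetical caller; the periph lock is assumed
 * held, as asserted above).
 */
#if 0
	union ccb *ccb;

	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
	/* ... fill out the request and pass it to xpt_action() ... */
	xpt_release_ccb(ccb);
#endif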
4536  
4537  static void
4538  xpt_acquire_bus(struct cam_eb *bus)
4539  {
4540  
4541  	xpt_lock_buses();
4542  	bus->refcount++;
4543  	xpt_unlock_buses();
4544  }
4545  
4546  static void
4547  xpt_release_bus(struct cam_eb *bus)
4548  {
4549  
4550  	xpt_lock_buses();
4551  	KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
4552  	if (--bus->refcount > 0) {
4553  		xpt_unlock_buses();
4554  		return;
4555  	}
4556  	TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4557  	xsoftc.bus_generation++;
4558  	xpt_unlock_buses();
4559  	KASSERT(TAILQ_EMPTY(&bus->et_entries),
4560  	    ("destroying bus, but target list is not empty"));
4561  	cam_sim_release(bus->sim);
4562  	mtx_destroy(&bus->eb_mtx);
4563  	free(bus, M_CAMXPT);
4564  }
4565  
4566  static struct cam_et *
4567  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4568  {
4569  	struct cam_et *cur_target, *target;
4570  
4571  	mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4572  	mtx_assert(&bus->eb_mtx, MA_OWNED);
4573  	target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
4574  					 M_NOWAIT|M_ZERO);
4575  	if (target == NULL)
4576  		return (NULL);
4577  
4578  	TAILQ_INIT(&target->ed_entries);
4579  	target->bus = bus;
4580  	target->target_id = target_id;
4581  	target->refcount = 1;
4582  	target->generation = 0;
4583  	target->luns = NULL;
4584  	mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
4585  	timevalclear(&target->last_reset);
4586  	/*
4587  	 * Hold a reference to our parent bus so it
4588  	 * will not go away before we do.
4589  	 */
4590  	bus->refcount++;
4591  
4592  	/* Insertion sort into our bus's target list */
4593  	cur_target = TAILQ_FIRST(&bus->et_entries);
4594  	while (cur_target != NULL && cur_target->target_id < target_id)
4595  		cur_target = TAILQ_NEXT(cur_target, links);
4596  	if (cur_target != NULL) {
4597  		TAILQ_INSERT_BEFORE(cur_target, target, links);
4598  	} else {
4599  		TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4600  	}
4601  	bus->generation++;
4602  	return (target);
4603  }
4604  
4605  static void
4606  xpt_acquire_target(struct cam_et *target)
4607  {
4608  	struct cam_eb *bus = target->bus;
4609  
4610  	mtx_lock(&bus->eb_mtx);
4611  	target->refcount++;
4612  	mtx_unlock(&bus->eb_mtx);
4613  }
4614  
4615  static void
4616  xpt_release_target(struct cam_et *target)
4617  {
4618  	struct cam_eb *bus = target->bus;
4619  
4620  	mtx_lock(&bus->eb_mtx);
4621  	if (--target->refcount > 0) {
4622  		mtx_unlock(&bus->eb_mtx);
4623  		return;
4624  	}
4625  	TAILQ_REMOVE(&bus->et_entries, target, links);
4626  	bus->generation++;
4627  	mtx_unlock(&bus->eb_mtx);
4628  	KASSERT(TAILQ_EMPTY(&target->ed_entries),
4629  	    ("destroying target, but device list is not empty"));
4630  	xpt_release_bus(bus);
4631  	mtx_destroy(&target->luns_mtx);
4632  	if (target->luns)
4633  		free(target->luns, M_CAMXPT);
4634  	free(target, M_CAMXPT);
4635  }
4636  
4637  static struct cam_ed *
4638  xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
4639  			 lun_id_t lun_id)
4640  {
4641  	struct cam_ed *device;
4642  
4643  	device = xpt_alloc_device(bus, target, lun_id);
4644  	if (device == NULL)
4645  		return (NULL);
4646  
4647  	device->mintags = 1;
4648  	device->maxtags = 1;
4649  	return (device);
4650  }
4651  
4652  static void
4653  xpt_destroy_device(void *context, int pending)
4654  {
4655  	struct cam_ed	*device = context;
4656  
4657  	mtx_lock(&device->device_mtx);
4658  	mtx_destroy(&device->device_mtx);
4659  	free(device, M_CAMDEV);
4660  }
4661  
4662  struct cam_ed *
4663  xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4664  {
4665  	struct cam_ed	*cur_device, *device;
4666  	struct cam_devq	*devq;
4667  	cam_status status;
4668  
4669  	mtx_assert(&bus->eb_mtx, MA_OWNED);
4670  	/* Make space for us in the device queue on our bus */
4671  	devq = bus->sim->devq;
4672  	mtx_lock(&devq->send_mtx);
4673  	status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
4674  	mtx_unlock(&devq->send_mtx);
4675  	if (status != CAM_REQ_CMP)
4676  		return (NULL);
4677  
4678  	device = (struct cam_ed *)malloc(sizeof(*device),
4679  					 M_CAMDEV, M_NOWAIT|M_ZERO);
4680  	if (device == NULL)
4681  		return (NULL);
4682  
4683  	cam_init_pinfo(&device->devq_entry);
4684  	device->target = target;
4685  	device->lun_id = lun_id;
4686  	device->sim = bus->sim;
4687  	if (cam_ccbq_init(&device->ccbq,
4688  			  bus->sim->max_dev_openings) != 0) {
4689  		free(device, M_CAMDEV);
4690  		return (NULL);
4691  	}
4692  	SLIST_INIT(&device->asyncs);
4693  	SLIST_INIT(&device->periphs);
4694  	device->generation = 0;
4695  	device->flags = CAM_DEV_UNCONFIGURED;
4696  	device->tag_delay_count = 0;
4697  	device->tag_saved_openings = 0;
4698  	device->refcount = 1;
4699  	mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
4700  	callout_init_mtx(&device->callout, &devq->send_mtx, 0);
4701  	TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
4702  	/*
4703  	 * Hold a reference to our parent target so it
4704  	 * will not go away before we do.
4705  	 */
4706  	target->refcount++;
4707  
4708  	cur_device = TAILQ_FIRST(&target->ed_entries);
4709  	while (cur_device != NULL && cur_device->lun_id < lun_id)
4710  		cur_device = TAILQ_NEXT(cur_device, links);
4711  	if (cur_device != NULL)
4712  		TAILQ_INSERT_BEFORE(cur_device, device, links);
4713  	else
4714  		TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4715  	target->generation++;
4716  	return (device);
4717  }
4718  
4719  void
4720  xpt_acquire_device(struct cam_ed *device)
4721  {
4722  	struct cam_eb *bus = device->target->bus;
4723  
4724  	mtx_lock(&bus->eb_mtx);
4725  	device->refcount++;
4726  	mtx_unlock(&bus->eb_mtx);
4727  }
4728  
4729  void
4730  xpt_release_device(struct cam_ed *device)
4731  {
4732  	struct cam_eb *bus = device->target->bus;
4733  	struct cam_devq *devq;
4734  
4735  	mtx_lock(&bus->eb_mtx);
4736  	if (--device->refcount > 0) {
4737  		mtx_unlock(&bus->eb_mtx);
4738  		return;
4739  	}
4740  
4741  	TAILQ_REMOVE(&device->target->ed_entries, device,links);
4742  	device->target->generation++;
4743  	mtx_unlock(&bus->eb_mtx);
4744  
4745  	/* Release our slot in the devq */
4746  	devq = bus->sim->devq;
4747  	mtx_lock(&devq->send_mtx);
4748  	cam_devq_resize(devq, devq->send_queue.array_size - 1);
4749  	mtx_unlock(&devq->send_mtx);
4750  
4751  	KASSERT(SLIST_EMPTY(&device->periphs),
4752  	    ("destroying device, but periphs list is not empty"));
4753  	KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
4754  	    ("destroying device while still queued for ccbs"));
4755  
4756  	if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4757  		callout_stop(&device->callout);
4758  
4759  	xpt_release_target(device->target);
4760  
4761  	cam_ccbq_fini(&device->ccbq);
4762  	/*
4763  	 * Free allocated memory.  free(9) does nothing if the
4764  	 * supplied pointer is NULL, so it is safe to call without
4765  	 * checking.
4766  	 */
4767  	free(device->supported_vpds, M_CAMXPT);
4768  	free(device->device_id, M_CAMXPT);
4769  	free(device->ext_inq, M_CAMXPT);
4770  	free(device->physpath, M_CAMXPT);
4771  	free(device->rcap_buf, M_CAMXPT);
4772  	free(device->serial_num, M_CAMXPT);
4773  	taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
4774  }
4775  
4776  u_int32_t
4777  xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4778  {
4779  	int	result;
4780  	struct	cam_ed *dev;
4781  
4782  	dev = path->device;
4783  	mtx_lock(&dev->sim->devq->send_mtx);
4784  	result = cam_ccbq_resize(&dev->ccbq, newopenings);
4785  	mtx_unlock(&dev->sim->devq->send_mtx);
4786  	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
4787  	 || (dev->inq_flags & SID_CmdQue) != 0)
4788  		dev->tag_saved_openings = newopenings;
4789  	return (result);
4790  }
4791  
4792  static struct cam_eb *
4793  xpt_find_bus(path_id_t path_id)
4794  {
4795  	struct cam_eb *bus;
4796  
4797  	xpt_lock_buses();
4798  	for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4799  	     bus != NULL;
4800  	     bus = TAILQ_NEXT(bus, links)) {
4801  		if (bus->path_id == path_id) {
4802  			bus->refcount++;
4803  			break;
4804  		}
4805  	}
4806  	xpt_unlock_buses();
4807  	return (bus);
4808  }
4809  
4810  static struct cam_et *
4811  xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
4812  {
4813  	struct cam_et *target;
4814  
4815  	mtx_assert(&bus->eb_mtx, MA_OWNED);
4816  	for (target = TAILQ_FIRST(&bus->et_entries);
4817  	     target != NULL;
4818  	     target = TAILQ_NEXT(target, links)) {
4819  		if (target->target_id == target_id) {
4820  			target->refcount++;
4821  			break;
4822  		}
4823  	}
4824  	return (target);
4825  }
4826  
4827  static struct cam_ed *
4828  xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4829  {
4830  	struct cam_ed *device;
4831  
4832  	mtx_assert(&target->bus->eb_mtx, MA_OWNED);
4833  	for (device = TAILQ_FIRST(&target->ed_entries);
4834  	     device != NULL;
4835  	     device = TAILQ_NEXT(device, links)) {
4836  		if (device->lun_id == lun_id) {
4837  			device->refcount++;
4838  			break;
4839  		}
4840  	}
4841  	return (device);
4842  }
4843  
4844  void
4845  xpt_start_tags(struct cam_path *path)
4846  {
4847  	struct ccb_relsim crs;
4848  	struct cam_ed *device;
4849  	struct cam_sim *sim;
4850  	int    newopenings;
4851  
4852  	device = path->device;
4853  	sim = path->bus->sim;
4854  	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4855  	xpt_freeze_devq(path, /*count*/1);
4856  	device->inq_flags |= SID_CmdQue;
4857  	if (device->tag_saved_openings != 0)
4858  		newopenings = device->tag_saved_openings;
4859  	else
4860  		newopenings = min(device->maxtags,
4861  				  sim->max_tagged_dev_openings);
4862  	xpt_dev_ccbq_resize(path, newopenings);
4863  	xpt_async(AC_GETDEV_CHANGED, path, NULL);
4864  	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4865  	crs.ccb_h.func_code = XPT_REL_SIMQ;
4866  	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4867  	crs.openings
4868  	    = crs.release_timeout
4869  	    = crs.qfrozen_cnt
4870  	    = 0;
4871  	xpt_action((union ccb *)&crs);
4872  }
4873  
4874  void
4875  xpt_stop_tags(struct cam_path *path)
4876  {
4877  	struct ccb_relsim crs;
4878  	struct cam_ed *device;
4879  	struct cam_sim *sim;
4880  
4881  	device = path->device;
4882  	sim = path->bus->sim;
4883  	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4884  	device->tag_delay_count = 0;
4885  	xpt_freeze_devq(path, /*count*/1);
4886  	device->inq_flags &= ~SID_CmdQue;
4887  	xpt_dev_ccbq_resize(path, sim->max_dev_openings);
4888  	xpt_async(AC_GETDEV_CHANGED, path, NULL);
4889  	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4890  	crs.ccb_h.func_code = XPT_REL_SIMQ;
4891  	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4892  	crs.openings
4893  	    = crs.release_timeout
4894  	    = crs.qfrozen_cnt
4895  	    = 0;
4896  	xpt_action((union ccb *)&crs);
4897  }
4898  
4899  static void
4900  xpt_boot_delay(void *arg)
4901  {
4902  
4903  	xpt_release_boot();
4904  }
4905  
4906  static void
4907  xpt_config(void *arg)
4908  {
4909  	/*
4910  	 * Now that interrupts are enabled, go find our devices
4911  	 */
4912  	if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
4913  		printf("xpt_config: failed to create taskqueue thread.\n");
4914  
4915  	/* Setup debugging path */
4916  	if (cam_dflags != CAM_DEBUG_NONE) {
4917  		if (xpt_create_path(&cam_dpath, NULL,
4918  				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
4919  				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
4920  			printf("xpt_config: xpt_create_path() failed for debug"
4921  			       " target %d:%d:%d, debugging disabled\n",
4922  			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
4923  			cam_dflags = CAM_DEBUG_NONE;
4924  		}
4925  	} else
4926  		cam_dpath = NULL;
4927  
4928  	periphdriver_init(1);
4929  	xpt_hold_boot();
4930  	callout_init(&xsoftc.boot_callout, 1);
4931  	callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0,
4932  	    xpt_boot_delay, NULL, 0);
4933  	/* Fire up rescan thread. */
4934  	if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
4935  	    "cam", "scanner")) {
4936  		printf("xpt_config: failed to create rescan thread.\n");
4937  	}
4938  }
4939  
4940  void
4941  xpt_hold_boot(void)
4942  {
4943  	xpt_lock_buses();
4944  	xsoftc.buses_to_config++;
4945  	xpt_unlock_buses();
4946  }
4947  
4948  void
4949  xpt_release_boot(void)
4950  {
4951  	xpt_lock_buses();
4952  	xsoftc.buses_to_config--;
4953  	if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
4954  		struct	xpt_task *task;
4955  
4956  		xsoftc.buses_config_done = 1;
4957  		xpt_unlock_buses();
4958  		/* Call manually because we don't have any busses */
4959  		task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
4960  		if (task != NULL) {
4961  			TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
4962  			taskqueue_enqueue(taskqueue_thread, &task->task);
4963  		}
4964  	} else
4965  		xpt_unlock_buses();
4966  }
4967  
4968  /*
4969   * If the given device only has one peripheral attached to it, and if that
4970   * peripheral is the passthrough driver, announce it.  This ensures that the
4971   * user sees some sort of announcement for every peripheral in their system.
4972   */
4973  static int
4974  xptpassannouncefunc(struct cam_ed *device, void *arg)
4975  {
4976  	struct cam_periph *periph;
4977  	int i;
4978  
4979  	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
4980  	     periph = SLIST_NEXT(periph, periph_links), i++);
4981  
4982  	periph = SLIST_FIRST(&device->periphs);
4983  	if ((i == 1)
4984  	 && (strncmp(periph->periph_name, "pass", 4) == 0))
4985  		xpt_announce_periph(periph, NULL);
4986  
4987  	return(1);
4988  }
4989  
4990  static void
4991  xpt_finishconfig_task(void *context, int pending)
4992  {
4993  
4994  	periphdriver_init(2);
4995  	/*
4996  	 * Check for devices with no "standard" peripheral driver
4997  	 * attached.  For any devices like that, announce the
4998  	 * passthrough driver so the user will see something.
4999  	 */
5000  	if (!bootverbose)
5001  		xpt_for_all_devices(xptpassannouncefunc, NULL);
5002  
5003  	/* Release our hook so that the boot can continue. */
5004  	config_intrhook_disestablish(xsoftc.xpt_config_hook);
5005  	free(xsoftc.xpt_config_hook, M_CAMXPT);
5006  	xsoftc.xpt_config_hook = NULL;
5007  
5008  	free(context, M_CAMXPT);
5009  }
5010  
5011  cam_status
5012  xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
5013  		   struct cam_path *path)
5014  {
5015  	struct ccb_setasync csa;
5016  	cam_status status;
5017  	int xptpath = 0;
5018  
5019  	if (path == NULL) {
5020  		status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
5021  					 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
5022  		if (status != CAM_REQ_CMP)
5023  			return (status);
5024  		xpt_path_lock(path);
5025  		xptpath = 1;
5026  	}
5027  
5028  	xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
5029  	csa.ccb_h.func_code = XPT_SASYNC_CB;
5030  	csa.event_enable = event;
5031  	csa.callback = cbfunc;
5032  	csa.callback_arg = cbarg;
5033  	xpt_action((union ccb *)&csa);
5034  	status = csa.ccb_h.status;
5035  
5036  	if (xptpath) {
5037  		xpt_path_unlock(path);
5038  		xpt_free_path(path);
5039  	}
5040  
5041  	if ((status == CAM_REQ_CMP) &&
5042  	    (csa.event_enable & AC_FOUND_DEVICE)) {
5043  		/*
5044  		 * Get this peripheral up to date with all
5045  		 * the currently existing devices.
5046  		 */
5047  		xpt_for_all_devices(xptsetasyncfunc, &csa);
5048  	}
5049  	if ((status == CAM_REQ_CMP) &&
5050  	    (csa.event_enable & AC_PATH_REGISTERED)) {
5051  		/*
5052  		 * Get this peripheral up to date with all
5053  		 * the currently existing busses.
5054  		 */
5055  		xpt_for_all_busses(xptsetasyncbusfunc, &csa);
5056  	}
5057  
5058  	return (status);
5059  }
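/*
 * Example: a minimal sketch of subscribing to device arrival events
 * (hypothetical subscriber; "example_async_cb" and "softc" are
 * invented names, not part of this file).
 */
#if 0
static void
example_async_cb(void *cbarg, u_int32_t code, struct cam_path *path,
    void *arg)
{
	if (code == AC_FOUND_DEVICE) {
		/* "arg" points to a struct ccb_getdev for the device. */
	}
}

	/* In the subscriber's setup code: */
	status = xpt_register_async(AC_FOUND_DEVICE, example_async_cb,
	    softc, /*path*/NULL);
#endif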
5060  
5061  static void
5062  xptaction(struct cam_sim *sim, union ccb *work_ccb)
5063  {
5064  	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
5065  
5066  	switch (work_ccb->ccb_h.func_code) {
5067  	/* Common cases first */
5068  	case XPT_PATH_INQ:		/* Path routing inquiry */
5069  	{
5070  		struct ccb_pathinq *cpi;
5071  
5072  		cpi = &work_ccb->cpi;
5073  		cpi->version_num = 1; /* XXX??? */
5074  		cpi->hba_inquiry = 0;
5075  		cpi->target_sprt = 0;
5076  		cpi->hba_misc = 0;
5077  		cpi->hba_eng_cnt = 0;
5078  		cpi->max_target = 0;
5079  		cpi->max_lun = 0;
5080  		cpi->initiator_id = 0;
5081  		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5082  		strncpy(cpi->hba_vid, "", HBA_IDLEN);
5083  		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
5084  		cpi->unit_number = sim->unit_number;
5085  		cpi->bus_id = sim->bus_id;
5086  		cpi->base_transfer_speed = 0;
5087  		cpi->protocol = PROTO_UNSPECIFIED;
5088  		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
5089  		cpi->transport = XPORT_UNSPECIFIED;
5090  		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
5091  		cpi->ccb_h.status = CAM_REQ_CMP;
5092  		xpt_done(work_ccb);
5093  		break;
5094  	}
5095  	default:
5096  		work_ccb->ccb_h.status = CAM_REQ_INVALID;
5097  		xpt_done(work_ccb);
5098  		break;
5099  	}
5100  }
5101  
5102  /*
5103   * The xpt as a "controller" has no interrupt sources, so polling
5104   * is a no-op.
5105   */
5106  static void
5107  xptpoll(struct cam_sim *sim)
5108  {
5109  }
5110  
5111  void
5112  xpt_lock_buses(void)
5113  {
5114  	mtx_lock(&xsoftc.xpt_topo_lock);
5115  }
5116  
5117  void
5118  xpt_unlock_buses(void)
5119  {
5120  	mtx_unlock(&xsoftc.xpt_topo_lock);
5121  }
5122  
5123  struct mtx *
5124  xpt_path_mtx(struct cam_path *path)
5125  {
5126  
5127  	return (&path->device->device_mtx);
5128  }
5129  
5130  static void
5131  xpt_done_process(struct ccb_hdr *ccb_h)
5132  {
5133  	struct cam_sim *sim;
5134  	struct cam_devq *devq;
5135  	struct mtx *mtx = NULL;
5136  
5137  	if (ccb_h->flags & CAM_HIGH_POWER) {
5138  		struct highpowerlist	*hphead;
5139  		struct cam_ed		*device;
5140  
5141  		mtx_lock(&xsoftc.xpt_highpower_lock);
5142  		hphead = &xsoftc.highpowerq;
5143  
5144  		device = STAILQ_FIRST(hphead);
5145  
5146  		/*
5147  		 * This command is done; free up its high powered slot.
5148  		 */
5149  		xsoftc.num_highpower++;
5150  
5151  		/*
5152  		 * Any high powered commands queued up?
5153  		 */
5154  		if (device != NULL) {
5156  			STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
5157  			mtx_unlock(&xsoftc.xpt_highpower_lock);
5158  
5159  			mtx_lock(&device->sim->devq->send_mtx);
5160  			xpt_release_devq_device(device,
5161  					 /*count*/1, /*runqueue*/TRUE);
5162  			mtx_unlock(&device->sim->devq->send_mtx);
5163  		} else
5164  			mtx_unlock(&xsoftc.xpt_highpower_lock);
5165  	}
5166  
5167  	sim = ccb_h->path->bus->sim;
5168  
5169  	if (ccb_h->status & CAM_RELEASE_SIMQ) {
5170  		xpt_release_simq(sim, /*run_queue*/FALSE);
5171  		ccb_h->status &= ~CAM_RELEASE_SIMQ;
5172  	}
5173  
5174  	if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5175  	 && (ccb_h->status & CAM_DEV_QFRZN)) {
5176  		xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
5177  		ccb_h->status &= ~CAM_DEV_QFRZN;
5178  	}
5179  
5180  	devq = sim->devq;
5181  	if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
5182  		struct cam_ed *dev = ccb_h->path->device;
5183  
5184  		mtx_lock(&devq->send_mtx);
5185  		devq->send_active--;
5186  		devq->send_openings++;
5187  		cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5188  
5189  		if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5190  		  && (dev->ccbq.dev_active == 0))) {
5191  			dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
5192  			xpt_release_devq_device(dev, /*count*/1,
5193  					 /*run_queue*/FALSE);
5194  		}
5195  
5196  		if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5197  		  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
5198  			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
5199  			xpt_release_devq_device(dev, /*count*/1,
5200  					 /*run_queue*/FALSE);
5201  		}
5202  
5203  		if (!device_is_queued(dev))
5204  			(void)xpt_schedule_devq(devq, dev);
5205  		xpt_run_devq(devq);
5206  		mtx_unlock(&devq->send_mtx);
5207  
5208  		if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
5209  			mtx = xpt_path_mtx(ccb_h->path);
5210  			mtx_lock(mtx);
5211  
5212  			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5213  			 && (--dev->tag_delay_count == 0))
5214  				xpt_start_tags(ccb_h->path);
5215  		}
5216  	}
5217  
5218  	if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
5219  		if (mtx == NULL) {
5220  			mtx = xpt_path_mtx(ccb_h->path);
5221  			mtx_lock(mtx);
5222  		}
5223  	} else {
5224  		if (mtx != NULL) {
5225  			mtx_unlock(mtx);
5226  			mtx = NULL;
5227  		}
5228  	}
5229  
5230  	/* Call the peripheral driver's callback */
5231  	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
5232  	(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
5233  	if (mtx != NULL)
5234  		mtx_unlock(mtx);
5235  }
5236  
5237  void
5238  xpt_done_td(void *arg)
5239  {
5240  	struct cam_doneq *queue = arg;
5241  	struct ccb_hdr *ccb_h;
5242  	STAILQ_HEAD(, ccb_hdr)	doneq;
5243  
5244  	STAILQ_INIT(&doneq);
5245  	mtx_lock(&queue->cam_doneq_mtx);
5246  	while (1) {
5247  		while (STAILQ_EMPTY(&queue->cam_doneq)) {
5248  			queue->cam_doneq_sleep = 1;
5249  			msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5250  			    PRIBIO, "-", 0);
5251  			queue->cam_doneq_sleep = 0;
5252  		}
5253  		STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5254  		mtx_unlock(&queue->cam_doneq_mtx);
5255  
5256  		THREAD_NO_SLEEPING();
5257  		while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5258  			STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5259  			xpt_done_process(ccb_h);
5260  		}
5261  		THREAD_SLEEPING_OK();
5262  
5263  		mtx_lock(&queue->cam_doneq_mtx);
5264  	}
5265  }
5266  
5267  static void
5268  camisr_runqueue(void)
5269  {
5270  	struct	ccb_hdr *ccb_h;
5271  	struct cam_doneq *queue;
5272  	int i;
5273  
5274  	/* Process global queues. */
5275  	for (i = 0; i < cam_num_doneqs; i++) {
5276  		queue = &cam_doneqs[i];
5277  		mtx_lock(&queue->cam_doneq_mtx);
5278  		while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
5279  			STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
5280  			mtx_unlock(&queue->cam_doneq_mtx);
5281  			xpt_done_process(ccb_h);
5282  			mtx_lock(&queue->cam_doneq_mtx);
5283  		}
5284  		mtx_unlock(&queue->cam_doneq_mtx);
5285  	}
5286  }
5287