xref: /freebsd/sys/cam/cam_xpt.c (revision 1a7151f79664644b2e7c8e69427be8b846e9c1a4)
1 /*-
2  * Implementation of the Common Access Method Transport (XPT) layer.
3  *
4  * SPDX-License-Identifier: BSD-2-Clause
5  *
6  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
7  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions, and the following disclaimer,
15  *    without modification, immediately at the beginning of the file.
16  * 2. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include "opt_printf.h"
33 
34 #include <sys/param.h>
35 #include <sys/bio.h>
36 #include <sys/bus.h>
37 #include <sys/systm.h>
38 #include <sys/types.h>
39 #include <sys/malloc.h>
40 #include <sys/kernel.h>
41 #include <sys/time.h>
42 #include <sys/conf.h>
43 #include <sys/fcntl.h>
44 #include <sys/proc.h>
45 #include <sys/sbuf.h>
46 #include <sys/smp.h>
47 #include <sys/stdarg.h>
48 #include <sys/taskqueue.h>
49 
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/sysctl.h>
53 #include <sys/kthread.h>
54 
55 #include <cam/cam.h>
56 #include <cam/cam_ccb.h>
57 #include <cam/cam_iosched.h>
58 #include <cam/cam_periph.h>
59 #include <cam/cam_queue.h>
60 #include <cam/cam_sim.h>
61 #include <cam/cam_xpt.h>
62 #include <cam/cam_xpt_sim.h>
63 #include <cam/cam_xpt_periph.h>
64 #include <cam/cam_xpt_internal.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_compat.h>
67 
68 #include <cam/scsi/scsi_all.h>
69 #include <cam/scsi/scsi_message.h>
70 #include <cam/scsi/scsi_pass.h>
71 
72 
73 /* SDT Probes */
74 SDT_PROBE_DEFINE1(cam, , xpt, action, "union ccb *");
75 SDT_PROBE_DEFINE1(cam, , xpt, done, "union ccb *");
76 SDT_PROBE_DEFINE4(cam, , xpt, async__cb, "void *", "uint32_t",
77     "struct cam_path *", "void *");
78 
79 /* Wild guess based on not wanting to grow the stack too much */
80 #define XPT_PRINT_MAXLEN	512
81 #ifdef PRINTF_BUFR_SIZE
82 #define XPT_PRINT_LEN	PRINTF_BUFR_SIZE
83 #else
84 #define XPT_PRINT_LEN	128
85 #endif
86 _Static_assert(XPT_PRINT_LEN <= XPT_PRINT_MAXLEN, "XPT_PRINT_LEN is too large");
87 
88 /*
89  * This is the maximum number of high powered commands (e.g. start unit)
90  * that can be outstanding at a particular time.
91  */
92 #ifndef CAM_MAX_HIGHPOWER
93 #define CAM_MAX_HIGHPOWER  4
94 #endif
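/*
 * Because the definition above is #ifndef-guarded, the limit can be
 * overridden at kernel build time, e.g. by adding -DCAM_MAX_HIGHPOWER=8
 * (a hypothetical value) to the kernel's CFLAGS.  xpt_init() below seeds
 * xsoftc.num_highpower from this constant.
 */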
95 
96 /* Datastructures internal to the xpt layer */
97 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
98 MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
99 MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
100 MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");
101 
102 struct xpt_softc {
103 	uint32_t		xpt_generation;
104 
105 	/* number of high powered commands that can go through right now */
106 	struct mtx		xpt_highpower_lock;
107 	STAILQ_HEAD(highpowerlist, cam_ed)	highpowerq;
108 	int			num_highpower;
109 
110 	/* queue for handling async rescan requests. */
111 	TAILQ_HEAD(, ccb_hdr) ccb_scanq;
112 	int buses_to_config;
113 	int buses_config_done;
114 
115 	/*
116 	 * Registered buses
117 	 *
118 	 * N.B., "busses" is an archaic spelling of "buses".  In new code
119 	 * "buses" is preferred.
120 	 */
121 	TAILQ_HEAD(,cam_eb)	xpt_busses;
122 	u_int			bus_generation;
123 
124 	int			boot_delay;
125 	struct callout 		boot_callout;
126 	struct task		boot_task;
127 	struct root_hold_token	xpt_rootmount;
128 
129 	struct mtx		xpt_topo_lock;
130 	struct taskqueue	*xpt_taskq;
131 };
132 
133 typedef enum {
134 	DM_RET_COPY		= 0x01,
135 	DM_RET_FLAG_MASK	= 0x0f,
136 	DM_RET_NONE		= 0x00,
137 	DM_RET_STOP		= 0x10,
138 	DM_RET_DESCEND		= 0x20,
139 	DM_RET_ERROR		= 0x30,
140 	DM_RET_ACTION_MASK	= 0xf0
141 } dev_match_ret;
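/*
 * A dev_match_ret packs a copy flag (DM_RET_FLAG_MASK, low nibble) and a
 * traversal action (DM_RET_ACTION_MASK, high nibble) into one value.  A
 * minimal sketch of how the match functions below build and test it:
 *
 *	dev_match_ret retval = DM_RET_NONE;
 *
 *	retval |= DM_RET_COPY;		// copy this node to the user
 *	retval |= DM_RET_DESCEND;	// and visit its children too
 *	if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 *		return (retval);	// keep walking the EDT
 */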
142 
143 typedef enum {
144 	XPT_DEPTH_BUS,
145 	XPT_DEPTH_TARGET,
146 	XPT_DEPTH_DEVICE,
147 	XPT_DEPTH_PERIPH
148 } xpt_traverse_depth;
149 
150 struct xpt_traverse_config {
151 	xpt_traverse_depth	depth;
152 	void			*tr_func;
153 	void			*tr_arg;
154 };
155 
156 typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
157 typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
158 typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
159 typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
160 typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
161 
162 /* Transport layer configuration information */
163 static struct xpt_softc xsoftc;
164 
165 MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);
166 
167 SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
168            &xsoftc.boot_delay, 0, "Bus registration wait time");
169 SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
170 	    &xsoftc.xpt_generation, 0, "CAM peripheral generation count");
171 
172 struct cam_doneq {
173 	struct mtx_padalign	cam_doneq_mtx;
174 	STAILQ_HEAD(, ccb_hdr)	cam_doneq;
175 	int			cam_doneq_sleep;
176 };
177 
178 static struct cam_doneq cam_doneqs[MAXCPU];
179 static u_int __read_mostly cam_num_doneqs;
180 static struct proc *cam_proc;
181 static struct cam_doneq cam_async;
182 
183 SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
184            &cam_num_doneqs, 0, "Number of completion queues/threads");
185 
186 struct cam_periph *xpt_periph;
187 
188 static periph_init_t xpt_periph_init;
189 
190 static struct periph_driver xpt_driver =
191 {
192 	xpt_periph_init, "xpt",
193 	TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
194 	CAM_PERIPH_DRV_EARLY
195 };
196 
197 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
198 
199 static d_open_t xptopen;
200 static d_close_t xptclose;
201 static d_ioctl_t xptioctl;
202 static d_ioctl_t xptdoioctl;
203 
204 static struct cdevsw xpt_cdevsw = {
205 	.d_version =	D_VERSION,
206 	.d_flags =	0,
207 	.d_open =	xptopen,
208 	.d_close =	xptclose,
209 	.d_ioctl =	xptioctl,
210 	.d_name =	"xpt",
211 };
212 
213 /* Storage for debugging datastructures */
214 struct cam_path *cam_dpath;
215 uint32_t __read_mostly cam_dflags = CAM_DEBUG_FLAGS;
216 SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
217 	&cam_dflags, 0, "Enabled debug flags");
218 uint32_t cam_debug_delay = CAM_DEBUG_DELAY;
219 SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
220 	&cam_debug_delay, 0, "Delay in us after each debug message");
221 
222 /* Our boot-time initialization hook */
223 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
224 
225 static moduledata_t cam_moduledata = {
226 	"cam",
227 	cam_module_event_handler,
228 	NULL
229 };
230 
231 static int	xpt_init(void *);
232 
233 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
234 MODULE_VERSION(cam, 1);
235 
236 static void		xpt_async_bcast(struct async_list *async_head,
237 					uint32_t async_code,
238 					struct cam_path *path,
239 					void *async_arg);
240 static path_id_t xptnextfreepathid(void);
241 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
242 static union ccb *xpt_get_ccb(struct cam_periph *periph);
243 static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
244 static void	 xpt_run_allocq(struct cam_periph *periph, int sleep);
245 static void	 xpt_run_allocq_task(void *context, int pending);
246 static void	 xpt_run_devq(struct cam_devq *devq);
247 static callout_func_t xpt_release_devq_timeout;
248 static void	 xpt_acquire_bus(struct cam_eb *bus);
249 static void	 xpt_release_bus(struct cam_eb *bus);
250 static uint32_t	 xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
251 static int	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
252 		    int run_queue);
253 static struct cam_et*
254 		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
255 static void	 xpt_acquire_target(struct cam_et *target);
256 static void	 xpt_release_target(struct cam_et *target);
257 static struct cam_eb*
258 		 xpt_find_bus(path_id_t path_id);
259 static struct cam_et*
260 		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
261 static struct cam_ed*
262 		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
263 static void	 xpt_config(void *arg);
264 static void	 xpt_hold_boot_locked(void);
265 static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
266 				 uint32_t new_priority);
267 static xpt_devicefunc_t xptpassannouncefunc;
268 static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
269 static void	 xptpoll(struct cam_sim *sim);
270 static void	 camisr_runqueue(void);
271 static void	 xpt_done_process(struct ccb_hdr *ccb_h);
272 static void	 xpt_done_td(void *);
273 static void	 xpt_async_td(void *);
274 static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
275 				    u_int num_patterns, struct cam_eb *bus);
276 static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
277 				       u_int num_patterns,
278 				       struct cam_ed *device);
279 static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
280 				       u_int num_patterns,
281 				       struct cam_periph *periph);
282 static xpt_busfunc_t	xptedtbusfunc;
283 static xpt_targetfunc_t	xptedttargetfunc;
284 static xpt_devicefunc_t	xptedtdevicefunc;
285 static xpt_periphfunc_t	xptedtperiphfunc;
286 static xpt_pdrvfunc_t	xptplistpdrvfunc;
287 static xpt_periphfunc_t	xptplistperiphfunc;
288 static int		xptedtmatch(struct ccb_dev_match *cdm);
289 static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
290 static int		xptbustraverse(struct cam_eb *start_bus,
291 				       xpt_busfunc_t *tr_func, void *arg);
292 static int		xpttargettraverse(struct cam_eb *bus,
293 					  struct cam_et *start_target,
294 					  xpt_targetfunc_t *tr_func, void *arg);
295 static int		xptdevicetraverse(struct cam_et *target,
296 					  struct cam_ed *start_device,
297 					  xpt_devicefunc_t *tr_func, void *arg);
298 static int		xptperiphtraverse(struct cam_ed *device,
299 					  struct cam_periph *start_periph,
300 					  xpt_periphfunc_t *tr_func, void *arg);
301 static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
302 					xpt_pdrvfunc_t *tr_func, void *arg);
303 static int		xptpdperiphtraverse(struct periph_driver **pdrv,
304 					    struct cam_periph *start_periph,
305 					    xpt_periphfunc_t *tr_func,
306 					    void *arg);
307 static xpt_busfunc_t	xptdefbusfunc;
308 static xpt_targetfunc_t	xptdeftargetfunc;
309 static xpt_devicefunc_t	xptdefdevicefunc;
310 static xpt_periphfunc_t	xptdefperiphfunc;
311 static void		xpt_finishconfig_task(void *context, int pending);
312 static void		xpt_dev_async_default(uint32_t async_code,
313 					      struct cam_eb *bus,
314 					      struct cam_et *target,
315 					      struct cam_ed *device,
316 					      void *async_arg);
317 static struct cam_ed *	xpt_alloc_device_default(struct cam_eb *bus,
318 						 struct cam_et *target,
319 						 lun_id_t lun_id);
320 static xpt_devicefunc_t	xptsetasyncfunc;
321 static xpt_busfunc_t	xptsetasyncbusfunc;
322 static cam_status	xptregister(struct cam_periph *periph,
323 				    void *arg);
324 
325 static __inline int
326 xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
327 {
328 	int	retval;
329 
330 	mtx_assert(&devq->send_mtx, MA_OWNED);
331 	if ((dev->ccbq.queue.entries > 0) &&
332 	    (dev->ccbq.dev_openings > 0) &&
333 	    (dev->ccbq.queue.qfrozen_cnt == 0)) {
334 		/*
335 		 * The priority of a device waiting for controller
336 		 * resources is that of the highest priority CCB
337 		 * enqueued.
338 		 */
339 		retval =
340 		    xpt_schedule_dev(&devq->send_queue,
341 				     &dev->devq_entry,
342 				     CAMQ_GET_PRIO(&dev->ccbq.queue));
343 	} else {
344 		retval = 0;
345 	}
346 	return (retval);
347 }
348 
349 static __inline int
350 device_is_queued(struct cam_ed *device)
351 {
352 	return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
353 }
354 
355 static void
356 xpt_periph_init(void)
357 {
358 	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
359 }
360 
361 static int
362 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
363 {
364 
365 	/*
366 	 * Only allow read-write access.
367 	 */
368 	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
369 		return(EPERM);
370 
371 	/*
372 	 * We don't allow nonblocking access.
373 	 */
374 	if ((flags & O_NONBLOCK) != 0) {
375 		printf("%s: can't do nonblocking access\n", devtoname(dev));
376 		return(ENODEV);
377 	}
378 
379 	return(0);
380 }
381 
382 static int
383 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
384 {
385 
386 	return(0);
387 }
388 
389 /*
390  * Don't automatically grab the xpt softc lock here even though this is going
391  * through the xpt device.  The xpt device is really just a back door for
392  * accessing other devices and SIMs, so the right thing to do is to grab
393  * the appropriate SIM lock once the bus/SIM is located.
394  */
395 static int
396 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
397 {
398 	int error;
399 
400 	if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
401 		error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
402 	}
403 	return (error);
404 }
405 
406 static int
407 xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
408 {
409 	int error;
410 
411 	error = 0;
412 
413 	switch(cmd) {
414 	/*
415 	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
416 	 * to accept CCB types that don't quite make sense to send through a
417 	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
418 	 * in the CAM spec.
419 	 */
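	/*
	 * A minimal userland sketch of this ioctl, assuming the /dev/xpt0
	 * node created in xpt_periph_init() and an XPT_PATH_INQ query
	 * against bus 0 (error handling omitted):
	 *
	 *	union ccb ccb;
	 *	int fd = open("/dev/xpt0", O_RDWR);
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	ccb.ccb_h.func_code = XPT_PATH_INQ;
	 *	ccb.ccb_h.path_id = 0;
	 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
	 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
	 *	ioctl(fd, CAMIOCOMMAND, &ccb);
	 *	printf("SIM: %s\n", ccb.cpi.dev_name);
	 */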
420 	case CAMIOCOMMAND: {
421 		union ccb *ccb;
422 		union ccb *inccb;
423 		struct cam_eb *bus;
424 
425 		inccb = (union ccb *)addr;
426 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
427 		if (inccb->ccb_h.func_code == XPT_SCSI_IO)
428 			inccb->csio.bio = NULL;
429 #endif
430 
431 		if (inccb->ccb_h.flags & CAM_UNLOCKED)
432 			return (EINVAL);
433 
434 		bus = xpt_find_bus(inccb->ccb_h.path_id);
435 		if (bus == NULL)
436 			return (EINVAL);
437 
438 		switch (inccb->ccb_h.func_code) {
439 		case XPT_SCAN_BUS:
440 		case XPT_RESET_BUS:
441 			if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
442 			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
443 				xpt_release_bus(bus);
444 				return (EINVAL);
445 			}
446 			break;
447 		case XPT_SCAN_TGT:
448 			if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
449 			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
450 				xpt_release_bus(bus);
451 				return (EINVAL);
452 			}
453 			break;
454 		default:
455 			break;
456 		}
457 
458 		switch(inccb->ccb_h.func_code) {
459 		case XPT_SCAN_BUS:
460 		case XPT_RESET_BUS:
461 		case XPT_PATH_INQ:
462 		case XPT_ENG_INQ:
463 		case XPT_SCAN_LUN:
464 		case XPT_SCAN_TGT:
465 
466 			ccb = xpt_alloc_ccb();
467 
468 			/*
469 			 * Create a path using the bus, target, and lun the
470 			 * user passed in.
471 			 */
472 			if (xpt_create_path(&ccb->ccb_h.path, NULL,
473 					    inccb->ccb_h.path_id,
474 					    inccb->ccb_h.target_id,
475 					    inccb->ccb_h.target_lun) !=
476 					    CAM_REQ_CMP){
477 				error = EINVAL;
478 				xpt_free_ccb(ccb);
479 				break;
480 			}
481 			/* Ensure all of our fields are correct */
482 			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
483 				      inccb->ccb_h.pinfo.priority);
484 			xpt_merge_ccb(ccb, inccb);
485 			xpt_path_lock(ccb->ccb_h.path);
486 			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
487 			xpt_path_unlock(ccb->ccb_h.path);
488 			bcopy(ccb, inccb, sizeof(union ccb));
489 			xpt_free_path(ccb->ccb_h.path);
490 			xpt_free_ccb(ccb);
491 			break;
492 
493 		case XPT_DEBUG: {
494 			union ccb ccb;
495 
496 			/*
497 			 * This is an immediate CCB, so it's okay to
498 			 * allocate it on the stack.
499 			 */
500 			memset(&ccb, 0, sizeof(ccb));
501 
502 			/*
503 			 * Create a path using the bus, target, and lun the
504 			 * user passed in.
505 			 */
506 			if (xpt_create_path(&ccb.ccb_h.path, NULL,
507 					    inccb->ccb_h.path_id,
508 					    inccb->ccb_h.target_id,
509 					    inccb->ccb_h.target_lun) !=
510 					    CAM_REQ_CMP){
511 				error = EINVAL;
512 				break;
513 			}
514 			/* Ensure all of our fields are correct */
515 			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
516 				      inccb->ccb_h.pinfo.priority);
517 			xpt_merge_ccb(&ccb, inccb);
518 			xpt_action(&ccb);
519 			bcopy(&ccb, inccb, sizeof(union ccb));
520 			xpt_free_path(ccb.ccb_h.path);
521 			break;
522 		}
523 		case XPT_DEV_MATCH: {
524 			struct cam_periph_map_info mapinfo;
525 			struct cam_path *old_path;
526 
527 			/*
528 			 * We can't deal with physical addresses for this
529 			 * type of transaction.
530 			 */
531 			if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
532 			    CAM_DATA_VADDR) {
533 				error = EINVAL;
534 				break;
535 			}
536 
537 			/*
538 			 * Save this in case the caller had it set to
539 			 * something in particular.
540 			 */
541 			old_path = inccb->ccb_h.path;
542 
543 			/*
544 			 * We really don't need a path for the matching
545 			 * code.  The path is needed because of the
546 			 * debugging statements in xpt_action().  They
547 			 * assume that the CCB has a valid path.
548 			 */
549 			inccb->ccb_h.path = xpt_periph->path;
550 
551 			bzero(&mapinfo, sizeof(mapinfo));
552 
553 			/*
554 			 * Map the pattern and match buffers into kernel
555 			 * virtual address space.
556 			 */
557 			error = cam_periph_mapmem(inccb, &mapinfo, maxphys);
558 
559 			if (error) {
560 				inccb->ccb_h.path = old_path;
561 				break;
562 			}
563 
564 			/*
565 			 * This is an immediate CCB, we can send it on directly.
566 			 */
567 			xpt_action(inccb);
568 
569 			/*
570 			 * Map the buffers back into user space.
571 			 */
572 			error = cam_periph_unmapmem(inccb, &mapinfo);
573 
574 			inccb->ccb_h.path = old_path;
575 			break;
576 		}
577 		default:
578 			error = ENOTSUP;
579 			break;
580 		}
581 		xpt_release_bus(bus);
582 		break;
583 	}
584 	/*
585 	 * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
586 	 * input, with the peripheral driver name and unit number filled in.
587 	 * The other fields don't really matter as input.  The passthrough
588 	 * driver name ("pass") and unit number are passed back in the ccb.
589 	 * The current device generation number, the index into the device
590 	 * peripheral driver list, and the status are also passed back.
591 	 * Note that since we do everything in one pass, unlike the
592 	 * XPT_GDEVLIST ccb, we never return a status of
593 	 * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be) impossible
594 	 * for the device peripheral driver list to change, since we look at
595 	 * the whole thing in one pass and do it with lock protection.
596 	 *
597 	 */
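	/*
	 * A minimal userland sketch of the lookup described above, assuming
	 * a configured "da0" and the pass driver in the kernel (this is
	 * essentially what libcam's cam_open_device() does internally;
	 * error handling omitted):
	 *
	 *	union ccb ccb;
	 *	int fd = open("/dev/xpt0", O_RDWR);
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	strlcpy(ccb.cgdl.periph_name, "da",
	 *	    sizeof(ccb.cgdl.periph_name));
	 *	ccb.cgdl.unit_number = 0;
	 *	ioctl(fd, CAMGETPASSTHRU, &ccb);
	 *	// on success, ccb.cgdl names the matching "pass" unit
	 *	printf("%s%d\n", ccb.cgdl.periph_name, ccb.cgdl.unit_number);
	 */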
598 	case CAMGETPASSTHRU: {
599 		union ccb *ccb;
600 		struct cam_periph *periph;
601 		struct periph_driver **p_drv;
602 		char   *name;
603 		u_int unit;
604 		bool base_periph_found;
605 
606 		ccb = (union ccb *)addr;
607 		unit = ccb->cgdl.unit_number;
608 		name = ccb->cgdl.periph_name;
609 		base_periph_found = false;
610 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
611 		if (ccb->ccb_h.func_code == XPT_SCSI_IO)
612 			ccb->csio.bio = NULL;
613 #endif
614 
615 		/*
616 		 * Sanity check -- make sure we don't get a null peripheral
617 		 * driver name.
618 		 */
619 		if (*ccb->cgdl.periph_name == '\0') {
620 			error = EINVAL;
621 			break;
622 		}
623 
624 		/* Keep the list from changing while we traverse it */
625 		xpt_lock_buses();
626 
627 		/* first find our driver in the list of drivers */
628 		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
629 			if (strcmp((*p_drv)->driver_name, name) == 0)
630 				break;
631 
632 		if (*p_drv == NULL) {
633 			xpt_unlock_buses();
634 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
635 			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
636 			*ccb->cgdl.periph_name = '\0';
637 			ccb->cgdl.unit_number = 0;
638 			error = ENOENT;
639 			break;
640 		}
641 
642 		/*
643 		 * Run through every peripheral instance of this driver
644 		 * and check to see whether it matches the unit passed
645 		 * in by the user.  If it does, get out of the loops and
646 		 * find the passthrough driver associated with that
647 		 * peripheral driver.
648 		 */
649 		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
650 		     periph = TAILQ_NEXT(periph, unit_links)) {
651 			if (periph->unit_number == unit)
652 				break;
653 		}
654 		/*
655 		 * If we found the peripheral driver that the user passed
656 		 * in, go through all of the peripheral drivers for that
657 		 * particular device and look for a passthrough driver.
658 		 */
659 		if (periph != NULL) {
660 			struct cam_ed *device;
661 			int i;
662 
663 			base_periph_found = true;
664 			device = periph->path->device;
665 			for (i = 0, periph = SLIST_FIRST(&device->periphs);
666 			     periph != NULL;
667 			     periph = SLIST_NEXT(periph, periph_links), i++) {
668 				/*
669 				 * Check to see whether we have a
670 				 * passthrough device or not.
671 				 */
672 				if (strcmp(periph->periph_name, "pass") == 0) {
673 					/*
674 					 * Fill in the getdevlist fields.
675 					 */
676 					strlcpy(ccb->cgdl.periph_name,
677 					       periph->periph_name,
678 					       sizeof(ccb->cgdl.periph_name));
679 					ccb->cgdl.unit_number =
680 						periph->unit_number;
681 					if (SLIST_NEXT(periph, periph_links))
682 						ccb->cgdl.status =
683 							CAM_GDEVLIST_MORE_DEVS;
684 					else
685 						ccb->cgdl.status =
686 						       CAM_GDEVLIST_LAST_DEVICE;
687 					ccb->cgdl.generation =
688 						device->generation;
689 					ccb->cgdl.index = i;
690 					/*
691 					 * Fill in some CCB header fields
692 					 * that the user may want.
693 					 */
694 					ccb->ccb_h.path_id =
695 						periph->path->bus->path_id;
696 					ccb->ccb_h.target_id =
697 						periph->path->target->target_id;
698 					ccb->ccb_h.target_lun =
699 						periph->path->device->lun_id;
700 					ccb->ccb_h.status = CAM_REQ_CMP;
701 					break;
702 				}
703 			}
704 		}
705 
706 		/*
707 		 * If the periph is null here, one of two things has
708 		 * happened.  The first possibility is that we couldn't
709 		 * find the unit number of the particular peripheral driver
710 		 * that the user is asking about.  e.g. the user asks for
711 		 * the passthrough driver for "da11".  We find the list of
712 		 * "da" peripherals all right, but there is no unit 11.
713 		 * The other possibility is that we went through the list
714 		 * of peripheral drivers attached to the device structure,
715 		 * but didn't find one with the name "pass".  Either way,
716 		 * we return ENOENT, since we couldn't find something.
717 		 */
718 		if (periph == NULL) {
719 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
720 			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
721 			*ccb->cgdl.periph_name = '\0';
722 			ccb->cgdl.unit_number = 0;
723 			error = ENOENT;
724 			/*
725 			 * It is unfortunate that this is even necessary,
726 			 * but there are many, many clueless users out there.
727 			 * If this is true, the user is looking for the
728 			 * passthrough driver, but doesn't have one in his
729 			 * kernel.
730 			 */
731 			if (base_periph_found) {
732 				printf(
733 		"xptioctl: pass driver is not in the kernel\n"
734 		"xptioctl: put \"device pass\" in your kernel config file\n");
735 			}
736 		}
737 		xpt_unlock_buses();
738 		break;
739 		}
740 	default:
741 		error = ENOTTY;
742 		break;
743 	}
744 
745 	return(error);
746 }
747 
748 static int
749 cam_module_event_handler(module_t mod, int what, void *arg)
750 {
751 	int error;
752 
753 	switch (what) {
754 	case MOD_LOAD:
755 		if ((error = xpt_init(NULL)) != 0)
756 			return (error);
757 		break;
758 	case MOD_UNLOAD:
759 		return EBUSY;
760 	default:
761 		return EOPNOTSUPP;
762 	}
763 
764 	return 0;
765 }
766 
767 static struct xpt_proto *
768 xpt_proto_find(cam_proto proto)
769 {
770 	struct xpt_proto **pp;
771 
772 	SET_FOREACH(pp, cam_xpt_proto_set) {
773 		if ((*pp)->proto == proto)
774 			return *pp;
775 	}
776 
777 	return NULL;
778 }
779 
780 static void
781 xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
782 {
783 
784 	if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
785 		xpt_free_path(done_ccb->ccb_h.path);
786 		xpt_free_ccb(done_ccb);
787 	} else {
788 		done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
789 		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
790 	}
791 	xpt_release_boot();
792 }
793 
794 /* thread to handle bus rescans */
795 static void
796 xpt_scanner_thread(void *dummy)
797 {
798 	union ccb	*ccb;
799 	struct mtx	*mtx;
800 	struct cam_ed	*device;
801 
802 	xpt_lock_buses();
803 	for (;;) {
804 		if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
805 			msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
806 			       "-", 0);
807 		if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
808 			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
809 			xpt_unlock_buses();
810 
811 			/*
812 			 * We need to lock the device's mutex, which we use as
813 			 * the path mutex.  We can't take it through the path
814 			 * because the cam_path in the ccb may go away: the
815 			 * path lock may be dropped and the path retired in
816 			 * the completion callback.  Taking a device reference
817 			 * first keeps the reference counts in cam_path sane.
818 			 * We also copy the device pointer because ccb_h.path
819 			 * may be freed in the callback.
820 			 */
821 			mtx = xpt_path_mtx(ccb->ccb_h.path);
822 			device = ccb->ccb_h.path->device;
823 			xpt_acquire_device(device);
824 			mtx_lock(mtx);
825 			xpt_action(ccb);
826 			mtx_unlock(mtx);
827 			xpt_release_device(device);
828 
829 			xpt_lock_buses();
830 		}
831 	}
832 }
833 
834 void
835 xpt_rescan(union ccb *ccb)
836 {
837 	struct ccb_hdr *hdr;
838 
839 	/* Prepare request */
840 	if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
841 	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
842 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
843 	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
844 	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
845 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
846 	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
847 	    ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
848 		ccb->ccb_h.func_code = XPT_SCAN_LUN;
849 	else {
850 		xpt_print(ccb->ccb_h.path, "illegal scan path\n");
851 		xpt_free_path(ccb->ccb_h.path);
852 		xpt_free_ccb(ccb);
853 		return;
854 	}
855 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
856 	    ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code,
857  		xpt_action_name(ccb->ccb_h.func_code)));
858 
859 	ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
860 	ccb->ccb_h.cbfcnp = xpt_rescan_done;
861 	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
862 	/* Don't make duplicate entries for the same paths. */
863 	xpt_lock_buses();
864 	if (ccb->ccb_h.ppriv_ptr1 == NULL) {
865 		TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
866 			if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
867 				wakeup(&xsoftc.ccb_scanq);
868 				xpt_unlock_buses();
869 				xpt_print(ccb->ccb_h.path, "rescan already queued\n");
870 				xpt_free_path(ccb->ccb_h.path);
871 				xpt_free_ccb(ccb);
872 				return;
873 			}
874 		}
875 	}
876 	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
877 	xpt_hold_boot_locked();
878 	wakeup(&xsoftc.ccb_scanq);
879 	xpt_unlock_buses();
880 }
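/*
 * A minimal in-kernel sketch of queueing a rescan, e.g. from a SIM driver
 * that detected a hot-plug event ("sim" is a hypothetical struct cam_sim
 * pointer; the scanner thread above consumes the request):
 *
 *	union ccb *ccb;
 *
 *	if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
 *		return;
 *	if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sim),
 *	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 *		xpt_free_ccb(ccb);
 *		return;
 *	}
 *	xpt_rescan(ccb);	// consumes the ccb and its path
 */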
881 
882 /* Functions accessed by the peripheral drivers */
883 static int
884 xpt_init(void *dummy)
885 {
886 	struct cam_sim *xpt_sim;
887 	struct cam_path *path;
888 	struct cam_devq *devq;
889 	cam_status status;
890 	int error, i;
891 
892 	TAILQ_INIT(&xsoftc.xpt_busses);
893 	TAILQ_INIT(&xsoftc.ccb_scanq);
894 	STAILQ_INIT(&xsoftc.highpowerq);
895 	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
896 
897 	mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
898 	xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
899 	    taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);
900 
901 #ifdef CAM_BOOT_DELAY
902 	/*
903 	 * Override this value at compile time to assist our users
904 	 * who don't use loader to boot a kernel.
905 	 */
906 	xsoftc.boot_delay = CAM_BOOT_DELAY;
907 #endif
908 
909 	/*
910 	 * The xpt layer is, itself, the equivalent of a SIM.
911 	 * Allow 16 ccbs in the ccb pool for it.  This should
912 	 * give decent parallelism when we probe buses and
913 	 * perform other XPT functions.
914 	 */
915 	devq = cam_simq_alloc(16);
916 	if (devq == NULL)
917 		return (ENOMEM);
918 	xpt_sim = cam_sim_alloc(xptaction,
919 				xptpoll,
920 				"xpt",
921 				/*softc*/NULL,
922 				/*unit*/0,
923 				/*mtx*/NULL,
924 				/*max_dev_transactions*/0,
925 				/*max_tagged_dev_transactions*/0,
926 				devq);
927 	if (xpt_sim == NULL)
928 		return (ENOMEM);
929 
930 	if ((error = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
931 		printf(
932 		    "xpt_init: xpt_bus_register failed with errno %d, failing attach\n",
933 		    error);
934 		return (EINVAL);
935 	}
936 
937 	/*
938 	 * Looking at the XPT from the SIM layer, the XPT is
939 	 * the equivalent of a peripheral driver.  Allocate
940 	 * a peripheral driver entry for us.
941 	 */
942 	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
943 				      CAM_TARGET_WILDCARD,
944 				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
945 		printf(
946 	"xpt_init: xpt_create_path failed with status %#x, failing attach\n",
947 		    status);
948 		return (EINVAL);
949 	}
950 	xpt_path_lock(path);
951 	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
952 			 path, NULL, 0, xpt_sim);
953 	xpt_path_unlock(path);
954 	xpt_free_path(path);
955 
956 	if (cam_num_doneqs < 1)
957 		cam_num_doneqs = 1 + mp_ncpus / 6;
958 	else if (cam_num_doneqs > MAXCPU)
959 		cam_num_doneqs = MAXCPU;
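	/*
	 * For example, with the kern.cam.num_doneqs tunable left unset on a
	 * 16-CPU machine, the sizing above works out to 1 + 16 / 6 == 3
	 * completion threads (cam/doneq0 through cam/doneq2).
	 */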
960 	for (i = 0; i < cam_num_doneqs; i++) {
961 		mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
962 		    MTX_DEF);
963 		STAILQ_INIT(&cam_doneqs[i].cam_doneq);
964 		error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
965 		    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
966 		if (error != 0) {
967 			cam_num_doneqs = i;
968 			break;
969 		}
970 	}
971 	if (cam_num_doneqs < 1) {
972 		printf("xpt_init: Cannot init completion queues - failing attach\n");
973 		return (ENOMEM);
974 	}
975 
976 	mtx_init(&cam_async.cam_doneq_mtx, "CAM async", NULL, MTX_DEF);
977 	STAILQ_INIT(&cam_async.cam_doneq);
978 	if (kproc_kthread_add(xpt_async_td, &cam_async,
979 		&cam_proc, NULL, 0, 0, "cam", "async") != 0) {
980 		printf("xpt_init: Cannot init async thread - failing attach\n");
981 		return (ENOMEM);
982 	}
983 
984 	/*
985 	 * Register a callback for when interrupts are enabled.
986 	 */
987 	config_intrhook_oneshot(xpt_config, NULL);
988 
989 	return (0);
990 }
991 
992 static cam_status
993 xptregister(struct cam_periph *periph, void *arg)
994 {
995 	struct cam_sim *xpt_sim;
996 
997 	if (periph == NULL) {
998 		printf("xptregister: periph was NULL!!\n");
999 		return(CAM_REQ_CMP_ERR);
1000 	}
1001 
1002 	xpt_sim = (struct cam_sim *)arg;
1003 	xpt_sim->softc = periph;
1004 	xpt_periph = periph;
1005 	periph->softc = NULL;
1006 
1007 	return(CAM_REQ_CMP);
1008 }
1009 
1010 int32_t
1011 xpt_add_periph(struct cam_periph *periph)
1012 {
1013 	struct cam_ed *device;
1014 	int32_t	 status;
1015 
1016 	TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
1017 	device = periph->path->device;
1018 	status = CAM_REQ_CMP;
1019 	if (device != NULL) {
1020 		mtx_lock(&device->target->bus->eb_mtx);
1021 		device->generation++;
1022 		SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
1023 		mtx_unlock(&device->target->bus->eb_mtx);
1024 		atomic_add_32(&xsoftc.xpt_generation, 1);
1025 	}
1026 
1027 	return (status);
1028 }
1029 
1030 void
1031 xpt_remove_periph(struct cam_periph *periph)
1032 {
1033 	struct cam_ed *device;
1034 
1035 	device = periph->path->device;
1036 	if (device != NULL) {
1037 		mtx_lock(&device->target->bus->eb_mtx);
1038 		device->generation++;
1039 		SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
1040 		mtx_unlock(&device->target->bus->eb_mtx);
1041 		atomic_add_32(&xsoftc.xpt_generation, 1);
1042 	}
1043 }
1044 
1045 void
1046 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1047 {
1048 	char buf[128];
1049 	struct sbuf sb;
1050 
1051 	(void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
1052 	sbuf_set_drain(&sb, sbuf_printf_drain, NULL);
1053 	xpt_announce_periph_sbuf(periph, &sb, announce_string);
1054 	(void)sbuf_finish(&sb);
1055 	(void)sbuf_delete(&sb);
1056 }
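/*
 * The function above shows the sbuf(9) drain idiom used throughout this
 * file: a small fixed buffer whose overflow is flushed to the console by
 * sbuf_printf_drain() rather than truncated.  A minimal standalone sketch
 * (the message is hypothetical, the API calls are real):
 *
 *	char buf[128];
 *	struct sbuf sb;
 *
 *	sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
 *	sbuf_set_drain(&sb, sbuf_printf_drain, NULL);
 *	sbuf_printf(&sb, "example message %d\n", 42);
 *	sbuf_finish(&sb);
 *	sbuf_delete(&sb);
 */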
1057 
1058 void
1059 xpt_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb,
1060     char *announce_string)
1061 {
1062 	struct	cam_path *path = periph->path;
1063 	struct  xpt_proto *proto;
1064 
1065 	cam_periph_assert(periph, MA_OWNED);
1066 	periph->flags |= CAM_PERIPH_ANNOUNCED;
1067 
1068 	sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1069 	    periph->periph_name, periph->unit_number,
1070 	    path->bus->sim->sim_name,
1071 	    path->bus->sim->unit_number,
1072 	    path->bus->sim->bus_id,
1073 	    path->bus->path_id,
1074 	    path->target->target_id,
1075 	    (uintmax_t)path->device->lun_id);
1076 	sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);
1077 	proto = xpt_proto_find(path->device->protocol);
1078 	if (proto)
1079 		proto->ops->announce_sbuf(path->device, sb);
1080 	else
1081 		sbuf_printf(sb, "Unknown protocol device %d\n",
1082 		    path->device->protocol);
1083 	if (path->device->serial_num_len > 0) {
1084 		/* Don't wrap the screen - print only the first 60 chars */
1085 		sbuf_printf(sb, "%s%d: Serial Number %.60s\n",
1086 		    periph->periph_name, periph->unit_number,
1087 		    path->device->serial_num);
1088 	}
1089 	/* Announce transport details. */
1090 	path->bus->xport->ops->announce_sbuf(periph, sb);
1091 	/* Announce command queueing. */
1092 	if (path->device->inq_flags & SID_CmdQue
1093 	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1094 		sbuf_printf(sb, "%s%d: Command Queueing enabled\n",
1095 		    periph->periph_name, periph->unit_number);
1096 	}
1097 	/* Announce caller's details if any were passed in. */
1098 	if (announce_string != NULL)
1099 		sbuf_printf(sb, "%s%d: %s\n", periph->periph_name,
1100 		    periph->unit_number, announce_string);
1101 }
1102 
1103 void
1104 xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
1105 {
1106 	if (quirks != 0) {
1107 		printf("%s%d: quirks=0x%b\n", periph->periph_name,
1108 		    periph->unit_number, quirks, bit_string);
1109 	}
1110 }
1111 
1112 void
1113 xpt_announce_quirks_sbuf(struct cam_periph *periph, struct sbuf *sb,
1114 			 int quirks, char *bit_string)
1115 {
1116 	if (quirks != 0) {
1117 		sbuf_printf(sb, "%s%d: quirks=0x%b\n", periph->periph_name,
1118 		    periph->unit_number, quirks, bit_string);
1119 	}
1120 }
1121 
1122 void
1123 xpt_denounce_periph(struct cam_periph *periph)
1124 {
1125 	char buf[128];
1126 	struct sbuf sb;
1127 
1128 	(void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
1129 	sbuf_set_drain(&sb, sbuf_printf_drain, NULL);
1130 	xpt_denounce_periph_sbuf(periph, &sb);
1131 	(void)sbuf_finish(&sb);
1132 	(void)sbuf_delete(&sb);
1133 }
1134 
1135 void
1136 xpt_denounce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
1137 {
1138 	struct cam_path *path = periph->path;
1139 	struct xpt_proto *proto;
1140 
1141 	cam_periph_assert(periph, MA_OWNED);
1142 
1143 	sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1144 	    periph->periph_name, periph->unit_number,
1145 	    path->bus->sim->sim_name,
1146 	    path->bus->sim->unit_number,
1147 	    path->bus->sim->bus_id,
1148 	    path->bus->path_id,
1149 	    path->target->target_id,
1150 	    (uintmax_t)path->device->lun_id);
1151 	sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);
1152 	proto = xpt_proto_find(path->device->protocol);
1153 	if (proto)
1154 		proto->ops->denounce_sbuf(path->device, sb);
1155 	else
1156 		sbuf_printf(sb, "Unknown protocol device %d",
1157 		    path->device->protocol);
1158 	if (path->device->serial_num_len > 0)
1159 		sbuf_printf(sb, " s/n %.60s", path->device->serial_num);
1160 	sbuf_cat(sb, " detached\n");
1161 }
1162 
1163 int
1164 xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
1165 {
1166 	int ret = -1, l, o;
1167 	struct ccb_dev_advinfo cdai;
1168 	struct scsi_vpd_device_id *did;
1169 	struct scsi_vpd_id_descriptor *idd;
1170 
1171 	xpt_path_assert(path, MA_OWNED);
1172 
1173 	memset(&cdai, 0, sizeof(cdai));
1174 	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
1175 	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
1176 	cdai.flags = CDAI_FLAG_NONE;
1177 	cdai.bufsiz = len;
1178 	cdai.buf = buf;
1179 
1180 	if (!strcmp(attr, "GEOM::ident"))
1181 		cdai.buftype = CDAI_TYPE_SERIAL_NUM;
1182 	else if (!strcmp(attr, "GEOM::physpath"))
1183 		cdai.buftype = CDAI_TYPE_PHYS_PATH;
1184 	else if (strcmp(attr, "GEOM::lunid") == 0 ||
1185 		 strcmp(attr, "GEOM::lunname") == 0) {
1186 		cdai.buftype = CDAI_TYPE_SCSI_DEVID;
1187 		cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
1188 		cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT);
1189 		if (cdai.buf == NULL) {
1190 			ret = ENOMEM;
1191 			goto out;
1192 		}
1193 	} else
1194 		goto out;
1195 
1196 	xpt_action((union ccb *)&cdai); /* can only be synchronous */
1197 	if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
1198 		cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
1199 	if (cdai.provsiz == 0)
1200 		goto out;
1201 	switch(cdai.buftype) {
1202 	case CDAI_TYPE_SCSI_DEVID:
1203 		did = (struct scsi_vpd_device_id *)cdai.buf;
1204 		if (strcmp(attr, "GEOM::lunid") == 0) {
1205 			idd = scsi_get_devid(did, cdai.provsiz,
1206 			    scsi_devid_is_lun_naa);
1207 			if (idd == NULL)
1208 				idd = scsi_get_devid(did, cdai.provsiz,
1209 				    scsi_devid_is_lun_eui64);
1210 			if (idd == NULL)
1211 				idd = scsi_get_devid(did, cdai.provsiz,
1212 				    scsi_devid_is_lun_uuid);
1213 			if (idd == NULL)
1214 				idd = scsi_get_devid(did, cdai.provsiz,
1215 				    scsi_devid_is_lun_md5);
1216 		} else
1217 			idd = NULL;
1218 
1219 		if (idd == NULL)
1220 			idd = scsi_get_devid(did, cdai.provsiz,
1221 			    scsi_devid_is_lun_t10);
1222 		if (idd == NULL)
1223 			idd = scsi_get_devid(did, cdai.provsiz,
1224 			    scsi_devid_is_lun_name);
1225 		if (idd == NULL)
1226 			break;
1227 
1228 		ret = 0;
1229 		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
1230 		    SVPD_ID_CODESET_ASCII) {
1231 			if (idd->length < len) {
1232 				for (l = 0; l < idd->length; l++)
1233 					buf[l] = idd->identifier[l] ?
1234 					    idd->identifier[l] : ' ';
1235 				buf[l] = 0;
1236 			} else
1237 				ret = EFAULT;
1238 			break;
1239 		}
1240 		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
1241 		    SVPD_ID_CODESET_UTF8) {
1242 			l = strnlen(idd->identifier, idd->length);
1243 			if (l < len) {
1244 				bcopy(idd->identifier, buf, l);
1245 				buf[l] = 0;
1246 			} else
1247 				ret = EFAULT;
1248 			break;
1249 		}
1250 		if ((idd->id_type & SVPD_ID_TYPE_MASK) ==
1251 		    SVPD_ID_TYPE_UUID && idd->identifier[0] == 0x10) {
1252 			if ((idd->length - 2) * 2 + 4 >= len) {
1253 				ret = EFAULT;
1254 				break;
1255 			}
1256 			for (l = 2, o = 0; l < idd->length; l++) {
1257 				if (l == 6 || l == 8 || l == 10 || l == 12)
1258 					o += sprintf(buf + o, "-");
1259 				o += sprintf(buf + o, "%02x",
1260 				    idd->identifier[l]);
1261 			}
1262 			break;
1263 		}
1264 		if (idd->length * 2 < len) {
1265 			for (l = 0; l < idd->length; l++)
1266 				sprintf(buf + l * 2, "%02x",
1267 				    idd->identifier[l]);
1268 		} else
1269 			ret = EFAULT;
1270 		break;
1271 	default:
1272 		if (cdai.provsiz < len) {
1273 			cdai.buf[cdai.provsiz] = 0;
1274 			ret = 0;
1275 		} else
1276 			ret = EFAULT;
1277 		break;
1278 	}
1279 
1280 out:
1281 	if ((char *)cdai.buf != buf)
1282 		free(cdai.buf, M_CAMXPT);
1283 	return ret;
1284 }
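/*
 * A minimal sketch of a caller, e.g. a disk driver's d_getattr method
 * fetching the GEOM ident ("periph" is a hypothetical peripheral; the
 * path mutex must be held, per the assertion above):
 *
 *	char ident[100];
 *	int error;
 *
 *	cam_periph_lock(periph);
 *	error = xpt_getattr(ident, sizeof(ident), "GEOM::ident",
 *	    periph->path);
 *	cam_periph_unlock(periph);
 *	if (error == 0)
 *		printf("ident: %s\n", ident);
 */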
1285 
1286 static dev_match_ret
1287 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1288 	    struct cam_eb *bus)
1289 {
1290 	dev_match_ret retval;
1291 	u_int i;
1292 
1293 	retval = DM_RET_NONE;
1294 
1295 	/*
1296 	 * If we aren't given something to match against, that's an error.
1297 	 */
1298 	if (bus == NULL)
1299 		return(DM_RET_ERROR);
1300 
1301 	/*
1302 	 * If there are no match entries, then this bus matches no
1303 	 * matter what.
1304 	 */
1305 	if ((patterns == NULL) || (num_patterns == 0))
1306 		return(DM_RET_DESCEND | DM_RET_COPY);
1307 
1308 	for (i = 0; i < num_patterns; i++) {
1309 		struct bus_match_pattern *cur_pattern;
1310 		struct device_match_pattern *dp = &patterns[i].pattern.device_pattern;
1311 		struct periph_match_pattern *pp = &patterns[i].pattern.periph_pattern;
1312 
1313 		/*
1314 		 * If the pattern in question isn't for a bus node, we
1315 		 * aren't interested.  However, we do indicate to the
1316 		 * calling routine that we should continue descending the
1317 		 * tree, since the user wants to match against lower-level
1318 		 * EDT elements.
1319 		 */
1320 		if (patterns[i].type == DEV_MATCH_DEVICE &&
1321 		    (dp->flags & DEV_MATCH_PATH) != 0 &&
1322 		    dp->path_id != bus->path_id)
1323 			continue;
1324 		if (patterns[i].type == DEV_MATCH_PERIPH &&
1325 		    (pp->flags & PERIPH_MATCH_PATH) != 0 &&
1326 		    pp->path_id != bus->path_id)
1327 			continue;
1328 		if (patterns[i].type != DEV_MATCH_BUS) {
1329 			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1330 				retval |= DM_RET_DESCEND;
1331 			continue;
1332 		}
1333 
1334 		cur_pattern = &patterns[i].pattern.bus_pattern;
1335 
1336 		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1337 		 && (cur_pattern->path_id != bus->path_id))
1338 			continue;
1339 
1340 		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1341 		 && (cur_pattern->bus_id != bus->sim->bus_id))
1342 			continue;
1343 
1344 		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1345 		 && (cur_pattern->unit_number != bus->sim->unit_number))
1346 			continue;
1347 
1348 		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1349 		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1350 			     DEV_IDLEN) != 0))
1351 			continue;
1352 
1353 		/*
1354 		 * If we get to this point, the user definitely wants
1355 		 * information on this bus.  So tell the caller to copy the
1356 		 * data out.
1357 		 */
1358 		retval |= DM_RET_COPY;
1359 
1360 		/*
1361 		 * If the return action has been set to descend, then we
1362 		 * know that we've already seen a non-bus matching
1363 		 * expression, therefore we need to further descend the tree.
1364 		 * This won't change by continuing around the loop, so we
1365 		 * go ahead and return.  If we haven't seen a non-bus
1366 		 * matching expression, we keep going around the loop until
1367 		 * we exhaust the matching expressions.  We'll set the stop
1368 		 * flag once we fall out of the loop.
1369 		 */
1370 		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1371 			return(retval);
1372 	}
1373 
1374 	/*
1375 	 * If the return action hasn't been set to descend yet, that means
1376 	 * we haven't seen anything other than bus matching patterns.  So
1377 	 * tell the caller to stop descending the tree -- the user doesn't
1378 	 * want to match against lower level tree elements.
1379 	 */
1380 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1381 		retval |= DM_RET_STOP;
1382 
1383 	return(retval);
1384 }
1385 
1386 static dev_match_ret
1387 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1388 	       struct cam_ed *device)
1389 {
1390 	dev_match_ret retval;
1391 	u_int i;
1392 
1393 	retval = DM_RET_NONE;
1394 
1395 	/*
1396 	 * If we aren't given something to match against, that's an error.
1397 	 */
1398 	if (device == NULL)
1399 		return(DM_RET_ERROR);
1400 
1401 	/*
1402 	 * If there are no match entries, then this device matches no
1403 	 * matter what.
1404 	 */
1405 	if ((patterns == NULL) || (num_patterns == 0))
1406 		return(DM_RET_DESCEND | DM_RET_COPY);
1407 
1408 	for (i = 0; i < num_patterns; i++) {
1409 		struct device_match_pattern *cur_pattern;
1410 		struct scsi_vpd_device_id *device_id_page;
1411 		struct periph_match_pattern *pp = &patterns[i].pattern.periph_pattern;
1412 
1413 		/*
1414 		 * If the pattern in question isn't for a device node, we
1415 		 * aren't interested.
1416 		 */
1417 		if (patterns[i].type == DEV_MATCH_PERIPH &&
1418 		    (pp->flags & PERIPH_MATCH_TARGET) != 0 &&
1419 		    pp->target_id != device->target->target_id)
1420 			continue;
1421 		if (patterns[i].type == DEV_MATCH_PERIPH &&
1422 		    (pp->flags & PERIPH_MATCH_LUN) != 0 &&
1423 		    pp->target_lun != device->lun_id)
1424 			continue;
1425 		if (patterns[i].type != DEV_MATCH_DEVICE) {
1426 			if ((patterns[i].type == DEV_MATCH_PERIPH)
1427 			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1428 				retval |= DM_RET_DESCEND;
1429 			continue;
1430 		}
1431 
1432 		cur_pattern = &patterns[i].pattern.device_pattern;
1433 
1434 		/* Error out if mutually exclusive options are specified. */
1435 		if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1436 		 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1437 			return(DM_RET_ERROR);
1438 
1439 		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1440 		 && (cur_pattern->path_id != device->target->bus->path_id))
1441 			continue;
1442 
1443 		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1444 		 && (cur_pattern->target_id != device->target->target_id))
1445 			continue;
1446 
1447 		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1448 		 && (cur_pattern->target_lun != device->lun_id))
1449 			continue;
1450 
1451 		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1452 		 && (cam_quirkmatch((caddr_t)&device->inq_data,
1453 				    (caddr_t)&cur_pattern->data.inq_pat,
1454 				    1, sizeof(cur_pattern->data.inq_pat),
1455 				    scsi_static_inquiry_match) == NULL))
1456 			continue;
1457 
1458 		device_id_page = (struct scsi_vpd_device_id *)device->device_id;
1459 		if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
1460 		 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
1461 		  || scsi_devid_match((uint8_t *)device_id_page->desc_list,
1462 				      device->device_id_len
1463 				    - SVPD_DEVICE_ID_HDR_LEN,
1464 				      cur_pattern->data.devid_pat.id,
1465 				      cur_pattern->data.devid_pat.id_len) != 0))
1466 			continue;
1467 
1468 		/*
1469 		 * If we get to this point, the user definitely wants
1470 		 * information on this device.  So tell the caller to copy
1471 		 * the data out.
1472 		 */
1473 		retval |= DM_RET_COPY;
1474 
1475 		/*
1476 		 * If the return action has been set to descend, then we
1477 		 * know that we've already seen a peripheral matching
1478 		 * expression, therefore we need to further descend the tree.
1479 		 * This won't change by continuing around the loop, so we
1480 		 * go ahead and return.  If we haven't seen a peripheral
1481 		 * matching expression, we keep going around the loop until
1482 		 * we exhaust the matching expressions.  We'll set the stop
1483 		 * flag once we fall out of the loop.
1484 		 */
1485 		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1486 			return(retval);
1487 	}
1488 
1489 	/*
1490 	 * If the return action hasn't been set to descend yet, that means
1491 	 * we haven't seen any peripheral matching patterns.  So tell the
1492 	 * caller to stop descending the tree -- the user doesn't want to
1493 	 * match against lower level tree elements.
1494 	 */
1495 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1496 		retval |= DM_RET_STOP;
1497 
1498 	return(retval);
1499 }
1500 
1501 /*
1502  * Match a single peripheral against any number of match patterns.
1503  */
1504 static dev_match_ret
1505 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1506 	       struct cam_periph *periph)
1507 {
1508 	dev_match_ret retval;
1509 	u_int i;
1510 
1511 	/*
1512 	 * If we aren't given something to match against, that's an error.
1513 	 */
1514 	if (periph == NULL)
1515 		return(DM_RET_ERROR);
1516 
1517 	/*
1518 	 * If there are no match entries, then this peripheral matches no
1519 	 * matter what.
1520 	 */
1521 	if ((patterns == NULL) || (num_patterns == 0))
1522 		return(DM_RET_STOP | DM_RET_COPY);
1523 
1524 	/*
1525 	 * There aren't any nodes below a peripheral node, so there's no
1526 	 * reason to descend the tree any further.
1527 	 */
1528 	retval = DM_RET_STOP;
1529 
1530 	for (i = 0; i < num_patterns; i++) {
1531 		struct periph_match_pattern *cur_pattern;
1532 
1533 		/*
1534 		 * If the pattern in question isn't for a peripheral, we
1535 		 * aren't interested.
1536 		 */
1537 		if (patterns[i].type != DEV_MATCH_PERIPH)
1538 			continue;
1539 
1540 		cur_pattern = &patterns[i].pattern.periph_pattern;
1541 
1542 		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1543 		 && (cur_pattern->path_id != periph->path->bus->path_id))
1544 			continue;
1545 
1546 		/*
1547 		 * For the target and lun id's, we have to make sure the
1548 		 * target and lun pointers aren't NULL.  The xpt peripheral
1549 		 * has a wildcard target and device.
1550 		 */
1551 		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1552 		 && ((periph->path->target == NULL)
1553 		 ||(cur_pattern->target_id != periph->path->target->target_id)))
1554 			continue;
1555 
1556 		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1557 		 && ((periph->path->device == NULL)
1558 		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
1559 			continue;
1560 
1561 		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1562 		 && (cur_pattern->unit_number != periph->unit_number))
1563 			continue;
1564 
1565 		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1566 		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
1567 			     DEV_IDLEN) != 0))
1568 			continue;
1569 
1570 		/*
1571 		 * If we get to this point, the user definitely wants
1572 		 * information on this peripheral.  So tell the caller to
1573 		 * copy the data out.
1574 		 */
1575 		retval |= DM_RET_COPY;
1576 
1577 		/*
1578 		 * The return action has already been set to stop, since
1579 		 * peripherals don't have any nodes below them in the EDT.
1580 		 */
1581 		return(retval);
1582 	}
1583 
1584 	/*
1585 	 * If we get to this point, the peripheral that was passed in
1586 	 * doesn't match any of the patterns.
1587 	 */
1588 	return(retval);
1589 }
1590 
1591 static int
1592 xptedtbusfunc(struct cam_eb *bus, void *arg)
1593 {
1594 	struct ccb_dev_match *cdm;
1595 	struct cam_et *target;
1596 	dev_match_ret retval;
1597 
1598 	cdm = (struct ccb_dev_match *)arg;
1599 
1600 	/*
1601 	 * If our position is for something deeper in the tree, that means
1602 	 * that we've already seen this node.  So, we keep going down.
1603 	 */
1604 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1605 	 && (cdm->pos.cookie.bus == bus)
1606 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1607 	 && (cdm->pos.cookie.target != NULL))
1608 		retval = DM_RET_DESCEND;
1609 	else
1610 		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1611 
1612 	/*
1613 	 * If we got an error, bail out of the search.
1614 	 */
1615 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1616 		cdm->status = CAM_DEV_MATCH_ERROR;
1617 		return(0);
1618 	}
1619 
1620 	/*
1621 	 * If the copy flag is set, copy this bus out.
1622 	 */
1623 	if (retval & DM_RET_COPY) {
1624 		int spaceleft, j;
1625 
1626 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1627 			sizeof(struct dev_match_result));
1628 
1629 		/*
1630 		 * If we don't have enough space to put in another
1631 		 * match result, save our position and tell the
1632 		 * user there are more devices to check.
1633 		 */
1634 		if (spaceleft < sizeof(struct dev_match_result)) {
1635 			bzero(&cdm->pos, sizeof(cdm->pos));
1636 			cdm->pos.position_type =
1637 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1638 
1639 			cdm->pos.cookie.bus = bus;
1640 			cdm->pos.generations[CAM_BUS_GENERATION]=
1641 				xsoftc.bus_generation;
1642 			cdm->status = CAM_DEV_MATCH_MORE;
1643 			return(0);
1644 		}
1645 		j = cdm->num_matches;
1646 		cdm->num_matches++;
1647 		cdm->matches[j].type = DEV_MATCH_BUS;
1648 		cdm->matches[j].result.bus_result.path_id = bus->path_id;
1649 		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1650 		cdm->matches[j].result.bus_result.unit_number =
1651 			bus->sim->unit_number;
1652 		strlcpy(cdm->matches[j].result.bus_result.dev_name,
1653 			bus->sim->sim_name,
1654 			sizeof(cdm->matches[j].result.bus_result.dev_name));
1655 	}
1656 
1657 	/*
1658 	 * If the user is only interested in buses, there's no
1659 	 * reason to descend to the next level in the tree.
1660 	 */
1661 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1662 		return(1);
1663 
1664 	/*
1665 	 * If there is a target generation recorded, check it to
1666 	 * make sure the target list hasn't changed.
1667 	 */
1668 	mtx_lock(&bus->eb_mtx);
1669 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1670 	 && (cdm->pos.cookie.bus == bus)
1671 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1672 	 && (cdm->pos.cookie.target != NULL)) {
1673 		if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
1674 		    bus->generation)) {
1675 			mtx_unlock(&bus->eb_mtx);
1676 			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1677 			return (0);
1678 		}
1679 		target = (struct cam_et *)cdm->pos.cookie.target;
1680 		target->refcount++;
1681 	} else
1682 		target = NULL;
1683 	mtx_unlock(&bus->eb_mtx);
1684 
1685 	return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
1686 }
1687 
1688 static int
1689 xptedttargetfunc(struct cam_et *target, void *arg)
1690 {
1691 	struct ccb_dev_match *cdm;
1692 	struct cam_eb *bus;
1693 	struct cam_ed *device;
1694 
1695 	cdm = (struct ccb_dev_match *)arg;
1696 	bus = target->bus;
1697 
1698 	/*
1699 	 * If there is a device list generation recorded, check it to
1700 	 * make sure the device list hasn't changed.
1701 	 */
1702 	mtx_lock(&bus->eb_mtx);
1703 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1704 	 && (cdm->pos.cookie.bus == bus)
1705 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1706 	 && (cdm->pos.cookie.target == target)
1707 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1708 	 && (cdm->pos.cookie.device != NULL)) {
1709 		if (cdm->pos.generations[CAM_DEV_GENERATION] !=
1710 		    target->generation) {
1711 			mtx_unlock(&bus->eb_mtx);
1712 			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1713 			return(0);
1714 		}
1715 		device = (struct cam_ed *)cdm->pos.cookie.device;
1716 		device->refcount++;
1717 	} else
1718 		device = NULL;
1719 	mtx_unlock(&bus->eb_mtx);
1720 
1721 	return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
1722 }
1723 
1724 static int
1725 xptedtdevicefunc(struct cam_ed *device, void *arg)
1726 {
1727 	struct cam_eb *bus;
1728 	struct cam_periph *periph;
1729 	struct ccb_dev_match *cdm;
1730 	dev_match_ret retval;
1731 
1732 	cdm = (struct ccb_dev_match *)arg;
1733 	bus = device->target->bus;
1734 
1735 	/*
1736 	 * If our position is for something deeper in the tree, that means
1737 	 * that we've already seen this node.  So, we keep going down.
1738 	 */
1739 	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1740 	 && (cdm->pos.cookie.device == device)
1741 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1742 	 && (cdm->pos.cookie.periph != NULL))
1743 		retval = DM_RET_DESCEND;
1744 	else
1745 		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
1746 					device);
1747 
1748 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1749 		cdm->status = CAM_DEV_MATCH_ERROR;
1750 		return(0);
1751 	}
1752 
1753 	/*
1754 	 * If the copy flag is set, copy this device out.
1755 	 */
1756 	if (retval & DM_RET_COPY) {
1757 		int spaceleft, j;
1758 
1759 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1760 			sizeof(struct dev_match_result));
1761 
1762 		/*
1763 		 * If we don't have enough space to put in another
1764 		 * match result, save our position and tell the
1765 		 * user there are more devices to check.
1766 		 */
1767 		if (spaceleft < sizeof(struct dev_match_result)) {
1768 			bzero(&cdm->pos, sizeof(cdm->pos));
1769 			cdm->pos.position_type =
1770 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1771 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
1772 
1773 			cdm->pos.cookie.bus = device->target->bus;
1774 			cdm->pos.generations[CAM_BUS_GENERATION]=
1775 				xsoftc.bus_generation;
1776 			cdm->pos.cookie.target = device->target;
1777 			cdm->pos.generations[CAM_TARGET_GENERATION] =
1778 				device->target->bus->generation;
1779 			cdm->pos.cookie.device = device;
1780 			cdm->pos.generations[CAM_DEV_GENERATION] =
1781 				device->target->generation;
1782 			cdm->status = CAM_DEV_MATCH_MORE;
1783 			return(0);
1784 		}
1785 		j = cdm->num_matches;
1786 		cdm->num_matches++;
1787 		cdm->matches[j].type = DEV_MATCH_DEVICE;
1788 		cdm->matches[j].result.device_result.path_id =
1789 			device->target->bus->path_id;
1790 		cdm->matches[j].result.device_result.target_id =
1791 			device->target->target_id;
1792 		cdm->matches[j].result.device_result.target_lun =
1793 			device->lun_id;
1794 		cdm->matches[j].result.device_result.protocol =
1795 			device->protocol;
1796 		bcopy(&device->inq_data,
1797 		      &cdm->matches[j].result.device_result.inq_data,
1798 		      sizeof(struct scsi_inquiry_data));
1799 		bcopy(&device->ident_data,
1800 		      &cdm->matches[j].result.device_result.ident_data,
1801 		      sizeof(struct ata_params));
1802 
1803 		/* Let the user know whether this device is unconfigured */
1804 		if (device->flags & CAM_DEV_UNCONFIGURED)
1805 			cdm->matches[j].result.device_result.flags =
1806 				DEV_RESULT_UNCONFIGURED;
1807 		else
1808 			cdm->matches[j].result.device_result.flags =
1809 				DEV_RESULT_NOFLAG;
1810 	}
1811 
1812 	/*
1813 	 * If the user isn't interested in peripherals, don't descend
1814 	 * the tree any further.
1815 	 */
1816 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1817 		return(1);
1818 
1819 	/*
1820 	 * If there is a peripheral list generation recorded, make sure
1821 	 * it hasn't changed.
1822 	 */
1823 	xpt_lock_buses();
1824 	mtx_lock(&bus->eb_mtx);
1825 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1826 	 && (cdm->pos.cookie.bus == bus)
1827 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1828 	 && (cdm->pos.cookie.target == device->target)
1829 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1830 	 && (cdm->pos.cookie.device == device)
1831 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1832 	 && (cdm->pos.cookie.periph != NULL)) {
1833 		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1834 		    device->generation) {
1835 			mtx_unlock(&bus->eb_mtx);
1836 			xpt_unlock_buses();
1837 			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1838 			return(0);
1839 		}
1840 		periph = (struct cam_periph *)cdm->pos.cookie.periph;
1841 		periph->refcount++;
1842 	} else
1843 		periph = NULL;
1844 	mtx_unlock(&bus->eb_mtx);
1845 	xpt_unlock_buses();
1846 
1847 	return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
1848 }
1849 
1850 static int
1851 xptedtperiphfunc(struct cam_periph *periph, void *arg)
1852 {
1853 	struct ccb_dev_match *cdm;
1854 	dev_match_ret retval;
1855 
1856 	cdm = (struct ccb_dev_match *)arg;
1857 
1858 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1859 
1860 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1861 		cdm->status = CAM_DEV_MATCH_ERROR;
1862 		return(0);
1863 	}
1864 
1865 	/*
1866 	 * If the copy flag is set, copy this peripheral out.
1867 	 */
1868 	if (retval & DM_RET_COPY) {
1869 		int spaceleft, j;
1870 		size_t l;
1871 
1872 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1873 			sizeof(struct dev_match_result));
1874 
1875 		/*
1876 		 * If we don't have enough space to put in another
1877 		 * match result, save our position and tell the
1878 		 * user there are more devices to check.
1879 		 */
1880 		if (spaceleft < sizeof(struct dev_match_result)) {
1881 			bzero(&cdm->pos, sizeof(cdm->pos));
1882 			cdm->pos.position_type =
1883 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1884 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1885 				CAM_DEV_POS_PERIPH;
1886 
1887 			cdm->pos.cookie.bus = periph->path->bus;
1888 			cdm->pos.generations[CAM_BUS_GENERATION]=
1889 				xsoftc.bus_generation;
1890 			cdm->pos.cookie.target = periph->path->target;
1891 			cdm->pos.generations[CAM_TARGET_GENERATION] =
1892 				periph->path->bus->generation;
1893 			cdm->pos.cookie.device = periph->path->device;
1894 			cdm->pos.generations[CAM_DEV_GENERATION] =
1895 				periph->path->target->generation;
1896 			cdm->pos.cookie.periph = periph;
1897 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1898 				periph->path->device->generation;
1899 			cdm->status = CAM_DEV_MATCH_MORE;
1900 			return(0);
1901 		}
1902 
1903 		j = cdm->num_matches;
1904 		cdm->num_matches++;
1905 		cdm->matches[j].type = DEV_MATCH_PERIPH;
1906 		cdm->matches[j].result.periph_result.path_id =
1907 			periph->path->bus->path_id;
1908 		cdm->matches[j].result.periph_result.target_id =
1909 			periph->path->target->target_id;
1910 		cdm->matches[j].result.periph_result.target_lun =
1911 			periph->path->device->lun_id;
1912 		cdm->matches[j].result.periph_result.unit_number =
1913 			periph->unit_number;
1914 		l = sizeof(cdm->matches[j].result.periph_result.periph_name);
1915 		strlcpy(cdm->matches[j].result.periph_result.periph_name,
1916 			periph->periph_name, l);
1917 	}
1918 
1919 	return(1);
1920 }
1921 
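/*
 * Top-level entry for an EDT-ordered match: resume from any saved bus
 * position (after checking that the bus list generation is unchanged),
 * then walk the bus list.  A return of 1 means the whole tree was
 * visited and the match is complete.
 */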
1922 static int
1923 xptedtmatch(struct ccb_dev_match *cdm)
1924 {
1925 	struct cam_eb *bus;
1926 	int ret;
1927 
1928 	cdm->num_matches = 0;
1929 
1930 	/*
1931 	 * Check the bus list generation.  If it has changed, the user
1932 	 * needs to reset everything and start over.
1933 	 */
1934 	xpt_lock_buses();
1935 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1936 	 && (cdm->pos.cookie.bus != NULL)) {
1937 		if (cdm->pos.generations[CAM_BUS_GENERATION] !=
1938 		    xsoftc.bus_generation) {
1939 			xpt_unlock_buses();
1940 			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1941 			return(0);
1942 		}
1943 		bus = (struct cam_eb *)cdm->pos.cookie.bus;
1944 		bus->refcount++;
1945 	} else
1946 		bus = NULL;
1947 	xpt_unlock_buses();
1948 
1949 	ret = xptbustraverse(bus, xptedtbusfunc, cdm);
1950 
1951 	/*
1952 	 * If we get back 0, that means that we had to stop before fully
1953 	 * traversing the EDT.  It also means that one of the subroutines
1954 	 * has set the status field to the proper value.  If we get back 1,
1955 	 * we've fully traversed the EDT and copied out any matching entries.
1956 	 */
1957 	if (ret == 1)
1958 		cdm->status = CAM_DEV_MATCH_LAST;
1959 
1960 	return(ret);
1961 }
1962 
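/*
 * The xptplist*func callbacks implement the same match protocol over
 * the peripheral driver lists instead of the EDT: for each peripheral
 * driver in the linker set, walk its list of unit instances.
 */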
1963 static int
1964 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
1965 {
1966 	struct cam_periph *periph;
1967 	struct ccb_dev_match *cdm;
1968 
1969 	cdm = (struct ccb_dev_match *)arg;
1970 
1971 	xpt_lock_buses();
1972 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1973 	 && (cdm->pos.cookie.pdrv == pdrv)
1974 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1975 	 && (cdm->pos.cookie.periph != NULL)) {
1976 		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1977 		    (*pdrv)->generation) {
1978 			xpt_unlock_buses();
1979 			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1980 			return(0);
1981 		}
1982 		periph = (struct cam_periph *)cdm->pos.cookie.periph;
1983 		periph->refcount++;
1984 	} else
1985 		periph = NULL;
1986 	xpt_unlock_buses();
1987 
1988 	return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
1989 }
1990 
1991 static int
1992 xptplistperiphfunc(struct cam_periph *periph, void *arg)
1993 {
1994 	struct ccb_dev_match *cdm;
1995 	dev_match_ret retval;
1996 
1997 	cdm = (struct ccb_dev_match *)arg;
1998 
1999 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2000 
2001 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2002 		cdm->status = CAM_DEV_MATCH_ERROR;
2003 		return(0);
2004 	}
2005 
2006 	/*
2007 	 * If the copy flag is set, copy this peripheral out.
2008 	 */
2009 	if (retval & DM_RET_COPY) {
2010 		int spaceleft, j;
2011 		size_t l;
2012 
2013 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2014 			sizeof(struct dev_match_result));
2015 
2016 		/*
2017 		 * If we don't have enough space to put in another
2018 		 * match result, save our position and tell the
2019 		 * user there are more devices to check.
2020 		 */
2021 		if (spaceleft < sizeof(struct dev_match_result)) {
2022 			struct periph_driver **pdrv;
2023 
2024 			pdrv = NULL;
2025 			bzero(&cdm->pos, sizeof(cdm->pos));
2026 			cdm->pos.position_type =
2027 				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2028 				CAM_DEV_POS_PERIPH;
2029 
2030 			/*
2031 			 * This may look a bit nonsensical, but it is
2032 			 * actually quite logical.  There are very few
2033 			 * peripheral drivers, and bloating every peripheral
2034 			 * structure with a pointer back to its parent
2035 			 * peripheral driver linker set entry would cost
2036 			 * more in the long run than doing this quick lookup.
2037 			 */
2038 			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2039 				if (strcmp((*pdrv)->driver_name,
2040 				    periph->periph_name) == 0)
2041 					break;
2042 			}
2043 
2044 			if (*pdrv == NULL) {
2045 				cdm->status = CAM_DEV_MATCH_ERROR;
2046 				return(0);
2047 			}
2048 
2049 			cdm->pos.cookie.pdrv = pdrv;
2050 			/*
2051 			 * The periph generation slot does double duty, as
2052 			 * does the periph pointer slot.  They are used for
2053 			 * both edt and pdrv lookups and positioning.
2054 			 */
2055 			cdm->pos.cookie.periph = periph;
2056 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2057 				(*pdrv)->generation;
2058 			cdm->status = CAM_DEV_MATCH_MORE;
2059 			return(0);
2060 		}
2061 
2062 		j = cdm->num_matches;
2063 		cdm->num_matches++;
2064 		cdm->matches[j].type = DEV_MATCH_PERIPH;
2065 		cdm->matches[j].result.periph_result.path_id =
2066 			periph->path->bus->path_id;
2067 
2068 		/*
2069 		 * The transport layer peripheral doesn't have a target or
2070 		 * lun.
2071 		 */
2072 		if (periph->path->target)
2073 			cdm->matches[j].result.periph_result.target_id =
2074 				periph->path->target->target_id;
2075 		else
2076 			cdm->matches[j].result.periph_result.target_id =
2077 				CAM_TARGET_WILDCARD;
2078 
2079 		if (periph->path->device)
2080 			cdm->matches[j].result.periph_result.target_lun =
2081 				periph->path->device->lun_id;
2082 		else
2083 			cdm->matches[j].result.periph_result.target_lun =
2084 				CAM_LUN_WILDCARD;
2085 
2086 		cdm->matches[j].result.periph_result.unit_number =
2087 			periph->unit_number;
2088 		l = sizeof(cdm->matches[j].result.periph_result.periph_name);
2089 		strlcpy(cdm->matches[j].result.periph_result.periph_name,
2090 			periph->periph_name, l);
2091 	}
2092 
2093 	return(1);
2094 }
2095 
2096 static int
2097 xptperiphlistmatch(struct ccb_dev_match *cdm)
2098 {
2099 	int ret;
2100 
2101 	cdm->num_matches = 0;
2102 
2103 	/*
2104 	 * At the corresponding point in the EDT traversal function, we check
2105 	 * the bus list generation to make sure that no buses have been added
2106 	 * or removed since the user last sent an XPT_DEV_MATCH ccb through.
2107 	 * For the peripheral driver list traversal function, however, we
2108 	 * don't have to worry about new peripheral driver types coming or
2109 	 * going; they're in a linker set, and therefore can't change
2110 	 * without a recompile.
2111 	 */
2112 
2113 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2114 	 && (cdm->pos.cookie.pdrv != NULL))
2115 		ret = xptpdrvtraverse(
2116 				(struct periph_driver **)cdm->pos.cookie.pdrv,
2117 				xptplistpdrvfunc, cdm);
2118 	else
2119 		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2120 
2121 	/*
2122 	 * If we get back 0, that means that we had to stop before fully
2123 	 * traversing the peripheral driver tree.  It also means that one of
2124 	 * the subroutines has set the status field to the proper value.  If
2125 	 * we get back 1, we've fully traversed the peripheral driver tree and
2126 	 * copied out any matching entries.
2127 	 */
2128 	if (ret == 1)
2129 		cdm->status = CAM_DEV_MATCH_LAST;
2130 
2131 	return(ret);
2132 }
2133 
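/*
 * The xpt*traverse() helpers below share one reference-counting idiom:
 * call tr_func on the current node without any list lock held, then
 * retake the lock just long enough to find and reference the next node
 * before the current node's reference is dropped.  This keeps every
 * visited node alive across its callback while still allowing the tree
 * to change during the traversal.  The peripheral flavors additionally
 * skip entries marked CAM_PERIPH_FREE.
 */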
2134 static int
2135 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2136 {
2137 	struct cam_eb *bus, *next_bus;
2138 	int retval;
2139 
2140 	retval = 1;
2141 	if (start_bus)
2142 		bus = start_bus;
2143 	else {
2144 		xpt_lock_buses();
2145 		bus = TAILQ_FIRST(&xsoftc.xpt_busses);
2146 		if (bus == NULL) {
2147 			xpt_unlock_buses();
2148 			return (retval);
2149 		}
2150 		bus->refcount++;
2151 		xpt_unlock_buses();
2152 	}
2153 	for (; bus != NULL; bus = next_bus) {
2154 		retval = tr_func(bus, arg);
2155 		if (retval == 0) {
2156 			xpt_release_bus(bus);
2157 			break;
2158 		}
2159 		xpt_lock_buses();
2160 		next_bus = TAILQ_NEXT(bus, links);
2161 		if (next_bus)
2162 			next_bus->refcount++;
2163 		xpt_unlock_buses();
2164 		xpt_release_bus(bus);
2165 	}
2166 	return(retval);
2167 }
2168 
2169 static int
2170 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2171 		  xpt_targetfunc_t *tr_func, void *arg)
2172 {
2173 	struct cam_et *target, *next_target;
2174 	int retval;
2175 
2176 	retval = 1;
2177 	if (start_target)
2178 		target = start_target;
2179 	else {
2180 		mtx_lock(&bus->eb_mtx);
2181 		target = TAILQ_FIRST(&bus->et_entries);
2182 		if (target == NULL) {
2183 			mtx_unlock(&bus->eb_mtx);
2184 			return (retval);
2185 		}
2186 		target->refcount++;
2187 		mtx_unlock(&bus->eb_mtx);
2188 	}
2189 	for (; target != NULL; target = next_target) {
2190 		retval = tr_func(target, arg);
2191 		if (retval == 0) {
2192 			xpt_release_target(target);
2193 			break;
2194 		}
2195 		mtx_lock(&bus->eb_mtx);
2196 		next_target = TAILQ_NEXT(target, links);
2197 		if (next_target)
2198 			next_target->refcount++;
2199 		mtx_unlock(&bus->eb_mtx);
2200 		xpt_release_target(target);
2201 	}
2202 	return(retval);
2203 }
2204 
2205 static int
2206 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2207 		  xpt_devicefunc_t *tr_func, void *arg)
2208 {
2209 	struct cam_eb *bus;
2210 	struct cam_ed *device, *next_device;
2211 	int retval;
2212 
2213 	retval = 1;
2214 	bus = target->bus;
2215 	if (start_device)
2216 		device = start_device;
2217 	else {
2218 		mtx_lock(&bus->eb_mtx);
2219 		device = TAILQ_FIRST(&target->ed_entries);
2220 		if (device == NULL) {
2221 			mtx_unlock(&bus->eb_mtx);
2222 			return (retval);
2223 		}
2224 		device->refcount++;
2225 		mtx_unlock(&bus->eb_mtx);
2226 	}
2227 	for (; device != NULL; device = next_device) {
2228 		mtx_lock(&device->device_mtx);
2229 		retval = tr_func(device, arg);
2230 		mtx_unlock(&device->device_mtx);
2231 		if (retval == 0) {
2232 			xpt_release_device(device);
2233 			break;
2234 		}
2235 		mtx_lock(&bus->eb_mtx);
2236 		next_device = TAILQ_NEXT(device, links);
2237 		if (next_device)
2238 			next_device->refcount++;
2239 		mtx_unlock(&bus->eb_mtx);
2240 		xpt_release_device(device);
2241 	}
2242 	return(retval);
2243 }
2244 
2245 static int
2246 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2247 		  xpt_periphfunc_t *tr_func, void *arg)
2248 {
2249 	struct cam_eb *bus;
2250 	struct cam_periph *periph, *next_periph;
2251 	int retval;
2252 
2253 	retval = 1;
2254 
2255 	bus = device->target->bus;
2256 	if (start_periph)
2257 		periph = start_periph;
2258 	else {
2259 		xpt_lock_buses();
2260 		mtx_lock(&bus->eb_mtx);
2261 		periph = SLIST_FIRST(&device->periphs);
2262 		while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2263 			periph = SLIST_NEXT(periph, periph_links);
2264 		if (periph == NULL) {
2265 			mtx_unlock(&bus->eb_mtx);
2266 			xpt_unlock_buses();
2267 			return (retval);
2268 		}
2269 		periph->refcount++;
2270 		mtx_unlock(&bus->eb_mtx);
2271 		xpt_unlock_buses();
2272 	}
2273 	for (; periph != NULL; periph = next_periph) {
2274 		retval = tr_func(periph, arg);
2275 		if (retval == 0) {
2276 			cam_periph_release_locked(periph);
2277 			break;
2278 		}
2279 		xpt_lock_buses();
2280 		mtx_lock(&bus->eb_mtx);
2281 		next_periph = SLIST_NEXT(periph, periph_links);
2282 		while (next_periph != NULL &&
2283 		    (next_periph->flags & CAM_PERIPH_FREE) != 0)
2284 			next_periph = SLIST_NEXT(next_periph, periph_links);
2285 		if (next_periph)
2286 			next_periph->refcount++;
2287 		mtx_unlock(&bus->eb_mtx);
2288 		xpt_unlock_buses();
2289 		cam_periph_release_locked(periph);
2290 	}
2291 	return(retval);
2292 }
2293 
2294 static int
2295 xptpdrvtraverse(struct periph_driver **start_pdrv,
2296 		xpt_pdrvfunc_t *tr_func, void *arg)
2297 {
2298 	struct periph_driver **pdrv;
2299 	int retval;
2300 
2301 	retval = 1;
2302 
2303 	/*
2304 	 * We don't traverse the peripheral driver list like we do the
2305 	 * other lists, because it is a linker set, and therefore cannot be
2306 	 * changed during runtime.  If the peripheral driver list is ever
2307 	 * re-done to be something other than a linker set (i.e. it can
2308 	 * change while the system is running), the list traversal should
2309 	 * be modified to work like the other traversal functions.
2310 	 */
2311 	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2312 	     *pdrv != NULL; pdrv++) {
2313 		retval = tr_func(pdrv, arg);
2314 
2315 		if (retval == 0)
2316 			return(retval);
2317 	}
2318 
2319 	return(retval);
2320 }
2321 
2322 static int
2323 xptpdperiphtraverse(struct periph_driver **pdrv,
2324 		    struct cam_periph *start_periph,
2325 		    xpt_periphfunc_t *tr_func, void *arg)
2326 {
2327 	struct cam_periph *periph, *next_periph;
2328 	int retval;
2329 
2330 	retval = 1;
2331 
2332 	if (start_periph)
2333 		periph = start_periph;
2334 	else {
2335 		xpt_lock_buses();
2336 		periph = TAILQ_FIRST(&(*pdrv)->units);
2337 		while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2338 			periph = TAILQ_NEXT(periph, unit_links);
2339 		if (periph == NULL) {
2340 			xpt_unlock_buses();
2341 			return (retval);
2342 		}
2343 		periph->refcount++;
2344 		xpt_unlock_buses();
2345 	}
2346 	for (; periph != NULL; periph = next_periph) {
2347 		cam_periph_lock(periph);
2348 		retval = tr_func(periph, arg);
2349 		cam_periph_unlock(periph);
2350 		if (retval == 0) {
2351 			cam_periph_release(periph);
2352 			break;
2353 		}
2354 		xpt_lock_buses();
2355 		next_periph = TAILQ_NEXT(periph, unit_links);
2356 		while (next_periph != NULL &&
2357 		    (next_periph->flags & CAM_PERIPH_FREE) != 0)
2358 			next_periph = TAILQ_NEXT(next_periph, unit_links);
2359 		if (next_periph)
2360 			next_periph->refcount++;
2361 		xpt_unlock_buses();
2362 		cam_periph_release(periph);
2363 	}
2364 	return(retval);
2365 }
2366 
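/*
 * The xptdef*func callbacks implement the generic "for all" walkers:
 * each level either hands the node to the user's function (when the
 * configured depth has been reached) or recurses one level deeper with
 * the same traverse configuration.
 */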
2367 static int
2368 xptdefbusfunc(struct cam_eb *bus, void *arg)
2369 {
2370 	struct xpt_traverse_config *tr_config;
2371 
2372 	tr_config = (struct xpt_traverse_config *)arg;
2373 
2374 	if (tr_config->depth == XPT_DEPTH_BUS) {
2375 		xpt_busfunc_t *tr_func;
2376 
2377 		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2378 
2379 		return(tr_func(bus, tr_config->tr_arg));
2380 	} else
2381 		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2382 }
2383 
2384 static int
2385 xptdeftargetfunc(struct cam_et *target, void *arg)
2386 {
2387 	struct xpt_traverse_config *tr_config;
2388 
2389 	tr_config = (struct xpt_traverse_config *)arg;
2390 
2391 	if (tr_config->depth == XPT_DEPTH_TARGET) {
2392 		xpt_targetfunc_t *tr_func;
2393 
2394 		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2395 
2396 		return(tr_func(target, tr_config->tr_arg));
2397 	} else
2398 		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2399 }
2400 
2401 static int
2402 xptdefdevicefunc(struct cam_ed *device, void *arg)
2403 {
2404 	struct xpt_traverse_config *tr_config;
2405 
2406 	tr_config = (struct xpt_traverse_config *)arg;
2407 
2408 	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2409 		xpt_devicefunc_t *tr_func;
2410 
2411 		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2412 
2413 		return(tr_func(device, tr_config->tr_arg));
2414 	} else
2415 		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2416 }
2417 
2418 static int
2419 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2420 {
2421 	struct xpt_traverse_config *tr_config;
2422 	xpt_periphfunc_t *tr_func;
2423 
2424 	tr_config = (struct xpt_traverse_config *)arg;
2425 
2426 	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2427 
2428 	/*
2429 	 * Unlike the other default functions, we don't check for depth
2430 	 * here.  The peripheral driver level is the last level in the EDT,
2431 	 * so if we're here, we should execute the function in question.
2432 	 */
2433 	return(tr_func(periph, tr_config->tr_arg));
2434 }
2435 
2436 /*
2437  * Execute the given function for every bus in the EDT.
2438  */
2439 static int
2440 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2441 {
2442 	struct xpt_traverse_config tr_config;
2443 
2444 	tr_config.depth = XPT_DEPTH_BUS;
2445 	tr_config.tr_func = tr_func;
2446 	tr_config.tr_arg = arg;
2447 
2448 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2449 }
2450 
2451 /*
2452  * Execute the given function for every device in the EDT.
2453  */
2454 static int
2455 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2456 {
2457 	struct xpt_traverse_config tr_config;
2458 
2459 	tr_config.depth = XPT_DEPTH_DEVICE;
2460 	tr_config.tr_func = tr_func;
2461 	tr_config.tr_arg = arg;
2462 
2463 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2464 }
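/*
 * Example (hypothetical callback, for illustration only): counting
 * every device in the EDT might look like
 *
 *	static int
 *	count_dev(struct cam_ed *device, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (1);
 *	}
 *	...
 *	int count = 0;
 *	xpt_for_all_devices(count_dev, &count);
 *
 * A nonzero return from the callback continues the traversal; zero
 * stops it early.
 */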
2465 
2466 static int
2467 xptsetasyncfunc(struct cam_ed *device, void *arg)
2468 {
2469 	struct cam_path path;
2470 	struct ccb_getdev cgd;
2471 	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2472 
2473 	/*
2474 	 * Don't report unconfigured devices (Wildcard devs,
2475 	 * devices only for target mode, device instances
2476 	 * that have been invalidated but are waiting for
2477 	 * their last reference count to be released).
2478 	 */
2479 	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2480 		return (1);
2481 
2482 	xpt_compile_path(&path,
2483 			 NULL,
2484 			 device->target->bus->path_id,
2485 			 device->target->target_id,
2486 			 device->lun_id);
2487 	xpt_gdev_type(&cgd, &path);
2488 	CAM_PROBE4(xpt, async__cb, csa->callback_arg,
2489 	    AC_FOUND_DEVICE, &path, &cgd);
2490 	csa->callback(csa->callback_arg,
2491 			    AC_FOUND_DEVICE,
2492 			    &path, &cgd);
2493 	xpt_release_path(&path);
2494 
2495 	return(1);
2496 }
2497 
2498 static int
2499 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2500 {
2501 	struct cam_path path;
2502 	struct ccb_pathinq cpi;
2503 	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2504 
2505 	xpt_compile_path(&path, /*periph*/NULL,
2506 			 bus->path_id,
2507 			 CAM_TARGET_WILDCARD,
2508 			 CAM_LUN_WILDCARD);
2509 	xpt_path_lock(&path);
2510 	xpt_path_inq(&cpi, &path);
2511 	CAM_PROBE4(xpt, async__cb, csa->callback_arg,
2512 	    AC_PATH_REGISTERED, &path, &cpi);
2513 	csa->callback(csa->callback_arg,
2514 			    AC_PATH_REGISTERED,
2515 			    &path, &cpi);
2516 	xpt_path_unlock(&path);
2517 	xpt_release_path(&path);
2518 
2519 	return(1);
2520 }
2521 
2522 void
2523 xpt_action(union ccb *start_ccb)
2524 {
2525 
2526 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
2527 	    ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code,
2528 		xpt_action_name(start_ccb->ccb_h.func_code)));
2529 
2530 	/*
2531 	 * Either it isn't queued, or it has a real priority.  There are still
2532 	 * too many places that reuse CCBs with a real priority for immediate
2533 	 * queries to assert the other side of this.
2534 	 */
2535 	KASSERT((start_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 ||
2536 	    start_ccb->ccb_h.pinfo.priority != CAM_PRIORITY_NONE,
2537 	    ("%s: queued ccb and CAM_PRIORITY_NONE illegal.", __func__));
2538 
2539 	CAM_PROBE1(xpt, action, start_ccb);
2540 	start_ccb->ccb_h.status = CAM_REQ_INPROG;
2541 	(*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb);
2542 }
2543 
2544 void
2545 xpt_action_default(union ccb *start_ccb)
2546 {
2547 	struct cam_path *path;
2548 	struct cam_sim *sim;
2549 	struct mtx *mtx;
2550 
2551 	path = start_ccb->ccb_h.path;
2552 	CAM_DEBUG(path, CAM_DEBUG_TRACE,
2553 	    ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code,
2554 		xpt_action_name(start_ccb->ccb_h.func_code)));
2555 
2556 	switch (start_ccb->ccb_h.func_code) {
2557 	case XPT_SCSI_IO:
2558 	{
2559 		struct cam_ed *device;
2560 
2561 		/*
2562 		 * For the sake of compatibility with SCSI-1
2563 		 * devices that may not understand the identify
2564 		 * message, we include lun information in the
2565 		 * second byte of all commands.  SCSI-1 specifies
2566 		 * that luns are a 3 bit value and reserves only 3
2567 		 * bits for lun information in the CDB.  Later
2568 		 * revisions of the SCSI spec allow for more than 8
2569 		 * luns, but have deprecated lun information in the
2570 		 * CDB.  So, if the lun won't fit, we must omit it.
2571 		 *
2572 		 * Also be aware that during initial probing for devices,
2573 		 * the inquiry information is unknown but initialized to 0.
2574 		 * This means that this code will be exercised while probing
2575 		 * devices with an ANSI revision greater than 2.
2576 		 */
2577 		device = path->device;
2578 		if (device->protocol_version <= SCSI_REV_2
2579 		 && start_ccb->ccb_h.target_lun < 8
2580 		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2581 			start_ccb->csio.cdb_io.cdb_bytes[1] |=
2582 			    start_ccb->ccb_h.target_lun << 5;
2583 		}
2584 		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2585 	}
2586 	/* FALLTHROUGH */
2587 	case XPT_TARGET_IO:
2588 	case XPT_CONT_TARGET_IO:
2589 		start_ccb->csio.sense_resid = 0;
2590 		start_ccb->csio.resid = 0;
2591 		/* FALLTHROUGH */
2592 	case XPT_ATA_IO:
2593 		if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
2594 			start_ccb->ataio.resid = 0;
2595 		/* FALLTHROUGH */
2596 	case XPT_NVME_IO:
2597 	case XPT_NVME_ADMIN:
2598 	case XPT_MMC_IO:
2599 	case XPT_MMC_GET_TRAN_SETTINGS:
2600 	case XPT_MMC_SET_TRAN_SETTINGS:
2601 	case XPT_RESET_DEV:
2602 	case XPT_ENG_EXEC:
2603 	case XPT_SMP_IO:
2604 	{
2605 		struct cam_devq *devq;
2606 
2607 		devq = path->bus->sim->devq;
2608 		mtx_lock(&devq->send_mtx);
2609 		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2610 		if (xpt_schedule_devq(devq, path->device) != 0)
2611 			xpt_run_devq(devq);
2612 		mtx_unlock(&devq->send_mtx);
2613 		break;
2614 	}
2615 	case XPT_CALC_GEOMETRY:
2616 		/* Filter out garbage */
2617 		if (start_ccb->ccg.block_size == 0
2618 		 || start_ccb->ccg.volume_size == 0) {
2619 			start_ccb->ccg.cylinders = 0;
2620 			start_ccb->ccg.heads = 0;
2621 			start_ccb->ccg.secs_per_track = 0;
2622 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2623 			break;
2624 		}
2625 		goto call_sim;
2626 	case XPT_ABORT:
2627 	{
2628 		union ccb* abort_ccb;
2629 
2630 		abort_ccb = start_ccb->cab.abort_ccb;
2631 		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2632 			struct cam_ed *device;
2633 			struct cam_devq *devq;
2634 
2635 			device = abort_ccb->ccb_h.path->device;
2636 			devq = device->sim->devq;
2637 
2638 			mtx_lock(&devq->send_mtx);
2639 			if (abort_ccb->ccb_h.pinfo.index > 0) {
2640 				cam_ccbq_remove_ccb(&device->ccbq, abort_ccb);
2641 				abort_ccb->ccb_h.status =
2642 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2643 				xpt_freeze_devq_device(device, 1);
2644 				mtx_unlock(&devq->send_mtx);
2645 				xpt_done(abort_ccb);
2646 				start_ccb->ccb_h.status = CAM_REQ_CMP;
2647 				break;
2648 			}
2649 			mtx_unlock(&devq->send_mtx);
2650 
2651 			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2652 			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2653 				/*
2654 				 * We've caught this ccb en route to
2655 				 * the SIM.  Flag it for abort and the
2656 				 * SIM will do so just before starting
2657 				 * real work on the CCB.
2658 				 */
2659 				abort_ccb->ccb_h.status =
2660 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2661 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2662 				start_ccb->ccb_h.status = CAM_REQ_CMP;
2663 				break;
2664 			}
2665 		}
2666 		if (XPT_FC_IS_QUEUED(abort_ccb)
2667 		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2668 			/*
2669 			 * It's already completed but waiting
2670 			 * for our SWI to get to it.
2671 			 */
2672 			start_ccb->ccb_h.status = CAM_UA_ABORT;
2673 			break;
2674 		}
2675 		/*
2676 		 * If we weren't able to take care of the abort request
2677 		 * in the XPT, pass the request down to the SIM for processing.
2678 		 */
2679 	}
2680 	/* FALLTHROUGH */
2681 	case XPT_ACCEPT_TARGET_IO:
2682 	case XPT_EN_LUN:
2683 	case XPT_IMMED_NOTIFY:
2684 	case XPT_NOTIFY_ACK:
2685 	case XPT_RESET_BUS:
2686 	case XPT_IMMEDIATE_NOTIFY:
2687 	case XPT_NOTIFY_ACKNOWLEDGE:
2688 	case XPT_GET_SIM_KNOB_OLD:
2689 	case XPT_GET_SIM_KNOB:
2690 	case XPT_SET_SIM_KNOB:
2691 	case XPT_GET_TRAN_SETTINGS:
2692 	case XPT_SET_TRAN_SETTINGS:
2693 	case XPT_PATH_INQ:
2694 call_sim:
2695 		sim = path->bus->sim;
2696 		mtx = sim->mtx;
2697 		if (mtx && !mtx_owned(mtx))
2698 			mtx_lock(mtx);
2699 		else
2700 			mtx = NULL;
2701 
2702 		CAM_DEBUG(path, CAM_DEBUG_TRACE,
2703 		    ("Calling sim->sim_action(): func=%#x\n", start_ccb->ccb_h.func_code));
2704 		(*(sim->sim_action))(sim, start_ccb);
2705 		CAM_DEBUG(path, CAM_DEBUG_TRACE,
2706 		    ("sim->sim_action returned: status=%#x\n", start_ccb->ccb_h.status));
2707 		if (mtx)
2708 			mtx_unlock(mtx);
2709 		break;
2710 	case XPT_PATH_STATS:
2711 		start_ccb->cpis.last_reset = path->bus->last_reset;
2712 		start_ccb->ccb_h.status = CAM_REQ_CMP;
2713 		break;
2714 	case XPT_GDEV_TYPE:
2715 	{
2716 		struct cam_ed *dev;
2717 
2718 		dev = path->device;
2719 		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2720 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2721 		} else {
2722 			struct ccb_getdev *cgd;
2723 
2724 			cgd = &start_ccb->cgd;
2725 			cgd->protocol = dev->protocol;
2726 			cgd->inq_data = dev->inq_data;
2727 			cgd->ident_data = dev->ident_data;
2728 			cgd->inq_flags = dev->inq_flags;
2729 			cgd->ccb_h.status = CAM_REQ_CMP;
2730 			cgd->serial_num_len = dev->serial_num_len;
2731 			if ((dev->serial_num_len > 0)
2732 			 && (dev->serial_num != NULL))
2733 				bcopy(dev->serial_num, cgd->serial_num,
2734 				      dev->serial_num_len);
2735 		}
2736 		break;
2737 	}
2738 	case XPT_GDEV_STATS:
2739 	{
2740 		struct ccb_getdevstats *cgds = &start_ccb->cgds;
2741 		struct cam_ed *dev = path->device;
2742 		struct cam_eb *bus = path->bus;
2743 		struct cam_et *tar = path->target;
2744 		struct cam_devq *devq = bus->sim->devq;
2745 
2746 		mtx_lock(&devq->send_mtx);
2747 		cgds->dev_openings = dev->ccbq.dev_openings;
2748 		cgds->dev_active = dev->ccbq.dev_active;
2749 		cgds->allocated = dev->ccbq.allocated;
2750 		cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
2751 		cgds->held = cgds->allocated - cgds->dev_active - cgds->queued;
2752 		cgds->last_reset = tar->last_reset;
2753 		cgds->maxtags = dev->maxtags;
2754 		cgds->mintags = dev->mintags;
2755 		if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2756 			cgds->last_reset = bus->last_reset;
2757 		mtx_unlock(&devq->send_mtx);
2758 		cgds->ccb_h.status = CAM_REQ_CMP;
2759 		break;
2760 	}
2761 	case XPT_GDEVLIST:
2762 	{
2763 		struct cam_periph	*nperiph;
2764 		struct periph_list	*periph_head;
2765 		struct ccb_getdevlist	*cgdl;
2766 		u_int			i;
2767 		struct cam_ed		*device;
2768 		bool			found;
2769 
2770 		found = false;
2771 
2772 		/*
2773 		 * Don't want anyone mucking with our data.
2774 		 */
2775 		device = path->device;
2776 		periph_head = &device->periphs;
2777 		cgdl = &start_ccb->cgdl;
2778 
2779 		/*
2780 		 * Check and see if the list has changed since the user
2781 		 * last requested a list member.  If so, tell them that the
2782 		 * list has changed, and therefore they need to start over
2783 		 * from the beginning.
2784 		 */
2785 		if ((cgdl->index != 0) &&
2786 		    (cgdl->generation != device->generation)) {
2787 			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2788 			break;
2789 		}
2790 
2791 		/*
2792 		 * Traverse the list of peripherals and attempt to find
2793 		 * the requested peripheral.
2794 		 */
2795 		for (nperiph = SLIST_FIRST(periph_head), i = 0;
2796 		     (nperiph != NULL) && (i <= cgdl->index);
2797 		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
2798 			if (i == cgdl->index) {
2799 				strlcpy(cgdl->periph_name,
2800 					nperiph->periph_name,
2801 					sizeof(cgdl->periph_name));
2802 				cgdl->unit_number = nperiph->unit_number;
2803 				found = true;
2804 			}
2805 		}
2806 		if (!found) {
2807 			cgdl->status = CAM_GDEVLIST_ERROR;
2808 			break;
2809 		}
2810 
2811 		if (nperiph == NULL)
2812 			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2813 		else
2814 			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2815 
2816 		cgdl->index++;
2817 		cgdl->generation = device->generation;
2818 
2819 		cgdl->ccb_h.status = CAM_REQ_CMP;
2820 		break;
2821 	}
2822 	case XPT_DEV_MATCH:
2823 	{
2824 		dev_pos_type position_type;
2825 		struct ccb_dev_match *cdm;
2826 
2827 		cdm = &start_ccb->cdm;
2828 
2829 		/*
2830 		 * There are two ways of getting at information in the EDT.
2831 		 * The first way is via the primary EDT tree.  It starts
2832 		 * with a list of buses, then a list of targets on a bus,
2833 		 * then devices/luns on a target, and then peripherals on a
2834 		 * device/lun.  The "other" way is by the peripheral driver
2835 		 * lists.  The peripheral driver lists are organized by
2836 		 * peripheral driver.  (obviously)  So it makes sense to
2837 		 * use the peripheral driver list if the user is looking
2838 		 * for something like "da1", or all "da" devices.  If the
2839 		 * user is looking for something on a particular bus/target
2840 		 * or lun, it's generally better to go through the EDT tree.
2841 		 */
2842 
2843 		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2844 			position_type = cdm->pos.position_type;
2845 		else {
2846 			u_int i;
2847 
2848 			position_type = CAM_DEV_POS_NONE;
2849 
2850 			for (i = 0; i < cdm->num_patterns; i++) {
2851 				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2852 				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
2853 					position_type = CAM_DEV_POS_EDT;
2854 					break;
2855 				}
2856 			}
2857 
2858 			if (cdm->num_patterns == 0)
2859 				position_type = CAM_DEV_POS_EDT;
2860 			else if (position_type == CAM_DEV_POS_NONE)
2861 				position_type = CAM_DEV_POS_PDRV;
2862 		}
2863 
2864 		switch(position_type & CAM_DEV_POS_TYPEMASK) {
2865 		case CAM_DEV_POS_EDT:
2866 			xptedtmatch(cdm);
2867 			break;
2868 		case CAM_DEV_POS_PDRV:
2869 			xptperiphlistmatch(cdm);
2870 			break;
2871 		default:
2872 			cdm->status = CAM_DEV_MATCH_ERROR;
2873 			break;
2874 		}
2875 
2876 		if (cdm->status == CAM_DEV_MATCH_ERROR)
2877 			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2878 		else
2879 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2880 
2881 		break;
2882 	}
2883 	case XPT_SASYNC_CB:
2884 	{
2885 		struct ccb_setasync *csa;
2886 		struct async_node *cur_entry;
2887 		struct async_list *async_head;
2888 		uint32_t added;
2889 
2890 		csa = &start_ccb->csa;
2891 		added = csa->event_enable;
2892 		async_head = &path->device->asyncs;
2893 
2894 		/*
2895 		 * If there is already an entry for us, simply
2896 		 * update it.
2897 		 */
2898 		cur_entry = SLIST_FIRST(async_head);
2899 		while (cur_entry != NULL) {
2900 			if ((cur_entry->callback_arg == csa->callback_arg)
2901 			 && (cur_entry->callback == csa->callback))
2902 				break;
2903 			cur_entry = SLIST_NEXT(cur_entry, links);
2904 		}
2905 
2906 		if (cur_entry != NULL) {
2907 		 	/*
2908 			 * If the request has no flags set,
2909 			 * remove the entry.
2910 			 */
2911 			added &= ~cur_entry->event_enable;
2912 			if (csa->event_enable == 0) {
2913 				SLIST_REMOVE(async_head, cur_entry,
2914 					     async_node, links);
2915 				xpt_release_device(path->device);
2916 				free(cur_entry, M_CAMXPT);
2917 			} else {
2918 				cur_entry->event_enable = csa->event_enable;
2919 			}
2920 			csa->event_enable = added;
2921 		} else {
2922 			cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
2923 					   M_NOWAIT);
2924 			if (cur_entry == NULL) {
2925 				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
2926 				break;
2927 			}
2928 			cur_entry->event_enable = csa->event_enable;
2929 			cur_entry->event_lock = (path->bus->sim->mtx &&
2930 			    mtx_owned(path->bus->sim->mtx)) ? 1 : 0;
2931 			cur_entry->callback_arg = csa->callback_arg;
2932 			cur_entry->callback = csa->callback;
2933 			SLIST_INSERT_HEAD(async_head, cur_entry, links);
2934 			xpt_acquire_device(path->device);
2935 		}
2936 		start_ccb->ccb_h.status = CAM_REQ_CMP;
2937 		break;
2938 	}
2939 	case XPT_REL_SIMQ:
2940 	{
2941 		struct ccb_relsim *crs;
2942 		struct cam_ed *dev;
2943 
2944 		crs = &start_ccb->crs;
2945 		dev = path->device;
2946 		if (dev == NULL) {
2947 			crs->ccb_h.status = CAM_DEV_NOT_THERE;
2948 			break;
2949 		}
2950 
2951 		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
2952 			/* Don't ever go below one opening */
2953 			if (crs->openings > 0) {
2954 				xpt_dev_ccbq_resize(path, crs->openings);
2955 				if (bootverbose) {
2956 					xpt_print(path,
2957 					    "number of openings is now %d\n",
2958 					    crs->openings);
2959 				}
2960 			}
2961 		}
2962 
2963 		mtx_lock(&dev->sim->devq->send_mtx);
2964 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
2965 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
2966 				/*
2967 				 * Just extend the old timeout and decrement
2968 				 * the freeze count so that a single timeout
2969 				 * is sufficient for releasing the queue.
2970 				 */
2971 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2972 				callout_stop(&dev->callout);
2973 			} else {
2974 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2975 			}
2976 
2977 			callout_reset_sbt(&dev->callout,
2978 			    SBT_1MS * crs->release_timeout, SBT_1MS,
2979 			    xpt_release_devq_timeout, dev, 0);
2980 
2981 			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
2982 		}
2983 
2984 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
2985 			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
2986 				/*
2987 				 * Decrement the freeze count so that a single
2988 				 * completion is still sufficient to unfreeze
2989 				 * the queue.
2990 				 */
2991 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2992 			} else {
2993 				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
2994 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2995 			}
2996 		}
2997 
2998 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
2999 			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3000 			 || (dev->ccbq.dev_active == 0)) {
3001 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3002 			} else {
3003 				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3004 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3005 			}
3006 		}
3007 		mtx_unlock(&dev->sim->devq->send_mtx);
3008 
3009 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
3010 			xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
3011 		start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
3012 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3013 		break;
3014 	}
3015 	case XPT_DEBUG: {
3016 		struct cam_path *oldpath;
3017 
3018 		/* Check that all request bits are supported. */
3019 		if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
3020 			start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3021 			break;
3022 		}
3023 
3024 		cam_dflags = CAM_DEBUG_NONE;
3025 		if (cam_dpath != NULL) {
3026 			oldpath = cam_dpath;
3027 			cam_dpath = NULL;
3028 			xpt_free_path(oldpath);
3029 		}
3030 		if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
3031 			if (xpt_create_path(&cam_dpath, NULL,
3032 					    start_ccb->ccb_h.path_id,
3033 					    start_ccb->ccb_h.target_id,
3034 					    start_ccb->ccb_h.target_lun) !=
3035 					    CAM_REQ_CMP) {
3036 				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3037 			} else {
3038 				cam_dflags = start_ccb->cdbg.flags;
3039 				start_ccb->ccb_h.status = CAM_REQ_CMP;
3040 				xpt_print(cam_dpath, "debugging flags now %x\n",
3041 				    cam_dflags);
3042 			}
3043 		} else
3044 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3045 		break;
3046 	}
3047 	case XPT_NOOP:
3048 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3049 			xpt_freeze_devq(path, 1);
3050 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3051 		break;
3052 	case XPT_REPROBE_LUN:
3053 		xpt_async(AC_INQ_CHANGED, path, NULL);
3054 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3055 		xpt_done(start_ccb);
3056 		break;
3057 	case XPT_ASYNC:
3058 		/*
3059 		 * Queue the async operation so it can be run from a sleepable
3060 		 * context.
3061 		 */
3062 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3063 		mtx_lock(&cam_async.cam_doneq_mtx);
3064 		STAILQ_INSERT_TAIL(&cam_async.cam_doneq, &start_ccb->ccb_h, sim_links.stqe);
3065 		start_ccb->ccb_h.pinfo.index = CAM_ASYNC_INDEX;
3066 		mtx_unlock(&cam_async.cam_doneq_mtx);
3067 		wakeup(&cam_async.cam_doneq);
3068 		break;
3069 	default:
3070 	case XPT_SDEV_TYPE:
3071 	case XPT_TERM_IO:
3072 	case XPT_ENG_INQ:
3073 		/* XXX Implement */
3074 		xpt_print(start_ccb->ccb_h.path,
3075 		    "%s: CCB type %#x %s not supported\n", __func__,
3076 		    start_ccb->ccb_h.func_code,
3077 		    xpt_action_name(start_ccb->ccb_h.func_code));
3078 		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3079 		if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
3080 			xpt_done(start_ccb);
3081 		}
3082 		break;
3083 	}
3084 	CAM_DEBUG(path, CAM_DEBUG_TRACE,
3085 	    ("xpt_action_default: func= %#x %s status %#x\n",
3086 		start_ccb->ccb_h.func_code,
3087  		xpt_action_name(start_ccb->ccb_h.func_code),
3088 		start_ccb->ccb_h.status));
3089 }
3090 
3091 /*
3092  * Call the sim poll routine to allow the sim to complete
3093  * any inflight requests, then call camisr_runqueue to
3094  * finish off any CCBs that the poll completed.
3095  */
3096 void
3097 xpt_sim_poll(struct cam_sim *sim)
3098 {
3099 	struct mtx *mtx;
3100 
3101 	KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__));
3102 	mtx = sim->mtx;
3103 	if (mtx)
3104 		mtx_lock(mtx);
3105 	(*(sim->sim_poll))(sim);
3106 	if (mtx)
3107 		mtx_unlock(mtx);
3108 	camisr_runqueue();
3109 }
3110 
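/*
 * xpt_poll_setup() reserves a device opening for a polled CCB, polling
 * the SIM until an opening frees up or the timeout expires.  It pairs
 * with xpt_pollwait() below.  A sketch of the calling convention
 * (roughly what cam_periph_runccb() does for polled SIMs; error
 * handling omitted):
 *
 *	timeout = xpt_poll_setup(ccb);
 *	if (timeout > 0) {
 *		xpt_action(ccb);
 *		xpt_pollwait(ccb, timeout);
 *	} else
 *		ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 */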
3111 uint32_t
3112 xpt_poll_setup(union ccb *start_ccb)
3113 {
3114 	uint32_t timeout;
3115 	struct	  cam_sim *sim;
3116 	struct	  cam_devq *devq;
3117 	struct	  cam_ed *dev;
3118 
3119 	timeout = start_ccb->ccb_h.timeout * 10;
3120 	sim = start_ccb->ccb_h.path->bus->sim;
3121 	devq = sim->devq;
3122 	dev = start_ccb->ccb_h.path->device;
3123 
3124 	KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__));
3125 
3126 	/*
3127 	 * Steal an opening so that no other queued requests
3128 	 * can get it before us while we simulate interrupts.
3129 	 */
3130 	mtx_lock(&devq->send_mtx);
3131 	dev->ccbq.dev_openings--;
3132 	while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
3133 	    (--timeout > 0)) {
3134 		mtx_unlock(&devq->send_mtx);
3135 		DELAY(100);
3136 		xpt_sim_poll(sim);
3137 		mtx_lock(&devq->send_mtx);
3138 	}
3139 	dev->ccbq.dev_openings++;
3140 	mtx_unlock(&devq->send_mtx);
3141 
3142 	return (timeout);
3143 }
3144 
3145 void
3146 xpt_pollwait(union ccb *start_ccb, uint32_t timeout)
3147 {
3148 
3149 	KASSERT(cam_sim_pollable(start_ccb->ccb_h.path->bus->sim),
3150 	    ("%s: non-pollable sim", __func__));
3151 	while (--timeout > 0) {
3152 		xpt_sim_poll(start_ccb->ccb_h.path->bus->sim);
3153 		if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3154 		    != CAM_REQ_INPROG)
3155 			break;
3156 		DELAY(100);
3157 	}
3158 
3159 	if (timeout == 0) {
3160 		/*
3161 		 * XXX Is it worth adding a sim_timeout entry
3162 		 * point so we can attempt recovery?  If
3163 		 * this is only used for dumps, I don't think
3164 		 * it is.
3165 		 */
3166 		start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3167 	}
3168 }
3169 
3170 /*
3171  * Schedule a peripheral driver to receive a ccb when its
3172  * target device has space for more transactions.
3173  */
3174 void
3175 xpt_schedule(struct cam_periph *periph, uint32_t new_priority)
3176 {
3177 
3178 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3179 	cam_periph_assert(periph, MA_OWNED);
3180 	if (new_priority < periph->scheduled_priority) {
3181 		periph->scheduled_priority = new_priority;
3182 		xpt_run_allocq(periph, 0);
3183 	}
3184 }
3185 
3186 /*
3187  * Schedule a device to run on a given queue.
3188  * If the device was inserted as a new entry on the queue,
3189  * return 1 meaning the device queue should be run. If we
3190  * were already queued, implying someone else has already
3191  * started the queue, return 0 so the caller doesn't attempt
3192  * to run the queue.
3193  */
3194 static int
3195 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3196 		 uint32_t new_priority)
3197 {
3198 	int retval;
3199 	uint32_t old_priority;
3200 
3201 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3202 
3203 	old_priority = pinfo->priority;
3204 
3205 	/*
3206 	 * Are we already queued?
3207 	 */
3208 	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3209 		/* Simply reorder based on new priority */
3210 		if (new_priority < old_priority) {
3211 			camq_change_priority(queue, pinfo->index,
3212 					     new_priority);
3213 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3214 					("changed priority to %d\n",
3215 					 new_priority));
3216 			retval = 1;
3217 		} else
3218 			retval = 0;
3219 	} else {
3220 		/* New entry on the queue */
3221 		if (new_priority < old_priority)
3222 			pinfo->priority = new_priority;
3223 
3224 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3225 				("Inserting onto queue\n"));
3226 		pinfo->generation = ++queue->generation;
3227 		camq_insert(queue, pinfo);
3228 		retval = 1;
3229 	}
3230 	return (retval);
3231 }
3232 
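/*
 * Taskqueue wrapper for xpt_run_allocq(): re-runs the allocation queue
 * from a context that may sleep for CCB allocation, then drops the
 * periph reference taken when the task was enqueued.
 */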
3233 static void
3234 xpt_run_allocq_task(void *context, int pending)
3235 {
3236 	struct cam_periph *periph = context;
3237 
3238 	cam_periph_lock(periph);
3239 	periph->flags &= ~CAM_PERIPH_RUN_TASK;
3240 	xpt_run_allocq(periph, 1);
3241 	cam_periph_unlock(periph);
3242 	cam_periph_release(periph);
3243 }
3244 
3245 static void
3246 xpt_run_allocq(struct cam_periph *periph, int sleep)
3247 {
3248 	struct cam_ed	*device;
3249 	union ccb	*ccb;
3250 	uint32_t	 prio;
3251 
3252 	cam_periph_assert(periph, MA_OWNED);
3253 	if (periph->periph_allocating)
3254 		return;
3255 	cam_periph_doacquire(periph);
3256 	periph->periph_allocating = 1;
3257 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
3258 	device = periph->path->device;
3259 	ccb = NULL;
3260 restart:
3261 	while ((prio = min(periph->scheduled_priority,
3262 	    periph->immediate_priority)) != CAM_PRIORITY_NONE &&
3263 	    (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
3264 	     device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
3265 		if (ccb == NULL &&
3266 		    (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
3267 			if (sleep) {
3268 				ccb = xpt_get_ccb(periph);
3269 				goto restart;
3270 			}
3271 			if (periph->flags & CAM_PERIPH_RUN_TASK)
3272 				break;
3273 			cam_periph_doacquire(periph);
3274 			periph->flags |= CAM_PERIPH_RUN_TASK;
3275 			taskqueue_enqueue(xsoftc.xpt_taskq,
3276 			    &periph->periph_run_task);
3277 			break;
3278 		}
3279 		xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
3280 		if (prio == periph->immediate_priority) {
3281 			periph->immediate_priority = CAM_PRIORITY_NONE;
3282 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3283 					("waking cam_periph_getccb()\n"));
3284 			SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
3285 					  periph_links.sle);
3286 			wakeup(&periph->ccb_list);
3287 		} else {
3288 			periph->scheduled_priority = CAM_PRIORITY_NONE;
3289 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3290 					("calling periph_start()\n"));
3291 			periph->periph_start(periph, ccb);
3292 		}
3293 		ccb = NULL;
3294 	}
3295 	if (ccb != NULL)
3296 		xpt_release_ccb(ccb);
3297 	periph->periph_allocating = 0;
3298 	cam_periph_release_locked(periph);
3299 }
3300 
3301 static void
3302 xpt_run_devq(struct cam_devq *devq)
3303 {
3304 	struct mtx *mtx;
3305 
3306 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
3307 
3308 	devq->send_queue.qfrozen_cnt++;
3309 	while ((devq->send_queue.entries > 0)
3310 	    && (devq->send_openings > 0)
3311 	    && (devq->send_queue.qfrozen_cnt <= 1)) {
3312 		struct	cam_ed *device;
3313 		union ccb *work_ccb;
3314 		struct	cam_sim *sim;
3315 		struct xpt_proto *proto;
3316 
3317 		device = (struct cam_ed *)camq_remove(&devq->send_queue,
3318 							   CAMQ_HEAD);
3319 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3320 				("running device %p\n", device));
3321 
3322 		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3323 		if (work_ccb == NULL) {
3324 			printf("device on run queue with no ccbs???\n");
3325 			continue;
3326 		}
3327 
3328 		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3329 			mtx_lock(&xsoftc.xpt_highpower_lock);
3330 		 	if (xsoftc.num_highpower <= 0) {
3331 				/*
3332 				 * We got a high power command, but we
3333 				 * don't have any available slots.  Freeze
3334 				 * the device queue until we have a slot
3335 				 * available.
3336 				 */
3337 				xpt_freeze_devq_device(device, 1);
3338 				STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
3339 						   highpowerq_entry);
3340 
3341 				mtx_unlock(&xsoftc.xpt_highpower_lock);
3342 				continue;
3343 			} else {
3344 				/*
3345 				 * Consume a high power slot while
3346 				 * this ccb runs.
3347 				 */
3348 				xsoftc.num_highpower--;
3349 			}
3350 			mtx_unlock(&xsoftc.xpt_highpower_lock);
3351 		}
3352 		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3353 		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3354 		devq->send_openings--;
3355 		devq->send_active++;
3356 		xpt_schedule_devq(devq, device);
3357 		mtx_unlock(&devq->send_mtx);
3358 
3359 		if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3360 			/*
3361 			 * The client wants to freeze the queue
3362 			 * after this CCB is sent.
3363 			 */
3364 			xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3365 		}
3366 
3367 		/* In Target mode, the peripheral driver knows best... */
3368 		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3369 			if ((device->inq_flags & SID_CmdQue) != 0
3370 			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3371 				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3372 			else
3373 				/*
3374 				 * Clear this in case of a retried CCB that
3375 				 * failed due to a rejected tag.
3376 				 */
3377 				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3378 		}
3379 
3380 		KASSERT(device == work_ccb->ccb_h.path->device,
3381 		    ("device (%p) / path->device (%p) mismatch",
3382 			device, work_ccb->ccb_h.path->device));
3383 		proto = xpt_proto_find(device->protocol);
3384 		if (proto && proto->ops->debug_out)
3385 			proto->ops->debug_out(work_ccb);
3386 
3387 		/*
3388 		 * Device queues can be shared among multiple SIM instances
3389 		 * that reside on different buses.  Use the SIM from the
3390 		 * queued device, rather than the one from the calling bus.
3391 		 */
3392 		sim = device->sim;
3393 		mtx = sim->mtx;
3394 		if (mtx && !mtx_owned(mtx))
3395 			mtx_lock(mtx);
3396 		else
3397 			mtx = NULL;
3398 		work_ccb->ccb_h.qos.periph_data = cam_iosched_now();
3399 		(*(sim->sim_action))(sim, work_ccb);
3400 		if (mtx)
3401 			mtx_unlock(mtx);
3402 		mtx_lock(&devq->send_mtx);
3403 	}
3404 	devq->send_queue.qfrozen_cnt--;
3405 }
3406 
3407 /*
3408  * This function merges stuff from the src ccb into the dst ccb, while keeping
3409  * important fields in the dst ccb constant.
3410  */
3411 void
3412 xpt_merge_ccb(union ccb *dst_ccb, union ccb *src_ccb)
3413 {
3414 
3415 	/*
3416 	 * Pull fields that are valid for peripheral drivers to set
3417 	 * into the dst CCB along with the CCB "payload".
3418 	 */
3419 	dst_ccb->ccb_h.retry_count = src_ccb->ccb_h.retry_count;
3420 	dst_ccb->ccb_h.func_code = src_ccb->ccb_h.func_code;
3421 	dst_ccb->ccb_h.timeout = src_ccb->ccb_h.timeout;
3422 	dst_ccb->ccb_h.flags = src_ccb->ccb_h.flags;
3423 	bcopy(&(&src_ccb->ccb_h)[1], &(&dst_ccb->ccb_h)[1],
3424 	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3425 }
3426 
3427 void
3428 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path,
3429 		    uint32_t priority, uint32_t flags)
3430 {
3431 
3432 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3433 	ccb_h->pinfo.priority = priority;
3434 	ccb_h->path = path;
3435 	ccb_h->path_id = path->bus->path_id;
3436 	if (path->target)
3437 		ccb_h->target_id = path->target->target_id;
3438 	else
3439 		ccb_h->target_id = CAM_TARGET_WILDCARD;
3440 	if (path->device) {
3441 		ccb_h->target_lun = path->device->lun_id;
3442 		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3443 	} else {
3444 		ccb_h->target_lun = CAM_TARGET_WILDCARD;
3445 	}
3446 	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3447 	ccb_h->flags = flags;
3448 	ccb_h->xflags = 0;
3449 }
3450 
3451 void
3452 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, uint32_t priority)
3453 {
3454 	xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0);
3455 }
3456 
3457 /* Path manipulation functions */
3458 cam_status
3459 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3460 		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3461 {
3462 	struct	   cam_path *path;
3463 	cam_status status;
3464 
3465 	path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3466 
3467 	if (path == NULL) {
3468 		status = CAM_RESRC_UNAVAIL;
3469 		return(status);
3470 	}
3471 	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3472 	if (status != CAM_REQ_CMP) {
3473 		free(path, M_CAMPATH);
3474 		path = NULL;
3475 	}
3476 	*new_path_ptr = path;
3477 	return (status);
3478 }
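
/*
 * Illustrative sketch (comment only, not compiled): create a wildcard
 * path addressing every target and LUN on bus 0, use it, and free it.
 * The bus number is a hypothetical example and error handling is minimal.
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, 0, CAM_TARGET_WILDCARD,
 *	    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
 *		xpt_print(path, "hello from bus 0\n");
 *		xpt_free_path(path);
 *	}
 */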
3479 
3480 cam_status
3481 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3482 			 struct cam_periph *periph, path_id_t path_id,
3483 			 target_id_t target_id, lun_id_t lun_id)
3484 {
3485 
3486 	return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
3487 	    lun_id));
3488 }
3489 
3490 cam_status
3491 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3492 		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3493 {
3494 	struct	     cam_eb *bus;
3495 	struct	     cam_et *target;
3496 	struct	     cam_ed *device;
3497 	cam_status   status;
3498 
3499 	status = CAM_REQ_CMP;	/* Completed without error */
3500 	target = NULL;		/* Wildcarded */
3501 	device = NULL;		/* Wildcarded */
3502 
3503 	/*
3504 	 * We will potentially modify the EDT, so take the bus list and
3505 	 * per-bus locks to serialize against concurrent path creation.
3506 	 */
3507 	bus = xpt_find_bus(path_id);
3508 	if (bus == NULL) {
3509 		status = CAM_PATH_INVALID;
3510 	} else {
3511 		xpt_lock_buses();
3512 		mtx_lock(&bus->eb_mtx);
3513 		target = xpt_find_target(bus, target_id);
3514 		if (target == NULL) {
3515 			/* Create one */
3516 			struct cam_et *new_target;
3517 
3518 			new_target = xpt_alloc_target(bus, target_id);
3519 			if (new_target == NULL) {
3520 				status = CAM_RESRC_UNAVAIL;
3521 			} else {
3522 				target = new_target;
3523 			}
3524 		}
3525 		xpt_unlock_buses();
3526 		if (target != NULL) {
3527 			device = xpt_find_device(target, lun_id);
3528 			if (device == NULL) {
3529 				/* Create one */
3530 				struct cam_ed *new_device;
3531 
3532 				new_device =
3533 				    (*(bus->xport->ops->alloc_device))(bus,
3534 								       target,
3535 								       lun_id);
3536 				if (new_device == NULL) {
3537 					status = CAM_RESRC_UNAVAIL;
3538 				} else {
3539 					device = new_device;
3540 				}
3541 			}
3542 		}
3543 		mtx_unlock(&bus->eb_mtx);
3544 	}
3545 
3546 	/*
3547 	 * Only touch the user's data if we are successful.
3548 	 */
3549 	if (status == CAM_REQ_CMP) {
3550 		new_path->periph = perph;
3551 		new_path->bus = bus;
3552 		new_path->target = target;
3553 		new_path->device = device;
3554 		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3555 	} else {
3556 		if (device != NULL)
3557 			xpt_release_device(device);
3558 		if (target != NULL)
3559 			xpt_release_target(target);
3560 		if (bus != NULL)
3561 			xpt_release_bus(bus);
3562 	}
3563 	return (status);
3564 }
3565 
3566 int
3567 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
3568 {
3569 	struct	   cam_path *new_path;
3570 
3571 	new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3572 	if (new_path == NULL)
3573 		return (ENOMEM);
3574 	*new_path = *path;
3575 	if (path->bus != NULL)
3576 		xpt_acquire_bus(path->bus);
3577 	if (path->target != NULL)
3578 		xpt_acquire_target(path->target);
3579 	if (path->device != NULL)
3580 		xpt_acquire_device(path->device);
3581 	*new_path_ptr = new_path;
3582 	return (0);
3583 }
3584 
3585 void
3586 xpt_release_path(struct cam_path *path)
3587 {
3588 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3589 	if (path->device != NULL) {
3590 		xpt_release_device(path->device);
3591 		path->device = NULL;
3592 	}
3593 	if (path->target != NULL) {
3594 		xpt_release_target(path->target);
3595 		path->target = NULL;
3596 	}
3597 	if (path->bus != NULL) {
3598 		xpt_release_bus(path->bus);
3599 		path->bus = NULL;
3600 	}
3601 }
3602 
3603 void
3604 xpt_free_path(struct cam_path *path)
3605 {
3606 
3607 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3608 	xpt_release_path(path);
3609 	free(path, M_CAMPATH);
3610 }
3611 
3612 void
3613 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
3614     uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
3615 {
3616 
3617 	xpt_lock_buses();
3618 	if (bus_ref) {
3619 		if (path->bus)
3620 			*bus_ref = path->bus->refcount;
3621 		else
3622 			*bus_ref = 0;
3623 	}
3624 	if (periph_ref) {
3625 		if (path->periph)
3626 			*periph_ref = path->periph->refcount;
3627 		else
3628 			*periph_ref = 0;
3629 	}
3630 	xpt_unlock_buses();
3631 	if (target_ref) {
3632 		if (path->target)
3633 			*target_ref = path->target->refcount;
3634 		else
3635 			*target_ref = 0;
3636 	}
3637 	if (device_ref) {
3638 		if (path->device)
3639 			*device_ref = path->device->refcount;
3640 		else
3641 			*device_ref = 0;
3642 	}
3643 }
3644 
3645 /*
3646  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3647  * in path1, 2 for match with wildcards in path2.
3648  */
3649 int
3650 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3651 {
3652 	int retval = 0;
3653 
3654 	if (path1->bus != path2->bus) {
3655 		if (path1->bus->path_id == CAM_BUS_WILDCARD)
3656 			retval = 1;
3657 		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3658 			retval = 2;
3659 		else
3660 			return (-1);
3661 	}
3662 	if (path1->target != path2->target) {
3663 		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3664 			if (retval == 0)
3665 				retval = 1;
3666 		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3667 			retval = 2;
3668 		else
3669 			return (-1);
3670 	}
3671 	if (path1->device != path2->device) {
3672 		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3673 			if (retval == 0)
3674 				retval = 1;
3675 		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3676 			retval = 2;
3677 		else
3678 			return (-1);
3679 	}
3680 	return (retval);
3681 }
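
/*
 * Illustrative sketch (comment only, not compiled): a client deciding
 * whether an event's path applies to its own.  A return of -1 means no
 * overlap; 0 is an exact match; 1 or 2 indicate a match through
 * wildcards in the first or second argument, respectively, so any
 * non-negative value means the paths overlap.  "my_path" and
 * "event_path" are hypothetical names.
 *
 *	if (xpt_path_comp(my_path, event_path) >= 0)
 *		handle_the_event();
 */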
3682 
3683 int
3684 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
3685 {
3686 	int retval = 0;
3687 
3688 	if (path->bus != dev->target->bus) {
3689 		if (path->bus->path_id == CAM_BUS_WILDCARD)
3690 			retval = 1;
3691 		else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
3692 			retval = 2;
3693 		else
3694 			return (-1);
3695 	}
3696 	if (path->target != dev->target) {
3697 		if (path->target->target_id == CAM_TARGET_WILDCARD) {
3698 			if (retval == 0)
3699 				retval = 1;
3700 		} else if (dev->target->target_id == CAM_TARGET_WILDCARD)
3701 			retval = 2;
3702 		else
3703 			return (-1);
3704 	}
3705 	if (path->device != dev) {
3706 		if (path->device->lun_id == CAM_LUN_WILDCARD) {
3707 			if (retval == 0)
3708 				retval = 1;
3709 		} else if (dev->lun_id == CAM_LUN_WILDCARD)
3710 			retval = 2;
3711 		else
3712 			return (-1);
3713 	}
3714 	return (retval);
3715 }
3716 
3717 void
3718 xpt_print_path(struct cam_path *path)
3719 {
3720 	struct sbuf sb;
3721 	char buffer[XPT_PRINT_LEN];
3722 
3723 	sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN);
3724 	xpt_path_sbuf(path, &sb);
3725 	sbuf_finish(&sb);
3726 	printf("%s", sbuf_data(&sb));
3727 	sbuf_delete(&sb);
3728 }
3729 
3730 static void
3731 xpt_device_sbuf(struct cam_ed *device, struct sbuf *sb)
3732 {
3733 	if (device == NULL)
3734 		sbuf_cat(sb, "(nopath): ");
3735 	else {
3736 		sbuf_printf(sb, "(noperiph:%s%d:%d:%d:%jx): ",
3737 		    device->sim->sim_name,
3738 		    device->sim->unit_number,
3739 		    device->sim->bus_id,
3740 		    device->target->target_id,
3741 		    (uintmax_t)device->lun_id);
3742 	}
3743 }
3744 
3745 void
3746 xpt_print(struct cam_path *path, const char *fmt, ...)
3747 {
3748 	va_list ap;
3749 	struct sbuf sb;
3750 	char buffer[XPT_PRINT_LEN];
3751 
3752 	sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN);
3753 
3754 	xpt_path_sbuf(path, &sb);
3755 	va_start(ap, fmt);
3756 	sbuf_vprintf(&sb, fmt, ap);
3757 	va_end(ap);
3758 
3759 	sbuf_finish(&sb);
3760 	printf("%s", sbuf_data(&sb));
3761 	sbuf_delete(&sb);
3762 }
3763 
3764 char *
3765 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3766 {
3767 	struct sbuf sb;
3768 
3769 	sbuf_new(&sb, str, str_len, 0);
3770 	xpt_path_sbuf(path, &sb);
3771 	sbuf_finish(&sb);
3772 	return (str);
3773 }
3774 
3775 void
3776 xpt_path_sbuf(struct cam_path *path, struct sbuf *sb)
3777 {
3778 
3779 	if (path == NULL)
3780 		sbuf_cat(sb, "(nopath): ");
3781 	else {
3782 		if (path->periph != NULL)
3783 			sbuf_printf(sb, "(%s%d:", path->periph->periph_name,
3784 				    path->periph->unit_number);
3785 		else
3786 			sbuf_cat(sb, "(noperiph:");
3787 
3788 		if (path->bus != NULL)
3789 			sbuf_printf(sb, "%s%d:%d:", path->bus->sim->sim_name,
3790 				    path->bus->sim->unit_number,
3791 				    path->bus->sim->bus_id);
3792 		else
3793 			sbuf_cat(sb, "nobus:");
3794 
3795 		if (path->target != NULL)
3796 			sbuf_printf(sb, "%d:", path->target->target_id);
3797 		else
3798 			sbuf_cat(sb, "X:");
3799 
3800 		if (path->device != NULL)
3801 			sbuf_printf(sb, "%jx): ",
3802 			    (uintmax_t)path->device->lun_id);
3803 		else
3804 			sbuf_cat(sb, "X): ");
3805 	}
3806 }
3807 
3808 path_id_t
3809 xpt_path_path_id(struct cam_path *path)
3810 {
3811 	return(path->bus->path_id);
3812 }
3813 
3814 target_id_t
3815 xpt_path_target_id(struct cam_path *path)
3816 {
3817 	if (path->target != NULL)
3818 		return (path->target->target_id);
3819 	else
3820 		return (CAM_TARGET_WILDCARD);
3821 }
3822 
3823 lun_id_t
3824 xpt_path_lun_id(struct cam_path *path)
3825 {
3826 	if (path->device != NULL)
3827 		return (path->device->lun_id);
3828 	else
3829 		return (CAM_LUN_WILDCARD);
3830 }
3831 
3832 struct cam_sim *
3833 xpt_path_sim(struct cam_path *path)
3834 {
3835 
3836 	return (path->bus->sim);
3837 }
3838 
3839 struct cam_periph*
3840 xpt_path_periph(struct cam_path *path)
3841 {
3842 
3843 	return (path->periph);
3844 }
3845 
3846 /*
3847  * Release a CAM control block for the caller.  Remit the cost of the structure
3848  * to the device referenced by the path.  If this device had no 'credits'
3849  * and peripheral drivers have registered async callbacks for this
3850  * notification, call them now.
3851  */
3852 void
3853 xpt_release_ccb(union ccb *free_ccb)
3854 {
3855 	struct	 cam_ed *device;
3856 	struct	 cam_periph *periph;
3857 
3858 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3859 	xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
3860 	device = free_ccb->ccb_h.path->device;
3861 	periph = free_ccb->ccb_h.path->periph;
3862 
3863 	xpt_free_ccb(free_ccb);
3864 	periph->periph_allocated--;
3865 	cam_ccbq_release_opening(&device->ccbq);
3866 	xpt_run_allocq(periph, 0);
3867 }
3868 
3869 /* Functions accessed by SIM drivers */
3870 
3871 static struct xpt_xport_ops xport_default_ops = {
3872 	.alloc_device = xpt_alloc_device_default,
3873 	.action = xpt_action_default,
3874 	.async = xpt_dev_async_default,
3875 };
3876 static struct xpt_xport xport_default = {
3877 	.xport = XPORT_UNKNOWN,
3878 	.name = "unknown",
3879 	.ops = &xport_default_ops,
3880 };
3881 
3882 CAM_XPT_XPORT(xport_default);
3883 
3884 /*
3885  * A sim structure, listing the SIM entry points and instance
3886  * identification info, is passed to xpt_bus_register to hook the SIM
3887  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3888  * for this new bus and places it in the array of buses and assigns
3889  * it a path_id.  The path_id may be influenced by "hard wiring"
3890  * information specified by the user.  Once interrupt services are
3891  * available, the bus will be probed.
3892  */
3893 int
3894 xpt_bus_register(struct cam_sim *sim, device_t parent, uint32_t bus)
3895 {
3896 	struct cam_eb *new_bus;
3897 	struct cam_eb *old_bus;
3898 	struct ccb_pathinq cpi;
3899 	struct cam_path *path;
3900 	cam_status status;
3901 
3902 	sim->bus_id = bus;
3903 	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
3904 					  M_CAMXPT, M_NOWAIT|M_ZERO);
3905 	if (new_bus == NULL) {
3906 		/* Couldn't satisfy request */
3907 		return (ENOMEM);
3908 	}
3909 
3910 	mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
3911 	TAILQ_INIT(&new_bus->et_entries);
3912 	cam_sim_hold(sim);
3913 	new_bus->sim = sim;
3914 	timevalclear(&new_bus->last_reset);
3915 	new_bus->flags = 0;
3916 	new_bus->refcount = 1;	/* Held until a bus_deregister event */
3917 	new_bus->generation = 0;
3918 	new_bus->parent_dev = parent;
3919 
3920 	xpt_lock_buses();
3921 	sim->path_id = new_bus->path_id =
3922 	    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
3923 	old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3924 	while (old_bus != NULL
3925 	    && old_bus->path_id < new_bus->path_id)
3926 		old_bus = TAILQ_NEXT(old_bus, links);
3927 	if (old_bus != NULL)
3928 		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
3929 	else
3930 		TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
3931 	xsoftc.bus_generation++;
3932 	xpt_unlock_buses();
3933 
3934 	/*
3935 	 * Set a default transport so that a PATH_INQ can be issued to
3936 	 * the SIM.  This will then allow for probing and attaching of
3937 	 * a more appropriate transport.
3938 	 */
3939 	new_bus->xport = &xport_default;
3940 
3941 	status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
3942 				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3943 	if (status != CAM_REQ_CMP) {
3944 		xpt_release_bus(new_bus);
3945 		return (ENOMEM);
3946 	}
3947 
3948 	xpt_path_inq(&cpi, path);
3949 
3950 	/*
3951 	 * Use the results of PATH_INQ to pick a transport.  Note that
3952 	 * the xpt bus (which uses XPORT_UNSPECIFIED) always uses
3953 	 * xport_default instead of a transport from
3954  * cam_xpt_xport_set.
3955 	 */
3956 	if (cam_ccb_success((union ccb *)&cpi) &&
3957 	    cpi.transport != XPORT_UNSPECIFIED) {
3958 		struct xpt_xport **xpt;
3959 
3960 		SET_FOREACH(xpt, cam_xpt_xport_set) {
3961 			if ((*xpt)->xport == cpi.transport) {
3962 				new_bus->xport = *xpt;
3963 				break;
3964 			}
3965 		}
3966 		if (new_bus->xport == &xport_default) {
3967 			xpt_print(path,
3968 			    "No transport found for %d\n", cpi.transport);
3969 			xpt_release_bus(new_bus);
3970 			xpt_free_path(path);
3971 			return (EINVAL);
3972 		}
3973 	}
3974 
3975 	/* Notify interested parties */
3976 	if (sim->path_id != CAM_XPT_PATH_ID) {
3977 		xpt_async(AC_PATH_REGISTERED, path, &cpi);
3978 		if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
3979 			union	ccb *scan_ccb;
3980 
3981 			/* Initiate bus rescan. */
3982 			scan_ccb = xpt_alloc_ccb_nowait();
3983 			if (scan_ccb != NULL) {
3984 				scan_ccb->ccb_h.path = path;
3985 				scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
3986 				scan_ccb->crcn.flags = 0;
3987 				xpt_rescan(scan_ccb);
3988 			} else {
3989 				xpt_print(path,
3990 					  "Can't allocate CCB to scan bus\n");
3991 				xpt_free_path(path);
3992 			}
3993 		} else
3994 			xpt_free_path(path);
3995 	} else
3996 		xpt_free_path(path);
3997 	return (CAM_SUCCESS);
3998 }
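
/*
 * Illustrative sketch (comment only, not compiled) of the usual
 * registration sequence in a SIM driver's attach routine, assuming the
 * customary cam_simq_alloc()/cam_sim_alloc() parameters; "sc",
 * "MYSIM_MAXIO", and the two callbacks are hypothetical.
 *
 *	devq = cam_simq_alloc(MYSIM_MAXIO);
 *	sim = cam_sim_alloc(mysim_action, mysim_poll, "mysim", sc,
 *	    device_get_unit(dev), &sc->mtx, 1, MYSIM_MAXIO, devq);
 *	mtx_lock(&sc->mtx);
 *	if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS) {
 *		cam_sim_free(sim, TRUE);
 *		...
 *	}
 *	mtx_unlock(&sc->mtx);
 */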
3999 
4000 int
4001 xpt_bus_deregister(path_id_t pathid)
4002 {
4003 	struct cam_path bus_path;
4004 	cam_status status;
4005 
4006 	status = xpt_compile_path(&bus_path, NULL, pathid,
4007 				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4008 	if (status != CAM_REQ_CMP)
4009 		return (ENOMEM);
4010 
4011 	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4012 	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4013 
4014 	/* Release the reference count held while registered. */
4015 	xpt_release_bus(bus_path.bus);
4016 	xpt_release_path(&bus_path);
4017 
4018 	return (CAM_SUCCESS);
4019 }
4020 
4021 static path_id_t
4022 xptnextfreepathid(void)
4023 {
4024 	struct cam_eb *bus;
4025 	path_id_t pathid;
4026 	const char *strval;
4027 
4028 	mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4029 	pathid = 0;
4030 	bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4031 retry:
4032 	/* Find an unoccupied pathid */
4033 	while (bus != NULL && bus->path_id <= pathid) {
4034 		if (bus->path_id == pathid)
4035 			pathid++;
4036 		bus = TAILQ_NEXT(bus, links);
4037 	}
4038 
4039 	/*
4040 	 * Ensure that this pathid is not reserved for
4041 	 * a bus that may be registered in the future.
4042 	 */
4043 	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4044 		++pathid;
4045 		/* Start the search over */
4046 		goto retry;
4047 	}
4048 	return (pathid);
4049 }
4050 
4051 static path_id_t
4052 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4053 {
4054 	path_id_t pathid;
4055 	int i, dunit, val;
4056 	char buf[32];
4057 	const char *dname;
4058 
4059 	pathid = CAM_XPT_PATH_ID;
4060 	snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4061 	if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
4062 		return (pathid);
4063 	i = 0;
4064 	while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4065 		if (strcmp(dname, "scbus")) {
4066 			/* Avoid a bit of foot shooting. */
4067 			continue;
4068 		}
4069 		if (dunit < 0)		/* unwired?! */
4070 			continue;
4071 		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4072 			if (sim_bus == val) {
4073 				pathid = dunit;
4074 				break;
4075 			}
4076 		} else if (sim_bus == 0) {
4077 			/* Unspecified matches bus 0 */
4078 			pathid = dunit;
4079 			break;
4080 		} else {
4081 			printf(
4082 "Ambiguous scbus configuration for %s%d bus %d, cannot wire down.  The kernel\n"
4083 "config entry for scbus%d should specify a controller bus.\n"
4084 "Scbus will be assigned dynamically.\n",
4085 			    sim_name, sim_unit, sim_bus, dunit);
4086 			break;
4087 		}
4088 	}
4089 
4090 	if (pathid == CAM_XPT_PATH_ID)
4091 		pathid = xptnextfreepathid();
4092 	return (pathid);
4093 }
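
/*
 * Illustrative sketch: the hints consulted above take the following form
 * in device.hints or the kernel config (device names and numbers are
 * hypothetical):
 *
 *	hint.scbus.3.at="ahc1"
 *	hint.scbus.3.bus="0"
 *
 * This wires path id 3 to bus 0 of ahc1.  Buses without such an entry
 * receive the next free path id from xptnextfreepathid().
 */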
4094 
4095 static const char *
4096 xpt_async_string(uint32_t async_code)
4097 {
4098 
4099 	switch (async_code) {
4100 	case AC_BUS_RESET: return ("AC_BUS_RESET");
4101 	case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
4102 	case AC_SCSI_AEN: return ("AC_SCSI_AEN");
4103 	case AC_SENT_BDR: return ("AC_SENT_BDR");
4104 	case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
4105 	case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
4106 	case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
4107 	case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
4108 	case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
4109 	case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
4110 	case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
4111 	case AC_CONTRACT: return ("AC_CONTRACT");
4112 	case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
4113 	case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
4114 	}
4115 	return ("AC_UNKNOWN");
4116 }
4117 
4118 static int
4119 xpt_async_size(uint32_t async_code)
4120 {
4121 
4122 	switch (async_code) {
4123 	case AC_BUS_RESET: return (0);
4124 	case AC_UNSOL_RESEL: return (0);
4125 	case AC_SCSI_AEN: return (0);
4126 	case AC_SENT_BDR: return (0);
4127 	case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
4128 	case AC_PATH_DEREGISTERED: return (0);
4129 	case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
4130 	case AC_LOST_DEVICE: return (0);
4131 	case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
4132 	case AC_INQ_CHANGED: return (0);
4133 	case AC_GETDEV_CHANGED: return (0);
4134 	case AC_CONTRACT: return (sizeof(struct ac_contract));
4135 	case AC_ADVINFO_CHANGED: return (-1);
4136 	case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
4137 	}
4138 	return (0);
4139 }
4140 
4141 static int
4142 xpt_async_process_dev(struct cam_ed *device, void *arg)
4143 {
4144 	union ccb *ccb = arg;
4145 	struct cam_path *path = ccb->ccb_h.path;
4146 	void *async_arg = ccb->casync.async_arg_ptr;
4147 	uint32_t async_code = ccb->casync.async_code;
4148 	bool relock;
4149 
4150 	if (path->device != device
4151 	 && path->device->lun_id != CAM_LUN_WILDCARD
4152 	 && device->lun_id != CAM_LUN_WILDCARD)
4153 		return (1);
4154 
4155 	/*
4156 	 * The async callback could free the device.
4157 	 * If it is a broadcast async, it doesn't hold a
4158 	 * device reference, so take our own reference.
4159 	 */
4160 	xpt_acquire_device(device);
4161 
4162 	/*
4163 	 * If an async for a specific device is to be delivered to
4164 	 * a wildcard client, take the specific device's lock.
4165 	 * XXX: We may need a way for the client to specify this.
4166 	 */
4167 	if ((device->lun_id == CAM_LUN_WILDCARD &&
4168 	     path->device->lun_id != CAM_LUN_WILDCARD) ||
4169 	    (device->target->target_id == CAM_TARGET_WILDCARD &&
4170 	     path->target->target_id != CAM_TARGET_WILDCARD) ||
4171 	    (device->target->bus->path_id == CAM_BUS_WILDCARD &&
4172 	     path->target->bus->path_id != CAM_BUS_WILDCARD)) {
4173 		mtx_unlock(&device->device_mtx);
4174 		xpt_path_lock(path);
4175 		relock = true;
4176 	} else
4177 		relock = false;
4178 
4179 	(*(device->target->bus->xport->ops->async))(async_code,
4180 	    device->target->bus, device->target, device, async_arg);
4181 	xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
4182 
4183 	if (relock) {
4184 		xpt_path_unlock(path);
4185 		mtx_lock(&device->device_mtx);
4186 	}
4187 	xpt_release_device(device);
4188 	return (1);
4189 }
4190 
4191 static int
4192 xpt_async_process_tgt(struct cam_et *target, void *arg)
4193 {
4194 	union ccb *ccb = arg;
4195 	struct cam_path *path = ccb->ccb_h.path;
4196 
4197 	if (path->target != target
4198 	 && path->target->target_id != CAM_TARGET_WILDCARD
4199 	 && target->target_id != CAM_TARGET_WILDCARD)
4200 		return (1);
4201 
4202 	if (ccb->casync.async_code == AC_SENT_BDR) {
4203 		/* Update our notion of when the last reset occurred */
4204 		microtime(&target->last_reset);
4205 	}
4206 
4207 	return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
4208 }
4209 
4210 static void
4211 xpt_async_process(struct cam_periph *periph, union ccb *ccb)
4212 {
4213 	struct cam_eb *bus;
4214 	struct cam_path *path;
4215 	void *async_arg;
4216 	uint32_t async_code;
4217 
4218 	path = ccb->ccb_h.path;
4219 	async_code = ccb->casync.async_code;
4220 	async_arg = ccb->casync.async_arg_ptr;
4221 	CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
4222 	    ("xpt_async(%s)\n", xpt_async_string(async_code)));
4223 	bus = path->bus;
4224 
4225 	if (async_code == AC_BUS_RESET) {
4226 		/* Update our notion of when the last reset occurred */
4227 		microtime(&bus->last_reset);
4228 	}
4229 
4230 	xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
4231 
4232 	/*
4233 	 * If this wasn't a fully wildcarded async, tell all
4234 	 * clients that want all async events.
4235 	 */
4236 	if (bus != xpt_periph->path->bus) {
4237 		xpt_path_lock(xpt_periph->path);
4238 		xpt_async_process_dev(xpt_periph->path->device, ccb);
4239 		xpt_path_unlock(xpt_periph->path);
4240 	}
4241 
4242 	if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4243 		xpt_release_devq(path, 1, TRUE);
4244 	else
4245 		xpt_release_simq(path->bus->sim, TRUE);
4246 	if (ccb->casync.async_arg_size > 0)
4247 		free(async_arg, M_CAMXPT);
4248 	xpt_free_path(path);
4249 	xpt_free_ccb(ccb);
4250 }
4251 
4252 static void
4253 xpt_async_bcast(struct async_list *async_head,
4254 		uint32_t async_code,
4255 		struct cam_path *path, void *async_arg)
4256 {
4257 	struct async_node *cur_entry;
4258 	struct mtx *mtx;
4259 
4260 	cur_entry = SLIST_FIRST(async_head);
4261 	while (cur_entry != NULL) {
4262 		struct async_node *next_entry;
4263 		/*
4264 		 * Grab the next list entry before we call the current
4265 		 * entry's callback.  This is because the callback function
4266 		 * can delete its async callback entry.
4267 		 */
4268 		next_entry = SLIST_NEXT(cur_entry, links);
4269 		if ((cur_entry->event_enable & async_code) != 0) {
4270 			mtx = cur_entry->event_lock ?
4271 			    path->device->sim->mtx : NULL;
4272 			if (mtx)
4273 				mtx_lock(mtx);
4274 			CAM_PROBE4(xpt, async__cb, cur_entry->callback_arg,
4275 			    async_code, path, async_arg);
4276 			cur_entry->callback(cur_entry->callback_arg,
4277 					    async_code, path,
4278 					    async_arg);
4279 			if (mtx)
4280 				mtx_unlock(mtx);
4281 		}
4282 		cur_entry = next_entry;
4283 	}
4284 }
4285 
4286 void
4287 xpt_async(uint32_t async_code, struct cam_path *path, void *async_arg)
4288 {
4289 	union ccb *ccb;
4290 	int size;
4291 
4292 	ccb = xpt_alloc_ccb_nowait();
4293 	if (ccb == NULL) {
4294 		xpt_print(path, "Can't allocate CCB to send %s\n",
4295 		    xpt_async_string(async_code));
4296 		return;
4297 	}
4298 
4299 	if (xpt_clone_path(&ccb->ccb_h.path, path) != 0) {
4300 		xpt_print(path, "Can't allocate path to send %s\n",
4301 		    xpt_async_string(async_code));
4302 		xpt_free_ccb(ccb);
4303 		return;
4304 	}
4305 	ccb->ccb_h.path->periph = NULL;
4306 	ccb->ccb_h.func_code = XPT_ASYNC;
4307 	ccb->ccb_h.cbfcnp = xpt_async_process;
4308 	ccb->ccb_h.flags |= CAM_UNLOCKED;
4309 	ccb->casync.async_code = async_code;
4310 	ccb->casync.async_arg_size = 0;
4311 	size = xpt_async_size(async_code);
4312 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
4313 	    ("xpt_async: func %#x %s async_code %d %s\n",
4314 		ccb->ccb_h.func_code,
4315 		xpt_action_name(ccb->ccb_h.func_code),
4316 		async_code,
4317 		xpt_async_string(async_code)));
4318 	if (size > 0 && async_arg != NULL) {
4319 		ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
4320 		if (ccb->casync.async_arg_ptr == NULL) {
4321 			xpt_print(path, "Can't allocate argument to send %s\n",
4322 			    xpt_async_string(async_code));
4323 			xpt_free_path(ccb->ccb_h.path);
4324 			xpt_free_ccb(ccb);
4325 			return;
4326 		}
4327 		memcpy(ccb->casync.async_arg_ptr, async_arg, size);
4328 		ccb->casync.async_arg_size = size;
4329 	} else if (size < 0) {
4330 		ccb->casync.async_arg_ptr = async_arg;
4331 		ccb->casync.async_arg_size = size;
4332 	}
4333 	if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4334 		xpt_freeze_devq(path, 1);
4335 	else
4336 		xpt_freeze_simq(path->bus->sim, 1);
4337 	xpt_action(ccb);
4338 }
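
/*
 * Illustrative sketch (comment only, not compiled): a transport that
 * notices a device has gone away can notify interested clients with:
 *
 *	xpt_async(AC_LOST_DEVICE, path, NULL);
 *
 * xpt_async_size() reports 0 for AC_LOST_DEVICE, so no argument payload
 * is copied; xpt_bus_deregister() above uses exactly this pattern.
 */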
4339 
4340 static void
4341 xpt_dev_async_default(uint32_t async_code, struct cam_eb *bus,
4342 		      struct cam_et *target, struct cam_ed *device,
4343 		      void *async_arg)
4344 {
4345 
4346 	/*
4347 	 * We only need to handle events for real devices.
4348 	 */
4349 	if (target->target_id == CAM_TARGET_WILDCARD
4350 	 || device->lun_id == CAM_LUN_WILDCARD)
4351 		return;
4352 
4353 	printf("%s called\n", __func__);
4354 }
4355 
4356 static uint32_t
4357 xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
4358 {
4359 	struct cam_devq	*devq;
4360 	uint32_t freeze;
4361 
4362 	devq = dev->sim->devq;
4363 	mtx_assert(&devq->send_mtx, MA_OWNED);
4364 	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4365 	    ("xpt_freeze_devq_device(%d) %u->%u\n", count,
4366 	    dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
4367 	freeze = (dev->ccbq.queue.qfrozen_cnt += count);
4368 	/* Remove frozen device from sendq. */
4369 	if (device_is_queued(dev))
4370 		camq_remove(&devq->send_queue, dev->devq_entry.index);
4371 	return (freeze);
4372 }
4373 
4374 uint32_t
4375 xpt_freeze_devq(struct cam_path *path, u_int count)
4376 {
4377 	struct cam_ed	*dev = path->device;
4378 	struct cam_devq	*devq;
4379 	uint32_t	 freeze;
4380 
4381 	devq = dev->sim->devq;
4382 	mtx_lock(&devq->send_mtx);
4383 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
4384 	freeze = xpt_freeze_devq_device(dev, count);
4385 	mtx_unlock(&devq->send_mtx);
4386 	return (freeze);
4387 }
4388 
4389 uint32_t
4390 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4391 {
4392 	struct cam_devq	*devq;
4393 	uint32_t	 freeze;
4394 
4395 	devq = sim->devq;
4396 	mtx_lock(&devq->send_mtx);
4397 	freeze = (devq->send_queue.qfrozen_cnt += count);
4398 	mtx_unlock(&devq->send_mtx);
4399 	return (freeze);
4400 }
4401 
4402 static void
4403 xpt_release_devq_timeout(void *arg)
4404 {
4405 	struct cam_ed *dev;
4406 	struct cam_devq *devq;
4407 
4408 	dev = (struct cam_ed *)arg;
4409 	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
4410 	devq = dev->sim->devq;
4411 	mtx_assert(&devq->send_mtx, MA_OWNED);
4412 	if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
4413 		xpt_run_devq(devq);
4414 }
4415 
4416 void
4417 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4418 {
4419 	struct cam_ed *dev;
4420 	struct cam_devq *devq;
4421 
4422 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
4423 	    count, run_queue));
4424 	dev = path->device;
4425 	devq = dev->sim->devq;
4426 	mtx_lock(&devq->send_mtx);
4427 	if (xpt_release_devq_device(dev, count, run_queue))
4428 		xpt_run_devq(dev->sim->devq);
4429 	mtx_unlock(&devq->send_mtx);
4430 }
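
/*
 * Illustrative sketch (comment only, not compiled): freezes and releases
 * must balance.  A driver pausing a device around a recovery action
 * might do:
 *
 *	xpt_freeze_devq(path, 1);
 *	... issue and complete the recovery CCB ...
 *	xpt_release_devq(path, 1, TRUE);
 *
 * Passing run_queue == TRUE restarts queued transactions as soon as the
 * freeze count drops back to zero.
 */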
4431 
4432 static int
4433 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4434 {
4435 
4436 	mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
4437 	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4438 	    ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
4439 	    dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
4440 	if (count > dev->ccbq.queue.qfrozen_cnt) {
4441 #ifdef INVARIANTS
4442 		printf("xpt_release_devq(): requested %u > present %u\n",
4443 		    count, dev->ccbq.queue.qfrozen_cnt);
4444 #endif
4445 		count = dev->ccbq.queue.qfrozen_cnt;
4446 	}
4447 	dev->ccbq.queue.qfrozen_cnt -= count;
4448 	if (dev->ccbq.queue.qfrozen_cnt == 0) {
4449 		/*
4450 		 * No longer need to wait for a successful
4451 		 * command completion.
4452 		 */
4453 		dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4454 		/*
4455 		 * Remove any timeouts that might be scheduled
4456 		 * to release this queue.
4457 		 */
4458 		if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4459 			callout_stop(&dev->callout);
4460 			dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4461 		}
4462 		/*
4463 		 * Now that we are unfrozen, schedule the
4464 		 * device so any pending transactions are
4465 		 * run.
4466 		 */
4467 		xpt_schedule_devq(dev->sim->devq, dev);
4468 	} else
4469 		run_queue = 0;
4470 	return (run_queue);
4471 }
4472 
4473 void
4474 xpt_release_simq(struct cam_sim *sim, int run_queue)
4475 {
4476 	struct cam_devq	*devq;
4477 
4478 	devq = sim->devq;
4479 	mtx_lock(&devq->send_mtx);
4480 	if (devq->send_queue.qfrozen_cnt <= 0) {
4481 #ifdef INVARIANTS
4482 		printf("xpt_release_simq: requested 1 > present %u\n",
4483 		    devq->send_queue.qfrozen_cnt);
4484 #endif
4485 	} else
4486 		devq->send_queue.qfrozen_cnt--;
4487 	if (devq->send_queue.qfrozen_cnt == 0) {
4488 		if (run_queue) {
4489 			/*
4490 			 * Now that we are unfrozen, run the send queue.
4491 			 */
4492 			xpt_run_devq(sim->devq);
4493 		}
4494 	}
4495 	mtx_unlock(&devq->send_mtx);
4496 }
4497 
4498 void
4499 xpt_done(union ccb *done_ccb)
4500 {
4501 	struct cam_doneq *queue;
4502 	int	run, hash;
4503 
4504 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4505 	if (done_ccb->ccb_h.func_code == XPT_SCSI_IO &&
4506 	    done_ccb->csio.bio != NULL)
4507 		biotrack(done_ccb->csio.bio, __func__);
4508 #endif
4509 
4510 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4511 	    ("xpt_done: func= %#x %s status %#x\n",
4512 		done_ccb->ccb_h.func_code,
4513 		xpt_action_name(done_ccb->ccb_h.func_code),
4514 		done_ccb->ccb_h.status));
4515 	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) {
4516 		CAM_PROBE1(xpt, done, done_ccb);
4517 		return;
4518 	}
4519 
4520 	/* Store the time the ccb was in the sim */
4521 	done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data);
4522 	done_ccb->ccb_h.status |= CAM_QOS_VALID;
4523 	hash = (u_int)(done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
4524 	    done_ccb->ccb_h.target_lun) % cam_num_doneqs;
4525 	queue = &cam_doneqs[hash];
4526 	mtx_lock(&queue->cam_doneq_mtx);
4527 	run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
4528 	STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
4529 	done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4530 	mtx_unlock(&queue->cam_doneq_mtx);
4531 	if (run && !dumping)
4532 		wakeup(&queue->cam_doneq);
4533 }
4534 
4535 void
4536 xpt_done_direct(union ccb *done_ccb)
4537 {
4538 
4539 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4540 	    ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status));
4541 	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4542 		return;
4543 
4544 	/* Store the time the ccb was in the sim */
4545 	done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data);
4546 	done_ccb->ccb_h.status |= CAM_QOS_VALID;
4547 	xpt_done_process(&done_ccb->ccb_h);
4548 }
4549 
4550 union ccb *
4551 xpt_alloc_ccb(void)
4552 {
4553 	union ccb *new_ccb;
4554 
4555 	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4556 	return (new_ccb);
4557 }
4558 
4559 union ccb *
4560 xpt_alloc_ccb_nowait(void)
4561 {
4562 	union ccb *new_ccb;
4563 
4564 	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4565 	return (new_ccb);
4566 }
4567 
4568 void
4569 xpt_free_ccb(union ccb *free_ccb)
4570 {
4571 	struct cam_periph *periph;
4572 
4573 	if (free_ccb->ccb_h.alloc_flags & CAM_CCB_FROM_UMA) {
4574 		/*
4575 		 * Looks like a CCB allocated from a periph UMA zone.
4576 		 */
4577 		periph = free_ccb->ccb_h.path->periph;
4578 		uma_zfree(periph->ccb_zone, free_ccb);
4579 	} else {
4580 		free(free_ccb, M_CAMCCB);
4581 	}
4582 }
4583 
4584 /* Private XPT functions */
4585 
4586 /*
4587  * Get a CAM control block for the caller. Charge the structure to the device
4588  * referenced by the path.  If we don't have sufficient resources to allocate
4589  * more ccbs, we return NULL.
4590  */
4591 static union ccb *
4592 xpt_get_ccb_nowait(struct cam_periph *periph)
4593 {
4594 	union ccb *new_ccb;
4595 	int alloc_flags;
4596 
4597 	if (periph->ccb_zone != NULL) {
4598 		alloc_flags = CAM_CCB_FROM_UMA;
4599 		new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_NOWAIT);
4600 	} else {
4601 		alloc_flags = 0;
4602 		new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4603 	}
4604 	if (new_ccb == NULL)
4605 		return (NULL);
4606 	new_ccb->ccb_h.alloc_flags = alloc_flags;
4607 	periph->periph_allocated++;
4608 	cam_ccbq_take_opening(&periph->path->device->ccbq);
4609 	return (new_ccb);
4610 }
4611 
4612 static union ccb *
4613 xpt_get_ccb(struct cam_periph *periph)
4614 {
4615 	union ccb *new_ccb;
4616 	int alloc_flags;
4617 
4618 	cam_periph_unlock(periph);
4619 	if (periph->ccb_zone != NULL) {
4620 		alloc_flags = CAM_CCB_FROM_UMA;
4621 		new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_WAITOK);
4622 	} else {
4623 		alloc_flags = 0;
4624 		new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4625 	}
4626 	new_ccb->ccb_h.alloc_flags = alloc_flags;
4627 	cam_periph_lock(periph);
4628 	periph->periph_allocated++;
4629 	cam_ccbq_take_opening(&periph->path->device->ccbq);
4630 	return (new_ccb);
4631 }
4632 
4633 union ccb *
4634 cam_periph_getccb(struct cam_periph *periph, uint32_t priority)
4635 {
4636 	struct ccb_hdr *ccb_h;
4637 
4638 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
4639 	cam_periph_assert(periph, MA_OWNED);
4640 	while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
4641 	    ccb_h->pinfo.priority != priority) {
4642 		if (priority < periph->immediate_priority) {
4643 			periph->immediate_priority = priority;
4644 			xpt_run_allocq(periph, 0);
4645 		} else
4646 			cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
4647 			    "cgticb", 0);
4648 	}
4649 	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
4650 	return ((union ccb *)ccb_h);
4651 }
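
/*
 * Illustrative sketch (comment only, not compiled): a peripheral
 * driver's start routine typically obtains its CCB at the priority it
 * was scheduled with, then fills it in and dispatches it:
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	... fill in the request ...
 *	xpt_action(ccb);
 *
 * The periph lock must be held, per the assertion above.
 */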
4652 
4653 static void
4654 xpt_acquire_bus(struct cam_eb *bus)
4655 {
4656 
4657 	xpt_lock_buses();
4658 	bus->refcount++;
4659 	xpt_unlock_buses();
4660 }
4661 
4662 static void
4663 xpt_release_bus(struct cam_eb *bus)
4664 {
4665 
4666 	xpt_lock_buses();
4667 	KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
4668 	if (--bus->refcount > 0) {
4669 		xpt_unlock_buses();
4670 		return;
4671 	}
4672 	TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4673 	xsoftc.bus_generation++;
4674 	xpt_unlock_buses();
4675 	KASSERT(TAILQ_EMPTY(&bus->et_entries),
4676 	    ("destroying bus, but target list is not empty"));
4677 	cam_sim_release(bus->sim);
4678 	mtx_destroy(&bus->eb_mtx);
4679 	free(bus, M_CAMXPT);
4680 }
4681 
4682 static struct cam_et *
4683 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4684 {
4685 	struct cam_et *cur_target, *target;
4686 
4687 	mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4688 	mtx_assert(&bus->eb_mtx, MA_OWNED);
4689 	target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
4690 					 M_NOWAIT|M_ZERO);
4691 	if (target == NULL)
4692 		return (NULL);
4693 
4694 	TAILQ_INIT(&target->ed_entries);
4695 	target->bus = bus;
4696 	target->target_id = target_id;
4697 	target->refcount = 1;
4698 	target->generation = 0;
4699 	target->luns = NULL;
4700 	target->wluns = NULL;
4701 	mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
4702 	timevalclear(&target->last_reset);
4703 	/*
4704 	 * Hold a reference to our parent bus so it
4705 	 * will not go away before we do.
4706 	 */
4707 	bus->refcount++;
4708 
4709 	/* Insertion sort into our bus's target list */
4710 	cur_target = TAILQ_FIRST(&bus->et_entries);
4711 	while (cur_target != NULL && cur_target->target_id < target_id)
4712 		cur_target = TAILQ_NEXT(cur_target, links);
4713 	if (cur_target != NULL) {
4714 		TAILQ_INSERT_BEFORE(cur_target, target, links);
4715 	} else {
4716 		TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4717 	}
4718 	bus->generation++;
4719 	return (target);
4720 }
4721 
4722 static void
4723 xpt_acquire_target(struct cam_et *target)
4724 {
4725 	struct cam_eb *bus = target->bus;
4726 
4727 	mtx_lock(&bus->eb_mtx);
4728 	target->refcount++;
4729 	mtx_unlock(&bus->eb_mtx);
4730 }
4731 
4732 static void
4733 xpt_release_target(struct cam_et *target)
4734 {
4735 	struct cam_eb *bus = target->bus;
4736 
4737 	mtx_lock(&bus->eb_mtx);
4738 	if (--target->refcount > 0) {
4739 		mtx_unlock(&bus->eb_mtx);
4740 		return;
4741 	}
4742 	TAILQ_REMOVE(&bus->et_entries, target, links);
4743 	bus->generation++;
4744 	mtx_unlock(&bus->eb_mtx);
4745 	KASSERT(TAILQ_EMPTY(&target->ed_entries),
4746 	    ("destroying target, but device list is not empty"));
4747 	xpt_release_bus(bus);
4748 	mtx_destroy(&target->luns_mtx);
4749 	if (target->luns)
4750 		free(target->luns, M_CAMXPT);
4751 	free(target, M_CAMXPT);
4752 }
4753 
4754 static struct cam_ed *
4755 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
4756 			 lun_id_t lun_id)
4757 {
4758 	struct cam_ed *device;
4759 
4760 	device = xpt_alloc_device(bus, target, lun_id);
4761 	if (device == NULL)
4762 		return (NULL);
4763 
4764 	device->mintags = 1;
4765 	device->maxtags = 1;
4766 	return (device);
4767 }
4768 
4769 static void
4770 xpt_destroy_device(void *context, int pending)
4771 {
4772 	struct cam_ed	*device = context;
4773 
4774 	mtx_lock(&device->device_mtx);
4775 	mtx_destroy(&device->device_mtx);
4776 	free(device, M_CAMDEV);
4777 }
4778 
4779 struct cam_ed *
4780 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4781 {
4782 	struct cam_ed	*cur_device, *device;
4783 	struct cam_devq	*devq;
4784 	cam_status status;
4785 
4786 	mtx_assert(&bus->eb_mtx, MA_OWNED);
4787 	/* Make space for us in the device queue on our bus */
4788 	devq = bus->sim->devq;
4789 	mtx_lock(&devq->send_mtx);
4790 	status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
4791 	mtx_unlock(&devq->send_mtx);
4792 	if (status != CAM_REQ_CMP)
4793 		return (NULL);
4794 
4795 	device = (struct cam_ed *)malloc(sizeof(*device),
4796 					 M_CAMDEV, M_NOWAIT|M_ZERO);
4797 	if (device == NULL)
4798 		return (NULL);
4799 
4800 	cam_init_pinfo(&device->devq_entry);
4801 	device->target = target;
4802 	device->lun_id = lun_id;
4803 	device->sim = bus->sim;
4804 	if (cam_ccbq_init(&device->ccbq,
4805 			  bus->sim->max_dev_openings) != 0) {
4806 		free(device, M_CAMDEV);
4807 		return (NULL);
4808 	}
4809 	SLIST_INIT(&device->asyncs);
4810 	SLIST_INIT(&device->periphs);
4811 	device->generation = 0;
4812 	device->flags = CAM_DEV_UNCONFIGURED;
4813 	device->tag_delay_count = 0;
4814 	device->tag_saved_openings = 0;
4815 	device->refcount = 1;
4816 	mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
4817 	callout_init_mtx(&device->callout, &devq->send_mtx, 0);
4818 	TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
4819 	/*
4820 	 * Hold a reference to our parent target so it
4821 	 * will not go away before we do.
4822 	 */
4823 	target->refcount++;
4824 
4825 	cur_device = TAILQ_FIRST(&target->ed_entries);
4826 	while (cur_device != NULL && cur_device->lun_id < lun_id)
4827 		cur_device = TAILQ_NEXT(cur_device, links);
4828 	if (cur_device != NULL)
4829 		TAILQ_INSERT_BEFORE(cur_device, device, links);
4830 	else
4831 		TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4832 	target->generation++;
4833 	return (device);
4834 }
4835 
4836 void
4837 xpt_acquire_device(struct cam_ed *device)
4838 {
4839 	struct cam_eb *bus = device->target->bus;
4840 
4841 	mtx_lock(&bus->eb_mtx);
4842 	device->refcount++;
4843 	mtx_unlock(&bus->eb_mtx);
4844 }
4845 
4846 void
4847 xpt_release_device(struct cam_ed *device)
4848 {
4849 	struct cam_eb *bus = device->target->bus;
4850 	struct cam_devq *devq;
4851 
4852 	mtx_lock(&bus->eb_mtx);
4853 	if (--device->refcount > 0) {
4854 		mtx_unlock(&bus->eb_mtx);
4855 		return;
4856 	}
4857 
4858 	TAILQ_REMOVE(&device->target->ed_entries, device,links);
4859 	device->target->generation++;
4860 	mtx_unlock(&bus->eb_mtx);
4861 
4862 	/* Release our slot in the devq */
4863 	devq = bus->sim->devq;
4864 	mtx_lock(&devq->send_mtx);
4865 	cam_devq_resize(devq, devq->send_queue.array_size - 1);
4866 
4867 	KASSERT(SLIST_EMPTY(&device->periphs),
4868 	    ("destroying device, but periphs list is not empty"));
4869 	KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
4870 	    ("destroying device while still queued for ccbs"));
4871 
4872 	/* The send_mtx must be held when accessing the callout */
4873 	if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4874 		callout_stop(&device->callout);
4875 
4876 	mtx_unlock(&devq->send_mtx);
4877 
4878 	xpt_release_target(device->target);
4879 
4880 	cam_ccbq_fini(&device->ccbq);
4881 	/*
4882 	 * Free allocated memory.  free(9) does nothing if the
4883 	 * supplied pointer is NULL, so it is safe to call without
4884 	 * checking.
4885 	 */
4886 	free(device->supported_vpds, M_CAMXPT);
4887 	free(device->device_id, M_CAMXPT);
4888 	free(device->ext_inq, M_CAMXPT);
4889 	free(device->physpath, M_CAMXPT);
4890 	free(device->rcap_buf, M_CAMXPT);
4891 	free(device->serial_num, M_CAMXPT);
4892 	free(device->nvme_data, M_CAMXPT);
4893 	free(device->nvme_cdata, M_CAMXPT);
4894 	taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
4895 }
4896 
4897 uint32_t
4898 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4899 {
4900 	int	result;
4901 	struct	cam_ed *dev;
4902 
4903 	dev = path->device;
4904 	mtx_lock(&dev->sim->devq->send_mtx);
4905 	result = cam_ccbq_resize(&dev->ccbq, newopenings);
4906 	mtx_unlock(&dev->sim->devq->send_mtx);
4907 	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
4908 	 || (dev->inq_flags & SID_CmdQue) != 0)
4909 		dev->tag_saved_openings = newopenings;
4910 	return (result);
4911 }
4912 
4913 static struct cam_eb *
4914 xpt_find_bus(path_id_t path_id)
4915 {
4916 	struct cam_eb *bus;
4917 
4918 	xpt_lock_buses();
4919 	for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4920 	     bus != NULL;
4921 	     bus = TAILQ_NEXT(bus, links)) {
4922 		if (bus->path_id == path_id) {
4923 			bus->refcount++;
4924 			break;
4925 		}
4926 	}
4927 	xpt_unlock_buses();
4928 	return (bus);
4929 }
4930 
4931 static struct cam_et *
4932 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
4933 {
4934 	struct cam_et *target;
4935 
4936 	mtx_assert(&bus->eb_mtx, MA_OWNED);
4937 	for (target = TAILQ_FIRST(&bus->et_entries);
4938 	     target != NULL;
4939 	     target = TAILQ_NEXT(target, links)) {
4940 		if (target->target_id == target_id) {
4941 			target->refcount++;
4942 			break;
4943 		}
4944 	}
4945 	return (target);
4946 }
4947 
4948 static struct cam_ed *
4949 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4950 {
4951 	struct cam_ed *device;
4952 
4953 	mtx_assert(&target->bus->eb_mtx, MA_OWNED);
4954 	for (device = TAILQ_FIRST(&target->ed_entries);
4955 	     device != NULL;
4956 	     device = TAILQ_NEXT(device, links)) {
4957 		if (device->lun_id == lun_id) {
4958 			device->refcount++;
4959 			break;
4960 		}
4961 	}
4962 	return (device);
4963 }
4964 
4965 void
4966 xpt_start_tags(struct cam_path *path)
4967 {
4968 	struct ccb_relsim crs;
4969 	struct cam_ed *device;
4970 	struct cam_sim *sim;
4971 	int    newopenings;
4972 
4973 	device = path->device;
4974 	sim = path->bus->sim;
4975 	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4976 	xpt_freeze_devq(path, /*count*/1);
4977 	device->inq_flags |= SID_CmdQue;
4978 	if (device->tag_saved_openings != 0)
4979 		newopenings = device->tag_saved_openings;
4980 	else
4981 		newopenings = min(device->maxtags,
4982 				  sim->max_tagged_dev_openings);
4983 	xpt_dev_ccbq_resize(path, newopenings);
4984 	xpt_async(AC_GETDEV_CHANGED, path, NULL);
4985 	memset(&crs, 0, sizeof(crs));
4986 	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4987 	crs.ccb_h.func_code = XPT_REL_SIMQ;
4988 	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4989 	crs.openings
4990 	    = crs.release_timeout
4991 	    = crs.qfrozen_cnt
4992 	    = 0;
4993 	xpt_action((union ccb *)&crs);
4994 }
4995 
4996 void
4997 xpt_stop_tags(struct cam_path *path)
4998 {
4999 	struct ccb_relsim crs;
5000 	struct cam_ed *device;
5001 	struct cam_sim *sim;
5002 
5003 	device = path->device;
5004 	sim = path->bus->sim;
5005 	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5006 	device->tag_delay_count = 0;
5007 	xpt_freeze_devq(path, /*count*/1);
5008 	device->inq_flags &= ~SID_CmdQue;
5009 	xpt_dev_ccbq_resize(path, sim->max_dev_openings);
5010 	xpt_async(AC_GETDEV_CHANGED, path, NULL);
5011 	memset(&crs, 0, sizeof(crs));
5012 	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
5013 	crs.ccb_h.func_code = XPT_REL_SIMQ;
5014 	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5015 	crs.openings
5016 	    = crs.release_timeout
5017 	    = crs.qfrozen_cnt
5018 	    = 0;
5019 	xpt_action((union ccb *)&crs);
5020 }
5021 
5022 /*
5023  * Assume all possible buses are detected by this time, so allow boot
5024  * as soon as they are all scanned.
5025  */
5026 static void
5027 xpt_boot_delay(void *arg)
5028 {
5029 
5030 	xpt_release_boot();
5031 }
5032 
5033 /*
5034  * Now that all config hooks have completed, start the boot_delay timer,
5035  * waiting for possibly still undetected buses (USB) to appear.
5036  */
5037 static void
5038 xpt_ch_done(void *arg)
5039 {
5040 
5041 	callout_init(&xsoftc.boot_callout, 1);
5042 	callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay,
5043 	    SBT_1MS, xpt_boot_delay, NULL, 0);
5044 }
5045 SYSINIT(xpt_hw_delay, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_ANY, xpt_ch_done, NULL);
5046 
5047 /*
5048  * Now that interrupts are enabled, go find our devices
5049  */
5050 static void
5051 xpt_config(void *arg)
5052 {
5053 	if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
5054 		printf("xpt_config: failed to create taskqueue thread.\n");
5055 
5056 	/* Setup debugging path */
5057 	if (cam_dflags != CAM_DEBUG_NONE) {
5058 		if (xpt_create_path(&cam_dpath, NULL,
5059 				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
5060 				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
5061 			printf(
5062 "xpt_config: xpt_create_path() failed for debug target %d:%d:%d, debugging disabled\n",
5063 			    CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
5064 			cam_dflags = CAM_DEBUG_NONE;
5065 		}
5066 	} else
5067 		cam_dpath = NULL;
5068 
5069 	periphdriver_init(1);
5070 	xpt_hold_boot();
5071 
5072 	/* Fire up rescan thread. */
5073 	if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
5074 	    "cam", "scanner")) {
5075 		printf("xpt_config: failed to create rescan thread.\n");
5076 	}
5077 }
5078 
5079 void
5080 xpt_hold_boot_locked(void)
5081 {
5082 
5083 	if (xsoftc.buses_to_config++ == 0)
5084 		root_mount_hold_token("CAM", &xsoftc.xpt_rootmount);
5085 }
5086 
5087 void
5088 xpt_hold_boot(void)
5089 {
5090 
5091 	xpt_lock_buses();
5092 	xpt_hold_boot_locked();
5093 	xpt_unlock_buses();
5094 }
5095 
5096 void
5097 xpt_release_boot(void)
5098 {
5099 
5100 	xpt_lock_buses();
5101 	if (--xsoftc.buses_to_config == 0) {
5102 		if (xsoftc.buses_config_done == 0) {
5103 			xsoftc.buses_config_done = 1;
5104 			xsoftc.buses_to_config++;
5105 			TASK_INIT(&xsoftc.boot_task, 0, xpt_finishconfig_task,
5106 			    NULL);
5107 			taskqueue_enqueue(taskqueue_thread, &xsoftc.boot_task);
5108 		} else
5109 			root_mount_rel(&xsoftc.xpt_rootmount);
5110 	}
5111 	xpt_unlock_buses();
5112 }
5113 
5114 /*
5115  * If the given device only has one peripheral attached to it, and if that
5116  * peripheral is the passthrough driver, announce it.  This ensures that the
5117  * user sees some sort of announcement for every peripheral in their system.
5118  */
5119 static int
5120 xptpassannouncefunc(struct cam_ed *device, void *arg)
5121 {
5122 	struct cam_periph *periph;
5123 	int i;
5124 
5125 	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
5126 	     periph = SLIST_NEXT(periph, periph_links), i++);
5127 
5128 	periph = SLIST_FIRST(&device->periphs);
5129 	if ((i == 1)
5130 	 && (strncmp(periph->periph_name, "pass", 4) == 0))
5131 		xpt_announce_periph(periph, NULL);
5132 
5133 	return(1);
5134 }
5135 
5136 static void
5137 xpt_finishconfig_task(void *context, int pending)
5138 {
5139 
5140 	periphdriver_init(2);
5141 	/*
5142 	 * Check for devices with no "standard" peripheral driver
5143 	 * attached.  For any devices like that, announce the
5144 	 * passthrough driver so the user will see something.
5145 	 */
5146 	if (!bootverbose)
5147 		xpt_for_all_devices(xptpassannouncefunc, NULL);
5148 
5149 	xpt_release_boot();
5150 }
5151 
5152 cam_status
5153 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
5154 		   struct cam_path *path)
5155 {
5156 	struct ccb_setasync csa;
5157 	cam_status status;
5158 	bool xptpath = false;
5159 
5160 	if (path == NULL) {
5161 		status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
5162 					 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
5163 		if (status != CAM_REQ_CMP)
5164 			return (status);
5165 		xpt_path_lock(path);
5166 		xptpath = true;
5167 	}
5168 
5169 	memset(&csa, 0, sizeof(csa));
5170 	xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
5171 	csa.ccb_h.func_code = XPT_SASYNC_CB;
5172 	csa.event_enable = event;
5173 	csa.callback = cbfunc;
5174 	csa.callback_arg = cbarg;
5175 	xpt_action((union ccb *)&csa);
5176 	status = csa.ccb_h.status;
5177 
5178 	CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE,
5179 	    ("xpt_register_async: func %p\n", cbfunc));
5180 
5181 	if (xptpath) {
5182 		xpt_path_unlock(path);
5183 		xpt_free_path(path);
5184 	}
5185 
5186 	if ((status == CAM_REQ_CMP) &&
5187 	    (csa.event_enable & AC_FOUND_DEVICE)) {
5188 		/*
5189 		 * Get this peripheral up to date with all
5190 		 * the currently existing devices.
5191 		 */
5192 		xpt_for_all_devices(xptsetasyncfunc, &csa);
5193 	}
5194 	if ((status == CAM_REQ_CMP) &&
5195 	    (csa.event_enable & AC_PATH_REGISTERED)) {
5196 		/*
5197 		 * Get this peripheral up to date with all
5198 		 * the currently existing buses.
5199 		 */
5200 		xpt_for_all_busses(xptsetasyncbusfunc, &csa);
5201 	}
5202 
5203 	return (status);
5204 }
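
/*
 * Illustrative sketch (comment only, not compiled): a client asking to
 * hear about newly discovered devices; "mydriver_async" is hypothetical.
 *
 *	status = xpt_register_async(AC_FOUND_DEVICE, mydriver_async,
 *	    NULL, NULL);
 *
 * Passing a NULL path registers against the wildcard xpt path, and, as
 * implemented above, AC_FOUND_DEVICE immediately replays the event for
 * devices that already exist.
 */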
5205 
5206 static void
5207 xptaction(struct cam_sim *sim, union ccb *work_ccb)
5208 {
5209 	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
5210 
5211 	switch (work_ccb->ccb_h.func_code) {
5212 	/* Common cases first */
5213 	case XPT_PATH_INQ:		/* Path routing inquiry */
5214 	{
5215 		struct ccb_pathinq *cpi;
5216 
5217 		cpi = &work_ccb->cpi;
5218 		cpi->version_num = 1; /* XXX??? */
5219 		cpi->hba_inquiry = 0;
5220 		cpi->target_sprt = 0;
5221 		cpi->hba_misc = 0;
5222 		cpi->hba_eng_cnt = 0;
5223 		cpi->max_target = 0;
5224 		cpi->max_lun = 0;
5225 		cpi->initiator_id = 0;
5226 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5227 		strlcpy(cpi->hba_vid, "", HBA_IDLEN);
5228 		strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
5229 		cpi->unit_number = sim->unit_number;
5230 		cpi->bus_id = sim->bus_id;
5231 		cpi->base_transfer_speed = 0;
5232 		cpi->protocol = PROTO_UNSPECIFIED;
5233 		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
5234 		cpi->transport = XPORT_UNSPECIFIED;
5235 		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
5236 		cpi->ccb_h.status = CAM_REQ_CMP;
5237 		break;
5238 	}
5239 	default:
5240 		work_ccb->ccb_h.status = CAM_REQ_INVALID;
5241 		break;
5242 	}
5243 	xpt_done(work_ccb);
5244 }
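/*
 * Illustrative sketch: the request side of the XPT_PATH_INQ case above.
 * A caller fills in a ccb_pathinq for an existing path and dispatches it
 * through xpt_action():
 *
 *	struct ccb_pathinq cpi;
 *
 *	memset(&cpi, 0, sizeof(cpi));
 *	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
 *	cpi.ccb_h.func_code = XPT_PATH_INQ;
 *	xpt_action((union ccb *)&cpi);
 *	if (cpi.ccb_h.status == CAM_REQ_CMP)
 *		printf("base transfer speed %u KB/s\n",
 *		    cpi.base_transfer_speed);
 */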
5245 
5246 /*
5247  * The xpt as a "controller" has no interrupt sources, so polling
5248  * is a no-op.
5249  */
5250 static void
5251 xptpoll(struct cam_sim *sim)
5252 {
5253 }
5254 
5255 void
5256 xpt_lock_buses(void)
5257 {
5258 	mtx_lock(&xsoftc.xpt_topo_lock);
5259 }
5260 
5261 void
5262 xpt_unlock_buses(void)
5263 {
5264 	mtx_unlock(&xsoftc.xpt_topo_lock);
5265 }
5266 
5267 struct mtx *
5268 xpt_path_mtx(struct cam_path *path)
5269 {
5270 
5271 	return (&path->device->device_mtx);
5272 }
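/*
 * Illustrative sketch: callers that must hold the path (device) lock can
 * reach it through this accessor instead of dereferencing the topology
 * themselves, e.g.:
 *
 *	struct mtx *mtx = xpt_path_mtx(path);
 *
 *	mtx_lock(mtx);
 *	...issue CCBs or touch per-device state...
 *	mtx_unlock(mtx);
 */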
5273 
5274 static void
5275 xpt_done_process(struct ccb_hdr *ccb_h)
5276 {
5277 	struct cam_sim *sim = NULL;
5278 	struct cam_devq *devq = NULL;
5279 	struct mtx *mtx = NULL;
5280 
5281 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
5282 	struct ccb_scsiio *csio;
5283 
5284 	if (ccb_h->func_code == XPT_SCSI_IO) {
5285 		csio = &((union ccb *)ccb_h)->csio;
5286 		if (csio->bio != NULL)
5287 			biotrack(csio->bio, __func__);
5288 	}
5289 #endif
5290 
5291 	if (ccb_h->flags & CAM_HIGH_POWER) {
5292 		struct highpowerlist	*hphead;
5293 		struct cam_ed		*device;
5294 
5295 		mtx_lock(&xsoftc.xpt_highpower_lock);
5296 		hphead = &xsoftc.highpowerq;
5297 
5298 		device = STAILQ_FIRST(hphead);
5299 
5300 		/*
5301 		 * Increment the count since this command is done.
5302 		 */
5303 		xsoftc.num_highpower++;
5304 
5305 		/*
5306 		 * Any high powered commands queued up?
5307 		 */
5308 		if (device != NULL) {
5309 			STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
5310 			mtx_unlock(&xsoftc.xpt_highpower_lock);
5311 
5312 			mtx_lock(&device->sim->devq->send_mtx);
5313 			xpt_release_devq_device(device,
5314 					 /*count*/1, /*runqueue*/TRUE);
5315 			mtx_unlock(&device->sim->devq->send_mtx);
5316 		} else
5317 			mtx_unlock(&xsoftc.xpt_highpower_lock);
5318 	}
5319 
5320 	/*
5321 	 * Insulate against a race where the periph is destroyed but CCBs are
5322 	 * still not all processed. This shouldn't happen, but handling it
5323 	 * here gives us better bug diagnostics when it does.
5324 	 */
5325 	if (ccb_h->path->bus)
5326 		sim = ccb_h->path->bus->sim;
5327 
5328 	if (ccb_h->status & CAM_RELEASE_SIMQ) {
5329 		KASSERT(sim, ("sim missing for CAM_RELEASE_SIMQ request"));
5330 		xpt_release_simq(sim, /*run_queue*/FALSE);
5331 		ccb_h->status &= ~CAM_RELEASE_SIMQ;
5332 	}
5333 
5334 	if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5335 	 && (ccb_h->status & CAM_DEV_QFRZN)) {
5336 		xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
5337 		ccb_h->status &= ~CAM_DEV_QFRZN;
5338 	}
5339 
5340 	if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
5341 		struct cam_ed *dev = ccb_h->path->device;
5342 
5343 		if (sim)
5344 			devq = sim->devq;
5345 		KASSERT(devq, ("Periph disappeared with CCB %p %s request pending.",
5346 			ccb_h, xpt_action_name(ccb_h->func_code)));
5347 
5348 		mtx_lock(&devq->send_mtx);
5349 		devq->send_active--;
5350 		devq->send_openings++;
5351 		cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5352 
5353 		if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5354 		  && (dev->ccbq.dev_active == 0))) {
5355 			dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
5356 			xpt_release_devq_device(dev, /*count*/1,
5357 					 /*run_queue*/FALSE);
5358 		}
5359 
5360 		if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5361 		  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
5362 			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
5363 			xpt_release_devq_device(dev, /*count*/1,
5364 					 /*run_queue*/FALSE);
5365 		}
5366 
5367 		if (!device_is_queued(dev))
5368 			(void)xpt_schedule_devq(devq, dev);
5369 		xpt_run_devq(devq);
5370 		mtx_unlock(&devq->send_mtx);
5371 
5372 		if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
5373 			mtx = xpt_path_mtx(ccb_h->path);
5374 			mtx_lock(mtx);
5375 
5376 			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5377 			 && (--dev->tag_delay_count == 0))
5378 				xpt_start_tags(ccb_h->path);
5379 		}
5380 	}
5381 
5382 	if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
5383 		if (mtx == NULL) {
5384 			mtx = xpt_path_mtx(ccb_h->path);
5385 			mtx_lock(mtx);
5386 		}
5387 	} else {
5388 		if (mtx != NULL) {
5389 			mtx_unlock(mtx);
5390 			mtx = NULL;
5391 		}
5392 	}
5393 
5394 	/*
5395 	 * Call as late as possible. Do we want an early one too before the
5396 	 * unfreeze / releases above?
5397 	 */
5398 	CAM_PROBE1(xpt, done, (union ccb *)ccb_h);	/* container_of? */
5399 	/* Call the peripheral driver's callback */
5400 	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
5401 	(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
5402 	if (mtx != NULL)
5403 		mtx_unlock(mtx);
5404 }
5405 
5406 /*
5407  * Parameterize instead and use xpt_done_td?
5408  */
5409 static void
5410 xpt_async_td(void *arg)
5411 {
5412 	struct cam_doneq *queue = arg;
5413 	struct ccb_hdr *ccb_h;
5414 	STAILQ_HEAD(, ccb_hdr)	doneq;
5415 
5416 	STAILQ_INIT(&doneq);
5417 	mtx_lock(&queue->cam_doneq_mtx);
5418 	while (1) {
5419 		while (STAILQ_EMPTY(&queue->cam_doneq))
5420 			msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5421 			    PRIBIO, "-", 0);
5422 		STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5423 		mtx_unlock(&queue->cam_doneq_mtx);
5424 
5425 		while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5426 			STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5427 			xpt_done_process(ccb_h);
5428 		}
5429 
5430 		mtx_lock(&queue->cam_doneq_mtx);
5431 	}
5432 }
5433 
5434 void
5435 xpt_done_td(void *arg)
5436 {
5437 	struct cam_doneq *queue = arg;
5438 	struct ccb_hdr *ccb_h;
5439 	STAILQ_HEAD(, ccb_hdr)	doneq;
5440 
5441 	STAILQ_INIT(&doneq);
5442 	mtx_lock(&queue->cam_doneq_mtx);
5443 	while (1) {
5444 		while (STAILQ_EMPTY(&queue->cam_doneq)) {
5445 			queue->cam_doneq_sleep = 1;
5446 			msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5447 			    PRIBIO, "-", 0);
5448 			queue->cam_doneq_sleep = 0;
5449 		}
5450 		STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5451 		mtx_unlock(&queue->cam_doneq_mtx);
5452 
5453 		THREAD_NO_SLEEPING();
5454 		while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5455 			STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5456 			xpt_done_process(ccb_h);
5457 		}
5458 		THREAD_SLEEPING_OK();
5459 
5460 		mtx_lock(&queue->cam_doneq_mtx);
5461 	}
5462 }
5463 
5464 static void
5465 camisr_runqueue(void)
5466 {
5467 	struct	ccb_hdr *ccb_h;
5468 	struct cam_doneq *queue;
5469 	int i;
5470 
5471 	/* Process global queues. */
5472 	for (i = 0; i < cam_num_doneqs; i++) {
5473 		queue = &cam_doneqs[i];
5474 		mtx_lock(&queue->cam_doneq_mtx);
5475 		while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
5476 			STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
5477 			mtx_unlock(&queue->cam_doneq_mtx);
5478 			xpt_done_process(ccb_h);
5479 			mtx_lock(&queue->cam_doneq_mtx);
5480 		}
5481 		mtx_unlock(&queue->cam_doneq_mtx);
5482 	}
5483 }
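/*
 * Illustrative sketch of the producer side of these queues: a SIM
 * completes a request by setting the CCB status and calling xpt_done(),
 * after which one of the threads above runs xpt_done_process() and the
 * peripheral's callback.  The interrupt handler name is hypothetical.
 *
 *	static void
 *	xyz_intr(void *arg)
 *	{
 *		union ccb *ccb;
 *
 *		...fetch the completed request from the controller...
 *		ccb->ccb_h.status = CAM_REQ_CMP;
 *		xpt_done(ccb);
 *	}
 */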
5484 
5485 /**
5486  * @brief Return the device_t associated with the path
5487  *
5488  * When a SIM is created, it registers a bus with a NEWBUS device_t. This is
5489  * stored in the internal cam_eb bus structure. There is no guarantee any given
5490  * path will have a @c device_t associated with it (it's legal to call @c
5491  * xpt_bus_register with a @c NULL @c device_t).
5492  *
5493  * @param path		Path to return the device_t for.
5494  */
5495 device_t
5496 xpt_path_sim_device(const struct cam_path *path)
5497 {
5498 	return (path->bus->parent_dev);
5499 }
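/*
 * Illustrative usage: since the device_t may be absent, callers are
 * expected to handle a NULL return, e.g.:
 *
 *	device_t dev = xpt_path_sim_device(path);
 *
 *	if (dev != NULL)
 *		device_printf(dev, "attached via newbus\n");
 */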
5500 
5501 struct kv
5502 {
5503 	uint32_t v;
5504 	const char *name;
5505 };
5506 
5507 static struct kv map[] = {
5508 	{ XPT_NOOP, "XPT_NOOP" },
5509 	{ XPT_SCSI_IO, "XPT_SCSI_IO" },
5510 	{ XPT_GDEV_TYPE, "XPT_GDEV_TYPE" },
5511 	{ XPT_GDEVLIST, "XPT_GDEVLIST" },
5512 	{ XPT_PATH_INQ, "XPT_PATH_INQ" },
5513 	{ XPT_REL_SIMQ, "XPT_REL_SIMQ" },
5514 	{ XPT_SASYNC_CB, "XPT_SASYNC_CB" },
5515 	{ XPT_SDEV_TYPE, "XPT_SDEV_TYPE" },
5516 	{ XPT_SCAN_BUS, "XPT_SCAN_BUS" },
5517 	{ XPT_DEV_MATCH, "XPT_DEV_MATCH" },
5518 	{ XPT_DEBUG, "XPT_DEBUG" },
5519 	{ XPT_PATH_STATS, "XPT_PATH_STATS" },
5520 	{ XPT_GDEV_STATS, "XPT_GDEV_STATS" },
5521 	{ XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" },
5522 	{ XPT_ASYNC, "XPT_ASYNC" },
5523 	{ XPT_ABORT, "XPT_ABORT" },
5524 	{ XPT_RESET_BUS, "XPT_RESET_BUS" },
5525 	{ XPT_RESET_DEV, "XPT_RESET_DEV" },
5526 	{ XPT_TERM_IO, "XPT_TERM_IO" },
5527 	{ XPT_SCAN_LUN, "XPT_SCAN_LUN" },
5528 	{ XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" },
5529 	{ XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" },
5530 	{ XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" },
5531 	{ XPT_ATA_IO, "XPT_ATA_IO" },
5532 	{ XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" },
5533 	{ XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" },
5534 	{ XPT_NVME_IO, "XPT_NVME_IO" },
5535 	{ XPT_MMC_IO, "XPT_MMC_IO" },
5536 	{ XPT_SMP_IO, "XPT_SMP_IO" },
5537 	{ XPT_SCAN_TGT, "XPT_SCAN_TGT" },
5538 	{ XPT_NVME_ADMIN, "XPT_NVME_ADMIN" },
5539 	{ XPT_ENG_INQ, "XPT_ENG_INQ" },
5540 	{ XPT_ENG_EXEC, "XPT_ENG_EXEC" },
5541 	{ XPT_EN_LUN, "XPT_EN_LUN" },
5542 	{ XPT_TARGET_IO, "XPT_TARGET_IO" },
5543 	{ XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" },
5544 	{ XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" },
5545 	{ XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" },
5546 	{ XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" },
5547 	{ XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" },
5548 	{ XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" },
5549 	{ 0, 0 }
5550 };
5551 
5552 const char *
5553 xpt_action_name(uint32_t action)
5554 {
5555 	static char buffer[32];	/* Only for unknown messages -- racy */
5556 	struct kv *walker = map;
5557 
5558 	while (walker->name != NULL) {
5559 		if (walker->v == action)
5560 			return (walker->name);
5561 		walker++;
5562 	}
5563 
5564 	snprintf(buffer, sizeof(buffer), "%#x", action);
5565 	return (buffer);
5566 }
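/*
 * Illustrative usage: handy for diagnostics, as in the KASSERT in
 * xpt_done_process() above, or in ad hoc tracing such as:
 *
 *	printf("%s ccb %p completed\n",
 *	    xpt_action_name(ccb_h->func_code), ccb_h);
 */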
5567 
5568 void
5569 xpt_cam_path_debug(struct cam_path *path, const char *fmt, ...)
5570 {
5571 	struct sbuf sbuf;
5572 	char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */
5573 	struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
5574 	va_list ap;
5575 
5576 	sbuf_set_drain(sb, sbuf_printf_drain, NULL);
5577 	xpt_path_sbuf(path, sb);
5578 	va_start(ap, fmt);
5579 	sbuf_vprintf(sb, fmt, ap);
5580 	va_end(ap);
5581 	sbuf_finish(sb);
5582 	sbuf_delete(sb);
5583 	if (cam_debug_delay != 0)
5584 		DELAY(cam_debug_delay);
5585 }
5586 
5587 void
5588 xpt_cam_dev_debug(struct cam_ed *dev, const char *fmt, ...)
5589 {
5590 	struct sbuf sbuf;
5591 	char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */
5592 	struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
5593 	va_list ap;
5594 
5595 	sbuf_set_drain(sb, sbuf_printf_drain, NULL);
5596 	xpt_device_sbuf(dev, sb);
5597 	va_start(ap, fmt);
5598 	sbuf_vprintf(sb, fmt, ap);
5599 	va_end(ap);
5600 	sbuf_finish(sb);
5601 	sbuf_delete(sb);
5602 	if (cam_debug_delay != 0)
5603 		DELAY(cam_debug_delay);
5604 }
5605 
5606 void
5607 xpt_cam_debug(const char *fmt, ...)
5608 {
5609 	struct sbuf sbuf;
5610 	char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */
5611 	struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
5612 	va_list ap;
5613 
5614 	sbuf_set_drain(sb, sbuf_printf_drain, NULL);
5615 	sbuf_cat(sb, "cam_debug: ");
5616 	va_start(ap, fmt);
5617 	sbuf_vprintf(sb, fmt, ap);
5618 	va_end(ap);
5619 	sbuf_finish(sb);
5620 	sbuf_delete(sb);
5621 	if (cam_debug_delay != 0)
5622 		DELAY(cam_debug_delay);
5623 }
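/*
 * Illustrative usage: these sbuf-backed helpers do the formatting for the
 * CAM_DEBUG-style macros in cam_debug.h (an inference from the naming;
 * whether a message is emitted is gated by the debug flag mask).  This
 * file itself uses the path variant, e.g.:
 *
 *	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
 */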
5624