xref: /freebsd/sys/cam/cam_periph.c (revision ee41f1b1cf5e3d4f586cb85b46123b416275862c)
1 /*
2  * Common functions for CAM "type" (peripheral) drivers.
3  *
4  * Copyright (c) 1997, 1998 Justin T. Gibbs.
5  * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/types.h>
35 #include <sys/malloc.h>
36 #include <sys/linker_set.h>
37 #include <sys/bio.h>
38 #include <sys/buf.h>
39 #include <sys/proc.h>
40 #include <sys/devicestat.h>
41 #include <sys/bus.h>
42 #include <vm/vm.h>
43 #include <vm/vm_extern.h>
44 
45 #include <cam/cam.h>
46 #include <cam/cam_ccb.h>
47 #include <cam/cam_xpt_periph.h>
48 #include <cam/cam_periph.h>
49 #include <cam/cam_debug.h>
50 
51 #include <cam/scsi/scsi_all.h>
52 #include <cam/scsi/scsi_message.h>
53 #include <cam/scsi/scsi_pass.h>
54 
55 static	u_int		camperiphnextunit(struct periph_driver *p_drv,
56 					  u_int newunit, int wired,
57 					  path_id_t pathid, target_id_t target,
58 					  lun_id_t lun);
59 static	u_int		camperiphunit(struct periph_driver *p_drv,
60 				      path_id_t pathid, target_id_t target,
61 				      lun_id_t lun);
62 static	void		camperiphdone(struct cam_periph *periph,
63 					union ccb *done_ccb);
64 static  void		camperiphfree(struct cam_periph *periph);
65 
66 static int nperiph_drivers;
67 struct periph_driver **periph_drivers;
68 
69 void
70 periphdriver_register(void *data)
71 {
72 	struct periph_driver **newdrivers, **old;
73 	int ndrivers;
74 
75 	ndrivers = nperiph_drivers + 2;
76 	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_TEMP, M_WAITOK);
77 	if (periph_drivers)
78 		bcopy(periph_drivers, newdrivers,
79 		      sizeof(*newdrivers) * ndrivers);
80 	newdrivers[nperiph_drivers] = (struct periph_driver *)data;
81 	newdrivers[nperiph_drivers + 1] = NULL;
82 	old = periph_drivers;
83 	periph_drivers = newdrivers;
84 	if (old)
85 		free(old, M_TEMP);
86 	nperiph_drivers++;
87 }
88 
/*
 * Allocate and initialize a peripheral instance for the given path,
 * wiring in the driver-supplied constructor, destructor, start, and
 * oninvalidate callbacks.  Returns CAM_REQ_CMP on success,
 * CAM_REQ_INPROG when an old invalidated instance still exists (a
 * deferred async callback is queued for when it is freed), or an error
 * status.  Partial initialization is unwound via the init_level switch
 * at the bottom.
 */
cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct		periph_driver **p_drv;
	struct		cam_periph *periph;
	struct		cam_periph *cur_periph;
	path_id_t	path_id;
	target_id_t	target_id;
	lun_id_t	lun_id;
	cam_status	status;
	u_int		init_level;
	int s;

	/* init_level records how far setup got, for the unwind below. */
	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {

		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			/* Defer the callback until the old unit is freed. */
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			printf("cam_periph_alloc: attempt to re-allocate "
			       "valid device %s%d rejected\n",
			       periph->periph_name, periph->unit_number);
		}
		return (CAM_REQ_INVALID);
	}

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_DEVBUF,
					     M_NOWAIT);

	if (periph == NULL)
		return (CAM_RESRC_UNAVAIL);

	init_level++;

	/*
	 * Find this driver's slot in the registered-driver table.
	 * NOTE(review): assumes a driver named "name" has been
	 * registered; *p_drv is dereferenced below with no NULL
	 * check -- confirm all callers register first.
	 */
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}

	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	bzero(periph, sizeof(*periph));
	cam_init_pinfo(&periph->pinfo);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 0;
	SLIST_INIT(&periph->ccb_list);
	/* The periph gets its own private copy of the path. */
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;

	periph->path = path;
	init_level++;

	status = xpt_add_periph(periph);

	if (status != CAM_REQ_CMP)
		goto failure;

	/* Insert into the driver's unit list, kept sorted by unit number. */
	s = splsoftcam();
	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);

	if (cur_periph != NULL)
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}

	splx(s);

	init_level++;

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	/* Unwind partial initialization; each case falls into the next. */
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		s = splsoftcam();
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		splx(s);
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		free(periph, M_DEVBUF);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("cam_periph_alloc: Unkown init level");
	}
	return(status);
}
213 
214 /*
215  * Find a peripheral structure with the specified path, target, lun,
216  * and (optionally) type.  If the name is NULL, this function will return
217  * the first peripheral driver that matches the specified path.
218  */
219 struct cam_periph *
220 cam_periph_find(struct cam_path *path, char *name)
221 {
222 	struct periph_driver **p_drv;
223 	struct cam_periph *periph;
224 	int s;
225 
226 	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
227 
228 		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
229 			continue;
230 
231 		s = splsoftcam();
232 		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
233 			if (xpt_path_comp(periph->path, path) == 0) {
234 				splx(s);
235 				return(periph);
236 			}
237 		}
238 		splx(s);
239 		if (name != NULL)
240 			return(NULL);
241 	}
242 	return(NULL);
243 }
244 
245 cam_status
246 cam_periph_acquire(struct cam_periph *periph)
247 {
248 	int s;
249 
250 	if (periph == NULL)
251 		return(CAM_REQ_CMP_ERR);
252 
253 	s = splsoftcam();
254 	periph->refcount++;
255 	splx(s);
256 
257 	return(CAM_REQ_CMP);
258 }
259 
260 void
261 cam_periph_release(struct cam_periph *periph)
262 {
263 	int s;
264 
265 	if (periph == NULL)
266 		return;
267 
268 	s = splsoftcam();
269 	if ((--periph->refcount == 0)
270 	 && (periph->flags & CAM_PERIPH_INVALID)) {
271 		camperiphfree(periph);
272 	}
273 	splx(s);
274 
275 }
276 
/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired" the
 * first time through the loop since after that we'll be looking at unit
 * numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct	cam_periph *periph;
	char	*periph_name, *strval;
	int	s;
	int	i, val, dunit;
	const char *dname;

	s = splsoftcam();
	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		/* Is "newunit" already taken by an existing periph? */
		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				/* A wired unit collides with a live one. */
				xpt_print_path(periph->path);
				printf("Duplicate Wired Device entry!\n");
				xpt_print_path(periph->path);
				printf("Second device (%s device at scbus%d "
				       "target %d lun %d) will not be wired\n",
				       periph_name, pathid, target, lun);
				wired = 0;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = -1;
		while ((i = resource_locate(i, periph_name)) != -1) {
			dname = resource_query_name(i);
			dunit = resource_query_unit(i);
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at",&strval)||
			     strcmp(strval, "scbus") == 0))
				continue;
			if (newunit == dunit)
				break;
		}
		/* No wiring entry reserves this unit number; use it. */
		if (i == -1)
			break;
	}
	splx(s);
	return (newunit);
}
342 
343 static u_int
344 camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
345 	      target_id_t target, lun_id_t lun)
346 {
347 	u_int	unit;
348 	int	hit, i, val, dunit;
349 	const char *dname;
350 	char	pathbuf[32], *strval, *periph_name;
351 
352 	unit = 0;
353 	hit = 0;
354 
355 	periph_name = p_drv->driver_name;
356 	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
357 	i = -1;
358 	while ((i = resource_locate(i, periph_name)) != -1) {
359 		dname = resource_query_name(i);
360 		dunit = resource_query_unit(i);
361 		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
362 			if (strcmp(strval, pathbuf) != 0)
363 				continue;
364 			hit++;
365 		}
366 		if (resource_int_value(dname, dunit, "target", &val) == 0) {
367 			if (val != target)
368 				continue;
369 			hit++;
370 		}
371 		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
372 			if (val != lun)
373 				continue;
374 			hit++;
375 		}
376 		if (hit != 0) {
377 			unit = dunit;
378 			break;
379 		}
380 	}
381 
382 	/*
383 	 * Either start from 0 looking for the next unit or from
384 	 * the unit number given in the resource config.  This way,
385 	 * if we have wildcard matches, we don't return the same
386 	 * unit number twice.
387 	 */
388 	unit = camperiphnextunit(p_drv, unit, /*wired*/hit, pathid,
389 				 target, lun);
390 
391 	return (unit);
392 }
393 
394 void
395 cam_periph_invalidate(struct cam_periph *periph)
396 {
397 	int s;
398 
399 	s = splsoftcam();
400 	/*
401 	 * We only call this routine the first time a peripheral is
402 	 * invalidated.  The oninvalidate() routine is always called at
403 	 * splsoftcam().
404 	 */
405 	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
406 	 && (periph->periph_oninval != NULL))
407 		periph->periph_oninval(periph);
408 
409 	periph->flags |= CAM_PERIPH_INVALID;
410 	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
411 
412 	if (periph->refcount == 0)
413 		camperiphfree(periph);
414 	else if (periph->refcount < 0)
415 		printf("cam_invalidate_periph: refcount < 0!!\n");
416 	splx(s);
417 }
418 
/*
 * Final teardown of a peripheral: run the driver destructor, unlink it
 * from its driver's unit list, detach it from the transport, fire any
 * deferred async callback recorded by cam_periph_alloc(), and free the
 * path and periph storage.
 */
static void
camperiphfree(struct cam_periph *periph)
{
	int s;
	struct periph_driver **p_drv;

	/*
	 * NOTE(review): assumes this periph's driver is still
	 * registered; *p_drv is dereferenced below with no NULL check.
	 */
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}

	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);

	s = splsoftcam();
	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;
	splx(s);

	xpt_remove_periph(periph);

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		/*
		 * Build the argument the deferred callback expects for
		 * the async code recorded by cam_periph_alloc().
		 */
		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_DEVBUF);
}
467 
468 /*
469  * Wait interruptibly for an exclusive lock.
470  */
471 int
472 cam_periph_lock(struct cam_periph *periph, int priority)
473 {
474 	int error;
475 
476 	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
477 		periph->flags |= CAM_PERIPH_LOCK_WANTED;
478 		if ((error = tsleep(periph, priority, "caplck", 0)) != 0)
479 			return error;
480 	}
481 
482 	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
483 		return(ENXIO);
484 
485 	periph->flags |= CAM_PERIPH_LOCKED;
486 	return 0;
487 }
488 
489 /*
490  * Unlock and wake up any waiters.
491  */
492 void
493 cam_periph_unlock(struct cam_periph *periph)
494 {
495 	periph->flags &= ~CAM_PERIPH_LOCKED;
496 	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
497 		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
498 		wakeup(periph);
499 	}
500 
501 	cam_periph_release(periph);
502 }
503 
/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This won't work on physical pointers, for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to DFLTPHYS memory per buffer.
 * Since we use buffers to map stuff in and out, we're limited to the
 * buffer size.
 *
 * Returns 0 on success, EINVAL for unsupported CCB types or a zero
 * match buffer length, E2BIG for oversize transfers, or EACCES when the
 * user memory is not accessible for the requested direction.  On
 * success, the CCB's data pointers are redirected to the mapped kernel
 * buffers; cam_periph_unmapmem() must later undo this.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	int flags[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];

	/* Work out which user buffers this CCB type carries. */
	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return(EINVAL);
		}
		/* The pattern buffer is optional; the match buffer is not. */
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		/* Nothing to map for a dataless transfer. */
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {

		flags[i] = 0;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS){
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than DFLTPHYS(%d)\n",
			       (long)(lengths[i] +
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
			       DFLTPHYS);
			return(E2BIG);
		}

		if (dirs[i] & CAM_DIR_OUT) {
			flags[i] = BIO_WRITE;
			if (!useracc(*data_ptrs[i], lengths[i],
				     VM_PROT_READ)) {
				printf("cam_periph_mapmem: error, "
					"address %p, length %lu isn't "
					"user accessible for READ\n",
					(void *)*data_ptrs[i],
					(u_long)lengths[i]);
				return(EACCES);
			}
		}

		/*
		 * NOTE(review): for a CAM_DIR_BOTH transfer this
		 * assignment overwrites the BIO_WRITE set above rather
		 * than combining flags -- confirm bidirectional
		 * transfers are not expected through this path.
		 */
		if (dirs[i] & CAM_DIR_IN) {
			flags[i] = BIO_READ;
			if (!useracc(*data_ptrs[i], lengths[i],
				     VM_PROT_WRITE)) {
				printf("cam_periph_mapmem: error, "
					"address %p, length %lu isn't "
					"user accessible for WRITE\n",
					(void *)*data_ptrs[i],
					(u_long)lengths[i]);

				return(EACCES);
			}
		}

	}

	/* this keeps the current process from getting swapped */
	/*
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* save the buffer's data address */
		mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;

		/* put our pointer in the data slot */
		mapinfo->bp[i]->b_data = *data_ptrs[i];

		/* set the transfer length, we know it's < DFLTPHYS */
		mapinfo->bp[i]->b_bufsize = lengths[i];

		/* set the flags */
		mapinfo->bp[i]->b_flags = B_PHYS;

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = flags[i];

		/* map the buffer into kernel memory */
		vmapbuf(mapinfo->bp[i]);

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		mapinfo->num_bufs_used++;
	}

	return(0);
}
650 
/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().  Restores the CCB's original userland data
 * pointers, releases the pbufs, and drops the PHOLD taken on curproc.
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
	}

	/* Recover the data-pointer slots chosen by cam_periph_mapmem(). */
	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	default:
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;

		/* unmap the buffer */
		vunmapbuf(mapinfo->bp[i]);

		/* clear the flags we set above */
		mapinfo->bp[i]->b_flags &= ~B_PHYS;

		/* release the buffer */
		relpbuf(mapinfo->bp[i], NULL);
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);
}
707 
/*
 * Obtain a CCB for this periph at the given priority, sleeping until
 * one at that priority is delivered to the periph's ccb_list via
 * xpt_schedule().
 */
union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;
	int s;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));

	s = splsoftcam();

	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
		/* Remember the most urgent priority we've been asked for. */
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		/* xpt_schedule() may have delivered a CCB synchronously. */
		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
			break;
		tsleep(&periph->ccb_list, PRIBIO, "cgticb", 0);
	}

	/* Dequeue the CCB at the head of the list and hand it back. */
	ccb_h = SLIST_FIRST(&periph->ccb_list);
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	splx(s);
	return ((union ccb *)ccb_h);
}
733 
/*
 * Sleep until the given CCB has completed.
 * NOTE(review): an "if" rather than a "while" guards the tsleep(), so
 * a spurious wakeup on &ccb->ccb_h.cbfcnp would return before the CCB
 * is actually done -- verify wakeups only occur on completion.
 */
void
cam_periph_ccbwait(union ccb *ccb)
{
	int s;

	s = splsoftcam();
	/* Sleep only if the CCB is still queued or still in progress. */
	if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
		tsleep(&ccb->ccb_h.cbfcnp, PRIBIO, "cbwait", 0);

	splx(s);
}
746 
/*
 * Common ioctl handler for peripheral drivers.  Currently implements
 * only CAMGETPASSTHRU: walk the device list on this periph's path
 * until a "pass" driver instance is found, then copy the resulting
 * XPT_GDEVLIST CCB out to the caller at "addr".  Returns ENOTTY for
 * any other command.
 *
 * NOTE(review): the error_routine argument is accepted but never used
 * in this function.
 */
int
cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb 	     *ccb;
	int 		     error;
	int		     found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, /* priority */ 1);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			/* No pass device exists; report an empty name. */
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}
808 
/*
 * Issue a CCB synchronously: start it with xpt_action(), sleep until
 * it completes, and keep re-waiting while the supplied error routine
 * asks for a retry (ERESTART).  Any device-queue freeze left by the
 * transaction is released, and if a devstat is supplied for an
 * XPT_SCSI_IO the transaction is recorded.  Returns 0 on success or
 * the error computed by error_routine.
 */
int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	int error;

	error = 0;

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_start_transaction(ds);

	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;

	} while (error == ERESTART);

	/* Thaw the device queue if the transaction left it frozen. */
	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);

	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_end_transaction(ds,
					ccb->csio.dxfer_len,
					ccb->csio.tag_action & 0xf,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ?  DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ);

	return(error);
}
860 
861 void
862 cam_freeze_devq(struct cam_path *path)
863 {
864 	struct ccb_hdr ccb_h;
865 
866 	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
867 	ccb_h.func_code = XPT_NOOP;
868 	ccb_h.flags = CAM_DEV_QFREEZE;
869 	xpt_action((union ccb *)&ccb_h);
870 }
871 
872 u_int32_t
873 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
874 		 u_int32_t openings, u_int32_t timeout,
875 		 int getcount_only)
876 {
877 	struct ccb_relsim crs;
878 
879 	xpt_setup_ccb(&crs.ccb_h, path,
880 		      /*priority*/1);
881 	crs.ccb_h.func_code = XPT_REL_SIMQ;
882 	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
883 	crs.release_flags = relsim_flags;
884 	crs.openings = openings;
885 	crs.release_timeout = timeout;
886 	xpt_action((union ccb *)&crs);
887 	return (crs.qfrozen_cnt);
888 }
889 
#define saved_ccb_ptr ppriv_ptr0
/*
 * Completion handler for error-recovery CCBs issued on behalf of a
 * periph (e.g. a START STOP UNIT to spin a drive up).  The caller's
 * original CCB is stashed in saved_ccb_ptr; once recovery succeeds,
 * fails with sense, or runs out of retries, the original is copied
 * back over the recovery CCB, CAM_PERIPH_RECOVERY_INPROG is cleared,
 * and the original is re-issued via xpt_action().
 */
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	cam_status	status;
	int		frozen;
	int		sense;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t	relsim_flags, timeout;
	u_int32_t	qfrozen_cnt;

	status = done_ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	sense  = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	timeout = 0;
	relsim_flags = 0;

	/*
	 * Unfreeze the queue once if it is already frozen..
	 */
	if (frozen != 0) {
		qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
					      /*relsim_flags*/0,
					      /*openings*/0,
					      /*timeout*/0,
					      /*getcount_only*/0);
	}

	switch (status) {

	case CAM_REQ_CMP:

		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get the
		 * inquiry information.  Many devices (mostly disks) don't
		 * properly report their inquiry information unless they
		 * are spun up.
		 */
		if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_cmd = (struct scsi_start_stop_unit *)
					&done_ccb->csio.cdb_io.cdb_bytes;

		 	if (scsi_cmd->opcode == START_STOP_UNIT)
				xpt_async(AC_INQ_CHANGED,
					  done_ccb->ccb_h.path, NULL);
		}
		/* Recovery worked: restore and re-issue the original CCB. */
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		xpt_action(done_ccb);

		break;
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
		if (sense != 0) {
			struct scsi_sense_data *sense;
			int    error_code, sense_key, asc, ascq;

			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code,
					   &sense_key, &asc, &ascq);

			/*
	 		 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */

			/* XXX KDM
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * opcode is..
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			     (asc == 0x24) && (ascq == 0x00) &&
			     (done_ccb->ccb_h.retry_count > 0)) {

				/* Retry without the load/eject bit. */
				scsi_cmd->how &= ~SSS_LOEJ;

				xpt_action(done_ccb);

			} else if (done_ccb->ccb_h.retry_count > 0) {
				/*
				 * In this case, the error recovery
				 * command failed, but we've got
				 * some retries left on it.  Give
				 * it another try.
				 */

				/* set the timeout to .5 sec */
				relsim_flags =
					RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 500;

				xpt_action(done_ccb);

				break;

			} else {
				/*
				 * Copy the original CCB back and
				 * send it back to the caller.
				 */
				bcopy(done_ccb->ccb_h.saved_ccb_ptr,
				      done_ccb, sizeof(union ccb));

				periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

				xpt_action(done_ccb);
			}
		} else {
			/*
			 * Eh??  The command failed, but we don't
			 * have any sense.  What's up with that?
			 * Fire the CCB again to return it to the
			 * caller.
			 */
			bcopy(done_ccb->ccb_h.saved_ccb_ptr,
			      done_ccb, sizeof(union ccb));

			periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

			xpt_action(done_ccb);

		}
		break;
	default:
		/* Unexpected status: give the original CCB back as-is. */
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		xpt_action(done_ccb);

		break;
	}

	/* decrement the retry count */
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;

	/* Release the freeze taken for recovery, possibly after a delay. */
	qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
				      /*relsim_flags*/relsim_flags,
				      /*openings*/0,
				      /*timeout*/timeout,
				      /*getcount_only*/0);
}
1051 
1052 /*
1053  * Generic Async Event handler.  Peripheral drivers usually
1054  * filter out the events that require personal attention,
1055  * and leave the rest to this function.
1056  */
1057 void
1058 cam_periph_async(struct cam_periph *periph, u_int32_t code,
1059 		 struct cam_path *path, void *arg)
1060 {
1061 	switch (code) {
1062 	case AC_LOST_DEVICE:
1063 		cam_periph_invalidate(periph);
1064 		break;
1065 	case AC_SENT_BDR:
1066 	case AC_BUS_RESET:
1067 	{
1068 		cam_periph_bus_settle(periph, SCSI_DELAY);
1069 		break;
1070 	}
1071 	default:
1072 		break;
1073 	}
1074 }
1075 
1076 void
1077 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1078 {
1079 	struct ccb_getdevstats cgds;
1080 
1081 	xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
1082 	cgds.ccb_h.func_code = XPT_GDEV_STATS;
1083 	xpt_action((union ccb *)&cgds);
1084 	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
1085 }
1086 
1087 void
1088 cam_periph_freeze_after_event(struct cam_periph *periph,
1089 			      struct timeval* event_time, u_int duration_ms)
1090 {
1091 	struct timeval delta;
1092 	struct timeval duration_tv;
1093 	int s;
1094 
1095 	s = splclock();
1096 	microtime(&delta);
1097 	splx(s);
1098 	timevalsub(&delta, event_time);
1099 	duration_tv.tv_sec = duration_ms / 1000;
1100 	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
1101 	if (timevalcmp(&delta, &duration_tv, <)) {
1102 		timevalsub(&duration_tv, &delta);
1103 
1104 		duration_ms = duration_tv.tv_sec * 1000;
1105 		duration_ms += duration_tv.tv_usec / 1000;
1106 		cam_freeze_devq(periph->path);
1107 		cam_release_devq(periph->path,
1108 				RELSIM_RELEASE_AFTER_TIMEOUT,
1109 				/*reduction*/0,
1110 				/*timeout*/duration_ms,
1111 				/*getcount_only*/0);
1112 	}
1113 
1114 }
1115 
1116 /*
1117  * Generic error handler.  Peripheral drivers usually filter
1118  * out the errors that they handle in a unique mannor, then
1119  * call this function.
1120  */
1121 int
1122 cam_periph_error(union ccb *ccb, cam_flags camflags,
1123 		 u_int32_t sense_flags, union ccb *save_ccb)
1124 {
1125 	cam_status status;
1126 	int	   frozen;
1127 	int	   sense;
1128 	int	   error;
1129 	int        openings;
1130 	int	   retry;
1131 	u_int32_t  relsim_flags;
1132 	u_int32_t  timeout;
1133 
1134 	status = ccb->ccb_h.status;
1135 	frozen = (status & CAM_DEV_QFRZN) != 0;
1136 	sense  = (status & CAM_AUTOSNS_VALID) != 0;
1137 	status &= CAM_STATUS_MASK;
1138 	relsim_flags = 0;
1139 
1140 	switch (status) {
1141 	case CAM_REQ_CMP:
1142 		/* decrement the number of retries */
1143 		retry = ccb->ccb_h.retry_count > 0;
1144 		if (retry)
1145 			ccb->ccb_h.retry_count--;
1146 		error = 0;
1147 		break;
1148 	case CAM_AUTOSENSE_FAIL:
1149 	case CAM_SCSI_STATUS_ERROR:
1150 
1151 		switch (ccb->csio.scsi_status) {
1152 		case SCSI_STATUS_OK:
1153 		case SCSI_STATUS_COND_MET:
1154 		case SCSI_STATUS_INTERMED:
1155 		case SCSI_STATUS_INTERMED_COND_MET:
1156 			error = 0;
1157 			break;
1158 		case SCSI_STATUS_CMD_TERMINATED:
1159 		case SCSI_STATUS_CHECK_COND:
1160 			if (sense != 0) {
1161 				struct scsi_sense_data *sense;
1162 				int    error_code, sense_key, asc, ascq;
1163 				struct cam_periph *periph;
1164 				scsi_sense_action err_action;
1165 				struct ccb_getdev cgd;
1166 
1167 				sense = &ccb->csio.sense_data;
1168 				scsi_extract_sense(sense, &error_code,
1169 						   &sense_key, &asc, &ascq);
1170 				periph = xpt_path_periph(ccb->ccb_h.path);
1171 
1172 				/*
1173 				 * Grab the inquiry data for this device.
1174 				 */
1175 				xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path,
1176 					      /*priority*/ 1);
1177 				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1178 				xpt_action((union ccb *)&cgd);
1179 
1180 				err_action = scsi_error_action(asc, ascq,
1181 							       &cgd.inq_data);
1182 
1183 				/*
1184 				 * Send a Test Unit Ready to the device.
1185 				 * If the 'many' flag is set, we send 120
1186 				 * test unit ready commands, one every half
1187 				 * second.  Otherwise, we just send one TUR.
1188 				 * We only want to do this if the retry
1189 				 * count has not been exhausted.
1190 				 */
1191 				if (((err_action & SS_MASK) == SS_TUR)
1192 				 && save_ccb != NULL
1193 				 && ccb->ccb_h.retry_count > 0) {
1194 
1195 					/*
1196 					 * Since error recovery is already
1197 					 * in progress, don't attempt to
1198 					 * process this error.  It is probably
1199 					 * related to the error that caused
1200 					 * the currently active error recovery
1201 					 * action.  Also, we only have
1202 					 * space for one saved CCB, so if we
1203 					 * had two concurrent error recovery
1204 					 * actions, we would end up
1205 					 * over-writing one error recovery
1206 					 * CCB with another one.
1207 					 */
1208 					if (periph->flags &
1209 					    CAM_PERIPH_RECOVERY_INPROG) {
1210 						error = ERESTART;
1211 						break;
1212 					}
1213 
1214 					periph->flags |=
1215 						CAM_PERIPH_RECOVERY_INPROG;
1216 
1217 					/* decrement the number of retries */
1218 					if ((err_action &
1219 					     SSQ_DECREMENT_COUNT) != 0) {
1220 						retry = 1;
1221 						ccb->ccb_h.retry_count--;
1222 					}
1223 
1224 					bcopy(ccb, save_ccb, sizeof(*save_ccb));
1225 
1226 					/*
1227 					 * We retry this one every half
1228 					 * second for a minute.  If the
1229 					 * device hasn't become ready in a
1230 					 * minute's time, it's unlikely to
1231 					 * ever become ready.  If the table
1232 					 * doesn't specify SSQ_MANY, we can
1233 					 * only try this once.  Oh well.
1234 					 */
1235 					if ((err_action & SSQ_MANY) != 0)
1236 						scsi_test_unit_ready(&ccb->csio,
1237 							       /*retries*/120,
1238 							       camperiphdone,
1239 						 	       MSG_SIMPLE_Q_TAG,
1240 							       SSD_FULL_SIZE,
1241 							       /*timeout*/5000);
1242 					else
1243 						scsi_test_unit_ready(&ccb->csio,
1244 							       /*retries*/1,
1245 							       camperiphdone,
1246 						 	       MSG_SIMPLE_Q_TAG,
1247 							       SSD_FULL_SIZE,
1248 							       /*timeout*/5000);
1249 
1250 					/* release the queue after .5 sec.  */
1251 					relsim_flags =
1252 						RELSIM_RELEASE_AFTER_TIMEOUT;
1253 					timeout = 500;
1254 					/*
1255 					 * Drop the priority to 0 so that
1256 					 * we are the first to execute.  Also
1257 					 * freeze the queue after this command
1258 					 * is sent so that we can restore the
1259 					 * old csio and have it queued in the
1260 					 * proper order before we let normal
1261 					 * transactions go to the drive.
1262 					 */
1263 					ccb->ccb_h.pinfo.priority = 0;
1264 					ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1265 
1266 					/*
1267 					 * Save a pointer to the original
1268 					 * CCB in the new CCB.
1269 					 */
1270 					ccb->ccb_h.saved_ccb_ptr = save_ccb;
1271 
1272 					error = ERESTART;
1273 				}
1274 				/*
1275 				 * Send a start unit command to the device,
1276 				 * and then retry the command.  We only
1277 				 * want to do this if the retry count has
1278 				 * not been exhausted.  If the user
1279 				 * specified 0 retries, then we follow
1280 				 * their request and do not retry.
1281 				 */
1282 				else if (((err_action & SS_MASK) == SS_START)
1283 				      && save_ccb != NULL
1284 				      && ccb->ccb_h.retry_count > 0) {
1285 					int le;
1286 
1287 					/*
1288 					 * Only one error recovery action
1289 					 * at a time.  See above.
1290 					 */
1291 					if (periph->flags &
1292 					    CAM_PERIPH_RECOVERY_INPROG) {
1293 						error = ERESTART;
1294 						break;
1295 					}
1296 
1297 					periph->flags |=
1298 						CAM_PERIPH_RECOVERY_INPROG;
1299 
1300 					/* decrement the number of retries */
1301 					retry = 1;
1302 					ccb->ccb_h.retry_count--;
1303 
1304 					/*
1305 					 * Check for removable media and
1306 					 * set load/eject flag
1307 					 * appropriately.
1308 					 */
1309 					if (SID_IS_REMOVABLE(&cgd.inq_data))
1310 						le = TRUE;
1311 					else
1312 						le = FALSE;
1313 
1314 					/*
1315 					 * Attempt to start the drive up.
1316 					 *
1317 					 * Save the current ccb so it can
1318 					 * be restored and retried once the
1319 					 * drive is started up.
1320 					 */
1321 					bcopy(ccb, save_ccb, sizeof(*save_ccb));
1322 
1323 					scsi_start_stop(&ccb->csio,
1324 							/*retries*/1,
1325 							camperiphdone,
1326 							MSG_SIMPLE_Q_TAG,
1327 							/*start*/TRUE,
1328 							/*load/eject*/le,
1329 							/*immediate*/FALSE,
1330 							SSD_FULL_SIZE,
1331 							/*timeout*/50000);
1332 					/*
1333 					 * Drop the priority to 0 so that
1334 					 * we are the first to execute.  Also
1335 					 * freeze the queue after this command
1336 					 * is sent so that we can restore the
1337 					 * old csio and have it queued in the
1338 					 * proper order before we let normal
1339 					 * transactions go to the drive.
1340 					 */
1341 					ccb->ccb_h.pinfo.priority = 0;
1342 					ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1343 
1344 					/*
1345 					 * Save a pointer to the original
1346 					 * CCB in the new CCB.
1347 					 */
1348 					ccb->ccb_h.saved_ccb_ptr = save_ccb;
1349 
1350 					error = ERESTART;
1351 				} else if ((sense_flags & SF_RETRY_UA) != 0) {
1352 					/*
1353 					 * XXX KDM this is a *horrible*
1354 					 * hack.
1355 					 */
1356 					error = scsi_interpret_sense(ccb,
1357 								  sense_flags,
1358 								  &relsim_flags,
1359 								  &openings,
1360 								  &timeout,
1361 								  err_action);
1362 				}
1363 
1364 				/*
1365 				 * Theoretically, this code should send a
1366 				 * test unit ready to the given device, and
1367 				 * if it returns and error, send a start
1368 				 * unit command.  Since we don't yet have
1369 				 * the capability to do two-command error
1370 				 * recovery, just send a start unit.
1371 				 * XXX KDM fix this!
1372 				 */
1373 				else if (((err_action & SS_MASK) == SS_TURSTART)
1374 				      && save_ccb != NULL
1375 				      && ccb->ccb_h.retry_count > 0) {
1376 					int le;
1377 
1378 					/*
1379 					 * Only one error recovery action
1380 					 * at a time.  See above.
1381 					 */
1382 					if (periph->flags &
1383 					    CAM_PERIPH_RECOVERY_INPROG) {
1384 						error = ERESTART;
1385 						break;
1386 					}
1387 
1388 					periph->flags |=
1389 						CAM_PERIPH_RECOVERY_INPROG;
1390 
1391 					/* decrement the number of retries */
1392 					retry = 1;
1393 					ccb->ccb_h.retry_count--;
1394 
1395 					/*
1396 					 * Check for removable media and
1397 					 * set load/eject flag
1398 					 * appropriately.
1399 					 */
1400 					if (SID_IS_REMOVABLE(&cgd.inq_data))
1401 						le = TRUE;
1402 					else
1403 						le = FALSE;
1404 
1405 					/*
1406 					 * Attempt to start the drive up.
1407 					 *
1408 					 * Save the current ccb so it can
1409 					 * be restored and retried once the
1410 					 * drive is started up.
1411 					 */
1412 					bcopy(ccb, save_ccb, sizeof(*save_ccb));
1413 
1414 					scsi_start_stop(&ccb->csio,
1415 							/*retries*/1,
1416 							camperiphdone,
1417 							MSG_SIMPLE_Q_TAG,
1418 							/*start*/TRUE,
1419 							/*load/eject*/le,
1420 							/*immediate*/FALSE,
1421 							SSD_FULL_SIZE,
1422 							/*timeout*/50000);
1423 
1424 					/* release the queue after .5 sec.  */
1425 					relsim_flags =
1426 						RELSIM_RELEASE_AFTER_TIMEOUT;
1427 					timeout = 500;
1428 					/*
1429 					 * Drop the priority to 0 so that
1430 					 * we are the first to execute.  Also
1431 					 * freeze the queue after this command
1432 					 * is sent so that we can restore the
1433 					 * old csio and have it queued in the
1434 					 * proper order before we let normal
1435 					 * transactions go to the drive.
1436 					 */
1437 					ccb->ccb_h.pinfo.priority = 0;
1438 					ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1439 
1440 					/*
1441 					 * Save a pointer to the original
1442 					 * CCB in the new CCB.
1443 					 */
1444 					ccb->ccb_h.saved_ccb_ptr = save_ccb;
1445 
1446 					error = ERESTART;
1447 				} else {
1448 					error = scsi_interpret_sense(ccb,
1449 								  sense_flags,
1450 								  &relsim_flags,
1451 								  &openings,
1452 								  &timeout,
1453 								  err_action);
1454 				}
1455 			} else if (ccb->csio.scsi_status ==
1456 				   SCSI_STATUS_CHECK_COND
1457 				&& status != CAM_AUTOSENSE_FAIL) {
1458 				/* no point in decrementing the retry count */
1459 				panic("cam_periph_error: scsi status of "
1460 				      "CHECK COND returned but no sense "
1461 				      "information is availible.  "
1462 				      "Controller should have returned "
1463 				      "CAM_AUTOSENSE_FAILED");
1464 				/* NOTREACHED */
1465 				error = EIO;
1466 			} else if (ccb->ccb_h.retry_count == 0) {
1467 				/*
1468 				 * XXX KDM shouldn't there be a better
1469 				 * argument to return??
1470 				 */
1471 				error = EIO;
1472 			} else {
1473 				/* decrement the number of retries */
1474 				retry = ccb->ccb_h.retry_count > 0;
1475 				if (retry)
1476 					ccb->ccb_h.retry_count--;
1477 				/*
1478 				 * If it was aborted with no
1479 				 * clue as to the reason, just
1480 				 * retry it again.
1481 				 */
1482 				error = ERESTART;
1483 			}
1484 			break;
1485 		case SCSI_STATUS_QUEUE_FULL:
1486 		{
1487 			/* no decrement */
1488 			struct ccb_getdevstats cgds;
1489 
1490 			/*
1491 			 * First off, find out what the current
1492 			 * transaction counts are.
1493 			 */
1494 			xpt_setup_ccb(&cgds.ccb_h,
1495 				      ccb->ccb_h.path,
1496 				      /*priority*/1);
1497 			cgds.ccb_h.func_code = XPT_GDEV_STATS;
1498 			xpt_action((union ccb *)&cgds);
1499 
1500 			/*
1501 			 * If we were the only transaction active, treat
1502 			 * the QUEUE FULL as if it were a BUSY condition.
1503 			 */
1504 			if (cgds.dev_active != 0) {
1505 				int total_openings;
1506 
1507 				/*
1508 			 	 * Reduce the number of openings to
1509 				 * be 1 less than the amount it took
1510 				 * to get a queue full bounded by the
1511 				 * minimum allowed tag count for this
1512 				 * device.
1513 			 	 */
1514 				total_openings =
1515 				    cgds.dev_active+cgds.dev_openings;
1516 				openings = cgds.dev_active;
1517 				if (openings < cgds.mintags)
1518 					openings = cgds.mintags;
1519 				if (openings < total_openings)
1520 					relsim_flags = RELSIM_ADJUST_OPENINGS;
1521 				else {
1522 					/*
1523 					 * Some devices report queue full for
1524 					 * temporary resource shortages.  For
1525 					 * this reason, we allow a minimum
1526 					 * tag count to be entered via a
1527 					 * quirk entry to prevent the queue
1528 					 * count on these devices from falling
1529 					 * to a pessimisticly low value.  We
1530 					 * still wait for the next successful
1531 					 * completion, however, before queueing
1532 					 * more transactions to the device.
1533 					 */
1534 					relsim_flags =
1535 					    RELSIM_RELEASE_AFTER_CMDCMPLT;
1536 				}
1537 				timeout = 0;
1538 				error = ERESTART;
1539 				break;
1540 			}
1541 			/* FALLTHROUGH */
1542 		}
1543 		case SCSI_STATUS_BUSY:
1544 			/*
1545 			 * Restart the queue after either another
1546 			 * command completes or a 1 second timeout.
1547 			 * If we have any retries left, that is.
1548 			 */
1549 			retry = ccb->ccb_h.retry_count > 0;
1550 			if (retry) {
1551 				ccb->ccb_h.retry_count--;
1552 				error = ERESTART;
1553 				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1554 					     | RELSIM_RELEASE_AFTER_CMDCMPLT;
1555 				timeout = 1000;
1556 			} else {
1557 				error = EIO;
1558 			}
1559 			break;
1560 		case SCSI_STATUS_RESERV_CONFLICT:
1561 			error = EIO;
1562 			break;
1563 		default:
1564 			error = EIO;
1565 			break;
1566 		}
1567 		break;
1568 	case CAM_REQ_CMP_ERR:
1569 	case CAM_CMD_TIMEOUT:
1570 	case CAM_UNEXP_BUSFREE:
1571 	case CAM_UNCOR_PARITY:
1572 	case CAM_DATA_RUN_ERR:
1573 		/* decrement the number of retries */
1574 		retry = ccb->ccb_h.retry_count > 0;
1575 		if (retry) {
1576 			ccb->ccb_h.retry_count--;
1577 			error = ERESTART;
1578 		} else {
1579 			error = EIO;
1580 		}
1581 		break;
1582 	case CAM_UA_ABORT:
1583 	case CAM_UA_TERMIO:
1584 	case CAM_MSG_REJECT_REC:
1585 		/* XXX Don't know that these are correct */
1586 		error = EIO;
1587 		break;
1588 	case CAM_SEL_TIMEOUT:
1589 	{
1590 		/*
1591 		 * XXX
1592 		 * A single selection timeout should not be enough
1593 		 * to invalidate a device.  We should retry for multiple
1594 		 * seconds assuming this isn't a probe.  We'll probably
1595 		 * need a special flag for that.
1596 		 */
1597 #if 0
1598 		struct cam_path *newpath;
1599 
1600 		/* Should we do more if we can't create the path?? */
1601 		if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
1602 				    xpt_path_path_id(ccb->ccb_h.path),
1603 				    xpt_path_target_id(ccb->ccb_h.path),
1604 				    CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1605 			break;
1606 		/*
1607 		 * Let peripheral drivers know that this device has gone
1608 		 * away.
1609 		 */
1610 		xpt_async(AC_LOST_DEVICE, newpath, NULL);
1611 		xpt_free_path(newpath);
1612 #endif
1613 		if ((sense_flags & SF_RETRY_SELTO) != 0) {
1614 			retry = ccb->ccb_h.retry_count > 0;
1615 			if (retry) {
1616 				ccb->ccb_h.retry_count--;
1617 				error = ERESTART;
1618 				/*
1619 				 * Wait half a second to give the device
1620 				 * time to recover before we try again.
1621 				 */
1622 				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1623 				timeout = 500;
1624 			} else {
1625 				error = ENXIO;
1626 			}
1627 		} else {
1628 			error = ENXIO;
1629 		}
1630 		break;
1631 	}
1632 	case CAM_REQ_INVALID:
1633 	case CAM_PATH_INVALID:
1634 	case CAM_DEV_NOT_THERE:
1635 	case CAM_NO_HBA:
1636 	case CAM_PROVIDE_FAIL:
1637 	case CAM_REQ_TOO_BIG:
1638 		error = EINVAL;
1639 		break;
1640 	case CAM_SCSI_BUS_RESET:
1641 	case CAM_BDR_SENT:
1642 	case CAM_REQUEUE_REQ:
1643 		/* Unconditional requeue, dammit */
1644 		error = ERESTART;
1645 		break;
1646 	case CAM_RESRC_UNAVAIL:
1647 	case CAM_BUSY:
1648 		/* timeout??? */
1649 	default:
1650 		/* decrement the number of retries */
1651 		retry = ccb->ccb_h.retry_count > 0;
1652 		if (retry) {
1653 			ccb->ccb_h.retry_count--;
1654 			error = ERESTART;
1655 		} else {
1656 			/* Check the sense codes */
1657 			error = EIO;
1658 		}
1659 		break;
1660 	}
1661 
1662 	/* Attempt a retry */
1663 	if (error == ERESTART || error == 0) {
1664 		if (frozen != 0)
1665 			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1666 
1667 		if (error == ERESTART)
1668 			xpt_action(ccb);
1669 
1670 		if (frozen != 0) {
1671 			cam_release_devq(ccb->ccb_h.path,
1672 					 relsim_flags,
1673 					 openings,
1674 					 timeout,
1675 					 /*getcount_only*/0);
1676 		}
1677 	}
1678 
1679 
1680 	return (error);
1681 }
1682