xref: /freebsd/sys/cam/scsi/scsi_da.c (revision 1b6c76a2fe091c74f08427e6c870851025a9cf67)
1 /*
2  * Implementation of SCSI Direct Access Peripheral driver for CAM.
3  *
4  * Copyright (c) 1997 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #ifdef _KERNEL
32 #include "opt_hw_wdog.h"
33 #endif /* _KERNEL */
34 
35 #include <sys/param.h>
36 
37 #ifdef _KERNEL
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/bio.h>
41 #endif /* _KERNEL */
42 
43 #include <sys/devicestat.h>
44 #include <sys/conf.h>
45 #include <sys/disk.h>
46 #include <sys/eventhandler.h>
47 #include <sys/malloc.h>
48 #include <sys/cons.h>
49 
50 #include <machine/md_var.h>
51 
52 #include <vm/vm.h>
53 #include <vm/pmap.h>
54 
55 #ifndef _KERNEL
56 #include <stdio.h>
57 #include <string.h>
58 #endif /* _KERNEL */
59 
60 #include <cam/cam.h>
61 #include <cam/cam_ccb.h>
62 #include <cam/cam_extend.h>
63 #include <cam/cam_periph.h>
64 #include <cam/cam_xpt_periph.h>
65 
66 #include <cam/scsi/scsi_message.h>
67 
68 #ifndef _KERNEL
69 #include <cam/scsi/scsi_da.h>
70 #endif /* !_KERNEL */
71 
72 #ifdef _KERNEL
73 typedef enum {
74 	DA_STATE_PROBE,
75 	DA_STATE_NORMAL
76 } da_state;
77 
78 typedef enum {
79 	DA_FLAG_PACK_INVALID	= 0x001,
80 	DA_FLAG_NEW_PACK	= 0x002,
81 	DA_FLAG_PACK_LOCKED	= 0x004,
82 	DA_FLAG_PACK_REMOVABLE	= 0x008,
83 	DA_FLAG_TAGGED_QUEUING	= 0x010,
84 	DA_FLAG_NEED_OTAG	= 0x020,
85 	DA_FLAG_WENT_IDLE	= 0x040,
86 	DA_FLAG_RETRY_UA	= 0x080,
87 	DA_FLAG_OPEN		= 0x100
88 } da_flags;
89 
90 typedef enum {
91 	DA_Q_NONE		= 0x00,
92 	DA_Q_NO_SYNC_CACHE	= 0x01,
93 	DA_Q_NO_6_BYTE		= 0x02
94 } da_quirks;
95 
96 typedef enum {
97 	DA_CCB_PROBE		= 0x01,
98 	DA_CCB_BUFFER_IO	= 0x02,
99 	DA_CCB_WAITING		= 0x03,
100 	DA_CCB_DUMP		= 0x04,
101 	DA_CCB_TYPE_MASK	= 0x0F,
102 	DA_CCB_RETRY_UA		= 0x10
103 } da_ccb_state;
104 
105 /* Offsets into our private area for storing information */
106 #define ccb_state	ppriv_field0
107 #define ccb_bp		ppriv_ptr1
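/*
 * ccb_state holds a da_ccb_state value: the low nibble
 * (DA_CCB_TYPE_MASK) identifies the request type so dadone() can
 * dispatch on it, and DA_CCB_RETRY_UA may be OR'd in to request a
 * retry on unit attention.  ccb_bp points back at the originating
 * struct bio, or is NULL for internally generated commands.
 */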
108 
109 struct disk_params {
110 	u_int8_t  heads;
111 	u_int16_t cylinders;
112 	u_int8_t  secs_per_track;
113 	u_int32_t secsize;	/* Number of bytes/sector */
114 	u_int32_t sectors;	/* total number sectors */
115 };
116 
117 struct da_softc {
118 	struct	 bio_queue_head bio_queue;
119 	struct	 devstat device_stats;
120 	SLIST_ENTRY(da_softc) links;
121 	LIST_HEAD(, ccb_hdr) pending_ccbs;
122 	da_state state;
123 	da_flags flags;
124 	da_quirks quirks;
125 	int	 minimum_cmd_size;
126 	int	 ordered_tag_count;
127 	struct	 disk_params params;
128 	struct	 disk disk;
129 	union	 ccb saved_ccb;
130 	dev_t    dev;
131 };
132 
133 struct da_quirk_entry {
134 	struct scsi_inquiry_pattern inq_pat;
135 	da_quirks quirks;
136 };
137 
138 static const char quantum[] = "QUANTUM";
139 static const char microp[] = "MICROP";
140 
141 static struct da_quirk_entry da_quirk_table[] =
142 {
143 	{
144 		/*
145 		 * This particular Fujitsu drive doesn't like the
146 		 * synchronize cache command.
147 		 * Reported by: Tom Jackson <toj@gorilla.net>
148 		 */
149 		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
150 		/*quirks*/ DA_Q_NO_SYNC_CACHE
151 
152 	},
153 	{
154 		/*
155 		 * This drive doesn't like the synchronize cache command
156 		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
157 		 * in NetBSD PR kern/6027, August 24, 1998.
158 		 */
159 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
160 		/*quirks*/ DA_Q_NO_SYNC_CACHE
161 	},
162 	{
163 		/*
164 		 * This drive doesn't like the synchronize cache command
165 		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
166 		 * (PR 8882).
167 		 */
168 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
169 		/*quirks*/ DA_Q_NO_SYNC_CACHE
170 	},
171 	{
172 		/*
173 		 * Doesn't like the synchronize cache command.
174 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
175 		 */
176 		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
177 		/*quirks*/ DA_Q_NO_SYNC_CACHE
178 	},
179 	{
180 		/*
181 		 * Doesn't like the synchronize cache command.
182 		 */
183 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
184 		/*quirks*/ DA_Q_NO_SYNC_CACHE
185 	},
186 	{
187 		/*
188 		 * Doesn't like the synchronize cache command.
189 		 */
190 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
191 		/*quirks*/ DA_Q_NO_SYNC_CACHE
192 	},
193 	{
194 		/*
195 		 * Doesn't work correctly with 6 byte reads/writes.
196 		 * Returns illegal request, and points to byte 9 of the
197 		 * 6-byte CDB.
198 		 * Reported by:  Adam McDougall <bsdx@spawnet.com>
199 		 */
200 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
201 		/*quirks*/ DA_Q_NO_6_BYTE
202 	},
203 	{
204 		/*
205 		 * See above.
206 		 */
207 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
208 		/*quirks*/ DA_Q_NO_6_BYTE
209 	},
210 
211 	/* Below is a list of quirks for USB devices supported by umass. */
212 	{
213 		/*
214 		 * This USB floppy drive uses the UFI command set. This
215 		 * command set is a derivative of the ATAPI command set and
216 		 * does not support READ_6 commands, only READ_10. It also does
217 		 * not support sync cache (0x35).
218 		 */
219 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Y-E DATA", "USB-FDU", "*"},
220 		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
221 	},
222 	{
223 		/* Another USB floppy */
224 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "MATSHITA", "FDD CF-VFDU*","*"},
225 		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
226 	},
227 	{
228 		/*
229 		 * The Sony Memory Stick adapter MSAC-US1
230 		 * does not support READ_6 commands, only READ_10. It also does
231 		 * not support sync cache (0x35).
232 		 * Sony PCG-C1VJ Internal Memory Stick Slot (MSC-U01) also
233 		 * has this quirk.  Make all Sony MS* products use this
234 		 * quirk.  Reported by: TERAMOTO Masahiro
235 		 * <teramoto@comm.eng.osaka-u.ac.jp> (PR 23378).
236 		 */
237 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "MS*", "*"},
238 		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
239 	},
240 	{
241 		/*
242 		 * Sony DSC cameras (DSC-S30, DSC-S50, DSC-S70)
243 		 * do not support READ_6 commands, only READ_10.
244 		 */
245 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"},
246 		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
247 	},
248 	{
249 		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "MCF3064AP", "*"},
250 		/*quirks*/ DA_Q_NO_6_BYTE
251 	}
252 };
253 
254 static	d_open_t	daopen;
255 static	d_close_t	daclose;
256 static	d_strategy_t	dastrategy;
257 static	d_ioctl_t	daioctl;
258 static	d_dump_t	dadump;
259 static	periph_init_t	dainit;
260 static	void		daasync(void *callback_arg, u_int32_t code,
261 				struct cam_path *path, void *arg);
262 static	periph_ctor_t	daregister;
263 static	periph_dtor_t	dacleanup;
264 static	periph_start_t	dastart;
265 static	periph_oninv_t	daoninvalidate;
266 static	void		dadone(struct cam_periph *periph,
267 			       union ccb *done_ccb);
268 static  int		daerror(union ccb *ccb, u_int32_t cam_flags,
269 				u_int32_t sense_flags);
270 static void		daprevent(struct cam_periph *periph, int action);
271 static void		dasetgeom(struct cam_periph *periph,
272 				  struct scsi_read_capacity_data * rdcap);
273 static timeout_t	dasendorderedtag;
274 static void		dashutdown(void *arg, int howto);
275 
276 #ifndef DA_DEFAULT_TIMEOUT
277 #define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
278 #endif
279 
280 /*
281  * DA_ORDEREDTAG_INTERVAL determines how often, relative
282  * to the default timeout, we check to see whether an ordered
283  * tagged transaction is appropriate to prevent simple tag
284  * starvation.  Since we'd like to ensure that there is at least
285  * 1/2 of the timeout length left for a starved transaction to
286  * complete after we've sent an ordered tag, we must poll at least
287  * four times in every timeout period.  This takes care of the worst
288  * case where a starved transaction starts during an interval that
289  * still passes the "don't send an ordered tag" test, so it takes
290  * us two intervals to determine that a tag must be sent.
291  */
292 #ifndef DA_ORDEREDTAG_INTERVAL
293 #define DA_ORDEREDTAG_INTERVAL 4
294 #endif
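/*
 * With the defaults above (60 second timeout, interval of 4),
 * dasendorderedtag() reschedules itself every (60 * hz) / 4 ticks,
 * i.e. every 15 seconds.  A device that stays busy through an entire
 * interval without an ordered tag being issued has DA_FLAG_NEED_OTAG
 * set, and dastart() then sends its next request with
 * MSG_ORDERED_Q_TAG, leaving at least half the timeout for the
 * starved transaction to complete.
 */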
295 
296 static struct periph_driver dadriver =
297 {
298 	dainit, "da",
299 	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
300 };
301 
302 PERIPHDRIVER_DECLARE(da, dadriver);
303 
304 #define DA_CDEV_MAJOR 13
305 
306 /* For 2.2-stable support */
307 #ifndef D_DISK
308 #define D_DISK 0
309 #endif
310 
311 static struct cdevsw da_cdevsw = {
312 	/* open */	daopen,
313 	/* close */	daclose,
314 	/* read */	physread,
315 	/* write */	physwrite,
316 	/* ioctl */	daioctl,
317 	/* poll */	nopoll,
318 	/* mmap */	nommap,
319 	/* strategy */	dastrategy,
320 	/* name */	"da",
321 	/* maj */	DA_CDEV_MAJOR,
322 	/* dump */	dadump,
323 	/* psize */	nopsize,
324 	/* flags */	D_DISK,
325 };
326 
327 static struct cdevsw dadisk_cdevsw;
328 
329 static SLIST_HEAD(,da_softc) softc_list;
330 static struct extend_array *daperiphs;
331 
332 static int
333 daopen(dev_t dev, int flags, int fmt, struct proc *p)
334 {
335 	struct cam_periph *periph;
336 	struct da_softc *softc;
337 	struct disklabel *label;
338 	struct scsi_read_capacity_data *rcap;
339 	union  ccb *ccb;
340 	int unit;
341 	int part;
342 	int error;
343 	int s;
344 
345 	unit = dkunit(dev);
346 	part = dkpart(dev);
347 	s = splsoftcam();
348 	periph = cam_extend_get(daperiphs, unit);
349 	if (periph == NULL) {
350 		splx(s);
351 		return (ENXIO);
352 	}
353 
354 	softc = (struct da_softc *)periph->softc;
355 
356 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
357 	    ("daopen: dev=%s (unit %d , partition %d)\n", devtoname(dev),
358 	     unit, part));
359 
360 	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0)
361 		return (error); /* error code from tsleep */
362 
363 	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
364 		return(ENXIO);
365 	softc->flags |= DA_FLAG_OPEN;
366 
367 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
368 		/* Invalidate our pack information. */
369 		disk_invalidate(&softc->disk);
370 		softc->flags &= ~DA_FLAG_PACK_INVALID;
371 	}
372 	splx(s);
373 
374 	/* Do a read capacity */
375 	rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
376 							M_TEMP,
377 							M_WAITOK);
378 
379 	ccb = cam_periph_getccb(periph, /*priority*/1);
380 	scsi_read_capacity(&ccb->csio,
381 			   /*retries*/4,
382 			   /*cbfncp*/dadone,
383 			   MSG_SIMPLE_Q_TAG,
384 			   rcap,
385 			   SSD_FULL_SIZE,
386 			   /*timeout*/60000);
387 	ccb->ccb_h.ccb_bp = NULL;
388 
389 	error = cam_periph_runccb(ccb, daerror,
390 				  /*cam_flags*/CAM_RETRY_SELTO,
391 				  /*sense_flags*/SF_RETRY_UA,
392 				  &softc->device_stats);
393 
394 	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
395 		cam_release_devq(ccb->ccb_h.path,
396 				 /*relsim_flags*/0,
397 				 /*reduction*/0,
398 				 /*timeout*/0,
399 				 /*getcount_only*/0);
400 	xpt_release_ccb(ccb);
401 
402 	if (error == 0)
403 		dasetgeom(periph, rcap);
404 
405 	free(rcap, M_TEMP);
406 
407 	if (error == 0) {
408 		struct ccb_getdev cgd;
409 
410 		/* Build label for whole disk. */
411 		label = &softc->disk.d_label;
412 		bzero(label, sizeof(*label));
413 		label->d_type = DTYPE_SCSI;
414 
415 		/*
416 		 * Grab the inquiry data to get the vendor and product names.
417 		 * Put them in the typename and packname for the label.
418 		 */
419 		xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
420 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
421 		xpt_action((union ccb *)&cgd);
422 
423 		strncpy(label->d_typename, cgd.inq_data.vendor,
424 			min(SID_VENDOR_SIZE, sizeof(label->d_typename)));
425 		strncpy(label->d_packname, cgd.inq_data.product,
426 			min(SID_PRODUCT_SIZE, sizeof(label->d_packname)));
427 
428 		label->d_secsize = softc->params.secsize;
429 		label->d_nsectors = softc->params.secs_per_track;
430 		label->d_ntracks = softc->params.heads;
431 		label->d_ncylinders = softc->params.cylinders;
432 		label->d_secpercyl = softc->params.heads
433 				  * softc->params.secs_per_track;
434 		label->d_secperunit = softc->params.sectors;
435 
436 		/*
437 		 * Check to see whether or not the blocksize is set yet.
438 		 * If it isn't, set it and then clear the blocksize
439 		 * unavailable flag for the device statistics.
440 		 */
441 		if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
442 			softc->device_stats.block_size = softc->params.secsize;
443 			softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
444 		}
445 	}
446 
447 	if (error == 0) {
448 		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
449 			daprevent(periph, PR_PREVENT);
450 	}
451 	cam_periph_unlock(periph);
452 	return (error);
453 }
454 
455 static int
456 daclose(dev_t dev, int flag, int fmt, struct proc *p)
457 {
458 	struct	cam_periph *periph;
459 	struct	da_softc *softc;
460 	int	unit;
461 	int	error;
462 
463 	unit = dkunit(dev);
464 	periph = cam_extend_get(daperiphs, unit);
465 	if (periph == NULL)
466 		return (ENXIO);
467 
468 	softc = (struct da_softc *)periph->softc;
469 
470 	if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
471 		return (error); /* error code from tsleep */
472 	}
473 
474 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
475 		union	ccb *ccb;
476 
477 		ccb = cam_periph_getccb(periph, /*priority*/1);
478 
479 		scsi_synchronize_cache(&ccb->csio,
480 				       /*retries*/1,
481 				       /*cbfcnp*/dadone,
482 				       MSG_SIMPLE_Q_TAG,
483 				       /*begin_lba*/0,/* Cover the whole disk */
484 				       /*lb_count*/0,
485 				       SSD_FULL_SIZE,
486 				       5 * 60 * 1000);
487 
488 		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
489 				  /*sense_flags*/SF_RETRY_UA,
490 				  &softc->device_stats);
491 
492 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
493 			if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
494 			     CAM_SCSI_STATUS_ERROR) {
495 				int asc, ascq;
496 				int sense_key, error_code;
497 
498 				scsi_extract_sense(&ccb->csio.sense_data,
499 						   &error_code,
500 						   &sense_key,
501 						   &asc, &ascq);
502 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
503 					scsi_sense_print(&ccb->csio);
504 			} else {
505 				xpt_print_path(periph->path);
506 				printf("Synchronize cache failed, status "
507 				       "== 0x%x, scsi status == 0x%x\n",
508 				       ccb->csio.ccb_h.status,
509 				       ccb->csio.scsi_status);
510 			}
511 		}
512 
513 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
514 			cam_release_devq(ccb->ccb_h.path,
515 					 /*relsim_flags*/0,
516 					 /*reduction*/0,
517 					 /*timeout*/0,
518 					 /*getcount_only*/0);
519 
520 		xpt_release_ccb(ccb);
521 
522 	}
523 
524 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
525 		daprevent(periph, PR_ALLOW);
526 		/*
527 		 * If we've got removable media, mark the blocksize as
528 		 * unavailable, since it could change when new media is
529 		 * inserted.
530 		 */
531 		softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
532 	}
533 
534 	softc->flags &= ~DA_FLAG_OPEN;
535 	cam_periph_unlock(periph);
536 	cam_periph_release(periph);
537 	return (0);
538 }
539 
540 /*
541  * Actually translate the requested transfer into one the physical driver
542  * can understand.  The transfer is described by a bio and will include
543  * only one physical transfer.
544  */
545 static void
546 dastrategy(struct bio *bp)
547 {
548 	struct cam_periph *periph;
549 	struct da_softc *softc;
550 	u_int  unit;
551 	u_int  part;
552 	int    s;
553 
554 	unit = dkunit(bp->bio_dev);
555 	part = dkpart(bp->bio_dev);
556 	periph = cam_extend_get(daperiphs, unit);
557 	if (periph == NULL) {
558 		biofinish(bp, NULL, ENXIO);
559 		return;
560 	}
561 	softc = (struct da_softc *)periph->softc;
562 #if 0
563 	/*
564 	 * check it's not too big a transfer for our adapter
565 	 */
566 	scsi_minphys(bp,&sd_switch);
567 #endif
568 
569 	/*
570 	 * Mask interrupts so that the pack cannot be invalidated until
571 	 * after we are in the queue.  Otherwise, we might not properly
572 	 * clean up one of the buffers.
573 	 */
574 	s = splbio();
575 
576 	/*
577 	 * If the device has been made invalid, error out
578 	 */
579 	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
580 		splx(s);
581 		biofinish(bp, NULL, ENXIO);
582 		return;
583 	}
584 
585 	/*
586 	 * Place it in the queue of disk activities for this disk
587 	 */
588 	bioqdisksort(&softc->bio_queue, bp);
589 
590 	splx(s);
591 
592 	/*
593 	 * Schedule ourselves for performing the work.
594 	 */
595 	xpt_schedule(periph, /* XXX priority */1);
596 
597 	return;
598 }
599 
600 /* For 2.2-stable support */
601 #ifndef ENOIOCTL
602 #define ENOIOCTL -1
603 #endif
604 
605 static int
606 daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
607 {
608 	struct cam_periph *periph;
609 	struct da_softc *softc;
610 	int unit;
611 	int error;
612 
613 	unit = dkunit(dev);
614 	periph = cam_extend_get(daperiphs, unit);
615 	if (periph == NULL)
616 		return (ENXIO);
617 
618 	softc = (struct da_softc *)periph->softc;
619 
620 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n"));
621 
622 	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
623 		return (error); /* error code from tsleep */
624 	}
625 
626 	error = cam_periph_ioctl(periph, cmd, addr, daerror);
627 
628 	cam_periph_unlock(periph);
629 
630 	return (error);
631 }
632 
633 static int
634 dadump(dev_t dev)
635 {
636 	struct	    cam_periph *periph;
637 	struct	    da_softc *softc;
638 	u_int	    unit;
639 	u_int	    part;
640 	u_int	    secsize;
641 	u_int	    num;	/* number of sectors to write */
642 	u_int	    blknum;
643 	long	    blkcnt;
644 	vm_offset_t addr;
645 	struct	    ccb_scsiio csio;
646 	int         dumppages = MAXDUMPPGS;
647 	int	    error;
648 	int         i;
649 
650 	/* toss any characters present prior to dump */
651 	while (cncheckc() != -1)
652 		;
653 
654 	unit = dkunit(dev);
655 	part = dkpart(dev);
656 	periph = cam_extend_get(daperiphs, unit);
657 	if (periph == NULL) {
658 		return (ENXIO);
659 	}
660 	softc = (struct da_softc *)periph->softc;
661 
662 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0)
663 		return (ENXIO);
664 
665 	error = disk_dumpcheck(dev, &num, &blknum, &secsize);
666 	if (error)
667 		return (error);
668 
669 	addr = 0;	/* starting address */
670 	blkcnt = howmany(PAGE_SIZE, secsize);
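	/*
	 * blkcnt is the number of device blocks per VM page.  Each pass of
	 * the loop below maps up to MAXDUMPPGS pages of physical memory into
	 * a temporary kernel mapping and writes them out with one polled
	 * command: blkcnt * dumppages blocks, blkcnt * secsize * dumppages
	 * bytes.
	 */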
671 
672 	while (num > 0) {
673 		caddr_t va = NULL;
674 
675 		if ((num / blkcnt) < dumppages)
676 			dumppages = num / blkcnt;
677 
678 		for (i = 0; i < dumppages; ++i) {
679 			vm_offset_t a = addr + (i * PAGE_SIZE);
680 			if (is_physical_memory(a))
681 				va = pmap_kenter_temporary(trunc_page(a), i);
682 			else
683 				va = pmap_kenter_temporary(trunc_page(0), i);
684 		}
685 
686 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
687 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
688 		scsi_read_write(&csio,
689 				/*retries*/1,
690 				dadone,
691 				MSG_ORDERED_Q_TAG,
692 				/*read*/FALSE,
693 				/*byte2*/0,
694 				/*minimum_cmd_size*/ softc->minimum_cmd_size,
695 				blknum,
696 				blkcnt * dumppages,
697 				/*data_ptr*/(u_int8_t *) va,
698 				/*dxfer_len*/blkcnt * secsize * dumppages,
699 				/*sense_len*/SSD_FULL_SIZE,
700 				DA_DEFAULT_TIMEOUT * 1000);
701 		xpt_polled_action((union ccb *)&csio);
702 
703 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
704 			printf("Aborting dump due to I/O error.\n");
705 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
706 			     CAM_SCSI_STATUS_ERROR)
707 				scsi_sense_print(&csio);
708 			else
709 				printf("status == 0x%x, scsi status == 0x%x\n",
710 				       csio.ccb_h.status, csio.scsi_status);
711 			return(EIO);
712 		}
713 
714 		if (dumpstatus(addr, (long)(num * softc->params.secsize)) < 0)
715 			return (EINTR);
716 
717 		/* update block count */
718 		num -= blkcnt * dumppages;
719 		blknum += blkcnt * dumppages;
720 		addr += PAGE_SIZE * dumppages;
721 	}
722 
723 	/*
724 	 * Sync the disk cache contents to the physical media.
725 	 */
726 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
727 
728 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
729 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
730 		scsi_synchronize_cache(&csio,
731 				       /*retries*/1,
732 				       /*cbfcnp*/dadone,
733 				       MSG_SIMPLE_Q_TAG,
734 				       /*begin_lba*/0,/* Cover the whole disk */
735 				       /*lb_count*/0,
736 				       SSD_FULL_SIZE,
737 				       5 * 60 * 1000);
738 		xpt_polled_action((union ccb *)&csio);
739 
740 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
741 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
742 			     CAM_SCSI_STATUS_ERROR) {
743 				int asc, ascq;
744 				int sense_key, error_code;
745 
746 				scsi_extract_sense(&csio.sense_data,
747 						   &error_code,
748 						   &sense_key,
749 						   &asc, &ascq);
750 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
751 					scsi_sense_print(&csio);
752 			} else {
753 				xpt_print_path(periph->path);
754 				printf("Synchronize cache failed, status "
755 				       "== 0x%x, scsi status == 0x%x\n",
756 				       csio.ccb_h.status, csio.scsi_status);
757 			}
758 		}
759 	}
760 	return (0);
761 }
762 
763 static void
764 dainit(void)
765 {
766 	cam_status status;
767 	struct cam_path *path;
768 
769 	/*
770 	 * Create our extend array for storing the devices we attach to.
771 	 */
772 	daperiphs = cam_extend_new();
773 	SLIST_INIT(&softc_list);
774 	if (daperiphs == NULL) {
775 		printf("da: Failed to alloc extend array!\n");
776 		return;
777 	}
778 
779 	/*
780 	 * Install a global async callback.  This callback will
781 	 * receive async callbacks like "new device found".
782 	 */
783 	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
784 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
785 
786 	if (status == CAM_REQ_CMP) {
787 		struct ccb_setasync csa;
788 
789 		xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
790 		csa.ccb_h.func_code = XPT_SASYNC_CB;
791 		csa.event_enable = AC_FOUND_DEVICE;
792 		csa.callback = daasync;
793 		csa.callback_arg = NULL;
794 		xpt_action((union ccb *)&csa);
795 		status = csa.ccb_h.status;
796 		xpt_free_path(path);
797 	}
798 
799 	if (status != CAM_REQ_CMP) {
800 		printf("da: Failed to attach master async callback "
801 		       "due to status 0x%x!\n", status);
802 	} else {
803 
804 		/*
805 		 * Schedule a periodic event to occasionally send an
806 		 * ordered tag to a device.
807 		 */
808 		timeout(dasendorderedtag, NULL,
809 			(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
810 
811 		/* Register our shutdown event handler */
812 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
813 					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
814 		    printf("dainit: shutdown event registration failed!\n");
815 	}
816 }
817 
818 static void
819 daoninvalidate(struct cam_periph *periph)
820 {
821 	int s;
822 	struct da_softc *softc;
823 	struct bio *q_bp;
824 	struct ccb_setasync csa;
825 
826 	softc = (struct da_softc *)periph->softc;
827 
828 	/*
829 	 * De-register any async callbacks.
830 	 */
831 	xpt_setup_ccb(&csa.ccb_h, periph->path,
832 		      /* priority */ 5);
833 	csa.ccb_h.func_code = XPT_SASYNC_CB;
834 	csa.event_enable = 0;
835 	csa.callback = daasync;
836 	csa.callback_arg = periph;
837 	xpt_action((union ccb *)&csa);
838 
839 	softc->flags |= DA_FLAG_PACK_INVALID;
840 
841 	/*
842 	 * Although the oninvalidate() routines are always called at
843 	 * splsoftcam, we need to be at splbio() here to keep the buffer
844 	 * queue from being modified while we traverse it.
845 	 */
846 	s = splbio();
847 
848 	/*
849 	 * Return all queued I/O with ENXIO.
850 	 * XXX Handle any transactions queued to the card
851 	 *     with XPT_ABORT_CCB.
852 	 */
853 	while ((q_bp = bioq_first(&softc->bio_queue)) != NULL){
854 		bioq_remove(&softc->bio_queue, q_bp);
855 		q_bp->bio_resid = q_bp->bio_bcount;
856 		biofinish(q_bp, NULL, ENXIO);
857 	}
858 	splx(s);
859 
860 	SLIST_REMOVE(&softc_list, softc, da_softc, links);
861 
862 	xpt_print_path(periph->path);
863 	printf("lost device\n");
864 }
865 
866 static void
867 dacleanup(struct cam_periph *periph)
868 {
869 	struct da_softc *softc;
870 
871 	softc = (struct da_softc *)periph->softc;
872 
873 	devstat_remove_entry(&softc->device_stats);
874 	cam_extend_release(daperiphs, periph->unit_number);
875 	xpt_print_path(periph->path);
876 	printf("removing device entry\n");
877 	if (softc->dev) {
878 		disk_destroy(softc->dev);
879 	}
880 	free(softc, M_DEVBUF);
881 }
882 
883 static void
884 daasync(void *callback_arg, u_int32_t code,
885 	struct cam_path *path, void *arg)
886 {
887 	struct cam_periph *periph;
888 
889 	periph = (struct cam_periph *)callback_arg;
890 	switch (code) {
891 	case AC_FOUND_DEVICE:
892 	{
893 		struct ccb_getdev *cgd;
894 		cam_status status;
895 
896 		cgd = (struct ccb_getdev *)arg;
897 
898 		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
899 		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
900 			break;
901 
902 		/*
903 		 * Allocate a peripheral instance for
904 		 * this device and start the probe
905 		 * process.
906 		 */
907 		status = cam_periph_alloc(daregister, daoninvalidate,
908 					  dacleanup, dastart,
909 					  "da", CAM_PERIPH_BIO,
910 					  cgd->ccb_h.path, daasync,
911 					  AC_FOUND_DEVICE, cgd);
912 
913 		if (status != CAM_REQ_CMP
914 		 && status != CAM_REQ_INPROG)
915 			printf("daasync: Unable to attach to new device "
916 				"due to status 0x%x\n", status);
917 		break;
918 	}
919 	case AC_SENT_BDR:
920 	case AC_BUS_RESET:
921 	{
922 		struct da_softc *softc;
923 		struct ccb_hdr *ccbh;
924 		int s;
925 
926 		softc = (struct da_softc *)periph->softc;
927 		s = splsoftcam();
928 		/*
929 		 * Don't fail on the expected unit attention
930 		 * that will occur.
931 		 */
932 		softc->flags |= DA_FLAG_RETRY_UA;
933 		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
934 			ccbh->ccb_state |= DA_CCB_RETRY_UA;
935 		splx(s);
936 		/* FALLTHROUGH*/
937 	}
938 	default:
939 		cam_periph_async(periph, code, path, arg);
940 		break;
941 	}
942 }
943 
944 static cam_status
945 daregister(struct cam_periph *periph, void *arg)
946 {
947 	int s;
948 	struct da_softc *softc;
949 	struct ccb_setasync csa;
950 	struct ccb_getdev *cgd;
951 	caddr_t match;
952 
953 	cgd = (struct ccb_getdev *)arg;
954 	if (periph == NULL) {
955 		printf("daregister: periph was NULL!!\n");
956 		return(CAM_REQ_CMP_ERR);
957 	}
958 
959 	if (cgd == NULL) {
960 		printf("daregister: no getdev CCB, can't register device\n");
961 		return(CAM_REQ_CMP_ERR);
962 	}
963 
964 	softc = (struct da_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);
965 
966 	if (softc == NULL) {
967 		printf("daregister: Unable to probe new device. "
968 		       "Unable to allocate softc\n");
969 		return(CAM_REQ_CMP_ERR);
970 	}
971 
972 	bzero(softc, sizeof(*softc));
973 	LIST_INIT(&softc->pending_ccbs);
974 	softc->state = DA_STATE_PROBE;
975 	bioq_init(&softc->bio_queue);
976 	if (SID_IS_REMOVABLE(&cgd->inq_data))
977 		softc->flags |= DA_FLAG_PACK_REMOVABLE;
978 	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
979 		softc->flags |= DA_FLAG_TAGGED_QUEUING;
980 
981 	periph->softc = softc;
982 
983 	cam_extend_set(daperiphs, periph->unit_number, periph);
984 
985 	/*
986 	 * See if this device has any quirks.
987 	 */
988 	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
989 			       (caddr_t)da_quirk_table,
990 			       sizeof(da_quirk_table)/sizeof(*da_quirk_table),
991 			       sizeof(*da_quirk_table), scsi_inquiry_match);
992 
993 	if (match != NULL)
994 		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
995 	else
996 		softc->quirks = DA_Q_NONE;
997 
998 	if (softc->quirks & DA_Q_NO_6_BYTE)
999 		softc->minimum_cmd_size = 10;
1000 	else
1001 		softc->minimum_cmd_size = 6;
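	/*
	 * Note: minimum_cmd_size is only a floor.  scsi_read_write()
	 * (defined elsewhere) is still expected to fall back to a 10 byte
	 * CDB when the starting LBA or block count will not fit in the
	 * 6 byte READ/WRITE form, even when the minimum here is 6.
	 */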
1002 
1003 	/*
1004 	 * Block our timeout handler while we
1005 	 * add this softc to the dev list.
1006 	 */
1007 	s = splsoftclock();
1008 	SLIST_INSERT_HEAD(&softc_list, softc, links);
1009 	splx(s);
1010 
1011 	/*
1012 	 * The DA driver supports a blocksize, but
1013 	 * we don't know the blocksize until we do
1014 	 * a read capacity.  So, set a flag to
1015 	 * indicate that the blocksize is
1016 	 * unavailable right now.  We'll clear the
1017 	 * flag as soon as we've done a read capacity.
1018 	 */
1019 	devstat_add_entry(&softc->device_stats, "da",
1020 			  periph->unit_number, 0,
1021 	  		  DEVSTAT_BS_UNAVAILABLE,
1022 			  SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI,
1023 			  DEVSTAT_PRIORITY_DISK);
1024 
1025 	/*
1026 	 * Register this media as a disk
1027 	 */
1028 	softc->dev = disk_create(periph->unit_number, &softc->disk, 0,
1029 	    &da_cdevsw, &dadisk_cdevsw);
1030 
1031 	/*
1032 	 * Add async callbacks for bus reset and
1033 	 * bus device reset calls.  I don't bother
1034 	 * checking if this fails as, in most cases,
1035 	 * the system will function just fine without
1036 	 * them and the only alternative would be to
1037 	 * not attach the device on failure.
1038 	 */
1039 	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
1040 	csa.ccb_h.func_code = XPT_SASYNC_CB;
1041 	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
1042 	csa.callback = daasync;
1043 	csa.callback_arg = periph;
1044 	xpt_action((union ccb *)&csa);
1045 	/*
1046 	 * Lock this peripheral until we are set up.
1047 	 * This first call can't block.
1048 	 */
1049 	(void)cam_periph_lock(periph, PRIBIO);
1050 	xpt_schedule(periph, /*priority*/5);
1051 
1052 	return(CAM_REQ_CMP);
1053 }
1054 
1055 static void
1056 dastart(struct cam_periph *periph, union ccb *start_ccb)
1057 {
1058 	struct da_softc *softc;
1059 
1060 	softc = (struct da_softc *)periph->softc;
1061 
1062 
1063 	switch (softc->state) {
1064 	case DA_STATE_NORMAL:
1065 	{
1066 		/* Pull a buffer from the queue and get going on it */
1067 		struct bio *bp;
1068 		int s;
1069 
1070 		/*
1071 		 * See if there is a bio with work for us to do.
1072 		 */
1073 		s = splbio();
1074 		bp = bioq_first(&softc->bio_queue);
1075 		if (periph->immediate_priority <= periph->pinfo.priority) {
1076 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
1077 					("queuing for immediate ccb\n"));
1078 			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
1079 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1080 					  periph_links.sle);
1081 			periph->immediate_priority = CAM_PRIORITY_NONE;
1082 			splx(s);
1083 			wakeup(&periph->ccb_list);
1084 		} else if (bp == NULL) {
1085 			splx(s);
1086 			xpt_release_ccb(start_ccb);
1087 		} else {
1088 			int oldspl;
1089 			u_int8_t tag_code;
1090 
1091 			bioq_remove(&softc->bio_queue, bp);
1092 
1093 			devstat_start_transaction(&softc->device_stats);
1094 
1095 			if ((bp->bio_flags & BIO_ORDERED) != 0
1096 			 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
1097 				softc->flags &= ~DA_FLAG_NEED_OTAG;
1098 				softc->ordered_tag_count++;
1099 				tag_code = MSG_ORDERED_Q_TAG;
1100 			} else {
1101 				tag_code = MSG_SIMPLE_Q_TAG;
1102 			}
1103 			scsi_read_write(&start_ccb->csio,
1104 					/*retries*/4,
1105 					dadone,
1106 					tag_code,
1107 					bp->bio_cmd == BIO_READ,
1108 					/*byte2*/0,
1109 					softc->minimum_cmd_size,
1110 					bp->bio_pblkno,
1111 					bp->bio_bcount / softc->params.secsize,
1112 					bp->bio_data,
1113 					bp->bio_bcount,
1114 					/*sense_len*/SSD_FULL_SIZE,
1115 					DA_DEFAULT_TIMEOUT * 1000);
1116 			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
1117 
1118 			/*
1119 			 * Block out any asynchronous callbacks
1120 			 * while we touch the pending ccb list.
1121 			 */
1122 			oldspl = splcam();
1123 			LIST_INSERT_HEAD(&softc->pending_ccbs,
1124 					 &start_ccb->ccb_h, periph_links.le);
1125 			splx(oldspl);
1126 
1127 			/* We expect a unit attention from this device */
1128 			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
1129 				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
1130 				softc->flags &= ~DA_FLAG_RETRY_UA;
1131 			}
1132 
1133 			start_ccb->ccb_h.ccb_bp = bp;
1134 			bp = bioq_first(&softc->bio_queue);
1135 			splx(s);
1136 
1137 			xpt_action(start_ccb);
1138 		}
1139 
1140 		if (bp != NULL) {
1141 			/* Have more work to do, so ensure we stay scheduled */
1142 			xpt_schedule(periph, /* XXX priority */1);
1143 		}
1144 		break;
1145 	}
1146 	case DA_STATE_PROBE:
1147 	{
1148 		struct ccb_scsiio *csio;
1149 		struct scsi_read_capacity_data *rcap;
1150 
1151 		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
1152 								M_TEMP,
1153 								M_NOWAIT);
1154 		if (rcap == NULL) {
1155 			printf("dastart: Couldn't malloc read_capacity data\n");
1156 			/* da_free_periph??? */
1157 			break;
1158 		}
1159 		csio = &start_ccb->csio;
1160 		scsi_read_capacity(csio,
1161 				   /*retries*/4,
1162 				   dadone,
1163 				   MSG_SIMPLE_Q_TAG,
1164 				   rcap,
1165 				   SSD_FULL_SIZE,
1166 				   /*timeout*/5000);
1167 		start_ccb->ccb_h.ccb_bp = NULL;
1168 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
1169 		xpt_action(start_ccb);
1170 		break;
1171 	}
1172 	}
1173 }
1174 
1175 
1176 static void
1177 dadone(struct cam_periph *periph, union ccb *done_ccb)
1178 {
1179 	struct da_softc *softc;
1180 	struct ccb_scsiio *csio;
1181 
1182 	softc = (struct da_softc *)periph->softc;
1183 	csio = &done_ccb->csio;
1184 	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
1185 	case DA_CCB_BUFFER_IO:
1186 	{
1187 		struct bio *bp;
1188 		int    oldspl;
1189 
1190 		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
1191 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1192 			int error;
1193 			int s;
1194 			int sf;
1195 
1196 			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
1197 				sf = SF_RETRY_UA;
1198 			else
1199 				sf = 0;
1200 
1201 			error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
1202 			if (error == ERESTART) {
1203 				/*
1204 				 * A retry was scheduled, so
1205 				 * just return.
1206 				 */
1207 				return;
1208 			}
1209 			if (error != 0) {
1210 				struct bio *q_bp;
1211 
1212 				s = splbio();
1213 
1214 				if (error == ENXIO) {
1215 					/*
1216 					 * Catastrophic error.  Mark our pack as
1217 					 * invalid.
1218 					 */
1219 					/* XXX See if this is really a media
1220 					 *     change first.
1221 					 */
1222 					xpt_print_path(periph->path);
1223 					printf("Invalidating pack\n");
1224 					softc->flags |= DA_FLAG_PACK_INVALID;
1225 				}
1226 
1227 				/*
1228 				 * return all queued I/O with EIO, so that
1229 				 * the client can retry these I/Os in the
1230 				 * proper order should it attempt to recover.
1231 				 */
1232 				while ((q_bp = bioq_first(&softc->bio_queue))
1233 					!= NULL) {
1234 					bioq_remove(&softc->bio_queue, q_bp);
1235 					q_bp->bio_resid = q_bp->bio_bcount;
1236 					biofinish(q_bp, NULL, EIO);
1237 				}
1238 				splx(s);
1239 				bp->bio_error = error;
1240 				bp->bio_resid = bp->bio_bcount;
1241 				bp->bio_flags |= BIO_ERROR;
1242 			} else {
1243 				bp->bio_resid = csio->resid;
1244 				bp->bio_error = 0;
1245 				if (bp->bio_resid != 0) {
1246 					/* Short transfer ??? */
1247 					bp->bio_flags |= BIO_ERROR;
1248 				}
1249 			}
1250 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1251 				cam_release_devq(done_ccb->ccb_h.path,
1252 						 /*relsim_flags*/0,
1253 						 /*reduction*/0,
1254 						 /*timeout*/0,
1255 						 /*getcount_only*/0);
1256 		} else {
1257 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1258 				panic("REQ_CMP with QFRZN");
1259 			bp->bio_resid = csio->resid;
1260 			if (csio->resid > 0)
1261 				bp->bio_flags |= BIO_ERROR;
1262 		}
1263 
1264 		/*
1265 		 * Block out any asynchronous callbacks
1266 		 * while we touch the pending ccb list.
1267 		 */
1268 		oldspl = splcam();
1269 		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
1270 		splx(oldspl);
1271 
1272 		if (softc->device_stats.busy_count == 0)
1273 			softc->flags |= DA_FLAG_WENT_IDLE;
1274 
1275 		biofinish(bp, &softc->device_stats, 0);
1276 		break;
1277 	}
1278 	case DA_CCB_PROBE:
1279 	{
1280 		struct	   scsi_read_capacity_data *rdcap;
1281 		char	   announce_buf[80];
1282 
1283 		rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;
1284 
1285 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1286 			struct disk_params *dp;
1287 
1288 			dasetgeom(periph, rdcap);
1289 			dp = &softc->params;
1290 			snprintf(announce_buf, sizeof(announce_buf),
1291 			        "%luMB (%u %u byte sectors: %dH %dS/T %dC)",
1292 				(unsigned long) (((u_int64_t)dp->secsize *
1293 				dp->sectors) / (1024*1024)), dp->sectors,
1294 				dp->secsize, dp->heads, dp->secs_per_track,
1295 				dp->cylinders);
1296 		} else {
1297 			int	error;
1298 
1299 			announce_buf[0] = '\0';
1300 
1301 			/*
1302 			 * Retry any UNIT ATTENTION type errors.  They
1303 			 * are expected at boot.
1304 			 */
1305 			error = daerror(done_ccb, CAM_RETRY_SELTO,
1306 					SF_RETRY_UA|SF_NO_PRINT);
1307 			if (error == ERESTART) {
1308 				/*
1309 				 * A retry was scheduled, so
1310 				 * just return.
1311 				 */
1312 				return;
1313 			} else if (error != 0) {
1314 				struct scsi_sense_data *sense;
1315 				int asc, ascq;
1316 				int sense_key, error_code;
1317 				int have_sense;
1318 				cam_status status;
1319 				struct ccb_getdev cgd;
1320 
1321 				/* Don't wedge this device's queue */
1322 				status = done_ccb->ccb_h.status;
1323 				if ((status & CAM_DEV_QFRZN) != 0)
1324 					cam_release_devq(done_ccb->ccb_h.path,
1325 							 /*relsim_flags*/0,
1326 							 /*reduction*/0,
1327 							 /*timeout*/0,
1328 							 /*getcount_only*/0);
1329 
1330 
1331 				xpt_setup_ccb(&cgd.ccb_h,
1332 					      done_ccb->ccb_h.path,
1333 					      /* priority */ 1);
1334 				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1335 				xpt_action((union ccb *)&cgd);
1336 
1337 				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
1338 				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
1339 				 || ((status & CAM_AUTOSNS_VALID) == 0))
1340 					have_sense = FALSE;
1341 				else
1342 					have_sense = TRUE;
1343 
1344 				if (have_sense) {
1345 					sense = &csio->sense_data;
1346 					scsi_extract_sense(sense, &error_code,
1347 							   &sense_key,
1348 							   &asc, &ascq);
1349 				}
1350 				/*
1351 				 * Attach to anything that claims to be a
1352 				 * direct access or optical disk device,
1353 				 * as long as it doesn't return a "Logical
1354 				 * unit not supported" (0x25) error.
1355 				 */
1356 				if ((have_sense) && (asc != 0x25)
1357 				 && (error_code == SSD_CURRENT_ERROR)) {
1358 					const char *sense_key_desc;
1359 					const char *asc_desc;
1360 
1361 					scsi_sense_desc(sense_key, asc, ascq,
1362 							&cgd.inq_data,
1363 							&sense_key_desc,
1364 							&asc_desc);
1365 					snprintf(announce_buf,
1366 					    sizeof(announce_buf),
1367 						"Attempt to query device "
1368 						"size failed: %s, %s",
1369 						sense_key_desc,
1370 						asc_desc);
1371 				} else {
1372 					if (have_sense)
1373 						scsi_sense_print(
1374 							&done_ccb->csio);
1375 					else {
1376 						xpt_print_path(periph->path);
1377 						printf("got CAM status %#x\n",
1378 						       done_ccb->ccb_h.status);
1379 					}
1380 
1381 					xpt_print_path(periph->path);
1382 					printf("fatal error, failed"
1383 					       " to attach to device\n");
1384 
1385 					/*
1386 					 * Free up resources.
1387 					 */
1388 					cam_periph_invalidate(periph);
1389 				}
1390 			}
1391 		}
1392 		free(rdcap, M_TEMP);
1393 		if (announce_buf[0] != '\0')
1394 			xpt_announce_periph(periph, announce_buf);
1395 		softc->state = DA_STATE_NORMAL;
1396 		/*
1397 		 * Since our peripheral may be invalidated by an error
1398 		 * above or an external event, we must release our CCB
1399 		 * before releasing the probe lock on the peripheral.
1400 		 * The peripheral will only go away once the last lock
1401 		 * is removed, and we need it around for the CCB release
1402 		 * operation.
1403 		 */
1404 		xpt_release_ccb(done_ccb);
1405 		cam_periph_unlock(periph);
1406 		return;
1407 	}
1408 	case DA_CCB_WAITING:
1409 	{
1410 		/* Caller will release the CCB */
1411 		wakeup(&done_ccb->ccb_h.cbfcnp);
1412 		return;
1413 	}
1414 	case DA_CCB_DUMP:
1415 		/* No-op.  We're polling */
1416 		return;
1417 	default:
1418 		break;
1419 	}
1420 	xpt_release_ccb(done_ccb);
1421 }
1422 
1423 static int
1424 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1425 {
1426 	struct da_softc	  *softc;
1427 	struct cam_periph *periph;
1428 
1429 	periph = xpt_path_periph(ccb->ccb_h.path);
1430 	softc = (struct da_softc *)periph->softc;
1431 
1432 	/*
1433 	 * XXX
1434 	 * Until we have a better way of doing pack validation,
1435 	 * don't treat UAs as errors.
1436 	 */
1437 	sense_flags |= SF_RETRY_UA;
1438 	return(cam_periph_error(ccb, cam_flags, sense_flags,
1439 				&softc->saved_ccb));
1440 }
1441 
1442 static void
1443 daprevent(struct cam_periph *periph, int action)
1444 {
1445 	struct	da_softc *softc;
1446 	union	ccb *ccb;
1447 	int	error;
1448 
1449 	softc = (struct da_softc *)periph->softc;
1450 
1451 	if (((action == PR_ALLOW)
1452 	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
1453 	 || ((action == PR_PREVENT)
1454 	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
1455 		return;
1456 	}
1457 
1458 	ccb = cam_periph_getccb(periph, /*priority*/1);
1459 
1460 	scsi_prevent(&ccb->csio,
1461 		     /*retries*/1,
1462 		     /*cbfcnp*/dadone,
1463 		     MSG_SIMPLE_Q_TAG,
1464 		     action,
1465 		     SSD_FULL_SIZE,
1466 		     5000);
1467 
1468 	error = cam_periph_runccb(ccb, /*error_routine*/NULL, CAM_RETRY_SELTO,
1469 				  SF_RETRY_UA, &softc->device_stats);
1470 
1471 	if (error == 0) {
1472 		if (action == PR_ALLOW)
1473 			softc->flags &= ~DA_FLAG_PACK_LOCKED;
1474 		else
1475 			softc->flags |= DA_FLAG_PACK_LOCKED;
1476 	}
1477 
1478 	xpt_release_ccb(ccb);
1479 }
1480 
1481 static void
1482 dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data * rdcap)
1483 {
1484 	struct ccb_calc_geometry ccg;
1485 	struct da_softc *softc;
1486 	struct disk_params *dp;
1487 
1488 	softc = (struct da_softc *)periph->softc;
1489 
1490 	dp = &softc->params;
1491 	dp->secsize = scsi_4btoul(rdcap->length);
1492 	dp->sectors = scsi_4btoul(rdcap->addr) + 1;
1493 	/*
1494 	 * Have the controller provide us with a geometry
1495 	 * for this disk.  The only time the geometry
1496 	 * matters is when we boot and the controller
1497 	 * is the only one knowledgeable enough to come
1498 	 * up with something that will make this a bootable
1499 	 * device.
1500 	 */
1501 	xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
1502 	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
1503 	ccg.block_size = dp->secsize;
1504 	ccg.volume_size = dp->sectors;
1505 	ccg.heads = 0;
1506 	ccg.secs_per_track = 0;
1507 	ccg.cylinders = 0;
1508 	xpt_action((union ccb*)&ccg);
1509 	dp->heads = ccg.heads;
1510 	dp->secs_per_track = ccg.secs_per_track;
1511 	dp->cylinders = ccg.cylinders;
1512 }
1513 
1514 static void
1515 dasendorderedtag(void *arg)
1516 {
1517 	struct da_softc *softc;
1518 	int s;
1519 
1520 	for (softc = SLIST_FIRST(&softc_list);
1521 	     softc != NULL;
1522 	     softc = SLIST_NEXT(softc, links)) {
1523 		s = splsoftcam();
1524 		if ((softc->ordered_tag_count == 0)
1525 		 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
1526 			softc->flags |= DA_FLAG_NEED_OTAG;
1527 		}
1528 		if (softc->device_stats.busy_count > 0)
1529 			softc->flags &= ~DA_FLAG_WENT_IDLE;
1530 
1531 		softc->ordered_tag_count = 0;
1532 		splx(s);
1533 	}
1534 	/* Queue us up again */
1535 	timeout(dasendorderedtag, NULL,
1536 		(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
1537 }
1538 
1539 /*
1540  * Step through all DA peripheral drivers, and if the device is still open,
1541  * sync the disk cache to physical media.
1542  */
1543 static void
1544 dashutdown(void * arg, int howto)
1545 {
1546 	struct cam_periph *periph;
1547 	struct da_softc *softc;
1548 
1549 	TAILQ_FOREACH(periph, &dadriver.units, unit_links) {
1550 		union ccb ccb;
1551 		softc = (struct da_softc *)periph->softc;
1552 
1553 		/*
1554 		 * We only sync the cache if the drive is still open, and
1555 		 * if the drive is capable of it.
1556 		 */
1557 		if (((softc->flags & DA_FLAG_OPEN) == 0)
1558 		 || (softc->quirks & DA_Q_NO_SYNC_CACHE))
1559 			continue;
1560 
1561 		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
1562 
1563 		ccb.ccb_h.ccb_state = DA_CCB_DUMP;
1564 		scsi_synchronize_cache(&ccb.csio,
1565 				       /*retries*/1,
1566 				       /*cbfcnp*/dadone,
1567 				       MSG_SIMPLE_Q_TAG,
1568 				       /*begin_lba*/0, /* whole disk */
1569 				       /*lb_count*/0,
1570 				       SSD_FULL_SIZE,
1571 				       5 * 60 * 1000);
1572 
1573 		xpt_polled_action(&ccb);
1574 
1575 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1576 			if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
1577 			     CAM_SCSI_STATUS_ERROR)
1578 			 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
1579 				int error_code, sense_key, asc, ascq;
1580 
1581 				scsi_extract_sense(&ccb.csio.sense_data,
1582 						   &error_code, &sense_key,
1583 						   &asc, &ascq);
1584 
1585 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
1586 					scsi_sense_print(&ccb.csio);
1587 			} else {
1588 				xpt_print_path(periph->path);
1589 				printf("Synchronize cache failed, status "
1590 				       "== 0x%x, scsi status == 0x%x\n",
1591 				       ccb.ccb_h.status, ccb.csio.scsi_status);
1592 			}
1593 		}
1594 
1595 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1596 			cam_release_devq(ccb.ccb_h.path,
1597 					 /*relsim_flags*/0,
1598 					 /*reduction*/0,
1599 					 /*timeout*/0,
1600 					 /*getcount_only*/0);
1601 
1602 	}
1603 }
1604 
1605 #else /* !_KERNEL */
1606 
1607 /*
1608  * XXX This is only left out of the kernel build to silence warnings.  If,
1609  * for some reason, this function is used in the kernel, the ifdefs should
1610  * be moved so it is included both in the kernel and userland.
1611  */
1612 void
1613 scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
1614 		 void (*cbfcnp)(struct cam_periph *, union ccb *),
1615 		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
1616 		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
1617 		 u_int32_t timeout)
1618 {
1619 	struct scsi_format_unit *scsi_cmd;
1620 
1621 	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
1622 	scsi_cmd->opcode = FORMAT_UNIT;
1623 	scsi_cmd->byte2 = byte2;
1624 	scsi_ulto2b(ileave, scsi_cmd->interleave);
1625 
1626 	cam_fill_csio(csio,
1627 		      retries,
1628 		      cbfcnp,
1629 		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
1630 		      tag_action,
1631 		      data_ptr,
1632 		      dxfer_len,
1633 		      sense_len,
1634 		      sizeof(*scsi_cmd),
1635 		      timeout);
1636 }
1637 
1638 #endif /* _KERNEL */
1639