xref: /freebsd/sys/cam/scsi/scsi_da.c (revision 0640d357f29fb1c0daaaffadd0416c5981413afd)
1 /*
2  * Implementation of SCSI Direct Access Peripheral driver for CAM.
3  *
4  * Copyright (c) 1997 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  *      $Id: scsi_da.c,v 1.11 1998/10/13 23:34:54 ken Exp $
29  */
30 
31 #include "opt_hw_wdog.h"
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/buf.h>
37 #include <sys/devicestat.h>
38 #include <sys/dkbad.h>
39 #include <sys/disklabel.h>
40 #include <sys/diskslice.h>
41 #include <sys/malloc.h>
42 #include <sys/conf.h>
43 
44 #include <machine/cons.h>
45 #include <machine/md_var.h>
46 
47 #include <vm/vm.h>
48 #include <vm/vm_prot.h>
49 #include <vm/pmap.h>
50 
51 #include <cam/cam.h>
52 #include <cam/cam_ccb.h>
53 #include <cam/cam_extend.h>
54 #include <cam/cam_periph.h>
55 #include <cam/cam_xpt_periph.h>
56 
57 #include <cam/scsi/scsi_message.h>
58 
/*
 * Per-unit driver state: a freshly attached unit first probes its capacity
 * (dastart() issues READ CAPACITY), then transitions to normal buf I/O.
 */
typedef enum {
	DA_STATE_PROBE,		/* READ CAPACITY outstanding */
	DA_STATE_NORMAL		/* geometry known; servicing the buf queue */
} da_state;
63 
/*
 * Per-unit flag bits kept in softc->flags.
 */
typedef enum {
	DA_FLAG_PACK_INVALID	= 0x001, /* media gone; fail new I/O */
	DA_FLAG_NEW_PACK	= 0x002, /* NOTE(review): not set/read in this chunk */
	DA_FLAG_PACK_LOCKED	= 0x004, /* presumably set by daprevent() -- confirm */
	DA_FLAG_PACK_REMOVABLE	= 0x008, /* inquiry says media is removable */
	DA_FLAG_TAGGED_QUEUING	= 0x010, /* device advertises SID_CmdQue */
	DA_FLAG_NEED_OTAG	= 0x020, /* send an ordered tag on next start */
	DA_FLAG_WENT_IDLE	= 0x040, /* presumably used by dasendorderedtag -- confirm */
	DA_FLAG_RETRY_UA	= 0x080, /* retry expected unit attn after reset */
	DA_FLAG_OPEN		= 0x100  /* unit open; periph reference held */
} da_flags;
75 
/* Device-specific workaround bits, assigned from da_quirk_table. */
typedef enum {
	DA_Q_NONE		= 0x00,
	DA_Q_NO_SYNC_CACHE	= 0x01	/* never send SYNCHRONIZE CACHE */
} da_quirks;
80 
/*
 * Tags stored in ccb_h.ccb_state so dadone() can dispatch completions.
 * The low nibble (DA_CCB_TYPE_MASK) selects the CCB type; DA_CCB_RETRY_UA
 * is an or-able flag bit on top of it.
 */
typedef enum {
	DA_CCB_PROBE		= 0x01,	/* READ CAPACITY issued by dastart() */
	DA_CCB_BUFFER_IO	= 0x02,	/* normal buf-queue read/write */
	DA_CCB_WAITING		= 0x03,	/* immediate CCB for a waiting thread */
	DA_CCB_DUMP		= 0x04,	/* polled I/O from dadump() */
	DA_CCB_TYPE_MASK	= 0x0F,
	DA_CCB_RETRY_UA		= 0x10	/* retry on unit attention */
} da_ccb_state;
89 
90 /* Offsets into our private area for storing information */
91 #define ccb_state	ppriv_field0
92 #define ccb_bp		ppriv_ptr1
93 
/* Disk geometry, filled in by dasetgeom() from READ CAPACITY data. */
struct disk_params {
	u_int8_t  heads;
	u_int16_t cylinders;
	u_int8_t  secs_per_track;
	u_int32_t secsize;	/* Number of bytes/sector */
	u_int32_t sectors;	/* total number sectors */
};
101 
/*
 * Per-unit driver instance state, hung off periph->softc.
 */
struct da_softc {
	struct	 buf_queue_head buf_queue;	/* sorted queue of pending bufs */
	struct	 devstat device_stats;		/* devstat(9) statistics */
	SLIST_ENTRY(da_softc) links;		/* entry on global softc_list */
	LIST_HEAD(, ccb_hdr) pending_ccbs;	/* CCBs issued but not completed */
	da_state state;				/* probe vs. normal operation */
	da_flags flags;				/* DA_FLAG_* bits */
	da_quirks quirks;			/* DA_Q_* workaround bits */
	int	 ordered_tag_count;		/* ordered tags issued so far */
	struct	 disk_params params;		/* geometry from READ CAPACITY */
	struct	 diskslices *dk_slices;	/* virtual drives */
	union	 ccb saved_ccb;		/* presumably for error recovery -- not used in this chunk */
};
115 
/* Inquiry match pattern plus the quirk bits to apply when it matches. */
struct da_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	da_quirks quirks;
};
120 
/*
 * Known-broken devices.  daregister() matches the unit's inquiry data
 * against these patterns; the first match supplies softc->quirks.
 */
static struct da_quirk_entry da_quirk_table[] =
{
	{
		/*
		 * This particular Fujitsu drive doesn't like the
		 * synchronize cache command.
		 * Reported by: Tom Jackson <toj@gorilla.net>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE

	},
	{
		/*
		 * This drive doesn't like the synchronize cache command
		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
		 * in NetBSD PR kern/6027, August 24, 1998.
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "2217*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	}
};
151 
152 static	d_open_t	daopen;
153 static	d_read_t	daread;
154 static	d_write_t	dawrite;
155 static	d_close_t	daclose;
156 static	d_strategy_t	dastrategy;
157 static	d_ioctl_t	daioctl;
158 static	d_dump_t	dadump;
159 static	d_psize_t	dasize;
160 static	periph_init_t	dainit;
161 static	void		daasync(void *callback_arg, u_int32_t code,
162 				struct cam_path *path, void *arg);
163 static	periph_ctor_t	daregister;
164 static	periph_dtor_t	dacleanup;
165 static	periph_start_t	dastart;
166 static	periph_oninv_t	daoninvalidate;
167 static	void		dadone(struct cam_periph *periph,
168 			       union ccb *done_ccb);
169 static  int		daerror(union ccb *ccb, u_int32_t cam_flags,
170 				u_int32_t sense_flags);
171 static void		daprevent(struct cam_periph *periph, int action);
172 static void		dasetgeom(struct cam_periph *periph,
173 				  struct scsi_read_capacity_data * rdcap);
174 static timeout_t	dasendorderedtag;
175 static void		dashutdown(int howto, void *arg);
176 
177 #ifndef DA_DEFAULT_TIMEOUT
178 #define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
179 #endif
180 
181 /*
182  * DA_ORDEREDTAG_INTERVAL determines how often, relative
183  * to the default timeout, we check to see whether an ordered
184  * tagged transaction is appropriate to prevent simple tag
185  * starvation.  Since we'd like to ensure that there is at least
186  * 1/2 of the timeout length left for a starved transaction to
187  * complete after we've sent an ordered tag, we must poll at least
188  * four times in every timeout period.  This takes care of the worst
189  * case where a starved transaction starts during an interval that
190  * meets the requirement "don't send an ordered tag" test so it takes
191  * us two intervals to determine that a tag must be sent.
192  */
193 #ifndef DA_ORDEREDTAG_INTERVAL
194 #define DA_ORDEREDTAG_INTERVAL 4
195 #endif
196 
/*
 * Peripheral driver registration: dainit() is invoked at CAM configuration
 * time via the periphdriver_set linker set.
 */
static struct periph_driver dadriver =
{
	dainit, "da",
	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
};

DATA_SET(periphdriver_set, dadriver);
204 
205 #define DA_CDEV_MAJOR 13
206 #define DA_BDEV_MAJOR 4
207 
208 /* For 2.2-stable support */
209 #ifndef D_DISK
210 #define D_DISK 0
211 #endif
212 
/*
 * Device switch; registered for both the block (DA_BDEV_MAJOR) and
 * character (DA_CDEV_MAJOR) majors by dainit().
 */
static struct cdevsw da_cdevsw =
{
	/*d_open*/	daopen,
	/*d_close*/	daclose,
	/*d_read*/	daread,
	/*d_write*/	dawrite,
	/*d_ioctl*/	daioctl,
	/*d_stop*/	nostop,
	/*d_reset*/	noreset,
	/*d_devtotty*/	nodevtotty,
	/*d_poll*/	seltrue,
	/*d_mmap*/	nommap,
	/*d_strategy*/	dastrategy,
	/*d_name*/	"da",
	/*d_spare*/	NULL,
	/*d_maj*/	-1,
	/*d_dump*/	dadump,
	/*d_psize*/	dasize,
	/*d_flags*/	D_DISK,
	/*d_maxio*/	0,
	/*b_maj*/	-1
};
235 
/* List of all attached da softcs and the unit-number -> periph lookup table. */
static SLIST_HEAD(,da_softc) softc_list;
static struct extend_array *daperiphs;
238 
239 static int
240 daopen(dev_t dev, int flags, int fmt, struct proc *p)
241 {
242 	struct cam_periph *periph;
243 	struct da_softc *softc;
244 	struct disklabel label;
245 	int unit;
246 	int part;
247 	int error;
248 	int s;
249 
250 	unit = dkunit(dev);
251 	part = dkpart(dev);
252 	periph = cam_extend_get(daperiphs, unit);
253 	if (periph == NULL)
254 		return (ENXIO);
255 
256 	softc = (struct da_softc *)periph->softc;
257 
258 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
259 	    ("daopen: dev=0x%x (unit %d , partition %d)\n", dev,
260 	     unit, part));
261 
262 	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
263 		return (error); /* error code from tsleep */
264 	}
265 
266 	if ((softc->flags & DA_FLAG_OPEN) == 0) {
267 		if (cam_periph_acquire(periph) != CAM_REQ_CMP)
268 			return(ENXIO);
269 		softc->flags |= DA_FLAG_OPEN;
270 	}
271 
272 	s = splsoftcam();
273 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
274 		/*
275 		 * If any partition is open, although the disk has
276 		 * been invalidated, disallow further opens.
277 		 */
278 		if (dsisopen(softc->dk_slices)) {
279 			splx(s);
280 			cam_periph_unlock(periph);
281 			return (ENXIO);
282 		}
283 
284 		/* Invalidate our pack information. */
285 		dsgone(&softc->dk_slices);
286 		softc->flags &= ~DA_FLAG_PACK_INVALID;
287 	}
288 	splx(s);
289 
290 	/* Do a read capacity */
291 	{
292 		struct scsi_read_capacity_data *rcap;
293 		union  ccb *ccb;
294 
295 		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
296 								M_TEMP,
297 								M_WAITOK);
298 
299 		ccb = cam_periph_getccb(periph, /*priority*/1);
300 		scsi_read_capacity(&ccb->csio,
301 				   /*retries*/1,
302 				   /*cbfncp*/dadone,
303 				   MSG_SIMPLE_Q_TAG,
304 				   rcap,
305 				   SSD_FULL_SIZE,
306 				   /*timeout*/60000);
307 		ccb->ccb_h.ccb_bp = NULL;
308 
309 		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
310 					  /*sense_flags*/SF_RETRY_UA,
311 					  &softc->device_stats);
312 
313 		xpt_release_ccb(ccb);
314 
315 		if (error == 0) {
316 			dasetgeom(periph, rcap);
317 		}
318 
319 		free(rcap, M_TEMP);
320 	}
321 
322 	if (error == 0) {
323 		struct ccb_getdev cgd;
324 
325 		/* Build label for whole disk. */
326 		bzero(&label, sizeof(label));
327 		label.d_type = DTYPE_SCSI;
328 
329 		/*
330 		 * Grab the inquiry data to get the vendor and product names.
331 		 * Put them in the typename and packname for the label.
332 		 */
333 		xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
334 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
335 		xpt_action((union ccb *)&cgd);
336 
337 		strncpy(label.d_typename, cgd.inq_data.vendor,
338 			min(SID_VENDOR_SIZE, sizeof(label.d_typename)));
339 		strncpy(label.d_packname, cgd.inq_data.product,
340 			min(SID_PRODUCT_SIZE, sizeof(label.d_packname)));
341 
342 		label.d_secsize = softc->params.secsize;
343 		label.d_nsectors = softc->params.secs_per_track;
344 		label.d_ntracks = softc->params.heads;
345 		label.d_ncylinders = softc->params.cylinders;
346 		label.d_secpercyl = softc->params.heads
347 				  * softc->params.secs_per_track;
348 		label.d_secperunit = softc->params.sectors;
349 
350 		if ((dsisopen(softc->dk_slices) == 0)
351 		    && ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) {
352 			daprevent(periph, PR_PREVENT);
353 		}
354 
355 		/* Initialize slice tables. */
356 		error = dsopen("da", dev, fmt, 0, &softc->dk_slices, &label,
357 			       dastrategy, (ds_setgeom_t *)NULL,
358 			       &da_cdevsw);
359 
360 		/*
361 		 * Check to see whether or not the blocksize is set yet.
362 		 * If it isn't, set it and then clear the blocksize
363 		 * unavailable flag for the device statistics.
364 		 */
365 		if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
366 			softc->device_stats.block_size = softc->params.secsize;
367 			softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
368 		}
369 	}
370 
371 	if (error != 0) {
372 		if ((dsisopen(softc->dk_slices) == 0)
373 		 && ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) {
374 			daprevent(periph, PR_ALLOW);
375 		}
376 	}
377 	cam_periph_unlock(periph);
378 	return (error);
379 }
380 
/*
 * Close a partition.  When the last slice reference goes away, flush the
 * drive's write cache (unless the device is quirked against SYNCHRONIZE
 * CACHE), re-allow media removal, and drop the reference acquired in
 * daopen().
 */
static int
daclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct	cam_periph *periph;
	struct	da_softc *softc;
	int	unit;
	int	error;

	unit = dkunit(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct da_softc *)periph->softc;

	if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
		return (error); /* error code from tsleep */
	}

	dsclose(dev, fmt, softc->dk_slices);
	if (dsisopen(softc->dk_slices)) {
		/* Other slices are still open; keep the pack active. */
		cam_periph_unlock(periph);
		return (0);
	}

	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
		union	ccb *ccb;

		ccb = cam_periph_getccb(periph, /*priority*/1);

		scsi_synchronize_cache(&ccb->csio,
				       /*retries*/1,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0,/* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 60 * 1000);

		/* Ignore any errors */
		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
				  /*sense_flags*/0, &softc->device_stats);

		/* Thaw the device queue if the failed command froze it. */
		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);

		xpt_release_ccb(ccb);

	}

	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
		daprevent(periph, PR_ALLOW);
		/*
		 * If we've got removable media, mark the blocksize as
		 * unavailable, since it could change when new media is
		 * inserted.
		 */
		softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
	}

	softc->flags &= ~DA_FLAG_OPEN;
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}
450 
451 static int
452 daread(dev_t dev, struct uio *uio, int ioflag)
453 {
454 	return(physio(dastrategy, NULL, dev, 1, minphys, uio));
455 }
456 
457 static int
458 dawrite(dev_t dev, struct uio *uio, int ioflag)
459 {
460 	return(physio(dastrategy, NULL, dev, 0, minphys, uio));
461 }
462 
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.  Buffers are queued here and actually
 * issued to the device by dastart() when the XPT schedules us.
 */
static void
dastrategy(struct buf *bp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	u_int  unit;
	u_int  part;
	int    s;

	unit = dkunit(bp->b_dev);
	part = dkpart(bp->b_dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL) {
		bp->b_error = ENXIO;
		goto bad;
	}
	softc = (struct da_softc *)periph->softc;
#if 0
	/*
	 * check it's not too big a transfer for our adapter
	 */
	scsi_minphys(bp,&sd_switch);
#endif

	/*
	 * Do bounds checking, adjust transfer, set b_cylin and b_pbklno.
	 * A non-positive return means the slice code rejected or finished
	 * the buf; we only complete it here.
	 */
	if (dscheck(bp, softc->dk_slices) <= 0)
		goto done;

	/*
	 * Mask interrupts so that the pack cannot be invalidated until
	 * after we are in the queue.  Otherwise, we might not properly
	 * clean up one of the buffers.
	 */
	s = splbio();

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
		splx(s);
		bp->b_error = ENXIO;
		goto bad;
	}

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	bufqdisksort(&softc->buf_queue, bp);

	splx(s);

	/*
	 * Schedule ourselves for performing the work.
	 */
	xpt_schedule(periph, /* XXX priority */1);

	return;
bad:
	bp->b_flags |= B_ERROR;
done:

	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
	return;
}
538 
539 /* For 2.2-stable support */
540 #ifndef ENOIOCTL
541 #define ENOIOCTL -1
542 #endif
543 
/*
 * Handle ioctls.  Disklabel/slice ioctls are offered to the slice layer
 * first; anything it declines (ENOIOCTL) is passed through to the generic
 * CAM peripheral ioctl handler.
 */
static int
daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int unit;
	int error;

	unit = dkunit(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct da_softc *)periph->softc;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n"));

	/* Bad-sector forwarding is not supported on SCSI disks. */
	if (cmd == DIOCSBAD)
		return (EINVAL);	/* XXX */

	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
		return (error); /* error code from tsleep */
	}

	error = dsioctl("da", dev, cmd, addr, flag, &softc->dk_slices,
			dastrategy, (ds_setgeom_t *)NULL);

	if (error == ENOIOCTL)
		error = cam_periph_ioctl(periph, cmd, addr, daerror);

	cam_periph_unlock(periph);

	return (error);
}
578 
579 static int
580 dadump(dev_t dev)
581 {
582 	struct	cam_periph *periph;
583 	struct	da_softc *softc;
584 	struct	disklabel *lp;
585 	u_int	unit;
586 	u_int	part;
587 	long	num;		/* number of sectors to write */
588 	long	blkoff;
589 	long	blknum;
590 	long	blkcnt;
591 	char	*addr;
592 	static	int dadoingadump = 0;
593 	struct	ccb_scsiio csio;
594 
595 	/* toss any characters present prior to dump */
596 	while (cncheckc() != -1)
597 		;
598 
599 	unit = dkunit(dev);
600 	part = dkpart(dev);
601 	periph = cam_extend_get(daperiphs, unit);
602 	if (periph == NULL) {
603 		return (ENXIO);
604 	}
605 	softc = (struct da_softc *)periph->softc;
606 
607 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0
608 	 || (softc->dk_slices == NULL)
609 	 || (lp = dsgetlabel(dev, softc->dk_slices)) == NULL)
610 		return (ENXIO);
611 
612 	/* Size of memory to dump, in disk sectors. */
613 	/* XXX Fix up for non DEV_BSIZE sectors!!! */
614 	num = (u_long)Maxmem * PAGE_SIZE / softc->params.secsize;
615 
616 	blkoff = lp->d_partitions[part].p_offset;
617 	blkoff += softc->dk_slices->dss_slices[dkslice(dev)].ds_offset;
618 
619 	/* check transfer bounds against partition size */
620 	if ((dumplo < 0) || ((dumplo + num) > lp->d_partitions[part].p_size))
621 		return (EINVAL);
622 
623 	if (dadoingadump != 0)
624 		return (EFAULT);
625 
626 	dadoingadump = 1;
627 
628 	blknum = dumplo + blkoff;
629 	blkcnt = PAGE_SIZE / softc->params.secsize;
630 
631 	addr = (char *)0;	/* starting address */
632 
633 	while (num > 0) {
634 
635 		if (is_physical_memory((vm_offset_t)addr)) {
636 			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
637 				   trunc_page((vm_offset_t)addr), VM_PROT_READ, TRUE);
638 		} else {
639 			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
640 				   trunc_page(0), VM_PROT_READ, TRUE);
641 		}
642 
643 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
644 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
645 		scsi_read_write(&csio,
646 				/*retries*/1,
647 				dadone,
648 				MSG_ORDERED_Q_TAG,
649 				/*read*/FALSE,
650 				/*byte2*/0,
651 				/*minimum_cmd_size*/ 6,
652 				blknum,
653 				blkcnt,
654 				/*data_ptr*/CADDR1,
655 				/*dxfer_len*/blkcnt * softc->params.secsize,
656 				/*sense_len*/SSD_FULL_SIZE,
657 				DA_DEFAULT_TIMEOUT * 1000);
658 		xpt_polled_action((union ccb *)&csio);
659 
660 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
661 			printf("Aborting dump due to I/O error.\n");
662 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
663 			     CAM_SCSI_STATUS_ERROR)
664 				scsi_sense_print(&csio);
665 			else
666 				printf("status == 0x%x, scsi status == 0x%x\n",
667 				       csio.ccb_h.status, csio.scsi_status);
668 			return(EIO);
669 		}
670 
671 		if ((intptr_t)addr % (1024 * 1024) == 0) {
672 #ifdef	HW_WDOG
673 			if (wdog_tickler)
674 				(*wdog_tickler)();
675 #endif /* HW_WDOG */
676 			/* Count in MB of data left to write */
677 			printf("%ld ", (num  * softc->params.secsize)
678 				     / (1024 * 1024));
679 		}
680 
681 		/* update block count */
682 		num -= blkcnt;
683 		blknum += blkcnt;
684 		(long)addr += blkcnt * softc->params.secsize;
685 
686 		/* operator aborting dump? */
687 		if (cncheckc() != -1)
688 			return (EINTR);
689 	}
690 
691 	/*
692 	 * Sync the disk cache contents to the physical media.
693 	 */
694 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
695 
696 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
697 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
698 		scsi_synchronize_cache(&csio,
699 				       /*retries*/1,
700 				       /*cbfcnp*/dadone,
701 				       MSG_SIMPLE_Q_TAG,
702 				       /*begin_lba*/0,/* Cover the whole disk */
703 				       /*lb_count*/0,
704 				       SSD_FULL_SIZE,
705 				       5 * 60 * 1000);
706 		xpt_polled_action((union ccb *)&csio);
707 
708 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
709 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
710 			     CAM_SCSI_STATUS_ERROR)
711 				scsi_sense_print(&csio);
712 			else {
713 				xpt_print_path(periph->path);
714 				printf("Synchronize cache failed, status "
715 				       "== 0x%x, scsi status == 0x%x\n",
716 				       csio.ccb_h.status, csio.scsi_status);
717 			}
718 		}
719 	}
720 	return (0);
721 }
722 
723 static int
724 dasize(dev_t dev)
725 {
726 	struct cam_periph *periph;
727 	struct da_softc *softc;
728 
729 	periph = cam_extend_get(daperiphs, dkunit(dev));
730 	if (periph == NULL)
731 		return (ENXIO);
732 
733 	softc = (struct da_softc *)periph->softc;
734 
735 	return (dssize(dev, &softc->dk_slices, daopen, daclose));
736 }
737 
/*
 * Driver initialization, run once at CAM configuration time via the
 * periphdriver linker set.  Sets up the unit lookup table, registers the
 * global AC_FOUND_DEVICE async callback, installs the devsw entries, and
 * arms the ordered-tag timer and shutdown (cache flush) hook.
 */
static void
dainit(void)
{
	cam_status status;
	struct cam_path *path;

	/*
	 * Create our extend array for storing the devices we attach to.
	 */
	daperiphs = cam_extend_new();
	SLIST_INIT(&softc_list);
	if (daperiphs == NULL) {
		printf("da: Failed to alloc extend array!\n");
		return;
	}

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);

	if (status == CAM_REQ_CMP) {
		struct ccb_setasync csa;

                xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
                csa.ccb_h.func_code = XPT_SASYNC_CB;
                csa.event_enable = AC_FOUND_DEVICE;
                csa.callback = daasync;
                csa.callback_arg = NULL;
                xpt_action((union ccb *)&csa);
		status = csa.ccb_h.status;
                xpt_free_path(path);
        }

	if (status != CAM_REQ_CMP) {
		printf("da: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else {
		int err;

		/* If we were successful, register our devsw */
		cdevsw_add_generic(DA_BDEV_MAJOR, DA_CDEV_MAJOR, &da_cdevsw);

		/*
		 * Schedule a periodic event to occasionally send an
		 * ordered tag to a device.
		 */
		timeout(dasendorderedtag, NULL,
			(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);

		/* Flush write caches before the system goes down. */
		if ((err = at_shutdown(dashutdown, NULL,
				       SHUTDOWN_POST_SYNC)) != 0)
			printf("dainit: at_shutdown returned %d!\n", err);
	}
}
795 
/*
 * Periph invalidation hook: the device has gone away.  De-register async
 * callbacks, mark the pack invalid so daopen()/dastrategy() refuse new
 * work, and fail all queued-but-unissued bufs with ENXIO.
 */
static void
daoninvalidate(struct cam_periph *periph)
{
	int s;
	struct da_softc *softc;
	struct buf *q_bp;
	struct ccb_setasync csa;

	softc = (struct da_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path,
		      /* priority */ 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = daasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);

	softc->flags |= DA_FLAG_PACK_INVALID;

	/*
	 * Although the oninvalidate() routines are always called at
	 * splsoftcam, we need to be at splbio() here to keep the buffer
	 * queue from being modified while we traverse it.
	 */
	s = splbio();

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){
		bufq_remove(&softc->buf_queue, q_bp);
		q_bp->b_resid = q_bp->b_bcount;
		q_bp->b_error = ENXIO;
		q_bp->b_flags |= B_ERROR;
		biodone(q_bp);
	}
	splx(s);

	SLIST_REMOVE(&softc_list, softc, da_softc, links);

	xpt_print_path(periph->path);
	printf("lost device\n");
}
845 
/*
 * Periph destructor, run when the last reference to an invalidated
 * peripheral is released.  Unhooks devstat and the unit lookup entry,
 * then frees the softc (last, since everything above references it).
 */
static void
dacleanup(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	devstat_remove_entry(&softc->device_stats);
	cam_extend_release(daperiphs, periph->unit_number);
	xpt_print_path(periph->path);
	printf("removing device entry\n");
	free(softc, M_DEVBUF);
}
859 
/*
 * Async event callback.  Registered globally (in dainit) for
 * AC_FOUND_DEVICE and per-periph (in daregister) for reset and
 * lost-device events; callback_arg is NULL for the global registration
 * and the periph for the per-device one.
 */
static void
daasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;

		/* Only direct-access and optical devices belong to da. */
		if ((cgd->pd_type != T_DIRECT) && (cgd->pd_type != T_OPTICAL))
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(daregister, daoninvalidate,
					  dacleanup, dastart,
					  "da", CAM_PERIPH_BIO,
					  cgd->ccb_h.path, daasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("daasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		break;
	}
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		struct da_softc *softc;
		struct ccb_hdr *ccbh;
		int s;

		softc = (struct da_softc *)periph->softc;
		s = splsoftcam();
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.  Mark both future and already
		 * outstanding CCBs for the UA retry.
		 */
		softc->flags |= DA_FLAG_RETRY_UA;
		for (ccbh = LIST_FIRST(&softc->pending_ccbs);
		     ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
			ccbh->ccb_state |= DA_CCB_RETRY_UA;
		splx(s);
		break;
	}
	case AC_TRANSFER_NEG:
	case AC_SCSI_AEN:
	case AC_UNSOL_RESEL:
	default:
		break;
	}
}
925 
/*
 * Periph constructor: allocate and initialize the softc for a newly found
 * direct-access device, apply quirks from da_quirk_table, register devstat
 * and per-device async callbacks, then schedule the capacity probe
 * (state DA_STATE_PROBE -> dastart()).
 *
 * Returns CAM_REQ_CMP on success, CAM_REQ_CMP_ERR on failure.
 */
static cam_status
daregister(struct cam_periph *periph, void *arg)
{
	int s;
	struct da_softc *softc;
	struct ccb_setasync csa;
	struct ccb_getdev *cgd;
	caddr_t match;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("daregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("daregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct da_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);

	if (softc == NULL) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	bzero(softc, sizeof(*softc));
	LIST_INIT(&softc->pending_ccbs);
	softc->state = DA_STATE_PROBE;
	bufq_init(&softc->buf_queue);
	if (SID_IS_REMOVABLE(&cgd->inq_data))
		softc->flags |= DA_FLAG_PACK_REMOVABLE;
	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
		softc->flags |= DA_FLAG_TAGGED_QUEUING;

	periph->softc = softc;

	/* Make the unit reachable from dev_t-based entry points. */
	cam_extend_set(daperiphs, periph->unit_number, periph);

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
			       (caddr_t)da_quirk_table,
			       sizeof(da_quirk_table)/sizeof(*da_quirk_table),
			       sizeof(*da_quirk_table), scsi_inquiry_match);

	if (match != NULL)
		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
	else
		softc->quirks = DA_Q_NONE;

	/*
	 * Block our timeout handler while we
	 * add this softc to the dev list.
	 */
	s = splsoftclock();
	SLIST_INSERT_HEAD(&softc_list, softc, links);
	splx(s);

	/*
	 * The DA driver supports a blocksize, but
	 * we don't know the blocksize until we do
	 * a read capacity.  So, set a flag to
	 * indicate that the blocksize is
	 * unavailable right now.  We'll clear the
	 * flag as soon as we've done a read capacity.
	 */
	devstat_add_entry(&softc->device_stats, "da",
			  periph->unit_number, 0,
	  		  DEVSTAT_BS_UNAVAILABLE,
			  cgd->pd_type | DEVSTAT_TYPE_IF_SCSI);

	/*
	 * Add async callbacks for bus reset and
	 * bus device reset calls.  I don't bother
	 * checking if this fails as, in most cases,
	 * the system will function just fine without
	 * them and the only alternative would be to
	 * not attach the device on failure.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
	csa.callback = daasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);
	/*
	 * Lock this peripheral until we are setup.
	 * This first call can't block
	 */
	(void)cam_periph_lock(periph, PRIBIO);
	xpt_schedule(periph, /*priority*/5);

	return(CAM_REQ_CMP);
}
1024 
/*
 * Periph start routine, called by the XPT with a freshly allocated CCB
 * once a prior xpt_schedule() wins arbitration.  In DA_STATE_PROBE the
 * CCB is used for the initial READ CAPACITY; in DA_STATE_NORMAL it
 * carries the next buf from the queue (or services a waiting immediate
 * CCB request).
 */
static void
dastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;


	switch (softc->state) {
	case DA_STATE_NORMAL:
	{
		/* Pull a buffer from the queue and get going on it */
		struct buf *bp;
		int s;

		/*
		 * See if there is a buf with work for us to do..
		 */
		s = splbio();
		bp = bufq_first(&softc->buf_queue);
		if (periph->immediate_priority <= periph->pinfo.priority) {
			/* Someone is sleeping on an immediate CCB. */
			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
					("queuing for immediate ccb\n"));
			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
					  periph_links.sle);
			periph->immediate_priority = CAM_PRIORITY_NONE;
			splx(s);
			wakeup(&periph->ccb_list);
		} else if (bp == NULL) {
			/* Nothing queued: give the CCB back. */
			splx(s);
			xpt_release_ccb(start_ccb);
		} else {
			int oldspl;
			u_int8_t tag_code;

			bufq_remove(&softc->buf_queue, bp);

			devstat_start_transaction(&softc->device_stats);

			/* Use an ordered tag periodically to avoid starvation. */
			if ((bp->b_flags & B_ORDERED) != 0
			 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
				softc->flags &= ~DA_FLAG_NEED_OTAG;
				softc->ordered_tag_count++;
				tag_code = MSG_ORDERED_Q_TAG;
			} else {
				tag_code = MSG_SIMPLE_Q_TAG;
			}
			scsi_read_write(&start_ccb->csio,
					/*retries*/4,
					dadone,
					tag_code,
					bp->b_flags & B_READ,
					/*byte2*/0,
					/*minimum_cmd_size*/ 6,
					bp->b_pblkno,
					bp->b_bcount / softc->params.secsize,
					bp->b_data,
					bp->b_bcount,
					/*sense_len*/SSD_FULL_SIZE,
					DA_DEFAULT_TIMEOUT * 1000);
			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;

			/*
			 * Block out any asynchronous callbacks
			 * while we touch the pending ccb list.
			 */
			oldspl = splcam();
			LIST_INSERT_HEAD(&softc->pending_ccbs,
					 &start_ccb->ccb_h, periph_links.le);
			splx(oldspl);

			/* We expect a unit attention from this device */
			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
				softc->flags &= ~DA_FLAG_RETRY_UA;
			}

			start_ccb->ccb_h.ccb_bp = bp;
			/* Peek at the next buf before dropping splbio. */
			bp = bufq_first(&softc->buf_queue);
			splx(s);

			xpt_action(start_ccb);
		}

		if (bp != NULL) {
			/* Have more work to do, so ensure we stay scheduled */
			xpt_schedule(periph, /* XXX priority */1);
		}
		break;
	}
	case DA_STATE_PROBE:
	{
		struct ccb_scsiio *csio;
		struct scsi_read_capacity_data *rcap;

		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
								M_TEMP,
								M_NOWAIT);
		if (rcap == NULL) {
			printf("dastart: Couldn't malloc read_capacity data\n");
			/* da_free_periph??? */
			break;
		}
		csio = &start_ccb->csio;
		scsi_read_capacity(csio,
				   /*retries*/4,
				   dadone,
				   MSG_SIMPLE_Q_TAG,
				   rcap,
				   SSD_FULL_SIZE,
				   /*timeout*/5000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
		xpt_action(start_ccb);
		break;
	}
	}
}
1144 
1145 
/*
 * Completion callback for all CCBs issued by the da driver.  Dispatches
 * on the DA_CCB_TYPE bits saved in ccb_state when the CCB was built:
 * normal buffer I/O, the initial READ CAPACITY probe, an "immediate"
 * CCB handed to a waiting ioctl path, or a polled dump/shutdown CCB.
 * Releases the CCB on exit except for the WAITING and DUMP cases, whose
 * issuers retain ownership.
 */
static void
dadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;

	softc = (struct da_softc *)periph->softc;
	csio = &done_ccb->csio;
	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
	case DA_CCB_BUFFER_IO:
	{
		struct buf *bp;
		int    oldspl;

		/* The originating buf was stashed in the CCB by dastart(). */
		bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;
			int s;
			int sf;

			/*
			 * Only retry unit attentions if dastart() flagged
			 * this CCB as one where a UA is expected.
			 */
			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
				sf = SF_RETRY_UA;
			else
				sf = 0;

			if ((error = daerror(done_ccb, 0, sf)) == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			}
			if (error != 0) {
				struct buf *q_bp;

				s = splbio();

				if (error == ENXIO) {
					/*
					 * Catastrophic error.  Mark our pack as
					 * invalid.
					 */
					/* XXX See if this is really a media
					 *     change first.
					 */
					xpt_print_path(periph->path);
					printf("Invalidating pack\n");
					softc->flags |= DA_FLAG_PACK_INVALID;
				}

				/*
				 * return all queued I/O with EIO, so that
				 * the client can retry these I/Os in the
				 * proper order should it attempt to recover.
				 */
				while ((q_bp = bufq_first(&softc->buf_queue))
					!= NULL) {
					bufq_remove(&softc->buf_queue, q_bp);
					q_bp->b_resid = q_bp->b_bcount;
					q_bp->b_error = EIO;
					q_bp->b_flags |= B_ERROR;
					biodone(q_bp);
				}
				splx(s);
				/* Fail the buf that actually errored. */
				bp->b_error = error;
				bp->b_resid = bp->b_bcount;
				bp->b_flags |= B_ERROR;
			} else {
				/*
				 * Error recovery succeeded; report any
				 * residual from the recovered transfer.
				 */
				bp->b_resid = csio->resid;
				bp->b_error = 0;
				if (bp->b_resid != 0) {
					/* Short transfer ??? */
					bp->b_flags |= B_ERROR;
				}
			}
			/*
			 * Error processing may have frozen the device
			 * queue; thaw it so queued I/O can continue.
			 */
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			/* Command completed; a nonzero residual is treated
			 * as an error on this path as well. */
			bp->b_resid = csio->resid;
			if (csio->resid > 0)
				bp->b_flags |= B_ERROR;
		}

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		oldspl = splcam();
		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
		splx(oldspl);

		/* Record the transfer in the devstat statistics. */
		devstat_end_transaction(&softc->device_stats,
					bp->b_bcount - bp->b_resid,
					done_ccb->csio.tag_action & 0xf,
					(bp->b_flags & B_READ) ? DEVSTAT_READ
							       : DEVSTAT_WRITE);

		/* No transactions outstanding: note that we went idle so
		 * dasendorderedtag() won't force an ordered tag. */
		if (softc->device_stats.busy_count == 0)
			softc->flags |= DA_FLAG_WENT_IDLE;

		biodone(bp);
		break;
	}
	case DA_CCB_PROBE:
	{
		struct	   scsi_read_capacity_data *rdcap;
		char	   announce_buf[80];

		rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;

		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct disk_params *dp;

			/* Capacity read OK: compute geometry and build the
			 * attach announcement string. */
			dasetgeom(periph, rdcap);
			dp = &softc->params;
			sprintf(announce_buf,
				"%ldMB (%d %d byte sectors: %dH %dS/T %dC)",
				dp->sectors / ((1024L * 1024L) / dp->secsize),
				dp->sectors, dp->secsize, dp->heads,
				dp->secs_per_track, dp->cylinders);
		} else {
			int	error;

			/*
			 * Retry any UNIT ATTENTION type errors.  They
			 * are expected at boot.
			 */
			error = daerror(done_ccb, 0, SF_RETRY_UA|SF_NO_PRINT);
			if (error == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			} else if (error != 0) {
				struct scsi_sense_data *sense;
				int asc, ascq;
				int sense_key, error_code;
				int have_sense;
				cam_status status;
				struct ccb_getdev cgd;

				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);

				status = done_ccb->ccb_h.status;

				/* Fetch inquiry data for sense decoding. */
				xpt_setup_ccb(&cgd.ccb_h,
					      done_ccb->ccb_h.path,
					      /* priority */ 1);
				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
				xpt_action((union ccb *)&cgd);

				/*
				 * Sense data is only usable if autosense ran
				 * and the sense buffer is a plain virtual
				 * address (not physical, not a pointer).
				 */
				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
				 || ((status & CAM_AUTOSNS_VALID) == 0))
					have_sense = FALSE;
				else
					have_sense = TRUE;

				if (have_sense) {
					sense = &csio->sense_data;
					scsi_extract_sense(sense, &error_code,
							   &sense_key,
							   &asc, &ascq);
				}
				/*
				 * With removable media devices, we expect
				 * 0x3a (Medium not present) errors, since not
				 * everyone leaves a disk in the drive.  If
				 * the error is anything else, though, we
				 * shouldn't attach.
				 */
				if ((have_sense) && (asc == 0x3a)
				 && (error_code == SSD_CURRENT_ERROR))
					sprintf(announce_buf,
						"Attempt to query device "
						"size failed: %s, %s",
						scsi_sense_key_text[sense_key],
						scsi_sense_desc(asc,ascq,
								&cgd.inq_data));
				else {
					if (have_sense)
						scsi_sense_print(
							&done_ccb->csio);
					else {
						xpt_print_path(periph->path);
						printf("got CAM status %#x\n",
						       done_ccb->ccb_h.status);
					}

					xpt_print_path(periph->path);
					printf("fatal error, failed"
					       " to attach to device\n");

					/*
					 * Free up resources.
					 */
					cam_periph_invalidate(periph);
					/* Empty buf suppresses announcement
					 * below. */
					announce_buf[0] = '\0';
				}
			}
		}
		free(rdcap, M_TEMP);
		if (announce_buf[0] != '\0')
			xpt_announce_periph(periph, announce_buf);
		softc->state = DA_STATE_NORMAL;
		/* Probe done; drop the lock taken by the probe initiator. */
		cam_periph_unlock(periph);
		break;
	}
	case DA_CCB_WAITING:
	{
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	case DA_CCB_DUMP:
		/* No-op.  We're polling */
		return;
	}
	xpt_release_ccb(done_ccb);
}
1376 
1377 static int
1378 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1379 {
1380 	struct da_softc	  *softc;
1381 	struct cam_periph *periph;
1382 
1383 	periph = xpt_path_periph(ccb->ccb_h.path);
1384 	softc = (struct da_softc *)periph->softc;
1385 
1386 	/*
1387 	 * XXX
1388 	 * Until we have a better way of doing pack validation,
1389 	 * don't treat UAs as errors.
1390 	 */
1391 	sense_flags |= SF_RETRY_UA;
1392 	return(cam_periph_error(ccb, cam_flags, sense_flags,
1393 				&softc->saved_ccb));
1394 }
1395 
1396 static void
1397 daprevent(struct cam_periph *periph, int action)
1398 {
1399 	struct	da_softc *softc;
1400 	union	ccb *ccb;
1401 	int	error;
1402 
1403 	softc = (struct da_softc *)periph->softc;
1404 
1405 	if (((action == PR_ALLOW)
1406 	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
1407 	 || ((action == PR_PREVENT)
1408 	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
1409 		return;
1410 	}
1411 
1412 	ccb = cam_periph_getccb(periph, /*priority*/1);
1413 
1414 	scsi_prevent(&ccb->csio,
1415 		     /*retries*/1,
1416 		     /*cbcfp*/dadone,
1417 		     MSG_SIMPLE_Q_TAG,
1418 		     action,
1419 		     SSD_FULL_SIZE,
1420 		     5000);
1421 
1422 	error = cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
1423 				  /*sense_flags*/0, &softc->device_stats);
1424 
1425 	if (error == 0) {
1426 		if (action == PR_ALLOW)
1427 			softc->flags &= ~DA_FLAG_PACK_LOCKED;
1428 		else
1429 			softc->flags |= DA_FLAG_PACK_LOCKED;
1430 	}
1431 
1432 	xpt_release_ccb(ccb);
1433 }
1434 
1435 static void
1436 dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data * rdcap)
1437 {
1438 	struct ccb_calc_geometry ccg;
1439 	struct da_softc *softc;
1440 	struct disk_params *dp;
1441 
1442 	softc = (struct da_softc *)periph->softc;
1443 
1444 	dp = &softc->params;
1445 	dp->secsize = scsi_4btoul(rdcap->length);
1446 	dp->sectors = scsi_4btoul(rdcap->addr) + 1;
1447 	/*
1448 	 * Have the controller provide us with a geometry
1449 	 * for this disk.  The only time the geometry
1450 	 * matters is when we boot and the controller
1451 	 * is the only one knowledgeable enough to come
1452 	 * up with something that will make this a bootable
1453 	 * device.
1454 	 */
1455 	xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
1456 	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
1457 	ccg.block_size = dp->secsize;
1458 	ccg.volume_size = dp->sectors;
1459 	ccg.heads = 0;
1460 	ccg.secs_per_track = 0;
1461 	ccg.cylinders = 0;
1462 	xpt_action((union ccb*)&ccg);
1463 	dp->heads = ccg.heads;
1464 	dp->secs_per_track = ccg.secs_per_track;
1465 	dp->cylinders = ccg.cylinders;
1466 }
1467 
1468 static void
1469 dasendorderedtag(void *arg)
1470 {
1471 	struct da_softc *softc;
1472 	int s;
1473 
1474 	for (softc = SLIST_FIRST(&softc_list);
1475 	     softc != NULL;
1476 	     softc = SLIST_NEXT(softc, links)) {
1477 		s = splsoftcam();
1478 		if ((softc->ordered_tag_count == 0)
1479 		 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
1480 			softc->flags |= DA_FLAG_NEED_OTAG;
1481 		}
1482 		if (softc->device_stats.busy_count > 0)
1483 			softc->flags &= ~DA_FLAG_WENT_IDLE;
1484 
1485 		softc->ordered_tag_count = 0;
1486 		splx(s);
1487 	}
1488 	/* Queue us up again */
1489 	timeout(dasendorderedtag, NULL,
1490 		(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
1491 }
1492 
1493 /*
1494  * Step through all DA peripheral drivers, and if the device is still open,
1495  * sync the disk cache to physical media.
1496  */
static void
dashutdown(int howto, void *arg)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	/* Walk every attached da unit. */
	for (periph = TAILQ_FIRST(&dadriver.units); periph != NULL;
	     periph = TAILQ_NEXT(periph, unit_links)) {
		union ccb ccb;
		softc = (struct da_softc *)periph->softc;

		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it..
		 */
		if (((softc->flags & DA_FLAG_OPEN) == 0)
		 || (softc->quirks & DA_Q_NO_SYNC_CACHE))
			continue;

		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);

		/* DA_CCB_DUMP makes dadone() a no-op since we poll below. */
		ccb.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&ccb.csio,
				       /*retries*/1,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0, /* whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 60 * 1000);

		/* Run the command to completion synchronously by polling
		 * rather than relying on interrupt-driven completion. */
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR)
			 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
				int error_code, sense_key, asc, ascq;

				scsi_extract_sense(&ccb.csio.sense_data,
						   &error_code, &sense_key,
						   &asc, &ascq);

				/* NOTE(review): ILLEGAL REQUEST is presumably
				 * a drive that doesn't support SYNCHRONIZE
				 * CACHE, so its sense is not printed. */
				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
					scsi_sense_print(&ccb.csio);
			} else {
				xpt_print_path(periph->path);
				printf("Synchronize cache failed, status "
				       "== 0x%x, scsi status == 0x%x\n",
				       ccb.ccb_h.status, ccb.csio.scsi_status);
			}
		}

		/* Thaw the device queue if the failed command froze it. */
		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);

	}
}
1559