1 /*
2  * Implementation of SCSI Direct Access Peripheral driver for CAM.
3  *
4  * Copyright (c) 1997 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #include "opt_hw_wdog.h"
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/buf.h>
37 #include <sys/devicestat.h>
38 #include <sys/conf.h>
39 #include <sys/disk.h>
40 #include <sys/eventhandler.h>
41 #include <sys/malloc.h>
42 #include <sys/cons.h>
43 
44 #include <machine/md_var.h>
45 
46 #include <vm/vm.h>
47 #include <vm/pmap.h>
48 
49 #include <cam/cam.h>
50 #include <cam/cam_ccb.h>
51 #include <cam/cam_extend.h>
52 #include <cam/cam_periph.h>
53 #include <cam/cam_xpt_periph.h>
54 
55 #include <cam/scsi/scsi_message.h>
56 
57 typedef enum {
58 	DA_STATE_PROBE,
59 	DA_STATE_NORMAL
60 } da_state;
61 
62 typedef enum {
63 	DA_FLAG_PACK_INVALID	= 0x001,
64 	DA_FLAG_NEW_PACK	= 0x002,
65 	DA_FLAG_PACK_LOCKED	= 0x004,
66 	DA_FLAG_PACK_REMOVABLE	= 0x008,
67 	DA_FLAG_TAGGED_QUEUING	= 0x010,
68 	DA_FLAG_NEED_OTAG	= 0x020,
69 	DA_FLAG_WENT_IDLE	= 0x040,
70 	DA_FLAG_RETRY_UA	= 0x080,
71 	DA_FLAG_OPEN		= 0x100
72 } da_flags;
73 
74 typedef enum {
75 	DA_Q_NONE		= 0x00,
76 	DA_Q_NO_SYNC_CACHE	= 0x01,
77 	DA_Q_NO_6_BYTE		= 0x02
78 } da_quirks;
79 
80 typedef enum {
81 	DA_CCB_PROBE		= 0x01,
82 	DA_CCB_BUFFER_IO	= 0x02,
83 	DA_CCB_WAITING		= 0x03,
84 	DA_CCB_DUMP		= 0x04,
85 	DA_CCB_TYPE_MASK	= 0x0F,
86 	DA_CCB_RETRY_UA		= 0x10
87 } da_ccb_state;
88 
89 /* Offsets into our private area for storing information */
90 #define ccb_state	ppriv_field0
91 #define ccb_bp		ppriv_ptr1
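
/*
 * The two macros above alias the peripheral-private fields of struct
 * ccb_hdr so that each outstanding CCB carries its da-specific state and
 * the bio it services: dastart() records the bio in ccb_bp and the command
 * type in ccb_state, and dadone() masks ccb_state with DA_CCB_TYPE_MASK to
 * dispatch completion handling.  DA_CCB_RETRY_UA is OR'd in as a modifier
 * when a unit attention is expected (e.g. after a bus reset).
 */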
92 
93 struct disk_params {
94 	u_int8_t  heads;
95 	u_int16_t cylinders;
96 	u_int8_t  secs_per_track;
97 	u_int32_t secsize;	/* Number of bytes/sector */
98 	u_int32_t secsize;	/* Number of bytes/sector */
99 	u_int32_t sectors;	/* Total number of sectors */
99 };
100 
101 struct da_softc {
102 	struct	 bio_queue_head bio_queue;
103 	struct	 devstat device_stats;
104 	SLIST_ENTRY(da_softc) links;
105 	LIST_HEAD(, ccb_hdr) pending_ccbs;
106 	da_state state;
107 	da_flags flags;
108 	da_quirks quirks;
109 	int	 minimum_cmd_size;
110 	int	 ordered_tag_count;
111 	struct	 disk_params params;
112 	struct	 disk disk;
113 	union	 ccb saved_ccb;
114 };
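
/*
 * Notes on the softc above: bio_queue holds requests that dastrategy()
 * has queued (via bioqdisksort()) but dastart() has not yet issued;
 * pending_ccbs tracks CCBs outstanding at the SIM so that daasync() can
 * mark them for unit attention retry after a bus reset; saved_ccb is
 * handed to cam_periph_error() in daerror() for use during error recovery.
 */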
115 
116 struct da_quirk_entry {
117 	struct scsi_inquiry_pattern inq_pat;
118 	da_quirks quirks;
119 };
120 
121 static struct da_quirk_entry da_quirk_table[] =
122 {
123 	{
124 		/*
125 		 * This particular Fujitsu drive doesn't like the
126 		 * synchronize cache command.
127 		 * Reported by: Tom Jackson <toj@gorilla.net>
128 		 */
129 		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
130 		/*quirks*/ DA_Q_NO_SYNC_CACHE
131 
132 	},
133 	{
134 		/*
135 		 * This drive doesn't like the synchronize cache command
136 		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
137 		 * in NetBSD PR kern/6027, August 24, 1998.
138 		 */
139 		{T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "2217*", "*"},
140 		/*quirks*/ DA_Q_NO_SYNC_CACHE
141 	},
142 	{
143 		/*
144 		 * This drive doesn't like the synchronize cache command
145 		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
146 		 * (PR 8882).
147 		 */
148 		{T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "2112*", "*"},
149 		/*quirks*/ DA_Q_NO_SYNC_CACHE
150 	},
151 	{
152 		/*
153 		 * Doesn't like the synchronize cache command.
154 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
155 		 */
156 		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
157 		/*quirks*/ DA_Q_NO_SYNC_CACHE
158 	},
159 	{
160 		/*
161 		 * Doesn't work correctly with 6 byte reads/writes.
162 		 * Returns illegal request, and points to byte 9 of the
163 		 * 6-byte CDB.
164 		 * Reported by:  Adam McDougall <bsdx@spawnet.com>
165 		 */
166 		{T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "VIKING 4*", "*"},
167 		/*quirks*/ DA_Q_NO_6_BYTE
168 	},
169 	{
170 		/*
171 		 * See above.
172 		 */
173 		{T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "VIKING 2*", "*"},
174 		/*quirks*/ DA_Q_NO_6_BYTE
175 	},
176 	{
177 		/*
178 		 * This USB floppy drive uses the UFI command set. This
179 		 * command set is a derivative of the ATAPI command set and
180 		 * does not support READ_6 commands, only READ_10. It also does
181 		 * not support sync cache (0x35).
182 		 */
183 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Y-E DATA", "USB-FDU", "*"},
184 		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
185 	}
186 };
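
/*
 * The table above is consulted once per device in daregister() through
 * cam_quirkmatch()/scsi_inquiry_match(); a matching entry supplies the
 * quirks for the new periph.  A new entry follows the same shape; the
 * vendor/product strings below are purely illustrative and do not name a
 * real device:
 *
 *	{
 *		{T_DIRECT, SIP_MEDIA_FIXED, "ACME", "EXAMPLE*", "*"},
 *		DA_Q_NO_SYNC_CACHE
 *	},
 */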
187 
188 static	d_open_t	daopen;
189 static	d_close_t	daclose;
190 static	d_strategy_t	dastrategy;
191 static	d_ioctl_t	daioctl;
192 static	d_dump_t	dadump;
193 static	periph_init_t	dainit;
194 static	void		daasync(void *callback_arg, u_int32_t code,
195 				struct cam_path *path, void *arg);
196 static	periph_ctor_t	daregister;
197 static	periph_dtor_t	dacleanup;
198 static	periph_start_t	dastart;
199 static	periph_oninv_t	daoninvalidate;
200 static	void		dadone(struct cam_periph *periph,
201 			       union ccb *done_ccb);
202 static  int		daerror(union ccb *ccb, u_int32_t cam_flags,
203 				u_int32_t sense_flags);
204 static void		daprevent(struct cam_periph *periph, int action);
205 static void		dasetgeom(struct cam_periph *periph,
206 				  struct scsi_read_capacity_data * rdcap);
207 static timeout_t	dasendorderedtag;
208 static void		dashutdown(void *arg, int howto);
209 
210 #ifndef DA_DEFAULT_TIMEOUT
211 #define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
212 #endif
213 
214 /*
215  * DA_ORDEREDTAG_INTERVAL determines how often, relative
216  * to the default timeout, we check to see whether an ordered
217  * tagged transaction is appropriate to prevent simple tag
218  * starvation.  Since we'd like to ensure that there is at least
219  * 1/2 of the timeout length left for a starved transaction to
220  * complete after we've sent an ordered tag, we must poll at least
221  * four times in every timeout period.  This takes care of the worst
222  * case where a starved transaction starts during an interval that
223  * passes the "don't send an ordered tag" test, so it takes
224  * us two intervals to determine that a tag must be sent.
225  */
226 #ifndef DA_ORDEREDTAG_INTERVAL
227 #define DA_ORDEREDTAG_INTERVAL 4
228 #endif
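
/*
 * With the defaults above (DA_DEFAULT_TIMEOUT of 60 seconds and
 * DA_ORDEREDTAG_INTERVAL of 4), dasendorderedtag() reschedules itself
 * every (60 * hz) / 4 ticks, i.e. every 15 seconds, so a device that has
 * stayed busy without issuing an ordered tag is flagged with
 * DA_FLAG_NEED_OTAG well within half of the command timeout.
 */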
229 
230 static struct periph_driver dadriver =
231 {
232 	dainit, "da",
233 	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
234 };
235 
236 DATA_SET(periphdriver_set, dadriver);
237 
238 #define DA_CDEV_MAJOR 13
239 #define DA_BDEV_MAJOR 4
240 
241 /* For 2.2-stable support */
242 #ifndef D_DISK
243 #define D_DISK 0
244 #endif
245 
246 static struct cdevsw da_cdevsw = {
247 	/* open */	daopen,
248 	/* close */	daclose,
249 	/* read */	physread,
250 	/* write */	physwrite,
251 	/* ioctl */	daioctl,
252 	/* poll */	nopoll,
253 	/* mmap */	nommap,
254 	/* strategy */	dastrategy,
255 	/* name */	"da",
256 	/* maj */	DA_CDEV_MAJOR,
257 	/* dump */	dadump,
258 	/* psize */	nopsize,
259 	/* flags */	D_DISK,
260 	/* bmaj */	DA_BDEV_MAJOR
261 };
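
/*
 * Reads and writes on the character device are routed through physio
 * (physread/physwrite) and arrive at dastrategy() as struct bio requests;
 * dadump() is the crash dump entry point.
 */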
262 
263 static struct cdevsw dadisk_cdevsw;
264 
265 static SLIST_HEAD(,da_softc) softc_list;
266 static struct extend_array *daperiphs;
267 
268 static int
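/*
 * Open the device: look up the periph for this unit, take the periph
 * lock, and issue a READ CAPACITY so that the in-core disklabel and the
 * devstat block size reflect the media actually present.  Removable media
 * is additionally locked in the drive with PREVENT ALLOW MEDIUM REMOVAL
 * (daprevent()).
 */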
269 daopen(dev_t dev, int flags, int fmt, struct proc *p)
270 {
271 	struct cam_periph *periph;
272 	struct da_softc *softc;
273 	struct disklabel *label;
274 	int unit;
275 	int part;
276 	int error;
277 	int s;
278 
279 	unit = dkunit(dev);
280 	part = dkpart(dev);
281 	periph = cam_extend_get(daperiphs, unit);
282 	if (periph == NULL)
283 		return (ENXIO);
284 
285 	softc = (struct da_softc *)periph->softc;
286 
287 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
288 	    ("daopen: dev=%s (unit %d, partition %d)\n", devtoname(dev),
289 	     unit, part));
290 
291 	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
292 		return (error); /* error code from tsleep */
293 	}
294 
295 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		cam_periph_unlock(periph);
		return (ENXIO);
	}
297 	softc->flags |= DA_FLAG_OPEN;
298 
299 	s = splsoftcam();
300 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
301 		/* Invalidate our pack information. */
302 		disk_invalidate(&softc->disk);
303 		softc->flags &= ~DA_FLAG_PACK_INVALID;
304 	}
305 	splx(s);
306 
307 	/* Do a read capacity */
308 	{
309 		struct scsi_read_capacity_data *rcap;
310 		union  ccb *ccb;
311 
312 		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
313 								M_TEMP,
314 								M_WAITOK);
315 
316 		ccb = cam_periph_getccb(periph, /*priority*/1);
317 		scsi_read_capacity(&ccb->csio,
318 				   /*retries*/1,
319 				   /*cbfcnp*/dadone,
320 				   MSG_SIMPLE_Q_TAG,
321 				   rcap,
322 				   SSD_FULL_SIZE,
323 				   /*timeout*/60000);
324 		ccb->ccb_h.ccb_bp = NULL;
325 
326 		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
327 					  /*sense_flags*/SF_RETRY_UA |
328 							 SF_RETRY_SELTO,
329 					  &softc->device_stats);
330 
331 		xpt_release_ccb(ccb);
332 
333 		if (error == 0) {
334 			dasetgeom(periph, rcap);
335 		}
336 
337 		free(rcap, M_TEMP);
338 	}
339 
340 	if (error == 0) {
341 		struct ccb_getdev cgd;
342 
343 		/* Build label for whole disk. */
344 		label = &softc->disk.d_label;
345 		bzero(label, sizeof(*label));
346 		label->d_type = DTYPE_SCSI;
347 
348 		/*
349 		 * Grab the inquiry data to get the vendor and product names.
350 		 * Put them in the typename and packname for the label.
351 		 */
352 		xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
353 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
354 		xpt_action((union ccb *)&cgd);
355 
356 		strncpy(label->d_typename, cgd.inq_data.vendor,
357 			min(SID_VENDOR_SIZE, sizeof(label->d_typename)));
358 		strncpy(label->d_packname, cgd.inq_data.product,
359 			min(SID_PRODUCT_SIZE, sizeof(label->d_packname)));
360 
361 		label->d_secsize = softc->params.secsize;
362 		label->d_nsectors = softc->params.secs_per_track;
363 		label->d_ntracks = softc->params.heads;
364 		label->d_ncylinders = softc->params.cylinders;
365 		label->d_secpercyl = softc->params.heads
366 				  * softc->params.secs_per_track;
367 		label->d_secperunit = softc->params.sectors;
368 
369 		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
370 			daprevent(periph, PR_PREVENT);
371 		}
372 
373 		/*
374 		 * Check to see whether or not the blocksize is set yet.
375 		 * If it isn't, set it and then clear the blocksize
376 		 * unavailable flag for the device statistics.
377 		 */
378 		if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
379 			softc->device_stats.block_size = softc->params.secsize;
380 			softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
381 		}
382 	}
383 
384 	if (error != 0) {
385 		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
386 			daprevent(periph, PR_ALLOW);
387 		}
388 	}
389 	cam_periph_unlock(periph);
390 	return (error);
391 }
392 
393 static int
394 daclose(dev_t dev, int flag, int fmt, struct proc *p)
395 {
396 	struct	cam_periph *periph;
397 	struct	da_softc *softc;
398 	int	unit;
399 	int	error;
400 
401 	unit = dkunit(dev);
402 	periph = cam_extend_get(daperiphs, unit);
403 	if (periph == NULL)
404 		return (ENXIO);
405 
406 	softc = (struct da_softc *)periph->softc;
407 
408 	if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
409 		return (error); /* error code from tsleep */
410 	}
411 
412 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
413 		union	ccb *ccb;
414 
415 		ccb = cam_periph_getccb(periph, /*priority*/1);
416 
417 		scsi_synchronize_cache(&ccb->csio,
418 				       /*retries*/1,
419 				       /*cbfcnp*/dadone,
420 				       MSG_SIMPLE_Q_TAG,
421 				       /*begin_lba*/0,/* Cover the whole disk */
422 				       /*lb_count*/0,
423 				       SSD_FULL_SIZE,
424 				       5 * 60 * 1000);
425 
426 		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
427 				  /*sense_flags*/SF_RETRY_UA,
428 				  &softc->device_stats);
429 
430 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
431 			if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
432 			     CAM_SCSI_STATUS_ERROR) {
433 				int asc, ascq;
434 				int sense_key, error_code;
435 
436 				scsi_extract_sense(&ccb->csio.sense_data,
437 						   &error_code,
438 						   &sense_key,
439 						   &asc, &ascq);
440 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
441 					scsi_sense_print(&ccb->csio);
442 			} else {
443 				xpt_print_path(periph->path);
444 				printf("Synchronize cache failed, status "
445 				       "== 0x%x, scsi status == 0x%x\n",
446 				       ccb->csio.ccb_h.status,
447 				       ccb->csio.scsi_status);
448 			}
449 		}
450 
451 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
452 			cam_release_devq(ccb->ccb_h.path,
453 					 /*relsim_flags*/0,
454 					 /*reduction*/0,
455 					 /*timeout*/0,
456 					 /*getcount_only*/0);
457 
458 		xpt_release_ccb(ccb);
459 
460 	}
461 
462 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
463 		daprevent(periph, PR_ALLOW);
464 		/*
465 		 * If we've got removable media, mark the blocksize as
466 		 * unavailable, since it could change when new media is
467 		 * inserted.
468 		 */
469 		softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
470 	}
471 
472 	softc->flags &= ~DA_FLAG_OPEN;
473 	cam_periph_unlock(periph);
474 	cam_periph_release(periph);
475 	return (0);
476 }
477 
478 /*
479  * Actually translate the requested transfer into one the physical driver
480  * can understand.  The transfer is described by a bio and will include
481  * only one physical transfer.
482  */
483 static void
484 dastrategy(struct bio *bp)
485 {
486 	struct cam_periph *periph;
487 	struct da_softc *softc;
488 	u_int  unit;
489 	u_int  part;
490 	int    s;
491 
492 	unit = dkunit(bp->bio_dev);
493 	part = dkpart(bp->bio_dev);
494 	periph = cam_extend_get(daperiphs, unit);
495 	if (periph == NULL) {
496 		bp->bio_error = ENXIO;
497 		goto bad;
498 	}
499 	softc = (struct da_softc *)periph->softc;
500 #if 0
501 	/*
502 	 * check it's not too big a transfer for our adapter
503 	 */
504 	scsi_minphys(bp,&sd_switch);
505 #endif
506 
507 	/*
508 	 * Mask interrupts so that the pack cannot be invalidated until
509 	 * after we are in the queue.  Otherwise, we might not properly
510 	 * clean up one of the buffers.
511 	 */
512 	s = splbio();
513 
514 	/*
515 	 * If the device has been made invalid, error out
516 	 */
517 	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
518 		splx(s);
519 		bp->bio_error = ENXIO;
520 		goto bad;
521 	}
522 
523 	/*
524 	 * Place it in the queue of disk activities for this disk
525 	 */
526 	bioqdisksort(&softc->bio_queue, bp);
527 
528 	splx(s);
529 
530 	/*
531 	 * Schedule ourselves for performing the work.
532 	 */
533 	xpt_schedule(periph, /* XXX priority */1);
534 
535 	return;
536 bad:
537 	bp->bio_flags |= BIO_ERROR;
538 
539 	/*
540 	 * Correctly set the buf to indicate a completed xfer
541 	 * Correctly set the bio to indicate a completed xfer
542 	bp->bio_resid = bp->bio_bcount;
543 	biodone(bp);
544 	return;
545 }
546 
547 /* For 2.2-stable support */
548 #ifndef ENOIOCTL
549 #define ENOIOCTL -1
550 #endif
551 
552 static int
553 daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
554 {
555 	struct cam_periph *periph;
556 	struct da_softc *softc;
557 	int unit;
558 	int error;
559 
560 	unit = dkunit(dev);
561 	periph = cam_extend_get(daperiphs, unit);
562 	if (periph == NULL)
563 		return (ENXIO);
564 
565 	softc = (struct da_softc *)periph->softc;
566 
567 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n"));
568 
569 	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
570 		return (error); /* error code from tsleep */
571 	}
572 
573 	error = cam_periph_ioctl(periph, cmd, addr, daerror);
574 
575 	cam_periph_unlock(periph);
576 
577 	return (error);
578 }
579 
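/*
 * Crash dump entry point.  The dump path runs polled, so every CCB is
 * pushed through xpt_polled_action(); memory is written one page at a
 * time through the CADDR1 mapping, and the run finishes with a
 * SYNCHRONIZE CACHE unless the device carries DA_Q_NO_SYNC_CACHE.
 */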
580 static int
581 dadump(dev_t dev)
582 {
583 	struct	    cam_periph *periph;
584 	struct	    da_softc *softc;
585 	u_int	    unit;
586 	u_int	    part;
587 	u_int	    secsize;
588 	u_int	    num;	/* number of sectors to write */
589 	u_int	    blknum;
590 	long	    blkcnt;
591 	vm_offset_t addr;
592 	struct	    ccb_scsiio csio;
593 	int	    error;
594 
595 	/* toss any characters present prior to dump */
596 	while (cncheckc() != -1)
597 		;
598 
599 	unit = dkunit(dev);
600 	part = dkpart(dev);
601 	periph = cam_extend_get(daperiphs, unit);
602 	if (periph == NULL) {
603 		return (ENXIO);
604 	}
605 	softc = (struct da_softc *)periph->softc;
606 
607 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0)
608 		return (ENXIO);
609 
610 	error = disk_dumpcheck(dev, &num, &blknum, &secsize);
611 	if (error)
612 		return (error);
613 
614 	addr = 0;	/* starting address */
615 	blkcnt = howmany(PAGE_SIZE, secsize);
616 
617 	while (num > 0) {
618 
619 		if (is_physical_memory(addr)) {
620 			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
621 				   trunc_page(addr), VM_PROT_READ, TRUE);
622 		} else {
623 			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
624 				   trunc_page(0), VM_PROT_READ, TRUE);
625 		}
626 
627 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
628 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
629 		scsi_read_write(&csio,
630 				/*retries*/1,
631 				dadone,
632 				MSG_ORDERED_Q_TAG,
633 				/*read*/FALSE,
634 				/*byte2*/0,
635 				/*minimum_cmd_size*/ softc->minimum_cmd_size,
636 				blknum,
637 				blkcnt,
638 				/*data_ptr*/CADDR1,
639 				/*dxfer_len*/blkcnt * secsize,
640 				/*sense_len*/SSD_FULL_SIZE,
641 				DA_DEFAULT_TIMEOUT * 1000);
642 		xpt_polled_action((union ccb *)&csio);
643 
644 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
645 			printf("Aborting dump due to I/O error.\n");
646 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
647 			     CAM_SCSI_STATUS_ERROR)
648 				scsi_sense_print(&csio);
649 			else
650 				printf("status == 0x%x, scsi status == 0x%x\n",
651 				       csio.ccb_h.status, csio.scsi_status);
652 			return(EIO);
653 		}
654 
655 		if (addr % (1024 * 1024) == 0) {
656 #ifdef	HW_WDOG
657 			if (wdog_tickler)
658 				(*wdog_tickler)();
659 #endif /* HW_WDOG */
660 			/* Count in MB of data left to write */
661 			printf("%d ", (num  * softc->params.secsize)
662 				     / (1024 * 1024));
663 		}
664 
665 		/* update block count */
666 		num -= blkcnt;
667 		blknum += blkcnt;
668 		addr += PAGE_SIZE;
669 
670 		/* operator aborting dump? */
671 		if (cncheckc() != -1)
672 			return (EINTR);
673 	}
674 
675 	/*
676 	 * Sync the disk cache contents to the physical media.
677 	 */
678 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
679 
680 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
681 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
682 		scsi_synchronize_cache(&csio,
683 				       /*retries*/1,
684 				       /*cbfcnp*/dadone,
685 				       MSG_SIMPLE_Q_TAG,
686 				       /*begin_lba*/0,/* Cover the whole disk */
687 				       /*lb_count*/0,
688 				       SSD_FULL_SIZE,
689 				       5 * 60 * 1000);
690 		xpt_polled_action((union ccb *)&csio);
691 
692 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
693 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
694 			     CAM_SCSI_STATUS_ERROR) {
695 				int asc, ascq;
696 				int sense_key, error_code;
697 
698 				scsi_extract_sense(&csio.sense_data,
699 						   &error_code,
700 						   &sense_key,
701 						   &asc, &ascq);
702 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
703 					scsi_sense_print(&csio);
704 			} else {
705 				xpt_print_path(periph->path);
706 				printf("Synchronize cache failed, status "
707 				       "== 0x%x, scsi status == 0x%x\n",
708 				       csio.ccb_h.status, csio.scsi_status);
709 			}
710 		}
711 	}
712 	return (0);
713 }
714 
715 static void
716 dainit(void)
717 {
718 	cam_status status;
719 	struct cam_path *path;
720 
721 	/*
722 	 * Create our extend array for storing the devices we attach to.
723 	 */
724 	daperiphs = cam_extend_new();
725 	SLIST_INIT(&softc_list);
726 	if (daperiphs == NULL) {
727 		printf("da: Failed to alloc extend array!\n");
728 		return;
729 	}
730 
731 	/*
732 	 * Install a global async callback.  This callback will
733 	 * receive async callbacks like "new device found".
734 	 */
735 	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
736 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
737 
738 	if (status == CAM_REQ_CMP) {
739 		struct ccb_setasync csa;
740 
741 		xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
742 		csa.ccb_h.func_code = XPT_SASYNC_CB;
743 		csa.event_enable = AC_FOUND_DEVICE;
744 		csa.callback = daasync;
745 		csa.callback_arg = NULL;
746 		xpt_action((union ccb *)&csa);
747 		status = csa.ccb_h.status;
748 		xpt_free_path(path);
749 	}
750 
751 	if (status != CAM_REQ_CMP) {
752 		printf("da: Failed to attach master async callback "
753 		       "due to status 0x%x!\n", status);
754 	} else {
755 
756 		/*
757 		 * Schedule a periodic event to occasionally send an
758 		 * ordered tag to a device.
759 		 */
760 		timeout(dasendorderedtag, NULL,
761 			(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
762 
763 		/* Register our shutdown event handler */
764 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
765 					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
766 		    printf("dainit: shutdown event registration failed!\n");
767 	}
768 }
769 
770 static void
771 daoninvalidate(struct cam_periph *periph)
772 {
773 	int s;
774 	struct da_softc *softc;
775 	struct bio *q_bp;
776 	struct ccb_setasync csa;
777 
778 	softc = (struct da_softc *)periph->softc;
779 
780 	/*
781 	 * De-register any async callbacks.
782 	 */
783 	xpt_setup_ccb(&csa.ccb_h, periph->path,
784 		      /* priority */ 5);
785 	csa.ccb_h.func_code = XPT_SASYNC_CB;
786 	csa.event_enable = 0;
787 	csa.callback = daasync;
788 	csa.callback_arg = periph;
789 	xpt_action((union ccb *)&csa);
790 
791 	softc->flags |= DA_FLAG_PACK_INVALID;
792 
793 	/*
794 	 * Although the oninvalidate() routines are always called at
795 	 * splsoftcam, we need to be at splbio() here to keep the buffer
796 	 * queue from being modified while we traverse it.
797 	 */
798 	s = splbio();
799 
800 	/*
801 	 * Return all queued I/O with ENXIO.
802 	 * XXX Handle any transactions queued to the card
803 	 *     with XPT_ABORT_CCB.
804 	 */
805 	while ((q_bp = bioq_first(&softc->bio_queue)) != NULL){
806 		bioq_remove(&softc->bio_queue, q_bp);
807 		q_bp->bio_resid = q_bp->bio_bcount;
808 		q_bp->bio_error = ENXIO;
809 		q_bp->bio_flags |= BIO_ERROR;
810 		biodone(q_bp);
811 	}
812 	splx(s);
813 
814 	SLIST_REMOVE(&softc_list, softc, da_softc, links);
815 
816 	xpt_print_path(periph->path);
817 	printf("lost device\n");
818 }
819 
820 static void
821 dacleanup(struct cam_periph *periph)
822 {
823 	struct da_softc *softc;
824 
825 	softc = (struct da_softc *)periph->softc;
826 
827 	devstat_remove_entry(&softc->device_stats);
828 	cam_extend_release(daperiphs, periph->unit_number);
829 	xpt_print_path(periph->path);
830 	printf("removing device entry\n");
831 	free(softc, M_DEVBUF);
832 }
833 
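/*
 * Async event callback.  The instance registered in dainit() watches for
 * AC_FOUND_DEVICE and attaches a new da periph to each direct access or
 * optical device; the per-periph instance registered in daregister()
 * responds to bus resets and BDRs by flagging outstanding CCBs so the
 * expected unit attention is retried rather than treated as an error.
 */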
834 static void
835 daasync(void *callback_arg, u_int32_t code,
836 	struct cam_path *path, void *arg)
837 {
838 	struct cam_periph *periph;
839 
840 	periph = (struct cam_periph *)callback_arg;
841 	switch (code) {
842 	case AC_FOUND_DEVICE:
843 	{
844 		struct ccb_getdev *cgd;
845 		cam_status status;
846 
847 		cgd = (struct ccb_getdev *)arg;
848 
849 		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
850 		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
851 			break;
852 
853 		/*
854 		 * Allocate a peripheral instance for
855 		 * this device and start the probe
856 		 * process.
857 		 */
858 		status = cam_periph_alloc(daregister, daoninvalidate,
859 					  dacleanup, dastart,
860 					  "da", CAM_PERIPH_BIO,
861 					  cgd->ccb_h.path, daasync,
862 					  AC_FOUND_DEVICE, cgd);
863 
864 		if (status != CAM_REQ_CMP
865 		 && status != CAM_REQ_INPROG)
866 			printf("daasync: Unable to attach to new device "
867 				"due to status 0x%x\n", status);
868 		break;
869 	}
870 	case AC_SENT_BDR:
871 	case AC_BUS_RESET:
872 	{
873 		struct da_softc *softc;
874 		struct ccb_hdr *ccbh;
875 		int s;
876 
877 		softc = (struct da_softc *)periph->softc;
878 		s = splsoftcam();
879 		/*
880 		 * Don't fail on the expected unit attention
881 		 * that will occur.
882 		 */
883 		softc->flags |= DA_FLAG_RETRY_UA;
884 		for (ccbh = LIST_FIRST(&softc->pending_ccbs);
885 		     ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
886 			ccbh->ccb_state |= DA_CCB_RETRY_UA;
887 		splx(s);
888 		/* FALLTHROUGH*/
889 	}
890 	default:
891 		cam_periph_async(periph, code, path, arg);
892 		break;
893 	}
894 }
895 
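/*
 * Periph constructor, invoked via cam_periph_alloc() when daasync() sees
 * AC_FOUND_DEVICE.  It allocates the softc, applies any matching quirk
 * table entry, registers devstat and disk entries, enables the per-periph
 * async callbacks, and schedules the initial probe; dadone() moves the
 * softc to DA_STATE_NORMAL once READ CAPACITY completes.
 */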
896 static cam_status
897 daregister(struct cam_periph *periph, void *arg)
898 {
899 	int s;
900 	struct da_softc *softc;
901 	struct ccb_setasync csa;
902 	struct ccb_getdev *cgd;
903 	caddr_t match;
904 
905 	cgd = (struct ccb_getdev *)arg;
906 	if (periph == NULL) {
907 		printf("daregister: periph was NULL!!\n");
908 		return(CAM_REQ_CMP_ERR);
909 	}
910 
911 	if (cgd == NULL) {
912 		printf("daregister: no getdev CCB, can't register device\n");
913 		return(CAM_REQ_CMP_ERR);
914 	}
915 
916 	softc = (struct da_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);
917 
918 	if (softc == NULL) {
919 		printf("daregister: Unable to probe new device. "
920 		       "Unable to allocate softc\n");
921 		return(CAM_REQ_CMP_ERR);
922 	}
923 
924 	bzero(softc, sizeof(*softc));
925 	LIST_INIT(&softc->pending_ccbs);
926 	softc->state = DA_STATE_PROBE;
927 	bioq_init(&softc->bio_queue);
928 	if (SID_IS_REMOVABLE(&cgd->inq_data))
929 		softc->flags |= DA_FLAG_PACK_REMOVABLE;
930 	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
931 		softc->flags |= DA_FLAG_TAGGED_QUEUING;
932 
933 	periph->softc = softc;
934 
935 	cam_extend_set(daperiphs, periph->unit_number, periph);
936 
937 	/*
938 	 * See if this device has any quirks.
939 	 */
940 	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
941 			       (caddr_t)da_quirk_table,
942 			       sizeof(da_quirk_table)/sizeof(*da_quirk_table),
943 			       sizeof(*da_quirk_table), scsi_inquiry_match);
944 
945 	if (match != NULL)
946 		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
947 	else
948 		softc->quirks = DA_Q_NONE;
949 
950 	if (softc->quirks & DA_Q_NO_6_BYTE)
951 		softc->minimum_cmd_size = 10;
952 	else
953 		softc->minimum_cmd_size = 6;
954 
955 	/*
956 	 * Block our timeout handler while we
957 	 * add this softc to the dev list.
958 	 */
959 	s = splsoftclock();
960 	SLIST_INSERT_HEAD(&softc_list, softc, links);
961 	splx(s);
962 
963 	/*
964 	 * The DA driver supports a blocksize, but
965 	 * we don't know the blocksize until we do
966 	 * a read capacity.  So, set a flag to
967 	 * indicate that the blocksize is
968 	 * unavailable right now.  We'll clear the
969 	 * flag as soon as we've done a read capacity.
970 	 */
971 	devstat_add_entry(&softc->device_stats, "da",
972 			  periph->unit_number, 0,
973 			  DEVSTAT_BS_UNAVAILABLE,
974 			  SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI,
975 			  DEVSTAT_PRIORITY_DISK);
976 
977 	/*
978 	 * Register this media as a disk
979 	 */
980 	disk_create(periph->unit_number, &softc->disk, 0,
981 	    &da_cdevsw, &dadisk_cdevsw);
982 
983 	/*
984 	 * Add async callbacks for bus reset and
985 	 * bus device reset calls.  I don't bother
986 	 * checking if this fails as, in most cases,
987 	 * the system will function just fine without
988 	 * them and the only alternative would be to
989 	 * not attach the device on failure.
990 	 */
991 	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
992 	csa.ccb_h.func_code = XPT_SASYNC_CB;
993 	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
994 	csa.callback = daasync;
995 	csa.callback_arg = periph;
996 	xpt_action((union ccb *)&csa);
997 	/*
998 	 * Lock this peripheral until we are setup.
999 	 * This first call can't block
1000 	 */
1001 	(void)cam_periph_lock(periph, PRIBIO);
1002 	xpt_schedule(periph, /*priority*/5);
1003 
1004 	return(CAM_REQ_CMP);
1005 }
1006 
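/*
 * Periph start routine, invoked by the XPT when a CCB is available after
 * xpt_schedule().  In DA_STATE_PROBE it issues the initial READ CAPACITY;
 * in DA_STATE_NORMAL it dequeues the next bio, converts it into a
 * READ/WRITE CCB (using an ordered tag when one is due), links the CCB
 * onto pending_ccbs and hands it to xpt_action().
 */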
1007 static void
1008 dastart(struct cam_periph *periph, union ccb *start_ccb)
1009 {
1010 	struct da_softc *softc;
1011 
1012 	softc = (struct da_softc *)periph->softc;
1013 
1014 
1015 	switch (softc->state) {
1016 	case DA_STATE_NORMAL:
1017 	{
1018 		/* Pull a buffer from the queue and get going on it */
1019 		struct bio *bp;
1020 		int s;
1021 
1022 		/*
1023 		 * See if there is a bio with work for us to do.
1024 		 */
1025 		s = splbio();
1026 		bp = bioq_first(&softc->bio_queue);
1027 		if (periph->immediate_priority <= periph->pinfo.priority) {
1028 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
1029 					("queuing for immediate ccb\n"));
1030 			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
1031 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1032 					  periph_links.sle);
1033 			periph->immediate_priority = CAM_PRIORITY_NONE;
1034 			splx(s);
1035 			wakeup(&periph->ccb_list);
1036 		} else if (bp == NULL) {
1037 			splx(s);
1038 			xpt_release_ccb(start_ccb);
1039 		} else {
1040 			int oldspl;
1041 			u_int8_t tag_code;
1042 
1043 			bioq_remove(&softc->bio_queue, bp);
1044 
1045 			devstat_start_transaction(&softc->device_stats);
1046 
1047 			if ((bp->bio_flags & BIO_ORDERED) != 0
1048 			 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
1049 				softc->flags &= ~DA_FLAG_NEED_OTAG;
1050 				softc->ordered_tag_count++;
1051 				tag_code = MSG_ORDERED_Q_TAG;
1052 			} else {
1053 				tag_code = MSG_SIMPLE_Q_TAG;
1054 			}
1055 			scsi_read_write(&start_ccb->csio,
1056 					/*retries*/4,
1057 					dadone,
1058 					tag_code,
1059 					bp->bio_cmd == BIO_READ,
1060 					/*byte2*/0,
1061 					softc->minimum_cmd_size,
1062 					bp->bio_pblkno,
1063 					bp->bio_bcount / softc->params.secsize,
1064 					bp->bio_data,
1065 					bp->bio_bcount,
1066 					/*sense_len*/SSD_FULL_SIZE,
1067 					DA_DEFAULT_TIMEOUT * 1000);
1068 			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
1069 
1070 			/*
1071 			 * Block out any asynchronous callbacks
1072 			 * while we touch the pending ccb list.
1073 			 */
1074 			oldspl = splcam();
1075 			LIST_INSERT_HEAD(&softc->pending_ccbs,
1076 					 &start_ccb->ccb_h, periph_links.le);
1077 			splx(oldspl);
1078 
1079 			/* We expect a unit attention from this device */
1080 			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
1081 				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
1082 				softc->flags &= ~DA_FLAG_RETRY_UA;
1083 			}
1084 
1085 			start_ccb->ccb_h.ccb_bp = bp;
1086 			bp = bioq_first(&softc->bio_queue);
1087 			splx(s);
1088 
1089 			xpt_action(start_ccb);
1090 		}
1091 
1092 		if (bp != NULL) {
1093 			/* Have more work to do, so ensure we stay scheduled */
1094 			xpt_schedule(periph, /* XXX priority */1);
1095 		}
1096 		break;
1097 	}
1098 	case DA_STATE_PROBE:
1099 	{
1100 		struct ccb_scsiio *csio;
1101 		struct scsi_read_capacity_data *rcap;
1102 
1103 		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
1104 								M_TEMP,
1105 								M_NOWAIT);
1106 		if (rcap == NULL) {
1107 			printf("dastart: Couldn't malloc read_capacity data\n");
1108 			/* da_free_periph??? */
1109 			break;
1110 		}
1111 		csio = &start_ccb->csio;
1112 		scsi_read_capacity(csio,
1113 				   /*retries*/4,
1114 				   dadone,
1115 				   MSG_SIMPLE_Q_TAG,
1116 				   rcap,
1117 				   SSD_FULL_SIZE,
1118 				   /*timeout*/5000);
1119 		start_ccb->ccb_h.ccb_bp = NULL;
1120 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
1121 		xpt_action(start_ccb);
1122 		break;
1123 	}
1124 	}
1125 }
1126 
1127 
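/*
 * Command completion.  DA_CCB_BUFFER_IO completions finish the associated
 * bio; on a hard error the remaining queued bios are failed with EIO and
 * ENXIO marks the pack invalid.  DA_CCB_PROBE completions record the
 * geometry, announce the drive and switch to DA_STATE_NORMAL.
 * DA_CCB_WAITING wakes the thread waiting for an immediate CCB, and
 * DA_CCB_DUMP is a no-op because dumps poll for completion.
 */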
1128 static void
1129 dadone(struct cam_periph *periph, union ccb *done_ccb)
1130 {
1131 	struct da_softc *softc;
1132 	struct ccb_scsiio *csio;
1133 
1134 	softc = (struct da_softc *)periph->softc;
1135 	csio = &done_ccb->csio;
1136 	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
1137 	case DA_CCB_BUFFER_IO:
1138 	{
1139 		struct bio *bp;
1140 		int    oldspl;
1141 
1142 		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
1143 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1144 			int error;
1145 			int s;
1146 			int sf;
1147 
1148 			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
1149 				sf = SF_RETRY_UA;
1150 			else
1151 				sf = 0;
1152 
1153 			/* Retry selection timeouts */
1154 			sf |= SF_RETRY_SELTO;
1155 
1156 			if ((error = daerror(done_ccb, 0, sf)) == ERESTART) {
1157 				/*
1158 				 * A retry was scheduled, so
1159 				 * just return.
1160 				 */
1161 				return;
1162 			}
1163 			if (error != 0) {
1164 				struct bio *q_bp;
1165 
1166 				s = splbio();
1167 
1168 				if (error == ENXIO) {
1169 					/*
1170 					 * Catastrophic error.  Mark our pack as
1171 					 * invalid.
1172 					 */
1173 					/* XXX See if this is really a media
1174 					 *     change first.
1175 					 */
1176 					xpt_print_path(periph->path);
1177 					printf("Invalidating pack\n");
1178 					softc->flags |= DA_FLAG_PACK_INVALID;
1179 				}
1180 
1181 				/*
1182 				 * return all queued I/O with EIO, so that
1183 				 * the client can retry these I/Os in the
1184 				 * proper order should it attempt to recover.
1185 				 */
1186 				while ((q_bp = bioq_first(&softc->bio_queue))
1187 					!= NULL) {
1188 					bioq_remove(&softc->bio_queue, q_bp);
1189 					q_bp->bio_resid = q_bp->bio_bcount;
1190 					q_bp->bio_error = EIO;
1191 					q_bp->bio_flags |= BIO_ERROR;
1192 					biodone(q_bp);
1193 				}
1194 				splx(s);
1195 				bp->bio_error = error;
1196 				bp->bio_resid = bp->bio_bcount;
1197 				bp->bio_flags |= BIO_ERROR;
1198 			} else {
1199 				bp->bio_resid = csio->resid;
1200 				bp->bio_error = 0;
1201 				if (bp->bio_resid != 0) {
1202 					/* Short transfer ??? */
1203 					bp->bio_flags |= BIO_ERROR;
1204 				}
1205 			}
1206 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1207 				cam_release_devq(done_ccb->ccb_h.path,
1208 						 /*relsim_flags*/0,
1209 						 /*reduction*/0,
1210 						 /*timeout*/0,
1211 						 /*getcount_only*/0);
1212 		} else {
1213 			bp->bio_resid = csio->resid;
1214 			if (csio->resid > 0)
1215 				bp->bio_flags |= BIO_ERROR;
1216 		}
1217 
1218 		/*
1219 		 * Block out any asynchronous callbacks
1220 		 * while we touch the pending ccb list.
1221 		 */
1222 		oldspl = splcam();
1223 		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
1224 		splx(oldspl);
1225 
1226 		if (softc->device_stats.busy_count == 0)
1227 			softc->flags |= DA_FLAG_WENT_IDLE;
1228 
1229 		devstat_end_transaction_bio(&softc->device_stats, bp);
1230 		biodone(bp);
1231 		break;
1232 	}
1233 	case DA_CCB_PROBE:
1234 	{
1235 		struct	   scsi_read_capacity_data *rdcap;
1236 		char	   announce_buf[80];
1237 
1238 		rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;
1239 
1240 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1241 			struct disk_params *dp;
1242 
1243 			dasetgeom(periph, rdcap);
1244 			dp = &softc->params;
1245 			snprintf(announce_buf, sizeof(announce_buf),
1246 			        "%luMB (%u %u byte sectors: %dH %dS/T %dC)",
1247 				(unsigned long) (((u_int64_t)dp->secsize *
1248 				dp->sectors) / (1024*1024)), dp->sectors,
1249 				dp->secsize, dp->heads, dp->secs_per_track,
1250 				dp->cylinders);
1251 		} else {
1252 			int	error;
1253 
1254 			announce_buf[0] = '\0';
1255 
1256 			/*
1257 			 * Retry any UNIT ATTENTION type errors.  They
1258 			 * are expected at boot.
1259 			 */
1260 			error = daerror(done_ccb, 0, SF_RETRY_UA |
1261 					SF_RETRY_SELTO | SF_NO_PRINT);
1262 			if (error == ERESTART) {
1263 				/*
1264 				 * A retry was scheduled, so
1265 				 * just return.
1266 				 */
1267 				return;
1268 			} else if (error != 0) {
1269 				struct scsi_sense_data *sense;
1270 				int asc, ascq;
1271 				int sense_key, error_code;
1272 				int have_sense;
1273 				cam_status status;
1274 				struct ccb_getdev cgd;
1275 
1276 				/* Don't wedge this device's queue */
1277 				cam_release_devq(done_ccb->ccb_h.path,
1278 						 /*relsim_flags*/0,
1279 						 /*reduction*/0,
1280 						 /*timeout*/0,
1281 						 /*getcount_only*/0);
1282 
1283 				status = done_ccb->ccb_h.status;
1284 
1285 				xpt_setup_ccb(&cgd.ccb_h,
1286 					      done_ccb->ccb_h.path,
1287 					      /* priority */ 1);
1288 				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1289 				xpt_action((union ccb *)&cgd);
1290 
1291 				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
1292 				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
1293 				 || ((status & CAM_AUTOSNS_VALID) == 0))
1294 					have_sense = FALSE;
1295 				else
1296 					have_sense = TRUE;
1297 
1298 				if (have_sense) {
1299 					sense = &csio->sense_data;
1300 					scsi_extract_sense(sense, &error_code,
1301 							   &sense_key,
1302 							   &asc, &ascq);
1303 				}
1304 				/*
1305 				 * Attach to anything that claims to be a
1306 				 * direct access or optical disk device,
1307 				 * as long as it doesn't return a "Logical
1308 				 * unit not supported" (0x25) error.
1309 				 */
1310 				if ((have_sense) && (asc != 0x25)
1311 				 && (error_code == SSD_CURRENT_ERROR))
1312 					snprintf(announce_buf,
1313 					    sizeof(announce_buf),
1314 						"Attempt to query device "
1315 						"size failed: %s, %s",
1316 						scsi_sense_key_text[sense_key],
1317 						scsi_sense_desc(asc,ascq,
1318 								&cgd.inq_data));
1319 				else {
1320 					if (have_sense)
1321 						scsi_sense_print(
1322 							&done_ccb->csio);
1323 					else {
1324 						xpt_print_path(periph->path);
1325 						printf("got CAM status %#x\n",
1326 						       done_ccb->ccb_h.status);
1327 					}
1328 
1329 					xpt_print_path(periph->path);
1330 					printf("fatal error, failed"
1331 					       " to attach to device\n");
1332 
1333 					/*
1334 					 * Free up resources.
1335 					 */
1336 					cam_periph_invalidate(periph);
1337 				}
1338 			}
1339 		}
1340 		free(rdcap, M_TEMP);
1341 		if (announce_buf[0] != '\0')
1342 			xpt_announce_periph(periph, announce_buf);
1343 		softc->state = DA_STATE_NORMAL;
1344 		/*
1345 		 * Since our peripheral may be invalidated by an error
1346 		 * above or an external event, we must release our CCB
1347 		 * before releasing the probe lock on the peripheral.
1348 		 * The peripheral will only go away once the last lock
1349 		 * is removed, and we need it around for the CCB release
1350 		 * operation.
1351 		 */
1352 		xpt_release_ccb(done_ccb);
1353 		cam_periph_unlock(periph);
1354 		return;
1355 	}
1356 	case DA_CCB_WAITING:
1357 	{
1358 		/* Caller will release the CCB */
1359 		wakeup(&done_ccb->ccb_h.cbfcnp);
1360 		return;
1361 	}
1362 	case DA_CCB_DUMP:
1363 		/* No-op.  We're polling */
1364 		return;
1365 	default:
1366 		break;
1367 	}
1368 	xpt_release_ccb(done_ccb);
1369 }
1370 
1371 static int
1372 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1373 {
1374 	struct da_softc	  *softc;
1375 	struct cam_periph *periph;
1376 
1377 	periph = xpt_path_periph(ccb->ccb_h.path);
1378 	softc = (struct da_softc *)periph->softc;
1379 
1380 	/*
1381 	 * XXX
1382 	 * Until we have a better way of doing pack validation,
1383 	 * don't treat UAs as errors.
1384 	 */
1385 	sense_flags |= SF_RETRY_UA;
1386 	return(cam_periph_error(ccb, cam_flags, sense_flags,
1387 				&softc->saved_ccb));
1388 }
1389 
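/*
 * Issue PREVENT ALLOW MEDIUM REMOVAL, tracking the result in
 * DA_FLAG_PACK_LOCKED so that redundant requests are skipped.  daopen()
 * uses PR_PREVENT to lock removable media in the drive; daclose()
 * releases it again with PR_ALLOW.
 */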
1390 static void
1391 daprevent(struct cam_periph *periph, int action)
1392 {
1393 	struct	da_softc *softc;
1394 	union	ccb *ccb;
1395 	int	error;
1396 
1397 	softc = (struct da_softc *)periph->softc;
1398 
1399 	if (((action == PR_ALLOW)
1400 	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
1401 	 || ((action == PR_PREVENT)
1402 	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
1403 		return;
1404 	}
1405 
1406 	ccb = cam_periph_getccb(periph, /*priority*/1);
1407 
1408 	scsi_prevent(&ccb->csio,
1409 		     /*retries*/1,
1410 		     /*cbfcnp*/dadone,
1411 		     MSG_SIMPLE_Q_TAG,
1412 		     action,
1413 		     SSD_FULL_SIZE,
1414 		     5000);
1415 
1416 	error = cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
1417 				  /*sense_flags*/0, &softc->device_stats);
1418 
1419 	if (error == 0) {
1420 		if (action == PR_ALLOW)
1421 			softc->flags &= ~DA_FLAG_PACK_LOCKED;
1422 		else
1423 			softc->flags |= DA_FLAG_PACK_LOCKED;
1424 	}
1425 
1426 	xpt_release_ccb(ccb);
1427 }
1428 
1429 static void
1430 dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data * rdcap)
1431 {
1432 	struct ccb_calc_geometry ccg;
1433 	struct da_softc *softc;
1434 	struct disk_params *dp;
1435 
1436 	softc = (struct da_softc *)periph->softc;
1437 
1438 	dp = &softc->params;
1439 	dp->secsize = scsi_4btoul(rdcap->length);
1440 	dp->sectors = scsi_4btoul(rdcap->addr) + 1;
1441 	/*
1442 	 * Have the controller provide us with a geometry
1443 	 * for this disk.  The only time the geometry
1444 	 * matters is when we boot and the controller
1445 	 * is the only one knowledgeable enough to come
1446 	 * up with something that will make this a bootable
1447 	 * device.
1448 	 */
1449 	xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
1450 	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
1451 	ccg.block_size = dp->secsize;
1452 	ccg.volume_size = dp->sectors;
1453 	ccg.heads = 0;
1454 	ccg.secs_per_track = 0;
1455 	ccg.cylinders = 0;
1456 	xpt_action((union ccb*)&ccg);
1457 	dp->heads = ccg.heads;
1458 	dp->secs_per_track = ccg.secs_per_track;
1459 	dp->cylinders = ccg.cylinders;
1460 }
1461 
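/*
 * Periodic timeout (see DA_ORDEREDTAG_INTERVAL above).  Any device that
 * stayed busy through a whole interval without issuing an ordered tag is
 * flagged with DA_FLAG_NEED_OTAG, which makes dastart() send its next
 * request with MSG_ORDERED_Q_TAG to break potential simple tag
 * starvation.
 */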
1462 static void
1463 dasendorderedtag(void *arg)
1464 {
1465 	struct da_softc *softc;
1466 	int s;
1467 
1468 	for (softc = SLIST_FIRST(&softc_list);
1469 	     softc != NULL;
1470 	     softc = SLIST_NEXT(softc, links)) {
1471 		s = splsoftcam();
1472 		if ((softc->ordered_tag_count == 0)
1473 		 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
1474 			softc->flags |= DA_FLAG_NEED_OTAG;
1475 		}
1476 		if (softc->device_stats.busy_count > 0)
1477 			softc->flags &= ~DA_FLAG_WENT_IDLE;
1478 
1479 		softc->ordered_tag_count = 0;
1480 		splx(s);
1481 	}
1482 	/* Queue us up again */
1483 	timeout(dasendorderedtag, NULL,
1484 		(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
1485 }
1486 
1487 /*
1488  * Step through all DA peripheral drivers, and if the device is still open,
1489  * sync the disk cache to physical media.
1490  */
1491 static void
1492 dashutdown(void * arg, int howto)
1493 {
1494 	struct cam_periph *periph;
1495 	struct da_softc *softc;
1496 
1497 	for (periph = TAILQ_FIRST(&dadriver.units); periph != NULL;
1498 	     periph = TAILQ_NEXT(periph, unit_links)) {
1499 		union ccb ccb;
1500 		softc = (struct da_softc *)periph->softc;
1501 
1502 		/*
1503 		 * We only sync the cache if the drive is still open, and
1504 		 * if the drive is capable of it.
1505 		 */
1506 		if (((softc->flags & DA_FLAG_OPEN) == 0)
1507 		 || (softc->quirks & DA_Q_NO_SYNC_CACHE))
1508 			continue;
1509 
1510 		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
1511 
1512 		ccb.ccb_h.ccb_state = DA_CCB_DUMP;
1513 		scsi_synchronize_cache(&ccb.csio,
1514 				       /*retries*/1,
1515 				       /*cbfcnp*/dadone,
1516 				       MSG_SIMPLE_Q_TAG,
1517 				       /*begin_lba*/0, /* whole disk */
1518 				       /*lb_count*/0,
1519 				       SSD_FULL_SIZE,
1520 				       5 * 60 * 1000);
1521 
1522 		xpt_polled_action(&ccb);
1523 
1524 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1525 			if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
1526 			     CAM_SCSI_STATUS_ERROR)
1527 			 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
1528 				int error_code, sense_key, asc, ascq;
1529 
1530 				scsi_extract_sense(&ccb.csio.sense_data,
1531 						   &error_code, &sense_key,
1532 						   &asc, &ascq);
1533 
1534 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
1535 					scsi_sense_print(&ccb.csio);
1536 			} else {
1537 				xpt_print_path(periph->path);
1538 				printf("Synchronize cache failed, status "
1539 				       "== 0x%x, scsi status == 0x%x\n",
1540 				       ccb.ccb_h.status, ccb.csio.scsi_status);
1541 			}
1542 		}
1543 
1544 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1545 			cam_release_devq(ccb.ccb_h.path,
1546 					 /*relsim_flags*/0,
1547 					 /*reduction*/0,
1548 					 /*timeout*/0,
1549 					 /*getcount_only*/0);
1550 
1551 	}
1552 }
1553