xref: /freebsd/sys/cam/scsi/scsi_da.c (revision 7f9d26bd9d1b2754da8429257edbde0a8237f84f)
1 /*
2  * Implementation of SCSI Direct Access Peripheral driver for CAM.
3  *
4  * Copyright (c) 1997 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #include "opt_hw_wdog.h"
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/buf.h>
37 #include <sys/devicestat.h>
38 #include <sys/conf.h>
39 #include <sys/disk.h>
40 #include <sys/eventhandler.h>
41 #include <sys/malloc.h>
42 #include <sys/cons.h>
43 
44 #include <machine/md_var.h>
45 
46 #include <vm/vm.h>
47 #include <vm/pmap.h>
48 
49 #include <cam/cam.h>
50 #include <cam/cam_ccb.h>
51 #include <cam/cam_extend.h>
52 #include <cam/cam_periph.h>
53 #include <cam/cam_xpt_periph.h>
54 
55 #include <cam/scsi/scsi_message.h>
56 
57 typedef enum {
58 	DA_STATE_PROBE,
59 	DA_STATE_NORMAL
60 } da_state;
61 
62 typedef enum {
63 	DA_FLAG_PACK_INVALID	= 0x001,
64 	DA_FLAG_NEW_PACK	= 0x002,
65 	DA_FLAG_PACK_LOCKED	= 0x004,
66 	DA_FLAG_PACK_REMOVABLE	= 0x008,
67 	DA_FLAG_TAGGED_QUEUING	= 0x010,
68 	DA_FLAG_NEED_OTAG	= 0x020,
69 	DA_FLAG_WENT_IDLE	= 0x040,
70 	DA_FLAG_RETRY_UA	= 0x080,
71 	DA_FLAG_OPEN		= 0x100
72 } da_flags;
73 
74 typedef enum {
75 	DA_Q_NONE		= 0x00,
76 	DA_Q_NO_SYNC_CACHE	= 0x01,
77 	DA_Q_NO_6_BYTE		= 0x02
78 } da_quirks;
79 
80 typedef enum {
81 	DA_CCB_PROBE		= 0x01,
82 	DA_CCB_BUFFER_IO	= 0x02,
83 	DA_CCB_WAITING		= 0x03,
84 	DA_CCB_DUMP		= 0x04,
85 	DA_CCB_TYPE_MASK	= 0x0F,
86 	DA_CCB_RETRY_UA		= 0x10
87 } da_ccb_state;
88 
89 /* Offsets into our private area for storing information */
90 #define ccb_state	ppriv_field0
91 #define ccb_bp		ppriv_ptr1
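/*
 * These map onto the peripheral-private fields of struct ccb_hdr.  For
 * example, buffer I/O issued from dastart() is tagged and later decoded
 * in dadone() roughly as follows:
 *
 *	start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
 *	start_ccb->ccb_h.ccb_bp = bp;
 *	...
 *	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) { ... }
 */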
92 
93 struct disk_params {
94 	u_int8_t  heads;
95 	u_int16_t cylinders;
96 	u_int8_t  secs_per_track;
97 	u_int32_t secsize;	/* Number of bytes/sector */
98 	u_int32_t sectors;	/* total number sectors */
99 };
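/*
 * Capacity in bytes is secsize * sectors; dadone() reports it in MB when
 * announcing the device.  Illustrative numbers only: a disk with 512-byte
 * sectors and 4194304 sectors would announce as 2048MB.
 */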
100 
101 struct da_softc {
102 	struct	 buf_queue_head buf_queue;
103 	struct	 devstat device_stats;
104 	SLIST_ENTRY(da_softc) links;
105 	LIST_HEAD(, ccb_hdr) pending_ccbs;
106 	da_state state;
107 	da_flags flags;
108 	da_quirks quirks;
109 	int	 minimum_cmd_size;
110 	int	 ordered_tag_count;
111 	struct	 disk_params params;
112 	struct	 disk disk;
113 	union	 ccb saved_ccb;
114 };
115 
116 struct da_quirk_entry {
117 	struct scsi_inquiry_pattern inq_pat;
118 	da_quirks quirks;
119 };
120 
121 static struct da_quirk_entry da_quirk_table[] =
122 {
123 	{
124 		/*
125 		 * This particular Fujitsu drive doesn't like the
126 		 * synchronize cache command.
127 		 * Reported by: Tom Jackson <toj@gorilla.net>
128 		 */
129 		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
130 		/*quirks*/ DA_Q_NO_SYNC_CACHE
131 
132 	},
133 	{
134 		/*
135 		 * This drive doesn't like the synchronize cache command
136 		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
137 		 * in NetBSD PR kern/6027, August 24, 1998.
138 		 */
139 		{T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "2217*", "*"},
140 		/*quirks*/ DA_Q_NO_SYNC_CACHE
141 	},
142 	{
143 		/*
144 		 * This drive doesn't like the synchronize cache command
145 		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
146 		 * (PR 8882).
147 		 */
148 		{T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "2112*", "*"},
149 		/*quirks*/ DA_Q_NO_SYNC_CACHE
150 	},
151 	{
152 		/*
153 		 * Doesn't like the synchronize cache command.
154 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
155 		 */
156 		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
157 		/*quirks*/ DA_Q_NO_SYNC_CACHE
158 	},
159 	{
160 		/*
161 		 * Doesn't work correctly with 6 byte reads/writes.
162 		 * Returns illegal request, and points to byte 9 of the
163 		 * 6-byte CDB.
164 		 * Reported by:  Adam McDougall <bsdx@spawnet.com>
165 		 */
166 		{T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "VIKING 4*", "*"},
167 		/*quirks*/ DA_Q_NO_6_BYTE
168 	},
169 	{
170 		/*
171 		 * See above.
172 		 */
173 		{T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "VIKING 2*", "*"},
174 		/*quirks*/ DA_Q_NO_6_BYTE
175 	}
176 };
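/*
 * daregister() walks this table with cam_quirkmatch()/scsi_inquiry_match(),
 * comparing each inq_pat against the device's INQUIRY data; a matching
 * entry supplies softc->quirks, otherwise DA_Q_NONE is used.  A
 * hypothetical entry for another drive that can't handle SYNCHRONIZE CACHE
 * would look like:
 *
 *	{
 *		{T_DIRECT, SIP_MEDIA_FIXED, "VENDOR", "MODEL*", "*"},
 *		DA_Q_NO_SYNC_CACHE
 *	}
 */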
177 
178 static	d_open_t	daopen;
179 static	d_close_t	daclose;
180 static	d_strategy_t	dastrategy;
181 static	d_ioctl_t	daioctl;
182 static	d_dump_t	dadump;
183 static	periph_init_t	dainit;
184 static	void		daasync(void *callback_arg, u_int32_t code,
185 				struct cam_path *path, void *arg);
186 static	periph_ctor_t	daregister;
187 static	periph_dtor_t	dacleanup;
188 static	periph_start_t	dastart;
189 static	periph_oninv_t	daoninvalidate;
190 static	void		dadone(struct cam_periph *periph,
191 			       union ccb *done_ccb);
192 static  int		daerror(union ccb *ccb, u_int32_t cam_flags,
193 				u_int32_t sense_flags);
194 static void		daprevent(struct cam_periph *periph, int action);
195 static void		dasetgeom(struct cam_periph *periph,
196 				  struct scsi_read_capacity_data * rdcap);
197 static timeout_t	dasendorderedtag;
198 static void		dashutdown(void *arg, int howto);
199 
200 #ifndef DA_DEFAULT_TIMEOUT
201 #define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
202 #endif
203 
204 /*
205  * DA_ORDEREDTAG_INTERVAL determines how often, relative
206  * to the default timeout, we check to see whether an ordered
207  * tagged transaction is appropriate to prevent simple tag
208  * starvation.  Since we'd like to ensure that there is at least
209  * 1/2 of the timeout length left for a starved transaction to
210  * complete after we've sent an ordered tag, we must poll at least
211  * four times in every timeout period.  This takes care of the worst
212  * case where a starved transaction starts during an interval that
213  * passes the "don't send an ordered tag" test, so it takes
214  * us two intervals to determine that a tag must be sent.
215  */
216 #ifndef DA_ORDEREDTAG_INTERVAL
217 #define DA_ORDEREDTAG_INTERVAL 4
218 #endif
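/*
 * Worked example with the defaults above: DA_DEFAULT_TIMEOUT is 60 seconds
 * and DA_ORDEREDTAG_INTERVAL is 4, so dasendorderedtag() runs every
 * 60/4 = 15 seconds.  In the worst case a starved transaction is only
 * recognized on the second interval (30 seconds in), which still leaves
 * 30 seconds, half the timeout, for it to complete once the ordered tag
 * has been sent.
 */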
219 
220 static struct periph_driver dadriver =
221 {
222 	dainit, "da",
223 	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
224 };
225 
226 DATA_SET(periphdriver_set, dadriver);
227 
228 #define DA_CDEV_MAJOR 13
229 #define DA_BDEV_MAJOR 4
230 
231 /* For 2.2-stable support */
232 #ifndef D_DISK
233 #define D_DISK 0
234 #endif
235 
236 static struct cdevsw da_cdevsw = {
237 	/* open */	daopen,
238 	/* close */	daclose,
239 	/* read */	physread,
240 	/* write */	physwrite,
241 	/* ioctl */	daioctl,
242 	/* poll */	nopoll,
243 	/* mmap */	nommap,
244 	/* strategy */	dastrategy,
245 	/* name */	"da",
246 	/* maj */	DA_CDEV_MAJOR,
247 	/* dump */	dadump,
248 	/* psize */	nopsize,
249 	/* flags */	D_DISK,
250 	/* bmaj */	DA_BDEV_MAJOR
251 };
252 
253 static struct cdevsw dadisk_cdevsw;
254 
255 static SLIST_HEAD(,da_softc) softc_list;
256 static struct extend_array *daperiphs;
257 
258 static int
259 daopen(dev_t dev, int flags, int fmt, struct proc *p)
260 {
261 	struct cam_periph *periph;
262 	struct da_softc *softc;
263 	struct disklabel *label;
264 	int unit;
265 	int part;
266 	int error;
267 	int s;
268 
269 	unit = dkunit(dev);
270 	part = dkpart(dev);
271 	periph = cam_extend_get(daperiphs, unit);
272 	if (periph == NULL)
273 		return (ENXIO);
274 
275 	softc = (struct da_softc *)periph->softc;
276 
277 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
278 	    ("daopen: dev=%s (unit %d, partition %d)\n", devtoname(dev),
279 	     unit, part));
280 
281 	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
282 		return (error); /* error code from tsleep */
283 	}
284 
285 	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
286 		return(ENXIO);
287 	softc->flags |= DA_FLAG_OPEN;
288 
289 	s = splsoftcam();
290 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
291 		/* Invalidate our pack information. */
292 		disk_invalidate(&softc->disk);
293 		softc->flags &= ~DA_FLAG_PACK_INVALID;
294 	}
295 	splx(s);
296 
297 	/* Do a read capacity */
298 	{
299 		struct scsi_read_capacity_data *rcap;
300 		union  ccb *ccb;
301 
302 		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
303 								M_TEMP,
304 								M_WAITOK);
305 
306 		ccb = cam_periph_getccb(periph, /*priority*/1);
307 		scsi_read_capacity(&ccb->csio,
308 				   /*retries*/1,
309 				   /*cbfncp*/dadone,
310 				   MSG_SIMPLE_Q_TAG,
311 				   rcap,
312 				   SSD_FULL_SIZE,
313 				   /*timeout*/60000);
314 		ccb->ccb_h.ccb_bp = NULL;
315 
316 		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
317 					  /*sense_flags*/SF_RETRY_UA |
318 							 SF_RETRY_SELTO,
319 					  &softc->device_stats);
320 
321 		xpt_release_ccb(ccb);
322 
323 		if (error == 0) {
324 			dasetgeom(periph, rcap);
325 		}
326 
327 		free(rcap, M_TEMP);
328 	}
329 
330 	if (error == 0) {
331 		struct ccb_getdev cgd;
332 
333 		/* Build label for whole disk. */
334 		label = &softc->disk.d_label;
335 		bzero(label, sizeof(*label));
336 		label->d_type = DTYPE_SCSI;
337 
338 		/*
339 		 * Grab the inquiry data to get the vendor and product names.
340 		 * Put them in the typename and packname for the label.
341 		 */
342 		xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
343 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
344 		xpt_action((union ccb *)&cgd);
345 
346 		strncpy(label->d_typename, cgd.inq_data.vendor,
347 			min(SID_VENDOR_SIZE, sizeof(label->d_typename)));
348 		strncpy(label->d_packname, cgd.inq_data.product,
349 			min(SID_PRODUCT_SIZE, sizeof(label->d_packname)));
350 
351 		label->d_secsize = softc->params.secsize;
352 		label->d_nsectors = softc->params.secs_per_track;
353 		label->d_ntracks = softc->params.heads;
354 		label->d_ncylinders = softc->params.cylinders;
355 		label->d_secpercyl = softc->params.heads
356 				  * softc->params.secs_per_track;
357 		label->d_secperunit = softc->params.sectors;
358 		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
359 		if (((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) {
360 			daprevent(periph, PR_PREVENT);
361 		}
362 
363 		/*
364 		 * Check to see whether or not the blocksize is set yet.
365 		 * If it isn't, set it and then clear the blocksize
366 		 * unavailable flag for the device statistics.
367 		 */
368 		if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
369 			softc->device_stats.block_size = softc->params.secsize;
370 			softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
371 		}
372 	}
373 
374 	if (error != 0) {
375 		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
376 			daprevent(periph, PR_ALLOW);
377 		}
378 	}
379 	cam_periph_unlock(periph);
380 	return (error);
381 }
382 
383 static int
384 daclose(dev_t dev, int flag, int fmt, struct proc *p)
385 {
386 	struct	cam_periph *periph;
387 	struct	da_softc *softc;
388 	int	unit;
389 	int	error;
390 
391 	unit = dkunit(dev);
392 	periph = cam_extend_get(daperiphs, unit);
393 	if (periph == NULL)
394 		return (ENXIO);
395 
396 	softc = (struct da_softc *)periph->softc;
397 
398 	if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
399 		return (error); /* error code from tsleep */
400 	}
401 
402 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
403 		union	ccb *ccb;
404 
405 		ccb = cam_periph_getccb(periph, /*priority*/1);
406 
407 		scsi_synchronize_cache(&ccb->csio,
408 				       /*retries*/1,
409 				       /*cbfcnp*/dadone,
410 				       MSG_SIMPLE_Q_TAG,
411 				       /*begin_lba*/0,/* Cover the whole disk */
412 				       /*lb_count*/0,
413 				       SSD_FULL_SIZE,
414 				       5 * 60 * 1000);
415 
416 		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
417 				  /*sense_flags*/SF_RETRY_UA,
418 				  &softc->device_stats);
419 
420 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
421 			if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
422 			     CAM_SCSI_STATUS_ERROR) {
423 				int asc, ascq;
424 				int sense_key, error_code;
425 
426 				scsi_extract_sense(&ccb->csio.sense_data,
427 						   &error_code,
428 						   &sense_key,
429 						   &asc, &ascq);
430 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
431 					scsi_sense_print(&ccb->csio);
432 			} else {
433 				xpt_print_path(periph->path);
434 				printf("Synchronize cache failed, status "
435 				       "== 0x%x, scsi status == 0x%x\n",
436 				       ccb->csio.ccb_h.status,
437 				       ccb->csio.scsi_status);
438 			}
439 		}
440 
441 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
442 			cam_release_devq(ccb->ccb_h.path,
443 					 /*relsim_flags*/0,
444 					 /*reduction*/0,
445 					 /*timeout*/0,
446 					 /*getcount_only*/0);
447 
448 		xpt_release_ccb(ccb);
449 
450 	}
451 
452 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
453 		daprevent(periph, PR_ALLOW);
454 		/*
455 		 * If we've got removable media, mark the blocksize as
456 		 * unavailable, since it could change when new media is
457 		 * inserted.
458 		 */
459 		softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
460 	}
461 
462 	softc->flags &= ~DA_FLAG_OPEN;
463 	cam_periph_unlock(periph);
464 	cam_periph_release(periph);
465 	return (0);
466 }
467 
468 /*
469  * Actually translate the requested transfer into one the physical driver
470  * can understand.  The transfer is described by a buf and will include
471  * only one physical transfer.
472  */
473 static void
474 dastrategy(struct buf *bp)
475 {
476 	struct cam_periph *periph;
477 	struct da_softc *softc;
478 	u_int  unit;
479 	u_int  part;
480 	int    s;
481 
482 	unit = dkunit(bp->b_dev);
483 	part = dkpart(bp->b_dev);
484 	periph = cam_extend_get(daperiphs, unit);
485 	if (periph == NULL) {
486 		bp->b_error = ENXIO;
487 		goto bad;
488 	}
489 	softc = (struct da_softc *)periph->softc;
490 #if 0
491 	/*
492 	 * check it's not too big a transfer for our adapter
493 	 */
494 	scsi_minphys(bp,&sd_switch);
495 #endif
496 
497 	/*
498 	 * Mask interrupts so that the pack cannot be invalidated until
499 	 * after we are in the queue.  Otherwise, we might not properly
500 	 * clean up one of the buffers.
501 	 */
502 	s = splbio();
503 
504 	/*
505 	 * If the device has been made invalid, error out
506 	 */
507 	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
508 		splx(s);
509 		bp->b_error = ENXIO;
510 		goto bad;
511 	}
512 
513 	/*
514 	 * Place it in the queue of disk activities for this disk
515 	 */
516 	bufqdisksort(&softc->buf_queue, bp);
517 
518 	splx(s);
519 
520 	/*
521 	 * Schedule ourselves for performing the work.
522 	 */
523 	xpt_schedule(periph, /* XXX priority */1);
524 
525 	return;
526 bad:
527 	bp->b_flags |= B_ERROR;
528 
529 	/*
530 	 * Correctly set the buf to indicate a completed xfer
531 	 */
532 	bp->b_resid = bp->b_bcount;
533 	biodone(bp);
534 	return;
535 }
536 
537 /* For 2.2-stable support */
538 #ifndef ENOIOCTL
539 #define ENOIOCTL -1
540 #endif
541 
542 static int
543 daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
544 {
545 	struct cam_periph *periph;
546 	struct da_softc *softc;
547 	int unit;
548 	int error;
549 
550 	unit = dkunit(dev);
551 	periph = cam_extend_get(daperiphs, unit);
552 	if (periph == NULL)
553 		return (ENXIO);
554 
555 	softc = (struct da_softc *)periph->softc;
556 
557 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n"));
558 
559 	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
560 		return (error); /* error code from tsleep */
561 	}
562 
563 	error = cam_periph_ioctl(periph, cmd, addr, daerror);
564 
565 	cam_periph_unlock(periph);
566 
567 	return (error);
568 }
569 
570 static int
571 dadump(dev_t dev)
572 {
573 	struct	    cam_periph *periph;
574 	struct	    da_softc *softc;
575 	u_int	    unit;
576 	u_int	    part;
577 	u_int	    secsize;
578 	u_int	    num;	/* number of sectors to write */
579 	u_int	    blknum;
580 	long	    blkcnt;
581 	vm_offset_t addr;
582 	struct	    ccb_scsiio csio;
583 	int	    error;
584 
585 	/* toss any characters present prior to dump */
586 	while (cncheckc() != -1)
587 		;
588 
589 	unit = dkunit(dev);
590 	part = dkpart(dev);
591 	periph = cam_extend_get(daperiphs, unit);
592 	if (periph == NULL) {
593 		return (ENXIO);
594 	}
595 	softc = (struct da_softc *)periph->softc;
596 
597 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0)
598 		return (ENXIO);
599 
600 	error = disk_dumpcheck(dev, &num, &blknum, &secsize);
601 	if (error)
602 		return (error);
603 
604 	addr = 0;	/* starting address */
605 	blkcnt = howmany(PAGE_SIZE, secsize);
606 
607 	while (num > 0) {
608 
609 		if (is_physical_memory(addr)) {
610 			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
611 				   trunc_page(addr), VM_PROT_READ, TRUE);
612 		} else {
613 			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
614 				   trunc_page(0), VM_PROT_READ, TRUE);
615 		}
616 
617 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
618 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
619 		scsi_read_write(&csio,
620 				/*retries*/1,
621 				dadone,
622 				MSG_ORDERED_Q_TAG,
623 				/*read*/FALSE,
624 				/*byte2*/0,
625 				/*minimum_cmd_size*/ softc->minimum_cmd_size,
626 				blknum,
627 				blkcnt,
628 				/*data_ptr*/CADDR1,
629 				/*dxfer_len*/blkcnt * secsize,
630 				/*sense_len*/SSD_FULL_SIZE,
631 				DA_DEFAULT_TIMEOUT * 1000);
632 		xpt_polled_action((union ccb *)&csio);
633 
634 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
635 			printf("Aborting dump due to I/O error.\n");
636 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
637 			     CAM_SCSI_STATUS_ERROR)
638 				scsi_sense_print(&csio);
639 			else
640 				printf("status == 0x%x, scsi status == 0x%x\n",
641 				       csio.ccb_h.status, csio.scsi_status);
642 			return(EIO);
643 		}
644 
645 		if (addr % (1024 * 1024) == 0) {
646 #ifdef	HW_WDOG
647 			if (wdog_tickler)
648 				(*wdog_tickler)();
649 #endif /* HW_WDOG */
650 			/* Count in MB of data left to write */
651 			printf("%d ", (num  * softc->params.secsize)
652 				     / (1024 * 1024));
653 		}
654 
655 		/* update block count */
656 		num -= blkcnt;
657 		blknum += blkcnt;
658 		addr += PAGE_SIZE;
659 
660 		/* operator aborting dump? */
661 		if (cncheckc() != -1)
662 			return (EINTR);
663 	}
664 
665 	/*
666 	 * Sync the disk cache contents to the physical media.
667 	 */
668 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
669 
670 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
671 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
672 		scsi_synchronize_cache(&csio,
673 				       /*retries*/1,
674 				       /*cbfcnp*/dadone,
675 				       MSG_SIMPLE_Q_TAG,
676 				       /*begin_lba*/0,/* Cover the whole disk */
677 				       /*lb_count*/0,
678 				       SSD_FULL_SIZE,
679 				       5 * 60 * 1000);
680 		xpt_polled_action((union ccb *)&csio);
681 
682 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
683 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
684 			     CAM_SCSI_STATUS_ERROR) {
685 				int asc, ascq;
686 				int sense_key, error_code;
687 
688 				scsi_extract_sense(&csio.sense_data,
689 						   &error_code,
690 						   &sense_key,
691 						   &asc, &ascq);
692 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
693 					scsi_sense_print(&csio);
694 			} else {
695 				xpt_print_path(periph->path);
696 				printf("Synchronize cache failed, status "
697 				       "== 0x%x, scsi status == 0x%x\n",
698 				       csio.ccb_h.status, csio.scsi_status);
699 			}
700 		}
701 	}
702 	return (0);
703 }
704 
705 static void
706 dainit(void)
707 {
708 	cam_status status;
709 	struct cam_path *path;
710 
711 	/*
712 	 * Create our extend array for storing the devices we attach to.
713 	 */
714 	daperiphs = cam_extend_new();
715 	SLIST_INIT(&softc_list);
716 	if (daperiphs == NULL) {
717 		printf("da: Failed to alloc extend array!\n");
718 		return;
719 	}
720 
721 	/*
722 	 * Install a global async callback.  This callback will
723 	 * receive async callbacks like "new device found".
724 	 */
725 	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
726 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
727 
728 	if (status == CAM_REQ_CMP) {
729 		struct ccb_setasync csa;
730 
731 		xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
732 		csa.ccb_h.func_code = XPT_SASYNC_CB;
733 		csa.event_enable = AC_FOUND_DEVICE;
734 		csa.callback = daasync;
735 		csa.callback_arg = NULL;
736 		xpt_action((union ccb *)&csa);
737 		status = csa.ccb_h.status;
738 		xpt_free_path(path);
739 	}
740 
741 	if (status != CAM_REQ_CMP) {
742 		printf("da: Failed to attach master async callback "
743 		       "due to status 0x%x!\n", status);
744 	} else {
745 
746 		/*
747 		 * Schedule a periodic event to occasionally send an
748 		 * ordered tag to a device.
749 		 */
750 		timeout(dasendorderedtag, NULL,
751 			(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
752 
753 		/* Register our shutdown event handler */
754 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
755 					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
756 		    printf("dainit: shutdown event registration failed!\n");
757 	}
758 }
759 
760 static void
761 daoninvalidate(struct cam_periph *periph)
762 {
763 	int s;
764 	struct da_softc *softc;
765 	struct buf *q_bp;
766 	struct ccb_setasync csa;
767 
768 	softc = (struct da_softc *)periph->softc;
769 
770 	/*
771 	 * De-register any async callbacks.
772 	 */
773 	xpt_setup_ccb(&csa.ccb_h, periph->path,
774 		      /* priority */ 5);
775 	csa.ccb_h.func_code = XPT_SASYNC_CB;
776 	csa.event_enable = 0;
777 	csa.callback = daasync;
778 	csa.callback_arg = periph;
779 	xpt_action((union ccb *)&csa);
780 
781 	softc->flags |= DA_FLAG_PACK_INVALID;
782 
783 	/*
784 	 * Although the oninvalidate() routines are always called at
785 	 * splsoftcam, we need to be at splbio() here to keep the buffer
786 	 * queue from being modified while we traverse it.
787 	 */
788 	s = splbio();
789 
790 	/*
791 	 * Return all queued I/O with ENXIO.
792 	 * XXX Handle any transactions queued to the card
793 	 *     with XPT_ABORT_CCB.
794 	 */
795 	while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){
796 		bufq_remove(&softc->buf_queue, q_bp);
797 		q_bp->b_resid = q_bp->b_bcount;
798 		q_bp->b_error = ENXIO;
799 		q_bp->b_flags |= B_ERROR;
800 		biodone(q_bp);
801 	}
802 	splx(s);
803 
804 	SLIST_REMOVE(&softc_list, softc, da_softc, links);
805 
806 	xpt_print_path(periph->path);
807 	printf("lost device\n");
808 }
809 
810 static void
811 dacleanup(struct cam_periph *periph)
812 {
813 	struct da_softc *softc;
814 
815 	softc = (struct da_softc *)periph->softc;
816 
817 	devstat_remove_entry(&softc->device_stats);
818 	cam_extend_release(daperiphs, periph->unit_number);
819 	xpt_print_path(periph->path);
820 	printf("removing device entry\n");
821 	free(softc, M_DEVBUF);
822 }
823 
824 static void
825 daasync(void *callback_arg, u_int32_t code,
826 	struct cam_path *path, void *arg)
827 {
828 	struct cam_periph *periph;
829 
830 	periph = (struct cam_periph *)callback_arg;
831 	switch (code) {
832 	case AC_FOUND_DEVICE:
833 	{
834 		struct ccb_getdev *cgd;
835 		cam_status status;
836 
837 		cgd = (struct ccb_getdev *)arg;
838 
839 		if ((cgd->pd_type != T_DIRECT) && (cgd->pd_type != T_OPTICAL))
840 			break;
841 
842 		/*
843 		 * Allocate a peripheral instance for
844 		 * this device and start the probe
845 		 * process.
846 		 */
847 		status = cam_periph_alloc(daregister, daoninvalidate,
848 					  dacleanup, dastart,
849 					  "da", CAM_PERIPH_BIO,
850 					  cgd->ccb_h.path, daasync,
851 					  AC_FOUND_DEVICE, cgd);
852 
853 		if (status != CAM_REQ_CMP
854 		 && status != CAM_REQ_INPROG)
855 			printf("daasync: Unable to attach to new device "
856 				"due to status 0x%x\n", status);
857 		break;
858 	}
859 	case AC_SENT_BDR:
860 	case AC_BUS_RESET:
861 	{
862 		struct da_softc *softc;
863 		struct ccb_hdr *ccbh;
864 		int s;
865 
866 		softc = (struct da_softc *)periph->softc;
867 		s = splsoftcam();
868 		/*
869 		 * Don't fail on the expected unit attention
870 		 * that will occur.
871 		 */
872 		softc->flags |= DA_FLAG_RETRY_UA;
873 		for (ccbh = LIST_FIRST(&softc->pending_ccbs);
874 		     ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
875 			ccbh->ccb_state |= DA_CCB_RETRY_UA;
876 		splx(s);
877 		/* FALLTHROUGH*/
878 	}
879 	default:
880 		cam_periph_async(periph, code, path, arg);
881 		break;
882 	}
883 }
884 
885 static cam_status
886 daregister(struct cam_periph *periph, void *arg)
887 {
888 	int s;
889 	struct da_softc *softc;
890 	struct ccb_setasync csa;
891 	struct ccb_getdev *cgd;
892 	caddr_t match;
893 
894 	cgd = (struct ccb_getdev *)arg;
895 	if (periph == NULL) {
896 		printf("daregister: periph was NULL!!\n");
897 		return(CAM_REQ_CMP_ERR);
898 	}
899 
900 	if (cgd == NULL) {
901 		printf("daregister: no getdev CCB, can't register device\n");
902 		return(CAM_REQ_CMP_ERR);
903 	}
904 
905 	softc = (struct da_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);
906 
907 	if (softc == NULL) {
908 		printf("daregister: Unable to probe new device. "
909 		       "Unable to allocate softc\n");
910 		return(CAM_REQ_CMP_ERR);
911 	}
912 
913 	bzero(softc, sizeof(*softc));
914 	LIST_INIT(&softc->pending_ccbs);
915 	softc->state = DA_STATE_PROBE;
916 	bufq_init(&softc->buf_queue);
917 	if (SID_IS_REMOVABLE(&cgd->inq_data))
918 		softc->flags |= DA_FLAG_PACK_REMOVABLE;
919 	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
920 		softc->flags |= DA_FLAG_TAGGED_QUEUING;
921 
922 	periph->softc = softc;
923 
924 	cam_extend_set(daperiphs, periph->unit_number, periph);
925 
926 	/*
927 	 * See if this device has any quirks.
928 	 */
929 	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
930 			       (caddr_t)da_quirk_table,
931 			       sizeof(da_quirk_table)/sizeof(*da_quirk_table),
932 			       sizeof(*da_quirk_table), scsi_inquiry_match);
933 
934 	if (match != NULL)
935 		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
936 	else
937 		softc->quirks = DA_Q_NONE;
938 
939 	if (softc->quirks & DA_Q_NO_6_BYTE)
940 		softc->minimum_cmd_size = 10;
941 	else
942 		softc->minimum_cmd_size = 6;
943 
944 	/*
945 	 * Block our timeout handler while we
946 	 * add this softc to the dev list.
947 	 */
948 	s = splsoftclock();
949 	SLIST_INSERT_HEAD(&softc_list, softc, links);
950 	splx(s);
951 
952 	/*
953 	 * The DA driver supports a blocksize, but
954 	 * we don't know the blocksize until we do
955 	 * a read capacity.  So, set a flag to
956 	 * indicate that the blocksize is
957 	 * unavailable right now.  We'll clear the
958 	 * flag as soon as we've done a read capacity.
959 	 */
960 	devstat_add_entry(&softc->device_stats, "da",
961 			  periph->unit_number, 0,
962 	  		  DEVSTAT_BS_UNAVAILABLE,
963 			  cgd->pd_type | DEVSTAT_TYPE_IF_SCSI,
964 			  DEVSTAT_PRIORITY_DA);
965 
966 	/*
967 	 * Register this media as a disk
968 	 */
969 	disk_create(periph->unit_number, &softc->disk, 0,
970 	    &da_cdevsw, &dadisk_cdevsw);
971 
972 	/*
973 	 * Add async callbacks for bus reset and
974 	 * bus device reset calls.  I don't bother
975 	 * checking if this fails as, in most cases,
976 	 * the system will function just fine without
977 	 * them and the only alternative would be to
978 	 * not attach the device on failure.
979 	 */
980 	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
981 	csa.ccb_h.func_code = XPT_SASYNC_CB;
982 	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
983 	csa.callback = daasync;
984 	csa.callback_arg = periph;
985 	xpt_action((union ccb *)&csa);
986 	/*
987 	 * Lock this peripheral until we are setup.
988 	 * This first call can't block
989 	 */
990 	(void)cam_periph_lock(periph, PRIBIO);
991 	xpt_schedule(periph, /*priority*/5);
992 
993 	return(CAM_REQ_CMP);
994 }
995 
996 static void
997 dastart(struct cam_periph *periph, union ccb *start_ccb)
998 {
999 	struct da_softc *softc;
1000 
1001 	softc = (struct da_softc *)periph->softc;
1002 
1003 
1004 	switch (softc->state) {
1005 	case DA_STATE_NORMAL:
1006 	{
1007 		/* Pull a buffer from the queue and get going on it */
1008 		struct buf *bp;
1009 		int s;
1010 
1011 		/*
1012 		 * See if there is a buf with work for us to do.
1013 		 */
1014 		s = splbio();
1015 		bp = bufq_first(&softc->buf_queue);
1016 		if (periph->immediate_priority <= periph->pinfo.priority) {
1017 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
1018 					("queuing for immediate ccb\n"));
1019 			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
1020 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1021 					  periph_links.sle);
1022 			periph->immediate_priority = CAM_PRIORITY_NONE;
1023 			splx(s);
1024 			wakeup(&periph->ccb_list);
1025 		} else if (bp == NULL) {
1026 			splx(s);
1027 			xpt_release_ccb(start_ccb);
1028 		} else {
1029 			int oldspl;
1030 			u_int8_t tag_code;
1031 
1032 			bufq_remove(&softc->buf_queue, bp);
1033 
1034 			devstat_start_transaction(&softc->device_stats);
1035 
1036 			if ((bp->b_flags & B_ORDERED) != 0
1037 			 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
1038 				softc->flags &= ~DA_FLAG_NEED_OTAG;
1039 				softc->ordered_tag_count++;
1040 				tag_code = MSG_ORDERED_Q_TAG;
1041 			} else {
1042 				tag_code = MSG_SIMPLE_Q_TAG;
1043 			}
1044 			scsi_read_write(&start_ccb->csio,
1045 					/*retries*/4,
1046 					dadone,
1047 					tag_code,
1048 					bp->b_flags & B_READ,
1049 					/*byte2*/0,
1050 					softc->minimum_cmd_size,
1051 					bp->b_pblkno,
1052 					bp->b_bcount / softc->params.secsize,
1053 					bp->b_data,
1054 					bp->b_bcount,
1055 					/*sense_len*/SSD_FULL_SIZE,
1056 					DA_DEFAULT_TIMEOUT * 1000);
1057 			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
1058 
1059 			/*
1060 			 * Block out any asynchronous callbacks
1061 			 * while we touch the pending ccb list.
1062 			 */
1063 			oldspl = splcam();
1064 			LIST_INSERT_HEAD(&softc->pending_ccbs,
1065 					 &start_ccb->ccb_h, periph_links.le);
1066 			splx(oldspl);
1067 
1068 			/* We expect a unit attention from this device */
1069 			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
1070 				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
1071 				softc->flags &= ~DA_FLAG_RETRY_UA;
1072 			}
1073 
1074 			start_ccb->ccb_h.ccb_bp = bp;
1075 			bp = bufq_first(&softc->buf_queue);
1076 			splx(s);
1077 
1078 			xpt_action(start_ccb);
1079 		}
1080 
1081 		if (bp != NULL) {
1082 			/* Have more work to do, so ensure we stay scheduled */
1083 			xpt_schedule(periph, /* XXX priority */1);
1084 		}
1085 		break;
1086 	}
1087 	case DA_STATE_PROBE:
1088 	{
1089 		struct ccb_scsiio *csio;
1090 		struct scsi_read_capacity_data *rcap;
1091 
1092 		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
1093 								M_TEMP,
1094 								M_NOWAIT);
1095 		if (rcap == NULL) {
1096 			printf("dastart: Couldn't malloc read_capacity data\n");
1097 			/* da_free_periph??? */
1098 			break;
1099 		}
1100 		csio = &start_ccb->csio;
1101 		scsi_read_capacity(csio,
1102 				   /*retries*/4,
1103 				   dadone,
1104 				   MSG_SIMPLE_Q_TAG,
1105 				   rcap,
1106 				   SSD_FULL_SIZE,
1107 				   /*timeout*/5000);
1108 		start_ccb->ccb_h.ccb_bp = NULL;
1109 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
1110 		xpt_action(start_ccb);
1111 		break;
1112 	}
1113 	}
1114 }
1115 
1116 
1117 static void
1118 dadone(struct cam_periph *periph, union ccb *done_ccb)
1119 {
1120 	struct da_softc *softc;
1121 	struct ccb_scsiio *csio;
1122 
1123 	softc = (struct da_softc *)periph->softc;
1124 	csio = &done_ccb->csio;
1125 	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
1126 	case DA_CCB_BUFFER_IO:
1127 	{
1128 		struct buf *bp;
1129 		int    oldspl;
1130 
1131 		bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
1132 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1133 			int error;
1134 			int s;
1135 			int sf;
1136 
1137 			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
1138 				sf = SF_RETRY_UA;
1139 			else
1140 				sf = 0;
1141 
1142 			/* Retry selection timeouts */
1143 			sf |= SF_RETRY_SELTO;
1144 
1145 			if ((error = daerror(done_ccb, 0, sf)) == ERESTART) {
1146 				/*
1147 				 * A retry was scheduled, so
1148 				 * just return.
1149 				 */
1150 				return;
1151 			}
1152 			if (error != 0) {
1153 				struct buf *q_bp;
1154 
1155 				s = splbio();
1156 
1157 				if (error == ENXIO) {
1158 					/*
1159 					 * Catastrophic error.  Mark our pack as
1160 					 * invalid.
1161 					 */
1162 					/* XXX See if this is really a media
1163 					 *     change first.
1164 					 */
1165 					xpt_print_path(periph->path);
1166 					printf("Invalidating pack\n");
1167 					softc->flags |= DA_FLAG_PACK_INVALID;
1168 				}
1169 
1170 				/*
1171 				 * Return all queued I/O with EIO, so that
1172 				 * the client can retry these I/Os in the
1173 				 * proper order should it attempt to recover.
1174 				 */
1175 				while ((q_bp = bufq_first(&softc->buf_queue))
1176 					!= NULL) {
1177 					bufq_remove(&softc->buf_queue, q_bp);
1178 					q_bp->b_resid = q_bp->b_bcount;
1179 					q_bp->b_error = EIO;
1180 					q_bp->b_flags |= B_ERROR;
1181 					biodone(q_bp);
1182 				}
1183 				splx(s);
1184 				bp->b_error = error;
1185 				bp->b_resid = bp->b_bcount;
1186 				bp->b_flags |= B_ERROR;
1187 			} else {
1188 				bp->b_resid = csio->resid;
1189 				bp->b_error = 0;
1190 				if (bp->b_resid != 0) {
1191 					/* Short transfer ??? */
1192 					bp->b_flags |= B_ERROR;
1193 				}
1194 			}
1195 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1196 				cam_release_devq(done_ccb->ccb_h.path,
1197 						 /*relsim_flags*/0,
1198 						 /*reduction*/0,
1199 						 /*timeout*/0,
1200 						 /*getcount_only*/0);
1201 		} else {
1202 			bp->b_resid = csio->resid;
1203 			if (csio->resid > 0)
1204 				bp->b_flags |= B_ERROR;
1205 		}
1206 
1207 		/*
1208 		 * Block out any asynchronous callbacks
1209 		 * while we touch the pending ccb list.
1210 		 */
1211 		oldspl = splcam();
1212 		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
1213 		splx(oldspl);
1214 
1215 		if (softc->device_stats.busy_count == 0)
1216 			softc->flags |= DA_FLAG_WENT_IDLE;
1217 
1218 		devstat_end_transaction_buf(&softc->device_stats, bp);
1219 		biodone(bp);
1220 		break;
1221 	}
1222 	case DA_CCB_PROBE:
1223 	{
1224 		struct	   scsi_read_capacity_data *rdcap;
1225 		char	   announce_buf[80];
1226 
1227 		rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;
1228 
1229 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1230 			struct disk_params *dp;
1231 
1232 			dasetgeom(periph, rdcap);
1233 			dp = &softc->params;
1234 			snprintf(announce_buf, sizeof(announce_buf),
1235 			        "%luMB (%u %u byte sectors: %dH %dS/T %dC)",
1236 				(unsigned long) (((u_int64_t)dp->secsize *
1237 				dp->sectors) / (1024*1024)), dp->sectors,
1238 				dp->secsize, dp->heads, dp->secs_per_track,
1239 				dp->cylinders);
1240 		} else {
1241 			int	error;
1242 
1243 			announce_buf[0] = '\0';
1244 
1245 			/*
1246 			 * Retry any UNIT ATTENTION type errors.  They
1247 			 * are expected at boot.
1248 			 */
1249 			error = daerror(done_ccb, 0, SF_RETRY_UA |
1250 					SF_RETRY_SELTO | SF_NO_PRINT);
1251 			if (error == ERESTART) {
1252 				/*
1253 				 * A retry was scheduled, so
1254 				 * just return.
1255 				 */
1256 				return;
1257 			} else if (error != 0) {
1258 				struct scsi_sense_data *sense;
1259 				int asc, ascq;
1260 				int sense_key, error_code;
1261 				int have_sense;
1262 				cam_status status;
1263 				struct ccb_getdev cgd;
1264 
1265 				/* Don't wedge this device's queue */
1266 				cam_release_devq(done_ccb->ccb_h.path,
1267 						 /*relsim_flags*/0,
1268 						 /*reduction*/0,
1269 						 /*timeout*/0,
1270 						 /*getcount_only*/0);
1271 
1272 				status = done_ccb->ccb_h.status;
1273 
1274 				xpt_setup_ccb(&cgd.ccb_h,
1275 					      done_ccb->ccb_h.path,
1276 					      /* priority */ 1);
1277 				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1278 				xpt_action((union ccb *)&cgd);
1279 
1280 				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
1281 				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
1282 				 || ((status & CAM_AUTOSNS_VALID) == 0))
1283 					have_sense = FALSE;
1284 				else
1285 					have_sense = TRUE;
1286 
1287 				if (have_sense) {
1288 					sense = &csio->sense_data;
1289 					scsi_extract_sense(sense, &error_code,
1290 							   &sense_key,
1291 							   &asc, &ascq);
1292 				}
1293 				/*
1294 				 * Attach to anything that claims to be a
1295 				 * direct access or optical disk device,
1296 				 * as long as it doesn't return a "Logical
1297 				 * unit not supported" (0x25) error.
1298 				 */
1299 				if ((have_sense) && (asc != 0x25)
1300 				 && (error_code == SSD_CURRENT_ERROR))
1301 					snprintf(announce_buf,
1302 					    sizeof(announce_buf),
1303 						"Attempt to query device "
1304 						"size failed: %s, %s",
1305 						scsi_sense_key_text[sense_key],
1306 						scsi_sense_desc(asc,ascq,
1307 								&cgd.inq_data));
1308 				else {
1309 					if (have_sense)
1310 						scsi_sense_print(
1311 							&done_ccb->csio);
1312 					else {
1313 						xpt_print_path(periph->path);
1314 						printf("got CAM status %#x\n",
1315 						       done_ccb->ccb_h.status);
1316 					}
1317 
1318 					xpt_print_path(periph->path);
1319 					printf("fatal error, failed"
1320 					       " to attach to device\n");
1321 
1322 					/*
1323 					 * Free up resources.
1324 					 */
1325 					cam_periph_invalidate(periph);
1326 				}
1327 			}
1328 		}
1329 		free(rdcap, M_TEMP);
1330 		if (announce_buf[0] != '\0')
1331 			xpt_announce_periph(periph, announce_buf);
1332 		softc->state = DA_STATE_NORMAL;
1333 		/*
1334 		 * Since our peripheral may be invalidated by an error
1335 		 * above or an external event, we must release our CCB
1336 		 * before releasing the probe lock on the peripheral.
1337 		 * The peripheral will only go away once the last lock
1338 		 * is removed, and we need it around for the CCB release
1339 		 * operation.
1340 		 */
1341 		xpt_release_ccb(done_ccb);
1342 		cam_periph_unlock(periph);
1343 		return;
1344 	}
1345 	case DA_CCB_WAITING:
1346 	{
1347 		/* Caller will release the CCB */
1348 		wakeup(&done_ccb->ccb_h.cbfcnp);
1349 		return;
1350 	}
1351 	case DA_CCB_DUMP:
1352 		/* No-op.  We're polling */
1353 		return;
1354 	default:
1355 		break;
1356 	}
1357 	xpt_release_ccb(done_ccb);
1358 }
1359 
1360 static int
1361 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1362 {
1363 	struct da_softc	  *softc;
1364 	struct cam_periph *periph;
1365 
1366 	periph = xpt_path_periph(ccb->ccb_h.path);
1367 	softc = (struct da_softc *)periph->softc;
1368 
1369 	/*
1370 	 * XXX
1371 	 * Until we have a better way of doing pack validation,
1372 	 * don't treat UAs as errors.
1373 	 */
1374 	sense_flags |= SF_RETRY_UA;
1375 	return(cam_periph_error(ccb, cam_flags, sense_flags,
1376 				&softc->saved_ccb));
1377 }
1378 
1379 static void
1380 daprevent(struct cam_periph *periph, int action)
1381 {
1382 	struct	da_softc *softc;
1383 	union	ccb *ccb;
1384 	int	error;
1385 
1386 	softc = (struct da_softc *)periph->softc;
1387 
1388 	if (((action == PR_ALLOW)
1389 	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
1390 	 || ((action == PR_PREVENT)
1391 	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
1392 		return;
1393 	}
1394 
1395 	ccb = cam_periph_getccb(periph, /*priority*/1);
1396 
1397 	scsi_prevent(&ccb->csio,
1398 		     /*retries*/1,
1399 		     /*cbcfp*/dadone,
1400 		     MSG_SIMPLE_Q_TAG,
1401 		     action,
1402 		     SSD_FULL_SIZE,
1403 		     5000);
1404 
1405 	error = cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
1406 				  /*sense_flags*/0, &softc->device_stats);
1407 
1408 	if (error == 0) {
1409 		if (action == PR_ALLOW)
1410 			softc->flags &= ~DA_FLAG_PACK_LOCKED;
1411 		else
1412 			softc->flags |= DA_FLAG_PACK_LOCKED;
1413 	}
1414 
1415 	xpt_release_ccb(ccb);
1416 }
1417 
1418 static void
1419 dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data * rdcap)
1420 {
1421 	struct ccb_calc_geometry ccg;
1422 	struct da_softc *softc;
1423 	struct disk_params *dp;
1424 
1425 	softc = (struct da_softc *)periph->softc;
1426 
1427 	dp = &softc->params;
1428 	dp->secsize = scsi_4btoul(rdcap->length);
1429 	dp->sectors = scsi_4btoul(rdcap->addr) + 1;
1430 	/*
1431 	 * Have the controller provide us with a geometry
1432 	 * for this disk.  The only time the geometry
1433 	 * matters is when we boot and the controller
1434 	 * is the only one knowledgeable enough to come
1435 	 * up with something that will make this a bootable
1436 	 * device.
1437 	 */
1438 	xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
1439 	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
1440 	ccg.block_size = dp->secsize;
1441 	ccg.volume_size = dp->sectors;
1442 	ccg.heads = 0;
1443 	ccg.secs_per_track = 0;
1444 	ccg.cylinders = 0;
1445 	xpt_action((union ccb*)&ccg);
1446 	dp->heads = ccg.heads;
1447 	dp->secs_per_track = ccg.secs_per_track;
1448 	dp->cylinders = ccg.cylinders;
1449 }
1450 
1451 static void
1452 dasendorderedtag(void *arg)
1453 {
1454 	struct da_softc *softc;
1455 	int s;
1456 
1457 	for (softc = SLIST_FIRST(&softc_list);
1458 	     softc != NULL;
1459 	     softc = SLIST_NEXT(softc, links)) {
1460 		s = splsoftcam();
1461 		if ((softc->ordered_tag_count == 0)
1462 		 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
1463 			softc->flags |= DA_FLAG_NEED_OTAG;
1464 		}
1465 		if (softc->device_stats.busy_count > 0)
1466 			softc->flags &= ~DA_FLAG_WENT_IDLE;
1467 
1468 		softc->ordered_tag_count = 0;
1469 		splx(s);
1470 	}
1471 	/* Queue us up again */
1472 	timeout(dasendorderedtag, NULL,
1473 		(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
1474 }
1475 
1476 /*
1477  * Step through all DA peripheral drivers, and if the device is still open,
1478  * sync the disk cache to physical media.
1479  */
1480 static void
1481 dashutdown(void * arg, int howto)
1482 {
1483 	struct cam_periph *periph;
1484 	struct da_softc *softc;
1485 
1486 	for (periph = TAILQ_FIRST(&dadriver.units); periph != NULL;
1487 	     periph = TAILQ_NEXT(periph, unit_links)) {
1488 		union ccb ccb;
1489 		softc = (struct da_softc *)periph->softc;
1490 
1491 		/*
1492 		 * We only sync the cache if the drive is still open, and
1493 		 * if the drive is capable of it.
1494 		 */
1495 		if (((softc->flags & DA_FLAG_OPEN) == 0)
1496 		 || (softc->quirks & DA_Q_NO_SYNC_CACHE))
1497 			continue;
1498 
1499 		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
1500 
1501 		ccb.ccb_h.ccb_state = DA_CCB_DUMP;
1502 		scsi_synchronize_cache(&ccb.csio,
1503 				       /*retries*/1,
1504 				       /*cbfcnp*/dadone,
1505 				       MSG_SIMPLE_Q_TAG,
1506 				       /*begin_lba*/0, /* whole disk */
1507 				       /*lb_count*/0,
1508 				       SSD_FULL_SIZE,
1509 				       5 * 60 * 1000);
1510 
1511 		xpt_polled_action(&ccb);
1512 
1513 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1514 			if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
1515 			     CAM_SCSI_STATUS_ERROR)
1516 			 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
1517 				int error_code, sense_key, asc, ascq;
1518 
1519 				scsi_extract_sense(&ccb.csio.sense_data,
1520 						   &error_code, &sense_key,
1521 						   &asc, &ascq);
1522 
1523 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
1524 					scsi_sense_print(&ccb.csio);
1525 			} else {
1526 				xpt_print_path(periph->path);
1527 				printf("Synchronize cache failed, status "
1528 				       "== 0x%x, scsi status == 0x%x\n",
1529 				       ccb.ccb_h.status, ccb.csio.scsi_status);
1530 			}
1531 		}
1532 
1533 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1534 			cam_release_devq(ccb.ccb_h.path,
1535 					 /*relsim_flags*/0,
1536 					 /*reduction*/0,
1537 					 /*timeout*/0,
1538 					 /*getcount_only*/0);
1539 
1540 	}
1541 }
1542