xref: /freebsd/sys/cam/scsi/scsi_da.c (revision 4cf49a43559ed9fdad601bdcccd2c55963008675)
1 /*
2  * Implementation of SCSI Direct Access Peripheral driver for CAM.
3  *
4  * Copyright (c) 1997 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #include "opt_hw_wdog.h"
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/buf.h>
37 #include <sys/devicestat.h>
38 #include <sys/conf.h>
39 #include <sys/disk.h>
40 #include <sys/eventhandler.h>
41 #include <sys/malloc.h>
42 #include <sys/cons.h>
43 
44 #include <machine/md_var.h>
45 
46 #include <vm/vm.h>
47 #include <vm/vm_prot.h>
48 #include <vm/pmap.h>
49 
50 #include <cam/cam.h>
51 #include <cam/cam_ccb.h>
52 #include <cam/cam_extend.h>
53 #include <cam/cam_periph.h>
54 #include <cam/cam_xpt_periph.h>
55 
56 #include <cam/scsi/scsi_message.h>
57 
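/*
 * A unit starts in DA_STATE_PROBE until the initial READ CAPACITY issued
 * from dastart() completes in dadone(); after that it runs in
 * DA_STATE_NORMAL, servicing buffers from its queue.
 */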
58 typedef enum {
59 	DA_STATE_PROBE,
60 	DA_STATE_NORMAL
61 } da_state;
62 
63 typedef enum {
64 	DA_FLAG_PACK_INVALID	= 0x001,
65 	DA_FLAG_NEW_PACK	= 0x002,
66 	DA_FLAG_PACK_LOCKED	= 0x004,
67 	DA_FLAG_PACK_REMOVABLE	= 0x008,
68 	DA_FLAG_TAGGED_QUEUING	= 0x010,
69 	DA_FLAG_NEED_OTAG	= 0x020,
70 	DA_FLAG_WENT_IDLE	= 0x040,
71 	DA_FLAG_RETRY_UA	= 0x080,
72 	DA_FLAG_OPEN		= 0x100
73 } da_flags;
74 
75 typedef enum {
76 	DA_Q_NONE		= 0x00,
77 	DA_Q_NO_SYNC_CACHE	= 0x01,
78 	DA_Q_NO_6_BYTE		= 0x02
79 } da_quirks;
80 
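/*
 * CCB state kept in the ccb_state field of the peripheral private area.
 * The low nibble (DA_CCB_TYPE_MASK) records what kind of work the CCB
 * carries; DA_CCB_RETRY_UA is OR'd in when dadone() should retry the
 * unit attention expected after a bus or device reset.
 */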
81 typedef enum {
82 	DA_CCB_PROBE		= 0x01,
83 	DA_CCB_BUFFER_IO	= 0x02,
84 	DA_CCB_WAITING		= 0x03,
85 	DA_CCB_DUMP		= 0x04,
86 	DA_CCB_TYPE_MASK	= 0x0F,
87 	DA_CCB_RETRY_UA		= 0x10
88 } da_ccb_state;
89 
90 /* Offsets into our private area for storing information */
91 #define ccb_state	ppriv_field0
92 #define ccb_bp		ppriv_ptr1
93 
94 struct disk_params {
95 	u_int8_t  heads;
96 	u_int16_t cylinders;
97 	u_int8_t  secs_per_track;
98 	u_int32_t secsize;	/* Number of bytes/sector */
99 	u_int32_t sectors;	/* total number of sectors */
100 };
101 
102 struct da_softc {
103 	struct	 buf_queue_head buf_queue;
104 	struct	 devstat device_stats;
105 	SLIST_ENTRY(da_softc) links;
106 	LIST_HEAD(, ccb_hdr) pending_ccbs;
107 	da_state state;
108 	da_flags flags;
109 	da_quirks quirks;
110 	int	 minimum_cmd_size;
111 	int	 ordered_tag_count;
112 	struct	 disk_params params;
113 	struct	 disk disk;
114 	union	 ccb saved_ccb;
115 };
116 
117 struct da_quirk_entry {
118 	struct scsi_inquiry_pattern inq_pat;
119 	da_quirks quirks;
120 };
121 
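/*
 * Table of devices with known problems.  daregister() matches a device's
 * INQUIRY data against this table with cam_quirkmatch() and records any
 * quirks in the softc.
 */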
122 static struct da_quirk_entry da_quirk_table[] =
123 {
124 	{
125 		/*
126 		 * This particular Fujitsu drive doesn't like the
127 		 * synchronize cache command.
128 		 * Reported by: Tom Jackson <toj@gorilla.net>
129 		 */
130 		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
131 		/*quirks*/ DA_Q_NO_SYNC_CACHE
132 
133 	},
134 	{
135 		/*
136 		 * This drive doesn't like the synchronize cache command
137 		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
138 		 * in NetBSD PR kern/6027, August 24, 1998.
139 		 */
140 		{T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "2217*", "*"},
141 		/*quirks*/ DA_Q_NO_SYNC_CACHE
142 	},
143 	{
144 		/*
145 		 * This drive doesn't like the synchronize cache command
146 		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
147 		 * (PR 8882).
148 		 */
149 		{T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "2112*", "*"},
150 		/*quirks*/ DA_Q_NO_SYNC_CACHE
151 	},
152 	{
153 		/*
154 		 * Doesn't like the synchronize cache command.
155 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
156 		 */
157 		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
158 		/*quirks*/ DA_Q_NO_SYNC_CACHE
159 	},
160 	{
161 		/*
162 		 * Doesn't work correctly with 6 byte reads/writes.
163 		 * Returns illegal request, and points to byte 9 of the
164 		 * 6-byte CDB.
165 		 * Reported by:  Adam McDougall <bsdx@spawnet.com>
166 		 */
167 		{T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "VIKING 4*", "*"},
168 		/*quirks*/ DA_Q_NO_6_BYTE
169 	},
170 	{
171 		/*
172 		 * See above.
173 		 */
174 		{T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "VIKING 2*", "*"},
175 		/*quirks*/ DA_Q_NO_6_BYTE
176 	}
177 };
178 
179 static	d_open_t	daopen;
180 static	d_close_t	daclose;
181 static	d_strategy_t	dastrategy;
182 static	d_ioctl_t	daioctl;
183 static	d_dump_t	dadump;
184 static	periph_init_t	dainit;
185 static	void		daasync(void *callback_arg, u_int32_t code,
186 				struct cam_path *path, void *arg);
187 static	periph_ctor_t	daregister;
188 static	periph_dtor_t	dacleanup;
189 static	periph_start_t	dastart;
190 static	periph_oninv_t	daoninvalidate;
191 static	void		dadone(struct cam_periph *periph,
192 			       union ccb *done_ccb);
193 static  int		daerror(union ccb *ccb, u_int32_t cam_flags,
194 				u_int32_t sense_flags);
195 static void		daprevent(struct cam_periph *periph, int action);
196 static void		dasetgeom(struct cam_periph *periph,
197 				  struct scsi_read_capacity_data * rdcap);
198 static timeout_t	dasendorderedtag;
199 static void		dashutdown(void *arg, int howto);
200 
201 #ifndef DA_DEFAULT_TIMEOUT
202 #define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
203 #endif
204 
205 /*
206  * DA_ORDEREDTAG_INTERVAL determines how often, relative
207  * to the default timeout, we check to see whether an ordered
208  * tagged transaction is appropriate to prevent simple tag
209  * starvation.  Since we'd like to ensure that there is at least
210  * 1/2 of the timeout length left for a starved transaction to
211  * complete after we've sent an ordered tag, we must poll at least
212  * four times in every timeout period.  This takes care of the worst
213  * case where a starved transaction starts during an interval that
214  * passes the "don't send an ordered tag" test, so it takes us two
215  * intervals to determine that a tag must be sent.
216  */
217 #ifndef DA_ORDEREDTAG_INTERVAL
218 #define DA_ORDEREDTAG_INTERVAL 4
219 #endif
220 
221 static struct periph_driver dadriver =
222 {
223 	dainit, "da",
224 	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
225 };
226 
227 DATA_SET(periphdriver_set, dadriver);
228 
229 #define DA_CDEV_MAJOR 13
230 #define DA_BDEV_MAJOR 4
231 
232 /* For 2.2-stable support */
233 #ifndef D_DISK
234 #define D_DISK 0
235 #endif
236 
237 static struct cdevsw da_cdevsw = {
238 	/* open */	daopen,
239 	/* close */	daclose,
240 	/* read */	physread,
241 	/* write */	physwrite,
242 	/* ioctl */	daioctl,
243 	/* poll */	nopoll,
244 	/* mmap */	nommap,
245 	/* strategy */	dastrategy,
246 	/* name */	"da",
247 	/* maj */	DA_CDEV_MAJOR,
248 	/* dump */	dadump,
249 	/* psize */	nopsize,
250 	/* flags */	D_DISK,
251 	/* bmaj */	DA_BDEV_MAJOR
252 };
253 
254 static struct cdevsw dadisk_cdevsw;
255 
256 static SLIST_HEAD(,da_softc) softc_list;
257 static struct extend_array *daperiphs;
258 
259 static int
260 daopen(dev_t dev, int flags, int fmt, struct proc *p)
261 {
262 	struct cam_periph *periph;
263 	struct da_softc *softc;
264 	struct disklabel *label;
265 	int unit;
266 	int part;
267 	int error;
268 	int s;
269 
270 	unit = dkunit(dev);
271 	part = dkpart(dev);
272 	periph = cam_extend_get(daperiphs, unit);
273 	if (periph == NULL)
274 		return (ENXIO);
275 
276 	softc = (struct da_softc *)periph->softc;
277 
278 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
279 	    ("daopen: dev=%s (unit %d , partition %d)\n", devtoname(dev),
280 	     unit, part));
281 
282 	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
283 		return (error); /* error code from tsleep */
284 	}
285 
286 	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
287 		return(ENXIO);
288 	softc->flags |= DA_FLAG_OPEN;
289 
290 	s = splsoftcam();
291 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
292 		/* Invalidate our pack information. */
293 		disk_invalidate(&softc->disk);
294 		softc->flags &= ~DA_FLAG_PACK_INVALID;
295 	}
296 	splx(s);
297 
298 	/* Do a READ CAPACITY to learn the current media size and geometry. */
299 	{
300 		struct scsi_read_capacity_data *rcap;
301 		union  ccb *ccb;
302 
303 		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
304 								M_TEMP,
305 								M_WAITOK);
306 
307 		ccb = cam_periph_getccb(periph, /*priority*/1);
308 		scsi_read_capacity(&ccb->csio,
309 				   /*retries*/1,
310 				   /*cbfcnp*/dadone,
311 				   MSG_SIMPLE_Q_TAG,
312 				   rcap,
313 				   SSD_FULL_SIZE,
314 				   /*timeout*/60000);
315 		ccb->ccb_h.ccb_bp = NULL;
316 
317 		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
318 					  /*sense_flags*/SF_RETRY_UA |
319 							 SF_RETRY_SELTO,
320 					  &softc->device_stats);
321 
322 		xpt_release_ccb(ccb);
323 
324 		if (error == 0) {
325 			dasetgeom(periph, rcap);
326 		}
327 
328 		free(rcap, M_TEMP);
329 	}
330 
331 	if (error == 0) {
332 		struct ccb_getdev cgd;
333 
334 		/* Build label for whole disk. */
335 		label = &softc->disk.d_label;
336 		bzero(label, sizeof(*label));
337 		label->d_type = DTYPE_SCSI;
338 
339 		/*
340 		 * Grab the inquiry data to get the vendor and product names.
341 		 * Put them in the typename and packname for the label.
342 		 */
343 		xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
344 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
345 		xpt_action((union ccb *)&cgd);
346 
347 		strncpy(label->d_typename, cgd.inq_data.vendor,
348 			min(SID_VENDOR_SIZE, sizeof(label->d_typename)));
349 		strncpy(label->d_packname, cgd.inq_data.product,
350 			min(SID_PRODUCT_SIZE, sizeof(label->d_packname)));
351 
352 		label->d_secsize = softc->params.secsize;
353 		label->d_nsectors = softc->params.secs_per_track;
354 		label->d_ntracks = softc->params.heads;
355 		label->d_ncylinders = softc->params.cylinders;
356 		label->d_secpercyl = softc->params.heads
357 				  * softc->params.secs_per_track;
358 		label->d_secperunit = softc->params.sectors;
359 
360 		if (((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) {
361 			daprevent(periph, PR_PREVENT);
362 		}
363 
364 		/*
365 		 * Check to see whether or not the blocksize is set yet.
366 		 * If it isn't, set it and then clear the blocksize
367 		 * unavailable flag for the device statistics.
368 		 */
369 		if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
370 			softc->device_stats.block_size = softc->params.secsize;
371 			softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
372 		}
373 	}
374 
375 	if (error != 0) {
376 		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
377 			daprevent(periph, PR_ALLOW);
378 		}
379 	}
380 	cam_periph_unlock(periph);
381 	return (error);
382 }
383 
384 static int
385 daclose(dev_t dev, int flag, int fmt, struct proc *p)
386 {
387 	struct	cam_periph *periph;
388 	struct	da_softc *softc;
389 	int	unit;
390 	int	error;
391 
392 	unit = dkunit(dev);
393 	periph = cam_extend_get(daperiphs, unit);
394 	if (periph == NULL)
395 		return (ENXIO);
396 
397 	softc = (struct da_softc *)periph->softc;
398 
399 	if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
400 		return (error); /* error code from tsleep */
401 	}
402 
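	/*
	 * On close, flush the drive's write cache unless this device is
	 * quirked as not supporting SYNCHRONIZE CACHE.
	 */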
403 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
404 		union	ccb *ccb;
405 
406 		ccb = cam_periph_getccb(periph, /*priority*/1);
407 
408 		scsi_synchronize_cache(&ccb->csio,
409 				       /*retries*/1,
410 				       /*cbfcnp*/dadone,
411 				       MSG_SIMPLE_Q_TAG,
412 				       /*begin_lba*/0,/* Cover the whole disk */
413 				       /*lb_count*/0,
414 				       SSD_FULL_SIZE,
415 				       5 * 60 * 1000);
416 
417 		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
418 				  /*sense_flags*/SF_RETRY_UA,
419 				  &softc->device_stats);
420 
421 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
422 			if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
423 			     CAM_SCSI_STATUS_ERROR) {
424 				int asc, ascq;
425 				int sense_key, error_code;
426 
427 				scsi_extract_sense(&ccb->csio.sense_data,
428 						   &error_code,
429 						   &sense_key,
430 						   &asc, &ascq);
431 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
432 					scsi_sense_print(&ccb->csio);
433 			} else {
434 				xpt_print_path(periph->path);
435 				printf("Synchronize cache failed, status "
436 				       "== 0x%x, scsi status == 0x%x\n",
437 				       ccb->csio.ccb_h.status,
438 				       ccb->csio.scsi_status);
439 			}
440 		}
441 
442 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
443 			cam_release_devq(ccb->ccb_h.path,
444 					 /*relsim_flags*/0,
445 					 /*reduction*/0,
446 					 /*timeout*/0,
447 					 /*getcount_only*/0);
448 
449 		xpt_release_ccb(ccb);
450 
451 	}
452 
453 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
454 		daprevent(periph, PR_ALLOW);
455 		/*
456 		 * If we've got removable media, mark the blocksize as
457 		 * unavailable, since it could change when new media is
458 		 * inserted.
459 		 */
460 		softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
461 	}
462 
463 	softc->flags &= ~DA_FLAG_OPEN;
464 	cam_periph_unlock(periph);
465 	cam_periph_release(periph);
466 	return (0);
467 }
468 
469 /*
470  * Actually translate the requested transfer into one the physical driver
471  * can understand.  The transfer is described by a buf and will include
472  * only one physical transfer.
473  */
474 static void
475 dastrategy(struct buf *bp)
476 {
477 	struct cam_periph *periph;
478 	struct da_softc *softc;
479 	u_int  unit;
480 	u_int  part;
481 	int    s;
482 
483 	unit = dkunit(bp->b_dev);
484 	part = dkpart(bp->b_dev);
485 	periph = cam_extend_get(daperiphs, unit);
486 	if (periph == NULL) {
487 		bp->b_error = ENXIO;
488 		goto bad;
489 	}
490 	softc = (struct da_softc *)periph->softc;
491 #if 0
492 	/*
493 	 * check it's not too big a transfer for our adapter
494 	 */
495 	scsi_minphys(bp,&sd_switch);
496 #endif
497 
498 	/*
499 	 * Mask interrupts so that the pack cannot be invalidated until
500 	 * after we are in the queue.  Otherwise, we might not properly
501 	 * clean up one of the buffers.
502 	 */
503 	s = splbio();
504 
505 	/*
506 	 * If the device has been made invalid, error out
507 	 */
508 	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
509 		splx(s);
510 		bp->b_error = ENXIO;
511 		goto bad;
512 	}
513 
514 	/*
515 	 * Place it in the queue of disk activities for this disk
516 	 */
517 	bufqdisksort(&softc->buf_queue, bp);
518 
519 	splx(s);
520 
521 	/*
522 	 * Schedule ourselves for performing the work.
523 	 */
524 	xpt_schedule(periph, /* XXX priority */1);
525 
526 	return;
527 bad:
528 	bp->b_flags |= B_ERROR;
529 
530 	/*
531 	 * Correctly set the buf to indicate a completed xfer
532 	 */
533 	bp->b_resid = bp->b_bcount;
534 	biodone(bp);
535 	return;
536 }
537 
538 /* For 2.2-stable support */
539 #ifndef ENOIOCTL
540 #define ENOIOCTL -1
541 #endif
542 
543 static int
544 daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
545 {
546 	struct cam_periph *periph;
547 	struct da_softc *softc;
548 	int unit;
549 	int error;
550 
551 	unit = dkunit(dev);
552 	periph = cam_extend_get(daperiphs, unit);
553 	if (periph == NULL)
554 		return (ENXIO);
555 
556 	softc = (struct da_softc *)periph->softc;
557 
558 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n"));
559 
560 	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
561 		return (error); /* error code from tsleep */
562 	}
563 
564 	error = cam_periph_ioctl(periph, cmd, addr, daerror);
565 
566 	cam_periph_unlock(periph);
567 
568 	return (error);
569 }
570 
571 static int
572 dadump(dev_t dev)
573 {
574 	struct	    cam_periph *periph;
575 	struct	    da_softc *softc;
576 	u_int	    unit;
577 	u_int	    part;
578 	u_int	    secsize;
579 	u_int	    num;	/* number of sectors to write */
580 	u_int	    blknum;
581 	long	    blkcnt;
582 	vm_offset_t addr;
583 	struct	    ccb_scsiio csio;
584 	int	    error;
585 
586 	/* toss any characters present prior to dump */
587 	while (cncheckc() != -1)
588 		;
589 
590 	unit = dkunit(dev);
591 	part = dkpart(dev);
592 	periph = cam_extend_get(daperiphs, unit);
593 	if (periph == NULL) {
594 		return (ENXIO);
595 	}
596 	softc = (struct da_softc *)periph->softc;
597 
598 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0)
599 		return (ENXIO);
600 
601 	error = disk_dumpcheck(dev, &num, &blknum, &secsize);
602 	if (error)
603 		return (error);
604 
605 	addr = 0;	/* starting address */
606 	blkcnt = howmany(PAGE_SIZE, secsize);
607 
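	/*
	 * Dump physical memory one page at a time: map each page at CADDR1
	 * and issue a polled WRITE of blkcnt sectors for it.
	 */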
608 	while (num > 0) {
609 
610 		if (is_physical_memory(addr)) {
611 			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
612 				   trunc_page(addr), VM_PROT_READ, TRUE);
613 		} else {
614 			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
615 				   trunc_page(0), VM_PROT_READ, TRUE);
616 		}
617 
618 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
619 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
620 		scsi_read_write(&csio,
621 				/*retries*/1,
622 				dadone,
623 				MSG_ORDERED_Q_TAG,
624 				/*read*/FALSE,
625 				/*byte2*/0,
626 				/*minimum_cmd_size*/ softc->minimum_cmd_size,
627 				blknum,
628 				blkcnt,
629 				/*data_ptr*/CADDR1,
630 				/*dxfer_len*/blkcnt * secsize,
631 				/*sense_len*/SSD_FULL_SIZE,
632 				DA_DEFAULT_TIMEOUT * 1000);
633 		xpt_polled_action((union ccb *)&csio);
634 
635 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
636 			printf("Aborting dump due to I/O error.\n");
637 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
638 			     CAM_SCSI_STATUS_ERROR)
639 				scsi_sense_print(&csio);
640 			else
641 				printf("status == 0x%x, scsi status == 0x%x\n",
642 				       csio.ccb_h.status, csio.scsi_status);
643 			return(EIO);
644 		}
645 
646 		if (addr % (1024 * 1024) == 0) {
647 #ifdef	HW_WDOG
648 			if (wdog_tickler)
649 				(*wdog_tickler)();
650 #endif /* HW_WDOG */
651 			/* Count in MB of data left to write */
652 			printf("%d ", (num  * softc->params.secsize)
653 				     / (1024 * 1024));
654 		}
655 
656 		/* update block count */
657 		num -= blkcnt;
658 		blknum += blkcnt;
659 		addr += PAGE_SIZE;
660 
661 		/* operator aborting dump? */
662 		if (cncheckc() != -1)
663 			return (EINTR);
664 	}
665 
666 	/*
667 	 * Sync the disk cache contents to the physical media.
668 	 */
669 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
670 
671 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
672 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
673 		scsi_synchronize_cache(&csio,
674 				       /*retries*/1,
675 				       /*cbfcnp*/dadone,
676 				       MSG_SIMPLE_Q_TAG,
677 				       /*begin_lba*/0,/* Cover the whole disk */
678 				       /*lb_count*/0,
679 				       SSD_FULL_SIZE,
680 				       5 * 60 * 1000);
681 		xpt_polled_action((union ccb *)&csio);
682 
683 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
684 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
685 			     CAM_SCSI_STATUS_ERROR) {
686 				int asc, ascq;
687 				int sense_key, error_code;
688 
689 				scsi_extract_sense(&csio.sense_data,
690 						   &error_code,
691 						   &sense_key,
692 						   &asc, &ascq);
693 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
694 					scsi_sense_print(&csio);
695 			} else {
696 				xpt_print_path(periph->path);
697 				printf("Synchronize cache failed, status "
698 				       "== 0x%x, scsi status == 0x%x\n",
699 				       csio.ccb_h.status, csio.scsi_status);
700 			}
701 		}
702 	}
703 	return (0);
704 }
705 
706 static void
707 dainit(void)
708 {
709 	cam_status status;
710 	struct cam_path *path;
711 
712 	/*
713 	 * Create our extend array for storing the devices we attach to.
714 	 */
715 	daperiphs = cam_extend_new();
716 	SLIST_INIT(&softc_list);
717 	if (daperiphs == NULL) {
718 		printf("da: Failed to alloc extend array!\n");
719 		return;
720 	}
721 
722 	/*
723 	 * Install a global async callback.  This callback will
724 	 * receive async callbacks like "new device found".
725 	 */
726 	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
727 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
728 
729 	if (status == CAM_REQ_CMP) {
730 		struct ccb_setasync csa;
731 
732 		xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
733 		csa.ccb_h.func_code = XPT_SASYNC_CB;
734 		csa.event_enable = AC_FOUND_DEVICE;
735 		csa.callback = daasync;
736 		csa.callback_arg = NULL;
737 		xpt_action((union ccb *)&csa);
738 		status = csa.ccb_h.status;
739 		xpt_free_path(path);
740 	}
741 
742 	if (status != CAM_REQ_CMP) {
743 		printf("da: Failed to attach master async callback "
744 		       "due to status 0x%x!\n", status);
745 	} else {
746 
747 		/*
748 		 * Schedule a periodic event to occasionally send an
749 		 * ordered tag to a device.
750 		 */
751 		timeout(dasendorderedtag, NULL,
752 			(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
753 
754 		/* Register our shutdown event handler */
755 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
756 					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
757 		    printf("dainit: shutdown event registration failed!\n");
758 	}
759 }
760 
761 static void
762 daoninvalidate(struct cam_periph *periph)
763 {
764 	int s;
765 	struct da_softc *softc;
766 	struct buf *q_bp;
767 	struct ccb_setasync csa;
768 
769 	softc = (struct da_softc *)periph->softc;
770 
771 	/*
772 	 * De-register any async callbacks.
773 	 */
774 	xpt_setup_ccb(&csa.ccb_h, periph->path,
775 		      /* priority */ 5);
776 	csa.ccb_h.func_code = XPT_SASYNC_CB;
777 	csa.event_enable = 0;
778 	csa.callback = daasync;
779 	csa.callback_arg = periph;
780 	xpt_action((union ccb *)&csa);
781 
782 	softc->flags |= DA_FLAG_PACK_INVALID;
783 
784 	/*
785 	 * Although the oninvalidate() routines are always called at
786 	 * splsoftcam, we need to be at splbio() here to keep the buffer
787 	 * queue from being modified while we traverse it.
788 	 */
789 	s = splbio();
790 
791 	/*
792 	 * Return all queued I/O with ENXIO.
793 	 * XXX Handle any transactions queued to the card
794 	 *     with XPT_ABORT_CCB.
795 	 */
796 	while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){
797 		bufq_remove(&softc->buf_queue, q_bp);
798 		q_bp->b_resid = q_bp->b_bcount;
799 		q_bp->b_error = ENXIO;
800 		q_bp->b_flags |= B_ERROR;
801 		biodone(q_bp);
802 	}
803 	splx(s);
804 
805 	SLIST_REMOVE(&softc_list, softc, da_softc, links);
806 
807 	xpt_print_path(periph->path);
808 	printf("lost device\n");
809 }
810 
811 static void
812 dacleanup(struct cam_periph *periph)
813 {
814 	struct da_softc *softc;
815 
816 	softc = (struct da_softc *)periph->softc;
817 
818 	devstat_remove_entry(&softc->device_stats);
819 	cam_extend_release(daperiphs, periph->unit_number);
820 	xpt_print_path(periph->path);
821 	printf("removing device entry\n");
822 	free(softc, M_DEVBUF);
823 }
824 
825 static void
826 daasync(void *callback_arg, u_int32_t code,
827 	struct cam_path *path, void *arg)
828 {
829 	struct cam_periph *periph;
830 
831 	periph = (struct cam_periph *)callback_arg;
832 	switch (code) {
833 	case AC_FOUND_DEVICE:
834 	{
835 		struct ccb_getdev *cgd;
836 		cam_status status;
837 
838 		cgd = (struct ccb_getdev *)arg;
839 
840 		if ((cgd->pd_type != T_DIRECT) && (cgd->pd_type != T_OPTICAL))
841 			break;
842 
843 		/*
844 		 * Allocate a peripheral instance for
845 		 * this device and start the probe
846 		 * process.
847 		 */
848 		status = cam_periph_alloc(daregister, daoninvalidate,
849 					  dacleanup, dastart,
850 					  "da", CAM_PERIPH_BIO,
851 					  cgd->ccb_h.path, daasync,
852 					  AC_FOUND_DEVICE, cgd);
853 
854 		if (status != CAM_REQ_CMP
855 		 && status != CAM_REQ_INPROG)
856 			printf("daasync: Unable to attach to new device "
857 				"due to status 0x%x\n", status);
858 		break;
859 	}
860 	case AC_SENT_BDR:
861 	case AC_BUS_RESET:
862 	{
863 		struct da_softc *softc;
864 		struct ccb_hdr *ccbh;
865 		int s;
866 
867 		softc = (struct da_softc *)periph->softc;
868 		s = splsoftcam();
869 		/*
870 		 * Don't fail on the expected unit attention
871 		 * that will occur.
872 		 */
873 		softc->flags |= DA_FLAG_RETRY_UA;
874 		for (ccbh = LIST_FIRST(&softc->pending_ccbs);
875 		     ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
876 			ccbh->ccb_state |= DA_CCB_RETRY_UA;
877 		splx(s);
878 		/* FALLTHROUGH*/
879 	}
880 	default:
881 		cam_periph_async(periph, code, path, arg);
882 		break;
883 	}
884 }
885 
886 static cam_status
887 daregister(struct cam_periph *periph, void *arg)
888 {
889 	int s;
890 	struct da_softc *softc;
891 	struct ccb_setasync csa;
892 	struct ccb_getdev *cgd;
893 	caddr_t match;
894 
895 	cgd = (struct ccb_getdev *)arg;
896 	if (periph == NULL) {
897 		printf("daregister: periph was NULL!!\n");
898 		return(CAM_REQ_CMP_ERR);
899 	}
900 
901 	if (cgd == NULL) {
902 		printf("daregister: no getdev CCB, can't register device\n");
903 		return(CAM_REQ_CMP_ERR);
904 	}
905 
906 	softc = (struct da_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);
907 
908 	if (softc == NULL) {
909 		printf("daregister: Unable to probe new device. "
910 		       "Unable to allocate softc\n");
911 		return(CAM_REQ_CMP_ERR);
912 	}
913 
914 	bzero(softc, sizeof(*softc));
915 	LIST_INIT(&softc->pending_ccbs);
916 	softc->state = DA_STATE_PROBE;
917 	bufq_init(&softc->buf_queue);
918 	if (SID_IS_REMOVABLE(&cgd->inq_data))
919 		softc->flags |= DA_FLAG_PACK_REMOVABLE;
920 	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
921 		softc->flags |= DA_FLAG_TAGGED_QUEUING;
922 
923 	periph->softc = softc;
924 
925 	cam_extend_set(daperiphs, periph->unit_number, periph);
926 
927 	/*
928 	 * See if this device has any quirks.
929 	 */
930 	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
931 			       (caddr_t)da_quirk_table,
932 			       sizeof(da_quirk_table)/sizeof(*da_quirk_table),
933 			       sizeof(*da_quirk_table), scsi_inquiry_match);
934 
935 	if (match != NULL)
936 		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
937 	else
938 		softc->quirks = DA_Q_NONE;
939 
940 	if (softc->quirks & DA_Q_NO_6_BYTE)
941 		softc->minimum_cmd_size = 10;
942 	else
943 		softc->minimum_cmd_size = 6;
944 
945 	/*
946 	 * Block our timeout handler while we
947 	 * add this softc to the dev list.
948 	 */
949 	s = splsoftclock();
950 	SLIST_INSERT_HEAD(&softc_list, softc, links);
951 	splx(s);
952 
953 	/*
954 	 * The DA driver supports a blocksize, but
955 	 * we don't know the blocksize until we do
956 	 * a read capacity.  So, set a flag to
957 	 * indicate that the blocksize is
958 	 * unavailable right now.  We'll clear the
959 	 * flag as soon as we've done a read capacity.
960 	 */
961 	devstat_add_entry(&softc->device_stats, "da",
962 			  periph->unit_number, 0,
963 			  DEVSTAT_BS_UNAVAILABLE,
964 			  cgd->pd_type | DEVSTAT_TYPE_IF_SCSI,
965 			  DEVSTAT_PRIORITY_DA);
966 
967 	/*
968 	 * Register this media as a disk
969 	 */
970 	disk_create(periph->unit_number, &softc->disk, 0,
971 	    &da_cdevsw, &dadisk_cdevsw);
972 
973 	/*
974 	 * Add async callbacks for bus reset and
975 	 * bus device reset calls.  I don't bother
976 	 * checking if this fails as, in most cases,
977 	 * the system will function just fine without
978 	 * them and the only alternative would be to
979 	 * not attach the device on failure.
980 	 */
981 	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
982 	csa.ccb_h.func_code = XPT_SASYNC_CB;
983 	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
984 	csa.callback = daasync;
985 	csa.callback_arg = periph;
986 	xpt_action((union ccb *)&csa);
987 	/*
988 	 * Lock this peripheral until we are setup.
989 	 * This first call can't block
990 	 */
991 	(void)cam_periph_lock(periph, PRIBIO);
992 	xpt_schedule(periph, /*priority*/5);
993 
994 	return(CAM_REQ_CMP);
995 }
996 
997 static void
998 dastart(struct cam_periph *periph, union ccb *start_ccb)
999 {
1000 	struct da_softc *softc;
1001 
1002 	softc = (struct da_softc *)periph->softc;
1003 
1004 
1005 	switch (softc->state) {
1006 	case DA_STATE_NORMAL:
1007 	{
1008 		/* Pull a buffer from the queue and get going on it */
1009 		struct buf *bp;
1010 		int s;
1011 
1012 		/*
1013 		 * See if there is a buf with work for us to do.
1014 		 */
1015 		s = splbio();
1016 		bp = bufq_first(&softc->buf_queue);
1017 		if (periph->immediate_priority <= periph->pinfo.priority) {
1018 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
1019 					("queuing for immediate ccb\n"));
1020 			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
1021 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1022 					  periph_links.sle);
1023 			periph->immediate_priority = CAM_PRIORITY_NONE;
1024 			splx(s);
1025 			wakeup(&periph->ccb_list);
1026 		} else if (bp == NULL) {
1027 			splx(s);
1028 			xpt_release_ccb(start_ccb);
1029 		} else {
1030 			int oldspl;
1031 			u_int8_t tag_code;
1032 
1033 			bufq_remove(&softc->buf_queue, bp);
1034 
1035 			devstat_start_transaction(&softc->device_stats);
1036 
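			/*
			 * Use an ordered tag if the buffer asks for one or if
			 * the periodic ordered tag timer has flagged this unit.
			 */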
1037 			if ((bp->b_flags & B_ORDERED) != 0
1038 			 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
1039 				softc->flags &= ~DA_FLAG_NEED_OTAG;
1040 				softc->ordered_tag_count++;
1041 				tag_code = MSG_ORDERED_Q_TAG;
1042 			} else {
1043 				tag_code = MSG_SIMPLE_Q_TAG;
1044 			}
1045 			scsi_read_write(&start_ccb->csio,
1046 					/*retries*/4,
1047 					dadone,
1048 					tag_code,
1049 					bp->b_flags & B_READ,
1050 					/*byte2*/0,
1051 					softc->minimum_cmd_size,
1052 					bp->b_pblkno,
1053 					bp->b_bcount / softc->params.secsize,
1054 					bp->b_data,
1055 					bp->b_bcount,
1056 					/*sense_len*/SSD_FULL_SIZE,
1057 					DA_DEFAULT_TIMEOUT * 1000);
1058 			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
1059 
1060 			/*
1061 			 * Block out any asynchronous callbacks
1062 			 * while we touch the pending ccb list.
1063 			 */
1064 			oldspl = splcam();
1065 			LIST_INSERT_HEAD(&softc->pending_ccbs,
1066 					 &start_ccb->ccb_h, periph_links.le);
1067 			splx(oldspl);
1068 
1069 			/* We expect a unit attention from this device */
1070 			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
1071 				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
1072 				softc->flags &= ~DA_FLAG_RETRY_UA;
1073 			}
1074 
1075 			start_ccb->ccb_h.ccb_bp = bp;
1076 			bp = bufq_first(&softc->buf_queue);
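			/*
			 * Peek at the queue again; if more buffers remain we
			 * reschedule ourselves below.
			 */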
1077 			splx(s);
1078 
1079 			xpt_action(start_ccb);
1080 		}
1081 
1082 		if (bp != NULL) {
1083 			/* Have more work to do, so ensure we stay scheduled */
1084 			xpt_schedule(periph, /* XXX priority */1);
1085 		}
1086 		break;
1087 	}
1088 	case DA_STATE_PROBE:
1089 	{
1090 		struct ccb_scsiio *csio;
1091 		struct scsi_read_capacity_data *rcap;
1092 
1093 		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
1094 								M_TEMP,
1095 								M_NOWAIT);
1096 		if (rcap == NULL) {
1097 			printf("dastart: Couldn't malloc read_capacity data\n");
1098 			/* da_free_periph??? */
1099 			break;
1100 		}
1101 		csio = &start_ccb->csio;
1102 		scsi_read_capacity(csio,
1103 				   /*retries*/4,
1104 				   dadone,
1105 				   MSG_SIMPLE_Q_TAG,
1106 				   rcap,
1107 				   SSD_FULL_SIZE,
1108 				   /*timeout*/5000);
1109 		start_ccb->ccb_h.ccb_bp = NULL;
1110 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
1111 		xpt_action(start_ccb);
1112 		break;
1113 	}
1114 	}
1115 }
1116 
1117 
1118 static void
1119 dadone(struct cam_periph *periph, union ccb *done_ccb)
1120 {
1121 	struct da_softc *softc;
1122 	struct ccb_scsiio *csio;
1123 
1124 	softc = (struct da_softc *)periph->softc;
1125 	csio = &done_ccb->csio;
1126 	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
1127 	case DA_CCB_BUFFER_IO:
1128 	{
1129 		struct buf *bp;
1130 		int    oldspl;
1131 
1132 		bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
1133 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1134 			int error;
1135 			int s;
1136 			int sf;
1137 
1138 			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
1139 				sf = SF_RETRY_UA;
1140 			else
1141 				sf = 0;
1142 
1143 			/* Retry selection timeouts */
1144 			sf |= SF_RETRY_SELTO;
1145 
1146 			if ((error = daerror(done_ccb, 0, sf)) == ERESTART) {
1147 				/*
1148 				 * A retry was scheduled, so
1149 				 * just return.
1150 				 */
1151 				return;
1152 			}
1153 			if (error != 0) {
1154 				struct buf *q_bp;
1155 
1156 				s = splbio();
1157 
1158 				if (error == ENXIO) {
1159 					/*
1160 					 * Catastrophic error.  Mark our pack as
1161 					 * invalid.
1162 					 */
1163 					/* XXX See if this is really a media
1164 					 *     change first.
1165 					 */
1166 					xpt_print_path(periph->path);
1167 					printf("Invalidating pack\n");
1168 					softc->flags |= DA_FLAG_PACK_INVALID;
1169 				}
1170 
1171 				/*
1172 				 * return all queued I/O with EIO, so that
1173 				 * the client can retry these I/Os in the
1174 				 * proper order should it attempt to recover.
1175 				 */
1176 				while ((q_bp = bufq_first(&softc->buf_queue))
1177 					!= NULL) {
1178 					bufq_remove(&softc->buf_queue, q_bp);
1179 					q_bp->b_resid = q_bp->b_bcount;
1180 					q_bp->b_error = EIO;
1181 					q_bp->b_flags |= B_ERROR;
1182 					biodone(q_bp);
1183 				}
1184 				splx(s);
1185 				bp->b_error = error;
1186 				bp->b_resid = bp->b_bcount;
1187 				bp->b_flags |= B_ERROR;
1188 			} else {
1189 				bp->b_resid = csio->resid;
1190 				bp->b_error = 0;
1191 				if (bp->b_resid != 0) {
1192 					/* Short transfer ??? */
1193 					bp->b_flags |= B_ERROR;
1194 				}
1195 			}
1196 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1197 				cam_release_devq(done_ccb->ccb_h.path,
1198 						 /*relsim_flags*/0,
1199 						 /*reduction*/0,
1200 						 /*timeout*/0,
1201 						 /*getcount_only*/0);
1202 		} else {
1203 			bp->b_resid = csio->resid;
1204 			if (csio->resid > 0)
1205 				bp->b_flags |= B_ERROR;
1206 		}
1207 
1208 		/*
1209 		 * Block out any asynchronous callbacks
1210 		 * while we touch the pending ccb list.
1211 		 */
1212 		oldspl = splcam();
1213 		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
1214 		splx(oldspl);
1215 
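		/*
		 * Note when the unit goes idle so dasendorderedtag() won't
		 * force an ordered tag on it.
		 */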
1216 		if (softc->device_stats.busy_count == 0)
1217 			softc->flags |= DA_FLAG_WENT_IDLE;
1218 
1219 		devstat_end_transaction_buf(&softc->device_stats, bp);
1220 		biodone(bp);
1221 		break;
1222 	}
1223 	case DA_CCB_PROBE:
1224 	{
1225 		struct	   scsi_read_capacity_data *rdcap;
1226 		char	   announce_buf[80];
1227 
1228 		rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;
1229 
1230 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1231 			struct disk_params *dp;
1232 
1233 			dasetgeom(periph, rdcap);
1234 			dp = &softc->params;
1235 			snprintf(announce_buf, sizeof(announce_buf),
1236 			        "%luMB (%u %u byte sectors: %dH %dS/T %dC)",
1237 				(unsigned long) (((u_int64_t)dp->secsize *
1238 				dp->sectors) / (1024*1024)), dp->sectors,
1239 				dp->secsize, dp->heads, dp->secs_per_track,
1240 				dp->cylinders);
1241 		} else {
1242 			int	error;
1243 
1244 			announce_buf[0] = '\0';
1245 
1246 			/*
1247 			 * Retry any UNIT ATTENTION type errors.  They
1248 			 * are expected at boot.
1249 			 */
1250 			error = daerror(done_ccb, 0, SF_RETRY_UA |
1251 					SF_RETRY_SELTO | SF_NO_PRINT);
1252 			if (error == ERESTART) {
1253 				/*
1254 				 * A retry was scheduled, so
1255 				 * just return.
1256 				 */
1257 				return;
1258 			} else if (error != 0) {
1259 				struct scsi_sense_data *sense;
1260 				int asc, ascq;
1261 				int sense_key, error_code;
1262 				int have_sense;
1263 				cam_status status;
1264 				struct ccb_getdev cgd;
1265 
1266 				/* Don't wedge this device's queue */
1267 				cam_release_devq(done_ccb->ccb_h.path,
1268 						 /*relsim_flags*/0,
1269 						 /*reduction*/0,
1270 						 /*timeout*/0,
1271 						 /*getcount_only*/0);
1272 
1273 				status = done_ccb->ccb_h.status;
1274 
1275 				xpt_setup_ccb(&cgd.ccb_h,
1276 					      done_ccb->ccb_h.path,
1277 					      /* priority */ 1);
1278 				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1279 				xpt_action((union ccb *)&cgd);
1280 
1281 				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
1282 				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
1283 				 || ((status & CAM_AUTOSNS_VALID) == 0))
1284 					have_sense = FALSE;
1285 				else
1286 					have_sense = TRUE;
1287 
1288 				if (have_sense) {
1289 					sense = &csio->sense_data;
1290 					scsi_extract_sense(sense, &error_code,
1291 							   &sense_key,
1292 							   &asc, &ascq);
1293 				}
1294 				/*
1295 				 * Attach to anything that claims to be a
1296 				 * direct access or optical disk device,
1297 				 * as long as it doesn't return a "Logical
1298 				 * unit not supported" (0x25) error.
1299 				 */
1300 				if ((have_sense) && (asc != 0x25)
1301 				 && (error_code == SSD_CURRENT_ERROR))
1302 					snprintf(announce_buf,
1303 					    sizeof(announce_buf),
1304 						"Attempt to query device "
1305 						"size failed: %s, %s",
1306 						scsi_sense_key_text[sense_key],
1307 						scsi_sense_desc(asc,ascq,
1308 								&cgd.inq_data));
1309 				else {
1310 					if (have_sense)
1311 						scsi_sense_print(
1312 							&done_ccb->csio);
1313 					else {
1314 						xpt_print_path(periph->path);
1315 						printf("got CAM status %#x\n",
1316 						       done_ccb->ccb_h.status);
1317 					}
1318 
1319 					xpt_print_path(periph->path);
1320 					printf("fatal error, failed"
1321 					       " to attach to device\n");
1322 
1323 					/*
1324 					 * Free up resources.
1325 					 */
1326 					cam_periph_invalidate(periph);
1327 				}
1328 			}
1329 		}
1330 		free(rdcap, M_TEMP);
1331 		if (announce_buf[0] != '\0')
1332 			xpt_announce_periph(periph, announce_buf);
1333 		softc->state = DA_STATE_NORMAL;
1334 		/*
1335 		 * Since our peripheral may be invalidated by an error
1336 		 * above or an external event, we must release our CCB
1337 		 * before releasing the probe lock on the peripheral.
1338 		 * The peripheral will only go away once the last lock
1339 		 * is removed, and we need it around for the CCB release
1340 		 * operation.
1341 		 */
1342 		xpt_release_ccb(done_ccb);
1343 		cam_periph_unlock(periph);
1344 		return;
1345 	}
1346 	case DA_CCB_WAITING:
1347 	{
1348 		/* Caller will release the CCB */
1349 		wakeup(&done_ccb->ccb_h.cbfcnp);
1350 		return;
1351 	}
1352 	case DA_CCB_DUMP:
1353 		/* No-op.  We're polling */
1354 		return;
1355 	default:
1356 		break;
1357 	}
1358 	xpt_release_ccb(done_ccb);
1359 }
1360 
1361 static int
1362 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1363 {
1364 	struct da_softc	  *softc;
1365 	struct cam_periph *periph;
1366 
1367 	periph = xpt_path_periph(ccb->ccb_h.path);
1368 	softc = (struct da_softc *)periph->softc;
1369 
1370 	/*
1371 	 * XXX
1372 	 * Until we have a better way of doing pack validation,
1373 	 * don't treat UAs as errors.
1374 	 */
1375 	sense_flags |= SF_RETRY_UA;
1376 	return(cam_periph_error(ccb, cam_flags, sense_flags,
1377 				&softc->saved_ccb));
1378 }
1379 
1380 static void
1381 daprevent(struct cam_periph *periph, int action)
1382 {
1383 	struct	da_softc *softc;
1384 	union	ccb *ccb;
1385 	int	error;
1386 
1387 	softc = (struct da_softc *)periph->softc;
1388 
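	/* Nothing to do if the pack is already in the requested state. */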
1389 	if (((action == PR_ALLOW)
1390 	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
1391 	 || ((action == PR_PREVENT)
1392 	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
1393 		return;
1394 	}
1395 
1396 	ccb = cam_periph_getccb(periph, /*priority*/1);
1397 
1398 	scsi_prevent(&ccb->csio,
1399 		     /*retries*/1,
1400 		     /*cbfcnp*/dadone,
1401 		     MSG_SIMPLE_Q_TAG,
1402 		     action,
1403 		     SSD_FULL_SIZE,
1404 		     5000);
1405 
1406 	error = cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
1407 				  /*sense_flags*/0, &softc->device_stats);
1408 
1409 	if (error == 0) {
1410 		if (action == PR_ALLOW)
1411 			softc->flags &= ~DA_FLAG_PACK_LOCKED;
1412 		else
1413 			softc->flags |= DA_FLAG_PACK_LOCKED;
1414 	}
1415 
1416 	xpt_release_ccb(ccb);
1417 }
1418 
1419 static void
1420 dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data * rdcap)
1421 {
1422 	struct ccb_calc_geometry ccg;
1423 	struct da_softc *softc;
1424 	struct disk_params *dp;
1425 
1426 	softc = (struct da_softc *)periph->softc;
1427 
1428 	dp = &softc->params;
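	/*
	 * READ CAPACITY returns the block length and the address of the
	 * last LBA, so the total sector count is that address plus one.
	 */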
1429 	dp->secsize = scsi_4btoul(rdcap->length);
1430 	dp->sectors = scsi_4btoul(rdcap->addr) + 1;
1431 	/*
1432 	 * Have the controller provide us with a geometry
1433 	 * for this disk.  The only time the geometry
1434 	 * matters is when we boot and the controller
1435 	 * is the only one knowledgeable enough to come
1436 	 * up with something that will make this a bootable
1437 	 * device.
1438 	 */
1439 	xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
1440 	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
1441 	ccg.block_size = dp->secsize;
1442 	ccg.volume_size = dp->sectors;
1443 	ccg.heads = 0;
1444 	ccg.secs_per_track = 0;
1445 	ccg.cylinders = 0;
1446 	xpt_action((union ccb*)&ccg);
1447 	dp->heads = ccg.heads;
1448 	dp->secs_per_track = ccg.secs_per_track;
1449 	dp->cylinders = ccg.cylinders;
1450 }
1451 
1452 static void
1453 dasendorderedtag(void *arg)
1454 {
1455 	struct da_softc *softc;
1456 	int s;
1457 
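	/*
	 * For each unit that stayed busy through an entire interval without
	 * issuing an ordered tag, ask dastart() to send one on the next I/O.
	 */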
1458 	for (softc = SLIST_FIRST(&softc_list);
1459 	     softc != NULL;
1460 	     softc = SLIST_NEXT(softc, links)) {
1461 		s = splsoftcam();
1462 		if ((softc->ordered_tag_count == 0)
1463 		 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
1464 			softc->flags |= DA_FLAG_NEED_OTAG;
1465 		}
1466 		if (softc->device_stats.busy_count > 0)
1467 			softc->flags &= ~DA_FLAG_WENT_IDLE;
1468 
1469 		softc->ordered_tag_count = 0;
1470 		splx(s);
1471 	}
1472 	/* Queue us up again */
1473 	timeout(dasendorderedtag, NULL,
1474 		(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
1475 }
1476 
1477 /*
1478  * Step through all DA peripheral drivers, and if the device is still open,
1479  * sync the disk cache to physical media.
1480  */
1481 static void
1482 dashutdown(void * arg, int howto)
1483 {
1484 	struct cam_periph *periph;
1485 	struct da_softc *softc;
1486 
1487 	for (periph = TAILQ_FIRST(&dadriver.units); periph != NULL;
1488 	     periph = TAILQ_NEXT(periph, unit_links)) {
1489 		union ccb ccb;
1490 		softc = (struct da_softc *)periph->softc;
1491 
1492 		/*
1493 		 * We only sync the cache if the drive is still open, and
1494 		 * if the drive is capable of it..
1495 		 * if the drive is capable of it.
1496 		if (((softc->flags & DA_FLAG_OPEN) == 0)
1497 		 || (softc->quirks & DA_Q_NO_SYNC_CACHE))
1498 			continue;
1499 
1500 		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
1501 
1502 		ccb.ccb_h.ccb_state = DA_CCB_DUMP;
1503 		scsi_synchronize_cache(&ccb.csio,
1504 				       /*retries*/1,
1505 				       /*cbfcnp*/dadone,
1506 				       MSG_SIMPLE_Q_TAG,
1507 				       /*begin_lba*/0, /* whole disk */
1508 				       /*lb_count*/0,
1509 				       SSD_FULL_SIZE,
1510 				       5 * 60 * 1000);
1511 
1512 		xpt_polled_action(&ccb);
1513 
1514 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1515 			if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
1516 			     CAM_SCSI_STATUS_ERROR)
1517 			 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
1518 				int error_code, sense_key, asc, ascq;
1519 
1520 				scsi_extract_sense(&ccb.csio.sense_data,
1521 						   &error_code, &sense_key,
1522 						   &asc, &ascq);
1523 
1524 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
1525 					scsi_sense_print(&ccb.csio);
1526 			} else {
1527 				xpt_print_path(periph->path);
1528 				printf("Synchronize cache failed, status "
1529 				       "== 0x%x, scsi status == 0x%x\n",
1530 				       ccb.ccb_h.status, ccb.csio.scsi_status);
1531 			}
1532 		}
1533 
1534 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1535 			cam_release_devq(ccb.ccb_h.path,
1536 					 /*relsim_flags*/0,
1537 					 /*reduction*/0,
1538 					 /*timeout*/0,
1539 					 /*getcount_only*/0);
1540 
1541 	}
1542 }
1543