xref: /freebsd/sys/cam/scsi/scsi_da.c (revision 49ff4debd3d4c155448b2b4e3b95b17d9eb575ed)
1 /*
2  * Implementation of SCSI Direct Access Peripheral driver for CAM.
3  *
4  * Copyright (c) 1997 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  *      $Id: scsi_da.c,v 1.31 1999/08/09 10:34:30 phk Exp $
29  */
30 
31 #include "opt_hw_wdog.h"
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/buf.h>
37 #include <sys/devicestat.h>
38 #include <sys/dkbad.h>
39 #include <sys/disklabel.h>
40 #include <sys/diskslice.h>
41 #include <sys/malloc.h>
42 #include <sys/conf.h>
43 #include <sys/cons.h>
44 
45 #include <machine/md_var.h>
46 
47 #include <vm/vm.h>
48 #include <vm/vm_prot.h>
49 #include <vm/pmap.h>
50 
51 #include <cam/cam.h>
52 #include <cam/cam_ccb.h>
53 #include <cam/cam_extend.h>
54 #include <cam/cam_periph.h>
55 #include <cam/cam_xpt_periph.h>
56 
57 #include <cam/scsi/scsi_message.h>
58 
59 typedef enum {
60 	DA_STATE_PROBE,
61 	DA_STATE_NORMAL
62 } da_state;
63 
64 typedef enum {
65 	DA_FLAG_PACK_INVALID	= 0x001,
66 	DA_FLAG_NEW_PACK	= 0x002,
67 	DA_FLAG_PACK_LOCKED	= 0x004,
68 	DA_FLAG_PACK_REMOVABLE	= 0x008,
69 	DA_FLAG_TAGGED_QUEUING	= 0x010,
70 	DA_FLAG_NEED_OTAG	= 0x020,
71 	DA_FLAG_WENT_IDLE	= 0x040,
72 	DA_FLAG_RETRY_UA	= 0x080,
73 	DA_FLAG_OPEN		= 0x100
74 } da_flags;
75 
76 typedef enum {
77 	DA_Q_NONE		= 0x00,
78 	DA_Q_NO_SYNC_CACHE	= 0x01,
79 	DA_Q_NO_6_BYTE		= 0x02
80 } da_quirks;
81 
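/*
 * Per-CCB state kept in the ccb_state field below.  The low nibble
 * (DA_CCB_TYPE_MASK) identifies the kind of request the CCB carries;
 * DA_CCB_RETRY_UA is OR'd in as a flag when a unit attention is
 * expected (e.g. after a bus reset) and should be retried rather than
 * reported as an error.
 */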
82 typedef enum {
83 	DA_CCB_PROBE		= 0x01,
84 	DA_CCB_BUFFER_IO	= 0x02,
85 	DA_CCB_WAITING		= 0x03,
86 	DA_CCB_DUMP		= 0x04,
87 	DA_CCB_TYPE_MASK	= 0x0F,
88 	DA_CCB_RETRY_UA		= 0x10
89 } da_ccb_state;
90 
91 /* Offsets into our private area for storing information */
92 #define ccb_state	ppriv_field0
93 #define ccb_bp		ppriv_ptr1
94 
95 struct disk_params {
96 	u_int8_t  heads;
97 	u_int16_t cylinders;
98 	u_int8_t  secs_per_track;
99 	u_int32_t secsize;	/* Number of bytes/sector */
100 	u_int32_t sectors;	/* total number sectors */
101 };
102 
103 struct da_softc {
104 	struct	 buf_queue_head buf_queue;
105 	struct	 devstat device_stats;
106 	SLIST_ENTRY(da_softc) links;
107 	LIST_HEAD(, ccb_hdr) pending_ccbs;
108 	da_state state;
109 	da_flags flags;
110 	da_quirks quirks;
111 	int	 minimum_cmd_size;
112 	int	 ordered_tag_count;
113 	struct	 disk_params params;
114 	struct	 diskslices *dk_slices;	/* virtual drives */
115 	union	 ccb saved_ccb;
116 };
117 
118 struct da_quirk_entry {
119 	struct scsi_inquiry_pattern inq_pat;
120 	da_quirks quirks;
121 };
122 
123 static struct da_quirk_entry da_quirk_table[] =
124 {
125 	{
126 		/*
127 		 * This particular Fujitsu drive doesn't like the
128 		 * synchronize cache command.
129 		 * Reported by: Tom Jackson <toj@gorilla.net>
130 		 */
131 		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
132 		/*quirks*/ DA_Q_NO_SYNC_CACHE
133 
134 	},
135 	{
136 		/*
137 		 * This drive doesn't like the synchronize cache command
138 		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
139 		 * in NetBSD PR kern/6027, August 24, 1998.
140 		 */
141 		{T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "2217*", "*"},
142 		/*quirks*/ DA_Q_NO_SYNC_CACHE
143 	},
144 	{
145 		/*
146 		 * This drive doesn't like the synchronize cache command
147 		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
148 		 * (PR 8882).
149 		 */
150 		{T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "2112*", "*"},
151 		/*quirks*/ DA_Q_NO_SYNC_CACHE
152 	},
153 	{
154 		/*
155 		 * Doesn't like the synchronize cache command.
156 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
157 		 */
158 		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
159 		/*quirks*/ DA_Q_NO_SYNC_CACHE
160 	},
161 	{
162 		/*
163 		 * Doesn't work correctly with 6 byte reads/writes.
164 		 * Returns illegal request, and points to byte 9 of the
165 		 * 6-byte CDB.
166 		 * Reported by:  Adam McDougall <bsdx@spawnet.com>
167 		 */
168 		{T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "VIKING 4*", "*"},
169 		/*quirks*/ DA_Q_NO_6_BYTE
170 	},
171 	{
172 		/*
173 		 * See above.
174 		 */
175 		{T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "VIKING 2*", "*"},
176 		/*quirks*/ DA_Q_NO_6_BYTE
177 	}
178 };
179 
180 static	d_open_t	daopen;
181 static	d_close_t	daclose;
182 static	d_strategy_t	dastrategy;
183 static	d_ioctl_t	daioctl;
184 static	d_dump_t	dadump;
185 static	d_psize_t	dasize;
186 static	periph_init_t	dainit;
187 static	void		daasync(void *callback_arg, u_int32_t code,
188 				struct cam_path *path, void *arg);
189 static	periph_ctor_t	daregister;
190 static	periph_dtor_t	dacleanup;
191 static	periph_start_t	dastart;
192 static	periph_oninv_t	daoninvalidate;
193 static	void		dadone(struct cam_periph *periph,
194 			       union ccb *done_ccb);
195 static  int		daerror(union ccb *ccb, u_int32_t cam_flags,
196 				u_int32_t sense_flags);
197 static void		daprevent(struct cam_periph *periph, int action);
198 static void		dasetgeom(struct cam_periph *periph,
199 				  struct scsi_read_capacity_data * rdcap);
200 static timeout_t	dasendorderedtag;
201 static void		dashutdown(int howto, void *arg);
202 
203 #ifndef DA_DEFAULT_TIMEOUT
204 #define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
205 #endif
206 
207 /*
208  * DA_ORDEREDTAG_INTERVAL determines how often, relative
209  * to the default timeout, we check to see whether an ordered
210  * tagged transaction is appropriate to prevent simple tag
211  * starvation.  Since we'd like to ensure that there is at least
212  * 1/2 of the timeout length left for a starved transaction to
213  * complete after we've sent an ordered tag, we must poll at least
214  * four times in every timeout period.  This takes care of the worst
215  * case where a starved transaction starts during an interval that
216  * passes the "don't send an ordered tag" test, so it takes
217  * us two intervals to determine that a tag must be sent.
218  */
219 #ifndef DA_ORDEREDTAG_INTERVAL
220 #define DA_ORDEREDTAG_INTERVAL 4
221 #endif
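/*
 * Concretely: with DA_DEFAULT_TIMEOUT at 60 seconds and an interval
 * divisor of 4, dasendorderedtag() runs every (60 * hz) / 4 ticks,
 * i.e. every 15 seconds.  A request that starves through one complete
 * interval causes the next queued I/O to be issued with an ordered tag
 * (at most ~30 seconds after the starvation began), and the ordered tag
 * forces all older simple-tagged requests to finish ahead of it, leaving
 * roughly half of the 60 second timeout for the starved request.
 */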
222 
223 static struct periph_driver dadriver =
224 {
225 	dainit, "da",
226 	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
227 };
228 
229 DATA_SET(periphdriver_set, dadriver);
230 
231 #define DA_CDEV_MAJOR 13
232 #define DA_BDEV_MAJOR 4
233 
234 /* For 2.2-stable support */
235 #ifndef D_DISK
236 #define D_DISK 0
237 #endif
238 
239 static struct cdevsw da_cdevsw = {
240 	/* open */	daopen,
241 	/* close */	daclose,
242 	/* read */	physread,
243 	/* write */	physwrite,
244 	/* ioctl */	daioctl,
245 	/* stop */	nostop,
246 	/* reset */	noreset,
247 	/* devtotty */	nodevtotty,
248 	/* poll */	nopoll,
249 	/* mmap */	nommap,
250 	/* strategy */	dastrategy,
251 	/* name */	"da",
252 	/* parms */	noparms,
253 	/* maj */	DA_CDEV_MAJOR,
254 	/* dump */	dadump,
255 	/* psize */	dasize,
256 	/* flags */	D_DISK,
257 	/* maxio */	0,
258 	/* bmaj */	DA_BDEV_MAJOR
259 };
260 
261 static SLIST_HEAD(,da_softc) softc_list;
262 static struct extend_array *daperiphs;
263 
264 static int
265 daopen(dev_t dev, int flags, int fmt, struct proc *p)
266 {
267 	struct cam_periph *periph;
268 	struct da_softc *softc;
269 	struct disklabel label;
270 	int unit;
271 	int part;
272 	int error;
273 	int s;
274 
275 	unit = dkunit(dev);
276 	part = dkpart(dev);
277 	periph = cam_extend_get(daperiphs, unit);
278 	if (periph == NULL)
279 		return (ENXIO);
280 
281 	softc = (struct da_softc *)periph->softc;
282 
283 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
284 	    ("daopen: dev=0x%lx (unit %d , partition %d)\n", (long) dev,
285 	     unit, part));
286 
287 	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
288 		return (error); /* error code from tsleep */
289 	}
290 
291 	if ((softc->flags & DA_FLAG_OPEN) == 0) {
292 		if (cam_periph_acquire(periph) != CAM_REQ_CMP)
293 			return(ENXIO);
294 		softc->flags |= DA_FLAG_OPEN;
295 	}
296 
297 	s = splsoftcam();
298 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
299 		/*
300 		 * If any partition is still open even though the disk
301 		 * has been invalidated, disallow further opens.
302 		 */
303 		if (dsisopen(softc->dk_slices)) {
304 			splx(s);
305 			cam_periph_unlock(periph);
306 			return (ENXIO);
307 		}
308 
309 		/* Invalidate our pack information. */
310 		dsgone(&softc->dk_slices);
311 		softc->flags &= ~DA_FLAG_PACK_INVALID;
312 	}
313 	splx(s);
314 
315 	/* Do a read capacity */
316 	{
317 		struct scsi_read_capacity_data *rcap;
318 		union  ccb *ccb;
319 
320 		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
321 								M_TEMP,
322 								M_WAITOK);
323 
324 		ccb = cam_periph_getccb(periph, /*priority*/1);
325 		scsi_read_capacity(&ccb->csio,
326 				   /*retries*/1,
327 				   /*cbfcnp*/dadone,
328 				   MSG_SIMPLE_Q_TAG,
329 				   rcap,
330 				   SSD_FULL_SIZE,
331 				   /*timeout*/60000);
332 		ccb->ccb_h.ccb_bp = NULL;
333 
334 		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
335 					  /*sense_flags*/SF_RETRY_UA |
336 							 SF_RETRY_SELTO,
337 					  &softc->device_stats);
338 
339 		xpt_release_ccb(ccb);
340 
341 		if (error == 0) {
342 			dasetgeom(periph, rcap);
343 		}
344 
345 		free(rcap, M_TEMP);
346 	}
347 
348 	if (error == 0) {
349 		struct ccb_getdev cgd;
350 
351 		/* Build label for whole disk. */
352 		bzero(&label, sizeof(label));
353 		label.d_type = DTYPE_SCSI;
354 
355 		/*
356 		 * Grab the inquiry data to get the vendor and product names.
357 		 * Put them in the typename and packname for the label.
358 		 */
359 		xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
360 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
361 		xpt_action((union ccb *)&cgd);
362 
363 		strncpy(label.d_typename, cgd.inq_data.vendor,
364 			min(SID_VENDOR_SIZE, sizeof(label.d_typename)));
365 		strncpy(label.d_packname, cgd.inq_data.product,
366 			min(SID_PRODUCT_SIZE, sizeof(label.d_packname)));
367 
368 		label.d_secsize = softc->params.secsize;
369 		label.d_nsectors = softc->params.secs_per_track;
370 		label.d_ntracks = softc->params.heads;
371 		label.d_ncylinders = softc->params.cylinders;
372 		label.d_secpercyl = softc->params.heads
373 				  * softc->params.secs_per_track;
374 		label.d_secperunit = softc->params.sectors;
375 
376 		if ((dsisopen(softc->dk_slices) == 0)
377 		    && ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) {
378 			daprevent(periph, PR_PREVENT);
379 		}
380 
381 		/* Initialize slice tables. */
382 		error = dsopen("da", dev, fmt, 0, &softc->dk_slices, &label);
383 
384 		/*
385 		 * Check to see whether or not the blocksize is set yet.
386 		 * If it isn't, set it and then clear the blocksize
387 		 * unavailable flag for the device statistics.
388 		 */
389 		if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
390 			softc->device_stats.block_size = softc->params.secsize;
391 			softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
392 		}
393 	}
394 
395 	if (error != 0) {
396 		if ((dsisopen(softc->dk_slices) == 0)
397 		 && ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) {
398 			daprevent(periph, PR_ALLOW);
399 		}
400 	}
401 	cam_periph_unlock(periph);
402 	return (error);
403 }
404 
405 static int
406 daclose(dev_t dev, int flag, int fmt, struct proc *p)
407 {
408 	struct	cam_periph *periph;
409 	struct	da_softc *softc;
410 	int	unit;
411 	int	error;
412 
413 	unit = dkunit(dev);
414 	periph = cam_extend_get(daperiphs, unit);
415 	if (periph == NULL)
416 		return (ENXIO);
417 
418 	softc = (struct da_softc *)periph->softc;
419 
420 	if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
421 		return (error); /* error code from tsleep */
422 	}
423 
424 	dsclose(dev, fmt, softc->dk_slices);
425 	if (dsisopen(softc->dk_slices)) {
426 		cam_periph_unlock(periph);
427 		return (0);
428 	}
429 
430 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
431 		union	ccb *ccb;
432 
433 		ccb = cam_periph_getccb(periph, /*priority*/1);
434 
435 		scsi_synchronize_cache(&ccb->csio,
436 				       /*retries*/1,
437 				       /*cbfcnp*/dadone,
438 				       MSG_SIMPLE_Q_TAG,
439 				       /*begin_lba*/0,/* Cover the whole disk */
440 				       /*lb_count*/0,
441 				       SSD_FULL_SIZE,
442 				       5 * 60 * 1000);
443 
444 		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
445 				  /*sense_flags*/SF_RETRY_UA,
446 				  &softc->device_stats);
447 
448 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
449 			if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
450 			     CAM_SCSI_STATUS_ERROR) {
451 				int asc, ascq;
452 				int sense_key, error_code;
453 
454 				scsi_extract_sense(&ccb->csio.sense_data,
455 						   &error_code,
456 						   &sense_key,
457 						   &asc, &ascq);
458 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
459 					scsi_sense_print(&ccb->csio);
460 			} else {
461 				xpt_print_path(periph->path);
462 				printf("Synchronize cache failed, status "
463 				       "== 0x%x, scsi status == 0x%x\n",
464 				       ccb->csio.ccb_h.status,
465 				       ccb->csio.scsi_status);
466 			}
467 		}
468 
469 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
470 			cam_release_devq(ccb->ccb_h.path,
471 					 /*relsim_flags*/0,
472 					 /*reduction*/0,
473 					 /*timeout*/0,
474 					 /*getcount_only*/0);
475 
476 		xpt_release_ccb(ccb);
477 
478 	}
479 
480 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
481 		daprevent(periph, PR_ALLOW);
482 		/*
483 		 * If we've got removable media, mark the blocksize as
484 		 * unavailable, since it could change when new media is
485 		 * inserted.
486 		 */
487 		softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
488 	}
489 
490 	softc->flags &= ~DA_FLAG_OPEN;
491 	cam_periph_unlock(periph);
492 	cam_periph_release(periph);
493 	return (0);
494 }
495 
496 /*
497  * Actually translate the requested transfer into one the physical driver
498  * can understand.  The transfer is described by a buf and will include
499  * only one physical transfer.
500  */
501 static void
502 dastrategy(struct buf *bp)
503 {
504 	struct cam_periph *periph;
505 	struct da_softc *softc;
506 	u_int  unit;
507 	u_int  part;
508 	int    s;
509 
510 	unit = dkunit(bp->b_dev);
511 	part = dkpart(bp->b_dev);
512 	periph = cam_extend_get(daperiphs, unit);
513 	if (periph == NULL) {
514 		bp->b_error = ENXIO;
515 		goto bad;
516 	}
517 	softc = (struct da_softc *)periph->softc;
518 #if 0
519 	/*
520 	 * check it's not too big a transfer for our adapter
521 	 */
522 	scsi_minphys(bp,&sd_switch);
523 #endif
524 
525 	/*
526 	 * Do bounds checking, adjust the transfer, and set b_cylin and b_pblkno.
527 	 */
528 	if (dscheck(bp, softc->dk_slices) <= 0)
529 		goto done;
530 
531 	/*
532 	 * Mask interrupts so that the pack cannot be invalidated until
533 	 * after we are in the queue.  Otherwise, we might not properly
534 	 * clean up one of the buffers.
535 	 */
536 	s = splbio();
537 
538 	/*
539 	 * If the device has been made invalid, error out
540 	 */
541 	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
542 		splx(s);
543 		bp->b_error = ENXIO;
544 		goto bad;
545 	}
546 
547 	/*
548 	 * Place it in the queue of disk activities for this disk
549 	 */
550 	bufqdisksort(&softc->buf_queue, bp);
551 
552 	splx(s);
553 
554 	/*
555 	 * Schedule ourselves for performing the work.
556 	 */
557 	xpt_schedule(periph, /* XXX priority */1);
558 
559 	return;
560 bad:
561 	bp->b_flags |= B_ERROR;
562 done:
563 
564 	/*
565 	 * Correctly set the buf to indicate a completed xfer
566 	 */
567 	bp->b_resid = bp->b_bcount;
568 	biodone(bp);
569 	return;
570 }
571 
572 /* For 2.2-stable support */
573 #ifndef ENOIOCTL
574 #define ENOIOCTL -1
575 #endif
576 
577 static int
578 daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
579 {
580 	struct cam_periph *periph;
581 	struct da_softc *softc;
582 	int unit;
583 	int error;
584 
585 	unit = dkunit(dev);
586 	periph = cam_extend_get(daperiphs, unit);
587 	if (periph == NULL)
588 		return (ENXIO);
589 
590 	softc = (struct da_softc *)periph->softc;
591 
592 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n"));
593 
594 	if (cmd == DIOCSBAD)
595 		return (EINVAL);	/* XXX */
596 
597 	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
598 		return (error); /* error code from tsleep */
599 	}
600 
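	/*
	 * Give the slice/label code first crack at the ioctl; anything it
	 * does not recognize comes back as ENOIOCTL and is handed to the
	 * generic CAM peripheral ioctl handler below.
	 */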
601 	error = dsioctl("da", dev, cmd, addr, flag, &softc->dk_slices);
602 
603 	if (error == ENOIOCTL)
604 		error = cam_periph_ioctl(periph, cmd, addr, daerror);
605 
606 	cam_periph_unlock(periph);
607 
608 	return (error);
609 }
610 
611 static int
612 dadump(dev_t dev)
613 {
614 	struct	    cam_periph *periph;
615 	struct	    da_softc *softc;
616 	struct	    disklabel *lp;
617 	u_int	    unit;
618 	u_int	    part;
619 	long	    num;	/* number of sectors to write */
620 	long	    blkoff;
621 	long	    blknum;
622 	long	    blkcnt;
623 	vm_offset_t addr;
624 	static	int dadoingadump = 0;
625 	struct	    ccb_scsiio csio;
626 
627 	/* toss any characters present prior to dump */
628 	while (cncheckc() != -1)
629 		;
630 
631 	unit = dkunit(dev);
632 	part = dkpart(dev);
633 	periph = cam_extend_get(daperiphs, unit);
634 	if (periph == NULL) {
635 		return (ENXIO);
636 	}
637 	softc = (struct da_softc *)periph->softc;
638 
639 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0
640 	 || (softc->dk_slices == NULL)
641 	 || (lp = dsgetlabel(dev, softc->dk_slices)) == NULL)
642 		return (ENXIO);
643 
644 	/* Size of memory to dump, in disk sectors. */
645 	/* XXX Fix up for non DEV_BSIZE sectors!!! */
646 	num = (u_long)Maxmem * PAGE_SIZE / softc->params.secsize;
647 
648 	blkoff = lp->d_partitions[part].p_offset;
649 	blkoff += softc->dk_slices->dss_slices[dkslice(dev)].ds_offset;
650 
651 	/* check transfer bounds against partition size */
652 	if ((dumplo < 0) || ((dumplo + num) > lp->d_partitions[part].p_size))
653 		return (EINVAL);
654 
655 	if (dadoingadump != 0)
656 		return (EFAULT);
657 
658 	dadoingadump = 1;
659 
660 	blknum = dumplo + blkoff;
661 	blkcnt = PAGE_SIZE / softc->params.secsize;
662 
663 	addr = 0;	/* starting address */
664 
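	/*
	 * Dump loop: each pass maps one physical page at CADDR1 (pages
	 * that are not physical memory are replaced with page 0), writes
	 * it to disk with a polled, ordered-tag WRITE of blkcnt sectors,
	 * and then advances the block number and source address by one
	 * page's worth of data.
	 */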
665 	while (num > 0) {
666 
667 		if (is_physical_memory(addr)) {
668 			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
669 				   trunc_page(addr), VM_PROT_READ, TRUE);
670 		} else {
671 			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
672 				   trunc_page(0), VM_PROT_READ, TRUE);
673 		}
674 
675 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
676 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
677 		scsi_read_write(&csio,
678 				/*retries*/1,
679 				dadone,
680 				MSG_ORDERED_Q_TAG,
681 				/*read*/FALSE,
682 				/*byte2*/0,
683 				/*minimum_cmd_size*/ softc->minimum_cmd_size,
684 				blknum,
685 				blkcnt,
686 				/*data_ptr*/CADDR1,
687 				/*dxfer_len*/blkcnt * softc->params.secsize,
688 				/*sense_len*/SSD_FULL_SIZE,
689 				DA_DEFAULT_TIMEOUT * 1000);
690 		xpt_polled_action((union ccb *)&csio);
691 
692 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
693 			printf("Aborting dump due to I/O error.\n");
694 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
695 			     CAM_SCSI_STATUS_ERROR)
696 				scsi_sense_print(&csio);
697 			else
698 				printf("status == 0x%x, scsi status == 0x%x\n",
699 				       csio.ccb_h.status, csio.scsi_status);
700 			return(EIO);
701 		}
702 
703 		if (addr % (1024 * 1024) == 0) {
704 #ifdef	HW_WDOG
705 			if (wdog_tickler)
706 				(*wdog_tickler)();
707 #endif /* HW_WDOG */
708 			/* Count in MB of data left to write */
709 			printf("%ld ", (num  * softc->params.secsize)
710 				     / (1024 * 1024));
711 		}
712 
713 		/* update block count */
714 		num -= blkcnt;
715 		blknum += blkcnt;
716 		addr += blkcnt * softc->params.secsize;
717 
718 		/* operator aborting dump? */
719 		if (cncheckc() != -1)
720 			return (EINTR);
721 	}
722 
723 	/*
724 	 * Sync the disk cache contents to the physical media.
725 	 */
726 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
727 
728 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
729 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
730 		scsi_synchronize_cache(&csio,
731 				       /*retries*/1,
732 				       /*cbfcnp*/dadone,
733 				       MSG_SIMPLE_Q_TAG,
734 				       /*begin_lba*/0,/* Cover the whole disk */
735 				       /*lb_count*/0,
736 				       SSD_FULL_SIZE,
737 				       5 * 60 * 1000);
738 		xpt_polled_action((union ccb *)&csio);
739 
740 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
741 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
742 			     CAM_SCSI_STATUS_ERROR) {
743 				int asc, ascq;
744 				int sense_key, error_code;
745 
746 				scsi_extract_sense(&csio.sense_data,
747 						   &error_code,
748 						   &sense_key,
749 						   &asc, &ascq);
750 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
751 					scsi_sense_print(&csio);
752 			} else {
753 				xpt_print_path(periph->path);
754 				printf("Synchronize cache failed, status "
755 				       "== 0x%x, scsi status == 0x%x\n",
756 				       csio.ccb_h.status, csio.scsi_status);
757 			}
758 		}
759 	}
760 	return (0);
761 }
762 
763 static int
764 dasize(dev_t dev)
765 {
766 	struct cam_periph *periph;
767 	struct da_softc *softc;
768 
769 	periph = cam_extend_get(daperiphs, dkunit(dev));
770 	if (periph == NULL)
771 		return (ENXIO);
772 
773 	softc = (struct da_softc *)periph->softc;
774 
775 	return (dssize(dev, &softc->dk_slices));
776 }
777 
778 static void
779 dainit(void)
780 {
781 	cam_status status;
782 	struct cam_path *path;
783 
784 	/*
785 	 * Create our extend array for storing the devices we attach to.
786 	 */
787 	daperiphs = cam_extend_new();
788 	SLIST_INIT(&softc_list);
789 	if (daperiphs == NULL) {
790 		printf("da: Failed to alloc extend array!\n");
791 		return;
792 	}
793 
794 	/*
795 	 * Install a global async callback.  This callback will
796 	 * receive async callbacks like "new device found".
797 	 */
798 	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
799 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
800 
801 	if (status == CAM_REQ_CMP) {
802 		struct ccb_setasync csa;
803 
804                 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
805                 csa.ccb_h.func_code = XPT_SASYNC_CB;
806                 csa.event_enable = AC_FOUND_DEVICE;
807                 csa.callback = daasync;
808                 csa.callback_arg = NULL;
809                 xpt_action((union ccb *)&csa);
810 		status = csa.ccb_h.status;
811                 xpt_free_path(path);
812         }
813 
814 	if (status != CAM_REQ_CMP) {
815 		printf("da: Failed to attach master async callback "
816 		       "due to status 0x%x!\n", status);
817 	} else {
818 		int err;
819 
820 		/* If we were successful, register our devsw */
821 		cdevsw_add(&da_cdevsw);
822 
823 		/*
824 		 * Schedule a periodic event to occasionally send an
825 		 * ordered tag to a device.
826 		 */
827 		timeout(dasendorderedtag, NULL,
828 			(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
829 
830 		if ((err = at_shutdown(dashutdown, NULL,
831 				       SHUTDOWN_POST_SYNC)) != 0)
832 			printf("dainit: at_shutdown returned %d!\n", err);
833 	}
834 }
835 
836 static void
837 daoninvalidate(struct cam_periph *periph)
838 {
839 	int s;
840 	struct da_softc *softc;
841 	struct buf *q_bp;
842 	struct ccb_setasync csa;
843 
844 	softc = (struct da_softc *)periph->softc;
845 
846 	/*
847 	 * De-register any async callbacks.
848 	 */
849 	xpt_setup_ccb(&csa.ccb_h, periph->path,
850 		      /* priority */ 5);
851 	csa.ccb_h.func_code = XPT_SASYNC_CB;
852 	csa.event_enable = 0;
853 	csa.callback = daasync;
854 	csa.callback_arg = periph;
855 	xpt_action((union ccb *)&csa);
856 
857 	softc->flags |= DA_FLAG_PACK_INVALID;
858 
859 	/*
860 	 * Although the oninvalidate() routines are always called at
861 	 * splsoftcam, we need to be at splbio() here to keep the buffer
862 	 * queue from being modified while we traverse it.
863 	 */
864 	s = splbio();
865 
866 	/*
867 	 * Return all queued I/O with ENXIO.
868 	 * XXX Handle any transactions queued to the card
869 	 *     with XPT_ABORT_CCB.
870 	 */
871 	while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){
872 		bufq_remove(&softc->buf_queue, q_bp);
873 		q_bp->b_resid = q_bp->b_bcount;
874 		q_bp->b_error = ENXIO;
875 		q_bp->b_flags |= B_ERROR;
876 		biodone(q_bp);
877 	}
878 	splx(s);
879 
880 	SLIST_REMOVE(&softc_list, softc, da_softc, links);
881 
882 	xpt_print_path(periph->path);
883 	printf("lost device\n");
884 }
885 
886 static void
887 dacleanup(struct cam_periph *periph)
888 {
889 	struct da_softc *softc;
890 
891 	softc = (struct da_softc *)periph->softc;
892 
893 	devstat_remove_entry(&softc->device_stats);
894 	cam_extend_release(daperiphs, periph->unit_number);
895 	xpt_print_path(periph->path);
896 	printf("removing device entry\n");
897 	free(softc, M_DEVBUF);
898 }
899 
900 static void
901 daasync(void *callback_arg, u_int32_t code,
902 	struct cam_path *path, void *arg)
903 {
904 	struct cam_periph *periph;
905 
906 	periph = (struct cam_periph *)callback_arg;
907 	switch (code) {
908 	case AC_FOUND_DEVICE:
909 	{
910 		struct ccb_getdev *cgd;
911 		cam_status status;
912 
913 		cgd = (struct ccb_getdev *)arg;
914 
915 		if ((cgd->pd_type != T_DIRECT) && (cgd->pd_type != T_OPTICAL))
916 			break;
917 
918 		/*
919 		 * Allocate a peripheral instance for
920 		 * this device and start the probe
921 		 * process.
922 		 */
923 		status = cam_periph_alloc(daregister, daoninvalidate,
924 					  dacleanup, dastart,
925 					  "da", CAM_PERIPH_BIO,
926 					  cgd->ccb_h.path, daasync,
927 					  AC_FOUND_DEVICE, cgd);
928 
929 		if (status != CAM_REQ_CMP
930 		 && status != CAM_REQ_INPROG)
931 			printf("daasync: Unable to attach to new device "
932 				"due to status 0x%x\n", status);
933 		break;
934 	}
935 	case AC_SENT_BDR:
936 	case AC_BUS_RESET:
937 	{
938 		struct da_softc *softc;
939 		struct ccb_hdr *ccbh;
940 		int s;
941 
942 		softc = (struct da_softc *)periph->softc;
943 		s = splsoftcam();
944 		/*
945 		 * Don't fail on the expected unit attention
946 		 * that will occur.
947 		 */
948 		softc->flags |= DA_FLAG_RETRY_UA;
949 		for (ccbh = LIST_FIRST(&softc->pending_ccbs);
950 		     ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
951 			ccbh->ccb_state |= DA_CCB_RETRY_UA;
952 		splx(s);
953 		/* FALLTHROUGH*/
954 	}
955 	default:
956 		cam_periph_async(periph, code, path, arg);
957 		break;
958 	}
959 }
960 
961 static cam_status
962 daregister(struct cam_periph *periph, void *arg)
963 {
964 	int s;
965 	struct da_softc *softc;
966 	struct ccb_setasync csa;
967 	struct ccb_getdev *cgd;
968 	caddr_t match;
969 
970 	cgd = (struct ccb_getdev *)arg;
971 	if (periph == NULL) {
972 		printf("daregister: periph was NULL!!\n");
973 		return(CAM_REQ_CMP_ERR);
974 	}
975 
976 	if (cgd == NULL) {
977 		printf("daregister: no getdev CCB, can't register device\n");
978 		return(CAM_REQ_CMP_ERR);
979 	}
980 
981 	softc = (struct da_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);
982 
983 	if (softc == NULL) {
984 		printf("daregister: Unable to probe new device. "
985 		       "Unable to allocate softc\n");
986 		return(CAM_REQ_CMP_ERR);
987 	}
988 
989 	bzero(softc, sizeof(*softc));
990 	LIST_INIT(&softc->pending_ccbs);
991 	softc->state = DA_STATE_PROBE;
992 	bufq_init(&softc->buf_queue);
993 	if (SID_IS_REMOVABLE(&cgd->inq_data))
994 		softc->flags |= DA_FLAG_PACK_REMOVABLE;
995 	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
996 		softc->flags |= DA_FLAG_TAGGED_QUEUING;
997 
998 	periph->softc = softc;
999 
1000 	cam_extend_set(daperiphs, periph->unit_number, periph);
1001 
1002 	/*
1003 	 * See if this device has any quirks.
1004 	 */
1005 	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
1006 			       (caddr_t)da_quirk_table,
1007 			       sizeof(da_quirk_table)/sizeof(*da_quirk_table),
1008 			       sizeof(*da_quirk_table), scsi_inquiry_match);
1009 
1010 	if (match != NULL)
1011 		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
1012 	else
1013 		softc->quirks = DA_Q_NONE;
1014 
1015 	if (softc->quirks & DA_Q_NO_6_BYTE)
1016 		softc->minimum_cmd_size = 10;
1017 	else
1018 		softc->minimum_cmd_size = 6;
1019 
1020 	/*
1021 	 * Block our timeout handler while we
1022 	 * add this softc to the dev list.
1023 	 */
1024 	s = splsoftclock();
1025 	SLIST_INSERT_HEAD(&softc_list, softc, links);
1026 	splx(s);
1027 
1028 	/*
1029 	 * The DA driver supports a blocksize, but
1030 	 * we don't know the blocksize until we do
1031 	 * a read capacity.  So, set a flag to
1032 	 * indicate that the blocksize is
1033 	 * unavailable right now.  We'll clear the
1034 	 * flag as soon as we've done a read capacity.
1035 	 */
1036 	devstat_add_entry(&softc->device_stats, "da",
1037 			  periph->unit_number, 0,
1038 	  		  DEVSTAT_BS_UNAVAILABLE,
1039 			  cgd->pd_type | DEVSTAT_TYPE_IF_SCSI,
1040 			  DEVSTAT_PRIORITY_DA);
1041 
1042 	/*
1043 	 * Add async callbacks for bus reset and
1044 	 * bus device reset calls.  I don't bother
1045 	 * checking if this fails as, in most cases,
1046 	 * the system will function just fine without
1047 	 * them and the only alternative would be to
1048 	 * not attach the device on failure.
1049 	 */
1050 	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
1051 	csa.ccb_h.func_code = XPT_SASYNC_CB;
1052 	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
1053 	csa.callback = daasync;
1054 	csa.callback_arg = periph;
1055 	xpt_action((union ccb *)&csa);
1056 	/*
1057 	 * Lock this peripheral until we are set up.
1058 	 * This first call can't block.
1059 	 */
1060 	(void)cam_periph_lock(periph, PRIBIO);
1061 	xpt_schedule(periph, /*priority*/5);
1062 
1063 	return(CAM_REQ_CMP);
1064 }
1065 
1066 static void
1067 dastart(struct cam_periph *periph, union ccb *start_ccb)
1068 {
1069 	struct da_softc *softc;
1070 
1071 	softc = (struct da_softc *)periph->softc;
1072 
1073 
1074 	switch (softc->state) {
1075 	case DA_STATE_NORMAL:
1076 	{
1077 		/* Pull a buffer from the queue and get going on it */
1078 		struct buf *bp;
1079 		int s;
1080 
1081 		/*
1082 		 * See if there is a buf with work for us to do..
1083 		 */
1084 		s = splbio();
1085 		bp = bufq_first(&softc->buf_queue);
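		/*
		 * An "immediate" CCB request (a waiter sleeping on
		 * periph->ccb_list at a priority at least as urgent as
		 * ours) takes precedence over buffered I/O: hand it this
		 * CCB and wake the waiter.
		 */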
1086 		if (periph->immediate_priority <= periph->pinfo.priority) {
1087 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
1088 					("queuing for immediate ccb\n"));
1089 			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
1090 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1091 					  periph_links.sle);
1092 			periph->immediate_priority = CAM_PRIORITY_NONE;
1093 			splx(s);
1094 			wakeup(&periph->ccb_list);
1095 		} else if (bp == NULL) {
1096 			splx(s);
1097 			xpt_release_ccb(start_ccb);
1098 		} else {
1099 			int oldspl;
1100 			u_int8_t tag_code;
1101 
1102 			bufq_remove(&softc->buf_queue, bp);
1103 
1104 			devstat_start_transaction(&softc->device_stats);
1105 
1106 			if ((bp->b_flags & B_ORDERED) != 0
1107 			 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
1108 				softc->flags &= ~DA_FLAG_NEED_OTAG;
1109 				softc->ordered_tag_count++;
1110 				tag_code = MSG_ORDERED_Q_TAG;
1111 			} else {
1112 				tag_code = MSG_SIMPLE_Q_TAG;
1113 			}
1114 			scsi_read_write(&start_ccb->csio,
1115 					/*retries*/4,
1116 					dadone,
1117 					tag_code,
1118 					bp->b_flags & B_READ,
1119 					/*byte2*/0,
1120 					softc->minimum_cmd_size,
1121 					bp->b_pblkno,
1122 					bp->b_bcount / softc->params.secsize,
1123 					bp->b_data,
1124 					bp->b_bcount,
1125 					/*sense_len*/SSD_FULL_SIZE,
1126 					DA_DEFAULT_TIMEOUT * 1000);
1127 			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
1128 
1129 			/*
1130 			 * Block out any asynchronous callbacks
1131 			 * while we touch the pending ccb list.
1132 			 */
1133 			oldspl = splcam();
1134 			LIST_INSERT_HEAD(&softc->pending_ccbs,
1135 					 &start_ccb->ccb_h, periph_links.le);
1136 			splx(oldspl);
1137 
1138 			/* We expect a unit attention from this device */
1139 			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
1140 				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
1141 				softc->flags &= ~DA_FLAG_RETRY_UA;
1142 			}
1143 
1144 			start_ccb->ccb_h.ccb_bp = bp;
1145 			bp = bufq_first(&softc->buf_queue);
1146 			splx(s);
1147 
1148 			xpt_action(start_ccb);
1149 		}
1150 
1151 		if (bp != NULL) {
1152 			/* Have more work to do, so ensure we stay scheduled */
1153 			xpt_schedule(periph, /* XXX priority */1);
1154 		}
1155 		break;
1156 	}
1157 	case DA_STATE_PROBE:
1158 	{
1159 		struct ccb_scsiio *csio;
1160 		struct scsi_read_capacity_data *rcap;
1161 
1162 		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
1163 								M_TEMP,
1164 								M_NOWAIT);
1165 		if (rcap == NULL) {
1166 			printf("dastart: Couldn't malloc read_capacity data\n");
1167 			/* da_free_periph??? */
1168 			break;
1169 		}
1170 		csio = &start_ccb->csio;
1171 		scsi_read_capacity(csio,
1172 				   /*retries*/4,
1173 				   dadone,
1174 				   MSG_SIMPLE_Q_TAG,
1175 				   rcap,
1176 				   SSD_FULL_SIZE,
1177 				   /*timeout*/5000);
1178 		start_ccb->ccb_h.ccb_bp = NULL;
1179 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
1180 		xpt_action(start_ccb);
1181 		break;
1182 	}
1183 	}
1184 }
1185 
1186 
1187 static void
1188 dadone(struct cam_periph *periph, union ccb *done_ccb)
1189 {
1190 	struct da_softc *softc;
1191 	struct ccb_scsiio *csio;
1192 
1193 	softc = (struct da_softc *)periph->softc;
1194 	csio = &done_ccb->csio;
1195 	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
1196 	case DA_CCB_BUFFER_IO:
1197 	{
1198 		struct buf *bp;
1199 		int    oldspl;
1200 
1201 		bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
1202 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1203 			int error;
1204 			int s;
1205 			int sf;
1206 
1207 			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
1208 				sf = SF_RETRY_UA;
1209 			else
1210 				sf = 0;
1211 
1212 			/* Retry selection timeouts */
1213 			sf |= SF_RETRY_SELTO;
1214 
1215 			if ((error = daerror(done_ccb, 0, sf)) == ERESTART) {
1216 				/*
1217 				 * A retry was scheduled, so
1218 				 * just return.
1219 				 */
1220 				return;
1221 			}
1222 			if (error != 0) {
1223 				struct buf *q_bp;
1224 
1225 				s = splbio();
1226 
1227 				if (error == ENXIO) {
1228 					/*
1229 					 * Catastrophic error.  Mark our pack as
1230 					 * invalid.
1231 					 */
1232 					/* XXX See if this is really a media
1233 					 *     change first.
1234 					 */
1235 					xpt_print_path(periph->path);
1236 					printf("Invalidating pack\n");
1237 					softc->flags |= DA_FLAG_PACK_INVALID;
1238 				}
1239 
1240 				/*
1241 				 * return all queued I/O with EIO, so that
1242 				 * the client can retry these I/Os in the
1243 				 * proper order should it attempt to recover.
1244 				 */
1245 				while ((q_bp = bufq_first(&softc->buf_queue))
1246 					!= NULL) {
1247 					bufq_remove(&softc->buf_queue, q_bp);
1248 					q_bp->b_resid = q_bp->b_bcount;
1249 					q_bp->b_error = EIO;
1250 					q_bp->b_flags |= B_ERROR;
1251 					biodone(q_bp);
1252 				}
1253 				splx(s);
1254 				bp->b_error = error;
1255 				bp->b_resid = bp->b_bcount;
1256 				bp->b_flags |= B_ERROR;
1257 			} else {
1258 				bp->b_resid = csio->resid;
1259 				bp->b_error = 0;
1260 				if (bp->b_resid != 0) {
1261 					/* Short transfer ??? */
1262 					bp->b_flags |= B_ERROR;
1263 				}
1264 			}
1265 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1266 				cam_release_devq(done_ccb->ccb_h.path,
1267 						 /*relsim_flags*/0,
1268 						 /*reduction*/0,
1269 						 /*timeout*/0,
1270 						 /*getcount_only*/0);
1271 		} else {
1272 			bp->b_resid = csio->resid;
1273 			if (csio->resid > 0)
1274 				bp->b_flags |= B_ERROR;
1275 		}
1276 
1277 		/*
1278 		 * Block out any asynchronous callbacks
1279 		 * while we touch the pending ccb list.
1280 		 */
1281 		oldspl = splcam();
1282 		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
1283 		splx(oldspl);
1284 
1285 		devstat_end_transaction(&softc->device_stats,
1286 					bp->b_bcount - bp->b_resid,
1287 					done_ccb->csio.tag_action & 0xf,
1288 					(bp->b_flags & B_READ) ? DEVSTAT_READ
1289 							       : DEVSTAT_WRITE);
1290 
1291 		if (softc->device_stats.busy_count == 0)
1292 			softc->flags |= DA_FLAG_WENT_IDLE;
1293 
1294 		biodone(bp);
1295 		break;
1296 	}
1297 	case DA_CCB_PROBE:
1298 	{
1299 		struct	   scsi_read_capacity_data *rdcap;
1300 		char	   announce_buf[80];
1301 
1302 		rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;
1303 
1304 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1305 			struct disk_params *dp;
1306 
1307 			dasetgeom(periph, rdcap);
1308 			dp = &softc->params;
1309 			snprintf(announce_buf, sizeof(announce_buf),
1310 			        "%luMB (%u %u byte sectors: %dH %dS/T %dC)",
1311 				(unsigned long) (((u_int64_t)dp->secsize *
1312 				dp->sectors) / (1024*1024)), dp->sectors,
1313 				dp->secsize, dp->heads, dp->secs_per_track,
1314 				dp->cylinders);
1315 		} else {
1316 			int	error;
1317 
1318 			/*
1319 			 * Retry any UNIT ATTENTION type errors.  They
1320 			 * are expected at boot.
1321 			 */
1322 			error = daerror(done_ccb, 0, SF_RETRY_UA |
1323 					SF_RETRY_SELTO | SF_NO_PRINT);
1324 			if (error == ERESTART) {
1325 				/*
1326 				 * A retry was scheduled, so
1327 				 * just return.
1328 				 */
1329 				return;
1330 			} else if (error != 0) {
1331 				struct scsi_sense_data *sense;
1332 				int asc, ascq;
1333 				int sense_key, error_code;
1334 				int have_sense;
1335 				cam_status status;
1336 				struct ccb_getdev cgd;
1337 
1338 				/* Don't wedge this device's queue */
1339 				cam_release_devq(done_ccb->ccb_h.path,
1340 						 /*relsim_flags*/0,
1341 						 /*reduction*/0,
1342 						 /*timeout*/0,
1343 						 /*getcount_only*/0);
1344 
1345 				status = done_ccb->ccb_h.status;
1346 
1347 				xpt_setup_ccb(&cgd.ccb_h,
1348 					      done_ccb->ccb_h.path,
1349 					      /* priority */ 1);
1350 				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1351 				xpt_action((union ccb *)&cgd);
1352 
1353 				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
1354 				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
1355 				 || ((status & CAM_AUTOSNS_VALID) == 0))
1356 					have_sense = FALSE;
1357 				else
1358 					have_sense = TRUE;
1359 
1360 				if (have_sense) {
1361 					sense = &csio->sense_data;
1362 					scsi_extract_sense(sense, &error_code,
1363 							   &sense_key,
1364 							   &asc, &ascq);
1365 				}
1366 				/*
1367 				 * Attach to anything that claims to be a
1368 				 * direct access or optical disk device,
1369 				 * as long as it doesn't return a "Logical
1370 				 * unit not supported" (0x25) error.
1371 				 */
1372 				if ((have_sense) && (asc != 0x25)
1373 				 && (error_code == SSD_CURRENT_ERROR))
1374 					snprintf(announce_buf,
1375 					    sizeof(announce_buf),
1376 						"Attempt to query device "
1377 						"size failed: %s, %s",
1378 						scsi_sense_key_text[sense_key],
1379 						scsi_sense_desc(asc,ascq,
1380 								&cgd.inq_data));
1381 				else {
1382 					if (have_sense)
1383 						scsi_sense_print(
1384 							&done_ccb->csio);
1385 					else {
1386 						xpt_print_path(periph->path);
1387 						printf("got CAM status %#x\n",
1388 						       done_ccb->ccb_h.status);
1389 					}
1390 
1391 					xpt_print_path(periph->path);
1392 					printf("fatal error, failed"
1393 					       " to attach to device\n");
1394 
1395 					/*
1396 					 * Free up resources.
1397 					 */
1398 					cam_periph_invalidate(periph);
1399 					announce_buf[0] = '\0';
1400 				}
1401 			}
1402 		}
1403 		free(rdcap, M_TEMP);
1404 		if (announce_buf[0] != '\0')
1405 			xpt_announce_periph(periph, announce_buf);
1406 		softc->state = DA_STATE_NORMAL;
1407 		/*
1408 		 * Since our peripheral may be invalidated by an error
1409 		 * above or an external event, we must release our CCB
1410 		 * before releasing the probe lock on the peripheral.
1411 		 * The peripheral will only go away once the last lock
1412 		 * is removed, and we need it around for the CCB release
1413 		 * operation.
1414 		 */
1415 		xpt_release_ccb(done_ccb);
1416 		cam_periph_unlock(periph);
1417 		return;
1418 	}
1419 	case DA_CCB_WAITING:
1420 	{
1421 		/* Caller will release the CCB */
1422 		wakeup(&done_ccb->ccb_h.cbfcnp);
1423 		return;
1424 	}
1425 	case DA_CCB_DUMP:
1426 		/* No-op.  We're polling */
1427 		return;
1428 	default:
1429 		break;
1430 	}
1431 	xpt_release_ccb(done_ccb);
1432 }
1433 
1434 static int
1435 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1436 {
1437 	struct da_softc	  *softc;
1438 	struct cam_periph *periph;
1439 
1440 	periph = xpt_path_periph(ccb->ccb_h.path);
1441 	softc = (struct da_softc *)periph->softc;
1442 
1443 	/*
1444 	 * XXX
1445 	 * Until we have a better way of doing pack validation,
1446 	 * don't treat UAs as errors.
1447 	 */
1448 	sense_flags |= SF_RETRY_UA;
1449 	return(cam_periph_error(ccb, cam_flags, sense_flags,
1450 				&softc->saved_ccb));
1451 }
1452 
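/*
 * Issue a PREVENT ALLOW MEDIUM REMOVAL command to lock (PR_PREVENT) or
 * unlock (PR_ALLOW) removable media.  DA_FLAG_PACK_LOCKED tracks the
 * current state so that redundant requests are skipped.
 */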
1453 static void
1454 daprevent(struct cam_periph *periph, int action)
1455 {
1456 	struct	da_softc *softc;
1457 	union	ccb *ccb;
1458 	int	error;
1459 
1460 	softc = (struct da_softc *)periph->softc;
1461 
1462 	if (((action == PR_ALLOW)
1463 	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
1464 	 || ((action == PR_PREVENT)
1465 	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
1466 		return;
1467 	}
1468 
1469 	ccb = cam_periph_getccb(periph, /*priority*/1);
1470 
1471 	scsi_prevent(&ccb->csio,
1472 		     /*retries*/1,
1473 		     /*cbfcnp*/dadone,
1474 		     MSG_SIMPLE_Q_TAG,
1475 		     action,
1476 		     SSD_FULL_SIZE,
1477 		     5000);
1478 
1479 	error = cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
1480 				  /*sense_flags*/0, &softc->device_stats);
1481 
1482 	if (error == 0) {
1483 		if (action == PR_ALLOW)
1484 			softc->flags &= ~DA_FLAG_PACK_LOCKED;
1485 		else
1486 			softc->flags |= DA_FLAG_PACK_LOCKED;
1487 	}
1488 
1489 	xpt_release_ccb(ccb);
1490 }
1491 
1492 static void
1493 dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data * rdcap)
1494 {
1495 	struct ccb_calc_geometry ccg;
1496 	struct da_softc *softc;
1497 	struct disk_params *dp;
1498 
1499 	softc = (struct da_softc *)periph->softc;
1500 
1501 	dp = &softc->params;
1502 	dp->secsize = scsi_4btoul(rdcap->length);
1503 	dp->sectors = scsi_4btoul(rdcap->addr) + 1;
1504 	/*
1505 	 * Have the controller provide us with a geometry
1506 	 * for this disk.  The only time the geometry
1507 	 * matters is when we boot and the controller
1508 	 * is the only one knowledgeable enough to come
1509 	 * up with something that will make this a bootable
1510 	 * device.
1511 	 */
1512 	xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
1513 	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
1514 	ccg.block_size = dp->secsize;
1515 	ccg.volume_size = dp->sectors;
1516 	ccg.heads = 0;
1517 	ccg.secs_per_track = 0;
1518 	ccg.cylinders = 0;
1519 	xpt_action((union ccb*)&ccg);
1520 	dp->heads = ccg.heads;
1521 	dp->secs_per_track = ccg.secs_per_track;
1522 	dp->cylinders = ccg.cylinders;
1523 }
1524 
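/*
 * Periodic timeout handler (armed in dainit and re-armed below).  Any
 * device that has stayed busy for a full interval without issuing an
 * ordered tag gets DA_FLAG_NEED_OTAG set, which makes dastart() issue
 * its next I/O with an ordered tag so that older simple-tagged requests
 * cannot be starved indefinitely.
 */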
1525 static void
1526 dasendorderedtag(void *arg)
1527 {
1528 	struct da_softc *softc;
1529 	int s;
1530 
1531 	for (softc = SLIST_FIRST(&softc_list);
1532 	     softc != NULL;
1533 	     softc = SLIST_NEXT(softc, links)) {
1534 		s = splsoftcam();
1535 		if ((softc->ordered_tag_count == 0)
1536 		 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
1537 			softc->flags |= DA_FLAG_NEED_OTAG;
1538 		}
1539 		if (softc->device_stats.busy_count > 0)
1540 			softc->flags &= ~DA_FLAG_WENT_IDLE;
1541 
1542 		softc->ordered_tag_count = 0;
1543 		splx(s);
1544 	}
1545 	/* Queue us up again */
1546 	timeout(dasendorderedtag, NULL,
1547 		(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
1548 }
1549 
1550 /*
1551  * Step through all DA peripheral drivers, and if the device is still open,
1552  * sync the disk cache to physical media.
1553  */
1554 static void
1555 dashutdown(int howto, void *arg)
1556 {
1557 	struct cam_periph *periph;
1558 	struct da_softc *softc;
1559 
1560 	for (periph = TAILQ_FIRST(&dadriver.units); periph != NULL;
1561 	     periph = TAILQ_NEXT(periph, unit_links)) {
1562 		union ccb ccb;
1563 		softc = (struct da_softc *)periph->softc;
1564 
1565 		/*
1566 		 * We only sync the cache if the drive is still open, and
1567 		 * if the drive is capable of it..
1568 		 */
1569 		if (((softc->flags & DA_FLAG_OPEN) == 0)
1570 		 || (softc->quirks & DA_Q_NO_SYNC_CACHE))
1571 			continue;
1572 
1573 		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
1574 
1575 		ccb.ccb_h.ccb_state = DA_CCB_DUMP;
1576 		scsi_synchronize_cache(&ccb.csio,
1577 				       /*retries*/1,
1578 				       /*cbfcnp*/dadone,
1579 				       MSG_SIMPLE_Q_TAG,
1580 				       /*begin_lba*/0, /* whole disk */
1581 				       /*lb_count*/0,
1582 				       SSD_FULL_SIZE,
1583 				       5 * 60 * 1000);
1584 
1585 		xpt_polled_action(&ccb);
1586 
1587 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1588 			if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
1589 			     CAM_SCSI_STATUS_ERROR)
1590 			 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
1591 				int error_code, sense_key, asc, ascq;
1592 
1593 				scsi_extract_sense(&ccb.csio.sense_data,
1594 						   &error_code, &sense_key,
1595 						   &asc, &ascq);
1596 
1597 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
1598 					scsi_sense_print(&ccb.csio);
1599 			} else {
1600 				xpt_print_path(periph->path);
1601 				printf("Synchronize cache failed, status "
1602 				       "== 0x%x, scsi status == 0x%x\n",
1603 				       ccb.ccb_h.status, ccb.csio.scsi_status);
1604 			}
1605 		}
1606 
1607 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1608 			cam_release_devq(ccb.ccb_h.path,
1609 					 /*relsim_flags*/0,
1610 					 /*reduction*/0,
1611 					 /*timeout*/0,
1612 					 /*getcount_only*/0);
1613 
1614 	}
1615 }
1616