xref: /freebsd/sys/cam/scsi/scsi_da.c (revision 7f3dea244c40159a41ab22da77a434d7c5b5e85a)
1 /*
2  * Implementation of SCSI Direct Access Peripheral driver for CAM.
3  *
4  * Copyright (c) 1997 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  *      $Id: scsi_da.c,v 1.34 1999/08/21 06:23:50 msmith Exp $
29  */
30 
31 #include "opt_hw_wdog.h"
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/buf.h>
37 #include <sys/devicestat.h>
38 #include <sys/dkbad.h>
39 #include <sys/disklabel.h>
40 #include <sys/diskslice.h>
41 #include <sys/eventhandler.h>
42 #include <sys/malloc.h>
43 #include <sys/conf.h>
44 #include <sys/cons.h>
45 
46 #include <machine/md_var.h>
47 
48 #include <vm/vm.h>
49 #include <vm/vm_prot.h>
50 #include <vm/pmap.h>
51 
52 #include <cam/cam.h>
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_extend.h>
55 #include <cam/cam_periph.h>
56 #include <cam/cam_xpt_periph.h>
57 
58 #include <cam/scsi/scsi_message.h>
59 
60 typedef enum {
61 	DA_STATE_PROBE,
62 	DA_STATE_NORMAL
63 } da_state;
64 
65 typedef enum {
66 	DA_FLAG_PACK_INVALID	= 0x001,
67 	DA_FLAG_NEW_PACK	= 0x002,
68 	DA_FLAG_PACK_LOCKED	= 0x004,
69 	DA_FLAG_PACK_REMOVABLE	= 0x008,
70 	DA_FLAG_TAGGED_QUEUING	= 0x010,
71 	DA_FLAG_NEED_OTAG	= 0x020,
72 	DA_FLAG_WENT_IDLE	= 0x040,
73 	DA_FLAG_RETRY_UA	= 0x080,
74 	DA_FLAG_OPEN		= 0x100
75 } da_flags;
76 
77 typedef enum {
78 	DA_Q_NONE		= 0x00,
79 	DA_Q_NO_SYNC_CACHE	= 0x01,
80 	DA_Q_NO_6_BYTE		= 0x02
81 } da_quirks;
82 
83 typedef enum {
84 	DA_CCB_PROBE		= 0x01,
85 	DA_CCB_BUFFER_IO	= 0x02,
86 	DA_CCB_WAITING		= 0x03,
87 	DA_CCB_DUMP		= 0x04,
88 	DA_CCB_TYPE_MASK	= 0x0F,
89 	DA_CCB_RETRY_UA		= 0x10
90 } da_ccb_state;
91 
92 /* Offsets into our private area for storing information */
93 #define ccb_state	ppriv_field0
94 #define ccb_bp		ppriv_ptr1
95 
96 struct disk_params {
97 	u_int8_t  heads;
98 	u_int16_t cylinders;
99 	u_int8_t  secs_per_track;
100 	u_int32_t secsize;	/* Number of bytes/sector */
101 	u_int32_t sectors;	/* total number sectors */
102 };
103 
104 struct da_softc {
105 	struct	 buf_queue_head buf_queue;
106 	struct	 devstat device_stats;
107 	SLIST_ENTRY(da_softc) links;
108 	LIST_HEAD(, ccb_hdr) pending_ccbs;
109 	da_state state;
110 	da_flags flags;
111 	da_quirks quirks;
112 	int	 minimum_cmd_size;
113 	int	 ordered_tag_count;
114 	struct	 disk_params params;
115 	struct	 diskslices *dk_slices;	/* virtual drives */
116 	union	 ccb saved_ccb;
117 };
118 
119 struct da_quirk_entry {
120 	struct scsi_inquiry_pattern inq_pat;
121 	da_quirks quirks;
122 };
123 
124 static struct da_quirk_entry da_quirk_table[] =
125 {
126 	{
127 		/*
128 		 * This particular Fujitsu drive doesn't like the
129 		 * synchronize cache command.
130 		 * Reported by: Tom Jackson <toj@gorilla.net>
131 		 */
132 		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
133 		/*quirks*/ DA_Q_NO_SYNC_CACHE
134 
135 	},
136 	{
137 		/*
138 		 * This drive doesn't like the synchronize cache command
139 		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
140 		 * in NetBSD PR kern/6027, August 24, 1998.
141 		 */
142 		{T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "2217*", "*"},
143 		/*quirks*/ DA_Q_NO_SYNC_CACHE
144 	},
145 	{
146 		/*
147 		 * This drive doesn't like the synchronize cache command
148 		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
149 		 * (PR 8882).
150 		 */
151 		{T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "2112*", "*"},
152 		/*quirks*/ DA_Q_NO_SYNC_CACHE
153 	},
154 	{
155 		/*
156 		 * Doesn't like the synchronize cache command.
157 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
158 		 */
159 		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
160 		/*quirks*/ DA_Q_NO_SYNC_CACHE
161 	},
162 	{
163 		/*
164 		 * Doesn't work correctly with 6 byte reads/writes.
165 		 * Returns illegal request, and points to byte 9 of the
166 		 * 6-byte CDB.
167 		 * Reported by:  Adam McDougall <bsdx@spawnet.com>
168 		 */
169 		{T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "VIKING 4*", "*"},
170 		/*quirks*/ DA_Q_NO_6_BYTE
171 	},
172 	{
173 		/*
174 		 * See above.
175 		 */
176 		{T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "VIKING 2*", "*"},
177 		/*quirks*/ DA_Q_NO_6_BYTE
178 	}
179 };
180 
181 static	d_open_t	daopen;
182 static	d_close_t	daclose;
183 static	d_strategy_t	dastrategy;
184 static	d_ioctl_t	daioctl;
185 static	d_dump_t	dadump;
186 static	d_psize_t	dasize;
187 static	periph_init_t	dainit;
188 static	void		daasync(void *callback_arg, u_int32_t code,
189 				struct cam_path *path, void *arg);
190 static	periph_ctor_t	daregister;
191 static	periph_dtor_t	dacleanup;
192 static	periph_start_t	dastart;
193 static	periph_oninv_t	daoninvalidate;
194 static	void		dadone(struct cam_periph *periph,
195 			       union ccb *done_ccb);
196 static  int		daerror(union ccb *ccb, u_int32_t cam_flags,
197 				u_int32_t sense_flags);
198 static void		daprevent(struct cam_periph *periph, int action);
199 static void		dasetgeom(struct cam_periph *periph,
200 				  struct scsi_read_capacity_data * rdcap);
201 static timeout_t	dasendorderedtag;
202 static void		dashutdown(void *arg, int howto);
203 
204 #ifndef DA_DEFAULT_TIMEOUT
205 #define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
206 #endif
207 
208 /*
209  * DA_ORDEREDTAG_INTERVAL determines how often, relative
210  * to the default timeout, we check to see whether an ordered
211  * tagged transaction is appropriate to prevent simple tag
212  * starvation.  Since we'd like to ensure that there is at least
213  * 1/2 of the timeout length left for a starved transaction to
214  * complete after we've sent an ordered tag, we must poll at least
215  * four times in every timeout period.  This takes care of the worst
216  * case where a starved transaction starts during an interval that
217  * passes the "don't send an ordered tag" test, so it takes
218  * us two intervals to determine that a tag must be sent.
219  */
220 #ifndef DA_ORDEREDTAG_INTERVAL
221 #define DA_ORDEREDTAG_INTERVAL 4
222 #endif
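/*
 * For example, with the default 60 second timeout and an interval of 4,
 * dasendorderedtag() runs every 15 seconds.  In the worst case two
 * intervals (30 seconds) elapse before an ordered tag is queued, which
 * still leaves half of the timeout for the starved request to complete.
 */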
223 
224 static struct periph_driver dadriver =
225 {
226 	dainit, "da",
227 	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
228 };
229 
230 DATA_SET(periphdriver_set, dadriver);
231 
232 #define DA_CDEV_MAJOR 13
233 #define DA_BDEV_MAJOR 4
234 
235 /* For 2.2-stable support */
236 #ifndef D_DISK
237 #define D_DISK 0
238 #endif
239 
240 static struct cdevsw da_cdevsw = {
241 	/* open */	daopen,
242 	/* close */	daclose,
243 	/* read */	physread,
244 	/* write */	physwrite,
245 	/* ioctl */	daioctl,
246 	/* stop */	nostop,
247 	/* reset */	noreset,
248 	/* devtotty */	nodevtotty,
249 	/* poll */	nopoll,
250 	/* mmap */	nommap,
251 	/* strategy */	dastrategy,
252 	/* name */	"da",
253 	/* parms */	noparms,
254 	/* maj */	DA_CDEV_MAJOR,
255 	/* dump */	dadump,
256 	/* psize */	dasize,
257 	/* flags */	D_DISK,
258 	/* maxio */	0,
259 	/* bmaj */	DA_BDEV_MAJOR
260 };
261 
262 static SLIST_HEAD(,da_softc) softc_list;
263 static struct extend_array *daperiphs;
264 
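/*
 * Open the disk: look up the peripheral for this unit, lock it, refresh
 * the disk parameters with a READ CAPACITY command, build a label for the
 * whole disk, and initialize the slice tables.
 */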
265 static int
266 daopen(dev_t dev, int flags, int fmt, struct proc *p)
267 {
268 	struct cam_periph *periph;
269 	struct da_softc *softc;
270 	struct disklabel label;
271 	int unit;
272 	int part;
273 	int error;
274 	int s;
275 
276 	unit = dkunit(dev);
277 	part = dkpart(dev);
278 	periph = cam_extend_get(daperiphs, unit);
279 	if (periph == NULL)
280 		return (ENXIO);
281 
282 	softc = (struct da_softc *)periph->softc;
283 
284 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
285 	    ("daopen: dev=%s (unit %d, partition %d)\n", devtoname(dev),
286 	     unit, part));
287 
288 	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
289 		return (error); /* error code from tsleep */
290 	}
291 
292 	if ((softc->flags & DA_FLAG_OPEN) == 0) {
293 		if (cam_periph_acquire(periph) != CAM_REQ_CMP)
294 			return(ENXIO);
295 		softc->flags |= DA_FLAG_OPEN;
296 	}
297 
298 	s = splsoftcam();
299 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
300 		/*
301 		 * If any partition is still open even though the disk
302 		 * has been invalidated, disallow further opens.
303 		 */
304 		if (dsisopen(softc->dk_slices)) {
305 			splx(s);
306 			cam_periph_unlock(periph);
307 			return (ENXIO);
308 		}
309 
310 		/* Invalidate our pack information. */
311 		dsgone(&softc->dk_slices);
312 		softc->flags &= ~DA_FLAG_PACK_INVALID;
313 	}
314 	splx(s);
315 
316 	/* Do a read capacity */
317 	{
318 		struct scsi_read_capacity_data *rcap;
319 		union  ccb *ccb;
320 
321 		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
322 								M_TEMP,
323 								M_WAITOK);
324 
325 		ccb = cam_periph_getccb(periph, /*priority*/1);
326 		scsi_read_capacity(&ccb->csio,
327 				   /*retries*/1,
328 				   /*cbfcnp*/dadone,
329 				   MSG_SIMPLE_Q_TAG,
330 				   rcap,
331 				   SSD_FULL_SIZE,
332 				   /*timeout*/60000);
333 		ccb->ccb_h.ccb_bp = NULL;
334 
335 		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
336 					  /*sense_flags*/SF_RETRY_UA |
337 							 SF_RETRY_SELTO,
338 					  &softc->device_stats);
339 
340 		xpt_release_ccb(ccb);
341 
342 		if (error == 0) {
343 			dasetgeom(periph, rcap);
344 		}
345 
346 		free(rcap, M_TEMP);
347 	}
348 
349 	if (error == 0) {
350 		struct ccb_getdev cgd;
351 
352 		/* Build label for whole disk. */
353 		bzero(&label, sizeof(label));
354 		label.d_type = DTYPE_SCSI;
355 
356 		/*
357 		 * Grab the inquiry data to get the vendor and product names.
358 		 * Put them in the typename and packname for the label.
359 		 */
360 		xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
361 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
362 		xpt_action((union ccb *)&cgd);
363 
364 		strncpy(label.d_typename, cgd.inq_data.vendor,
365 			min(SID_VENDOR_SIZE, sizeof(label.d_typename)));
366 		strncpy(label.d_packname, cgd.inq_data.product,
367 			min(SID_PRODUCT_SIZE, sizeof(label.d_packname)));
368 
369 		label.d_secsize = softc->params.secsize;
370 		label.d_nsectors = softc->params.secs_per_track;
371 		label.d_ntracks = softc->params.heads;
372 		label.d_ncylinders = softc->params.cylinders;
373 		label.d_secpercyl = softc->params.heads
374 				  * softc->params.secs_per_track;
375 		label.d_secperunit = softc->params.sectors;
376 
377 		if ((dsisopen(softc->dk_slices) == 0)
378 		    && ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) {
379 			daprevent(periph, PR_PREVENT);
380 		}
381 
382 		/* Initialize slice tables. */
383 		error = dsopen("da", dev, fmt, 0, &softc->dk_slices, &label);
384 
385 		/*
386 		 * Check to see whether or not the blocksize is set yet.
387 		 * If it isn't, set it and then clear the blocksize
388 		 * unavailable flag for the device statistics.
389 		 */
390 		if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
391 			softc->device_stats.block_size = softc->params.secsize;
392 			softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
393 		}
394 	}
395 
396 	if (error != 0) {
397 		if ((dsisopen(softc->dk_slices) == 0)
398 		 && ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) {
399 			daprevent(periph, PR_ALLOW);
400 		}
401 	}
402 	cam_periph_unlock(periph);
403 	return (error);
404 }
405 
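/*
 * Close the disk: release the slice, and once the last partition is
 * closed, synchronize the drive's write cache (unless quirked) and allow
 * media removal on removable packs.
 */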
406 static int
407 daclose(dev_t dev, int flag, int fmt, struct proc *p)
408 {
409 	struct	cam_periph *periph;
410 	struct	da_softc *softc;
411 	int	unit;
412 	int	error;
413 
414 	unit = dkunit(dev);
415 	periph = cam_extend_get(daperiphs, unit);
416 	if (periph == NULL)
417 		return (ENXIO);
418 
419 	softc = (struct da_softc *)periph->softc;
420 
421 	if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
422 		return (error); /* error code from tsleep */
423 	}
424 
425 	dsclose(dev, fmt, softc->dk_slices);
426 	if (dsisopen(softc->dk_slices)) {
427 		cam_periph_unlock(periph);
428 		return (0);
429 	}
430 
431 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
432 		union	ccb *ccb;
433 
434 		ccb = cam_periph_getccb(periph, /*priority*/1);
435 
436 		scsi_synchronize_cache(&ccb->csio,
437 				       /*retries*/1,
438 				       /*cbfcnp*/dadone,
439 				       MSG_SIMPLE_Q_TAG,
440 				       /*begin_lba*/0,/* Cover the whole disk */
441 				       /*lb_count*/0,
442 				       SSD_FULL_SIZE,
443 				       5 * 60 * 1000);
444 
445 		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
446 				  /*sense_flags*/SF_RETRY_UA,
447 				  &softc->device_stats);
448 
449 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
450 			if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
451 			     CAM_SCSI_STATUS_ERROR) {
452 				int asc, ascq;
453 				int sense_key, error_code;
454 
455 				scsi_extract_sense(&ccb->csio.sense_data,
456 						   &error_code,
457 						   &sense_key,
458 						   &asc, &ascq);
459 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
460 					scsi_sense_print(&ccb->csio);
461 			} else {
462 				xpt_print_path(periph->path);
463 				printf("Synchronize cache failed, status "
464 				       "== 0x%x, scsi status == 0x%x\n",
465 				       ccb->csio.ccb_h.status,
466 				       ccb->csio.scsi_status);
467 			}
468 		}
469 
470 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
471 			cam_release_devq(ccb->ccb_h.path,
472 					 /*relsim_flags*/0,
473 					 /*reduction*/0,
474 					 /*timeout*/0,
475 					 /*getcount_only*/0);
476 
477 		xpt_release_ccb(ccb);
478 
479 	}
480 
481 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
482 		daprevent(periph, PR_ALLOW);
483 		/*
484 		 * If we've got removable media, mark the blocksize as
485 		 * unavailable, since it could change when new media is
486 		 * inserted.
487 		 */
488 		softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
489 	}
490 
491 	softc->flags &= ~DA_FLAG_OPEN;
492 	cam_periph_unlock(periph);
493 	cam_periph_release(periph);
494 	return (0);
495 }
496 
497 /*
498  * Actually translate the requested transfer into one the physical driver
499  * can understand.  The transfer is described by a buf and will include
500  * only one physical transfer.
501  */
502 static void
503 dastrategy(struct buf *bp)
504 {
505 	struct cam_periph *periph;
506 	struct da_softc *softc;
507 	u_int  unit;
508 	u_int  part;
509 	int    s;
510 
511 	unit = dkunit(bp->b_dev);
512 	part = dkpart(bp->b_dev);
513 	periph = cam_extend_get(daperiphs, unit);
514 	if (periph == NULL) {
515 		bp->b_error = ENXIO;
516 		goto bad;
517 	}
518 	softc = (struct da_softc *)periph->softc;
519 #if 0
520 	/*
521 	 * check it's not too big a transfer for our adapter
522 	 */
523 	scsi_minphys(bp,&sd_switch);
524 #endif
525 
526 	/*
527 	 * Do bounds checking, adjust transfer, set b_cylin and b_pblkno.
528 	 */
529 	if (dscheck(bp, softc->dk_slices) <= 0)
530 		goto done;
531 
532 	/*
533 	 * Mask interrupts so that the pack cannot be invalidated until
534 	 * after we are in the queue.  Otherwise, we might not properly
535 	 * clean up one of the buffers.
536 	 */
537 	s = splbio();
538 
539 	/*
540 	 * If the device has been made invalid, error out
541 	 */
542 	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
543 		splx(s);
544 		bp->b_error = ENXIO;
545 		goto bad;
546 	}
547 
548 	/*
549 	 * Place it in the queue of disk activities for this disk
550 	 */
551 	bufqdisksort(&softc->buf_queue, bp);
552 
553 	splx(s);
554 
555 	/*
556 	 * Schedule ourselves for performing the work.
557 	 */
558 	xpt_schedule(periph, /* XXX priority */1);
559 
560 	return;
561 bad:
562 	bp->b_flags |= B_ERROR;
563 done:
564 
565 	/*
566 	 * Correctly set the buf to indicate a completed xfer
567 	 */
568 	bp->b_resid = bp->b_bcount;
569 	biodone(bp);
570 	return;
571 }
572 
573 /* For 2.2-stable support */
574 #ifndef ENOIOCTL
575 #define ENOIOCTL -1
576 #endif
577 
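/*
 * Handle ioctls: slice and label requests go to dsioctl(); anything it
 * doesn't recognize falls through to the generic CAM peripheral handler.
 */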
578 static int
579 daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
580 {
581 	struct cam_periph *periph;
582 	struct da_softc *softc;
583 	int unit;
584 	int error;
585 
586 	unit = dkunit(dev);
587 	periph = cam_extend_get(daperiphs, unit);
588 	if (periph == NULL)
589 		return (ENXIO);
590 
591 	softc = (struct da_softc *)periph->softc;
592 
593 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n"));
594 
595 	if (cmd == DIOCSBAD)
596 		return (EINVAL);	/* XXX */
597 
598 	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
599 		return (error); /* error code from tsleep */
600 	}
601 
602 	error = dsioctl("da", dev, cmd, addr, flag, &softc->dk_slices);
603 
604 	if (error == ENOIOCTL)
605 		error = cam_periph_ioctl(periph, cmd, addr, daerror);
606 
607 	cam_periph_unlock(periph);
608 
609 	return (error);
610 }
611 
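/*
 * Write physical memory to the dump partition one page at a time using
 * polled WRITE commands, then synchronize the drive's cache.
 */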
612 static int
613 dadump(dev_t dev)
614 {
615 	struct	    cam_periph *periph;
616 	struct	    da_softc *softc;
617 	struct	    disklabel *lp;
618 	u_int	    unit;
619 	u_int	    part;
620 	long	    num;	/* number of sectors to write */
621 	long	    blkoff;
622 	long	    blknum;
623 	long	    blkcnt;
624 	vm_offset_t addr;
625 	static	int dadoingadump = 0;
626 	struct	    ccb_scsiio csio;
627 
628 	/* toss any characters present prior to dump */
629 	while (cncheckc() != -1)
630 		;
631 
632 	unit = dkunit(dev);
633 	part = dkpart(dev);
634 	periph = cam_extend_get(daperiphs, unit);
635 	if (periph == NULL) {
636 		return (ENXIO);
637 	}
638 	softc = (struct da_softc *)periph->softc;
639 
640 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0
641 	 || (softc->dk_slices == NULL)
642 	 || (lp = dsgetlabel(dev, softc->dk_slices)) == NULL)
643 		return (ENXIO);
644 
645 	/* Size of memory to dump, in disk sectors. */
646 	/* XXX Fix up for non DEV_BSIZE sectors!!! */
647 	num = (u_long)Maxmem * PAGE_SIZE / softc->params.secsize;
648 
649 	blkoff = lp->d_partitions[part].p_offset;
650 	blkoff += softc->dk_slices->dss_slices[dkslice(dev)].ds_offset;
651 
652 	/* check transfer bounds against partition size */
653 	if ((dumplo < 0) || ((dumplo + num) > lp->d_partitions[part].p_size))
654 		return (EINVAL);
655 
656 	if (dadoingadump != 0)
657 		return (EFAULT);
658 
659 	dadoingadump = 1;
660 
661 	blknum = dumplo + blkoff;
662 	blkcnt = PAGE_SIZE / softc->params.secsize;
663 
664 	addr = 0;	/* starting address */
665 
666 	while (num > 0) {
667 
668 		if (is_physical_memory(addr)) {
669 			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
670 				   trunc_page(addr), VM_PROT_READ, TRUE);
671 		} else {
672 			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
673 				   trunc_page(0), VM_PROT_READ, TRUE);
674 		}
675 
676 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
677 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
678 		scsi_read_write(&csio,
679 				/*retries*/1,
680 				dadone,
681 				MSG_ORDERED_Q_TAG,
682 				/*read*/FALSE,
683 				/*byte2*/0,
684 				/*minimum_cmd_size*/ softc->minimum_cmd_size,
685 				blknum,
686 				blkcnt,
687 				/*data_ptr*/CADDR1,
688 				/*dxfer_len*/blkcnt * softc->params.secsize,
689 				/*sense_len*/SSD_FULL_SIZE,
690 				DA_DEFAULT_TIMEOUT * 1000);
691 		xpt_polled_action((union ccb *)&csio);
692 
693 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
694 			printf("Aborting dump due to I/O error.\n");
695 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
696 			     CAM_SCSI_STATUS_ERROR)
697 				scsi_sense_print(&csio);
698 			else
699 				printf("status == 0x%x, scsi status == 0x%x\n",
700 				       csio.ccb_h.status, csio.scsi_status);
701 			return(EIO);
702 		}
703 
704 		if (addr % (1024 * 1024) == 0) {
705 #ifdef	HW_WDOG
706 			if (wdog_tickler)
707 				(*wdog_tickler)();
708 #endif /* HW_WDOG */
709 			/* Count in MB of data left to write */
710 			printf("%ld ", (num  * softc->params.secsize)
711 				     / (1024 * 1024));
712 		}
713 
714 		/* update block count */
715 		num -= blkcnt;
716 		blknum += blkcnt;
717 		addr += blkcnt * softc->params.secsize;
718 
719 		/* operator aborting dump? */
720 		if (cncheckc() != -1)
721 			return (EINTR);
722 	}
723 
724 	/*
725 	 * Sync the disk cache contents to the physical media.
726 	 */
727 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
728 
729 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
730 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
731 		scsi_synchronize_cache(&csio,
732 				       /*retries*/1,
733 				       /*cbfcnp*/dadone,
734 				       MSG_SIMPLE_Q_TAG,
735 				       /*begin_lba*/0,/* Cover the whole disk */
736 				       /*lb_count*/0,
737 				       SSD_FULL_SIZE,
738 				       5 * 60 * 1000);
739 		xpt_polled_action((union ccb *)&csio);
740 
741 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
742 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
743 			     CAM_SCSI_STATUS_ERROR) {
744 				int asc, ascq;
745 				int sense_key, error_code;
746 
747 				scsi_extract_sense(&csio.sense_data,
748 						   &error_code,
749 						   &sense_key,
750 						   &asc, &ascq);
751 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
752 					scsi_sense_print(&csio);
753 			} else {
754 				xpt_print_path(periph->path);
755 				printf("Synchronize cache failed, status "
756 				       "== 0x%x, scsi status == 0x%x\n",
757 				       csio.ccb_h.status, csio.scsi_status);
758 			}
759 		}
760 	}
761 	return (0);
762 }
763 
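/*
 * Report the partition size as recorded by the slice code.
 */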
764 static int
765 dasize(dev_t dev)
766 {
767 	struct cam_periph *periph;
768 	struct da_softc *softc;
769 
770 	periph = cam_extend_get(daperiphs, dkunit(dev));
771 	if (periph == NULL)
772 		return (ENXIO);
773 
774 	softc = (struct da_softc *)periph->softc;
775 
776 	return (dssize(dev, &softc->dk_slices));
777 }
778 
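/*
 * Driver initialization: allocate the unit extend array, register for
 * "new device found" async events, install our cdevsw entry, and arm the
 * ordered tag timeout and the shutdown event handler.
 */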
779 static void
780 dainit(void)
781 {
782 	cam_status status;
783 	struct cam_path *path;
784 
785 	/*
786 	 * Create our extend array for storing the devices we attach to.
787 	 */
788 	daperiphs = cam_extend_new();
789 	SLIST_INIT(&softc_list);
790 	if (daperiphs == NULL) {
791 		printf("da: Failed to alloc extend array!\n");
792 		return;
793 	}
794 
795 	/*
796 	 * Install a global async callback.  This callback will
797 	 * receive async callbacks like "new device found".
798 	 */
799 	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
800 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
801 
802 	if (status == CAM_REQ_CMP) {
803 		struct ccb_setasync csa;
804 
805 		xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
806 		csa.ccb_h.func_code = XPT_SASYNC_CB;
807 		csa.event_enable = AC_FOUND_DEVICE;
808 		csa.callback = daasync;
809 		csa.callback_arg = NULL;
810 		xpt_action((union ccb *)&csa);
811 		status = csa.ccb_h.status;
812 		xpt_free_path(path);
813 	}
814 
815 	if (status != CAM_REQ_CMP) {
816 		printf("da: Failed to attach master async callback "
817 		       "due to status 0x%x!\n", status);
818 	} else {
819 
820 		/* If we were successful, register our devsw */
821 		cdevsw_add(&da_cdevsw);
822 
823 		/*
824 		 * Schedule a periodic event to occasionally send an
825 		 * ordered tag to a device.
826 		 */
827 		timeout(dasendorderedtag, NULL,
828 			(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
829 
830 		/* Register our shutdown event handler */
831 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
832 					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
833 		    printf("dainit: shutdown event registration failed!\n");
834 	}
835 }
836 
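/*
 * Called when the device goes away: deregister our async callbacks, mark
 * the pack invalid, and fail all queued I/O with ENXIO.
 */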
837 static void
838 daoninvalidate(struct cam_periph *periph)
839 {
840 	int s;
841 	struct da_softc *softc;
842 	struct buf *q_bp;
843 	struct ccb_setasync csa;
844 
845 	softc = (struct da_softc *)periph->softc;
846 
847 	/*
848 	 * De-register any async callbacks.
849 	 */
850 	xpt_setup_ccb(&csa.ccb_h, periph->path,
851 		      /* priority */ 5);
852 	csa.ccb_h.func_code = XPT_SASYNC_CB;
853 	csa.event_enable = 0;
854 	csa.callback = daasync;
855 	csa.callback_arg = periph;
856 	xpt_action((union ccb *)&csa);
857 
858 	softc->flags |= DA_FLAG_PACK_INVALID;
859 
860 	/*
861 	 * Although the oninvalidate() routines are always called at
862 	 * splsoftcam, we need to be at splbio() here to keep the buffer
863 	 * queue from being modified while we traverse it.
864 	 */
865 	s = splbio();
866 
867 	/*
868 	 * Return all queued I/O with ENXIO.
869 	 * XXX Handle any transactions queued to the card
870 	 *     with XPT_ABORT_CCB.
871 	 */
872 	while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){
873 		bufq_remove(&softc->buf_queue, q_bp);
874 		q_bp->b_resid = q_bp->b_bcount;
875 		q_bp->b_error = ENXIO;
876 		q_bp->b_flags |= B_ERROR;
877 		biodone(q_bp);
878 	}
879 	splx(s);
880 
881 	SLIST_REMOVE(&softc_list, softc, da_softc, links);
882 
883 	xpt_print_path(periph->path);
884 	printf("lost device\n");
885 }
886 
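/*
 * Final teardown once the last reference is gone: remove the devstat
 * entry and free the softc.
 */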
887 static void
888 dacleanup(struct cam_periph *periph)
889 {
890 	struct da_softc *softc;
891 
892 	softc = (struct da_softc *)periph->softc;
893 
894 	devstat_remove_entry(&softc->device_stats);
895 	cam_extend_release(daperiphs, periph->unit_number);
896 	xpt_print_path(periph->path);
897 	printf("removing device entry\n");
898 	free(softc, M_DEVBUF);
899 }
900 
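/*
 * Async event handler: attach to newly discovered direct access and
 * optical devices, and flag outstanding CCBs for unit attention retry
 * after a bus or bus device reset.
 */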
901 static void
902 daasync(void *callback_arg, u_int32_t code,
903 	struct cam_path *path, void *arg)
904 {
905 	struct cam_periph *periph;
906 
907 	periph = (struct cam_periph *)callback_arg;
908 	switch (code) {
909 	case AC_FOUND_DEVICE:
910 	{
911 		struct ccb_getdev *cgd;
912 		cam_status status;
913 
914 		cgd = (struct ccb_getdev *)arg;
915 
916 		if ((cgd->pd_type != T_DIRECT) && (cgd->pd_type != T_OPTICAL))
917 			break;
918 
919 		/*
920 		 * Allocate a peripheral instance for
921 		 * this device and start the probe
922 		 * process.
923 		 */
924 		status = cam_periph_alloc(daregister, daoninvalidate,
925 					  dacleanup, dastart,
926 					  "da", CAM_PERIPH_BIO,
927 					  cgd->ccb_h.path, daasync,
928 					  AC_FOUND_DEVICE, cgd);
929 
930 		if (status != CAM_REQ_CMP
931 		 && status != CAM_REQ_INPROG)
932 			printf("daasync: Unable to attach to new device "
933 				"due to status 0x%x\n", status);
934 		break;
935 	}
936 	case AC_SENT_BDR:
937 	case AC_BUS_RESET:
938 	{
939 		struct da_softc *softc;
940 		struct ccb_hdr *ccbh;
941 		int s;
942 
943 		softc = (struct da_softc *)periph->softc;
944 		s = splsoftcam();
945 		/*
946 		 * Don't fail on the expected unit attention
947 		 * that will occur.
948 		 */
949 		softc->flags |= DA_FLAG_RETRY_UA;
950 		for (ccbh = LIST_FIRST(&softc->pending_ccbs);
951 		     ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
952 			ccbh->ccb_state |= DA_CCB_RETRY_UA;
953 		splx(s);
954 		/* FALLTHROUGH*/
955 	}
956 	default:
957 		cam_periph_async(periph, code, path, arg);
958 		break;
959 	}
960 }
961 
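/*
 * Attach a new unit: allocate and initialize the softc, apply any quirk
 * entries, register devstat, hook the reset async events, and schedule
 * the READ CAPACITY probe.
 */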
962 static cam_status
963 daregister(struct cam_periph *periph, void *arg)
964 {
965 	int s;
966 	struct da_softc *softc;
967 	struct ccb_setasync csa;
968 	struct ccb_getdev *cgd;
969 	caddr_t match;
970 
971 	cgd = (struct ccb_getdev *)arg;
972 	if (periph == NULL) {
973 		printf("daregister: periph was NULL!!\n");
974 		return(CAM_REQ_CMP_ERR);
975 	}
976 
977 	if (cgd == NULL) {
978 		printf("daregister: no getdev CCB, can't register device\n");
979 		return(CAM_REQ_CMP_ERR);
980 	}
981 
982 	softc = (struct da_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);
983 
984 	if (softc == NULL) {
985 		printf("daregister: Unable to probe new device. "
986 		       "Unable to allocate softc\n");
987 		return(CAM_REQ_CMP_ERR);
988 	}
989 
990 	bzero(softc, sizeof(*softc));
991 	LIST_INIT(&softc->pending_ccbs);
992 	softc->state = DA_STATE_PROBE;
993 	bufq_init(&softc->buf_queue);
994 	if (SID_IS_REMOVABLE(&cgd->inq_data))
995 		softc->flags |= DA_FLAG_PACK_REMOVABLE;
996 	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
997 		softc->flags |= DA_FLAG_TAGGED_QUEUING;
998 
999 	periph->softc = softc;
1000 
1001 	cam_extend_set(daperiphs, periph->unit_number, periph);
1002 
1003 	/*
1004 	 * See if this device has any quirks.
1005 	 */
1006 	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
1007 			       (caddr_t)da_quirk_table,
1008 			       sizeof(da_quirk_table)/sizeof(*da_quirk_table),
1009 			       sizeof(*da_quirk_table), scsi_inquiry_match);
1010 
1011 	if (match != NULL)
1012 		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
1013 	else
1014 		softc->quirks = DA_Q_NONE;
1015 
1016 	if (softc->quirks & DA_Q_NO_6_BYTE)
1017 		softc->minimum_cmd_size = 10;
1018 	else
1019 		softc->minimum_cmd_size = 6;
1020 
1021 	/*
1022 	 * Block our timeout handler while we
1023 	 * add this softc to the dev list.
1024 	 */
1025 	s = splsoftclock();
1026 	SLIST_INSERT_HEAD(&softc_list, softc, links);
1027 	splx(s);
1028 
1029 	/*
1030 	 * The DA driver supports a blocksize, but
1031 	 * we don't know the blocksize until we do
1032 	 * a read capacity.  So, set a flag to
1033 	 * indicate that the blocksize is
1034 	 * unavailable right now.  We'll clear the
1035 	 * flag as soon as we've done a read capacity.
1036 	 */
1037 	devstat_add_entry(&softc->device_stats, "da",
1038 			  periph->unit_number, 0,
1039 	  		  DEVSTAT_BS_UNAVAILABLE,
1040 			  cgd->pd_type | DEVSTAT_TYPE_IF_SCSI,
1041 			  DEVSTAT_PRIORITY_DA);
1042 
1043 	/*
1044 	 * Add async callbacks for bus reset and
1045 	 * bus device reset calls.  I don't bother
1046 	 * checking if this fails as, in most cases,
1047 	 * the system will function just fine without
1048 	 * them and the only alternative would be to
1049 	 * not attach the device on failure.
1050 	 */
1051 	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
1052 	csa.ccb_h.func_code = XPT_SASYNC_CB;
1053 	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
1054 	csa.callback = daasync;
1055 	csa.callback_arg = periph;
1056 	xpt_action((union ccb *)&csa);
1057 	/*
1058 	 * Lock this peripheral until we are set up.
1059 	 * This first call can't block.
1060 	 */
1061 	(void)cam_periph_lock(periph, PRIBIO);
1062 	xpt_schedule(periph, /*priority*/5);
1063 
1064 	return(CAM_REQ_CMP);
1065 }
1066 
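/*
 * Action routine, called when a CCB is available: in the normal state,
 * issue buffer I/O from the queue; in the probe state, issue the
 * READ CAPACITY command.
 */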
1067 static void
1068 dastart(struct cam_periph *periph, union ccb *start_ccb)
1069 {
1070 	struct da_softc *softc;
1071 
1072 	softc = (struct da_softc *)periph->softc;
1073 
1074 
1075 	switch (softc->state) {
1076 	case DA_STATE_NORMAL:
1077 	{
1078 		/* Pull a buffer from the queue and get going on it */
1079 		struct buf *bp;
1080 		int s;
1081 
1082 		/*
1083 		 * See if there is a buf with work for us to do.
1084 		 */
1085 		s = splbio();
1086 		bp = bufq_first(&softc->buf_queue);
1087 		if (periph->immediate_priority <= periph->pinfo.priority) {
1088 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
1089 					("queuing for immediate ccb\n"));
1090 			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
1091 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1092 					  periph_links.sle);
1093 			periph->immediate_priority = CAM_PRIORITY_NONE;
1094 			splx(s);
1095 			wakeup(&periph->ccb_list);
1096 		} else if (bp == NULL) {
1097 			splx(s);
1098 			xpt_release_ccb(start_ccb);
1099 		} else {
1100 			int oldspl;
1101 			u_int8_t tag_code;
1102 
1103 			bufq_remove(&softc->buf_queue, bp);
1104 
1105 			devstat_start_transaction(&softc->device_stats);
1106 
1107 			if ((bp->b_flags & B_ORDERED) != 0
1108 			 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
1109 				softc->flags &= ~DA_FLAG_NEED_OTAG;
1110 				softc->ordered_tag_count++;
1111 				tag_code = MSG_ORDERED_Q_TAG;
1112 			} else {
1113 				tag_code = MSG_SIMPLE_Q_TAG;
1114 			}
1115 			scsi_read_write(&start_ccb->csio,
1116 					/*retries*/4,
1117 					dadone,
1118 					tag_code,
1119 					bp->b_flags & B_READ,
1120 					/*byte2*/0,
1121 					softc->minimum_cmd_size,
1122 					bp->b_pblkno,
1123 					bp->b_bcount / softc->params.secsize,
1124 					bp->b_data,
1125 					bp->b_bcount,
1126 					/*sense_len*/SSD_FULL_SIZE,
1127 					DA_DEFAULT_TIMEOUT * 1000);
1128 			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
1129 
1130 			/*
1131 			 * Block out any asynchronous callbacks
1132 			 * while we touch the pending ccb list.
1133 			 */
1134 			oldspl = splcam();
1135 			LIST_INSERT_HEAD(&softc->pending_ccbs,
1136 					 &start_ccb->ccb_h, periph_links.le);
1137 			splx(oldspl);
1138 
1139 			/* We expect a unit attention from this device */
1140 			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
1141 				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
1142 				softc->flags &= ~DA_FLAG_RETRY_UA;
1143 			}
1144 
1145 			start_ccb->ccb_h.ccb_bp = bp;
1146 			bp = bufq_first(&softc->buf_queue);
1147 			splx(s);
1148 
1149 			xpt_action(start_ccb);
1150 		}
1151 
1152 		if (bp != NULL) {
1153 			/* Have more work to do, so ensure we stay scheduled */
1154 			xpt_schedule(periph, /* XXX priority */1);
1155 		}
1156 		break;
1157 	}
1158 	case DA_STATE_PROBE:
1159 	{
1160 		struct ccb_scsiio *csio;
1161 		struct scsi_read_capacity_data *rcap;
1162 
1163 		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
1164 								M_TEMP,
1165 								M_NOWAIT);
1166 		if (rcap == NULL) {
1167 			printf("dastart: Couldn't malloc read_capacity data\n");
1168 			/* da_free_periph??? */
1169 			break;
1170 		}
1171 		csio = &start_ccb->csio;
1172 		scsi_read_capacity(csio,
1173 				   /*retries*/4,
1174 				   dadone,
1175 				   MSG_SIMPLE_Q_TAG,
1176 				   rcap,
1177 				   SSD_FULL_SIZE,
1178 				   /*timeout*/5000);
1179 		start_ccb->ccb_h.ccb_bp = NULL;
1180 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
1181 		xpt_action(start_ccb);
1182 		break;
1183 	}
1184 	}
1185 }
1186 
1187 
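/*
 * Command completion handler, dispatched on the DA_CCB_* type stored in
 * ccb_state: finish buffer I/O, complete the capacity probe, wake up
 * waiters, or (for dumps) do nothing since they are polled.
 */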
1188 static void
1189 dadone(struct cam_periph *periph, union ccb *done_ccb)
1190 {
1191 	struct da_softc *softc;
1192 	struct ccb_scsiio *csio;
1193 
1194 	softc = (struct da_softc *)periph->softc;
1195 	csio = &done_ccb->csio;
1196 	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
1197 	case DA_CCB_BUFFER_IO:
1198 	{
1199 		struct buf *bp;
1200 		int    oldspl;
1201 
1202 		bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
1203 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1204 			int error;
1205 			int s;
1206 			int sf;
1207 
1208 			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
1209 				sf = SF_RETRY_UA;
1210 			else
1211 				sf = 0;
1212 
1213 			/* Retry selection timeouts */
1214 			sf |= SF_RETRY_SELTO;
1215 
1216 			if ((error = daerror(done_ccb, 0, sf)) == ERESTART) {
1217 				/*
1218 				 * A retry was scheduled, so
1219 				 * just return.
1220 				 */
1221 				return;
1222 			}
1223 			if (error != 0) {
1224 				struct buf *q_bp;
1225 
1226 				s = splbio();
1227 
1228 				if (error == ENXIO) {
1229 					/*
1230 					 * Catastrophic error.  Mark our pack as
1231 					 * invalid.
1232 					 */
1233 					/* XXX See if this is really a media
1234 					 *     change first.
1235 					 */
1236 					xpt_print_path(periph->path);
1237 					printf("Invalidating pack\n");
1238 					softc->flags |= DA_FLAG_PACK_INVALID;
1239 				}
1240 
1241 				/*
1242 				 * return all queued I/O with EIO, so that
1243 				 * the client can retry these I/Os in the
1244 				 * proper order should it attempt to recover.
1245 				 */
1246 				while ((q_bp = bufq_first(&softc->buf_queue))
1247 					!= NULL) {
1248 					bufq_remove(&softc->buf_queue, q_bp);
1249 					q_bp->b_resid = q_bp->b_bcount;
1250 					q_bp->b_error = EIO;
1251 					q_bp->b_flags |= B_ERROR;
1252 					biodone(q_bp);
1253 				}
1254 				splx(s);
1255 				bp->b_error = error;
1256 				bp->b_resid = bp->b_bcount;
1257 				bp->b_flags |= B_ERROR;
1258 			} else {
1259 				bp->b_resid = csio->resid;
1260 				bp->b_error = 0;
1261 				if (bp->b_resid != 0) {
1262 					/* Short transfer ??? */
1263 					bp->b_flags |= B_ERROR;
1264 				}
1265 			}
1266 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1267 				cam_release_devq(done_ccb->ccb_h.path,
1268 						 /*relsim_flags*/0,
1269 						 /*reduction*/0,
1270 						 /*timeout*/0,
1271 						 /*getcount_only*/0);
1272 		} else {
1273 			bp->b_resid = csio->resid;
1274 			if (csio->resid > 0)
1275 				bp->b_flags |= B_ERROR;
1276 		}
1277 
1278 		/*
1279 		 * Block out any asynchronous callbacks
1280 		 * while we touch the pending ccb list.
1281 		 */
1282 		oldspl = splcam();
1283 		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
1284 		splx(oldspl);
1285 
1286 		devstat_end_transaction(&softc->device_stats,
1287 					bp->b_bcount - bp->b_resid,
1288 					done_ccb->csio.tag_action & 0xf,
1289 					(bp->b_flags & B_READ) ? DEVSTAT_READ
1290 							       : DEVSTAT_WRITE);
1291 
1292 		if (softc->device_stats.busy_count == 0)
1293 			softc->flags |= DA_FLAG_WENT_IDLE;
1294 
1295 		biodone(bp);
1296 		break;
1297 	}
1298 	case DA_CCB_PROBE:
1299 	{
1300 		struct	   scsi_read_capacity_data *rdcap;
1301 		char	   announce_buf[80];
1302 
1303 		rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;
1304 
1305 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1306 			struct disk_params *dp;
1307 
1308 			dasetgeom(periph, rdcap);
1309 			dp = &softc->params;
1310 			snprintf(announce_buf, sizeof(announce_buf),
1311 			        "%luMB (%u %u byte sectors: %dH %dS/T %dC)",
1312 				(unsigned long) (((u_int64_t)dp->secsize *
1313 				dp->sectors) / (1024*1024)), dp->sectors,
1314 				dp->secsize, dp->heads, dp->secs_per_track,
1315 				dp->cylinders);
1316 		} else {
1317 			int	error;
1318 
1319 			announce_buf[0] = '\0';
1320 
1321 			/*
1322 			 * Retry any UNIT ATTENTION type errors.  They
1323 			 * are expected at boot.
1324 			 */
1325 			error = daerror(done_ccb, 0, SF_RETRY_UA |
1326 					SF_RETRY_SELTO | SF_NO_PRINT);
1327 			if (error == ERESTART) {
1328 				/*
1329 				 * A retry was scheduled, so
1330 				 * just return.
1331 				 */
1332 				return;
1333 			} else if (error != 0) {
1334 				struct scsi_sense_data *sense;
1335 				int asc, ascq;
1336 				int sense_key, error_code;
1337 				int have_sense;
1338 				cam_status status;
1339 				struct ccb_getdev cgd;
1340 
1341 				/* Don't wedge this device's queue */
1342 				cam_release_devq(done_ccb->ccb_h.path,
1343 						 /*relsim_flags*/0,
1344 						 /*reduction*/0,
1345 						 /*timeout*/0,
1346 						 /*getcount_only*/0);
1347 
1348 				status = done_ccb->ccb_h.status;
1349 
1350 				xpt_setup_ccb(&cgd.ccb_h,
1351 					      done_ccb->ccb_h.path,
1352 					      /* priority */ 1);
1353 				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1354 				xpt_action((union ccb *)&cgd);
1355 
1356 				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
1357 				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
1358 				 || ((status & CAM_AUTOSNS_VALID) == 0))
1359 					have_sense = FALSE;
1360 				else
1361 					have_sense = TRUE;
1362 
1363 				if (have_sense) {
1364 					sense = &csio->sense_data;
1365 					scsi_extract_sense(sense, &error_code,
1366 							   &sense_key,
1367 							   &asc, &ascq);
1368 				}
1369 				/*
1370 				 * Attach to anything that claims to be a
1371 				 * direct access or optical disk device,
1372 				 * as long as it doesn't return a "Logical
1373 				 * unit not supported" (0x25) error.
1374 				 */
1375 				if ((have_sense) && (asc != 0x25)
1376 				 && (error_code == SSD_CURRENT_ERROR))
1377 					snprintf(announce_buf,
1378 					    sizeof(announce_buf),
1379 						"Attempt to query device "
1380 						"size failed: %s, %s",
1381 						scsi_sense_key_text[sense_key],
1382 						scsi_sense_desc(asc,ascq,
1383 								&cgd.inq_data));
1384 				else {
1385 					if (have_sense)
1386 						scsi_sense_print(
1387 							&done_ccb->csio);
1388 					else {
1389 						xpt_print_path(periph->path);
1390 						printf("got CAM status %#x\n",
1391 						       done_ccb->ccb_h.status);
1392 					}
1393 
1394 					xpt_print_path(periph->path);
1395 					printf("fatal error, failed"
1396 					       " to attach to device\n");
1397 
1398 					/*
1399 					 * Free up resources.
1400 					 */
1401 					cam_periph_invalidate(periph);
1402 				}
1403 			}
1404 		}
1405 		free(rdcap, M_TEMP);
1406 		if (announce_buf[0] != '\0')
1407 			xpt_announce_periph(periph, announce_buf);
1408 		softc->state = DA_STATE_NORMAL;
1409 		/*
1410 		 * Since our peripheral may be invalidated by an error
1411 		 * above or an external event, we must release our CCB
1412 		 * before releasing the probe lock on the peripheral.
1413 		 * The peripheral will only go away once the last lock
1414 		 * is removed, and we need it around for the CCB release
1415 		 * operation.
1416 		 */
1417 		xpt_release_ccb(done_ccb);
1418 		cam_periph_unlock(periph);
1419 		return;
1420 	}
1421 	case DA_CCB_WAITING:
1422 	{
1423 		/* Caller will release the CCB */
1424 		wakeup(&done_ccb->ccb_h.cbfcnp);
1425 		return;
1426 	}
1427 	case DA_CCB_DUMP:
1428 		/* No-op.  We're polling */
1429 		return;
1430 	default:
1431 		break;
1432 	}
1433 	xpt_release_ccb(done_ccb);
1434 }
1435 
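/*
 * Common error handler: treat unit attentions as retryable and hand the
 * CCB to the generic CAM error recovery code.
 */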
1436 static int
1437 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1438 {
1439 	struct da_softc	  *softc;
1440 	struct cam_periph *periph;
1441 
1442 	periph = xpt_path_periph(ccb->ccb_h.path);
1443 	softc = (struct da_softc *)periph->softc;
1444 
1445 	/*
1446 	 * XXX
1447 	 * Until we have a better way of doing pack validation,
1448 	 * don't treat UAs as errors.
1449 	 */
1450 	sense_flags |= SF_RETRY_UA;
1451 	return(cam_periph_error(ccb, cam_flags, sense_flags,
1452 				&softc->saved_ccb));
1453 }
1454 
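/*
 * Issue a PREVENT/ALLOW MEDIUM REMOVAL command if the pack isn't already
 * in the requested state, and track the result in DA_FLAG_PACK_LOCKED.
 */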
1455 static void
1456 daprevent(struct cam_periph *periph, int action)
1457 {
1458 	struct	da_softc *softc;
1459 	union	ccb *ccb;
1460 	int	error;
1461 
1462 	softc = (struct da_softc *)periph->softc;
1463 
1464 	if (((action == PR_ALLOW)
1465 	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
1466 	 || ((action == PR_PREVENT)
1467 	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
1468 		return;
1469 	}
1470 
1471 	ccb = cam_periph_getccb(periph, /*priority*/1);
1472 
1473 	scsi_prevent(&ccb->csio,
1474 		     /*retries*/1,
1475 		     /*cbcfp*/dadone,
1476 		     MSG_SIMPLE_Q_TAG,
1477 		     action,
1478 		     SSD_FULL_SIZE,
1479 		     5000);
1480 
1481 	error = cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
1482 				  /*sense_flags*/0, &softc->device_stats);
1483 
1484 	if (error == 0) {
1485 		if (action == PR_ALLOW)
1486 			softc->flags &= ~DA_FLAG_PACK_LOCKED;
1487 		else
1488 			softc->flags |= DA_FLAG_PACK_LOCKED;
1489 	}
1490 
1491 	xpt_release_ccb(ccb);
1492 }
1493 
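/*
 * Record the sector size and count from READ CAPACITY data and ask the
 * controller for a matching C/H/S geometry.
 */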
1494 static void
1495 dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data * rdcap)
1496 {
1497 	struct ccb_calc_geometry ccg;
1498 	struct da_softc *softc;
1499 	struct disk_params *dp;
1500 
1501 	softc = (struct da_softc *)periph->softc;
1502 
1503 	dp = &softc->params;
1504 	dp->secsize = scsi_4btoul(rdcap->length);
1505 	dp->sectors = scsi_4btoul(rdcap->addr) + 1;
1506 	/*
1507 	 * Have the controller provide us with a geometry
1508 	 * for this disk.  The only time the geometry
1509 	 * matters is when we boot and the controller
1510 	 * is the only one knowledgeable enough to come
1511 	 * up with something that will make this a bootable
1512 	 * device.
1513 	 */
1514 	xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
1515 	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
1516 	ccg.block_size = dp->secsize;
1517 	ccg.volume_size = dp->sectors;
1518 	ccg.heads = 0;
1519 	ccg.secs_per_track = 0;
1520 	ccg.cylinders = 0;
1521 	xpt_action((union ccb*)&ccg);
1522 	dp->heads = ccg.heads;
1523 	dp->secs_per_track = ccg.secs_per_track;
1524 	dp->cylinders = ccg.cylinders;
1525 }
1526 
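/*
 * Periodic timeout: flag any device that has stayed busy through a full
 * interval without issuing an ordered tag, so dastart() will send one
 * with the next I/O, then reschedule ourselves.
 */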
1527 static void
1528 dasendorderedtag(void *arg)
1529 {
1530 	struct da_softc *softc;
1531 	int s;
1532 
1533 	for (softc = SLIST_FIRST(&softc_list);
1534 	     softc != NULL;
1535 	     softc = SLIST_NEXT(softc, links)) {
1536 		s = splsoftcam();
1537 		if ((softc->ordered_tag_count == 0)
1538 		 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
1539 			softc->flags |= DA_FLAG_NEED_OTAG;
1540 		}
1541 		if (softc->device_stats.busy_count > 0)
1542 			softc->flags &= ~DA_FLAG_WENT_IDLE;
1543 
1544 		softc->ordered_tag_count = 0;
1545 		splx(s);
1546 	}
1547 	/* Queue us up again */
1548 	timeout(dasendorderedtag, NULL,
1549 		(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
1550 }
1551 
1552 /*
1553  * Step through all DA peripheral drivers, and if the device is still open,
1554  * sync the disk cache to physical media.
1555  */
1556 static void
1557 dashutdown(void * arg, int howto)
1558 {
1559 	struct cam_periph *periph;
1560 	struct da_softc *softc;
1561 
1562 	for (periph = TAILQ_FIRST(&dadriver.units); periph != NULL;
1563 	     periph = TAILQ_NEXT(periph, unit_links)) {
1564 		union ccb ccb;
1565 		softc = (struct da_softc *)periph->softc;
1566 
1567 		/*
1568 		 * We only sync the cache if the drive is still open, and
1569 		 * if the drive is capable of it.
1570 		 */
1571 		if (((softc->flags & DA_FLAG_OPEN) == 0)
1572 		 || (softc->quirks & DA_Q_NO_SYNC_CACHE))
1573 			continue;
1574 
1575 		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
1576 
1577 		ccb.ccb_h.ccb_state = DA_CCB_DUMP;
1578 		scsi_synchronize_cache(&ccb.csio,
1579 				       /*retries*/1,
1580 				       /*cbfcnp*/dadone,
1581 				       MSG_SIMPLE_Q_TAG,
1582 				       /*begin_lba*/0, /* whole disk */
1583 				       /*lb_count*/0,
1584 				       SSD_FULL_SIZE,
1585 				       5 * 60 * 1000);
1586 
1587 		xpt_polled_action(&ccb);
1588 
1589 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1590 			if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
1591 			     CAM_SCSI_STATUS_ERROR)
1592 			 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
1593 				int error_code, sense_key, asc, ascq;
1594 
1595 				scsi_extract_sense(&ccb.csio.sense_data,
1596 						   &error_code, &sense_key,
1597 						   &asc, &ascq);
1598 
1599 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
1600 					scsi_sense_print(&ccb.csio);
1601 			} else {
1602 				xpt_print_path(periph->path);
1603 				printf("Synchronize cache failed, status "
1604 				       "== 0x%x, scsi status == 0x%x\n",
1605 				       ccb.ccb_h.status, ccb.csio.scsi_status);
1606 			}
1607 		}
1608 
1609 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1610 			cam_release_devq(ccb.ccb_h.path,
1611 					 /*relsim_flags*/0,
1612 					 /*reduction*/0,
1613 					 /*timeout*/0,
1614 					 /*getcount_only*/0);
1615 
1616 	}
1617 }
1618