xref: /freebsd/sys/cam/scsi/scsi_da.c (revision 1d66272a85cde1c8a69c58f4b5dd649babd6eca6)
1 /*
2  * Implementation of SCSI Direct Access Peripheral driver for CAM.
3  *
4  * Copyright (c) 1997 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #ifdef _KERNEL
32 #include "opt_hw_wdog.h"
33 #endif /* _KERNEL */
34 
35 #include <sys/param.h>
36 
37 #ifdef _KERNEL
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/bio.h>
41 #endif /* _KERNEL */
42 
43 #include <sys/devicestat.h>
44 #include <sys/conf.h>
45 #include <sys/disk.h>
46 #include <sys/eventhandler.h>
47 #include <sys/malloc.h>
48 #include <sys/cons.h>
49 
50 #include <machine/md_var.h>
51 
52 #include <vm/vm.h>
53 #include <vm/pmap.h>
54 
55 #ifndef _KERNEL
56 #include <stdio.h>
57 #include <string.h>
58 #endif /* !_KERNEL */
59 
60 #include <cam/cam.h>
61 #include <cam/cam_ccb.h>
62 #include <cam/cam_extend.h>
63 #include <cam/cam_periph.h>
64 #include <cam/cam_xpt_periph.h>
65 
66 #include <cam/scsi/scsi_message.h>
67 
68 #ifndef _KERNEL
69 #include <cam/scsi/scsi_da.h>
70 #endif /* !_KERNEL */
71 
72 #ifdef _KERNEL
73 typedef enum {
74 	DA_STATE_PROBE,
75 	DA_STATE_NORMAL
76 } da_state;
77 
78 typedef enum {
79 	DA_FLAG_PACK_INVALID	= 0x001,
80 	DA_FLAG_NEW_PACK	= 0x002,
81 	DA_FLAG_PACK_LOCKED	= 0x004,
82 	DA_FLAG_PACK_REMOVABLE	= 0x008,
83 	DA_FLAG_TAGGED_QUEUING	= 0x010,
84 	DA_FLAG_NEED_OTAG	= 0x020,
85 	DA_FLAG_WENT_IDLE	= 0x040,
86 	DA_FLAG_RETRY_UA	= 0x080,
87 	DA_FLAG_OPEN		= 0x100
88 } da_flags;
89 
90 typedef enum {
91 	DA_Q_NONE		= 0x00,
92 	DA_Q_NO_SYNC_CACHE	= 0x01,
93 	DA_Q_NO_6_BYTE		= 0x02
94 } da_quirks;
95 
96 typedef enum {
97 	DA_CCB_PROBE		= 0x01,
98 	DA_CCB_BUFFER_IO	= 0x02,
99 	DA_CCB_WAITING		= 0x03,
100 	DA_CCB_DUMP		= 0x04,
101 	DA_CCB_TYPE_MASK	= 0x0F,
102 	DA_CCB_RETRY_UA		= 0x10
103 } da_ccb_state;
104 
105 /* Offsets into our private area for storing information */
106 #define ccb_state	ppriv_field0
107 #define ccb_bp		ppriv_ptr1
108 
109 struct disk_params {
110 	u_int8_t  heads;
111 	u_int16_t cylinders;
112 	u_int8_t  secs_per_track;
113 	u_int32_t secsize;	/* Number of bytes/sector */
114 	u_int32_t sectors;	/* total number sectors */
115 };
116 
117 struct da_softc {
118 	struct	 bio_queue_head bio_queue;
119 	struct	 devstat device_stats;
120 	SLIST_ENTRY(da_softc) links;
121 	LIST_HEAD(, ccb_hdr) pending_ccbs;
122 	da_state state;
123 	da_flags flags;
124 	da_quirks quirks;
125 	int	 minimum_cmd_size;
126 	int	 ordered_tag_count;
127 	struct	 disk_params params;
128 	struct	 disk disk;
129 	union	 ccb saved_ccb;
130 };
131 
132 struct da_quirk_entry {
133 	struct scsi_inquiry_pattern inq_pat;
134 	da_quirks quirks;
135 };
136 
137 static const char quantum[] = "QUANTUM";
138 static const char microp[] = "MICROP";
139 
140 static struct da_quirk_entry da_quirk_table[] =
141 {
142 	{
143 		/*
144 		 * This particular Fujitsu drive doesn't like the
145 		 * synchronize cache command.
146 		 * Reported by: Tom Jackson <toj@gorilla.net>
147 		 */
148 		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
149 		/*quirks*/ DA_Q_NO_SYNC_CACHE
150 
151 	},
152 	{
153 		/*
154 		 * This drive doesn't like the synchronize cache command
155 		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
156 		 * in NetBSD PR kern/6027, August 24, 1998.
157 		 */
158 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
159 		/*quirks*/ DA_Q_NO_SYNC_CACHE
160 	},
161 	{
162 		/*
163 		 * This drive doesn't like the synchronize cache command
164 		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
165 		 * (PR 8882).
166 		 */
167 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
168 		/*quirks*/ DA_Q_NO_SYNC_CACHE
169 	},
170 	{
171 		/*
172 		 * Doesn't like the synchronize cache command.
173 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
174 		 */
175 		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
176 		/*quirks*/ DA_Q_NO_SYNC_CACHE
177 	},
178 	{
179 		/*
180 		 * Doesn't like the synchronize cache command.
181 		 */
182 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
183 		/*quirks*/ DA_Q_NO_SYNC_CACHE
184 	},
185 	{
186 		/*
187 		 * Doesn't like the synchronize cache command.
188 		 */
189 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
190 		/*quirks*/ DA_Q_NO_SYNC_CACHE
191 	},
192 	{
193 		/*
194 		 * Doesn't work correctly with 6 byte reads/writes.
195 		 * Returns illegal request, and points to byte 9 of the
196 		 * 6-byte CDB.
197 		 * Reported by:  Adam McDougall <bsdx@spawnet.com>
198 		 */
199 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
200 		/*quirks*/ DA_Q_NO_6_BYTE
201 	},
202 	{
203 		/*
204 		 * See above.
205 		 */
206 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
207 		/*quirks*/ DA_Q_NO_6_BYTE
208 	},
209 
210 	/* Below is a list of quirks for USB devices supported by umass. */
211 	{
212 		/*
213 		 * This USB floppy drive uses the UFI command set. This
214 		 * command set is a derivative of the ATAPI command set and
215 		 * does not support READ_6 commands, only READ_10. It also does
216 		 * not support sync cache (0x35).
217 		 */
218 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Y-E DATA", "USB-FDU", "*"},
219 		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
220 	},
221 	{
222 		/* Another USB floppy */
223 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "MATSHITA", "FDD CF-VFDU*","*"},
224 		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
225 	},
226 	{
227 		/*
228 		 * The Sony Memory Stick adapter MSAC-US1 does not support
229 		 * READ_6 commands, only READ_10.  It also does
230 		 * not support sync cache (0x35).
231 		 * Sony PCG-C1VJ Internal Memory Stick Slot (MSC-U01) also
232 		 * has this quirk.  Make all Sony MS* products use this
233 		 * quirk.  Reported by: TERAMOTO Masahiro
234 		 * <teramoto@comm.eng.osaka-u.ac.jp> (PR 23378).
235 		 */
236 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "MS*", "*"},
237 		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
238 	},
239 	{
240 		/*
241 		 * Sony DSC cameras (DSC-S30, DSC-S50, DSC-S70)
242 		 * do not support READ_6 commands, only READ_10.
243 		 */
244 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"},
245 		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
246 	},
247 	{
248 		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "MCF3064AP", "*"},
249 		/*quirks*/ DA_Q_NO_6_BYTE
250 	}
251 };
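/*
 * Entries in the table above are matched against a device's inquiry data
 * by daregister() via cam_quirkmatch()/scsi_inquiry_match(); the vendor,
 * product and revision fields are shell-style patterns, so "*" matches
 * any string.
 */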
252 
253 static	d_open_t	daopen;
254 static	d_close_t	daclose;
255 static	d_strategy_t	dastrategy;
256 static	d_ioctl_t	daioctl;
257 static	d_dump_t	dadump;
258 static	periph_init_t	dainit;
259 static	void		daasync(void *callback_arg, u_int32_t code,
260 				struct cam_path *path, void *arg);
261 static	periph_ctor_t	daregister;
262 static	periph_dtor_t	dacleanup;
263 static	periph_start_t	dastart;
264 static	periph_oninv_t	daoninvalidate;
265 static	void		dadone(struct cam_periph *periph,
266 			       union ccb *done_ccb);
267 static  int		daerror(union ccb *ccb, u_int32_t cam_flags,
268 				u_int32_t sense_flags);
269 static void		daprevent(struct cam_periph *periph, int action);
270 static void		dasetgeom(struct cam_periph *periph,
271 				  struct scsi_read_capacity_data * rdcap);
272 static timeout_t	dasendorderedtag;
273 static void		dashutdown(void *arg, int howto);
274 
275 #ifndef DA_DEFAULT_TIMEOUT
276 #define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
277 #endif
278 
279 /*
280  * DA_ORDEREDTAG_INTERVAL determines how often, relative
281  * to the default timeout, we check to see whether an ordered
282  * tagged transaction is appropriate to prevent simple tag
283  * starvation.  Since we'd like to ensure that there is at least
284  * 1/2 of the timeout length left for a starved transaction to
285  * complete after we've sent an ordered tag, we must poll at least
286  * four times in every timeout period.  This takes care of the worst
287  * case where a starved transaction starts during an interval that
288  * still passes the "don't send an ordered tag" test, so it takes
289  * us two intervals to determine that a tag must be sent.
290  */
291 #ifndef DA_ORDEREDTAG_INTERVAL
292 #define DA_ORDEREDTAG_INTERVAL 4
293 #endif
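/*
 * With the defaults above, dainit() and dasendorderedtag() reschedule the
 * ordered tag handler every (DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL
 * ticks, i.e. 60s / 4 = 15 seconds between polls.
 */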
294 
295 static struct periph_driver dadriver =
296 {
297 	dainit, "da",
298 	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
299 };
300 
301 DATA_SET(periphdriver_set, dadriver);
302 
303 #define DA_CDEV_MAJOR 13
304 #define DA_BDEV_MAJOR 4
305 
306 /* For 2.2-stable support */
307 #ifndef D_DISK
308 #define D_DISK 0
309 #endif
310 
311 static struct cdevsw da_cdevsw = {
312 	/* open */	daopen,
313 	/* close */	daclose,
314 	/* read */	physread,
315 	/* write */	physwrite,
316 	/* ioctl */	daioctl,
317 	/* poll */	nopoll,
318 	/* mmap */	nommap,
319 	/* strategy */	dastrategy,
320 	/* name */	"da",
321 	/* maj */	DA_CDEV_MAJOR,
322 	/* dump */	dadump,
323 	/* psize */	nopsize,
324 	/* flags */	D_DISK,
325 	/* bmaj */	DA_BDEV_MAJOR
326 };
327 
328 static struct cdevsw dadisk_cdevsw;
329 
330 static SLIST_HEAD(,da_softc) softc_list;
331 static struct extend_array *daperiphs;
332 
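/*
 * daopen() is the character device open entry point.  It looks up the
 * peripheral by unit number, locks and acquires it, revalidates the pack
 * if it had been invalidated, issues a READ CAPACITY to (re)load the disk
 * parameters, builds an in-core disklabel from the inquiry and geometry
 * data, and prevents medium removal on removable packs.
 */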
333 static int
334 daopen(dev_t dev, int flags, int fmt, struct proc *p)
335 {
336 	struct cam_periph *periph;
337 	struct da_softc *softc;
338 	struct disklabel *label;
339 	int unit;
340 	int part;
341 	int error;
342 	int s;
343 
344 	unit = dkunit(dev);
345 	part = dkpart(dev);
346 	periph = cam_extend_get(daperiphs, unit);
347 	if (periph == NULL)
348 		return (ENXIO);
349 
350 	softc = (struct da_softc *)periph->softc;
351 
352 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
353 	    ("daopen: dev=%s (unit %d, partition %d)\n", devtoname(dev),
354 	     unit, part));
355 
356 	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
357 		return (error); /* error code from tsleep */
358 	}
359 
360 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		/* Drop the periph lock taken above before bailing out. */
		cam_periph_unlock(periph);
361 		return(ENXIO);
	}
362 	softc->flags |= DA_FLAG_OPEN;
363 
364 	s = splsoftcam();
365 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
366 		/* Invalidate our pack information. */
367 		disk_invalidate(&softc->disk);
368 		softc->flags &= ~DA_FLAG_PACK_INVALID;
369 	}
370 	splx(s);
371 
372 	/* Do a read capacity */
373 	{
374 		struct scsi_read_capacity_data *rcap;
375 		union  ccb *ccb;
376 
377 		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
378 								M_TEMP,
379 								M_WAITOK);
380 
381 		ccb = cam_periph_getccb(periph, /*priority*/1);
382 		scsi_read_capacity(&ccb->csio,
383 				   /*retries*/1,
384 				   /*cbfcnp*/dadone,
385 				   MSG_SIMPLE_Q_TAG,
386 				   rcap,
387 				   SSD_FULL_SIZE,
388 				   /*timeout*/60000);
389 		ccb->ccb_h.ccb_bp = NULL;
390 
391 		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
392 					  /*sense_flags*/SF_RETRY_UA |
393 							 SF_RETRY_SELTO,
394 					  &softc->device_stats);
395 
396 		xpt_release_ccb(ccb);
397 
398 		if (error == 0) {
399 			dasetgeom(periph, rcap);
400 		}
401 
402 		free(rcap, M_TEMP);
403 	}
404 
405 	if (error == 0) {
406 		struct ccb_getdev cgd;
407 
408 		/* Build label for whole disk. */
409 		label = &softc->disk.d_label;
410 		bzero(label, sizeof(*label));
411 		label->d_type = DTYPE_SCSI;
412 
413 		/*
414 		 * Grab the inquiry data to get the vendor and product names.
415 		 * Put them in the typename and packname for the label.
416 		 */
417 		xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
418 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
419 		xpt_action((union ccb *)&cgd);
420 
421 		strncpy(label->d_typename, cgd.inq_data.vendor,
422 			min(SID_VENDOR_SIZE, sizeof(label->d_typename)));
423 		strncpy(label->d_packname, cgd.inq_data.product,
424 			min(SID_PRODUCT_SIZE, sizeof(label->d_packname)));
425 
426 		label->d_secsize = softc->params.secsize;
427 		label->d_nsectors = softc->params.secs_per_track;
428 		label->d_ntracks = softc->params.heads;
429 		label->d_ncylinders = softc->params.cylinders;
430 		label->d_secpercyl = softc->params.heads
431 				  * softc->params.secs_per_track;
432 		label->d_secperunit = softc->params.sectors;
433 
434 		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
435 			daprevent(periph, PR_PREVENT);
436 		}
437 
438 		/*
439 		 * Check to see whether or not the blocksize is set yet.
440 		 * If it isn't, set it and then clear the blocksize
441 		 * unavailable flag for the device statistics.
442 		 */
443 		if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
444 			softc->device_stats.block_size = softc->params.secsize;
445 			softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
446 		}
447 	}
448 
449 	if (error != 0) {
450 		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
451 			daprevent(periph, PR_ALLOW);
452 		}
453 	}
454 	cam_periph_unlock(periph);
455 	return (error);
456 }
457 
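/*
 * daclose() flushes the drive's write cache with SYNCHRONIZE CACHE
 * (unless the DA_Q_NO_SYNC_CACHE quirk is set), re-allows medium removal
 * on removable packs, clears the open flag and drops our reference on
 * the peripheral.
 */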
458 static int
459 daclose(dev_t dev, int flag, int fmt, struct proc *p)
460 {
461 	struct	cam_periph *periph;
462 	struct	da_softc *softc;
463 	int	unit;
464 	int	error;
465 
466 	unit = dkunit(dev);
467 	periph = cam_extend_get(daperiphs, unit);
468 	if (periph == NULL)
469 		return (ENXIO);
470 
471 	softc = (struct da_softc *)periph->softc;
472 
473 	if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
474 		return (error); /* error code from tsleep */
475 	}
476 
477 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
478 		union	ccb *ccb;
479 
480 		ccb = cam_periph_getccb(periph, /*priority*/1);
481 
482 		scsi_synchronize_cache(&ccb->csio,
483 				       /*retries*/1,
484 				       /*cbfcnp*/dadone,
485 				       MSG_SIMPLE_Q_TAG,
486 				       /*begin_lba*/0,/* Cover the whole disk */
487 				       /*lb_count*/0,
488 				       SSD_FULL_SIZE,
489 				       5 * 60 * 1000);
490 
491 		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
492 				  /*sense_flags*/SF_RETRY_UA,
493 				  &softc->device_stats);
494 
495 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
496 			if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
497 			     CAM_SCSI_STATUS_ERROR) {
498 				int asc, ascq;
499 				int sense_key, error_code;
500 
501 				scsi_extract_sense(&ccb->csio.sense_data,
502 						   &error_code,
503 						   &sense_key,
504 						   &asc, &ascq);
505 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
506 					scsi_sense_print(&ccb->csio);
507 			} else {
508 				xpt_print_path(periph->path);
509 				printf("Synchronize cache failed, status "
510 				       "== 0x%x, scsi status == 0x%x\n",
511 				       ccb->csio.ccb_h.status,
512 				       ccb->csio.scsi_status);
513 			}
514 		}
515 
516 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
517 			cam_release_devq(ccb->ccb_h.path,
518 					 /*relsim_flags*/0,
519 					 /*reduction*/0,
520 					 /*timeout*/0,
521 					 /*getcount_only*/0);
522 
523 		xpt_release_ccb(ccb);
524 
525 	}
526 
527 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
528 		daprevent(periph, PR_ALLOW);
529 		/*
530 		 * If we've got removable media, mark the blocksize as
531 		 * unavailable, since it could change when new media is
532 		 * inserted.
533 		 */
534 		softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
535 	}
536 
537 	softc->flags &= ~DA_FLAG_OPEN;
538 	cam_periph_unlock(periph);
539 	cam_periph_release(periph);
540 	return (0);
541 }
542 
543 /*
544  * Actually translate the requested transfer into one the physical driver
545  * can understand.  The transfer is described by a buf and will include
546  * only one physical transfer.
547  */
548 static void
549 dastrategy(struct bio *bp)
550 {
551 	struct cam_periph *periph;
552 	struct da_softc *softc;
553 	u_int  unit;
554 	u_int  part;
555 	int    s;
556 
557 	unit = dkunit(bp->bio_dev);
558 	part = dkpart(bp->bio_dev);
559 	periph = cam_extend_get(daperiphs, unit);
560 	if (periph == NULL) {
561 		bp->bio_error = ENXIO;
562 		goto bad;
563 	}
564 	softc = (struct da_softc *)periph->softc;
565 #if 0
566 	/*
567 	 * check it's not too big a transfer for our adapter
568 	 */
569 	scsi_minphys(bp,&sd_switch);
570 #endif
571 
572 	/*
573 	 * Mask interrupts so that the pack cannot be invalidated until
574 	 * after we are in the queue.  Otherwise, we might not properly
575 	 * clean up one of the buffers.
576 	 */
577 	s = splbio();
578 
579 	/*
580 	 * If the device has been made invalid, error out
581 	 */
582 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
583 		splx(s);
584 		bp->bio_error = ENXIO;
585 		goto bad;
586 	}
587 
588 	/*
589 	 * Place it in the queue of disk activities for this disk
590 	 */
591 	bioqdisksort(&softc->bio_queue, bp);
592 
593 	splx(s);
594 
595 	/*
596 	 * Schedule ourselves for performing the work.
597 	 */
598 	xpt_schedule(periph, /* XXX priority */1);
599 
600 	return;
601 bad:
602 	bp->bio_flags |= BIO_ERROR;
603 
604 	/*
605 	 * Correctly set the buf to indicate a completed xfer
606 	 */
607 	bp->bio_resid = bp->bio_bcount;
608 	biodone(bp);
609 	return;
610 }
611 
612 /* For 2.2-stable support */
613 #ifndef ENOIOCTL
614 #define ENOIOCTL -1
615 #endif
616 
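/*
 * daioctl() simply passes the request through to cam_periph_ioctl()
 * while holding the peripheral lock.
 */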
617 static int
618 daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
619 {
620 	struct cam_periph *periph;
621 	struct da_softc *softc;
622 	int unit;
623 	int error;
624 
625 	unit = dkunit(dev);
626 	periph = cam_extend_get(daperiphs, unit);
627 	if (periph == NULL)
628 		return (ENXIO);
629 
630 	softc = (struct da_softc *)periph->softc;
631 
632 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n"));
633 
634 	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
635 		return (error); /* error code from tsleep */
636 	}
637 
638 	error = cam_periph_ioctl(periph, cmd, addr, daerror);
639 
640 	cam_periph_unlock(periph);
641 
642 	return (error);
643 }
644 
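/*
 * dadump() is the crash dump entry point.  It runs entirely with polled
 * CCBs: physical pages are temporarily mapped (up to MAXDUMPPGS at a
 * time) and written out with ordered-tag WRITE commands, and the drive
 * cache is synchronized once the dump completes.
 */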
645 static int
646 dadump(dev_t dev)
647 {
648 	struct	    cam_periph *periph;
649 	struct	    da_softc *softc;
650 	u_int	    unit;
651 	u_int	    part;
652 	u_int	    secsize;
653 	u_int	    num;	/* number of sectors to write */
654 	u_int	    blknum;
655 	long	    blkcnt;
656 	vm_offset_t addr;
657 	struct	    ccb_scsiio csio;
658 	int         dumppages = MAXDUMPPGS;
659 	int	    error;
660 	int         i;
661 
662 	/* toss any characters present prior to dump */
663 	while (cncheckc() != -1)
664 		;
665 
666 	unit = dkunit(dev);
667 	part = dkpart(dev);
668 	periph = cam_extend_get(daperiphs, unit);
669 	if (periph == NULL) {
670 		return (ENXIO);
671 	}
672 	softc = (struct da_softc *)periph->softc;
673 
674 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0)
675 		return (ENXIO);
676 
677 	error = disk_dumpcheck(dev, &num, &blknum, &secsize);
678 	if (error)
679 		return (error);
680 
681 	addr = 0;	/* starting address */
682 	blkcnt = howmany(PAGE_SIZE, secsize);
683 
684 	while (num > 0) {
685 		caddr_t va = NULL;
686 
687 		if ((num / blkcnt) < dumppages)
688 			dumppages = num / blkcnt;
689 
690 		for (i = 0; i < dumppages; ++i) {
691 			vm_offset_t a = addr + (i * PAGE_SIZE);
692 			if (is_physical_memory(a))
693 				va = pmap_kenter_temporary(trunc_page(a), i);
694 			else
695 				va = pmap_kenter_temporary(trunc_page(0), i);
696 		}
697 
698 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
699 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
700 		scsi_read_write(&csio,
701 				/*retries*/1,
702 				dadone,
703 				MSG_ORDERED_Q_TAG,
704 				/*read*/FALSE,
705 				/*byte2*/0,
706 				/*minimum_cmd_size*/ softc->minimum_cmd_size,
707 				blknum,
708 				blkcnt * dumppages,
709 				/*data_ptr*/(u_int8_t *) va,
710 				/*dxfer_len*/blkcnt * secsize * dumppages,
711 				/*sense_len*/SSD_FULL_SIZE,
712 				DA_DEFAULT_TIMEOUT * 1000);
713 		xpt_polled_action((union ccb *)&csio);
714 
715 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
716 			printf("Aborting dump due to I/O error.\n");
717 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
718 			     CAM_SCSI_STATUS_ERROR)
719 				scsi_sense_print(&csio);
720 			else
721 				printf("status == 0x%x, scsi status == 0x%x\n",
722 				       csio.ccb_h.status, csio.scsi_status);
723 			return(EIO);
724 		}
725 
726 		if (addr % (1024 * 1024) == 0) {
727 #ifdef	HW_WDOG
728 			if (wdog_tickler)
729 				(*wdog_tickler)();
730 #endif /* HW_WDOG */
731 			/* Count in MB of data left to write */
732 			printf("%d ", (num  * softc->params.secsize)
733 				     / (1024 * 1024));
734 		}
735 
736 		/* update block count */
737 		num -= blkcnt * dumppages;
738 		blknum += blkcnt * dumppages;
739 		addr += PAGE_SIZE * dumppages;
740 
741 		/* operator aborting dump? */
742 		if (cncheckc() != -1)
743 			return (EINTR);
744 	}
745 
746 	/*
747 	 * Sync the disk cache contents to the physical media.
748 	 */
749 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
750 
751 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
752 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
753 		scsi_synchronize_cache(&csio,
754 				       /*retries*/1,
755 				       /*cbfcnp*/dadone,
756 				       MSG_SIMPLE_Q_TAG,
757 				       /*begin_lba*/0,/* Cover the whole disk */
758 				       /*lb_count*/0,
759 				       SSD_FULL_SIZE,
760 				       5 * 60 * 1000);
761 		xpt_polled_action((union ccb *)&csio);
762 
763 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
764 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
765 			     CAM_SCSI_STATUS_ERROR) {
766 				int asc, ascq;
767 				int sense_key, error_code;
768 
769 				scsi_extract_sense(&csio.sense_data,
770 						   &error_code,
771 						   &sense_key,
772 						   &asc, &ascq);
773 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
774 					scsi_sense_print(&csio);
775 			} else {
776 				xpt_print_path(periph->path);
777 				printf("Synchronize cache failed, status "
778 				       "== 0x%x, scsi status == 0x%x\n",
779 				       csio.ccb_h.status, csio.scsi_status);
780 			}
781 		}
782 	}
783 	return (0);
784 }
785 
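/*
 * dainit() performs one-time driver setup: it allocates the unit number
 * extend array, registers a wildcard AC_FOUND_DEVICE async callback so
 * that newly discovered devices are attached, starts the periodic
 * ordered tag timeout and registers the shutdown event handler that
 * flushes drive caches.
 */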
786 static void
787 dainit(void)
788 {
789 	cam_status status;
790 	struct cam_path *path;
791 
792 	/*
793 	 * Create our extend array for storing the devices we attach to.
794 	 */
795 	daperiphs = cam_extend_new();
796 	SLIST_INIT(&softc_list);
797 	if (daperiphs == NULL) {
798 		printf("da: Failed to alloc extend array!\n");
799 		return;
800 	}
801 
802 	/*
803 	 * Install a global async callback.  This callback will
804 	 * receive async callbacks like "new device found".
805 	 */
806 	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
807 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
808 
809 	if (status == CAM_REQ_CMP) {
810 		struct ccb_setasync csa;
811 
812                 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
813                 csa.ccb_h.func_code = XPT_SASYNC_CB;
814                 csa.event_enable = AC_FOUND_DEVICE;
815                 csa.callback = daasync;
816                 csa.callback_arg = NULL;
817                 xpt_action((union ccb *)&csa);
818 		status = csa.ccb_h.status;
819                 xpt_free_path(path);
820         }
821 
822 	if (status != CAM_REQ_CMP) {
823 		printf("da: Failed to attach master async callback "
824 		       "due to status 0x%x!\n", status);
825 	} else {
826 
827 		/*
828 		 * Schedule a periodic event to occasionally send an
829 		 * ordered tag to a device.
830 		 */
831 		timeout(dasendorderedtag, NULL,
832 			(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
833 
834 		/* Register our shutdown event handler */
835 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
836 					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
837 		    printf("dainit: shutdown event registration failed!\n");
838 	}
839 }
840 
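/*
 * daoninvalidate() is called when the underlying device goes away.  It
 * deregisters our async callbacks, marks the pack invalid, fails all
 * queued bios with ENXIO and removes the softc from the global list.
 */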
841 static void
842 daoninvalidate(struct cam_periph *periph)
843 {
844 	int s;
845 	struct da_softc *softc;
846 	struct bio *q_bp;
847 	struct ccb_setasync csa;
848 
849 	softc = (struct da_softc *)periph->softc;
850 
851 	/*
852 	 * De-register any async callbacks.
853 	 */
854 	xpt_setup_ccb(&csa.ccb_h, periph->path,
855 		      /* priority */ 5);
856 	csa.ccb_h.func_code = XPT_SASYNC_CB;
857 	csa.event_enable = 0;
858 	csa.callback = daasync;
859 	csa.callback_arg = periph;
860 	xpt_action((union ccb *)&csa);
861 
862 	softc->flags |= DA_FLAG_PACK_INVALID;
863 
864 	/*
865 	 * Although the oninvalidate() routines are always called at
866 	 * splsoftcam, we need to be at splbio() here to keep the buffer
867 	 * queue from being modified while we traverse it.
868 	 */
869 	s = splbio();
870 
871 	/*
872 	 * Return all queued I/O with ENXIO.
873 	 * XXX Handle any transactions queued to the card
874 	 *     with XPT_ABORT_CCB.
875 	 */
876 	while ((q_bp = bioq_first(&softc->bio_queue)) != NULL){
877 		bioq_remove(&softc->bio_queue, q_bp);
878 		q_bp->bio_resid = q_bp->bio_bcount;
879 		q_bp->bio_error = ENXIO;
880 		q_bp->bio_flags |= BIO_ERROR;
881 		biodone(q_bp);
882 	}
883 	splx(s);
884 
885 	SLIST_REMOVE(&softc_list, softc, da_softc, links);
886 
887 	xpt_print_path(periph->path);
888 	printf("lost device\n");
889 }
890 
891 static void
892 dacleanup(struct cam_periph *periph)
893 {
894 	struct da_softc *softc;
895 
896 	softc = (struct da_softc *)periph->softc;
897 
898 	devstat_remove_entry(&softc->device_stats);
899 	cam_extend_release(daperiphs, periph->unit_number);
900 	xpt_print_path(periph->path);
901 	printf("removing device entry\n");
902 	free(softc, M_DEVBUF);
903 }
904 
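/*
 * daasync() handles asynchronous events.  On AC_FOUND_DEVICE it allocates
 * a peripheral instance for any direct access or optical device; on bus
 * resets and bus device resets it flags all pending CCBs so the expected
 * unit attention is retried rather than treated as an error.
 */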
905 static void
906 daasync(void *callback_arg, u_int32_t code,
907 	struct cam_path *path, void *arg)
908 {
909 	struct cam_periph *periph;
910 
911 	periph = (struct cam_periph *)callback_arg;
912 	switch (code) {
913 	case AC_FOUND_DEVICE:
914 	{
915 		struct ccb_getdev *cgd;
916 		cam_status status;
917 
918 		cgd = (struct ccb_getdev *)arg;
919 
920 		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
921 		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
922 			break;
923 
924 		/*
925 		 * Allocate a peripheral instance for
926 		 * this device and start the probe
927 		 * process.
928 		 */
929 		status = cam_periph_alloc(daregister, daoninvalidate,
930 					  dacleanup, dastart,
931 					  "da", CAM_PERIPH_BIO,
932 					  cgd->ccb_h.path, daasync,
933 					  AC_FOUND_DEVICE, cgd);
934 
935 		if (status != CAM_REQ_CMP
936 		 && status != CAM_REQ_INPROG)
937 			printf("daasync: Unable to attach to new device "
938 				"due to status 0x%x\n", status);
939 		break;
940 	}
941 	case AC_SENT_BDR:
942 	case AC_BUS_RESET:
943 	{
944 		struct da_softc *softc;
945 		struct ccb_hdr *ccbh;
946 		int s;
947 
948 		softc = (struct da_softc *)periph->softc;
949 		s = splsoftcam();
950 		/*
951 		 * Don't fail on the expected unit attention
952 		 * that will occur.
953 		 */
954 		softc->flags |= DA_FLAG_RETRY_UA;
955 		for (ccbh = LIST_FIRST(&softc->pending_ccbs);
956 		     ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
957 			ccbh->ccb_state |= DA_CCB_RETRY_UA;
958 		splx(s);
959 		/* FALLTHROUGH*/
960 	}
961 	default:
962 		cam_periph_async(periph, code, path, arg);
963 		break;
964 	}
965 }
966 
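/*
 * daregister() is the peripheral constructor.  It allocates and fills in
 * the softc, applies any quirk table entry matching the inquiry data
 * (which also selects the minimum CDB size), registers devstat and disk
 * entries, enables reset/lost-device async callbacks and schedules the
 * initial READ CAPACITY probe.
 */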
967 static cam_status
968 daregister(struct cam_periph *periph, void *arg)
969 {
970 	int s;
971 	struct da_softc *softc;
972 	struct ccb_setasync csa;
973 	struct ccb_getdev *cgd;
974 	caddr_t match;
975 
976 	cgd = (struct ccb_getdev *)arg;
977 	if (periph == NULL) {
978 		printf("daregister: periph was NULL!!\n");
979 		return(CAM_REQ_CMP_ERR);
980 	}
981 
982 	if (cgd == NULL) {
983 		printf("daregister: no getdev CCB, can't register device\n");
984 		return(CAM_REQ_CMP_ERR);
985 	}
986 
987 	softc = (struct da_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);
988 
989 	if (softc == NULL) {
990 		printf("daregister: Unable to probe new device. "
991 		       "Unable to allocate softc\n");
992 		return(CAM_REQ_CMP_ERR);
993 	}
994 
995 	bzero(softc, sizeof(*softc));
996 	LIST_INIT(&softc->pending_ccbs);
997 	softc->state = DA_STATE_PROBE;
998 	bioq_init(&softc->bio_queue);
999 	if (SID_IS_REMOVABLE(&cgd->inq_data))
1000 		softc->flags |= DA_FLAG_PACK_REMOVABLE;
1001 	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
1002 		softc->flags |= DA_FLAG_TAGGED_QUEUING;
1003 
1004 	periph->softc = softc;
1005 
1006 	cam_extend_set(daperiphs, periph->unit_number, periph);
1007 
1008 	/*
1009 	 * See if this device has any quirks.
1010 	 */
1011 	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
1012 			       (caddr_t)da_quirk_table,
1013 			       sizeof(da_quirk_table)/sizeof(*da_quirk_table),
1014 			       sizeof(*da_quirk_table), scsi_inquiry_match);
1015 
1016 	if (match != NULL)
1017 		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
1018 	else
1019 		softc->quirks = DA_Q_NONE;
1020 
1021 	if (softc->quirks & DA_Q_NO_6_BYTE)
1022 		softc->minimum_cmd_size = 10;
1023 	else
1024 		softc->minimum_cmd_size = 6;
1025 
1026 	/*
1027 	 * Block our timeout handler while we
1028 	 * add this softc to the dev list.
1029 	 */
1030 	s = splsoftclock();
1031 	SLIST_INSERT_HEAD(&softc_list, softc, links);
1032 	splx(s);
1033 
1034 	/*
1035 	 * The DA driver supports a blocksize, but
1036 	 * we don't know the blocksize until we do
1037 	 * a read capacity.  So, set a flag to
1038 	 * indicate that the blocksize is
1039 	 * unavailable right now.  We'll clear the
1040 	 * flag as soon as we've done a read capacity.
1041 	 */
1042 	devstat_add_entry(&softc->device_stats, "da",
1043 			  periph->unit_number, 0,
1044 	  		  DEVSTAT_BS_UNAVAILABLE,
1045 			  SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI,
1046 			  DEVSTAT_PRIORITY_DISK);
1047 
1048 	/*
1049 	 * Register this media as a disk
1050 	 */
1051 	disk_create(periph->unit_number, &softc->disk, 0,
1052 	    &da_cdevsw, &dadisk_cdevsw);
1053 
1054 	/*
1055 	 * Add async callbacks for bus reset and
1056 	 * bus device reset calls.  I don't bother
1057 	 * checking if this fails as, in most cases,
1058 	 * the system will function just fine without
1059 	 * them and the only alternative would be to
1060 	 * not attach the device on failure.
1061 	 */
1062 	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
1063 	csa.ccb_h.func_code = XPT_SASYNC_CB;
1064 	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
1065 	csa.callback = daasync;
1066 	csa.callback_arg = periph;
1067 	xpt_action((union ccb *)&csa);
1068 	/*
1069 	 * Lock this peripheral until we are set up.
1070 	 * This first call can't block.
1071 	 */
1072 	(void)cam_periph_lock(periph, PRIBIO);
1073 	xpt_schedule(periph, /*priority*/5);
1074 
1075 	return(CAM_REQ_CMP);
1076 }
1077 
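/*
 * dastart() is the periph start routine.  In DA_STATE_NORMAL it converts
 * queued bios into tagged READ/WRITE CCBs (or hands the CCB to a waiting
 * thread); in DA_STATE_PROBE it issues the READ CAPACITY used to size
 * the device.
 */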
1078 static void
1079 dastart(struct cam_periph *periph, union ccb *start_ccb)
1080 {
1081 	struct da_softc *softc;
1082 
1083 	softc = (struct da_softc *)periph->softc;
1084 
1085 
1086 	switch (softc->state) {
1087 	case DA_STATE_NORMAL:
1088 	{
1089 		/* Pull a buffer from the queue and get going on it */
1090 		struct bio *bp;
1091 		int s;
1092 
1093 		/*
1094 		 * See if there is a buf with work for us to do..
1095 		 */
1096 		s = splbio();
1097 		bp = bioq_first(&softc->bio_queue);
1098 		if (periph->immediate_priority <= periph->pinfo.priority) {
1099 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
1100 					("queuing for immediate ccb\n"));
1101 			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
1102 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1103 					  periph_links.sle);
1104 			periph->immediate_priority = CAM_PRIORITY_NONE;
1105 			splx(s);
1106 			wakeup(&periph->ccb_list);
1107 		} else if (bp == NULL) {
1108 			splx(s);
1109 			xpt_release_ccb(start_ccb);
1110 		} else {
1111 			int oldspl;
1112 			u_int8_t tag_code;
1113 
1114 			bioq_remove(&softc->bio_queue, bp);
1115 
1116 			devstat_start_transaction(&softc->device_stats);
1117 
1118 			if ((bp->bio_flags & BIO_ORDERED) != 0
1119 			 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
1120 				softc->flags &= ~DA_FLAG_NEED_OTAG;
1121 				softc->ordered_tag_count++;
1122 				tag_code = MSG_ORDERED_Q_TAG;
1123 			} else {
1124 				tag_code = MSG_SIMPLE_Q_TAG;
1125 			}
1126 			scsi_read_write(&start_ccb->csio,
1127 					/*retries*/4,
1128 					dadone,
1129 					tag_code,
1130 					bp->bio_cmd == BIO_READ,
1131 					/*byte2*/0,
1132 					softc->minimum_cmd_size,
1133 					bp->bio_pblkno,
1134 					bp->bio_bcount / softc->params.secsize,
1135 					bp->bio_data,
1136 					bp->bio_bcount,
1137 					/*sense_len*/SSD_FULL_SIZE,
1138 					DA_DEFAULT_TIMEOUT * 1000);
1139 			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
1140 
1141 			/*
1142 			 * Block out any asynchronous callbacks
1143 			 * while we touch the pending ccb list.
1144 			 */
1145 			oldspl = splcam();
1146 			LIST_INSERT_HEAD(&softc->pending_ccbs,
1147 					 &start_ccb->ccb_h, periph_links.le);
1148 			splx(oldspl);
1149 
1150 			/* We expect a unit attention from this device */
1151 			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
1152 				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
1153 				softc->flags &= ~DA_FLAG_RETRY_UA;
1154 			}
1155 
1156 			start_ccb->ccb_h.ccb_bp = bp;
1157 			bp = bioq_first(&softc->bio_queue);
1158 			splx(s);
1159 
1160 			xpt_action(start_ccb);
1161 		}
1162 
1163 		if (bp != NULL) {
1164 			/* Have more work to do, so ensure we stay scheduled */
1165 			xpt_schedule(periph, /* XXX priority */1);
1166 		}
1167 		break;
1168 	}
1169 	case DA_STATE_PROBE:
1170 	{
1171 		struct ccb_scsiio *csio;
1172 		struct scsi_read_capacity_data *rcap;
1173 
1174 		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
1175 								M_TEMP,
1176 								M_NOWAIT);
1177 		if (rcap == NULL) {
1178 			printf("dastart: Couldn't malloc read_capacity data\n");
1179 			/* da_free_periph??? */
1180 			break;
1181 		}
1182 		csio = &start_ccb->csio;
1183 		scsi_read_capacity(csio,
1184 				   /*retries*/4,
1185 				   dadone,
1186 				   MSG_SIMPLE_Q_TAG,
1187 				   rcap,
1188 				   SSD_FULL_SIZE,
1189 				   /*timeout*/5000);
1190 		start_ccb->ccb_h.ccb_bp = NULL;
1191 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
1192 		xpt_action(start_ccb);
1193 		break;
1194 	}
1195 	}
1196 }
1197 
1198 
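/*
 * dadone() dispatches on the ccb_state recorded when the CCB was built:
 * buffer I/O completions are error-checked and finished with biodone(),
 * probe completions record the geometry and announce the device,
 * DA_CCB_WAITING wakes the sleeping requester, and dump CCBs are ignored
 * since dadump() polls for them itself.
 */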
1199 static void
1200 dadone(struct cam_periph *periph, union ccb *done_ccb)
1201 {
1202 	struct da_softc *softc;
1203 	struct ccb_scsiio *csio;
1204 
1205 	softc = (struct da_softc *)periph->softc;
1206 	csio = &done_ccb->csio;
1207 	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
1208 	case DA_CCB_BUFFER_IO:
1209 	{
1210 		struct bio *bp;
1211 		int    oldspl;
1212 
1213 		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
1214 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1215 			int error;
1216 			int s;
1217 			int sf;
1218 
1219 			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
1220 				sf = SF_RETRY_UA;
1221 			else
1222 				sf = 0;
1223 
1224 			/* Retry selection timeouts */
1225 			sf |= SF_RETRY_SELTO;
1226 
1227 			if ((error = daerror(done_ccb, 0, sf)) == ERESTART) {
1228 				/*
1229 				 * A retry was scheduled, so
1230 				 * just return.
1231 				 */
1232 				return;
1233 			}
1234 			if (error != 0) {
1235 				struct bio *q_bp;
1236 
1237 				s = splbio();
1238 
1239 				if (error == ENXIO) {
1240 					/*
1241 					 * Catastrophic error.  Mark our pack as
1242 					 * invalid.
1243 					 */
1244 					/* XXX See if this is really a media
1245 					 *     change first.
1246 					 */
1247 					xpt_print_path(periph->path);
1248 					printf("Invalidating pack\n");
1249 					softc->flags |= DA_FLAG_PACK_INVALID;
1250 				}
1251 
1252 				/*
1253 				 * Return all queued I/O with EIO, so that
1254 				 * the client can retry these I/Os in the
1255 				 * proper order should it attempt to recover.
1256 				 */
1257 				while ((q_bp = bioq_first(&softc->bio_queue))
1258 					!= NULL) {
1259 					bioq_remove(&softc->bio_queue, q_bp);
1260 					q_bp->bio_resid = q_bp->bio_bcount;
1261 					q_bp->bio_error = EIO;
1262 					q_bp->bio_flags |= BIO_ERROR;
1263 					biodone(q_bp);
1264 				}
1265 				splx(s);
1266 				bp->bio_error = error;
1267 				bp->bio_resid = bp->bio_bcount;
1268 				bp->bio_flags |= BIO_ERROR;
1269 			} else {
1270 				bp->bio_resid = csio->resid;
1271 				bp->bio_error = 0;
1272 				if (bp->bio_resid != 0) {
1273 					/* Short transfer ??? */
1274 					bp->bio_flags |= BIO_ERROR;
1275 				}
1276 			}
1277 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1278 				cam_release_devq(done_ccb->ccb_h.path,
1279 						 /*relsim_flags*/0,
1280 						 /*reduction*/0,
1281 						 /*timeout*/0,
1282 						 /*getcount_only*/0);
1283 		} else {
1284 			bp->bio_resid = csio->resid;
1285 			if (csio->resid > 0)
1286 				bp->bio_flags |= BIO_ERROR;
1287 		}
1288 
1289 		/*
1290 		 * Block out any asynchronous callbacks
1291 		 * while we touch the pending ccb list.
1292 		 */
1293 		oldspl = splcam();
1294 		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
1295 		splx(oldspl);
1296 
1297 		if (softc->device_stats.busy_count == 0)
1298 			softc->flags |= DA_FLAG_WENT_IDLE;
1299 
1300 		devstat_end_transaction_bio(&softc->device_stats, bp);
1301 		biodone(bp);
1302 		break;
1303 	}
1304 	case DA_CCB_PROBE:
1305 	{
1306 		struct	   scsi_read_capacity_data *rdcap;
1307 		char	   announce_buf[80];
1308 
1309 		rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;
1310 
1311 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1312 			struct disk_params *dp;
1313 
1314 			dasetgeom(periph, rdcap);
1315 			dp = &softc->params;
1316 			snprintf(announce_buf, sizeof(announce_buf),
1317 			        "%luMB (%u %u byte sectors: %dH %dS/T %dC)",
1318 				(unsigned long) (((u_int64_t)dp->secsize *
1319 				dp->sectors) / (1024*1024)), dp->sectors,
1320 				dp->secsize, dp->heads, dp->secs_per_track,
1321 				dp->cylinders);
1322 		} else {
1323 			int	error;
1324 
1325 			announce_buf[0] = '\0';
1326 
1327 			/*
1328 			 * Retry any UNIT ATTENTION type errors.  They
1329 			 * are expected at boot.
1330 			 */
1331 			error = daerror(done_ccb, 0, SF_RETRY_UA |
1332 					SF_RETRY_SELTO | SF_NO_PRINT);
1333 			if (error == ERESTART) {
1334 				/*
1335 				 * A retry was scheduled, so
1336 				 * just return.
1337 				 */
1338 				return;
1339 			} else if (error != 0) {
1340 				struct scsi_sense_data *sense;
1341 				int asc, ascq;
1342 				int sense_key, error_code;
1343 				int have_sense;
1344 				cam_status status;
1345 				struct ccb_getdev cgd;
1346 
1347 				/* Don't wedge this device's queue */
1348 				cam_release_devq(done_ccb->ccb_h.path,
1349 						 /*relsim_flags*/0,
1350 						 /*reduction*/0,
1351 						 /*timeout*/0,
1352 						 /*getcount_only*/0);
1353 
1354 				status = done_ccb->ccb_h.status;
1355 
1356 				xpt_setup_ccb(&cgd.ccb_h,
1357 					      done_ccb->ccb_h.path,
1358 					      /* priority */ 1);
1359 				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1360 				xpt_action((union ccb *)&cgd);
1361 
1362 				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
1363 				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
1364 				 || ((status & CAM_AUTOSNS_VALID) == 0))
1365 					have_sense = FALSE;
1366 				else
1367 					have_sense = TRUE;
1368 
1369 				if (have_sense) {
1370 					sense = &csio->sense_data;
1371 					scsi_extract_sense(sense, &error_code,
1372 							   &sense_key,
1373 							   &asc, &ascq);
1374 				}
1375 				/*
1376 				 * Attach to anything that claims to be a
1377 				 * direct access or optical disk device,
1378 				 * as long as it doesn't return a "Logical
1379 				 * unit not supported" (0x25) error.
1380 				 */
1381 				if ((have_sense) && (asc != 0x25)
1382 				 && (error_code == SSD_CURRENT_ERROR))
1383 					snprintf(announce_buf,
1384 					    sizeof(announce_buf),
1385 						"Attempt to query device "
1386 						"size failed: %s, %s",
1387 						scsi_sense_key_text[sense_key],
1388 						scsi_sense_desc(asc,ascq,
1389 								&cgd.inq_data));
1390 				else {
1391 					if (have_sense)
1392 						scsi_sense_print(
1393 							&done_ccb->csio);
1394 					else {
1395 						xpt_print_path(periph->path);
1396 						printf("got CAM status %#x\n",
1397 						       done_ccb->ccb_h.status);
1398 					}
1399 
1400 					xpt_print_path(periph->path);
1401 					printf("fatal error, failed"
1402 					       " to attach to device\n");
1403 
1404 					/*
1405 					 * Free up resources.
1406 					 */
1407 					cam_periph_invalidate(periph);
1408 				}
1409 			}
1410 		}
1411 		free(rdcap, M_TEMP);
1412 		if (announce_buf[0] != '\0')
1413 			xpt_announce_periph(periph, announce_buf);
1414 		softc->state = DA_STATE_NORMAL;
1415 		/*
1416 		 * Since our peripheral may be invalidated by an error
1417 		 * above or an external event, we must release our CCB
1418 		 * before releasing the probe lock on the peripheral.
1419 		 * The peripheral will only go away once the last lock
1420 		 * is removed, and we need it around for the CCB release
1421 		 * operation.
1422 		 */
1423 		xpt_release_ccb(done_ccb);
1424 		cam_periph_unlock(periph);
1425 		return;
1426 	}
1427 	case DA_CCB_WAITING:
1428 	{
1429 		/* Caller will release the CCB */
1430 		wakeup(&done_ccb->ccb_h.cbfcnp);
1431 		return;
1432 	}
1433 	case DA_CCB_DUMP:
1434 		/* No-op.  We're polling */
1435 		return;
1436 	default:
1437 		break;
1438 	}
1439 	xpt_release_ccb(done_ccb);
1440 }
1441 
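/*
 * daerror() is a thin wrapper around cam_periph_error() that always adds
 * SF_RETRY_UA; see the XXX comment in the body.
 */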
1442 static int
1443 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1444 {
1445 	struct da_softc	  *softc;
1446 	struct cam_periph *periph;
1447 
1448 	periph = xpt_path_periph(ccb->ccb_h.path);
1449 	softc = (struct da_softc *)periph->softc;
1450 
1451 	/*
1452 	 * XXX
1453 	 * Until we have a better way of doing pack validation,
1454 	 * don't treat UAs as errors.
1455 	 */
1456 	sense_flags |= SF_RETRY_UA;
1457 	return(cam_periph_error(ccb, cam_flags, sense_flags,
1458 				&softc->saved_ccb));
1459 }
1460 
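/*
 * daprevent() issues a PREVENT ALLOW MEDIUM REMOVAL command and keeps
 * DA_FLAG_PACK_LOCKED in sync with the result, skipping the command when
 * the pack is already in the requested state.
 */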
1461 static void
1462 daprevent(struct cam_periph *periph, int action)
1463 {
1464 	struct	da_softc *softc;
1465 	union	ccb *ccb;
1466 	int	error;
1467 
1468 	softc = (struct da_softc *)periph->softc;
1469 
1470 	if (((action == PR_ALLOW)
1471 	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
1472 	 || ((action == PR_PREVENT)
1473 	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
1474 		return;
1475 	}
1476 
1477 	ccb = cam_periph_getccb(periph, /*priority*/1);
1478 
1479 	scsi_prevent(&ccb->csio,
1480 		     /*retries*/1,
1481 		     /*cbfcnp*/dadone,
1482 		     MSG_SIMPLE_Q_TAG,
1483 		     action,
1484 		     SSD_FULL_SIZE,
1485 		     5000);
1486 
1487 	error = cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
1488 				  /*sense_flags*/0, &softc->device_stats);
1489 
1490 	if (error == 0) {
1491 		if (action == PR_ALLOW)
1492 			softc->flags &= ~DA_FLAG_PACK_LOCKED;
1493 		else
1494 			softc->flags |= DA_FLAG_PACK_LOCKED;
1495 	}
1496 
1497 	xpt_release_ccb(ccb);
1498 }
1499 
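/*
 * dasetgeom() converts READ CAPACITY data into a sector size and count,
 * then asks the controller via an XPT_CALC_GEOMETRY CCB for a matching
 * cylinder/head/sector geometry.
 */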
1500 static void
1501 dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data * rdcap)
1502 {
1503 	struct ccb_calc_geometry ccg;
1504 	struct da_softc *softc;
1505 	struct disk_params *dp;
1506 
1507 	softc = (struct da_softc *)periph->softc;
1508 
1509 	dp = &softc->params;
1510 	dp->secsize = scsi_4btoul(rdcap->length);
1511 	dp->sectors = scsi_4btoul(rdcap->addr) + 1;
1512 	/*
1513 	 * Have the controller provide us with a geometry
1514 	 * for this disk.  The only time the geometry
1515 	 * matters is when we boot and the controller
1516 	 * is the only one knowledgeable enough to come
1517 	 * up with something that will make this a bootable
1518 	 * device.
1519 	 */
1520 	xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
1521 	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
1522 	ccg.block_size = dp->secsize;
1523 	ccg.volume_size = dp->sectors;
1524 	ccg.heads = 0;
1525 	ccg.secs_per_track = 0;
1526 	ccg.cylinders = 0;
1527 	xpt_action((union ccb*)&ccg);
1528 	dp->heads = ccg.heads;
1529 	dp->secs_per_track = ccg.secs_per_track;
1530 	dp->cylinders = ccg.cylinders;
1531 }
1532 
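/*
 * dasendorderedtag() runs periodically (see DA_ORDEREDTAG_INTERVAL
 * above).  A device that has neither gone idle nor completed an ordered
 * tag during the interval gets DA_FLAG_NEED_OTAG set, which makes
 * dastart() use an ordered tag on its next transaction to prevent simple
 * tag starvation.
 */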
1533 static void
1534 dasendorderedtag(void *arg)
1535 {
1536 	struct da_softc *softc;
1537 	int s;
1538 
1539 	for (softc = SLIST_FIRST(&softc_list);
1540 	     softc != NULL;
1541 	     softc = SLIST_NEXT(softc, links)) {
1542 		s = splsoftcam();
1543 		if ((softc->ordered_tag_count == 0)
1544 		 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
1545 			softc->flags |= DA_FLAG_NEED_OTAG;
1546 		}
1547 		if (softc->device_stats.busy_count > 0)
1548 			softc->flags &= ~DA_FLAG_WENT_IDLE;
1549 
1550 		softc->ordered_tag_count = 0;
1551 		splx(s);
1552 	}
1553 	/* Queue us up again */
1554 	timeout(dasendorderedtag, NULL,
1555 		(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
1556 }
1557 
1558 /*
1559  * Step through all DA peripheral drivers, and if the device is still open,
1560  * sync the disk cache to physical media.
1561  */
1562 static void
1563 dashutdown(void * arg, int howto)
1564 {
1565 	struct cam_periph *periph;
1566 	struct da_softc *softc;
1567 
1568 	for (periph = TAILQ_FIRST(&dadriver.units); periph != NULL;
1569 	     periph = TAILQ_NEXT(periph, unit_links)) {
1570 		union ccb ccb;
1571 		softc = (struct da_softc *)periph->softc;
1572 
1573 		/*
1574 		 * We only sync the cache if the drive is still open, and
1575 		 * if the drive is capable of it..
1576 		 */
1577 		if (((softc->flags & DA_FLAG_OPEN) == 0)
1578 		 || (softc->quirks & DA_Q_NO_SYNC_CACHE))
1579 			continue;
1580 
1581 		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
1582 
1583 		ccb.ccb_h.ccb_state = DA_CCB_DUMP;
1584 		scsi_synchronize_cache(&ccb.csio,
1585 				       /*retries*/1,
1586 				       /*cbfcnp*/dadone,
1587 				       MSG_SIMPLE_Q_TAG,
1588 				       /*begin_lba*/0, /* whole disk */
1589 				       /*lb_count*/0,
1590 				       SSD_FULL_SIZE,
1591 				       5 * 60 * 1000);
1592 
1593 		xpt_polled_action(&ccb);
1594 
1595 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1596 			if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
1597 			     CAM_SCSI_STATUS_ERROR)
1598 			 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
1599 				int error_code, sense_key, asc, ascq;
1600 
1601 				scsi_extract_sense(&ccb.csio.sense_data,
1602 						   &error_code, &sense_key,
1603 						   &asc, &ascq);
1604 
1605 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
1606 					scsi_sense_print(&ccb.csio);
1607 			} else {
1608 				xpt_print_path(periph->path);
1609 				printf("Synchronize cache failed, status "
1610 				       "== 0x%x, scsi status == 0x%x\n",
1611 				       ccb.ccb_h.status, ccb.csio.scsi_status);
1612 			}
1613 		}
1614 
1615 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1616 			cam_release_devq(ccb.ccb_h.path,
1617 					 /*relsim_flags*/0,
1618 					 /*reduction*/0,
1619 					 /*timeout*/0,
1620 					 /*getcount_only*/0);
1621 
1622 	}
1623 }
1624 
1625 #else /* !_KERNEL */
1626 
1627 /*
1628  * XXX This is only left out of the kernel build to silence warnings.  If,
1629  * for some reason this function is used in the kernel, the ifdefs should
1630  * be moved so it is included both in the kernel and userland.
1631  */
1632 void
1633 scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
1634 		 void (*cbfcnp)(struct cam_periph *, union ccb *),
1635 		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
1636 		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
1637 		 u_int32_t timeout)
1638 {
1639 	struct scsi_format_unit *scsi_cmd;
1640 
1641 	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
1642 	scsi_cmd->opcode = FORMAT_UNIT;
1643 	scsi_cmd->byte2 = byte2;
1644 	scsi_ulto2b(ileave, scsi_cmd->interleave);
1645 
1646 	cam_fill_csio(csio,
1647 		      retries,
1648 		      cbfcnp,
1649 		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
1650 		      tag_action,
1651 		      data_ptr,
1652 		      dxfer_len,
1653 		      sense_len,
1654 		      sizeof(*scsi_cmd),
1655 		      timeout);
1656 }
1657 
1658 #endif /* _KERNEL */
1659