xref: /freebsd/sys/cam/scsi/scsi_da.c (revision 77a0943ded95b9e6438f7db70c4a28e4d93946d4)
1 /*
2  * Implementation of SCSI Direct Access Peripheral driver for CAM.
3  *
4  * Copyright (c) 1997 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #ifdef _KERNEL
32 #include "opt_hw_wdog.h"
33 #endif /* _KERNEL */
34 
35 #include <sys/param.h>
36 
37 #ifdef _KERNEL
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/bio.h>
41 #endif /* _KERNEL */
42 
43 #include <sys/devicestat.h>
44 #include <sys/conf.h>
45 #include <sys/disk.h>
46 #include <sys/eventhandler.h>
47 #include <sys/malloc.h>
48 #include <sys/cons.h>
49 
50 #include <machine/md_var.h>
51 
52 #include <vm/vm.h>
53 #include <vm/pmap.h>
54 
55 #ifndef _KERNEL
56 #include <stdio.h>
57 #include <string.h>
58 #endif /* _KERNEL */
59 
60 #include <cam/cam.h>
61 #include <cam/cam_ccb.h>
62 #include <cam/cam_extend.h>
63 #include <cam/cam_periph.h>
64 #include <cam/cam_xpt_periph.h>
65 
66 #include <cam/scsi/scsi_message.h>
67 
68 #ifndef _KERNEL
69 #include <cam/scsi/scsi_da.h>
70 #endif /* !_KERNEL */
71 
72 #ifdef _KERNEL
73 typedef enum {
74 	DA_STATE_PROBE,
75 	DA_STATE_NORMAL
76 } da_state;
77 
78 typedef enum {
79 	DA_FLAG_PACK_INVALID	= 0x001,
80 	DA_FLAG_NEW_PACK	= 0x002,
81 	DA_FLAG_PACK_LOCKED	= 0x004,
82 	DA_FLAG_PACK_REMOVABLE	= 0x008,
83 	DA_FLAG_TAGGED_QUEUING	= 0x010,
84 	DA_FLAG_NEED_OTAG	= 0x020,
85 	DA_FLAG_WENT_IDLE	= 0x040,
86 	DA_FLAG_RETRY_UA	= 0x080,
87 	DA_FLAG_OPEN		= 0x100
88 } da_flags;
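/*
 * Notes on the less obvious flags: DA_FLAG_NEED_OTAG and DA_FLAG_WENT_IDLE
 * are managed by dasendorderedtag(), dastart() and dadone() to decide when
 * an ordered tag should be injected; DA_FLAG_RETRY_UA is set by daasync()
 * after a bus reset or BDR so that the expected unit attention is retried
 * instead of being treated as an error; DA_FLAG_PACK_INVALID is set by
 * daoninvalidate() once the underlying device goes away.
 */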
89 
90 typedef enum {
91 	DA_Q_NONE		= 0x00,
92 	DA_Q_NO_SYNC_CACHE	= 0x01,
93 	DA_Q_NO_6_BYTE		= 0x02
94 } da_quirks;
95 
96 typedef enum {
97 	DA_CCB_PROBE		= 0x01,
98 	DA_CCB_BUFFER_IO	= 0x02,
99 	DA_CCB_WAITING		= 0x03,
100 	DA_CCB_DUMP		= 0x04,
101 	DA_CCB_TYPE_MASK	= 0x0F,
102 	DA_CCB_RETRY_UA		= 0x10
103 } da_ccb_state;
104 
105 /* Offsets into our private area for storing information */
106 #define ccb_state	ppriv_field0
107 #define ccb_bp		ppriv_ptr1
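/*
 * The low nibble of ccb_state holds one of the da_ccb_state type values
 * (recovered in dadone() with DA_CCB_TYPE_MASK), and DA_CCB_RETRY_UA is
 * OR'd in as a flag.  ccb_bp points back at the bio that generated a
 * buffer I/O request, e.g. in dastart():
 *
 *	start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
 *	start_ccb->ccb_h.ccb_bp = bp;
 */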
108 
109 struct disk_params {
110 	u_int8_t  heads;
111 	u_int16_t cylinders;
112 	u_int8_t  secs_per_track;
113 	u_int32_t secsize;	/* Number of bytes/sector */
114 	u_int32_t sectors;	/* total number sectors */
115 };
116 
117 struct da_softc {
118 	struct	 bio_queue_head bio_queue;
119 	struct	 devstat device_stats;
120 	SLIST_ENTRY(da_softc) links;
121 	LIST_HEAD(, ccb_hdr) pending_ccbs;
122 	da_state state;
123 	da_flags flags;
124 	da_quirks quirks;
125 	int	 minimum_cmd_size;
126 	int	 ordered_tag_count;
127 	struct	 disk_params params;
128 	struct	 disk disk;
129 	union	 ccb saved_ccb;
130 };
131 
132 struct da_quirk_entry {
133 	struct scsi_inquiry_pattern inq_pat;
134 	da_quirks quirks;
135 };
136 
137 static const char quantum[] = "QUANTUM";
138 static const char microp[] = "MICROP";
139 
140 static struct da_quirk_entry da_quirk_table[] =
141 {
142 	{
143 		/*
144 		 * This particular Fujitsu drive doesn't like the
145 		 * synchronize cache command.
146 		 * Reported by: Tom Jackson <toj@gorilla.net>
147 		 */
148 		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
149 		/*quirks*/ DA_Q_NO_SYNC_CACHE
150 
151 	},
152 	{
153 		/*
154 		 * This drive doesn't like the synchronize cache command
155 		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
156 		 * in NetBSD PR kern/6027, August 24, 1998.
157 		 */
158 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
159 		/*quirks*/ DA_Q_NO_SYNC_CACHE
160 	},
161 	{
162 		/*
163 		 * This drive doesn't like the synchronize cache command
164 		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
165 		 * (PR 8882).
166 		 */
167 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
168 		/*quirks*/ DA_Q_NO_SYNC_CACHE
169 	},
170 	{
171 		/*
172 		 * Doesn't like the synchronize cache command.
173 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
174 		 */
175 		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
176 		/*quirks*/ DA_Q_NO_SYNC_CACHE
177 	},
178 	{
179 		/*
180 		 * Doesn't like the synchronize cache command.
181 		 */
182 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
183 		/*quirks*/ DA_Q_NO_SYNC_CACHE
184 	},
185 	{
186 		/*
187 		 * Doesn't like the synchronize cache command.
188 		 */
189 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
190 		/*quirks*/ DA_Q_NO_SYNC_CACHE
191 	},
192 	{
193 		/*
194 		 * Doesn't work correctly with 6 byte reads/writes.
195 		 * Returns illegal request, and points to byte 9 of the
196 		 * 6-byte CDB.
197 		 * Reported by:  Adam McDougall <bsdx@spawnet.com>
198 		 */
199 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
200 		/*quirks*/ DA_Q_NO_6_BYTE
201 	},
202 	{
203 		/*
204 		 * See above.
205 		 */
206 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
207 		/*quirks*/ DA_Q_NO_6_BYTE
208 	},
209 
210 	/* Below a list of quirks for USB devices supported by umass. */
211 	{
212 		/*
213 		 * This USB floppy drive uses the UFI command set. This
214 		 * command set is a derivative of the ATAPI command set and
215 		 * does not support READ_6 commands only READ_10. It also does
216 		 * not support sync cache (0x35).
217 		 */
218 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Y-E DATA", "USB-FDU", "*"},
219 		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
220 	},
221 	{
222 		/* Another USB floppy */
223 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "MATSHITA", "FDD CF-VFDU*","*"},
224 		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
225 	},
226 	{
227 		/*
228 		 * The Sony Memory Stick adapter MSAC-US1
229 		 * does not support READ_6 commands, only READ_10. It also does
230 		 * not support sync cache (0x35).
231 		 */
232 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "MSAC-US1", "*"},
233 		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
234 	},
235 	{
236 		/*
237 		 * Sony DSC cameras (DSC-S30, DSC-S50, DSC-S70)
238 		 * do not support READ_6 commands, only READ_10.
239 		 */
240 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"},
241 		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
242 	}
243 };
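/*
 * Each entry above is matched against a device's INQUIRY data in
 * daregister() using cam_quirkmatch() with scsi_inquiry_match(); the
 * vendor, product and revision strings may contain '*' wildcards, so
 * "M2954*" covers every M2954 variant.  A matching entry supplies the
 * quirks field for the softc.
 */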
244 
245 static	d_open_t	daopen;
246 static	d_close_t	daclose;
247 static	d_strategy_t	dastrategy;
248 static	d_ioctl_t	daioctl;
249 static	d_dump_t	dadump;
250 static	periph_init_t	dainit;
251 static	void		daasync(void *callback_arg, u_int32_t code,
252 				struct cam_path *path, void *arg);
253 static	periph_ctor_t	daregister;
254 static	periph_dtor_t	dacleanup;
255 static	periph_start_t	dastart;
256 static	periph_oninv_t	daoninvalidate;
257 static	void		dadone(struct cam_periph *periph,
258 			       union ccb *done_ccb);
259 static  int		daerror(union ccb *ccb, u_int32_t cam_flags,
260 				u_int32_t sense_flags);
261 static void		daprevent(struct cam_periph *periph, int action);
262 static void		dasetgeom(struct cam_periph *periph,
263 				  struct scsi_read_capacity_data * rdcap);
264 static timeout_t	dasendorderedtag;
265 static void		dashutdown(void *arg, int howto);
266 
267 #ifndef DA_DEFAULT_TIMEOUT
268 #define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
269 #endif
270 
271 /*
272  * DA_ORDEREDTAG_INTERVAL determines how often, relative
273  * to the default timeout, we check to see whether an ordered
274  * tagged transaction is appropriate to prevent simple tag
275  * starvation.  Since we'd like to ensure that there is at least
276  * 1/2 of the timeout length left for a starved transaction to
277  * complete after we've sent an ordered tag, we must poll at least
278  * four times in every timeout period.  This takes care of the worst
279  * case where a starved transaction starts during an interval that
280  * meets the requirement "don't send an ordered tag" test so it takes
281  * passes the "don't send an ordered tag" test, so it takes
282  */
283 #ifndef DA_ORDEREDTAG_INTERVAL
284 #define DA_ORDEREDTAG_INTERVAL 4
285 #endif
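/*
 * Worked example with the defaults above: DA_DEFAULT_TIMEOUT is 60 seconds
 * and DA_ORDEREDTAG_INTERVAL is 4, so dasendorderedtag() runs every 15
 * seconds.  In the worst case a starved transaction is flagged two
 * intervals (30 seconds) after it starts, which still leaves half of the
 * timeout for it to complete once the ordered tag goes out.
 */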
286 
287 static struct periph_driver dadriver =
288 {
289 	dainit, "da",
290 	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
291 };
292 
293 DATA_SET(periphdriver_set, dadriver);
294 
295 #define DA_CDEV_MAJOR 13
296 #define DA_BDEV_MAJOR 4
297 
298 /* For 2.2-stable support */
299 #ifndef D_DISK
300 #define D_DISK 0
301 #endif
302 
303 static struct cdevsw da_cdevsw = {
304 	/* open */	daopen,
305 	/* close */	daclose,
306 	/* read */	physread,
307 	/* write */	physwrite,
308 	/* ioctl */	daioctl,
309 	/* poll */	nopoll,
310 	/* mmap */	nommap,
311 	/* strategy */	dastrategy,
312 	/* name */	"da",
313 	/* maj */	DA_CDEV_MAJOR,
314 	/* dump */	dadump,
315 	/* psize */	nopsize,
316 	/* flags */	D_DISK,
317 	/* bmaj */	DA_BDEV_MAJOR
318 };
319 
320 static struct cdevsw dadisk_cdevsw;
321 
322 static SLIST_HEAD(,da_softc) softc_list;
323 static struct extend_array *daperiphs;
324 
325 static int
326 daopen(dev_t dev, int flags, int fmt, struct proc *p)
327 {
328 	struct cam_periph *periph;
329 	struct da_softc *softc;
330 	struct disklabel *label;
331 	int unit;
332 	int part;
333 	int error;
334 	int s;
335 
336 	unit = dkunit(dev);
337 	part = dkpart(dev);
338 	periph = cam_extend_get(daperiphs, unit);
339 	if (periph == NULL)
340 		return (ENXIO);
341 
342 	softc = (struct da_softc *)periph->softc;
343 
344 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
345 	    ("daopen: dev=%s (unit %d, partition %d)\n", devtoname(dev),
346 	     unit, part));
347 
348 	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
349 		return (error); /* error code from tsleep */
350 	}
351 
352 	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
353 		return(ENXIO);
354 	softc->flags |= DA_FLAG_OPEN;
355 
356 	s = splsoftcam();
357 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
358 		/* Invalidate our pack information. */
359 		disk_invalidate(&softc->disk);
360 		softc->flags &= ~DA_FLAG_PACK_INVALID;
361 	}
362 	splx(s);
363 
364 	/* Do a read capacity */
365 	{
366 		struct scsi_read_capacity_data *rcap;
367 		union  ccb *ccb;
368 
369 		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
370 								M_TEMP,
371 								M_WAITOK);
372 
373 		ccb = cam_periph_getccb(periph, /*priority*/1);
374 		scsi_read_capacity(&ccb->csio,
375 				   /*retries*/1,
376 				   /*cbfcnp*/dadone,
377 				   MSG_SIMPLE_Q_TAG,
378 				   rcap,
379 				   SSD_FULL_SIZE,
380 				   /*timeout*/60000);
381 		ccb->ccb_h.ccb_bp = NULL;
382 
383 		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
384 					  /*sense_flags*/SF_RETRY_UA |
385 							 SF_RETRY_SELTO,
386 					  &softc->device_stats);
387 
388 		xpt_release_ccb(ccb);
389 
390 		if (error == 0) {
391 			dasetgeom(periph, rcap);
392 		}
393 
394 		free(rcap, M_TEMP);
395 	}
396 
397 	if (error == 0) {
398 		struct ccb_getdev cgd;
399 
400 		/* Build label for whole disk. */
401 		label = &softc->disk.d_label;
402 		bzero(label, sizeof(*label));
403 		label->d_type = DTYPE_SCSI;
404 
405 		/*
406 		 * Grab the inquiry data to get the vendor and product names.
407 		 * Put them in the typename and packname for the label.
408 		 */
409 		xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
410 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
411 		xpt_action((union ccb *)&cgd);
412 
413 		strncpy(label->d_typename, cgd.inq_data.vendor,
414 			min(SID_VENDOR_SIZE, sizeof(label->d_typename)));
415 		strncpy(label->d_packname, cgd.inq_data.product,
416 			min(SID_PRODUCT_SIZE, sizeof(label->d_packname)));
417 
418 		label->d_secsize = softc->params.secsize;
419 		label->d_nsectors = softc->params.secs_per_track;
420 		label->d_ntracks = softc->params.heads;
421 		label->d_ncylinders = softc->params.cylinders;
422 		label->d_secpercyl = softc->params.heads
423 				  * softc->params.secs_per_track;
424 		label->d_secperunit = softc->params.sectors;
425 
426 		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
427 			daprevent(periph, PR_PREVENT);
428 		}
429 
430 		/*
431 		 * Check to see whether or not the blocksize is set yet.
432 		 * If it isn't, set it and then clear the blocksize
433 		 * unavailable flag for the device statistics.
434 		 */
435 		if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
436 			softc->device_stats.block_size = softc->params.secsize;
437 			softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
438 		}
439 	}
440 
441 	if (error != 0) {
442 		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
443 			daprevent(periph, PR_ALLOW);
444 		}
445 	}
446 	cam_periph_unlock(periph);
447 	return (error);
448 }
449 
450 static int
451 daclose(dev_t dev, int flag, int fmt, struct proc *p)
452 {
453 	struct	cam_periph *periph;
454 	struct	da_softc *softc;
455 	int	unit;
456 	int	error;
457 
458 	unit = dkunit(dev);
459 	periph = cam_extend_get(daperiphs, unit);
460 	if (periph == NULL)
461 		return (ENXIO);
462 
463 	softc = (struct da_softc *)periph->softc;
464 
465 	if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
466 		return (error); /* error code from tsleep */
467 	}
468 
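	/*
	 * Flush the drive's write cache when the device is closed so that
	 * buffered data reaches the media, unless the device is quirked as
	 * not supporting SYNCHRONIZE CACHE.
	 */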
469 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
470 		union	ccb *ccb;
471 
472 		ccb = cam_periph_getccb(periph, /*priority*/1);
473 
474 		scsi_synchronize_cache(&ccb->csio,
475 				       /*retries*/1,
476 				       /*cbfcnp*/dadone,
477 				       MSG_SIMPLE_Q_TAG,
478 				       /*begin_lba*/0,/* Cover the whole disk */
479 				       /*lb_count*/0,
480 				       SSD_FULL_SIZE,
481 				       5 * 60 * 1000);
482 
483 		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
484 				  /*sense_flags*/SF_RETRY_UA,
485 				  &softc->device_stats);
486 
487 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
488 			if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
489 			     CAM_SCSI_STATUS_ERROR) {
490 				int asc, ascq;
491 				int sense_key, error_code;
492 
493 				scsi_extract_sense(&ccb->csio.sense_data,
494 						   &error_code,
495 						   &sense_key,
496 						   &asc, &ascq);
497 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
498 					scsi_sense_print(&ccb->csio);
499 			} else {
500 				xpt_print_path(periph->path);
501 				printf("Synchronize cache failed, status "
502 				       "== 0x%x, scsi status == 0x%x\n",
503 				       ccb->csio.ccb_h.status,
504 				       ccb->csio.scsi_status);
505 			}
506 		}
507 
508 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
509 			cam_release_devq(ccb->ccb_h.path,
510 					 /*relsim_flags*/0,
511 					 /*reduction*/0,
512 					 /*timeout*/0,
513 					 /*getcount_only*/0);
514 
515 		xpt_release_ccb(ccb);
516 
517 	}
518 
519 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
520 		daprevent(periph, PR_ALLOW);
521 		/*
522 		 * If we've got removable media, mark the blocksize as
523 		 * unavailable, since it could change when new media is
524 		 * inserted.
525 		 */
526 		softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
527 	}
528 
529 	softc->flags &= ~DA_FLAG_OPEN;
530 	cam_periph_unlock(periph);
531 	cam_periph_release(periph);
532 	return (0);
533 }
534 
535 /*
536  * Actually translate the requested transfer into one the physical driver
537  * can understand.  The transfer is described by a bio and will include
538  * only one physical transfer.
539  */
540 static void
541 dastrategy(struct bio *bp)
542 {
543 	struct cam_periph *periph;
544 	struct da_softc *softc;
545 	u_int  unit;
546 	u_int  part;
547 	int    s;
548 
549 	unit = dkunit(bp->bio_dev);
550 	part = dkpart(bp->bio_dev);
551 	periph = cam_extend_get(daperiphs, unit);
552 	if (periph == NULL) {
553 		bp->bio_error = ENXIO;
554 		goto bad;
555 	}
556 	softc = (struct da_softc *)periph->softc;
557 #if 0
558 	/*
559 	 * check it's not too big a transfer for our adapter
560 	 */
561 	scsi_minphys(bp,&sd_switch);
562 #endif
563 
564 	/*
565 	 * Mask interrupts so that the pack cannot be invalidated until
566 	 * after we are in the queue.  Otherwise, we might not properly
567 	 * clean up one of the buffers.
568 	 */
569 	s = splbio();
570 
571 	/*
572 	 * If the device has been made invalid, error out
573 	 */
574 	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
575 		splx(s);
576 		bp->bio_error = ENXIO;
577 		goto bad;
578 	}
579 
580 	/*
581 	 * Place it in the queue of disk activities for this disk
582 	 */
583 	bioqdisksort(&softc->bio_queue, bp);
584 
585 	splx(s);
586 
587 	/*
588 	 * Schedule ourselves for performing the work.
589 	 */
590 	xpt_schedule(periph, /* XXX priority */1);
591 
592 	return;
593 bad:
594 	bp->bio_flags |= BIO_ERROR;
595 
596 	/*
597 	 * Correctly set the buf to indicate a completed xfer
598 	 */
599 	bp->bio_resid = bp->bio_bcount;
600 	biodone(bp);
601 	return;
602 }
603 
604 /* For 2.2-stable support */
605 #ifndef ENOIOCTL
606 #define ENOIOCTL -1
607 #endif
608 
609 static int
610 daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
611 {
612 	struct cam_periph *periph;
613 	struct da_softc *softc;
614 	int unit;
615 	int error;
616 
617 	unit = dkunit(dev);
618 	periph = cam_extend_get(daperiphs, unit);
619 	if (periph == NULL)
620 		return (ENXIO);
621 
622 	softc = (struct da_softc *)periph->softc;
623 
624 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n"));
625 
626 	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
627 		return (error); /* error code from tsleep */
628 	}
629 
630 	error = cam_periph_ioctl(periph, cmd, addr, daerror);
631 
632 	cam_periph_unlock(periph);
633 
634 	return (error);
635 }
636 
637 static int
638 dadump(dev_t dev)
639 {
640 	struct	    cam_periph *periph;
641 	struct	    da_softc *softc;
642 	u_int	    unit;
643 	u_int	    part;
644 	u_int	    secsize;
645 	u_int	    num;	/* number of sectors to write */
646 	u_int	    blknum;
647 	long	    blkcnt;
648 	vm_offset_t addr;
649 	struct	    ccb_scsiio csio;
650 	int         dumppages = MAXDUMPPGS;
651 	int	    error;
652 	int         i;
653 
654 	/* toss any characters present prior to dump */
655 	while (cncheckc() != -1)
656 		;
657 
658 	unit = dkunit(dev);
659 	part = dkpart(dev);
660 	periph = cam_extend_get(daperiphs, unit);
661 	if (periph == NULL) {
662 		return (ENXIO);
663 	}
664 	softc = (struct da_softc *)periph->softc;
665 
666 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0)
667 		return (ENXIO);
668 
669 	error = disk_dumpcheck(dev, &num, &blknum, &secsize);
670 	if (error)
671 		return (error);
672 
673 	addr = 0;	/* starting address */
674 	blkcnt = howmany(PAGE_SIZE, secsize);
675 
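	/*
	 * Each pass of the loop below maps up to MAXDUMPPGS pages of
	 * physical memory with pmap_kenter_temporary() (addresses that are
	 * not real memory are aliased to page 0) and writes them out with a
	 * single polled WRITE of blkcnt * dumppages device blocks.
	 */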
676 	while (num > 0) {
677 		caddr_t va = NULL;
678 
679 		if ((num / blkcnt) < dumppages)
680 			dumppages = num / blkcnt;
681 
682 		for (i = 0; i < dumppages; ++i) {
683 			vm_offset_t a = addr + (i * PAGE_SIZE);
684 			if (is_physical_memory(a))
685 				va = pmap_kenter_temporary(trunc_page(a), i);
686 			else
687 				va = pmap_kenter_temporary(trunc_page(0), i);
688 		}
689 
690 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
691 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
692 		scsi_read_write(&csio,
693 				/*retries*/1,
694 				dadone,
695 				MSG_ORDERED_Q_TAG,
696 				/*read*/FALSE,
697 				/*byte2*/0,
698 				/*minimum_cmd_size*/ softc->minimum_cmd_size,
699 				blknum,
700 				blkcnt * dumppages,
701 				/*data_ptr*/(u_int8_t *) va,
702 				/*dxfer_len*/blkcnt * secsize * dumppages,
703 				/*sense_len*/SSD_FULL_SIZE,
704 				DA_DEFAULT_TIMEOUT * 1000);
705 		xpt_polled_action((union ccb *)&csio);
706 
707 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
708 			printf("Aborting dump due to I/O error.\n");
709 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
710 			     CAM_SCSI_STATUS_ERROR)
711 				scsi_sense_print(&csio);
712 			else
713 				printf("status == 0x%x, scsi status == 0x%x\n",
714 				       csio.ccb_h.status, csio.scsi_status);
715 			return(EIO);
716 		}
717 
718 		if (addr % (1024 * 1024) == 0) {
719 #ifdef	HW_WDOG
720 			if (wdog_tickler)
721 				(*wdog_tickler)();
722 #endif /* HW_WDOG */
723 			/* Count in MB of data left to write */
724 			printf("%d ", (num  * softc->params.secsize)
725 				     / (1024 * 1024));
726 		}
727 
728 		/* update block count */
729 		num -= blkcnt * dumppages;
730 		blknum += blkcnt * dumppages;
731 		addr += PAGE_SIZE * dumppages;
732 
733 		/* operator aborting dump? */
734 		if (cncheckc() != -1)
735 			return (EINTR);
736 	}
737 
738 	/*
739 	 * Sync the disk cache contents to the physical media.
740 	 */
741 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
742 
743 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
744 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
745 		scsi_synchronize_cache(&csio,
746 				       /*retries*/1,
747 				       /*cbfcnp*/dadone,
748 				       MSG_SIMPLE_Q_TAG,
749 				       /*begin_lba*/0,/* Cover the whole disk */
750 				       /*lb_count*/0,
751 				       SSD_FULL_SIZE,
752 				       5 * 60 * 1000);
753 		xpt_polled_action((union ccb *)&csio);
754 
755 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
756 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
757 			     CAM_SCSI_STATUS_ERROR) {
758 				int asc, ascq;
759 				int sense_key, error_code;
760 
761 				scsi_extract_sense(&csio.sense_data,
762 						   &error_code,
763 						   &sense_key,
764 						   &asc, &ascq);
765 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
766 					scsi_sense_print(&csio);
767 			} else {
768 				xpt_print_path(periph->path);
769 				printf("Synchronize cache failed, status "
770 				       "== 0x%x, scsi status == 0x%x\n",
771 				       csio.ccb_h.status, csio.scsi_status);
772 			}
773 		}
774 	}
775 	return (0);
776 }
777 
778 static void
779 dainit(void)
780 {
781 	cam_status status;
782 	struct cam_path *path;
783 
784 	/*
785 	 * Create our extend array for storing the devices we attach to.
786 	 */
787 	daperiphs = cam_extend_new();
788 	SLIST_INIT(&softc_list);
789 	if (daperiphs == NULL) {
790 		printf("da: Failed to alloc extend array!\n");
791 		return;
792 	}
793 
794 	/*
795 	 * Install a global async callback.  This callback will
796 	 * receive async callbacks like "new device found".
797 	 */
798 	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
799 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
800 
801 	if (status == CAM_REQ_CMP) {
802 		struct ccb_setasync csa;
803 
804 		xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
805 		csa.ccb_h.func_code = XPT_SASYNC_CB;
806 		csa.event_enable = AC_FOUND_DEVICE;
807 		csa.callback = daasync;
808 		csa.callback_arg = NULL;
809 		xpt_action((union ccb *)&csa);
810 		status = csa.ccb_h.status;
811 		xpt_free_path(path);
812 	}
813 
814 	if (status != CAM_REQ_CMP) {
815 		printf("da: Failed to attach master async callback "
816 		       "due to status 0x%x!\n", status);
817 	} else {
818 
819 		/*
820 		 * Schedule a periodic event to occasionally send an
821 		 * ordered tag to a device.
822 		 */
823 		timeout(dasendorderedtag, NULL,
824 			(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
825 
826 		/* Register our shutdown event handler */
827 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
828 					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
829 		    printf("dainit: shutdown event registration failed!\n");
830 	}
831 }
832 
833 static void
834 daoninvalidate(struct cam_periph *periph)
835 {
836 	int s;
837 	struct da_softc *softc;
838 	struct bio *q_bp;
839 	struct ccb_setasync csa;
840 
841 	softc = (struct da_softc *)periph->softc;
842 
843 	/*
844 	 * De-register any async callbacks.
845 	 */
846 	xpt_setup_ccb(&csa.ccb_h, periph->path,
847 		      /* priority */ 5);
848 	csa.ccb_h.func_code = XPT_SASYNC_CB;
849 	csa.event_enable = 0;
850 	csa.callback = daasync;
851 	csa.callback_arg = periph;
852 	xpt_action((union ccb *)&csa);
853 
854 	softc->flags |= DA_FLAG_PACK_INVALID;
855 
856 	/*
857 	 * Although the oninvalidate() routines are always called at
858 	 * splsoftcam, we need to be at splbio() here to keep the buffer
859 	 * queue from being modified while we traverse it.
860 	 */
861 	s = splbio();
862 
863 	/*
864 	 * Return all queued I/O with ENXIO.
865 	 * XXX Handle any transactions queued to the card
866 	 *     with XPT_ABORT_CCB.
867 	 */
868 	while ((q_bp = bioq_first(&softc->bio_queue)) != NULL){
869 		bioq_remove(&softc->bio_queue, q_bp);
870 		q_bp->bio_resid = q_bp->bio_bcount;
871 		q_bp->bio_error = ENXIO;
872 		q_bp->bio_flags |= BIO_ERROR;
873 		biodone(q_bp);
874 	}
875 	splx(s);
876 
877 	SLIST_REMOVE(&softc_list, softc, da_softc, links);
878 
879 	xpt_print_path(periph->path);
880 	printf("lost device\n");
881 }
882 
883 static void
884 dacleanup(struct cam_periph *periph)
885 {
886 	struct da_softc *softc;
887 
888 	softc = (struct da_softc *)periph->softc;
889 
890 	devstat_remove_entry(&softc->device_stats);
891 	cam_extend_release(daperiphs, periph->unit_number);
892 	xpt_print_path(periph->path);
893 	printf("removing device entry\n");
894 	free(softc, M_DEVBUF);
895 }
896 
897 static void
898 daasync(void *callback_arg, u_int32_t code,
899 	struct cam_path *path, void *arg)
900 {
901 	struct cam_periph *periph;
902 
903 	periph = (struct cam_periph *)callback_arg;
904 	switch (code) {
905 	case AC_FOUND_DEVICE:
906 	{
907 		struct ccb_getdev *cgd;
908 		cam_status status;
909 
910 		cgd = (struct ccb_getdev *)arg;
911 
912 		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
913 		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
914 			break;
915 
916 		/*
917 		 * Allocate a peripheral instance for
918 		 * this device and start the probe
919 		 * process.
920 		 */
921 		status = cam_periph_alloc(daregister, daoninvalidate,
922 					  dacleanup, dastart,
923 					  "da", CAM_PERIPH_BIO,
924 					  cgd->ccb_h.path, daasync,
925 					  AC_FOUND_DEVICE, cgd);
926 
927 		if (status != CAM_REQ_CMP
928 		 && status != CAM_REQ_INPROG)
929 			printf("daasync: Unable to attach to new device "
930 				"due to status 0x%x\n", status);
931 		break;
932 	}
933 	case AC_SENT_BDR:
934 	case AC_BUS_RESET:
935 	{
936 		struct da_softc *softc;
937 		struct ccb_hdr *ccbh;
938 		int s;
939 
940 		softc = (struct da_softc *)periph->softc;
941 		s = splsoftcam();
942 		/*
943 		 * Don't fail on the expected unit attention
944 		 * that will occur.
945 		 */
946 		softc->flags |= DA_FLAG_RETRY_UA;
947 		for (ccbh = LIST_FIRST(&softc->pending_ccbs);
948 		     ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
949 			ccbh->ccb_state |= DA_CCB_RETRY_UA;
950 		splx(s);
951 		/* FALLTHROUGH*/
952 	}
953 	default:
954 		cam_periph_async(periph, code, path, arg);
955 		break;
956 	}
957 }
958 
959 static cam_status
960 daregister(struct cam_periph *periph, void *arg)
961 {
962 	int s;
963 	struct da_softc *softc;
964 	struct ccb_setasync csa;
965 	struct ccb_getdev *cgd;
966 	caddr_t match;
967 
968 	cgd = (struct ccb_getdev *)arg;
969 	if (periph == NULL) {
970 		printf("daregister: periph was NULL!!\n");
971 		return(CAM_REQ_CMP_ERR);
972 	}
973 
974 	if (cgd == NULL) {
975 		printf("daregister: no getdev CCB, can't register device\n");
976 		return(CAM_REQ_CMP_ERR);
977 	}
978 
979 	softc = (struct da_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);
980 
981 	if (softc == NULL) {
982 		printf("daregister: Unable to probe new device. "
983 		       "Unable to allocate softc\n");
984 		return(CAM_REQ_CMP_ERR);
985 	}
986 
987 	bzero(softc, sizeof(*softc));
988 	LIST_INIT(&softc->pending_ccbs);
989 	softc->state = DA_STATE_PROBE;
990 	bioq_init(&softc->bio_queue);
991 	if (SID_IS_REMOVABLE(&cgd->inq_data))
992 		softc->flags |= DA_FLAG_PACK_REMOVABLE;
993 	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
994 		softc->flags |= DA_FLAG_TAGGED_QUEUING;
995 
996 	periph->softc = softc;
997 
998 	cam_extend_set(daperiphs, periph->unit_number, periph);
999 
1000 	/*
1001 	 * See if this device has any quirks.
1002 	 */
1003 	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
1004 			       (caddr_t)da_quirk_table,
1005 			       sizeof(da_quirk_table)/sizeof(*da_quirk_table),
1006 			       sizeof(*da_quirk_table), scsi_inquiry_match);
1007 
1008 	if (match != NULL)
1009 		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
1010 	else
1011 		softc->quirks = DA_Q_NONE;
1012 
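	/*
	 * minimum_cmd_size is passed to scsi_read_write() in dastart() and
	 * dadump(); a value of 6 lets 6-byte READ/WRITE CDBs be used where
	 * possible, while the DA_Q_NO_6_BYTE quirk forces at least 10-byte
	 * commands for devices that only accept READ_10/WRITE_10.
	 */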
1013 	if (softc->quirks & DA_Q_NO_6_BYTE)
1014 		softc->minimum_cmd_size = 10;
1015 	else
1016 		softc->minimum_cmd_size = 6;
1017 
1018 	/*
1019 	 * Block our timeout handler while we
1020 	 * add this softc to the dev list.
1021 	 */
1022 	s = splsoftclock();
1023 	SLIST_INSERT_HEAD(&softc_list, softc, links);
1024 	splx(s);
1025 
1026 	/*
1027 	 * The DA driver supports a blocksize, but
1028 	 * we don't know the blocksize until we do
1029 	 * a read capacity.  So, set a flag to
1030 	 * indicate that the blocksize is
1031 	 * unavailable right now.  We'll clear the
1032 	 * flag as soon as we've done a read capacity.
1033 	 */
1034 	devstat_add_entry(&softc->device_stats, "da",
1035 			  periph->unit_number, 0,
1036 	  		  DEVSTAT_BS_UNAVAILABLE,
1037 			  SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI,
1038 			  DEVSTAT_PRIORITY_DISK);
1039 
1040 	/*
1041 	 * Register this media as a disk
1042 	 */
1043 	disk_create(periph->unit_number, &softc->disk, 0,
1044 	    &da_cdevsw, &dadisk_cdevsw);
1045 
1046 	/*
1047 	 * Add async callbacks for bus reset and
1048 	 * bus device reset calls.  I don't bother
1049 	 * checking if this fails as, in most cases,
1050 	 * the system will function just fine without
1051 	 * them and the only alternative would be to
1052 	 * not attach the device on failure.
1053 	 */
1054 	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
1055 	csa.ccb_h.func_code = XPT_SASYNC_CB;
1056 	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
1057 	csa.callback = daasync;
1058 	csa.callback_arg = periph;
1059 	xpt_action((union ccb *)&csa);
1060 	/*
1061 	 * Lock this peripheral until we are set up.
1062 	 * This first call can't block.
1063 	 */
1064 	(void)cam_periph_lock(periph, PRIBIO);
1065 	xpt_schedule(periph, /*priority*/5);
1066 
1067 	return(CAM_REQ_CMP);
1068 }
1069 
1070 static void
1071 dastart(struct cam_periph *periph, union ccb *start_ccb)
1072 {
1073 	struct da_softc *softc;
1074 
1075 	softc = (struct da_softc *)periph->softc;
1076 
1077 
1078 	switch (softc->state) {
1079 	case DA_STATE_NORMAL:
1080 	{
1081 		/* Pull a buffer from the queue and get going on it */
1082 		struct bio *bp;
1083 		int s;
1084 
1085 		/*
1086 		 * See if there is a bio with work for us to do.
1087 		 */
1088 		s = splbio();
1089 		bp = bioq_first(&softc->bio_queue);
1090 		if (periph->immediate_priority <= periph->pinfo.priority) {
1091 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
1092 					("queuing for immediate ccb\n"));
1093 			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
1094 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1095 					  periph_links.sle);
1096 			periph->immediate_priority = CAM_PRIORITY_NONE;
1097 			splx(s);
1098 			wakeup(&periph->ccb_list);
1099 		} else if (bp == NULL) {
1100 			splx(s);
1101 			xpt_release_ccb(start_ccb);
1102 		} else {
1103 			int oldspl;
1104 			u_int8_t tag_code;
1105 
1106 			bioq_remove(&softc->bio_queue, bp);
1107 
1108 			devstat_start_transaction(&softc->device_stats);
1109 
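			/*
			 * Use an ordered tag when the buffer itself requests
			 * one (BIO_ORDERED) or when dasendorderedtag() has
			 * flagged this device via DA_FLAG_NEED_OTAG to guard
			 * against simple tag starvation.
			 */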
1110 			if ((bp->bio_flags & BIO_ORDERED) != 0
1111 			 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
1112 				softc->flags &= ~DA_FLAG_NEED_OTAG;
1113 				softc->ordered_tag_count++;
1114 				tag_code = MSG_ORDERED_Q_TAG;
1115 			} else {
1116 				tag_code = MSG_SIMPLE_Q_TAG;
1117 			}
1118 			scsi_read_write(&start_ccb->csio,
1119 					/*retries*/4,
1120 					dadone,
1121 					tag_code,
1122 					bp->bio_cmd == BIO_READ,
1123 					/*byte2*/0,
1124 					softc->minimum_cmd_size,
1125 					bp->bio_pblkno,
1126 					bp->bio_bcount / softc->params.secsize,
1127 					bp->bio_data,
1128 					bp->bio_bcount,
1129 					/*sense_len*/SSD_FULL_SIZE,
1130 					DA_DEFAULT_TIMEOUT * 1000);
1131 			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
1132 
1133 			/*
1134 			 * Block out any asynchronous callbacks
1135 			 * while we touch the pending ccb list.
1136 			 */
1137 			oldspl = splcam();
1138 			LIST_INSERT_HEAD(&softc->pending_ccbs,
1139 					 &start_ccb->ccb_h, periph_links.le);
1140 			splx(oldspl);
1141 
1142 			/* We expect a unit attention from this device */
1143 			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
1144 				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
1145 				softc->flags &= ~DA_FLAG_RETRY_UA;
1146 			}
1147 
1148 			start_ccb->ccb_h.ccb_bp = bp;
1149 			bp = bioq_first(&softc->bio_queue);
1150 			splx(s);
1151 
1152 			xpt_action(start_ccb);
1153 		}
1154 
1155 		if (bp != NULL) {
1156 			/* Have more work to do, so ensure we stay scheduled */
1157 			xpt_schedule(periph, /* XXX priority */1);
1158 		}
1159 		break;
1160 	}
1161 	case DA_STATE_PROBE:
1162 	{
1163 		struct ccb_scsiio *csio;
1164 		struct scsi_read_capacity_data *rcap;
1165 
1166 		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
1167 								M_TEMP,
1168 								M_NOWAIT);
1169 		if (rcap == NULL) {
1170 			printf("dastart: Couldn't malloc read_capacity data\n");
1171 			/* da_free_periph??? */
1172 			break;
1173 		}
1174 		csio = &start_ccb->csio;
1175 		scsi_read_capacity(csio,
1176 				   /*retries*/4,
1177 				   dadone,
1178 				   MSG_SIMPLE_Q_TAG,
1179 				   rcap,
1180 				   SSD_FULL_SIZE,
1181 				   /*timeout*/5000);
1182 		start_ccb->ccb_h.ccb_bp = NULL;
1183 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
1184 		xpt_action(start_ccb);
1185 		break;
1186 	}
1187 	}
1188 }
1189 
1190 
1191 static void
1192 dadone(struct cam_periph *periph, union ccb *done_ccb)
1193 {
1194 	struct da_softc *softc;
1195 	struct ccb_scsiio *csio;
1196 
1197 	softc = (struct da_softc *)periph->softc;
1198 	csio = &done_ccb->csio;
1199 	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
1200 	case DA_CCB_BUFFER_IO:
1201 	{
1202 		struct bio *bp;
1203 		int    oldspl;
1204 
1205 		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
1206 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1207 			int error;
1208 			int s;
1209 			int sf;
1210 
1211 			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
1212 				sf = SF_RETRY_UA;
1213 			else
1214 				sf = 0;
1215 
1216 			/* Retry selection timeouts */
1217 			sf |= SF_RETRY_SELTO;
1218 
1219 			if ((error = daerror(done_ccb, 0, sf)) == ERESTART) {
1220 				/*
1221 				 * A retry was scheduled, so
1222 				 * just return.
1223 				 */
1224 				return;
1225 			}
1226 			if (error != 0) {
1227 				struct bio *q_bp;
1228 
1229 				s = splbio();
1230 
1231 				if (error == ENXIO) {
1232 					/*
1233 					 * Catastrophic error.  Mark our pack as
1234 					 * invalid.
1235 					 */
1236 					/* XXX See if this is really a media
1237 					 *     change first.
1238 					 */
1239 					xpt_print_path(periph->path);
1240 					printf("Invalidating pack\n");
1241 					softc->flags |= DA_FLAG_PACK_INVALID;
1242 				}
1243 
1244 				/*
1245 				 * return all queued I/O with EIO, so that
1246 				 * the client can retry these I/Os in the
1247 				 * proper order should it attempt to recover.
1248 				 */
1249 				while ((q_bp = bioq_first(&softc->bio_queue))
1250 					!= NULL) {
1251 					bioq_remove(&softc->bio_queue, q_bp);
1252 					q_bp->bio_resid = q_bp->bio_bcount;
1253 					q_bp->bio_error = EIO;
1254 					q_bp->bio_flags |= BIO_ERROR;
1255 					biodone(q_bp);
1256 				}
1257 				splx(s);
1258 				bp->bio_error = error;
1259 				bp->bio_resid = bp->bio_bcount;
1260 				bp->bio_flags |= BIO_ERROR;
1261 			} else {
1262 				bp->bio_resid = csio->resid;
1263 				bp->bio_error = 0;
1264 				if (bp->bio_resid != 0) {
1265 					/* Short transfer ??? */
1266 					bp->bio_flags |= BIO_ERROR;
1267 				}
1268 			}
1269 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1270 				cam_release_devq(done_ccb->ccb_h.path,
1271 						 /*relsim_flags*/0,
1272 						 /*reduction*/0,
1273 						 /*timeout*/0,
1274 						 /*getcount_only*/0);
1275 		} else {
1276 			bp->bio_resid = csio->resid;
1277 			if (csio->resid > 0)
1278 				bp->bio_flags |= BIO_ERROR;
1279 		}
1280 
1281 		/*
1282 		 * Block out any asynchronous callbacks
1283 		 * while we touch the pending ccb list.
1284 		 */
1285 		oldspl = splcam();
1286 		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
1287 		splx(oldspl);
1288 
1289 		if (softc->device_stats.busy_count == 0)
1290 			softc->flags |= DA_FLAG_WENT_IDLE;
1291 
1292 		devstat_end_transaction_bio(&softc->device_stats, bp);
1293 		biodone(bp);
1294 		break;
1295 	}
1296 	case DA_CCB_PROBE:
1297 	{
1298 		struct	   scsi_read_capacity_data *rdcap;
1299 		char	   announce_buf[80];
1300 
1301 		rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;
1302 
1303 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1304 			struct disk_params *dp;
1305 
1306 			dasetgeom(periph, rdcap);
1307 			dp = &softc->params;
1308 			snprintf(announce_buf, sizeof(announce_buf),
1309 			        "%luMB (%u %u byte sectors: %dH %dS/T %dC)",
1310 				(unsigned long) (((u_int64_t)dp->secsize *
1311 				dp->sectors) / (1024*1024)), dp->sectors,
1312 				dp->secsize, dp->heads, dp->secs_per_track,
1313 				dp->cylinders);
1314 		} else {
1315 			int	error;
1316 
1317 			announce_buf[0] = '\0';
1318 
1319 			/*
1320 			 * Retry any UNIT ATTENTION type errors.  They
1321 			 * are expected at boot.
1322 			 */
1323 			error = daerror(done_ccb, 0, SF_RETRY_UA |
1324 					SF_RETRY_SELTO | SF_NO_PRINT);
1325 			if (error == ERESTART) {
1326 				/*
1327 				 * A retry was scheduled, so
1328 				 * just return.
1329 				 */
1330 				return;
1331 			} else if (error != 0) {
1332 				struct scsi_sense_data *sense;
1333 				int asc, ascq;
1334 				int sense_key, error_code;
1335 				int have_sense;
1336 				cam_status status;
1337 				struct ccb_getdev cgd;
1338 
1339 				/* Don't wedge this device's queue */
1340 				cam_release_devq(done_ccb->ccb_h.path,
1341 						 /*relsim_flags*/0,
1342 						 /*reduction*/0,
1343 						 /*timeout*/0,
1344 						 /*getcount_only*/0);
1345 
1346 				status = done_ccb->ccb_h.status;
1347 
1348 				xpt_setup_ccb(&cgd.ccb_h,
1349 					      done_ccb->ccb_h.path,
1350 					      /* priority */ 1);
1351 				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1352 				xpt_action((union ccb *)&cgd);
1353 
1354 				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
1355 				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
1356 				 || ((status & CAM_AUTOSNS_VALID) == 0))
1357 					have_sense = FALSE;
1358 				else
1359 					have_sense = TRUE;
1360 
1361 				if (have_sense) {
1362 					sense = &csio->sense_data;
1363 					scsi_extract_sense(sense, &error_code,
1364 							   &sense_key,
1365 							   &asc, &ascq);
1366 				}
1367 				/*
1368 				 * Attach to anything that claims to be a
1369 				 * direct access or optical disk device,
1370 				 * as long as it doesn't return a "Logical
1371 				 * unit not supported" (0x25) error.
1372 				 */
1373 				if ((have_sense) && (asc != 0x25)
1374 				 && (error_code == SSD_CURRENT_ERROR))
1375 					snprintf(announce_buf,
1376 					    sizeof(announce_buf),
1377 						"Attempt to query device "
1378 						"size failed: %s, %s",
1379 						scsi_sense_key_text[sense_key],
1380 						scsi_sense_desc(asc,ascq,
1381 								&cgd.inq_data));
1382 				else {
1383 					if (have_sense)
1384 						scsi_sense_print(
1385 							&done_ccb->csio);
1386 					else {
1387 						xpt_print_path(periph->path);
1388 						printf("got CAM status %#x\n",
1389 						       done_ccb->ccb_h.status);
1390 					}
1391 
1392 					xpt_print_path(periph->path);
1393 					printf("fatal error, failed"
1394 					       " to attach to device\n");
1395 
1396 					/*
1397 					 * Free up resources.
1398 					 */
1399 					cam_periph_invalidate(periph);
1400 				}
1401 			}
1402 		}
1403 		free(rdcap, M_TEMP);
1404 		if (announce_buf[0] != '\0')
1405 			xpt_announce_periph(periph, announce_buf);
1406 		softc->state = DA_STATE_NORMAL;
1407 		/*
1408 		 * Since our peripheral may be invalidated by an error
1409 		 * above or an external event, we must release our CCB
1410 		 * before releasing the probe lock on the peripheral.
1411 		 * The peripheral will only go away once the last lock
1412 		 * is removed, and we need it around for the CCB release
1413 		 * operation.
1414 		 */
1415 		xpt_release_ccb(done_ccb);
1416 		cam_periph_unlock(periph);
1417 		return;
1418 	}
1419 	case DA_CCB_WAITING:
1420 	{
1421 		/* Caller will release the CCB */
1422 		wakeup(&done_ccb->ccb_h.cbfcnp);
1423 		return;
1424 	}
1425 	case DA_CCB_DUMP:
1426 		/* No-op.  We're polling */
1427 		return;
1428 	default:
1429 		break;
1430 	}
1431 	xpt_release_ccb(done_ccb);
1432 }
1433 
1434 static int
1435 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1436 {
1437 	struct da_softc	  *softc;
1438 	struct cam_periph *periph;
1439 
1440 	periph = xpt_path_periph(ccb->ccb_h.path);
1441 	softc = (struct da_softc *)periph->softc;
1442 
1443 	/*
1444 	 * XXX
1445 	 * Until we have a better way of doing pack validation,
1446 	 * don't treat UAs as errors.
1447 	 */
1448 	sense_flags |= SF_RETRY_UA;
1449 	return(cam_periph_error(ccb, cam_flags, sense_flags,
1450 				&softc->saved_ccb));
1451 }
1452 
1453 static void
1454 daprevent(struct cam_periph *periph, int action)
1455 {
1456 	struct	da_softc *softc;
1457 	union	ccb *ccb;
1458 	int	error;
1459 
1460 	softc = (struct da_softc *)periph->softc;
1461 
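	/*
	 * Skip the PREVENT ALLOW MEDIUM REMOVAL command if the pack is
	 * already in the requested state.
	 */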
1462 	if (((action == PR_ALLOW)
1463 	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
1464 	 || ((action == PR_PREVENT)
1465 	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
1466 		return;
1467 	}
1468 
1469 	ccb = cam_periph_getccb(periph, /*priority*/1);
1470 
1471 	scsi_prevent(&ccb->csio,
1472 		     /*retries*/1,
1473 		     /*cbcfp*/dadone,
1474 		     MSG_SIMPLE_Q_TAG,
1475 		     action,
1476 		     SSD_FULL_SIZE,
1477 		     5000);
1478 
1479 	error = cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
1480 				  /*sense_flags*/0, &softc->device_stats);
1481 
1482 	if (error == 0) {
1483 		if (action == PR_ALLOW)
1484 			softc->flags &= ~DA_FLAG_PACK_LOCKED;
1485 		else
1486 			softc->flags |= DA_FLAG_PACK_LOCKED;
1487 	}
1488 
1489 	xpt_release_ccb(ccb);
1490 }
1491 
1492 static void
1493 dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data * rdcap)
1494 {
1495 	struct ccb_calc_geometry ccg;
1496 	struct da_softc *softc;
1497 	struct disk_params *dp;
1498 
1499 	softc = (struct da_softc *)periph->softc;
1500 
1501 	dp = &softc->params;
1502 	dp->secsize = scsi_4btoul(rdcap->length);
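	/*
	 * READ CAPACITY reports the address of the last logical block, so
	 * add one to get the total number of sectors.
	 */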
1503 	dp->sectors = scsi_4btoul(rdcap->addr) + 1;
1504 	/*
1505 	 * Have the controller provide us with a geometry
1506 	 * for this disk.  The only time the geometry
1507 	 * matters is when we boot and the controller
1508 	 * is the only one knowledgeable enough to come
1509 	 * up with something that will make this a bootable
1510 	 * device.
1511 	 */
1512 	xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
1513 	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
1514 	ccg.block_size = dp->secsize;
1515 	ccg.volume_size = dp->sectors;
1516 	ccg.heads = 0;
1517 	ccg.secs_per_track = 0;
1518 	ccg.cylinders = 0;
1519 	xpt_action((union ccb*)&ccg);
1520 	dp->heads = ccg.heads;
1521 	dp->secs_per_track = ccg.secs_per_track;
1522 	dp->cylinders = ccg.cylinders;
1523 }
1524 
1525 static void
1526 dasendorderedtag(void *arg)
1527 {
1528 	struct da_softc *softc;
1529 	int s;
1530 
1531 	for (softc = SLIST_FIRST(&softc_list);
1532 	     softc != NULL;
1533 	     softc = SLIST_NEXT(softc, links)) {
1534 		s = splsoftcam();
1535 		if ((softc->ordered_tag_count == 0)
1536 		 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
1537 			softc->flags |= DA_FLAG_NEED_OTAG;
1538 		}
1539 		if (softc->device_stats.busy_count > 0)
1540 			softc->flags &= ~DA_FLAG_WENT_IDLE;
1541 
1542 		softc->ordered_tag_count = 0;
1543 		splx(s);
1544 	}
1545 	/* Queue us up again */
1546 	timeout(dasendorderedtag, NULL,
1547 		(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
1548 }
1549 
1550 /*
1551  * Step through all DA peripheral drivers, and if the device is still open,
1552  * sync the disk cache to physical media.
1553  */
1554 static void
1555 dashutdown(void * arg, int howto)
1556 {
1557 	struct cam_periph *periph;
1558 	struct da_softc *softc;
1559 
1560 	for (periph = TAILQ_FIRST(&dadriver.units); periph != NULL;
1561 	     periph = TAILQ_NEXT(periph, unit_links)) {
1562 		union ccb ccb;
1563 		softc = (struct da_softc *)periph->softc;
1564 
1565 		/*
1566 		 * We only sync the cache if the drive is still open, and
1567 		 * if the drive is capable of it.
1568 		 */
1569 		if (((softc->flags & DA_FLAG_OPEN) == 0)
1570 		 || (softc->quirks & DA_Q_NO_SYNC_CACHE))
1571 			continue;
1572 
1573 		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
1574 
1575 		ccb.ccb_h.ccb_state = DA_CCB_DUMP;
1576 		scsi_synchronize_cache(&ccb.csio,
1577 				       /*retries*/1,
1578 				       /*cbfcnp*/dadone,
1579 				       MSG_SIMPLE_Q_TAG,
1580 				       /*begin_lba*/0, /* whole disk */
1581 				       /*lb_count*/0,
1582 				       SSD_FULL_SIZE,
1583 				       5 * 60 * 1000);
1584 
1585 		xpt_polled_action(&ccb);
1586 
1587 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1588 			if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
1589 			     CAM_SCSI_STATUS_ERROR)
1590 			 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
1591 				int error_code, sense_key, asc, ascq;
1592 
1593 				scsi_extract_sense(&ccb.csio.sense_data,
1594 						   &error_code, &sense_key,
1595 						   &asc, &ascq);
1596 
1597 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
1598 					scsi_sense_print(&ccb.csio);
1599 			} else {
1600 				xpt_print_path(periph->path);
1601 				printf("Synchronize cache failed, status "
1602 				       "== 0x%x, scsi status == 0x%x\n",
1603 				       ccb.ccb_h.status, ccb.csio.scsi_status);
1604 			}
1605 		}
1606 
1607 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1608 			cam_release_devq(ccb.ccb_h.path,
1609 					 /*relsim_flags*/0,
1610 					 /*reduction*/0,
1611 					 /*timeout*/0,
1612 					 /*getcount_only*/0);
1613 
1614 	}
1615 }
1616 
1617 #else /* !_KERNEL */
1618 
1619 /*
1620  * XXX This is only left out of the kernel build to silence warnings.  If,
1621  * for some reason, this function is used in the kernel, the ifdefs should
1622  * be moved so it is included both in the kernel and userland.
1623  */
1624 void
1625 scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
1626 		 void (*cbfcnp)(struct cam_periph *, union ccb *),
1627 		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
1628 		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
1629 		 u_int32_t timeout)
1630 {
1631 	struct scsi_format_unit *scsi_cmd;
1632 
1633 	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
1634 	scsi_cmd->opcode = FORMAT_UNIT;
1635 	scsi_cmd->byte2 = byte2;
1636 	scsi_ulto2b(ileave, scsi_cmd->interleave);
1637 
1638 	cam_fill_csio(csio,
1639 		      retries,
1640 		      cbfcnp,
1641 		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
1642 		      tag_action,
1643 		      data_ptr,
1644 		      dxfer_len,
1645 		      sense_len,
1646 		      sizeof(*scsi_cmd),
1647 		      timeout);
1648 }
1649 
1650 #endif /* _KERNEL */
1651