xref: /freebsd/sys/cam/ata/ata_da.c (revision 050570efa79efcc9cf5adeb545f1a679c8dc377b)
1 /*-
2  * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 
32 #ifdef _KERNEL
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/bio.h>
36 #include <sys/sysctl.h>
37 #include <sys/taskqueue.h>
38 #include <sys/lock.h>
39 #include <sys/mutex.h>
40 #include <sys/conf.h>
41 #include <sys/devicestat.h>
42 #include <sys/eventhandler.h>
43 #include <sys/malloc.h>
44 #include <sys/cons.h>
45 #include <sys/reboot.h>
46 #include <geom/geom_disk.h>
47 #endif /* _KERNEL */
48 
49 #ifndef _KERNEL
50 #include <stdio.h>
51 #include <string.h>
52 #endif /* !_KERNEL */
53 
54 #include <cam/cam.h>
55 #include <cam/cam_ccb.h>
56 #include <cam/cam_periph.h>
57 #include <cam/cam_xpt_periph.h>
58 #include <cam/cam_sim.h>
59 
60 #include <cam/ata/ata_all.h>
61 
62 #include <machine/md_var.h>	/* geometry translation */
63 
64 #ifdef _KERNEL
65 
66 #define ATA_MAX_28BIT_LBA               268435455UL
67 
68 typedef enum {
69 	ADA_STATE_NORMAL
70 } ada_state;
71 
72 typedef enum {
73 	ADA_FLAG_PACK_INVALID	= 0x001,
74 	ADA_FLAG_CAN_48BIT	= 0x002,
75 	ADA_FLAG_CAN_FLUSHCACHE	= 0x004,
76 	ADA_FLAG_CAN_NCQ	= 0x008,
77 	ADA_FLAG_CAN_DMA	= 0x010,
78 	ADA_FLAG_NEED_OTAG	= 0x020,
79 	ADA_FLAG_WENT_IDLE	= 0x040,
80 	ADA_FLAG_CAN_TRIM	= 0x080,
81 	ADA_FLAG_OPEN		= 0x100,
82 	ADA_FLAG_SCTX_INIT	= 0x200,
83 	ADA_FLAG_CAN_CFA        = 0x400,
84 	ADA_FLAG_CAN_POWERMGT   = 0x800
85 } ada_flags;
86 
87 typedef enum {
88 	ADA_Q_NONE		= 0x00
89 } ada_quirks;
90 
91 typedef enum {
92 	ADA_CCB_BUFFER_IO	= 0x03,
93 	ADA_CCB_WAITING		= 0x04,
94 	ADA_CCB_DUMP		= 0x05,
95 	ADA_CCB_TRIM		= 0x06,
96 	ADA_CCB_TYPE_MASK	= 0x0F,
97 } ada_ccb_state;
98 
99 /* Offsets into our private area for storing information */
100 #define ccb_state	ppriv_field0
101 #define ccb_bp		ppriv_ptr1
102 
103 struct disk_params {
104 	u_int8_t  heads;
105 	u_int8_t  secs_per_track;
106 	u_int32_t cylinders;
107 	u_int32_t secsize;	/* Number of bytes/logical sector */
108 	u_int64_t sectors;	/* Total number of sectors */
109 };
110 
111 #define TRIM_MAX_BLOCKS	4
112 #define TRIM_MAX_RANGES	(TRIM_MAX_BLOCKS * 64)
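/*
 * Each DSM TRIM range descriptor is 8 bytes: a 48-bit starting LBA in
 * bytes 0-5 and a 16-bit sector count in bytes 6-7, so 64 descriptors
 * fit in each 512-byte block of the DATA SET MANAGEMENT payload.
 */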
113 struct trim_request {
114 	uint8_t		data[TRIM_MAX_RANGES * 8];
115 	struct bio	*bps[TRIM_MAX_RANGES];
116 };
117 
118 struct ada_softc {
119 	struct	 bio_queue_head bio_queue;
120 	struct	 bio_queue_head trim_queue;
121 	ada_state state;
122 	ada_flags flags;
123 	ada_quirks quirks;
124 	int	 ordered_tag_count;
125 	int	 outstanding_cmds;
126 	int	 trim_max_ranges;
127 	int	 trim_running;
128 	struct	 disk_params params;
129 	struct	 disk *disk;
130 	struct task		sysctl_task;
131 	struct sysctl_ctx_list	sysctl_ctx;
132 	struct sysctl_oid	*sysctl_tree;
133 	struct callout		sendordered_c;
134 	struct trim_request	trim_req;
135 };
136 
137 struct ada_quirk_entry {
138 	struct scsi_inquiry_pattern inq_pat;
139 	ada_quirks quirks;
140 };
141 
142 static struct ada_quirk_entry ada_quirk_table[] =
143 {
144 	{
145 		/* Default */
146 		{
147 		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
148 		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
149 		},
150 		/*quirks*/0
151 	},
152 };
153 
154 static	disk_strategy_t	adastrategy;
155 static	dumper_t	adadump;
156 static	periph_init_t	adainit;
157 static	void		adaasync(void *callback_arg, u_int32_t code,
158 				struct cam_path *path, void *arg);
159 static	void		adasysctlinit(void *context, int pending);
160 static	periph_ctor_t	adaregister;
161 static	periph_dtor_t	adacleanup;
162 static	periph_start_t	adastart;
163 static	periph_oninv_t	adaoninvalidate;
164 static	void		adadone(struct cam_periph *periph,
165 			       union ccb *done_ccb);
166 static  int		adaerror(union ccb *ccb, u_int32_t cam_flags,
167 				u_int32_t sense_flags);
168 static void		adagetparams(struct cam_periph *periph,
169 				struct ccb_getdev *cgd);
170 static timeout_t	adasendorderedtag;
171 static void		adashutdown(void *arg, int howto);
172 
173 #ifndef ADA_DEFAULT_TIMEOUT
174 #define ADA_DEFAULT_TIMEOUT 30	/* Timeout in seconds */
175 #endif
176 
177 #ifndef	ADA_DEFAULT_RETRY
178 #define	ADA_DEFAULT_RETRY	4
179 #endif
180 
181 #ifndef	ADA_DEFAULT_SEND_ORDERED
182 #define	ADA_DEFAULT_SEND_ORDERED	1
183 #endif
184 
185 #ifndef	ADA_DEFAULT_SPINDOWN_SHUTDOWN
186 #define	ADA_DEFAULT_SPINDOWN_SHUTDOWN	1
187 #endif
188 
189 /*
190  * Most platforms map the firmware geometry to the actual geometry, but
191  * some don't.  If a platform does not override it, the adjustment
192  * defaults to a no-op.
192  */
193 #ifndef ata_disk_firmware_geom_adjust
194 #define	ata_disk_firmware_geom_adjust(disk)
195 #endif
196 
197 static int ada_retry_count = ADA_DEFAULT_RETRY;
198 static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
199 static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;
200 static int ada_spindown_shutdown = ADA_DEFAULT_SPINDOWN_SHUTDOWN;
201 
202 SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
203             "CAM Direct Access Disk driver");
204 SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
205            &ada_retry_count, 0, "Normal I/O retry count");
206 TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
207 SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
208            &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
209 TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
210 SYSCTL_INT(_kern_cam_ada, OID_AUTO, ada_send_ordered, CTLFLAG_RW,
211            &ada_send_ordered, 0, "Send Ordered Tags");
212 TUNABLE_INT("kern.cam.ada.ada_send_ordered", &ada_send_ordered);
213 SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RW,
214            &ada_spindown_shutdown, 0, "Spin down upon shutdown");
215 TUNABLE_INT("kern.cam.ada.spindown_shutdown", &ada_spindown_shutdown);
216 
217 /*
218  * ADA_ORDEREDTAG_INTERVAL determines how often, relative
219  * to the default timeout, we check to see whether an ordered
220  * tagged transaction is appropriate to prevent simple tag
221  * starvation.  Since we'd like to ensure that there is at least
222  * 1/2 of the timeout length left for a starved transaction to
223  * complete after we've sent an ordered tag, we must poll at least
224  * four times in every timeout period.  This takes care of the worst
225  * case, where a starved transaction starts during an interval that
226  * passes the "don't send an ordered tag" test, so it takes us two
227  * intervals to determine that a tag must be sent.
228  */
229 #ifndef ADA_ORDEREDTAG_INTERVAL
230 #define ADA_ORDEREDTAG_INTERVAL 4
231 #endif
232 
233 static struct periph_driver adadriver =
234 {
235 	adainit, "ada",
236 	TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
237 };
238 
239 PERIPHDRIVER_DECLARE(ada, adadriver);
240 
241 MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");
242 
243 static int
244 adaopen(struct disk *dp)
245 {
246 	struct cam_periph *periph;
247 	struct ada_softc *softc;
248 	int unit;
249 	int error;
250 
251 	periph = (struct cam_periph *)dp->d_drv1;
252 	if (periph == NULL) {
253 		return (ENXIO);
254 	}
255 
256 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
257 		return(ENXIO);
258 	}
259 
260 	cam_periph_lock(periph);
261 	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
262 		cam_periph_unlock(periph);
263 		cam_periph_release(periph);
264 		return (error);
265 	}
266 
267 	unit = periph->unit_number;
268 	softc = (struct ada_softc *)periph->softc;
269 	softc->flags |= ADA_FLAG_OPEN;
270 
271 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
272 	    ("adaopen: disk=%s%d (unit %d)\n", dp->d_name, dp->d_unit,
273 	     unit));
274 
275 	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
276 		/* Clear the pack-invalid state now that the device is open again. */
277 		softc->flags &= ~ADA_FLAG_PACK_INVALID;
278 	}
279 
280 	cam_periph_unhold(periph);
281 	cam_periph_unlock(periph);
282 	return (0);
283 }
284 
285 static int
286 adaclose(struct disk *dp)
287 {
288 	struct	cam_periph *periph;
289 	struct	ada_softc *softc;
290 	union ccb *ccb;
291 	int error;
292 
293 	periph = (struct cam_periph *)dp->d_drv1;
294 	if (periph == NULL)
295 		return (ENXIO);
296 
297 	cam_periph_lock(periph);
298 	if ((error = cam_periph_hold(periph, PRIBIO)) != 0) {
299 		cam_periph_unlock(periph);
300 		cam_periph_release(periph);
301 		return (error);
302 	}
303 
304 	softc = (struct ada_softc *)periph->softc;
305 	/* We only sync the cache if the drive is capable of it. */
306 	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
307 
308 		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
309 		cam_fill_ataio(&ccb->ataio,
310 				    1,
311 				    adadone,
312 				    CAM_DIR_NONE,
313 				    0,
314 				    NULL,
315 				    0,
316 				    ada_default_timeout*1000);
317 
318 		if (softc->flags & ADA_FLAG_CAN_48BIT)
319 			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
320 		else
321 			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
322 		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
323 		    /*sense_flags*/0, softc->disk->d_devstat);
324 
325 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
326 			xpt_print(periph->path, "Synchronize cache failed\n");
327 
328 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
329 			cam_release_devq(ccb->ccb_h.path,
330 					 /*relsim_flags*/0,
331 					 /*reduction*/0,
332 					 /*timeout*/0,
333 					 /*getcount_only*/0);
334 		xpt_release_ccb(ccb);
335 	}
336 
337 	softc->flags &= ~ADA_FLAG_OPEN;
338 	cam_periph_unhold(periph);
339 	cam_periph_unlock(periph);
340 	cam_periph_release(periph);
341 	return (0);
342 }
343 
344 static void
345 adaschedule(struct cam_periph *periph)
346 {
347 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
348 
349 	if (bioq_first(&softc->bio_queue) ||
350 	    (!softc->trim_running && bioq_first(&softc->trim_queue))) {
351 		/* Have more work to do, so ensure we stay scheduled */
352 		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
353 	}
354 }
355 
356 /*
357  * Actually translate the requested transfer into one the physical driver
358  * can understand.  The transfer is described by a bio and will include
359  * only one physical transfer.
360  */
361 static void
362 adastrategy(struct bio *bp)
363 {
364 	struct cam_periph *periph;
365 	struct ada_softc *softc;
366 
367 	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
368 	if (periph == NULL) {
369 		biofinish(bp, NULL, ENXIO);
370 		return;
371 	}
372 	softc = (struct ada_softc *)periph->softc;
373 
374 	cam_periph_lock(periph);
375 
376 	/*
377 	 * If the device has been made invalid, error out
378 	 */
379 	if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
380 		cam_periph_unlock(periph);
381 		biofinish(bp, NULL, ENXIO);
382 		return;
383 	}
384 
385 	/*
386 	 * Place it in the queue of disk activities for this disk
387 	 */
388 	if (bp->bio_cmd == BIO_DELETE &&
389 	    (softc->flags & ADA_FLAG_CAN_TRIM))
390 		bioq_disksort(&softc->trim_queue, bp);
391 	else
392 		bioq_disksort(&softc->bio_queue, bp);
393 
394 	/*
395 	 * Schedule ourselves for performing the work.
396 	 */
397 	adaschedule(periph);
398 	cam_periph_unlock(periph);
399 
400 	return;
401 }
402 
403 static int
404 adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
405 {
406 	struct	    cam_periph *periph;
407 	struct	    ada_softc *softc;
408 	u_int	    secsize;
409 	union	    ccb ccb;
410 	struct	    disk *dp;
411 	uint64_t    lba;
412 	uint16_t    count;
413 
414 	dp = arg;
415 	periph = dp->d_drv1;
416 	if (periph == NULL)
417 		return (ENXIO);
418 	softc = (struct ada_softc *)periph->softc;
419 	cam_periph_lock(periph);
420 	secsize = softc->params.secsize;
421 	lba = offset / secsize;
422 	count = length / secsize;
423 
424 	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
425 		cam_periph_unlock(periph);
426 		return (ENXIO);
427 	}
428 
429 	if (length > 0) {
430 		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
431 		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
432 		cam_fill_ataio(&ccb.ataio,
433 		    0,
434 		    adadone,
435 		    CAM_DIR_OUT,
436 		    0,
437 		    (u_int8_t *) virtual,
438 		    length,
439 		    ada_default_timeout*1000);
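		/*
		 * Use a 48-bit write once the LBA range passes the 28-bit
		 * limit or the sector count reaches 256; otherwise a 28-bit
		 * WRITE DMA is sufficient.
		 */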
440 		if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
441 		    (lba + count >= ATA_MAX_28BIT_LBA ||
442 		    count >= 256)) {
443 			ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
444 			    0, lba, count);
445 		} else {
446 			ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
447 			    0, lba, count);
448 		}
449 		xpt_polled_action(&ccb);
450 
451 		if ((ccb.ataio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
452 			printf("Aborting dump due to I/O error.\n");
453 			cam_periph_unlock(periph);
454 			return(EIO);
455 		}
456 		cam_periph_unlock(periph);
457 		return(0);
458 	}
459 
460 	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
461 		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
462 
463 		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
464 		cam_fill_ataio(&ccb.ataio,
465 				    1,
466 				    adadone,
467 				    CAM_DIR_NONE,
468 				    0,
469 				    NULL,
470 				    0,
471 				    ada_default_timeout*1000);
472 
473 		if (softc->flags & ADA_FLAG_CAN_48BIT)
474 			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
475 		else
476 			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
477 		xpt_polled_action(&ccb);
478 
479 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
480 			xpt_print(periph->path, "Synchronize cache failed\n");
481 
482 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
483 			cam_release_devq(ccb.ccb_h.path,
484 					 /*relsim_flags*/0,
485 					 /*reduction*/0,
486 					 /*timeout*/0,
487 					 /*getcount_only*/0);
488 	}
489 	cam_periph_unlock(periph);
490 	return (0);
491 }
492 
493 static void
494 adainit(void)
495 {
496 	cam_status status;
497 
498 	/*
499 	 * Install a global async callback.  This callback will be
500 	 * notified of async events such as "new device found".
501 	 */
502 	status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);
503 
504 	if (status != CAM_REQ_CMP) {
505 		printf("ada: Failed to attach master async callback "
506 		       "due to status 0x%x!\n", status);
507 	} else if (ada_send_ordered) {
508 
509 		/* Register our shutdown event handler */
510 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
511 					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
512 		    printf("adainit: shutdown event registration failed!\n");
513 	}
514 }
515 
516 static void
517 adaoninvalidate(struct cam_periph *periph)
518 {
519 	struct ada_softc *softc;
520 
521 	softc = (struct ada_softc *)periph->softc;
522 
523 	/*
524 	 * De-register any async callbacks.
525 	 */
526 	xpt_register_async(0, adaasync, periph, periph->path);
527 
528 	softc->flags |= ADA_FLAG_PACK_INVALID;
529 
530 	/*
531 	 * Return all queued I/O with ENXIO.
532 	 * XXX Handle any transactions queued to the card
533 	 *     with XPT_ABORT_CCB.
534 	 */
535 	bioq_flush(&softc->bio_queue, NULL, ENXIO);
536 	bioq_flush(&softc->trim_queue, NULL, ENXIO);
537 
538 	disk_gone(softc->disk);
539 	xpt_print(periph->path, "lost device\n");
540 }
541 
542 static void
543 adacleanup(struct cam_periph *periph)
544 {
545 	struct ada_softc *softc;
546 
547 	softc = (struct ada_softc *)periph->softc;
548 
549 	xpt_print(periph->path, "removing device entry\n");
550 	cam_periph_unlock(periph);
551 
552 	/*
553 	 * If we can't free the sysctl tree, oh well...
554 	 */
555 	if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
556 	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
557 		xpt_print(periph->path, "can't remove sysctl context\n");
558 	}
559 
560 	disk_destroy(softc->disk);
561 	callout_drain(&softc->sendordered_c);
562 	free(softc, M_DEVBUF);
563 	cam_periph_lock(periph);
564 }
565 
566 static void
567 adaasync(void *callback_arg, u_int32_t code,
568 	struct cam_path *path, void *arg)
569 {
570 	struct cam_periph *periph;
571 
572 	periph = (struct cam_periph *)callback_arg;
573 	switch (code) {
574 	case AC_FOUND_DEVICE:
575 	{
576 		struct ccb_getdev *cgd;
577 		cam_status status;
578 
579 		cgd = (struct ccb_getdev *)arg;
580 		if (cgd == NULL)
581 			break;
582 
583 		if (cgd->protocol != PROTO_ATA)
584 			break;
585 
586 		/*
587 		 * Allocate a peripheral instance for
588 		 * this device and start the probe
589 		 * process.
590 		 */
591 		status = cam_periph_alloc(adaregister, adaoninvalidate,
592 					  adacleanup, adastart,
593 					  "ada", CAM_PERIPH_BIO,
594 					  cgd->ccb_h.path, adaasync,
595 					  AC_FOUND_DEVICE, cgd);
596 
597 		if (status != CAM_REQ_CMP
598 		 && status != CAM_REQ_INPROG)
599 			printf("adaasync: Unable to attach to new device "
600 				"due to status 0x%x\n", status);
601 		break;
602 	}
603 	default:
604 		cam_periph_async(periph, code, path, arg);
605 		break;
606 	}
607 }
608 
609 static void
610 adasysctlinit(void *context, int pending)
611 {
612 	struct cam_periph *periph;
613 	struct ada_softc *softc;
614 	char tmpstr[80], tmpstr2[80];
615 
616 	periph = (struct cam_periph *)context;
617 	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
618 		return;
619 
620 	softc = (struct ada_softc *)periph->softc;
621 	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
622 	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
623 
624 	sysctl_ctx_init(&softc->sysctl_ctx);
625 	softc->flags |= ADA_FLAG_SCTX_INIT;
626 	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
627 		SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
628 		CTLFLAG_RD, 0, tmpstr);
629 	if (softc->sysctl_tree == NULL) {
630 		printf("adasysctlinit: unable to allocate sysctl tree\n");
631 		cam_periph_release(periph);
632 		return;
633 	}
634 
635 	cam_periph_release(periph);
636 }
637 
638 static cam_status
639 adaregister(struct cam_periph *periph, void *arg)
640 {
641 	struct ada_softc *softc;
642 	struct ccb_pathinq cpi;
643 	struct ccb_getdev *cgd;
644 	char   announce_buf[80];
645 	struct disk_params *dp;
646 	caddr_t match;
647 	u_int maxio;
648 
649 	cgd = (struct ccb_getdev *)arg;
650 	if (periph == NULL) {
651 		printf("adaregister: periph was NULL!!\n");
652 		return(CAM_REQ_CMP_ERR);
653 	}
654 
655 	if (cgd == NULL) {
656 		printf("adaregister: no getdev CCB, can't register device\n");
657 		return(CAM_REQ_CMP_ERR);
658 	}
659 
660 	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
661 	    M_NOWAIT|M_ZERO);
662 
663 	if (softc == NULL) {
664 		printf("adaregister: Unable to probe new device. "
665 		    "Unable to allocate softc\n");
666 		return(CAM_REQ_CMP_ERR);
667 	}
668 
669 	bioq_init(&softc->bio_queue);
670 	bioq_init(&softc->trim_queue);
671 
672 	if (cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA)
673 		softc->flags |= ADA_FLAG_CAN_DMA;
674 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
675 		softc->flags |= ADA_FLAG_CAN_48BIT;
676 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
677 		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
678 	if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT)
679 		softc->flags |= ADA_FLAG_CAN_POWERMGT;
680 	if (cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ &&
681 	    cgd->inq_flags & SID_CmdQue)
682 		softc->flags |= ADA_FLAG_CAN_NCQ;
683 	if (cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) {
684 		softc->flags |= ADA_FLAG_CAN_TRIM;
685 		softc->trim_max_ranges = TRIM_MAX_RANGES;
686 		if (cgd->ident_data.max_dsm_blocks != 0) {
687 			softc->trim_max_ranges =
688 			    min(cgd->ident_data.max_dsm_blocks * 64,
689 				softc->trim_max_ranges);
690 		}
691 	}
692 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
693 		softc->flags |= ADA_FLAG_CAN_CFA;
694 	softc->state = ADA_STATE_NORMAL;
695 
696 	periph->softc = softc;
697 
698 	/*
699 	 * See if this device has any quirks.
700 	 */
701 	match = cam_quirkmatch((caddr_t)&cgd->ident_data,
702 			       (caddr_t)ada_quirk_table,
703 			       sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
704 			       sizeof(*ada_quirk_table), ata_identify_match);
705 	if (match != NULL)
706 		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
707 	else
708 		softc->quirks = ADA_Q_NONE;
709 
710 	bzero(&cpi, sizeof(cpi));
711 	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE);
712 	cpi.ccb_h.func_code = XPT_PATH_INQ;
713 	xpt_action((union ccb *)&cpi);
714 
715 	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);
716 
717 	/*
718 	 * Register this media as a disk
719 	 */
720 	mtx_unlock(periph->sim->mtx);
721 	adagetparams(periph, cgd);
722 	softc->disk = disk_alloc();
723 	softc->disk->d_open = adaopen;
724 	softc->disk->d_close = adaclose;
725 	softc->disk->d_strategy = adastrategy;
726 	softc->disk->d_dump = adadump;
727 	softc->disk->d_name = "ada";
728 	softc->disk->d_drv1 = periph;
729 	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
730 	if (maxio == 0)
731 		maxio = DFLTPHYS;	/* traditional default */
732 	else if (maxio > MAXPHYS)
733 		maxio = MAXPHYS;	/* for safety */
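	/*
	 * Further cap the transfer size by the command set's sector count
	 * field: 48-bit commands carry a 16-bit count (65536 sectors),
	 * 28-bit commands an 8-bit count (256 sectors).
	 */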
734 	if (softc->flags & ADA_FLAG_CAN_48BIT)
735 		maxio = min(maxio, 65536 * softc->params.secsize);
736 	else					/* 28bit ATA command limit */
737 		maxio = min(maxio, 256 * softc->params.secsize);
738 	softc->disk->d_maxsize = maxio;
739 	softc->disk->d_unit = periph->unit_number;
740 	softc->disk->d_flags = 0;
741 	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
742 		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
743 	if ((softc->flags & ADA_FLAG_CAN_TRIM) ||
744 	    ((softc->flags & ADA_FLAG_CAN_CFA) &&
745 	    !(softc->flags & ADA_FLAG_CAN_48BIT)))
746 		softc->disk->d_flags |= DISKFLAG_CANDELETE;
747 	strlcpy(softc->disk->d_ident, cgd->serial_num,
748 	    MIN(sizeof(softc->disk->d_ident), cgd->serial_num_len + 1));
749 	softc->disk->d_hba_vendor = cpi.hba_vendor;
750 	softc->disk->d_hba_device = cpi.hba_device;
751 	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
752 	softc->disk->d_hba_subdevice = cpi.hba_subdevice;
753 
754 	softc->disk->d_sectorsize = softc->params.secsize;
755 	softc->disk->d_mediasize = (off_t)softc->params.sectors *
756 	    softc->params.secsize;
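	/*
	 * If the reported physical sector size is larger than the logical
	 * one (e.g. 4K-sector drives), expose it as the stripe size and
	 * account for any logical-to-physical alignment offset.
	 */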
757 	if (ata_physical_sector_size(&cgd->ident_data) !=
758 	    softc->params.secsize) {
759 		softc->disk->d_stripesize =
760 		    ata_physical_sector_size(&cgd->ident_data);
761 		softc->disk->d_stripeoffset = (softc->disk->d_stripesize -
762 		    ata_logical_sector_offset(&cgd->ident_data)) %
763 		    softc->disk->d_stripesize;
764 	}
765 	softc->disk->d_fwsectors = softc->params.secs_per_track;
766 	softc->disk->d_fwheads = softc->params.heads;
767 	ata_disk_firmware_geom_adjust(softc->disk);
768 
769 	disk_create(softc->disk, DISK_VERSION);
770 	mtx_lock(periph->sim->mtx);
771 
772 	dp = &softc->params;
773 	snprintf(announce_buf, sizeof(announce_buf),
774 		"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
775 		(uintmax_t)(((uintmax_t)dp->secsize *
776 		dp->sectors) / (1024*1024)),
777 		(uintmax_t)dp->sectors,
778 		dp->secsize, dp->heads,
779 		dp->secs_per_track, dp->cylinders);
780 	xpt_announce_periph(periph, announce_buf);
781 	/*
782 	 * Add an async callback so we hear about device
783 	 * departure (AC_LOST_DEVICE).  I don't bother
784 	 * checking if this fails as, in most cases,
785 	 * the system will function just fine without
786 	 * it and the only alternative would be to
787 	 * not attach the device on failure.
788 	 */
789 	xpt_register_async(AC_LOST_DEVICE,
790 			   adaasync, periph, periph->path);
791 
792 	/*
793 	 * Schedule a periodic event to occasionally send an
794 	 * ordered tag to a device.
795 	 */
796 	callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
797 	callout_reset(&softc->sendordered_c,
798 	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
799 	    adasendorderedtag, softc);
800 
801 	return(CAM_REQ_CMP);
802 }
803 
804 static void
805 adastart(struct cam_periph *periph, union ccb *start_ccb)
806 {
807 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
808 	struct ccb_ataio *ataio = &start_ccb->ataio;
809 
810 	switch (softc->state) {
811 	case ADA_STATE_NORMAL:
812 	{
813 		struct bio *bp;
814 		u_int8_t tag_code;
815 
816 		/* Execute immediate CCB if waiting. */
817 		if (periph->immediate_priority <= periph->pinfo.priority) {
818 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
819 					("queuing for immediate ccb\n"));
820 			start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
821 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
822 					  periph_links.sle);
823 			periph->immediate_priority = CAM_PRIORITY_NONE;
824 			wakeup(&periph->ccb_list);
825 			/* Have more work to do, so ensure we stay scheduled */
826 			adaschedule(periph);
827 			break;
828 		}
829 		/* Run TRIM if not running yet. */
830 		if (!softc->trim_running &&
831 		    (bp = bioq_first(&softc->trim_queue)) != 0) {
832 			struct trim_request *req = &softc->trim_req;
833 			struct bio *bp1;
834 			int bps = 0, ranges = 0;
835 
836 			softc->trim_running = 1;
837 			bzero(req, sizeof(*req));
838 			bp1 = bp;
839 			do {
840 				uint64_t lba = bp1->bio_pblkno;
841 				int count = bp1->bio_bcount /
842 				    softc->params.secsize;
843 
844 				bioq_remove(&softc->trim_queue, bp1);
845 				while (count > 0) {
846 					int c = min(count, 0xffff);
847 					int off = ranges * 8;
848 
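					/*
					 * Encode one descriptor: 48-bit LBA
					 * in bytes 0-5, 16-bit sector count
					 * in bytes 6-7, at most 0xffff
					 * sectors per descriptor.
					 */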
849 					req->data[off + 0] = lba & 0xff;
850 					req->data[off + 1] = (lba >> 8) & 0xff;
851 					req->data[off + 2] = (lba >> 16) & 0xff;
852 					req->data[off + 3] = (lba >> 24) & 0xff;
853 					req->data[off + 4] = (lba >> 32) & 0xff;
854 					req->data[off + 5] = (lba >> 40) & 0xff;
855 					req->data[off + 6] = c & 0xff;
856 					req->data[off + 7] = (c >> 8) & 0xff;
857 					lba += c;
858 					count -= c;
859 					ranges++;
860 				}
861 				req->bps[bps++] = bp1;
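				/*
				 * Peek at the next queued delete; stop
				 * coalescing if, at the worst case of 0xffff
				 * sectors per descriptor, it would not fit
				 * into the remaining descriptor slots.
				 */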
862 				bp1 = bioq_first(&softc->trim_queue);
863 				if (bp1 == NULL ||
864 				    bp1->bio_bcount / softc->params.secsize >
865 				    (softc->trim_max_ranges - ranges) * 0xffff)
866 					break;
867 			} while (1);
868 			cam_fill_ataio(ataio,
869 			    ada_retry_count,
870 			    adadone,
871 			    CAM_DIR_OUT,
872 			    0,
873 			    req->data,
874 			    ((ranges + 63) / 64) * 512,
875 			    ada_default_timeout * 1000);
876 			ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
877 			    ATA_DSM_TRIM, 0, (ranges + 63) / 64);
878 			start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
879 			goto out;
880 		}
881 		/* Run regular command. */
882 		bp = bioq_first(&softc->bio_queue);
883 		if (bp == NULL) {
884 			xpt_release_ccb(start_ccb);
885 			break;
886 		}
887 		bioq_remove(&softc->bio_queue, bp);
888 
889 		if ((bp->bio_flags & BIO_ORDERED) != 0
890 		 || (softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
891 			softc->flags &= ~ADA_FLAG_NEED_OTAG;
892 			softc->ordered_tag_count++;
893 			tag_code = 0;
894 		} else {
895 			tag_code = 1;
896 		}
897 		switch (bp->bio_cmd) {
898 		case BIO_READ:
899 		case BIO_WRITE:
900 		{
901 			uint64_t lba = bp->bio_pblkno;
902 			uint16_t count = bp->bio_bcount / softc->params.secsize;
903 
904 			cam_fill_ataio(ataio,
905 			    ada_retry_count,
906 			    adadone,
907 			    bp->bio_cmd == BIO_READ ?
908 			        CAM_DIR_IN : CAM_DIR_OUT,
909 			    tag_code,
910 			    bp->bio_data,
911 			    bp->bio_bcount,
912 			    ada_default_timeout*1000);
913 
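			/*
			 * Pick the command: NCQ when supported and the
			 * request is not an ordered tag, 48-bit DMA/PIO when
			 * the LBA or count exceeds the 28-bit limits, and
			 * 28-bit commands (count 256 encoded as 0) otherwise.
			 */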
914 			if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) {
915 				if (bp->bio_cmd == BIO_READ) {
916 					ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
917 					    lba, count);
918 				} else {
919 					ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
920 					    lba, count);
921 				}
922 			} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
923 			    (lba + count >= ATA_MAX_28BIT_LBA ||
924 			    count > 256)) {
925 				if (softc->flags & ADA_FLAG_CAN_DMA) {
926 					if (bp->bio_cmd == BIO_READ) {
927 						ata_48bit_cmd(ataio, ATA_READ_DMA48,
928 						    0, lba, count);
929 					} else {
930 						ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
931 						    0, lba, count);
932 					}
933 				} else {
934 					if (bp->bio_cmd == BIO_READ) {
935 						ata_48bit_cmd(ataio, ATA_READ_MUL48,
936 						    0, lba, count);
937 					} else {
938 						ata_48bit_cmd(ataio, ATA_WRITE_MUL48,
939 						    0, lba, count);
940 					}
941 				}
942 			} else {
943 				if (count == 256)
944 					count = 0;
945 				if (softc->flags & ADA_FLAG_CAN_DMA) {
946 					if (bp->bio_cmd == BIO_READ) {
947 						ata_28bit_cmd(ataio, ATA_READ_DMA,
948 						    0, lba, count);
949 					} else {
950 						ata_28bit_cmd(ataio, ATA_WRITE_DMA,
951 						    0, lba, count);
952 					}
953 				} else {
954 					if (bp->bio_cmd == BIO_READ) {
955 						ata_28bit_cmd(ataio, ATA_READ_MUL,
956 						    0, lba, count);
957 					} else {
958 						ata_28bit_cmd(ataio, ATA_WRITE_MUL,
959 						    0, lba, count);
960 					}
961 				}
962 			}
963 			break;
964 		}
965 		case BIO_DELETE:
966 		{
967 			uint64_t lba = bp->bio_pblkno;
968 			uint16_t count = bp->bio_bcount / softc->params.secsize;
969 
970 			cam_fill_ataio(ataio,
971 			    ada_retry_count,
972 			    adadone,
973 			    CAM_DIR_NONE,
974 			    0,
975 			    NULL,
976 			    0,
977 			    ada_default_timeout*1000);
978 
979 			if (count >= 256)
980 				count = 0;
981 			ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
982 			break;
983 		}
984 		case BIO_FLUSH:
985 			cam_fill_ataio(ataio,
986 			    1,
987 			    adadone,
988 			    CAM_DIR_NONE,
989 			    0,
990 			    NULL,
991 			    0,
992 			    ada_default_timeout*1000);
993 
994 			if (softc->flags & ADA_FLAG_CAN_48BIT)
995 				ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
996 			else
997 				ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
998 			break;
999 		}
1000 		start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
1001 out:
1002 		start_ccb->ccb_h.ccb_bp = bp;
1003 		softc->outstanding_cmds++;
1004 		xpt_action(start_ccb);
1005 
1006 		/* May have more work to do, so ensure we stay scheduled */
1007 		adaschedule(periph);
1008 		break;
1009 	}
1010 	}
1011 }
1012 
1013 static void
1014 adadone(struct cam_periph *periph, union ccb *done_ccb)
1015 {
1016 	struct ada_softc *softc;
1017 	struct ccb_ataio *ataio;
1018 
1019 	softc = (struct ada_softc *)periph->softc;
1020 	ataio = &done_ccb->ataio;
1021 	switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
1022 	case ADA_CCB_BUFFER_IO:
1023 	case ADA_CCB_TRIM:
1024 	{
1025 		struct bio *bp;
1026 
1027 		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
1028 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1029 			int error;
1030 
1031 			error = adaerror(done_ccb, 0, 0);
1032 			if (error == ERESTART) {
1033 				/* A retry was scheduled, so just return. */
1034 				return;
1035 			}
1036 			if (error != 0) {
1037 				if (error == ENXIO) {
1038 					/*
1039 					 * Catastrophic error.  Mark our pack as
1040 					 * invalid.
1041 					 */
1042 					/*
1043 					 * XXX See if this is really a media
1044 					 * XXX change first?
1045 					 */
1046 					xpt_print(periph->path,
1047 					    "Invalidating pack\n");
1048 					softc->flags |= ADA_FLAG_PACK_INVALID;
1049 				}
1050 				bp->bio_error = error;
1051 				bp->bio_resid = bp->bio_bcount;
1052 				bp->bio_flags |= BIO_ERROR;
1053 			} else {
1054 				bp->bio_resid = ataio->resid;
1055 				bp->bio_error = 0;
1056 				if (bp->bio_resid != 0)
1057 					bp->bio_flags |= BIO_ERROR;
1058 			}
1059 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1060 				cam_release_devq(done_ccb->ccb_h.path,
1061 						 /*relsim_flags*/0,
1062 						 /*reduction*/0,
1063 						 /*timeout*/0,
1064 						 /*getcount_only*/0);
1065 		} else {
1066 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1067 				panic("REQ_CMP with QFRZN");
1068 			bp->bio_resid = ataio->resid;
1069 			if (ataio->resid > 0)
1070 				bp->bio_flags |= BIO_ERROR;
1071 		}
1072 		softc->outstanding_cmds--;
1073 		if (softc->outstanding_cmds == 0)
1074 			softc->flags |= ADA_FLAG_WENT_IDLE;
1075 		if ((ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) ==
1076 		    ADA_CCB_TRIM) {
1077 			struct trim_request *req =
1078 			    (struct trim_request *)ataio->data_ptr;
1079 			int i;
1080 
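			/*
			 * data_ptr points at req->data, the first member of
			 * the trim_request, so the cast above recovers the
			 * request.  bps[0] is the bio in ccb_bp, completed
			 * below; propagate the status to the other bios
			 * coalesced into this TRIM.
			 */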
1081 			for (i = 1; i < softc->trim_max_ranges &&
1082 			    req->bps[i]; i++) {
1083 				struct bio *bp1 = req->bps[i];
1084 
1085 				bp1->bio_resid = bp->bio_resid;
1086 				bp1->bio_error = bp->bio_error;
1087 				if (bp->bio_flags & BIO_ERROR)
1088 					bp1->bio_flags |= BIO_ERROR;
1089 				biodone(bp1);
1090 			}
1091 			softc->trim_running = 0;
1092 			biodone(bp);
1093 			adaschedule(periph);
1094 		} else
1095 			biodone(bp);
1096 		break;
1097 	}
1098 	case ADA_CCB_WAITING:
1099 	{
1100 		/* Caller will release the CCB */
1101 		wakeup(&done_ccb->ccb_h.cbfcnp);
1102 		return;
1103 	}
1104 	case ADA_CCB_DUMP:
1105 		/* No-op.  We're polling */
1106 		return;
1107 	default:
1108 		break;
1109 	}
1110 	xpt_release_ccb(done_ccb);
1111 }
1112 
1113 static int
1114 adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1115 {
1116 	struct ada_softc	  *softc;
1117 	struct cam_periph *periph;
1118 
1119 	periph = xpt_path_periph(ccb->ccb_h.path);
1120 	softc = (struct ada_softc *)periph->softc;
1121 
1122 	return(cam_periph_error(ccb, cam_flags, sense_flags, NULL));
1123 }
1124 
1125 static void
1126 adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd)
1127 {
1128 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
1129 	struct disk_params *dp = &softc->params;
1130 	u_int64_t lbasize48;
1131 	u_int32_t lbasize;
1132 
1133 	dp->secsize = ata_logical_sector_size(&cgd->ident_data);
1134 	if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
1135 		cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
1136 		dp->heads = cgd->ident_data.current_heads;
1137 		dp->secs_per_track = cgd->ident_data.current_sectors;
1138 		dp->cylinders = cgd->ident_data.cylinders;
1139 		dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
1140 			  ((u_int32_t)cgd->ident_data.current_size_2 << 16);
1141 	} else {
1142 		dp->heads = cgd->ident_data.heads;
1143 		dp->secs_per_track = cgd->ident_data.sectors;
1144 		dp->cylinders = cgd->ident_data.cylinders;
1145 		dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
1146 	}
1147 	lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
1148 		  ((u_int32_t)cgd->ident_data.lba_size_2 << 16);
1149 
1150 	/* Use the 28-bit LBA size if the CHS geometry is capped or smaller. */
1151 	if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
1152 		dp->sectors = lbasize;
1153 
1154 	/* Use the 48-bit LBA size if valid. */
1155 	lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
1156 		    ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
1157 		    ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
1158 		    ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
1159 	if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
1160 	    lbasize48 > ATA_MAX_28BIT_LBA)
1161 		dp->sectors = lbasize48;
1162 }
1163 
1164 static void
1165 adasendorderedtag(void *arg)
1166 {
1167 	struct ada_softc *softc = arg;
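	/*
	 * If no ordered tag went out during the last interval and the
	 * device never went idle, ask for the next command to carry an
	 * ordered tag so that simple-tagged requests cannot starve.
	 */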
1168 
1169 	if (ada_send_ordered) {
1170 		if ((softc->ordered_tag_count == 0)
1171 		 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
1172 			softc->flags |= ADA_FLAG_NEED_OTAG;
1173 		}
1174 		if (softc->outstanding_cmds > 0)
1175 			softc->flags &= ~ADA_FLAG_WENT_IDLE;
1176 
1177 		softc->ordered_tag_count = 0;
1178 	}
1179 	/* Queue us up again */
1180 	callout_reset(&softc->sendordered_c,
1181 	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
1182 	    adasendorderedtag, softc);
1183 }
1184 
1185 /*
1186  * Step through all ADA peripheral drivers, and if the device is still open,
1187  * sync the disk cache to physical media.
1188  */
1189 static void
1190 adashutdown(void * arg, int howto)
1191 {
1192 	struct cam_periph *periph;
1193 	struct ada_softc *softc;
1194 
1195 	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
1196 		union ccb ccb;
1197 
1198 		/* If we panicked with the lock held, do not recurse here. */
1199 		if (cam_periph_owned(periph))
1200 			continue;
1201 		cam_periph_lock(periph);
1202 		softc = (struct ada_softc *)periph->softc;
1203 		/*
1204 		 * We only sync the cache if the drive is still open, and
1205 		 * if the drive is capable of it..
1206 		 * if the drive is capable of it.
1207 		if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
1208 		    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
1209 			cam_periph_unlock(periph);
1210 			continue;
1211 		}
1212 
1213 		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1214 
1215 		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
1216 		cam_fill_ataio(&ccb.ataio,
1217 				    1,
1218 				    adadone,
1219 				    CAM_DIR_NONE,
1220 				    0,
1221 				    NULL,
1222 				    0,
1223 				    ada_default_timeout*1000);
1224 
1225 		if (softc->flags & ADA_FLAG_CAN_48BIT)
1226 			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
1227 		else
1228 			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
1229 		xpt_polled_action(&ccb);
1230 
1231 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
1232 			xpt_print(periph->path, "Synchronize cache failed\n");
1233 
1234 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1235 			cam_release_devq(ccb.ccb_h.path,
1236 					 /*relsim_flags*/0,
1237 					 /*reduction*/0,
1238 					 /*timeout*/0,
1239 					 /*getcount_only*/0);
1240 		cam_periph_unlock(periph);
1241 	}
1242 
1243 	if (ada_spindown_shutdown == 0 ||
1244 	    (howto & (RB_HALT | RB_POWEROFF)) == 0)
1245 		return;
1246 
1247 	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
1248 		union ccb ccb;
1249 
1250 		/* If we panicked with the lock held, do not recurse here. */
1251 		if (cam_periph_owned(periph))
1252 			continue;
1253 		cam_periph_lock(periph);
1254 		softc = (struct ada_softc *)periph->softc;
1255 		/*
1256 		 * We only spin down the drive if it is capable of it.
1257 		 */
1258 		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
1259 			cam_periph_unlock(periph);
1260 			continue;
1261 		}
1262 
1263 		if (bootverbose)
1264 			xpt_print(periph->path, "spin-down\n");
1265 
1266 		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1267 
1268 		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
1269 		cam_fill_ataio(&ccb.ataio,
1270 				    1,
1271 				    adadone,
1272 				    CAM_DIR_NONE,
1273 				    0,
1274 				    NULL,
1275 				    0,
1276 				    ada_default_timeout*1000);
1277 
1278 		ata_28bit_cmd(&ccb.ataio, ATA_STANDBY_IMMEDIATE, 0, 0, 0);
1279 		xpt_polled_action(&ccb);
1280 
1281 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
1282 			xpt_print(periph->path, "Spin-down disk failed\n");
1283 
1284 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1285 			cam_release_devq(ccb.ccb_h.path,
1286 					 /*relsim_flags*/0,
1287 					 /*reduction*/0,
1288 					 /*timeout*/0,
1289 					 /*getcount_only*/0);
1290 		cam_periph_unlock(periph);
1291 	}
1292 }
1293 
1294 #endif /* _KERNEL */
1295