xref: /freebsd/sys/cam/ata/ata_da.c (revision 4ed925457ab06e83238a5db33e89ccc94b99a713)
/*-
 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/cons.h>
#include <geom/geom_disk.h>
#endif /* _KERNEL */

#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#endif /* _KERNEL */

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_sim.h>

#include <cam/ata/ata_all.h>

#ifdef _KERNEL

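/*
 * The largest LBA addressable by 28-bit ATA commands: 2^28 - 1 sectors.
 * Transfers that start at or run past this boundary must use the 48-bit
 * command set.
 */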
#define ATA_MAX_28BIT_LBA               268435455UL

typedef enum {
	ADA_STATE_NORMAL
} ada_state;

typedef enum {
	ADA_FLAG_PACK_INVALID	= 0x001,
	ADA_FLAG_CAN_48BIT	= 0x002,
	ADA_FLAG_CAN_FLUSHCACHE	= 0x004,
	ADA_FLAG_CAN_NCQ	= 0x008,
	ADA_FLAG_CAN_DMA	= 0x010,
	ADA_FLAG_NEED_OTAG	= 0x020,
	ADA_FLAG_WENT_IDLE	= 0x040,
	ADA_FLAG_CAN_TRIM	= 0x080,
	ADA_FLAG_OPEN		= 0x100,
	ADA_FLAG_SCTX_INIT	= 0x200,
	ADA_FLAG_CAN_CFA        = 0x400
} ada_flags;

typedef enum {
	ADA_Q_NONE		= 0x00
} ada_quirks;

typedef enum {
	ADA_CCB_BUFFER_IO	= 0x03,
	ADA_CCB_WAITING		= 0x04,
	ADA_CCB_DUMP		= 0x05,
	ADA_CCB_TRIM		= 0x06,
	ADA_CCB_TYPE_MASK	= 0x0F,
} ada_ccb_state;

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

struct disk_params {
	u_int8_t  heads;
	u_int8_t  secs_per_track;
	u_int32_t cylinders;
	u_int32_t secsize;	/* Number of bytes/logical sector */
	u_int64_t sectors;	/* Total number sectors */
};

#define TRIM_MAX_BLOCKS	4
#define TRIM_MAX_RANGES	(TRIM_MAX_BLOCKS * 64)
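/*
 * Each DSM TRIM range is 8 bytes: a 48-bit starting LBA followed by a
 * 16-bit sector count, so one 512-byte payload block holds 64 ranges.
 * With TRIM_MAX_BLOCKS payload blocks we can pass at most TRIM_MAX_RANGES
 * ranges in a single DATA SET MANAGEMENT command.
 */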
struct trim_request {
	uint8_t		data[TRIM_MAX_RANGES * 8];
	struct bio	*bps[TRIM_MAX_RANGES];
};

struct ada_softc {
	struct	 bio_queue_head bio_queue;
	struct	 bio_queue_head trim_queue;
	ada_state state;
	ada_flags flags;
	ada_quirks quirks;
	int	 ordered_tag_count;
	int	 outstanding_cmds;
	int	 trim_max_ranges;
	int	 trim_running;
	struct	 disk_params params;
	struct	 disk *disk;
	struct task		sysctl_task;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	struct callout		sendordered_c;
	struct trim_request	trim_req;
};

struct ada_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	ada_quirks quirks;
};

static struct ada_quirk_entry ada_quirk_table[] =
{
	{
		/* Default */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0
	},
};

static	disk_strategy_t	adastrategy;
static	dumper_t	adadump;
static	periph_init_t	adainit;
static	void		adaasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	void		adasysctlinit(void *context, int pending);
static	periph_ctor_t	adaregister;
static	periph_dtor_t	adacleanup;
static	periph_start_t	adastart;
static	periph_oninv_t	adaoninvalidate;
static	void		adadone(struct cam_periph *periph,
			       union ccb *done_ccb);
static  int		adaerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static void		adagetparams(struct cam_periph *periph,
				struct ccb_getdev *cgd);
static timeout_t	adasendorderedtag;
static void		adashutdown(void *arg, int howto);

#ifndef ADA_DEFAULT_TIMEOUT
#define ADA_DEFAULT_TIMEOUT 30	/* Timeout in seconds */
#endif

#ifndef	ADA_DEFAULT_RETRY
#define	ADA_DEFAULT_RETRY	4
#endif

#ifndef	ADA_DEFAULT_SEND_ORDERED
#define	ADA_DEFAULT_SEND_ORDERED	1
#endif


static int ada_retry_count = ADA_DEFAULT_RETRY;
static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;

SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
            "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
           &ada_retry_count, 0, "Normal I/O retry count");
TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
           &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, ada_send_ordered, CTLFLAG_RW,
           &ada_send_ordered, 0, "Send Ordered Tags");
TUNABLE_INT("kern.cam.ada.ada_send_ordered", &ada_send_ordered);

/*
 * ADA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This covers the worst case,
 * where a starved transaction starts during an interval that still
 * passes the "don't send an ordered tag" test, so it takes us two
 * intervals to determine that a tag must be sent.
 */
#ifndef ADA_ORDEREDTAG_INTERVAL
#define ADA_ORDEREDTAG_INTERVAL 4
#endif
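/*
 * With the default 30 second timeout and an interval of 4, the ordered-tag
 * callout scheduled in adaregister() and rearmed in adasendorderedtag()
 * fires every (30 * hz) / 4 ticks, i.e. every 7.5 seconds.
 */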

static struct periph_driver adadriver =
{
	adainit, "ada",
	TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(ada, adadriver);

MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");

static int
adaopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	int unit;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL) {
		return (ENXIO);
	}

	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		return(ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	unit = periph->unit_number;
	softc = (struct ada_softc *)periph->softc;
	softc->flags |= ADA_FLAG_OPEN;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("adaopen: disk=%s%d (unit %d)\n", dp->d_name, dp->d_unit,
	     unit));

	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
		/* The pack was invalidated; mark it valid again on open. */
		softc->flags &= ~ADA_FLAG_PACK_INVALID;
	}

	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	return (0);
}

static int
adaclose(struct disk *dp)
{
	struct	cam_periph *periph;
	struct	ada_softc *softc;
	union ccb *ccb;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	softc = (struct ada_softc *)periph->softc;
	/* We only sync the cache if the drive is capable of it. */
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {

		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		cam_fill_ataio(&ccb->ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
		    /*sense_flags*/0, softc->disk->d_devstat);

		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		xpt_release_ccb(ccb);
	}

	softc->flags &= ~ADA_FLAG_OPEN;
	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
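	/* Drop the reference obtained by cam_periph_acquire() in adaopen(). */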
	cam_periph_release(periph);
	return (0);
}

static void
adaschedule(struct cam_periph *periph)
{
	struct ada_softc *softc = (struct ada_softc *)periph->softc;

	if (bioq_first(&softc->bio_queue) ||
	    (!softc->trim_running && bioq_first(&softc->trim_queue))) {
		/* Have more work to do, so ensure we stay scheduled */
		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
	}
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a struct bio and will
 * include only one physical transfer.
 */
static void
adastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	if (periph == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	softc = (struct ada_softc *)periph->softc;

	cam_periph_lock(periph);

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	if (bp->bio_cmd == BIO_DELETE &&
	    (softc->flags & ADA_FLAG_CAN_TRIM))
		bioq_disksort(&softc->trim_queue, bp);
	else
		bioq_disksort(&softc->bio_queue, bp);

	/*
	 * Schedule ourselves for performing the work.
	 */
	adaschedule(periph);
	cam_periph_unlock(periph);

	return;
}

static int
adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct	    cam_periph *periph;
	struct	    ada_softc *softc;
	u_int	    secsize;
	union	    ccb ccb;
	struct	    disk *dp;
	uint64_t    lba;
	uint16_t    count;

	dp = arg;
	periph = dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);
	softc = (struct ada_softc *)periph->softc;
	cam_periph_lock(periph);
	secsize = softc->params.secsize;
	lba = offset / secsize;
	count = length / secsize;

	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
		cam_periph_unlock(periph);
		return (ENXIO);
	}

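	/*
	 * The dump may be running from panic context, so the requests below
	 * are issued as polled CCBs via xpt_polled_action() rather than
	 * relying on interrupt-driven completion.
	 */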
	if (length > 0) {
		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
		    0,
		    adadone,
		    CAM_DIR_OUT,
		    0,
		    (u_int8_t *) virtual,
		    length,
		    ada_default_timeout*1000);
		if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
		    (lba + count >= ATA_MAX_28BIT_LBA ||
		    count >= 256)) {
			ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
			    0, lba, count);
		} else {
			ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
			    0, lba, count);
		}
		xpt_polled_action(&ccb);

		if ((ccb.ataio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			printf("Aborting dump due to I/O error.\n");
			cam_periph_unlock(periph);
			return(EIO);
		}
		cam_periph_unlock(periph);
		return(0);
	}

	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
	}
	cam_periph_unlock(periph);
	return (0);
}

static void
adainit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("ada: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else if (ada_send_ordered) {

		/* Register our shutdown event handler */
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
		    printf("adainit: shutdown event registration failed!\n");
	}
}

static void
adaoninvalidate(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, adaasync, periph, periph->path);

	softc->flags |= ADA_FLAG_PACK_INVALID;

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	bioq_flush(&softc->bio_queue, NULL, ENXIO);
	bioq_flush(&softc->trim_queue, NULL, ENXIO);

	disk_gone(softc->disk);
	xpt_print(periph->path, "lost device\n");
}

static void
adacleanup(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	xpt_print(periph->path, "removing device entry\n");
	cam_periph_unlock(periph);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
		xpt_print(periph->path, "can't remove sysctl context\n");
	}

	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}

static void
adaasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		if (cgd->protocol != PROTO_ATA)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(adaregister, adaoninvalidate,
					  adacleanup, adastart,
					  "ada", CAM_PERIPH_BIO,
					  cgd->ccb_h.path, adaasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("adaasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		break;
	}
	default:
		cam_periph_async(periph, code, path, arg);
		break;
	}
}

static void
adasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	char tmpstr[80], tmpstr2[80];

	periph = (struct cam_periph *)context;
	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return;

	softc = (struct ada_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->flags |= ADA_FLAG_SCTX_INIT;
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
		CTLFLAG_RD, 0, tmpstr);
	if (softc->sysctl_tree == NULL) {
		printf("adasysctlinit: unable to allocate sysctl tree\n");
		cam_periph_release(periph);
		return;
	}

	cam_periph_release(periph);
}

static cam_status
adaregister(struct cam_periph *periph, void *arg)
{
	struct ada_softc *softc;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char   announce_buf[80];
	struct disk_params *dp;
	caddr_t match;
	u_int maxio;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("adaregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("adaregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("adaregister: Unable to probe new device. "
		    "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	bioq_init(&softc->bio_queue);
	bioq_init(&softc->trim_queue);

	if (cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA)
		softc->flags |= ADA_FLAG_CAN_DMA;
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
		softc->flags |= ADA_FLAG_CAN_48BIT;
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
	if (cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ &&
	    cgd->inq_flags & SID_CmdQue)
		softc->flags |= ADA_FLAG_CAN_NCQ;
	if (cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) {
		softc->flags |= ADA_FLAG_CAN_TRIM;
		softc->trim_max_ranges = TRIM_MAX_RANGES;
		if (cgd->ident_data.max_dsm_blocks != 0) {
			softc->trim_max_ranges =
			    min(cgd->ident_data.max_dsm_blocks * 64,
				softc->trim_max_ranges);
		}
	}
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
		softc->flags |= ADA_FLAG_CAN_CFA;
	softc->state = ADA_STATE_NORMAL;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->ident_data,
			       (caddr_t)ada_quirk_table,
			       sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
			       sizeof(*ada_quirk_table), ata_identify_match);
	if (match != NULL)
		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
	else
		softc->quirks = ADA_Q_NONE;

	/* Disable NCQ if the SIM does not support tagged queueing. */
	bzero(&cpi, sizeof(cpi));
	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	if (cpi.ccb_h.status != CAM_REQ_CMP ||
	    (cpi.hba_inquiry & PI_TAG_ABLE) == 0)
		softc->flags &= ~ADA_FLAG_CAN_NCQ;

	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);

	/*
	 * Register this media as a disk
	 */
	mtx_unlock(periph->sim->mtx);
	adagetparams(periph, cgd);
	softc->disk = disk_alloc();
	softc->disk->d_open = adaopen;
	softc->disk->d_close = adaclose;
	softc->disk->d_strategy = adastrategy;
	softc->disk->d_dump = adadump;
	softc->disk->d_name = "ada";
	softc->disk->d_drv1 = periph;
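	/*
	 * Bound the transfer size by what the SIM reports and by the
	 * per-command sector count limit of the ATA command set in use:
	 * 65536 sectors for 48-bit commands, 256 sectors for 28-bit.
	 */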
	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
	if (maxio == 0)
		maxio = DFLTPHYS;	/* traditional default */
	else if (maxio > MAXPHYS)
		maxio = MAXPHYS;	/* for safety */
	if (softc->flags & ADA_FLAG_CAN_48BIT)
		maxio = min(maxio, 65536 * softc->params.secsize);
	else					/* 28bit ATA command limit */
		maxio = min(maxio, 256 * softc->params.secsize);
	softc->disk->d_maxsize = maxio;
	softc->disk->d_unit = periph->unit_number;
	softc->disk->d_flags = 0;
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	if ((softc->flags & ADA_FLAG_CAN_TRIM) ||
	    ((softc->flags & ADA_FLAG_CAN_CFA) &&
	    !(softc->flags & ADA_FLAG_CAN_48BIT)))
		softc->disk->d_flags |= DISKFLAG_CANDELETE;
	strlcpy(softc->disk->d_ident, cgd->serial_num,
	    MIN(sizeof(softc->disk->d_ident), cgd->serial_num_len + 1));

	softc->disk->d_sectorsize = softc->params.secsize;
	softc->disk->d_mediasize = (off_t)softc->params.sectors *
	    softc->params.secsize;
	if (ata_physical_sector_size(&cgd->ident_data) !=
	    softc->params.secsize) {
		softc->disk->d_stripesize =
		    ata_physical_sector_size(&cgd->ident_data);
		softc->disk->d_stripeoffset = (softc->disk->d_stripesize -
		    ata_logical_sector_offset(&cgd->ident_data)) %
		    softc->disk->d_stripesize;
	}
	/* XXX: these are not actually "firmware" values, so they may be wrong */
	softc->disk->d_fwsectors = softc->params.secs_per_track;
	softc->disk->d_fwheads = softc->params.heads;

	disk_create(softc->disk, DISK_VERSION);
	mtx_lock(periph->sim->mtx);

	dp = &softc->params;
	snprintf(announce_buf, sizeof(announce_buf),
		"%juMB (%ju %u byte sectors: %dH %dS/T %uC)",
		(uintmax_t)(((uintmax_t)dp->secsize *
		dp->sectors) / (1024*1024)),
		(uintmax_t)dp->sectors,
		dp->secsize, dp->heads,
		dp->secs_per_track, dp->cylinders);
	xpt_announce_periph(periph, announce_buf);
	/*
	 * Add an async callback so we hear about lost device
	 * events.  I don't bother checking if this fails as,
	 * in most cases, the system will function just fine
	 * without it and the only alternative would be to
	 * not attach the device on failure.
	 */
	xpt_register_async(AC_LOST_DEVICE,
			   adaasync, periph, periph->path);

	/*
	 * Schedule a periodic event to occasionally send an
	 * ordered tag to a device.
	 */
	callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
	callout_reset(&softc->sendordered_c,
	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
	    adasendorderedtag, softc);

	return(CAM_REQ_CMP);
}

static void
adastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ada_softc *softc = (struct ada_softc *)periph->softc;
	struct ccb_ataio *ataio = &start_ccb->ataio;

	switch (softc->state) {
	case ADA_STATE_NORMAL:
	{
		struct bio *bp;
		u_int8_t tag_code;

		/* Execute immediate CCB if waiting. */
		if (periph->immediate_priority <= periph->pinfo.priority) {
			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
					("queuing for immediate ccb\n"));
			start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
					  periph_links.sle);
			periph->immediate_priority = CAM_PRIORITY_NONE;
			wakeup(&periph->ccb_list);
			/* Have more work to do, so ensure we stay scheduled */
			adaschedule(periph);
			break;
		}
		/* Run TRIM if not running yet. */
		if (!softc->trim_running &&
		    (bp = bioq_first(&softc->trim_queue)) != 0) {
			struct trim_request *req = &softc->trim_req;
			struct bio *bp1;
			int bps = 0, ranges = 0;

			softc->trim_running = 1;
			bzero(req, sizeof(*req));
			bp1 = bp;
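			/*
			 * Pack bios from the TRIM queue into DSM ranges:
			 * bytes 0-5 of each 8-byte range hold the starting
			 * LBA and bytes 6-7 the sector count (at most 0xffff
			 * sectors per range).  Keep coalescing requests until
			 * the next bio could overflow the ranges we are
			 * allowed to send in one command.
			 */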
			do {
				uint64_t lba = bp1->bio_pblkno;
				int count = bp1->bio_bcount /
				    softc->params.secsize;

				bioq_remove(&softc->trim_queue, bp1);
				while (count > 0) {
					int c = min(count, 0xffff);
					int off = ranges * 8;

					req->data[off + 0] = lba & 0xff;
					req->data[off + 1] = (lba >> 8) & 0xff;
					req->data[off + 2] = (lba >> 16) & 0xff;
					req->data[off + 3] = (lba >> 24) & 0xff;
					req->data[off + 4] = (lba >> 32) & 0xff;
					req->data[off + 5] = (lba >> 40) & 0xff;
					req->data[off + 6] = c & 0xff;
					req->data[off + 7] = (c >> 8) & 0xff;
					lba += c;
					count -= c;
					ranges++;
				}
				req->bps[bps++] = bp1;
				bp1 = bioq_first(&softc->trim_queue);
				if (bp1 == NULL ||
				    bp1->bio_bcount / softc->params.secsize >
				    (softc->trim_max_ranges - ranges) * 0xffff)
					break;
			} while (1);
			cam_fill_ataio(ataio,
			    ada_retry_count,
			    adadone,
			    CAM_DIR_OUT,
			    0,
			    req->data,
			    ((ranges + 63) / 64) * 512,
			    ada_default_timeout * 1000);
			ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
			    ATA_DSM_TRIM, 0, (ranges + 63) / 64);
			start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
			goto out;
		}
		/* Run regular command. */
		bp = bioq_first(&softc->bio_queue);
		if (bp == NULL) {
			xpt_release_ccb(start_ccb);
			break;
		}
		bioq_remove(&softc->bio_queue, bp);

		if ((softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
			softc->flags &= ~ADA_FLAG_NEED_OTAG;
			softc->ordered_tag_count++;
			tag_code = 0;
		} else {
			tag_code = 1;
		}
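		/*
		 * tag_code doubles as the tag_action argument to
		 * cam_fill_ataio(); clearing it when an ordered tag is due
		 * steers the command down the non-NCQ path below, which
		 * cannot be mixed with outstanding NCQ commands and so acts
		 * as a barrier against simple tag starvation.
		 */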
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		{
			uint64_t lba = bp->bio_pblkno;
			uint16_t count = bp->bio_bcount / softc->params.secsize;

			cam_fill_ataio(ataio,
			    ada_retry_count,
			    adadone,
			    bp->bio_cmd == BIO_READ ?
			        CAM_DIR_IN : CAM_DIR_OUT,
			    tag_code,
			    bp->bio_data,
			    bp->bio_bcount,
			    ada_default_timeout*1000);

			if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) {
				if (bp->bio_cmd == BIO_READ) {
					ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
					    lba, count);
				} else {
					ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
					    lba, count);
				}
			} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
			    (lba + count >= ATA_MAX_28BIT_LBA ||
			    count > 256)) {
				if (softc->flags & ADA_FLAG_CAN_DMA) {
					if (bp->bio_cmd == BIO_READ) {
						ata_48bit_cmd(ataio, ATA_READ_DMA48,
						    0, lba, count);
					} else {
						ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
						    0, lba, count);
					}
				} else {
					if (bp->bio_cmd == BIO_READ) {
						ata_48bit_cmd(ataio, ATA_READ_MUL48,
						    0, lba, count);
					} else {
						ata_48bit_cmd(ataio, ATA_WRITE_MUL48,
						    0, lba, count);
					}
				}
			} else {
				if (count == 256)
					count = 0;
				if (softc->flags & ADA_FLAG_CAN_DMA) {
					if (bp->bio_cmd == BIO_READ) {
						ata_28bit_cmd(ataio, ATA_READ_DMA,
						    0, lba, count);
					} else {
						ata_28bit_cmd(ataio, ATA_WRITE_DMA,
						    0, lba, count);
					}
				} else {
					if (bp->bio_cmd == BIO_READ) {
						ata_28bit_cmd(ataio, ATA_READ_MUL,
						    0, lba, count);
					} else {
						ata_28bit_cmd(ataio, ATA_WRITE_MUL,
						    0, lba, count);
					}
				}
			}
			break;
		}
		case BIO_DELETE:
		{
			uint64_t lba = bp->bio_pblkno;
			uint16_t count = bp->bio_bcount / softc->params.secsize;

			cam_fill_ataio(ataio,
			    ada_retry_count,
			    adadone,
			    CAM_DIR_NONE,
			    0,
			    NULL,
			    0,
			    ada_default_timeout*1000);

			if (count >= 256)
				count = 0;
			ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
			break;
		}
		case BIO_FLUSH:
			cam_fill_ataio(ataio,
			    1,
			    adadone,
			    CAM_DIR_NONE,
			    0,
			    NULL,
			    0,
			    ada_default_timeout*1000);

			if (softc->flags & ADA_FLAG_CAN_48BIT)
				ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
			else
				ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
			break;
		}
		start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
out:
		start_ccb->ccb_h.ccb_bp = bp;
		softc->outstanding_cmds++;
		xpt_action(start_ccb);

		/* May have more work to do, so ensure we stay scheduled */
		adaschedule(periph);
		break;
	}
	}
}

static void
adadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ada_softc *softc;
	struct ccb_ataio *ataio;

	softc = (struct ada_softc *)periph->softc;
	ataio = &done_ccb->ataio;
	switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
	case ADA_CCB_BUFFER_IO:
	case ADA_CCB_TRIM:
	{
		struct bio *bp;

		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;

			error = adaerror(done_ccb, 0, 0);
			if (error == ERESTART) {
				/* A retry was scheduled, so just return. */
				return;
			}
			if (error != 0) {
				if (error == ENXIO) {
					/*
					 * Catastrophic error.  Mark our pack as
					 * invalid.
					 */
					/*
					 * XXX See if this is really a media
					 * XXX change first?
					 */
					xpt_print(periph->path,
					    "Invalidating pack\n");
					softc->flags |= ADA_FLAG_PACK_INVALID;
				}
				bp->bio_error = error;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			} else {
				bp->bio_resid = ataio->resid;
				bp->bio_error = 0;
				if (bp->bio_resid != 0)
					bp->bio_flags |= BIO_ERROR;
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				panic("REQ_CMP with QFRZN");
			bp->bio_resid = ataio->resid;
			if (ataio->resid > 0)
				bp->bio_flags |= BIO_ERROR;
		}
		softc->outstanding_cmds--;
		if (softc->outstanding_cmds == 0)
			softc->flags |= ADA_FLAG_WENT_IDLE;
		if ((ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) ==
		    ADA_CCB_TRIM) {
			struct trim_request *req =
			    (struct trim_request *)ataio->data_ptr;
			int i;

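			/*
			 * req->bps[0] is the bio attached to the CCB
			 * (ccb_bp) and is completed below; here we just
			 * propagate the completion status to the other bios
			 * that were coalesced into this TRIM request.
			 */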
			for (i = 1; i < softc->trim_max_ranges &&
			    req->bps[i]; i++) {
				struct bio *bp1 = req->bps[i];

				bp1->bio_resid = bp->bio_resid;
				bp1->bio_error = bp->bio_error;
				if (bp->bio_flags & BIO_ERROR)
					bp1->bio_flags |= BIO_ERROR;
				biodone(bp1);
			}
			softc->trim_running = 0;
			biodone(bp);
			adaschedule(periph);
		} else
			biodone(bp);
		break;
	}
	case ADA_CCB_WAITING:
	{
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	case ADA_CCB_DUMP:
		/* No-op.  We're polling */
		return;
	default:
		break;
	}
	xpt_release_ccb(done_ccb);
}

static int
adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct ada_softc	  *softc;
	struct cam_periph *periph;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct ada_softc *)periph->softc;

	return(cam_periph_error(ccb, cam_flags, sense_flags, NULL));
}

static void
adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd)
{
	struct ada_softc *softc = (struct ada_softc *)periph->softc;
	struct disk_params *dp = &softc->params;
	u_int64_t lbasize48;
	u_int32_t lbasize;

	dp->secsize = ata_logical_sector_size(&cgd->ident_data);
	if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
		cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
		dp->heads = cgd->ident_data.current_heads;
		dp->secs_per_track = cgd->ident_data.current_sectors;
		dp->cylinders = cgd->ident_data.cylinders;
		dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
			  ((u_int32_t)cgd->ident_data.current_size_2 << 16);
	} else {
		dp->heads = cgd->ident_data.heads;
		dp->secs_per_track = cgd->ident_data.sectors;
		dp->cylinders = cgd->ident_data.cylinders;
		dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
	}
	lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
		  ((u_int32_t)cgd->ident_data.lba_size_2 << 16);

	/*
	 * Use the 28-bit LBA size if it is valid or bigger than the CHS
	 * mapping.  A cylinder count of 16383 is the ATA convention for
	 * "capacity exceeds what CHS can express".
	 */
	if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
		dp->sectors = lbasize;

	/* use the 48bit LBA size if valid */
	lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
		    ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
		    ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
		    ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
	if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
	    lbasize48 > ATA_MAX_28BIT_LBA)
		dp->sectors = lbasize48;
}

static void
adasendorderedtag(void *arg)
{
	struct ada_softc *softc = arg;

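	/*
	 * If no ordered tag was issued during the last interval and the
	 * device has not gone idle, ask adastart() to issue the next command
	 * as an ordered tag so that queued commands cannot be starved
	 * indefinitely.
	 */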
	if (ada_send_ordered) {
		if ((softc->ordered_tag_count == 0)
		 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
			softc->flags |= ADA_FLAG_NEED_OTAG;
		}
		if (softc->outstanding_cmds > 0)
			softc->flags &= ~ADA_FLAG_WENT_IDLE;

		softc->ordered_tag_count = 0;
	}
	/* Queue us up again */
	callout_reset(&softc->sendordered_c,
	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
	    adasendorderedtag, softc);
}

/*
 * Step through all ADA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
adashutdown(void *arg, int howto)
{
	struct cam_periph *periph;
	struct ada_softc *softc;

	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
		union ccb ccb;

		/* If we panicked with the lock held, do not recurse here. */
		if (cam_periph_owned(periph))
			continue;
		cam_periph_lock(periph);
		softc = (struct ada_softc *)periph->softc;
		/*
		 * We only sync the cache if the drive is still open and
		 * is capable of it.
		 */
		if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
		    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
			cam_periph_unlock(periph);
			continue;
		}

		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		cam_periph_unlock(periph);
	}
}

#endif /* _KERNEL */