xref: /freebsd/sys/cam/ata/ata_da.c (revision c2bce4a2fcf3083607e00a1734b47c249751c8a8)
1 /*-
2  * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_ada.h"
31 
32 #include <sys/param.h>
33 
34 #ifdef _KERNEL
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/bio.h>
38 #include <sys/sysctl.h>
39 #include <sys/taskqueue.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/conf.h>
43 #include <sys/devicestat.h>
44 #include <sys/eventhandler.h>
45 #include <sys/malloc.h>
46 #include <sys/cons.h>
47 #include <sys/reboot.h>
48 #include <geom/geom_disk.h>
49 #endif /* _KERNEL */
50 
51 #ifndef _KERNEL
52 #include <stdio.h>
53 #include <string.h>
54 #endif /* _KERNEL */
55 
56 #include <cam/cam.h>
57 #include <cam/cam_ccb.h>
58 #include <cam/cam_periph.h>
59 #include <cam/cam_xpt_periph.h>
60 #include <cam/cam_sim.h>
61 
62 #include <cam/ata/ata_all.h>
63 
64 #include <machine/md_var.h>	/* geometry translation */
65 
66 #ifdef _KERNEL
67 
68 #define ATA_MAX_28BIT_LBA               268435455UL
69 
70 typedef enum {
71 	ADA_STATE_WCACHE,
72 	ADA_STATE_NORMAL
73 } ada_state;
74 
75 typedef enum {
76 	ADA_FLAG_PACK_INVALID	= 0x001,
77 	ADA_FLAG_CAN_48BIT	= 0x002,
78 	ADA_FLAG_CAN_FLUSHCACHE	= 0x004,
79 	ADA_FLAG_CAN_NCQ	= 0x008,
80 	ADA_FLAG_CAN_DMA	= 0x010,
81 	ADA_FLAG_NEED_OTAG	= 0x020,
82 	ADA_FLAG_WENT_IDLE	= 0x040,
83 	ADA_FLAG_CAN_TRIM	= 0x080,
84 	ADA_FLAG_OPEN		= 0x100,
85 	ADA_FLAG_SCTX_INIT	= 0x200,
86 	ADA_FLAG_CAN_CFA        = 0x400,
87 	ADA_FLAG_CAN_POWERMGT   = 0x800
88 } ada_flags;
89 
90 typedef enum {
91 	ADA_Q_NONE		= 0x00
92 } ada_quirks;
93 
94 typedef enum {
95 	ADA_CCB_WCACHE		= 0x01,
96 	ADA_CCB_BUFFER_IO	= 0x03,
97 	ADA_CCB_WAITING		= 0x04,
98 	ADA_CCB_DUMP		= 0x05,
99 	ADA_CCB_TRIM		= 0x06,
100 	ADA_CCB_TYPE_MASK	= 0x0F,
101 } ada_ccb_state;
102 
103 /* Offsets into our private area for storing information */
104 #define ccb_state	ppriv_field0
105 #define ccb_bp		ppriv_ptr1
106 
107 struct disk_params {
108 	u_int8_t  heads;
109 	u_int8_t  secs_per_track;
110 	u_int32_t cylinders;
111 	u_int32_t secsize;	/* Number of bytes/logical sector */
112 	u_int64_t sectors;	/* Total number of sectors */
113 };
114 
115 #define TRIM_MAX_BLOCKS	4
116 #define TRIM_MAX_RANGES	(TRIM_MAX_BLOCKS * 64)
117 struct trim_request {
118 	uint8_t		data[TRIM_MAX_RANGES * 8];
119 	struct bio	*bps[TRIM_MAX_RANGES];
120 };
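/*
 * Each DATA SET MANAGEMENT (TRIM) range entry is 8 bytes: a 48-bit LBA in
 * bytes 0-5 and a 16-bit sector count in bytes 6-7, so a 512-byte payload
 * block holds 64 entries.  With TRIM_MAX_BLOCKS == 4, the request above can
 * carry up to 256 ranges coalesced from multiple BIO_DELETE requests.
 */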
121 
122 struct ada_softc {
123 	struct	 bio_queue_head bio_queue;
124 	struct	 bio_queue_head trim_queue;
125 	ada_state state;
126 	ada_flags flags;
127 	ada_quirks quirks;
128 	int	 ordered_tag_count;
129 	int	 outstanding_cmds;
130 	int	 trim_max_ranges;
131 	int	 trim_running;
132 	int	 write_cache;
133 #ifdef ADA_TEST_FAILURE
134 	int      force_read_error;
135 	int      force_write_error;
136 	int      periodic_read_error;
137 	int      periodic_read_count;
138 #endif
139 	struct	 disk_params params;
140 	struct	 disk *disk;
141 	struct task		sysctl_task;
142 	struct sysctl_ctx_list	sysctl_ctx;
143 	struct sysctl_oid	*sysctl_tree;
144 	struct callout		sendordered_c;
145 	struct trim_request	trim_req;
146 };
147 
148 struct ada_quirk_entry {
149 	struct scsi_inquiry_pattern inq_pat;
150 	ada_quirks quirks;
151 };
152 
153 static struct ada_quirk_entry ada_quirk_table[] =
154 {
155 	{
156 		/* Default */
157 		{
158 		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
159 		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
160 		},
161 		/*quirks*/0
162 	},
163 };
164 
165 static	disk_strategy_t	adastrategy;
166 static	dumper_t	adadump;
167 static	periph_init_t	adainit;
168 static	void		adaasync(void *callback_arg, u_int32_t code,
169 				struct cam_path *path, void *arg);
170 static	void		adasysctlinit(void *context, int pending);
171 static	periph_ctor_t	adaregister;
172 static	periph_dtor_t	adacleanup;
173 static	periph_start_t	adastart;
174 static	periph_oninv_t	adaoninvalidate;
175 static	void		adadone(struct cam_periph *periph,
176 			       union ccb *done_ccb);
177 static  int		adaerror(union ccb *ccb, u_int32_t cam_flags,
178 				u_int32_t sense_flags);
179 static void		adagetparams(struct cam_periph *periph,
180 				struct ccb_getdev *cgd);
181 static timeout_t	adasendorderedtag;
182 static void		adashutdown(void *arg, int howto);
183 static void		adasuspend(void *arg);
184 static void		adaresume(void *arg);
185 
186 #ifndef ADA_DEFAULT_TIMEOUT
187 #define ADA_DEFAULT_TIMEOUT 30	/* Timeout in seconds */
188 #endif
189 
190 #ifndef	ADA_DEFAULT_RETRY
191 #define	ADA_DEFAULT_RETRY	4
192 #endif
193 
194 #ifndef	ADA_DEFAULT_SEND_ORDERED
195 #define	ADA_DEFAULT_SEND_ORDERED	1
196 #endif
197 
198 #ifndef	ADA_DEFAULT_SPINDOWN_SHUTDOWN
199 #define	ADA_DEFAULT_SPINDOWN_SHUTDOWN	1
200 #endif
201 
202 #ifndef	ADA_DEFAULT_SPINDOWN_SUSPEND
203 #define	ADA_DEFAULT_SPINDOWN_SUSPEND	1
204 #endif
205 
206 #ifndef	ADA_DEFAULT_WRITE_CACHE
207 #define	ADA_DEFAULT_WRITE_CACHE	1
208 #endif
209 
210 /*
211  * Most platforms map the firmware-reported geometry to the actual device
212  * geometry, but some don't.  If not overridden, default to a no-op.
213  */
214 #ifndef ata_disk_firmware_geom_adjust
215 #define	ata_disk_firmware_geom_adjust(disk)
216 #endif
217 
218 static int ada_retry_count = ADA_DEFAULT_RETRY;
219 static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
220 static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;
221 static int ada_spindown_shutdown = ADA_DEFAULT_SPINDOWN_SHUTDOWN;
222 static int ada_spindown_suspend = ADA_DEFAULT_SPINDOWN_SUSPEND;
223 static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE;
224 
225 SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
226             "CAM Direct Access Disk driver");
227 SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
228            &ada_retry_count, 0, "Normal I/O retry count");
229 TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
230 SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
231            &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
232 TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
233 SYSCTL_INT(_kern_cam_ada, OID_AUTO, ada_send_ordered, CTLFLAG_RW,
234            &ada_send_ordered, 0, "Send Ordered Tags");
235 TUNABLE_INT("kern.cam.ada.ada_send_ordered", &ada_send_ordered);
236 SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RW,
237            &ada_spindown_shutdown, 0, "Spin down upon shutdown");
238 TUNABLE_INT("kern.cam.ada.spindown_shutdown", &ada_spindown_shutdown);
239 SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RW,
240            &ada_spindown_suspend, 0, "Spin down upon suspend");
241 TUNABLE_INT("kern.cam.ada.spindown_suspend", &ada_spindown_suspend);
242 SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RW,
243            &ada_write_cache, 0, "Enable disk write cache");
244 TUNABLE_INT("kern.cam.ada.write_cache", &ada_write_cache);
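/*
 * Write cache control has two levels: the global kern.cam.ada.write_cache
 * tunable/sysctl above, and a per-device kern.cam.ada.%d.write_cache value
 * (see adaregister()/adasysctlinit()) that defaults to -1, meaning "defer to
 * the global setting".  If both values are negative the drive's setting is
 * left untouched; otherwise adastart() enables the cache when the per-device
 * value is > 0, or when it is -1 and the global value is nonzero, and
 * disables it otherwise.
 */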
245 
246 /*
247  * ADA_ORDEREDTAG_INTERVAL determines how often, relative
248  * to the default timeout, we check to see whether an ordered
249  * tagged transaction is appropriate to prevent simple tag
250  * starvation.  Since we'd like to ensure that there is at least
251  * 1/2 of the timeout length left for a starved transaction to
252  * complete after we've sent an ordered tag, we must poll at least
253  * four times in every timeout period.  This takes care of the worst
254  * case where a starved transaction starts during an interval that
255  * passes the "don't send an ordered tag" test, so it takes us two
256  * intervals to determine that a tag must be sent.
257  */
258 #ifndef ADA_ORDEREDTAG_INTERVAL
259 #define ADA_ORDEREDTAG_INTERVAL 4
260 #endif
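/*
 * With the default 30 second timeout this arms the sendordered callout (see
 * adaregister()/adasendorderedtag()) every 30 * hz / 4 ticks, i.e. every
 * 7.5 seconds, so a starved request is flagged for an ordered tag within two
 * intervals (15 seconds) and still has half of the timeout left to complete.
 */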
261 
262 static struct periph_driver adadriver =
263 {
264 	adainit, "ada",
265 	TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
266 };
267 
268 PERIPHDRIVER_DECLARE(ada, adadriver);
269 
270 MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");
271 
272 static int
273 adaopen(struct disk *dp)
274 {
275 	struct cam_periph *periph;
276 	struct ada_softc *softc;
277 	int error;
278 
279 	periph = (struct cam_periph *)dp->d_drv1;
280 	if (periph == NULL) {
281 		return (ENXIO);
282 	}
283 
284 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
285 		return(ENXIO);
286 	}
287 
288 	cam_periph_lock(periph);
289 	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
290 		cam_periph_unlock(periph);
291 		cam_periph_release(periph);
292 		return (error);
293 	}
294 
295 	softc = (struct ada_softc *)periph->softc;
296 	softc->flags |= ADA_FLAG_OPEN;
297 
298 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
299 	    ("adaopen: disk=%s%d (unit %d)\n", dp->d_name, dp->d_unit,
300 	     periph->unit_number));
301 
302 	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
303 		/* The pack was previously invalidated; clear the flag on (re)open. */
304 		softc->flags &= ~ADA_FLAG_PACK_INVALID;
305 	}
306 
307 	cam_periph_unhold(periph);
308 	cam_periph_unlock(periph);
309 	return (0);
310 }
311 
312 static int
313 adaclose(struct disk *dp)
314 {
315 	struct	cam_periph *periph;
316 	struct	ada_softc *softc;
317 	union ccb *ccb;
318 	int error;
319 
320 	periph = (struct cam_periph *)dp->d_drv1;
321 	if (periph == NULL)
322 		return (ENXIO);
323 
324 	cam_periph_lock(periph);
325 	if ((error = cam_periph_hold(periph, PRIBIO)) != 0) {
326 		cam_periph_unlock(periph);
327 		cam_periph_release(periph);
328 		return (error);
329 	}
330 
331 	softc = (struct ada_softc *)periph->softc;
332 	/* We only sync the cache if the drive is capable of it. */
333 	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
334 
335 		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
336 		cam_fill_ataio(&ccb->ataio,
337 				    1,
338 				    adadone,
339 				    CAM_DIR_NONE,
340 				    0,
341 				    NULL,
342 				    0,
343 				    ada_default_timeout*1000);
344 
345 		if (softc->flags & ADA_FLAG_CAN_48BIT)
346 			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
347 		else
348 			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
349 		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
350 		    /*sense_flags*/0, softc->disk->d_devstat);
351 
352 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
353 			xpt_print(periph->path, "Synchronize cache failed\n");
354 
355 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
356 			cam_release_devq(ccb->ccb_h.path,
357 					 /*relsim_flags*/0,
358 					 /*reduction*/0,
359 					 /*timeout*/0,
360 					 /*getcount_only*/0);
361 		xpt_release_ccb(ccb);
362 	}
363 
364 	softc->flags &= ~ADA_FLAG_OPEN;
365 	cam_periph_unhold(periph);
366 	cam_periph_unlock(periph);
367 	cam_periph_release(periph);
368 	return (0);
369 }
370 
371 static void
372 adaschedule(struct cam_periph *periph)
373 {
374 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
375 
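	/*
	 * Keep ourselves scheduled while buffered I/O is pending, or while
	 * TRIM work is queued and no TRIM request is already in flight;
	 * adadone() clears trim_running and calls us again when it finishes.
	 */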
376 	if (bioq_first(&softc->bio_queue) ||
377 	    (!softc->trim_running && bioq_first(&softc->trim_queue))) {
378 		/* Have more work to do, so ensure we stay scheduled */
379 		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
380 	}
381 }
382 
383 /*
384  * Actually translate the requested transfer into one the physical driver
385  * can understand.  The transfer is described by a bio and will include
386  * only one physical transfer.
387  */
388 static void
389 adastrategy(struct bio *bp)
390 {
391 	struct cam_periph *periph;
392 	struct ada_softc *softc;
393 
394 	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
395 	if (periph == NULL) {
396 		biofinish(bp, NULL, ENXIO);
397 		return;
398 	}
399 	softc = (struct ada_softc *)periph->softc;
400 
401 	cam_periph_lock(periph);
402 
403 	/*
404 	 * If the device has been made invalid, error out
405 	 */
406 	if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
407 		cam_periph_unlock(periph);
408 		biofinish(bp, NULL, ENXIO);
409 		return;
410 	}
411 
412 	/*
413 	 * Place it in the queue of disk activities for this disk
414 	 */
415 	if (bp->bio_cmd == BIO_DELETE &&
416 	    (softc->flags & ADA_FLAG_CAN_TRIM))
417 		bioq_disksort(&softc->trim_queue, bp);
418 	else
419 		bioq_disksort(&softc->bio_queue, bp);
420 
421 	/*
422 	 * Schedule ourselves for performing the work.
423 	 */
424 	adaschedule(periph);
425 	cam_periph_unlock(periph);
426 
427 	return;
428 }
429 
430 static int
431 adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
432 {
433 	struct	    cam_periph *periph;
434 	struct	    ada_softc *softc;
435 	u_int	    secsize;
436 	union	    ccb ccb;
437 	struct	    disk *dp;
438 	uint64_t    lba;
439 	uint16_t    count;
440 
441 	dp = arg;
442 	periph = dp->d_drv1;
443 	if (periph == NULL)
444 		return (ENXIO);
445 	softc = (struct ada_softc *)periph->softc;
446 	cam_periph_lock(periph);
447 	secsize = softc->params.secsize;
448 	lba = offset / secsize;
449 	count = length / secsize;
450 
451 	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
452 		cam_periph_unlock(periph);
453 		return (ENXIO);
454 	}
455 
456 	if (length > 0) {
457 		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
458 		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
459 		cam_fill_ataio(&ccb.ataio,
460 		    0,
461 		    adadone,
462 		    CAM_DIR_OUT,
463 		    0,
464 		    (u_int8_t *) virtual,
465 		    length,
466 		    ada_default_timeout*1000);
467 		if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
468 		    (lba + count >= ATA_MAX_28BIT_LBA ||
469 		    count >= 256)) {
470 			ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
471 			    0, lba, count);
472 		} else {
473 			ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
474 			    0, lba, count);
475 		}
476 		xpt_polled_action(&ccb);
477 
478 		if ((ccb.ataio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
479 			printf("Aborting dump due to I/O error.\n");
480 			cam_periph_unlock(periph);
481 			return(EIO);
482 		}
483 		cam_periph_unlock(periph);
484 		return(0);
485 	}
486 
487 	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
488 		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
489 
490 		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
491 		cam_fill_ataio(&ccb.ataio,
492 				    1,
493 				    adadone,
494 				    CAM_DIR_NONE,
495 				    0,
496 				    NULL,
497 				    0,
498 				    ada_default_timeout*1000);
499 
500 		if (softc->flags & ADA_FLAG_CAN_48BIT)
501 			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
502 		else
503 			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
504 		xpt_polled_action(&ccb);
505 
506 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
507 			xpt_print(periph->path, "Synchronize cache failed\n");
508 
509 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
510 			cam_release_devq(ccb.ccb_h.path,
511 					 /*relsim_flags*/0,
512 					 /*reduction*/0,
513 					 /*timeout*/0,
514 					 /*getcount_only*/0);
515 	}
516 	cam_periph_unlock(periph);
517 	return (0);
518 }
519 
520 static void
521 adainit(void)
522 {
523 	cam_status status;
524 
525 	/*
526 	 * Install a global async callback.  This callback will
527 	 * receive async callbacks like "new device found".
528 	 */
529 	status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);
530 
531 	if (status != CAM_REQ_CMP) {
532 		printf("ada: Failed to attach master async callback "
533 		       "due to status 0x%x!\n", status);
534 	} else if (ada_send_ordered) {
535 
536 		/* Register our event handlers */
537 		if ((EVENTHANDLER_REGISTER(power_suspend, adasuspend,
538 					   NULL, EVENTHANDLER_PRI_LAST)) == NULL)
539 		    printf("adainit: power event registration failed!\n");
540 		if ((EVENTHANDLER_REGISTER(power_resume, adaresume,
541 					   NULL, EVENTHANDLER_PRI_LAST)) == NULL)
542 		    printf("adainit: power event registration failed!\n");
543 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
544 					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
545 		    printf("adainit: shutdown event registration failed!\n");
546 	}
547 }
548 
549 static void
550 adaoninvalidate(struct cam_periph *periph)
551 {
552 	struct ada_softc *softc;
553 
554 	softc = (struct ada_softc *)periph->softc;
555 
556 	/*
557 	 * De-register any async callbacks.
558 	 */
559 	xpt_register_async(0, adaasync, periph, periph->path);
560 
561 	softc->flags |= ADA_FLAG_PACK_INVALID;
562 
563 	/*
564 	 * Return all queued I/O with ENXIO.
565 	 * XXX Handle any transactions queued to the card
566 	 *     with XPT_ABORT_CCB.
567 	 */
568 	bioq_flush(&softc->bio_queue, NULL, ENXIO);
569 	bioq_flush(&softc->trim_queue, NULL, ENXIO);
570 
571 	disk_gone(softc->disk);
572 	xpt_print(periph->path, "lost device\n");
573 }
574 
575 static void
576 adacleanup(struct cam_periph *periph)
577 {
578 	struct ada_softc *softc;
579 
580 	softc = (struct ada_softc *)periph->softc;
581 
582 	xpt_print(periph->path, "removing device entry\n");
583 	cam_periph_unlock(periph);
584 
585 	/*
586 	 * If we can't free the sysctl tree, oh well...
587 	 */
588 	if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
589 	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
590 		xpt_print(periph->path, "can't remove sysctl context\n");
591 	}
592 
593 	disk_destroy(softc->disk);
594 	callout_drain(&softc->sendordered_c);
595 	free(softc, M_DEVBUF);
596 	cam_periph_lock(periph);
597 }
598 
599 static void
600 adaasync(void *callback_arg, u_int32_t code,
601 	struct cam_path *path, void *arg)
602 {
603 	struct cam_periph *periph;
604 	struct ada_softc *softc;
605 
606 	periph = (struct cam_periph *)callback_arg;
607 	switch (code) {
608 	case AC_FOUND_DEVICE:
609 	{
610 		struct ccb_getdev *cgd;
611 		cam_status status;
612 
613 		cgd = (struct ccb_getdev *)arg;
614 		if (cgd == NULL)
615 			break;
616 
617 		if (cgd->protocol != PROTO_ATA)
618 			break;
619 
620 		/*
621 		 * Allocate a peripheral instance for
622 		 * this device and start the probe
623 		 * process.
624 		 */
625 		status = cam_periph_alloc(adaregister, adaoninvalidate,
626 					  adacleanup, adastart,
627 					  "ada", CAM_PERIPH_BIO,
628 					  cgd->ccb_h.path, adaasync,
629 					  AC_FOUND_DEVICE, cgd);
630 
631 		if (status != CAM_REQ_CMP
632 		 && status != CAM_REQ_INPROG)
633 			printf("adaasync: Unable to attach to new device "
634 				"due to status 0x%x\n", status);
635 		break;
636 	}
637 	case AC_SENT_BDR:
638 	case AC_BUS_RESET:
639 	{
640 		struct ccb_getdev cgd;
641 
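		/*
		 * A bus reset or BDR may revert the drive's volatile write
		 * cache setting, so re-enter ADA_STATE_WCACHE (when write
		 * cache control is configured and the drive supports it) to
		 * re-issue SETFEATURES from adastart().
		 */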
642 		softc = (struct ada_softc *)periph->softc;
643 		cam_periph_async(periph, code, path, arg);
644 		if (ada_write_cache < 0 && softc->write_cache < 0)
645 			break;
646 		if (softc->state != ADA_STATE_NORMAL)
647 			break;
648 		xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
649 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
650 		xpt_action((union ccb *)&cgd);
651 		if ((cgd.ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) == 0)
652 			break;
653 		softc->state = ADA_STATE_WCACHE;
654 		cam_periph_acquire(periph);
655 		cam_freeze_devq_arg(periph->path,
656 		    RELSIM_RELEASE_RUNLEVEL, CAM_RL_DEV + 1);
657 		xpt_schedule(periph, CAM_PRIORITY_DEV);
		break;	/* avoid a second cam_periph_async() via the fallthrough */
658 	}
659 	default:
660 		cam_periph_async(periph, code, path, arg);
661 		break;
662 	}
663 }
664 
665 static void
666 adasysctlinit(void *context, int pending)
667 {
668 	struct cam_periph *periph;
669 	struct ada_softc *softc;
670 	char tmpstr[80], tmpstr2[80];
671 
672 	periph = (struct cam_periph *)context;
673 
674 	/* periph was held for us when this task was enqueued */
675 	if (periph->flags & CAM_PERIPH_INVALID) {
676 		cam_periph_release(periph);
677 		return;
678 	}
679 
680 	softc = (struct ada_softc *)periph->softc;
681 	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
682 	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
683 
684 	sysctl_ctx_init(&softc->sysctl_ctx);
685 	softc->flags |= ADA_FLAG_SCTX_INIT;
686 	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
687 		SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
688 		CTLFLAG_RD, 0, tmpstr);
689 	if (softc->sysctl_tree == NULL) {
690 		printf("adasysctlinit: unable to allocate sysctl tree\n");
691 		cam_periph_release(periph);
692 		return;
693 	}
694 
695 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
696 		OID_AUTO, "write_cache", CTLFLAG_RW | CTLFLAG_MPSAFE,
697 		&softc->write_cache, 0, "Enable disk write cache.");
698 #ifdef ADA_TEST_FAILURE
699 	/*
700 	 * Add a 'door bell' sysctl which allows one to set it from userland
701 	 * and cause something bad to happen.  For the moment, we only allow
702 	 * whacking the next read or write.
703 	 */
704 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
705 		OID_AUTO, "force_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
706 		&softc->force_read_error, 0,
707 		"Force a read error for the next N reads.");
708 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
709 		OID_AUTO, "force_write_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
710 		&softc->force_write_error, 0,
711 		"Force a write error for the next N writes.");
712 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
713 		OID_AUTO, "periodic_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
714 		&softc->periodic_read_error, 0,
715 		"Force a read error every N reads (don't set too low).");
716 #endif
717 	cam_periph_release(periph);
718 }
719 
720 static cam_status
721 adaregister(struct cam_periph *periph, void *arg)
722 {
723 	struct ada_softc *softc;
724 	struct ccb_pathinq cpi;
725 	struct ccb_getdev *cgd;
726 	char   announce_buf[80];
727 	struct disk_params *dp;
728 	caddr_t match;
729 	u_int maxio;
730 
731 	cgd = (struct ccb_getdev *)arg;
732 	if (periph == NULL) {
733 		printf("adaregister: periph was NULL!!\n");
734 		return(CAM_REQ_CMP_ERR);
735 	}
736 
737 	if (cgd == NULL) {
738 		printf("adaregister: no getdev CCB, can't register device\n");
739 		return(CAM_REQ_CMP_ERR);
740 	}
741 
742 	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
743 	    M_NOWAIT|M_ZERO);
744 
745 	if (softc == NULL) {
746 		printf("adaregister: Unable to probe new device. "
747 		    "Unable to allocate softc\n");
748 		return(CAM_REQ_CMP_ERR);
749 	}
750 
751 	bioq_init(&softc->bio_queue);
752 	bioq_init(&softc->trim_queue);
753 
754 	if (cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA &&
755 	    (cgd->inq_flags & SID_DMA))
756 		softc->flags |= ADA_FLAG_CAN_DMA;
757 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
758 		softc->flags |= ADA_FLAG_CAN_48BIT;
759 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
760 		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
761 	if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT)
762 		softc->flags |= ADA_FLAG_CAN_POWERMGT;
763 	if (cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ &&
764 	    (cgd->inq_flags & SID_DMA) && (cgd->inq_flags & SID_CmdQue))
765 		softc->flags |= ADA_FLAG_CAN_NCQ;
766 	if (cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) {
767 		softc->flags |= ADA_FLAG_CAN_TRIM;
768 		softc->trim_max_ranges = TRIM_MAX_RANGES;
769 		if (cgd->ident_data.max_dsm_blocks != 0) {
770 			softc->trim_max_ranges =
771 			    min(cgd->ident_data.max_dsm_blocks * 64,
772 				softc->trim_max_ranges);
773 		}
774 	}
775 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
776 		softc->flags |= ADA_FLAG_CAN_CFA;
777 
778 	periph->softc = softc;
779 
780 	/*
781 	 * See if this device has any quirks.
782 	 */
783 	match = cam_quirkmatch((caddr_t)&cgd->ident_data,
784 			       (caddr_t)ada_quirk_table,
785 			       sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
786 			       sizeof(*ada_quirk_table), ata_identify_match);
787 	if (match != NULL)
788 		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
789 	else
790 		softc->quirks = ADA_Q_NONE;
791 
792 	bzero(&cpi, sizeof(cpi));
793 	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE);
794 	cpi.ccb_h.func_code = XPT_PATH_INQ;
795 	xpt_action((union ccb *)&cpi);
796 
797 	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);
798 
799 	/*
800 	 * Register this media as a disk
801 	 */
802 	(void)cam_periph_hold(periph, PRIBIO);
803 	mtx_unlock(periph->sim->mtx);
804 	softc->write_cache = -1;
805 	snprintf(announce_buf, sizeof(announce_buf),
806 	    "kern.cam.ada.%d.write_cache", periph->unit_number);
807 	TUNABLE_INT_FETCH(announce_buf, &softc->write_cache);
808 	adagetparams(periph, cgd);
809 	softc->disk = disk_alloc();
810 	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
811 			  periph->unit_number, softc->params.secsize,
812 			  DEVSTAT_ALL_SUPPORTED,
813 			  DEVSTAT_TYPE_DIRECT |
814 			  XPORT_DEVSTAT_TYPE(cpi.transport),
815 			  DEVSTAT_PRIORITY_DISK);
816 	softc->disk->d_open = adaopen;
817 	softc->disk->d_close = adaclose;
818 	softc->disk->d_strategy = adastrategy;
819 	softc->disk->d_dump = adadump;
820 	softc->disk->d_name = "ada";
821 	softc->disk->d_drv1 = periph;
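	/*
	 * Cap the transfer size at what the SIM reports (falling back to
	 * DFLTPHYS when it reports nothing and clamping to MAXPHYS), and at
	 * what a single ATA command can address: 65536 sectors for 48-bit
	 * commands (16-bit sector count) or 256 sectors for 28-bit commands
	 * (8-bit sector count).
	 */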
822 	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
823 	if (maxio == 0)
824 		maxio = DFLTPHYS;	/* traditional default */
825 	else if (maxio > MAXPHYS)
826 		maxio = MAXPHYS;	/* for safety */
827 	if (softc->flags & ADA_FLAG_CAN_48BIT)
828 		maxio = min(maxio, 65536 * softc->params.secsize);
829 	else					/* 28bit ATA command limit */
830 		maxio = min(maxio, 256 * softc->params.secsize);
831 	softc->disk->d_maxsize = maxio;
832 	softc->disk->d_unit = periph->unit_number;
833 	softc->disk->d_flags = 0;
834 	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
835 		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
836 	if ((softc->flags & ADA_FLAG_CAN_TRIM) ||
837 	    ((softc->flags & ADA_FLAG_CAN_CFA) &&
838 	    !(softc->flags & ADA_FLAG_CAN_48BIT)))
839 		softc->disk->d_flags |= DISKFLAG_CANDELETE;
840 	strlcpy(softc->disk->d_ident, cgd->serial_num,
841 	    MIN(sizeof(softc->disk->d_ident), cgd->serial_num_len + 1));
842 	strlcpy(softc->disk->d_descr, cgd->ident_data.model,
843 	    MIN(sizeof(softc->disk->d_descr), sizeof(cgd->ident_data.model)));
844 	softc->disk->d_hba_vendor = cpi.hba_vendor;
845 	softc->disk->d_hba_device = cpi.hba_device;
846 	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
847 	softc->disk->d_hba_subdevice = cpi.hba_subdevice;
848 
849 	softc->disk->d_sectorsize = softc->params.secsize;
850 	softc->disk->d_mediasize = (off_t)softc->params.sectors *
851 	    softc->params.secsize;
852 	if (ata_physical_sector_size(&cgd->ident_data) !=
853 	    softc->params.secsize) {
854 		softc->disk->d_stripesize =
855 		    ata_physical_sector_size(&cgd->ident_data);
856 		softc->disk->d_stripeoffset = (softc->disk->d_stripesize -
857 		    ata_logical_sector_offset(&cgd->ident_data)) %
858 		    softc->disk->d_stripesize;
859 	}
860 	softc->disk->d_fwsectors = softc->params.secs_per_track;
861 	softc->disk->d_fwheads = softc->params.heads;
862 	ata_disk_firmware_geom_adjust(softc->disk);
863 
864 	disk_create(softc->disk, DISK_VERSION);
865 	mtx_lock(periph->sim->mtx);
866 	cam_periph_unhold(periph);
867 
868 	dp = &softc->params;
869 	snprintf(announce_buf, sizeof(announce_buf),
870 		"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
871 		(uintmax_t)(((uintmax_t)dp->secsize *
872 		dp->sectors) / (1024*1024)),
873 		(uintmax_t)dp->sectors,
874 		dp->secsize, dp->heads,
875 		dp->secs_per_track, dp->cylinders);
876 	xpt_announce_periph(periph, announce_buf);
877 
878 	/*
879 	 * Create our sysctl variables, now that we know
880 	 * we have successfully attached.
881 	 */
882 	cam_periph_acquire(periph);
883 	taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);
884 
885 	/*
886 	 * Add async callbacks for bus reset and
887 	 * bus device reset calls.  I don't bother
888 	 * checking if this fails as, in most cases,
889 	 * the system will function just fine without
890 	 * them and the only alternative would be to
891 	 * not attach the device on failure.
892 	 */
893 	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
894 			   adaasync, periph, periph->path);
895 
896 	/*
897 	 * Schedule a periodic event to occasionally send an
898 	 * ordered tag to a device.
899 	 */
900 	callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
901 	callout_reset(&softc->sendordered_c,
902 	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
903 	    adasendorderedtag, softc);
904 
905 	if ((ada_write_cache >= 0 || softc->write_cache >= 0) &&
906 	    cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
907 		softc->state = ADA_STATE_WCACHE;
908 		cam_periph_acquire(periph);
909 		cam_freeze_devq_arg(periph->path,
910 		    RELSIM_RELEASE_RUNLEVEL, CAM_RL_DEV + 1);
911 		xpt_schedule(periph, CAM_PRIORITY_DEV);
912 	} else
913 		softc->state = ADA_STATE_NORMAL;
914 
915 	return(CAM_REQ_CMP);
916 }
917 
918 static void
919 adastart(struct cam_periph *periph, union ccb *start_ccb)
920 {
921 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
922 	struct ccb_ataio *ataio = &start_ccb->ataio;
923 
924 	switch (softc->state) {
925 	case ADA_STATE_NORMAL:
926 	{
927 		struct bio *bp;
928 		u_int8_t tag_code;
929 
930 		/* Execute immediate CCB if waiting. */
931 		if (periph->immediate_priority <= periph->pinfo.priority) {
932 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
933 					("queuing for immediate ccb\n"));
934 			start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
935 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
936 					  periph_links.sle);
937 			periph->immediate_priority = CAM_PRIORITY_NONE;
938 			wakeup(&periph->ccb_list);
939 			/* Have more work to do, so ensure we stay scheduled */
940 			adaschedule(periph);
941 			break;
942 		}
943 		/* Run TRIM if not running yet. */
944 		if (!softc->trim_running &&
945 		    (bp = bioq_first(&softc->trim_queue)) != 0) {
946 			struct trim_request *req = &softc->trim_req;
947 			struct bio *bp1;
948 			int bps = 0, ranges = 0;
949 
950 			softc->trim_running = 1;
951 			bzero(req, sizeof(*req));
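			/*
			 * Build the DSM TRIM payload: split each BIO_DELETE
			 * into ranges of at most 0xffff sectors, storing the
			 * 48-bit LBA little-endian in bytes 0-5 of each
			 * 8-byte entry and the sector count in bytes 6-7,
			 * and keep pulling bios off the queue while they fit
			 * in the remaining ranges.
			 */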
952 			bp1 = bp;
953 			do {
954 				uint64_t lba = bp1->bio_pblkno;
955 				int count = bp1->bio_bcount /
956 				    softc->params.secsize;
957 
958 				bioq_remove(&softc->trim_queue, bp1);
959 				while (count > 0) {
960 					int c = min(count, 0xffff);
961 					int off = ranges * 8;
962 
963 					req->data[off + 0] = lba & 0xff;
964 					req->data[off + 1] = (lba >> 8) & 0xff;
965 					req->data[off + 2] = (lba >> 16) & 0xff;
966 					req->data[off + 3] = (lba >> 24) & 0xff;
967 					req->data[off + 4] = (lba >> 32) & 0xff;
968 					req->data[off + 5] = (lba >> 40) & 0xff;
969 					req->data[off + 6] = c & 0xff;
970 					req->data[off + 7] = (c >> 8) & 0xff;
971 					lba += c;
972 					count -= c;
973 					ranges++;
974 				}
975 				req->bps[bps++] = bp1;
976 				bp1 = bioq_first(&softc->trim_queue);
977 				if (bp1 == NULL ||
978 				    bp1->bio_bcount / softc->params.secsize >
979 				    (softc->trim_max_ranges - ranges) * 0xffff)
980 					break;
981 			} while (1);
982 			cam_fill_ataio(ataio,
983 			    ada_retry_count,
984 			    adadone,
985 			    CAM_DIR_OUT,
986 			    0,
987 			    req->data,
988 			    ((ranges + 63) / 64) * 512,
989 			    ada_default_timeout * 1000);
990 			ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
991 			    ATA_DSM_TRIM, 0, (ranges + 63) / 64);
992 			start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
993 			goto out;
994 		}
995 		/* Run regular command. */
996 		bp = bioq_first(&softc->bio_queue);
997 		if (bp == NULL) {
998 			xpt_release_ccb(start_ccb);
999 			break;
1000 		}
1001 		bioq_remove(&softc->bio_queue, bp);
1002 
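		/*
		 * An ordered bio, or a pending request from
		 * adasendorderedtag(), clears tag_code; a zero tag_code keeps
		 * the command off the NCQ path below so it is issued as an
		 * ordinary, non-queued command.
		 */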
1003 		if ((bp->bio_flags & BIO_ORDERED) != 0
1004 		 || (softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
1005 			softc->flags &= ~ADA_FLAG_NEED_OTAG;
1006 			softc->ordered_tag_count++;
1007 			tag_code = 0;
1008 		} else {
1009 			tag_code = 1;
1010 		}
1011 		switch (bp->bio_cmd) {
1012 		case BIO_READ:
1013 		case BIO_WRITE:
1014 		{
1015 			uint64_t lba = bp->bio_pblkno;
1016 			uint16_t count = bp->bio_bcount / softc->params.secsize;
1017 #ifdef ADA_TEST_FAILURE
1018 			int fail = 0;
1019 
1020 			/*
1021 			 * Support the failure ioctls.  If the command is a
1022 			 * read, and there are pending forced read errors, or
1023 			 * if a write and pending write errors, then fail this
1024 			 * operation with EIO.  This is useful for testing
1025 			 * purposes.  Also, support having every Nth read fail.
1026 			 *
1027 			 * This is a rather blunt tool.
1028 			 */
1029 			if (bp->bio_cmd == BIO_READ) {
1030 				if (softc->force_read_error) {
1031 					softc->force_read_error--;
1032 					fail = 1;
1033 				}
1034 				if (softc->periodic_read_error > 0) {
1035 					if (++softc->periodic_read_count >=
1036 					    softc->periodic_read_error) {
1037 						softc->periodic_read_count = 0;
1038 						fail = 1;
1039 					}
1040 				}
1041 			} else {
1042 				if (softc->force_write_error) {
1043 					softc->force_write_error--;
1044 					fail = 1;
1045 				}
1046 			}
1047 			if (fail) {
1048 				bp->bio_error = EIO;
1049 				bp->bio_flags |= BIO_ERROR;
1050 				biodone(bp);
1051 				xpt_release_ccb(start_ccb);
1052 				adaschedule(periph);
1053 				return;
1054 			}
1055 #endif
1056 			cam_fill_ataio(ataio,
1057 			    ada_retry_count,
1058 			    adadone,
1059 			    bp->bio_cmd == BIO_READ ?
1060 			        CAM_DIR_IN : CAM_DIR_OUT,
1061 			    tag_code,
1062 			    bp->bio_data,
1063 			    bp->bio_bcount,
1064 			    ada_default_timeout*1000);
1065 
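			/*
			 * Pick the command form: NCQ (FPDMA) when the drive
			 * supports it and this is not an ordered request,
			 * otherwise a 48-bit command when the LBA or sector
			 * count exceeds the 28-bit limits, otherwise a 28-bit
			 * command, where a count of 0 encodes 256 sectors.
			 */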
1066 			if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) {
1067 				if (bp->bio_cmd == BIO_READ) {
1068 					ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
1069 					    lba, count);
1070 				} else {
1071 					ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
1072 					    lba, count);
1073 				}
1074 			} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
1075 			    (lba + count >= ATA_MAX_28BIT_LBA ||
1076 			    count > 256)) {
1077 				if (softc->flags & ADA_FLAG_CAN_DMA) {
1078 					if (bp->bio_cmd == BIO_READ) {
1079 						ata_48bit_cmd(ataio, ATA_READ_DMA48,
1080 						    0, lba, count);
1081 					} else {
1082 						ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
1083 						    0, lba, count);
1084 					}
1085 				} else {
1086 					if (bp->bio_cmd == BIO_READ) {
1087 						ata_48bit_cmd(ataio, ATA_READ_MUL48,
1088 						    0, lba, count);
1089 					} else {
1090 						ata_48bit_cmd(ataio, ATA_WRITE_MUL48,
1091 						    0, lba, count);
1092 					}
1093 				}
1094 			} else {
1095 				if (count == 256)
1096 					count = 0;
1097 				if (softc->flags & ADA_FLAG_CAN_DMA) {
1098 					if (bp->bio_cmd == BIO_READ) {
1099 						ata_28bit_cmd(ataio, ATA_READ_DMA,
1100 						    0, lba, count);
1101 					} else {
1102 						ata_28bit_cmd(ataio, ATA_WRITE_DMA,
1103 						    0, lba, count);
1104 					}
1105 				} else {
1106 					if (bp->bio_cmd == BIO_READ) {
1107 						ata_28bit_cmd(ataio, ATA_READ_MUL,
1108 						    0, lba, count);
1109 					} else {
1110 						ata_28bit_cmd(ataio, ATA_WRITE_MUL,
1111 						    0, lba, count);
1112 					}
1113 				}
1114 			}
1115 			break;
1116 		}
1117 		case BIO_DELETE:
1118 		{
1119 			uint64_t lba = bp->bio_pblkno;
1120 			uint16_t count = bp->bio_bcount / softc->params.secsize;
1121 
1122 			cam_fill_ataio(ataio,
1123 			    ada_retry_count,
1124 			    adadone,
1125 			    CAM_DIR_NONE,
1126 			    0,
1127 			    NULL,
1128 			    0,
1129 			    ada_default_timeout*1000);
1130 
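			/*
			 * CFA ERASE is a 28-bit command, so a sector count of
			 * 0 encodes 256 sectors; larger requests are clamped
			 * to that limit here.
			 */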
1131 			if (count >= 256)
1132 				count = 0;
1133 			ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
1134 			break;
1135 		}
1136 		case BIO_FLUSH:
1137 			cam_fill_ataio(ataio,
1138 			    1,
1139 			    adadone,
1140 			    CAM_DIR_NONE,
1141 			    0,
1142 			    NULL,
1143 			    0,
1144 			    ada_default_timeout*1000);
1145 
1146 			if (softc->flags & ADA_FLAG_CAN_48BIT)
1147 				ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
1148 			else
1149 				ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
1150 			break;
1151 		}
1152 		start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
1153 out:
1154 		start_ccb->ccb_h.ccb_bp = bp;
1155 		softc->outstanding_cmds++;
1156 		xpt_action(start_ccb);
1157 
1158 		/* May have more work to do, so ensure we stay scheduled */
1159 		adaschedule(periph);
1160 		break;
1161 	}
1162 	case ADA_STATE_WCACHE:
1163 	{
1164 		cam_fill_ataio(ataio,
1165 		    1,
1166 		    adadone,
1167 		    CAM_DIR_NONE,
1168 		    0,
1169 		    NULL,
1170 		    0,
1171 		    ada_default_timeout*1000);
1172 
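		/*
		 * Issue SETFEATURES to enable the write cache when the
		 * per-device write_cache sysctl is > 0, or when it is left at
		 * -1 and the global ada_write_cache tunable is nonzero;
		 * disable it otherwise.
		 */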
1173 		ata_28bit_cmd(ataio, ATA_SETFEATURES, (softc->write_cache > 0 ||
1174 		     (softc->write_cache < 0 && ada_write_cache)) ?
1175 		    ATA_SF_ENAB_WCACHE : ATA_SF_DIS_WCACHE, 0, 0);
1176 		start_ccb->ccb_h.ccb_state = ADA_CCB_WCACHE;
1177 		xpt_action(start_ccb);
1178 		break;
1179 	}
1180 	}
1181 }
1182 
1183 static void
1184 adadone(struct cam_periph *periph, union ccb *done_ccb)
1185 {
1186 	struct ada_softc *softc;
1187 	struct ccb_ataio *ataio;
1188 
1189 	softc = (struct ada_softc *)periph->softc;
1190 	ataio = &done_ccb->ataio;
1191 	switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
1192 	case ADA_CCB_BUFFER_IO:
1193 	case ADA_CCB_TRIM:
1194 	{
1195 		struct bio *bp;
1196 
1197 		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
1198 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1199 			int error;
1200 
1201 			error = adaerror(done_ccb, 0, 0);
1202 			if (error == ERESTART) {
1203 				/* A retry was scheduled, so just return. */
1204 				return;
1205 			}
1206 			if (error != 0) {
1207 				if (error == ENXIO) {
1208 					/*
1209 					 * Catastrophic error.  Mark our pack as
1210 					 * invalid.
1211 					 */
1212 					/*
1213 					 * XXX See if this is really a media
1214 					 * XXX change first?
1215 					 */
1216 					xpt_print(periph->path,
1217 					    "Invalidating pack\n");
1218 					softc->flags |= ADA_FLAG_PACK_INVALID;
1219 				}
1220 				bp->bio_error = error;
1221 				bp->bio_resid = bp->bio_bcount;
1222 				bp->bio_flags |= BIO_ERROR;
1223 			} else {
1224 				bp->bio_resid = ataio->resid;
1225 				bp->bio_error = 0;
1226 				if (bp->bio_resid != 0)
1227 					bp->bio_flags |= BIO_ERROR;
1228 			}
1229 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1230 				cam_release_devq(done_ccb->ccb_h.path,
1231 						 /*relsim_flags*/0,
1232 						 /*reduction*/0,
1233 						 /*timeout*/0,
1234 						 /*getcount_only*/0);
1235 		} else {
1236 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1237 				panic("REQ_CMP with QFRZN");
1238 			bp->bio_resid = ataio->resid;
1239 			if (ataio->resid > 0)
1240 				bp->bio_flags |= BIO_ERROR;
1241 		}
1242 		softc->outstanding_cmds--;
1243 		if (softc->outstanding_cmds == 0)
1244 			softc->flags |= ADA_FLAG_WENT_IDLE;
1245 		if ((ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) ==
1246 		    ADA_CCB_TRIM) {
1247 			struct trim_request *req =
1248 			    (struct trim_request *)ataio->data_ptr;
1249 			int i;
1250 
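			/*
			 * bps[0] is the bio attached to the CCB and is
			 * completed as 'bp' below; propagate the status to
			 * the remaining coalesced bios starting at index 1.
			 */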
1251 			for (i = 1; i < softc->trim_max_ranges &&
1252 			    req->bps[i]; i++) {
1253 				struct bio *bp1 = req->bps[i];
1254 
1255 				bp1->bio_resid = bp->bio_resid;
1256 				bp1->bio_error = bp->bio_error;
1257 				if (bp->bio_flags & BIO_ERROR)
1258 					bp1->bio_flags |= BIO_ERROR;
1259 				biodone(bp1);
1260 			}
1261 			softc->trim_running = 0;
1262 			biodone(bp);
1263 			adaschedule(periph);
1264 		} else
1265 			biodone(bp);
1266 		break;
1267 	}
1268 	case ADA_CCB_WCACHE:
1269 	{
1270 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1271 			if (adaerror(done_ccb, 0, 0) == ERESTART) {
1272 				return;
1273 			} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1274 				cam_release_devq(done_ccb->ccb_h.path,
1275 				    /*relsim_flags*/0,
1276 				    /*reduction*/0,
1277 				    /*timeout*/0,
1278 				    /*getcount_only*/0);
1279 			}
1280 		}
1281 
1282 		softc->state = ADA_STATE_NORMAL;
1283 		/*
1284 		 * Since our peripheral may be invalidated by an error
1285 		 * above or an external event, we must release our CCB
1286 		 * before releasing the reference on the peripheral.
1287 		 * The peripheral will only go away once the last reference
1288 		 * is removed, and we need it around for the CCB release
1289 		 * operation.
1290 		 */
1291 		xpt_release_ccb(done_ccb);
1292 		cam_release_devq(periph->path,
1293 		    RELSIM_RELEASE_RUNLEVEL, 0, CAM_RL_DEV + 1, FALSE);
1294 		adaschedule(periph);
1295 		cam_periph_release_locked(periph);
1296 		return;
1297 	}
1298 	case ADA_CCB_WAITING:
1299 	{
1300 		/* Caller will release the CCB */
1301 		wakeup(&done_ccb->ccb_h.cbfcnp);
1302 		return;
1303 	}
1304 	case ADA_CCB_DUMP:
1305 		/* No-op.  We're polling */
1306 		return;
1307 	default:
1308 		break;
1309 	}
1310 	xpt_release_ccb(done_ccb);
1311 }
1312 
1313 static int
1314 adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1315 {
1316 
1317 	return(cam_periph_error(ccb, cam_flags, sense_flags, NULL));
1318 }
1319 
1320 static void
1321 adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd)
1322 {
1323 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
1324 	struct disk_params *dp = &softc->params;
1325 	u_int64_t lbasize48;
1326 	u_int32_t lbasize;
1327 
1328 	dp->secsize = ata_logical_sector_size(&cgd->ident_data);
1329 	if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
1330 		cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
1331 		dp->heads = cgd->ident_data.current_heads;
1332 		dp->secs_per_track = cgd->ident_data.current_sectors;
1333 		dp->cylinders = cgd->ident_data.cylinders;
1334 		dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
1335 			  ((u_int32_t)cgd->ident_data.current_size_2 << 16);
1336 	} else {
1337 		dp->heads = cgd->ident_data.heads;
1338 		dp->secs_per_track = cgd->ident_data.sectors;
1339 		dp->cylinders = cgd->ident_data.cylinders;
1340 		dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
1341 	}
1342 	lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
1343 		  ((u_int32_t)cgd->ident_data.lba_size_2 << 16);
1344 
1345 	/*
	 * Use the 28-bit LBA size when the drive reports the 16383-cylinder
	 * CHS ceiling (the convention for drives too large for CHS
	 * addressing) or when it is larger than the capacity computed from
	 * CHS above.
	 */
1346 	if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
1347 		dp->sectors = lbasize;
1348 
1349 	/* use the 48bit LBA size if valid */
1350 	lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
1351 		    ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
1352 		    ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
1353 		    ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
1354 	if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
1355 	    lbasize48 > ATA_MAX_28BIT_LBA)
1356 		dp->sectors = lbasize48;
1357 }
1358 
1359 static void
1360 adasendorderedtag(void *arg)
1361 {
1362 	struct ada_softc *softc = arg;
1363 
1364 	if (ada_send_ordered) {
1365 		if ((softc->ordered_tag_count == 0)
1366 		 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
1367 			softc->flags |= ADA_FLAG_NEED_OTAG;
1368 		}
1369 		if (softc->outstanding_cmds > 0)
1370 			softc->flags &= ~ADA_FLAG_WENT_IDLE;
1371 
1372 		softc->ordered_tag_count = 0;
1373 	}
1374 	/* Queue us up again */
1375 	callout_reset(&softc->sendordered_c,
1376 	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
1377 	    adasendorderedtag, softc);
1378 }
1379 
1380 /*
1381  * Step through all ADA peripheral drivers, and if the device is still open,
1382  * sync the disk cache to physical media.
1383  */
1384 static void
1385 adaflush(void)
1386 {
1387 	struct cam_periph *periph;
1388 	struct ada_softc *softc;
1389 
1390 	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
1391 		union ccb ccb;
1392 
1393 		/* If we panicked with the lock held, do not recurse here. */
1394 		if (cam_periph_owned(periph))
1395 			continue;
1396 		cam_periph_lock(periph);
1397 		softc = (struct ada_softc *)periph->softc;
1398 		/*
1399 		 * We only sync the cache if the drive is still open, and
1400 		 * if the drive is capable of it.
1401 		 */
1402 		if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
1403 		    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
1404 			cam_periph_unlock(periph);
1405 			continue;
1406 		}
1407 
1408 		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1409 
1410 		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
1411 		cam_fill_ataio(&ccb.ataio,
1412 				    1,
1413 				    adadone,
1414 				    CAM_DIR_NONE,
1415 				    0,
1416 				    NULL,
1417 				    0,
1418 				    ada_default_timeout*1000);
1419 
1420 		if (softc->flags & ADA_FLAG_CAN_48BIT)
1421 			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
1422 		else
1423 			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
1424 		xpt_polled_action(&ccb);
1425 
1426 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
1427 			xpt_print(periph->path, "Synchronize cache failed\n");
1428 
1429 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1430 			cam_release_devq(ccb.ccb_h.path,
1431 					 /*relsim_flags*/0,
1432 					 /*reduction*/0,
1433 					 /*timeout*/0,
1434 					 /*getcount_only*/0);
1435 		cam_periph_unlock(periph);
1436 	}
1437 }
1438 
1439 static void
1440 adaspindown(uint8_t cmd, int flags)
1441 {
1442 	struct cam_periph *periph;
1443 	struct ada_softc *softc;
1444 
1445 	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
1446 		union ccb ccb;
1447 
1448 		/* If we panicked with the lock held, do not recurse here. */
1449 		if (cam_periph_owned(periph))
1450 			continue;
1451 		cam_periph_lock(periph);
1452 		softc = (struct ada_softc *)periph->softc;
1453 		/*
1454 		 * We only spin down the drive if it is capable of it.
1455 		 */
1456 		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
1457 			cam_periph_unlock(periph);
1458 			continue;
1459 		}
1460 
1461 		if (bootverbose)
1462 			xpt_print(periph->path, "spin-down\n");
1463 
1464 		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1465 
1466 		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
1467 		cam_fill_ataio(&ccb.ataio,
1468 				    1,
1469 				    adadone,
1470 				    CAM_DIR_NONE | flags,
1471 				    0,
1472 				    NULL,
1473 				    0,
1474 				    ada_default_timeout*1000);
1475 
1476 		ata_28bit_cmd(&ccb.ataio, cmd, 0, 0, 0);
1477 		xpt_polled_action(&ccb);
1478 
1479 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
1480 			xpt_print(periph->path, "Spin-down disk failed\n");
1481 
1482 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1483 			cam_release_devq(ccb.ccb_h.path,
1484 					 /*relsim_flags*/0,
1485 					 /*reduction*/0,
1486 					 /*timeout*/0,
1487 					 /*getcount_only*/0);
1488 		cam_periph_unlock(periph);
1489 	}
1490 }
1491 
1492 static void
1493 adashutdown(void *arg, int howto)
1494 {
1495 
1496 	adaflush();
1497 	if (ada_spindown_shutdown != 0 &&
1498 	    (howto & (RB_HALT | RB_POWEROFF)) != 0)
1499 		adaspindown(ATA_STANDBY_IMMEDIATE, 0);
1500 }
1501 
1502 static void
1503 adasuspend(void *arg)
1504 {
1505 
1506 	adaflush();
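	/*
	 * ATA_SLEEP can only be exited by a reset, so the sleep command is
	 * issued with CAM_DEV_QFREEZE; the device queue stays frozen until
	 * adaresume() releases it.
	 */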
1507 	if (ada_spindown_suspend != 0)
1508 		adaspindown(ATA_SLEEP, CAM_DEV_QFREEZE);
1509 }
1510 
1511 static void
1512 adaresume(void *arg)
1513 {
1514 	struct cam_periph *periph;
1515 	struct ada_softc *softc;
1516 
1517 	if (ada_spindown_suspend == 0)
1518 		return;
1519 
1520 	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
1521 		cam_periph_lock(periph);
1522 		softc = (struct ada_softc *)periph->softc;
1523 		/*
1524 		 * Only drives capable of power management were spun down on suspend.
1525 		 */
1526 		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
1527 			cam_periph_unlock(periph);
1528 			continue;
1529 		}
1530 
1531 		if (bootverbose)
1532 			xpt_print(periph->path, "resume\n");
1533 
1534 		/*
1535 		 * Drop freeze taken due to CAM_DEV_QFREEZE flag set on
1536 		 * sleep request.
1537 		 */
1538 		cam_release_devq(periph->path,
1539 			 /*relsim_flags*/0,
1540 			 /*openings*/0,
1541 			 /*timeout*/0,
1542 			 /*getcount_only*/0);
1543 
1544 		cam_periph_unlock(periph);
1545 	}
1546 }
1547 
1548 #endif /* _KERNEL */
1549