xref: /freebsd/sys/cam/ata/ata_da.c (revision 891b8ed4672a213bbe6f3f10522eeadb34d01b76)
1 /*-
2  * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_ada.h"
31 
32 #include <sys/param.h>
33 
34 #ifdef _KERNEL
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/bio.h>
38 #include <sys/sysctl.h>
39 #include <sys/taskqueue.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/conf.h>
43 #include <sys/devicestat.h>
44 #include <sys/eventhandler.h>
45 #include <sys/malloc.h>
46 #include <sys/cons.h>
47 #include <sys/reboot.h>
48 #include <geom/geom_disk.h>
49 #endif /* _KERNEL */
50 
51 #ifndef _KERNEL
52 #include <stdio.h>
53 #include <string.h>
54 #endif /* _KERNEL */
55 
56 #include <cam/cam.h>
57 #include <cam/cam_ccb.h>
58 #include <cam/cam_periph.h>
59 #include <cam/cam_xpt_periph.h>
60 #include <cam/cam_sim.h>
61 
62 #include <cam/ata/ata_all.h>
63 
64 #include <machine/md_var.h>	/* geometry translation */
65 
66 #ifdef _KERNEL
67 
/* Largest LBA addressable by a 28-bit ATA command (2^28 - 1). */
#define ATA_MAX_28BIT_LBA               268435455UL

/* State machine for the periph: write-cache setup, then normal I/O. */
typedef enum {
	ADA_STATE_WCACHE,
	ADA_STATE_NORMAL
} ada_state;

/* Per-device feature/status flags kept in ada_softc.flags. */
typedef enum {
	ADA_FLAG_PACK_INVALID	= 0x001,
	ADA_FLAG_CAN_48BIT	= 0x002,
	ADA_FLAG_CAN_FLUSHCACHE	= 0x004,
	ADA_FLAG_CAN_NCQ	= 0x008,
	ADA_FLAG_CAN_DMA	= 0x010,
	ADA_FLAG_NEED_OTAG	= 0x020,
	ADA_FLAG_WENT_IDLE	= 0x040,
	ADA_FLAG_CAN_TRIM	= 0x080,
	ADA_FLAG_OPEN		= 0x100,
	ADA_FLAG_SCTX_INIT	= 0x200,
	ADA_FLAG_CAN_CFA        = 0x400,
	ADA_FLAG_CAN_POWERMGT   = 0x800
} ada_flags;

/* Device quirks; only the empty default exists so far. */
typedef enum {
	ADA_Q_NONE		= 0x00
} ada_quirks;

/* CCB type tags stored in ccb_state so adadone() can dispatch. */
typedef enum {
	ADA_CCB_WCACHE		= 0x01,
	ADA_CCB_BUFFER_IO	= 0x03,
	ADA_CCB_WAITING		= 0x04,
	ADA_CCB_DUMP		= 0x05,
	ADA_CCB_TRIM		= 0x06,
	ADA_CCB_TYPE_MASK	= 0x0F,
} ada_ccb_state;
102 
/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

/* Geometry/size parameters derived from the drive's IDENTIFY data. */
struct disk_params {
	u_int8_t  heads;
	u_int8_t  secs_per_track;
	u_int32_t cylinders;
	u_int32_t secsize;	/* Number of bytes/logical sector */
	u_int64_t sectors;	/* Total number sectors */
};
114 
/*
 * One DSM TRIM data block (512 bytes) holds 64 range entries of 8
 * bytes each; TRIM_MAX_BLOCKS bounds how many blocks are sent in a
 * single DATA SET MANAGEMENT command.
 */
#define TRIM_MAX_BLOCKS	4
/* Parenthesized so the macro expands safely inside any expression. */
#define TRIM_MAX_RANGES	(TRIM_MAX_BLOCKS * 64)
struct trim_request {
	/* Raw DSM payload: 6-byte LBA + 2-byte count per range entry. */
	uint8_t		data[TRIM_MAX_RANGES * 8];
	/* Originating bios to complete when the TRIM command finishes. */
	struct bio	*bps[TRIM_MAX_RANGES];
};
121 
/* Per-device driver state, hung off cam_periph.softc. */
struct ada_softc {
	struct	 bio_queue_head bio_queue;	/* regular read/write bios */
	struct	 bio_queue_head trim_queue;	/* BIO_DELETE bios awaiting TRIM */
	ada_state state;
	ada_flags flags;
	ada_quirks quirks;
	int	 ordered_tag_count;
	int	 outstanding_cmds;
	int	 trim_max_ranges;	/* drive's DSM range limit (<= TRIM_MAX_RANGES) */
	int	 trim_running;		/* nonzero while a TRIM CCB is in flight */
	int	 write_cache;		/* per-unit write cache tunable; -1 = unset */
#ifdef ADA_TEST_FAILURE
	/* Error-injection knobs exposed via sysctl (testing only). */
	int      force_read_error;
	int      force_write_error;
	int      periodic_read_error;
	int      periodic_read_count;
#endif
	struct	 disk_params params;
	struct	 disk *disk;
	struct task		sysctl_task;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	struct callout		sendordered_c;
	struct trim_request	trim_req;
};

/* Maps an inquiry pattern to the quirks that apply to matching drives. */
struct ada_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	ada_quirks quirks;
};

static struct ada_quirk_entry ada_quirk_table[] =
{
	{
		/* Default */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0
	},
};
164 
/* Forward declarations for the disk(9) and CAM periph entry points. */
static	disk_strategy_t	adastrategy;
static	dumper_t	adadump;
static	periph_init_t	adainit;
static	void		adaasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	void		adasysctlinit(void *context, int pending);
static	periph_ctor_t	adaregister;
static	periph_dtor_t	adacleanup;
static	periph_start_t	adastart;
static	periph_oninv_t	adaoninvalidate;
static	void		adadone(struct cam_periph *periph,
			       union ccb *done_ccb);
static  int		adaerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static void		adagetparams(struct cam_periph *periph,
				struct ccb_getdev *cgd);
static timeout_t	adasendorderedtag;
static void		adashutdown(void *arg, int howto);
183 
/* Compile-time defaults; each may be overridden from the kernel config. */
#ifndef ADA_DEFAULT_TIMEOUT
#define ADA_DEFAULT_TIMEOUT 30	/* Timeout in seconds */
#endif

#ifndef	ADA_DEFAULT_RETRY
#define	ADA_DEFAULT_RETRY	4
#endif

#ifndef	ADA_DEFAULT_SEND_ORDERED
#define	ADA_DEFAULT_SEND_ORDERED	1
#endif

#ifndef	ADA_DEFAULT_SPINDOWN_SHUTDOWN
#define	ADA_DEFAULT_SPINDOWN_SHUTDOWN	1
#endif

#ifndef	ADA_DEFAULT_WRITE_CACHE
#define	ADA_DEFAULT_WRITE_CACHE	1
#endif

/*
 * Most platforms map firmware geometry to actual, but some don't.  If
 * not overridden, default to nothing.
 */
#ifndef ata_disk_firmware_geom_adjust
#define	ata_disk_firmware_geom_adjust(disk)
#endif

/* Run-time knobs, exposed below as kern.cam.ada.* sysctls/tunables. */
static int ada_retry_count = ADA_DEFAULT_RETRY;
static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;
static int ada_spindown_shutdown = ADA_DEFAULT_SPINDOWN_SHUTDOWN;
static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE;

SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
            "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
           &ada_retry_count, 0, "Normal I/O retry count");
TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
           &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, ada_send_ordered, CTLFLAG_RW,
           &ada_send_ordered, 0, "Send Ordered Tags");
TUNABLE_INT("kern.cam.ada.ada_send_ordered", &ada_send_ordered);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RW,
           &ada_spindown_shutdown, 0, "Spin down upon shutdown");
TUNABLE_INT("kern.cam.ada.spindown_shutdown", &ada_spindown_shutdown);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RW,
           &ada_write_cache, 0, "Enable disk write cache");
TUNABLE_INT("kern.cam.ada.write_cache", &ada_write_cache);

/*
 * ADA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * meets the requirement "don't send an ordered tag" test so it takes
 * us two intervals to determine that a tag must be sent.
 */
#ifndef ADA_ORDEREDTAG_INTERVAL
#define ADA_ORDEREDTAG_INTERVAL 4
#endif

/* Registration glue tying this driver into the CAM periph framework. */
static struct periph_driver adadriver =
{
	adainit, "ada",
	TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(ada, adadriver);

MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");
261 
/*
 * adaopen: disk(9) open entry point.  Takes a periph reference and a
 * hold, marks the unit open and clears any stale pack-invalid state.
 * Returns 0 on success, ENXIO if the periph is gone, or the error
 * from cam_periph_hold().
 */
static int
adaopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	int unit;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL) {
		return (ENXIO);
	}

	/* Reference the periph so it can't be deallocated while open. */
	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		return(ENXIO);
	}

	cam_periph_lock(periph);
	/* PCATCH: let signals interrupt the wait for the hold. */
	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	unit = periph->unit_number;
	softc = (struct ada_softc *)periph->softc;
	softc->flags |= ADA_FLAG_OPEN;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("adaopen: disk=%s%d (unit %d)\n", dp->d_name, dp->d_unit,
	     unit));

	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
		/* Clear the stale pack-invalid flag on (re)open. */
		softc->flags &= ~ADA_FLAG_PACK_INVALID;
	}

	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	return (0);
}
303 
/*
 * adaclose: disk(9) close entry point.  Flushes the drive's write
 * cache (when the drive supports it) before clearing the open flag
 * and dropping the reference taken in adaopen().
 */
static int
adaclose(struct disk *dp)
{
	struct	cam_periph *periph;
	struct	ada_softc *softc;
	union ccb *ccb;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);

	cam_periph_lock(periph);
	/* No PCATCH: close must run to completion, not be signal-aborted. */
	if ((error = cam_periph_hold(periph, PRIBIO)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	softc = (struct ada_softc *)periph->softc;
	/* We only sync the cache if the drive is capable of it. */
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {

		/* Issue a synchronous FLUSH CACHE and wait for completion. */
		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		cam_fill_ataio(&ccb->ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
		    /*sense_flags*/0, softc->disk->d_devstat);

		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		/* Release any device-queue freeze the command left behind. */
		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		xpt_release_ccb(ccb);
	}

	softc->flags &= ~ADA_FLAG_OPEN;
	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}
362 
363 static void
364 adaschedule(struct cam_periph *periph)
365 {
366 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
367 
368 	if (bioq_first(&softc->bio_queue) ||
369 	    (!softc->trim_running && bioq_first(&softc->trim_queue))) {
370 		/* Have more work to do, so ensure we stay scheduled */
371 		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
372 	}
373 }
374 
375 /*
376  * Actually translate the requested transfer into one the physical driver
377  * can understand.  The transfer is described by a buf and will include
378  * only one physical transfer.
379  */
380 static void
381 adastrategy(struct bio *bp)
382 {
383 	struct cam_periph *periph;
384 	struct ada_softc *softc;
385 
386 	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
387 	if (periph == NULL) {
388 		biofinish(bp, NULL, ENXIO);
389 		return;
390 	}
391 	softc = (struct ada_softc *)periph->softc;
392 
393 	cam_periph_lock(periph);
394 
395 	/*
396 	 * If the device has been made invalid, error out
397 	 */
398 	if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
399 		cam_periph_unlock(periph);
400 		biofinish(bp, NULL, ENXIO);
401 		return;
402 	}
403 
404 	/*
405 	 * Place it in the queue of disk activities for this disk
406 	 */
407 	if (bp->bio_cmd == BIO_DELETE &&
408 	    (softc->flags & ADA_FLAG_CAN_TRIM))
409 		bioq_disksort(&softc->trim_queue, bp);
410 	else
411 		bioq_disksort(&softc->bio_queue, bp);
412 
413 	/*
414 	 * Schedule ourselves for performing the work.
415 	 */
416 	adaschedule(periph);
417 	cam_periph_unlock(periph);
418 
419 	return;
420 }
421 
/*
 * adadump: disk(9) dump entry point used during kernel crash dumps.
 * Runs in a polled context, so CCBs are issued via xpt_polled_action()
 * rather than the normal completion path.  A call with length == 0
 * marks the end of the dump and triggers a cache flush if supported.
 */
static int
adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct	    cam_periph *periph;
	struct	    ada_softc *softc;
	u_int	    secsize;
	union	    ccb ccb;
	struct	    disk *dp;
	uint64_t    lba;
	uint16_t    count;

	dp = arg;
	periph = dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);
	softc = (struct ada_softc *)periph->softc;
	cam_periph_lock(periph);
	secsize = softc->params.secsize;
	lba = offset / secsize;
	count = length / secsize;

	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
		cam_periph_unlock(periph);
		return (ENXIO);
	}

	if (length > 0) {
		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		/* retries == 0: a dump write either works or the dump aborts. */
		cam_fill_ataio(&ccb.ataio,
		    0,
		    adadone,
		    CAM_DIR_OUT,
		    0,
		    (u_int8_t *) virtual,
		    length,
		    ada_default_timeout*1000);
		/*
		 * Use a 48-bit command when the range reaches beyond
		 * 28-bit LBA space or the sector count is too large for
		 * a 28-bit command.
		 */
		if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
		    (lba + count >= ATA_MAX_28BIT_LBA ||
		    count >= 256)) {
			ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
			    0, lba, count);
		} else {
			ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
			    0, lba, count);
		}
		xpt_polled_action(&ccb);

		if ((ccb.ataio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			printf("Aborting dump due to I/O error.\n");
			cam_periph_unlock(periph);
			return(EIO);
		}
		cam_periph_unlock(periph);
		return(0);
	}

	/* length == 0: end of dump; flush the write cache if possible. */
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		/* Release any device-queue freeze left by the command. */
		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
	}
	cam_periph_unlock(periph);
	return (0);
}
511 
512 static void
513 adainit(void)
514 {
515 	cam_status status;
516 
517 	/*
518 	 * Install a global async callback.  This callback will
519 	 * receive async callbacks like "new device found".
520 	 */
521 	status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);
522 
523 	if (status != CAM_REQ_CMP) {
524 		printf("ada: Failed to attach master async callback "
525 		       "due to status 0x%x!\n", status);
526 	} else if (ada_send_ordered) {
527 
528 		/* Register our shutdown event handler */
529 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
530 					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
531 		    printf("adainit: shutdown event registration failed!\n");
532 	}
533 }
534 
/*
 * adaoninvalidate: CAM periph invalidation hook.  Called when the
 * device disappears; tears down async callbacks, fails all queued
 * I/O with ENXIO, and schedules the disk(9) instance for removal.
 */
static void
adaoninvalidate(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, adaasync, periph, periph->path);

	softc->flags |= ADA_FLAG_PACK_INVALID;

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	bioq_flush(&softc->bio_queue, NULL, ENXIO);
	bioq_flush(&softc->trim_queue, NULL, ENXIO);

	disk_gone(softc->disk);
	xpt_print(periph->path, "lost device\n");
}
560 
/*
 * adacleanup: CAM periph destructor.  Frees the sysctl tree, the
 * disk(9) instance, the ordered-tag callout, and the softc.  The
 * periph lock is dropped around the teardown calls because they may
 * sleep, then reacquired for the caller.
 */
static void
adacleanup(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	xpt_print(periph->path, "removing device entry\n");
	cam_periph_unlock(periph);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
		xpt_print(periph->path, "can't remove sysctl context\n");
	}

	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}
584 
585 static void
586 adaasync(void *callback_arg, u_int32_t code,
587 	struct cam_path *path, void *arg)
588 {
589 	struct cam_periph *periph;
590 	struct ada_softc *softc;
591 
592 	periph = (struct cam_periph *)callback_arg;
593 	switch (code) {
594 	case AC_FOUND_DEVICE:
595 	{
596 		struct ccb_getdev *cgd;
597 		cam_status status;
598 
599 		cgd = (struct ccb_getdev *)arg;
600 		if (cgd == NULL)
601 			break;
602 
603 		if (cgd->protocol != PROTO_ATA)
604 			break;
605 
606 		/*
607 		 * Allocate a peripheral instance for
608 		 * this device and start the probe
609 		 * process.
610 		 */
611 		status = cam_periph_alloc(adaregister, adaoninvalidate,
612 					  adacleanup, adastart,
613 					  "ada", CAM_PERIPH_BIO,
614 					  cgd->ccb_h.path, adaasync,
615 					  AC_FOUND_DEVICE, cgd);
616 
617 		if (status != CAM_REQ_CMP
618 		 && status != CAM_REQ_INPROG)
619 			printf("adaasync: Unable to attach to new device "
620 				"due to status 0x%x\n", status);
621 		break;
622 	}
623 	case AC_SENT_BDR:
624 	case AC_BUS_RESET:
625 	{
626 		struct ccb_getdev cgd;
627 
628 		softc = (struct ada_softc *)periph->softc;
629 		cam_periph_async(periph, code, path, arg);
630 		if (ada_write_cache < 0 && softc->write_cache < 0)
631 			break;
632 		if (softc->state != ADA_STATE_NORMAL)
633 			break;
634 		xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
635 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
636 		xpt_action((union ccb *)&cgd);
637 		if ((cgd.ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) == 0)
638 			break;
639 		softc->state = ADA_STATE_WCACHE;
640 		cam_periph_acquire(periph);
641 		cam_freeze_devq_arg(periph->path,
642 		    RELSIM_RELEASE_RUNLEVEL, CAM_RL_DEV + 1);
643 		xpt_schedule(periph, CAM_PRIORITY_DEV);
644 	}
645 	default:
646 		cam_periph_async(periph, code, path, arg);
647 		break;
648 	}
649 }
650 
/*
 * adasysctlinit: taskqueue handler that creates the per-unit sysctl
 * tree (kern.cam.ada.N.*).  Runs from taskqueue_thread context so the
 * sysctl framework can sleep.  The periph reference taken when the
 * task was enqueued is released on every exit path.
 */
static void
adasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	char tmpstr[80], tmpstr2[80];

	periph = (struct cam_periph *)context;

	/* periph was held for us when this task was enqueued */
	if (periph->flags & CAM_PERIPH_INVALID) {
		cam_periph_release(periph);
		return;
	}

	softc = (struct ada_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->flags |= ADA_FLAG_SCTX_INIT;
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
		CTLFLAG_RD, 0, tmpstr);
	if (softc->sysctl_tree == NULL) {
		printf("adasysctlinit: unable to allocate sysctl tree\n");
		cam_periph_release(periph);
		return;
	}

	/* Per-unit write-cache control, separate from the global knob. */
	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "write_cache", CTLFLAG_RW | CTLFLAG_MPSAFE,
		&softc->write_cache, 0, "Enable disk write cache.");
#ifdef ADA_TEST_FAILURE
	/*
	 * Add a 'door bell' sysctl which allows one to set it from userland
	 * and cause something bad to happen.  For the moment, we only allow
	 * whacking the next read or write.
	 */
	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "force_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
		&softc->force_read_error, 0,
		"Force a read error for the next N reads.");
	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "force_write_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
		&softc->force_write_error, 0,
		"Force a write error for the next N writes.");
	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "periodic_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
		&softc->periodic_read_error, 0,
		"Force a read error every N reads (don't set too low).");
#endif
	cam_periph_release(periph);
}
705 
706 static cam_status
707 adaregister(struct cam_periph *periph, void *arg)
708 {
709 	struct ada_softc *softc;
710 	struct ccb_pathinq cpi;
711 	struct ccb_getdev *cgd;
712 	char   announce_buf[80];
713 	struct disk_params *dp;
714 	caddr_t match;
715 	u_int maxio;
716 
717 	cgd = (struct ccb_getdev *)arg;
718 	if (periph == NULL) {
719 		printf("adaregister: periph was NULL!!\n");
720 		return(CAM_REQ_CMP_ERR);
721 	}
722 
723 	if (cgd == NULL) {
724 		printf("adaregister: no getdev CCB, can't register device\n");
725 		return(CAM_REQ_CMP_ERR);
726 	}
727 
728 	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
729 	    M_NOWAIT|M_ZERO);
730 
731 	if (softc == NULL) {
732 		printf("adaregister: Unable to probe new device. "
733 		    "Unable to allocate softc\n");
734 		return(CAM_REQ_CMP_ERR);
735 	}
736 
737 	bioq_init(&softc->bio_queue);
738 	bioq_init(&softc->trim_queue);
739 
740 	if (cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA)
741 		softc->flags |= ADA_FLAG_CAN_DMA;
742 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
743 		softc->flags |= ADA_FLAG_CAN_48BIT;
744 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
745 		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
746 	if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT)
747 		softc->flags |= ADA_FLAG_CAN_POWERMGT;
748 	if (cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ &&
749 	    cgd->inq_flags & SID_CmdQue)
750 		softc->flags |= ADA_FLAG_CAN_NCQ;
751 	if (cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) {
752 		softc->flags |= ADA_FLAG_CAN_TRIM;
753 		softc->trim_max_ranges = TRIM_MAX_RANGES;
754 		if (cgd->ident_data.max_dsm_blocks != 0) {
755 			softc->trim_max_ranges =
756 			    min(cgd->ident_data.max_dsm_blocks * 64,
757 				softc->trim_max_ranges);
758 		}
759 	}
760 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
761 		softc->flags |= ADA_FLAG_CAN_CFA;
762 
763 	periph->softc = softc;
764 
765 	/*
766 	 * See if this device has any quirks.
767 	 */
768 	match = cam_quirkmatch((caddr_t)&cgd->ident_data,
769 			       (caddr_t)ada_quirk_table,
770 			       sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
771 			       sizeof(*ada_quirk_table), ata_identify_match);
772 	if (match != NULL)
773 		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
774 	else
775 		softc->quirks = ADA_Q_NONE;
776 	softc->write_cache = -1;
777 	snprintf(announce_buf, sizeof(announce_buf),
778 	    "kern.cam.ada.%d.writa_cache", periph->unit_number);
779 	TUNABLE_INT_FETCH(announce_buf, &softc->write_cache);
780 
781 	bzero(&cpi, sizeof(cpi));
782 	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE);
783 	cpi.ccb_h.func_code = XPT_PATH_INQ;
784 	xpt_action((union ccb *)&cpi);
785 
786 	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);
787 
788 	/*
789 	 * Register this media as a disk
790 	 */
791 	mtx_unlock(periph->sim->mtx);
792 	adagetparams(periph, cgd);
793 	softc->disk = disk_alloc();
794 	softc->disk->d_open = adaopen;
795 	softc->disk->d_close = adaclose;
796 	softc->disk->d_strategy = adastrategy;
797 	softc->disk->d_dump = adadump;
798 	softc->disk->d_name = "ada";
799 	softc->disk->d_drv1 = periph;
800 	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
801 	if (maxio == 0)
802 		maxio = DFLTPHYS;	/* traditional default */
803 	else if (maxio > MAXPHYS)
804 		maxio = MAXPHYS;	/* for safety */
805 	if (softc->flags & ADA_FLAG_CAN_48BIT)
806 		maxio = min(maxio, 65536 * softc->params.secsize);
807 	else					/* 28bit ATA command limit */
808 		maxio = min(maxio, 256 * softc->params.secsize);
809 	softc->disk->d_maxsize = maxio;
810 	softc->disk->d_unit = periph->unit_number;
811 	softc->disk->d_flags = 0;
812 	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
813 		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
814 	if ((softc->flags & ADA_FLAG_CAN_TRIM) ||
815 	    ((softc->flags & ADA_FLAG_CAN_CFA) &&
816 	    !(softc->flags & ADA_FLAG_CAN_48BIT)))
817 		softc->disk->d_flags |= DISKFLAG_CANDELETE;
818 	strlcpy(softc->disk->d_ident, cgd->serial_num,
819 	    MIN(sizeof(softc->disk->d_ident), cgd->serial_num_len + 1));
820 	strlcpy(softc->disk->d_descr, cgd->ident_data.model,
821 	    MIN(sizeof(softc->disk->d_descr), sizeof(cgd->ident_data.model)));
822 	softc->disk->d_hba_vendor = cpi.hba_vendor;
823 	softc->disk->d_hba_device = cpi.hba_device;
824 	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
825 	softc->disk->d_hba_subdevice = cpi.hba_subdevice;
826 
827 	softc->disk->d_sectorsize = softc->params.secsize;
828 	softc->disk->d_mediasize = (off_t)softc->params.sectors *
829 	    softc->params.secsize;
830 	if (ata_physical_sector_size(&cgd->ident_data) !=
831 	    softc->params.secsize) {
832 		softc->disk->d_stripesize =
833 		    ata_physical_sector_size(&cgd->ident_data);
834 		softc->disk->d_stripeoffset = (softc->disk->d_stripesize -
835 		    ata_logical_sector_offset(&cgd->ident_data)) %
836 		    softc->disk->d_stripesize;
837 	}
838 	softc->disk->d_fwsectors = softc->params.secs_per_track;
839 	softc->disk->d_fwheads = softc->params.heads;
840 	ata_disk_firmware_geom_adjust(softc->disk);
841 
842 	disk_create(softc->disk, DISK_VERSION);
843 	mtx_lock(periph->sim->mtx);
844 
845 	dp = &softc->params;
846 	snprintf(announce_buf, sizeof(announce_buf),
847 		"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
848 		(uintmax_t)(((uintmax_t)dp->secsize *
849 		dp->sectors) / (1024*1024)),
850 		(uintmax_t)dp->sectors,
851 		dp->secsize, dp->heads,
852 		dp->secs_per_track, dp->cylinders);
853 	xpt_announce_periph(periph, announce_buf);
854 
855 	/*
856 	 * Create our sysctl variables, now that we know
857 	 * we have successfully attached.
858 	 */
859 	cam_periph_acquire(periph);
860 	taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);
861 
862 	/*
863 	 * Add async callbacks for bus reset and
864 	 * bus device reset calls.  I don't bother
865 	 * checking if this fails as, in most cases,
866 	 * the system will function just fine without
867 	 * them and the only alternative would be to
868 	 * not attach the device on failure.
869 	 */
870 	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
871 			   adaasync, periph, periph->path);
872 
873 	/*
874 	 * Schedule a periodic event to occasionally send an
875 	 * ordered tag to a device.
876 	 */
877 	callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
878 	callout_reset(&softc->sendordered_c,
879 	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
880 	    adasendorderedtag, softc);
881 
882 	if ((ada_write_cache >= 0 || softc->write_cache >= 0) &&
883 	    cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
884 		softc->state = ADA_STATE_WCACHE;
885 		cam_periph_acquire(periph);
886 		cam_freeze_devq_arg(periph->path,
887 		    RELSIM_RELEASE_RUNLEVEL, CAM_RL_DEV + 1);
888 		xpt_schedule(periph, CAM_PRIORITY_DEV);
889 	} else
890 		softc->state = ADA_STATE_NORMAL;
891 
892 	return(CAM_REQ_CMP);
893 }
894 
895 static void
896 adastart(struct cam_periph *periph, union ccb *start_ccb)
897 {
898 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
899 	struct ccb_ataio *ataio = &start_ccb->ataio;
900 
901 	switch (softc->state) {
902 	case ADA_STATE_NORMAL:
903 	{
904 		struct bio *bp;
905 		u_int8_t tag_code;
906 
907 		/* Execute immediate CCB if waiting. */
908 		if (periph->immediate_priority <= periph->pinfo.priority) {
909 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
910 					("queuing for immediate ccb\n"));
911 			start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
912 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
913 					  periph_links.sle);
914 			periph->immediate_priority = CAM_PRIORITY_NONE;
915 			wakeup(&periph->ccb_list);
916 			/* Have more work to do, so ensure we stay scheduled */
917 			adaschedule(periph);
918 			break;
919 		}
920 		/* Run TRIM if not running yet. */
921 		if (!softc->trim_running &&
922 		    (bp = bioq_first(&softc->trim_queue)) != 0) {
923 			struct trim_request *req = &softc->trim_req;
924 			struct bio *bp1;
925 			int bps = 0, ranges = 0;
926 
927 			softc->trim_running = 1;
928 			bzero(req, sizeof(*req));
929 			bp1 = bp;
930 			do {
931 				uint64_t lba = bp1->bio_pblkno;
932 				int count = bp1->bio_bcount /
933 				    softc->params.secsize;
934 
935 				bioq_remove(&softc->trim_queue, bp1);
936 				while (count > 0) {
937 					int c = min(count, 0xffff);
938 					int off = ranges * 8;
939 
940 					req->data[off + 0] = lba & 0xff;
941 					req->data[off + 1] = (lba >> 8) & 0xff;
942 					req->data[off + 2] = (lba >> 16) & 0xff;
943 					req->data[off + 3] = (lba >> 24) & 0xff;
944 					req->data[off + 4] = (lba >> 32) & 0xff;
945 					req->data[off + 5] = (lba >> 40) & 0xff;
946 					req->data[off + 6] = c & 0xff;
947 					req->data[off + 7] = (c >> 8) & 0xff;
948 					lba += c;
949 					count -= c;
950 					ranges++;
951 				}
952 				req->bps[bps++] = bp1;
953 				bp1 = bioq_first(&softc->trim_queue);
954 				if (bp1 == NULL ||
955 				    bp1->bio_bcount / softc->params.secsize >
956 				    (softc->trim_max_ranges - ranges) * 0xffff)
957 					break;
958 			} while (1);
959 			cam_fill_ataio(ataio,
960 			    ada_retry_count,
961 			    adadone,
962 			    CAM_DIR_OUT,
963 			    0,
964 			    req->data,
965 			    ((ranges + 63) / 64) * 512,
966 			    ada_default_timeout * 1000);
967 			ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
968 			    ATA_DSM_TRIM, 0, (ranges + 63) / 64);
969 			start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
970 			goto out;
971 		}
972 		/* Run regular command. */
973 		bp = bioq_first(&softc->bio_queue);
974 		if (bp == NULL) {
975 			xpt_release_ccb(start_ccb);
976 			break;
977 		}
978 		bioq_remove(&softc->bio_queue, bp);
979 
980 		if ((bp->bio_flags & BIO_ORDERED) != 0
981 		 || (softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
982 			softc->flags &= ~ADA_FLAG_NEED_OTAG;
983 			softc->ordered_tag_count++;
984 			tag_code = 0;
985 		} else {
986 			tag_code = 1;
987 		}
988 		switch (bp->bio_cmd) {
989 		case BIO_READ:
990 		case BIO_WRITE:
991 		{
992 			uint64_t lba = bp->bio_pblkno;
993 			uint16_t count = bp->bio_bcount / softc->params.secsize;
994 #ifdef ADA_TEST_FAILURE
995 			int fail = 0;
996 
997 			/*
998 			 * Support the failure ioctls.  If the command is a
999 			 * read, and there are pending forced read errors, or
1000 			 * if a write and pending write errors, then fail this
1001 			 * operation with EIO.  This is useful for testing
1002 			 * purposes.  Also, support having every Nth read fail.
1003 			 *
1004 			 * This is a rather blunt tool.
1005 			 */
1006 			if (bp->bio_cmd == BIO_READ) {
1007 				if (softc->force_read_error) {
1008 					softc->force_read_error--;
1009 					fail = 1;
1010 				}
1011 				if (softc->periodic_read_error > 0) {
1012 					if (++softc->periodic_read_count >=
1013 					    softc->periodic_read_error) {
1014 						softc->periodic_read_count = 0;
1015 						fail = 1;
1016 					}
1017 				}
1018 			} else {
1019 				if (softc->force_write_error) {
1020 					softc->force_write_error--;
1021 					fail = 1;
1022 				}
1023 			}
1024 			if (fail) {
1025 				bp->bio_error = EIO;
1026 				bp->bio_flags |= BIO_ERROR;
1027 				biodone(bp);
1028 				xpt_release_ccb(start_ccb);
1029 				adaschedule(periph);
1030 				return;
1031 			}
1032 #endif
1033 			cam_fill_ataio(ataio,
1034 			    ada_retry_count,
1035 			    adadone,
1036 			    bp->bio_cmd == BIO_READ ?
1037 			        CAM_DIR_IN : CAM_DIR_OUT,
1038 			    tag_code,
1039 			    bp->bio_data,
1040 			    bp->bio_bcount,
1041 			    ada_default_timeout*1000);
1042 
1043 			if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) {
1044 				if (bp->bio_cmd == BIO_READ) {
1045 					ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
1046 					    lba, count);
1047 				} else {
1048 					ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
1049 					    lba, count);
1050 				}
1051 			} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
1052 			    (lba + count >= ATA_MAX_28BIT_LBA ||
1053 			    count > 256)) {
1054 				if (softc->flags & ADA_FLAG_CAN_DMA) {
1055 					if (bp->bio_cmd == BIO_READ) {
1056 						ata_48bit_cmd(ataio, ATA_READ_DMA48,
1057 						    0, lba, count);
1058 					} else {
1059 						ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
1060 						    0, lba, count);
1061 					}
1062 				} else {
1063 					if (bp->bio_cmd == BIO_READ) {
1064 						ata_48bit_cmd(ataio, ATA_READ_MUL48,
1065 						    0, lba, count);
1066 					} else {
1067 						ata_48bit_cmd(ataio, ATA_WRITE_MUL48,
1068 						    0, lba, count);
1069 					}
1070 				}
1071 			} else {
1072 				if (count == 256)
1073 					count = 0;
1074 				if (softc->flags & ADA_FLAG_CAN_DMA) {
1075 					if (bp->bio_cmd == BIO_READ) {
1076 						ata_28bit_cmd(ataio, ATA_READ_DMA,
1077 						    0, lba, count);
1078 					} else {
1079 						ata_28bit_cmd(ataio, ATA_WRITE_DMA,
1080 						    0, lba, count);
1081 					}
1082 				} else {
1083 					if (bp->bio_cmd == BIO_READ) {
1084 						ata_28bit_cmd(ataio, ATA_READ_MUL,
1085 						    0, lba, count);
1086 					} else {
1087 						ata_28bit_cmd(ataio, ATA_WRITE_MUL,
1088 						    0, lba, count);
1089 					}
1090 				}
1091 			}
1092 			break;
1093 		}
1094 		case BIO_DELETE:
1095 		{
1096 			uint64_t lba = bp->bio_pblkno;
1097 			uint16_t count = bp->bio_bcount / softc->params.secsize;
1098 
1099 			cam_fill_ataio(ataio,
1100 			    ada_retry_count,
1101 			    adadone,
1102 			    CAM_DIR_NONE,
1103 			    0,
1104 			    NULL,
1105 			    0,
1106 			    ada_default_timeout*1000);
1107 
1108 			if (count >= 256)
1109 				count = 0;
1110 			ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
1111 			break;
1112 		}
1113 		case BIO_FLUSH:
1114 			cam_fill_ataio(ataio,
1115 			    1,
1116 			    adadone,
1117 			    CAM_DIR_NONE,
1118 			    0,
1119 			    NULL,
1120 			    0,
1121 			    ada_default_timeout*1000);
1122 
1123 			if (softc->flags & ADA_FLAG_CAN_48BIT)
1124 				ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
1125 			else
1126 				ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
1127 			break;
1128 		}
1129 		start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
1130 out:
1131 		start_ccb->ccb_h.ccb_bp = bp;
1132 		softc->outstanding_cmds++;
1133 		xpt_action(start_ccb);
1134 
1135 		/* May have more work to do, so ensure we stay scheduled */
1136 		adaschedule(periph);
1137 		break;
1138 	}
1139 	case ADA_STATE_WCACHE:
1140 	{
1141 		cam_fill_ataio(ataio,
1142 		    1,
1143 		    adadone,
1144 		    CAM_DIR_NONE,
1145 		    0,
1146 		    NULL,
1147 		    0,
1148 		    ada_default_timeout*1000);
1149 
1150 		ata_28bit_cmd(ataio, ATA_SETFEATURES, (softc->write_cache > 0 ||
1151 		     (softc->write_cache < 0 && ada_write_cache)) ?
1152 		    ATA_SF_ENAB_WCACHE : ATA_SF_DIS_WCACHE, 0, 0);
1153 		start_ccb->ccb_h.ccb_state = ADA_CCB_WCACHE;
1154 		xpt_action(start_ccb);
1155 		break;
1156 	}
1157 	}
1158 }
1159 
/*
 * Completion callback for every ATA CCB this driver issues.  Dispatches on
 * the CCB type that adastart()/adashutdown() stashed in ccb_state.
 */
static void
adadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ada_softc *softc;
	struct ccb_ataio *ataio;

	softc = (struct ada_softc *)periph->softc;
	ataio = &done_ccb->ataio;
	switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
	case ADA_CCB_BUFFER_IO:
	case ADA_CCB_TRIM:
	{
		struct bio *bp;

		/* adastart() stored the originating bio in the CCB. */
		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;

			error = adaerror(done_ccb, 0, 0);
			if (error == ERESTART) {
				/* A retry was scheduled, so just return. */
				return;
			}
			if (error != 0) {
				if (error == ENXIO) {
					/*
					 * Catastrophic error.  Mark our pack as
					 * invalid.
					 */
					/*
					 * XXX See if this is really a media
					 * XXX change first?
					 */
					xpt_print(periph->path,
					    "Invalidating pack\n");
					softc->flags |= ADA_FLAG_PACK_INVALID;
				}
				/* Fail the whole bio; nothing transferred. */
				bp->bio_error = error;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			} else {
				/*
				 * Recovery succeeded; still flag an error on
				 * a short transfer.
				 */
				bp->bio_resid = ataio->resid;
				bp->bio_error = 0;
				if (bp->bio_resid != 0)
					bp->bio_flags |= BIO_ERROR;
			}
			/* Thaw the device queue if error handling froze it. */
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				panic("REQ_CMP with QFRZN");
			bp->bio_resid = ataio->resid;
			if (ataio->resid > 0)
				bp->bio_flags |= BIO_ERROR;
		}
		softc->outstanding_cmds--;
		if (softc->outstanding_cmds == 0)
			softc->flags |= ADA_FLAG_WENT_IDLE;
		if ((ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) ==
		    ADA_CCB_TRIM) {
			/*
			 * A TRIM CCB may have coalesced several BIO_DELETE
			 * bios (see adastart()'s trim_queue loop).  Propagate
			 * the primary bio's status to the extras in
			 * bps[1..] and complete them; bps[0] is bp itself
			 * and is completed below.
			 */
			struct trim_request *req =
			    (struct trim_request *)ataio->data_ptr;
			int i;

			for (i = 1; i < softc->trim_max_ranges &&
			    req->bps[i]; i++) {
				struct bio *bp1 = req->bps[i];

				bp1->bio_resid = bp->bio_resid;
				bp1->bio_error = bp->bio_error;
				if (bp->bio_flags & BIO_ERROR)
					bp1->bio_flags |= BIO_ERROR;
				biodone(bp1);
			}
			/* Allow the next TRIM to be built and dispatched. */
			softc->trim_running = 0;
			biodone(bp);
			adaschedule(periph);
		} else
			biodone(bp);
		break;
	}
	case ADA_CCB_WCACHE:
	{
		/* Completion of the SETFEATURES write-cache toggle. */
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			if (adaerror(done_ccb, 0, 0) == ERESTART) {
				return;
			} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				cam_release_devq(done_ccb->ccb_h.path,
				    /*relsim_flags*/0,
				    /*reduction*/0,
				    /*timeout*/0,
				    /*getcount_only*/0);
			}
		}

		softc->state = ADA_STATE_NORMAL;
		/*
		 * Since our peripheral may be invalidated by an error
		 * above or an external event, we must release our CCB
		 * before releasing the reference on the peripheral.
		 * The peripheral will only go away once the last reference
		 * is removed, and we need it around for the CCB release
		 * operation.
		 */
		xpt_release_ccb(done_ccb);
		cam_release_devq(periph->path,
		    RELSIM_RELEASE_RUNLEVEL, 0, CAM_RL_DEV + 1, FALSE);
		adaschedule(periph);
		cam_periph_release_locked(periph);
		return;
	}
	case ADA_CCB_WAITING:
	{
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	case ADA_CCB_DUMP:
		/* No-op.  We're polling */
		return;
	default:
		break;
	}
	xpt_release_ccb(done_ccb);
}
1289 
1290 static int
1291 adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1292 {
1293 	struct ada_softc	  *softc;
1294 	struct cam_periph *periph;
1295 
1296 	periph = xpt_path_periph(ccb->ccb_h.path);
1297 	softc = (struct ada_softc *)periph->softc;
1298 
1299 	return(cam_periph_error(ccb, cam_flags, sense_flags, NULL));
1300 }
1301 
1302 static void
1303 adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd)
1304 {
1305 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
1306 	struct disk_params *dp = &softc->params;
1307 	u_int64_t lbasize48;
1308 	u_int32_t lbasize;
1309 
1310 	dp->secsize = ata_logical_sector_size(&cgd->ident_data);
1311 	if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
1312 		cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
1313 		dp->heads = cgd->ident_data.current_heads;
1314 		dp->secs_per_track = cgd->ident_data.current_sectors;
1315 		dp->cylinders = cgd->ident_data.cylinders;
1316 		dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
1317 			  ((u_int32_t)cgd->ident_data.current_size_2 << 16);
1318 	} else {
1319 		dp->heads = cgd->ident_data.heads;
1320 		dp->secs_per_track = cgd->ident_data.sectors;
1321 		dp->cylinders = cgd->ident_data.cylinders;
1322 		dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
1323 	}
1324 	lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
1325 		  ((u_int32_t)cgd->ident_data.lba_size_2 << 16);
1326 
1327 	/* use the 28bit LBA size if valid or bigger than the CHS mapping */
1328 	if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
1329 		dp->sectors = lbasize;
1330 
1331 	/* use the 48bit LBA size if valid */
1332 	lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
1333 		    ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
1334 		    ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
1335 		    ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
1336 	if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
1337 	    lbasize48 > ATA_MAX_28BIT_LBA)
1338 		dp->sectors = lbasize48;
1339 }
1340 
1341 static void
1342 adasendorderedtag(void *arg)
1343 {
1344 	struct ada_softc *softc = arg;
1345 
1346 	if (ada_send_ordered) {
1347 		if ((softc->ordered_tag_count == 0)
1348 		 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
1349 			softc->flags |= ADA_FLAG_NEED_OTAG;
1350 		}
1351 		if (softc->outstanding_cmds > 0)
1352 			softc->flags &= ~ADA_FLAG_WENT_IDLE;
1353 
1354 		softc->ordered_tag_count = 0;
1355 	}
1356 	/* Queue us up again */
1357 	callout_reset(&softc->sendordered_c,
1358 	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
1359 	    adasendorderedtag, softc);
1360 }
1361 
/*
 * Step through all ADA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.  On halt/power-off, additionally
 * spin down drives that support power management when the
 * ada_spindown_shutdown knob is enabled.
 */
static void
adashutdown(void * arg, int howto)
{
	struct cam_periph *periph;
	struct ada_softc *softc;

	/* Pass 1: flush the write cache of every open, capable drive. */
	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
		union ccb ccb;

		/* If we paniced with lock held - not recurse here. */
		if (cam_periph_owned(periph))
			continue;
		cam_periph_lock(periph);
		softc = (struct ada_softc *)periph->softc;
		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it..
		 */
		if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
		    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
			cam_periph_unlock(periph);
			continue;
		}

		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

		/* ADA_CCB_DUMP makes adadone() a no-op; we poll below. */
		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		/* Thaw the queue if the failed command froze it. */
		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		cam_periph_unlock(periph);
	}

	/* Spin-down only applies when halting or powering off. */
	if (ada_spindown_shutdown == 0 ||
	    (howto & (RB_HALT | RB_POWEROFF)) == 0)
		return;

	/* Pass 2: issue STANDBY IMMEDIATE to power-management-capable drives. */
	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
		union ccb ccb;

		/* If we paniced with lock held - not recurse here. */
		if (cam_periph_owned(periph))
			continue;
		cam_periph_lock(periph);
		softc = (struct ada_softc *)periph->softc;
		/*
		 * We only spin-down the drive if it is capable of it..
		 */
		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
			cam_periph_unlock(periph);
			continue;
		}

		if (bootverbose)
			xpt_print(periph->path, "spin-down\n");

		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

		/* ADA_CCB_DUMP makes adadone() a no-op; we poll below. */
		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		ata_28bit_cmd(&ccb.ataio, ATA_STANDBY_IMMEDIATE, 0, 0, 0);
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Spin-down disk failed\n");

		/* Thaw the queue if the failed command froze it. */
		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		cam_periph_unlock(periph);
	}
}
1470 
1471 #endif /* _KERNEL */
1472