xref: /freebsd/sys/cam/ata/ata_da.c (revision 7a1c0d963366a31363d3705697a083dd8efee077)
1 /*-
2  * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_ada.h"
31 
32 #include <sys/param.h>
33 
34 #ifdef _KERNEL
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/bio.h>
38 #include <sys/sysctl.h>
39 #include <sys/taskqueue.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/conf.h>
43 #include <sys/devicestat.h>
44 #include <sys/eventhandler.h>
45 #include <sys/malloc.h>
46 #include <sys/cons.h>
47 #include <sys/reboot.h>
48 #include <geom/geom_disk.h>
49 #endif /* _KERNEL */
50 
51 #ifndef _KERNEL
52 #include <stdio.h>
53 #include <string.h>
54 #endif /* _KERNEL */
55 
56 #include <cam/cam.h>
57 #include <cam/cam_ccb.h>
58 #include <cam/cam_periph.h>
59 #include <cam/cam_xpt_periph.h>
60 #include <cam/cam_sim.h>
61 
62 #include <cam/ata/ata_all.h>
63 
64 #include <machine/md_var.h>	/* geometry translation */
65 
66 #ifdef _KERNEL
67 
68 #define ATA_MAX_28BIT_LBA               268435455UL
69 
70 typedef enum {
71 	ADA_STATE_WCACHE,
72 	ADA_STATE_NORMAL
73 } ada_state;
74 
75 typedef enum {
76 	ADA_FLAG_PACK_INVALID	= 0x001,
77 	ADA_FLAG_CAN_48BIT	= 0x002,
78 	ADA_FLAG_CAN_FLUSHCACHE	= 0x004,
79 	ADA_FLAG_CAN_NCQ	= 0x008,
80 	ADA_FLAG_CAN_DMA	= 0x010,
81 	ADA_FLAG_NEED_OTAG	= 0x020,
82 	ADA_FLAG_WENT_IDLE	= 0x040,
83 	ADA_FLAG_CAN_TRIM	= 0x080,
84 	ADA_FLAG_OPEN		= 0x100,
85 	ADA_FLAG_SCTX_INIT	= 0x200,
86 	ADA_FLAG_CAN_CFA        = 0x400,
87 	ADA_FLAG_CAN_POWERMGT   = 0x800
88 } ada_flags;
89 
90 typedef enum {
91 	ADA_Q_NONE		= 0x00
92 } ada_quirks;
93 
94 typedef enum {
95 	ADA_CCB_WCACHE		= 0x01,
96 	ADA_CCB_BUFFER_IO	= 0x03,
97 	ADA_CCB_WAITING		= 0x04,
98 	ADA_CCB_DUMP		= 0x05,
99 	ADA_CCB_TRIM		= 0x06,
100 	ADA_CCB_TYPE_MASK	= 0x0F,
101 } ada_ccb_state;
102 
103 /* Offsets into our private area for storing information */
104 #define ccb_state	ppriv_field0
105 #define ccb_bp		ppriv_ptr1
106 
107 struct disk_params {
108 	u_int8_t  heads;
109 	u_int8_t  secs_per_track;
110 	u_int32_t cylinders;
111 	u_int32_t secsize;	/* Number of bytes/logical sector */
112 	u_int64_t sectors;	/* Total number sectors */
113 };
114 
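/*
 * DSM TRIM payload layout: each range descriptor is 8 bytes (a 48-bit
 * starting LBA followed by a 16-bit sector count), and one 512-byte
 * data block holds 64 descriptors.
 */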
115 #define TRIM_MAX_BLOCKS	4
116 #define TRIM_MAX_RANGES	(TRIM_MAX_BLOCKS * 64)
117 struct trim_request {
118 	uint8_t		data[TRIM_MAX_RANGES * 8];
119 	struct bio	*bps[TRIM_MAX_RANGES];
120 };
121 
122 struct ada_softc {
123 	struct	 bio_queue_head bio_queue;
124 	struct	 bio_queue_head trim_queue;
125 	ada_state state;
126 	ada_flags flags;
127 	ada_quirks quirks;
128 	int	 ordered_tag_count;
129 	int	 outstanding_cmds;
130 	int	 trim_max_ranges;
131 	int	 trim_running;
132 	int	 write_cache;
133 #ifdef ADA_TEST_FAILURE
134 	int      force_read_error;
135 	int      force_write_error;
136 	int      periodic_read_error;
137 	int      periodic_read_count;
138 #endif
139 	struct	 disk_params params;
140 	struct	 disk *disk;
141 	struct task		sysctl_task;
142 	struct sysctl_ctx_list	sysctl_ctx;
143 	struct sysctl_oid	*sysctl_tree;
144 	struct callout		sendordered_c;
145 	struct trim_request	trim_req;
146 };
147 
148 struct ada_quirk_entry {
149 	struct scsi_inquiry_pattern inq_pat;
150 	ada_quirks quirks;
151 };
152 
153 static struct ada_quirk_entry ada_quirk_table[] =
154 {
155 	{
156 		/* Default */
157 		{
158 		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
159 		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
160 		},
161 		/*quirks*/0
162 	},
163 };
164 
165 static	disk_strategy_t	adastrategy;
166 static	dumper_t	adadump;
167 static	periph_init_t	adainit;
168 static	void		adaasync(void *callback_arg, u_int32_t code,
169 				struct cam_path *path, void *arg);
170 static	void		adasysctlinit(void *context, int pending);
171 static	periph_ctor_t	adaregister;
172 static	periph_dtor_t	adacleanup;
173 static	periph_start_t	adastart;
174 static	periph_oninv_t	adaoninvalidate;
175 static	void		adadone(struct cam_periph *periph,
176 			       union ccb *done_ccb);
177 static  int		adaerror(union ccb *ccb, u_int32_t cam_flags,
178 				u_int32_t sense_flags);
179 static void		adagetparams(struct cam_periph *periph,
180 				struct ccb_getdev *cgd);
181 static timeout_t	adasendorderedtag;
182 static void		adashutdown(void *arg, int howto);
183 static void		adasuspend(void *arg);
184 static void		adaresume(void *arg);
185 
186 #ifndef ADA_DEFAULT_TIMEOUT
187 #define ADA_DEFAULT_TIMEOUT 30	/* Timeout in seconds */
188 #endif
189 
190 #ifndef	ADA_DEFAULT_RETRY
191 #define	ADA_DEFAULT_RETRY	4
192 #endif
193 
194 #ifndef	ADA_DEFAULT_SEND_ORDERED
195 #define	ADA_DEFAULT_SEND_ORDERED	1
196 #endif
197 
198 #ifndef	ADA_DEFAULT_SPINDOWN_SHUTDOWN
199 #define	ADA_DEFAULT_SPINDOWN_SHUTDOWN	1
200 #endif
201 
202 #ifndef	ADA_DEFAULT_SPINDOWN_SUSPEND
203 #define	ADA_DEFAULT_SPINDOWN_SUSPEND	1
204 #endif
205 
206 #ifndef	ADA_DEFAULT_WRITE_CACHE
207 #define	ADA_DEFAULT_WRITE_CACHE	1
208 #endif
209 
210 /*
211  * Most platforms map firmware geometry to the actual geometry, but
212  * some don't.  If not overridden, default to a no-op.
213  */
214 #ifndef ata_disk_firmware_geom_adjust
215 #define	ata_disk_firmware_geom_adjust(disk)
216 #endif
217 
218 static int ada_retry_count = ADA_DEFAULT_RETRY;
219 static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
220 static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;
221 static int ada_spindown_shutdown = ADA_DEFAULT_SPINDOWN_SHUTDOWN;
222 static int ada_spindown_suspend = ADA_DEFAULT_SPINDOWN_SUSPEND;
223 static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE;
224 
225 SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
226             "CAM Direct Access Disk driver");
227 SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
228            &ada_retry_count, 0, "Normal I/O retry count");
229 TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
230 SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
231            &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
232 TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
233 SYSCTL_INT(_kern_cam_ada, OID_AUTO, ada_send_ordered, CTLFLAG_RW,
234            &ada_send_ordered, 0, "Send Ordered Tags");
235 TUNABLE_INT("kern.cam.ada.ada_send_ordered", &ada_send_ordered);
236 SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RW,
237            &ada_spindown_shutdown, 0, "Spin down upon shutdown");
238 TUNABLE_INT("kern.cam.ada.spindown_shutdown", &ada_spindown_shutdown);
239 SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RW,
240            &ada_spindown_suspend, 0, "Spin down upon suspend");
241 TUNABLE_INT("kern.cam.ada.spindown_suspend", &ada_spindown_suspend);
242 SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RW,
243            &ada_write_cache, 0, "Enable disk write cache");
244 TUNABLE_INT("kern.cam.ada.write_cache", &ada_write_cache);
245 
246 /*
247  * ADA_ORDEREDTAG_INTERVAL determines how often, relative
248  * to the default timeout, we check to see whether an ordered
249  * tagged transaction is appropriate to prevent simple tag
250  * starvation.  Since we'd like to ensure that there is at least
251  * 1/2 of the timeout length left for a starved transaction to
252  * complete after we've sent an ordered tag, we must poll at least
253  * four times in every timeout period.  This takes care of the worst
254  * case where a starved transaction starts during an interval that
255  * passes the "don't send an ordered tag" test, so it takes us two
256  * intervals to determine that a tag must be sent.
257  */
258 #ifndef ADA_ORDEREDTAG_INTERVAL
259 #define ADA_ORDEREDTAG_INTERVAL 4
260 #endif
261 
262 static struct periph_driver adadriver =
263 {
264 	adainit, "ada",
265 	TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
266 };
267 
268 PERIPHDRIVER_DECLARE(ada, adadriver);
269 
270 MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");
271 
272 static int
273 adaopen(struct disk *dp)
274 {
275 	struct cam_periph *periph;
276 	struct ada_softc *softc;
277 	int unit;
278 	int error;
279 
280 	periph = (struct cam_periph *)dp->d_drv1;
281 	if (periph == NULL) {
282 		return (ENXIO);
283 	}
284 
285 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
286 		return(ENXIO);
287 	}
288 
289 	cam_periph_lock(periph);
290 	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
291 		cam_periph_unlock(periph);
292 		cam_periph_release(periph);
293 		return (error);
294 	}
295 
296 	unit = periph->unit_number;
297 	softc = (struct ada_softc *)periph->softc;
298 	softc->flags |= ADA_FLAG_OPEN;
299 
300 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
301 	    ("adaopen: disk=%s%d (unit %d)\n", dp->d_name, dp->d_unit,
302 	     unit));
303 
304 	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
305 		/* The pack is accessible again; clear the invalid flag. */
306 		softc->flags &= ~ADA_FLAG_PACK_INVALID;
307 	}
308 
309 	cam_periph_unhold(periph);
310 	cam_periph_unlock(periph);
311 	return (0);
312 }
313 
314 static int
315 adaclose(struct disk *dp)
316 {
317 	struct	cam_periph *periph;
318 	struct	ada_softc *softc;
319 	union ccb *ccb;
320 	int error;
321 
322 	periph = (struct cam_periph *)dp->d_drv1;
323 	if (periph == NULL)
324 		return (ENXIO);
325 
326 	cam_periph_lock(periph);
327 	if ((error = cam_periph_hold(periph, PRIBIO)) != 0) {
328 		cam_periph_unlock(periph);
329 		cam_periph_release(periph);
330 		return (error);
331 	}
332 
333 	softc = (struct ada_softc *)periph->softc;
334 	/* We only sync the cache if the drive is capable of it. */
335 	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
336 
337 		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
338 		cam_fill_ataio(&ccb->ataio,
339 				    1,
340 				    adadone,
341 				    CAM_DIR_NONE,
342 				    0,
343 				    NULL,
344 				    0,
345 				    ada_default_timeout*1000);
346 
347 		if (softc->flags & ADA_FLAG_CAN_48BIT)
348 			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
349 		else
350 			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
351 		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
352 		    /*sense_flags*/0, softc->disk->d_devstat);
353 
354 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
355 			xpt_print(periph->path, "Synchronize cache failed\n");
356 
357 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
358 			cam_release_devq(ccb->ccb_h.path,
359 					 /*relsim_flags*/0,
360 					 /*reduction*/0,
361 					 /*timeout*/0,
362 					 /*getcount_only*/0);
363 		xpt_release_ccb(ccb);
364 	}
365 
366 	softc->flags &= ~ADA_FLAG_OPEN;
367 	cam_periph_unhold(periph);
368 	cam_periph_unlock(periph);
369 	cam_periph_release(periph);
370 	return (0);
371 }
372 
373 static void
374 adaschedule(struct cam_periph *periph)
375 {
376 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
377 
378 	if (bioq_first(&softc->bio_queue) ||
379 	    (!softc->trim_running && bioq_first(&softc->trim_queue))) {
380 		/* Have more work to do, so ensure we stay scheduled */
381 		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
382 	}
383 }
384 
385 /*
386  * Actually translate the requested transfer into one the physical driver
387  * can understand.  The transfer is described by a bio and will include
388  * only one physical transfer.
389  */
390 static void
391 adastrategy(struct bio *bp)
392 {
393 	struct cam_periph *periph;
394 	struct ada_softc *softc;
395 
396 	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
397 	if (periph == NULL) {
398 		biofinish(bp, NULL, ENXIO);
399 		return;
400 	}
401 	softc = (struct ada_softc *)periph->softc;
402 
403 	cam_periph_lock(periph);
404 
405 	/*
406 	 * If the device has been made invalid, error out
407 	 */
408 	if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
409 		cam_periph_unlock(periph);
410 		biofinish(bp, NULL, ENXIO);
411 		return;
412 	}
413 
414 	/*
415 	 * Place it in the queue of disk activities for this disk
416 	 */
417 	if (bp->bio_cmd == BIO_DELETE &&
418 	    (softc->flags & ADA_FLAG_CAN_TRIM))
419 		bioq_disksort(&softc->trim_queue, bp);
420 	else
421 		bioq_disksort(&softc->bio_queue, bp);
422 
423 	/*
424 	 * Schedule ourselves for performing the work.
425 	 */
426 	adaschedule(periph);
427 	cam_periph_unlock(periph);
428 
429 	return;
430 }
431 
432 static int
433 adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
434 {
435 	struct	    cam_periph *periph;
436 	struct	    ada_softc *softc;
437 	u_int	    secsize;
438 	union	    ccb ccb;
439 	struct	    disk *dp;
440 	uint64_t    lba;
441 	uint16_t    count;
442 
443 	dp = arg;
444 	periph = dp->d_drv1;
445 	if (periph == NULL)
446 		return (ENXIO);
447 	softc = (struct ada_softc *)periph->softc;
448 	cam_periph_lock(periph);
449 	secsize = softc->params.secsize;
450 	lba = offset / secsize;
451 	count = length / secsize;
452 
453 	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
454 		cam_periph_unlock(periph);
455 		return (ENXIO);
456 	}
457 
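	/*
	 * A non-zero length is a polled write of dump data; the final call
	 * is made with a zero length, so only flush the write cache then.
	 */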
458 	if (length > 0) {
459 		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
460 		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
461 		cam_fill_ataio(&ccb.ataio,
462 		    0,
463 		    adadone,
464 		    CAM_DIR_OUT,
465 		    0,
466 		    (u_int8_t *) virtual,
467 		    length,
468 		    ada_default_timeout*1000);
469 		if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
470 		    (lba + count >= ATA_MAX_28BIT_LBA ||
471 		    count >= 256)) {
472 			ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
473 			    0, lba, count);
474 		} else {
475 			ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
476 			    0, lba, count);
477 		}
478 		xpt_polled_action(&ccb);
479 
480 		if ((ccb.ataio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
481 			printf("Aborting dump due to I/O error.\n");
482 			cam_periph_unlock(periph);
483 			return(EIO);
484 		}
485 		cam_periph_unlock(periph);
486 		return(0);
487 	}
488 
489 	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
490 		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
491 
492 		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
493 		cam_fill_ataio(&ccb.ataio,
494 				    1,
495 				    adadone,
496 				    CAM_DIR_NONE,
497 				    0,
498 				    NULL,
499 				    0,
500 				    ada_default_timeout*1000);
501 
502 		if (softc->flags & ADA_FLAG_CAN_48BIT)
503 			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
504 		else
505 			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
506 		xpt_polled_action(&ccb);
507 
508 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
509 			xpt_print(periph->path, "Synchronize cache failed\n");
510 
511 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
512 			cam_release_devq(ccb.ccb_h.path,
513 					 /*relsim_flags*/0,
514 					 /*reduction*/0,
515 					 /*timeout*/0,
516 					 /*getcount_only*/0);
517 	}
518 	cam_periph_unlock(periph);
519 	return (0);
520 }
521 
522 static void
523 adainit(void)
524 {
525 	cam_status status;
526 
527 	/*
528 	 * Install a global async callback.  This callback will
529 	 * receive async callbacks like "new device found".
530 	 */
531 	status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);
532 
533 	if (status != CAM_REQ_CMP) {
534 		printf("ada: Failed to attach master async callback "
535 		       "due to status 0x%x!\n", status);
536 	} else if (ada_send_ordered) {
537 
538 		/* Register our event handlers */
539 		if ((EVENTHANDLER_REGISTER(power_suspend, adasuspend,
540 					   NULL, EVENTHANDLER_PRI_LAST)) == NULL)
541 		    printf("adainit: power event registration failed!\n");
542 		if ((EVENTHANDLER_REGISTER(power_resume, adaresume,
543 					   NULL, EVENTHANDLER_PRI_LAST)) == NULL)
544 		    printf("adainit: power event registration failed!\n");
545 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
546 					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
547 		    printf("adainit: shutdown event registration failed!\n");
548 	}
549 }
550 
551 static void
552 adaoninvalidate(struct cam_periph *periph)
553 {
554 	struct ada_softc *softc;
555 
556 	softc = (struct ada_softc *)periph->softc;
557 
558 	/*
559 	 * De-register any async callbacks.
560 	 */
561 	xpt_register_async(0, adaasync, periph, periph->path);
562 
563 	softc->flags |= ADA_FLAG_PACK_INVALID;
564 
565 	/*
566 	 * Return all queued I/O with ENXIO.
567 	 * XXX Handle any transactions queued to the card
568 	 *     with XPT_ABORT_CCB.
569 	 */
570 	bioq_flush(&softc->bio_queue, NULL, ENXIO);
571 	bioq_flush(&softc->trim_queue, NULL, ENXIO);
572 
573 	disk_gone(softc->disk);
574 	xpt_print(periph->path, "lost device\n");
575 }
576 
577 static void
578 adacleanup(struct cam_periph *periph)
579 {
580 	struct ada_softc *softc;
581 
582 	softc = (struct ada_softc *)periph->softc;
583 
584 	xpt_print(periph->path, "removing device entry\n");
585 	cam_periph_unlock(periph);
586 
587 	/*
588 	 * If we can't free the sysctl tree, oh well...
589 	 */
590 	if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
591 	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
592 		xpt_print(periph->path, "can't remove sysctl context\n");
593 	}
594 
595 	disk_destroy(softc->disk);
596 	callout_drain(&softc->sendordered_c);
597 	free(softc, M_DEVBUF);
598 	cam_periph_lock(periph);
599 }
600 
601 static void
602 adaasync(void *callback_arg, u_int32_t code,
603 	struct cam_path *path, void *arg)
604 {
605 	struct cam_periph *periph;
606 	struct ada_softc *softc;
607 
608 	periph = (struct cam_periph *)callback_arg;
609 	switch (code) {
610 	case AC_FOUND_DEVICE:
611 	{
612 		struct ccb_getdev *cgd;
613 		cam_status status;
614 
615 		cgd = (struct ccb_getdev *)arg;
616 		if (cgd == NULL)
617 			break;
618 
619 		if (cgd->protocol != PROTO_ATA)
620 			break;
621 
622 		/*
623 		 * Allocate a peripheral instance for
624 		 * this device and start the probe
625 		 * process.
626 		 */
627 		status = cam_periph_alloc(adaregister, adaoninvalidate,
628 					  adacleanup, adastart,
629 					  "ada", CAM_PERIPH_BIO,
630 					  cgd->ccb_h.path, adaasync,
631 					  AC_FOUND_DEVICE, cgd);
632 
633 		if (status != CAM_REQ_CMP
634 		 && status != CAM_REQ_INPROG)
635 			printf("adaasync: Unable to attach to new device "
636 				"due to status 0x%x\n", status);
637 		break;
638 	}
639 	case AC_SENT_BDR:
640 	case AC_BUS_RESET:
641 	{
642 		struct ccb_getdev cgd;
643 
644 		softc = (struct ada_softc *)periph->softc;
645 		cam_periph_async(periph, code, path, arg);
646 		if (ada_write_cache < 0 && softc->write_cache < 0)
647 			break;
648 		if (softc->state != ADA_STATE_NORMAL)
649 			break;
650 		xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
651 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
652 		xpt_action((union ccb *)&cgd);
653 		if ((cgd.ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) == 0)
654 			break;
655 		softc->state = ADA_STATE_WCACHE;
656 		cam_periph_acquire(periph);
657 		cam_freeze_devq_arg(periph->path,
658 		    RELSIM_RELEASE_RUNLEVEL, CAM_RL_DEV + 1);
659 		xpt_schedule(periph, CAM_PRIORITY_DEV);
660 	}
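	/* FALLTHROUGH */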
661 	default:
662 		cam_periph_async(periph, code, path, arg);
663 		break;
664 	}
665 }
666 
667 static void
668 adasysctlinit(void *context, int pending)
669 {
670 	struct cam_periph *periph;
671 	struct ada_softc *softc;
672 	char tmpstr[80], tmpstr2[80];
673 
674 	periph = (struct cam_periph *)context;
675 
676 	/* periph was held for us when this task was enqueued */
677 	if (periph->flags & CAM_PERIPH_INVALID) {
678 		cam_periph_release(periph);
679 		return;
680 	}
681 
682 	softc = (struct ada_softc *)periph->softc;
683 	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
684 	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
685 
686 	sysctl_ctx_init(&softc->sysctl_ctx);
687 	softc->flags |= ADA_FLAG_SCTX_INIT;
688 	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
689 		SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
690 		CTLFLAG_RD, 0, tmpstr);
691 	if (softc->sysctl_tree == NULL) {
692 		printf("adasysctlinit: unable to allocate sysctl tree\n");
693 		cam_periph_release(periph);
694 		return;
695 	}
696 
697 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
698 		OID_AUTO, "write_cache", CTLFLAG_RW | CTLFLAG_MPSAFE,
699 		&softc->write_cache, 0, "Enable disk write cache.");
700 #ifdef ADA_TEST_FAILURE
701 	/*
702 	 * Add a 'door bell' sysctl which allows one to set it from userland
703 	 * and cause something bad to happen.  For the moment, we only allow
704 	 * whacking the next read or write.
705 	 */
706 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
707 		OID_AUTO, "force_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
708 		&softc->force_read_error, 0,
709 		"Force a read error for the next N reads.");
710 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
711 		OID_AUTO, "force_write_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
712 		&softc->force_write_error, 0,
713 		"Force a write error for the next N writes.");
714 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
715 		OID_AUTO, "periodic_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
716 		&softc->periodic_read_error, 0,
717 		"Force a read error every N reads (don't set too low).");
718 #endif
719 	cam_periph_release(periph);
720 }
721 
722 static cam_status
723 adaregister(struct cam_periph *periph, void *arg)
724 {
725 	struct ada_softc *softc;
726 	struct ccb_pathinq cpi;
727 	struct ccb_getdev *cgd;
728 	char   announce_buf[80];
729 	struct disk_params *dp;
730 	caddr_t match;
731 	u_int maxio;
732 
733 	cgd = (struct ccb_getdev *)arg;
734 	if (periph == NULL) {
735 		printf("adaregister: periph was NULL!!\n");
736 		return(CAM_REQ_CMP_ERR);
737 	}
738 
739 	if (cgd == NULL) {
740 		printf("adaregister: no getdev CCB, can't register device\n");
741 		return(CAM_REQ_CMP_ERR);
742 	}
743 
744 	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
745 	    M_NOWAIT|M_ZERO);
746 
747 	if (softc == NULL) {
748 		printf("adaregister: Unable to probe new device. "
749 		    "Unable to allocate softc\n");
750 		return(CAM_REQ_CMP_ERR);
751 	}
752 
753 	bioq_init(&softc->bio_queue);
754 	bioq_init(&softc->trim_queue);
755 
756 	if (cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA)
757 		softc->flags |= ADA_FLAG_CAN_DMA;
758 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
759 		softc->flags |= ADA_FLAG_CAN_48BIT;
760 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
761 		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
762 	if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT)
763 		softc->flags |= ADA_FLAG_CAN_POWERMGT;
764 	if (cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ &&
765 	    cgd->inq_flags & SID_CmdQue)
766 		softc->flags |= ADA_FLAG_CAN_NCQ;
767 	if (cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) {
768 		softc->flags |= ADA_FLAG_CAN_TRIM;
769 		softc->trim_max_ranges = TRIM_MAX_RANGES;
770 		if (cgd->ident_data.max_dsm_blocks != 0) {
771 			softc->trim_max_ranges =
772 			    min(cgd->ident_data.max_dsm_blocks * 64,
773 				softc->trim_max_ranges);
774 		}
775 	}
776 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
777 		softc->flags |= ADA_FLAG_CAN_CFA;
778 
779 	periph->softc = softc;
780 
781 	/*
782 	 * See if this device has any quirks.
783 	 */
784 	match = cam_quirkmatch((caddr_t)&cgd->ident_data,
785 			       (caddr_t)ada_quirk_table,
786 			       sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
787 			       sizeof(*ada_quirk_table), ata_identify_match);
788 	if (match != NULL)
789 		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
790 	else
791 		softc->quirks = ADA_Q_NONE;
792 
793 	bzero(&cpi, sizeof(cpi));
794 	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE);
795 	cpi.ccb_h.func_code = XPT_PATH_INQ;
796 	xpt_action((union ccb *)&cpi);
797 
798 	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);
799 
800 	/*
801 	 * Register this media as a disk
802 	 */
803 	(void)cam_periph_hold(periph, PRIBIO);
804 	mtx_unlock(periph->sim->mtx);
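	/*
	 * A per-unit kern.cam.ada.%d.write_cache tunable (left at -1 when
	 * unset) overrides the global kern.cam.ada.write_cache setting.
	 */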
805 	softc->write_cache = -1;
806 	snprintf(announce_buf, sizeof(announce_buf),
807 	    "kern.cam.ada.%d.write_cache", periph->unit_number);
808 	TUNABLE_INT_FETCH(announce_buf, &softc->write_cache);
809 	adagetparams(periph, cgd);
810 	softc->disk = disk_alloc();
811 	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
812 			  periph->unit_number, softc->params.secsize,
813 			  DEVSTAT_ALL_SUPPORTED,
814 			  DEVSTAT_TYPE_DIRECT |
815 			  XPORT_DEVSTAT_TYPE(cpi.transport),
816 			  DEVSTAT_PRIORITY_DISK);
817 	softc->disk->d_open = adaopen;
818 	softc->disk->d_close = adaclose;
819 	softc->disk->d_strategy = adastrategy;
820 	softc->disk->d_dump = adadump;
821 	softc->disk->d_name = "ada";
822 	softc->disk->d_drv1 = periph;
823 	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
824 	if (maxio == 0)
825 		maxio = DFLTPHYS;	/* traditional default */
826 	else if (maxio > MAXPHYS)
827 		maxio = MAXPHYS;	/* for safety */
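	/*
	 * One command can transfer at most 65536 sectors using 48-bit
	 * commands, or 256 sectors using 28-bit commands.
	 */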
828 	if (softc->flags & ADA_FLAG_CAN_48BIT)
829 		maxio = min(maxio, 65536 * softc->params.secsize);
830 	else					/* 28bit ATA command limit */
831 		maxio = min(maxio, 256 * softc->params.secsize);
832 	softc->disk->d_maxsize = maxio;
833 	softc->disk->d_unit = periph->unit_number;
834 	softc->disk->d_flags = 0;
835 	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
836 		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
837 	if ((softc->flags & ADA_FLAG_CAN_TRIM) ||
838 	    ((softc->flags & ADA_FLAG_CAN_CFA) &&
839 	    !(softc->flags & ADA_FLAG_CAN_48BIT)))
840 		softc->disk->d_flags |= DISKFLAG_CANDELETE;
841 	strlcpy(softc->disk->d_ident, cgd->serial_num,
842 	    MIN(sizeof(softc->disk->d_ident), cgd->serial_num_len + 1));
843 	strlcpy(softc->disk->d_descr, cgd->ident_data.model,
844 	    MIN(sizeof(softc->disk->d_descr), sizeof(cgd->ident_data.model)));
845 	softc->disk->d_hba_vendor = cpi.hba_vendor;
846 	softc->disk->d_hba_device = cpi.hba_device;
847 	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
848 	softc->disk->d_hba_subdevice = cpi.hba_subdevice;
849 
850 	softc->disk->d_sectorsize = softc->params.secsize;
851 	softc->disk->d_mediasize = (off_t)softc->params.sectors *
852 	    softc->params.secsize;
853 	if (ata_physical_sector_size(&cgd->ident_data) !=
854 	    softc->params.secsize) {
855 		softc->disk->d_stripesize =
856 		    ata_physical_sector_size(&cgd->ident_data);
857 		softc->disk->d_stripeoffset = (softc->disk->d_stripesize -
858 		    ata_logical_sector_offset(&cgd->ident_data)) %
859 		    softc->disk->d_stripesize;
860 	}
861 	softc->disk->d_fwsectors = softc->params.secs_per_track;
862 	softc->disk->d_fwheads = softc->params.heads;
863 	ata_disk_firmware_geom_adjust(softc->disk);
864 
865 	disk_create(softc->disk, DISK_VERSION);
866 	mtx_lock(periph->sim->mtx);
867 	cam_periph_unhold(periph);
868 
869 	dp = &softc->params;
870 	snprintf(announce_buf, sizeof(announce_buf),
871 		"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
872 		(uintmax_t)(((uintmax_t)dp->secsize *
873 		dp->sectors) / (1024*1024)),
874 		(uintmax_t)dp->sectors,
875 		dp->secsize, dp->heads,
876 		dp->secs_per_track, dp->cylinders);
877 	xpt_announce_periph(periph, announce_buf);
878 
879 	/*
880 	 * Create our sysctl variables, now that we know
881 	 * we have successfully attached.
882 	 */
883 	cam_periph_acquire(periph);
884 	taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);
885 
886 	/*
887 	 * Add async callbacks for bus reset and
888 	 * bus device reset calls.  I don't bother
889 	 * checking if this fails as, in most cases,
890 	 * the system will function just fine without
891 	 * them and the only alternative would be to
892 	 * not attach the device on failure.
893 	 */
894 	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
895 			   adaasync, periph, periph->path);
896 
897 	/*
898 	 * Schedule a periodic event to occasionally send an
899 	 * ordered tag to a device.
900 	 */
901 	callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
902 	callout_reset(&softc->sendordered_c,
903 	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
904 	    adasendorderedtag, softc);
905 
906 	if ((ada_write_cache >= 0 || softc->write_cache >= 0) &&
907 	    cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
908 		softc->state = ADA_STATE_WCACHE;
909 		cam_periph_acquire(periph);
910 		cam_freeze_devq_arg(periph->path,
911 		    RELSIM_RELEASE_RUNLEVEL, CAM_RL_DEV + 1);
912 		xpt_schedule(periph, CAM_PRIORITY_DEV);
913 	} else
914 		softc->state = ADA_STATE_NORMAL;
915 
916 	return(CAM_REQ_CMP);
917 }
918 
919 static void
920 adastart(struct cam_periph *periph, union ccb *start_ccb)
921 {
922 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
923 	struct ccb_ataio *ataio = &start_ccb->ataio;
924 
925 	switch (softc->state) {
926 	case ADA_STATE_NORMAL:
927 	{
928 		struct bio *bp;
929 		u_int8_t tag_code;
930 
931 		/* Execute immediate CCB if waiting. */
932 		if (periph->immediate_priority <= periph->pinfo.priority) {
933 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
934 					("queuing for immediate ccb\n"));
935 			start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
936 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
937 					  periph_links.sle);
938 			periph->immediate_priority = CAM_PRIORITY_NONE;
939 			wakeup(&periph->ccb_list);
940 			/* Have more work to do, so ensure we stay scheduled */
941 			adaschedule(periph);
942 			break;
943 		}
944 		/* Run TRIM if not running yet. */
945 		if (!softc->trim_running &&
946 		    (bp = bioq_first(&softc->trim_queue)) != 0) {
947 			struct trim_request *req = &softc->trim_req;
948 			struct bio *bp1;
949 			int bps = 0, ranges = 0;
950 
951 			softc->trim_running = 1;
952 			bzero(req, sizeof(*req));
953 			bp1 = bp;
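			/*
			 * Coalesce queued deletes into a single DSM TRIM
			 * payload, one 8-byte descriptor per chunk of at
			 * most 0xffff sectors.
			 */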
954 			do {
955 				uint64_t lba = bp1->bio_pblkno;
956 				int count = bp1->bio_bcount /
957 				    softc->params.secsize;
958 
959 				bioq_remove(&softc->trim_queue, bp1);
960 				while (count > 0) {
961 					int c = min(count, 0xffff);
962 					int off = ranges * 8;
963 
964 					req->data[off + 0] = lba & 0xff;
965 					req->data[off + 1] = (lba >> 8) & 0xff;
966 					req->data[off + 2] = (lba >> 16) & 0xff;
967 					req->data[off + 3] = (lba >> 24) & 0xff;
968 					req->data[off + 4] = (lba >> 32) & 0xff;
969 					req->data[off + 5] = (lba >> 40) & 0xff;
970 					req->data[off + 6] = c & 0xff;
971 					req->data[off + 7] = (c >> 8) & 0xff;
972 					lba += c;
973 					count -= c;
974 					ranges++;
975 				}
976 				req->bps[bps++] = bp1;
977 				bp1 = bioq_first(&softc->trim_queue);
978 				if (bp1 == NULL ||
979 				    bp1->bio_bcount / softc->params.secsize >
980 				    (softc->trim_max_ranges - ranges) * 0xffff)
981 					break;
982 			} while (1);
983 			cam_fill_ataio(ataio,
984 			    ada_retry_count,
985 			    adadone,
986 			    CAM_DIR_OUT,
987 			    0,
988 			    req->data,
989 			    ((ranges + 63) / 64) * 512,
990 			    ada_default_timeout * 1000);
991 			ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
992 			    ATA_DSM_TRIM, 0, (ranges + 63) / 64);
993 			start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
994 			goto out;
995 		}
996 		/* Run regular command. */
997 		bp = bioq_first(&softc->bio_queue);
998 		if (bp == NULL) {
999 			xpt_release_ccb(start_ccb);
1000 			break;
1001 		}
1002 		bioq_remove(&softc->bio_queue, bp);
1003 
1004 		if ((bp->bio_flags & BIO_ORDERED) != 0
1005 		 || (softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
1006 			softc->flags &= ~ADA_FLAG_NEED_OTAG;
1007 			softc->ordered_tag_count++;
1008 			tag_code = 0;
1009 		} else {
1010 			tag_code = 1;
1011 		}
1012 		switch (bp->bio_cmd) {
1013 		case BIO_READ:
1014 		case BIO_WRITE:
1015 		{
1016 			uint64_t lba = bp->bio_pblkno;
1017 			uint16_t count = bp->bio_bcount / softc->params.secsize;
1018 #ifdef ADA_TEST_FAILURE
1019 			int fail = 0;
1020 
1021 			/*
1022 			 * Support the failure ioctls.  If the command is a
1023 			 * read, and there are pending forced read errors, or
1024 			 * if a write and pending write errors, then fail this
1025 			 * operation with EIO.  This is useful for testing
1026 			 * purposes.  Also, support having every Nth read fail.
1027 			 *
1028 			 * This is a rather blunt tool.
1029 			 */
1030 			if (bp->bio_cmd == BIO_READ) {
1031 				if (softc->force_read_error) {
1032 					softc->force_read_error--;
1033 					fail = 1;
1034 				}
1035 				if (softc->periodic_read_error > 0) {
1036 					if (++softc->periodic_read_count >=
1037 					    softc->periodic_read_error) {
1038 						softc->periodic_read_count = 0;
1039 						fail = 1;
1040 					}
1041 				}
1042 			} else {
1043 				if (softc->force_write_error) {
1044 					softc->force_write_error--;
1045 					fail = 1;
1046 				}
1047 			}
1048 			if (fail) {
1049 				bp->bio_error = EIO;
1050 				bp->bio_flags |= BIO_ERROR;
1051 				biodone(bp);
1052 				xpt_release_ccb(start_ccb);
1053 				adaschedule(periph);
1054 				return;
1055 			}
1056 #endif
1057 			cam_fill_ataio(ataio,
1058 			    ada_retry_count,
1059 			    adadone,
1060 			    bp->bio_cmd == BIO_READ ?
1061 			        CAM_DIR_IN : CAM_DIR_OUT,
1062 			    tag_code,
1063 			    bp->bio_data,
1064 			    bp->bio_bcount,
1065 			    ada_default_timeout*1000);
1066 
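			/*
			 * Select the command set: NCQ if supported and the
			 * request is not ordered; otherwise 48-bit commands
			 * when the LBA or count exceeds the 28-bit limits,
			 * else plain 28-bit commands.
			 */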
1067 			if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) {
1068 				if (bp->bio_cmd == BIO_READ) {
1069 					ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
1070 					    lba, count);
1071 				} else {
1072 					ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
1073 					    lba, count);
1074 				}
1075 			} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
1076 			    (lba + count >= ATA_MAX_28BIT_LBA ||
1077 			    count > 256)) {
1078 				if (softc->flags & ADA_FLAG_CAN_DMA) {
1079 					if (bp->bio_cmd == BIO_READ) {
1080 						ata_48bit_cmd(ataio, ATA_READ_DMA48,
1081 						    0, lba, count);
1082 					} else {
1083 						ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
1084 						    0, lba, count);
1085 					}
1086 				} else {
1087 					if (bp->bio_cmd == BIO_READ) {
1088 						ata_48bit_cmd(ataio, ATA_READ_MUL48,
1089 						    0, lba, count);
1090 					} else {
1091 						ata_48bit_cmd(ataio, ATA_WRITE_MUL48,
1092 						    0, lba, count);
1093 					}
1094 				}
1095 			} else {
1096 				if (count == 256)
1097 					count = 0;
1098 				if (softc->flags & ADA_FLAG_CAN_DMA) {
1099 					if (bp->bio_cmd == BIO_READ) {
1100 						ata_28bit_cmd(ataio, ATA_READ_DMA,
1101 						    0, lba, count);
1102 					} else {
1103 						ata_28bit_cmd(ataio, ATA_WRITE_DMA,
1104 						    0, lba, count);
1105 					}
1106 				} else {
1107 					if (bp->bio_cmd == BIO_READ) {
1108 						ata_28bit_cmd(ataio, ATA_READ_MUL,
1109 						    0, lba, count);
1110 					} else {
1111 						ata_28bit_cmd(ataio, ATA_WRITE_MUL,
1112 						    0, lba, count);
1113 					}
1114 				}
1115 			}
1116 			break;
1117 		}
1118 		case BIO_DELETE:
1119 		{
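			/*
			 * Deletes reach the regular queue only when DSM TRIM
			 * is not supported; use the 28-bit CFA ERASE command
			 * instead.
			 */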
1120 			uint64_t lba = bp->bio_pblkno;
1121 			uint16_t count = bp->bio_bcount / softc->params.secsize;
1122 
1123 			cam_fill_ataio(ataio,
1124 			    ada_retry_count,
1125 			    adadone,
1126 			    CAM_DIR_NONE,
1127 			    0,
1128 			    NULL,
1129 			    0,
1130 			    ada_default_timeout*1000);
1131 
1132 			if (count >= 256)
1133 				count = 0;
1134 			ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
1135 			break;
1136 		}
1137 		case BIO_FLUSH:
1138 			cam_fill_ataio(ataio,
1139 			    1,
1140 			    adadone,
1141 			    CAM_DIR_NONE,
1142 			    0,
1143 			    NULL,
1144 			    0,
1145 			    ada_default_timeout*1000);
1146 
1147 			if (softc->flags & ADA_FLAG_CAN_48BIT)
1148 				ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
1149 			else
1150 				ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
1151 			break;
1152 		}
1153 		start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
1154 out:
1155 		start_ccb->ccb_h.ccb_bp = bp;
1156 		softc->outstanding_cmds++;
1157 		xpt_action(start_ccb);
1158 
1159 		/* May have more work to do, so ensure we stay scheduled */
1160 		adaschedule(periph);
1161 		break;
1162 	}
1163 	case ADA_STATE_WCACHE:
1164 	{
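		/*
		 * Apply the write cache policy with SET FEATURES: enable the
		 * cache unless the per-unit or global knob disables it.
		 */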
1165 		cam_fill_ataio(ataio,
1166 		    1,
1167 		    adadone,
1168 		    CAM_DIR_NONE,
1169 		    0,
1170 		    NULL,
1171 		    0,
1172 		    ada_default_timeout*1000);
1173 
1174 		ata_28bit_cmd(ataio, ATA_SETFEATURES, (softc->write_cache > 0 ||
1175 		     (softc->write_cache < 0 && ada_write_cache)) ?
1176 		    ATA_SF_ENAB_WCACHE : ATA_SF_DIS_WCACHE, 0, 0);
1177 		start_ccb->ccb_h.ccb_state = ADA_CCB_WCACHE;
1178 		xpt_action(start_ccb);
1179 		break;
1180 	}
1181 	}
1182 }
1183 
1184 static void
1185 adadone(struct cam_periph *periph, union ccb *done_ccb)
1186 {
1187 	struct ada_softc *softc;
1188 	struct ccb_ataio *ataio;
1189 
1190 	softc = (struct ada_softc *)periph->softc;
1191 	ataio = &done_ccb->ataio;
1192 	switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
1193 	case ADA_CCB_BUFFER_IO:
1194 	case ADA_CCB_TRIM:
1195 	{
1196 		struct bio *bp;
1197 
1198 		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
1199 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1200 			int error;
1201 
1202 			error = adaerror(done_ccb, 0, 0);
1203 			if (error == ERESTART) {
1204 				/* A retry was scheduled, so just return. */
1205 				return;
1206 			}
1207 			if (error != 0) {
1208 				if (error == ENXIO) {
1209 					/*
1210 					 * Catastrophic error.  Mark our pack as
1211 					 * invalid.
1212 					 */
1213 					/*
1214 					 * XXX See if this is really a media
1215 					 * XXX change first?
1216 					 */
1217 					xpt_print(periph->path,
1218 					    "Invalidating pack\n");
1219 					softc->flags |= ADA_FLAG_PACK_INVALID;
1220 				}
1221 				bp->bio_error = error;
1222 				bp->bio_resid = bp->bio_bcount;
1223 				bp->bio_flags |= BIO_ERROR;
1224 			} else {
1225 				bp->bio_resid = ataio->resid;
1226 				bp->bio_error = 0;
1227 				if (bp->bio_resid != 0)
1228 					bp->bio_flags |= BIO_ERROR;
1229 			}
1230 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1231 				cam_release_devq(done_ccb->ccb_h.path,
1232 						 /*relsim_flags*/0,
1233 						 /*reduction*/0,
1234 						 /*timeout*/0,
1235 						 /*getcount_only*/0);
1236 		} else {
1237 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1238 				panic("REQ_CMP with QFRZN");
1239 			bp->bio_resid = ataio->resid;
1240 			if (ataio->resid > 0)
1241 				bp->bio_flags |= BIO_ERROR;
1242 		}
1243 		softc->outstanding_cmds--;
1244 		if (softc->outstanding_cmds == 0)
1245 			softc->flags |= ADA_FLAG_WENT_IDLE;
1246 		if ((ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) ==
1247 		    ADA_CCB_TRIM) {
1248 			struct trim_request *req =
1249 			    (struct trim_request *)ataio->data_ptr;
1250 			int i;
1251 
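			/*
			 * Propagate the completion status to every bio that
			 * was batched into this TRIM request; bps[0] is bp
			 * itself and is completed below.
			 */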
1252 			for (i = 1; i < softc->trim_max_ranges &&
1253 			    req->bps[i]; i++) {
1254 				struct bio *bp1 = req->bps[i];
1255 
1256 				bp1->bio_resid = bp->bio_resid;
1257 				bp1->bio_error = bp->bio_error;
1258 				if (bp->bio_flags & BIO_ERROR)
1259 					bp1->bio_flags |= BIO_ERROR;
1260 				biodone(bp1);
1261 			}
1262 			softc->trim_running = 0;
1263 			biodone(bp);
1264 			adaschedule(periph);
1265 		} else
1266 			biodone(bp);
1267 		break;
1268 	}
1269 	case ADA_CCB_WCACHE:
1270 	{
1271 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1272 			if (adaerror(done_ccb, 0, 0) == ERESTART) {
1273 				return;
1274 			} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1275 				cam_release_devq(done_ccb->ccb_h.path,
1276 				    /*relsim_flags*/0,
1277 				    /*reduction*/0,
1278 				    /*timeout*/0,
1279 				    /*getcount_only*/0);
1280 			}
1281 		}
1282 
1283 		softc->state = ADA_STATE_NORMAL;
1284 		/*
1285 		 * Since our peripheral may be invalidated by an error
1286 		 * above or an external event, we must release our CCB
1287 		 * before releasing the reference on the peripheral.
1288 		 * The peripheral will only go away once the last reference
1289 		 * is removed, and we need it around for the CCB release
1290 		 * operation.
1291 		 */
1292 		xpt_release_ccb(done_ccb);
1293 		cam_release_devq(periph->path,
1294 		    RELSIM_RELEASE_RUNLEVEL, 0, CAM_RL_DEV + 1, FALSE);
1295 		adaschedule(periph);
1296 		cam_periph_release_locked(periph);
1297 		return;
1298 	}
1299 	case ADA_CCB_WAITING:
1300 	{
1301 		/* Caller will release the CCB */
1302 		wakeup(&done_ccb->ccb_h.cbfcnp);
1303 		return;
1304 	}
1305 	case ADA_CCB_DUMP:
1306 		/* No-op.  We're polling */
1307 		return;
1308 	default:
1309 		break;
1310 	}
1311 	xpt_release_ccb(done_ccb);
1312 }
1313 
1314 static int
1315 adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1316 {
1317 	struct ada_softc	  *softc;
1318 	struct cam_periph *periph;
1319 
1320 	periph = xpt_path_periph(ccb->ccb_h.path);
1321 	softc = (struct ada_softc *)periph->softc;
1322 
1323 	return(cam_periph_error(ccb, cam_flags, sense_flags, NULL));
1324 }
1325 
1326 static void
1327 adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd)
1328 {
1329 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
1330 	struct disk_params *dp = &softc->params;
1331 	u_int64_t lbasize48;
1332 	u_int32_t lbasize;
1333 
1334 	dp->secsize = ata_logical_sector_size(&cgd->ident_data);
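	/* Prefer the current CHS geometry (words 54-58) when valid. */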
1335 	if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
1336 		cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
1337 		dp->heads = cgd->ident_data.current_heads;
1338 		dp->secs_per_track = cgd->ident_data.current_sectors;
1339 		dp->cylinders = cgd->ident_data.cylinders;
1340 		dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
1341 			  ((u_int32_t)cgd->ident_data.current_size_2 << 16);
1342 	} else {
1343 		dp->heads = cgd->ident_data.heads;
1344 		dp->secs_per_track = cgd->ident_data.sectors;
1345 		dp->cylinders = cgd->ident_data.cylinders;
1346 		dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
1347 	}
1348 	lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
1349 		  ((u_int32_t)cgd->ident_data.lba_size_2 << 16);
1350 
1351 	/* use the 28bit LBA size if valid or bigger than the CHS mapping */
1352 	if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
1353 		dp->sectors = lbasize;
1354 
1355 	/* use the 48bit LBA size if valid */
1356 	lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
1357 		    ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
1358 		    ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
1359 		    ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
1360 	if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
1361 	    lbasize48 > ATA_MAX_28BIT_LBA)
1362 		dp->sectors = lbasize48;
1363 }
1364 
1365 static void
1366 adasendorderedtag(void *arg)
1367 {
1368 	struct ada_softc *softc = arg;
1369 
1370 	if (ada_send_ordered) {
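		/*
		 * If no ordered tag went out during the last interval and the
		 * device did not go idle, ask adastart() to mark the next
		 * command as ordered to prevent simple tag starvation.
		 */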
1371 		if ((softc->ordered_tag_count == 0)
1372 		 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
1373 			softc->flags |= ADA_FLAG_NEED_OTAG;
1374 		}
1375 		if (softc->outstanding_cmds > 0)
1376 			softc->flags &= ~ADA_FLAG_WENT_IDLE;
1377 
1378 		softc->ordered_tag_count = 0;
1379 	}
1380 	/* Queue us up again */
1381 	callout_reset(&softc->sendordered_c,
1382 	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
1383 	    adasendorderedtag, softc);
1384 }
1385 
1386 /*
1387  * Step through all ADA peripheral drivers, and if the device is still open,
1388  * sync the disk cache to physical media.
1389  */
1390 static void
1391 adaflush(void)
1392 {
1393 	struct cam_periph *periph;
1394 	struct ada_softc *softc;
1395 
1396 	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
1397 		union ccb ccb;
1398 
1399 		/* If we panicked with the lock held, do not recurse here. */
1400 		if (cam_periph_owned(periph))
1401 			continue;
1402 		cam_periph_lock(periph);
1403 		softc = (struct ada_softc *)periph->softc;
1404 		/*
1405 		 * We only sync the cache if the drive is still open, and
1406 		 * if the drive is capable of it..
1407 		 * if the drive is capable of it.
1408 		if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
1409 		    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
1410 			cam_periph_unlock(periph);
1411 			continue;
1412 		}
1413 
1414 		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1415 
1416 		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
1417 		cam_fill_ataio(&ccb.ataio,
1418 				    1,
1419 				    adadone,
1420 				    CAM_DIR_NONE,
1421 				    0,
1422 				    NULL,
1423 				    0,
1424 				    ada_default_timeout*1000);
1425 
1426 		if (softc->flags & ADA_FLAG_CAN_48BIT)
1427 			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
1428 		else
1429 			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
1430 		xpt_polled_action(&ccb);
1431 
1432 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
1433 			xpt_print(periph->path, "Synchronize cache failed\n");
1434 
1435 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1436 			cam_release_devq(ccb.ccb_h.path,
1437 					 /*relsim_flags*/0,
1438 					 /*reduction*/0,
1439 					 /*timeout*/0,
1440 					 /*getcount_only*/0);
1441 		cam_periph_unlock(periph);
1442 	}
1443 }
1444 
1445 static void
1446 adaspindown(uint8_t cmd, int flags)
1447 {
1448 	struct cam_periph *periph;
1449 	struct ada_softc *softc;
1450 
1451 	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
1452 		union ccb ccb;
1453 
1454 		/* If we panicked with the lock held, do not recurse here. */
1455 		if (cam_periph_owned(periph))
1456 			continue;
1457 		cam_periph_lock(periph);
1458 		softc = (struct ada_softc *)periph->softc;
1459 		/*
1460 		 * We only spin-down the drive if it is capable of it..
1461 		 * We only spin down the drive if it is capable of it.
1462 		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
1463 			cam_periph_unlock(periph);
1464 			continue;
1465 		}
1466 
1467 		if (bootverbose)
1468 			xpt_print(periph->path, "spin-down\n");
1469 
1470 		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1471 
1472 		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
1473 		cam_fill_ataio(&ccb.ataio,
1474 				    1,
1475 				    adadone,
1476 				    CAM_DIR_NONE | flags,
1477 				    0,
1478 				    NULL,
1479 				    0,
1480 				    ada_default_timeout*1000);
1481 
1482 		ata_28bit_cmd(&ccb.ataio, cmd, 0, 0, 0);
1483 		xpt_polled_action(&ccb);
1484 
1485 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
1486 			xpt_print(periph->path, "Spin-down disk failed\n");
1487 
1488 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1489 			cam_release_devq(ccb.ccb_h.path,
1490 					 /*relsim_flags*/0,
1491 					 /*reduction*/0,
1492 					 /*timeout*/0,
1493 					 /*getcount_only*/0);
1494 		cam_periph_unlock(periph);
1495 	}
1496 }
1497 
1498 static void
1499 adashutdown(void *arg, int howto)
1500 {
1501 
1502 	adaflush();
1503 	if (ada_spindown_shutdown != 0 &&
1504 	    (howto & (RB_HALT | RB_POWEROFF)) != 0)
1505 		adaspindown(ATA_STANDBY_IMMEDIATE, 0);
1506 }
1507 
1508 static void
1509 adasuspend(void *arg)
1510 {
1511 
1512 	adaflush();
1513 	if (ada_spindown_suspend != 0)
1514 		adaspindown(ATA_SLEEP, CAM_DEV_QFREEZE);
1515 }
1516 
1517 static void
1518 adaresume(void *arg)
1519 {
1520 	struct cam_periph *periph;
1521 	struct ada_softc *softc;
1522 
1523 	if (ada_spindown_suspend == 0)
1524 		return;
1525 
1526 	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
1527 		cam_periph_lock(periph);
1528 		softc = (struct ada_softc *)periph->softc;
1529 		/*
1530 		 * We only spun down drives capable of power management.
1531 		 */
1532 		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
1533 			cam_periph_unlock(periph);
1534 			continue;
1535 		}
1536 
1537 		if (bootverbose)
1538 			xpt_print(periph->path, "resume\n");
1539 
1540 		/*
1541 		 * Drop freeze taken due to CAM_DEV_QFREEZE flag set on
1542 		 * sleep request.
1543 		 */
1544 		cam_release_devq(periph->path,
1545 			 /*relsim_flags*/0,
1546 			 /*openings*/0,
1547 			 /*timeout*/0,
1548 			 /*getcount_only*/0);
1549 
1550 		cam_periph_unlock(periph);
1551 	}
1552 }
1553 
1554 #endif /* _KERNEL */
1555