xref: /freebsd/sys/cam/ata/ata_da.c (revision d2ce15bd43b3a1dcce08eecbff8d5d359946d972)
1 /*-
2  * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_ada.h"
31 
32 #include <sys/param.h>
33 
34 #ifdef _KERNEL
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/bio.h>
38 #include <sys/sysctl.h>
39 #include <sys/taskqueue.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/conf.h>
43 #include <sys/devicestat.h>
44 #include <sys/eventhandler.h>
45 #include <sys/malloc.h>
46 #include <sys/cons.h>
47 #include <sys/reboot.h>
48 #include <geom/geom_disk.h>
49 #endif /* _KERNEL */
50 
51 #ifndef _KERNEL
52 #include <stdio.h>
53 #include <string.h>
54 #endif /* _KERNEL */
55 
56 #include <cam/cam.h>
57 #include <cam/cam_ccb.h>
58 #include <cam/cam_periph.h>
59 #include <cam/cam_xpt_periph.h>
60 #include <cam/cam_sim.h>
61 
62 #include <cam/ata/ata_all.h>
63 
64 #include <machine/md_var.h>	/* geometry translation */
65 
66 #ifdef _KERNEL
67 
68 #define ATA_MAX_28BIT_LBA               268435455UL
69 
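/*
 * A freshly attached (or reset) device passes through the RAHEAD and WCACHE
 * states, in which SETFEATURES commands apply the configured read-ahead and
 * write-cache settings, before settling into NORMAL operation.
 */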
70 typedef enum {
71 	ADA_STATE_RAHEAD,
72 	ADA_STATE_WCACHE,
73 	ADA_STATE_NORMAL
74 } ada_state;
75 
76 typedef enum {
77 	ADA_FLAG_PACK_INVALID	= 0x0001,
78 	ADA_FLAG_CAN_48BIT	= 0x0002,
79 	ADA_FLAG_CAN_FLUSHCACHE	= 0x0004,
80 	ADA_FLAG_CAN_NCQ	= 0x0008,
81 	ADA_FLAG_CAN_DMA	= 0x0010,
82 	ADA_FLAG_NEED_OTAG	= 0x0020,
83 	ADA_FLAG_WENT_IDLE	= 0x0040,
84 	ADA_FLAG_CAN_TRIM	= 0x0080,
85 	ADA_FLAG_OPEN		= 0x0100,
86 	ADA_FLAG_SCTX_INIT	= 0x0200,
87 	ADA_FLAG_CAN_CFA        = 0x0400,
88 	ADA_FLAG_CAN_POWERMGT   = 0x0800,
89 	ADA_FLAG_CAN_DMA48	= 0x1000
90 } ada_flags;
91 
92 typedef enum {
93 	ADA_Q_NONE		= 0x00,
94 	ADA_Q_4K		= 0x01,
95 } ada_quirks;
96 
97 typedef enum {
98 	ADA_CCB_RAHEAD		= 0x01,
99 	ADA_CCB_WCACHE		= 0x02,
100 	ADA_CCB_BUFFER_IO	= 0x03,
101 	ADA_CCB_WAITING		= 0x04,
102 	ADA_CCB_DUMP		= 0x05,
103 	ADA_CCB_TRIM		= 0x06,
104 	ADA_CCB_TYPE_MASK	= 0x0F,
105 } ada_ccb_state;
106 
107 /* Offsets into our private area for storing information */
108 #define ccb_state	ppriv_field0
109 #define ccb_bp		ppriv_ptr1
110 
111 struct disk_params {
112 	u_int8_t  heads;
113 	u_int8_t  secs_per_track;
114 	u_int32_t cylinders;
115 	u_int32_t secsize;	/* Number of bytes/logical sector */
116 	u_int64_t sectors;	/* Total number sectors */
117 };
118 
119 #define TRIM_MAX_BLOCKS	8
120 #define TRIM_MAX_RANGES	(TRIM_MAX_BLOCKS * 64)
121 #define TRIM_MAX_BIOS	(TRIM_MAX_RANGES * 4)
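/*
 * Each DSM TRIM range is encoded in 8 bytes (a 48-bit LBA plus a 16-bit
 * sector count), so a 512-byte block holds 64 ranges and TRIM_MAX_BLOCKS
 * blocks give a 4096-byte payload.  TRIM_MAX_BIOS bounds how many delete
 * bios may be coalesced into a single TRIM request.
 */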
122 struct trim_request {
123 	uint8_t		data[TRIM_MAX_RANGES * 8];
124 	struct bio	*bps[TRIM_MAX_BIOS];
125 };
126 
127 struct ada_softc {
128 	struct	 bio_queue_head bio_queue;
129 	struct	 bio_queue_head trim_queue;
130 	ada_state state;
131 	ada_flags flags;
132 	ada_quirks quirks;
133 	int	 sort_io_queue;
134 	int	 ordered_tag_count;
135 	int	 outstanding_cmds;
136 	int	 trim_max_ranges;
137 	int	 trim_running;
138 	int	 read_ahead;
139 	int	 write_cache;
140 #ifdef ADA_TEST_FAILURE
141 	int      force_read_error;
142 	int      force_write_error;
143 	int      periodic_read_error;
144 	int      periodic_read_count;
145 #endif
146 	struct	 disk_params params;
147 	struct	 disk *disk;
148 	struct task		sysctl_task;
149 	struct sysctl_ctx_list	sysctl_ctx;
150 	struct sysctl_oid	*sysctl_tree;
151 	struct callout		sendordered_c;
152 	struct trim_request	trim_req;
153 };
154 
155 struct ada_quirk_entry {
156 	struct scsi_inquiry_pattern inq_pat;
157 	ada_quirks quirks;
158 };
159 
160 static struct ada_quirk_entry ada_quirk_table[] =
161 {
162 	{
163 		/* Hitachi Advanced Format (4k) drives */
164 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Hitachi H??????????E3*", "*" },
165 		/*quirks*/ADA_Q_4K
166 	},
167 	{
168 		/* Samsung Advanced Format (4k) drives */
169 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD155UI*", "*" },
170 		/*quirks*/ADA_Q_4K
171 	},
172 	{
173 		/* Samsung Advanced Format (4k) drives */
174 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD204UI*", "*" },
175 		/*quirks*/ADA_Q_4K
176 	},
177 	{
178 		/* Seagate Barracuda Green Advanced Format (4k) drives */
179 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DL*", "*" },
180 		/*quirks*/ADA_Q_4K
181 	},
182 	{
183 		/* Seagate Barracuda Advanced Format (4k) drives */
184 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???DM*", "*" },
185 		/*quirks*/ADA_Q_4K
186 	},
187 	{
188 		/* Seagate Barracuda Advanced Format (4k) drives */
189 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DM*", "*" },
190 		/*quirks*/ADA_Q_4K
191 	},
192 	{
193 		/* Seagate Momentus Advanced Format (4k) drives */
194 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500423AS*", "*" },
195 		/*quirks*/ADA_Q_4K
196 	},
197 	{
198 		/* Seagate Momentus Advanced Format (4k) drives */
199 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500424AS*", "*" },
200 		/*quirks*/ADA_Q_4K
201 	},
202 	{
203 		/* Seagate Momentus Advanced Format (4k) drives */
204 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640423AS*", "*" },
205 		/*quirks*/ADA_Q_4K
206 	},
207 	{
208 		/* Seagate Momentus Advanced Format (4k) drives */
209 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640424AS*", "*" },
210 		/*quirks*/ADA_Q_4K
211 	},
212 	{
213 		/* Seagate Momentus Advanced Format (4k) drives */
214 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750420AS*", "*" },
215 		/*quirks*/ADA_Q_4K
216 	},
217 	{
218 		/* Seagate Momentus Advanced Format (4k) drives */
219 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750422AS*", "*" },
220 		/*quirks*/ADA_Q_4K
221 	},
222 	{
223 		/* Seagate Momentus Advanced Format (4k) drives */
224 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750423AS*", "*" },
225 		/*quirks*/ADA_Q_4K
226 	},
227 	{
228 		/* Seagate Momentus Thin Advanced Format (4k) drives */
229 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???LT*", "*" },
230 		/*quirks*/ADA_Q_4K
231 	},
232 	{
233 		/* WDC Caviar Green Advanced Format (4k) drives */
234 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RS*", "*" },
235 		/*quirks*/ADA_Q_4K
236 	},
237 	{
238 		/* WDC Caviar Green Advanced Format (4k) drives */
239 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RX*", "*" },
240 		/*quirks*/ADA_Q_4K
241 	},
242 	{
243 		/* WDC Caviar Green Advanced Format (4k) drives */
244 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RS*", "*" },
245 		/*quirks*/ADA_Q_4K
246 	},
247 	{
248 		/* WDC Caviar Green Advanced Format (4k) drives */
249 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RX*", "*" },
250 		/*quirks*/ADA_Q_4K
251 	},
252 	{
253 		/* WDC Scorpio Black Advanced Format (4k) drives */
254 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PKT*", "*" },
255 		/*quirks*/ADA_Q_4K
256 	},
257 	{
258 		/* WDC Scorpio Black Advanced Format (4k) drives */
259 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PKT*", "*" },
260 		/*quirks*/ADA_Q_4K
261 	},
262 	{
263 		/* WDC Scorpio Blue Advanced Format (4k) drives */
264 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PVT*", "*" },
265 		/*quirks*/ADA_Q_4K
266 	},
267 	{
268 		/* WDC Scorpio Blue Advanced Format (4k) drives */
269 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PVT*", "*" },
270 		/*quirks*/ADA_Q_4K
271 	},
272 	{
273 		/*
274 		 * Corsair Force 2 SSDs
275 		 * 4k optimised & trim only works in 4k requests + 4k aligned
276 		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
277 		 * PR: 169974
278 		 */
279 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair CSSD-F*", "*" },
280 		/*quirks*/ADA_Q_4K
281 	},
282 	{
283 		/*
284 		 * Corsair Force 3 SSDs
285 		 * 4k optimised & trim only works in 4k requests + 4k aligned
286 		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
287 		 * PR: 169974
288 		 */
289 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force 3*", "*" },
290 		/*quirks*/ADA_Q_4K
291 	},
292 	{
293 		/*
294 		 * OCZ Agility 3 SSDs
295 		 * 4k optimised & trim only works in 4k requests + 4k aligned
296 		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
297 		 * PR: 169974
298 		 */
299 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY3*", "*" },
300 		/*quirks*/ADA_Q_4K
301 	},
302 	{
303 		/*
304 		 * OCZ Vertex 2 SSDs (inc pro series)
305 		 * 4k optimised & trim only works in 4k requests + 4k aligned
306 		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
307 		 * PR: 169974
308 		 */
309 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ?VERTEX2*", "*" },
310 		/*quirks*/ADA_Q_4K
311 	},
312 	{
313 		/*
314 		 * OCZ Vertex 3 SSDs
315 		 * 4k optimised & trim only works in 4k requests + 4k aligned
316 		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
317 		 * PR: 169974
318 		 */
319 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX3*", "*" },
320 		/*quirks*/ADA_Q_4K
321 	},
322 	{
323 		/*
324 		 * SuperTalent TeraDrive CT SSDs
325 		 * 4k optimised & trim only works in 4k requests + 4k aligned
326 		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
327 		 * PR: 169974
328 		 */
329 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "FTM??CT25H*", "*" },
330 		/*quirks*/ADA_Q_4K
331 	},
332 	{
333 		/*
334 		 * Crucial RealSSD C300 SSDs
335 		 * 4k optimised
336 		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
337 		 * PR: 169974
338 		 */
339 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "C300-CTFDDAC???MAG*",
340 		"*" }, /*quirks*/ADA_Q_4K
341 	},
342 	{
343 		/*
344 		 * XceedIOPS SATA SSDs
345 		 * 4k optimised
346 		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
347 		 * PR: 169974
348 		 */
349 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SG9XCS2D*", "*" },
350 		/*quirks*/ADA_Q_4K
351 	},
352 	{
353 		/*
354 		 * Intel 330 Series SSDs
355 		 * 4k optimised & trim only works in 4k requests + 4k aligned
356 		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
357 		 * PR: 169974
358 		 */
359 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2ct*", "*" },
360 		/*quirks*/ADA_Q_4K
361 	},
362 	{
363 		/*
364 		 * OCZ Deneva R Series SSDs
365 		 * 4k optimised & trim only works in 4k requests + 4k aligned
366 		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
367 		 * PR: 169974
368 		 */
369 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "DENRSTE251M45*", "*" },
370 		/*quirks*/ADA_Q_4K
371 	},
372 	{
373 		/*
374 		 * Kingston HyperX 3k SSDs
375 		 * 4k optimised & trim only works in 4k requests + 4k aligned
376 		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
377 		 * PR: 169974
378 		 */
379 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SH103S3*", "*" },
380 		/*quirks*/ADA_Q_4K
381 	},
382 	{
383 		/* Default */
384 		{
385 		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
386 		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
387 		},
388 		/*quirks*/0
389 	},
390 };
391 
392 static	disk_strategy_t	adastrategy;
393 static	dumper_t	adadump;
394 static	periph_init_t	adainit;
395 static	void		adaasync(void *callback_arg, u_int32_t code,
396 				struct cam_path *path, void *arg);
397 static	void		adasysctlinit(void *context, int pending);
398 static	periph_ctor_t	adaregister;
399 static	periph_dtor_t	adacleanup;
400 static	periph_start_t	adastart;
401 static	periph_oninv_t	adaoninvalidate;
402 static	void		adadone(struct cam_periph *periph,
403 			       union ccb *done_ccb);
404 static  int		adaerror(union ccb *ccb, u_int32_t cam_flags,
405 				u_int32_t sense_flags);
406 static void		adagetparams(struct cam_periph *periph,
407 				struct ccb_getdev *cgd);
408 static timeout_t	adasendorderedtag;
409 static void		adashutdown(void *arg, int howto);
410 static void		adasuspend(void *arg);
411 static void		adaresume(void *arg);
412 
413 #ifndef	ADA_DEFAULT_LEGACY_ALIASES
414 #define	ADA_DEFAULT_LEGACY_ALIASES	1
415 #endif
416 
417 #ifndef ADA_DEFAULT_TIMEOUT
418 #define ADA_DEFAULT_TIMEOUT 30	/* Timeout in seconds */
419 #endif
420 
421 #ifndef	ADA_DEFAULT_RETRY
422 #define	ADA_DEFAULT_RETRY	4
423 #endif
424 
425 #ifndef	ADA_DEFAULT_SEND_ORDERED
426 #define	ADA_DEFAULT_SEND_ORDERED	1
427 #endif
428 
429 #ifndef	ADA_DEFAULT_SPINDOWN_SHUTDOWN
430 #define	ADA_DEFAULT_SPINDOWN_SHUTDOWN	1
431 #endif
432 
433 #ifndef	ADA_DEFAULT_SPINDOWN_SUSPEND
434 #define	ADA_DEFAULT_SPINDOWN_SUSPEND	1
435 #endif
436 
437 #ifndef	ADA_DEFAULT_READ_AHEAD
438 #define	ADA_DEFAULT_READ_AHEAD	1
439 #endif
440 
441 #ifndef	ADA_DEFAULT_WRITE_CACHE
442 #define	ADA_DEFAULT_WRITE_CACHE	1
443 #endif
444 
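/*
 * The per-device read_ahead, write_cache and sort_io_queue values default
 * to -1 ("not set"), in which case the corresponding global kern.cam
 * tunable is used instead.
 */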
445 #define	ADA_RA	(softc->read_ahead >= 0 ? \
446 		 softc->read_ahead : ada_read_ahead)
447 #define	ADA_WC	(softc->write_cache >= 0 ? \
448 		 softc->write_cache : ada_write_cache)
449 #define	ADA_SIO	(softc->sort_io_queue >= 0 ? \
450 		 softc->sort_io_queue : cam_sort_io_queues)
451 
452 /*
453  * Most platforms map firmware geometry to the actual device geometry, but
454  * some don't.  If not overridden, default to doing nothing.
455  */
456 #ifndef ata_disk_firmware_geom_adjust
457 #define	ata_disk_firmware_geom_adjust(disk)
458 #endif
459 
460 static int ada_legacy_aliases = ADA_DEFAULT_LEGACY_ALIASES;
461 static int ada_retry_count = ADA_DEFAULT_RETRY;
462 static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
463 static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;
464 static int ada_spindown_shutdown = ADA_DEFAULT_SPINDOWN_SHUTDOWN;
465 static int ada_spindown_suspend = ADA_DEFAULT_SPINDOWN_SUSPEND;
466 static int ada_read_ahead = ADA_DEFAULT_READ_AHEAD;
467 static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE;
468 
469 static SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
470             "CAM Direct Access Disk driver");
471 SYSCTL_INT(_kern_cam_ada, OID_AUTO, legacy_aliases, CTLFLAG_RW,
472            &ada_legacy_aliases, 0, "Create legacy-like device aliases");
473 TUNABLE_INT("kern.cam.ada.legacy_aliases", &ada_legacy_aliases);
474 SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
475            &ada_retry_count, 0, "Normal I/O retry count");
476 TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
477 SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
478            &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
479 TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
480 SYSCTL_INT(_kern_cam_ada, OID_AUTO, send_ordered, CTLFLAG_RW,
481            &ada_send_ordered, 0, "Send Ordered Tags");
482 TUNABLE_INT("kern.cam.ada.send_ordered", &ada_send_ordered);
483 SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RW,
484            &ada_spindown_shutdown, 0, "Spin down upon shutdown");
485 TUNABLE_INT("kern.cam.ada.spindown_shutdown", &ada_spindown_shutdown);
486 SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RW,
487            &ada_spindown_suspend, 0, "Spin down upon suspend");
488 TUNABLE_INT("kern.cam.ada.spindown_suspend", &ada_spindown_suspend);
489 SYSCTL_INT(_kern_cam_ada, OID_AUTO, read_ahead, CTLFLAG_RW,
490            &ada_read_ahead, 0, "Enable disk read-ahead");
491 TUNABLE_INT("kern.cam.ada.read_ahead", &ada_read_ahead);
492 SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RW,
493            &ada_write_cache, 0, "Enable disk write cache");
494 TUNABLE_INT("kern.cam.ada.write_cache", &ada_write_cache);
495 
496 /*
497  * ADA_ORDEREDTAG_INTERVAL determines how often, relative
498  * to the default timeout, we check to see whether an ordered
499  * tagged transaction is appropriate to prevent simple tag
500  * starvation.  Since we'd like to ensure that there is at least
501  * 1/2 of the timeout length left for a starved transaction to
502  * complete after we've sent an ordered tag, we must poll at least
503  * four times in every timeout period.  This covers the worst case,
504  * where a starved transaction starts during an interval that still
505  * passes the "don't send an ordered tag" test, so it takes us two
506  * intervals to determine that a tag must be sent.
507  */
508 #ifndef ADA_ORDEREDTAG_INTERVAL
509 #define ADA_ORDEREDTAG_INTERVAL 4
510 #endif
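/*
 * With the 30 second default timeout and an interval of 4, the ordered tag
 * callout fires every (ada_default_timeout * hz) / 4 ticks, i.e. roughly
 * every 7.5 seconds.
 */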
511 
512 static struct periph_driver adadriver =
513 {
514 	adainit, "ada",
515 	TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
516 };
517 
518 PERIPHDRIVER_DECLARE(ada, adadriver);
519 
520 static MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");
521 
522 static int
523 adaopen(struct disk *dp)
524 {
525 	struct cam_periph *periph;
526 	struct ada_softc *softc;
527 	int error;
528 
529 	periph = (struct cam_periph *)dp->d_drv1;
530 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
531 		return(ENXIO);
532 	}
533 
534 	cam_periph_lock(periph);
535 	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
536 		cam_periph_unlock(periph);
537 		cam_periph_release(periph);
538 		return (error);
539 	}
540 
541 	softc = (struct ada_softc *)periph->softc;
542 	softc->flags |= ADA_FLAG_OPEN;
543 
544 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
545 	    ("adaopen\n"));
546 
547 	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
548 		/* Invalidate our pack information. */
549 		softc->flags &= ~ADA_FLAG_PACK_INVALID;
550 	}
551 
552 	cam_periph_unhold(periph);
553 	cam_periph_unlock(periph);
554 	return (0);
555 }
556 
557 static int
558 adaclose(struct disk *dp)
559 {
560 	struct	cam_periph *periph;
561 	struct	ada_softc *softc;
562 	union ccb *ccb;
563 
564 	periph = (struct cam_periph *)dp->d_drv1;
565 	cam_periph_lock(periph);
566 	if (cam_periph_hold(periph, PRIBIO) != 0) {
567 		cam_periph_unlock(periph);
568 		cam_periph_release(periph);
569 		return (0);
570 	}
571 
572 	softc = (struct ada_softc *)periph->softc;
573 
574 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
575 	    ("adaclose\n"));
576 
577 	/* We only sync the cache if the drive is capable of it. */
578 	if ((softc->flags & ADA_FLAG_CAN_FLUSHCACHE) != 0 &&
579 	    (softc->flags & ADA_FLAG_PACK_INVALID) == 0) {
580 
581 		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
582 		cam_fill_ataio(&ccb->ataio,
583 				    1,
584 				    adadone,
585 				    CAM_DIR_NONE,
586 				    0,
587 				    NULL,
588 				    0,
589 				    ada_default_timeout*1000);
590 
591 		if (softc->flags & ADA_FLAG_CAN_48BIT)
592 			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
593 		else
594 			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
595 		cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
596 		    /*sense_flags*/0, softc->disk->d_devstat);
597 
598 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
599 			xpt_print(periph->path, "Synchronize cache failed\n");
600 		xpt_release_ccb(ccb);
601 	}
602 
603 	softc->flags &= ~ADA_FLAG_OPEN;
604 	cam_periph_unhold(periph);
605 	cam_periph_unlock(periph);
606 	cam_periph_release(periph);
607 	return (0);
608 }
609 
610 static void
611 adaschedule(struct cam_periph *periph)
612 {
613 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
614 	uint32_t prio;
615 
616 	if (softc->state != ADA_STATE_NORMAL)
617 		return;
618 
619 	/* Check if cam_periph_getccb() was called. */
620 	prio = periph->immediate_priority;
621 
622 	/* Check if we have more work to do. */
623 	if (bioq_first(&softc->bio_queue) ||
624 	    (!softc->trim_running && bioq_first(&softc->trim_queue))) {
625 		prio = CAM_PRIORITY_NORMAL;
626 	}
627 
628 	/* Schedule the CCB if either of the above is true. */
629 	if (prio != CAM_PRIORITY_NONE)
630 		xpt_schedule(periph, prio);
631 }
632 
633 /*
634  * Actually translate the requested transfer into one that the physical
635  * driver can understand.  The transfer is described by a bio and will
636  * include only one physical transfer.
637  */
638 static void
639 adastrategy(struct bio *bp)
640 {
641 	struct cam_periph *periph;
642 	struct ada_softc *softc;
643 
644 	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
645 	softc = (struct ada_softc *)periph->softc;
646 
647 	cam_periph_lock(periph);
648 
649 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastrategy(%p)\n", bp));
650 
651 	/*
652 	 * If the device has been made invalid, error out
653 	 */
654 	if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
655 		cam_periph_unlock(periph);
656 		biofinish(bp, NULL, ENXIO);
657 		return;
658 	}
659 
660 	/*
661 	 * Place it in the queue of disk activities for this disk
662 	 */
663 	if (bp->bio_cmd == BIO_DELETE &&
664 	    (softc->flags & ADA_FLAG_CAN_TRIM)) {
665 		if (ADA_SIO)
666 		    bioq_disksort(&softc->trim_queue, bp);
667 		else
668 		    bioq_insert_tail(&softc->trim_queue, bp);
669 	} else {
670 		if (ADA_SIO)
671 		    bioq_disksort(&softc->bio_queue, bp);
672 		else
673 		    bioq_insert_tail(&softc->bio_queue, bp);
674 	}
675 
676 	/*
677 	 * Schedule ourselves for performing the work.
678 	 */
679 	adaschedule(periph);
680 	cam_periph_unlock(periph);
681 
682 	return;
683 }
684 
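/*
 * Kernel dump handler.  Runs in polled mode: a request with a non-zero
 * length is written out as a single WRITE_DMA(48), while a zero-length
 * request flushes the write cache if the drive supports it.
 */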
685 static int
686 adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
687 {
688 	struct	    cam_periph *periph;
689 	struct	    ada_softc *softc;
690 	u_int	    secsize;
691 	union	    ccb ccb;
692 	struct	    disk *dp;
693 	uint64_t    lba;
694 	uint16_t    count;
695 	int	    error = 0;
696 
697 	dp = arg;
698 	periph = dp->d_drv1;
699 	softc = (struct ada_softc *)periph->softc;
700 	cam_periph_lock(periph);
701 	secsize = softc->params.secsize;
702 	lba = offset / secsize;
703 	count = length / secsize;
704 
705 	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
706 		cam_periph_unlock(periph);
707 		return (ENXIO);
708 	}
709 
710 	if (length > 0) {
711 		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
712 		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
713 		cam_fill_ataio(&ccb.ataio,
714 		    0,
715 		    adadone,
716 		    CAM_DIR_OUT,
717 		    0,
718 		    (u_int8_t *) virtual,
719 		    length,
720 		    ada_default_timeout*1000);
721 		if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
722 		    (lba + count >= ATA_MAX_28BIT_LBA ||
723 		    count >= 256)) {
724 			ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
725 			    0, lba, count);
726 		} else {
727 			ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
728 			    0, lba, count);
729 		}
730 		xpt_polled_action(&ccb);
731 
732 		error = cam_periph_error(&ccb,
733 		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
734 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
735 			cam_release_devq(ccb.ccb_h.path, /*relsim_flags*/0,
736 			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
737 		if (error != 0)
738 			printf("Aborting dump due to I/O error.\n");
739 
740 		cam_periph_unlock(periph);
741 		return (error);
742 	}
743 
744 	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
745 		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
746 
747 		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
748 		cam_fill_ataio(&ccb.ataio,
749 				    0,
750 				    adadone,
751 				    CAM_DIR_NONE,
752 				    0,
753 				    NULL,
754 				    0,
755 				    ada_default_timeout*1000);
756 
757 		if (softc->flags & ADA_FLAG_CAN_48BIT)
758 			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
759 		else
760 			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
761 		xpt_polled_action(&ccb);
762 
763 		error = cam_periph_error(&ccb,
764 		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
765 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
766 			cam_release_devq(ccb.ccb_h.path, /*relsim_flags*/0,
767 			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
768 		if (error != 0)
769 			xpt_print(periph->path, "Synchronize cache failed\n");
770 	}
771 	cam_periph_unlock(periph);
772 	return (error);
773 }
774 
775 static void
776 adainit(void)
777 {
778 	cam_status status;
779 
780 	/*
781 	 * Install a global async callback.  This callback will
782 	 * receive async events such as "new device found".
783 	 */
784 	status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);
785 
786 	if (status != CAM_REQ_CMP) {
787 		printf("ada: Failed to attach master async callback "
788 		       "due to status 0x%x!\n", status);
789 	} else if (ada_send_ordered) {
790 
791 		/* Register our event handlers */
792 		if ((EVENTHANDLER_REGISTER(power_suspend, adasuspend,
793 					   NULL, EVENTHANDLER_PRI_LAST)) == NULL)
794 		    printf("adainit: power event registration failed!\n");
795 		if ((EVENTHANDLER_REGISTER(power_resume, adaresume,
796 					   NULL, EVENTHANDLER_PRI_LAST)) == NULL)
797 		    printf("adainit: power event registration failed!\n");
798 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
799 					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
800 		    printf("adainit: shutdown event registration failed!\n");
801 	}
802 }
803 
804 static void
805 adaoninvalidate(struct cam_periph *periph)
806 {
807 	struct ada_softc *softc;
808 
809 	softc = (struct ada_softc *)periph->softc;
810 
811 	/*
812 	 * De-register any async callbacks.
813 	 */
814 	xpt_register_async(0, adaasync, periph, periph->path);
815 
816 	softc->flags |= ADA_FLAG_PACK_INVALID;
817 
818 	/*
819 	 * Return all queued I/O with ENXIO.
820 	 * XXX Handle any transactions queued to the card
821 	 *     with XPT_ABORT_CCB.
822 	 */
823 	bioq_flush(&softc->bio_queue, NULL, ENXIO);
824 	bioq_flush(&softc->trim_queue, NULL, ENXIO);
825 
826 	disk_gone(softc->disk);
827 	xpt_print(periph->path, "lost device\n");
828 }
829 
830 static void
831 adacleanup(struct cam_periph *periph)
832 {
833 	struct ada_softc *softc;
834 
835 	softc = (struct ada_softc *)periph->softc;
836 
837 	xpt_print(periph->path, "removing device entry\n");
838 	cam_periph_unlock(periph);
839 
840 	/*
841 	 * If we can't free the sysctl tree, oh well...
842 	 */
843 	if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
844 	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
845 		xpt_print(periph->path, "can't remove sysctl context\n");
846 	}
847 
848 	disk_destroy(softc->disk);
849 	callout_drain(&softc->sendordered_c);
850 	free(softc, M_DEVBUF);
851 	cam_periph_lock(periph);
852 }
853 
854 static void
855 adaasync(void *callback_arg, u_int32_t code,
856 	struct cam_path *path, void *arg)
857 {
858 	struct ccb_getdev cgd;
859 	struct cam_periph *periph;
860 	struct ada_softc *softc;
861 
862 	periph = (struct cam_periph *)callback_arg;
863 	switch (code) {
864 	case AC_FOUND_DEVICE:
865 	{
866 		struct ccb_getdev *cgd;
867 		cam_status status;
868 
869 		cgd = (struct ccb_getdev *)arg;
870 		if (cgd == NULL)
871 			break;
872 
873 		if (cgd->protocol != PROTO_ATA)
874 			break;
875 
876 		/*
877 		 * Allocate a peripheral instance for
878 		 * this device and start the probe
879 		 * process.
880 		 */
881 		status = cam_periph_alloc(adaregister, adaoninvalidate,
882 					  adacleanup, adastart,
883 					  "ada", CAM_PERIPH_BIO,
884 					  cgd->ccb_h.path, adaasync,
885 					  AC_FOUND_DEVICE, cgd);
886 
887 		if (status != CAM_REQ_CMP
888 		 && status != CAM_REQ_INPROG)
889 			printf("adaasync: Unable to attach to new device "
890 				"due to status 0x%x\n", status);
891 		break;
892 	}
893 	case AC_GETDEV_CHANGED:
894 	{
895 		softc = (struct ada_softc *)periph->softc;
896 		xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
897 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
898 		xpt_action((union ccb *)&cgd);
899 
900 		if ((cgd.ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
901 		    (cgd.inq_flags & SID_DMA))
902 			softc->flags |= ADA_FLAG_CAN_DMA;
903 		else
904 			softc->flags &= ~ADA_FLAG_CAN_DMA;
905 		if (cgd.ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
906 			softc->flags |= ADA_FLAG_CAN_48BIT;
907 			if (cgd.inq_flags & SID_DMA48)
908 				softc->flags |= ADA_FLAG_CAN_DMA48;
909 			else
910 				softc->flags &= ~ADA_FLAG_CAN_DMA48;
911 		} else
912 			softc->flags &= ~(ADA_FLAG_CAN_48BIT |
913 			    ADA_FLAG_CAN_DMA48);
914 		if ((cgd.ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
915 		    (cgd.inq_flags & SID_DMA) && (cgd.inq_flags & SID_CmdQue))
916 			softc->flags |= ADA_FLAG_CAN_NCQ;
917 		else
918 			softc->flags &= ~ADA_FLAG_CAN_NCQ;
919 		if ((cgd.ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
920 		    (cgd.inq_flags & SID_DMA))
921 			softc->flags |= ADA_FLAG_CAN_TRIM;
922 		else
923 			softc->flags &= ~ADA_FLAG_CAN_TRIM;
924 
925 		cam_periph_async(periph, code, path, arg);
926 		break;
927 	}
928 	case AC_ADVINFO_CHANGED:
929 	{
930 		uintptr_t buftype;
931 
932 		buftype = (uintptr_t)arg;
933 		if (buftype == CDAI_TYPE_PHYS_PATH) {
934 			struct ada_softc *softc;
935 
936 			softc = periph->softc;
937 			disk_attr_changed(softc->disk, "GEOM::physpath",
938 					  M_NOWAIT);
939 		}
940 		break;
941 	}
942 	case AC_SENT_BDR:
943 	case AC_BUS_RESET:
944 	{
945 		softc = (struct ada_softc *)periph->softc;
946 		cam_periph_async(periph, code, path, arg);
947 		if (softc->state != ADA_STATE_NORMAL)
948 			break;
949 		xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
950 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
951 		xpt_action((union ccb *)&cgd);
952 		if (ADA_RA >= 0 &&
953 		    cgd.ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD)
954 			softc->state = ADA_STATE_RAHEAD;
955 		else if (ADA_WC >= 0 &&
956 		    cgd.ident_data.support.command1 & ATA_SUPPORT_WRITECACHE)
957 			softc->state = ADA_STATE_WCACHE;
958 		else
959 		    break;
960 		cam_periph_acquire(periph);
961 		cam_freeze_devq_arg(periph->path,
962 		    RELSIM_RELEASE_RUNLEVEL, CAM_RL_DEV + 1);
963 		xpt_schedule(periph, CAM_PRIORITY_DEV);
964 	}
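	/*
	 * FALLTHROUGH: when a cache-setup cycle was scheduled above, control
	 * continues into the default case and cam_periph_async() is called a
	 * second time for the event.
	 */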
965 	default:
966 		cam_periph_async(periph, code, path, arg);
967 		break;
968 	}
969 }
970 
971 static void
972 adasysctlinit(void *context, int pending)
973 {
974 	struct cam_periph *periph;
975 	struct ada_softc *softc;
976 	char tmpstr[80], tmpstr2[80];
977 
978 	periph = (struct cam_periph *)context;
979 
980 	/* periph was held for us when this task was enqueued */
981 	if (periph->flags & CAM_PERIPH_INVALID) {
982 		cam_periph_release(periph);
983 		return;
984 	}
985 
986 	softc = (struct ada_softc *)periph->softc;
987 	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
988 	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
989 
990 	sysctl_ctx_init(&softc->sysctl_ctx);
991 	softc->flags |= ADA_FLAG_SCTX_INIT;
992 	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
993 		SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
994 		CTLFLAG_RD, 0, tmpstr);
995 	if (softc->sysctl_tree == NULL) {
996 		printf("adasysctlinit: unable to allocate sysctl tree\n");
997 		cam_periph_release(periph);
998 		return;
999 	}
1000 
1001 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1002 		OID_AUTO, "read_ahead", CTLFLAG_RW | CTLFLAG_MPSAFE,
1003 		&softc->read_ahead, 0, "Enable disk read ahead.");
1004 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1005 		OID_AUTO, "write_cache", CTLFLAG_RW | CTLFLAG_MPSAFE,
1006 		&softc->write_cache, 0, "Enable disk write cache.");
1007 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1008 		OID_AUTO, "sort_io_queue", CTLFLAG_RW | CTLFLAG_MPSAFE,
1009 		&softc->sort_io_queue, 0,
1010 		"Sort IO queue to try and optimise disk access patterns");
1011 #ifdef ADA_TEST_FAILURE
1012 	/*
1013 	 * Add a 'door bell' sysctl which allows one to set it from userland
1014 	 * and cause something bad to happen.  For the moment, we only allow
1015 	 * whacking the next read or write.
1016 	 */
1017 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1018 		OID_AUTO, "force_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1019 		&softc->force_read_error, 0,
1020 		"Force a read error for the next N reads.");
1021 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1022 		OID_AUTO, "force_write_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1023 		&softc->force_write_error, 0,
1024 		"Force a write error for the next N writes.");
1025 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1026 		OID_AUTO, "periodic_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1027 		&softc->periodic_read_error, 0,
1028 		"Force a read error every N reads (don't set too low).");
1029 #endif
1030 	cam_periph_release(periph);
1031 }
1032 
1033 static int
1034 adagetattr(struct bio *bp)
1035 {
1036 	int ret;
1037 	struct cam_periph *periph;
1038 
1039 	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1040 	cam_periph_lock(periph);
1041 	ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
1042 	    periph->path);
1043 	cam_periph_unlock(periph);
1044 	if (ret == 0)
1045 		bp->bio_completed = bp->bio_length;
1046 	return ret;
1047 }
1048 
1049 static cam_status
1050 adaregister(struct cam_periph *periph, void *arg)
1051 {
1052 	struct ada_softc *softc;
1053 	struct ccb_pathinq cpi;
1054 	struct ccb_getdev *cgd;
1055 	char   announce_buf[80], buf1[32];
1056 	struct disk_params *dp;
1057 	caddr_t match;
1058 	u_int maxio;
1059 	int legacy_id, quirks;
1060 
1061 	cgd = (struct ccb_getdev *)arg;
1062 	if (cgd == NULL) {
1063 		printf("adaregister: no getdev CCB, can't register device\n");
1064 		return(CAM_REQ_CMP_ERR);
1065 	}
1066 
1067 	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
1068 	    M_NOWAIT|M_ZERO);
1069 
1070 	if (softc == NULL) {
1071 		printf("adaregister: Unable to probe new device. "
1072 		    "Unable to allocate softc\n");
1073 		return(CAM_REQ_CMP_ERR);
1074 	}
1075 
1076 	bioq_init(&softc->bio_queue);
1077 	bioq_init(&softc->trim_queue);
1078 
1079 	if ((cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
1080 	    (cgd->inq_flags & SID_DMA))
1081 		softc->flags |= ADA_FLAG_CAN_DMA;
1082 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
1083 		softc->flags |= ADA_FLAG_CAN_48BIT;
1084 		if (cgd->inq_flags & SID_DMA48)
1085 			softc->flags |= ADA_FLAG_CAN_DMA48;
1086 	}
1087 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
1088 		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
1089 	if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT)
1090 		softc->flags |= ADA_FLAG_CAN_POWERMGT;
1091 	if ((cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
1092 	    (cgd->inq_flags & SID_DMA) && (cgd->inq_flags & SID_CmdQue))
1093 		softc->flags |= ADA_FLAG_CAN_NCQ;
1094 	if ((cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
1095 	    (cgd->inq_flags & SID_DMA)) {
1096 		softc->flags |= ADA_FLAG_CAN_TRIM;
1097 		softc->trim_max_ranges = TRIM_MAX_RANGES;
1098 		if (cgd->ident_data.max_dsm_blocks != 0) {
1099 			softc->trim_max_ranges =
1100 			    min(cgd->ident_data.max_dsm_blocks * 64,
1101 				softc->trim_max_ranges);
1102 		}
1103 	}
1104 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
1105 		softc->flags |= ADA_FLAG_CAN_CFA;
1106 
1107 	periph->softc = softc;
1108 
1109 	/*
1110 	 * See if this device has any quirks.
1111 	 */
1112 	match = cam_quirkmatch((caddr_t)&cgd->ident_data,
1113 			       (caddr_t)ada_quirk_table,
1114 			       sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
1115 			       sizeof(*ada_quirk_table), ata_identify_match);
1116 	if (match != NULL)
1117 		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
1118 	else
1119 		softc->quirks = ADA_Q_NONE;
1120 
1121 	bzero(&cpi, sizeof(cpi));
1122 	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE);
1123 	cpi.ccb_h.func_code = XPT_PATH_INQ;
1124 	xpt_action((union ccb *)&cpi);
1125 
1126 	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);
1127 
1128 	/*
1129 	 * Register this media as a disk
1130 	 */
1131 	(void)cam_periph_hold(periph, PRIBIO);
1132 	cam_periph_unlock(periph);
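	/* announce_buf doubles as scratch space for per-unit tunable names. */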
1133 	snprintf(announce_buf, sizeof(announce_buf),
1134 	    "kern.cam.ada.%d.quirks", periph->unit_number);
1135 	quirks = softc->quirks;
1136 	TUNABLE_INT_FETCH(announce_buf, &quirks);
1137 	softc->quirks = quirks;
1138 	softc->read_ahead = -1;
1139 	snprintf(announce_buf, sizeof(announce_buf),
1140 	    "kern.cam.ada.%d.read_ahead", periph->unit_number);
1141 	TUNABLE_INT_FETCH(announce_buf, &softc->read_ahead);
1142 	softc->write_cache = -1;
1143 	snprintf(announce_buf, sizeof(announce_buf),
1144 	    "kern.cam.ada.%d.write_cache", periph->unit_number);
1145 	TUNABLE_INT_FETCH(announce_buf, &softc->write_cache);
1146 	softc->sort_io_queue = -1;
1147 	adagetparams(periph, cgd);
1148 	softc->disk = disk_alloc();
1149 	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
1150 			  periph->unit_number, softc->params.secsize,
1151 			  DEVSTAT_ALL_SUPPORTED,
1152 			  DEVSTAT_TYPE_DIRECT |
1153 			  XPORT_DEVSTAT_TYPE(cpi.transport),
1154 			  DEVSTAT_PRIORITY_DISK);
1155 	softc->disk->d_open = adaopen;
1156 	softc->disk->d_close = adaclose;
1157 	softc->disk->d_strategy = adastrategy;
1158 	softc->disk->d_getattr = adagetattr;
1159 	softc->disk->d_dump = adadump;
1160 	softc->disk->d_name = "ada";
1161 	softc->disk->d_drv1 = periph;
1162 	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
1163 	if (maxio == 0)
1164 		maxio = DFLTPHYS;	/* traditional default */
1165 	else if (maxio > MAXPHYS)
1166 		maxio = MAXPHYS;	/* for safety */
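	/*
	 * 48-bit commands carry a 16-bit sector count (up to 65536 sectors
	 * per transfer), while 28-bit commands carry only an 8-bit count
	 * (up to 256 sectors), so clamp the maximum I/O size accordingly.
	 */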
1167 	if (softc->flags & ADA_FLAG_CAN_48BIT)
1168 		maxio = min(maxio, 65536 * softc->params.secsize);
1169 	else					/* 28bit ATA command limit */
1170 		maxio = min(maxio, 256 * softc->params.secsize);
1171 	softc->disk->d_maxsize = maxio;
1172 	softc->disk->d_unit = periph->unit_number;
1173 	softc->disk->d_flags = 0;
1174 	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
1175 		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
1176 	if ((softc->flags & ADA_FLAG_CAN_TRIM) ||
1177 	    ((softc->flags & ADA_FLAG_CAN_CFA) &&
1178 	    !(softc->flags & ADA_FLAG_CAN_48BIT)))
1179 		softc->disk->d_flags |= DISKFLAG_CANDELETE;
1180 	if ((cpi.hba_misc & PIM_UNMAPPED) != 0)
1181 		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
1182 	strlcpy(softc->disk->d_descr, cgd->ident_data.model,
1183 	    MIN(sizeof(softc->disk->d_descr), sizeof(cgd->ident_data.model)));
1184 	strlcpy(softc->disk->d_ident, cgd->ident_data.serial,
1185 	    MIN(sizeof(softc->disk->d_ident), sizeof(cgd->ident_data.serial)));
1186 	softc->disk->d_hba_vendor = cpi.hba_vendor;
1187 	softc->disk->d_hba_device = cpi.hba_device;
1188 	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
1189 	softc->disk->d_hba_subdevice = cpi.hba_subdevice;
1190 
1191 	softc->disk->d_sectorsize = softc->params.secsize;
1192 	softc->disk->d_mediasize = (off_t)softc->params.sectors *
1193 	    softc->params.secsize;
1194 	if (ata_physical_sector_size(&cgd->ident_data) !=
1195 	    softc->params.secsize) {
1196 		softc->disk->d_stripesize =
1197 		    ata_physical_sector_size(&cgd->ident_data);
1198 		softc->disk->d_stripeoffset = (softc->disk->d_stripesize -
1199 		    ata_logical_sector_offset(&cgd->ident_data)) %
1200 		    softc->disk->d_stripesize;
1201 	} else if (softc->quirks & ADA_Q_4K) {
1202 		softc->disk->d_stripesize = 4096;
1203 		softc->disk->d_stripeoffset = 0;
1204 	}
1205 	softc->disk->d_fwsectors = softc->params.secs_per_track;
1206 	softc->disk->d_fwheads = softc->params.heads;
1207 	ata_disk_firmware_geom_adjust(softc->disk);
1208 
1209 	if (ada_legacy_aliases) {
1210 #ifdef ATA_STATIC_ID
1211 		legacy_id = xpt_path_legacy_ata_id(periph->path);
1212 #else
1213 		legacy_id = softc->disk->d_unit;
1214 #endif
1215 		if (legacy_id >= 0) {
1216 			snprintf(announce_buf, sizeof(announce_buf),
1217 			    "kern.devalias.%s%d",
1218 			    softc->disk->d_name, softc->disk->d_unit);
1219 			snprintf(buf1, sizeof(buf1),
1220 			    "ad%d", legacy_id);
1221 			setenv(announce_buf, buf1);
1222 		}
1223 	} else
1224 		legacy_id = -1;
1225 	disk_create(softc->disk, DISK_VERSION);
1226 	cam_periph_lock(periph);
1227 	cam_periph_unhold(periph);
1228 
1229 	dp = &softc->params;
1230 	snprintf(announce_buf, sizeof(announce_buf),
1231 		"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
1232 		(uintmax_t)(((uintmax_t)dp->secsize *
1233 		dp->sectors) / (1024*1024)),
1234 		(uintmax_t)dp->sectors,
1235 		dp->secsize, dp->heads,
1236 		dp->secs_per_track, dp->cylinders);
1237 	xpt_announce_periph(periph, announce_buf);
1238 	if (legacy_id >= 0)
1239 		printf("%s%d: Previously was known as ad%d\n",
1240 		       periph->periph_name, periph->unit_number, legacy_id);
1241 
1242 	/*
1243 	 * Create our sysctl variables, now that we know
1244 	 * we have successfully attached.
1245 	 */
1246 	cam_periph_acquire(periph);
1247 	taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);
1248 
1249 	/*
1250 	 * Add async callbacks for bus reset and
1251 	 * bus device reset calls.  I don't bother
1252 	 * checking if this fails as, in most cases,
1253 	 * the system will function just fine without
1254 	 * them and the only alternative would be to
1255 	 * not attach the device on failure.
1256 	 */
1257 	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
1258 	    AC_GETDEV_CHANGED | AC_ADVINFO_CHANGED,
1259 	    adaasync, periph, periph->path);
1260 
1261 	/*
1262 	 * Schedule a periodic event to occasionally send an
1263 	 * ordered tag to a device.
1264 	 */
1265 	callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
1266 	callout_reset(&softc->sendordered_c,
1267 	    (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL,
1268 	    adasendorderedtag, softc);
1269 
1270 	if (ADA_RA >= 0 &&
1271 	    cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD) {
1272 		softc->state = ADA_STATE_RAHEAD;
1273 		cam_periph_acquire(periph);
1274 		cam_freeze_devq_arg(periph->path,
1275 		    RELSIM_RELEASE_RUNLEVEL, CAM_RL_DEV + 1);
1276 		xpt_schedule(periph, CAM_PRIORITY_DEV);
1277 	} else if (ADA_WC >= 0 &&
1278 	    cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
1279 		softc->state = ADA_STATE_WCACHE;
1280 		cam_periph_acquire(periph);
1281 		cam_freeze_devq_arg(periph->path,
1282 		    RELSIM_RELEASE_RUNLEVEL, CAM_RL_DEV + 1);
1283 		xpt_schedule(periph, CAM_PRIORITY_DEV);
1284 	} else
1285 		softc->state = ADA_STATE_NORMAL;
1286 
1287 	return(CAM_REQ_CMP);
1288 }
1289 
1290 static void
1291 adastart(struct cam_periph *periph, union ccb *start_ccb)
1292 {
1293 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
1294 	struct ccb_ataio *ataio = &start_ccb->ataio;
1295 
1296 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastart\n"));
1297 
1298 	switch (softc->state) {
1299 	case ADA_STATE_NORMAL:
1300 	{
1301 		struct bio *bp;
1302 		u_int8_t tag_code;
1303 
1304 		/* Execute immediate CCB if waiting. */
1305 		if (periph->immediate_priority <= periph->pinfo.priority) {
1306 			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
1307 					("queuing for immediate ccb\n"));
1308 			start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
1309 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1310 					  periph_links.sle);
1311 			periph->immediate_priority = CAM_PRIORITY_NONE;
1312 			wakeup(&periph->ccb_list);
1313 			/* Have more work to do, so ensure we stay scheduled */
1314 			adaschedule(periph);
1315 			break;
1316 		}
1317 		/* Run TRIM if not running yet. */
1318 		if (!softc->trim_running &&
1319 		    (bp = bioq_first(&softc->trim_queue)) != 0) {
1320 			struct trim_request *req = &softc->trim_req;
1321 			struct bio *bp1;
1322 			uint64_t lastlba = (uint64_t)-1;
1323 			int bps = 0, c, lastcount = 0, off, ranges = 0;
1324 
1325 			softc->trim_running = 1;
1326 			bzero(req, sizeof(*req));
1327 			bp1 = bp;
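			/*
			 * Coalesce queued deletes into DSM TRIM ranges: each
			 * 8-byte range holds a 48-bit starting LBA and a
			 * 16-bit sector count, runs longer than 0xffff
			 * sectors are split across ranges, and consecutive
			 * requests are merged into the previous range where
			 * possible.
			 */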
1328 			do {
1329 				uint64_t lba = bp1->bio_pblkno;
1330 				int count = bp1->bio_bcount /
1331 				    softc->params.secsize;
1332 
1333 				bioq_remove(&softc->trim_queue, bp1);
1334 
1335 				/* Try to extend the previous range. */
1336 				if (lba == lastlba) {
1337 					c = min(count, 0xffff - lastcount);
1338 					lastcount += c;
1339 					off = (ranges - 1) * 8;
1340 					req->data[off + 6] = lastcount & 0xff;
1341 					req->data[off + 7] =
1342 					    (lastcount >> 8) & 0xff;
1343 					count -= c;
1344 					lba += c;
1345 				}
1346 
1347 				while (count > 0) {
1348 					c = min(count, 0xffff);
1349 					off = ranges * 8;
1350 					req->data[off + 0] = lba & 0xff;
1351 					req->data[off + 1] = (lba >> 8) & 0xff;
1352 					req->data[off + 2] = (lba >> 16) & 0xff;
1353 					req->data[off + 3] = (lba >> 24) & 0xff;
1354 					req->data[off + 4] = (lba >> 32) & 0xff;
1355 					req->data[off + 5] = (lba >> 40) & 0xff;
1356 					req->data[off + 6] = c & 0xff;
1357 					req->data[off + 7] = (c >> 8) & 0xff;
1358 					lba += c;
1359 					count -= c;
1360 					lastcount = c;
1361 					ranges++;
1362 				}
1363 				lastlba = lba;
1364 				req->bps[bps++] = bp1;
1365 				bp1 = bioq_first(&softc->trim_queue);
1366 				if (bps >= TRIM_MAX_BIOS ||
1367 				    bp1 == NULL ||
1368 				    bp1->bio_bcount / softc->params.secsize >
1369 				    (softc->trim_max_ranges - ranges) * 0xffff)
1370 					break;
1371 			} while (1);
1372 			cam_fill_ataio(ataio,
1373 			    ada_retry_count,
1374 			    adadone,
1375 			    CAM_DIR_OUT,
1376 			    0,
1377 			    req->data,
1378 			    ((ranges + 63) / 64) * 512,
1379 			    ada_default_timeout * 1000);
1380 			ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
1381 			    ATA_DSM_TRIM, 0, (ranges + 63) / 64);
1382 			start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
1383 			goto out;
1384 		}
1385 		/* Run regular command. */
1386 		bp = bioq_first(&softc->bio_queue);
1387 		if (bp == NULL) {
1388 			xpt_release_ccb(start_ccb);
1389 			break;
1390 		}
1391 		bioq_remove(&softc->bio_queue, bp);
1392 
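		/*
		 * BIO_ORDERED requests and the periodic ordered tag get
		 * tag_code 0 and are issued as plain non-NCQ commands;
		 * other reads and writes may use NCQ when the device
		 * supports it.
		 */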
1393 		if ((bp->bio_flags & BIO_ORDERED) != 0
1394 		 || (softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
1395 			softc->flags &= ~ADA_FLAG_NEED_OTAG;
1396 			softc->ordered_tag_count++;
1397 			tag_code = 0;
1398 		} else {
1399 			tag_code = 1;
1400 		}
1401 		switch (bp->bio_cmd) {
1402 		case BIO_READ:
1403 		case BIO_WRITE:
1404 		{
1405 			uint64_t lba = bp->bio_pblkno;
1406 			uint16_t count = bp->bio_bcount / softc->params.secsize;
1407 #ifdef ADA_TEST_FAILURE
1408 			int fail = 0;
1409 
1410 			/*
1411 			 * Support the failure sysctls.  If the command is a
1412 			 * read and there are pending forced read errors, or if
1413 			 * it is a write and there are pending write errors, fail this
1414 			 * operation with EIO.  This is useful for testing
1415 			 * purposes.  Also, support having every Nth read fail.
1416 			 *
1417 			 * This is a rather blunt tool.
1418 			 */
1419 			if (bp->bio_cmd == BIO_READ) {
1420 				if (softc->force_read_error) {
1421 					softc->force_read_error--;
1422 					fail = 1;
1423 				}
1424 				if (softc->periodic_read_error > 0) {
1425 					if (++softc->periodic_read_count >=
1426 					    softc->periodic_read_error) {
1427 						softc->periodic_read_count = 0;
1428 						fail = 1;
1429 					}
1430 				}
1431 			} else {
1432 				if (softc->force_write_error) {
1433 					softc->force_write_error--;
1434 					fail = 1;
1435 				}
1436 			}
1437 			if (fail) {
1438 				bp->bio_error = EIO;
1439 				bp->bio_flags |= BIO_ERROR;
1440 				biodone(bp);
1441 				xpt_release_ccb(start_ccb);
1442 				adaschedule(periph);
1443 				return;
1444 			}
1445 #endif
1446 			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
1447 			    round_page(bp->bio_bcount + bp->bio_ma_offset) /
1448 			    PAGE_SIZE == bp->bio_ma_n,
1449 			    ("Short bio %p", bp));
1450 			cam_fill_ataio(ataio,
1451 			    ada_retry_count,
1452 			    adadone,
1453 			    (bp->bio_cmd == BIO_READ ? CAM_DIR_IN :
1454 				CAM_DIR_OUT) | ((bp->bio_flags & BIO_UNMAPPED)
1455 				!= 0 ? CAM_DATA_BIO : 0),
1456 			    tag_code,
1457 			    ((bp->bio_flags & BIO_UNMAPPED) != 0) ? (void *)bp :
1458 				bp->bio_data,
1459 			    bp->bio_bcount,
1460 			    ada_default_timeout*1000);
1461 
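			/*
			 * Select the command form: NCQ when allowed, 48-bit
			 * commands when the LBA or count exceeds the 28-bit
			 * limits, and DMA variants when the device supports
			 * DMA (READ/WRITE MULTIPLE otherwise).
			 */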
1462 			if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) {
1463 				if (bp->bio_cmd == BIO_READ) {
1464 					ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
1465 					    lba, count);
1466 				} else {
1467 					ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
1468 					    lba, count);
1469 				}
1470 			} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
1471 			    (lba + count >= ATA_MAX_28BIT_LBA ||
1472 			    count > 256)) {
1473 				if (softc->flags & ADA_FLAG_CAN_DMA48) {
1474 					if (bp->bio_cmd == BIO_READ) {
1475 						ata_48bit_cmd(ataio, ATA_READ_DMA48,
1476 						    0, lba, count);
1477 					} else {
1478 						ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
1479 						    0, lba, count);
1480 					}
1481 				} else {
1482 					if (bp->bio_cmd == BIO_READ) {
1483 						ata_48bit_cmd(ataio, ATA_READ_MUL48,
1484 						    0, lba, count);
1485 					} else {
1486 						ata_48bit_cmd(ataio, ATA_WRITE_MUL48,
1487 						    0, lba, count);
1488 					}
1489 				}
1490 			} else {
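				/* A 28-bit sector count of 0 means 256. */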
1491 				if (count == 256)
1492 					count = 0;
1493 				if (softc->flags & ADA_FLAG_CAN_DMA) {
1494 					if (bp->bio_cmd == BIO_READ) {
1495 						ata_28bit_cmd(ataio, ATA_READ_DMA,
1496 						    0, lba, count);
1497 					} else {
1498 						ata_28bit_cmd(ataio, ATA_WRITE_DMA,
1499 						    0, lba, count);
1500 					}
1501 				} else {
1502 					if (bp->bio_cmd == BIO_READ) {
1503 						ata_28bit_cmd(ataio, ATA_READ_MUL,
1504 						    0, lba, count);
1505 					} else {
1506 						ata_28bit_cmd(ataio, ATA_WRITE_MUL,
1507 						    0, lba, count);
1508 					}
1509 				}
1510 			}
1511 			break;
1512 		}
1513 		case BIO_DELETE:
1514 		{
1515 			uint64_t lba = bp->bio_pblkno;
1516 			uint16_t count = bp->bio_bcount / softc->params.secsize;
1517 
1518 			cam_fill_ataio(ataio,
1519 			    ada_retry_count,
1520 			    adadone,
1521 			    CAM_DIR_NONE,
1522 			    0,
1523 			    NULL,
1524 			    0,
1525 			    ada_default_timeout*1000);
1526 
1527 			if (count >= 256)
1528 				count = 0;
1529 			ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
1530 			break;
1531 		}
1532 		case BIO_FLUSH:
1533 			cam_fill_ataio(ataio,
1534 			    1,
1535 			    adadone,
1536 			    CAM_DIR_NONE,
1537 			    0,
1538 			    NULL,
1539 			    0,
1540 			    ada_default_timeout*1000);
1541 
1542 			if (softc->flags & ADA_FLAG_CAN_48BIT)
1543 				ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
1544 			else
1545 				ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
1546 			break;
1547 		}
1548 		start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
1549 out:
1550 		start_ccb->ccb_h.ccb_bp = bp;
1551 		softc->outstanding_cmds++;
1552 		xpt_action(start_ccb);
1553 
1554 		/* May have more work to do, so ensure we stay scheduled */
1555 		adaschedule(periph);
1556 		break;
1557 	}
1558 	case ADA_STATE_RAHEAD:
1559 	case ADA_STATE_WCACHE:
1560 	{
1561 		if (softc->flags & ADA_FLAG_PACK_INVALID) {
1562 			softc->state = ADA_STATE_NORMAL;
1563 			xpt_release_ccb(start_ccb);
1564 			cam_release_devq(periph->path,
1565 			    RELSIM_RELEASE_RUNLEVEL, 0, CAM_RL_DEV + 1, FALSE);
1566 			adaschedule(periph);
1567 			cam_periph_release_locked(periph);
1568 			return;
1569 		}
1570 
1571 		cam_fill_ataio(ataio,
1572 		    1,
1573 		    adadone,
1574 		    CAM_DIR_NONE,
1575 		    0,
1576 		    NULL,
1577 		    0,
1578 		    ada_default_timeout*1000);
1579 
1580 		if (softc->state == ADA_STATE_RAHEAD) {
1581 			ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_RA ?
1582 			    ATA_SF_ENAB_RCACHE : ATA_SF_DIS_RCACHE, 0, 0);
1583 			start_ccb->ccb_h.ccb_state = ADA_CCB_RAHEAD;
1584 		} else {
1585 			ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_WC ?
1586 			    ATA_SF_ENAB_WCACHE : ATA_SF_DIS_WCACHE, 0, 0);
1587 			start_ccb->ccb_h.ccb_state = ADA_CCB_WCACHE;
1588 		}
1589 		xpt_action(start_ccb);
1590 		break;
1591 	}
1592 	}
1593 }
1594 
1595 static void
1596 adadone(struct cam_periph *periph, union ccb *done_ccb)
1597 {
1598 	struct ada_softc *softc;
1599 	struct ccb_ataio *ataio;
1600 	struct ccb_getdev *cgd;
1601 
1602 	softc = (struct ada_softc *)periph->softc;
1603 	ataio = &done_ccb->ataio;
1604 
1605 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adadone\n"));
1606 
1607 	switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
1608 	case ADA_CCB_BUFFER_IO:
1609 	case ADA_CCB_TRIM:
1610 	{
1611 		struct bio *bp;
1612 
1613 		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
1614 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1615 			int error;
1616 
1617 			error = adaerror(done_ccb, 0, 0);
1618 			if (error == ERESTART) {
1619 				/* A retry was scheduled, so just return. */
1620 				return;
1621 			}
1622 			if (error != 0) {
1623 				if (error == ENXIO &&
1624 				    (softc->flags & ADA_FLAG_PACK_INVALID) == 0) {
1625 					/*
1626 					 * Catastrophic error.  Mark our pack as
1627 					 * invalid.
1628 					 */
1629 					/*
1630 					 * XXX See if this is really a media
1631 					 * XXX change first?
1632 					 */
1633 					xpt_print(periph->path,
1634 					    "Invalidating pack\n");
1635 					softc->flags |= ADA_FLAG_PACK_INVALID;
1636 				}
1637 				bp->bio_error = error;
1638 				bp->bio_resid = bp->bio_bcount;
1639 				bp->bio_flags |= BIO_ERROR;
1640 			} else {
1641 				bp->bio_resid = ataio->resid;
1642 				bp->bio_error = 0;
1643 				if (bp->bio_resid != 0)
1644 					bp->bio_flags |= BIO_ERROR;
1645 			}
1646 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1647 				cam_release_devq(done_ccb->ccb_h.path,
1648 						 /*relsim_flags*/0,
1649 						 /*reduction*/0,
1650 						 /*timeout*/0,
1651 						 /*getcount_only*/0);
1652 		} else {
1653 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1654 				panic("REQ_CMP with QFRZN");
1655 			bp->bio_resid = ataio->resid;
1656 			if (ataio->resid > 0)
1657 				bp->bio_flags |= BIO_ERROR;
1658 		}
1659 		softc->outstanding_cmds--;
1660 		if (softc->outstanding_cmds == 0)
1661 			softc->flags |= ADA_FLAG_WENT_IDLE;
1662 		if ((ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) ==
1663 		    ADA_CCB_TRIM) {
1664 			struct trim_request *req =
1665 			    (struct trim_request *)ataio->data_ptr;
1666 			int i;
1667 
1668 			for (i = 1; i < TRIM_MAX_BIOS && req->bps[i]; i++) {
1669 				struct bio *bp1 = req->bps[i];
1670 
1671 				bp1->bio_resid = bp->bio_resid;
1672 				bp1->bio_error = bp->bio_error;
1673 				if (bp->bio_flags & BIO_ERROR)
1674 					bp1->bio_flags |= BIO_ERROR;
1675 				biodone(bp1);
1676 			}
1677 			softc->trim_running = 0;
1678 			biodone(bp);
1679 			adaschedule(periph);
1680 		} else
1681 			biodone(bp);
1682 		break;
1683 	}
1684 	case ADA_CCB_RAHEAD:
1685 	{
1686 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1687 			if (adaerror(done_ccb, 0, 0) == ERESTART) {
1688 				return;
1689 			} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1690 				cam_release_devq(done_ccb->ccb_h.path,
1691 				    /*relsim_flags*/0,
1692 				    /*reduction*/0,
1693 				    /*timeout*/0,
1694 				    /*getcount_only*/0);
1695 			}
1696 		}
1697 
1698 		/*
1699 		 * Since our peripheral may be invalidated by an error
1700 		 * above or an external event, we must release our CCB
1701 		 * before releasing the reference on the peripheral.
1702 		 * The peripheral will only go away once the last reference
1703 		 * is removed, and we need it around for the CCB release
1704 		 * operation.
1705 		 */
1706 		cgd = (struct ccb_getdev *)done_ccb;
1707 		xpt_setup_ccb(&cgd->ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1708 		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
1709 		xpt_action((union ccb *)cgd);
1710 		if (ADA_WC >= 0 &&
1711 		    cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
1712 			softc->state = ADA_STATE_WCACHE;
1713 			xpt_release_ccb(done_ccb);
1714 			xpt_schedule(periph, CAM_PRIORITY_DEV);
1715 			return;
1716 		}
1717 		softc->state = ADA_STATE_NORMAL;
1718 		xpt_release_ccb(done_ccb);
1719 		cam_release_devq(periph->path,
1720 		    RELSIM_RELEASE_RUNLEVEL, 0, CAM_RL_DEV + 1, FALSE);
1721 		adaschedule(periph);
1722 		cam_periph_release_locked(periph);
1723 		return;
1724 	}
1725 	case ADA_CCB_WCACHE:
1726 	{
1727 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1728 			if (adaerror(done_ccb, 0, 0) == ERESTART) {
1729 				return;
1730 			} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1731 				cam_release_devq(done_ccb->ccb_h.path,
1732 				    /*relsim_flags*/0,
1733 				    /*reduction*/0,
1734 				    /*timeout*/0,
1735 				    /*getcount_only*/0);
1736 			}
1737 		}
1738 
1739 		softc->state = ADA_STATE_NORMAL;
1740 		/*
1741 		 * Since our peripheral may be invalidated by an error
1742 		 * above or an external event, we must release our CCB
1743 		 * before releasing the reference on the peripheral.
1744 		 * The peripheral will only go away once the last reference
1745 		 * is removed, and we need it around for the CCB release
1746 		 * operation.
1747 		 */
1748 		xpt_release_ccb(done_ccb);
1749 		cam_release_devq(periph->path,
1750 		    RELSIM_RELEASE_RUNLEVEL, 0, CAM_RL_DEV + 1, FALSE);
1751 		adaschedule(periph);
1752 		cam_periph_release_locked(periph);
1753 		return;
1754 	}
1755 	case ADA_CCB_WAITING:
1756 	{
1757 		/* Caller will release the CCB */
1758 		wakeup(&done_ccb->ccb_h.cbfcnp);
1759 		return;
1760 	}
1761 	case ADA_CCB_DUMP:
1762 		/* No-op.  We're polling */
1763 		return;
1764 	default:
1765 		break;
1766 	}
1767 	xpt_release_ccb(done_ccb);
1768 }
1769 
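/*
 * Funnel all command errors through the generic CAM peripheral error
 * handler; no ada-specific recovery is layered on top.
 */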
1770 static int
1771 adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1772 {
1773 
1774 	return(cam_periph_error(ccb, cam_flags, sense_flags, NULL));
1775 }
1776 
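/*
 * Derive the disk parameters from the ATA IDENTIFY data: start with the
 * current (or default) CHS geometry, then override the capacity with the
 * 28-bit LBA size and, when 48-bit addressing is supported and reports
 * more than ATA_MAX_28BIT_LBA sectors, with the 48-bit LBA size.
 */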
1777 static void
1778 adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd)
1779 {
1780 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
1781 	struct disk_params *dp = &softc->params;
1782 	u_int64_t lbasize48;
1783 	u_int32_t lbasize;
1784 
1785 	dp->secsize = ata_logical_sector_size(&cgd->ident_data);
1786 	if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
1787 		cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
1788 		dp->heads = cgd->ident_data.current_heads;
1789 		dp->secs_per_track = cgd->ident_data.current_sectors;
1790 		dp->cylinders = cgd->ident_data.cylinders;
1791 		dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
1792 			  ((u_int32_t)cgd->ident_data.current_size_2 << 16);
1793 	} else {
1794 		dp->heads = cgd->ident_data.heads;
1795 		dp->secs_per_track = cgd->ident_data.sectors;
1796 		dp->cylinders = cgd->ident_data.cylinders;
1797 		dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
1798 	}
1799 	lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
1800 		  ((u_int32_t)cgd->ident_data.lba_size_2 << 16);
1801 
1802 	/* use the 28bit LBA size if valid or bigger than the CHS mapping */
1803 	/* Use the 28-bit LBA size if valid or bigger than the CHS-derived capacity. */
1804 		dp->sectors = lbasize;
1805 
1806 	/* use the 48bit LBA size if valid */
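	/*
	 * The 48-bit capacity is assembled from four 16-bit IDENTIFY words,
	 * least-significant word first.  For example, a hypothetical drive
	 * reporting lba_size48_1..4 = 0x88B0, 0xE8E0, 0x0000, 0x0000 yields
	 * 0x00000000E8E088B0 = 3907029168 sectors (~2 TB at 512 bytes/sector).
	 */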
1807 	lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
1808 		    ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
1809 		    ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
1810 		    ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
1811 	if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
1812 	    lbasize48 > ATA_MAX_28BIT_LBA)
1813 		dp->sectors = lbasize48;
1814 }
1815 
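/*
 * Periodic callout: if no ordered tag went out during the last interval and
 * the drive has not gone idle, flag that the next queued command should be
 * sent as an ordered tag so queued writes cannot be starved indefinitely.
 */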
1816 static void
1817 adasendorderedtag(void *arg)
1818 {
1819 	struct ada_softc *softc = arg;
1820 
1821 	if (ada_send_ordered) {
1822 		if ((softc->ordered_tag_count == 0)
1823 		 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
1824 			softc->flags |= ADA_FLAG_NEED_OTAG;
1825 		}
1826 		if (softc->outstanding_cmds > 0)
1827 			softc->flags &= ~ADA_FLAG_WENT_IDLE;
1828 
1829 		softc->ordered_tag_count = 0;
1830 	}
1831 	/* Queue us up again */
1832 	callout_reset(&softc->sendordered_c,
1833 	    (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL,
1834 	    adasendorderedtag, softc);
1835 }
1836 
1837 /*
1838  * Step through all ADA peripheral drivers, and if the device is still open,
1839  * sync the disk cache to physical media.
1840  */
1841 static void
1842 adaflush(void)
1843 {
1844 	struct cam_periph *periph;
1845 	struct ada_softc *softc;
1846 	union ccb *ccb;
1847 	int error;
1848 
1849 	CAM_PERIPH_FOREACH(periph, &adadriver) {
1850 		/* If we panicked with the lock held, do not recurse here. */
1851 		if (cam_periph_owned(periph))
1852 			continue;
1853 		cam_periph_lock(periph);
1854 		softc = (struct ada_softc *)periph->softc;
1855 		/*
1856 		 * We only sync the cache if the drive is still open and
1857 		 * the drive is capable of it.
1858 		 */
1859 		if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
1860 		    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
1861 			cam_periph_unlock(periph);
1862 			continue;
1863 		}
1864 
1865 		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
1866 		cam_fill_ataio(&ccb->ataio,
1867 				    0,
1868 				    adadone,
1869 				    CAM_DIR_NONE,
1870 				    0,
1871 				    NULL,
1872 				    0,
1873 				    ada_default_timeout*1000);
1874 		if (softc->flags & ADA_FLAG_CAN_48BIT)
1875 			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
1876 		else
1877 			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
1878 
1879 		error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
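		/*
		 * Run the flush synchronously with retries and error
		 * recovery disabled; this path may be reached from
		 * shutdown or panic context.
		 */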
1880 		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY,
1881 		    softc->disk->d_devstat);
1882 		if (error != 0)
1883 			xpt_print(periph->path, "Synchronize cache failed\n");
1884 		xpt_release_ccb(ccb);
1885 		cam_periph_unlock(periph);
1886 	}
1887 }
1888 
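/*
 * Issue the given power-management command to every ADA drive that
 * advertises power-management support.  The 'flags' argument lets callers
 * add CAM_DEV_QFREEZE so the device queue stays frozen afterwards.
 */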
1889 static void
1890 adaspindown(uint8_t cmd, int flags)
1891 {
1892 	struct cam_periph *periph;
1893 	struct ada_softc *softc;
1894 	union ccb *ccb;
1895 	int error;
1896 
1897 	CAM_PERIPH_FOREACH(periph, &adadriver) {
1898 		/* If we panicked with the lock held, do not recurse here. */
1899 		if (cam_periph_owned(periph))
1900 			continue;
1901 		cam_periph_lock(periph);
1902 		softc = (struct ada_softc *)periph->softc;
1903 		/*
1904 		 * We only spin the drive down if it is capable of it.
1905 		 */
1906 		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
1907 			cam_periph_unlock(periph);
1908 			continue;
1909 		}
1910 
1911 		if (bootverbose)
1912 			xpt_print(periph->path, "spin-down\n");
1913 
1914 		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
1915 		cam_fill_ataio(&ccb->ataio,
1916 				    0,
1917 				    adadone,
1918 				    CAM_DIR_NONE | flags,
1919 				    0,
1920 				    NULL,
1921 				    0,
1922 				    ada_default_timeout*1000);
1923 		ata_28bit_cmd(&ccb->ataio, cmd, 0, 0, 0);
1924 
1925 		error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
1926 		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY,
1927 		    softc->disk->d_devstat);
1928 		if (error != 0)
1929 			xpt_print(periph->path, "Spin-down disk failed\n");
1930 		xpt_release_ccb(ccb);
1931 		cam_periph_unlock(periph);
1932 	}
1933 }
1934 
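/*
 * Shutdown eventhandler: flush all write caches, then spin the drives down
 * when halting or powering off if ada_spindown_shutdown is enabled.
 */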
1935 static void
1936 adashutdown(void *arg, int howto)
1937 {
1938 
1939 	adaflush();
1940 	if (ada_spindown_shutdown != 0 &&
1941 	    (howto & (RB_HALT | RB_POWEROFF)) != 0)
1942 		adaspindown(ATA_STANDBY_IMMEDIATE, 0);
1943 }
1944 
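/*
 * Suspend eventhandler: flush all write caches and, if ada_spindown_suspend
 * is enabled, put the drives to sleep with their device queues frozen.
 */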
1945 static void
1946 adasuspend(void *arg)
1947 {
1948 
1949 	adaflush();
1950 	if (ada_spindown_suspend != 0)
1951 		adaspindown(ATA_SLEEP, CAM_DEV_QFREEZE);
1952 }
1953 
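/*
 * Resume eventhandler: release the device-queue freeze taken when the
 * drives were put to sleep in adasuspend().
 */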
1954 static void
1955 adaresume(void *arg)
1956 {
1957 	struct cam_periph *periph;
1958 	struct ada_softc *softc;
1959 
1960 	if (ada_spindown_suspend == 0)
1961 		return;
1962 
1963 	CAM_PERIPH_FOREACH(periph, &adadriver) {
1964 		cam_periph_lock(periph);
1965 		softc = (struct ada_softc *)periph->softc;
1966 		/*
1967 		 * Only drives capable of power management were put to sleep
1968 		 * on suspend, so only those need to be handled here.
1969 		 */
1969 		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
1970 			cam_periph_unlock(periph);
1971 			continue;
1972 		}
1973 
1974 		if (bootverbose)
1975 			xpt_print(periph->path, "resume\n");
1976 
1977 		/*
1978 		 * Drop the freeze taken due to the CAM_DEV_QFREEZE flag
1979 		 * set on the sleep request.
1980 		 */
1981 		cam_release_devq(periph->path,
1982 			 /*relsim_flags*/0,
1983 			 /*openings*/0,
1984 			 /*timeout*/0,
1985 			 /*getcount_only*/0);
1986 
1987 		cam_periph_unlock(periph);
1988 	}
1989 }
1990 
1991 #endif /* _KERNEL */
1992