xref: /freebsd/sys/cam/ata/ata_da.c (revision 6ab38b8e25f31df8140b99c09160d0207f912ef3)
1 /*-
2  * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_ada.h"
31 
32 #include <sys/param.h>
33 
34 #ifdef _KERNEL
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/bio.h>
38 #include <sys/sysctl.h>
39 #include <sys/taskqueue.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/conf.h>
43 #include <sys/devicestat.h>
44 #include <sys/eventhandler.h>
45 #include <sys/malloc.h>
46 #include <sys/cons.h>
47 #include <sys/proc.h>
48 #include <sys/reboot.h>
49 #include <geom/geom_disk.h>
50 #endif /* _KERNEL */
51 
52 #ifndef _KERNEL
53 #include <stdio.h>
54 #include <string.h>
55 #endif /* _KERNEL */
56 
57 #include <cam/cam.h>
58 #include <cam/cam_ccb.h>
59 #include <cam/cam_periph.h>
60 #include <cam/cam_xpt_periph.h>
61 #include <cam/cam_sim.h>
62 
63 #include <cam/ata/ata_all.h>
64 
65 #include <machine/md_var.h>	/* geometry translation */
66 
67 #ifdef _KERNEL
68 
69 #define ATA_MAX_28BIT_LBA               268435455UL
70 
71 typedef enum {
72 	ADA_STATE_RAHEAD,
73 	ADA_STATE_WCACHE,
74 	ADA_STATE_NORMAL
75 } ada_state;
76 
77 typedef enum {
78 	ADA_FLAG_CAN_48BIT	= 0x0002,
79 	ADA_FLAG_CAN_FLUSHCACHE	= 0x0004,
80 	ADA_FLAG_CAN_NCQ	= 0x0008,
81 	ADA_FLAG_CAN_DMA	= 0x0010,
82 	ADA_FLAG_NEED_OTAG	= 0x0020,
83 	ADA_FLAG_WENT_IDLE	= 0x0040,
84 	ADA_FLAG_CAN_TRIM	= 0x0080,
85 	ADA_FLAG_OPEN		= 0x0100,
86 	ADA_FLAG_SCTX_INIT	= 0x0200,
87 	ADA_FLAG_CAN_CFA        = 0x0400,
88 	ADA_FLAG_CAN_POWERMGT   = 0x0800,
89 	ADA_FLAG_CAN_DMA48	= 0x1000,
90 	ADA_FLAG_DIRTY		= 0x2000
91 } ada_flags;
92 
93 typedef enum {
94 	ADA_Q_NONE		= 0x00,
95 	ADA_Q_4K		= 0x01,
96 } ada_quirks;
97 
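/* %b-format bit string used by xpt_announce_quirks() to decode quirk flags. */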
98 #define ADA_Q_BIT_STRING	\
99 	"\020"			\
100 	"\0014K"
101 
102 typedef enum {
103 	ADA_CCB_RAHEAD		= 0x01,
104 	ADA_CCB_WCACHE		= 0x02,
105 	ADA_CCB_BUFFER_IO	= 0x03,
106 	ADA_CCB_WAITING		= 0x04,
107 	ADA_CCB_DUMP		= 0x05,
108 	ADA_CCB_TRIM		= 0x06,
109 	ADA_CCB_TYPE_MASK	= 0x0F,
110 } ada_ccb_state;
111 
112 /* Offsets into our private area for storing information */
113 #define ccb_state	ppriv_field0
114 #define ccb_bp		ppriv_ptr1
115 
116 struct disk_params {
117 	u_int8_t  heads;
118 	u_int8_t  secs_per_track;
119 	u_int32_t cylinders;
120 	u_int32_t secsize;	/* Number of bytes/logical sector */
121 	u_int64_t sectors;	/* Total number sectors */
122 };
123 
124 #define TRIM_MAX_BLOCKS	8
125 #define TRIM_MAX_RANGES	(TRIM_MAX_BLOCKS * ATA_DSM_BLK_RANGES)
126 #define TRIM_MAX_BIOS	(TRIM_MAX_RANGES * 4)
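/*
 * One TRIM request coalesces up to TRIM_MAX_BIOS delete bios into a single
 * DATA SET MANAGEMENT command: "data" holds the packed LBA/count range
 * entries and "bps" tracks the bios to complete when the command finishes.
 */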
127 struct trim_request {
128 	uint8_t		data[TRIM_MAX_RANGES * ATA_DSM_RANGE_SIZE];
129 	struct bio	*bps[TRIM_MAX_BIOS];
130 };
131 
132 struct ada_softc {
133 	struct	 bio_queue_head bio_queue;
134 	struct	 bio_queue_head trim_queue;
135 	ada_state state;
136 	ada_flags flags;
137 	ada_quirks quirks;
138 	int	 sort_io_queue;
139 	int	 ordered_tag_count;
140 	int	 outstanding_cmds;
141 	int	 trim_max_ranges;
142 	int	 trim_running;
143 	int	 read_ahead;
144 	int	 write_cache;
145 #ifdef ADA_TEST_FAILURE
146 	int      force_read_error;
147 	int      force_write_error;
148 	int      periodic_read_error;
149 	int      periodic_read_count;
150 #endif
151 	struct	 disk_params params;
152 	struct	 disk *disk;
153 	struct task		sysctl_task;
154 	struct sysctl_ctx_list	sysctl_ctx;
155 	struct sysctl_oid	*sysctl_tree;
156 	struct callout		sendordered_c;
157 	struct trim_request	trim_req;
158 };
159 
160 struct ada_quirk_entry {
161 	struct scsi_inquiry_pattern inq_pat;
162 	ada_quirks quirks;
163 };
164 
165 static struct ada_quirk_entry ada_quirk_table[] =
166 {
167 	{
168 		/* Hitachi Advanced Format (4k) drives */
169 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Hitachi H??????????E3*", "*" },
170 		/*quirks*/ADA_Q_4K
171 	},
172 	{
173 		/* Samsung Advanced Format (4k) drives */
174 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD155UI*", "*" },
175 		/*quirks*/ADA_Q_4K
176 	},
177 	{
178 		/* Samsung Advanced Format (4k) drives */
179 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD204UI*", "*" },
180 		/*quirks*/ADA_Q_4K
181 	},
182 	{
183 		/* Seagate Barracuda Green Advanced Format (4k) drives */
184 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DL*", "*" },
185 		/*quirks*/ADA_Q_4K
186 	},
187 	{
188 		/* Seagate Barracuda Advanced Format (4k) drives */
189 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???DM*", "*" },
190 		/*quirks*/ADA_Q_4K
191 	},
192 	{
193 		/* Seagate Barracuda Advanced Format (4k) drives */
194 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DM*", "*" },
195 		/*quirks*/ADA_Q_4K
196 	},
197 	{
198 		/* Seagate Momentus Advanced Format (4k) drives */
199 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500423AS*", "*" },
200 		/*quirks*/ADA_Q_4K
201 	},
202 	{
203 		/* Seagate Momentus Advanced Format (4k) drives */
204 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500424AS*", "*" },
205 		/*quirks*/ADA_Q_4K
206 	},
207 	{
208 		/* Seagate Momentus Advanced Format (4k) drives */
209 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640423AS*", "*" },
210 		/*quirks*/ADA_Q_4K
211 	},
212 	{
213 		/* Seagate Momentus Advanced Format (4k) drives */
214 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640424AS*", "*" },
215 		/*quirks*/ADA_Q_4K
216 	},
217 	{
218 		/* Seagate Momentus Advanced Format (4k) drives */
219 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750420AS*", "*" },
220 		/*quirks*/ADA_Q_4K
221 	},
222 	{
223 		/* Seagate Momentus Advanced Format (4k) drives */
224 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750422AS*", "*" },
225 		/*quirks*/ADA_Q_4K
226 	},
227 	{
228 		/* Seagate Momentus Advanced Format (4k) drives */
229 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750423AS*", "*" },
230 		/*quirks*/ADA_Q_4K
231 	},
232 	{
233 		/* Seagate Momentus Thin Advanced Format (4k) drives */
234 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???LT*", "*" },
235 		/*quirks*/ADA_Q_4K
236 	},
237 	{
238 		/* WDC Caviar Green Advanced Format (4k) drives */
239 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RS*", "*" },
240 		/*quirks*/ADA_Q_4K
241 	},
242 	{
243 		/* WDC Caviar Green Advanced Format (4k) drives */
244 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RX*", "*" },
245 		/*quirks*/ADA_Q_4K
246 	},
247 	{
248 		/* WDC Caviar Green Advanced Format (4k) drives */
249 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RS*", "*" },
250 		/*quirks*/ADA_Q_4K
251 	},
252 	{
253 		/* WDC Caviar Green Advanced Format (4k) drives */
254 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RX*", "*" },
255 		/*quirks*/ADA_Q_4K
256 	},
257 	{
258 		/* WDC Scorpio Black Advanced Format (4k) drives */
259 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PKT*", "*" },
260 		/*quirks*/ADA_Q_4K
261 	},
262 	{
263 		/* WDC Scorpio Black Advanced Format (4k) drives */
264 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PKT*", "*" },
265 		/*quirks*/ADA_Q_4K
266 	},
267 	{
268 		/* WDC Scorpio Blue Advanced Format (4k) drives */
269 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PVT*", "*" },
270 		/*quirks*/ADA_Q_4K
271 	},
272 	{
273 		/* WDC Scorpio Blue Advanced Format (4k) drives */
274 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PVT*", "*" },
275 		/*quirks*/ADA_Q_4K
276 	},
277 	/* SSDs */
278 	{
279 		/*
280 		 * Corsair Force 2 SSDs
281 		 * 4k optimised & trim only works in 4k requests + 4k aligned
282 		 */
283 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair CSSD-F*", "*" },
284 		/*quirks*/ADA_Q_4K
285 	},
286 	{
287 		/*
288 		 * Corsair Force 3 SSDs
289 		 * 4k optimised & trim only works in 4k requests + 4k aligned
290 		 */
291 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force 3*", "*" },
292 		/*quirks*/ADA_Q_4K
293 	},
294 	{
295 		/*
296 		 * Corsair Neutron GTX SSDs
297 		 * 4k optimised & trim only works in 4k requests + 4k aligned
298 		 */
299 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" },
300 		/*quirks*/ADA_Q_4K
301 	},
302 	{
303 		/*
304 		 * Corsair Force GT SSDs
305 		 * 4k optimised & trim only works in 4k requests + 4k aligned
306 		 */
307 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force GT*", "*" },
308 		/*quirks*/ADA_Q_4K
309 	},
310 	{
311 		/*
312 		 * Crucial M4 SSDs
313 		 * 4k optimised & trim only works in 4k requests + 4k aligned
314 		 */
315 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "M4-CT???M4SSD2*", "*" },
316 		/*quirks*/ADA_Q_4K
317 	},
318 	{
319 		/*
320 		 * Crucial RealSSD C300 SSDs
321 		 * 4k optimised
322 		 */
323 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "C300-CTFDDAC???MAG*", "*" },
324 		/*quirks*/ADA_Q_4K
325 	},
326 	{
327 		/*
328 		 * Intel 320 Series SSDs
329 		 * 4k optimised & trim only works in 4k requests + 4k aligned
330 		 */
331 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSA2CW*", "*" },
332 		/*quirks*/ADA_Q_4K
333 	},
334 	{
335 		/*
336 		 * Intel 330 Series SSDs
337 		 * 4k optimised & trim only works in 4k requests + 4k aligned
338 		 */
339 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2CT*", "*" },
340 		/*quirks*/ADA_Q_4K
341 	},
342 	{
343 		/*
344 		 * Intel 510 Series SSDs
345 		 * 4k optimised & trim only works in 4k requests + 4k aligned
346 		 */
347 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2MH*", "*" },
348 		/*quirks*/ADA_Q_4K
349 	},
350 	{
351 		/*
352 		 * Intel 520 Series SSDs
353 		 * 4k optimised & trim only works in 4k requests + 4k aligned
354 		 */
355 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2BW*", "*" },
356 		/*quirks*/ADA_Q_4K
357 	},
358 	{
359 		/*
360 		 * Intel X25-M Series SSDs
361 		 * 4k optimised & trim only works in 4k requests + 4k aligned
362 		 */
363 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSA2M*", "*" },
364 		/*quirks*/ADA_Q_4K
365 	},
366 	{
367 		/*
368 		 * Kingston E100 Series SSDs
369 		 * 4k optimised & trim only works in 4k requests + 4k aligned
370 		 */
371 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SE100S3*", "*" },
372 		/*quirks*/ADA_Q_4K
373 	},
374 	{
375 		/*
376 		 * Kingston HyperX 3k SSDs
377 		 * 4k optimised & trim only works in 4k requests + 4k aligned
378 		 */
379 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SH103S3*", "*" },
380 		/*quirks*/ADA_Q_4K
381 	},
382 	{
383 		/*
384 		 * Marvell SSDs (entry taken from OpenSolaris)
385 		 * 4k optimised & trim only works in 4k requests + 4k aligned
386 		 */
387 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "MARVELL SD88SA02*", "*" },
388 		/*quirks*/ADA_Q_4K
389 	},
390 	{
391 		/*
392 		 * OCZ Agility 2 SSDs
393 		 * 4k optimised & trim only works in 4k requests + 4k aligned
394 		 */
395 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" },
396 		/*quirks*/ADA_Q_4K
397 	},
398 	{
399 		/*
400 		 * OCZ Agility 3 SSDs
401 		 * 4k optimised & trim only works in 4k requests + 4k aligned
402 		 */
403 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY3*", "*" },
404 		/*quirks*/ADA_Q_4K
405 	},
406 	{
407 		/*
408 		 * OCZ Deneva R Series SSDs
409 		 * 4k optimised & trim only works in 4k requests + 4k aligned
410 		 */
411 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "DENRSTE251M45*", "*" },
412 		/*quirks*/ADA_Q_4K
413 	},
414 	{
415 		/*
416 		 * OCZ Vertex 2 SSDs (inc pro series)
417 		 * 4k optimised & trim only works in 4k requests + 4k aligned
418 		 */
419 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ?VERTEX2*", "*" },
420 		/*quirks*/ADA_Q_4K
421 	},
422 	{
423 		/*
424 		 * OCZ Vertex 3 SSDs
425 		 * 4k optimised & trim only works in 4k requests + 4k aligned
426 		 */
427 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX3*", "*" },
428 		/*quirks*/ADA_Q_4K
429 	},
430 	{
431 		/*
432 		 * OCZ Vertex 4 SSDs
433 		 * 4k optimised & trim only works in 4k requests + 4k aligned
434 		 */
435 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX4*", "*" },
436 		/*quirks*/ADA_Q_4K
437 	},
438 	{
439 		/*
440 		 * Samsung 830 Series SSDs
441 		 * 4k optimised
442 		 */
443 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG SSD 830 Series*", "*" },
444 		/*quirks*/ADA_Q_4K
445 	},
446 	{
447 		/*
448 		 * SuperTalent TeraDrive CT SSDs
449 		 * 4k optimised & trim only works in 4k requests + 4k aligned
450 		 */
451 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "FTM??CT25H*", "*" },
452 		/*quirks*/ADA_Q_4K
453 	},
454 	{
455 		/*
456 		 * XceedIOPS SATA SSDs
457 		 * 4k optimised
458 		 */
459 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SG9XCS2D*", "*" },
460 		/*quirks*/ADA_Q_4K
461 	},
462 	{
463 		/* Default */
464 		{
465 		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
466 		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
467 		},
468 		/*quirks*/0
469 	},
470 };
471 
472 static	disk_strategy_t	adastrategy;
473 static	dumper_t	adadump;
474 static	periph_init_t	adainit;
475 static	void		adaasync(void *callback_arg, u_int32_t code,
476 				struct cam_path *path, void *arg);
477 static	void		adasysctlinit(void *context, int pending);
478 static	periph_ctor_t	adaregister;
479 static	periph_dtor_t	adacleanup;
480 static	periph_start_t	adastart;
481 static	periph_oninv_t	adaoninvalidate;
482 static	void		adadone(struct cam_periph *periph,
483 			       union ccb *done_ccb);
484 static  int		adaerror(union ccb *ccb, u_int32_t cam_flags,
485 				u_int32_t sense_flags);
486 static void		adagetparams(struct cam_periph *periph,
487 				struct ccb_getdev *cgd);
488 static timeout_t	adasendorderedtag;
489 static void		adashutdown(void *arg, int howto);
490 static void		adasuspend(void *arg);
491 static void		adaresume(void *arg);
492 
493 #ifndef	ADA_DEFAULT_LEGACY_ALIASES
494 #define	ADA_DEFAULT_LEGACY_ALIASES	1
495 #endif
496 
497 #ifndef ADA_DEFAULT_TIMEOUT
498 #define ADA_DEFAULT_TIMEOUT 30	/* Timeout in seconds */
499 #endif
500 
501 #ifndef	ADA_DEFAULT_RETRY
502 #define	ADA_DEFAULT_RETRY	4
503 #endif
504 
505 #ifndef	ADA_DEFAULT_SEND_ORDERED
506 #define	ADA_DEFAULT_SEND_ORDERED	1
507 #endif
508 
509 #ifndef	ADA_DEFAULT_SPINDOWN_SHUTDOWN
510 #define	ADA_DEFAULT_SPINDOWN_SHUTDOWN	1
511 #endif
512 
513 #ifndef	ADA_DEFAULT_SPINDOWN_SUSPEND
514 #define	ADA_DEFAULT_SPINDOWN_SUSPEND	1
515 #endif
516 
517 #ifndef	ADA_DEFAULT_READ_AHEAD
518 #define	ADA_DEFAULT_READ_AHEAD	1
519 #endif
520 
521 #ifndef	ADA_DEFAULT_WRITE_CACHE
522 #define	ADA_DEFAULT_WRITE_CACHE	1
523 #endif
524 
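/*
 * A per-device value below zero means "not set"; these macros then fall
 * back to the corresponding global default.
 */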
525 #define	ADA_RA	(softc->read_ahead >= 0 ? \
526 		 softc->read_ahead : ada_read_ahead)
527 #define	ADA_WC	(softc->write_cache >= 0 ? \
528 		 softc->write_cache : ada_write_cache)
529 #define	ADA_SIO	(softc->sort_io_queue >= 0 ? \
530 		 softc->sort_io_queue : cam_sort_io_queues)
531 
532 /*
533  * Most platforms map firmware geometry to the actual geometry, but some
534  * don't.  If not overridden, this hook defaults to doing nothing.
535  */
536 #ifndef ata_disk_firmware_geom_adjust
537 #define	ata_disk_firmware_geom_adjust(disk)
538 #endif
539 
540 static int ada_legacy_aliases = ADA_DEFAULT_LEGACY_ALIASES;
541 static int ada_retry_count = ADA_DEFAULT_RETRY;
542 static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
543 static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;
544 static int ada_spindown_shutdown = ADA_DEFAULT_SPINDOWN_SHUTDOWN;
545 static int ada_spindown_suspend = ADA_DEFAULT_SPINDOWN_SUSPEND;
546 static int ada_read_ahead = ADA_DEFAULT_READ_AHEAD;
547 static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE;
548 
549 static SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
550             "CAM Direct Access Disk driver");
551 SYSCTL_INT(_kern_cam_ada, OID_AUTO, legacy_aliases, CTLFLAG_RW,
552            &ada_legacy_aliases, 0, "Create legacy-like device aliases");
553 TUNABLE_INT("kern.cam.ada.legacy_aliases", &ada_legacy_aliases);
554 SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
555            &ada_retry_count, 0, "Normal I/O retry count");
556 TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
557 SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
558            &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
559 TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
560 SYSCTL_INT(_kern_cam_ada, OID_AUTO, send_ordered, CTLFLAG_RW,
561            &ada_send_ordered, 0, "Send Ordered Tags");
562 TUNABLE_INT("kern.cam.ada.send_ordered", &ada_send_ordered);
563 SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RW,
564            &ada_spindown_shutdown, 0, "Spin down upon shutdown");
565 TUNABLE_INT("kern.cam.ada.spindown_shutdown", &ada_spindown_shutdown);
566 SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RW,
567            &ada_spindown_suspend, 0, "Spin down upon suspend");
568 TUNABLE_INT("kern.cam.ada.spindown_suspend", &ada_spindown_suspend);
569 SYSCTL_INT(_kern_cam_ada, OID_AUTO, read_ahead, CTLFLAG_RW,
570            &ada_read_ahead, 0, "Enable disk read-ahead");
571 TUNABLE_INT("kern.cam.ada.read_ahead", &ada_read_ahead);
572 SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RW,
573            &ada_write_cache, 0, "Enable disk write cache");
574 TUNABLE_INT("kern.cam.ada.write_cache", &ada_write_cache);
575 
576 /*
577  * ADA_ORDEREDTAG_INTERVAL determines how often, relative
578  * to the default timeout, we check to see whether an ordered
579  * tagged transaction is appropriate to prevent simple tag
580  * starvation.  Since we'd like to ensure that there is at least
581  * 1/2 of the timeout length left for a starved transaction to
582  * complete after we've sent an ordered tag, we must poll at least
583  * four times in every timeout period.  This takes care of the worst
584  * case where a starved transaction starts during an interval that
585  * passes the "don't send an ordered tag" test, so it takes us two
586  * intervals to determine that a tag must be sent.
587  */
588 #ifndef ADA_ORDEREDTAG_INTERVAL
589 #define ADA_ORDEREDTAG_INTERVAL 4
590 #endif
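/*
 * Example: with the defaults above (30 second timeout, interval of 4), the
 * ordered-tag callout armed in adaregister() fires every 30 * hz / 4 ticks,
 * i.e. roughly every 7.5 seconds.
 */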
591 
592 static struct periph_driver adadriver =
593 {
594 	adainit, "ada",
595 	TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
596 };
597 
598 PERIPHDRIVER_DECLARE(ada, adadriver);
599 
600 static MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");
601 
602 static int
603 adaopen(struct disk *dp)
604 {
605 	struct cam_periph *periph;
606 	struct ada_softc *softc;
607 	int error;
608 
609 	periph = (struct cam_periph *)dp->d_drv1;
610 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
611 		return(ENXIO);
612 	}
613 
614 	cam_periph_lock(periph);
615 	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
616 		cam_periph_unlock(periph);
617 		cam_periph_release(periph);
618 		return (error);
619 	}
620 
621 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
622 	    ("adaopen\n"));
623 
624 	softc = (struct ada_softc *)periph->softc;
625 	softc->flags |= ADA_FLAG_OPEN;
626 
627 	cam_periph_unhold(periph);
628 	cam_periph_unlock(periph);
629 	return (0);
630 }
631 
632 static int
633 adaclose(struct disk *dp)
634 {
635 	struct	cam_periph *periph;
636 	struct	ada_softc *softc;
637 	union ccb *ccb;
638 	int error;
639 
640 	periph = (struct cam_periph *)dp->d_drv1;
641 	cam_periph_lock(periph);
642 	if (cam_periph_hold(periph, PRIBIO) != 0) {
643 		cam_periph_unlock(periph);
644 		cam_periph_release(periph);
645 		return (0);
646 	}
647 
648 	softc = (struct ada_softc *)periph->softc;
649 
650 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
651 	    ("adaclose\n"));
652 
653 	/* We only sync the cache if the drive is capable of it. */
654 	if ((softc->flags & ADA_FLAG_DIRTY) != 0 &&
655 	    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) != 0 &&
656 	    (periph->flags & CAM_PERIPH_INVALID) == 0) {
657 
658 		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
659 		cam_fill_ataio(&ccb->ataio,
660 				    1,
661 				    adadone,
662 				    CAM_DIR_NONE,
663 				    0,
664 				    NULL,
665 				    0,
666 				    ada_default_timeout*1000);
667 
668 		if (softc->flags & ADA_FLAG_CAN_48BIT)
669 			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
670 		else
671 			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
672 		error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
673 		    /*sense_flags*/0, softc->disk->d_devstat);
674 
675 		if (error != 0)
676 			xpt_print(periph->path, "Synchronize cache failed\n");
677 		else
678 			softc->flags &= ~ADA_FLAG_DIRTY;
679 		xpt_release_ccb(ccb);
680 	}
681 
682 	softc->flags &= ~ADA_FLAG_OPEN;
683 	cam_periph_unhold(periph);
684 	cam_periph_unlock(periph);
685 	cam_periph_release(periph);
686 	return (0);
687 }
688 
689 static void
690 adaschedule(struct cam_periph *periph)
691 {
692 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
693 	uint32_t prio;
694 
695 	if (softc->state != ADA_STATE_NORMAL)
696 		return;
697 
698 	/* Check if cam_periph_getccb() was called. */
699 	prio = periph->immediate_priority;
700 
701 	/* Check if we have more work to do. */
702 	if (bioq_first(&softc->bio_queue) ||
703 	    (!softc->trim_running && bioq_first(&softc->trim_queue))) {
704 		prio = CAM_PRIORITY_NORMAL;
705 	}
706 
707 	/* Schedule CCB if any of above is true. */
708 	if (prio != CAM_PRIORITY_NONE)
709 		xpt_schedule(periph, prio);
710 }
711 
712 /*
713  * Actually translate the requested transfer into one the physical driver
714  * can understand.  The transfer is described by a bio and will include
715  * only one physical transfer.
716  */
717 static void
718 adastrategy(struct bio *bp)
719 {
720 	struct cam_periph *periph;
721 	struct ada_softc *softc;
722 
723 	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
724 	softc = (struct ada_softc *)periph->softc;
725 
726 	cam_periph_lock(periph);
727 
728 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastrategy(%p)\n", bp));
729 
730 	/*
731 	 * If the device has been made invalid, error out
732 	 */
733 	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
734 		cam_periph_unlock(periph);
735 		biofinish(bp, NULL, ENXIO);
736 		return;
737 	}
738 
739 	/*
740 	 * Place it in the queue of disk activities for this disk
741 	 */
742 	if (bp->bio_cmd == BIO_DELETE &&
743 	    (softc->flags & ADA_FLAG_CAN_TRIM)) {
744 		if (ADA_SIO)
745 		    bioq_disksort(&softc->trim_queue, bp);
746 		else
747 		    bioq_insert_tail(&softc->trim_queue, bp);
748 	} else {
749 		if (ADA_SIO)
750 		    bioq_disksort(&softc->bio_queue, bp);
751 		else
752 		    bioq_insert_tail(&softc->bio_queue, bp);
753 	}
754 
755 	/*
756 	 * Schedule ourselves for performing the work.
757 	 */
758 	adaschedule(periph);
759 	cam_periph_unlock(periph);
760 
761 	return;
762 }
763 
764 static int
765 adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
766 {
767 	struct	    cam_periph *periph;
768 	struct	    ada_softc *softc;
769 	u_int	    secsize;
770 	union	    ccb ccb;
771 	struct	    disk *dp;
772 	uint64_t    lba;
773 	uint16_t    count;
774 	int	    error = 0;
775 
776 	dp = arg;
777 	periph = dp->d_drv1;
778 	softc = (struct ada_softc *)periph->softc;
779 	cam_periph_lock(periph);
780 	secsize = softc->params.secsize;
781 	lba = offset / secsize;
782 	count = length / secsize;
783 
784 	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
785 		cam_periph_unlock(periph);
786 		return (ENXIO);
787 	}
788 
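	/*
	 * A non-zero length writes dump data below; a zero-length call marks
	 * the end of the dump and falls through to the cache flush.
	 */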
789 	if (length > 0) {
790 		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
791 		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
792 		cam_fill_ataio(&ccb.ataio,
793 		    0,
794 		    adadone,
795 		    CAM_DIR_OUT,
796 		    0,
797 		    (u_int8_t *) virtual,
798 		    length,
799 		    ada_default_timeout*1000);
800 		if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
801 		    (lba + count >= ATA_MAX_28BIT_LBA ||
802 		    count >= 256)) {
803 			ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
804 			    0, lba, count);
805 		} else {
806 			ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
807 			    0, lba, count);
808 		}
809 		xpt_polled_action(&ccb);
810 
811 		error = cam_periph_error(&ccb,
812 		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
813 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
814 			cam_release_devq(ccb.ccb_h.path, /*relsim_flags*/0,
815 			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
816 		if (error != 0)
817 			printf("Aborting dump due to I/O error.\n");
818 
819 		cam_periph_unlock(periph);
820 		return (error);
821 	}
822 
823 	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
824 		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
825 
826 		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
827 		cam_fill_ataio(&ccb.ataio,
828 				    0,
829 				    adadone,
830 				    CAM_DIR_NONE,
831 				    0,
832 				    NULL,
833 				    0,
834 				    ada_default_timeout*1000);
835 
836 		if (softc->flags & ADA_FLAG_CAN_48BIT)
837 			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
838 		else
839 			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
840 		xpt_polled_action(&ccb);
841 
842 		error = cam_periph_error(&ccb,
843 		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
844 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
845 			cam_release_devq(ccb.ccb_h.path, /*relsim_flags*/0,
846 			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
847 		if (error != 0)
848 			xpt_print(periph->path, "Synchronize cache failed\n");
849 	}
850 	cam_periph_unlock(periph);
851 	return (error);
852 }
853 
854 static void
855 adainit(void)
856 {
857 	cam_status status;
858 
859 	/*
860 	 * Install a global async callback.  This callback will
861 	 * receive async callbacks like "new device found".
862 	 */
863 	status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);
864 
865 	if (status != CAM_REQ_CMP) {
866 		printf("ada: Failed to attach master async callback "
867 		       "due to status 0x%x!\n", status);
868 	} else if (ada_send_ordered) {
869 
870 		/* Register our event handlers */
871 		if ((EVENTHANDLER_REGISTER(power_suspend, adasuspend,
872 					   NULL, EVENTHANDLER_PRI_LAST)) == NULL)
873 		    printf("adainit: power event registration failed!\n");
874 		if ((EVENTHANDLER_REGISTER(power_resume, adaresume,
875 					   NULL, EVENTHANDLER_PRI_LAST)) == NULL)
876 		    printf("adainit: power event registration failed!\n");
877 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
878 					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
879 		    printf("adainit: shutdown event registration failed!\n");
880 	}
881 }
882 
883 /*
884  * Callback from GEOM, called when it has finished cleaning up its
885  * resources.
886  */
887 static void
888 adadiskgonecb(struct disk *dp)
889 {
890 	struct cam_periph *periph;
891 
892 	periph = (struct cam_periph *)dp->d_drv1;
893 
894 	cam_periph_release(periph);
895 }
896 
897 static void
898 adaoninvalidate(struct cam_periph *periph)
899 {
900 	struct ada_softc *softc;
901 
902 	softc = (struct ada_softc *)periph->softc;
903 
904 	/*
905 	 * De-register any async callbacks.
906 	 */
907 	xpt_register_async(0, adaasync, periph, periph->path);
908 
909 	/*
910 	 * Return all queued I/O with ENXIO.
911 	 * XXX Handle any transactions queued to the card
912 	 *     with XPT_ABORT_CCB.
913 	 */
914 	bioq_flush(&softc->bio_queue, NULL, ENXIO);
915 	bioq_flush(&softc->trim_queue, NULL, ENXIO);
916 
917 	disk_gone(softc->disk);
918 }
919 
920 static void
921 adacleanup(struct cam_periph *periph)
922 {
923 	struct ada_softc *softc;
924 
925 	softc = (struct ada_softc *)periph->softc;
926 
927 	cam_periph_unlock(periph);
928 
929 	/*
930 	 * If we can't free the sysctl tree, oh well...
931 	 */
932 	if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
933 	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
934 		xpt_print(periph->path, "can't remove sysctl context\n");
935 	}
936 
937 	disk_destroy(softc->disk);
938 	callout_drain(&softc->sendordered_c);
939 	free(softc, M_DEVBUF);
940 	cam_periph_lock(periph);
941 }
942 
943 static void
944 adaasync(void *callback_arg, u_int32_t code,
945 	struct cam_path *path, void *arg)
946 {
947 	struct ccb_getdev cgd;
948 	struct cam_periph *periph;
949 	struct ada_softc *softc;
950 
951 	periph = (struct cam_periph *)callback_arg;
952 	switch (code) {
953 	case AC_FOUND_DEVICE:
954 	{
955 		struct ccb_getdev *cgd;
956 		cam_status status;
957 
958 		cgd = (struct ccb_getdev *)arg;
959 		if (cgd == NULL)
960 			break;
961 
962 		if (cgd->protocol != PROTO_ATA)
963 			break;
964 
965 		/*
966 		 * Allocate a peripheral instance for
967 		 * this device and start the probe
968 		 * process.
969 		 */
970 		status = cam_periph_alloc(adaregister, adaoninvalidate,
971 					  adacleanup, adastart,
972 					  "ada", CAM_PERIPH_BIO,
973 					  cgd->ccb_h.path, adaasync,
974 					  AC_FOUND_DEVICE, cgd);
975 
976 		if (status != CAM_REQ_CMP
977 		 && status != CAM_REQ_INPROG)
978 			printf("adaasync: Unable to attach to new device "
979 				"due to status 0x%x\n", status);
980 		break;
981 	}
982 	case AC_GETDEV_CHANGED:
983 	{
984 		softc = (struct ada_softc *)periph->softc;
985 		xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
986 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
987 		xpt_action((union ccb *)&cgd);
988 
989 		if ((cgd.ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
990 		    (cgd.inq_flags & SID_DMA))
991 			softc->flags |= ADA_FLAG_CAN_DMA;
992 		else
993 			softc->flags &= ~ADA_FLAG_CAN_DMA;
994 		if (cgd.ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
995 			softc->flags |= ADA_FLAG_CAN_48BIT;
996 			if (cgd.inq_flags & SID_DMA48)
997 				softc->flags |= ADA_FLAG_CAN_DMA48;
998 			else
999 				softc->flags &= ~ADA_FLAG_CAN_DMA48;
1000 		} else
1001 			softc->flags &= ~(ADA_FLAG_CAN_48BIT |
1002 			    ADA_FLAG_CAN_DMA48);
1003 		if ((cgd.ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
1004 		    (cgd.inq_flags & SID_DMA) && (cgd.inq_flags & SID_CmdQue))
1005 			softc->flags |= ADA_FLAG_CAN_NCQ;
1006 		else
1007 			softc->flags &= ~ADA_FLAG_CAN_NCQ;
1008 		if ((cgd.ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
1009 		    (cgd.inq_flags & SID_DMA))
1010 			softc->flags |= ADA_FLAG_CAN_TRIM;
1011 		else
1012 			softc->flags &= ~ADA_FLAG_CAN_TRIM;
1013 
1014 		cam_periph_async(periph, code, path, arg);
1015 		break;
1016 	}
1017 	case AC_ADVINFO_CHANGED:
1018 	{
1019 		uintptr_t buftype;
1020 
1021 		buftype = (uintptr_t)arg;
1022 		if (buftype == CDAI_TYPE_PHYS_PATH) {
1023 			struct ada_softc *softc;
1024 
1025 			softc = periph->softc;
1026 			disk_attr_changed(softc->disk, "GEOM::physpath",
1027 					  M_NOWAIT);
1028 		}
1029 		break;
1030 	}
1031 	case AC_SENT_BDR:
1032 	case AC_BUS_RESET:
1033 	{
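		/*
		 * After a bus or device reset, re-apply the read-ahead and
		 * write-cache settings by re-running the setup states below.
		 */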
1034 		softc = (struct ada_softc *)periph->softc;
1035 		cam_periph_async(periph, code, path, arg);
1036 		if (softc->state != ADA_STATE_NORMAL)
1037 			break;
1038 		xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1039 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1040 		xpt_action((union ccb *)&cgd);
1041 		if (ADA_RA >= 0 &&
1042 		    cgd.ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD)
1043 			softc->state = ADA_STATE_RAHEAD;
1044 		else if (ADA_WC >= 0 &&
1045 		    cgd.ident_data.support.command1 & ATA_SUPPORT_WRITECACHE)
1046 			softc->state = ADA_STATE_WCACHE;
1047 		else
1048 		    break;
1049 		cam_periph_acquire(periph);
1050 		xpt_schedule(periph, CAM_PRIORITY_DEV);
1051 	}
1052 	default:
1053 		cam_periph_async(periph, code, path, arg);
1054 		break;
1055 	}
1056 }
1057 
1058 static void
1059 adasysctlinit(void *context, int pending)
1060 {
1061 	struct cam_periph *periph;
1062 	struct ada_softc *softc;
1063 	char tmpstr[80], tmpstr2[80];
1064 
1065 	periph = (struct cam_periph *)context;
1066 
1067 	/* periph was held for us when this task was enqueued */
1068 	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
1069 		cam_periph_release(periph);
1070 		return;
1071 	}
1072 
1073 	softc = (struct ada_softc *)periph->softc;
1074 	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
1075 	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
1076 
1077 	sysctl_ctx_init(&softc->sysctl_ctx);
1078 	softc->flags |= ADA_FLAG_SCTX_INIT;
1079 	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
1080 		SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
1081 		CTLFLAG_RD, 0, tmpstr);
1082 	if (softc->sysctl_tree == NULL) {
1083 		printf("adasysctlinit: unable to allocate sysctl tree\n");
1084 		cam_periph_release(periph);
1085 		return;
1086 	}
1087 
1088 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1089 		OID_AUTO, "read_ahead", CTLFLAG_RW | CTLFLAG_MPSAFE,
1090 		&softc->read_ahead, 0, "Enable disk read ahead.");
1091 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1092 		OID_AUTO, "write_cache", CTLFLAG_RW | CTLFLAG_MPSAFE,
1093 		&softc->write_cache, 0, "Enable disk write cache.");
1094 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1095 		OID_AUTO, "sort_io_queue", CTLFLAG_RW | CTLFLAG_MPSAFE,
1096 		&softc->sort_io_queue, 0,
1097 		"Sort I/O queue to try to optimise disk access patterns");
1098 #ifdef ADA_TEST_FAILURE
1099 	/*
1100 	 * Add a 'door bell' sysctl which allows one to set it from userland
1101 	 * and cause something bad to happen.  For the moment, we only allow
1102 	 * whacking the next read or write.
1103 	 */
1104 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1105 		OID_AUTO, "force_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1106 		&softc->force_read_error, 0,
1107 		"Force a read error for the next N reads.");
1108 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1109 		OID_AUTO, "force_write_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1110 		&softc->force_write_error, 0,
1111 		"Force a write error for the next N writes.");
1112 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1113 		OID_AUTO, "periodic_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1114 		&softc->periodic_read_error, 0,
1115 		"Force a read error every N reads (don't set too low).");
1116 #endif
1117 	cam_periph_release(periph);
1118 }
1119 
1120 static int
1121 adagetattr(struct bio *bp)
1122 {
1123 	int ret;
1124 	struct cam_periph *periph;
1125 
1126 	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1127 	cam_periph_lock(periph);
1128 	ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
1129 	    periph->path);
1130 	cam_periph_unlock(periph);
1131 	if (ret == 0)
1132 		bp->bio_completed = bp->bio_length;
1133 	return ret;
1134 }
1135 
1136 static cam_status
1137 adaregister(struct cam_periph *periph, void *arg)
1138 {
1139 	struct ada_softc *softc;
1140 	struct ccb_pathinq cpi;
1141 	struct ccb_getdev *cgd;
1142 	char   announce_buf[80], buf1[32];
1143 	struct disk_params *dp;
1144 	caddr_t match;
1145 	u_int maxio;
1146 	int legacy_id, quirks;
1147 
1148 	cgd = (struct ccb_getdev *)arg;
1149 	if (cgd == NULL) {
1150 		printf("adaregister: no getdev CCB, can't register device\n");
1151 		return(CAM_REQ_CMP_ERR);
1152 	}
1153 
1154 	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
1155 	    M_NOWAIT|M_ZERO);
1156 
1157 	if (softc == NULL) {
1158 		printf("adaregister: Unable to probe new device. "
1159 		    "Unable to allocate softc\n");
1160 		return(CAM_REQ_CMP_ERR);
1161 	}
1162 
1163 	bioq_init(&softc->bio_queue);
1164 	bioq_init(&softc->trim_queue);
1165 
1166 	if ((cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
1167 	    (cgd->inq_flags & SID_DMA))
1168 		softc->flags |= ADA_FLAG_CAN_DMA;
1169 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
1170 		softc->flags |= ADA_FLAG_CAN_48BIT;
1171 		if (cgd->inq_flags & SID_DMA48)
1172 			softc->flags |= ADA_FLAG_CAN_DMA48;
1173 	}
1174 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
1175 		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
1176 	if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT)
1177 		softc->flags |= ADA_FLAG_CAN_POWERMGT;
1178 	if ((cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
1179 	    (cgd->inq_flags & SID_DMA) && (cgd->inq_flags & SID_CmdQue))
1180 		softc->flags |= ADA_FLAG_CAN_NCQ;
1181 	if ((cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
1182 	    (cgd->inq_flags & SID_DMA)) {
1183 		softc->flags |= ADA_FLAG_CAN_TRIM;
1184 		softc->trim_max_ranges = TRIM_MAX_RANGES;
1185 		if (cgd->ident_data.max_dsm_blocks != 0) {
1186 			softc->trim_max_ranges =
1187 			    min(cgd->ident_data.max_dsm_blocks *
1188 				ATA_DSM_BLK_RANGES, softc->trim_max_ranges);
1189 		}
1190 	}
1191 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
1192 		softc->flags |= ADA_FLAG_CAN_CFA;
1193 
1194 	periph->softc = softc;
1195 
1196 	/*
1197 	 * See if this device has any quirks.
1198 	 */
1199 	match = cam_quirkmatch((caddr_t)&cgd->ident_data,
1200 			       (caddr_t)ada_quirk_table,
1201 			       sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
1202 			       sizeof(*ada_quirk_table), ata_identify_match);
1203 	if (match != NULL)
1204 		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
1205 	else
1206 		softc->quirks = ADA_Q_NONE;
1207 
1208 	bzero(&cpi, sizeof(cpi));
1209 	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE);
1210 	cpi.ccb_h.func_code = XPT_PATH_INQ;
1211 	xpt_action((union ccb *)&cpi);
1212 
1213 	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);
1214 
1215 	/*
1216 	 * Register this media as a disk
1217 	 */
1218 	(void)cam_periph_hold(periph, PRIBIO);
1219 	cam_periph_unlock(periph);
1220 	snprintf(announce_buf, sizeof(announce_buf),
1221 	    "kern.cam.ada.%d.quirks", periph->unit_number);
1222 	quirks = softc->quirks;
1223 	TUNABLE_INT_FETCH(announce_buf, &quirks);
1224 	softc->quirks = quirks;
1225 	softc->read_ahead = -1;
1226 	snprintf(announce_buf, sizeof(announce_buf),
1227 	    "kern.cam.ada.%d.read_ahead", periph->unit_number);
1228 	TUNABLE_INT_FETCH(announce_buf, &softc->read_ahead);
1229 	softc->write_cache = -1;
1230 	snprintf(announce_buf, sizeof(announce_buf),
1231 	    "kern.cam.ada.%d.write_cache", periph->unit_number);
1232 	TUNABLE_INT_FETCH(announce_buf, &softc->write_cache);
1233 	/* Disable queue sorting for non-rotational media by default. */
1234 	if (cgd->ident_data.media_rotation_rate == 1)
1235 		softc->sort_io_queue = 0;
1236 	else
1237 		softc->sort_io_queue = -1;
1238 	adagetparams(periph, cgd);
1239 	softc->disk = disk_alloc();
1240 	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
1241 			  periph->unit_number, softc->params.secsize,
1242 			  DEVSTAT_ALL_SUPPORTED,
1243 			  DEVSTAT_TYPE_DIRECT |
1244 			  XPORT_DEVSTAT_TYPE(cpi.transport),
1245 			  DEVSTAT_PRIORITY_DISK);
1246 	softc->disk->d_open = adaopen;
1247 	softc->disk->d_close = adaclose;
1248 	softc->disk->d_strategy = adastrategy;
1249 	softc->disk->d_getattr = adagetattr;
1250 	softc->disk->d_dump = adadump;
1251 	softc->disk->d_gone = adadiskgonecb;
1252 	softc->disk->d_name = "ada";
1253 	softc->disk->d_drv1 = periph;
1254 	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
1255 	if (maxio == 0)
1256 		maxio = DFLTPHYS;	/* traditional default */
1257 	else if (maxio > MAXPHYS)
1258 		maxio = MAXPHYS;	/* for safety */
1259 	if (softc->flags & ADA_FLAG_CAN_48BIT)
1260 		maxio = min(maxio, 65536 * softc->params.secsize);
1261 	else					/* 28bit ATA command limit */
1262 		maxio = min(maxio, 256 * softc->params.secsize);
1263 	softc->disk->d_maxsize = maxio;
1264 	softc->disk->d_unit = periph->unit_number;
1265 	softc->disk->d_flags = 0;
1266 	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
1267 		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
1268 	if (softc->flags & ADA_FLAG_CAN_TRIM) {
1269 		softc->disk->d_flags |= DISKFLAG_CANDELETE;
1270 		softc->disk->d_delmaxsize = softc->params.secsize *
1271 					    ATA_DSM_RANGE_MAX *
1272 					    softc->trim_max_ranges;
1273 	} else if ((softc->flags & ADA_FLAG_CAN_CFA) &&
1274 	    !(softc->flags & ADA_FLAG_CAN_48BIT)) {
1275 		softc->disk->d_flags |= DISKFLAG_CANDELETE;
1276 		softc->disk->d_delmaxsize = 256 * softc->params.secsize;
1277 	} else
1278 		softc->disk->d_delmaxsize = maxio;
1279 	if ((cpi.hba_misc & PIM_UNMAPPED) != 0)
1280 		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
1281 	strlcpy(softc->disk->d_descr, cgd->ident_data.model,
1282 	    MIN(sizeof(softc->disk->d_descr), sizeof(cgd->ident_data.model)));
1283 	strlcpy(softc->disk->d_ident, cgd->ident_data.serial,
1284 	    MIN(sizeof(softc->disk->d_ident), sizeof(cgd->ident_data.serial)));
1285 	softc->disk->d_hba_vendor = cpi.hba_vendor;
1286 	softc->disk->d_hba_device = cpi.hba_device;
1287 	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
1288 	softc->disk->d_hba_subdevice = cpi.hba_subdevice;
1289 
1290 	softc->disk->d_sectorsize = softc->params.secsize;
1291 	softc->disk->d_mediasize = (off_t)softc->params.sectors *
1292 	    softc->params.secsize;
1293 	if (ata_physical_sector_size(&cgd->ident_data) !=
1294 	    softc->params.secsize) {
1295 		softc->disk->d_stripesize =
1296 		    ata_physical_sector_size(&cgd->ident_data);
1297 		softc->disk->d_stripeoffset = (softc->disk->d_stripesize -
1298 		    ata_logical_sector_offset(&cgd->ident_data)) %
1299 		    softc->disk->d_stripesize;
1300 	} else if (softc->quirks & ADA_Q_4K) {
1301 		softc->disk->d_stripesize = 4096;
1302 		softc->disk->d_stripeoffset = 0;
1303 	}
1304 	softc->disk->d_fwsectors = softc->params.secs_per_track;
1305 	softc->disk->d_fwheads = softc->params.heads;
1306 	ata_disk_firmware_geom_adjust(softc->disk);
1307 
1308 	if (ada_legacy_aliases) {
1309 #ifdef ATA_STATIC_ID
1310 		legacy_id = xpt_path_legacy_ata_id(periph->path);
1311 #else
1312 		legacy_id = softc->disk->d_unit;
1313 #endif
1314 		if (legacy_id >= 0) {
1315 			snprintf(announce_buf, sizeof(announce_buf),
1316 			    "kern.devalias.%s%d",
1317 			    softc->disk->d_name, softc->disk->d_unit);
1318 			snprintf(buf1, sizeof(buf1),
1319 			    "ad%d", legacy_id);
1320 			setenv(announce_buf, buf1);
1321 		}
1322 	} else
1323 		legacy_id = -1;
1324 	/*
1325 	 * Acquire a reference to the periph before we register with GEOM.
1326 	 * We'll release this reference once GEOM calls us back (via
1327 	 * adadiskgonecb()) telling us that our provider has been freed.
1328 	 */
1329 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
1330 		xpt_print(periph->path, "%s: lost periph during "
1331 			  "registration!\n", __func__);
1332 		cam_periph_lock(periph);
1333 		return (CAM_REQ_CMP_ERR);
1334 	}
1335 	disk_create(softc->disk, DISK_VERSION);
1336 	cam_periph_lock(periph);
1337 	cam_periph_unhold(periph);
1338 
1339 	dp = &softc->params;
1340 	snprintf(announce_buf, sizeof(announce_buf),
1341 		"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
1342 		(uintmax_t)(((uintmax_t)dp->secsize *
1343 		dp->sectors) / (1024*1024)),
1344 		(uintmax_t)dp->sectors,
1345 		dp->secsize, dp->heads,
1346 		dp->secs_per_track, dp->cylinders);
1347 	xpt_announce_periph(periph, announce_buf);
1348 	xpt_announce_quirks(periph, softc->quirks, ADA_Q_BIT_STRING);
1349 	if (legacy_id >= 0)
1350 		printf("%s%d: Previously was known as ad%d\n",
1351 		       periph->periph_name, periph->unit_number, legacy_id);
1352 
1353 	/*
1354 	 * Create our sysctl variables, now that we know
1355 	 * we have successfully attached.
1356 	 */
1357 	cam_periph_acquire(periph);
1358 	taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);
1359 
1360 	/*
1361 	 * Add async callbacks for bus reset and
1362 	 * bus device reset calls.  I don't bother
1363 	 * checking if this fails as, in most cases,
1364 	 * the system will function just fine without
1365 	 * them and the only alternative would be to
1366 	 * not attach the device on failure.
1367 	 */
1368 	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
1369 	    AC_GETDEV_CHANGED | AC_ADVINFO_CHANGED,
1370 	    adaasync, periph, periph->path);
1371 
1372 	/*
1373 	 * Schedule a periodic event to occasionally send an
1374 	 * ordered tag to a device.
1375 	 */
1376 	callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
1377 	callout_reset(&softc->sendordered_c,
1378 	    (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL,
1379 	    adasendorderedtag, softc);
1380 
1381 	if (ADA_RA >= 0 &&
1382 	    cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD) {
1383 		softc->state = ADA_STATE_RAHEAD;
1384 		cam_periph_acquire(periph);
1385 		xpt_schedule(periph, CAM_PRIORITY_DEV);
1386 	} else if (ADA_WC >= 0 &&
1387 	    cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
1388 		softc->state = ADA_STATE_WCACHE;
1389 		cam_periph_acquire(periph);
1390 		xpt_schedule(periph, CAM_PRIORITY_DEV);
1391 	} else
1392 		softc->state = ADA_STATE_NORMAL;
1393 
1394 	return(CAM_REQ_CMP);
1395 }
1396 
1397 static void
1398 adastart(struct cam_periph *periph, union ccb *start_ccb)
1399 {
1400 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
1401 	struct ccb_ataio *ataio = &start_ccb->ataio;
1402 
1403 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastart\n"));
1404 
1405 	switch (softc->state) {
1406 	case ADA_STATE_NORMAL:
1407 	{
1408 		struct bio *bp;
1409 		u_int8_t tag_code;
1410 
1411 		/* Execute immediate CCB if waiting. */
1412 		if (periph->immediate_priority <= periph->pinfo.priority) {
1413 			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
1414 					("queuing for immediate ccb\n"));
1415 			start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
1416 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1417 					  periph_links.sle);
1418 			periph->immediate_priority = CAM_PRIORITY_NONE;
1419 			wakeup(&periph->ccb_list);
1420 			/* Have more work to do, so ensure we stay scheduled */
1421 			adaschedule(periph);
1422 			break;
1423 		}
1424 		/* Run TRIM if not running yet. */
1425 		if (!softc->trim_running &&
1426 		    (bp = bioq_first(&softc->trim_queue)) != 0) {
1427 			struct trim_request *req = &softc->trim_req;
1428 			struct bio *bp1;
1429 			uint64_t lastlba = (uint64_t)-1;
1430 			int bps = 0, c, lastcount = 0, off, ranges = 0;
1431 
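			/*
			 * Pack the queued delete bios into DSM range entries:
			 * each entry carries a 48-bit starting LBA in bytes
			 * 0-5 and a 16-bit sector count in bytes 6-7.
			 * Adjacent bios are coalesced until the request fills
			 * up or the trim queue runs dry.
			 */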
1432 			softc->trim_running = 1;
1433 			bzero(req, sizeof(*req));
1434 			bp1 = bp;
1435 			do {
1436 				uint64_t lba = bp1->bio_pblkno;
1437 				int count = bp1->bio_bcount /
1438 				    softc->params.secsize;
1439 
1440 				bioq_remove(&softc->trim_queue, bp1);
1441 
1442 				/* Try to extend the previous range. */
1443 				if (lba == lastlba) {
1444 					c = min(count, ATA_DSM_RANGE_MAX - lastcount);
1445 					lastcount += c;
1446 					off = (ranges - 1) * ATA_DSM_RANGE_SIZE;
1447 					req->data[off + 6] = lastcount & 0xff;
1448 					req->data[off + 7] =
1449 					    (lastcount >> 8) & 0xff;
1450 					count -= c;
1451 					lba += c;
1452 				}
1453 
1454 				while (count > 0) {
1455 					c = min(count, ATA_DSM_RANGE_MAX);
1456 					off = ranges * ATA_DSM_RANGE_SIZE;
1457 					req->data[off + 0] = lba & 0xff;
1458 					req->data[off + 1] = (lba >> 8) & 0xff;
1459 					req->data[off + 2] = (lba >> 16) & 0xff;
1460 					req->data[off + 3] = (lba >> 24) & 0xff;
1461 					req->data[off + 4] = (lba >> 32) & 0xff;
1462 					req->data[off + 5] = (lba >> 40) & 0xff;
1463 					req->data[off + 6] = c & 0xff;
1464 					req->data[off + 7] = (c >> 8) & 0xff;
1465 					lba += c;
1466 					count -= c;
1467 					lastcount = c;
1468 					ranges++;
1469 					/*
1470 					 * It's the caller's responsibility to ensure the
1471 					 * request will fit, so we don't need to check for
1472 					 * overrun here.
1473 					 */
1474 				}
1475 				lastlba = lba;
1476 				req->bps[bps++] = bp1;
1477 				bp1 = bioq_first(&softc->trim_queue);
1478 				if (bps >= TRIM_MAX_BIOS ||
1479 				    bp1 == NULL ||
1480 				    bp1->bio_bcount / softc->params.secsize >
1481 				    (softc->trim_max_ranges - ranges) *
1482 				    ATA_DSM_RANGE_MAX)
1483 					break;
1484 			} while (1);
1485 			cam_fill_ataio(ataio,
1486 			    ada_retry_count,
1487 			    adadone,
1488 			    CAM_DIR_OUT,
1489 			    0,
1490 			    req->data,
1491 			    ((ranges + ATA_DSM_BLK_RANGES - 1) /
1492 			        ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE,
1493 			    ada_default_timeout * 1000);
1494 			ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
1495 			    ATA_DSM_TRIM, 0, (ranges + ATA_DSM_BLK_RANGES -
1496 			    1) / ATA_DSM_BLK_RANGES);
1497 			start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
1498 			goto out;
1499 		}
1500 		/* Run regular command. */
1501 		bp = bioq_first(&softc->bio_queue);
1502 		if (bp == NULL) {
1503 			xpt_release_ccb(start_ccb);
1504 			break;
1505 		}
1506 		bioq_remove(&softc->bio_queue, bp);
1507 
1508 		if ((bp->bio_flags & BIO_ORDERED) != 0
1509 		 || (softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
1510 			softc->flags &= ~ADA_FLAG_NEED_OTAG;
1511 			softc->ordered_tag_count++;
1512 			tag_code = 0;
1513 		} else {
1514 			tag_code = 1;
1515 		}
1516 		switch (bp->bio_cmd) {
1517 		case BIO_WRITE:
1518 			softc->flags |= ADA_FLAG_DIRTY;
1519 			/* FALLTHROUGH */
1520 		case BIO_READ:
1521 		{
1522 			uint64_t lba = bp->bio_pblkno;
1523 			uint16_t count = bp->bio_bcount / softc->params.secsize;
1524 #ifdef ADA_TEST_FAILURE
1525 			int fail = 0;
1526 
1527 			/*
1528 			 * Support the failure sysctls.  If the command is a
1529 			 * read and there are pending forced read errors, or a
1530 			 * write and there are pending forced write errors, fail
1531 			 * this operation with EIO.  This is useful for testing
1532 			 * purposes.  Also, support having every Nth read fail.
1533 			 *
1534 			 * This is a rather blunt tool.
1535 			 */
1536 			if (bp->bio_cmd == BIO_READ) {
1537 				if (softc->force_read_error) {
1538 					softc->force_read_error--;
1539 					fail = 1;
1540 				}
1541 				if (softc->periodic_read_error > 0) {
1542 					if (++softc->periodic_read_count >=
1543 					    softc->periodic_read_error) {
1544 						softc->periodic_read_count = 0;
1545 						fail = 1;
1546 					}
1547 				}
1548 			} else {
1549 				if (softc->force_write_error) {
1550 					softc->force_write_error--;
1551 					fail = 1;
1552 				}
1553 			}
1554 			if (fail) {
1555 				bp->bio_error = EIO;
1556 				bp->bio_flags |= BIO_ERROR;
1557 				biodone(bp);
1558 				xpt_release_ccb(start_ccb);
1559 				adaschedule(periph);
1560 				return;
1561 			}
1562 #endif
1563 			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
1564 			    round_page(bp->bio_bcount + bp->bio_ma_offset) /
1565 			    PAGE_SIZE == bp->bio_ma_n,
1566 			    ("Short bio %p", bp));
1567 			cam_fill_ataio(ataio,
1568 			    ada_retry_count,
1569 			    adadone,
1570 			    (bp->bio_cmd == BIO_READ ? CAM_DIR_IN :
1571 				CAM_DIR_OUT) | ((bp->bio_flags & BIO_UNMAPPED)
1572 				!= 0 ? CAM_DATA_BIO : 0),
1573 			    tag_code,
1574 			    ((bp->bio_flags & BIO_UNMAPPED) != 0) ? (void *)bp :
1575 				bp->bio_data,
1576 			    bp->bio_bcount,
1577 			    ada_default_timeout*1000);
1578 
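			/*
			 * Prefer NCQ when supported; otherwise fall back to
			 * 48-bit commands when the transfer reaches beyond the
			 * 28-bit LBA range or exceeds a 256-sector count.
			 */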
1579 			if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) {
1580 				if (bp->bio_cmd == BIO_READ) {
1581 					ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
1582 					    lba, count);
1583 				} else {
1584 					ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
1585 					    lba, count);
1586 				}
1587 			} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
1588 			    (lba + count >= ATA_MAX_28BIT_LBA ||
1589 			    count > 256)) {
1590 				if (softc->flags & ADA_FLAG_CAN_DMA48) {
1591 					if (bp->bio_cmd == BIO_READ) {
1592 						ata_48bit_cmd(ataio, ATA_READ_DMA48,
1593 						    0, lba, count);
1594 					} else {
1595 						ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
1596 						    0, lba, count);
1597 					}
1598 				} else {
1599 					if (bp->bio_cmd == BIO_READ) {
1600 						ata_48bit_cmd(ataio, ATA_READ_MUL48,
1601 						    0, lba, count);
1602 					} else {
1603 						ata_48bit_cmd(ataio, ATA_WRITE_MUL48,
1604 						    0, lba, count);
1605 					}
1606 				}
1607 			} else {
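				/* A 28-bit command encodes a 256-sector count as 0. */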
1608 				if (count == 256)
1609 					count = 0;
1610 				if (softc->flags & ADA_FLAG_CAN_DMA) {
1611 					if (bp->bio_cmd == BIO_READ) {
1612 						ata_28bit_cmd(ataio, ATA_READ_DMA,
1613 						    0, lba, count);
1614 					} else {
1615 						ata_28bit_cmd(ataio, ATA_WRITE_DMA,
1616 						    0, lba, count);
1617 					}
1618 				} else {
1619 					if (bp->bio_cmd == BIO_READ) {
1620 						ata_28bit_cmd(ataio, ATA_READ_MUL,
1621 						    0, lba, count);
1622 					} else {
1623 						ata_28bit_cmd(ataio, ATA_WRITE_MUL,
1624 						    0, lba, count);
1625 					}
1626 				}
1627 			}
1628 			break;
1629 		}
1630 		case BIO_DELETE:
1631 		{
1632 			uint64_t lba = bp->bio_pblkno;
1633 			uint16_t count = bp->bio_bcount / softc->params.secsize;
1634 
1635 			cam_fill_ataio(ataio,
1636 			    ada_retry_count,
1637 			    adadone,
1638 			    CAM_DIR_NONE,
1639 			    0,
1640 			    NULL,
1641 			    0,
1642 			    ada_default_timeout*1000);
1643 
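			/*
			 * d_delmaxsize limits CFA deletes to 256 sectors;
			 * as with other 28-bit commands, 256 is encoded as 0.
			 */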
1644 			if (count >= 256)
1645 				count = 0;
1646 			ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
1647 			break;
1648 		}
1649 		case BIO_FLUSH:
1650 			cam_fill_ataio(ataio,
1651 			    1,
1652 			    adadone,
1653 			    CAM_DIR_NONE,
1654 			    0,
1655 			    NULL,
1656 			    0,
1657 			    ada_default_timeout*1000);
1658 
1659 			if (softc->flags & ADA_FLAG_CAN_48BIT)
1660 				ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
1661 			else
1662 				ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
1663 			break;
1664 		}
1665 		start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
1666 out:
1667 		start_ccb->ccb_h.ccb_bp = bp;
1668 		softc->outstanding_cmds++;
1669 		xpt_action(start_ccb);
1670 
1671 		/* May have more work to do, so ensure we stay scheduled */
1672 		adaschedule(periph);
1673 		break;
1674 	}
1675 	case ADA_STATE_RAHEAD:
1676 	case ADA_STATE_WCACHE:
1677 	{
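		/*
		 * Enable or disable the drive's read look-ahead or write
		 * cache to match the configured settings.  CAM_DEV_QFREEZE
		 * holds off other I/O until adadone() releases the device
		 * queue.
		 */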
1678 		if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
1679 			softc->state = ADA_STATE_NORMAL;
1680 			xpt_release_ccb(start_ccb);
1681 			cam_periph_release_locked(periph);
1682 			return;
1683 		}
1684 
1685 		cam_fill_ataio(ataio,
1686 		    1,
1687 		    adadone,
1688 		    CAM_DIR_NONE,
1689 		    0,
1690 		    NULL,
1691 		    0,
1692 		    ada_default_timeout*1000);
1693 
1694 		if (softc->state == ADA_STATE_RAHEAD) {
1695 			ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_RA ?
1696 			    ATA_SF_ENAB_RCACHE : ATA_SF_DIS_RCACHE, 0, 0);
1697 			start_ccb->ccb_h.ccb_state = ADA_CCB_RAHEAD;
1698 		} else {
1699 			ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_WC ?
1700 			    ATA_SF_ENAB_WCACHE : ATA_SF_DIS_WCACHE, 0, 0);
1701 			start_ccb->ccb_h.ccb_state = ADA_CCB_WCACHE;
1702 		}
1703 		start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1704 		xpt_action(start_ccb);
1705 		break;
1706 	}
1707 	}
1708 }
1709 
1710 static void
1711 adadone(struct cam_periph *periph, union ccb *done_ccb)
1712 {
1713 	struct ada_softc *softc;
1714 	struct ccb_ataio *ataio;
1715 	struct ccb_getdev *cgd;
1716 	struct cam_path *path;
1717 	int state;
1718 
1719 	softc = (struct ada_softc *)periph->softc;
1720 	ataio = &done_ccb->ataio;
1721 	path = done_ccb->ccb_h.path;
1722 
1723 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("adadone\n"));
1724 
1725 	state = ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK;
1726 	switch (state) {
1727 	case ADA_CCB_BUFFER_IO:
1728 	case ADA_CCB_TRIM:
1729 	{
1730 		struct bio *bp;
1731 		int error;
1732 
1733 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1734 			error = adaerror(done_ccb, 0, 0);
1735 			if (error == ERESTART) {
1736 				/* A retry was scheduled, so just return. */
1737 				return;
1738 			}
1739 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1740 				cam_release_devq(path,
1741 						 /*relsim_flags*/0,
1742 						 /*reduction*/0,
1743 						 /*timeout*/0,
1744 						 /*getcount_only*/0);
1745 		} else {
1746 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1747 				panic("REQ_CMP with QFRZN");
1748 			error = 0;
1749 		}
1750 		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
1751 		bp->bio_error = error;
1752 		if (error != 0) {
1753 			bp->bio_resid = bp->bio_bcount;
1754 			bp->bio_flags |= BIO_ERROR;
1755 		} else {
1756 			if (state == ADA_CCB_TRIM)
1757 				bp->bio_resid = 0;
1758 			else
1759 				bp->bio_resid = ataio->resid;
1760 			if (bp->bio_resid > 0)
1761 				bp->bio_flags |= BIO_ERROR;
1762 		}
1763 		softc->outstanding_cmds--;
1764 		if (softc->outstanding_cmds == 0)
1765 			softc->flags |= ADA_FLAG_WENT_IDLE;
1766 		if (state == ADA_CCB_TRIM) {
1767 			struct trim_request *req =
1768 			    (struct trim_request *)ataio->data_ptr;
1769 			int i;
1770 
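			/*
			 * Finish any additional bios that were collapsed
			 * into this TRIM request; the bio attached to the
			 * CCB itself is finished below.
			 */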
1771 			for (i = 1; i < TRIM_MAX_BIOS && req->bps[i]; i++) {
1772 				struct bio *bp1 = req->bps[i];
1773 
1774 				bp1->bio_error = bp->bio_error;
1775 				if (bp->bio_flags & BIO_ERROR) {
1776 					bp1->bio_flags |= BIO_ERROR;
1777 					bp1->bio_resid = bp1->bio_bcount;
1778 				} else
1779 					bp1->bio_resid = 0;
1780 				biodone(bp1);
1781 			}
1782 			softc->trim_running = 0;
1783 			biodone(bp);
1784 			adaschedule(periph);
1785 		} else
1786 			biodone(bp);
1787 		break;
1788 	}
1789 	case ADA_CCB_RAHEAD:
1790 	{
1791 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1792 			if (adaerror(done_ccb, 0, 0) == ERESTART) {
1793 out:
1794 				/* Drop freeze taken due to CAM_DEV_QFREEZE */
1795 				cam_release_devq(path, 0, 0, 0, FALSE);
1796 				return;
1797 			} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1798 				cam_release_devq(path,
1799 				    /*relsim_flags*/0,
1800 				    /*reduction*/0,
1801 				    /*timeout*/0,
1802 				    /*getcount_only*/0);
1803 			}
1804 		}
1805 
1806 		/*
1807 		 * Since our peripheral may be invalidated by an error
1808 		 * above or an external event, we must release our CCB
1809 		 * before releasing the reference on the peripheral.
1810 		 * The peripheral will only go away once the last reference
1811 		 * is removed, and we need it around for the CCB release
1812 		 * operation.
1813 		 */
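		/*
		 * Reuse the completed CCB as an XPT_GDEV_TYPE request to
		 * refetch the identify data and decide whether the write
		 * cache still needs to be configured.
		 */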
1814 		cgd = (struct ccb_getdev *)done_ccb;
1815 		xpt_setup_ccb(&cgd->ccb_h, path, CAM_PRIORITY_NORMAL);
1816 		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
1817 		xpt_action((union ccb *)cgd);
1818 		if (ADA_WC >= 0 &&
1819 		    cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
1820 			softc->state = ADA_STATE_WCACHE;
1821 			xpt_release_ccb(done_ccb);
1822 			xpt_schedule(periph, CAM_PRIORITY_DEV);
1823 			goto out;
1824 		}
1825 		softc->state = ADA_STATE_NORMAL;
1826 		xpt_release_ccb(done_ccb);
1827 		/* Drop freeze taken due to CAM_DEV_QFREEZE */
1828 		cam_release_devq(path, 0, 0, 0, FALSE);
1829 		adaschedule(periph);
1830 		cam_periph_release_locked(periph);
1831 		return;
1832 	}
1833 	case ADA_CCB_WCACHE:
1834 	{
1835 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1836 			if (adaerror(done_ccb, 0, 0) == ERESTART) {
1837 				goto out;
1838 			} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1839 				cam_release_devq(path,
1840 				    /*relsim_flags*/0,
1841 				    /*reduction*/0,
1842 				    /*timeout*/0,
1843 				    /*getcount_only*/0);
1844 			}
1845 		}
1846 
1847 		softc->state = ADA_STATE_NORMAL;
1848 		/*
1849 		 * Since our peripheral may be invalidated by an error
1850 		 * above or an external event, we must release our CCB
1851 		 * before releasing the reference on the peripheral.
1852 		 * The peripheral will only go away once the last reference
1853 		 * is removed, and we need it around for the CCB release
1854 		 * operation.
1855 		 */
1856 		xpt_release_ccb(done_ccb);
1857 		/* Drop freeze taken due to CAM_DEV_QFREEZE */
1858 		cam_release_devq(path, 0, 0, 0, FALSE);
1859 		adaschedule(periph);
1860 		cam_periph_release_locked(periph);
1861 		return;
1862 	}
1863 	case ADA_CCB_WAITING:
1864 	{
1865 		/* Caller will release the CCB */
1866 		wakeup(&done_ccb->ccb_h.cbfcnp);
1867 		return;
1868 	}
1869 	case ADA_CCB_DUMP:
1870 		/* No-op.  We're polling */
1871 		return;
1872 	default:
1873 		break;
1874 	}
1875 	xpt_release_ccb(done_ccb);
1876 }
1877 
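/*
 * Pass CCB errors to the generic CAM peripheral error handler; no
 * driver-specific recovery is performed here.
 */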
1878 static int
1879 adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1880 {
1881 
1882 	return(cam_periph_error(ccb, cam_flags, sense_flags, NULL));
1883 }
1884 
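/*
 * Fill in the disk parameters from the ATA IDENTIFY data, preferring the
 * 48-bit LBA capacity, then the 28-bit LBA capacity, then the reported
 * CHS geometry.
 */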
1885 static void
1886 adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd)
1887 {
1888 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
1889 	struct disk_params *dp = &softc->params;
1890 	u_int64_t lbasize48;
1891 	u_int32_t lbasize;
1892 
1893 	dp->secsize = ata_logical_sector_size(&cgd->ident_data);
1894 	if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
1895 		cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
1896 		dp->heads = cgd->ident_data.current_heads;
1897 		dp->secs_per_track = cgd->ident_data.current_sectors;
1898 		dp->cylinders = cgd->ident_data.cylinders;
1899 		dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
1900 			  ((u_int32_t)cgd->ident_data.current_size_2 << 16);
1901 	} else {
1902 		dp->heads = cgd->ident_data.heads;
1903 		dp->secs_per_track = cgd->ident_data.sectors;
1904 		dp->cylinders = cgd->ident_data.cylinders;
1905 		dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
1906 	}
1907 	lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
1908 		  ((u_int32_t)cgd->ident_data.lba_size_2 << 16);
1909 
1910 	/* Use the 28-bit LBA size if the CHS geometry is capped or smaller. */
1911 	if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
1912 		dp->sectors = lbasize;
1913 
1914 	/* Use the 48-bit LBA size if supported and above the 28-bit limit. */
1915 	lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
1916 		    ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
1917 		    ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
1918 		    ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
1919 	if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
1920 	    lbasize48 > ATA_MAX_28BIT_LBA)
1921 		dp->sectors = lbasize48;
1922 }
1923 
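/*
 * Periodic callout: if the device has stayed busy without an ordered tag
 * being issued, request that the next command be sent as an ordered tag,
 * then reschedule ourselves.
 */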
1924 static void
1925 adasendorderedtag(void *arg)
1926 {
1927 	struct ada_softc *softc = arg;
1928 
1929 	if (ada_send_ordered) {
1930 		if ((softc->ordered_tag_count == 0)
1931 		 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
1932 			softc->flags |= ADA_FLAG_NEED_OTAG;
1933 		}
1934 		if (softc->outstanding_cmds > 0)
1935 			softc->flags &= ~ADA_FLAG_WENT_IDLE;
1936 
1937 		softc->ordered_tag_count = 0;
1938 	}
1939 	/* Queue us up again */
1940 	callout_reset(&softc->sendordered_c,
1941 	    (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL,
1942 	    adasendorderedtag, softc);
1943 }
1944 
1945 /*
1946  * Step through all ADA peripheral drivers, and if the device is still open,
1947  * sync the disk cache to physical media.
1948  */
1949 static void
1950 adaflush(void)
1951 {
1952 	struct cam_periph *periph;
1953 	struct ada_softc *softc;
1954 	union ccb *ccb;
1955 	int error;
1956 
1957 	CAM_PERIPH_FOREACH(periph, &adadriver) {
1958 		softc = (struct ada_softc *)periph->softc;
1959 		if (SCHEDULER_STOPPED()) {
1960 			/* If we panicked with the lock held, do not recurse. */
1961 			if (!cam_periph_owned(periph) &&
1962 			    (softc->flags & ADA_FLAG_OPEN)) {
1963 				adadump(softc->disk, NULL, 0, 0, 0);
1964 			}
1965 			continue;
1966 		}
1967 		cam_periph_lock(periph);
1968 		/*
1969 		 * We only sync the cache if the drive is still open and
1970 		 * is capable of flushing its cache.
1971 		 */
1972 		if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
1973 		    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
1974 			cam_periph_unlock(periph);
1975 			continue;
1976 		}
1977 
1978 		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
1979 		cam_fill_ataio(&ccb->ataio,
1980 				    0,
1981 				    adadone,
1982 				    CAM_DIR_NONE,
1983 				    0,
1984 				    NULL,
1985 				    0,
1986 				    ada_default_timeout*1000);
1987 		if (softc->flags & ADA_FLAG_CAN_48BIT)
1988 			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
1989 		else
1990 			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
1991 
1992 		error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
1993 		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY,
1994 		    softc->disk->d_devstat);
1995 		if (error != 0)
1996 			xpt_print(periph->path, "Synchronize cache failed\n");
1997 		xpt_release_ccb(ccb);
1998 		cam_periph_unlock(periph);
1999 	}
2000 }
2001 
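/*
 * Step through all ADA peripheral drivers and issue the given power
 * management command (e.g. STANDBY IMMEDIATE or SLEEP) to each drive
 * that supports it.
 */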
2002 static void
2003 adaspindown(uint8_t cmd, int flags)
2004 {
2005 	struct cam_periph *periph;
2006 	struct ada_softc *softc;
2007 	union ccb *ccb;
2008 	int error;
2009 
2010 	CAM_PERIPH_FOREACH(periph, &adadriver) {
2011 		/* If we panicked with the lock held, do not recurse here. */
2012 		if (cam_periph_owned(periph))
2013 			continue;
2014 		cam_periph_lock(periph);
2015 		softc = (struct ada_softc *)periph->softc;
2016 		/*
2017 		 * We only spin down the drive if it supports power management.
2018 		 */
2019 		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
2020 			cam_periph_unlock(periph);
2021 			continue;
2022 		}
2023 
2024 		if (bootverbose)
2025 			xpt_print(periph->path, "spin-down\n");
2026 
2027 		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
2028 		cam_fill_ataio(&ccb->ataio,
2029 				    0,
2030 				    adadone,
2031 				    CAM_DIR_NONE | flags,
2032 				    0,
2033 				    NULL,
2034 				    0,
2035 				    ada_default_timeout*1000);
2036 		ata_28bit_cmd(&ccb->ataio, cmd, 0, 0, 0);
2037 
2038 		error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
2039 		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY,
2040 		    softc->disk->d_devstat);
2041 		if (error != 0)
2042 			xpt_print(periph->path, "Spin-down disk failed\n");
2043 		xpt_release_ccb(ccb);
2044 		cam_periph_unlock(periph);
2045 	}
2046 }
2047 
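/*
 * Shutdown hook: flush all disk caches and, when halting or powering off,
 * spin the disks down if ada_spindown_shutdown is set.
 */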
2048 static void
2049 adashutdown(void *arg, int howto)
2050 {
2051 
2052 	adaflush();
2053 	if (ada_spindown_shutdown != 0 &&
2054 	    (howto & (RB_HALT | RB_POWEROFF)) != 0)
2055 		adaspindown(ATA_STANDBY_IMMEDIATE, 0);
2056 }
2057 
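/*
 * Suspend hook: flush all disk caches and, if ada_spindown_suspend is set,
 * put the disks to sleep with their device queues frozen.
 */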
2058 static void
2059 adasuspend(void *arg)
2060 {
2061 
2062 	adaflush();
2063 	if (ada_spindown_suspend != 0)
2064 		adaspindown(ATA_SLEEP, CAM_DEV_QFREEZE);
2065 }
2066 
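/*
 * Resume hook: release the queue freeze taken when the disks were put to
 * sleep in adasuspend().
 */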
2067 static void
2068 adaresume(void *arg)
2069 {
2070 	struct cam_periph *periph;
2071 	struct ada_softc *softc;
2072 
2073 	if (ada_spindown_suspend == 0)
2074 		return;
2075 
2076 	CAM_PERIPH_FOREACH(periph, &adadriver) {
2077 		cam_periph_lock(periph);
2078 		softc = (struct ada_softc *)periph->softc;
2079 		/*
2080 		 * Only drives capable of power management were frozen on suspend.
2081 		 */
2082 		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
2083 			cam_periph_unlock(periph);
2084 			continue;
2085 		}
2086 
2087 		if (bootverbose)
2088 			xpt_print(periph->path, "resume\n");
2089 
2090 		/*
2091 		 * Drop freeze taken due to CAM_DEV_QFREEZE flag set on
2092 		 * sleep request.
2093 		 */
2094 		cam_release_devq(periph->path,
2095 			 /*relsim_flags*/0,
2096 			 /*openings*/0,
2097 			 /*timeout*/0,
2098 			 /*getcount_only*/0);
2099 
2100 		cam_periph_unlock(periph);
2101 	}
2102 }
2103 
2104 #endif /* _KERNEL */
2105