xref: /freebsd/sys/cam/scsi/scsi_da.c (revision 1c05a6ea6b849ff95e539c31adea887c644a6a01)
1 /*-
2  * Implementation of SCSI Direct Access Peripheral driver for CAM.
3  *
4  * Copyright (c) 1997 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 
34 #ifdef _KERNEL
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/bio.h>
38 #include <sys/sysctl.h>
39 #include <sys/taskqueue.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/conf.h>
43 #include <sys/devicestat.h>
44 #include <sys/eventhandler.h>
45 #include <sys/malloc.h>
46 #include <sys/cons.h>
47 #include <sys/endian.h>
48 #include <sys/proc.h>
49 #include <sys/sbuf.h>
50 #include <geom/geom.h>
51 #include <geom/geom_disk.h>
52 #endif /* _KERNEL */
53 
54 #ifndef _KERNEL
55 #include <stdio.h>
56 #include <string.h>
57 #endif /* _KERNEL */
58 
59 #include <cam/cam.h>
60 #include <cam/cam_ccb.h>
61 #include <cam/cam_periph.h>
62 #include <cam/cam_xpt_periph.h>
63 #include <cam/cam_sim.h>
64 #include <cam/cam_iosched.h>
65 
66 #include <cam/scsi/scsi_message.h>
67 #include <cam/scsi/scsi_da.h>
68 
69 #ifdef _KERNEL
70 /*
71  * Note that there are probe ordering dependencies here.  The order isn't
72  * controlled by this enumeration, but by explicit state transitions in
73  * dastart() and dadone().  Here are some of the dependencies:
74  *
75  * 1. RC should come first, before RC16, unless there is evidence that RC16
76  *    is supported.
77  * 2. BDC needs to come before any of the ATA probes, or the ZONE probe.
78  * 3. The ATA probes should go in this order:
79  *    ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
80  */
/*
 * Driver state: the DA_STATE_PROBE_* values are the probe steps walked
 * by dastart()/dadone() (ordering constraints are described above);
 * DA_STATE_NORMAL is reached once probing is complete.
 */
typedef enum {
	DA_STATE_PROBE_RC,		/* READ CAPACITY(10) */
	DA_STATE_PROBE_RC16,		/* READ CAPACITY(16) */
	DA_STATE_PROBE_LBP,		/* Logical Block Provisioning VPD */
	DA_STATE_PROBE_BLK_LIMITS,	/* Block Limits VPD */
	DA_STATE_PROBE_BDC,		/* Block Device Characteristics VPD */
	DA_STATE_PROBE_ATA,		/* ATA passthrough identify */
	DA_STATE_PROBE_ATA_LOGDIR,	/* ATA log directory */
	DA_STATE_PROBE_ATA_IDDIR,	/* ATA identify data log directory */
	DA_STATE_PROBE_ATA_SUP,		/* ATA supported capabilities */
	DA_STATE_PROBE_ATA_ZONE,	/* ATA zoned-device information */
	DA_STATE_PROBE_ZONE,		/* SCSI zoned-device information */
	DA_STATE_NORMAL			/* Probing done; normal I/O */
} da_state;
95 
/*
 * Per-device flag bits kept in the softc.  Note that bit 0x000010 is
 * currently unassigned (values jump from 0x08 to 0x20).
 */
typedef enum {
	DA_FLAG_PACK_INVALID	= 0x000001,
	DA_FLAG_NEW_PACK	= 0x000002,
	DA_FLAG_PACK_LOCKED	= 0x000004,
	DA_FLAG_PACK_REMOVABLE	= 0x000008,
	DA_FLAG_NEED_OTAG	= 0x000020,
	DA_FLAG_WAS_OTAG	= 0x000040,
	DA_FLAG_RETRY_UA	= 0x000080,
	DA_FLAG_OPEN		= 0x000100,
	DA_FLAG_SCTX_INIT	= 0x000200,
	DA_FLAG_CAN_RC16	= 0x000400,	/* READ CAPACITY(16) usable */
	DA_FLAG_PROBED		= 0x000800,
	DA_FLAG_DIRTY		= 0x001000,
	DA_FLAG_ANNOUNCED	= 0x002000,
	DA_FLAG_CAN_ATA_DMA	= 0x004000,
	DA_FLAG_CAN_ATA_LOG	= 0x008000,
	DA_FLAG_CAN_ATA_IDLOG	= 0x010000,
	DA_FLAG_CAN_ATA_SUPCAP	= 0x020000,
	DA_FLAG_CAN_ATA_ZONE	= 0x040000
} da_flags;
116 
/*
 * Per-device quirk bits, assigned by matching the inquiry data against
 * da_quirk_table below.  Printed with the DA_Q_BIT_STRING %b format.
 */
typedef enum {
	DA_Q_NONE		= 0x00,
	DA_Q_NO_SYNC_CACHE	= 0x01,	/* Skip SYNCHRONIZE CACHE */
	DA_Q_NO_6_BYTE		= 0x02,	/* Never use 6-byte CDBs */
	DA_Q_NO_PREVENT		= 0x04,	/* Skip PREVENT ALLOW MEDIUM REMOVAL */
	DA_Q_4K			= 0x08,	/* 4k-sector (Advanced Format) device */
	DA_Q_NO_RC16		= 0x10,	/* READ CAPACITY(16) unreliable */
	DA_Q_NO_UNMAP		= 0x20,	/* Do not use UNMAP for deletes */
	DA_Q_RETRY_BUSY		= 0x40,	/* Retry commands that return BUSY */
	DA_Q_SMR_DM		= 0x80,	/* Drive-managed SMR device */
	DA_Q_STRICT_UNMAP	= 0x100	/* UNMAP must follow limits strictly */
} da_quirks;
129 
/*
 * printf(9) %b format string for da_quirks: the leading \020 selects
 * hexadecimal output, and each \NNN prefix is the 1-based bit number
 * of the name that follows.
 */
#define DA_Q_BIT_STRING		\
	"\020"			\
	"\001NO_SYNC_CACHE"	\
	"\002NO_6_BYTE"		\
	"\003NO_PREVENT"	\
	"\0044K"		\
	"\005NO_RC16"		\
	"\006NO_UNMAP"		\
	"\007RETRY_BUSY"	\
	"\010SMR_DM"		\
	"\011STRICT_UNMAP"
141 
/*
 * Type tag stored in a CCB's private area (see the ccb_state define
 * below) to tell dadone() what the CCB was for.  The low bits
 * (DA_CCB_TYPE_MASK) hold the type; DA_CCB_RETRY_UA is a flag bit
 * ORed in above the mask.  Values 0x08 and 0x09 are unassigned.
 */
typedef enum {
	DA_CCB_PROBE_RC		= 0x01,
	DA_CCB_PROBE_RC16	= 0x02,
	DA_CCB_PROBE_LBP	= 0x03,
	DA_CCB_PROBE_BLK_LIMITS	= 0x04,
	DA_CCB_PROBE_BDC	= 0x05,
	DA_CCB_PROBE_ATA	= 0x06,
	DA_CCB_BUFFER_IO	= 0x07,
	DA_CCB_DUMP		= 0x0A,
	DA_CCB_DELETE		= 0x0B,
 	DA_CCB_TUR		= 0x0C,
	DA_CCB_PROBE_ZONE	= 0x0D,
	DA_CCB_PROBE_ATA_LOGDIR	= 0x0E,
	DA_CCB_PROBE_ATA_IDDIR	= 0x0F,
	DA_CCB_PROBE_ATA_SUP	= 0x10,
	DA_CCB_PROBE_ATA_ZONE	= 0x11,
	DA_CCB_TYPE_MASK	= 0x1F,	/* Mask for the type field */
	DA_CCB_RETRY_UA		= 0x20	/* Flag: retry on Unit Attention */
} da_ccb_state;
161 
162 /*
163  * Order here is important for method choice
164  *
165  * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to
166  * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted 20% quicker deletes
167  * using ATA_TRIM than the corresponding UNMAP results for a real world mysql
168  * import taking 5mins.
169  *
170  */
/*
 * Available block-delete (TRIM/UNMAP) methods, ordered by preference
 * (see the rationale above).  DA_DELETE_MIN..DA_DELETE_MAX bound the
 * real methods; NONE/DISABLE mean no deletion is performed.
 */
typedef enum {
	DA_DELETE_NONE,			/* Device cannot delete */
	DA_DELETE_DISABLE,		/* Deletion administratively off */
	DA_DELETE_ATA_TRIM,		/* ATA DSM TRIM via passthrough */
	DA_DELETE_UNMAP,		/* SCSI UNMAP */
	DA_DELETE_WS16,			/* WRITE SAME(16) with UNMAP bit */
	DA_DELETE_WS10,			/* WRITE SAME(10) with UNMAP bit */
	DA_DELETE_ZERO,			/* WRITE SAME of zeroes */
	DA_DELETE_MIN = DA_DELETE_ATA_TRIM,
	DA_DELETE_MAX = DA_DELETE_ZERO
} da_delete_methods;
182 
183 /*
184  * For SCSI, host managed drives show up as a separate device type.  For
185  * ATA, host managed drives also have a different device signature.
186  * XXX KDM figure out the ATA host managed signature.
187  */
/* Zoned block device model (ZBC/ZAC), if any. */
typedef enum {
	DA_ZONE_NONE		= 0x00,	/* Not a zoned device */
	DA_ZONE_DRIVE_MANAGED	= 0x01,	/* Drive hides zoning from host */
	DA_ZONE_HOST_AWARE	= 0x02,	/* Host should, but need not, comply */
	DA_ZONE_HOST_MANAGED	= 0x03	/* Host must follow zone rules */
} da_zone_mode;
194 
195 /*
196  * We distinguish between these interface cases in addition to the drive type:
197  * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
198  * o ATA drive behind a SCSI translation layer that does not know about
199  *   ZBC/ZAC, and so needs to be managed via ATA passthrough.  In this
200  *   case, we would need to share the ATA code with the ada(4) driver.
201  * o SCSI drive.
202  */
/* How zone commands reach the device (see interface cases above). */
typedef enum {
	DA_ZONE_IF_SCSI,	/* Native SCSI zoned device */
	DA_ZONE_IF_ATA_PASS,	/* ATA device via ATA passthrough */
	DA_ZONE_IF_ATA_SAT,	/* ATA device via ZBC/ZAC-aware SAT layer */
} da_zone_interface;
208 
/*
 * Zoned-device capability and status flags.  The *_SUP bits record
 * which zone commands the device supports (combined in SUP_MASK); the
 * *_SET bits record which optimal/max zone counts have been learned
 * (combined in SET_MASK).
 */
typedef enum {
	DA_ZONE_FLAG_RZ_SUP		= 0x0001,	/* REPORT ZONES */
	DA_ZONE_FLAG_OPEN_SUP		= 0x0002,	/* OPEN ZONE */
	DA_ZONE_FLAG_CLOSE_SUP		= 0x0004,	/* CLOSE ZONE */
	DA_ZONE_FLAG_FINISH_SUP		= 0x0008,	/* FINISH ZONE */
	DA_ZONE_FLAG_RWP_SUP		= 0x0010,	/* RESET WRITE POINTER */
	DA_ZONE_FLAG_SUP_MASK		= (DA_ZONE_FLAG_RZ_SUP |
					   DA_ZONE_FLAG_OPEN_SUP |
					   DA_ZONE_FLAG_CLOSE_SUP |
					   DA_ZONE_FLAG_FINISH_SUP |
					   DA_ZONE_FLAG_RWP_SUP),
	DA_ZONE_FLAG_URSWRZ		= 0x0020,
	DA_ZONE_FLAG_OPT_SEQ_SET	= 0x0040,
	DA_ZONE_FLAG_OPT_NONSEQ_SET	= 0x0080,
	DA_ZONE_FLAG_MAX_SEQ_SET	= 0x0100,
	DA_ZONE_FLAG_SET_MASK		= (DA_ZONE_FLAG_OPT_SEQ_SET |
					   DA_ZONE_FLAG_OPT_NONSEQ_SET |
					   DA_ZONE_FLAG_MAX_SEQ_SET)
} da_zone_flags;
228 
/*
 * Human-readable names for the zone command support bits above, used
 * when announcing device capabilities.
 */
static struct da_zone_desc {
	da_zone_flags value;	/* Single DA_ZONE_FLAG_*_SUP bit */
	const char *desc;	/* Name printed for that bit */
} da_zone_desc_table[] = {
	{DA_ZONE_FLAG_RZ_SUP, "Report Zones" },
	{DA_ZONE_FLAG_OPEN_SUP, "Open" },
	{DA_ZONE_FLAG_CLOSE_SUP, "Close" },
	{DA_ZONE_FLAG_FINISH_SUP, "Finish" },
	{DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
};
239 
/*
 * Prototype for the per-method delete workers, and the dispatch table
 * indexed by da_delete_methods.  NONE and DISABLE have no worker;
 * WS16/WS10/ZERO all share the WRITE SAME implementation.
 */
typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
			      struct bio *bp);
static da_delete_func_t da_delete_trim;
static da_delete_func_t da_delete_unmap;
static da_delete_func_t da_delete_ws;

static const void * da_delete_functions[] = {
	NULL,			/* DA_DELETE_NONE */
	NULL,			/* DA_DELETE_DISABLE */
	da_delete_trim,		/* DA_DELETE_ATA_TRIM */
	da_delete_unmap,	/* DA_DELETE_UNMAP */
	da_delete_ws,		/* DA_DELETE_WS16 */
	da_delete_ws,		/* DA_DELETE_WS10 */
	da_delete_ws		/* DA_DELETE_ZERO */
};
255 
/*
 * Short names (sysctl-facing) and long descriptions for each delete
 * method; both arrays are indexed by da_delete_methods.
 */
static const char *da_delete_method_names[] =
    { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };
static const char *da_delete_method_desc[] =
    { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP",
      "WRITE SAME(10) with UNMAP", "ZERO" };
261 
/*
 * Offsets into our private area for storing information.
 * ccb_state presumably holds a da_ccb_state value (type plus flag
 * bits) and ccb_bp the associated struct bio -- set where the CCBs
 * are built, elsewhere in this file.
 */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1
265 
/* Media geometry and size, as reported to/derived for the disk layer. */
struct disk_params {
	u_int8_t  heads;	/* Logical heads (geometry) */
	u_int32_t cylinders;	/* Logical cylinders (geometry) */
	u_int8_t  secs_per_track;	/* Logical sectors per track */
	u_int32_t secsize;	/* Number of bytes/sector */
	u_int64_t sectors;	/* total number sectors */
	u_int     stripesize;	/* Physical stripe/block size in bytes */
	u_int     stripeoffset;	/* Offset of first full stripe */
};
275 
/* UNMAP payload layout limits (SBC): header plus fixed-size block
 * descriptors.  We cap ourselves at UNMAP_MAX_RANGES descriptors,
 * below the protocol maximum of 4095. */
#define UNMAP_RANGE_MAX		0xffffffff	/* Max blocks per descriptor */
#define UNMAP_HEAD_SIZE		8		/* UNMAP parameter list header */
#define UNMAP_RANGE_SIZE	16		/* One block descriptor */
#define UNMAP_MAX_RANGES	2048 /* Protocol Max is 4095 */
#define UNMAP_BUF_SIZE		((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \
				UNMAP_HEAD_SIZE)

/* Max blocks per WRITE SAME request, by CDB size. */
#define WS10_MAX_BLKS		0xffff
#define WS16_MAX_BLKS		0xffffffff
/* ATA DSM TRIM ranges that fit in an UNMAP-sized buffer. */
#define ATA_TRIM_MAX_RANGES	((UNMAP_BUF_SIZE / \
	(ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE)

/* Flag bit used alongside bio work when scheduling a TEST UNIT READY. */
#define DA_WORK_TUR		(1 << 16)
289 
/*
 * Per-device driver state, hung off the CAM peripheral.
 * NOTE(review): field protection is presumably the periph lock, as is
 * conventional for CAM periph drivers -- confirm before relying on it.
 */
struct da_softc {
	struct   cam_iosched_softc *cam_iosched;	/* I/O scheduler state */
	struct	 bio_queue_head delete_run_queue;	/* BIOs in current delete */
	LIST_HEAD(, ccb_hdr) pending_ccbs;
	int	 refcount;		/* Active xpt_action() calls */
	da_state state;			/* Probe/normal state machine */
	da_flags flags;
	da_quirks quirks;
	int	 minimum_cmd_size;	/* Smallest CDB size to use */
	int	 error_inject;		/* Sysctl-driven error injection */
	int	 trim_max_ranges;
	int	 delete_available;	/* Delete methods possibly available */
	da_zone_mode 			zone_mode;
	da_zone_interface		zone_interface;
	da_zone_flags			zone_flags;
	struct ata_gp_log_dir		ata_logdir;	/* Cached ATA log directory */
	int				valid_logdir_len;
	struct ata_identify_log_pages	ata_iddir;	/* Cached ATA id data log */
	int				valid_iddir_len;
	uint64_t			optimal_seq_zones;
	uint64_t			optimal_nonseq_zones;
	uint64_t			max_seq_zones;
	u_int	 		maxio;		/* Max I/O size in bytes */
	uint32_t		unmap_max_ranges;
	uint32_t		unmap_max_lba; /* Max LBAs in UNMAP req */
	uint32_t		unmap_gran;	/* UNMAP granularity (blocks) */
	uint32_t		unmap_gran_align;
	uint64_t		ws_max_blks;	/* Max blocks per WRITE SAME */
	da_delete_methods	delete_method_pref;	/* User-preferred method */
	da_delete_methods	delete_method;		/* Method in use */
	da_delete_func_t	*delete_func;		/* Worker for delete_method */
	int			unmappedio;	/* Unmapped I/O supported */
	int			rotating;	/* Spinning media (vs. SSD) */
	struct	 disk_params params;
	struct	 disk *disk;		/* GEOM disk */
	union	 ccb saved_ccb;
	struct task		sysctl_task;	/* Deferred sysctl tree setup */
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	struct callout		sendordered_c;	/* Periodic ordered-tag timer */
	uint64_t wwpn;			/* World-wide port name, if known */
	uint8_t	 unmap_buf[UNMAP_BUF_SIZE];	/* UNMAP parameter list */
	struct scsi_read_capacity_data_long rcaplong;	/* Last RC16 data */
	struct callout		mediapoll_c;	/* Media change poll timer */
#ifdef CAM_IO_STATS
	struct sysctl_ctx_list	sysctl_stats_ctx;
	struct sysctl_oid	*sysctl_stats_tree;
	u_int	errors;		/* Transport error count */
	u_int	timeouts;	/* Command timeout count */
	u_int	invalidations;	/* Device invalidation count */
#endif
#define DA_ANNOUNCETMP_SZ 80
	char			announce_temp[DA_ANNOUNCETMP_SZ];
#define DA_ANNOUNCE_SZ 400
	char			announcebuf[DA_ANNOUNCE_SZ];
};
346 
/*
 * Set (enable != 0) or clear the bit for delete_method in the softc's
 * mask of possibly-available delete methods.  Wrapped in do/while(0)
 * so the macro expands to exactly one statement: the previous bare
 * if/else form was a dangling-else hazard and would not even compile
 * when used, with its trailing semicolon, as the unbraced body of an
 * if that has an else.  Arguments are parenthesized against operator-
 * precedence surprises.  Behavior at every existing call site is
 * unchanged.
 */
#define dadeleteflag(softc, delete_method, enable)			\
do {									\
	if (enable)							\
		(softc)->delete_available |= (1 << (delete_method));	\
	else								\
		(softc)->delete_available &= ~(1 << (delete_method));	\
} while (0)
353 
/*
 * One quirk-table entry: devices whose inquiry data matches inq_pat
 * get the given quirk bits.
 */
struct da_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;	/* Vendor/product/rev match */
	da_quirks quirks;			/* Quirks to apply on match */
};

/* Vendor strings shared by multiple quirk-table entries below. */
static const char quantum[] = "QUANTUM";
static const char microp[] = "MICROP";
361 
362 static struct da_quirk_entry da_quirk_table[] =
363 {
364 	/* SPI, FC devices */
365 	{
366 		/*
367 		 * Fujitsu M2513A MO drives.
368 		 * Tested devices: M2513A2 firmware versions 1200 & 1300.
369 		 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
370 		 * Reported by: W.Scholten <whs@xs4all.nl>
371 		 */
372 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
373 		/*quirks*/ DA_Q_NO_SYNC_CACHE
374 	},
375 	{
376 		/* See above. */
377 		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
378 		/*quirks*/ DA_Q_NO_SYNC_CACHE
379 	},
380 	{
381 		/*
382 		 * This particular Fujitsu drive doesn't like the
383 		 * synchronize cache command.
384 		 * Reported by: Tom Jackson <toj@gorilla.net>
385 		 */
386 		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
387 		/*quirks*/ DA_Q_NO_SYNC_CACHE
388 	},
389 	{
390 		/*
391 		 * This drive doesn't like the synchronize cache command
392 		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
393 		 * in NetBSD PR kern/6027, August 24, 1998.
394 		 */
395 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
396 		/*quirks*/ DA_Q_NO_SYNC_CACHE
397 	},
398 	{
399 		/*
400 		 * This drive doesn't like the synchronize cache command
401 		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
402 		 * (PR 8882).
403 		 */
404 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
405 		/*quirks*/ DA_Q_NO_SYNC_CACHE
406 	},
407 	{
408 		/*
409 		 * Doesn't like the synchronize cache command.
410 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
411 		 */
412 		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
413 		/*quirks*/ DA_Q_NO_SYNC_CACHE
414 	},
415 	{
416 		/*
417 		 * Doesn't like the synchronize cache command.
418 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
419 		 */
420 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
421 		/*quirks*/ DA_Q_NO_SYNC_CACHE
422 	},
423 	{
424 		/*
425 		 * Doesn't like the synchronize cache command.
426 		 */
427 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
428 		/*quirks*/ DA_Q_NO_SYNC_CACHE
429 	},
430 	{
431 		/*
432 		 * Doesn't like the synchronize cache command.
433 		 * Reported by: walter@pelissero.de
434 		 */
435 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
436 		/*quirks*/ DA_Q_NO_SYNC_CACHE
437 	},
438 	{
439 		/*
440 		 * Doesn't work correctly with 6 byte reads/writes.
441 		 * Returns illegal request, and points to byte 9 of the
442 		 * 6-byte CDB.
443 		 * Reported by:  Adam McDougall <bsdx@spawnet.com>
444 		 */
445 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
446 		/*quirks*/ DA_Q_NO_6_BYTE
447 	},
448 	{
449 		/* See above. */
450 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
451 		/*quirks*/ DA_Q_NO_6_BYTE
452 	},
453 	{
454 		/*
455 		 * Doesn't like the synchronize cache command.
456 		 * Reported by: walter@pelissero.de
457 		 */
458 		{T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
459 		/*quirks*/ DA_Q_NO_SYNC_CACHE
460 	},
461 	{
462 		/*
463 		 * The CISS RAID controllers do not support SYNC_CACHE
464 		 */
465 		{T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
466 		/*quirks*/ DA_Q_NO_SYNC_CACHE
467 	},
468 	{
469 		/*
470 		 * The STEC SSDs sometimes hang on UNMAP.
471 		 */
472 		{T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"},
473 		/*quirks*/ DA_Q_NO_UNMAP
474 	},
475 	{
476 		/*
477 		 * VMware returns BUSY status when storage has transient
478 		 * connectivity problems, so better wait.
479 		 * Also VMware returns odd errors on misaligned UNMAPs.
480 		 */
481 		{T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"},
482 		/*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP
483 	},
484 	/* USB mass storage devices supported by umass(4) */
485 	{
486 		/*
487 		 * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player
488 		 * PR: kern/51675
489 		 */
490 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"},
491 		/*quirks*/ DA_Q_NO_SYNC_CACHE
492 	},
493 	{
494 		/*
495 		 * Power Quotient Int. (PQI) USB flash key
496 		 * PR: kern/53067
497 		 */
498 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*",
499 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
500 	},
501  	{
502  		/*
503  		 * Creative Nomad MUVO mp3 player (USB)
504  		 * PR: kern/53094
505  		 */
506  		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
507  		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
508  	},
509 	{
510 		/*
511 		 * Jungsoft NEXDISK USB flash key
512 		 * PR: kern/54737
513 		 */
514 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"},
515 		/*quirks*/ DA_Q_NO_SYNC_CACHE
516 	},
517 	{
518 		/*
519 		 * FreeDik USB Mini Data Drive
520 		 * PR: kern/54786
521 		 */
522 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive",
523 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
524 	},
525 	{
526 		/*
527 		 * Sigmatel USB Flash MP3 Player
528 		 * PR: kern/57046
529 		 */
530 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
531 		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
532 	},
533 	{
534 		/*
535 		 * Neuros USB Digital Audio Computer
536 		 * PR: kern/63645
537 		 */
538 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.",
539 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
540 	},
541 	{
542 		/*
543 		 * SEAGRAND NP-900 MP3 Player
544 		 * PR: kern/64563
545 		 */
546 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
547 		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
548 	},
549 	{
550 		/*
551 		 * iRiver iFP MP3 player (with UMS Firmware)
552 		 * PR: kern/54881, i386/63941, kern/66124
553 		 */
554 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"},
555 		/*quirks*/ DA_Q_NO_SYNC_CACHE
556  	},
557 	{
558 		/*
559 		 * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01
560 		 * PR: kern/70158
561 		 */
562 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"},
563 		/*quirks*/ DA_Q_NO_SYNC_CACHE
564 	},
565 	{
566 		/*
567 		 * ZICPlay USB MP3 Player with FM
568 		 * PR: kern/75057
569 		 */
570 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"},
571 		/*quirks*/ DA_Q_NO_SYNC_CACHE
572 	},
573 	{
574 		/*
575 		 * TEAC USB floppy mechanisms
576 		 */
577 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"},
578 		/*quirks*/ DA_Q_NO_SYNC_CACHE
579 	},
580 	{
581 		/*
582 		 * Kingston DataTraveler II+ USB Pen-Drive.
583 		 * Reported by: Pawel Jakub Dawidek <pjd@FreeBSD.org>
584 		 */
585 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+",
586 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
587 	},
588 	{
589 		/*
590 		 * USB DISK Pro PMAP
591 		 * Reported by: jhs
592 		 * PR: usb/96381
593 		 */
594 		{T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"},
595 		/*quirks*/ DA_Q_NO_SYNC_CACHE
596 	},
597 	{
598 		/*
599 		 * Motorola E398 Mobile Phone (TransFlash memory card).
600 		 * Reported by: Wojciech A. Koszek <dunstan@FreeBSD.czest.pl>
601 		 * PR: usb/89889
602 		 */
603 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone",
604 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
605 	},
606 	{
607 		/*
608 		 * Qware BeatZkey! Pro
609 		 * PR: usb/79164
610 		 */
611 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE",
612 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
613 	},
614 	{
615 		/*
616 		 * Time DPA20B 1GB MP3 Player
617 		 * PR: usb/81846
618 		 */
619 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*",
620 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
621 	},
622 	{
623 		/*
624 		 * Samsung USB key 128Mb
625 		 * PR: usb/90081
626 		 */
627 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb",
628 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
629 	},
630 	{
631 		/*
632 		 * Kingston DataTraveler 2.0 USB Flash memory.
633 		 * PR: usb/89196
634 		 */
635 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0",
636 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
637 	},
638 	{
639 		/*
640 		 * Creative MUVO Slim mp3 player (USB)
641 		 * PR: usb/86131
642 		 */
643 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
644 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
645 		},
646 	{
647 		/*
648 		 * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3)
649 		 * PR: usb/80487
650 		 */
651 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK",
652 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
653 	},
654 	{
655 		/*
656 		 * SanDisk Micro Cruzer 128MB
657 		 * PR: usb/75970
658 		 */
659 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer",
660 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
661 	},
662 	{
663 		/*
664 		 * TOSHIBA TransMemory USB sticks
665 		 * PR: kern/94660
666 		 */
667 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory",
668 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
669 	},
670 	{
671 		/*
672 		 * PNY USB 3.0 Flash Drives
673 		*/
674 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*",
675 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16
676 	},
677 	{
678 		/*
679 		 * PNY USB Flash keys
680 		 * PR: usb/75578, usb/72344, usb/65436
681 		 */
682 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*",
683 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
684 	},
685 	{
686 		/*
687 		 * Genesys GL3224
688 		 */
689 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
690 		"120?"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_4K | DA_Q_NO_RC16
691 	},
692 	{
693 		/*
694 		 * Genesys 6-in-1 Card Reader
695 		 * PR: usb/94647
696 		 */
697 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
698 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
699 	},
700 	{
701 		/*
702 		 * Rekam Digital CAMERA
703 		 * PR: usb/98713
704 		 */
705 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*",
706 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
707 	},
708 	{
709 		/*
710 		 * iRiver H10 MP3 player
711 		 * PR: usb/102547
712 		 */
713 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*",
714 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
715 	},
716 	{
717 		/*
718 		 * iRiver U10 MP3 player
719 		 * PR: usb/92306
720 		 */
721 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*",
722 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
723 	},
724 	{
725 		/*
726 		 * X-Micro Flash Disk
727 		 * PR: usb/96901
728 		 */
729 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk",
730 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
731 	},
732 	{
733 		/*
734 		 * EasyMP3 EM732X USB 2.0 Flash MP3 Player
735 		 * PR: usb/96546
736 		 */
737 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*",
738 		"1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
739 	},
740 	{
741 		/*
742 		 * Denver MP3 player
743 		 * PR: usb/107101
744 		 */
745 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER",
746 		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
747 	},
748 	{
749 		/*
750 		 * Philips USB Key Audio KEY013
751 		 * PR: usb/68412
752 		 */
753 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
754 		/*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
755 	},
756 	{
757 		/*
758 		 * JNC MP3 Player
759 		 * PR: usb/94439
760 		 */
761 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*",
762 		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
763 	},
764 	{
765 		/*
766 		 * SAMSUNG MP0402H
767 		 * PR: usb/108427
768 		 */
769 		{T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"},
770 		/*quirks*/ DA_Q_NO_SYNC_CACHE
771 	},
772 	{
773 		/*
774 		 * I/O Magic USB flash - Giga Bank
775 		 * PR: usb/108810
776 		 */
777 		{T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"},
778 		/*quirks*/ DA_Q_NO_SYNC_CACHE
779 	},
780 	{
781 		/*
782 		 * JoyFly 128mb USB Flash Drive
783 		 * PR: 96133
784 		 */
785 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*",
786 		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
787 	},
788 	{
789 		/*
790 		 * ChipsBnk usb stick
791 		 * PR: 103702
792 		 */
793 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*",
794 		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
795 	},
796 	{
797 		/*
798 		 * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A
799 		 * PR: 129858
800 		 */
801 		{T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*",
802 		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
803 	},
804 	{
805 		/*
806 		 * Samsung YP-U3 mp3-player
807 		 * PR: 125398
808 		 */
809 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3",
810 		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
811 	},
812 	{
813 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*",
814 		 "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
815 	},
816 	{
817 		/*
818 		 * Sony Cyber-Shot DSC cameras
819 		 * PR: usb/137035
820 		 */
821 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"},
822 		/*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
823 	},
824 	{
825 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3",
826 		 "1.00"}, /*quirks*/ DA_Q_NO_PREVENT
827 	},
828 	{
829 		/* At least several Transcent USB sticks lie on RC16. */
830 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*",
831 		 "*"}, /*quirks*/ DA_Q_NO_RC16
832 	},
833 	{
834 		/*
835 		 * I-O Data USB Flash Disk
836 		 * PR: usb/211716
837 		 */
838 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*",
839 		 "*"}, /*quirks*/ DA_Q_NO_RC16
840 	},
841 	/* ATA/SATA devices over SAS/USB/... */
842 	{
843 		/* Hitachi Advanced Format (4k) drives */
844 		{ T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" },
845 		/*quirks*/DA_Q_4K
846 	},
847 	{
848 		/* Micron Advanced Format (4k) drives */
849 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" },
850 		/*quirks*/DA_Q_4K
851 	},
852 	{
853 		/* Samsung Advanced Format (4k) drives */
854 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" },
855 		/*quirks*/DA_Q_4K
856 	},
857 	{
858 		/* Samsung Advanced Format (4k) drives */
859 		{ T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" },
860 		/*quirks*/DA_Q_4K
861 	},
862 	{
863 		/* Samsung Advanced Format (4k) drives */
864 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" },
865 		/*quirks*/DA_Q_4K
866 	},
867 	{
868 		/* Samsung Advanced Format (4k) drives */
869 		{ T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" },
870 		/*quirks*/DA_Q_4K
871 	},
872 	{
873 		/* Seagate Barracuda Green Advanced Format (4k) drives */
874 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" },
875 		/*quirks*/DA_Q_4K
876 	},
877 	{
878 		/* Seagate Barracuda Green Advanced Format (4k) drives */
879 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" },
880 		/*quirks*/DA_Q_4K
881 	},
882 	{
883 		/* Seagate Barracuda Green Advanced Format (4k) drives */
884 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" },
885 		/*quirks*/DA_Q_4K
886 	},
887 	{
888 		/* Seagate Barracuda Green Advanced Format (4k) drives */
889 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" },
890 		/*quirks*/DA_Q_4K
891 	},
892 	{
893 		/* Seagate Barracuda Green Advanced Format (4k) drives */
894 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" },
895 		/*quirks*/DA_Q_4K
896 	},
897 	{
898 		/* Seagate Barracuda Green Advanced Format (4k) drives */
899 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" },
900 		/*quirks*/DA_Q_4K
901 	},
902 	{
903 		/* Seagate Momentus Advanced Format (4k) drives */
904 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" },
905 		/*quirks*/DA_Q_4K
906 	},
907 	{
908 		/* Seagate Momentus Advanced Format (4k) drives */
909 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" },
910 		/*quirks*/DA_Q_4K
911 	},
912 	{
913 		/* Seagate Momentus Advanced Format (4k) drives */
914 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" },
915 		/*quirks*/DA_Q_4K
916 	},
917 	{
918 		/* Seagate Momentus Advanced Format (4k) drives */
919 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" },
920 		/*quirks*/DA_Q_4K
921 	},
922 	{
923 		/* Seagate Momentus Advanced Format (4k) drives */
924 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" },
925 		/*quirks*/DA_Q_4K
926 	},
927 	{
928 		/* Seagate Momentus Advanced Format (4k) drives */
929 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" },
930 		/*quirks*/DA_Q_4K
931 	},
932 	{
933 		/* Seagate Momentus Advanced Format (4k) drives */
934 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" },
935 		/*quirks*/DA_Q_4K
936 	},
937 	{
938 		/* Seagate Momentus Advanced Format (4k) drives */
939 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" },
940 		/*quirks*/DA_Q_4K
941 	},
942 	{
943 		/* Seagate Momentus Advanced Format (4k) drives */
944 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" },
945 		/*quirks*/DA_Q_4K
946 	},
947 	{
948 		/* Seagate Momentus Advanced Format (4k) drives */
949 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" },
950 		/*quirks*/DA_Q_4K
951 	},
952 	{
953 		/* Seagate Momentus Advanced Format (4k) drives */
954 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" },
955 		/*quirks*/DA_Q_4K
956 	},
957 	{
958 		/* Seagate Momentus Advanced Format (4k) drives */
959 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" },
960 		/*quirks*/DA_Q_4K
961 	},
962 	{
963 		/* Seagate Momentus Advanced Format (4k) drives */
964 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" },
965 		/*quirks*/DA_Q_4K
966 	},
967 	{
968 		/* Seagate Momentus Advanced Format (4k) drives */
969 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" },
970 		/*quirks*/DA_Q_4K
971 	},
972 	{
973 		/* Seagate Momentus Thin Advanced Format (4k) drives */
974 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" },
975 		/*quirks*/DA_Q_4K
976 	},
977 	{
978 		/* Seagate Momentus Thin Advanced Format (4k) drives */
979 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" },
980 		/*quirks*/DA_Q_4K
981 	},
982 	{
983 		/* WDC Caviar Green Advanced Format (4k) drives */
984 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" },
985 		/*quirks*/DA_Q_4K
986 	},
987 	{
988 		/* WDC Caviar Green Advanced Format (4k) drives */
989 		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" },
990 		/*quirks*/DA_Q_4K
991 	},
992 	{
993 		/* WDC Caviar Green Advanced Format (4k) drives */
994 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" },
995 		/*quirks*/DA_Q_4K
996 	},
997 	{
998 		/* WDC Caviar Green Advanced Format (4k) drives */
999 		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" },
1000 		/*quirks*/DA_Q_4K
1001 	},
1002 	{
1003 		/* WDC Caviar Green Advanced Format (4k) drives */
1004 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" },
1005 		/*quirks*/DA_Q_4K
1006 	},
1007 	{
1008 		/* WDC Caviar Green Advanced Format (4k) drives */
1009 		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" },
1010 		/*quirks*/DA_Q_4K
1011 	},
1012 	{
1013 		/* WDC Caviar Green Advanced Format (4k) drives */
1014 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" },
1015 		/*quirks*/DA_Q_4K
1016 	},
1017 	{
1018 		/* WDC Caviar Green Advanced Format (4k) drives */
1019 		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" },
1020 		/*quirks*/DA_Q_4K
1021 	},
1022 	{
1023 		/* WDC Scorpio Black Advanced Format (4k) drives */
1024 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" },
1025 		/*quirks*/DA_Q_4K
1026 	},
1027 	{
1028 		/* WDC Scorpio Black Advanced Format (4k) drives */
1029 		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" },
1030 		/*quirks*/DA_Q_4K
1031 	},
1032 	{
1033 		/* WDC Scorpio Black Advanced Format (4k) drives */
1034 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" },
1035 		/*quirks*/DA_Q_4K
1036 	},
1037 	{
1038 		/* WDC Scorpio Black Advanced Format (4k) drives */
1039 		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" },
1040 		/*quirks*/DA_Q_4K
1041 	},
1042 	{
1043 		/* WDC Scorpio Blue Advanced Format (4k) drives */
1044 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" },
1045 		/*quirks*/DA_Q_4K
1046 	},
1047 	{
1048 		/* WDC Scorpio Blue Advanced Format (4k) drives */
1049 		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" },
1050 		/*quirks*/DA_Q_4K
1051 	},
1052 	{
1053 		/* WDC Scorpio Blue Advanced Format (4k) drives */
1054 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" },
1055 		/*quirks*/DA_Q_4K
1056 	},
1057 	{
1058 		/* WDC Scorpio Blue Advanced Format (4k) drives */
1059 		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" },
1060 		/*quirks*/DA_Q_4K
1061 	},
1062 	{
1063 		/*
1064 		 * Olympus FE-210 camera
1065 		 */
1066 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*",
1067 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1068 	},
1069 	{
1070 		/*
1071 		 * LG UP3S MP3 player
1072 		 */
1073 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S",
1074 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1075 	},
1076 	{
1077 		/*
1078 		 * Laser MP3-2GA13 MP3 player
1079 		 */
1080 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk",
1081 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1082 	},
1083 	{
1084 		/*
1085 		 * LaCie external 250GB Hard drive des by Porsche
1086 		 * Submitted by: Ben Stuyts <ben@altesco.nl>
1087 		 * PR: 121474
1088 		 */
1089 		{T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"},
1090 		/*quirks*/ DA_Q_NO_SYNC_CACHE
1091 	},
1092 	/* SATA SSDs */
1093 	{
1094 		/*
1095 		 * Corsair Force 2 SSDs
1096 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1097 		 */
1098 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" },
1099 		/*quirks*/DA_Q_4K
1100 	},
1101 	{
1102 		/*
1103 		 * Corsair Force 3 SSDs
1104 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1105 		 */
1106 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" },
1107 		/*quirks*/DA_Q_4K
1108 	},
1109         {
1110 		/*
1111 		 * Corsair Neutron GTX SSDs
1112 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1113 		 */
1114 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" },
1115 		/*quirks*/DA_Q_4K
1116 	},
1117 	{
1118 		/*
1119 		 * Corsair Force GT & GS SSDs
1120 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1121 		 */
1122 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" },
1123 		/*quirks*/DA_Q_4K
1124 	},
1125 	{
1126 		/*
1127 		 * Crucial M4 SSDs
1128 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1129 		 */
1130 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" },
1131 		/*quirks*/DA_Q_4K
1132 	},
1133 	{
1134 		/*
1135 		 * Crucial RealSSD C300 SSDs
1136 		 * 4k optimised
1137 		 */
1138 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*",
1139 		"*" }, /*quirks*/DA_Q_4K
1140 	},
1141 	{
1142 		/*
1143 		 * Intel 320 Series SSDs
1144 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1145 		 */
1146 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" },
1147 		/*quirks*/DA_Q_4K
1148 	},
1149 	{
1150 		/*
1151 		 * Intel 330 Series SSDs
1152 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1153 		 */
1154 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" },
1155 		/*quirks*/DA_Q_4K
1156 	},
1157 	{
1158 		/*
1159 		 * Intel 510 Series SSDs
1160 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1161 		 */
1162 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" },
1163 		/*quirks*/DA_Q_4K
1164 	},
1165 	{
1166 		/*
1167 		 * Intel 520 Series SSDs
1168 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1169 		 */
1170 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" },
1171 		/*quirks*/DA_Q_4K
1172 	},
1173 	{
1174 		/*
1175 		 * Intel S3610 Series SSDs
1176 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1177 		 */
1178 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" },
1179 		/*quirks*/DA_Q_4K
1180 	},
1181 	{
1182 		/*
1183 		 * Intel X25-M Series SSDs
1184 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1185 		 */
1186 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" },
1187 		/*quirks*/DA_Q_4K
1188 	},
1189 	{
1190 		/*
1191 		 * Kingston E100 Series SSDs
1192 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1193 		 */
1194 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" },
1195 		/*quirks*/DA_Q_4K
1196 	},
1197 	{
1198 		/*
1199 		 * Kingston HyperX 3k SSDs
1200 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1201 		 */
1202 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" },
1203 		/*quirks*/DA_Q_4K
1204 	},
1205 	{
1206 		/*
1207 		 * Marvell SSDs (entry taken from OpenSolaris)
1208 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1209 		 */
1210 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" },
1211 		/*quirks*/DA_Q_4K
1212 	},
1213 	{
1214 		/*
1215 		 * OCZ Agility 2 SSDs
1216 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1217 		 */
1218 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" },
1219 		/*quirks*/DA_Q_4K
1220 	},
1221 	{
1222 		/*
1223 		 * OCZ Agility 3 SSDs
1224 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1225 		 */
1226 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" },
1227 		/*quirks*/DA_Q_4K
1228 	},
1229 	{
1230 		/*
1231 		 * OCZ Deneva R Series SSDs
1232 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1233 		 */
1234 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" },
1235 		/*quirks*/DA_Q_4K
1236 	},
1237 	{
1238 		/*
1239 		 * OCZ Vertex 2 SSDs (inc pro series)
1240 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1241 		 */
1242 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" },
1243 		/*quirks*/DA_Q_4K
1244 	},
1245 	{
1246 		/*
1247 		 * OCZ Vertex 3 SSDs
1248 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1249 		 */
1250 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" },
1251 		/*quirks*/DA_Q_4K
1252 	},
1253 	{
1254 		/*
1255 		 * OCZ Vertex 4 SSDs
1256 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1257 		 */
1258 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" },
1259 		/*quirks*/DA_Q_4K
1260 	},
1261 	{
1262 		/*
1263 		 * Samsung 750 Series SSDs
1264 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1265 		 */
1266 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 750*", "*" },
1267 		/*quirks*/DA_Q_4K
1268 	},
1269 	{
1270 		/*
1271 		 * Samsung 830 Series SSDs
1272 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1273 		 */
1274 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" },
1275 		/*quirks*/DA_Q_4K
1276 	},
1277 	{
1278 		/*
1279 		 * Samsung 840 SSDs
1280 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1281 		 */
1282 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" },
1283 		/*quirks*/DA_Q_4K
1284 	},
1285 	{
1286 		/*
1287 		 * Samsung 845 SSDs
1288 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1289 		 */
1290 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 845*", "*" },
1291 		/*quirks*/DA_Q_4K
1292 	},
1293 	{
1294 		/*
1295 		 * Samsung 850 SSDs
1296 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1297 		 */
1298 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" },
1299 		/*quirks*/DA_Q_4K
1300 	},
1301 	{
1302 		/*
1303 		 * Samsung 843T Series SSDs (MZ7WD*)
1304 		 * Samsung PM851 Series SSDs (MZ7TE*)
1305 		 * Samsung PM853T Series SSDs (MZ7GE*)
1306 		 * Samsung SM863 Series SSDs (MZ7KM*)
1307 		 * 4k optimised
1308 		 */
1309 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" },
1310 		/*quirks*/DA_Q_4K
1311 	},
1312 	{
1313 		/*
1314 		 * SuperTalent TeraDrive CT SSDs
1315 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1316 		 */
1317 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" },
1318 		/*quirks*/DA_Q_4K
1319 	},
1320 	{
1321 		/*
1322 		 * XceedIOPS SATA SSDs
1323 		 * 4k optimised
1324 		 */
1325 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" },
1326 		/*quirks*/DA_Q_4K
1327 	},
1328 	{
1329 		/*
1330 		 * Hama Innostor USB-Stick
1331 		 */
1332 		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" },
1333 		/*quirks*/DA_Q_NO_RC16
1334 	},
1335 	{
1336 		/*
1337 		 * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR)
1338 		 * Drive Managed SATA hard drive.  This drive doesn't report
1339 		 * in firmware that it is a drive managed SMR drive.
1340 		 */
1341 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS0002*", "*" },
1342 		/*quirks*/DA_Q_SMR_DM
1343 	},
1344 	{
1345 		/*
1346 		 * MX-ES USB Drive by Mach Xtreme
1347 		 */
1348 		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"},
1349 		/*quirks*/DA_Q_NO_RC16
1350 	},
1351 };
1352 
/*
 * Forward declarations for the periph method table (register/cleanup/
 * start/oninvalidate), the GEOM disk methods, and local sysctl and
 * helper routines defined later in this file.
 */
static	disk_strategy_t	dastrategy;
static	dumper_t	dadump;
static	periph_init_t	dainit;
static	void		daasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	void		dasysctlinit(void *context, int pending);
static	int		dasysctlsofttimeout(SYSCTL_HANDLER_ARGS);
static	int		dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
static	int		dadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
static	int		dazonemodesysctl(SYSCTL_HANDLER_ARGS);
static	int		dazonesupsysctl(SYSCTL_HANDLER_ARGS);
static	int		dadeletemaxsysctl(SYSCTL_HANDLER_ARGS);
static	void		dadeletemethodset(struct da_softc *softc,
					  da_delete_methods delete_method);
static	off_t		dadeletemaxsize(struct da_softc *softc,
					da_delete_methods delete_method);
static	void		dadeletemethodchoose(struct da_softc *softc,
					     da_delete_methods default_method);
static	void		daprobedone(struct cam_periph *periph, union ccb *ccb);

static	periph_ctor_t	daregister;
static	periph_dtor_t	dacleanup;
static	periph_start_t	dastart;
static	periph_oninv_t	daoninvalidate;
static	void		dazonedone(struct cam_periph *periph, union ccb *ccb);
static	void		dadone(struct cam_periph *periph,
			       union ccb *done_ccb);
static  int		daerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static void		daprevent(struct cam_periph *periph, int action);
static void		dareprobe(struct cam_periph *periph);
static void		dasetgeom(struct cam_periph *periph, uint32_t block_len,
				  uint64_t maxsector,
				  struct scsi_read_capacity_data_long *rcaplong,
				  size_t rcap_size);
static timeout_t	dasendorderedtag;
static void		dashutdown(void *arg, int howto);
static timeout_t	damediapoll;
1391 
/*
 * Compile-time defaults for the driver tunables below.  Each may be
 * overridden with a kernel option of the same name; the runtime values
 * are further adjustable via the kern.cam.da sysctl tree.
 */
#ifndef	DA_DEFAULT_POLL_PERIOD
#define	DA_DEFAULT_POLL_PERIOD	3
#endif

#ifndef DA_DEFAULT_TIMEOUT
#define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
#endif

#ifndef DA_DEFAULT_SOFTTIMEOUT
#define DA_DEFAULT_SOFTTIMEOUT	0
#endif

#ifndef	DA_DEFAULT_RETRY
#define	DA_DEFAULT_RETRY	4
#endif

#ifndef	DA_DEFAULT_SEND_ORDERED
#define	DA_DEFAULT_SEND_ORDERED	1
#endif

static int da_poll_period = DA_DEFAULT_POLL_PERIOD;
static int da_retry_count = DA_DEFAULT_RETRY;
static int da_default_timeout = DA_DEFAULT_TIMEOUT;
static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT;
static int da_send_ordered = DA_DEFAULT_SEND_ORDERED;

static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
            "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN,
           &da_poll_period, 0, "Media polling period in seconds");
SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN,
           &da_retry_count, 0, "Normal I/O retry count");
SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
           &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
           &da_send_ordered, 0, "Send Ordered Tags");

/*
 * NOTE(review): da_default_softtimeout is an sbintime_t but is exposed
 * through a CTLTYPE_UINT handler (in milliseconds) and seeded via
 * TUNABLE_INT64.  The unit/width conversions live entirely inside
 * dasysctlsofttimeout() — verify they stay in sync if either side is
 * changed.
 */
SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout,
    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, dasysctlsofttimeout, "I",
    "Soft I/O timeout (ms)");
TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout);

/*
 * DA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * meets the requirement "don't send an ordered tag" test so it takes
 * us two intervals to determine that a tag must be sent.
 */
#ifndef DA_ORDEREDTAG_INTERVAL
#define DA_ORDEREDTAG_INTERVAL 4
#endif

/* Periph driver glue: dainit() runs at boot; units are tracked here. */
static struct periph_driver dadriver =
{
	dainit, "da",
	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(da, dadriver);

static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
1459 
/*
 * GEOM::open method for the da disk.
 *
 * Takes a periph reference for this open, holds the periph while the
 * media is (re)probed, and optionally issues PREVENT ALLOW MEDIUM
 * REMOVAL to lock removable media in place.  Returns 0 on success or
 * an errno; the reference taken here is dropped again on failure.
 */
static int
daopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		return (ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daopen\n"));

	softc = (struct da_softc *)periph->softc;
	dareprobe(periph);

	/* Wait for the disk size update.  */
	error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO,
	    "dareprobe", 0);
	if (error != 0)
		xpt_print(periph->path, "unable to retrieve capacity data\n");

	/* The periph may have been invalidated while we slept. */
	if (periph->flags & CAM_PERIPH_INVALID)
		error = ENXIO;

	if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
	    (softc->quirks & DA_Q_NO_PREVENT) == 0)
		daprevent(periph, PR_PREVENT);

	if (error == 0) {
		softc->flags &= ~DA_FLAG_PACK_INVALID;
		softc->flags |= DA_FLAG_OPEN;
	}

	cam_periph_unhold(periph);
	cam_periph_unlock(periph);

	/* On failure, drop the reference taken by cam_periph_acquire(). */
	if (error != 0)
		cam_periph_release(periph);

	return (error);
}
1511 
/*
 * GEOM::close method for the da disk.
 *
 * Flushes the write cache if the pack is dirty (unless quirked off or
 * already invalid), re-allows medium removal, waits for in-flight
 * dastart() users of the softc to drain, and drops the open's periph
 * reference.  Always returns 0.
 */
static int
daclose(struct disk *dp)
{
	struct	cam_periph *periph;
	struct	da_softc *softc;
	union	ccb *ccb;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	cam_periph_lock(periph);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daclose\n"));

	if (cam_periph_hold(periph, PRIBIO) == 0) {

		/* Flush disk cache. */
		if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
		    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
		    (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
			scsi_synchronize_cache(&ccb->csio, /*retries*/1,
			    /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG,
			    /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
			    5 * 60 * 1000);
			/* Flush errors are reported via daerror(); close anyway. */
			error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
			    /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
			    softc->disk->d_devstat);
			softc->flags &= ~DA_FLAG_DIRTY;
			xpt_release_ccb(ccb);
		}

		/* Allow medium removal. */
		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
			daprevent(periph, PR_ALLOW);

		cam_periph_unhold(periph);
	}

	/*
	 * If we've got removeable media, mark the blocksize as
	 * unavailable, since it could change when new media is
	 * inserted.
	 */
	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
		softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;

	softc->flags &= ~DA_FLAG_OPEN;
	/* Wait for any remaining softc refcount holders to finish. */
	while (softc->refcount != 0)
		cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1);
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}
1567 
1568 static void
1569 daschedule(struct cam_periph *periph)
1570 {
1571 	struct da_softc *softc = (struct da_softc *)periph->softc;
1572 
1573 	if (softc->state != DA_STATE_NORMAL)
1574 		return;
1575 
1576 	cam_iosched_schedule(softc->cam_iosched, periph);
1577 }
1578 
1579 /*
1580  * Actually translate the requested transfer into one the physical driver
1581  * can understand.  The transfer is described by a buf and will include
1582  * only one physical transfer.
1583  */
1584 static void
1585 dastrategy(struct bio *bp)
1586 {
1587 	struct cam_periph *periph;
1588 	struct da_softc *softc;
1589 
1590 	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1591 	softc = (struct da_softc *)periph->softc;
1592 
1593 	cam_periph_lock(periph);
1594 
1595 	/*
1596 	 * If the device has been made invalid, error out
1597 	 */
1598 	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
1599 		cam_periph_unlock(periph);
1600 		biofinish(bp, NULL, ENXIO);
1601 		return;
1602 	}
1603 
1604 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp));
1605 
1606 	/*
1607 	 * Zone commands must be ordered, because they can depend on the
1608 	 * effects of previously issued commands, and they may affect
1609 	 * commands after them.
1610 	 */
1611 	if (bp->bio_cmd == BIO_ZONE)
1612 		bp->bio_flags |= BIO_ORDERED;
1613 
1614 	/*
1615 	 * Place it in the queue of disk activities for this disk
1616 	 */
1617 	cam_iosched_queue_work(softc->cam_iosched, bp);
1618 
1619 	/*
1620 	 * Schedule ourselves for performing the work.
1621 	 */
1622 	daschedule(periph);
1623 	cam_periph_unlock(periph);
1624 
1625 	return;
1626 }
1627 
/*
 * Kernel crash-dump method (dumper_t).
 *
 * Runs while the system may be wedged, so every CCB is executed
 * synchronously with xpt_polled_action() rather than through the
 * normal interrupt-driven completion path.  A call with length > 0
 * writes one chunk of dump data at 'offset'; the final call with
 * length == 0 flushes the drive's write cache.
 */
static int
dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct	    cam_periph *periph;
	struct	    da_softc *softc;
	u_int	    secsize;
	struct	    ccb_scsiio csio;
	struct	    disk *dp;
	int	    error = 0;

	dp = arg;
	periph = dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	cam_periph_lock(periph);
	secsize = softc->params.secsize;

	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
		cam_periph_unlock(periph);
		return (ENXIO);
	}

	memset(&csio, 0, sizeof(csio));
	if (length > 0) {
		/* Write one chunk of the dump, with no retries. */
		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_read_write(&csio,
				/*retries*/0,
				dadone,
				MSG_ORDERED_Q_TAG,
				/*read*/SCSI_RW_WRITE,
				/*byte2*/0,
				/*minimum_cmd_size*/ softc->minimum_cmd_size,
				offset / secsize,
				length / secsize,
				/*data_ptr*/(u_int8_t *) virtual,
				/*dxfer_len*/length,
				/*sense_len*/SSD_FULL_SIZE,
				da_default_timeout * 1000);
		xpt_polled_action((union ccb *)&csio);

		error = cam_periph_error((union ccb *)&csio,
		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
		/* Unfreeze the devq so a retry of the next chunk can run. */
		if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0,
			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
		if (error != 0)
			printf("Aborting dump due to I/O error.\n");
		cam_periph_unlock(periph);
		return (error);
	}

	/*
	 * Sync the disk cache contents to the physical media.
	 */
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {

		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&csio,
				       /*retries*/0,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0,/* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 1000);
		xpt_polled_action((union ccb *)&csio);

		error = cam_periph_error((union ccb *)&csio,
		    0, SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, NULL);
		if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0,
			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
		if (error != 0)
			xpt_print(periph->path, "Synchronize cache failed\n");
	}
	cam_periph_unlock(periph);
	return (error);
}
1707 
1708 static int
1709 dagetattr(struct bio *bp)
1710 {
1711 	int ret;
1712 	struct cam_periph *periph;
1713 
1714 	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1715 	cam_periph_lock(periph);
1716 	ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
1717 	    periph->path);
1718 	cam_periph_unlock(periph);
1719 	if (ret == 0)
1720 		bp->bio_completed = bp->bio_length;
1721 	return ret;
1722 }
1723 
1724 static void
1725 dainit(void)
1726 {
1727 	cam_status status;
1728 
1729 	/*
1730 	 * Install a global async callback.  This callback will
1731 	 * receive async callbacks like "new device found".
1732 	 */
1733 	status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);
1734 
1735 	if (status != CAM_REQ_CMP) {
1736 		printf("da: Failed to attach master async callback "
1737 		       "due to status 0x%x!\n", status);
1738 	} else if (da_send_ordered) {
1739 
1740 		/* Register our shutdown event handler */
1741 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
1742 					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
1743 		    printf("dainit: shutdown event registration failed!\n");
1744 	}
1745 }
1746 
1747 /*
1748  * Callback from GEOM, called when it has finished cleaning up its
1749  * resources.
1750  */
1751 static void
1752 dadiskgonecb(struct disk *dp)
1753 {
1754 	struct cam_periph *periph;
1755 
1756 	periph = (struct cam_periph *)dp->d_drv1;
1757 	cam_periph_release(periph);
1758 }
1759 
/*
 * Periph invalidation hook: the underlying device is going away.
 * Stop listening for async events, mark the pack invalid, fail all
 * queued I/O, and start tearing down the GEOM provider.
 */
static void
daoninvalidate(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, daasync, periph, periph->path);

	softc->flags |= DA_FLAG_PACK_INVALID;
#ifdef CAM_IO_STATS
	softc->invalidations++;
#endif

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);

	/*
	 * Tell GEOM that we've gone away, we'll get a callback when it is
	 * done cleaning up its resources.
	 */
	disk_gone(softc->disk);
}
1790 
/*
 * Final periph destructor, called once the last reference is dropped.
 * Frees the I/O scheduler, sysctl state, callouts, disk, and softc.
 * The periph lock is held on entry and exit, but is dropped around
 * the teardown because several of these operations can sleep (e.g.
 * callout_drain(9), disk_destroy(9)).
 */
static void
dacleanup(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	cam_periph_unlock(periph);

	cam_iosched_fini(softc->cam_iosched);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) {
#ifdef CAM_IO_STATS
		if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl stats context\n");
#endif
		if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl context\n");
	}

	callout_drain(&softc->mediapoll_c);
	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}
1822 
1823 static void
1824 daasync(void *callback_arg, u_int32_t code,
1825 	struct cam_path *path, void *arg)
1826 {
1827 	struct cam_periph *periph;
1828 	struct da_softc *softc;
1829 
1830 	periph = (struct cam_periph *)callback_arg;
1831 	switch (code) {
1832 	case AC_FOUND_DEVICE:
1833 	{
1834 		struct ccb_getdev *cgd;
1835 		cam_status status;
1836 
1837 		cgd = (struct ccb_getdev *)arg;
1838 		if (cgd == NULL)
1839 			break;
1840 
1841 		if (cgd->protocol != PROTO_SCSI)
1842 			break;
1843 		if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
1844 			break;
1845 		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
1846 		    && SID_TYPE(&cgd->inq_data) != T_RBC
1847 		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL
1848 		    && SID_TYPE(&cgd->inq_data) != T_ZBC_HM)
1849 			break;
1850 
1851 		/*
1852 		 * Allocate a peripheral instance for
1853 		 * this device and start the probe
1854 		 * process.
1855 		 */
1856 		status = cam_periph_alloc(daregister, daoninvalidate,
1857 					  dacleanup, dastart,
1858 					  "da", CAM_PERIPH_BIO,
1859 					  path, daasync,
1860 					  AC_FOUND_DEVICE, cgd);
1861 
1862 		if (status != CAM_REQ_CMP
1863 		 && status != CAM_REQ_INPROG)
1864 			printf("daasync: Unable to attach to new device "
1865 				"due to status 0x%x\n", status);
1866 		return;
1867 	}
1868 	case AC_ADVINFO_CHANGED:
1869 	{
1870 		uintptr_t buftype;
1871 
1872 		buftype = (uintptr_t)arg;
1873 		if (buftype == CDAI_TYPE_PHYS_PATH) {
1874 			struct da_softc *softc;
1875 
1876 			softc = periph->softc;
1877 			disk_attr_changed(softc->disk, "GEOM::physpath",
1878 					  M_NOWAIT);
1879 		}
1880 		break;
1881 	}
1882 	case AC_UNIT_ATTENTION:
1883 	{
1884 		union ccb *ccb;
1885 		int error_code, sense_key, asc, ascq;
1886 
1887 		softc = (struct da_softc *)periph->softc;
1888 		ccb = (union ccb *)arg;
1889 
1890 		/*
1891 		 * Handle all UNIT ATTENTIONs except our own,
1892 		 * as they will be handled by daerror().
1893 		 */
1894 		if (xpt_path_periph(ccb->ccb_h.path) != periph &&
1895 		    scsi_extract_sense_ccb(ccb,
1896 		     &error_code, &sense_key, &asc, &ascq)) {
1897 			if (asc == 0x2A && ascq == 0x09) {
1898 				xpt_print(ccb->ccb_h.path,
1899 				    "Capacity data has changed\n");
1900 				softc->flags &= ~DA_FLAG_PROBED;
1901 				dareprobe(periph);
1902 			} else if (asc == 0x28 && ascq == 0x00) {
1903 				softc->flags &= ~DA_FLAG_PROBED;
1904 				disk_media_changed(softc->disk, M_NOWAIT);
1905 			} else if (asc == 0x3F && ascq == 0x03) {
1906 				xpt_print(ccb->ccb_h.path,
1907 				    "INQUIRY data has changed\n");
1908 				softc->flags &= ~DA_FLAG_PROBED;
1909 				dareprobe(periph);
1910 			}
1911 		}
1912 		cam_periph_async(periph, code, path, arg);
1913 		break;
1914 	}
1915 	case AC_SCSI_AEN:
1916 		softc = (struct da_softc *)periph->softc;
1917 		if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) {
1918 			if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
1919 				cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
1920 				daschedule(periph);
1921 			}
1922 		}
1923 		/* FALLTHROUGH */
1924 	case AC_SENT_BDR:
1925 	case AC_BUS_RESET:
1926 	{
1927 		struct ccb_hdr *ccbh;
1928 
1929 		softc = (struct da_softc *)periph->softc;
1930 		/*
1931 		 * Don't fail on the expected unit attention
1932 		 * that will occur.
1933 		 */
1934 		softc->flags |= DA_FLAG_RETRY_UA;
1935 		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
1936 			ccbh->ccb_state |= DA_CCB_RETRY_UA;
1937 		break;
1938 	}
1939 	case AC_INQ_CHANGED:
1940 		softc = (struct da_softc *)periph->softc;
1941 		softc->flags &= ~DA_FLAG_PROBED;
1942 		dareprobe(periph);
1943 		break;
1944 	default:
1945 		break;
1946 	}
1947 	cam_periph_async(periph, code, path, arg);
1948 }
1949 
/*
 * Deferred per-unit sysctl setup, run from a taskqueue after the
 * periph registers.  The enqueuer took a periph reference for us; it
 * is released on every exit path of this function.  Builds the
 * kern.cam.da.<unit> tree, adds transport (FC WWPN) info, optional
 * I/O statistics, and the I/O scheduler's own nodes.
 */
static void
dasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	char tmpstr[80], tmpstr2[80];
	struct ccb_trans_settings cts;

	periph = (struct cam_periph *)context;
	/*
	 * periph was held for us when this task was enqueued
	 */
	if (periph->flags & CAM_PERIPH_INVALID) {
		cam_periph_release(periph);
		return;
	}

	softc = (struct da_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->flags |= DA_FLAG_SCTX_INIT;
	softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
		CTLFLAG_RD, 0, tmpstr, "device_index");
	if (softc->sysctl_tree == NULL) {
		printf("dasysctlinit: unable to allocate sysctl tree\n");
		cam_periph_release(periph);
		return;
	}

	/*
	 * Now register the sysctl handler, so the user can change the value on
	 * the fly.
	 */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN,
		softc, 0, dadeletemethodsysctl, "A",
		"BIO_DELETE execution method");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW,
		softc, 0, dadeletemaxsysctl, "Q",
		"Maximum BIO_DELETE size");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
		&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
		"Minimum CDB size");

	/* Zoned (SMR) drive reporting. */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD,
		softc, 0, dazonemodesysctl, "A",
		"Zone Mode");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD,
		softc, 0, dazonesupsysctl, "A",
		"Zone Support");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
		"Optimal Number of Open Sequential Write Preferred Zones");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"optimal_nonseq_zones", CTLFLAG_RD,
		&softc->optimal_nonseq_zones,
		"Optimal Number of Non-Sequentially Written Sequential Write "
		"Preferred Zones");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
		"Maximum Number of Open Sequential Write Required Zones");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "error_inject",
		       CTLFLAG_RW,
		       &softc->error_inject,
		       0,
		       "error_inject leaf");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "unmapped_io",
		       CTLFLAG_RD,
		       &softc->unmappedio,
		       0,
		       "Unmapped I/O leaf");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "rotating",
		       CTLFLAG_RD,
		       &softc->rotating,
		       0,
		       "Rotating media");

	/*
	 * Add some addressing info.
	 */
	memset(&cts, 0, sizeof (cts));
	xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cam_periph_lock(periph);
	xpt_action((union ccb *)&cts);
	cam_periph_unlock(periph);
	if (cts.ccb_h.status != CAM_REQ_CMP) {
		cam_periph_release(periph);
		return;
	}
	if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) {
		struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWPN) {
			softc->wwpn = fc->wwpn;
			SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
			    SYSCTL_CHILDREN(softc->sysctl_tree),
			    OID_AUTO, "wwpn", CTLFLAG_RD,
			    &softc->wwpn, "World Wide Port Name");
		}
	}

#ifdef CAM_IO_STATS
	/*
	 * Now add some useful stats.
	 * XXX These should live in cam_periph and be common to all periphs
	 */
	softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx,
	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats",
	    CTLFLAG_RD, 0, "Statistics");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "errors",
		       CTLFLAG_RD,
		       &softc->errors,
		       0,
		       "Transport errors reported by the SIM");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "timeouts",
		       CTLFLAG_RD,
		       &softc->timeouts,
		       0,
		       "Device timeouts reported by the SIM");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "pack_invalidations",
		       CTLFLAG_RD,
		       &softc->invalidations,
		       0,
		       "Device pack invalidations");
#endif

	/* Let the I/O scheduler add its own nodes under our tree. */
	cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx,
	    softc->sysctl_tree);

	cam_periph_release(periph);
}
2113 
2114 static int
2115 dadeletemaxsysctl(SYSCTL_HANDLER_ARGS)
2116 {
2117 	int error;
2118 	uint64_t value;
2119 	struct da_softc *softc;
2120 
2121 	softc = (struct da_softc *)arg1;
2122 
2123 	value = softc->disk->d_delmaxsize;
2124 	error = sysctl_handle_64(oidp, &value, 0, req);
2125 	if ((error != 0) || (req->newptr == NULL))
2126 		return (error);
2127 
2128 	/* only accept values smaller than the calculated value */
2129 	if (value > dadeletemaxsize(softc, softc->delete_method)) {
2130 		return (EINVAL);
2131 	}
2132 	softc->disk->d_delmaxsize = value;
2133 
2134 	return (0);
2135 }
2136 
2137 static int
2138 dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
2139 {
2140 	int error, value;
2141 
2142 	value = *(int *)arg1;
2143 
2144 	error = sysctl_handle_int(oidp, &value, 0, req);
2145 
2146 	if ((error != 0)
2147 	 || (req->newptr == NULL))
2148 		return (error);
2149 
2150 	/*
2151 	 * Acceptable values here are 6, 10, 12 or 16.
2152 	 */
2153 	if (value < 6)
2154 		value = 6;
2155 	else if ((value > 6)
2156 	      && (value <= 10))
2157 		value = 10;
2158 	else if ((value > 10)
2159 	      && (value <= 12))
2160 		value = 12;
2161 	else if (value > 12)
2162 		value = 16;
2163 
2164 	*(int *)arg1 = value;
2165 
2166 	return (0);
2167 }
2168 
2169 static int
2170 dasysctlsofttimeout(SYSCTL_HANDLER_ARGS)
2171 {
2172 	sbintime_t value;
2173 	int error;
2174 
2175 	value = da_default_softtimeout / SBT_1MS;
2176 
2177 	error = sysctl_handle_int(oidp, (int *)&value, 0, req);
2178 	if ((error != 0) || (req->newptr == NULL))
2179 		return (error);
2180 
2181 	/* XXX Should clip this to a reasonable level */
2182 	if (value > da_default_timeout * 1000)
2183 		return (EINVAL);
2184 
2185 	da_default_softtimeout = value * SBT_1MS;
2186 	return (0);
2187 }
2188 
2189 static void
2190 dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method)
2191 {
2192 
2193 	softc->delete_method = delete_method;
2194 	softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method);
2195 	softc->delete_func = da_delete_functions[delete_method];
2196 
2197 	if (softc->delete_method > DA_DELETE_DISABLE)
2198 		softc->disk->d_flags |= DISKFLAG_CANDELETE;
2199 	else
2200 		softc->disk->d_flags &= ~DISKFLAG_CANDELETE;
2201 }
2202 
2203 static off_t
2204 dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method)
2205 {
2206 	off_t sectors;
2207 
2208 	switch(delete_method) {
2209 	case DA_DELETE_UNMAP:
2210 		sectors = (off_t)softc->unmap_max_lba;
2211 		break;
2212 	case DA_DELETE_ATA_TRIM:
2213 		sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges;
2214 		break;
2215 	case DA_DELETE_WS16:
2216 		sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS);
2217 		break;
2218 	case DA_DELETE_ZERO:
2219 	case DA_DELETE_WS10:
2220 		sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS);
2221 		break;
2222 	default:
2223 		return 0;
2224 	}
2225 
2226 	return (off_t)softc->params.secsize *
2227 	    omin(sectors, softc->params.sectors);
2228 }
2229 
/*
 * Complete the probe sequence for a da(4) peripheral: pick a delete
 * method, optionally announce the supported methods, and transition
 * the softc into normal operation.  Called with the periph locked.
 */
static void
daprobedone(struct cam_periph *periph, union ccb *ccb)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	/* Select the best available BIO_DELETE implementation. */
	dadeletemethodchoose(softc, DA_DELETE_NONE);

	/*
	 * On the first (un-announced) probe with bootverbose, print the
	 * available delete methods, marking the selected one with "(*)".
	 */
	if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		char buf[80];
		int i, sep;

		snprintf(buf, sizeof(buf), "Delete methods: <");
		sep = 0;
		for (i = 0; i <= DA_DELETE_MAX; i++) {
			/* List methods that are available or selected. */
			if ((softc->delete_available & (1 << i)) == 0 &&
			    i != softc->delete_method)
				continue;
			if (sep)
				strlcat(buf, ",", sizeof(buf));
			strlcat(buf, da_delete_method_names[i],
			    sizeof(buf));
			if (i == softc->delete_method)
				strlcat(buf, "(*)", sizeof(buf));
			sep = 1;
		}
		strlcat(buf, ">", sizeof(buf));
		printf("%s%d: %s\n", periph->periph_name,
		    periph->unit_number, buf);
	}

	/*
	 * Since our peripheral may be invalidated by an error
	 * above or an external event, we must release our CCB
	 * before releasing the probe lock on the peripheral.
	 * The peripheral will only go away once the last lock
	 * is removed, and we need it around for the CCB release
	 * operation.
	 */
	xpt_release_ccb(ccb);
	softc->state = DA_STATE_NORMAL;
	softc->flags |= DA_FLAG_PROBED;
	daschedule(periph);
	/* Wake anyone sleeping for the media size to become valid. */
	wakeup(&softc->disk->d_mediasize);
	if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		/* First probe: drop the exclusive hold taken in daregister. */
		softc->flags |= DA_FLAG_ANNOUNCED;
		cam_periph_unhold(periph);
	} else
		cam_periph_release_locked(periph);
}
2281 
2282 static void
2283 dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method)
2284 {
2285 	int i, methods;
2286 
2287 	/* If available, prefer the method requested by user. */
2288 	i = softc->delete_method_pref;
2289 	methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
2290 	if (methods & (1 << i)) {
2291 		dadeletemethodset(softc, i);
2292 		return;
2293 	}
2294 
2295 	/* Use the pre-defined order to choose the best performing delete. */
2296 	for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
2297 		if (i == DA_DELETE_ZERO)
2298 			continue;
2299 		if (softc->delete_available & (1 << i)) {
2300 			dadeletemethodset(softc, i);
2301 			return;
2302 		}
2303 	}
2304 
2305 	/* Fallback to default. */
2306 	dadeletemethodset(softc, default_method);
2307 }
2308 
2309 static int
2310 dadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
2311 {
2312 	char buf[16];
2313 	const char *p;
2314 	struct da_softc *softc;
2315 	int i, error, methods, value;
2316 
2317 	softc = (struct da_softc *)arg1;
2318 
2319 	value = softc->delete_method;
2320 	if (value < 0 || value > DA_DELETE_MAX)
2321 		p = "UNKNOWN";
2322 	else
2323 		p = da_delete_method_names[value];
2324 	strncpy(buf, p, sizeof(buf));
2325 	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
2326 	if (error != 0 || req->newptr == NULL)
2327 		return (error);
2328 	methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
2329 	for (i = 0; i <= DA_DELETE_MAX; i++) {
2330 		if (strcmp(buf, da_delete_method_names[i]) == 0)
2331 			break;
2332 	}
2333 	if (i > DA_DELETE_MAX)
2334 		return (EINVAL);
2335 	softc->delete_method_pref = i;
2336 	dadeletemethodchoose(softc, DA_DELETE_NONE);
2337 	return (0);
2338 }
2339 
2340 static int
2341 dazonemodesysctl(SYSCTL_HANDLER_ARGS)
2342 {
2343 	char tmpbuf[40];
2344 	struct da_softc *softc;
2345 	int error;
2346 
2347 	softc = (struct da_softc *)arg1;
2348 
2349 	switch (softc->zone_mode) {
2350 	case DA_ZONE_DRIVE_MANAGED:
2351 		snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed");
2352 		break;
2353 	case DA_ZONE_HOST_AWARE:
2354 		snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware");
2355 		break;
2356 	case DA_ZONE_HOST_MANAGED:
2357 		snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed");
2358 		break;
2359 	case DA_ZONE_NONE:
2360 	default:
2361 		snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned");
2362 		break;
2363 	}
2364 
2365 	error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req);
2366 
2367 	return (error);
2368 }
2369 
2370 static int
2371 dazonesupsysctl(SYSCTL_HANDLER_ARGS)
2372 {
2373 	char tmpbuf[180];
2374 	struct da_softc *softc;
2375 	struct sbuf sb;
2376 	int error, first;
2377 	unsigned int i;
2378 
2379 	softc = (struct da_softc *)arg1;
2380 
2381 	error = 0;
2382 	first = 1;
2383 	sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0);
2384 
2385 	for (i = 0; i < sizeof(da_zone_desc_table) /
2386 	     sizeof(da_zone_desc_table[0]); i++) {
2387 		if (softc->zone_flags & da_zone_desc_table[i].value) {
2388 			if (first == 0)
2389 				sbuf_printf(&sb, ", ");
2390 			else
2391 				first = 0;
2392 			sbuf_cat(&sb, da_zone_desc_table[i].desc);
2393 		}
2394 	}
2395 
2396 	if (first == 1)
2397 		sbuf_printf(&sb, "None");
2398 
2399 	sbuf_finish(&sb);
2400 
2401 	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
2402 
2403 	return (error);
2404 }
2405 
/*
 * CAM registration entry point for the da(4) peripheral.  Allocates
 * and initializes the softc, applies quirks, determines zoning mode,
 * registers the disk with GEOM, and kicks off the probe state machine.
 * Called with the periph locked; returns CAM_REQ_CMP on success.
 */
static cam_status
daregister(struct cam_periph *periph, void *arg)
{
	struct da_softc *softc;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char tmpstr[80];
	caddr_t match;

	cgd = (struct ccb_getdev *)arg;
	if (cgd == NULL) {
		printf("daregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cam_iosched_init(&softc->cam_iosched, periph) != 0) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate iosched memory\n");
		free(softc, M_DEVBUF);
		return(CAM_REQ_CMP_ERR);
	}

	/* Default limits; refined later by probe results. */
	LIST_INIT(&softc->pending_ccbs);
	softc->state = DA_STATE_PROBE_RC;
	bioq_init(&softc->delete_run_queue);
	if (SID_IS_REMOVABLE(&cgd->inq_data))
		softc->flags |= DA_FLAG_PACK_REMOVABLE;
	softc->unmap_max_ranges = UNMAP_MAX_RANGES;
	softc->unmap_max_lba = UNMAP_RANGE_MAX;
	softc->unmap_gran = 0;
	softc->unmap_gran_align = 0;
	softc->ws_max_blks = WS16_MAX_BLKS;
	softc->trim_max_ranges = ATA_TRIM_MAX_RANGES;
	softc->rotating = 1;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
			       (caddr_t)da_quirk_table,
			       nitems(da_quirk_table),
			       sizeof(*da_quirk_table), scsi_inquiry_match);

	if (match != NULL)
		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
	else
		softc->quirks = DA_Q_NONE;

	/* Check if the SIM does not want 6 byte commands */
	bzero(&cpi, sizeof(cpi));
	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
		softc->quirks |= DA_Q_NO_6_BYTE;

	/*
	 * Determine the zoning mode: host-managed devices report a
	 * distinct SCSI device type; drive-managed ones are identified
	 * via quirks.
	 */
	if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM)
		softc->zone_mode = DA_ZONE_HOST_MANAGED;
	else if (softc->quirks & DA_Q_SMR_DM)
		softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
	else
		softc->zone_mode = DA_ZONE_NONE;

	/* Pick the interface (native ZBC vs. ATA behind SAT) for zone cmds. */
	if (softc->zone_mode != DA_ZONE_NONE) {
		if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
			if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC))
				softc->zone_interface = DA_ZONE_IF_ATA_SAT;
			else
				softc->zone_interface = DA_ZONE_IF_ATA_PASS;
		} else
			softc->zone_interface = DA_ZONE_IF_SCSI;
	}

	TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);

	/*
	 * Take an exclusive refcount on the periph while dastart is called
	 * to finish the probe.  The reference will be dropped in dadone at
	 * the end of probe.
	 */
	(void)cam_periph_hold(periph, PRIBIO);

	/*
	 * Schedule a periodic event to occasionally send an
	 * ordered tag to a device.
	 */
	callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
	callout_reset(&softc->sendordered_c,
	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
	    dasendorderedtag, softc);

	cam_periph_unlock(periph);
	/*
	 * RBC devices don't have to support READ(6), only READ(10).
	 */
	if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/*
	 * Load the user's default, if any.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
		 periph->unit_number);
	TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);

	/*
	 * 6, 10, 12 and 16 are the currently permissible values.
	 */
	if (softc->minimum_cmd_size > 12)
		softc->minimum_cmd_size = 16;
	else if (softc->minimum_cmd_size > 10)
		softc->minimum_cmd_size = 12;
	else if (softc->minimum_cmd_size > 6)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/* Predict whether device may support READ CAPACITY(16). */
	if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 &&
	    (softc->quirks & DA_Q_NO_RC16) == 0) {
		softc->flags |= DA_FLAG_CAN_RC16;
		softc->state = DA_STATE_PROBE_RC16;
	}

	/*
	 * Register this media as a disk.
	 */
	softc->disk = disk_alloc();
	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
			  periph->unit_number, 0,
			  DEVSTAT_BS_UNAVAILABLE,
			  SID_TYPE(&cgd->inq_data) |
			  XPORT_DEVSTAT_TYPE(cpi.transport),
			  DEVSTAT_PRIORITY_DISK);
	softc->disk->d_open = daopen;
	softc->disk->d_close = daclose;
	softc->disk->d_strategy = dastrategy;
	softc->disk->d_dump = dadump;
	softc->disk->d_getattr = dagetattr;
	softc->disk->d_gone = dadiskgonecb;
	softc->disk->d_name = "da";
	softc->disk->d_drv1 = periph;
	/* Clamp the maximum I/O size to what the SIM reported. */
	if (cpi.maxio == 0)
		softc->maxio = DFLTPHYS;	/* traditional default */
	else if (cpi.maxio > MAXPHYS)
		softc->maxio = MAXPHYS;		/* for safety */
	else
		softc->maxio = cpi.maxio;
	softc->disk->d_maxsize = softc->maxio;
	softc->disk->d_unit = periph->unit_number;
	softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0)
		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
		softc->unmappedio = 1;
		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
	}
	/* Build "<vendor> <product>" description from INQUIRY data. */
	cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor,
	    sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr));
	strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr));
	cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)],
	    cgd->inq_data.product, sizeof(cgd->inq_data.product),
	    sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr));
	softc->disk->d_hba_vendor = cpi.hba_vendor;
	softc->disk->d_hba_device = cpi.hba_device;
	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
	softc->disk->d_hba_subdevice = cpi.hba_subdevice;

	/*
	 * Acquire a reference to the periph before we register with GEOM.
	 * We'll release this reference once GEOM calls us back (via
	 * dadiskgonecb()) telling us that our provider has been freed.
	 */
	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: lost periph during "
			  "registration!\n", __func__);
		cam_periph_lock(periph);
		return (CAM_REQ_CMP_ERR);
	}

	disk_create(softc->disk, DISK_VERSION);
	cam_periph_lock(periph);

	/*
	 * Add async callbacks for events of interest.
	 * I don't bother checking if this fails as,
	 * in most cases, the system will function just
	 * fine without them and the only alternative
	 * would be to not attach the device on failure.
	 */
	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
	    AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION |
	    AC_INQ_CHANGED, daasync, periph, periph->path);

	/*
	 * Emit an attribute changed notification just in case
	 * physical path information arrived before our async
	 * event handler was registered, but after anyone attaching
	 * to our disk device polled it.
	 */
	disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT);

	/*
	 * Schedule a periodic media polling events.
	 */
	callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0);
	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) &&
	    (cgd->inq_flags & SID_AEN) == 0 &&
	    da_poll_period != 0)
		callout_reset(&softc->mediapoll_c, da_poll_period * hz,
		    damediapoll, periph);

	/* Start the probe state machine (dastart). */
	xpt_schedule(periph, CAM_PRIORITY_DEV);

	return(CAM_REQ_CMP);
}
2635 
2636 static int
2637 da_zone_bio_to_scsi(int disk_zone_cmd)
2638 {
2639 	switch (disk_zone_cmd) {
2640 	case DISK_ZONE_OPEN:
2641 		return ZBC_OUT_SA_OPEN;
2642 	case DISK_ZONE_CLOSE:
2643 		return ZBC_OUT_SA_CLOSE;
2644 	case DISK_ZONE_FINISH:
2645 		return ZBC_OUT_SA_FINISH;
2646 	case DISK_ZONE_RWP:
2647 		return ZBC_OUT_SA_RWP;
2648 	}
2649 
2650 	return -1;
2651 }
2652 
2653 static int
2654 da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp,
2655 	    int *queue_ccb)
2656 {
2657 	struct da_softc *softc;
2658 	int error;
2659 
2660 	error = 0;
2661 
2662 	if (bp->bio_cmd != BIO_ZONE) {
2663 		error = EINVAL;
2664 		goto bailout;
2665 	}
2666 
2667 	softc = periph->softc;
2668 
2669 	switch (bp->bio_zone.zone_cmd) {
2670 	case DISK_ZONE_OPEN:
2671 	case DISK_ZONE_CLOSE:
2672 	case DISK_ZONE_FINISH:
2673 	case DISK_ZONE_RWP: {
2674 		int zone_flags;
2675 		int zone_sa;
2676 		uint64_t lba;
2677 
2678 		zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd);
2679 		if (zone_sa == -1) {
2680 			xpt_print(periph->path, "Cannot translate zone "
2681 			    "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd);
2682 			error = EINVAL;
2683 			goto bailout;
2684 		}
2685 
2686 		zone_flags = 0;
2687 		lba = bp->bio_zone.zone_params.rwp.id;
2688 
2689 		if (bp->bio_zone.zone_params.rwp.flags &
2690 		    DISK_ZONE_RWP_FLAG_ALL)
2691 			zone_flags |= ZBC_OUT_ALL;
2692 
2693 		if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
2694 			scsi_zbc_out(&ccb->csio,
2695 				     /*retries*/ da_retry_count,
2696 				     /*cbfcnp*/ dadone,
2697 				     /*tag_action*/ MSG_SIMPLE_Q_TAG,
2698 				     /*service_action*/ zone_sa,
2699 				     /*zone_id*/ lba,
2700 				     /*zone_flags*/ zone_flags,
2701 				     /*data_ptr*/ NULL,
2702 				     /*dxfer_len*/ 0,
2703 				     /*sense_len*/ SSD_FULL_SIZE,
2704 				     /*timeout*/ da_default_timeout * 1000);
2705 		} else {
2706 			/*
2707 			 * Note that in this case, even though we can
2708 			 * technically use NCQ, we don't bother for several
2709 			 * reasons:
2710 			 * 1. It hasn't been tested on a SAT layer that
2711 			 *    supports it.  This is new as of SAT-4.
2712 			 * 2. Even when there is a SAT layer that supports
2713 			 *    it, that SAT layer will also probably support
2714 			 *    ZBC -> ZAC translation, since they are both
2715 			 *    in the SAT-4 spec.
2716 			 * 3. Translation will likely be preferable to ATA
2717 			 *    passthrough.  LSI / Avago at least single
2718 			 *    steps ATA passthrough commands in the HBA,
2719 			 *    regardless of protocol, so unless that
2720 			 *    changes, there is a performance penalty for
2721 			 *    doing ATA passthrough no matter whether
2722 			 *    you're using NCQ/FPDMA, DMA or PIO.
2723 			 * 4. It requires a 32-byte CDB, which at least at
2724 			 *    this point in CAM requires a CDB pointer, which
2725 			 *    would require us to allocate an additional bit
2726 			 *    of storage separate from the CCB.
2727 			 */
2728 			error = scsi_ata_zac_mgmt_out(&ccb->csio,
2729 			    /*retries*/ da_retry_count,
2730 			    /*cbfcnp*/ dadone,
2731 			    /*tag_action*/ MSG_SIMPLE_Q_TAG,
2732 			    /*use_ncq*/ 0,
2733 			    /*zm_action*/ zone_sa,
2734 			    /*zone_id*/ lba,
2735 			    /*zone_flags*/ zone_flags,
2736 			    /*data_ptr*/ NULL,
2737 			    /*dxfer_len*/ 0,
2738 			    /*cdb_storage*/ NULL,
2739 			    /*cdb_storage_len*/ 0,
2740 			    /*sense_len*/ SSD_FULL_SIZE,
2741 			    /*timeout*/ da_default_timeout * 1000);
2742 			if (error != 0) {
2743 				error = EINVAL;
2744 				xpt_print(periph->path,
2745 				    "scsi_ata_zac_mgmt_out() returned an "
2746 				    "error!");
2747 				goto bailout;
2748 			}
2749 		}
2750 		*queue_ccb = 1;
2751 
2752 		break;
2753 	}
2754 	case DISK_ZONE_REPORT_ZONES: {
2755 		uint8_t *rz_ptr;
2756 		uint32_t num_entries, alloc_size;
2757 		struct disk_zone_report *rep;
2758 
2759 		rep = &bp->bio_zone.zone_params.report;
2760 
2761 		num_entries = rep->entries_allocated;
2762 		if (num_entries == 0) {
2763 			xpt_print(periph->path, "No entries allocated for "
2764 			    "Report Zones request\n");
2765 			error = EINVAL;
2766 			goto bailout;
2767 		}
2768 		alloc_size = sizeof(struct scsi_report_zones_hdr) +
2769 		    (sizeof(struct scsi_report_zones_desc) * num_entries);
2770 		alloc_size = min(alloc_size, softc->disk->d_maxsize);
2771 		rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO);
2772 		if (rz_ptr == NULL) {
2773 			xpt_print(periph->path, "Unable to allocate memory "
2774 			   "for Report Zones request\n");
2775 			error = ENOMEM;
2776 			goto bailout;
2777 		}
2778 
2779 		if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
2780 			scsi_zbc_in(&ccb->csio,
2781 				    /*retries*/ da_retry_count,
2782 				    /*cbcfnp*/ dadone,
2783 				    /*tag_action*/ MSG_SIMPLE_Q_TAG,
2784 				    /*service_action*/ ZBC_IN_SA_REPORT_ZONES,
2785 				    /*zone_start_lba*/ rep->starting_id,
2786 				    /*zone_options*/ rep->rep_options,
2787 				    /*data_ptr*/ rz_ptr,
2788 				    /*dxfer_len*/ alloc_size,
2789 				    /*sense_len*/ SSD_FULL_SIZE,
2790 				    /*timeout*/ da_default_timeout * 1000);
2791 		} else {
2792 			/*
2793 			 * Note that in this case, even though we can
2794 			 * technically use NCQ, we don't bother for several
2795 			 * reasons:
2796 			 * 1. It hasn't been tested on a SAT layer that
2797 			 *    supports it.  This is new as of SAT-4.
2798 			 * 2. Even when there is a SAT layer that supports
2799 			 *    it, that SAT layer will also probably support
2800 			 *    ZBC -> ZAC translation, since they are both
2801 			 *    in the SAT-4 spec.
2802 			 * 3. Translation will likely be preferable to ATA
2803 			 *    passthrough.  LSI / Avago at least single
2804 			 *    steps ATA passthrough commands in the HBA,
2805 			 *    regardless of protocol, so unless that
2806 			 *    changes, there is a performance penalty for
2807 			 *    doing ATA passthrough no matter whether
2808 			 *    you're using NCQ/FPDMA, DMA or PIO.
2809 			 * 4. It requires a 32-byte CDB, which at least at
2810 			 *    this point in CAM requires a CDB pointer, which
2811 			 *    would require us to allocate an additional bit
2812 			 *    of storage separate from the CCB.
2813 			 */
2814 			error = scsi_ata_zac_mgmt_in(&ccb->csio,
2815 			    /*retries*/ da_retry_count,
2816 			    /*cbcfnp*/ dadone,
2817 			    /*tag_action*/ MSG_SIMPLE_Q_TAG,
2818 			    /*use_ncq*/ 0,
2819 			    /*zm_action*/ ATA_ZM_REPORT_ZONES,
2820 			    /*zone_id*/ rep->starting_id,
2821 			    /*zone_flags*/ rep->rep_options,
2822 			    /*data_ptr*/ rz_ptr,
2823 			    /*dxfer_len*/ alloc_size,
2824 			    /*cdb_storage*/ NULL,
2825 			    /*cdb_storage_len*/ 0,
2826 			    /*sense_len*/ SSD_FULL_SIZE,
2827 			    /*timeout*/ da_default_timeout * 1000);
2828 			if (error != 0) {
2829 				error = EINVAL;
2830 				xpt_print(periph->path,
2831 				    "scsi_ata_zac_mgmt_in() returned an "
2832 				    "error!");
2833 				goto bailout;
2834 			}
2835 		}
2836 
2837 		/*
2838 		 * For BIO_ZONE, this isn't normally needed.  However, it
2839 		 * is used by devstat_end_transaction_bio() to determine
2840 		 * how much data was transferred.
2841 		 */
2842 		/*
2843 		 * XXX KDM we have a problem.  But I'm not sure how to fix
2844 		 * it.  devstat uses bio_bcount - bio_resid to calculate
2845 		 * the amount of data transferred.   The GEOM disk code
2846 		 * uses bio_length - bio_resid to calculate the amount of
2847 		 * data in bio_completed.  We have different structure
2848 		 * sizes above and below the ada(4) driver.  So, if we
2849 		 * use the sizes above, the amount transferred won't be
2850 		 * quite accurate for devstat.  If we use different sizes
2851 		 * for bio_bcount and bio_length (above and below
2852 		 * respectively), then the residual needs to match one or
2853 		 * the other.  Everything is calculated after the bio
2854 		 * leaves the driver, so changing the values around isn't
2855 		 * really an option.  For now, just set the count to the
2856 		 * passed in length.  This means that the calculations
2857 		 * above (e.g. bio_completed) will be correct, but the
2858 		 * amount of data reported to devstat will be slightly
2859 		 * under or overstated.
2860 		 */
2861 		bp->bio_bcount = bp->bio_length;
2862 
2863 		*queue_ccb = 1;
2864 
2865 		break;
2866 	}
2867 	case DISK_ZONE_GET_PARAMS: {
2868 		struct disk_zone_disk_params *params;
2869 
2870 		params = &bp->bio_zone.zone_params.disk_params;
2871 		bzero(params, sizeof(*params));
2872 
2873 		switch (softc->zone_mode) {
2874 		case DA_ZONE_DRIVE_MANAGED:
2875 			params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED;
2876 			break;
2877 		case DA_ZONE_HOST_AWARE:
2878 			params->zone_mode = DISK_ZONE_MODE_HOST_AWARE;
2879 			break;
2880 		case DA_ZONE_HOST_MANAGED:
2881 			params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED;
2882 			break;
2883 		default:
2884 		case DA_ZONE_NONE:
2885 			params->zone_mode = DISK_ZONE_MODE_NONE;
2886 			break;
2887 		}
2888 
2889 		if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ)
2890 			params->flags |= DISK_ZONE_DISK_URSWRZ;
2891 
2892 		if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) {
2893 			params->optimal_seq_zones = softc->optimal_seq_zones;
2894 			params->flags |= DISK_ZONE_OPT_SEQ_SET;
2895 		}
2896 
2897 		if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) {
2898 			params->optimal_nonseq_zones =
2899 			    softc->optimal_nonseq_zones;
2900 			params->flags |= DISK_ZONE_OPT_NONSEQ_SET;
2901 		}
2902 
2903 		if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) {
2904 			params->max_seq_zones = softc->max_seq_zones;
2905 			params->flags |= DISK_ZONE_MAX_SEQ_SET;
2906 		}
2907 		if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP)
2908 			params->flags |= DISK_ZONE_RZ_SUP;
2909 
2910 		if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP)
2911 			params->flags |= DISK_ZONE_OPEN_SUP;
2912 
2913 		if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP)
2914 			params->flags |= DISK_ZONE_CLOSE_SUP;
2915 
2916 		if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP)
2917 			params->flags |= DISK_ZONE_FINISH_SUP;
2918 
2919 		if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP)
2920 			params->flags |= DISK_ZONE_RWP_SUP;
2921 		break;
2922 	}
2923 	default:
2924 		break;
2925 	}
2926 bailout:
2927 	return (error);
2928 }
2929 
2930 static void
2931 dastart(struct cam_periph *periph, union ccb *start_ccb)
2932 {
2933 	struct da_softc *softc;
2934 
2935 	softc = (struct da_softc *)periph->softc;
2936 
2937 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n"));
2938 
2939 skipstate:
2940 	switch (softc->state) {
2941 	case DA_STATE_NORMAL:
2942 	{
2943 		struct bio *bp;
2944 		uint8_t tag_code;
2945 
2946 more:
2947 		bp = cam_iosched_next_bio(softc->cam_iosched);
2948 		if (bp == NULL) {
2949 			if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) {
2950 				cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR);
2951 				scsi_test_unit_ready(&start_ccb->csio,
2952 				     /*retries*/ da_retry_count,
2953 				     dadone,
2954 				     MSG_SIMPLE_Q_TAG,
2955 				     SSD_FULL_SIZE,
2956 				     da_default_timeout * 1000);
2957 				start_ccb->ccb_h.ccb_bp = NULL;
2958 				start_ccb->ccb_h.ccb_state = DA_CCB_TUR;
2959 				xpt_action(start_ccb);
2960 			} else
2961 				xpt_release_ccb(start_ccb);
2962 			break;
2963 		}
2964 
2965 		if (bp->bio_cmd == BIO_DELETE) {
2966 			if (softc->delete_func != NULL) {
2967 				softc->delete_func(periph, start_ccb, bp);
2968 				goto out;
2969 			} else {
2970 				/* Not sure this is possible, but failsafe by lying and saying "sure, done." */
2971 				biofinish(bp, NULL, 0);
2972 				goto more;
2973 			}
2974 		}
2975 
2976 		if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) {
2977 			cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR);
2978 			cam_periph_release_locked(periph);	/* XXX is this still valid? I think so but unverified */
2979 		}
2980 
2981 		if ((bp->bio_flags & BIO_ORDERED) != 0 ||
2982 		    (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
2983 			softc->flags &= ~DA_FLAG_NEED_OTAG;
2984 			softc->flags |= DA_FLAG_WAS_OTAG;
2985 			tag_code = MSG_ORDERED_Q_TAG;
2986 		} else {
2987 			tag_code = MSG_SIMPLE_Q_TAG;
2988 		}
2989 
2990 		switch (bp->bio_cmd) {
2991 		case BIO_WRITE:
2992 		case BIO_READ:
2993 		{
2994 			void *data_ptr;
2995 			int rw_op;
2996 
2997 			biotrack(bp, __func__);
2998 
2999 			if (bp->bio_cmd == BIO_WRITE) {
3000 				softc->flags |= DA_FLAG_DIRTY;
3001 				rw_op = SCSI_RW_WRITE;
3002 			} else {
3003 				rw_op = SCSI_RW_READ;
3004 			}
3005 
3006 			data_ptr = bp->bio_data;
3007 			if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) {
3008 				rw_op |= SCSI_RW_BIO;
3009 				data_ptr = bp;
3010 			}
3011 
3012 			scsi_read_write(&start_ccb->csio,
3013 					/*retries*/da_retry_count,
3014 					/*cbfcnp*/dadone,
3015 					/*tag_action*/tag_code,
3016 					rw_op,
3017 					/*byte2*/0,
3018 					softc->minimum_cmd_size,
3019 					/*lba*/bp->bio_pblkno,
3020 					/*block_count*/bp->bio_bcount /
3021 					softc->params.secsize,
3022 					data_ptr,
3023 					/*dxfer_len*/ bp->bio_bcount,
3024 					/*sense_len*/SSD_FULL_SIZE,
3025 					da_default_timeout * 1000);
3026 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
3027 			start_ccb->csio.bio = bp;
3028 #endif
3029 			break;
3030 		}
3031 		case BIO_FLUSH:
3032 			/*
3033 			 * BIO_FLUSH doesn't currently communicate
3034 			 * range data, so we synchronize the cache
3035 			 * over the whole disk.  We also force
3036 			 * ordered tag semantics the flush applies
3037 			 * to all previously queued I/O.
3038 			 */
3039 			scsi_synchronize_cache(&start_ccb->csio,
3040 					       /*retries*/1,
3041 					       /*cbfcnp*/dadone,
3042 					       MSG_ORDERED_Q_TAG,
3043 					       /*begin_lba*/0,
3044 					       /*lb_count*/0,
3045 					       SSD_FULL_SIZE,
3046 					       da_default_timeout*1000);
3047 			break;
3048 		case BIO_ZONE: {
3049 			int error, queue_ccb;
3050 
3051 			queue_ccb = 0;
3052 
3053 			error = da_zone_cmd(periph, start_ccb, bp,&queue_ccb);
3054 			if ((error != 0)
3055 			 || (queue_ccb == 0)) {
3056 				biofinish(bp, NULL, error);
3057 				xpt_release_ccb(start_ccb);
3058 				return;
3059 			}
3060 			break;
3061 		}
3062 		}
3063 		start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
3064 		start_ccb->ccb_h.flags |= CAM_UNLOCKED;
3065 		start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout);
3066 
3067 out:
3068 		LIST_INSERT_HEAD(&softc->pending_ccbs,
3069 				 &start_ccb->ccb_h, periph_links.le);
3070 
3071 		/* We expect a unit attention from this device */
3072 		if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
3073 			start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
3074 			softc->flags &= ~DA_FLAG_RETRY_UA;
3075 		}
3076 
3077 		start_ccb->ccb_h.ccb_bp = bp;
3078 		softc->refcount++;
3079 		cam_periph_unlock(periph);
3080 		xpt_action(start_ccb);
3081 		cam_periph_lock(periph);
3082 		softc->refcount--;
3083 
3084 		/* May have more work to do, so ensure we stay scheduled */
3085 		daschedule(periph);
3086 		break;
3087 	}
3088 	case DA_STATE_PROBE_RC:
3089 	{
3090 		struct scsi_read_capacity_data *rcap;
3091 
3092 		rcap = (struct scsi_read_capacity_data *)
3093 		    malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO);
3094 		if (rcap == NULL) {
3095 			printf("dastart: Couldn't malloc read_capacity data\n");
3096 			/* da_free_periph??? */
3097 			break;
3098 		}
3099 		scsi_read_capacity(&start_ccb->csio,
3100 				   /*retries*/da_retry_count,
3101 				   dadone,
3102 				   MSG_SIMPLE_Q_TAG,
3103 				   rcap,
3104 				   SSD_FULL_SIZE,
3105 				   /*timeout*/5000);
3106 		start_ccb->ccb_h.ccb_bp = NULL;
3107 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC;
3108 		xpt_action(start_ccb);
3109 		break;
3110 	}
3111 	case DA_STATE_PROBE_RC16:
3112 	{
3113 		struct scsi_read_capacity_data_long *rcaplong;
3114 
3115 		rcaplong = (struct scsi_read_capacity_data_long *)
3116 			malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO);
3117 		if (rcaplong == NULL) {
3118 			printf("dastart: Couldn't malloc read_capacity data\n");
3119 			/* da_free_periph??? */
3120 			break;
3121 		}
3122 		scsi_read_capacity_16(&start_ccb->csio,
3123 				      /*retries*/ da_retry_count,
3124 				      /*cbfcnp*/ dadone,
3125 				      /*tag_action*/ MSG_SIMPLE_Q_TAG,
3126 				      /*lba*/ 0,
3127 				      /*reladr*/ 0,
3128 				      /*pmi*/ 0,
3129 				      /*rcap_buf*/ (uint8_t *)rcaplong,
3130 				      /*rcap_buf_len*/ sizeof(*rcaplong),
3131 				      /*sense_len*/ SSD_FULL_SIZE,
3132 				      /*timeout*/ da_default_timeout * 1000);
3133 		start_ccb->ccb_h.ccb_bp = NULL;
3134 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16;
3135 		xpt_action(start_ccb);
3136 		break;
3137 	}
3138 	case DA_STATE_PROBE_LBP:
3139 	{
3140 		struct scsi_vpd_logical_block_prov *lbp;
3141 
3142 		if (!scsi_vpd_supported_page(periph, SVPD_LBP)) {
3143 			/*
3144 			 * If we get here we don't support any SBC-3 delete
3145 			 * methods with UNMAP as the Logical Block Provisioning
3146 			 * VPD page support is required for devices which
3147 			 * support it according to T10/1799-D Revision 31
3148 			 * however older revisions of the spec don't mandate
3149 			 * this so we currently don't remove these methods
3150 			 * from the available set.
3151 			 */
3152 			softc->state = DA_STATE_PROBE_BLK_LIMITS;
3153 			goto skipstate;
3154 		}
3155 
3156 		lbp = (struct scsi_vpd_logical_block_prov *)
3157 			malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO);
3158 
3159 		if (lbp == NULL) {
3160 			printf("dastart: Couldn't malloc lbp data\n");
3161 			/* da_free_periph??? */
3162 			break;
3163 		}
3164 
3165 		scsi_inquiry(&start_ccb->csio,
3166 			     /*retries*/da_retry_count,
3167 			     /*cbfcnp*/dadone,
3168 			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3169 			     /*inq_buf*/(u_int8_t *)lbp,
3170 			     /*inq_len*/sizeof(*lbp),
3171 			     /*evpd*/TRUE,
3172 			     /*page_code*/SVPD_LBP,
3173 			     /*sense_len*/SSD_MIN_SIZE,
3174 			     /*timeout*/da_default_timeout * 1000);
3175 		start_ccb->ccb_h.ccb_bp = NULL;
3176 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP;
3177 		xpt_action(start_ccb);
3178 		break;
3179 	}
3180 	case DA_STATE_PROBE_BLK_LIMITS:
3181 	{
3182 		struct scsi_vpd_block_limits *block_limits;
3183 
3184 		if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) {
3185 			/* Not supported skip to next probe */
3186 			softc->state = DA_STATE_PROBE_BDC;
3187 			goto skipstate;
3188 		}
3189 
3190 		block_limits = (struct scsi_vpd_block_limits *)
3191 			malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO);
3192 
3193 		if (block_limits == NULL) {
3194 			printf("dastart: Couldn't malloc block_limits data\n");
3195 			/* da_free_periph??? */
3196 			break;
3197 		}
3198 
3199 		scsi_inquiry(&start_ccb->csio,
3200 			     /*retries*/da_retry_count,
3201 			     /*cbfcnp*/dadone,
3202 			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3203 			     /*inq_buf*/(u_int8_t *)block_limits,
3204 			     /*inq_len*/sizeof(*block_limits),
3205 			     /*evpd*/TRUE,
3206 			     /*page_code*/SVPD_BLOCK_LIMITS,
3207 			     /*sense_len*/SSD_MIN_SIZE,
3208 			     /*timeout*/da_default_timeout * 1000);
3209 		start_ccb->ccb_h.ccb_bp = NULL;
3210 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS;
3211 		xpt_action(start_ccb);
3212 		break;
3213 	}
3214 	case DA_STATE_PROBE_BDC:
3215 	{
3216 		struct scsi_vpd_block_characteristics *bdc;
3217 
3218 		if (!scsi_vpd_supported_page(periph, SVPD_BDC)) {
3219 			softc->state = DA_STATE_PROBE_ATA;
3220 			goto skipstate;
3221 		}
3222 
3223 		bdc = (struct scsi_vpd_block_characteristics *)
3224 			malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
3225 
3226 		if (bdc == NULL) {
3227 			printf("dastart: Couldn't malloc bdc data\n");
3228 			/* da_free_periph??? */
3229 			break;
3230 		}
3231 
3232 		scsi_inquiry(&start_ccb->csio,
3233 			     /*retries*/da_retry_count,
3234 			     /*cbfcnp*/dadone,
3235 			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3236 			     /*inq_buf*/(u_int8_t *)bdc,
3237 			     /*inq_len*/sizeof(*bdc),
3238 			     /*evpd*/TRUE,
3239 			     /*page_code*/SVPD_BDC,
3240 			     /*sense_len*/SSD_MIN_SIZE,
3241 			     /*timeout*/da_default_timeout * 1000);
3242 		start_ccb->ccb_h.ccb_bp = NULL;
3243 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC;
3244 		xpt_action(start_ccb);
3245 		break;
3246 	}
3247 	case DA_STATE_PROBE_ATA:
3248 	{
3249 		struct ata_params *ata_params;
3250 
3251 		if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
3252 			if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
3253 			 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
3254 				/*
3255 				 * Note that if the ATA VPD page isn't
3256 				 * supported, we aren't talking to an ATA
3257 				 * device anyway.  Support for that VPD
3258 				 * page is mandatory for SCSI to ATA (SAT)
3259 				 * translation layers.
3260 				 */
3261 				softc->state = DA_STATE_PROBE_ZONE;
3262 				goto skipstate;
3263 			}
3264 			daprobedone(periph, start_ccb);
3265 			break;
3266 		}
3267 
3268 		ata_params = (struct ata_params*)
3269 			malloc(sizeof(*ata_params), M_SCSIDA,M_NOWAIT|M_ZERO);
3270 
3271 		if (ata_params == NULL) {
3272 			xpt_print(periph->path, "Couldn't malloc ata_params "
3273 			    "data\n");
3274 			/* da_free_periph??? */
3275 			break;
3276 		}
3277 
3278 		scsi_ata_identify(&start_ccb->csio,
3279 				  /*retries*/da_retry_count,
3280 				  /*cbfcnp*/dadone,
3281                                   /*tag_action*/MSG_SIMPLE_Q_TAG,
3282 				  /*data_ptr*/(u_int8_t *)ata_params,
3283 				  /*dxfer_len*/sizeof(*ata_params),
3284 				  /*sense_len*/SSD_FULL_SIZE,
3285 				  /*timeout*/da_default_timeout * 1000);
3286 		start_ccb->ccb_h.ccb_bp = NULL;
3287 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA;
3288 		xpt_action(start_ccb);
3289 		break;
3290 	}
3291 	case DA_STATE_PROBE_ATA_LOGDIR:
3292 	{
3293 		struct ata_gp_log_dir *log_dir;
3294 		int retval;
3295 
3296 		retval = 0;
3297 
3298 		if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) {
3299 			/*
3300 			 * If we don't have log support, not much point in
3301 			 * trying to probe zone support.
3302 			 */
3303 			daprobedone(periph, start_ccb);
3304 			break;
3305 		}
3306 
3307 		/*
3308 		 * If we have an ATA device (the SCSI ATA Information VPD
3309 		 * page should be present and the ATA identify should have
3310 		 * succeeded) and it supports logs, ask for the log directory.
3311 		 */
3312 
3313 		log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO);
3314 		if (log_dir == NULL) {
3315 			xpt_print(periph->path, "Couldn't malloc log_dir "
3316 			    "data\n");
3317 			daprobedone(periph, start_ccb);
3318 			break;
3319 		}
3320 
3321 		retval = scsi_ata_read_log(&start_ccb->csio,
3322 		    /*retries*/ da_retry_count,
3323 		    /*cbfcnp*/ dadone,
3324 		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3325 		    /*log_address*/ ATA_LOG_DIRECTORY,
3326 		    /*page_number*/ 0,
3327 		    /*block_count*/ 1,
3328 		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3329 				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3330 		    /*data_ptr*/ (uint8_t *)log_dir,
3331 		    /*dxfer_len*/ sizeof(*log_dir),
3332 		    /*sense_len*/ SSD_FULL_SIZE,
3333 		    /*timeout*/ da_default_timeout * 1000);
3334 
3335 		if (retval != 0) {
3336 			xpt_print(periph->path, "scsi_ata_read_log() failed!");
3337 			free(log_dir, M_SCSIDA);
3338 			daprobedone(periph, start_ccb);
3339 			break;
3340 		}
3341 		start_ccb->ccb_h.ccb_bp = NULL;
3342 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR;
3343 		xpt_action(start_ccb);
3344 		break;
3345 	}
3346 	case DA_STATE_PROBE_ATA_IDDIR:
3347 	{
3348 		struct ata_identify_log_pages *id_dir;
3349 		int retval;
3350 
3351 		retval = 0;
3352 
3353 		/*
3354 		 * Check here to see whether the Identify Device log is
3355 		 * supported in the directory of logs.  If so, continue
3356 		 * with requesting the log of identify device pages.
3357 		 */
3358 		if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) {
3359 			daprobedone(periph, start_ccb);
3360 			break;
3361 		}
3362 
3363 		id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO);
3364 		if (id_dir == NULL) {
3365 			xpt_print(periph->path, "Couldn't malloc id_dir "
3366 			    "data\n");
3367 			daprobedone(periph, start_ccb);
3368 			break;
3369 		}
3370 
3371 		retval = scsi_ata_read_log(&start_ccb->csio,
3372 		    /*retries*/ da_retry_count,
3373 		    /*cbfcnp*/ dadone,
3374 		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3375 		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3376 		    /*page_number*/ ATA_IDL_PAGE_LIST,
3377 		    /*block_count*/ 1,
3378 		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3379 				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3380 		    /*data_ptr*/ (uint8_t *)id_dir,
3381 		    /*dxfer_len*/ sizeof(*id_dir),
3382 		    /*sense_len*/ SSD_FULL_SIZE,
3383 		    /*timeout*/ da_default_timeout * 1000);
3384 
3385 		if (retval != 0) {
3386 			xpt_print(periph->path, "scsi_ata_read_log() failed!");
3387 			free(id_dir, M_SCSIDA);
3388 			daprobedone(periph, start_ccb);
3389 			break;
3390 		}
3391 		start_ccb->ccb_h.ccb_bp = NULL;
3392 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR;
3393 		xpt_action(start_ccb);
3394 		break;
3395 	}
3396 	case DA_STATE_PROBE_ATA_SUP:
3397 	{
3398 		struct ata_identify_log_sup_cap *sup_cap;
3399 		int retval;
3400 
3401 		retval = 0;
3402 
3403 		/*
3404 		 * Check here to see whether the Supported Capabilities log
3405 		 * is in the list of Identify Device logs.
3406 		 */
3407 		if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) {
3408 			daprobedone(periph, start_ccb);
3409 			break;
3410 		}
3411 
3412 		sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO);
3413 		if (sup_cap == NULL) {
3414 			xpt_print(periph->path, "Couldn't malloc sup_cap "
3415 			    "data\n");
3416 			daprobedone(periph, start_ccb);
3417 			break;
3418 		}
3419 
3420 		retval = scsi_ata_read_log(&start_ccb->csio,
3421 		    /*retries*/ da_retry_count,
3422 		    /*cbfcnp*/ dadone,
3423 		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3424 		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3425 		    /*page_number*/ ATA_IDL_SUP_CAP,
3426 		    /*block_count*/ 1,
3427 		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3428 				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3429 		    /*data_ptr*/ (uint8_t *)sup_cap,
3430 		    /*dxfer_len*/ sizeof(*sup_cap),
3431 		    /*sense_len*/ SSD_FULL_SIZE,
3432 		    /*timeout*/ da_default_timeout * 1000);
3433 
3434 		if (retval != 0) {
3435 			xpt_print(periph->path, "scsi_ata_read_log() failed!");
3436 			free(sup_cap, M_SCSIDA);
3437 			daprobedone(periph, start_ccb);
3438 			break;
3439 
3440 		}
3441 
3442 		start_ccb->ccb_h.ccb_bp = NULL;
3443 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP;
3444 		xpt_action(start_ccb);
3445 		break;
3446 	}
3447 	case DA_STATE_PROBE_ATA_ZONE:
3448 	{
3449 		struct ata_zoned_info_log *ata_zone;
3450 		int retval;
3451 
3452 		retval = 0;
3453 
3454 		/*
3455 		 * Check here to see whether the zoned device information
3456 		 * page is supported.  If so, continue on to request it.
3457 		 * If not, skip to DA_STATE_PROBE_LOG or done.
3458 		 */
3459 		if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) {
3460 			daprobedone(periph, start_ccb);
3461 			break;
3462 		}
3463 		ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA,
3464 				  M_NOWAIT|M_ZERO);
3465 		if (ata_zone == NULL) {
3466 			xpt_print(periph->path, "Couldn't malloc ata_zone "
3467 			    "data\n");
3468 			daprobedone(periph, start_ccb);
3469 			break;
3470 		}
3471 
3472 		retval = scsi_ata_read_log(&start_ccb->csio,
3473 		    /*retries*/ da_retry_count,
3474 		    /*cbfcnp*/ dadone,
3475 		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3476 		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3477 		    /*page_number*/ ATA_IDL_ZDI,
3478 		    /*block_count*/ 1,
3479 		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3480 				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3481 		    /*data_ptr*/ (uint8_t *)ata_zone,
3482 		    /*dxfer_len*/ sizeof(*ata_zone),
3483 		    /*sense_len*/ SSD_FULL_SIZE,
3484 		    /*timeout*/ da_default_timeout * 1000);
3485 
3486 		if (retval != 0) {
3487 			xpt_print(periph->path, "scsi_ata_read_log() failed!");
3488 			free(ata_zone, M_SCSIDA);
3489 			daprobedone(periph, start_ccb);
3490 			break;
3491 		}
3492 		start_ccb->ccb_h.ccb_bp = NULL;
3493 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE;
3494 		xpt_action(start_ccb);
3495 
3496 		break;
3497 	}
3498 	case DA_STATE_PROBE_ZONE:
3499 	{
3500 		struct scsi_vpd_zoned_bdc *bdc;
3501 
3502 		/*
3503 		 * Note that this page will be supported for SCSI protocol
3504 		 * devices that support ZBC (SMR devices), as well as ATA
3505 		 * protocol devices that are behind a SAT (SCSI to ATA
3506 		 * Translation) layer that supports converting ZBC commands
3507 		 * to their ZAC equivalents.
3508 		 */
3509 		if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) {
3510 			daprobedone(periph, start_ccb);
3511 			break;
3512 		}
3513 		bdc = (struct scsi_vpd_zoned_bdc *)
3514 			malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
3515 
3516 		if (bdc == NULL) {
3517 			xpt_release_ccb(start_ccb);
3518 			xpt_print(periph->path, "Couldn't malloc zone VPD "
3519 			    "data\n");
3520 			break;
3521 		}
3522 		scsi_inquiry(&start_ccb->csio,
3523 			     /*retries*/da_retry_count,
3524 			     /*cbfcnp*/dadone,
3525 			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3526 			     /*inq_buf*/(u_int8_t *)bdc,
3527 			     /*inq_len*/sizeof(*bdc),
3528 			     /*evpd*/TRUE,
3529 			     /*page_code*/SVPD_ZONED_BDC,
3530 			     /*sense_len*/SSD_FULL_SIZE,
3531 			     /*timeout*/da_default_timeout * 1000);
3532 		start_ccb->ccb_h.ccb_bp = NULL;
3533 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE;
3534 		xpt_action(start_ccb);
3535 		break;
3536 	}
3537 	}
3538 }
3539 
3540 /*
3541  * In each of the methods below, while its the caller's
3542  * responsibility to ensure the request will fit into a
3543  * single device request, we might have changed the delete
3544  * method due to the device incorrectly advertising either
3545  * its supported methods or limits.
3546  *
3547  * To prevent this causing further issues we validate the
3548  * against the methods limits, and warn which would
3549  * otherwise be unnecessary.
3550  */
3551 static void
3552 da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3553 {
3554 	struct da_softc *softc = (struct da_softc *)periph->softc;;
3555 	struct bio *bp1;
3556 	uint8_t *buf = softc->unmap_buf;
3557 	struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE];
3558 	uint64_t lba, lastlba = (uint64_t)-1;
3559 	uint64_t totalcount = 0;
3560 	uint64_t count;
3561 	uint32_t c, lastcount = 0, ranges = 0;
3562 
3563 	/*
3564 	 * Currently this doesn't take the UNMAP
3565 	 * Granularity and Granularity Alignment
3566 	 * fields into account.
3567 	 *
3568 	 * This could result in both unoptimal unmap
3569 	 * requests as as well as UNMAP calls unmapping
3570 	 * fewer LBA's than requested.
3571 	 */
3572 
3573 	bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
3574 	bp1 = bp;
3575 	do {
3576 		/*
3577 		 * Note: ada and da are different in how they store the
3578 		 * pending bp's in a trim. ada stores all of them in the
3579 		 * trim_req.bps. da stores all but the first one in the
3580 		 * delete_run_queue. ada then completes all the bps in
3581 		 * its adadone() loop. da completes all the bps in the
3582 		 * delete_run_queue in dadone, and relies on the biodone
3583 		 * after to complete. This should be reconciled since there's
3584 		 * no real reason to do it differently. XXX
3585 		 */
3586 		if (bp1 != bp)
3587 			bioq_insert_tail(&softc->delete_run_queue, bp1);
3588 		lba = bp1->bio_pblkno;
3589 		count = bp1->bio_bcount / softc->params.secsize;
3590 
3591 		/* Try to extend the previous range. */
3592 		if (lba == lastlba) {
3593 			c = omin(count, UNMAP_RANGE_MAX - lastcount);
3594 			lastlba += c;
3595 			lastcount += c;
3596 			scsi_ulto4b(lastcount, d[ranges - 1].length);
3597 			count -= c;
3598 			lba += c;
3599 			totalcount += c;
3600 		} else if ((softc->quirks & DA_Q_STRICT_UNMAP) &&
3601 		    softc->unmap_gran != 0) {
3602 			/* Align length of the previous range. */
3603 			if ((c = lastcount % softc->unmap_gran) != 0) {
3604 				if (lastcount <= c) {
3605 					totalcount -= lastcount;
3606 					lastlba = (uint64_t)-1;
3607 					lastcount = 0;
3608 					ranges--;
3609 				} else {
3610 					totalcount -= c;
3611 					lastlba -= c;
3612 					lastcount -= c;
3613 					scsi_ulto4b(lastcount, d[ranges - 1].length);
3614 				}
3615 			}
3616 			/* Align beginning of the new range. */
3617 			c = (lba - softc->unmap_gran_align) % softc->unmap_gran;
3618 			if (c != 0) {
3619 				c = softc->unmap_gran - c;
3620 				if (count <= c) {
3621 					count = 0;
3622 				} else {
3623 					lba += c;
3624 					count -= c;
3625 				}
3626 			}
3627 		}
3628 
3629 		while (count > 0) {
3630 			c = omin(count, UNMAP_RANGE_MAX);
3631 			if (totalcount + c > softc->unmap_max_lba ||
3632 			    ranges >= softc->unmap_max_ranges) {
3633 				xpt_print(periph->path,
3634 				    "%s issuing short delete %ld > %ld"
3635 				    "|| %d >= %d",
3636 				    da_delete_method_desc[softc->delete_method],
3637 				    totalcount + c, softc->unmap_max_lba,
3638 				    ranges, softc->unmap_max_ranges);
3639 				break;
3640 			}
3641 			scsi_u64to8b(lba, d[ranges].lba);
3642 			scsi_ulto4b(c, d[ranges].length);
3643 			lba += c;
3644 			totalcount += c;
3645 			ranges++;
3646 			count -= c;
3647 			lastlba = lba;
3648 			lastcount = c;
3649 		}
3650 		bp1 = cam_iosched_next_trim(softc->cam_iosched);
3651 		if (bp1 == NULL)
3652 			break;
3653 		if (ranges >= softc->unmap_max_ranges ||
3654 		    totalcount + bp1->bio_bcount /
3655 		    softc->params.secsize > softc->unmap_max_lba) {
3656 			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
3657 			break;
3658 		}
3659 	} while (1);
3660 
3661 	/* Align length of the last range. */
3662 	if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 &&
3663 	    (c = lastcount % softc->unmap_gran) != 0) {
3664 		if (lastcount <= c)
3665 			ranges--;
3666 		else
3667 			scsi_ulto4b(lastcount - c, d[ranges - 1].length);
3668 	}
3669 
3670 	scsi_ulto2b(ranges * 16 + 6, &buf[0]);
3671 	scsi_ulto2b(ranges * 16, &buf[2]);
3672 
3673 	scsi_unmap(&ccb->csio,
3674 		   /*retries*/da_retry_count,
3675 		   /*cbfcnp*/dadone,
3676 		   /*tag_action*/MSG_SIMPLE_Q_TAG,
3677 		   /*byte2*/0,
3678 		   /*data_ptr*/ buf,
3679 		   /*dxfer_len*/ ranges * 16 + 8,
3680 		   /*sense_len*/SSD_FULL_SIZE,
3681 		   da_default_timeout * 1000);
3682 	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
3683 	ccb->ccb_h.flags |= CAM_UNLOCKED;
3684 	cam_iosched_submit_trim(softc->cam_iosched);
3685 }
3686 
3687 static void
3688 da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3689 {
3690 	struct da_softc *softc = (struct da_softc *)periph->softc;
3691 	struct bio *bp1;
3692 	uint8_t *buf = softc->unmap_buf;
3693 	uint64_t lastlba = (uint64_t)-1;
3694 	uint64_t count;
3695 	uint64_t lba;
3696 	uint32_t lastcount = 0, c, requestcount;
3697 	int ranges = 0, off, block_count;
3698 
3699 	bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
3700 	bp1 = bp;
3701 	do {
3702 		if (bp1 != bp)//XXX imp XXX
3703 			bioq_insert_tail(&softc->delete_run_queue, bp1);
3704 		lba = bp1->bio_pblkno;
3705 		count = bp1->bio_bcount / softc->params.secsize;
3706 		requestcount = count;
3707 
3708 		/* Try to extend the previous range. */
3709 		if (lba == lastlba) {
3710 			c = omin(count, ATA_DSM_RANGE_MAX - lastcount);
3711 			lastcount += c;
3712 			off = (ranges - 1) * 8;
3713 			buf[off + 6] = lastcount & 0xff;
3714 			buf[off + 7] = (lastcount >> 8) & 0xff;
3715 			count -= c;
3716 			lba += c;
3717 		}
3718 
3719 		while (count > 0) {
3720 			c = omin(count, ATA_DSM_RANGE_MAX);
3721 			off = ranges * 8;
3722 
3723 			buf[off + 0] = lba & 0xff;
3724 			buf[off + 1] = (lba >> 8) & 0xff;
3725 			buf[off + 2] = (lba >> 16) & 0xff;
3726 			buf[off + 3] = (lba >> 24) & 0xff;
3727 			buf[off + 4] = (lba >> 32) & 0xff;
3728 			buf[off + 5] = (lba >> 40) & 0xff;
3729 			buf[off + 6] = c & 0xff;
3730 			buf[off + 7] = (c >> 8) & 0xff;
3731 			lba += c;
3732 			ranges++;
3733 			count -= c;
3734 			lastcount = c;
3735 			if (count != 0 && ranges == softc->trim_max_ranges) {
3736 				xpt_print(periph->path,
3737 				    "%s issuing short delete %ld > %ld\n",
3738 				    da_delete_method_desc[softc->delete_method],
3739 				    requestcount,
3740 				    (softc->trim_max_ranges - ranges) *
3741 				    ATA_DSM_RANGE_MAX);
3742 				break;
3743 			}
3744 		}
3745 		lastlba = lba;
3746 		bp1 = cam_iosched_next_trim(softc->cam_iosched);
3747 		if (bp1 == NULL)
3748 			break;
3749 		if (bp1->bio_bcount / softc->params.secsize >
3750 		    (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) {
3751 			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
3752 			break;
3753 		}
3754 	} while (1);
3755 
3756 	block_count = howmany(ranges, ATA_DSM_BLK_RANGES);
3757 	scsi_ata_trim(&ccb->csio,
3758 		      /*retries*/da_retry_count,
3759 		      /*cbfcnp*/dadone,
3760 		      /*tag_action*/MSG_SIMPLE_Q_TAG,
3761 		      block_count,
3762 		      /*data_ptr*/buf,
3763 		      /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE,
3764 		      /*sense_len*/SSD_FULL_SIZE,
3765 		      da_default_timeout * 1000);
3766 	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
3767 	ccb->ccb_h.flags |= CAM_UNLOCKED;
3768 	cam_iosched_submit_trim(softc->cam_iosched);
3769 }
3770 
3771 /*
3772  * We calculate ws_max_blks here based off d_delmaxsize instead
3773  * of using softc->ws_max_blks as it is absolute max for the
3774  * device not the protocol max which may well be lower.
3775  */
3776 static void
3777 da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3778 {
3779 	struct da_softc *softc;
3780 	struct bio *bp1;
3781 	uint64_t ws_max_blks;
3782 	uint64_t lba;
3783 	uint64_t count; /* forward compat with WS32 */
3784 
3785 	softc = (struct da_softc *)periph->softc;
3786 	ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize;
3787 	lba = bp->bio_pblkno;
3788 	count = 0;
3789 	bp1 = bp;
3790 	do {
3791 		if (bp1 != bp)//XXX imp XXX
3792 			bioq_insert_tail(&softc->delete_run_queue, bp1);
3793 		count += bp1->bio_bcount / softc->params.secsize;
3794 		if (count > ws_max_blks) {
3795 			xpt_print(periph->path,
3796 			    "%s issuing short delete %ld > %ld\n",
3797 			    da_delete_method_desc[softc->delete_method],
3798 			    count, ws_max_blks);
3799 			count = omin(count, ws_max_blks);
3800 			break;
3801 		}
3802 		bp1 = cam_iosched_next_trim(softc->cam_iosched);
3803 		if (bp1 == NULL)
3804 			break;
3805 		if (lba + count != bp1->bio_pblkno ||
3806 		    count + bp1->bio_bcount /
3807 		    softc->params.secsize > ws_max_blks) {
3808 			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
3809 			break;
3810 		}
3811 	} while (1);
3812 
3813 	scsi_write_same(&ccb->csio,
3814 			/*retries*/da_retry_count,
3815 			/*cbfcnp*/dadone,
3816 			/*tag_action*/MSG_SIMPLE_Q_TAG,
3817 			/*byte2*/softc->delete_method ==
3818 			    DA_DELETE_ZERO ? 0 : SWS_UNMAP,
3819 			softc->delete_method == DA_DELETE_WS16 ? 16 : 10,
3820 			/*lba*/lba,
3821 			/*block_count*/count,
3822 			/*data_ptr*/ __DECONST(void *, zero_region),
3823 			/*dxfer_len*/ softc->params.secsize,
3824 			/*sense_len*/SSD_FULL_SIZE,
3825 			da_default_timeout * 1000);
3826 	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
3827 	ccb->ccb_h.flags |= CAM_UNLOCKED;
3828 	cam_iosched_submit_trim(softc->cam_iosched);
3829 }
3830 
3831 static int
3832 cmd6workaround(union ccb *ccb)
3833 {
3834 	struct scsi_rw_6 cmd6;
3835 	struct scsi_rw_10 *cmd10;
3836 	struct da_softc *softc;
3837 	u_int8_t *cdb;
3838 	struct bio *bp;
3839 	int frozen;
3840 
3841 	cdb = ccb->csio.cdb_io.cdb_bytes;
3842 	softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
3843 
3844 	if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) {
3845 		da_delete_methods old_method = softc->delete_method;
3846 
3847 		/*
3848 		 * Typically there are two reasons for failure here
3849 		 * 1. Delete method was detected as supported but isn't
3850 		 * 2. Delete failed due to invalid params e.g. too big
3851 		 *
3852 		 * While we will attempt to choose an alternative delete method
3853 		 * this may result in short deletes if the existing delete
3854 		 * requests from geom are big for the new method chosen.
3855 		 *
3856 		 * This method assumes that the error which triggered this
3857 		 * will not retry the io otherwise a panic will occur
3858 		 */
3859 		dadeleteflag(softc, old_method, 0);
3860 		dadeletemethodchoose(softc, DA_DELETE_DISABLE);
3861 		if (softc->delete_method == DA_DELETE_DISABLE)
3862 			xpt_print(ccb->ccb_h.path,
3863 				  "%s failed, disabling BIO_DELETE\n",
3864 				  da_delete_method_desc[old_method]);
3865 		else
3866 			xpt_print(ccb->ccb_h.path,
3867 				  "%s failed, switching to %s BIO_DELETE\n",
3868 				  da_delete_method_desc[old_method],
3869 				  da_delete_method_desc[softc->delete_method]);
3870 
3871 		while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL)
3872 			cam_iosched_queue_work(softc->cam_iosched, bp);
3873 		cam_iosched_queue_work(softc->cam_iosched,
3874 		    (struct bio *)ccb->ccb_h.ccb_bp);
3875 		ccb->ccb_h.ccb_bp = NULL;
3876 		return (0);
3877 	}
3878 
3879 	/* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */
3880 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
3881 	    (*cdb == PREVENT_ALLOW) &&
3882 	    (softc->quirks & DA_Q_NO_PREVENT) == 0) {
3883 		if (bootverbose)
3884 			xpt_print(ccb->ccb_h.path,
3885 			    "PREVENT ALLOW MEDIUM REMOVAL not supported.\n");
3886 		softc->quirks |= DA_Q_NO_PREVENT;
3887 		return (0);
3888 	}
3889 
3890 	/* Detect unsupported SYNCHRONIZE CACHE(10). */
3891 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
3892 	    (*cdb == SYNCHRONIZE_CACHE) &&
3893 	    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
3894 		if (bootverbose)
3895 			xpt_print(ccb->ccb_h.path,
3896 			    "SYNCHRONIZE CACHE(10) not supported.\n");
3897 		softc->quirks |= DA_Q_NO_SYNC_CACHE;
3898 		softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE;
3899 		return (0);
3900 	}
3901 
3902 	/* Translation only possible if CDB is an array and cmd is R/W6 */
3903 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
3904 	    (*cdb != READ_6 && *cdb != WRITE_6))
3905 		return 0;
3906 
3907 	xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
3908 	    "increasing minimum_cmd_size to 10.\n");
3909  	softc->minimum_cmd_size = 10;
3910 
3911 	bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
3912 	cmd10 = (struct scsi_rw_10 *)cdb;
3913 	cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
3914 	cmd10->byte2 = 0;
3915 	scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
3916 	cmd10->reserved = 0;
3917 	scsi_ulto2b(cmd6.length, cmd10->length);
3918 	cmd10->control = cmd6.control;
3919 	ccb->csio.cdb_len = sizeof(*cmd10);
3920 
3921 	/* Requeue request, unfreezing queue if necessary */
3922 	frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
3923  	ccb->ccb_h.status = CAM_REQUEUE_REQ;
3924 	xpt_action(ccb);
3925 	if (frozen) {
3926 		cam_release_devq(ccb->ccb_h.path,
3927 				 /*relsim_flags*/0,
3928 				 /*reduction*/0,
3929 				 /*timeout*/0,
3930 				 /*getcount_only*/0);
3931 	}
3932 	return (ERESTART);
3933 }
3934 
3935 static void
3936 dazonedone(struct cam_periph *periph, union ccb *ccb)
3937 {
3938 	struct da_softc *softc;
3939 	struct bio *bp;
3940 
3941 	softc = periph->softc;
3942 	bp = (struct bio *)ccb->ccb_h.ccb_bp;
3943 
3944 	switch (bp->bio_zone.zone_cmd) {
3945 	case DISK_ZONE_OPEN:
3946 	case DISK_ZONE_CLOSE:
3947 	case DISK_ZONE_FINISH:
3948 	case DISK_ZONE_RWP:
3949 		break;
3950 	case DISK_ZONE_REPORT_ZONES: {
3951 		uint32_t avail_len;
3952 		struct disk_zone_report *rep;
3953 		struct scsi_report_zones_hdr *hdr;
3954 		struct scsi_report_zones_desc *desc;
3955 		struct disk_zone_rep_entry *entry;
3956 		uint32_t num_alloced, hdr_len, num_avail;
3957 		uint32_t num_to_fill, i;
3958 		int ata;
3959 
3960 		rep = &bp->bio_zone.zone_params.report;
3961 		avail_len = ccb->csio.dxfer_len - ccb->csio.resid;
3962 		/*
3963 		 * Note that bio_resid isn't normally used for zone
3964 		 * commands, but it is used by devstat_end_transaction_bio()
3965 		 * to determine how much data was transferred.  Because
3966 		 * the size of the SCSI/ATA data structures is different
3967 		 * than the size of the BIO interface structures, the
3968 		 * amount of data actually transferred from the drive will
3969 		 * be different than the amount of data transferred to
3970 		 * the user.
3971 		 */
3972 		bp->bio_resid = ccb->csio.resid;
3973 		num_alloced = rep->entries_allocated;
3974 		hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr;
3975 		if (avail_len < sizeof(*hdr)) {
3976 			/*
3977 			 * Is there a better error than EIO here?  We asked
3978 			 * for at least the header, and we got less than
3979 			 * that.
3980 			 */
3981 			bp->bio_error = EIO;
3982 			bp->bio_flags |= BIO_ERROR;
3983 			bp->bio_resid = bp->bio_bcount;
3984 			break;
3985 		}
3986 
3987 		if (softc->zone_interface == DA_ZONE_IF_ATA_PASS)
3988 			ata = 1;
3989 		else
3990 			ata = 0;
3991 
3992 		hdr_len = ata ? le32dec(hdr->length) :
3993 				scsi_4btoul(hdr->length);
3994 		if (hdr_len > 0)
3995 			rep->entries_available = hdr_len / sizeof(*desc);
3996 		else
3997 			rep->entries_available = 0;
3998 		/*
3999 		 * NOTE: using the same values for the BIO version of the
4000 		 * same field as the SCSI/ATA values.  This means we could
4001 		 * get some additional values that aren't defined in bio.h
4002 		 * if more values of the same field are defined later.
4003 		 */
4004 		rep->header.same = hdr->byte4 & SRZ_SAME_MASK;
4005 		rep->header.maximum_lba = ata ?  le64dec(hdr->maximum_lba) :
4006 					  scsi_8btou64(hdr->maximum_lba);
4007 		/*
4008 		 * If the drive reports no entries that match the query,
4009 		 * we're done.
4010 		 */
4011 		if (hdr_len == 0) {
4012 			rep->entries_filled = 0;
4013 			break;
4014 		}
4015 
4016 		num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc),
4017 				hdr_len / sizeof(*desc));
4018 		/*
4019 		 * If the drive didn't return any data, then we're done.
4020 		 */
4021 		if (num_avail == 0) {
4022 			rep->entries_filled = 0;
4023 			break;
4024 		}
4025 
4026 		num_to_fill = min(num_avail, rep->entries_allocated);
4027 		/*
4028 		 * If the user didn't allocate any entries for us to fill,
4029 		 * we're done.
4030 		 */
4031 		if (num_to_fill == 0) {
4032 			rep->entries_filled = 0;
4033 			break;
4034 		}
4035 
4036 		for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0];
4037 		     i < num_to_fill; i++, desc++, entry++) {
4038 			/*
4039 			 * NOTE: we're mapping the values here directly
4040 			 * from the SCSI/ATA bit definitions to the bio.h
4041 			 * definitions.  There is also a warning in
4042 			 * disk_zone.h, but the impact is that if
4043 			 * additional values are added in the SCSI/ATA
4044 			 * specs these will be visible to consumers of
4045 			 * this interface.
4046 			 */
4047 			entry->zone_type = desc->zone_type & SRZ_TYPE_MASK;
4048 			entry->zone_condition =
4049 			    (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
4050 			    SRZ_ZONE_COND_SHIFT;
4051 			entry->zone_flags |= desc->zone_flags &
4052 			    (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET);
4053 			entry->zone_length =
4054 			    ata ? le64dec(desc->zone_length) :
4055 				  scsi_8btou64(desc->zone_length);
4056 			entry->zone_start_lba =
4057 			    ata ? le64dec(desc->zone_start_lba) :
4058 				  scsi_8btou64(desc->zone_start_lba);
4059 			entry->write_pointer_lba =
4060 			    ata ? le64dec(desc->write_pointer_lba) :
4061 				  scsi_8btou64(desc->write_pointer_lba);
4062 		}
4063 		rep->entries_filled = num_to_fill;
4064 		break;
4065 	}
4066 	case DISK_ZONE_GET_PARAMS:
4067 	default:
4068 		/*
4069 		 * In theory we should not get a GET_PARAMS bio, since it
4070 		 * should be handled without queueing the command to the
4071 		 * drive.
4072 		 */
4073 		panic("%s: Invalid zone command %d", __func__,
4074 		    bp->bio_zone.zone_cmd);
4075 		break;
4076 	}
4077 
4078 	if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)
4079 		free(ccb->csio.data_ptr, M_SCSIDA);
4080 }
4081 
4082 static void
4083 dadone(struct cam_periph *periph, union ccb *done_ccb)
4084 {
4085 	struct da_softc *softc;
4086 	struct ccb_scsiio *csio;
4087 	u_int32_t  priority;
4088 	da_ccb_state state;
4089 
4090 	softc = (struct da_softc *)periph->softc;
4091 	priority = done_ccb->ccb_h.pinfo.priority;
4092 
4093 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n"));
4094 
4095 	csio = &done_ccb->csio;
4096 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4097 	if (csio->bio != NULL)
4098 		biotrack(csio->bio, __func__);
4099 #endif
4100 	state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;
4101 	switch (state) {
4102 	case DA_CCB_BUFFER_IO:
4103 	case DA_CCB_DELETE:
4104 	{
4105 		struct bio *bp, *bp1;
4106 
4107 		cam_periph_lock(periph);
4108 		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
4109 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
4110 			int error;
4111 			int sf;
4112 
4113 			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
4114 				sf = SF_RETRY_UA;
4115 			else
4116 				sf = 0;
4117 
4118 			error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
4119 			if (error == ERESTART) {
4120 				/*
4121 				 * A retry was scheduled, so
4122 				 * just return.
4123 				 */
4124 				cam_periph_unlock(periph);
4125 				return;
4126 			}
4127 			bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
4128 			if (error != 0) {
4129 				int queued_error;
4130 
4131 				/*
4132 				 * return all queued I/O with EIO, so that
4133 				 * the client can retry these I/Os in the
4134 				 * proper order should it attempt to recover.
4135 				 */
4136 				queued_error = EIO;
4137 
4138 				if (error == ENXIO
4139 				 && (softc->flags & DA_FLAG_PACK_INVALID)== 0) {
4140 					/*
4141 					 * Catastrophic error.  Mark our pack as
4142 					 * invalid.
4143 					 */
4144 					/*
4145 					 * XXX See if this is really a media
4146 					 * XXX change first?
4147 					 */
4148 					xpt_print(periph->path,
4149 					    "Invalidating pack\n");
4150 					softc->flags |= DA_FLAG_PACK_INVALID;
4151 #ifdef CAM_IO_STATS
4152 					softc->invalidations++;
4153 #endif
4154 					queued_error = ENXIO;
4155 				}
4156 				cam_iosched_flush(softc->cam_iosched, NULL,
4157 					   queued_error);
4158 				if (bp != NULL) {
4159 					bp->bio_error = error;
4160 					bp->bio_resid = bp->bio_bcount;
4161 					bp->bio_flags |= BIO_ERROR;
4162 				}
4163 			} else if (bp != NULL) {
4164 				if (state == DA_CCB_DELETE)
4165 					bp->bio_resid = 0;
4166 				else
4167 					bp->bio_resid = csio->resid;
4168 				bp->bio_error = 0;
4169 				if (bp->bio_resid != 0)
4170 					bp->bio_flags |= BIO_ERROR;
4171 			}
4172 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
4173 				cam_release_devq(done_ccb->ccb_h.path,
4174 						 /*relsim_flags*/0,
4175 						 /*reduction*/0,
4176 						 /*timeout*/0,
4177 						 /*getcount_only*/0);
4178 		} else if (bp != NULL) {
4179 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
4180 				panic("REQ_CMP with QFRZN");
4181 			if (bp->bio_cmd == BIO_ZONE)
4182 				dazonedone(periph, done_ccb);
4183 			else if (state == DA_CCB_DELETE)
4184 				bp->bio_resid = 0;
4185 			else
4186 				bp->bio_resid = csio->resid;
4187 			if ((csio->resid > 0)
4188 			 && (bp->bio_cmd != BIO_ZONE))
4189 				bp->bio_flags |= BIO_ERROR;
4190 			if (softc->error_inject != 0) {
4191 				bp->bio_error = softc->error_inject;
4192 				bp->bio_resid = bp->bio_bcount;
4193 				bp->bio_flags |= BIO_ERROR;
4194 				softc->error_inject = 0;
4195 			}
4196 		}
4197 
4198 		if (bp != NULL)
4199 			biotrack(bp, __func__);
4200 		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
4201 		if (LIST_EMPTY(&softc->pending_ccbs))
4202 			softc->flags |= DA_FLAG_WAS_OTAG;
4203 
4204 		/*
4205 		 * We need to call cam_iosched before we call biodone so that we
4206 		 * don't measure any activity that happens in the completion
4207 		 * routine, which in the case of sendfile can be quite
4208 		 * extensive.
4209 		 */
4210 		cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
4211 		xpt_release_ccb(done_ccb);
4212 		if (state == DA_CCB_DELETE) {
4213 			TAILQ_HEAD(, bio) queue;
4214 
4215 			TAILQ_INIT(&queue);
4216 			TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue);
4217 			softc->delete_run_queue.insert_point = NULL;
4218 			/*
4219 			 * Normally, the xpt_release_ccb() above would make sure
4220 			 * that when we have more work to do, that work would
4221 			 * get kicked off. However, we specifically keep
4222 			 * delete_running set to 0 before the call above to
4223 			 * allow other I/O to progress when many BIO_DELETE
4224 			 * requests are pushed down. We set delete_running to 0
4225 			 * and call daschedule again so that we don't stall if
4226 			 * there are no other I/Os pending apart from BIO_DELETEs.
4227 			 */
4228 			cam_iosched_trim_done(softc->cam_iosched);
4229 			daschedule(periph);
4230 			cam_periph_unlock(periph);
4231 			while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
4232 				TAILQ_REMOVE(&queue, bp1, bio_queue);
4233 				bp1->bio_error = bp->bio_error;
4234 				if (bp->bio_flags & BIO_ERROR) {
4235 					bp1->bio_flags |= BIO_ERROR;
4236 					bp1->bio_resid = bp1->bio_bcount;
4237 				} else
4238 					bp1->bio_resid = 0;
4239 				biodone(bp1);
4240 			}
4241 		} else {
4242 			daschedule(periph);
4243 			cam_periph_unlock(periph);
4244 		}
4245 		if (bp != NULL)
4246 			biodone(bp);
4247 		return;
4248 	}
4249 	case DA_CCB_PROBE_RC:
4250 	case DA_CCB_PROBE_RC16:
4251 	{
4252 		struct	   scsi_read_capacity_data *rdcap;
4253 		struct     scsi_read_capacity_data_long *rcaplong;
4254 		char	   *announce_buf;
4255 		int	   lbp;
4256 
4257 		lbp = 0;
4258 		rdcap = NULL;
4259 		rcaplong = NULL;
4260 		/* XXX TODO: can this be a malloc? */
4261 		announce_buf = softc->announce_temp;
4262 		bzero(announce_buf, DA_ANNOUNCETMP_SZ);
4263 
4264 		if (state == DA_CCB_PROBE_RC)
4265 			rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
4266 		else
4267 			rcaplong = (struct scsi_read_capacity_data_long *)
4268 				csio->data_ptr;
4269 
4270 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4271 			struct disk_params *dp;
4272 			uint32_t block_size;
4273 			uint64_t maxsector;
4274 			u_int lalba;	/* Lowest aligned LBA. */
4275 
4276 			if (state == DA_CCB_PROBE_RC) {
4277 				block_size = scsi_4btoul(rdcap->length);
4278 				maxsector = scsi_4btoul(rdcap->addr);
4279 				lalba = 0;
4280 
4281 				/*
4282 				 * According to SBC-2, if the standard 10
4283 				 * byte READ CAPACITY command returns 2^32,
4284 				 * we should issue the 16 byte version of
4285 				 * the command, since the device in question
4286 				 * has more sectors than can be represented
4287 				 * with the short version of the command.
4288 				 */
4289 				if (maxsector == 0xffffffff) {
4290 					free(rdcap, M_SCSIDA);
4291 					xpt_release_ccb(done_ccb);
4292 					softc->state = DA_STATE_PROBE_RC16;
4293 					xpt_schedule(periph, priority);
4294 					return;
4295 				}
4296 			} else {
4297 				block_size = scsi_4btoul(rcaplong->length);
4298 				maxsector = scsi_8btou64(rcaplong->addr);
4299 				lalba = scsi_2btoul(rcaplong->lalba_lbp);
4300 			}
4301 
4302 			/*
4303 			 * Because GEOM code just will panic us if we
4304 			 * give them an 'illegal' value we'll avoid that
4305 			 * here.
4306 			 */
4307 			if (block_size == 0) {
4308 				block_size = 512;
4309 				if (maxsector == 0)
4310 					maxsector = -1;
4311 			}
4312 			if (block_size >= MAXPHYS) {
4313 				xpt_print(periph->path,
4314 				    "unsupportable block size %ju\n",
4315 				    (uintmax_t) block_size);
4316 				announce_buf = NULL;
4317 				cam_periph_invalidate(periph);
4318 			} else {
4319 				/*
4320 				 * We pass rcaplong into dasetgeom(),
4321 				 * because it will only use it if it is
4322 				 * non-NULL.
4323 				 */
4324 				dasetgeom(periph, block_size, maxsector,
4325 					  rcaplong, sizeof(*rcaplong));
4326 				lbp = (lalba & SRC16_LBPME_A);
4327 				dp = &softc->params;
4328 				snprintf(announce_buf, DA_ANNOUNCETMP_SZ,
4329 				    "%juMB (%ju %u byte sectors)",
4330 				    ((uintmax_t)dp->secsize * dp->sectors) /
4331 				     (1024 * 1024),
4332 				    (uintmax_t)dp->sectors, dp->secsize);
4333 			}
4334 		} else {
4335 			int	error;
4336 
4337 			/*
4338 			 * Retry any UNIT ATTENTION type errors.  They
4339 			 * are expected at boot.
4340 			 */
4341 			error = daerror(done_ccb, CAM_RETRY_SELTO,
4342 					SF_RETRY_UA|SF_NO_PRINT);
4343 			if (error == ERESTART) {
4344 				/*
4345 				 * A retry was scheduled, so
4346 				 * just return.
4347 				 */
4348 				return;
4349 			} else if (error != 0) {
4350 				int asc, ascq;
4351 				int sense_key, error_code;
4352 				int have_sense;
4353 				cam_status status;
4354 				struct ccb_getdev cgd;
4355 
4356 				/* Don't wedge this device's queue */
4357 				status = done_ccb->ccb_h.status;
4358 				if ((status & CAM_DEV_QFRZN) != 0)
4359 					cam_release_devq(done_ccb->ccb_h.path,
4360 							 /*relsim_flags*/0,
4361 							 /*reduction*/0,
4362 							 /*timeout*/0,
4363 							 /*getcount_only*/0);
4364 
4365 
4366 				xpt_setup_ccb(&cgd.ccb_h,
4367 					      done_ccb->ccb_h.path,
4368 					      CAM_PRIORITY_NORMAL);
4369 				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
4370 				xpt_action((union ccb *)&cgd);
4371 
4372 				if (scsi_extract_sense_ccb(done_ccb,
4373 				    &error_code, &sense_key, &asc, &ascq))
4374 					have_sense = TRUE;
4375 				else
4376 					have_sense = FALSE;
4377 
4378 				/*
4379 				 * If we tried READ CAPACITY(16) and failed,
4380 				 * fallback to READ CAPACITY(10).
4381 				 */
4382 				if ((state == DA_CCB_PROBE_RC16) &&
4383 				    (softc->flags & DA_FLAG_CAN_RC16) &&
4384 				    (((csio->ccb_h.status & CAM_STATUS_MASK) ==
4385 					CAM_REQ_INVALID) ||
4386 				     ((have_sense) &&
4387 				      (error_code == SSD_CURRENT_ERROR) &&
4388 				      (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) {
4389 					softc->flags &= ~DA_FLAG_CAN_RC16;
4390 					free(rdcap, M_SCSIDA);
4391 					xpt_release_ccb(done_ccb);
4392 					softc->state = DA_STATE_PROBE_RC;
4393 					xpt_schedule(periph, priority);
4394 					return;
4395 				}
4396 
4397 				/*
4398 				 * Attach to anything that claims to be a
4399 				 * direct access or optical disk device,
4400 				 * as long as it doesn't return a "Logical
4401 				 * unit not supported" (0x25) error.
4402 				 * "Internal Target Failure" (0x44) is also
4403 				 * special and typically means that the
4404 				 * device is a SATA drive behind a SATL
4405 				 * translation that's fallen into a
4406 				 * terminally fatal state.
4407 				 */
4408 				if ((have_sense)
4409 				 && (asc != 0x25) && (asc != 0x44)
4410 				 && (error_code == SSD_CURRENT_ERROR)) {
4411 					const char *sense_key_desc;
4412 					const char *asc_desc;
4413 
4414 					dasetgeom(periph, 512, -1, NULL, 0);
4415 					scsi_sense_desc(sense_key, asc, ascq,
4416 							&cgd.inq_data,
4417 							&sense_key_desc,
4418 							&asc_desc);
4419 					snprintf(announce_buf,
4420 					    DA_ANNOUNCETMP_SZ,
4421 					    "Attempt to query device "
4422 					    "size failed: %s, %s",
4423 					    sense_key_desc, asc_desc);
4424 				} else {
4425 					if (have_sense)
4426 						scsi_sense_print(
4427 							&done_ccb->csio);
4428 					else {
4429 						xpt_print(periph->path,
4430 						    "got CAM status %#x\n",
4431 						    done_ccb->ccb_h.status);
4432 					}
4433 
4434 					xpt_print(periph->path, "fatal error, "
4435 					    "failed to attach to device\n");
4436 
4437 					announce_buf = NULL;
4438 
4439 					/*
4440 					 * Free up resources.
4441 					 */
4442 					cam_periph_invalidate(periph);
4443 				}
4444 			}
4445 		}
4446 		free(csio->data_ptr, M_SCSIDA);
4447 		if (announce_buf != NULL &&
4448 		    ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) {
4449 			struct sbuf sb;
4450 
4451 			sbuf_new(&sb, softc->announcebuf, DA_ANNOUNCE_SZ,
4452 			    SBUF_FIXEDLEN);
4453 			xpt_announce_periph_sbuf(periph, &sb, announce_buf);
4454 			xpt_announce_quirks_sbuf(periph, &sb, softc->quirks,
4455 			    DA_Q_BIT_STRING);
4456 			sbuf_finish(&sb);
4457 			sbuf_putbuf(&sb);
4458 
4459 			/*
4460 			 * Create our sysctl variables, now that we know
4461 			 * we have successfully attached.
4462 			 */
4463 			/* increase the refcount */
4464 			if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
4465 
4466 				taskqueue_enqueue(taskqueue_thread,
4467 						  &softc->sysctl_task);
4468 			} else {
4469 				/* XXX This message is useless! */
4470 				xpt_print(periph->path, "fatal error, "
4471 				    "could not acquire reference count\n");
4472 			}
4473 		}
4474 
4475 		/* We already probed the device. */
4476 		if (softc->flags & DA_FLAG_PROBED) {
4477 			daprobedone(periph, done_ccb);
4478 			return;
4479 		}
4480 
4481 		/* Ensure re-probe doesn't see old delete. */
4482 		softc->delete_available = 0;
4483 		dadeleteflag(softc, DA_DELETE_ZERO, 1);
4484 		if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) {
4485 			/*
4486 			 * Based on older SBC-3 spec revisions
4487 			 * any of the UNMAP methods "may" be
4488 			 * available via LBP given this flag so
4489 			 * we flag all of them as available and
4490 			 * then remove those which further
4491 			 * probes confirm aren't available
4492 			 * later.
4493 			 *
4494 			 * We could also check readcap(16) p_type
4495 			 * flag to exclude one or more invalid
4496 			 * write same (X) types here
4497 			 */
4498 			dadeleteflag(softc, DA_DELETE_WS16, 1);
4499 			dadeleteflag(softc, DA_DELETE_WS10, 1);
4500 			dadeleteflag(softc, DA_DELETE_UNMAP, 1);
4501 
4502 			xpt_release_ccb(done_ccb);
4503 			softc->state = DA_STATE_PROBE_LBP;
4504 			xpt_schedule(periph, priority);
4505 			return;
4506 		}
4507 
4508 		xpt_release_ccb(done_ccb);
4509 		softc->state = DA_STATE_PROBE_BDC;
4510 		xpt_schedule(periph, priority);
4511 		return;
4512 	}
4513 	case DA_CCB_PROBE_LBP:
4514 	{
4515 		struct scsi_vpd_logical_block_prov *lbp;
4516 
4517 		lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr;
4518 
4519 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4520 			/*
4521 			 * T10/1799-D Revision 31 states at least one of these
4522 			 * must be supported but we don't currently enforce this.
4523 			 */
4524 			dadeleteflag(softc, DA_DELETE_WS16,
4525 				     (lbp->flags & SVPD_LBP_WS16));
4526 			dadeleteflag(softc, DA_DELETE_WS10,
4527 				     (lbp->flags & SVPD_LBP_WS10));
4528 			dadeleteflag(softc, DA_DELETE_UNMAP,
4529 				     (lbp->flags & SVPD_LBP_UNMAP));
4530 		} else {
4531 			int error;
4532 			error = daerror(done_ccb, CAM_RETRY_SELTO,
4533 					SF_RETRY_UA|SF_NO_PRINT);
4534 			if (error == ERESTART)
4535 				return;
4536 			else if (error != 0) {
4537 				if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4538 					/* Don't wedge this device's queue */
4539 					cam_release_devq(done_ccb->ccb_h.path,
4540 							 /*relsim_flags*/0,
4541 							 /*reduction*/0,
4542 							 /*timeout*/0,
4543 							 /*getcount_only*/0);
4544 				}
4545 
4546 				/*
4547 				 * Failure indicates we don't support any SBC-3
4548 				 * delete methods with UNMAP
4549 				 */
4550 			}
4551 		}
4552 
4553 		free(lbp, M_SCSIDA);
4554 		xpt_release_ccb(done_ccb);
4555 		softc->state = DA_STATE_PROBE_BLK_LIMITS;
4556 		xpt_schedule(periph, priority);
4557 		return;
4558 	}
4559 	case DA_CCB_PROBE_BLK_LIMITS:
4560 	{
4561 		struct scsi_vpd_block_limits *block_limits;
4562 
4563 		block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr;
4564 
4565 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4566 			uint32_t max_txfer_len = scsi_4btoul(
4567 				block_limits->max_txfer_len);
4568 			uint32_t max_unmap_lba_cnt = scsi_4btoul(
4569 				block_limits->max_unmap_lba_cnt);
4570 			uint32_t max_unmap_blk_cnt = scsi_4btoul(
4571 				block_limits->max_unmap_blk_cnt);
4572 			uint32_t unmap_gran = scsi_4btoul(
4573 				block_limits->opt_unmap_grain);
4574 			uint32_t unmap_gran_align = scsi_4btoul(
4575 				block_limits->unmap_grain_align);
4576 			uint64_t ws_max_blks = scsi_8btou64(
4577 				block_limits->max_write_same_length);
4578 
4579 			if (max_txfer_len != 0) {
4580 				softc->disk->d_maxsize = MIN(softc->maxio,
4581 				    (off_t)max_txfer_len * softc->params.secsize);
4582 			}
4583 
4584 			/*
4585 			 * We should already support UNMAP but we check lba
4586 			 * and block count to be sure
4587 			 */
4588 			if (max_unmap_lba_cnt != 0x00L &&
4589 			    max_unmap_blk_cnt != 0x00L) {
4590 				softc->unmap_max_lba = max_unmap_lba_cnt;
4591 				softc->unmap_max_ranges = min(max_unmap_blk_cnt,
4592 					UNMAP_MAX_RANGES);
4593 				if (unmap_gran > 1) {
4594 					softc->unmap_gran = unmap_gran;
4595 					if (unmap_gran_align & 0x80000000) {
4596 						softc->unmap_gran_align =
4597 						    unmap_gran_align &
4598 						    0x7fffffff;
4599 					}
4600 				}
4601 			} else {
4602 				/*
4603 				 * Unexpected UNMAP limits which means the
4604 				 * device doesn't actually support UNMAP
4605 				 */
4606 				dadeleteflag(softc, DA_DELETE_UNMAP, 0);
4607 			}
4608 
4609 			if (ws_max_blks != 0x00L)
4610 				softc->ws_max_blks = ws_max_blks;
4611 		} else {
4612 			int error;
4613 			error = daerror(done_ccb, CAM_RETRY_SELTO,
4614 					SF_RETRY_UA|SF_NO_PRINT);
4615 			if (error == ERESTART)
4616 				return;
4617 			else if (error != 0) {
4618 				if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4619 					/* Don't wedge this device's queue */
4620 					cam_release_devq(done_ccb->ccb_h.path,
4621 							 /*relsim_flags*/0,
4622 							 /*reduction*/0,
4623 							 /*timeout*/0,
4624 							 /*getcount_only*/0);
4625 				}
4626 
4627 				/*
4628 				 * Failure here doesn't mean UNMAP is not
4629 				 * supported as this is an optional page.
4630 				 */
4631 				softc->unmap_max_lba = 1;
4632 				softc->unmap_max_ranges = 1;
4633 			}
4634 		}
4635 
4636 		free(block_limits, M_SCSIDA);
4637 		xpt_release_ccb(done_ccb);
4638 		softc->state = DA_STATE_PROBE_BDC;
4639 		xpt_schedule(periph, priority);
4640 		return;
4641 	}
4642 	case DA_CCB_PROBE_BDC:
4643 	{
4644 		struct scsi_vpd_block_device_characteristics *bdc;
4645 
4646 		bdc = (struct scsi_vpd_block_device_characteristics *)
4647 		    csio->data_ptr;
4648 
4649 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4650 			uint32_t valid_len;
4651 
4652 			/*
4653 			 * Disable queue sorting for non-rotational media
4654 			 * by default.
4655 			 */
4656 			u_int16_t old_rate = softc->disk->d_rotation_rate;
4657 
4658 			valid_len = csio->dxfer_len - csio->resid;
4659 			if (SBDC_IS_PRESENT(bdc, valid_len,
4660 			    medium_rotation_rate)) {
4661 				softc->disk->d_rotation_rate =
4662 					scsi_2btoul(bdc->medium_rotation_rate);
4663 				if (softc->disk->d_rotation_rate ==
4664 				    SVPD_BDC_RATE_NON_ROTATING) {
4665 					cam_iosched_set_sort_queue(
4666 					    softc->cam_iosched, 0);
4667 					softc->rotating = 0;
4668 				}
4669 				if (softc->disk->d_rotation_rate != old_rate) {
4670 					disk_attr_changed(softc->disk,
4671 					    "GEOM::rotation_rate", M_NOWAIT);
4672 				}
4673 			}
4674 			if ((SBDC_IS_PRESENT(bdc, valid_len, flags))
4675 			 && (softc->zone_mode == DA_ZONE_NONE)) {
4676 				int ata_proto;
4677 
4678 				if (scsi_vpd_supported_page(periph,
4679 				    SVPD_ATA_INFORMATION))
4680 					ata_proto = 1;
4681 				else
4682 					ata_proto = 0;
4683 
4684 				/*
4685 				 * The Zoned field will only be set for
4686 				 * Drive Managed and Host Aware drives.  If
4687 				 * they are Host Managed, the device type
4688 				 * in the standard INQUIRY data should be
4689 				 * set to T_ZBC_HM (0x14).
4690 				 */
4691 				if ((bdc->flags & SVPD_ZBC_MASK) ==
4692 				     SVPD_HAW_ZBC) {
4693 					softc->zone_mode = DA_ZONE_HOST_AWARE;
4694 					softc->zone_interface = (ata_proto) ?
4695 					   DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
4696 				} else if ((bdc->flags & SVPD_ZBC_MASK) ==
4697 				     SVPD_DM_ZBC) {
4698 					softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
4699 					softc->zone_interface = (ata_proto) ?
4700 					   DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
4701 				} else if ((bdc->flags & SVPD_ZBC_MASK) !=
4702 					  SVPD_ZBC_NR) {
4703 					xpt_print(periph->path, "Unknown zoned "
4704 					    "type %#x",
4705 					    bdc->flags & SVPD_ZBC_MASK);
4706 				}
4707 			}
4708 		} else {
4709 			int error;
4710 			error = daerror(done_ccb, CAM_RETRY_SELTO,
4711 					SF_RETRY_UA|SF_NO_PRINT);
4712 			if (error == ERESTART)
4713 				return;
4714 			else if (error != 0) {
4715 				if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4716 					/* Don't wedge this device's queue */
4717 					cam_release_devq(done_ccb->ccb_h.path,
4718 							 /*relsim_flags*/0,
4719 							 /*reduction*/0,
4720 							 /*timeout*/0,
4721 							 /*getcount_only*/0);
4722 				}
4723 			}
4724 		}
4725 
4726 		free(bdc, M_SCSIDA);
4727 		xpt_release_ccb(done_ccb);
4728 		softc->state = DA_STATE_PROBE_ATA;
4729 		xpt_schedule(periph, priority);
4730 		return;
4731 	}
4732 	case DA_CCB_PROBE_ATA:
4733 	{
4734 		int i;
4735 		struct ata_params *ata_params;
4736 		int continue_probe;
4737 		int error;
4738 		int16_t *ptr;
4739 
4740 		ata_params = (struct ata_params *)csio->data_ptr;
4741 		ptr = (uint16_t *)ata_params;
4742 		continue_probe = 0;
4743 		error = 0;
4744 
4745 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4746 			uint16_t old_rate;
4747 
4748 			for (i = 0; i < sizeof(*ata_params) / 2; i++)
4749 				ptr[i] = le16toh(ptr[i]);
4750 			if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM &&
4751 			    (softc->quirks & DA_Q_NO_UNMAP) == 0) {
4752 				dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1);
4753 				if (ata_params->max_dsm_blocks != 0)
4754 					softc->trim_max_ranges = min(
4755 					  softc->trim_max_ranges,
4756 					  ata_params->max_dsm_blocks *
4757 					  ATA_DSM_BLK_RANGES);
4758 			}
4759 			/*
4760 			 * Disable queue sorting for non-rotational media
4761 			 * by default.
4762 			 */
4763 			old_rate = softc->disk->d_rotation_rate;
4764 			softc->disk->d_rotation_rate =
4765 			    ata_params->media_rotation_rate;
4766 			if (softc->disk->d_rotation_rate ==
4767 			    ATA_RATE_NON_ROTATING) {
4768 				cam_iosched_set_sort_queue(softc->cam_iosched, 0);
4769 				softc->rotating = 0;
4770 			}
4771 			if (softc->disk->d_rotation_rate != old_rate) {
4772 				disk_attr_changed(softc->disk,
4773 				    "GEOM::rotation_rate", M_NOWAIT);
4774 			}
4775 
4776 			if (ata_params->capabilities1 & ATA_SUPPORT_DMA)
4777 				softc->flags |= DA_FLAG_CAN_ATA_DMA;
4778 
4779 			if (ata_params->support.extension &
4780 			    ATA_SUPPORT_GENLOG)
4781 				softc->flags |= DA_FLAG_CAN_ATA_LOG;
4782 
4783 			/*
4784 			 * At this point, if we have a SATA host aware drive,
4785 			 * we communicate via ATA passthrough unless the
4786 			 * SAT layer supports ZBC -> ZAC translation.  In
4787 			 * that case,
4788 			 */
4789 			/*
4790 			 * XXX KDM figure out how to detect a host managed
4791 			 * SATA drive.
4792 			 */
4793 			if (softc->zone_mode == DA_ZONE_NONE) {
4794 				/*
4795 				 * Note that we don't override the zone
4796 				 * mode or interface if it has already been
4797 				 * set.  This is because it has either been
4798 				 * set as a quirk, or when we probed the
4799 				 * SCSI Block Device Characteristics page,
4800 				 * the zoned field was set.  The latter
4801 				 * means that the SAT layer supports ZBC to
4802 				 * ZAC translation, and we would prefer to
4803 				 * use that if it is available.
4804 				 */
4805 				if ((ata_params->support3 &
4806 				    ATA_SUPPORT_ZONE_MASK) ==
4807 				    ATA_SUPPORT_ZONE_HOST_AWARE) {
4808 					softc->zone_mode = DA_ZONE_HOST_AWARE;
4809 					softc->zone_interface =
4810 					    DA_ZONE_IF_ATA_PASS;
4811 				} else if ((ata_params->support3 &
4812 					    ATA_SUPPORT_ZONE_MASK) ==
4813 					    ATA_SUPPORT_ZONE_DEV_MANAGED) {
4814 					softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
4815 					softc->zone_interface =
4816 					    DA_ZONE_IF_ATA_PASS;
4817 				}
4818 			}
4819 
4820 		} else {
4821 			error = daerror(done_ccb, CAM_RETRY_SELTO,
4822 					SF_RETRY_UA|SF_NO_PRINT);
4823 			if (error == ERESTART)
4824 				return;
4825 			else if (error != 0) {
4826 				if ((done_ccb->ccb_h.status &
4827 				     CAM_DEV_QFRZN) != 0) {
4828 					/* Don't wedge this device's queue */
4829 					cam_release_devq(done_ccb->ccb_h.path,
4830 							 /*relsim_flags*/0,
4831 							 /*reduction*/0,
4832 							 /*timeout*/0,
4833 							 /*getcount_only*/0);
4834 				}
4835 			}
4836 		}
4837 
4838 		free(ata_params, M_SCSIDA);
4839 		if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
4840 		 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
4841 			/*
4842 			 * If the ATA IDENTIFY failed, we could be talking
4843 			 * to a SCSI drive, although that seems unlikely,
4844 			 * since the drive did report that it supported the
4845 			 * ATA Information VPD page.  If the ATA IDENTIFY
4846 			 * succeeded, and the SAT layer doesn't support
4847 			 * ZBC -> ZAC translation, continue on to get the
4848 			 * directory of ATA logs, and complete the rest of
4849 			 * the ZAC probe.  If the SAT layer does support
4850 			 * ZBC -> ZAC translation, we want to use that,
4851 			 * and we'll probe the SCSI Zoned Block Device
4852 			 * Characteristics VPD page next.
4853 			 */
4854 			if ((error == 0)
4855 			 && (softc->flags & DA_FLAG_CAN_ATA_LOG)
4856 			 && (softc->zone_interface == DA_ZONE_IF_ATA_PASS))
4857 				softc->state = DA_STATE_PROBE_ATA_LOGDIR;
4858 			else
4859 				softc->state = DA_STATE_PROBE_ZONE;
4860 			continue_probe = 1;
4861 		}
4862 		if (continue_probe != 0) {
4863 			xpt_release_ccb(done_ccb);
4864 			xpt_schedule(periph, priority);
4865 			return;
4866 		} else
4867 			daprobedone(periph, done_ccb);
4868 		return;
4869 	}
4870 	case DA_CCB_PROBE_ATA_LOGDIR:
4871 	{
4872 		int error;
4873 
4874 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4875 			error = 0;
4876 			softc->valid_logdir_len = 0;
4877 			bzero(&softc->ata_logdir, sizeof(softc->ata_logdir));
4878 			softc->valid_logdir_len =
4879 				csio->dxfer_len - csio->resid;
4880 			if (softc->valid_logdir_len > 0)
4881 				bcopy(csio->data_ptr, &softc->ata_logdir,
4882 				    min(softc->valid_logdir_len,
4883 					sizeof(softc->ata_logdir)));
4884 			/*
4885 			 * Figure out whether the Identify Device log is
4886 			 * supported.  The General Purpose log directory
4887 			 * has a header, and lists the number of pages
4888 			 * available for each GP log identified by the
4889 			 * offset into the list.
4890 			 */
4891 			if ((softc->valid_logdir_len >=
4892 			    ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t)))
4893 			 && (le16dec(softc->ata_logdir.header) ==
4894 			     ATA_GP_LOG_DIR_VERSION)
4895 			 && (le16dec(&softc->ata_logdir.num_pages[
4896 			     (ATA_IDENTIFY_DATA_LOG *
4897 			     sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){
4898 				softc->flags |= DA_FLAG_CAN_ATA_IDLOG;
4899 			} else {
4900 				softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
4901 			}
4902 		} else {
4903 			error = daerror(done_ccb, CAM_RETRY_SELTO,
4904 					SF_RETRY_UA|SF_NO_PRINT);
4905 			if (error == ERESTART)
4906 				return;
4907 			else if (error != 0) {
4908 				/*
4909 				 * If we can't get the ATA log directory,
4910 				 * then ATA logs are effectively not
4911 				 * supported even if the bit is set in the
4912 				 * identify data.
4913 				 */
4914 				softc->flags &= ~(DA_FLAG_CAN_ATA_LOG |
4915 						  DA_FLAG_CAN_ATA_IDLOG);
4916 				if ((done_ccb->ccb_h.status &
4917 				     CAM_DEV_QFRZN) != 0) {
4918 					/* Don't wedge this device's queue */
4919 					cam_release_devq(done_ccb->ccb_h.path,
4920 							 /*relsim_flags*/0,
4921 							 /*reduction*/0,
4922 							 /*timeout*/0,
4923 							 /*getcount_only*/0);
4924 				}
4925 			}
4926 		}
4927 
4928 		free(csio->data_ptr, M_SCSIDA);
4929 
4930 		if ((error == 0)
4931 		 && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) {
4932 			softc->state = DA_STATE_PROBE_ATA_IDDIR;
4933 			xpt_release_ccb(done_ccb);
4934 			xpt_schedule(periph, priority);
4935 			return;
4936 		}
4937 		daprobedone(periph, done_ccb);
4938 		return;
4939 	}
4940 	case DA_CCB_PROBE_ATA_IDDIR:
4941 	{
4942 		int error;
4943 
4944 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4945 			off_t entries_offset, max_entries;
4946 			error = 0;
4947 
4948 			softc->valid_iddir_len = 0;
4949 			bzero(&softc->ata_iddir, sizeof(softc->ata_iddir));
4950 			softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP |
4951 					  DA_FLAG_CAN_ATA_ZONE);
4952 			softc->valid_iddir_len =
4953 				csio->dxfer_len - csio->resid;
4954 			if (softc->valid_iddir_len > 0)
4955 				bcopy(csio->data_ptr, &softc->ata_iddir,
4956 				    min(softc->valid_iddir_len,
4957 					sizeof(softc->ata_iddir)));
4958 
4959 			entries_offset =
4960 			    __offsetof(struct ata_identify_log_pages,entries);
4961 			max_entries = softc->valid_iddir_len - entries_offset;
4962 			if ((softc->valid_iddir_len > (entries_offset + 1))
4963 			 && (le64dec(softc->ata_iddir.header) ==
4964 			     ATA_IDLOG_REVISION)
4965 			 && (softc->ata_iddir.entry_count > 0)) {
4966 				int num_entries, i;
4967 
4968 				num_entries = softc->ata_iddir.entry_count;
4969 				num_entries = min(num_entries,
4970 				   softc->valid_iddir_len - entries_offset);
4971 				for (i = 0; i < num_entries &&
4972 				     i < max_entries; i++) {
4973 					if (softc->ata_iddir.entries[i] ==
4974 					    ATA_IDL_SUP_CAP)
4975 						softc->flags |=
4976 						    DA_FLAG_CAN_ATA_SUPCAP;
4977 					else if (softc->ata_iddir.entries[i]==
4978 						 ATA_IDL_ZDI)
4979 						softc->flags |=
4980 						    DA_FLAG_CAN_ATA_ZONE;
4981 
4982 					if ((softc->flags &
4983 					     DA_FLAG_CAN_ATA_SUPCAP)
4984 					 && (softc->flags &
4985 					     DA_FLAG_CAN_ATA_ZONE))
4986 						break;
4987 				}
4988 			}
4989 		} else {
4990 			error = daerror(done_ccb, CAM_RETRY_SELTO,
4991 					SF_RETRY_UA|SF_NO_PRINT);
4992 			if (error == ERESTART)
4993 				return;
4994 			else if (error != 0) {
4995 				/*
4996 				 * If we can't get the ATA Identify Data log
4997 				 * directory, then it effectively isn't
4998 				 * supported even if the ATA Log directory
4999 				 * a non-zero number of pages present for
5000 				 * this log.
5001 				 */
5002 				softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
5003 				if ((done_ccb->ccb_h.status &
5004 				     CAM_DEV_QFRZN) != 0) {
5005 					/* Don't wedge this device's queue */
5006 					cam_release_devq(done_ccb->ccb_h.path,
5007 							 /*relsim_flags*/0,
5008 							 /*reduction*/0,
5009 							 /*timeout*/0,
5010 							 /*getcount_only*/0);
5011 				}
5012 			}
5013 		}
5014 
5015 		free(csio->data_ptr, M_SCSIDA);
5016 
5017 		if ((error == 0)
5018 		 && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) {
5019 			softc->state = DA_STATE_PROBE_ATA_SUP;
5020 			xpt_release_ccb(done_ccb);
5021 			xpt_schedule(periph, priority);
5022 			return;
5023 		}
5024 		daprobedone(periph, done_ccb);
5025 		return;
5026 	}
5027 	case DA_CCB_PROBE_ATA_SUP:
5028 	{
5029 		int error;
5030 
5031 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5032 			uint32_t valid_len;
5033 			size_t needed_size;
5034 			struct ata_identify_log_sup_cap *sup_cap;
5035 			error = 0;
5036 
5037 			sup_cap = (struct ata_identify_log_sup_cap *)
5038 			    csio->data_ptr;
5039 			valid_len = csio->dxfer_len - csio->resid;
5040 			needed_size =
5041 			    __offsetof(struct ata_identify_log_sup_cap,
5042 			    sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap);
5043 			if (valid_len >= needed_size) {
5044 				uint64_t zoned, zac_cap;
5045 
5046 				zoned = le64dec(sup_cap->zoned_cap);
5047 				if (zoned & ATA_ZONED_VALID) {
5048 					/*
5049 					 * This should have already been
5050 					 * set, because this is also in the
5051 					 * ATA identify data.
5052 					 */
5053 					if ((zoned & ATA_ZONED_MASK) ==
5054 					    ATA_SUPPORT_ZONE_HOST_AWARE)
5055 						softc->zone_mode =
5056 						    DA_ZONE_HOST_AWARE;
5057 					else if ((zoned & ATA_ZONED_MASK) ==
5058 					    ATA_SUPPORT_ZONE_DEV_MANAGED)
5059 						softc->zone_mode =
5060 						    DA_ZONE_DRIVE_MANAGED;
5061 				}
5062 
5063 				zac_cap = le64dec(sup_cap->sup_zac_cap);
5064 				if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
5065 					if (zac_cap & ATA_REPORT_ZONES_SUP)
5066 						softc->zone_flags |=
5067 						    DA_ZONE_FLAG_RZ_SUP;
5068 					if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
5069 						softc->zone_flags |=
5070 						    DA_ZONE_FLAG_OPEN_SUP;
5071 					if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
5072 						softc->zone_flags |=
5073 						    DA_ZONE_FLAG_CLOSE_SUP;
5074 					if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
5075 						softc->zone_flags |=
5076 						    DA_ZONE_FLAG_FINISH_SUP;
5077 					if (zac_cap & ATA_ND_RWP_SUP)
5078 						softc->zone_flags |=
5079 						    DA_ZONE_FLAG_RWP_SUP;
5080 				} else {
5081 					/*
5082 					 * This field was introduced in
5083 					 * ACS-4, r08 on April 28th, 2015.
5084 					 * If the drive firmware was written
5085 					 * to an earlier spec, it won't have
5086 					 * the field.  So, assume all
5087 					 * commands are supported.
5088 					 */
5089 					softc->zone_flags |=
5090 					    DA_ZONE_FLAG_SUP_MASK;
5091 				}
5092 
5093 			}
5094 		} else {
5095 			error = daerror(done_ccb, CAM_RETRY_SELTO,
5096 					SF_RETRY_UA|SF_NO_PRINT);
5097 			if (error == ERESTART)
5098 				return;
5099 			else if (error != 0) {
5100 				/*
5101 				 * If we can't get the ATA Identify Data
5102 				 * Supported Capabilities page, clear the
5103 				 * flag...
5104 				 */
5105 				softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP;
5106 				/*
5107 				 * And clear zone capabilities.
5108 				 */
5109 				softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK;
5110 				if ((done_ccb->ccb_h.status &
5111 				     CAM_DEV_QFRZN) != 0) {
5112 					/* Don't wedge this device's queue */
5113 					cam_release_devq(done_ccb->ccb_h.path,
5114 							 /*relsim_flags*/0,
5115 							 /*reduction*/0,
5116 							 /*timeout*/0,
5117 							 /*getcount_only*/0);
5118 				}
5119 			}
5120 		}
5121 
5122 		free(csio->data_ptr, M_SCSIDA);
5123 
5124 		if ((error == 0)
5125 		 && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) {
5126 			softc->state = DA_STATE_PROBE_ATA_ZONE;
5127 			xpt_release_ccb(done_ccb);
5128 			xpt_schedule(periph, priority);
5129 			return;
5130 		}
5131 		daprobedone(periph, done_ccb);
5132 		return;
5133 	}
5134 	case DA_CCB_PROBE_ATA_ZONE:
5135 	{
5136 		int error;
5137 
5138 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5139 			struct ata_zoned_info_log *zi_log;
5140 			uint32_t valid_len;
5141 			size_t needed_size;
5142 
5143 			zi_log = (struct ata_zoned_info_log *)csio->data_ptr;
5144 
5145 			valid_len = csio->dxfer_len - csio->resid;
5146 			needed_size = __offsetof(struct ata_zoned_info_log,
5147 			    version_info) + 1 + sizeof(zi_log->version_info);
5148 			if (valid_len >= needed_size) {
5149 				uint64_t tmpvar;
5150 
5151 				tmpvar = le64dec(zi_log->zoned_cap);
5152 				if (tmpvar & ATA_ZDI_CAP_VALID) {
5153 					if (tmpvar & ATA_ZDI_CAP_URSWRZ)
5154 						softc->zone_flags |=
5155 						    DA_ZONE_FLAG_URSWRZ;
5156 					else
5157 						softc->zone_flags &=
5158 						    ~DA_ZONE_FLAG_URSWRZ;
5159 				}
5160 				tmpvar = le64dec(zi_log->optimal_seq_zones);
5161 				if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) {
5162 					softc->zone_flags |=
5163 					    DA_ZONE_FLAG_OPT_SEQ_SET;
5164 					softc->optimal_seq_zones = (tmpvar &
5165 					    ATA_ZDI_OPT_SEQ_MASK);
5166 				} else {
5167 					softc->zone_flags &=
5168 					    ~DA_ZONE_FLAG_OPT_SEQ_SET;
5169 					softc->optimal_seq_zones = 0;
5170 				}
5171 
5172 				tmpvar =le64dec(zi_log->optimal_nonseq_zones);
5173 				if (tmpvar & ATA_ZDI_OPT_NS_VALID) {
5174 					softc->zone_flags |=
5175 					    DA_ZONE_FLAG_OPT_NONSEQ_SET;
5176 					softc->optimal_nonseq_zones =
5177 					    (tmpvar & ATA_ZDI_OPT_NS_MASK);
5178 				} else {
5179 					softc->zone_flags &=
5180 					    ~DA_ZONE_FLAG_OPT_NONSEQ_SET;
5181 					softc->optimal_nonseq_zones = 0;
5182 				}
5183 
5184 				tmpvar = le64dec(zi_log->max_seq_req_zones);
5185 				if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) {
5186 					softc->zone_flags |=
5187 					    DA_ZONE_FLAG_MAX_SEQ_SET;
5188 					softc->max_seq_zones =
5189 					    (tmpvar & ATA_ZDI_MAX_SEQ_MASK);
5190 				} else {
5191 					softc->zone_flags &=
5192 					    ~DA_ZONE_FLAG_MAX_SEQ_SET;
5193 					softc->max_seq_zones = 0;
5194 				}
5195 			}
5196 		} else {
5197 			error = daerror(done_ccb, CAM_RETRY_SELTO,
5198 					SF_RETRY_UA|SF_NO_PRINT);
5199 			if (error == ERESTART)
5200 				return;
5201 			else if (error != 0) {
5202 				softc->flags &= ~DA_FLAG_CAN_ATA_ZONE;
5203 				softc->flags &= ~DA_ZONE_FLAG_SET_MASK;
5204 
5205 				if ((done_ccb->ccb_h.status &
5206 				     CAM_DEV_QFRZN) != 0) {
5207 					/* Don't wedge this device's queue */
5208 					cam_release_devq(done_ccb->ccb_h.path,
5209 							 /*relsim_flags*/0,
5210 							 /*reduction*/0,
5211 							 /*timeout*/0,
5212 							 /*getcount_only*/0);
5213 				}
5214 			}
5215 
5216 		}
5217 		free(csio->data_ptr, M_SCSIDA);
5218 
5219 		daprobedone(periph, done_ccb);
5220 		return;
5221 	}
5222 	case DA_CCB_PROBE_ZONE:
5223 	{
5224 		int error;
5225 
5226 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5227 			uint32_t valid_len;
5228 			size_t needed_len;
5229 			struct scsi_vpd_zoned_bdc *zoned_bdc;
5230 
5231 			error = 0;
5232 			zoned_bdc = (struct scsi_vpd_zoned_bdc *)
5233 				csio->data_ptr;
5234 			valid_len = csio->dxfer_len - csio->resid;
5235 			needed_len = __offsetof(struct scsi_vpd_zoned_bdc,
5236 			    max_seq_req_zones) + 1 +
5237 			    sizeof(zoned_bdc->max_seq_req_zones);
5238 			if ((valid_len >= needed_len)
5239 			 && (scsi_2btoul(zoned_bdc->page_length) >=
5240 			     SVPD_ZBDC_PL)) {
5241 				if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ)
5242 					softc->zone_flags |=
5243 					    DA_ZONE_FLAG_URSWRZ;
5244 				else
5245 					softc->zone_flags &=
5246 					    ~DA_ZONE_FLAG_URSWRZ;
5247 				softc->optimal_seq_zones =
5248 				    scsi_4btoul(zoned_bdc->optimal_seq_zones);
5249 				softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
5250 				softc->optimal_nonseq_zones = scsi_4btoul(
5251 				    zoned_bdc->optimal_nonseq_zones);
5252 				softc->zone_flags |=
5253 				    DA_ZONE_FLAG_OPT_NONSEQ_SET;
5254 				softc->max_seq_zones =
5255 				    scsi_4btoul(zoned_bdc->max_seq_req_zones);
5256 				softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET;
5257 			}
5258 			/*
5259 			 * All of the zone commands are mandatory for SCSI
5260 			 * devices.
5261 			 *
5262 			 * XXX KDM this is valid as of September 2015.
5263 			 * Re-check this assumption once the SAT spec is
5264 			 * updated to support SCSI ZBC to ATA ZAC mapping.
5265 			 * Since ATA allows zone commands to be reported
5266 			 * as supported or not, this may not necessarily
5267 			 * be true for an ATA device behind a SAT (SCSI to
5268 			 * ATA Translation) layer.
5269 			 */
5270 			softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
5271 		} else {
5272 			error = daerror(done_ccb, CAM_RETRY_SELTO,
5273 					SF_RETRY_UA|SF_NO_PRINT);
5274 			if (error == ERESTART)
5275 				return;
5276 			else if (error != 0) {
5277 				if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5278 					/* Don't wedge this device's queue */
5279 					cam_release_devq(done_ccb->ccb_h.path,
5280 							 /*relsim_flags*/0,
5281 							 /*reduction*/0,
5282 							 /*timeout*/0,
5283 							 /*getcount_only*/0);
5284 				}
5285 			}
5286 		}
5287 		daprobedone(periph, done_ccb);
5288 		return;
5289 	}
5290 	case DA_CCB_DUMP:
5291 		/* No-op.  We're polling */
5292 		return;
5293 	case DA_CCB_TUR:
5294 	{
5295 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5296 
5297 			if (daerror(done_ccb, CAM_RETRY_SELTO,
5298 			    SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) ==
5299 			    ERESTART)
5300 				return;
5301 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5302 				cam_release_devq(done_ccb->ccb_h.path,
5303 						 /*relsim_flags*/0,
5304 						 /*reduction*/0,
5305 						 /*timeout*/0,
5306 						 /*getcount_only*/0);
5307 		}
5308 		xpt_release_ccb(done_ccb);
5309 		cam_periph_release_locked(periph);
5310 		return;
5311 	}
5312 	default:
5313 		break;
5314 	}
5315 	xpt_release_ccb(done_ccb);
5316 }
5317 
5318 static void
5319 dareprobe(struct cam_periph *periph)
5320 {
5321 	struct da_softc	  *softc;
5322 	cam_status status;
5323 
5324 	softc = (struct da_softc *)periph->softc;
5325 
5326 	/* Probe in progress; don't interfere. */
5327 	if (softc->state != DA_STATE_NORMAL)
5328 		return;
5329 
5330 	status = cam_periph_acquire(periph);
5331 	KASSERT(status == CAM_REQ_CMP,
5332 	    ("dareprobe: cam_periph_acquire failed"));
5333 
5334 	if (softc->flags & DA_FLAG_CAN_RC16)
5335 		softc->state = DA_STATE_PROBE_RC16;
5336 	else
5337 		softc->state = DA_STATE_PROBE_RC;
5338 
5339 	xpt_schedule(periph, CAM_PRIORITY_DEV);
5340 }
5341 
/*
 * Translate a failed CCB into an errno for the upper layers, applying
 * da(4)-specific recovery first: the READ(6)/WRITE(6) -> 10-byte CDB
 * workaround, reprobing on unit attentions that signal device changes,
 * and media-present tracking.  Returns ERESTART when the command has
 * been re-queued and the caller must not complete it.
 */
static int
daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct da_softc	  *softc;
	struct cam_periph *periph;
	int error, error_code, sense_key, asc, ascq;

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct da_softc *)periph->softc;

	/*
	 * Automatically detect devices that do not support
	 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
	 */
	error = 0;
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
		error = cmd6workaround(ccb);
	} else if (scsi_extract_sense_ccb(ccb,
	    &error_code, &sense_key, &asc, &ascq)) {
		/* ILLEGAL REQUEST may also mean 6-byte CDBs are unsupported. */
		if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
 			error = cmd6workaround(ccb);
		/*
		 * If the target replied with CAPACITY DATA HAS CHANGED UA,
		 * query the capacity and notify upper layers.
		 */
		else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x2A && ascq == 0x09) {
			xpt_print(periph->path, "Capacity data has changed\n");
			softc->flags &= ~DA_FLAG_PROBED;
			dareprobe(periph);
			sense_flags |= SF_NO_PRINT;
		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x28 && ascq == 0x00) {
			/*
			 * 28h/00h: NOT READY TO READY CHANGE, MEDIUM MAY
			 * HAVE CHANGED -- tell the disk layer the media
			 * was swapped so it revalidates.
			 */
			softc->flags &= ~DA_FLAG_PROBED;
			disk_media_changed(softc->disk, M_NOWAIT);
		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x3F && ascq == 0x03) {
			/* 3Fh/03h: INQUIRY DATA HAS CHANGED -- reprobe. */
			xpt_print(periph->path, "INQUIRY data has changed\n");
			softc->flags &= ~DA_FLAG_PROBED;
			dareprobe(periph);
			sense_flags |= SF_NO_PRINT;
		} else if (sense_key == SSD_KEY_NOT_READY &&
		    asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			/* 3Ah: MEDIUM NOT PRESENT -- the media went away. */
			softc->flags |= DA_FLAG_PACK_INVALID;
			disk_media_gone(softc->disk, M_NOWAIT);
		}
	}
	if (error == ERESTART)
		return (ERESTART);

#ifdef CAM_IO_STATS
	/* Account transport-level failures for the per-periph counters. */
	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
		softc->timeouts++;
		break;
	case CAM_REQ_ABORTED:
	case CAM_REQ_CMP_ERR:
	case CAM_REQ_TERMIO:
	case CAM_UNREC_HBA_ERROR:
	case CAM_DATA_RUN_ERR:
		softc->errors++;
		break;
	default:
		break;
	}
#endif

	/*
	 * XXX
	 * Until we have a better way of doing pack validation,
	 * don't treat UAs as errors.
	 */
	sense_flags |= SF_RETRY_UA;

	if (softc->quirks & DA_Q_RETRY_BUSY)
		sense_flags |= SF_RETRY_BUSY;
	return(cam_periph_error(ccb, cam_flags, sense_flags,
				&softc->saved_ccb));
}
5426 
5427 static void
5428 damediapoll(void *arg)
5429 {
5430 	struct cam_periph *periph = arg;
5431 	struct da_softc *softc = periph->softc;
5432 
5433 	if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
5434 	    LIST_EMPTY(&softc->pending_ccbs)) {
5435 		if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
5436 			cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
5437 			daschedule(periph);
5438 		}
5439 	}
5440 	/* Queue us up again */
5441 	if (da_poll_period != 0)
5442 		callout_schedule(&softc->mediapoll_c, da_poll_period * hz);
5443 }
5444 
5445 static void
5446 daprevent(struct cam_periph *periph, int action)
5447 {
5448 	struct	da_softc *softc;
5449 	union	ccb *ccb;
5450 	int	error;
5451 
5452 	softc = (struct da_softc *)periph->softc;
5453 
5454 	if (((action == PR_ALLOW)
5455 	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
5456 	 || ((action == PR_PREVENT)
5457 	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
5458 		return;
5459 	}
5460 
5461 	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
5462 
5463 	scsi_prevent(&ccb->csio,
5464 		     /*retries*/1,
5465 		     /*cbcfp*/dadone,
5466 		     MSG_SIMPLE_Q_TAG,
5467 		     action,
5468 		     SSD_FULL_SIZE,
5469 		     5000);
5470 
5471 	error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO,
5472 	    SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat);
5473 
5474 	if (error == 0) {
5475 		if (action == PR_ALLOW)
5476 			softc->flags &= ~DA_FLAG_PACK_LOCKED;
5477 		else
5478 			softc->flags |= DA_FLAG_PACK_LOCKED;
5479 	}
5480 
5481 	xpt_release_ccb(ccb);
5482 }
5483 
/*
 * Update the periph's disk parameters from READ CAPACITY results and
 * propagate them to the disk(9) layer: sector size/count, stripe
 * (physical block) geometry, CHS geometry from the controller, and the
 * long-form capacity data cached in the EDT.  Finishes by telling GEOM
 * about any size change via disk_resize().
 *
 * rcaplong may be NULL when only short READ CAPACITY data is
 * available; rcap_len is the number of valid bytes in *rcaplong.
 */
static void
dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector,
	  struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len)
{
	struct ccb_calc_geometry ccg;
	struct da_softc *softc;
	struct disk_params *dp;
	u_int lbppbe, lalba;
	int error;

	softc = (struct da_softc *)periph->softc;

	dp = &softc->params;
	dp->secsize = block_len;
	dp->sectors = maxsector + 1;
	if (rcaplong != NULL) {
		/*
		 * Long-form data carries the logical-blocks-per-physical-
		 * block exponent and the lowest aligned LBA.
		 */
		lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE;
		lalba = scsi_2btoul(rcaplong->lalba_lbp);
		lalba &= SRC16_LALBA_A;
	} else {
		lbppbe = 0;
		lalba = 0;
	}

	/*
	 * Derive stripe size/offset in priority order: reported physical
	 * block geometry, then the 4K quirk, then UNMAP granularity.
	 */
	if (lbppbe > 0) {
		dp->stripesize = block_len << lbppbe;
		dp->stripeoffset = (dp->stripesize - block_len * lalba) %
		    dp->stripesize;
	} else if (softc->quirks & DA_Q_4K) {
		dp->stripesize = 4096;
		dp->stripeoffset = 0;
	} else if (softc->unmap_gran != 0) {
		dp->stripesize = block_len * softc->unmap_gran;
		dp->stripeoffset = (dp->stripesize - block_len *
		    softc->unmap_gran_align) % dp->stripesize;
	} else {
		dp->stripesize = 0;
		dp->stripeoffset = 0;
	}
	/*
	 * Have the controller provide us with a geometry
	 * for this disk.  The only time the geometry
	 * matters is when we boot and the controller
	 * is the only one knowledgeable enough to come
	 * up with something that will make this a bootable
	 * device.
	 */
	xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
	ccg.block_size = dp->secsize;
	ccg.volume_size = dp->sectors;
	ccg.heads = 0;
	ccg.secs_per_track = 0;
	ccg.cylinders = 0;
	xpt_action((union ccb*)&ccg);
	if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/*
		 * We don't know what went wrong here- but just pick
		 * a geometry so we don't have nasty things like divide
		 * by zero.
		 */
		dp->heads = 255;
		dp->secs_per_track = 255;
		dp->cylinders = dp->sectors / (255 * 255);
		if (dp->cylinders == 0) {
			dp->cylinders = 1;
		}
	} else {
		dp->heads = ccg.heads;
		dp->secs_per_track = ccg.secs_per_track;
		dp->cylinders = ccg.cylinders;
	}

	/*
	 * If the user supplied a read capacity buffer, and if it is
	 * different than the previous buffer, update the data in the EDT.
	 * If it's the same, we don't bother.  This avoids sending an
	 * update every time someone opens this device.
	 */
	if ((rcaplong != NULL)
	 && (bcmp(rcaplong, &softc->rcaplong,
		  min(sizeof(softc->rcaplong), rcap_len)) != 0)) {
		struct ccb_dev_advinfo cdai;

		xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_STORE;
		cdai.bufsiz = rcap_len;
		cdai.buf = (uint8_t *)rcaplong;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
		if (cdai.ccb_h.status != CAM_REQ_CMP) {
			xpt_print(periph->path, "%s: failed to set read "
				  "capacity advinfo\n", __func__);
			/* Use cam_error_print() to decode the status */
			cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS,
					CAM_EPF_ALL);
		} else {
			/* Cache the accepted data for the next comparison. */
			bcopy(rcaplong, &softc->rcaplong,
			      min(sizeof(softc->rcaplong), rcap_len));
		}
	}

	/* Push the final parameters into the disk(9) layer. */
	softc->disk->d_sectorsize = softc->params.secsize;
	softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors;
	softc->disk->d_stripesize = softc->params.stripesize;
	softc->disk->d_stripeoffset = softc->params.stripeoffset;
	/* XXX: these are not actually "firmware" values, so they may be wrong */
	softc->disk->d_fwsectors = softc->params.secs_per_track;
	softc->disk->d_fwheads = softc->params.heads;
	softc->disk->d_devstat->block_size = softc->params.secsize;
	softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;

	error = disk_resize(softc->disk, M_NOWAIT);
	if (error != 0)
		xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error);
}
5603 
5604 static void
5605 dasendorderedtag(void *arg)
5606 {
5607 	struct da_softc *softc = arg;
5608 
5609 	if (da_send_ordered) {
5610 		if (!LIST_EMPTY(&softc->pending_ccbs)) {
5611 			if ((softc->flags & DA_FLAG_WAS_OTAG) == 0)
5612 				softc->flags |= DA_FLAG_NEED_OTAG;
5613 			softc->flags &= ~DA_FLAG_WAS_OTAG;
5614 		}
5615 	}
5616 	/* Queue us up again */
5617 	callout_reset(&softc->sendordered_c,
5618 	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
5619 	    dasendorderedtag, softc);
5620 }
5621 
5622 /*
5623  * Step through all DA peripheral drivers, and if the device is still open,
5624  * sync the disk cache to physical media.
5625  */
5626 static void
5627 dashutdown(void * arg, int howto)
5628 {
5629 	struct cam_periph *periph;
5630 	struct da_softc *softc;
5631 	union ccb *ccb;
5632 	int error;
5633 
5634 	CAM_PERIPH_FOREACH(periph, &dadriver) {
5635 		softc = (struct da_softc *)periph->softc;
5636 		if (SCHEDULER_STOPPED()) {
5637 			/* If we paniced with the lock held, do not recurse. */
5638 			if (!cam_periph_owned(periph) &&
5639 			    (softc->flags & DA_FLAG_OPEN)) {
5640 				dadump(softc->disk, NULL, 0, 0, 0);
5641 			}
5642 			continue;
5643 		}
5644 		cam_periph_lock(periph);
5645 
5646 		/*
5647 		 * We only sync the cache if the drive is still open, and
5648 		 * if the drive is capable of it..
5649 		 */
5650 		if (((softc->flags & DA_FLAG_OPEN) == 0)
5651 		 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
5652 			cam_periph_unlock(periph);
5653 			continue;
5654 		}
5655 
5656 		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
5657 		scsi_synchronize_cache(&ccb->csio,
5658 				       /*retries*/0,
5659 				       /*cbfcnp*/dadone,
5660 				       MSG_SIMPLE_Q_TAG,
5661 				       /*begin_lba*/0, /* whole disk */
5662 				       /*lb_count*/0,
5663 				       SSD_FULL_SIZE,
5664 				       60 * 60 * 1000);
5665 
5666 		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
5667 		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR,
5668 		    softc->disk->d_devstat);
5669 		if (error != 0)
5670 			xpt_print(periph->path, "Synchronize cache failed\n");
5671 		xpt_release_ccb(ccb);
5672 		cam_periph_unlock(periph);
5673 	}
5674 }
5675 
5676 #else /* !_KERNEL */
5677 
5678 /*
5679  * XXX These are only left out of the kernel build to silence warnings.  If,
5680  * for some reason these functions are used in the kernel, the ifdefs should
5681  * be moved so they are included both in the kernel and userland.
5682  */
5683 void
5684 scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
5685 		 void (*cbfcnp)(struct cam_periph *, union ccb *),
5686 		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
5687 		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
5688 		 u_int32_t timeout)
5689 {
5690 	struct scsi_format_unit *scsi_cmd;
5691 
5692 	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
5693 	scsi_cmd->opcode = FORMAT_UNIT;
5694 	scsi_cmd->byte2 = byte2;
5695 	scsi_ulto2b(ileave, scsi_cmd->interleave);
5696 
5697 	cam_fill_csio(csio,
5698 		      retries,
5699 		      cbfcnp,
5700 		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
5701 		      tag_action,
5702 		      data_ptr,
5703 		      dxfer_len,
5704 		      sense_len,
5705 		      sizeof(*scsi_cmd),
5706 		      timeout);
5707 }
5708 
5709 void
5710 scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries,
5711 		  void (*cbfcnp)(struct cam_periph *, union ccb *),
5712 		  uint8_t tag_action, uint8_t list_format,
5713 		  uint32_t addr_desc_index, uint8_t *data_ptr,
5714 		  uint32_t dxfer_len, int minimum_cmd_size,
5715 		  uint8_t sense_len, uint32_t timeout)
5716 {
5717 	uint8_t cdb_len;
5718 
5719 	/*
5720 	 * These conditions allow using the 10 byte command.  Otherwise we
5721 	 * need to use the 12 byte command.
5722 	 */
5723 	if ((minimum_cmd_size <= 10)
5724 	 && (addr_desc_index == 0)
5725 	 && (dxfer_len <= SRDD10_MAX_LENGTH)) {
5726 		struct scsi_read_defect_data_10 *cdb10;
5727 
5728 		cdb10 = (struct scsi_read_defect_data_10 *)
5729 			&csio->cdb_io.cdb_bytes;
5730 
5731 		cdb_len = sizeof(*cdb10);
5732 		bzero(cdb10, cdb_len);
5733                 cdb10->opcode = READ_DEFECT_DATA_10;
5734                 cdb10->format = list_format;
5735                 scsi_ulto2b(dxfer_len, cdb10->alloc_length);
5736 	} else {
5737 		struct scsi_read_defect_data_12 *cdb12;
5738 
5739 		cdb12 = (struct scsi_read_defect_data_12 *)
5740 			&csio->cdb_io.cdb_bytes;
5741 
5742 		cdb_len = sizeof(*cdb12);
5743 		bzero(cdb12, cdb_len);
5744                 cdb12->opcode = READ_DEFECT_DATA_12;
5745                 cdb12->format = list_format;
5746                 scsi_ulto4b(dxfer_len, cdb12->alloc_length);
5747 		scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index);
5748 	}
5749 
5750 	cam_fill_csio(csio,
5751 		      retries,
5752 		      cbfcnp,
5753 		      /*flags*/ CAM_DIR_IN,
5754 		      tag_action,
5755 		      data_ptr,
5756 		      dxfer_len,
5757 		      sense_len,
5758 		      cdb_len,
5759 		      timeout);
5760 }
5761 
5762 void
5763 scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
5764 	      void (*cbfcnp)(struct cam_periph *, union ccb *),
5765 	      u_int8_t tag_action, u_int8_t byte2, u_int16_t control,
5766 	      u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
5767 	      u_int32_t timeout)
5768 {
5769 	struct scsi_sanitize *scsi_cmd;
5770 
5771 	scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes;
5772 	scsi_cmd->opcode = SANITIZE;
5773 	scsi_cmd->byte2 = byte2;
5774 	scsi_cmd->control = control;
5775 	scsi_ulto2b(dxfer_len, scsi_cmd->length);
5776 
5777 	cam_fill_csio(csio,
5778 		      retries,
5779 		      cbfcnp,
5780 		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
5781 		      tag_action,
5782 		      data_ptr,
5783 		      dxfer_len,
5784 		      sense_len,
5785 		      sizeof(*scsi_cmd),
5786 		      timeout);
5787 }
5788 
5789 #endif /* _KERNEL */
5790 
5791 void
5792 scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries,
5793 	     void (*cbfcnp)(struct cam_periph *, union ccb *),
5794 	     uint8_t tag_action, uint8_t service_action, uint64_t zone_id,
5795 	     uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
5796 	     uint8_t sense_len, uint32_t timeout)
5797 {
5798 	struct scsi_zbc_out *scsi_cmd;
5799 
5800 	scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes;
5801 	scsi_cmd->opcode = ZBC_OUT;
5802 	scsi_cmd->service_action = service_action;
5803 	scsi_u64to8b(zone_id, scsi_cmd->zone_id);
5804 	scsi_cmd->zone_flags = zone_flags;
5805 
5806 	cam_fill_csio(csio,
5807 		      retries,
5808 		      cbfcnp,
5809 		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
5810 		      tag_action,
5811 		      data_ptr,
5812 		      dxfer_len,
5813 		      sense_len,
5814 		      sizeof(*scsi_cmd),
5815 		      timeout);
5816 }
5817 
5818 void
5819 scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries,
5820 	    void (*cbfcnp)(struct cam_periph *, union ccb *),
5821 	    uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba,
5822 	    uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len,
5823 	    uint8_t sense_len, uint32_t timeout)
5824 {
5825 	struct scsi_zbc_in *scsi_cmd;
5826 
5827 	scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes;
5828 	scsi_cmd->opcode = ZBC_IN;
5829 	scsi_cmd->service_action = service_action;
5830 	scsi_ulto4b(dxfer_len, scsi_cmd->length);
5831 	scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba);
5832 	scsi_cmd->zone_options = zone_options;
5833 
5834 	cam_fill_csio(csio,
5835 		      retries,
5836 		      cbfcnp,
5837 		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_IN : CAM_DIR_NONE,
5838 		      tag_action,
5839 		      data_ptr,
5840 		      dxfer_len,
5841 		      sense_len,
5842 		      sizeof(*scsi_cmd),
5843 		      timeout);
5844 
5845 }
5846 
/*
 * Build a ZAC MANAGEMENT OUT command for an ATA device behind a SAT
 * layer, using ATA PASS-THROUGH.  With use_ncq == 0 the non-queued
 * ZAC MANAGEMENT OUT command is used; otherwise the NCQ encapsulation
 * (NCQ NON DATA or SEND FPDMA QUEUED) is built instead.  Returns
 * non-zero if the request cannot be encoded (transfer too large, or
 * scsi_ata_pass() itself fails).
 */
int
scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
		      void (*cbfcnp)(struct cam_periph *, union ccb *),
		      uint8_t tag_action, int use_ncq,
		      uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
		      uint8_t *data_ptr, uint32_t dxfer_len,
		      uint8_t *cdb_storage, size_t cdb_storage_len,
		      uint8_t sense_len, uint32_t timeout)
{
	uint8_t command_out, protocol, ata_flags;
	uint16_t features_out;
	uint32_t sectors_out, auxiliary;
	int retval;

	retval = 0;

	if (use_ncq == 0) {
		/* Non-NCQ: zone action and flags go in the FEATURE field. */
		command_out = ATA_ZAC_MANAGEMENT_OUT;
		features_out = (zm_action & 0xf) | (zone_flags << 8);
		ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
		if (dxfer_len == 0) {
			protocol = AP_PROTO_NON_DATA;
			ata_flags |= AP_FLAG_TLEN_NO_DATA;
			sectors_out = 0;
		} else {
			protocol = AP_PROTO_DMA;
			ata_flags |= AP_FLAG_TLEN_SECT_CNT |
				     AP_FLAG_TDIR_TO_DEV;
			/* Transfer length in 512-byte blocks. */
			sectors_out = ((dxfer_len >> 9) & 0xffff);
		}
		auxiliary = 0;
	} else {
		ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
		if (dxfer_len == 0) {
			command_out = ATA_NCQ_NON_DATA;
			features_out = ATA_NCQ_ZAC_MGMT_OUT;
			/*
			 * We're assuming the SCSI to ATA translation layer
			 * will set the NCQ tag number in the tag field.
			 * That isn't clear from the SAT-4 spec (as of rev 05).
			 */
			sectors_out = 0;
			ata_flags |= AP_FLAG_TLEN_NO_DATA;
		} else {
			command_out = ATA_SEND_FPDMA_QUEUED;
			/*
			 * Note that we're defaulting to normal priority,
			 * and assuming that the SCSI to ATA translation
			 * layer will insert the NCQ tag number in the tag
			 * field.  That isn't clear in the SAT-4 spec (as
			 * of rev 05).
			 */
			sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8;

			ata_flags |= AP_FLAG_TLEN_FEAT |
				     AP_FLAG_TDIR_TO_DEV;

			/*
			 * For SEND FPDMA QUEUED, the transfer length is
			 * encoded in the FEATURE register, and 0 means
			 * that 65536 512 byte blocks are to be transferred.
			 * In practice, it seems unlikely that we'll see
			 * a transfer that large, and it may confuse the
			 * SAT layer, because generally that means that
			 * 0 bytes should be transferred.
			 */
			if (dxfer_len == (65536 * 512)) {
				features_out = 0;
			} else if (dxfer_len <= (65535 * 512)) {
				features_out = ((dxfer_len >> 9) & 0xffff);
			} else {
				/* The transfer is too big. */
				retval = 1;
				goto bailout;
			}

		}

		/* For NCQ, the zone action/flags travel in AUXILIARY. */
		auxiliary = (zm_action & 0xf) | (zone_flags << 8);
		protocol = AP_PROTO_FPDMA;
	}

	/* 48-bit command: the extend bit must be set. */
	protocol |= AP_EXTEND;

	retval = scsi_ata_pass(csio,
	    retries,
	    cbfcnp,
	    /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
	    tag_action,
	    /*protocol*/ protocol,
	    /*ata_flags*/ ata_flags,
	    /*features*/ features_out,
	    /*sector_count*/ sectors_out,
	    /*lba*/ zone_id,
	    /*command*/ command_out,
	    /*device*/ 0,
	    /*icc*/ 0,
	    /*auxiliary*/ auxiliary,
	    /*control*/ 0,
	    /*data_ptr*/ data_ptr,
	    /*dxfer_len*/ dxfer_len,
	    /*cdb_storage*/ cdb_storage,
	    /*cdb_storage_len*/ cdb_storage_len,
	    /*minimum_cmd_size*/ 0,
	    /*sense_len*/ SSD_FULL_SIZE,
	    /*timeout*/ timeout);

bailout:

	return (retval);
}
5958 
5959 int
5960 scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries,
5961 		     void (*cbfcnp)(struct cam_periph *, union ccb *),
5962 		     uint8_t tag_action, int use_ncq,
5963 		     uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
5964 		     uint8_t *data_ptr, uint32_t dxfer_len,
5965 		     uint8_t *cdb_storage, size_t cdb_storage_len,
5966 		     uint8_t sense_len, uint32_t timeout)
5967 {
5968 	uint8_t command_out, protocol;
5969 	uint16_t features_out, sectors_out;
5970 	uint32_t auxiliary;
5971 	int ata_flags;
5972 	int retval;
5973 
5974 	retval = 0;
5975 	ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS;
5976 
5977 	if (use_ncq == 0) {
5978 		command_out = ATA_ZAC_MANAGEMENT_IN;
5979 		/* XXX KDM put a macro here */
5980 		features_out = (zm_action & 0xf) | (zone_flags << 8);
5981 		sectors_out = dxfer_len >> 9; /* XXX KDM macro */
5982 		protocol = AP_PROTO_DMA;
5983 		ata_flags |= AP_FLAG_TLEN_SECT_CNT;
5984 		auxiliary = 0;
5985 	} else {
5986 		ata_flags |= AP_FLAG_TLEN_FEAT;
5987 
5988 		command_out = ATA_RECV_FPDMA_QUEUED;
5989 		sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8;
5990 
5991 		/*
5992 		 * For RECEIVE FPDMA QUEUED, the transfer length is
5993 		 * encoded in the FEATURE register, and 0 means
5994 		 * that 65536 512 byte blocks are to be tranferred.
5995 		 * In practice, it seems unlikely that we'll see
5996 		 * a transfer that large, and it may confuse the
5997 		 * the SAT layer, because generally that means that
5998 		 * 0 bytes should be transferred.
5999 		 */
6000 		if (dxfer_len == (65536 * 512)) {
6001 			features_out = 0;
6002 		} else if (dxfer_len <= (65535 * 512)) {
6003 			features_out = ((dxfer_len >> 9) & 0xffff);
6004 		} else {
6005 			/* The transfer is too big. */
6006 			retval = 1;
6007 			goto bailout;
6008 		}
6009 		auxiliary = (zm_action & 0xf) | (zone_flags << 8),
6010 		protocol = AP_PROTO_FPDMA;
6011 	}
6012 
6013 	protocol |= AP_EXTEND;
6014 
6015 	retval = scsi_ata_pass(csio,
6016 	    retries,
6017 	    cbfcnp,
6018 	    /*flags*/ CAM_DIR_IN,
6019 	    tag_action,
6020 	    /*protocol*/ protocol,
6021 	    /*ata_flags*/ ata_flags,
6022 	    /*features*/ features_out,
6023 	    /*sector_count*/ sectors_out,
6024 	    /*lba*/ zone_id,
6025 	    /*command*/ command_out,
6026 	    /*device*/ 0,
6027 	    /*icc*/ 0,
6028 	    /*auxiliary*/ auxiliary,
6029 	    /*control*/ 0,
6030 	    /*data_ptr*/ data_ptr,
6031 	    /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */
6032 	    /*cdb_storage*/ cdb_storage,
6033 	    /*cdb_storage_len*/ cdb_storage_len,
6034 	    /*minimum_cmd_size*/ 0,
6035 	    /*sense_len*/ SSD_FULL_SIZE,
6036 	    /*timeout*/ timeout);
6037 
6038 bailout:
6039 	return (retval);
6040 }
6041