xref: /freebsd/sys/cam/scsi/scsi_da.c (revision 123af6ec70016f5556da5972d4d63c7d175c06d3)
1 /*-
2  * Implementation of SCSI Direct Access Peripheral driver for CAM.
3  *
4  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
5  *
6  * Copyright (c) 1997 Justin T. Gibbs.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions, and the following disclaimer,
14  *    without modification, immediately at the beginning of the file.
15  * 2. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 
36 #ifdef _KERNEL
37 #include "opt_da.h"
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/bio.h>
41 #include <sys/sysctl.h>
42 #include <sys/taskqueue.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/conf.h>
46 #include <sys/devicestat.h>
47 #include <sys/eventhandler.h>
48 #include <sys/malloc.h>
49 #include <sys/cons.h>
50 #include <sys/endian.h>
51 #include <sys/proc.h>
52 #include <sys/sbuf.h>
53 #include <geom/geom.h>
54 #include <geom/geom_disk.h>
55 #include <machine/atomic.h>
56 #endif /* _KERNEL */
57 
58 #ifndef _KERNEL
59 #include <stdio.h>
60 #include <string.h>
61 #endif /* !_KERNEL */
62 
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_periph.h>
66 #include <cam/cam_xpt_periph.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_iosched.h>
69 
70 #include <cam/scsi/scsi_message.h>
71 #include <cam/scsi/scsi_da.h>
72 
73 #ifdef _KERNEL
74 /*
75  * Note that there are probe ordering dependencies here.  The order isn't
76  * controlled by this enumeration, but by explicit state transitions in
77  * dastart() and dadone().  Here are some of the dependencies:
78  *
79  * 1. RC should come first, before RC16, unless there is evidence that RC16
80  *    is supported.
81  * 2. BDC needs to come before any of the ATA probes, or the ZONE probe.
82  * 3. The ATA probes should go in this order:
83  *    ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
84  */
85 typedef enum {
86 	DA_STATE_PROBE_WP,
87 	DA_STATE_PROBE_RC,
88 	DA_STATE_PROBE_RC16,
89 	DA_STATE_PROBE_LBP,
90 	DA_STATE_PROBE_BLK_LIMITS,
91 	DA_STATE_PROBE_BDC,
92 	DA_STATE_PROBE_ATA,
93 	DA_STATE_PROBE_ATA_LOGDIR,
94 	DA_STATE_PROBE_ATA_IDDIR,
95 	DA_STATE_PROBE_ATA_SUP,
96 	DA_STATE_PROBE_ATA_ZONE,
97 	DA_STATE_PROBE_ZONE,
98 	DA_STATE_NORMAL
99 } da_state;
100 
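/*
 * Run-time state flags kept in the softc.  The DA_FLAG_CAN_* bits record
 * optional capabilities discovered while probing the device.
 */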
101 typedef enum {
102 	DA_FLAG_PACK_INVALID	= 0x000001,
103 	DA_FLAG_NEW_PACK	= 0x000002,
104 	DA_FLAG_PACK_LOCKED	= 0x000004,
105 	DA_FLAG_PACK_REMOVABLE	= 0x000008,
106 	DA_FLAG_NEED_OTAG	= 0x000020,
107 	DA_FLAG_WAS_OTAG	= 0x000040,
108 	DA_FLAG_RETRY_UA	= 0x000080,
109 	DA_FLAG_OPEN		= 0x000100,
110 	DA_FLAG_SCTX_INIT	= 0x000200,
111 	DA_FLAG_CAN_RC16	= 0x000400,
112 	DA_FLAG_PROBED		= 0x000800,
113 	DA_FLAG_DIRTY		= 0x001000,
114 	DA_FLAG_ANNOUNCED	= 0x002000,
115 	DA_FLAG_CAN_ATA_DMA	= 0x004000,
116 	DA_FLAG_CAN_ATA_LOG	= 0x008000,
117 	DA_FLAG_CAN_ATA_IDLOG	= 0x010000,
118 	DA_FLAG_CAN_ATA_SUPCAP	= 0x020000,
119 	DA_FLAG_CAN_ATA_ZONE	= 0x040000,
120 	DA_FLAG_TUR_PENDING	= 0x080000
121 } da_flags;
122 
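/*
 * Per-device quirks, normally set from a match against da_quirk_table
 * below.
 */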
123 typedef enum {
124 	DA_Q_NONE		= 0x00,
125 	DA_Q_NO_SYNC_CACHE	= 0x01,
126 	DA_Q_NO_6_BYTE		= 0x02,
127 	DA_Q_NO_PREVENT		= 0x04,
128 	DA_Q_4K			= 0x08,
129 	DA_Q_NO_RC16		= 0x10,
130 	DA_Q_NO_UNMAP		= 0x20,
131 	DA_Q_RETRY_BUSY		= 0x40,
132 	DA_Q_SMR_DM		= 0x80,
133 	DA_Q_STRICT_UNMAP	= 0x100,
134 	DA_Q_128KB		= 0x200
135 } da_quirks;
136 
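/*
 * Bit-name string for decoding da_quirks with the kernel's "%b" printf
 * format: the leading \020 selects hex output and each \<bit number><name>
 * pair labels the corresponding quirk bit.
 */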
137 #define DA_Q_BIT_STRING		\
138 	"\020"			\
139 	"\001NO_SYNC_CACHE"	\
140 	"\002NO_6_BYTE"		\
141 	"\003NO_PREVENT"	\
142 	"\0044K"		\
143 	"\005NO_RC16"		\
144 	"\006NO_UNMAP"		\
145 	"\007RETRY_BUSY"	\
146 	"\010SMR_DM"		\
147 	"\011STRICT_UNMAP"	\
148 	"\012128KB"
149 
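/*
 * Per-CCB state stored in ccb_state (ppriv_field0) so dadone() knows how to
 * handle a completed CCB.  The low bits (DA_CCB_TYPE_MASK) encode the CCB
 * type; DA_CCB_RETRY_UA may be ORed in to request a retry on Unit Attention.
 */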
150 typedef enum {
151 	DA_CCB_PROBE_RC		= 0x01,
152 	DA_CCB_PROBE_RC16	= 0x02,
153 	DA_CCB_PROBE_LBP	= 0x03,
154 	DA_CCB_PROBE_BLK_LIMITS	= 0x04,
155 	DA_CCB_PROBE_BDC	= 0x05,
156 	DA_CCB_PROBE_ATA	= 0x06,
157 	DA_CCB_BUFFER_IO	= 0x07,
158 	DA_CCB_DUMP		= 0x0A,
159 	DA_CCB_DELETE		= 0x0B,
160 	DA_CCB_TUR		= 0x0C,
161 	DA_CCB_PROBE_ZONE	= 0x0D,
162 	DA_CCB_PROBE_ATA_LOGDIR	= 0x0E,
163 	DA_CCB_PROBE_ATA_IDDIR	= 0x0F,
164 	DA_CCB_PROBE_ATA_SUP	= 0x10,
165 	DA_CCB_PROBE_ATA_ZONE	= 0x11,
166 	DA_CCB_PROBE_WP		= 0x12,
167 	DA_CCB_TYPE_MASK	= 0x1F,
168 	DA_CCB_RETRY_UA		= 0x20
169 } da_ccb_state;
170 
171 /*
172  * Order here is important for method choice
173  *
174  * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to
175  * an LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted in 20% quicker
176  * deletes using ATA_TRIM than the corresponding UNMAP results for a
177  * real-world MySQL import taking 5 minutes.
178  *
179  */
180 typedef enum {
181 	DA_DELETE_NONE,
182 	DA_DELETE_DISABLE,
183 	DA_DELETE_ATA_TRIM,
184 	DA_DELETE_UNMAP,
185 	DA_DELETE_WS16,
186 	DA_DELETE_WS10,
187 	DA_DELETE_ZERO,
188 	DA_DELETE_MIN = DA_DELETE_ATA_TRIM,
189 	DA_DELETE_MAX = DA_DELETE_ZERO
190 } da_delete_methods;
191 
192 /*
193  * For SCSI, host managed drives show up as a separate device type.  For
194  * ATA, host managed drives also have a different device signature.
195  * XXX KDM figure out the ATA host managed signature.
196  */
197 typedef enum {
198 	DA_ZONE_NONE		= 0x00,
199 	DA_ZONE_DRIVE_MANAGED	= 0x01,
200 	DA_ZONE_HOST_AWARE	= 0x02,
201 	DA_ZONE_HOST_MANAGED	= 0x03
202 } da_zone_mode;
203 
204 /*
205  * We distinguish between these interface cases in addition to the drive type:
206  * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
207  * o ATA drive behind a SCSI translation layer that does not know about
208  *   ZBC/ZAC, and so needs to be managed via ATA passthrough.  In this
209  *   case, we would need to share the ATA code with the ada(4) driver.
210  * o SCSI drive.
211  */
212 typedef enum {
213 	DA_ZONE_IF_SCSI,
214 	DA_ZONE_IF_ATA_PASS,
215 	DA_ZONE_IF_ATA_SAT,
216 } da_zone_interface;
217 
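/*
 * Zoned-device (SMR) capabilities and limits gathered while probing: the
 * *_SUP bits record which zone commands the device supports, and the *_SET
 * bits record which of the zone counts below have been learned.
 */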
218 typedef enum {
219 	DA_ZONE_FLAG_RZ_SUP		= 0x0001,
220 	DA_ZONE_FLAG_OPEN_SUP		= 0x0002,
221 	DA_ZONE_FLAG_CLOSE_SUP		= 0x0004,
222 	DA_ZONE_FLAG_FINISH_SUP		= 0x0008,
223 	DA_ZONE_FLAG_RWP_SUP		= 0x0010,
224 	DA_ZONE_FLAG_SUP_MASK		= (DA_ZONE_FLAG_RZ_SUP |
225 					   DA_ZONE_FLAG_OPEN_SUP |
226 					   DA_ZONE_FLAG_CLOSE_SUP |
227 					   DA_ZONE_FLAG_FINISH_SUP |
228 					   DA_ZONE_FLAG_RWP_SUP),
229 	DA_ZONE_FLAG_URSWRZ		= 0x0020,
230 	DA_ZONE_FLAG_OPT_SEQ_SET	= 0x0040,
231 	DA_ZONE_FLAG_OPT_NONSEQ_SET	= 0x0080,
232 	DA_ZONE_FLAG_MAX_SEQ_SET	= 0x0100,
233 	DA_ZONE_FLAG_SET_MASK		= (DA_ZONE_FLAG_OPT_SEQ_SET |
234 					   DA_ZONE_FLAG_OPT_NONSEQ_SET |
235 					   DA_ZONE_FLAG_MAX_SEQ_SET)
236 } da_zone_flags;
237 
238 static struct da_zone_desc {
239 	da_zone_flags value;
240 	const char *desc;
241 } da_zone_desc_table[] = {
242 	{DA_ZONE_FLAG_RZ_SUP, "Report Zones" },
243 	{DA_ZONE_FLAG_OPEN_SUP, "Open" },
244 	{DA_ZONE_FLAG_CLOSE_SUP, "Close" },
245 	{DA_ZONE_FLAG_FINISH_SUP, "Finish" },
246 	{DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
247 };
248 
249 typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
250 			      struct bio *bp);
251 static da_delete_func_t da_delete_trim;
252 static da_delete_func_t da_delete_unmap;
253 static da_delete_func_t da_delete_ws;
254 
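/*
 * BIO_DELETE handlers, indexed by da_delete_methods.  DA_DELETE_NONE and
 * DA_DELETE_DISABLE have no handler.
 */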
255 static const void * da_delete_functions[] = {
256 	NULL,
257 	NULL,
258 	da_delete_trim,
259 	da_delete_unmap,
260 	da_delete_ws,
261 	da_delete_ws,
262 	da_delete_ws
263 };
264 
265 static const char *da_delete_method_names[] =
266     { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };
267 static const char *da_delete_method_desc[] =
268     { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP",
269       "WRITE SAME(10) with UNMAP", "ZERO" };
270 
271 /* Offsets into our private area for storing information */
272 #define ccb_state	ppriv_field0
273 #define ccb_bp		ppriv_ptr1
274 
275 struct disk_params {
276 	u_int8_t  heads;
277 	u_int32_t cylinders;
278 	u_int8_t  secs_per_track;
279 	u_int32_t secsize;	/* Number of bytes/sector */
280 	u_int64_t sectors;	/* total number sectors */
281 	u_int     stripesize;
282 	u_int     stripeoffset;
283 };
284 
285 #define UNMAP_RANGE_MAX		0xffffffff
286 #define UNMAP_HEAD_SIZE		8
287 #define UNMAP_RANGE_SIZE	16
288 #define UNMAP_MAX_RANGES	2048 /* Protocol Max is 4095 */
289 #define UNMAP_BUF_SIZE		((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \
290 				UNMAP_HEAD_SIZE)
291 
292 #define WS10_MAX_BLKS		0xffff
293 #define WS16_MAX_BLKS		0xffffffff
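/*
 * Maximum number of ATA DSM TRIM ranges that fit in unmap_buf, rounded down
 * so that the TRIM payload is a whole number of DSM blocks.
 */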
294 #define ATA_TRIM_MAX_RANGES	((UNMAP_BUF_SIZE / \
295 	(ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE)
296 
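/*
 * I/O scheduler work flag used to ask dastart() to issue a periodic
 * TEST UNIT READY for media polling.
 */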
297 #define DA_WORK_TUR		(1 << 16)
298 
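/*
 * Reasons for holding or acquiring a reference on the periph.  When
 * reference tracking is enabled these are also indices into ref_flags[].
 */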
299 typedef enum {
300 	DA_REF_OPEN = 1,
301 	DA_REF_OPEN_HOLD,
302 	DA_REF_CLOSE_HOLD,
303 	DA_REF_PROBE_HOLD,
304 	DA_REF_TUR,
305 	DA_REF_GEOM,
306 	DA_REF_SYSCTL,
307 	DA_REF_REPROBE,
308 	DA_REF_MAX		/* KEEP LAST */
309 } da_ref_token;
310 
311 struct da_softc {
312 	struct   cam_iosched_softc *cam_iosched;
313 	struct	 bio_queue_head delete_run_queue;
314 	LIST_HEAD(, ccb_hdr) pending_ccbs;
315 	int	 refcount;		/* Active xpt_action() calls */
316 	da_state state;
317 	da_flags flags;
318 	da_quirks quirks;
319 	int	 minimum_cmd_size;
320 	int	 error_inject;
321 	int	 trim_max_ranges;
322 	int	 delete_available;	/* Delete methods possibly available */
323 	da_zone_mode			zone_mode;
324 	da_zone_interface		zone_interface;
325 	da_zone_flags			zone_flags;
326 	struct ata_gp_log_dir		ata_logdir;
327 	int				valid_logdir_len;
328 	struct ata_identify_log_pages	ata_iddir;
329 	int				valid_iddir_len;
330 	uint64_t			optimal_seq_zones;
331 	uint64_t			optimal_nonseq_zones;
332 	uint64_t			max_seq_zones;
333 	u_int			maxio;
334 	uint32_t		unmap_max_ranges;
335 	uint32_t		unmap_max_lba; /* Max LBAs in UNMAP req */
336 	uint32_t		unmap_gran;
337 	uint32_t		unmap_gran_align;
338 	uint64_t		ws_max_blks;
339 	uint64_t		trim_count;
340 	uint64_t		trim_ranges;
341 	uint64_t		trim_lbas;
342 	da_delete_methods	delete_method_pref;
343 	da_delete_methods	delete_method;
344 	da_delete_func_t	*delete_func;
345 	int			unmappedio;
346 	int			rotating;
347 	struct	 disk_params params;
348 	struct	 disk *disk;
349 	union	 ccb saved_ccb;
350 	struct task		sysctl_task;
351 	struct sysctl_ctx_list	sysctl_ctx;
352 	struct sysctl_oid	*sysctl_tree;
353 	struct callout		sendordered_c;
354 	uint64_t wwpn;
355 	uint8_t	 unmap_buf[UNMAP_BUF_SIZE];
356 	struct scsi_read_capacity_data_long rcaplong;
357 	struct callout		mediapoll_c;
358 	int			ref_flags[DA_REF_MAX];
359 #ifdef CAM_IO_STATS
360 	struct sysctl_ctx_list	sysctl_stats_ctx;
361 	struct sysctl_oid	*sysctl_stats_tree;
362 	u_int	errors;
363 	u_int	timeouts;
364 	u_int	invalidations;
365 #endif
366 #define DA_ANNOUNCETMP_SZ 160
367 	char			announce_temp[DA_ANNOUNCETMP_SZ];
368 #define DA_ANNOUNCE_SZ 400
369 	char			announcebuf[DA_ANNOUNCE_SZ];
370 };
371 
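/* Mark a delete method as available (or not) in softc->delete_available. */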
372 #define dadeleteflag(softc, delete_method, enable)			\
373 	if (enable) {							\
374 		softc->delete_available |= (1 << delete_method);	\
375 	} else {							\
376 		softc->delete_available &= ~(1 << delete_method);	\
377 	}
378 
379 struct da_quirk_entry {
380 	struct scsi_inquiry_pattern inq_pat;
381 	da_quirks quirks;
382 };
383 
384 static const char quantum[] = "QUANTUM";
385 static const char microp[] = "MICROP";
386 
387 static struct da_quirk_entry da_quirk_table[] =
388 {
389 	/* SPI, FC devices */
390 	{
391 		/*
392 		 * Fujitsu M2513A MO drives.
393 		 * Tested devices: M2513A2 firmware versions 1200 & 1300.
394 		 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
395 		 * Reported by: W.Scholten <whs@xs4all.nl>
396 		 */
397 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
398 		/*quirks*/ DA_Q_NO_SYNC_CACHE
399 	},
400 	{
401 		/* See above. */
402 		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
403 		/*quirks*/ DA_Q_NO_SYNC_CACHE
404 	},
405 	{
406 		/*
407 		 * This particular Fujitsu drive doesn't like the
408 		 * synchronize cache command.
409 		 * Reported by: Tom Jackson <toj@gorilla.net>
410 		 */
411 		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
412 		/*quirks*/ DA_Q_NO_SYNC_CACHE
413 	},
414 	{
415 		/*
416 		 * This drive doesn't like the synchronize cache command
417 		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
418 		 * in NetBSD PR kern/6027, August 24, 1998.
419 		 */
420 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
421 		/*quirks*/ DA_Q_NO_SYNC_CACHE
422 	},
423 	{
424 		/*
425 		 * This drive doesn't like the synchronize cache command
426 		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
427 		 * (PR 8882).
428 		 */
429 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
430 		/*quirks*/ DA_Q_NO_SYNC_CACHE
431 	},
432 	{
433 		/*
434 		 * Doesn't like the synchronize cache command.
435 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
436 		 */
437 		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
438 		/*quirks*/ DA_Q_NO_SYNC_CACHE
439 	},
440 	{
441 		/*
442 		 * Doesn't like the synchronize cache command.
443 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
444 		 */
445 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
446 		/*quirks*/ DA_Q_NO_SYNC_CACHE
447 	},
448 	{
449 		/*
450 		 * Doesn't like the synchronize cache command.
451 		 */
452 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
453 		/*quirks*/ DA_Q_NO_SYNC_CACHE
454 	},
455 	{
456 		/*
457 		 * Doesn't like the synchronize cache command.
458 		 * Reported by: walter@pelissero.de
459 		 */
460 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
461 		/*quirks*/ DA_Q_NO_SYNC_CACHE
462 	},
463 	{
464 		/*
465 		 * Doesn't work correctly with 6 byte reads/writes.
466 		 * Returns illegal request, and points to byte 9 of the
467 		 * 6-byte CDB.
468 		 * Reported by:  Adam McDougall <bsdx@spawnet.com>
469 		 */
470 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
471 		/*quirks*/ DA_Q_NO_6_BYTE
472 	},
473 	{
474 		/* See above. */
475 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
476 		/*quirks*/ DA_Q_NO_6_BYTE
477 	},
478 	{
479 		/*
480 		 * Doesn't like the synchronize cache command.
481 		 * Reported by: walter@pelissero.de
482 		 */
483 		{T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
484 		/*quirks*/ DA_Q_NO_SYNC_CACHE
485 	},
486 	{
487 		/*
488 		 * The CISS RAID controllers do not support SYNC_CACHE
489 		 */
490 		{T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
491 		/*quirks*/ DA_Q_NO_SYNC_CACHE
492 	},
493 	{
494 		/*
495 		 * The STEC SSDs sometimes hang on UNMAP.
496 		 */
497 		{T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"},
498 		/*quirks*/ DA_Q_NO_UNMAP
499 	},
500 	{
501 		/*
502 		 * VMware returns BUSY status when storage has transient
503 		 * connectivity problems, so better wait.
504 		 * Also VMware returns odd errors on misaligned UNMAPs.
505 		 */
506 		{T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"},
507 		/*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP
508 	},
509 	/* USB mass storage devices supported by umass(4) */
510 	{
511 		/*
512 		 * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player
513 		 * PR: kern/51675
514 		 */
515 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"},
516 		/*quirks*/ DA_Q_NO_SYNC_CACHE
517 	},
518 	{
519 		/*
520 		 * Power Quotient Int. (PQI) USB flash key
521 		 * PR: kern/53067
522 		 */
523 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*",
524 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
525 	},
526 	{
527 		/*
528 		 * Creative Nomad MUVO mp3 player (USB)
529 		 * PR: kern/53094
530 		 */
531 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
532 		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
533 	},
534 	{
535 		/*
536 		 * Jungsoft NEXDISK USB flash key
537 		 * PR: kern/54737
538 		 */
539 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"},
540 		/*quirks*/ DA_Q_NO_SYNC_CACHE
541 	},
542 	{
543 		/*
544 		 * FreeDik USB Mini Data Drive
545 		 * PR: kern/54786
546 		 */
547 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive",
548 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
549 	},
550 	{
551 		/*
552 		 * Sigmatel USB Flash MP3 Player
553 		 * PR: kern/57046
554 		 */
555 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
556 		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
557 	},
558 	{
559 		/*
560 		 * Neuros USB Digital Audio Computer
561 		 * PR: kern/63645
562 		 */
563 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.",
564 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
565 	},
566 	{
567 		/*
568 		 * SEAGRAND NP-900 MP3 Player
569 		 * PR: kern/64563
570 		 */
571 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
572 		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
573 	},
574 	{
575 		/*
576 		 * iRiver iFP MP3 player (with UMS Firmware)
577 		 * PR: kern/54881, i386/63941, kern/66124
578 		 */
579 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"},
580 		/*quirks*/ DA_Q_NO_SYNC_CACHE
581 	},
582 	{
583 		/*
584 		 * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01
585 		 * PR: kern/70158
586 		 */
587 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"},
588 		/*quirks*/ DA_Q_NO_SYNC_CACHE
589 	},
590 	{
591 		/*
592 		 * ZICPlay USB MP3 Player with FM
593 		 * PR: kern/75057
594 		 */
595 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"},
596 		/*quirks*/ DA_Q_NO_SYNC_CACHE
597 	},
598 	{
599 		/*
600 		 * TEAC USB floppy mechanisms
601 		 */
602 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"},
603 		/*quirks*/ DA_Q_NO_SYNC_CACHE
604 	},
605 	{
606 		/*
607 		 * Kingston DataTraveler II+ USB Pen-Drive.
608 		 * Reported by: Pawel Jakub Dawidek <pjd@FreeBSD.org>
609 		 */
610 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+",
611 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
612 	},
613 	{
614 		/*
615 		 * USB DISK Pro PMAP
616 		 * Reported by: jhs
617 		 * PR: usb/96381
618 		 */
619 		{T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"},
620 		/*quirks*/ DA_Q_NO_SYNC_CACHE
621 	},
622 	{
623 		/*
624 		 * Motorola E398 Mobile Phone (TransFlash memory card).
625 		 * Reported by: Wojciech A. Koszek <dunstan@FreeBSD.czest.pl>
626 		 * PR: usb/89889
627 		 */
628 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone",
629 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
630 	},
631 	{
632 		/*
633 		 * Qware BeatZkey! Pro
634 		 * PR: usb/79164
635 		 */
636 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE",
637 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
638 	},
639 	{
640 		/*
641 		 * Time DPA20B 1GB MP3 Player
642 		 * PR: usb/81846
643 		 */
644 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*",
645 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
646 	},
647 	{
648 		/*
649 		 * Samsung USB key 128Mb
650 		 * PR: usb/90081
651 		 */
652 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb",
653 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
654 	},
655 	{
656 		/*
657 		 * Kingston DataTraveler 2.0 USB Flash memory.
658 		 * PR: usb/89196
659 		 */
660 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0",
661 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
662 	},
663 	{
664 		/*
665 		 * Creative MUVO Slim mp3 player (USB)
666 		 * PR: usb/86131
667 		 */
668 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
669 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
670 	},
671 	{
672 		/*
673 		 * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3)
674 		 * PR: usb/80487
675 		 */
676 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK",
677 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
678 	},
679 	{
680 		/*
681 		 * SanDisk Micro Cruzer 128MB
682 		 * PR: usb/75970
683 		 */
684 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer",
685 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
686 	},
687 	{
688 		/*
689 		 * TOSHIBA TransMemory USB sticks
690 		 * PR: kern/94660
691 		 */
692 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory",
693 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
694 	},
695 	{
696 		/*
697 		 * PNY USB 3.0 Flash Drives
698 		 */
699 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*",
700 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16
701 	},
702 	{
703 		/*
704 		 * PNY USB Flash keys
705 		 * PR: usb/75578, usb/72344, usb/65436
706 		 */
707 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*",
708 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
709 	},
710 	{
711 		/*
712 		 * Genesys GL3224
713 		 */
714 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
715 		"120?"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_4K | DA_Q_NO_RC16
716 	},
717 	{
718 		/*
719 		 * Genesys 6-in-1 Card Reader
720 		 * PR: usb/94647
721 		 */
722 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
723 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
724 	},
725 	{
726 		/*
727 		 * Rekam Digital CAMERA
728 		 * PR: usb/98713
729 		 */
730 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*",
731 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
732 	},
733 	{
734 		/*
735 		 * iRiver H10 MP3 player
736 		 * PR: usb/102547
737 		 */
738 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*",
739 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
740 	},
741 	{
742 		/*
743 		 * iRiver U10 MP3 player
744 		 * PR: usb/92306
745 		 */
746 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*",
747 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
748 	},
749 	{
750 		/*
751 		 * X-Micro Flash Disk
752 		 * PR: usb/96901
753 		 */
754 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk",
755 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
756 	},
757 	{
758 		/*
759 		 * EasyMP3 EM732X USB 2.0 Flash MP3 Player
760 		 * PR: usb/96546
761 		 */
762 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*",
763 		"1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
764 	},
765 	{
766 		/*
767 		 * Denver MP3 player
768 		 * PR: usb/107101
769 		 */
770 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER",
771 		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
772 	},
773 	{
774 		/*
775 		 * Philips USB Key Audio KEY013
776 		 * PR: usb/68412
777 		 */
778 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
779 		/*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
780 	},
781 	{
782 		/*
783 		 * JNC MP3 Player
784 		 * PR: usb/94439
785 		 */
786 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*",
787 		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
788 	},
789 	{
790 		/*
791 		 * SAMSUNG MP0402H
792 		 * PR: usb/108427
793 		 */
794 		{T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"},
795 		/*quirks*/ DA_Q_NO_SYNC_CACHE
796 	},
797 	{
798 		/*
799 		 * I/O Magic USB flash - Giga Bank
800 		 * PR: usb/108810
801 		 */
802 		{T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"},
803 		/*quirks*/ DA_Q_NO_SYNC_CACHE
804 	},
805 	{
806 		/*
807 		 * JoyFly 128mb USB Flash Drive
808 		 * PR: 96133
809 		 */
810 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*",
811 		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
812 	},
813 	{
814 		/*
815 		 * ChipsBnk usb stick
816 		 * PR: 103702
817 		 */
818 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*",
819 		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
820 	},
821 	{
822 		/*
823 		 * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A
824 		 * PR: 129858
825 		 */
826 		{T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*",
827 		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
828 	},
829 	{
830 		/*
831 		 * Samsung YP-U3 mp3-player
832 		 * PR: 125398
833 		 */
834 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3",
835 		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
836 	},
837 	{
838 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*",
839 		 "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
840 	},
841 	{
842 		/*
843 		 * Sony Cyber-Shot DSC cameras
844 		 * PR: usb/137035
845 		 */
846 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"},
847 		/*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
848 	},
849 	{
850 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3",
851 		 "1.00"}, /*quirks*/ DA_Q_NO_PREVENT
852 	},
853 	{
854 		/* At least several Transcend USB sticks lie about RC16. */
855 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*",
856 		 "*"}, /*quirks*/ DA_Q_NO_RC16
857 	},
858 	{
859 		/*
860 		 * I-O Data USB Flash Disk
861 		 * PR: usb/211716
862 		 */
863 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*",
864 		 "*"}, /*quirks*/ DA_Q_NO_RC16
865 	},
866 	{
867 		/*
868 		 * SLC CHIPFANCIER USB drives
869 		 * PR: usb/234503 (RC10 right, RC16 wrong)
870 		 * 16GB, 32GB and 128GB confirmed to have the same issue
871 		 */
872 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "*SLC", "CHIPFANCIER",
873 		 "*"}, /*quirks*/ DA_Q_NO_RC16
874 	},
875 	/* ATA/SATA devices over SAS/USB/... */
876 	{
877 		/* Sandisk X400 */
878 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SanDisk SD8SB8U1*", "*" },
879 		/*quirks*/DA_Q_128KB
880 	},
881 	{
882 		/* Hitachi Advanced Format (4k) drives */
883 		{ T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" },
884 		/*quirks*/DA_Q_4K
885 	},
886 	{
887 		/* Micron Advanced Format (4k) drives */
888 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" },
889 		/*quirks*/DA_Q_4K
890 	},
891 	{
892 		/* Samsung Advanced Format (4k) drives */
893 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" },
894 		/*quirks*/DA_Q_4K
895 	},
896 	{
897 		/* Samsung Advanced Format (4k) drives */
898 		{ T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" },
899 		/*quirks*/DA_Q_4K
900 	},
901 	{
902 		/* Samsung Advanced Format (4k) drives */
903 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" },
904 		/*quirks*/DA_Q_4K
905 	},
906 	{
907 		/* Samsung Advanced Format (4k) drives */
908 		{ T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" },
909 		/*quirks*/DA_Q_4K
910 	},
911 	{
912 		/* Seagate Barracuda Green Advanced Format (4k) drives */
913 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" },
914 		/*quirks*/DA_Q_4K
915 	},
916 	{
917 		/* Seagate Barracuda Green Advanced Format (4k) drives */
918 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" },
919 		/*quirks*/DA_Q_4K
920 	},
921 	{
922 		/* Seagate Barracuda Green Advanced Format (4k) drives */
923 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" },
924 		/*quirks*/DA_Q_4K
925 	},
926 	{
927 		/* Seagate Barracuda Green Advanced Format (4k) drives */
928 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" },
929 		/*quirks*/DA_Q_4K
930 	},
931 	{
932 		/* Seagate Barracuda Green Advanced Format (4k) drives */
933 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" },
934 		/*quirks*/DA_Q_4K
935 	},
936 	{
937 		/* Seagate Barracuda Green Advanced Format (4k) drives */
938 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" },
939 		/*quirks*/DA_Q_4K
940 	},
941 	{
942 		/* Seagate Momentus Advanced Format (4k) drives */
943 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" },
944 		/*quirks*/DA_Q_4K
945 	},
946 	{
947 		/* Seagate Momentus Advanced Format (4k) drives */
948 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" },
949 		/*quirks*/DA_Q_4K
950 	},
951 	{
952 		/* Seagate Momentus Advanced Format (4k) drives */
953 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" },
954 		/*quirks*/DA_Q_4K
955 	},
956 	{
957 		/* Seagate Momentus Advanced Format (4k) drives */
958 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" },
959 		/*quirks*/DA_Q_4K
960 	},
961 	{
962 		/* Seagate Momentus Advanced Format (4k) drives */
963 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" },
964 		/*quirks*/DA_Q_4K
965 	},
966 	{
967 		/* Seagate Momentus Advanced Format (4k) drives */
968 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" },
969 		/*quirks*/DA_Q_4K
970 	},
971 	{
972 		/* Seagate Momentus Advanced Format (4k) drives */
973 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" },
974 		/*quirks*/DA_Q_4K
975 	},
976 	{
977 		/* Seagate Momentus Advanced Format (4k) drives */
978 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" },
979 		/*quirks*/DA_Q_4K
980 	},
981 	{
982 		/* Seagate Momentus Advanced Format (4k) drives */
983 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" },
984 		/*quirks*/DA_Q_4K
985 	},
986 	{
987 		/* Seagate Momentus Advanced Format (4k) drives */
988 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" },
989 		/*quirks*/DA_Q_4K
990 	},
991 	{
992 		/* Seagate Momentus Advanced Format (4k) drives */
993 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" },
994 		/*quirks*/DA_Q_4K
995 	},
996 	{
997 		/* Seagate Momentus Advanced Format (4k) drives */
998 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" },
999 		/*quirks*/DA_Q_4K
1000 	},
1001 	{
1002 		/* Seagate Momentus Advanced Format (4k) drives */
1003 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" },
1004 		/*quirks*/DA_Q_4K
1005 	},
1006 	{
1007 		/* Seagate Momentus Advanced Format (4k) drives */
1008 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" },
1009 		/*quirks*/DA_Q_4K
1010 	},
1011 	{
1012 		/* Seagate Momentus Thin Advanced Format (4k) drives */
1013 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" },
1014 		/*quirks*/DA_Q_4K
1015 	},
1016 	{
1017 		/* Seagate Momentus Thin Advanced Format (4k) drives */
1018 		{ T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" },
1019 		/*quirks*/DA_Q_4K
1020 	},
1021 	{
1022 		/* WDC Caviar Green Advanced Format (4k) drives */
1023 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" },
1024 		/*quirks*/DA_Q_4K
1025 	},
1026 	{
1027 		/* WDC Caviar Green Advanced Format (4k) drives */
1028 		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" },
1029 		/*quirks*/DA_Q_4K
1030 	},
1031 	{
1032 		/* WDC Caviar Green Advanced Format (4k) drives */
1033 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" },
1034 		/*quirks*/DA_Q_4K
1035 	},
1036 	{
1037 		/* WDC Caviar Green Advanced Format (4k) drives */
1038 		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" },
1039 		/*quirks*/DA_Q_4K
1040 	},
1041 	{
1042 		/* WDC Caviar Green Advanced Format (4k) drives */
1043 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" },
1044 		/*quirks*/DA_Q_4K
1045 	},
1046 	{
1047 		/* WDC Caviar Green Advanced Format (4k) drives */
1048 		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" },
1049 		/*quirks*/DA_Q_4K
1050 	},
1051 	{
1052 		/* WDC Caviar Green Advanced Format (4k) drives */
1053 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" },
1054 		/*quirks*/DA_Q_4K
1055 	},
1056 	{
1057 		/* WDC Caviar Green Advanced Format (4k) drives */
1058 		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" },
1059 		/*quirks*/DA_Q_4K
1060 	},
1061 	{
1062 		/* WDC Scorpio Black Advanced Format (4k) drives */
1063 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" },
1064 		/*quirks*/DA_Q_4K
1065 	},
1066 	{
1067 		/* WDC Scorpio Black Advanced Format (4k) drives */
1068 		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" },
1069 		/*quirks*/DA_Q_4K
1070 	},
1071 	{
1072 		/* WDC Scorpio Black Advanced Format (4k) drives */
1073 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" },
1074 		/*quirks*/DA_Q_4K
1075 	},
1076 	{
1077 		/* WDC Scorpio Black Advanced Format (4k) drives */
1078 		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" },
1079 		/*quirks*/DA_Q_4K
1080 	},
1081 	{
1082 		/* WDC Scorpio Blue Advanced Format (4k) drives */
1083 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" },
1084 		/*quirks*/DA_Q_4K
1085 	},
1086 	{
1087 		/* WDC Scorpio Blue Advanced Format (4k) drives */
1088 		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" },
1089 		/*quirks*/DA_Q_4K
1090 	},
1091 	{
1092 		/* WDC Scorpio Blue Advanced Format (4k) drives */
1093 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" },
1094 		/*quirks*/DA_Q_4K
1095 	},
1096 	{
1097 		/* WDC Scorpio Blue Advanced Format (4k) drives */
1098 		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" },
1099 		/*quirks*/DA_Q_4K
1100 	},
1101 	{
1102 		/*
1103 		 * Olympus digital cameras (C-3040ZOOM, C-2040ZOOM, C-1)
1104 		 * PR: usb/97472
1105 		 */
1106 		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "C*", "*"},
1107 		/*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE
1108 	},
1109 	{
1110 		/*
1111 		 * Olympus digital cameras (D-370)
1112 		 * PR: usb/97472
1113 		 */
1114 		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "D*", "*"},
1115 		/*quirks*/ DA_Q_NO_6_BYTE
1116 	},
1117 	{
1118 		/*
1119 		 * Olympus digital cameras (E-100RS, E-10).
1120 		 * PR: usb/97472
1121 		 */
1122 		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "E*", "*"},
1123 		/*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE
1124 	},
1125 	{
1126 		/*
1127 		 * Olympus FE-210 camera
1128 		 */
1129 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*",
1130 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1131 	},
1132 	{
1133 		/*
1134 		 * Pentax Digital Camera
1135 		 * PR: usb/93389
1136 		 */
1137 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PENTAX", "DIGITAL CAMERA",
1138 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1139 	},
1140 	{
1141 		/*
1142 		 * LG UP3S MP3 player
1143 		 */
1144 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S",
1145 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1146 	},
1147 	{
1148 		/*
1149 		 * Laser MP3-2GA13 MP3 player
1150 		 */
1151 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk",
1152 		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1153 	},
1154 	{
1155 		/*
1156 		 * LaCie external 250GB hard drive designed by Porsche
1157 		 * Submitted by: Ben Stuyts <ben@altesco.nl>
1158 		 * PR: 121474
1159 		 */
1160 		{T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"},
1161 		/*quirks*/ DA_Q_NO_SYNC_CACHE
1162 	},
1163 	/* SATA SSDs */
1164 	{
1165 		/*
1166 		 * Corsair Force 2 SSDs
1167 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1168 		 */
1169 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" },
1170 		/*quirks*/DA_Q_4K
1171 	},
1172 	{
1173 		/*
1174 		 * Corsair Force 3 SSDs
1175 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1176 		 */
1177 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" },
1178 		/*quirks*/DA_Q_4K
1179 	},
1180 	{
1181 		/*
1182 		 * Corsair Neutron GTX SSDs
1183 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1184 		 */
1185 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" },
1186 		/*quirks*/DA_Q_4K
1187 	},
1188 	{
1189 		/*
1190 		 * Corsair Force GT & GS SSDs
1191 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1192 		 */
1193 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" },
1194 		/*quirks*/DA_Q_4K
1195 	},
1196 	{
1197 		/*
1198 		 * Crucial M4 SSDs
1199 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1200 		 */
1201 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" },
1202 		/*quirks*/DA_Q_4K
1203 	},
1204 	{
1205 		/*
1206 		 * Crucial RealSSD C300 SSDs
1207 		 * 4k optimised
1208 		 */
1209 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*",
1210 		"*" }, /*quirks*/DA_Q_4K
1211 	},
1212 	{
1213 		/*
1214 		 * Intel 320 Series SSDs
1215 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1216 		 */
1217 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" },
1218 		/*quirks*/DA_Q_4K
1219 	},
1220 	{
1221 		/*
1222 		 * Intel 330 Series SSDs
1223 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1224 		 */
1225 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" },
1226 		/*quirks*/DA_Q_4K
1227 	},
1228 	{
1229 		/*
1230 		 * Intel 510 Series SSDs
1231 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1232 		 */
1233 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" },
1234 		/*quirks*/DA_Q_4K
1235 	},
1236 	{
1237 		/*
1238 		 * Intel 520 Series SSDs
1239 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1240 		 */
1241 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" },
1242 		/*quirks*/DA_Q_4K
1243 	},
1244 	{
1245 		/*
1246 		 * Intel S3610 Series SSDs
1247 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1248 		 */
1249 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" },
1250 		/*quirks*/DA_Q_4K
1251 	},
1252 	{
1253 		/*
1254 		 * Intel X25-M Series SSDs
1255 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1256 		 */
1257 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" },
1258 		/*quirks*/DA_Q_4K
1259 	},
1260 	{
1261 		/*
1262 		 * Kingston E100 Series SSDs
1263 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1264 		 */
1265 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" },
1266 		/*quirks*/DA_Q_4K
1267 	},
1268 	{
1269 		/*
1270 		 * Kingston HyperX 3k SSDs
1271 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1272 		 */
1273 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" },
1274 		/*quirks*/DA_Q_4K
1275 	},
1276 	{
1277 		/*
1278 		 * Marvell SSDs (entry taken from OpenSolaris)
1279 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1280 		 */
1281 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" },
1282 		/*quirks*/DA_Q_4K
1283 	},
1284 	{
1285 		/*
1286 		 * OCZ Agility 2 SSDs
1287 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1288 		 */
1289 		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" },
1290 		/*quirks*/DA_Q_4K
1291 	},
1292 	{
1293 		/*
1294 		 * OCZ Agility 3 SSDs
1295 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1296 		 */
1297 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" },
1298 		/*quirks*/DA_Q_4K
1299 	},
1300 	{
1301 		/*
1302 		 * OCZ Deneva R Series SSDs
1303 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1304 		 */
1305 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" },
1306 		/*quirks*/DA_Q_4K
1307 	},
1308 	{
1309 		/*
1310 		 * OCZ Vertex 2 SSDs (inc pro series)
1311 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1312 		 */
1313 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" },
1314 		/*quirks*/DA_Q_4K
1315 	},
1316 	{
1317 		/*
1318 		 * OCZ Vertex 3 SSDs
1319 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1320 		 */
1321 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" },
1322 		/*quirks*/DA_Q_4K
1323 	},
1324 	{
1325 		/*
1326 		 * OCZ Vertex 4 SSDs
1327 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1328 		 */
1329 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" },
1330 		/*quirks*/DA_Q_4K
1331 	},
1332 	{
1333 		/*
1334 		 * Samsung 750 Series SSDs
1335 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1336 		 */
1337 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 750*", "*" },
1338 		/*quirks*/DA_Q_4K
1339 	},
1340 	{
1341 		/*
1342 		 * Samsung 830 Series SSDs
1343 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1344 		 */
1345 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" },
1346 		/*quirks*/DA_Q_4K
1347 	},
1348 	{
1349 		/*
1350 		 * Samsung 840 SSDs
1351 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1352 		 */
1353 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" },
1354 		/*quirks*/DA_Q_4K
1355 	},
1356 	{
1357 		/*
1358 		 * Samsung 845 SSDs
1359 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1360 		 */
1361 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 845*", "*" },
1362 		/*quirks*/DA_Q_4K
1363 	},
1364 	{
1365 		/*
1366 		 * Samsung 850 SSDs
1367 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1368 		 */
1369 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" },
1370 		/*quirks*/DA_Q_4K
1371 	},
1372 	{
1373 		/*
1374 		 * Samsung 843T Series SSDs (MZ7WD*)
1375 		 * Samsung PM851 Series SSDs (MZ7TE*)
1376 		 * Samsung PM853T Series SSDs (MZ7GE*)
1377 		 * Samsung SM863 Series SSDs (MZ7KM*)
1378 		 * 4k optimised
1379 		 */
1380 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" },
1381 		/*quirks*/DA_Q_4K
1382 	},
1383 	{
1384 		/*
1385 		 * Same as for SAMSUNG MZ7* above, but also enable the quirks
1386 		 * for SSDs whose model name starts with MZ7*
1387 		 */
1388 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MZ7*", "*" },
1389 		/*quirks*/DA_Q_4K
1390 	},
1391 	{
1392 		/*
1393 		 * SuperTalent TeraDrive CT SSDs
1394 		 * 4k optimised & trim only works in 4k requests + 4k aligned
1395 		 */
1396 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" },
1397 		/*quirks*/DA_Q_4K
1398 	},
1399 	{
1400 		/*
1401 		 * XceedIOPS SATA SSDs
1402 		 * 4k optimised
1403 		 */
1404 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" },
1405 		/*quirks*/DA_Q_4K
1406 	},
1407 	{
1408 		/*
1409 		 * Hama Innostor USB-Stick
1410 		 */
1411 		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" },
1412 		/*quirks*/DA_Q_NO_RC16
1413 	},
1414 	{
1415 		/*
1416 		 * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR)
1417 		 * Drive Managed SATA hard drive.  This drive doesn't report
1418 		 * in firmware that it is a drive managed SMR drive.
1419 		 */
1420 		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS000[23]*", "*" },
1421 		/*quirks*/DA_Q_SMR_DM
1422 	},
1423 	{
1424 		/*
1425 		 * MX-ES USB Drive by Mach Xtreme
1426 		 */
1427 		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"},
1428 		/*quirks*/DA_Q_NO_RC16
1429 	},
1430 };
1431 
1432 static	disk_strategy_t	dastrategy;
1433 static	dumper_t	dadump;
1434 static	periph_init_t	dainit;
1435 static	void		daasync(void *callback_arg, u_int32_t code,
1436 				struct cam_path *path, void *arg);
1437 static	void		dasysctlinit(void *context, int pending);
1438 static	int		dasysctlsofttimeout(SYSCTL_HANDLER_ARGS);
1439 static	int		dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
1440 static	int		dadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
1441 static	int		dazonemodesysctl(SYSCTL_HANDLER_ARGS);
1442 static	int		dazonesupsysctl(SYSCTL_HANDLER_ARGS);
1443 static	int		dadeletemaxsysctl(SYSCTL_HANDLER_ARGS);
1444 static	void		dadeletemethodset(struct da_softc *softc,
1445 					  da_delete_methods delete_method);
1446 static	off_t		dadeletemaxsize(struct da_softc *softc,
1447 					da_delete_methods delete_method);
1448 static	void		dadeletemethodchoose(struct da_softc *softc,
1449 					     da_delete_methods default_method);
1450 static	void		daprobedone(struct cam_periph *periph, union ccb *ccb);
1451 
1452 static	periph_ctor_t	daregister;
1453 static	periph_dtor_t	dacleanup;
1454 static	periph_start_t	dastart;
1455 static	periph_oninv_t	daoninvalidate;
1456 static	void		dazonedone(struct cam_periph *periph, union ccb *ccb);
1457 static	void		dadone(struct cam_periph *periph,
1458 			       union ccb *done_ccb);
1459 static void		dadone_probewp(struct cam_periph *periph,
1460 				       union ccb *done_ccb);
1461 static void		dadone_proberc(struct cam_periph *periph,
1462 				       union ccb *done_ccb);
1463 static void		dadone_probelbp(struct cam_periph *periph,
1464 					union ccb *done_ccb);
1465 static void		dadone_probeblklimits(struct cam_periph *periph,
1466 					      union ccb *done_ccb);
1467 static void		dadone_probebdc(struct cam_periph *periph,
1468 					union ccb *done_ccb);
1469 static void		dadone_probeata(struct cam_periph *periph,
1470 					union ccb *done_ccb);
1471 static void		dadone_probeatalogdir(struct cam_periph *periph,
1472 					      union ccb *done_ccb);
1473 static void		dadone_probeataiddir(struct cam_periph *periph,
1474 					     union ccb *done_ccb);
1475 static void		dadone_probeatasup(struct cam_periph *periph,
1476 					   union ccb *done_ccb);
1477 static void		dadone_probeatazone(struct cam_periph *periph,
1478 					    union ccb *done_ccb);
1479 static void		dadone_probezone(struct cam_periph *periph,
1480 					 union ccb *done_ccb);
1481 static void		dadone_tur(struct cam_periph *periph,
1482 				   union ccb *done_ccb);
1483 static  int		daerror(union ccb *ccb, u_int32_t cam_flags,
1484 				u_int32_t sense_flags);
1485 static void		daprevent(struct cam_periph *periph, int action);
1486 static void		dareprobe(struct cam_periph *periph);
1487 static void		dasetgeom(struct cam_periph *periph, uint32_t block_len,
1488 				  uint64_t maxsector,
1489 				  struct scsi_read_capacity_data_long *rcaplong,
1490 				  size_t rcap_size);
1491 static timeout_t	dasendorderedtag;
1492 static void		dashutdown(void *arg, int howto);
1493 static timeout_t	damediapoll;
1494 
1495 #ifndef	DA_DEFAULT_POLL_PERIOD
1496 #define	DA_DEFAULT_POLL_PERIOD	3
1497 #endif
1498 
1499 #ifndef DA_DEFAULT_TIMEOUT
1500 #define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
1501 #endif
1502 
1503 #ifndef DA_DEFAULT_SOFTTIMEOUT
1504 #define DA_DEFAULT_SOFTTIMEOUT	0
1505 #endif
1506 
1507 #ifndef	DA_DEFAULT_RETRY
1508 #define	DA_DEFAULT_RETRY	4
1509 #endif
1510 
1511 #ifndef	DA_DEFAULT_SEND_ORDERED
1512 #define	DA_DEFAULT_SEND_ORDERED	1
1513 #endif
1514 
1515 static int da_poll_period = DA_DEFAULT_POLL_PERIOD;
1516 static int da_retry_count = DA_DEFAULT_RETRY;
1517 static int da_default_timeout = DA_DEFAULT_TIMEOUT;
1518 static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT;
1519 static int da_send_ordered = DA_DEFAULT_SEND_ORDERED;
1520 static int da_disable_wp_detection = 0;
1521 
1522 static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
1523             "CAM Direct Access Disk driver");
1524 SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN,
1525            &da_poll_period, 0, "Media polling period in seconds");
1526 SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN,
1527            &da_retry_count, 0, "Normal I/O retry count");
1528 SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
1529            &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
1530 SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
1531            &da_send_ordered, 0, "Send Ordered Tags");
1532 SYSCTL_INT(_kern_cam_da, OID_AUTO, disable_wp_detection, CTLFLAG_RWTUN,
1533            &da_disable_wp_detection, 0,
1534 	   "Disable detection of write-protected disks");
1535 
1536 SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout,
1537     CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, dasysctlsofttimeout, "I",
1538     "Soft I/O timeout (ms)");
1539 TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout);
1540 
1541 /*
1542  * DA_ORDEREDTAG_INTERVAL determines how often, relative
1543  * to the default timeout, we check to see whether an ordered
1544  * tagged transaction is appropriate to prevent simple tag
1545  * starvation.  Since we'd like to ensure that there is at least
1546  * 1/2 of the timeout length left for a starved transaction to
1547  * complete after we've sent an ordered tag, we must poll at least
1548  * four times in every timeout period.  This covers the worst case,
1549  * where a starved transaction starts during an interval that still
1550  * passes the "don't send an ordered tag" test, so it takes us two
1551  * intervals to determine that a tag must be sent.
1552  */
1553 #ifndef DA_ORDEREDTAG_INTERVAL
1554 #define DA_ORDEREDTAG_INTERVAL 4
1555 #endif
1556 
1557 static struct periph_driver dadriver =
1558 {
1559 	dainit, "da",
1560 	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
1561 };
1562 
1563 PERIPHDRIVER_DECLARE(da, dadriver);
1564 
1565 static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
1566 
1567 /*
1568  * This driver takes out references / holds in well-defined pairs, never
1569  * recursively. These macros / inline functions enforce those rules. They
1570  * are only enabled with DA_TRACK_REFS or INVARIANTS. If DA_TRACK_REFS is
1571  * defined to be 2 or larger, the tracking also includes debug printfs.
1572  */
1573 #if defined(DA_TRACK_REFS) || defined(INVARIANTS)
1574 
1575 #ifndef DA_TRACK_REFS
1576 #define DA_TRACK_REFS 1
1577 #endif
1578 
1579 #if DA_TRACK_REFS > 1
1580 static const char *da_ref_text[] = {
1581 	"bogus",
1582 	"open",
1583 	"open hold",
1584 	"close hold",
1585 	"reprobe hold",
1586 	"Test Unit Ready",
1587 	"Geom",
1588 	"sysctl",
1589 	"reprobe",
1590 	"max -- also bogus"
1591 };
1592 
1593 #define DA_PERIPH_PRINT(periph, msg, args...)		\
1594 	CAM_PERIPH_PRINT(periph, msg, ##args)
1595 #else
1596 #define DA_PERIPH_PRINT(periph, msg, args...)
1597 #endif
1598 
1599 static inline void
1600 token_sanity(da_ref_token token)
1601 {
1602 	if ((unsigned)token >= DA_REF_MAX)
1603 		panic("Bad token value passed in %d\n", token);
1604 }
1605 
1606 static inline int
1607 da_periph_hold(struct cam_periph *periph, int priority, da_ref_token token)
1608 {
1609 	int err = cam_periph_hold(periph, priority);
1610 
1611 	token_sanity(token);
1612 	DA_PERIPH_PRINT(periph, "Holding device %s (%d): %d\n",
1613 	    da_ref_text[token], token, err);
1614 	if (err == 0) {
1615 		int cnt;
1616 		struct da_softc *softc = periph->softc;
1617 
1618 		cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1);
1619 		if (cnt != 0)
1620 			panic("Re-holding for reason %d, cnt = %d", token, cnt);
1621 	}
1622 	return (err);
1623 }
1624 
1625 static inline void
1626 da_periph_unhold(struct cam_periph *periph, da_ref_token token)
1627 {
1628 	int cnt;
1629 	struct da_softc *softc = periph->softc;
1630 
1631 	token_sanity(token);
1632 	DA_PERIPH_PRINT(periph, "Unholding device %s (%d)\n",
1633 	    da_ref_text[token], token);
1634 	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
1635 	if (cnt != 1)
1636 		panic("Unholding %d with cnt = %d", token, cnt);
1637 	cam_periph_unhold(periph);
1638 }
1639 
1640 static inline int
1641 da_periph_acquire(struct cam_periph *periph, da_ref_token token)
1642 {
1643 	int err = cam_periph_acquire(periph);
1644 
1645 	token_sanity(token);
1646 	DA_PERIPH_PRINT(periph, "acquiring device %s (%d): %d\n",
1647 	    da_ref_text[token], token, err);
1648 	if (err == 0) {
1649 		int cnt;
1650 		struct da_softc *softc = periph->softc;
1651 
1652 		cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1);
1653 		if (cnt != 0)
1654 			panic("Re-refing for reason %d, cnt = %d", token, cnt);
1655 	}
1656 	return (err);
1657 }
1658 
1659 static inline void
1660 da_periph_release(struct cam_periph *periph, da_ref_token token)
1661 {
1662 	int cnt;
1663 	struct da_softc *softc = periph->softc;
1664 
1665 	token_sanity(token);
1666 	DA_PERIPH_PRINT(periph, "releasing device %s (%d)\n",
1667 	    da_ref_text[token], token);
1668 	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
1669 	if (cnt != 1)
1670 		panic("Releasing %d with cnt = %d", token, cnt);
1671 	cam_periph_release(periph);
1672 }
1673 
1674 static inline void
1675 da_periph_release_locked(struct cam_periph *periph, da_ref_token token)
1676 {
1677 	int cnt;
1678 	struct da_softc *softc = periph->softc;
1679 
1680 	token_sanity(token);
1681 	DA_PERIPH_PRINT(periph, "releasing device (locked) %s (%d)\n",
1682 	    da_ref_text[token], token);
1683 	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
1684 	if (cnt != 1)
1685 		panic("Unholding %d with cnt = %d", token, cnt);
1686 	cam_periph_release_locked(periph);
1687 }
1688 
1689 #define cam_periph_hold POISON
1690 #define cam_periph_unhold POISON
1691 #define cam_periph_acquire POISON
1692 #define cam_periph_release POISON
1693 #define cam_periph_release_locked POISON
1694 
1695 #else
1696 #define	da_periph_hold(periph, prio, token)	cam_periph_hold((periph), (prio))
1697 #define da_periph_unhold(periph, token)		cam_periph_unhold((periph))
1698 #define da_periph_acquire(periph, token)	cam_periph_acquire((periph))
1699 #define da_periph_release(periph, token)	cam_periph_release((periph))
1700 #define da_periph_release_locked(periph, token)	cam_periph_release_locked((periph))
1701 #endif
1702 
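/*
 * disk(9) d_open method: take an open reference, reprobe the device and wait
 * for the capacity update, then prevent medium removal on removable media.
 */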
1703 static int
1704 daopen(struct disk *dp)
1705 {
1706 	struct cam_periph *periph;
1707 	struct da_softc *softc;
1708 	int error;
1709 
1710 	periph = (struct cam_periph *)dp->d_drv1;
1711 	if (da_periph_acquire(periph, DA_REF_OPEN) != 0) {
1712 		return (ENXIO);
1713 	}
1714 
1715 	cam_periph_lock(periph);
1716 	if ((error = da_periph_hold(periph, PRIBIO|PCATCH, DA_REF_OPEN_HOLD)) != 0) {
1717 		cam_periph_unlock(periph);
1718 		da_periph_release(periph, DA_REF_OPEN);
1719 		return (error);
1720 	}
1721 
1722 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
1723 	    ("daopen\n"));
1724 
1725 	softc = (struct da_softc *)periph->softc;
1726 	dareprobe(periph);
1727 
1728 	/* Wait for the disk size update.  */
1729 	error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO,
1730 	    "dareprobe", 0);
1731 	if (error != 0)
1732 		xpt_print(periph->path, "unable to retrieve capacity data\n");
1733 
1734 	if (periph->flags & CAM_PERIPH_INVALID)
1735 		error = ENXIO;
1736 
1737 	if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
1738 	    (softc->quirks & DA_Q_NO_PREVENT) == 0)
1739 		daprevent(periph, PR_PREVENT);
1740 
1741 	if (error == 0) {
1742 		softc->flags &= ~DA_FLAG_PACK_INVALID;
1743 		softc->flags |= DA_FLAG_OPEN;
1744 	}
1745 
1746 	da_periph_unhold(periph, DA_REF_OPEN_HOLD);
1747 	cam_periph_unlock(periph);
1748 
1749 	if (error != 0)
1750 		da_periph_release(periph, DA_REF_OPEN);
1751 
1752 	return (error);
1753 }
1754 
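/*
 * disk(9) d_close method: flush the cache if the pack is dirty, re-allow
 * medium removal, and drop the open reference.
 */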
1755 static int
1756 daclose(struct disk *dp)
1757 {
1758 	struct	cam_periph *periph;
1759 	struct	da_softc *softc;
1760 	union	ccb *ccb;
1761 
1762 	periph = (struct cam_periph *)dp->d_drv1;
1763 	softc = (struct da_softc *)periph->softc;
1764 	cam_periph_lock(periph);
1765 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
1766 	    ("daclose\n"));
1767 
1768 	if (da_periph_hold(periph, PRIBIO, DA_REF_CLOSE_HOLD) == 0) {
1769 
1770 		/* Flush disk cache. */
1771 		if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
1772 		    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
1773 		    (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
1774 			ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
1775 			scsi_synchronize_cache(&ccb->csio, /*retries*/1,
1776 			    /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG,
1777 			    /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
1778 			    5 * 60 * 1000);
1779 			cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
1780 			    /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
1781 			    softc->disk->d_devstat);
1782 			softc->flags &= ~DA_FLAG_DIRTY;
1783 			xpt_release_ccb(ccb);
1784 		}
1785 
1786 		/* Allow medium removal. */
1787 		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
1788 		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
1789 			daprevent(periph, PR_ALLOW);
1790 
1791 		da_periph_unhold(periph, DA_REF_CLOSE_HOLD);
1792 	}
1793 
1794 	/*
1795 	 * If we've got removable media, mark the blocksize as
1796 	 * unavailable, since it could change when new media is
1797 	 * inserted.
1798 	 */
1799 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
1800 		softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;
1801 
1802 	softc->flags &= ~DA_FLAG_OPEN;
1803 	while (softc->refcount != 0)
1804 		cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1);
1805 	cam_periph_unlock(periph);
1806 	da_periph_release(periph, DA_REF_OPEN);
1807 	return (0);
1808 }
1809 
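/*
 * Ask the I/O scheduler to dispatch more work, but only once the periph has
 * finished probing (DA_STATE_NORMAL).
 */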
1810 static void
1811 daschedule(struct cam_periph *periph)
1812 {
1813 	struct da_softc *softc = (struct da_softc *)periph->softc;
1814 
1815 	if (softc->state != DA_STATE_NORMAL)
1816 		return;
1817 
1818 	cam_iosched_schedule(softc->cam_iosched, periph);
1819 }
1820 
1821 /*
1822  * Actually translate the requested transfer into one the physical driver
1823  * can understand.  The transfer is described by a bio and will include
1824  * only one physical transfer.
1825  */
1826 static void
1827 dastrategy(struct bio *bp)
1828 {
1829 	struct cam_periph *periph;
1830 	struct da_softc *softc;
1831 
1832 	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1833 	softc = (struct da_softc *)periph->softc;
1834 
1835 	cam_periph_lock(periph);
1836 
1837 	/*
1838 	 * If the device has been made invalid, error out
1839 	 */
1840 	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
1841 		cam_periph_unlock(periph);
1842 		biofinish(bp, NULL, ENXIO);
1843 		return;
1844 	}
1845 
1846 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp));
1847 
1848 	/*
1849 	 * Zone commands must be ordered, because they can depend on the
1850 	 * effects of previously issued commands, and they may affect
1851 	 * commands after them.
1852 	 */
1853 	if (bp->bio_cmd == BIO_ZONE)
1854 		bp->bio_flags |= BIO_ORDERED;
1855 
1856 	/*
1857 	 * Place it in the queue of disk activities for this disk
1858 	 */
1859 	cam_iosched_queue_work(softc->cam_iosched, bp);
1860 
1861 	/*
1862 	 * Schedule ourselves for performing the work.
1863 	 */
1864 	daschedule(periph);
1865 	cam_periph_unlock(periph);
1866 
1867 	return;
1868 }
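/*
 * Note that dastrategy() never builds a CDB itself; the bio only enters the
 * CAM I/O scheduler queue here, and dastart() later pulls it back out with
 * cam_iosched_next_bio() to construct the actual SCSI command.
 */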
1869 
1870 static int
1871 dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
1872 {
1873 	struct	    cam_periph *periph;
1874 	struct	    da_softc *softc;
1875 	u_int	    secsize;
1876 	struct	    ccb_scsiio csio;
1877 	struct	    disk *dp;
1878 	int	    error = 0;
1879 
1880 	dp = arg;
1881 	periph = dp->d_drv1;
1882 	softc = (struct da_softc *)periph->softc;
1883 	secsize = softc->params.secsize;
1884 
1885 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0)
1886 		return (ENXIO);
1887 
1888 	memset(&csio, 0, sizeof(csio));
1889 	if (length > 0) {
1890 		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1891 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
1892 		scsi_read_write(&csio,
1893 				/*retries*/0,
1894 				/*cbfcnp*/NULL,
1895 				MSG_ORDERED_Q_TAG,
1896 				/*read*/SCSI_RW_WRITE,
1897 				/*byte2*/0,
1898 				/*minimum_cmd_size*/ softc->minimum_cmd_size,
1899 				offset / secsize,
1900 				length / secsize,
1901 				/*data_ptr*/(u_int8_t *) virtual,
1902 				/*dxfer_len*/length,
1903 				/*sense_len*/SSD_FULL_SIZE,
1904 				da_default_timeout * 1000);
1905 		error = cam_periph_runccb((union ccb *)&csio, cam_periph_error,
1906 		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
1907 		if (error != 0)
1908 			printf("Aborting dump due to I/O error.\n");
1909 		return (error);
1910 	}
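	/*
	 * Illustrative arithmetic for the conversion above, assuming a
	 * 512-byte sector size: a dump chunk at byte offset 1 MiB with a
	 * length of 64 KiB becomes a WRITE at LBA 2048 (1048576 / 512)
	 * for 128 blocks (65536 / 512).
	 */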
1911 
1912 	/*
1913 	 * Sync the disk cache contents to the physical media.
1914 	 */
1915 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
1916 
1917 		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1918 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
1919 		scsi_synchronize_cache(&csio,
1920 				       /*retries*/0,
1921 				       /*cbfcnp*/NULL,
1922 				       MSG_SIMPLE_Q_TAG,
1923 				       /*begin_lba*/0,/* Cover the whole disk */
1924 				       /*lb_count*/0,
1925 				       SSD_FULL_SIZE,
1926 				       5 * 1000);
1927 		error = cam_periph_runccb((union ccb *)&csio, cam_periph_error,
1928 		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
1929 		if (error != 0)
1930 			xpt_print(periph->path, "Synchronize cache failed\n");
1931 	}
1932 	return (error);
1933 }
1934 
1935 static int
1936 dagetattr(struct bio *bp)
1937 {
1938 	int ret;
1939 	struct cam_periph *periph;
1940 
1941 	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1942 	cam_periph_lock(periph);
1943 	ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
1944 	    periph->path);
1945 	cam_periph_unlock(periph);
1946 	if (ret == 0)
1947 		bp->bio_completed = bp->bio_length;
1948 	return ret;
1949 }
1950 
1951 static void
1952 dainit(void)
1953 {
1954 	cam_status status;
1955 
1956 	/*
1957 	 * Install a global async callback.  This callback will
1958 	 * receive async callbacks like "new device found".
1959 	 */
1960 	status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);
1961 
1962 	if (status != CAM_REQ_CMP) {
1963 		printf("da: Failed to attach master async callback "
1964 		       "due to status 0x%x!\n", status);
1965 	} else if (da_send_ordered) {
1966 
1967 		/* Register our shutdown event handler */
1968 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
1969 					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
1970 		    printf("dainit: shutdown event registration failed!\n");
1971 	}
1972 }
1973 
1974 /*
1975  * Callback from GEOM, called when it has finished cleaning up its
1976  * resources.
1977  */
1978 static void
1979 dadiskgonecb(struct disk *dp)
1980 {
1981 	struct cam_periph *periph;
1982 
1983 	periph = (struct cam_periph *)dp->d_drv1;
1984 	da_periph_release(periph, DA_REF_GEOM);
1985 }
1986 
1987 static void
1988 daoninvalidate(struct cam_periph *periph)
1989 {
1990 	struct da_softc *softc;
1991 
1992 	cam_periph_assert(periph, MA_OWNED);
1993 	softc = (struct da_softc *)periph->softc;
1994 
1995 	/*
1996 	 * De-register any async callbacks.
1997 	 */
1998 	xpt_register_async(0, daasync, periph, periph->path);
1999 
2000 	softc->flags |= DA_FLAG_PACK_INVALID;
2001 #ifdef CAM_IO_STATS
2002 	softc->invalidations++;
2003 #endif
2004 
2005 	/*
2006 	 * Return all queued I/O with ENXIO.
2007 	 * XXX Handle any transactions queued to the card
2008 	 *     with XPT_ABORT_CCB.
2009 	 */
2010 	cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);
2011 
2012 	/*
2013 	 * Tell GEOM that we've gone away, we'll get a callback when it is
2014 	 * done cleaning up its resources.
2015 	 */
2016 	disk_gone(softc->disk);
2017 }
2018 
2019 static void
2020 dacleanup(struct cam_periph *periph)
2021 {
2022 	struct da_softc *softc;
2023 
2024 	softc = (struct da_softc *)periph->softc;
2025 
2026 	cam_periph_unlock(periph);
2027 
2028 	cam_iosched_fini(softc->cam_iosched);
2029 
2030 	/*
2031 	 * If we can't free the sysctl tree, oh well...
2032 	 */
2033 	if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) {
2034 #ifdef CAM_IO_STATS
2035 		if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
2036 			xpt_print(periph->path,
2037 			    "can't remove sysctl stats context\n");
2038 #endif
2039 		if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
2040 			xpt_print(periph->path,
2041 			    "can't remove sysctl context\n");
2042 	}
2043 
2044 	callout_drain(&softc->mediapoll_c);
2045 	disk_destroy(softc->disk);
2046 	callout_drain(&softc->sendordered_c);
2047 	free(softc, M_DEVBUF);
2048 	cam_periph_lock(periph);
2049 }
2050 
2051 static void
2052 daasync(void *callback_arg, u_int32_t code,
2053 	struct cam_path *path, void *arg)
2054 {
2055 	struct cam_periph *periph;
2056 	struct da_softc *softc;
2057 
2058 	periph = (struct cam_periph *)callback_arg;
2059 	switch (code) {
2060 	case AC_FOUND_DEVICE:	/* callback to create periph, no locking yet */
2061 	{
2062 		struct ccb_getdev *cgd;
2063 		cam_status status;
2064 
2065 		cgd = (struct ccb_getdev *)arg;
2066 		if (cgd == NULL)
2067 			break;
2068 
2069 		if (cgd->protocol != PROTO_SCSI)
2070 			break;
2071 		if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
2072 			break;
2073 		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
2074 		    && SID_TYPE(&cgd->inq_data) != T_RBC
2075 		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL
2076 		    && SID_TYPE(&cgd->inq_data) != T_ZBC_HM)
2077 			break;
2078 
2079 		/*
2080 		 * Allocate a peripheral instance for
2081 		 * this device and start the probe
2082 		 * process.
2083 		 */
2084 		status = cam_periph_alloc(daregister, daoninvalidate,
2085 					  dacleanup, dastart,
2086 					  "da", CAM_PERIPH_BIO,
2087 					  path, daasync,
2088 					  AC_FOUND_DEVICE, cgd);
2089 
2090 		if (status != CAM_REQ_CMP
2091 		 && status != CAM_REQ_INPROG)
2092 			printf("daasync: Unable to attach to new device "
2093 				"due to status 0x%x\n", status);
2094 		return;
2095 	}
2096 	case AC_ADVINFO_CHANGED:	/* Doesn't touch periph */
2097 	{
2098 		uintptr_t buftype;
2099 
2100 		buftype = (uintptr_t)arg;
2101 		if (buftype == CDAI_TYPE_PHYS_PATH) {
2102 			struct da_softc *softc;
2103 
2104 			softc = periph->softc;
2105 			disk_attr_changed(softc->disk, "GEOM::physpath",
2106 					  M_NOWAIT);
2107 		}
2108 		break;
2109 	}
2110 	case AC_UNIT_ATTENTION:
2111 	{
2112 		union ccb *ccb;
2113 		int error_code, sense_key, asc, ascq;
2114 
2115 		softc = (struct da_softc *)periph->softc;
2116 		ccb = (union ccb *)arg;
2117 
2118 		/*
2119 		 * Handle all UNIT ATTENTIONs except our own, as they will be
2120 		 * handled by daerror(). Since this comes from a different periph,
2121 		 * that periph's lock is held, not ours, so we have to take our
2122 		 * own lock before touching the softc flags.
2123 		 */
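		/*
		 * The ASC/ASCQ pairs tested below are standard SPC additional
		 * sense codes: 0x2A/0x09 "capacity data has changed",
		 * 0x28/0x00 "not ready to ready change, medium may have
		 * changed" and 0x3F/0x03 "INQUIRY data has changed".
		 */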
2124 		if (xpt_path_periph(ccb->ccb_h.path) != periph &&
2125 		    scsi_extract_sense_ccb(ccb,
2126 		     &error_code, &sense_key, &asc, &ascq)) {
2127 			if (asc == 0x2A && ascq == 0x09) {
2128 				xpt_print(ccb->ccb_h.path,
2129 				    "Capacity data has changed\n");
2130 				cam_periph_lock(periph);
2131 				softc->flags &= ~DA_FLAG_PROBED;
2132 				cam_periph_unlock(periph);
2133 				dareprobe(periph);
2134 			} else if (asc == 0x28 && ascq == 0x00) {
2135 				cam_periph_lock(periph);
2136 				softc->flags &= ~DA_FLAG_PROBED;
2137 				cam_periph_unlock(periph);
2138 				disk_media_changed(softc->disk, M_NOWAIT);
2139 			} else if (asc == 0x3F && ascq == 0x03) {
2140 				xpt_print(ccb->ccb_h.path,
2141 				    "INQUIRY data has changed\n");
2142 				cam_periph_lock(periph);
2143 				softc->flags &= ~DA_FLAG_PROBED;
2144 				cam_periph_unlock(periph);
2145 				dareprobe(periph);
2146 			}
2147 		}
2148 		break;
2149 	}
2150 	case AC_SCSI_AEN:		/* Called for this path: periph locked */
2151 		/*
2152 		 * Appears to be currently unused for SCSI devices; only ATA SIMs
2153 		 * generate this.
2154 		 */
2155 		cam_periph_assert(periph, MA_OWNED);
2156 		softc = (struct da_softc *)periph->softc;
2157 		if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
2158 		    (softc->flags & DA_FLAG_TUR_PENDING) == 0) {
2159 			if (da_periph_acquire(periph, DA_REF_TUR) == 0) {
2160 				cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
2161 				daschedule(periph);
2162 			}
2163 		}
2164 		/* FALLTHROUGH */
2165 	case AC_SENT_BDR:		/* Called for this path: periph locked */
2166 	case AC_BUS_RESET:		/* Called for this path: periph locked */
2167 	{
2168 		struct ccb_hdr *ccbh;
2169 
2170 		cam_periph_assert(periph, MA_OWNED);
2171 		softc = (struct da_softc *)periph->softc;
2172 		/*
2173 		 * Don't fail on the expected unit attention
2174 		 * that will occur.
2175 		 */
2176 		softc->flags |= DA_FLAG_RETRY_UA;
2177 		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
2178 			ccbh->ccb_state |= DA_CCB_RETRY_UA;
2179 		break;
2180 	}
2181 	case AC_INQ_CHANGED:		/* Called for this path: periph locked */
2182 		cam_periph_assert(periph, MA_OWNED);
2183 		softc = (struct da_softc *)periph->softc;
2184 		softc->flags &= ~DA_FLAG_PROBED;
2185 		dareprobe(periph);
2186 		break;
2187 	default:
2188 		break;
2189 	}
2190 	cam_periph_async(periph, code, path, arg);
2191 }
2192 
2193 static void
2194 dasysctlinit(void *context, int pending)
2195 {
2196 	struct cam_periph *periph;
2197 	struct da_softc *softc;
2198 	char tmpstr[32], tmpstr2[16];
2199 	struct ccb_trans_settings cts;
2200 
2201 	periph = (struct cam_periph *)context;
2202 	/*
2203 	 * periph was held for us when this task was enqueued
2204 	 */
2205 	if (periph->flags & CAM_PERIPH_INVALID) {
2206 		da_periph_release(periph, DA_REF_SYSCTL);
2207 		return;
2208 	}
2209 
2210 	softc = (struct da_softc *)periph->softc;
2211 	snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number);
2212 	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
2213 
2214 	sysctl_ctx_init(&softc->sysctl_ctx);
2215 	cam_periph_lock(periph);
2216 	softc->flags |= DA_FLAG_SCTX_INIT;
2217 	cam_periph_unlock(periph);
2218 	softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx,
2219 		SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
2220 		CTLFLAG_RD, 0, tmpstr, "device_index");
2221 	if (softc->sysctl_tree == NULL) {
2222 		printf("dasysctlinit: unable to allocate sysctl tree\n");
2223 		da_periph_release(periph, DA_REF_SYSCTL);
2224 		return;
2225 	}
2226 
2227 	/*
2228 	 * Now register the sysctl handler, so the user can change the value on
2229 	 * the fly.
2230 	 */
2231 	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2232 		OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN,
2233 		softc, 0, dadeletemethodsysctl, "A",
2234 		"BIO_DELETE execution method");
2235 	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2236 		OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW,
2237 		softc, 0, dadeletemaxsysctl, "Q",
2238 		"Maximum BIO_DELETE size");
2239 	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2240 		OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
2241 		&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
2242 		"Minimum CDB size");
2243 	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2244 		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2245 		"trim_count", CTLFLAG_RD, &softc->trim_count,
2246 		"Total number of unmap/dsm commands sent");
2247 	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2248 		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2249 		"trim_ranges", CTLFLAG_RD, &softc->trim_ranges,
2250 		"Total number of ranges in unmap/dsm commands");
2251 	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2252 		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2253 		"trim_lbas", CTLFLAG_RD, &softc->trim_lbas,
2254 		"Total lbas in the unmap/dsm commands sent");
2255 
2256 	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2257 		OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD,
2258 		softc, 0, dazonemodesysctl, "A",
2259 		"Zone Mode");
2260 	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2261 		OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD,
2262 		softc, 0, dazonesupsysctl, "A",
2263 		"Zone Support");
2264 	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2265 		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2266 		"optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
2267 		"Optimal Number of Open Sequential Write Preferred Zones");
2268 	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2269 		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2270 		"optimal_nonseq_zones", CTLFLAG_RD,
2271 		&softc->optimal_nonseq_zones,
2272 		"Optimal Number of Non-Sequentially Written Sequential Write "
2273 		"Preferred Zones");
2274 	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2275 		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2276 		"max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
2277 		"Maximum Number of Open Sequential Write Required Zones");
2278 
2279 	SYSCTL_ADD_INT(&softc->sysctl_ctx,
2280 		       SYSCTL_CHILDREN(softc->sysctl_tree),
2281 		       OID_AUTO,
2282 		       "error_inject",
2283 		       CTLFLAG_RW,
2284 		       &softc->error_inject,
2285 		       0,
2286 		       "error_inject leaf");
2287 
2288 	SYSCTL_ADD_INT(&softc->sysctl_ctx,
2289 		       SYSCTL_CHILDREN(softc->sysctl_tree),
2290 		       OID_AUTO,
2291 		       "unmapped_io",
2292 		       CTLFLAG_RD,
2293 		       &softc->unmappedio,
2294 		       0,
2295 		       "Unmapped I/O leaf");
2296 
2297 	SYSCTL_ADD_INT(&softc->sysctl_ctx,
2298 		       SYSCTL_CHILDREN(softc->sysctl_tree),
2299 		       OID_AUTO,
2300 		       "rotating",
2301 		       CTLFLAG_RD,
2302 		       &softc->rotating,
2303 		       0,
2304 		       "Rotating media");
2305 
2306 #ifdef CAM_TEST_FAILURE
2307 	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2308 		OID_AUTO, "invalidate", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE,
2309 		periph, 0, cam_periph_invalidate_sysctl, "I",
2310 		"Write 1 to invalidate the drive immediately");
2311 #endif
2312 
2313 	/*
2314 	 * Add some addressing info.
2315 	 */
2316 	memset(&cts, 0, sizeof (cts));
2317 	xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
2318 	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
2319 	cts.type = CTS_TYPE_CURRENT_SETTINGS;
2320 	cam_periph_lock(periph);
2321 	xpt_action((union ccb *)&cts);
2322 	cam_periph_unlock(periph);
2323 	if (cts.ccb_h.status != CAM_REQ_CMP) {
2324 		da_periph_release(periph, DA_REF_SYSCTL);
2325 		return;
2326 	}
2327 	if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) {
2328 		struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
2329 		if (fc->valid & CTS_FC_VALID_WWPN) {
2330 			softc->wwpn = fc->wwpn;
2331 			SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2332 			    SYSCTL_CHILDREN(softc->sysctl_tree),
2333 			    OID_AUTO, "wwpn", CTLFLAG_RD,
2334 			    &softc->wwpn, "World Wide Port Name");
2335 		}
2336 	}
2337 
2338 #ifdef CAM_IO_STATS
2339 	/*
2340 	 * Now add some useful stats.
2341 	 * XXX These should live in cam_periph and be common to all periphs
2342 	 */
2343 	softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx,
2344 	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats",
2345 	    CTLFLAG_RD, 0, "Statistics");
2346 	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
2347 		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
2348 		       OID_AUTO,
2349 		       "errors",
2350 		       CTLFLAG_RD,
2351 		       &softc->errors,
2352 		       0,
2353 		       "Transport errors reported by the SIM");
2354 	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
2355 		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
2356 		       OID_AUTO,
2357 		       "timeouts",
2358 		       CTLFLAG_RD,
2359 		       &softc->timeouts,
2360 		       0,
2361 		       "Device timeouts reported by the SIM");
2362 	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
2363 		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
2364 		       OID_AUTO,
2365 		       "pack_invalidations",
2366 		       CTLFLAG_RD,
2367 		       &softc->invalidations,
2368 		       0,
2369 		       "Device pack invalidations");
2370 #endif
2371 
2372 	cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx,
2373 	    softc->sysctl_tree);
2374 
2375 	da_periph_release(periph, DA_REF_SYSCTL);
2376 }
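/*
 * All of the nodes created above live under kern.cam.da.<unit>.  A minimal
 * usage sketch from userland (unit 0 is only an example):
 *
 *	sysctl kern.cam.da.0.delete_method
 *	sysctl kern.cam.da.0.minimum_cmd_size
 *	sysctl kern.cam.da.0.stats.errors	(only with CAM_IO_STATS)
 */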
2377 
2378 static int
2379 dadeletemaxsysctl(SYSCTL_HANDLER_ARGS)
2380 {
2381 	int error;
2382 	uint64_t value;
2383 	struct da_softc *softc;
2384 
2385 	softc = (struct da_softc *)arg1;
2386 
2387 	value = softc->disk->d_delmaxsize;
2388 	error = sysctl_handle_64(oidp, &value, 0, req);
2389 	if ((error != 0) || (req->newptr == NULL))
2390 		return (error);
2391 
2392 	/* Only accept values no larger than the calculated maximum. */
2393 	if (value > dadeletemaxsize(softc, softc->delete_method)) {
2394 		return (EINVAL);
2395 	}
2396 	softc->disk->d_delmaxsize = value;
2397 
2398 	return (0);
2399 }
2400 
2401 static int
2402 dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
2403 {
2404 	int error, value;
2405 
2406 	value = *(int *)arg1;
2407 
2408 	error = sysctl_handle_int(oidp, &value, 0, req);
2409 
2410 	if ((error != 0)
2411 	 || (req->newptr == NULL))
2412 		return (error);
2413 
2414 	/*
2415 	 * Acceptable values here are 6, 10, 12 or 16.
2416 	 */
2417 	if (value < 6)
2418 		value = 6;
2419 	else if ((value > 6)
2420 	      && (value <= 10))
2421 		value = 10;
2422 	else if ((value > 10)
2423 	      && (value <= 12))
2424 		value = 12;
2425 	else if (value > 12)
2426 		value = 16;
2427 
2428 	*(int *)arg1 = value;
2429 
2430 	return (0);
2431 }
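/*
 * The handler above rounds a requested size up to the next valid CDB size,
 * so, for example, writing 7 yields 10, writing 11 yields 12, and anything
 * larger than 12 yields 16.
 */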
2432 
2433 static int
2434 dasysctlsofttimeout(SYSCTL_HANDLER_ARGS)
2435 {
2436 	sbintime_t value;
2437 	int error;
2438 
2439 	value = da_default_softtimeout / SBT_1MS;
2440 
2441 	error = sysctl_handle_int(oidp, (int *)&value, 0, req);
2442 	if ((error != 0) || (req->newptr == NULL))
2443 		return (error);
2444 
2445 	/* XXX Should clip this to a reasonable level */
2446 	if (value > da_default_timeout * 1000)
2447 		return (EINVAL);
2448 
2449 	da_default_softtimeout = value * SBT_1MS;
2450 	return (0);
2451 }
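/*
 * The soft timeout is exposed in milliseconds and stored as an sbintime_t;
 * the handler above rejects values larger than the hard command timeout,
 * i.e. da_default_timeout (seconds) * 1000.
 */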
2452 
2453 static void
2454 dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method)
2455 {
2456 
2457 	softc->delete_method = delete_method;
2458 	softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method);
2459 	softc->delete_func = da_delete_functions[delete_method];
2460 
2461 	if (softc->delete_method > DA_DELETE_DISABLE)
2462 		softc->disk->d_flags |= DISKFLAG_CANDELETE;
2463 	else
2464 		softc->disk->d_flags &= ~DISKFLAG_CANDELETE;
2465 }
2466 
2467 static off_t
2468 dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method)
2469 {
2470 	off_t sectors;
2471 
2472 	switch(delete_method) {
2473 	case DA_DELETE_UNMAP:
2474 		sectors = (off_t)softc->unmap_max_lba;
2475 		break;
2476 	case DA_DELETE_ATA_TRIM:
2477 		sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges;
2478 		break;
2479 	case DA_DELETE_WS16:
2480 		sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS);
2481 		break;
2482 	case DA_DELETE_ZERO:
2483 	case DA_DELETE_WS10:
2484 		sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS);
2485 		break;
2486 	default:
2487 		return 0;
2488 	}
2489 
2490 	return (off_t)softc->params.secsize *
2491 	    omin(sectors, softc->params.sectors);
2492 }
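/*
 * A worked example of the sizing above (illustrative, and assuming 512-byte
 * sectors with WS10_MAX_BLKS reflecting the 16-bit WRITE SAME(10) transfer
 * length of 0xffff blocks): for DA_DELETE_WS10 the ceiling is
 * min(ws_max_blks, 0xffff, capacity) * 512, i.e. at most roughly 32 MiB per
 * BIO_DELETE.
 */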
2493 
2494 static void
2495 daprobedone(struct cam_periph *periph, union ccb *ccb)
2496 {
2497 	struct da_softc *softc;
2498 
2499 	softc = (struct da_softc *)periph->softc;
2500 
2501 	cam_periph_assert(periph, MA_OWNED);
2502 
2503 	dadeletemethodchoose(softc, DA_DELETE_NONE);
2504 
2505 	if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
2506 		char buf[80];
2507 		int i, sep;
2508 
2509 		snprintf(buf, sizeof(buf), "Delete methods: <");
2510 		sep = 0;
2511 		for (i = 0; i <= DA_DELETE_MAX; i++) {
2512 			if ((softc->delete_available & (1 << i)) == 0 &&
2513 			    i != softc->delete_method)
2514 				continue;
2515 			if (sep)
2516 				strlcat(buf, ",", sizeof(buf));
2517 			strlcat(buf, da_delete_method_names[i],
2518 			    sizeof(buf));
2519 			if (i == softc->delete_method)
2520 				strlcat(buf, "(*)", sizeof(buf));
2521 			sep = 1;
2522 		}
2523 		strlcat(buf, ">", sizeof(buf));
2524 		printf("%s%d: %s\n", periph->periph_name,
2525 		    periph->unit_number, buf);
2526 	}
2527 	if ((softc->disk->d_flags & DISKFLAG_WRITE_PROTECT) != 0 &&
2528 	    (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
2529 		printf("%s%d: Write Protected\n", periph->periph_name,
2530 		    periph->unit_number);
2531 	}
2532 
2533 	/*
2534 	 * Since our peripheral may be invalidated by an error
2535 	 * above or an external event, we must release our CCB
2536 	 * before releasing the probe lock on the peripheral.
2537 	 * The peripheral will only go away once the last lock
2538 	 * is removed, and we need it around for the CCB release
2539 	 * operation.
2540 	 */
2541 	xpt_release_ccb(ccb);
2542 	softc->state = DA_STATE_NORMAL;
2543 	softc->flags |= DA_FLAG_PROBED;
2544 	daschedule(periph);
2545 	wakeup(&softc->disk->d_mediasize);
2546 	if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) {
2547 		softc->flags |= DA_FLAG_ANNOUNCED;
2548 		da_periph_unhold(periph, DA_REF_PROBE_HOLD);
2549 	} else
2550 		da_periph_release_locked(periph, DA_REF_REPROBE);
2551 }
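/*
 * With bootverbose set, the loop above produces a one line summary such as
 * "da0: Delete methods: <UNMAP(*),WS16,ZERO>" (illustrative; the names come
 * from da_delete_method_names[] and "(*)" marks the method currently in use).
 */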
2552 
2553 static void
2554 dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method)
2555 {
2556 	int i, methods;
2557 
2558 	/* If available, prefer the method requested by user. */
2559 	i = softc->delete_method_pref;
2560 	methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
2561 	if (methods & (1 << i)) {
2562 		dadeletemethodset(softc, i);
2563 		return;
2564 	}
2565 
2566 	/* Use the pre-defined order to choose the best performing delete. */
2567 	for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
2568 		if (i == DA_DELETE_ZERO)
2569 			continue;
2570 		if (softc->delete_available & (1 << i)) {
2571 			dadeletemethodset(softc, i);
2572 			return;
2573 		}
2574 	}
2575 
2576 	/* Fallback to default. */
2577 	dadeletemethodset(softc, default_method);
2578 }
2579 
2580 static int
2581 dadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
2582 {
2583 	char buf[16];
2584 	const char *p;
2585 	struct da_softc *softc;
2586 	int i, error, value;
2587 
2588 	softc = (struct da_softc *)arg1;
2589 
2590 	value = softc->delete_method;
2591 	if (value < 0 || value > DA_DELETE_MAX)
2592 		p = "UNKNOWN";
2593 	else
2594 		p = da_delete_method_names[value];
2595 	strncpy(buf, p, sizeof(buf));
2596 	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
2597 	if (error != 0 || req->newptr == NULL)
2598 		return (error);
2599 	for (i = 0; i <= DA_DELETE_MAX; i++) {
2600 		if (strcmp(buf, da_delete_method_names[i]) == 0)
2601 			break;
2602 	}
2603 	if (i > DA_DELETE_MAX)
2604 		return (EINVAL);
2605 	softc->delete_method_pref = i;
2606 	dadeletemethodchoose(softc, DA_DELETE_NONE);
2607 	return (0);
2608 }
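/*
 * Usage sketch for the handler above (unit 0 and the exact "UNMAP" spelling
 * are only examples; the accepted strings are whatever
 * da_delete_method_names[] contains):
 *
 *	sysctl kern.cam.da.0.delete_method=UNMAP
 */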
2609 
2610 static int
2611 dazonemodesysctl(SYSCTL_HANDLER_ARGS)
2612 {
2613 	char tmpbuf[40];
2614 	struct da_softc *softc;
2615 	int error;
2616 
2617 	softc = (struct da_softc *)arg1;
2618 
2619 	switch (softc->zone_mode) {
2620 	case DA_ZONE_DRIVE_MANAGED:
2621 		snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed");
2622 		break;
2623 	case DA_ZONE_HOST_AWARE:
2624 		snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware");
2625 		break;
2626 	case DA_ZONE_HOST_MANAGED:
2627 		snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed");
2628 		break;
2629 	case DA_ZONE_NONE:
2630 	default:
2631 		snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned");
2632 		break;
2633 	}
2634 
2635 	error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req);
2636 
2637 	return (error);
2638 }
2639 
2640 static int
2641 dazonesupsysctl(SYSCTL_HANDLER_ARGS)
2642 {
2643 	char tmpbuf[180];
2644 	struct da_softc *softc;
2645 	struct sbuf sb;
2646 	int error, first;
2647 	unsigned int i;
2648 
2649 	softc = (struct da_softc *)arg1;
2650 
2651 	error = 0;
2652 	first = 1;
2653 	sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0);
2654 
2655 	for (i = 0; i < sizeof(da_zone_desc_table) /
2656 	     sizeof(da_zone_desc_table[0]); i++) {
2657 		if (softc->zone_flags & da_zone_desc_table[i].value) {
2658 			if (first == 0)
2659 				sbuf_printf(&sb, ", ");
2660 			else
2661 				first = 0;
2662 			sbuf_cat(&sb, da_zone_desc_table[i].desc);
2663 		}
2664 	}
2665 
2666 	if (first == 1)
2667 		sbuf_printf(&sb, "None");
2668 
2669 	sbuf_finish(&sb);
2670 
2671 	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
2672 
2673 	return (error);
2674 }
2675 
2676 static cam_status
2677 daregister(struct cam_periph *periph, void *arg)
2678 {
2679 	struct da_softc *softc;
2680 	struct ccb_pathinq cpi;
2681 	struct ccb_getdev *cgd;
2682 	char tmpstr[80];
2683 	caddr_t match;
2684 
2685 	cgd = (struct ccb_getdev *)arg;
2686 	if (cgd == NULL) {
2687 		printf("daregister: no getdev CCB, can't register device\n");
2688 		return(CAM_REQ_CMP_ERR);
2689 	}
2690 
2691 	softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF,
2692 	    M_NOWAIT|M_ZERO);
2693 
2694 	if (softc == NULL) {
2695 		printf("daregister: Unable to probe new device. "
2696 		       "Unable to allocate softc\n");
2697 		return(CAM_REQ_CMP_ERR);
2698 	}
2699 
2700 	if (cam_iosched_init(&softc->cam_iosched, periph) != 0) {
2701 		printf("daregister: Unable to probe new device. "
2702 		       "Unable to allocate iosched memory\n");
2703 		free(softc, M_DEVBUF);
2704 		return(CAM_REQ_CMP_ERR);
2705 	}
2706 
2707 	LIST_INIT(&softc->pending_ccbs);
2708 	softc->state = DA_STATE_PROBE_WP;
2709 	bioq_init(&softc->delete_run_queue);
2710 	if (SID_IS_REMOVABLE(&cgd->inq_data))
2711 		softc->flags |= DA_FLAG_PACK_REMOVABLE;
2712 	softc->unmap_max_ranges = UNMAP_MAX_RANGES;
2713 	softc->unmap_max_lba = UNMAP_RANGE_MAX;
2714 	softc->unmap_gran = 0;
2715 	softc->unmap_gran_align = 0;
2716 	softc->ws_max_blks = WS16_MAX_BLKS;
2717 	softc->trim_max_ranges = ATA_TRIM_MAX_RANGES;
2718 	softc->rotating = 1;
2719 
2720 	periph->softc = softc;
2721 
2722 	/*
2723 	 * See if this device has any quirks.
2724 	 */
2725 	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
2726 			       (caddr_t)da_quirk_table,
2727 			       nitems(da_quirk_table),
2728 			       sizeof(*da_quirk_table), scsi_inquiry_match);
2729 
2730 	if (match != NULL)
2731 		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
2732 	else
2733 		softc->quirks = DA_Q_NONE;
2734 
2735 	/* Check if the SIM does not want 6 byte commands */
2736 	xpt_path_inq(&cpi, periph->path);
2737 	if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
2738 		softc->quirks |= DA_Q_NO_6_BYTE;
2739 
2740 	if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM)
2741 		softc->zone_mode = DA_ZONE_HOST_MANAGED;
2742 	else if (softc->quirks & DA_Q_SMR_DM)
2743 		softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
2744 	else
2745 		softc->zone_mode = DA_ZONE_NONE;
2746 
2747 	if (softc->zone_mode != DA_ZONE_NONE) {
2748 		if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
2749 			if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC))
2750 				softc->zone_interface = DA_ZONE_IF_ATA_SAT;
2751 			else
2752 				softc->zone_interface = DA_ZONE_IF_ATA_PASS;
2753 		} else
2754 			softc->zone_interface = DA_ZONE_IF_SCSI;
2755 	}
2756 
2757 	TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);
2758 
2759 	/*
2760 	 * Take an exclusive section lock on the periph while dastart is called
2761 	 * to finish the probe.  The lock will be dropped in dadone at the end
2762 	 * of probe. This locks out daopen and daclose from racing with the
2763 	 * probe.
2764 	 *
2765 	 * XXX if cam_periph_hold returns an error, we don't hold a refcount.
2766 	 */
2767 	(void)da_periph_hold(periph, PRIBIO, DA_REF_PROBE_HOLD);
2768 
2769 	/*
2770 	 * Schedule a periodic event to occasionally send an
2771 	 * ordered tag to a device.
2772 	 */
2773 	callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
2774 	callout_reset(&softc->sendordered_c,
2775 	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
2776 	    dasendorderedtag, periph);
2777 
2778 	cam_periph_unlock(periph);
2779 	/*
2780 	 * RBC devices don't have to support READ(6), only READ(10).
2781 	 */
2782 	if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
2783 		softc->minimum_cmd_size = 10;
2784 	else
2785 		softc->minimum_cmd_size = 6;
2786 
2787 	/*
2788 	 * Load the user's default, if any.
2789 	 */
2790 	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
2791 		 periph->unit_number);
2792 	TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);
2793 
2794 	/*
2795 	 * 6, 10, 12 and 16 are the currently permissible values.
2796 	 */
2797 	if (softc->minimum_cmd_size > 12)
2798 		softc->minimum_cmd_size = 16;
2799 	else if (softc->minimum_cmd_size > 10)
2800 		softc->minimum_cmd_size = 12;
2801 	else if (softc->minimum_cmd_size > 6)
2802 		softc->minimum_cmd_size = 10;
2803 	else
2804 		softc->minimum_cmd_size = 6;
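	/*
	 * Only 6, 10, 12 and 16 byte READ/WRITE CDBs exist, which is why the
	 * value is clamped to that set: READ(6) carries a 21-bit LBA,
	 * READ(10) and READ(12) a 32-bit LBA, and READ(16) a 64-bit LBA.
	 */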
2805 
2806 	/* Predict whether device may support READ CAPACITY(16). */
2807 	if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 &&
2808 	    (softc->quirks & DA_Q_NO_RC16) == 0) {
2809 		softc->flags |= DA_FLAG_CAN_RC16;
2810 	}
2811 
2812 	/*
2813 	 * Register this media as a disk.
2814 	 */
2815 	softc->disk = disk_alloc();
2816 	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
2817 			  periph->unit_number, 0,
2818 			  DEVSTAT_BS_UNAVAILABLE,
2819 			  SID_TYPE(&cgd->inq_data) |
2820 			  XPORT_DEVSTAT_TYPE(cpi.transport),
2821 			  DEVSTAT_PRIORITY_DISK);
2822 	softc->disk->d_open = daopen;
2823 	softc->disk->d_close = daclose;
2824 	softc->disk->d_strategy = dastrategy;
2825 	softc->disk->d_dump = dadump;
2826 	softc->disk->d_getattr = dagetattr;
2827 	softc->disk->d_gone = dadiskgonecb;
2828 	softc->disk->d_name = "da";
2829 	softc->disk->d_drv1 = periph;
2830 	if (cpi.maxio == 0)
2831 		softc->maxio = DFLTPHYS;	/* traditional default */
2832 	else if (cpi.maxio > MAXPHYS)
2833 		softc->maxio = MAXPHYS;		/* for safety */
2834 	else
2835 		softc->maxio = cpi.maxio;
2836 	if (softc->quirks & DA_Q_128KB)
2837 		softc->maxio = min(softc->maxio, 128 * 1024);
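	/*
	 * DFLTPHYS and MAXPHYS are kernel build parameters (commonly 64 KiB
	 * and 128 KiB on stock configurations); the logic above caps maxio
	 * at MAXPHYS and falls back to DFLTPHYS when the SIM reports no
	 * limit of its own.
	 */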
2838 	softc->disk->d_maxsize = softc->maxio;
2839 	softc->disk->d_unit = periph->unit_number;
2840 	softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
2841 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0)
2842 		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
2843 	if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
2844 		softc->unmappedio = 1;
2845 		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
2846 	}
2847 	cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor,
2848 	    sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr));
2849 	strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr));
2850 	cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)],
2851 	    cgd->inq_data.product, sizeof(cgd->inq_data.product),
2852 	    sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr));
2853 	softc->disk->d_hba_vendor = cpi.hba_vendor;
2854 	softc->disk->d_hba_device = cpi.hba_device;
2855 	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
2856 	softc->disk->d_hba_subdevice = cpi.hba_subdevice;
2857 
2858 	/*
2859 	 * Acquire a reference to the periph before we register with GEOM.
2860 	 * We'll release this reference once GEOM calls us back (via
2861 	 * dadiskgonecb()) telling us that our provider has been freed.
2862 	 */
2863 	if (da_periph_acquire(periph, DA_REF_GEOM) != 0) {
2864 		xpt_print(periph->path, "%s: lost periph during "
2865 			  "registration!\n", __func__);
2866 		cam_periph_lock(periph);
2867 		return (CAM_REQ_CMP_ERR);
2868 	}
2869 
2870 	disk_create(softc->disk, DISK_VERSION);
2871 	cam_periph_lock(periph);
2872 
2873 	/*
2874 	 * Add async callbacks for events of interest.
2875 	 * I don't bother checking if this fails as,
2876 	 * in most cases, the system will function just
2877 	 * fine without them and the only alternative
2878 	 * would be to not attach the device on failure.
2879 	 */
2880 	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
2881 	    AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION |
2882 	    AC_INQ_CHANGED, daasync, periph, periph->path);
2883 
2884 	/*
2885 	 * Emit an attribute changed notification just in case
2886 	 * physical path information arrived before our async
2887 	 * event handler was registered, but after anyone attaching
2888 	 * to our disk device polled it.
2889 	 */
2890 	disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT);
2891 
2892 	/*
2893 	 * Schedule periodic media polling events.
2894 	 */
2895 	callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0);
2896 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) &&
2897 	    (cgd->inq_flags & SID_AEN) == 0 &&
2898 	    da_poll_period != 0)
2899 		callout_reset(&softc->mediapoll_c, da_poll_period * hz,
2900 		    damediapoll, periph);
2901 
2902 	xpt_schedule(periph, CAM_PRIORITY_DEV);
2903 
2904 	return(CAM_REQ_CMP);
2905 }
2906 
2907 static int
2908 da_zone_bio_to_scsi(int disk_zone_cmd)
2909 {
2910 	switch (disk_zone_cmd) {
2911 	case DISK_ZONE_OPEN:
2912 		return ZBC_OUT_SA_OPEN;
2913 	case DISK_ZONE_CLOSE:
2914 		return ZBC_OUT_SA_CLOSE;
2915 	case DISK_ZONE_FINISH:
2916 		return ZBC_OUT_SA_FINISH;
2917 	case DISK_ZONE_RWP:
2918 		return ZBC_OUT_SA_RWP;
2919 	}
2920 
2921 	return -1;
2922 }
2923 
2924 static int
2925 da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp,
2926 	    int *queue_ccb)
2927 {
2928 	struct da_softc *softc;
2929 	int error;
2930 
2931 	error = 0;
2932 
2933 	if (bp->bio_cmd != BIO_ZONE) {
2934 		error = EINVAL;
2935 		goto bailout;
2936 	}
2937 
2938 	softc = periph->softc;
2939 
2940 	switch (bp->bio_zone.zone_cmd) {
2941 	case DISK_ZONE_OPEN:
2942 	case DISK_ZONE_CLOSE:
2943 	case DISK_ZONE_FINISH:
2944 	case DISK_ZONE_RWP: {
2945 		int zone_flags;
2946 		int zone_sa;
2947 		uint64_t lba;
2948 
2949 		zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd);
2950 		if (zone_sa == -1) {
2951 			xpt_print(periph->path, "Cannot translate zone "
2952 			    "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd);
2953 			error = EINVAL;
2954 			goto bailout;
2955 		}
2956 
2957 		zone_flags = 0;
2958 		lba = bp->bio_zone.zone_params.rwp.id;
2959 
2960 		if (bp->bio_zone.zone_params.rwp.flags &
2961 		    DISK_ZONE_RWP_FLAG_ALL)
2962 			zone_flags |= ZBC_OUT_ALL;
2963 
2964 		if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
2965 			scsi_zbc_out(&ccb->csio,
2966 				     /*retries*/ da_retry_count,
2967 				     /*cbfcnp*/ dadone,
2968 				     /*tag_action*/ MSG_SIMPLE_Q_TAG,
2969 				     /*service_action*/ zone_sa,
2970 				     /*zone_id*/ lba,
2971 				     /*zone_flags*/ zone_flags,
2972 				     /*data_ptr*/ NULL,
2973 				     /*dxfer_len*/ 0,
2974 				     /*sense_len*/ SSD_FULL_SIZE,
2975 				     /*timeout*/ da_default_timeout * 1000);
2976 		} else {
2977 			/*
2978 			 * Note that in this case, even though we can
2979 			 * technically use NCQ, we don't bother for several
2980 			 * reasons:
2981 			 * 1. It hasn't been tested on a SAT layer that
2982 			 *    supports it.  This is new as of SAT-4.
2983 			 * 2. Even when there is a SAT layer that supports
2984 			 *    it, that SAT layer will also probably support
2985 			 *    ZBC -> ZAC translation, since they are both
2986 			 *    in the SAT-4 spec.
2987 			 * 3. Translation will likely be preferable to ATA
2988 			 *    passthrough.  LSI / Avago at least single
2989 			 *    steps ATA passthrough commands in the HBA,
2990 			 *    regardless of protocol, so unless that
2991 			 *    changes, there is a performance penalty for
2992 			 *    doing ATA passthrough no matter whether
2993 			 *    you're using NCQ/FPDMA, DMA or PIO.
2994 			 * 4. It requires a 32-byte CDB, which at least at
2995 			 *    this point in CAM requires a CDB pointer, which
2996 			 *    would require us to allocate an additional bit
2997 			 *    of storage separate from the CCB.
2998 			 */
2999 			error = scsi_ata_zac_mgmt_out(&ccb->csio,
3000 			    /*retries*/ da_retry_count,
3001 			    /*cbfcnp*/ dadone,
3002 			    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3003 			    /*use_ncq*/ 0,
3004 			    /*zm_action*/ zone_sa,
3005 			    /*zone_id*/ lba,
3006 			    /*zone_flags*/ zone_flags,
3007 			    /*data_ptr*/ NULL,
3008 			    /*dxfer_len*/ 0,
3009 			    /*cdb_storage*/ NULL,
3010 			    /*cdb_storage_len*/ 0,
3011 			    /*sense_len*/ SSD_FULL_SIZE,
3012 			    /*timeout*/ da_default_timeout * 1000);
3013 			if (error != 0) {
3014 				error = EINVAL;
3015 				xpt_print(periph->path,
3016 				    "scsi_ata_zac_mgmt_out() returned an "
3017 				    "error!\n");
3018 				goto bailout;
3019 			}
3020 		}
3021 		*queue_ccb = 1;
3022 
3023 		break;
3024 	}
3025 	case DISK_ZONE_REPORT_ZONES: {
3026 		uint8_t *rz_ptr;
3027 		uint32_t num_entries, alloc_size;
3028 		struct disk_zone_report *rep;
3029 
3030 		rep = &bp->bio_zone.zone_params.report;
3031 
3032 		num_entries = rep->entries_allocated;
3033 		if (num_entries == 0) {
3034 			xpt_print(periph->path, "No entries allocated for "
3035 			    "Report Zones request\n");
3036 			error = EINVAL;
3037 			goto bailout;
3038 		}
3039 		alloc_size = sizeof(struct scsi_report_zones_hdr) +
3040 		    (sizeof(struct scsi_report_zones_desc) * num_entries);
3041 		alloc_size = min(alloc_size, softc->disk->d_maxsize);
3042 		rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO);
3043 		if (rz_ptr == NULL) {
3044 			xpt_print(periph->path, "Unable to allocate memory "
3045 			   "for Report Zones request\n");
3046 			error = ENOMEM;
3047 			goto bailout;
3048 		}
3049 
3050 		if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
3051 			scsi_zbc_in(&ccb->csio,
3052 				    /*retries*/ da_retry_count,
3053 				    /*cbcfnp*/ dadone,
3054 				    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3055 				    /*service_action*/ ZBC_IN_SA_REPORT_ZONES,
3056 				    /*zone_start_lba*/ rep->starting_id,
3057 				    /*zone_options*/ rep->rep_options,
3058 				    /*data_ptr*/ rz_ptr,
3059 				    /*dxfer_len*/ alloc_size,
3060 				    /*sense_len*/ SSD_FULL_SIZE,
3061 				    /*timeout*/ da_default_timeout * 1000);
3062 		} else {
3063 			/*
3064 			 * Note that in this case, even though we can
3065 			 * technically use NCQ, we don't bother for several
3066 			 * reasons:
3067 			 * 1. It hasn't been tested on a SAT layer that
3068 			 *    supports it.  This is new as of SAT-4.
3069 			 * 2. Even when there is a SAT layer that supports
3070 			 *    it, that SAT layer will also probably support
3071 			 *    ZBC -> ZAC translation, since they are both
3072 			 *    in the SAT-4 spec.
3073 			 * 3. Translation will likely be preferable to ATA
3074 			 *    passthrough.  LSI / Avago at least single
3075 			 *    steps ATA passthrough commands in the HBA,
3076 			 *    regardless of protocol, so unless that
3077 			 *    changes, there is a performance penalty for
3078 			 *    doing ATA passthrough no matter whether
3079 			 *    you're using NCQ/FPDMA, DMA or PIO.
3080 			 * 4. It requires a 32-byte CDB, which at least at
3081 			 *    this point in CAM requires a CDB pointer, which
3082 			 *    would require us to allocate an additional bit
3083 			 *    of storage separate from the CCB.
3084 			 */
3085 			error = scsi_ata_zac_mgmt_in(&ccb->csio,
3086 			    /*retries*/ da_retry_count,
3087 			    /*cbcfnp*/ dadone,
3088 			    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3089 			    /*use_ncq*/ 0,
3090 			    /*zm_action*/ ATA_ZM_REPORT_ZONES,
3091 			    /*zone_id*/ rep->starting_id,
3092 			    /*zone_flags*/ rep->rep_options,
3093 			    /*data_ptr*/ rz_ptr,
3094 			    /*dxfer_len*/ alloc_size,
3095 			    /*cdb_storage*/ NULL,
3096 			    /*cdb_storage_len*/ 0,
3097 			    /*sense_len*/ SSD_FULL_SIZE,
3098 			    /*timeout*/ da_default_timeout * 1000);
3099 			if (error != 0) {
3100 				error = EINVAL;
3101 				xpt_print(periph->path,
3102 				    "scsi_ata_zac_mgmt_in() returned an "
3103 				    "error!\n");
3104 				goto bailout;
3105 			}
3106 		}
3107 
3108 		/*
3109 		 * For BIO_ZONE, this isn't normally needed.  However, it
3110 		 * is used by devstat_end_transaction_bio() to determine
3111 		 * how much data was transferred.
3112 		 */
3113 		/*
3114 		 * XXX KDM we have a problem.  But I'm not sure how to fix
3115 		 * it.  devstat uses bio_bcount - bio_resid to calculate
3116 		 * the amount of data transferred.   The GEOM disk code
3117 		 * uses bio_length - bio_resid to calculate the amount of
3118 		 * data in bio_completed.  We have different structure
3119 		 * sizes above and below the da(4) driver.  So, if we
3120 		 * use the sizes above, the amount transferred won't be
3121 		 * quite accurate for devstat.  If we use different sizes
3122 		 * for bio_bcount and bio_length (above and below
3123 		 * respectively), then the residual needs to match one or
3124 		 * the other.  Everything is calculated after the bio
3125 		 * leaves the driver, so changing the values around isn't
3126 		 * really an option.  For now, just set the count to the
3127 		 * passed in length.  This means that the calculations
3128 		 * above (e.g. bio_completed) will be correct, but the
3129 		 * amount of data reported to devstat will be slightly
3130 		 * under or overstated.
3131 		 */
3132 		bp->bio_bcount = bp->bio_length;
3133 
3134 		*queue_ccb = 1;
3135 
3136 		break;
3137 	}
3138 	case DISK_ZONE_GET_PARAMS: {
3139 		struct disk_zone_disk_params *params;
3140 
3141 		params = &bp->bio_zone.zone_params.disk_params;
3142 		bzero(params, sizeof(*params));
3143 
3144 		switch (softc->zone_mode) {
3145 		case DA_ZONE_DRIVE_MANAGED:
3146 			params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED;
3147 			break;
3148 		case DA_ZONE_HOST_AWARE:
3149 			params->zone_mode = DISK_ZONE_MODE_HOST_AWARE;
3150 			break;
3151 		case DA_ZONE_HOST_MANAGED:
3152 			params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED;
3153 			break;
3154 		default:
3155 		case DA_ZONE_NONE:
3156 			params->zone_mode = DISK_ZONE_MODE_NONE;
3157 			break;
3158 		}
3159 
3160 		if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ)
3161 			params->flags |= DISK_ZONE_DISK_URSWRZ;
3162 
3163 		if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) {
3164 			params->optimal_seq_zones = softc->optimal_seq_zones;
3165 			params->flags |= DISK_ZONE_OPT_SEQ_SET;
3166 		}
3167 
3168 		if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) {
3169 			params->optimal_nonseq_zones =
3170 			    softc->optimal_nonseq_zones;
3171 			params->flags |= DISK_ZONE_OPT_NONSEQ_SET;
3172 		}
3173 
3174 		if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) {
3175 			params->max_seq_zones = softc->max_seq_zones;
3176 			params->flags |= DISK_ZONE_MAX_SEQ_SET;
3177 		}
3178 		if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP)
3179 			params->flags |= DISK_ZONE_RZ_SUP;
3180 
3181 		if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP)
3182 			params->flags |= DISK_ZONE_OPEN_SUP;
3183 
3184 		if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP)
3185 			params->flags |= DISK_ZONE_CLOSE_SUP;
3186 
3187 		if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP)
3188 			params->flags |= DISK_ZONE_FINISH_SUP;
3189 
3190 		if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP)
3191 			params->flags |= DISK_ZONE_RWP_SUP;
3192 		break;
3193 	}
3194 	default:
3195 		break;
3196 	}
3197 bailout:
3198 	return (error);
3199 }
3200 
3201 static void
3202 dastart(struct cam_periph *periph, union ccb *start_ccb)
3203 {
3204 	struct da_softc *softc;
3205 
3206 	cam_periph_assert(periph, MA_OWNED);
3207 	softc = (struct da_softc *)periph->softc;
3208 
3209 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n"));
3210 
3211 skipstate:
3212 	switch (softc->state) {
3213 	case DA_STATE_NORMAL:
3214 	{
3215 		struct bio *bp;
3216 		uint8_t tag_code;
3217 
3218 more:
3219 		bp = cam_iosched_next_bio(softc->cam_iosched);
3220 		if (bp == NULL) {
3221 			if (cam_iosched_has_work_flags(softc->cam_iosched,
3222 			    DA_WORK_TUR)) {
3223 				softc->flags |= DA_FLAG_TUR_PENDING;
3224 				cam_iosched_clr_work_flags(softc->cam_iosched,
3225 				    DA_WORK_TUR);
3226 				scsi_test_unit_ready(&start_ccb->csio,
3227 				     /*retries*/ da_retry_count,
3228 				     dadone_tur,
3229 				     MSG_SIMPLE_Q_TAG,
3230 				     SSD_FULL_SIZE,
3231 				     da_default_timeout * 1000);
3232 				start_ccb->ccb_h.ccb_bp = NULL;
3233 				start_ccb->ccb_h.ccb_state = DA_CCB_TUR;
3234 				xpt_action(start_ccb);
3235 			} else
3236 				xpt_release_ccb(start_ccb);
3237 			break;
3238 		}
3239 
3240 		if (bp->bio_cmd == BIO_DELETE) {
3241 			if (softc->delete_func != NULL) {
3242 				softc->delete_func(periph, start_ccb, bp);
3243 				goto out;
3244 			} else {
3245 				/*
3246 				 * Not sure this is possible, but failsafe by
3247 				 * lying and saying "sure, done."
3248 				 */
3249 				biofinish(bp, NULL, 0);
3250 				goto more;
3251 			}
3252 		}
3253 
3254 		if (cam_iosched_has_work_flags(softc->cam_iosched,
3255 		    DA_WORK_TUR)) {
3256 			cam_iosched_clr_work_flags(softc->cam_iosched,
3257 			    DA_WORK_TUR);
3258 			da_periph_release_locked(periph, DA_REF_TUR);
3259 		}
3260 
3261 		if ((bp->bio_flags & BIO_ORDERED) != 0 ||
3262 		    (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
3263 			softc->flags &= ~DA_FLAG_NEED_OTAG;
3264 			softc->flags |= DA_FLAG_WAS_OTAG;
3265 			tag_code = MSG_ORDERED_Q_TAG;
3266 		} else {
3267 			tag_code = MSG_SIMPLE_Q_TAG;
3268 		}
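		/*
		 * DA_FLAG_NEED_OTAG is presumably set by the dasendorderedtag
		 * callout armed in daregister(), so even a stream of purely
		 * simple-tagged I/O gets an ordered tag injected periodically
		 * (every da_default_timeout / DA_ORDEREDTAG_INTERVAL seconds).
		 */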
3269 
3270 		switch (bp->bio_cmd) {
3271 		case BIO_WRITE:
3272 		case BIO_READ:
3273 		{
3274 			void *data_ptr;
3275 			int rw_op;
3276 
3277 			biotrack(bp, __func__);
3278 
3279 			if (bp->bio_cmd == BIO_WRITE) {
3280 				softc->flags |= DA_FLAG_DIRTY;
3281 				rw_op = SCSI_RW_WRITE;
3282 			} else {
3283 				rw_op = SCSI_RW_READ;
3284 			}
3285 
3286 			data_ptr = bp->bio_data;
3287 			if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) {
3288 				rw_op |= SCSI_RW_BIO;
3289 				data_ptr = bp;
3290 			}
3291 
3292 			scsi_read_write(&start_ccb->csio,
3293 					/*retries*/da_retry_count,
3294 					/*cbfcnp*/dadone,
3295 					/*tag_action*/tag_code,
3296 					rw_op,
3297 					/*byte2*/0,
3298 					softc->minimum_cmd_size,
3299 					/*lba*/bp->bio_pblkno,
3300 					/*block_count*/bp->bio_bcount /
3301 					softc->params.secsize,
3302 					data_ptr,
3303 					/*dxfer_len*/ bp->bio_bcount,
3304 					/*sense_len*/SSD_FULL_SIZE,
3305 					da_default_timeout * 1000);
3306 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
3307 			start_ccb->csio.bio = bp;
3308 #endif
3309 			break;
3310 		}
3311 		case BIO_FLUSH:
3312 			/*
3313 			 * If we don't support sync cache, or the disk
3314 			 * isn't dirty, FLUSH is a no-op.  Use the
3315 			 * allocated CCB for the next bio if one is
3316 			 * available.
3317 			 */
3318 			if ((softc->quirks & DA_Q_NO_SYNC_CACHE) != 0 ||
3319 			    (softc->flags & DA_FLAG_DIRTY) == 0) {
3320 				biodone(bp);
3321 				goto skipstate;
3322 			}
3323 
3324 			/*
3325 			 * BIO_FLUSH doesn't currently communicate
3326 			 * range data, so we synchronize the cache
3327 			 * over the whole disk.
3328 			 */
3329 			scsi_synchronize_cache(&start_ccb->csio,
3330 					       /*retries*/1,
3331 					       /*cbfcnp*/dadone,
3332 					       /*tag_action*/tag_code,
3333 					       /*begin_lba*/0,
3334 					       /*lb_count*/0,
3335 					       SSD_FULL_SIZE,
3336 					       da_default_timeout*1000);
3337 			/*
3338 			 * Clear the dirty flag before sending the command.
3339 			 * Either this sync cache will be successful, or it
3340 			 * will fail after a retry.  If it fails, it is
3341 			 * unlikely to be successful if retried later, so
3342 			 * we'll save ourselves time by just marking the
3343 			 * device clean.
3344 			 */
3345 			softc->flags &= ~DA_FLAG_DIRTY;
3346 			break;
3347 		case BIO_ZONE: {
3348 			int error, queue_ccb;
3349 
3350 			queue_ccb = 0;
3351 			error = da_zone_cmd(periph, start_ccb, bp, &queue_ccb);
3352 			error = da_zone_cmd(periph, start_ccb, bp,&queue_ccb);
3353 			if ((error != 0)
3354 			 || (queue_ccb == 0)) {
3355 				biofinish(bp, NULL, error);
3356 				xpt_release_ccb(start_ccb);
3357 				return;
3358 			}
3359 			break;
3360 		}
3361 		}
3362 		start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
3363 		start_ccb->ccb_h.flags |= CAM_UNLOCKED;
3364 		start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout);
3365 
3366 out:
3367 		LIST_INSERT_HEAD(&softc->pending_ccbs,
3368 				 &start_ccb->ccb_h, periph_links.le);
3369 
3370 		/* We expect a unit attention from this device */
3371 		if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
3372 			start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
3373 			softc->flags &= ~DA_FLAG_RETRY_UA;
3374 		}
3375 
3376 		start_ccb->ccb_h.ccb_bp = bp;
3377 		softc->refcount++;
3378 		cam_periph_unlock(periph);
3379 		xpt_action(start_ccb);
3380 		cam_periph_lock(periph);
3381 
3382 		/* May have more work to do, so ensure we stay scheduled */
3383 		daschedule(periph);
3384 		break;
3385 	}
3386 	case DA_STATE_PROBE_WP:
3387 	{
3388 		void  *mode_buf;
3389 		int    mode_buf_len;
3390 
3391 		if (da_disable_wp_detection) {
3392 			if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
3393 				softc->state = DA_STATE_PROBE_RC16;
3394 			else
3395 				softc->state = DA_STATE_PROBE_RC;
3396 			goto skipstate;
3397 		}
3398 		mode_buf_len = 192;
3399 		mode_buf = malloc(mode_buf_len, M_SCSIDA, M_NOWAIT);
3400 		if (mode_buf == NULL) {
3401 			xpt_print(periph->path, "Unable to send mode sense - "
3402 			    "malloc failure\n");
3403 			if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
3404 				softc->state = DA_STATE_PROBE_RC16;
3405 			else
3406 				softc->state = DA_STATE_PROBE_RC;
3407 			goto skipstate;
3408 		}
3409 		scsi_mode_sense_len(&start_ccb->csio,
3410 				    /*retries*/ da_retry_count,
3411 				    /*cbfcnp*/ dadone_probewp,
3412 				    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3413 				    /*dbd*/ FALSE,
3414 				    /*pc*/ SMS_PAGE_CTRL_CURRENT,
3415 				    /*page*/ SMS_ALL_PAGES_PAGE,
3416 				    /*param_buf*/ mode_buf,
3417 				    /*param_len*/ mode_buf_len,
3418 				    /*minimum_cmd_size*/ softc->minimum_cmd_size,
3419 				    /*sense_len*/ SSD_FULL_SIZE,
3420 				    /*timeout*/ da_default_timeout * 1000);
3421 		start_ccb->ccb_h.ccb_bp = NULL;
3422 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_WP;
3423 		xpt_action(start_ccb);
3424 		break;
3425 	}
3426 	case DA_STATE_PROBE_RC:
3427 	{
3428 		struct scsi_read_capacity_data *rcap;
3429 
3430 		rcap = (struct scsi_read_capacity_data *)
3431 		    malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO);
3432 		if (rcap == NULL) {
3433 			printf("dastart: Couldn't malloc read_capacity data\n");
3434 			/* da_free_periph??? */
3435 			break;
3436 		}
3437 		scsi_read_capacity(&start_ccb->csio,
3438 				   /*retries*/da_retry_count,
3439 				   dadone_proberc,
3440 				   MSG_SIMPLE_Q_TAG,
3441 				   rcap,
3442 				   SSD_FULL_SIZE,
3443 				   /*timeout*/5000);
3444 		start_ccb->ccb_h.ccb_bp = NULL;
3445 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC;
3446 		xpt_action(start_ccb);
3447 		break;
3448 	}
3449 	case DA_STATE_PROBE_RC16:
3450 	{
3451 		struct scsi_read_capacity_data_long *rcaplong;
3452 
3453 		rcaplong = (struct scsi_read_capacity_data_long *)
3454 			malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO);
3455 		if (rcaplong == NULL) {
3456 			printf("dastart: Couldn't malloc read_capacity data\n");
3457 			/* da_free_periph??? */
3458 			break;
3459 		}
3460 		scsi_read_capacity_16(&start_ccb->csio,
3461 				      /*retries*/ da_retry_count,
3462 				      /*cbfcnp*/ dadone_proberc,
3463 				      /*tag_action*/ MSG_SIMPLE_Q_TAG,
3464 				      /*lba*/ 0,
3465 				      /*reladr*/ 0,
3466 				      /*pmi*/ 0,
3467 				      /*rcap_buf*/ (uint8_t *)rcaplong,
3468 				      /*rcap_buf_len*/ sizeof(*rcaplong),
3469 				      /*sense_len*/ SSD_FULL_SIZE,
3470 				      /*timeout*/ da_default_timeout * 1000);
3471 		start_ccb->ccb_h.ccb_bp = NULL;
3472 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16;
3473 		xpt_action(start_ccb);
3474 		break;
3475 	}
3476 	case DA_STATE_PROBE_LBP:
3477 	{
3478 		struct scsi_vpd_logical_block_prov *lbp;
3479 
3480 		if (!scsi_vpd_supported_page(periph, SVPD_LBP)) {
3481 			/*
3482 			 * If we get here we don't support any SBC-3 delete
3483 			 * methods with UNMAP, since support for the Logical
3484 			 * Block Provisioning VPD page is required for devices
3485 			 * that support UNMAP according to T10/1799-D Revision
3486 			 * 31.  However, older revisions of the spec don't
3487 			 * mandate this, so we currently don't remove these
3488 			 * methods from the available set.
3489 			 */
3490 			softc->state = DA_STATE_PROBE_BLK_LIMITS;
3491 			goto skipstate;
3492 		}
3493 
3494 		lbp = (struct scsi_vpd_logical_block_prov *)
3495 			malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO);
3496 
3497 		if (lbp == NULL) {
3498 			printf("dastart: Couldn't malloc lbp data\n");
3499 			/* da_free_periph??? */
3500 			break;
3501 		}
3502 
3503 		scsi_inquiry(&start_ccb->csio,
3504 			     /*retries*/da_retry_count,
3505 			     /*cbfcnp*/dadone_probelbp,
3506 			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3507 			     /*inq_buf*/(u_int8_t *)lbp,
3508 			     /*inq_len*/sizeof(*lbp),
3509 			     /*evpd*/TRUE,
3510 			     /*page_code*/SVPD_LBP,
3511 			     /*sense_len*/SSD_MIN_SIZE,
3512 			     /*timeout*/da_default_timeout * 1000);
3513 		start_ccb->ccb_h.ccb_bp = NULL;
3514 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP;
3515 		xpt_action(start_ccb);
3516 		break;
3517 	}
3518 	case DA_STATE_PROBE_BLK_LIMITS:
3519 	{
3520 		struct scsi_vpd_block_limits *block_limits;
3521 
3522 		if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) {
3523 			/* Not supported, skip to the next probe. */
3524 			softc->state = DA_STATE_PROBE_BDC;
3525 			goto skipstate;
3526 		}
3527 
3528 		block_limits = (struct scsi_vpd_block_limits *)
3529 			malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO);
3530 
3531 		if (block_limits == NULL) {
3532 			printf("dastart: Couldn't malloc block_limits data\n");
3533 			/* da_free_periph??? */
3534 			break;
3535 		}
3536 
3537 		scsi_inquiry(&start_ccb->csio,
3538 			     /*retries*/da_retry_count,
3539 			     /*cbfcnp*/dadone_probeblklimits,
3540 			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3541 			     /*inq_buf*/(u_int8_t *)block_limits,
3542 			     /*inq_len*/sizeof(*block_limits),
3543 			     /*evpd*/TRUE,
3544 			     /*page_code*/SVPD_BLOCK_LIMITS,
3545 			     /*sense_len*/SSD_MIN_SIZE,
3546 			     /*timeout*/da_default_timeout * 1000);
3547 		start_ccb->ccb_h.ccb_bp = NULL;
3548 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS;
3549 		xpt_action(start_ccb);
3550 		break;
3551 	}
3552 	case DA_STATE_PROBE_BDC:
3553 	{
3554 		struct scsi_vpd_block_characteristics *bdc;
3555 
3556 		if (!scsi_vpd_supported_page(periph, SVPD_BDC)) {
3557 			softc->state = DA_STATE_PROBE_ATA;
3558 			goto skipstate;
3559 		}
3560 
3561 		bdc = (struct scsi_vpd_block_characteristics *)
3562 			malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
3563 
3564 		if (bdc == NULL) {
3565 			printf("dastart: Couldn't malloc bdc data\n");
3566 			/* da_free_periph??? */
3567 			break;
3568 		}
3569 
3570 		scsi_inquiry(&start_ccb->csio,
3571 			     /*retries*/da_retry_count,
3572 			     /*cbfcnp*/dadone_probebdc,
3573 			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3574 			     /*inq_buf*/(u_int8_t *)bdc,
3575 			     /*inq_len*/sizeof(*bdc),
3576 			     /*evpd*/TRUE,
3577 			     /*page_code*/SVPD_BDC,
3578 			     /*sense_len*/SSD_MIN_SIZE,
3579 			     /*timeout*/da_default_timeout * 1000);
3580 		start_ccb->ccb_h.ccb_bp = NULL;
3581 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC;
3582 		xpt_action(start_ccb);
3583 		break;
3584 	}
3585 	case DA_STATE_PROBE_ATA:
3586 	{
3587 		struct ata_params *ata_params;
3588 
3589 		if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
3590 			if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
3591 			 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
3592 				/*
3593 				 * Note that if the ATA VPD page isn't
3594 				 * supported, we aren't talking to an ATA
3595 				 * device anyway.  Support for that VPD
3596 				 * page is mandatory for SCSI to ATA (SAT)
3597 				 * translation layers.
3598 				 */
3599 				softc->state = DA_STATE_PROBE_ZONE;
3600 				goto skipstate;
3601 			}
3602 			daprobedone(periph, start_ccb);
3603 			break;
3604 		}
3605 
3606 		ata_params = (struct ata_params*)
3607 			malloc(sizeof(*ata_params), M_SCSIDA, M_NOWAIT|M_ZERO);
3608 
3609 		if (ata_params == NULL) {
3610 			xpt_print(periph->path, "Couldn't malloc ata_params "
3611 			    "data\n");
3612 			/* da_free_periph??? */
3613 			break;
3614 		}
3615 
3616 		scsi_ata_identify(&start_ccb->csio,
3617 				  /*retries*/da_retry_count,
3618 				  /*cbfcnp*/dadone_probeata,
3619 				  /*tag_action*/MSG_SIMPLE_Q_TAG,
3620 				  /*data_ptr*/(u_int8_t *)ata_params,
3621 				  /*dxfer_len*/sizeof(*ata_params),
3622 				  /*sense_len*/SSD_FULL_SIZE,
3623 				  /*timeout*/da_default_timeout * 1000);
3624 		start_ccb->ccb_h.ccb_bp = NULL;
3625 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA;
3626 		xpt_action(start_ccb);
3627 		break;
3628 	}
3629 	case DA_STATE_PROBE_ATA_LOGDIR:
3630 	{
3631 		struct ata_gp_log_dir *log_dir;
3632 		int retval;
3633 
3634 		retval = 0;
3635 
3636 		if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) {
3637 			/*
3638 			 * If we don't have log support, not much point in
3639 			 * trying to probe zone support.
3640 			 */
3641 			daprobedone(periph, start_ccb);
3642 			break;
3643 		}
3644 
3645 		/*
3646 		 * If we have an ATA device (the SCSI ATA Information VPD
3647 		 * page should be present and the ATA identify should have
3648 		 * succeeded) and it supports logs, ask for the log directory.
3649 		 */
3650 
3651 		log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO);
3652 		if (log_dir == NULL) {
3653 			xpt_print(periph->path, "Couldn't malloc log_dir "
3654 			    "data\n");
3655 			daprobedone(periph, start_ccb);
3656 			break;
3657 		}
3658 
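		/*
		 * Use the DMA variant of READ LOG if the drive advertised
		 * DMA support in its IDENTIFY data; otherwise fall back
		 * to PIO.
		 */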
3659 		retval = scsi_ata_read_log(&start_ccb->csio,
3660 		    /*retries*/ da_retry_count,
3661 		    /*cbfcnp*/ dadone_probeatalogdir,
3662 		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3663 		    /*log_address*/ ATA_LOG_DIRECTORY,
3664 		    /*page_number*/ 0,
3665 		    /*block_count*/ 1,
3666 		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3667 				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3668 		    /*data_ptr*/ (uint8_t *)log_dir,
3669 		    /*dxfer_len*/ sizeof(*log_dir),
3670 		    /*sense_len*/ SSD_FULL_SIZE,
3671 		    /*timeout*/ da_default_timeout * 1000);
3672 
3673 		if (retval != 0) {
3674 			xpt_print(periph->path, "scsi_ata_read_log() failed!\n");
3675 			free(log_dir, M_SCSIDA);
3676 			daprobedone(periph, start_ccb);
3677 			break;
3678 		}
3679 		start_ccb->ccb_h.ccb_bp = NULL;
3680 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR;
3681 		xpt_action(start_ccb);
3682 		break;
3683 	}
3684 	case DA_STATE_PROBE_ATA_IDDIR:
3685 	{
3686 		struct ata_identify_log_pages *id_dir;
3687 		int retval;
3688 
3689 		retval = 0;
3690 
3691 		/*
3692 		 * Check here to see whether the Identify Device log is
3693 		 * supported in the directory of logs.  If so, continue
3694 		 * with requesting the log of identify device pages.
3695 		 */
3696 		if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) {
3697 			daprobedone(periph, start_ccb);
3698 			break;
3699 		}
3700 
3701 		id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO);
3702 		if (id_dir == NULL) {
3703 			xpt_print(periph->path, "Couldn't malloc id_dir "
3704 			    "data\n");
3705 			daprobedone(periph, start_ccb);
3706 			break;
3707 		}
3708 
3709 		retval = scsi_ata_read_log(&start_ccb->csio,
3710 		    /*retries*/ da_retry_count,
3711 		    /*cbfcnp*/ dadone_probeataiddir,
3712 		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3713 		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3714 		    /*page_number*/ ATA_IDL_PAGE_LIST,
3715 		    /*block_count*/ 1,
3716 		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3717 				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3718 		    /*data_ptr*/ (uint8_t *)id_dir,
3719 		    /*dxfer_len*/ sizeof(*id_dir),
3720 		    /*sense_len*/ SSD_FULL_SIZE,
3721 		    /*timeout*/ da_default_timeout * 1000);
3722 
3723 		if (retval != 0) {
3724 			xpt_print(periph->path, "scsi_ata_read_log() failed!\n");
3725 			free(id_dir, M_SCSIDA);
3726 			daprobedone(periph, start_ccb);
3727 			break;
3728 		}
3729 		start_ccb->ccb_h.ccb_bp = NULL;
3730 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR;
3731 		xpt_action(start_ccb);
3732 		break;
3733 	}
3734 	case DA_STATE_PROBE_ATA_SUP:
3735 	{
3736 		struct ata_identify_log_sup_cap *sup_cap;
3737 		int retval;
3738 
3739 		retval = 0;
3740 
3741 		/*
3742 		 * Check here to see whether the Supported Capabilities log
3743 		 * is in the list of Identify Device logs.
3744 		 */
3745 		if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) {
3746 			daprobedone(periph, start_ccb);
3747 			break;
3748 		}
3749 
3750 		sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO);
3751 		if (sup_cap == NULL) {
3752 			xpt_print(periph->path, "Couldn't malloc sup_cap "
3753 			    "data\n");
3754 			daprobedone(periph, start_ccb);
3755 			break;
3756 		}
3757 
3758 		retval = scsi_ata_read_log(&start_ccb->csio,
3759 		    /*retries*/ da_retry_count,
3760 		    /*cbfcnp*/ dadone_probeatasup,
3761 		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3762 		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3763 		    /*page_number*/ ATA_IDL_SUP_CAP,
3764 		    /*block_count*/ 1,
3765 		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3766 				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3767 		    /*data_ptr*/ (uint8_t *)sup_cap,
3768 		    /*dxfer_len*/ sizeof(*sup_cap),
3769 		    /*sense_len*/ SSD_FULL_SIZE,
3770 		    /*timeout*/ da_default_timeout * 1000);
3771 
3772 		if (retval != 0) {
3773 			xpt_print(periph->path, "scsi_ata_read_log() failed!\n");
3774 			free(sup_cap, M_SCSIDA);
3775 			daprobedone(periph, start_ccb);
3776 			break;
3777 
3778 		}
3779 
3780 		start_ccb->ccb_h.ccb_bp = NULL;
3781 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP;
3782 		xpt_action(start_ccb);
3783 		break;
3784 	}
3785 	case DA_STATE_PROBE_ATA_ZONE:
3786 	{
3787 		struct ata_zoned_info_log *ata_zone;
3788 		int retval;
3789 
3790 		retval = 0;
3791 
3792 		/*
3793 		 * Check here to see whether the zoned device information
3794 		 * page is supported.  If so, continue on to request it.
3795 		 * If not, we're done with the probe.
3796 		 */
3797 		if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) {
3798 			daprobedone(periph, start_ccb);
3799 			break;
3800 		}
3801 		ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA,
3802 				  M_NOWAIT|M_ZERO);
3803 		if (ata_zone == NULL) {
3804 			xpt_print(periph->path, "Couldn't malloc ata_zone "
3805 			    "data\n");
3806 			daprobedone(periph, start_ccb);
3807 			break;
3808 		}
3809 
3810 		retval = scsi_ata_read_log(&start_ccb->csio,
3811 		    /*retries*/ da_retry_count,
3812 		    /*cbfcnp*/ dadone_probeatazone,
3813 		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3814 		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3815 		    /*page_number*/ ATA_IDL_ZDI,
3816 		    /*block_count*/ 1,
3817 		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3818 				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3819 		    /*data_ptr*/ (uint8_t *)ata_zone,
3820 		    /*dxfer_len*/ sizeof(*ata_zone),
3821 		    /*sense_len*/ SSD_FULL_SIZE,
3822 		    /*timeout*/ da_default_timeout * 1000);
3823 
3824 		if (retval != 0) {
3825 			xpt_print(periph->path, "scsi_ata_read_log() failed!\n");
3826 			free(ata_zone, M_SCSIDA);
3827 			daprobedone(periph, start_ccb);
3828 			break;
3829 		}
3830 		start_ccb->ccb_h.ccb_bp = NULL;
3831 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE;
3832 		xpt_action(start_ccb);
3833 
3834 		break;
3835 	}
3836 	case DA_STATE_PROBE_ZONE:
3837 	{
3838 		struct scsi_vpd_zoned_bdc *bdc;
3839 
3840 		/*
3841 		 * Note that this page will be supported for SCSI protocol
3842 		 * devices that support ZBC (SMR devices), as well as ATA
3843 		 * protocol devices that are behind a SAT (SCSI to ATA
3844 		 * Translation) layer that supports converting ZBC commands
3845 		 * to their ZAC equivalents.
3846 		 */
3847 		if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) {
3848 			daprobedone(periph, start_ccb);
3849 			break;
3850 		}
3851 		bdc = (struct scsi_vpd_zoned_bdc *)
3852 			malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
3853 
3854 		if (bdc == NULL) {
3855 			xpt_release_ccb(start_ccb);
3856 			xpt_print(periph->path, "Couldn't malloc zone VPD "
3857 			    "data\n");
3858 			break;
3859 		}
3860 		scsi_inquiry(&start_ccb->csio,
3861 			     /*retries*/da_retry_count,
3862 			     /*cbfcnp*/dadone_probezone,
3863 			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3864 			     /*inq_buf*/(u_int8_t *)bdc,
3865 			     /*inq_len*/sizeof(*bdc),
3866 			     /*evpd*/TRUE,
3867 			     /*page_code*/SVPD_ZONED_BDC,
3868 			     /*sense_len*/SSD_FULL_SIZE,
3869 			     /*timeout*/da_default_timeout * 1000);
3870 		start_ccb->ccb_h.ccb_bp = NULL;
3871 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE;
3872 		xpt_action(start_ccb);
3873 		break;
3874 	}
3875 	}
3876 }
3877 
3878 /*
3879  * In each of the methods below, while it's the caller's
3880  * responsibility to ensure the request will fit into a
3881  * single device request, we might have changed the delete
3882  * method due to the device incorrectly advertising either
3883  * its supported methods or limits.
3884  *
3885  * To prevent this from causing further issues, we validate
3886  * the request against the method's limits and warn, which
3887  * would otherwise be unnecessary.
3888  */
3889 static void
3890 da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3891 {
3892 	struct da_softc *softc = (struct da_softc *)periph->softc;
3893 	struct bio *bp1;
3894 	uint8_t *buf = softc->unmap_buf;
3895 	struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE];
3896 	uint64_t lba, lastlba = (uint64_t)-1;
3897 	uint64_t totalcount = 0;
3898 	uint64_t count;
3899 	uint32_t c, lastcount = 0, ranges = 0;
3900 
3901 	/*
3902 	 * Currently this doesn't take the UNMAP
3903 	 * Granularity and Granularity Alignment
3904 	 * fields into account.
3905 	 *
3906 	 * This could result in both suboptimal unmap
3907 	 * requests as well as UNMAP calls unmapping
3908 	 * fewer LBAs than requested.
3909 	 */
3910 
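	/*
	 * Build the UNMAP block descriptor list in unmap_buf: each
	 * descriptor is 16 bytes (8-byte starting LBA, 4-byte block
	 * count, 4 reserved bytes), following the 8-byte parameter
	 * list header.  Adjacent BIO_DELETE requests are coalesced
	 * into as few descriptors as the device's limits allow.
	 */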
3911 	bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
3912 	bp1 = bp;
3913 	do {
3914 		/*
3915 		 * Note: ada and da are different in how they store the
3916 		 * pending bp's in a trim. ada stores all of them in the
3917 		 * trim_req.bps. da stores all but the first one in the
3918 		 * delete_run_queue. ada then completes all the bps in
3919 		 * its adadone() loop. da completes all the bps in the
3920 		 * delete_run_queue in dadone, and relies on the biodone
3921 		 * after to complete. This should be reconciled since there's
3922 		 * no real reason to do it differently. XXX
3923 		 */
3924 		if (bp1 != bp)
3925 			bioq_insert_tail(&softc->delete_run_queue, bp1);
3926 		lba = bp1->bio_pblkno;
3927 		count = bp1->bio_bcount / softc->params.secsize;
3928 
3929 		/* Try to extend the previous range. */
3930 		if (lba == lastlba) {
3931 			c = omin(count, UNMAP_RANGE_MAX - lastcount);
3932 			lastlba += c;
3933 			lastcount += c;
3934 			scsi_ulto4b(lastcount, d[ranges - 1].length);
3935 			count -= c;
3936 			lba += c;
3937 			totalcount += c;
3938 		} else if ((softc->quirks & DA_Q_STRICT_UNMAP) &&
3939 		    softc->unmap_gran != 0) {
3940 			/* Align length of the previous range. */
3941 			if ((c = lastcount % softc->unmap_gran) != 0) {
3942 				if (lastcount <= c) {
3943 					totalcount -= lastcount;
3944 					lastlba = (uint64_t)-1;
3945 					lastcount = 0;
3946 					ranges--;
3947 				} else {
3948 					totalcount -= c;
3949 					lastlba -= c;
3950 					lastcount -= c;
3951 					scsi_ulto4b(lastcount,
3952 					    d[ranges - 1].length);
3953 				}
3954 			}
3955 			/* Align beginning of the new range. */
3956 			c = (lba - softc->unmap_gran_align) % softc->unmap_gran;
3957 			if (c != 0) {
3958 				c = softc->unmap_gran - c;
3959 				if (count <= c) {
3960 					count = 0;
3961 				} else {
3962 					lba += c;
3963 					count -= c;
3964 				}
3965 			}
3966 		}
3967 
3968 		while (count > 0) {
3969 			c = omin(count, UNMAP_RANGE_MAX);
3970 			if (totalcount + c > softc->unmap_max_lba ||
3971 			    ranges >= softc->unmap_max_ranges) {
3972 				xpt_print(periph->path,
3973 				    "%s issuing short delete %ld > %ld "
3974 				    "|| %d >= %d\n",
3975 				    da_delete_method_desc[softc->delete_method],
3976 				    totalcount + c, softc->unmap_max_lba,
3977 				    ranges, softc->unmap_max_ranges);
3978 				break;
3979 			}
3980 			scsi_u64to8b(lba, d[ranges].lba);
3981 			scsi_ulto4b(c, d[ranges].length);
3982 			lba += c;
3983 			totalcount += c;
3984 			ranges++;
3985 			count -= c;
3986 			lastlba = lba;
3987 			lastcount = c;
3988 		}
3989 		bp1 = cam_iosched_next_trim(softc->cam_iosched);
3990 		if (bp1 == NULL)
3991 			break;
3992 		if (ranges >= softc->unmap_max_ranges ||
3993 		    totalcount + bp1->bio_bcount /
3994 		    softc->params.secsize > softc->unmap_max_lba) {
3995 			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
3996 			break;
3997 		}
3998 	} while (1);
3999 
4000 	/* Align length of the last range. */
4001 	if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 &&
4002 	    (c = lastcount % softc->unmap_gran) != 0) {
4003 		if (lastcount <= c)
4004 			ranges--;
4005 		else
4006 			scsi_ulto4b(lastcount - c, d[ranges - 1].length);
4007 	}
4008 
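	/*
	 * Fill in the UNMAP parameter list header: the UNMAP data
	 * length (the descriptors plus the six remaining header
	 * bytes) and the block descriptor data length.
	 */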
4009 	scsi_ulto2b(ranges * 16 + 6, &buf[0]);
4010 	scsi_ulto2b(ranges * 16, &buf[2]);
4011 
4012 	scsi_unmap(&ccb->csio,
4013 		   /*retries*/da_retry_count,
4014 		   /*cbfcnp*/dadone,
4015 		   /*tag_action*/MSG_SIMPLE_Q_TAG,
4016 		   /*byte2*/0,
4017 		   /*data_ptr*/ buf,
4018 		   /*dxfer_len*/ ranges * 16 + 8,
4019 		   /*sense_len*/SSD_FULL_SIZE,
4020 		   da_default_timeout * 1000);
4021 	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
4022 	ccb->ccb_h.flags |= CAM_UNLOCKED;
4023 	softc->trim_count++;
4024 	softc->trim_ranges += ranges;
4025 	softc->trim_lbas += totalcount;
4026 	cam_iosched_submit_trim(softc->cam_iosched);
4027 }
4028 
4029 static void
4030 da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
4031 {
4032 	struct da_softc *softc = (struct da_softc *)periph->softc;
4033 	struct bio *bp1;
4034 	uint8_t *buf = softc->unmap_buf;
4035 	uint64_t lastlba = (uint64_t)-1;
4036 	uint64_t count;
4037 	uint64_t lba;
4038 	uint32_t lastcount = 0, c, requestcount;
4039 	int ranges = 0, off, block_count;
4040 
4041 	bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
4042 	bp1 = bp;
4043 	do {
4044 		if (bp1 != bp)	/* XXX imp XXX */
4045 			bioq_insert_tail(&softc->delete_run_queue, bp1);
4046 		lba = bp1->bio_pblkno;
4047 		count = bp1->bio_bcount / softc->params.secsize;
4048 		requestcount = count;
4049 
4050 		/* Try to extend the previous range. */
4051 		if (lba == lastlba) {
4052 			c = omin(count, ATA_DSM_RANGE_MAX - lastcount);
4053 			lastcount += c;
4054 			off = (ranges - 1) * 8;
4055 			buf[off + 6] = lastcount & 0xff;
4056 			buf[off + 7] = (lastcount >> 8) & 0xff;
4057 			count -= c;
4058 			lba += c;
4059 		}
4060 
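		/*
		 * Emit 8-byte DSM TRIM range entries: a 48-bit starting
		 * LBA in bytes 0-5 (little endian) and a 16-bit sector
		 * count in bytes 6-7.
		 */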
4061 		while (count > 0) {
4062 			c = omin(count, ATA_DSM_RANGE_MAX);
4063 			off = ranges * 8;
4064 
4065 			buf[off + 0] = lba & 0xff;
4066 			buf[off + 1] = (lba >> 8) & 0xff;
4067 			buf[off + 2] = (lba >> 16) & 0xff;
4068 			buf[off + 3] = (lba >> 24) & 0xff;
4069 			buf[off + 4] = (lba >> 32) & 0xff;
4070 			buf[off + 5] = (lba >> 40) & 0xff;
4071 			buf[off + 6] = c & 0xff;
4072 			buf[off + 7] = (c >> 8) & 0xff;
4073 			lba += c;
4074 			ranges++;
4075 			count -= c;
4076 			lastcount = c;
4077 			if (count != 0 && ranges == softc->trim_max_ranges) {
4078 				xpt_print(periph->path,
4079 				    "%s issuing short delete %ld > %ld\n",
4080 				    da_delete_method_desc[softc->delete_method],
4081 				    requestcount,
4082 				    (softc->trim_max_ranges - ranges) *
4083 				    ATA_DSM_RANGE_MAX);
4084 				break;
4085 			}
4086 		}
4087 		lastlba = lba;
4088 		bp1 = cam_iosched_next_trim(softc->cam_iosched);
4089 		if (bp1 == NULL)
4090 			break;
4091 		if (bp1->bio_bcount / softc->params.secsize >
4092 		    (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) {
4093 			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
4094 			break;
4095 		}
4096 	} while (1);
4097 
4098 	block_count = howmany(ranges, ATA_DSM_BLK_RANGES);
4099 	scsi_ata_trim(&ccb->csio,
4100 		      /*retries*/da_retry_count,
4101 		      /*cbfcnp*/dadone,
4102 		      /*tag_action*/MSG_SIMPLE_Q_TAG,
4103 		      block_count,
4104 		      /*data_ptr*/buf,
4105 		      /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE,
4106 		      /*sense_len*/SSD_FULL_SIZE,
4107 		      da_default_timeout * 1000);
4108 	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
4109 	ccb->ccb_h.flags |= CAM_UNLOCKED;
4110 	cam_iosched_submit_trim(softc->cam_iosched);
4111 }
4112 
4113 /*
4114  * We calculate ws_max_blks here based on d_delmaxsize instead
4115  * of using softc->ws_max_blks, as the latter is the absolute max
4116  * for the device, not the protocol max, which may well be lower.
4117  */
4118 static void
4119 da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
4120 {
4121 	struct da_softc *softc;
4122 	struct bio *bp1;
4123 	uint64_t ws_max_blks;
4124 	uint64_t lba;
4125 	uint64_t count; /* forward compat with WS32 */
4126 
4127 	softc = (struct da_softc *)periph->softc;
4128 	ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize;
4129 	lba = bp->bio_pblkno;
4130 	count = 0;
4131 	bp1 = bp;
4132 	do {
4133 		if (bp1 != bp)	/* XXX imp XXX */
4134 			bioq_insert_tail(&softc->delete_run_queue, bp1);
4135 		count += bp1->bio_bcount / softc->params.secsize;
4136 		if (count > ws_max_blks) {
4137 			xpt_print(periph->path,
4138 			    "%s issuing short delete %ld > %ld\n",
4139 			    da_delete_method_desc[softc->delete_method],
4140 			    count, ws_max_blks);
4141 			count = omin(count, ws_max_blks);
4142 			break;
4143 		}
4144 		bp1 = cam_iosched_next_trim(softc->cam_iosched);
4145 		if (bp1 == NULL)
4146 			break;
4147 		if (lba + count != bp1->bio_pblkno ||
4148 		    count + bp1->bio_bcount /
4149 		    softc->params.secsize > ws_max_blks) {
4150 			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
4151 			break;
4152 		}
4153 	} while (1);
4154 
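	/*
	 * Issue WRITE SAME for the accumulated range: with SWS_UNMAP
	 * set the device may deallocate the blocks, while
	 * DA_DELETE_ZERO writes zeroes instead.  A single zero-filled
	 * sector from zero_region is supplied as the pattern.
	 */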
4155 	scsi_write_same(&ccb->csio,
4156 			/*retries*/da_retry_count,
4157 			/*cbfcnp*/dadone,
4158 			/*tag_action*/MSG_SIMPLE_Q_TAG,
4159 			/*byte2*/softc->delete_method ==
4160 			    DA_DELETE_ZERO ? 0 : SWS_UNMAP,
4161 			softc->delete_method == DA_DELETE_WS16 ? 16 : 10,
4162 			/*lba*/lba,
4163 			/*block_count*/count,
4164 			/*data_ptr*/ __DECONST(void *, zero_region),
4165 			/*dxfer_len*/ softc->params.secsize,
4166 			/*sense_len*/SSD_FULL_SIZE,
4167 			da_default_timeout * 1000);
4168 	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
4169 	ccb->ccb_h.flags |= CAM_UNLOCKED;
4170 	cam_iosched_submit_trim(softc->cam_iosched);
4171 }
4172 
4173 static int
4174 cmd6workaround(union ccb *ccb)
4175 {
4176 	struct scsi_rw_6 cmd6;
4177 	struct scsi_rw_10 *cmd10;
4178 	struct da_softc *softc;
4179 	u_int8_t *cdb;
4180 	struct bio *bp;
4181 	int frozen;
4182 
4183 	cdb = ccb->csio.cdb_io.cdb_bytes;
4184 	softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
4185 
4186 	if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) {
4187 		da_delete_methods old_method = softc->delete_method;
4188 
4189 		/*
4190 		 * Typically there are two reasons for failure here:
4191 		 * 1. Delete method was detected as supported but isn't.
4192 		 * 2. Delete failed due to invalid params, e.g. too big.
4193 		 *
4194 		 * While we will attempt to choose an alternative delete method,
4195 		 * this may result in short deletes if the existing delete
4196 		 * requests from GEOM are too big for the new method chosen.
4197 		 *
4198 		 * This method assumes that the error which triggered this
4199 		 * will not retry the I/O, otherwise a panic will occur.
4200 		 */
4201 		dadeleteflag(softc, old_method, 0);
4202 		dadeletemethodchoose(softc, DA_DELETE_DISABLE);
4203 		if (softc->delete_method == DA_DELETE_DISABLE)
4204 			xpt_print(ccb->ccb_h.path,
4205 				  "%s failed, disabling BIO_DELETE\n",
4206 				  da_delete_method_desc[old_method]);
4207 		else
4208 			xpt_print(ccb->ccb_h.path,
4209 				  "%s failed, switching to %s BIO_DELETE\n",
4210 				  da_delete_method_desc[old_method],
4211 				  da_delete_method_desc[softc->delete_method]);
4212 
4213 		while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL)
4214 			cam_iosched_queue_work(softc->cam_iosched, bp);
4215 		cam_iosched_queue_work(softc->cam_iosched,
4216 		    (struct bio *)ccb->ccb_h.ccb_bp);
4217 		ccb->ccb_h.ccb_bp = NULL;
4218 		return (0);
4219 	}
4220 
4221 	/* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */
4222 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
4223 	    (*cdb == PREVENT_ALLOW) &&
4224 	    (softc->quirks & DA_Q_NO_PREVENT) == 0) {
4225 		if (bootverbose)
4226 			xpt_print(ccb->ccb_h.path,
4227 			    "PREVENT ALLOW MEDIUM REMOVAL not supported.\n");
4228 		softc->quirks |= DA_Q_NO_PREVENT;
4229 		return (0);
4230 	}
4231 
4232 	/* Detect unsupported SYNCHRONIZE CACHE(10). */
4233 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
4234 	    (*cdb == SYNCHRONIZE_CACHE) &&
4235 	    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
4236 		if (bootverbose)
4237 			xpt_print(ccb->ccb_h.path,
4238 			    "SYNCHRONIZE CACHE(10) not supported.\n");
4239 		softc->quirks |= DA_Q_NO_SYNC_CACHE;
4240 		softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE;
4241 		return (0);
4242 	}
4243 
4244 	/* Translation only possible if CDB is an array and cmd is R/W6 */
4245 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
4246 	    (*cdb != READ_6 && *cdb != WRITE_6))
4247 		return 0;
4248 
4249 	xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
4250 	    "increasing minimum_cmd_size to 10.\n");
4251 	softc->minimum_cmd_size = 10;
4252 
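	/*
	 * Rebuild the CDB in place: save the 6-byte CDB, then construct
	 * the equivalent 10-byte CDB over it with a zero-extended LBA
	 * and a 2-byte transfer length.
	 */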
4253 	bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
4254 	cmd10 = (struct scsi_rw_10 *)cdb;
4255 	cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
4256 	cmd10->byte2 = 0;
4257 	scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
4258 	cmd10->reserved = 0;
4259 	scsi_ulto2b(cmd6.length, cmd10->length);
4260 	cmd10->control = cmd6.control;
4261 	ccb->csio.cdb_len = sizeof(*cmd10);
4262 
4263 	/* Requeue request, unfreezing queue if necessary */
4264 	frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
4265 	ccb->ccb_h.status = CAM_REQUEUE_REQ;
4266 	xpt_action(ccb);
4267 	if (frozen) {
4268 		cam_release_devq(ccb->ccb_h.path,
4269 				 /*relsim_flags*/0,
4270 				 /*reduction*/0,
4271 				 /*timeout*/0,
4272 				 /*getcount_only*/0);
4273 	}
4274 	return (ERESTART);
4275 }
4276 
4277 static void
4278 dazonedone(struct cam_periph *periph, union ccb *ccb)
4279 {
4280 	struct da_softc *softc;
4281 	struct bio *bp;
4282 
4283 	softc = periph->softc;
4284 	bp = (struct bio *)ccb->ccb_h.ccb_bp;
4285 
4286 	switch (bp->bio_zone.zone_cmd) {
4287 	case DISK_ZONE_OPEN:
4288 	case DISK_ZONE_CLOSE:
4289 	case DISK_ZONE_FINISH:
4290 	case DISK_ZONE_RWP:
4291 		break;
4292 	case DISK_ZONE_REPORT_ZONES: {
4293 		uint32_t avail_len;
4294 		struct disk_zone_report *rep;
4295 		struct scsi_report_zones_hdr *hdr;
4296 		struct scsi_report_zones_desc *desc;
4297 		struct disk_zone_rep_entry *entry;
4298 		uint32_t hdr_len, num_avail;
4299 		uint32_t num_to_fill, i;
4300 		int ata;
4301 
4302 		rep = &bp->bio_zone.zone_params.report;
4303 		avail_len = ccb->csio.dxfer_len - ccb->csio.resid;
4304 		/*
4305 		 * Note that bio_resid isn't normally used for zone
4306 		 * commands, but it is used by devstat_end_transaction_bio()
4307 		 * to determine how much data was transferred.  Because
4308 		 * the size of the SCSI/ATA data structures is different
4309 		 * than the size of the BIO interface structures, the
4310 		 * amount of data actually transferred from the drive will
4311 		 * be different than the amount of data transferred to
4312 		 * the user.
4313 		 */
4314 		bp->bio_resid = ccb->csio.resid;
4315 		hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr;
4316 		if (avail_len < sizeof(*hdr)) {
4317 			/*
4318 			 * Is there a better error than EIO here?  We asked
4319 			 * for at least the header, and we got less than
4320 			 * that.
4321 			 */
4322 			bp->bio_error = EIO;
4323 			bp->bio_flags |= BIO_ERROR;
4324 			bp->bio_resid = bp->bio_bcount;
4325 			break;
4326 		}
4327 
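		/*
		 * REPORT ZONES data fetched via ATA passthrough is
		 * little endian; the native SCSI version is big endian.
		 * Pick the right decoder below.
		 */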
4328 		if (softc->zone_interface == DA_ZONE_IF_ATA_PASS)
4329 			ata = 1;
4330 		else
4331 			ata = 0;
4332 
4333 		hdr_len = ata ? le32dec(hdr->length) :
4334 				scsi_4btoul(hdr->length);
4335 		if (hdr_len > 0)
4336 			rep->entries_available = hdr_len / sizeof(*desc);
4337 		else
4338 			rep->entries_available = 0;
4339 		/*
4340 		 * NOTE: using the same values for the BIO version of the
4341 		 * same field as the SCSI/ATA values.  This means we could
4342 		 * get some additional values that aren't defined in bio.h
4343 		 * if more values of the same field are defined later.
4344 		 */
4345 		rep->header.same = hdr->byte4 & SRZ_SAME_MASK;
4346 		rep->header.maximum_lba = ata ?  le64dec(hdr->maximum_lba) :
4347 					  scsi_8btou64(hdr->maximum_lba);
4348 		/*
4349 		 * If the drive reports no entries that match the query,
4350 		 * we're done.
4351 		 */
4352 		if (hdr_len == 0) {
4353 			rep->entries_filled = 0;
4354 			break;
4355 		}
4356 
4357 		num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc),
4358 				hdr_len / sizeof(*desc));
4359 		/*
4360 		 * If the drive didn't return any data, then we're done.
4361 		 */
4362 		if (num_avail == 0) {
4363 			rep->entries_filled = 0;
4364 			break;
4365 		}
4366 
4367 		num_to_fill = min(num_avail, rep->entries_allocated);
4368 		/*
4369 		 * If the user didn't allocate any entries for us to fill,
4370 		 * we're done.
4371 		 */
4372 		if (num_to_fill == 0) {
4373 			rep->entries_filled = 0;
4374 			break;
4375 		}
4376 
4377 		for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0];
4378 		     i < num_to_fill; i++, desc++, entry++) {
4379 			/*
4380 			 * NOTE: we're mapping the values here directly
4381 			 * from the SCSI/ATA bit definitions to the bio.h
4382 			 * definitons.  There is also a warning in
4383 			 * definitions.  There is also a warning in
4384 			 * additional values are added in the SCSI/ATA
4385 			 * specs these will be visible to consumers of
4386 			 * this interface.
4387 			 */
4388 			entry->zone_type = desc->zone_type & SRZ_TYPE_MASK;
4389 			entry->zone_condition =
4390 			    (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
4391 			    SRZ_ZONE_COND_SHIFT;
4392 			entry->zone_flags |= desc->zone_flags &
4393 			    (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET);
4394 			entry->zone_length =
4395 			    ata ? le64dec(desc->zone_length) :
4396 				  scsi_8btou64(desc->zone_length);
4397 			entry->zone_start_lba =
4398 			    ata ? le64dec(desc->zone_start_lba) :
4399 				  scsi_8btou64(desc->zone_start_lba);
4400 			entry->write_pointer_lba =
4401 			    ata ? le64dec(desc->write_pointer_lba) :
4402 				  scsi_8btou64(desc->write_pointer_lba);
4403 		}
4404 		rep->entries_filled = num_to_fill;
4405 		break;
4406 	}
4407 	case DISK_ZONE_GET_PARAMS:
4408 	default:
4409 		/*
4410 		 * In theory we should not get a GET_PARAMS bio, since it
4411 		 * should be handled without queueing the command to the
4412 		 * drive.
4413 		 */
4414 		panic("%s: Invalid zone command %d", __func__,
4415 		    bp->bio_zone.zone_cmd);
4416 		break;
4417 	}
4418 
4419 	if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)
4420 		free(ccb->csio.data_ptr, M_SCSIDA);
4421 }
4422 
4423 static void
4424 dadone(struct cam_periph *periph, union ccb *done_ccb)
4425 {
4426 	struct bio *bp, *bp1;
4427 	struct da_softc *softc;
4428 	struct ccb_scsiio *csio;
4429 	u_int32_t  priority;
4430 	da_ccb_state state;
4431 
4432 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n"));
4433 
4434 	softc = (struct da_softc *)periph->softc;
4435 	priority = done_ccb->ccb_h.pinfo.priority;
4436 	csio = &done_ccb->csio;
4437 
4438 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4439 	if (csio->bio != NULL)
4440 		biotrack(csio->bio, __func__);
4441 #endif
4442 	state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;
4443 
4444 	cam_periph_lock(periph);
4445 	bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
4446 	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
4447 		int error;
4448 		int sf;
4449 
4450 		if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
4451 			sf = SF_RETRY_UA;
4452 		else
4453 			sf = 0;
4454 
4455 		error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
4456 		if (error == ERESTART) {
4457 			/* A retry was scheduled, so just return. */
4458 			cam_periph_unlock(periph);
4459 			return;
4460 		}
4461 		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
4462 		if (error != 0) {
4463 			int queued_error;
4464 
4465 			/*
4466 			 * return all queued I/O with EIO, so that
4467 			 * the client can retry these I/Os in the
4468 			 * proper order should it attempt to recover.
4469 			 */
4470 			queued_error = EIO;
4471 
4472 			if (error == ENXIO
4473 			 && (softc->flags & DA_FLAG_PACK_INVALID)== 0) {
4474 				/*
4475 				 * Catastrophic error.  Mark our pack as
4476 				 * invalid.
4477 				 *
4478 				 * XXX See if this is really a media
4479 				 * XXX change first?
4480 				 */
4481 				xpt_print(periph->path, "Invalidating pack\n");
4482 				softc->flags |= DA_FLAG_PACK_INVALID;
4483 #ifdef CAM_IO_STATS
4484 				softc->invalidations++;
4485 #endif
4486 				queued_error = ENXIO;
4487 			}
4488 			cam_iosched_flush(softc->cam_iosched, NULL,
4489 			   queued_error);
4490 			if (bp != NULL) {
4491 				bp->bio_error = error;
4492 				bp->bio_resid = bp->bio_bcount;
4493 				bp->bio_flags |= BIO_ERROR;
4494 			}
4495 		} else if (bp != NULL) {
4496 			if (state == DA_CCB_DELETE)
4497 				bp->bio_resid = 0;
4498 			else
4499 				bp->bio_resid = csio->resid;
4500 			bp->bio_error = 0;
4501 			if (bp->bio_resid != 0)
4502 				bp->bio_flags |= BIO_ERROR;
4503 		}
4504 		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
4505 			cam_release_devq(done_ccb->ccb_h.path,
4506 					 /*relsim_flags*/0,
4507 					 /*reduction*/0,
4508 					 /*timeout*/0,
4509 					 /*getcount_only*/0);
4510 	} else if (bp != NULL) {
4511 		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
4512 			panic("REQ_CMP with QFRZN");
4513 		if (bp->bio_cmd == BIO_ZONE)
4514 			dazonedone(periph, done_ccb);
4515 		else if (state == DA_CCB_DELETE)
4516 			bp->bio_resid = 0;
4517 		else
4518 			bp->bio_resid = csio->resid;
4519 		if ((csio->resid > 0) && (bp->bio_cmd != BIO_ZONE))
4520 			bp->bio_flags |= BIO_ERROR;
4521 		if (softc->error_inject != 0) {
4522 			bp->bio_error = softc->error_inject;
4523 			bp->bio_resid = bp->bio_bcount;
4524 			bp->bio_flags |= BIO_ERROR;
4525 			softc->error_inject = 0;
4526 		}
4527 	}
4528 
4529 	if (bp != NULL)
4530 		biotrack(bp, __func__);
4531 	LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
4532 	if (LIST_EMPTY(&softc->pending_ccbs))
4533 		softc->flags |= DA_FLAG_WAS_OTAG;
4534 
4535 	/*
4536 	 * We need to call cam_iosched before we call biodone so that we don't
4537 	 * measure any activity that happens in the completion routine, which in
4538 	 * the case of sendfile can be quite extensive. Release the periph
4539 	 * refcount taken in dastart() for each CCB.
4540 	 */
4541 	cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
4542 	xpt_release_ccb(done_ccb);
4543 	KASSERT(softc->refcount >= 1, ("dadone softc %p refcount %d", softc, softc->refcount));
4544 	softc->refcount--;
4545 	if (state == DA_CCB_DELETE) {
4546 		TAILQ_HEAD(, bio) queue;
4547 
4548 		TAILQ_INIT(&queue);
4549 		TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue);
4550 		softc->delete_run_queue.insert_point = NULL;
4551 		/*
4552 		 * Normally, the xpt_release_ccb() above would make sure
4553 		 * that when we have more work to do, that work would
4554 		 * get kicked off. However, we specifically keep
4555 		 * delete_running set to 0 before the call above to
4556 		 * allow other I/O to progress when many BIO_DELETE
4557 		 * requests are pushed down. We set delete_running to 0
4558 		 * and call daschedule again so that we don't stall if
4559 		 * there are no other I/Os pending apart from BIO_DELETEs.
4560 		 */
4561 		cam_iosched_trim_done(softc->cam_iosched);
4562 		daschedule(periph);
4563 		cam_periph_unlock(periph);
4564 		while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
4565 			TAILQ_REMOVE(&queue, bp1, bio_queue);
4566 			bp1->bio_error = bp->bio_error;
4567 			if (bp->bio_flags & BIO_ERROR) {
4568 				bp1->bio_flags |= BIO_ERROR;
4569 				bp1->bio_resid = bp1->bio_bcount;
4570 			} else
4571 				bp1->bio_resid = 0;
4572 			biodone(bp1);
4573 		}
4574 	} else {
4575 		daschedule(periph);
4576 		cam_periph_unlock(periph);
4577 	}
4578 	if (bp != NULL)
4579 		biodone(bp);
4580 	return;
4581 }
4582 
4583 static void
4584 dadone_probewp(struct cam_periph *periph, union ccb *done_ccb)
4585 {
4586 	struct scsi_mode_header_6 *mode_hdr6;
4587 	struct scsi_mode_header_10 *mode_hdr10;
4588 	struct da_softc *softc;
4589 	struct ccb_scsiio *csio;
4590 	u_int32_t  priority;
4591 	uint8_t dev_spec;
4592 
4593 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probewp\n"));
4594 
4595 	softc = (struct da_softc *)periph->softc;
4596 	priority = done_ccb->ccb_h.pinfo.priority;
4597 	csio = &done_ccb->csio;
4598 
4599 	cam_periph_assert(periph, MA_OWNED);
4600 
4601 	if (softc->minimum_cmd_size > 6) {
4602 		mode_hdr10 = (struct scsi_mode_header_10 *)csio->data_ptr;
4603 		dev_spec = mode_hdr10->dev_spec;
4604 	} else {
4605 		mode_hdr6 = (struct scsi_mode_header_6 *)csio->data_ptr;
4606 		dev_spec = mode_hdr6->dev_spec;
4607 	}
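	/*
	 * Bit 7 of the mode parameter header's device-specific
	 * parameter is the write protect (WP) bit for direct access
	 * devices.
	 */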
4608 	if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) {
4609 		if ((dev_spec & 0x80) != 0)
4610 			softc->disk->d_flags |= DISKFLAG_WRITE_PROTECT;
4611 		else
4612 			softc->disk->d_flags &= ~DISKFLAG_WRITE_PROTECT;
4613 	} else {
4614 		int error;
4615 
4616 		error = daerror(done_ccb, CAM_RETRY_SELTO,
4617 				SF_RETRY_UA|SF_NO_PRINT);
4618 		if (error == ERESTART)
4619 			return;
4620 		else if (error != 0) {
4621 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4622 				/* Don't wedge this device's queue */
4623 				cam_release_devq(done_ccb->ccb_h.path,
4624 						 /*relsim_flags*/0,
4625 						 /*reduction*/0,
4626 						 /*timeout*/0,
4627 						 /*getcount_only*/0);
4628 			}
4629 		}
4630 	}
4631 
4632 	free(csio->data_ptr, M_SCSIDA);
4633 	xpt_release_ccb(done_ccb);
4634 	if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
4635 		softc->state = DA_STATE_PROBE_RC16;
4636 	else
4637 		softc->state = DA_STATE_PROBE_RC;
4638 	xpt_schedule(periph, priority);
4639 	return;
4640 }
4641 
4642 static void
4643 dadone_proberc(struct cam_periph *periph, union ccb *done_ccb)
4644 {
4645 	struct scsi_read_capacity_data *rdcap;
4646 	struct scsi_read_capacity_data_long *rcaplong;
4647 	struct da_softc *softc;
4648 	struct ccb_scsiio *csio;
4649 	da_ccb_state state;
4650 	char *announce_buf;
4651 	u_int32_t  priority;
4652 	int lbp;
4653 
4654 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_proberc\n"));
4655 
4656 	softc = (struct da_softc *)periph->softc;
4657 	priority = done_ccb->ccb_h.pinfo.priority;
4658 	csio = &done_ccb->csio;
4659 	state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;
4660 
4661 	lbp = 0;
4662 	rdcap = NULL;
4663 	rcaplong = NULL;
4664 	/* XXX TODO: can this be a malloc? */
4665 	announce_buf = softc->announce_temp;
4666 	bzero(announce_buf, DA_ANNOUNCETMP_SZ);
4667 
4668 	if (state == DA_CCB_PROBE_RC)
4669 		rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
4670 	else
4671 		rcaplong = (struct scsi_read_capacity_data_long *)
4672 			csio->data_ptr;
4673 
4674 	cam_periph_assert(periph, MA_OWNED);
4675 
4676 	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4677 		struct disk_params *dp;
4678 		uint32_t block_size;
4679 		uint64_t maxsector;
4680 		u_int lalba;	/* Lowest aligned LBA. */
4681 
4682 		if (state == DA_CCB_PROBE_RC) {
4683 			block_size = scsi_4btoul(rdcap->length);
4684 			maxsector = scsi_4btoul(rdcap->addr);
4685 			lalba = 0;
4686 
4687 			/*
4688 			 * According to SBC-2, if the standard 10
4689 			 * byte READ CAPACITY command returns 0xffffffff,
4690 			 * we should issue the 16 byte version of
4691 			 * the command, since the device in question
4692 			 * has more sectors than can be represented
4693 			 * with the short version of the command.
4694 			 */
4695 			if (maxsector == 0xffffffff) {
4696 				free(rdcap, M_SCSIDA);
4697 				xpt_release_ccb(done_ccb);
4698 				softc->state = DA_STATE_PROBE_RC16;
4699 				xpt_schedule(periph, priority);
4700 				return;
4701 			}
4702 		} else {
4703 			block_size = scsi_4btoul(rcaplong->length);
4704 			maxsector = scsi_8btou64(rcaplong->addr);
4705 			lalba = scsi_2btoul(rcaplong->lalba_lbp);
4706 		}
4707 
4708 		/*
4709 		 * Because the GEOM code will panic if we
4710 		 * give it an 'illegal' value, we avoid that
4711 		 * here.
4712 		 */
4713 		if (block_size == 0) {
4714 			block_size = 512;
4715 			if (maxsector == 0)
4716 				maxsector = -1;
4717 		}
4718 		if (block_size >= MAXPHYS) {
4719 			xpt_print(periph->path,
4720 			    "unsupportable block size %ju\n",
4721 			    (uintmax_t) block_size);
4722 			announce_buf = NULL;
4723 			cam_periph_invalidate(periph);
4724 		} else {
4725 			/*
4726 			 * We pass rcaplong into dasetgeom(),
4727 			 * because it will only use it if it is
4728 			 * non-NULL.
4729 			 */
4730 			dasetgeom(periph, block_size, maxsector,
4731 				  rcaplong, sizeof(*rcaplong));
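			/*
			 * The LBPME bit in the READ CAPACITY(16) data
			 * indicates that logical block provisioning
			 * management is enabled, so it's worth probing
			 * the LBP VPD page below.
			 */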
4732 			lbp = (lalba & SRC16_LBPME_A);
4733 			dp = &softc->params;
4734 			snprintf(announce_buf, DA_ANNOUNCETMP_SZ,
4735 			    "%juMB (%ju %u byte sectors)",
4736 			    ((uintmax_t)dp->secsize * dp->sectors) /
4737 			     (1024 * 1024),
4738 			    (uintmax_t)dp->sectors, dp->secsize);
4739 		}
4740 	} else {
4741 		int error;
4742 
4743 		/*
4744 		 * Retry any UNIT ATTENTION type errors.  They
4745 		 * are expected at boot.
4746 		 */
4747 		error = daerror(done_ccb, CAM_RETRY_SELTO,
4748 				SF_RETRY_UA|SF_NO_PRINT);
4749 		if (error == ERESTART) {
4750 			/*
4751 			 * A retry was scheduled, so
4752 			 * just return.
4753 			 */
4754 			return;
4755 		} else if (error != 0) {
4756 			int asc, ascq;
4757 			int sense_key, error_code;
4758 			int have_sense;
4759 			cam_status status;
4760 			struct ccb_getdev cgd;
4761 
4762 			/* Don't wedge this device's queue */
4763 			status = done_ccb->ccb_h.status;
4764 			if ((status & CAM_DEV_QFRZN) != 0)
4765 				cam_release_devq(done_ccb->ccb_h.path,
4766 						 /*relsim_flags*/0,
4767 						 /*reduction*/0,
4768 						 /*timeout*/0,
4769 						 /*getcount_only*/0);
4770 
4771 
4772 			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
4773 				      CAM_PRIORITY_NORMAL);
4774 			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
4775 			xpt_action((union ccb *)&cgd);
4776 
4777 			if (scsi_extract_sense_ccb(done_ccb,
4778 			    &error_code, &sense_key, &asc, &ascq))
4779 				have_sense = TRUE;
4780 			else
4781 				have_sense = FALSE;
4782 
4783 			/*
4784 			 * If we tried READ CAPACITY(16) and failed,
4785 			 * fallback to READ CAPACITY(10).
4786 			 */
4787 			if ((state == DA_CCB_PROBE_RC16) &&
4788 			    (softc->flags & DA_FLAG_CAN_RC16) &&
4789 			    (((csio->ccb_h.status & CAM_STATUS_MASK) ==
4790 				CAM_REQ_INVALID) ||
4791 			     ((have_sense) &&
4792 			      (error_code == SSD_CURRENT_ERROR ||
4793 			       error_code == SSD_DESC_CURRENT_ERROR) &&
4794 			      (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) {
4795 				cam_periph_assert(periph, MA_OWNED);
4796 				softc->flags &= ~DA_FLAG_CAN_RC16;
4797 				free(rdcap, M_SCSIDA);
4798 				xpt_release_ccb(done_ccb);
4799 				softc->state = DA_STATE_PROBE_RC;
4800 				xpt_schedule(periph, priority);
4801 				return;
4802 			}
4803 
4804 			/*
4805 			 * Attach to anything that claims to be a
4806 			 * direct access or optical disk device,
4807 			 * as long as it doesn't return a "Logical
4808 			 * unit not supported" (0x25) error.
4809 			 * "Internal Target Failure" (0x44) is also
4810 			 * special and typically means that the
4811 			 * device is a SATA drive behind a SATL
4812 			 * translation that's fallen into a
4813 			 * terminally fatal state.
4814 			 */
4815 			if ((have_sense)
4816 			 && (asc != 0x25) && (asc != 0x44)
4817 			 && (error_code == SSD_CURRENT_ERROR
4818 			  || error_code == SSD_DESC_CURRENT_ERROR)) {
4819 				const char *sense_key_desc;
4820 				const char *asc_desc;
4821 
4822 				dasetgeom(periph, 512, -1, NULL, 0);
4823 				scsi_sense_desc(sense_key, asc, ascq,
4824 						&cgd.inq_data, &sense_key_desc,
4825 						&asc_desc);
4826 				snprintf(announce_buf, DA_ANNOUNCETMP_SZ,
4827 				    "Attempt to query device "
4828 				    "size failed: %s, %s",
4829 				    sense_key_desc, asc_desc);
4830 			} else {
4831 				if (have_sense)
4832 					scsi_sense_print(&done_ccb->csio);
4833 				else {
4834 					xpt_print(periph->path,
4835 					    "got CAM status %#x\n",
4836 					    done_ccb->ccb_h.status);
4837 				}
4838 
4839 				xpt_print(periph->path, "fatal error, "
4840 				    "failed to attach to device\n");
4841 
4842 				announce_buf = NULL;
4843 
4844 				/*
4845 				 * Free up resources.
4846 				 */
4847 				cam_periph_invalidate(periph);
4848 			}
4849 		}
4850 	}
4851 	free(csio->data_ptr, M_SCSIDA);
4852 	if (announce_buf != NULL &&
4853 	    ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) {
4854 		struct sbuf sb;
4855 
4856 		sbuf_new(&sb, softc->announcebuf, DA_ANNOUNCE_SZ,
4857 		    SBUF_FIXEDLEN);
4858 		xpt_announce_periph_sbuf(periph, &sb, announce_buf);
4859 		xpt_announce_quirks_sbuf(periph, &sb, softc->quirks,
4860 		    DA_Q_BIT_STRING);
4861 		sbuf_finish(&sb);
4862 		sbuf_putbuf(&sb);
4863 
4864 		/*
4865 		 * Create our sysctl variables, now that we know
4866 		 * we have successfully attached.
4867 		 */
4868 		/* increase the refcount */
4869 		if (da_periph_acquire(periph, DA_REF_SYSCTL) == 0) {
4870 			taskqueue_enqueue(taskqueue_thread,
4871 					  &softc->sysctl_task);
4872 		} else {
4873 			/* XXX This message is useless! */
4874 			xpt_print(periph->path, "fatal error, "
4875 			    "could not acquire reference count\n");
4876 		}
4877 	}
4878 
4879 	/* We already probed the device. */
4880 	if (softc->flags & DA_FLAG_PROBED) {
4881 		daprobedone(periph, done_ccb);
4882 		return;
4883 	}
4884 
4885 	/* Ensure re-probe doesn't see old delete. */
4886 	softc->delete_available = 0;
4887 	dadeleteflag(softc, DA_DELETE_ZERO, 1);
4888 	if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) {
4889 		/*
4890 		 * Based on older SBC-3 spec revisions,
4891 		 * any of the UNMAP methods "may" be
4892 		 * available via LBP given this flag, so
4893 		 * we flag all of them as available and
4894 		 * then remove those which further
4895 		 * probes confirm aren't available
4896 		 * later.
4897 		 *
4898 		 * We could also check the readcap(16)
4899 		 * p_type field to exclude one or more
4900 		 * invalid write same (X) types here.
4901 		 */
4902 		dadeleteflag(softc, DA_DELETE_WS16, 1);
4903 		dadeleteflag(softc, DA_DELETE_WS10, 1);
4904 		dadeleteflag(softc, DA_DELETE_UNMAP, 1);
4905 
4906 		xpt_release_ccb(done_ccb);
4907 		softc->state = DA_STATE_PROBE_LBP;
4908 		xpt_schedule(periph, priority);
4909 		return;
4910 	}
4911 
4912 	xpt_release_ccb(done_ccb);
4913 	softc->state = DA_STATE_PROBE_BDC;
4914 	xpt_schedule(periph, priority);
4915 	return;
4916 }
4917 
4918 static void
4919 dadone_probelbp(struct cam_periph *periph, union ccb *done_ccb)
4920 {
4921 	struct scsi_vpd_logical_block_prov *lbp;
4922 	struct da_softc *softc;
4923 	struct ccb_scsiio *csio;
4924 	u_int32_t  priority;
4925 
4926 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probelbp\n"));
4927 
4928 	softc = (struct da_softc *)periph->softc;
4929 	priority = done_ccb->ccb_h.pinfo.priority;
4930 	csio = &done_ccb->csio;
4931 	lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr;
4932 
4933 	cam_periph_assert(periph, MA_OWNED);
4934 
4935 	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4936 		/*
4937 		 * T10/1799-D Revision 31 states at least one of these
4938 		 * must be supported but we don't currently enforce this.
4939 		 */
4940 		dadeleteflag(softc, DA_DELETE_WS16,
4941 		     (lbp->flags & SVPD_LBP_WS16));
4942 		dadeleteflag(softc, DA_DELETE_WS10,
4943 			     (lbp->flags & SVPD_LBP_WS10));
4944 		dadeleteflag(softc, DA_DELETE_UNMAP,
4945 			     (lbp->flags & SVPD_LBP_UNMAP));
4946 	} else {
4947 		int error;
4948 		error = daerror(done_ccb, CAM_RETRY_SELTO,
4949 				SF_RETRY_UA|SF_NO_PRINT);
4950 		if (error == ERESTART)
4951 			return;
4952 		else if (error != 0) {
4953 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4954 				/* Don't wedge this device's queue */
4955 				cam_release_devq(done_ccb->ccb_h.path,
4956 						 /*relsim_flags*/0,
4957 						 /*reduction*/0,
4958 						 /*timeout*/0,
4959 						 /*getcount_only*/0);
4960 			}
4961 
4962 			/*
4963 			 * Failure indicates we don't support any SBC-3
4964 			 * delete methods with UNMAP.
4965 			 */
4966 		}
4967 	}
4968 
4969 	free(lbp, M_SCSIDA);
4970 	xpt_release_ccb(done_ccb);
4971 	softc->state = DA_STATE_PROBE_BLK_LIMITS;
4972 	xpt_schedule(periph, priority);
4973 	return;
4974 }
4975 
4976 static void
4977 dadone_probeblklimits(struct cam_periph *periph, union ccb *done_ccb)
4978 {
4979 	struct scsi_vpd_block_limits *block_limits;
4980 	struct da_softc *softc;
4981 	struct ccb_scsiio *csio;
4982 	u_int32_t  priority;
4983 
4984 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeblklimits\n"));
4985 
4986 	softc = (struct da_softc *)periph->softc;
4987 	priority = done_ccb->ccb_h.pinfo.priority;
4988 	csio = &done_ccb->csio;
4989 	block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr;
4990 
4991 	cam_periph_assert(periph, MA_OWNED);
4992 
4993 	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4994 		uint32_t max_txfer_len = scsi_4btoul(
4995 			block_limits->max_txfer_len);
4996 		uint32_t max_unmap_lba_cnt = scsi_4btoul(
4997 			block_limits->max_unmap_lba_cnt);
4998 		uint32_t max_unmap_blk_cnt = scsi_4btoul(
4999 			block_limits->max_unmap_blk_cnt);
5000 		uint32_t unmap_gran = scsi_4btoul(
5001 			block_limits->opt_unmap_grain);
5002 		uint32_t unmap_gran_align = scsi_4btoul(
5003 			block_limits->unmap_grain_align);
5004 		uint64_t ws_max_blks = scsi_8btou64(
5005 			block_limits->max_write_same_length);
5006 
5007 		if (max_txfer_len != 0) {
5008 			softc->disk->d_maxsize = MIN(softc->maxio,
5009 			    (off_t)max_txfer_len * softc->params.secsize);
5010 		}
5011 
5012 		/*
5013 		 * We should already support UNMAP, but we check the LBA
5014 		 * and block count limits to be sure.
5015 		 */
5016 		if (max_unmap_lba_cnt != 0x00L &&
5017 		    max_unmap_blk_cnt != 0x00L) {
5018 			softc->unmap_max_lba = max_unmap_lba_cnt;
5019 			softc->unmap_max_ranges = min(max_unmap_blk_cnt,
5020 				UNMAP_MAX_RANGES);
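			/*
			 * An optimal unmap granularity of 0 or 1 imposes
			 * no alignment constraint.  The top bit of the
			 * alignment field (UGAVALID) indicates whether
			 * the reported alignment is valid.
			 */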
5021 			if (unmap_gran > 1) {
5022 				softc->unmap_gran = unmap_gran;
5023 				if (unmap_gran_align & 0x80000000) {
5024 					softc->unmap_gran_align =
5025 					    unmap_gran_align & 0x7fffffff;
5026 				}
5027 			}
5028 		} else {
5029 			/*
5030 			 * Unexpected UNMAP limits, which means the
5031 			 * device doesn't actually support UNMAP.
5032 			 */
5033 			dadeleteflag(softc, DA_DELETE_UNMAP, 0);
5034 		}
5035 
5036 		if (ws_max_blks != 0x00L)
5037 			softc->ws_max_blks = ws_max_blks;
5038 	} else {
5039 		int error;
5040 		error = daerror(done_ccb, CAM_RETRY_SELTO,
5041 				SF_RETRY_UA|SF_NO_PRINT);
5042 		if (error == ERESTART)
5043 			return;
5044 		else if (error != 0) {
5045 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5046 				/* Don't wedge this device's queue */
5047 				cam_release_devq(done_ccb->ccb_h.path,
5048 						 /*relsim_flags*/0,
5049 						 /*reduction*/0,
5050 						 /*timeout*/0,
5051 						 /*getcount_only*/0);
5052 			}
5053 
5054 			/*
5055 			 * Failure here doesn't mean UNMAP is not
5056 			 * supported as this is an optional page.
5057 			 */
5058 			softc->unmap_max_lba = 1;
5059 			softc->unmap_max_ranges = 1;
5060 		}
5061 	}
5062 
5063 	free(block_limits, M_SCSIDA);
5064 	xpt_release_ccb(done_ccb);
5065 	softc->state = DA_STATE_PROBE_BDC;
5066 	xpt_schedule(periph, priority);
5067 	return;
5068 }
5069 
5070 static void
5071 dadone_probebdc(struct cam_periph *periph, union ccb *done_ccb)
5072 {
5073 	struct scsi_vpd_block_device_characteristics *bdc;
5074 	struct da_softc *softc;
5075 	struct ccb_scsiio *csio;
5076 	u_int32_t  priority;
5077 
5078 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probebdc\n"));
5079 
5080 	softc = (struct da_softc *)periph->softc;
5081 	priority = done_ccb->ccb_h.pinfo.priority;
5082 	csio = &done_ccb->csio;
5083 	bdc = (struct scsi_vpd_block_device_characteristics *)csio->data_ptr;
5084 
5085 	cam_periph_assert(periph, MA_OWNED);
5086 
5087 	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5088 		uint32_t valid_len;
5089 
5090 		/*
5091 		 * Disable queue sorting for non-rotational media
5092 		 * by default.
5093 		 */
5094 		u_int16_t old_rate = softc->disk->d_rotation_rate;
5095 
5096 		valid_len = csio->dxfer_len - csio->resid;
5097 		if (SBDC_IS_PRESENT(bdc, valid_len,
5098 		    medium_rotation_rate)) {
5099 			softc->disk->d_rotation_rate =
5100 				scsi_2btoul(bdc->medium_rotation_rate);
5101 			if (softc->disk->d_rotation_rate ==
5102 			    SVPD_BDC_RATE_NON_ROTATING) {
5103 				cam_iosched_set_sort_queue(
5104 				    softc->cam_iosched, 0);
5105 				softc->rotating = 0;
5106 			}
5107 			if (softc->disk->d_rotation_rate != old_rate) {
5108 				disk_attr_changed(softc->disk,
5109 				    "GEOM::rotation_rate", M_NOWAIT);
5110 			}
5111 		}
5112 		if ((SBDC_IS_PRESENT(bdc, valid_len, flags))
5113 		 && (softc->zone_mode == DA_ZONE_NONE)) {
5114 			int ata_proto;
5115 
5116 			if (scsi_vpd_supported_page(periph,
5117 			    SVPD_ATA_INFORMATION))
5118 				ata_proto = 1;
5119 			else
5120 				ata_proto = 0;
5121 
5122 			/*
5123 			 * The Zoned field will only be set for
5124 			 * Drive Managed and Host Aware drives.  If
5125 			 * they are Host Managed, the device type
5126 			 * in the standard INQUIRY data should be
5127 			 * set to T_ZBC_HM (0x14).
5128 			 */
5129 			if ((bdc->flags & SVPD_ZBC_MASK) ==
5130 			     SVPD_HAW_ZBC) {
5131 				softc->zone_mode = DA_ZONE_HOST_AWARE;
5132 				softc->zone_interface = (ata_proto) ?
5133 				   DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
5134 			} else if ((bdc->flags & SVPD_ZBC_MASK) ==
5135 			     SVPD_DM_ZBC) {
5136 				softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
5137 				softc->zone_interface = (ata_proto) ?
5138 				   DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
5139 			} else if ((bdc->flags & SVPD_ZBC_MASK) !=
5140 				  SVPD_ZBC_NR) {
5141 				xpt_print(periph->path, "Unknown zoned "
5142 				    "type %#x\n",
5143 				    bdc->flags & SVPD_ZBC_MASK);
5144 			}
5145 		}
5146 	} else {
5147 		int error;
5148 		error = daerror(done_ccb, CAM_RETRY_SELTO,
5149 				SF_RETRY_UA|SF_NO_PRINT);
5150 		if (error == ERESTART)
5151 			return;
5152 		else if (error != 0) {
5153 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5154 				/* Don't wedge this device's queue */
5155 				cam_release_devq(done_ccb->ccb_h.path,
5156 						 /*relsim_flags*/0,
5157 						 /*reduction*/0,
5158 						 /*timeout*/0,
5159 						 /*getcount_only*/0);
5160 			}
5161 		}
5162 	}
5163 
5164 	free(bdc, M_SCSIDA);
5165 	xpt_release_ccb(done_ccb);
5166 	softc->state = DA_STATE_PROBE_ATA;
5167 	xpt_schedule(periph, priority);
5168 	return;
5169 }
5170 
5171 static void
5172 dadone_probeata(struct cam_periph *periph, union ccb *done_ccb)
5173 {
5174 	struct ata_params *ata_params;
5175 	struct ccb_scsiio *csio;
5176 	struct da_softc *softc;
5177 	u_int32_t  priority;
5178 	int continue_probe;
5179 	int error, i;
5180 	uint16_t *ptr;
5181 
5182 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeata\n"));
5183 
5184 	softc = (struct da_softc *)periph->softc;
5185 	priority = done_ccb->ccb_h.pinfo.priority;
5186 	csio = &done_ccb->csio;
5187 	ata_params = (struct ata_params *)csio->data_ptr;
5188 	ptr = (uint16_t *)ata_params;
5189 	continue_probe = 0;
5190 	error = 0;
5191 
5192 	cam_periph_assert(periph, MA_OWNED);
5193 
5194 	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5195 		uint16_t old_rate;
5196 
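		/*
		 * ATA IDENTIFY data is little endian; convert each
		 * 16-bit word to host byte order before looking at any
		 * fields.
		 */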
5197 		for (i = 0; i < sizeof(*ata_params) / 2; i++)
5198 			ptr[i] = le16toh(ptr[i]);
5199 		if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM &&
5200 		    (softc->quirks & DA_Q_NO_UNMAP) == 0) {
5201 			dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1);
5202 			if (ata_params->max_dsm_blocks != 0)
5203 				softc->trim_max_ranges = min(
5204 				  softc->trim_max_ranges,
5205 				  ata_params->max_dsm_blocks *
5206 				  ATA_DSM_BLK_RANGES);
5207 		}
5208 		/*
5209 		 * Disable queue sorting for non-rotational media
5210 		 * by default.
5211 		 */
5212 		old_rate = softc->disk->d_rotation_rate;
5213 		softc->disk->d_rotation_rate = ata_params->media_rotation_rate;
5214 		if (softc->disk->d_rotation_rate == ATA_RATE_NON_ROTATING) {
5215 			cam_iosched_set_sort_queue(softc->cam_iosched, 0);
5216 			softc->rotating = 0;
5217 		}
5218 		if (softc->disk->d_rotation_rate != old_rate) {
5219 			disk_attr_changed(softc->disk,
5220 			    "GEOM::rotation_rate", M_NOWAIT);
5221 		}
5222 
5223 		cam_periph_assert(periph, MA_OWNED);
5224 		if (ata_params->capabilities1 & ATA_SUPPORT_DMA)
5225 			softc->flags |= DA_FLAG_CAN_ATA_DMA;
5226 
5227 		if (ata_params->support.extension & ATA_SUPPORT_GENLOG)
5228 			softc->flags |= DA_FLAG_CAN_ATA_LOG;
5229 
5230 		/*
5231 		 * At this point, if we have a SATA host aware drive,
5232 		 * we communicate via ATA passthrough unless the
5233 		 * SAT layer supports ZBC -> ZAC translation.  In that
5234 		 * case, we prefer the SCSI ZBC interface instead.
5235 		 *
5236 		 * XXX KDM figure out how to detect a host managed
5237 		 * SATA drive.
5238 		 */
5239 		if (softc->zone_mode == DA_ZONE_NONE) {
5240 			/*
5241 			 * Note that we don't override the zone
5242 			 * mode or interface if it has already been
5243 			 * set.  This is because it has either been
5244 			 * set as a quirk, or when we probed the
5245 			 * SCSI Block Device Characteristics page,
5246 			 * the zoned field was set.  The latter
5247 			 * means that the SAT layer supports ZBC to
5248 			 * ZAC translation, and we would prefer to
5249 			 * use that if it is available.
5250 			 */
5251 			if ((ata_params->support3 &
5252 			    ATA_SUPPORT_ZONE_MASK) ==
5253 			    ATA_SUPPORT_ZONE_HOST_AWARE) {
5254 				softc->zone_mode = DA_ZONE_HOST_AWARE;
5255 				softc->zone_interface =
5256 				    DA_ZONE_IF_ATA_PASS;
5257 			} else if ((ata_params->support3 &
5258 				    ATA_SUPPORT_ZONE_MASK) ==
5259 				    ATA_SUPPORT_ZONE_DEV_MANAGED) {
5260 				softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
5261 				softc->zone_interface = DA_ZONE_IF_ATA_PASS;
5262 			}
5263 		}
5264 
5265 	} else {
5266 		error = daerror(done_ccb, CAM_RETRY_SELTO,
5267 				SF_RETRY_UA|SF_NO_PRINT);
5268 		if (error == ERESTART)
5269 			return;
5270 		else if (error != 0) {
5271 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5272 				/* Don't wedge this device's queue */
5273 				cam_release_devq(done_ccb->ccb_h.path,
5274 						 /*relsim_flags*/0,
5275 						 /*reduction*/0,
5276 						 /*timeout*/0,
5277 						 /*getcount_only*/0);
5278 			}
5279 		}
5280 	}
5281 
5282 	free(ata_params, M_SCSIDA);
5283 	if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
5284 	 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
5285 		/*
5286 		 * If the ATA IDENTIFY failed, we could be talking
5287 		 * to a SCSI drive, although that seems unlikely,
5288 		 * since the drive did report that it supported the
5289 		 * ATA Information VPD page.  If the ATA IDENTIFY
5290 		 * succeeded, and the SAT layer doesn't support
5291 		 * ZBC -> ZAC translation, continue on to get the
5292 		 * directory of ATA logs, and complete the rest of
5293 		 * the ZAC probe.  If the SAT layer does support
5294 		 * ZBC -> ZAC translation, we want to use that,
5295 		 * and we'll probe the SCSI Zoned Block Device
5296 		 * Characteristics VPD page next.
5297 		 */
5298 		if ((error == 0)
5299 		 && (softc->flags & DA_FLAG_CAN_ATA_LOG)
5300 		 && (softc->zone_interface == DA_ZONE_IF_ATA_PASS))
5301 			softc->state = DA_STATE_PROBE_ATA_LOGDIR;
5302 		else
5303 			softc->state = DA_STATE_PROBE_ZONE;
5304 		continue_probe = 1;
5305 	}
5306 	if (continue_probe != 0) {
5307 		xpt_release_ccb(done_ccb);
5308 		xpt_schedule(periph, priority);
5309 		return;
5310 	} else
5311 		daprobedone(periph, done_ccb);
5312 	return;
5313 }
5314 
5315 static void
5316 dadone_probeatalogdir(struct cam_periph *periph, union ccb *done_ccb)
5317 {
5318 	struct da_softc *softc;
5319 	struct ccb_scsiio *csio;
5320 	u_int32_t  priority;
5321 	int error;
5322 
5323 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatalogdir\n"));
5324 
5325 	softc = (struct da_softc *)periph->softc;
5326 	priority = done_ccb->ccb_h.pinfo.priority;
5327 	csio = &done_ccb->csio;
5328 
5329 	cam_periph_assert(periph, MA_OWNED);
5330 	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5331 		error = 0;
5332 		softc->valid_logdir_len = 0;
5333 		bzero(&softc->ata_logdir, sizeof(softc->ata_logdir));
5334 		softc->valid_logdir_len = csio->dxfer_len - csio->resid;
5335 		if (softc->valid_logdir_len > 0)
5336 			bcopy(csio->data_ptr, &softc->ata_logdir,
5337 			    min(softc->valid_logdir_len,
5338 				sizeof(softc->ata_logdir)));
5339 		/*
5340 		 * Figure out whether the Identify Device log is
5341 		 * supported.  The General Purpose log directory
5342 		 * has a header, and lists the number of pages
5343 		 * available for each GP log identified by the
5344 		 * offset into the list.
5345 		 */
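		/*
		 * Layout relied on below: a 2 byte directory version
		 * header, followed by one 16 bit page count per log
		 * address, so the count for log N lives at byte offset
		 * N * 2 in the page (N * 2 - 2 into num_pages[]).
		 */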
5346 		if ((softc->valid_logdir_len >=
5347 		    ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t)))
5348 		 && (le16dec(softc->ata_logdir.header) ==
5349 		     ATA_GP_LOG_DIR_VERSION)
5350 		 && (le16dec(&softc->ata_logdir.num_pages[
5351 		     (ATA_IDENTIFY_DATA_LOG *
5352 		     sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)) {
5353 			softc->flags |= DA_FLAG_CAN_ATA_IDLOG;
5354 		} else {
5355 			softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
5356 		}
5357 	} else {
5358 		error = daerror(done_ccb, CAM_RETRY_SELTO,
5359 				SF_RETRY_UA|SF_NO_PRINT);
5360 		if (error == ERESTART)
5361 			return;
5362 		else if (error != 0) {
5363 			/*
5364 			 * If we can't get the ATA log directory,
5365 			 * then ATA logs are effectively not
5366 			 * supported even if the bit is set in the
5367 			 * identify data.
5368 			 */
5369 			softc->flags &= ~(DA_FLAG_CAN_ATA_LOG |
5370 					  DA_FLAG_CAN_ATA_IDLOG);
5371 			if ((done_ccb->ccb_h.status &
5372 			     CAM_DEV_QFRZN) != 0) {
5373 				/* Don't wedge this device's queue */
5374 				cam_release_devq(done_ccb->ccb_h.path,
5375 						 /*relsim_flags*/0,
5376 						 /*reduction*/0,
5377 						 /*timeout*/0,
5378 						 /*getcount_only*/0);
5379 			}
5380 		}
5381 	}
5382 
5383 	free(csio->data_ptr, M_SCSIDA);
5384 
5385 	if ((error == 0)
5386 	 && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) {
5387 		softc->state = DA_STATE_PROBE_ATA_IDDIR;
5388 		xpt_release_ccb(done_ccb);
5389 		xpt_schedule(periph, priority);
5390 		return;
5391 	}
5392 	daprobedone(periph, done_ccb);
5393 	return;
5394 }
5395 
5396 static void
5397 dadone_probeataiddir(struct cam_periph *periph, union ccb *done_ccb)
5398 {
5399 	struct da_softc *softc;
5400 	struct ccb_scsiio *csio;
5401 	u_int32_t  priority;
5402 	int error;
5403 
5404 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeataiddir\n"));
5405 
5406 	softc = (struct da_softc *)periph->softc;
5407 	priority = done_ccb->ccb_h.pinfo.priority;
5408 	csio = &done_ccb->csio;
5409 
5410 	cam_periph_assert(periph, MA_OWNED);
5411 
5412 	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5413 		off_t entries_offset, max_entries;
5414 		error = 0;
5415 
5416 		softc->valid_iddir_len = 0;
5417 		bzero(&softc->ata_iddir, sizeof(softc->ata_iddir));
5418 		softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP |
5419 				  DA_FLAG_CAN_ATA_ZONE);
5420 		softc->valid_iddir_len = csio->dxfer_len - csio->resid;
5421 		if (softc->valid_iddir_len > 0)
5422 			bcopy(csio->data_ptr, &softc->ata_iddir,
5423 			    min(softc->valid_iddir_len,
5424 				sizeof(softc->ata_iddir)));
5425 
5426 		entries_offset =
5427 		    __offsetof(struct ata_identify_log_pages, entries);
5428 		max_entries = softc->valid_iddir_len - entries_offset;
5429 		if ((softc->valid_iddir_len > (entries_offset + 1))
5430 		 && (le64dec(softc->ata_iddir.header) == ATA_IDLOG_REVISION)
5431 		 && (softc->ata_iddir.entry_count > 0)) {
5432 			int num_entries, i;
5433 
5434 			num_entries = softc->ata_iddir.entry_count;
5435 			num_entries = min(num_entries,
5436 			   softc->valid_iddir_len - entries_offset);
5437 			for (i = 0; i < num_entries && i < max_entries; i++) {
5438 				if (softc->ata_iddir.entries[i] ==
5439 				    ATA_IDL_SUP_CAP)
5440 					softc->flags |= DA_FLAG_CAN_ATA_SUPCAP;
5441 				else if (softc->ata_iddir.entries[i] ==
5442 					 ATA_IDL_ZDI)
5443 					softc->flags |= DA_FLAG_CAN_ATA_ZONE;
5444 
5445 				if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP)
5446 				 && (softc->flags & DA_FLAG_CAN_ATA_ZONE))
5447 					break;
5448 			}
5449 		}
5450 	} else {
5451 		error = daerror(done_ccb, CAM_RETRY_SELTO,
5452 				SF_RETRY_UA|SF_NO_PRINT);
5453 		if (error == ERESTART)
5454 			return;
5455 		else if (error != 0) {
5456 			/*
5457 			 * If we can't get the ATA Identify Data log
5458 			 * directory, then it effectively isn't
5459 			 * supported even if the ATA Log directory
5460 			 * lists a non-zero number of pages for
5461 			 * this log.
5462 			 */
5463 			softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
5464 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5465 				/* Don't wedge this device's queue */
5466 				cam_release_devq(done_ccb->ccb_h.path,
5467 						 /*relsim_flags*/0,
5468 						 /*reduction*/0,
5469 						 /*timeout*/0,
5470 						 /*getcount_only*/0);
5471 			}
5472 		}
5473 	}
5474 
5475 	free(csio->data_ptr, M_SCSIDA);
5476 
5477 	if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) {
5478 		softc->state = DA_STATE_PROBE_ATA_SUP;
5479 		xpt_release_ccb(done_ccb);
5480 		xpt_schedule(periph, priority);
5481 		return;
5482 	}
5483 	daprobedone(periph, done_ccb);
5484 	return;
5485 }
5486 
5487 static void
5488 dadone_probeatasup(struct cam_periph *periph, union ccb *done_ccb)
5489 {
5490 	struct da_softc *softc;
5491 	struct ccb_scsiio *csio;
5492 	u_int32_t  priority;
5493 	int error;
5494 
5495 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatasup\n"));
5496 
5497 	softc = (struct da_softc *)periph->softc;
5498 	priority = done_ccb->ccb_h.pinfo.priority;
5499 	csio = &done_ccb->csio;
5500 
5501 	cam_periph_assert(periph, MA_OWNED);
5502 
5503 	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5504 		uint32_t valid_len;
5505 		size_t needed_size;
5506 		struct ata_identify_log_sup_cap *sup_cap;
5507 		error = 0;
5508 
5509 		sup_cap = (struct ata_identify_log_sup_cap *)csio->data_ptr;
5510 		valid_len = csio->dxfer_len - csio->resid;
5511 		needed_size = __offsetof(struct ata_identify_log_sup_cap,
5512 		    sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap);
5513 		if (valid_len >= needed_size) {
5514 			uint64_t zoned, zac_cap;
5515 
5516 			zoned = le64dec(sup_cap->zoned_cap);
5517 			if (zoned & ATA_ZONED_VALID) {
5518 				/*
5519 				 * This should have already been
5520 				 * set, because this is also in the
5521 				 * ATA identify data.
5522 				 */
5523 				if ((zoned & ATA_ZONED_MASK) ==
5524 				    ATA_SUPPORT_ZONE_HOST_AWARE)
5525 					softc->zone_mode = DA_ZONE_HOST_AWARE;
5526 				else if ((zoned & ATA_ZONED_MASK) ==
5527 				    ATA_SUPPORT_ZONE_DEV_MANAGED)
5528 					softc->zone_mode =
5529 					    DA_ZONE_DRIVE_MANAGED;
5530 			}
5531 
5532 			zac_cap = le64dec(sup_cap->sup_zac_cap);
5533 			if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
5534 				if (zac_cap & ATA_REPORT_ZONES_SUP)
5535 					softc->zone_flags |=
5536 					    DA_ZONE_FLAG_RZ_SUP;
5537 				if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
5538 					softc->zone_flags |=
5539 					    DA_ZONE_FLAG_OPEN_SUP;
5540 				if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
5541 					softc->zone_flags |=
5542 					    DA_ZONE_FLAG_CLOSE_SUP;
5543 				if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
5544 					softc->zone_flags |=
5545 					    DA_ZONE_FLAG_FINISH_SUP;
5546 				if (zac_cap & ATA_ND_RWP_SUP)
5547 					softc->zone_flags |=
5548 					    DA_ZONE_FLAG_RWP_SUP;
5549 			} else {
5550 				/*
5551 				 * This field was introduced in
5552 				 * ACS-4, r08 on April 28th, 2015.
5553 				 * If the drive firmware was written
5554 				 * to an earlier spec, it won't have
5555 				 * the field.  So, assume all
5556 				 * commands are supported.
5557 				 */
5558 				softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
5559 			}
5560 		}
5561 	} else {
5562 		error = daerror(done_ccb, CAM_RETRY_SELTO,
5563 				SF_RETRY_UA|SF_NO_PRINT);
5564 		if (error == ERESTART)
5565 			return;
5566 		else if (error != 0) {
5567 			/*
5568 			 * If we can't get the ATA Identify Data
5569 			 * Supported Capabilities page, clear the
5570 			 * flag...
5571 			 */
5572 			softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP;
5573 			/*
5574 			 * And clear zone capabilities.
5575 			 */
5576 			softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK;
5577 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5578 				/* Don't wedge this device's queue */
5579 				cam_release_devq(done_ccb->ccb_h.path,
5580 						 /*relsim_flags*/0,
5581 						 /*reduction*/0,
5582 						 /*timeout*/0,
5583 						 /*getcount_only*/0);
5584 			}
5585 		}
5586 	}
5587 
5588 	free(csio->data_ptr, M_SCSIDA);
5589 
5590 	if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) {
5591 		softc->state = DA_STATE_PROBE_ATA_ZONE;
5592 		xpt_release_ccb(done_ccb);
5593 		xpt_schedule(periph, priority);
5594 		return;
5595 	}
5596 	daprobedone(periph, done_ccb);
5597 	return;
5598 }
5599 
5600 static void
5601 dadone_probeatazone(struct cam_periph *periph, union ccb *done_ccb)
5602 {
5603 	struct da_softc *softc;
5604 	struct ccb_scsiio *csio;
5605 	int error;
5606 
5607 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatazone\n"));
5608 
5609 	softc = (struct da_softc *)periph->softc;
5610 	csio = &done_ccb->csio;
5611 
5612 	cam_periph_assert(periph, MA_OWNED);
5613 
5614 	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5615 		struct ata_zoned_info_log *zi_log;
5616 		uint32_t valid_len;
5617 		size_t needed_size;
5618 
5619 		zi_log = (struct ata_zoned_info_log *)csio->data_ptr;
5620 
5621 		valid_len = csio->dxfer_len - csio->resid;
5622 		needed_size = __offsetof(struct ata_zoned_info_log,
5623 		    version_info) + 1 + sizeof(zi_log->version_info);
5624 		if (valid_len >= needed_size) {
5625 			uint64_t tmpvar;
5626 
5627 			tmpvar = le64dec(zi_log->zoned_cap);
5628 			if (tmpvar & ATA_ZDI_CAP_VALID) {
5629 				if (tmpvar & ATA_ZDI_CAP_URSWRZ)
5630 					softc->zone_flags |=
5631 					    DA_ZONE_FLAG_URSWRZ;
5632 				else
5633 					softc->zone_flags &=
5634 					    ~DA_ZONE_FLAG_URSWRZ;
5635 			}
5636 			tmpvar = le64dec(zi_log->optimal_seq_zones);
5637 			if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) {
5638 				softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
5639 				softc->optimal_seq_zones = (tmpvar &
5640 				    ATA_ZDI_OPT_SEQ_MASK);
5641 			} else {
5642 				softc->zone_flags &= ~DA_ZONE_FLAG_OPT_SEQ_SET;
5643 				softc->optimal_seq_zones = 0;
5644 			}
5645 
5646 			tmpvar = le64dec(zi_log->optimal_nonseq_zones);
5647 			if (tmpvar & ATA_ZDI_OPT_NS_VALID) {
5648 				softc->zone_flags |=
5649 				    DA_ZONE_FLAG_OPT_NONSEQ_SET;
5650 				softc->optimal_nonseq_zones =
5651 				    (tmpvar & ATA_ZDI_OPT_NS_MASK);
5652 			} else {
5653 				softc->zone_flags &=
5654 				    ~DA_ZONE_FLAG_OPT_NONSEQ_SET;
5655 				softc->optimal_nonseq_zones = 0;
5656 			}
5657 
5658 			tmpvar = le64dec(zi_log->max_seq_req_zones);
5659 			if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) {
5660 				softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET;
5661 				softc->max_seq_zones =
5662 				    (tmpvar & ATA_ZDI_MAX_SEQ_MASK);
5663 			} else {
5664 				softc->zone_flags &= ~DA_ZONE_FLAG_MAX_SEQ_SET;
5665 				softc->max_seq_zones = 0;
5666 			}
5667 		}
5668 	} else {
5669 		error = daerror(done_ccb, CAM_RETRY_SELTO,
5670 				SF_RETRY_UA|SF_NO_PRINT);
5671 		if (error == ERESTART)
5672 			return;
5673 		else if (error != 0) {
5674 			softc->flags &= ~DA_FLAG_CAN_ATA_ZONE;
5675 			softc->zone_flags &= ~DA_ZONE_FLAG_SET_MASK;
5676 
5677 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5678 				/* Don't wedge this device's queue */
5679 				cam_release_devq(done_ccb->ccb_h.path,
5680 						 /*relsim_flags*/0,
5681 						 /*reduction*/0,
5682 						 /*timeout*/0,
5683 						 /*getcount_only*/0);
5684 			}
5685 		}
5686 
5687 	}
5688 
5689 	free(csio->data_ptr, M_SCSIDA);
5690 
5691 	daprobedone(periph, done_ccb);
5692 	return;
5693 }
5694 
5695 static void
5696 dadone_probezone(struct cam_periph *periph, union ccb *done_ccb)
5697 {
5698 	struct da_softc *softc;
5699 	struct ccb_scsiio *csio;
5700 	int error;
5701 
5702 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probezone\n"));
5703 
5704 	softc = (struct da_softc *)periph->softc;
5705 	csio = &done_ccb->csio;
5706 
5707 	cam_periph_assert(periph, MA_OWNED);
5708 
5709 	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5710 		uint32_t valid_len;
5711 		size_t needed_len;
5712 		struct scsi_vpd_zoned_bdc *zoned_bdc;
5713 
5714 		error = 0;
5715 		zoned_bdc = (struct scsi_vpd_zoned_bdc *)csio->data_ptr;
5716 		valid_len = csio->dxfer_len - csio->resid;
5717 		needed_len = __offsetof(struct scsi_vpd_zoned_bdc,
5718 		    max_seq_req_zones) + 1 +
5719 		    sizeof(zoned_bdc->max_seq_req_zones);
5720 		if ((valid_len >= needed_len)
5721 		 && (scsi_2btoul(zoned_bdc->page_length) >= SVPD_ZBDC_PL)) {
5722 			if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ)
5723 				softc->zone_flags |= DA_ZONE_FLAG_URSWRZ;
5724 			else
5725 				softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ;
5726 			softc->optimal_seq_zones =
5727 			    scsi_4btoul(zoned_bdc->optimal_seq_zones);
5728 			softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
5729 			softc->optimal_nonseq_zones = scsi_4btoul(
5730 			    zoned_bdc->optimal_nonseq_zones);
5731 			softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET;
5732 			softc->max_seq_zones =
5733 			    scsi_4btoul(zoned_bdc->max_seq_req_zones);
5734 			softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET;
5735 		}
5736 		/*
5737 		 * All of the zone commands are mandatory for SCSI
5738 		 * devices.
5739 		 *
5740 		 * XXX KDM this is valid as of September 2015.
5741 		 * Re-check this assumption once the SAT spec is
5742 		 * updated to support SCSI ZBC to ATA ZAC mapping.
5743 		 * Since ATA allows zone commands to be reported
5744 		 * as supported or not, this may not necessarily
5745 		 * be true for an ATA device behind a SAT (SCSI to
5746 		 * ATA Translation) layer.
5747 		 */
5748 		softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
5749 	} else {
5750 		error = daerror(done_ccb, CAM_RETRY_SELTO,
5751 				SF_RETRY_UA|SF_NO_PRINT);
5752 		if (error == ERESTART)
5753 			return;
5754 		else if (error != 0) {
5755 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5756 				/* Don't wedge this device's queue */
5757 				cam_release_devq(done_ccb->ccb_h.path,
5758 						 /*relsim_flags*/0,
5759 						 /*reduction*/0,
5760 						 /*timeout*/0,
5761 						 /*getcount_only*/0);
5762 			}
5763 		}
5764 	}
5765 
5766 	free(csio->data_ptr, M_SCSIDA);
5767 
5768 	daprobedone(periph, done_ccb);
5769 	return;
5770 }
5771 
5772 static void
5773 dadone_tur(struct cam_periph *periph, union ccb *done_ccb)
5774 {
5775 	struct da_softc *softc;
5776 	struct ccb_scsiio *csio;
5777 
5778 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_tur\n"));
5779 
5780 	softc = (struct da_softc *)periph->softc;
5781 	csio = &done_ccb->csio;
5782 
5783 	cam_periph_assert(periph, MA_OWNED);
5784 
5785 	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5786 
5787 		if (daerror(done_ccb, CAM_RETRY_SELTO,
5788 		    SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART)
5789 			return;	/* Will complete again, keep reference */
5790 		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5791 			cam_release_devq(done_ccb->ccb_h.path,
5792 					 /*relsim_flags*/0,
5793 					 /*reduction*/0,
5794 					 /*timeout*/0,
5795 					 /*getcount_only*/0);
5796 	}
5797 	xpt_release_ccb(done_ccb);
5798 	softc->flags &= ~DA_FLAG_TUR_PENDING;
5799 	da_periph_release_locked(periph, DA_REF_TUR);
5800 	return;
5801 }
5802 
5803 static void
5804 dareprobe(struct cam_periph *periph)
5805 {
5806 	struct da_softc	  *softc;
5807 	int status;
5808 
5809 	softc = (struct da_softc *)periph->softc;
5810 
5811 	/* Probe in progress; don't interfere. */
5812 	if (softc->state != DA_STATE_NORMAL)
5813 		return;
5814 
5815 	status = da_periph_acquire(periph, DA_REF_REPROBE);
5816 	KASSERT(status == 0, ("dareprobe: da_periph_acquire failed"));
5817 
5818 	softc->state = DA_STATE_PROBE_WP;
5819 	xpt_schedule(periph, CAM_PRIORITY_DEV);
5820 }
5821 
5822 static int
5823 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
5824 {
5825 	struct da_softc	  *softc;
5826 	struct cam_periph *periph;
5827 	int error, error_code, sense_key, asc, ascq;
5828 
5829 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
5830 	if (ccb->csio.bio != NULL)
5831 		biotrack(ccb->csio.bio, __func__);
5832 #endif
5833 
5834 	periph = xpt_path_periph(ccb->ccb_h.path);
5835 	softc = (struct da_softc *)periph->softc;
5836 
5837 	cam_periph_assert(periph, MA_OWNED);
5838 
5839 	/*
5840 	 * Automatically detect devices that do not support
5841 	 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
5842 	 */
5843 	error = 0;
5844 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
5845 		error = cmd6workaround(ccb);
5846 	} else if (scsi_extract_sense_ccb(ccb,
5847 	    &error_code, &sense_key, &asc, &ascq)) {
5848 		if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
5849 			error = cmd6workaround(ccb);
5850 		/*
5851 		 * If the target replied with CAPACITY DATA HAS CHANGED UA,
5852 		 * query the capacity and notify upper layers.
5853 		 */
5854 		else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
5855 		    asc == 0x2A && ascq == 0x09) {
5856 			xpt_print(periph->path, "Capacity data has changed\n");
5857 			softc->flags &= ~DA_FLAG_PROBED;
5858 			dareprobe(periph);
5859 			sense_flags |= SF_NO_PRINT;
5860 		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
5861 		    asc == 0x28 && ascq == 0x00) {
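			/*
			 * Not Ready to Ready transition; the medium may
			 * have been changed, so notify GEOM.
			 */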
5862 			softc->flags &= ~DA_FLAG_PROBED;
5863 			disk_media_changed(softc->disk, M_NOWAIT);
5864 		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
5865 		    asc == 0x3F && ascq == 0x03) {
5866 			xpt_print(periph->path, "INQUIRY data has changed\n");
5867 			softc->flags &= ~DA_FLAG_PROBED;
5868 			dareprobe(periph);
5869 			sense_flags |= SF_NO_PRINT;
5870 		} else if (sense_key == SSD_KEY_NOT_READY &&
5871 		    asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
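			/*
			 * Medium not present; invalidate the pack and
			 * report the media as gone.
			 */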
5872 			softc->flags |= DA_FLAG_PACK_INVALID;
5873 			disk_media_gone(softc->disk, M_NOWAIT);
5874 		}
5875 	}
5876 	if (error == ERESTART)
5877 		return (ERESTART);
5878 
5879 #ifdef CAM_IO_STATS
5880 	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
5881 	case CAM_CMD_TIMEOUT:
5882 		softc->timeouts++;
5883 		break;
5884 	case CAM_REQ_ABORTED:
5885 	case CAM_REQ_CMP_ERR:
5886 	case CAM_REQ_TERMIO:
5887 	case CAM_UNREC_HBA_ERROR:
5888 	case CAM_DATA_RUN_ERR:
5889 		softc->errors++;
5890 		break;
5891 	default:
5892 		break;
5893 	}
5894 #endif
5895 
5896 	/*
5897 	 * XXX
5898 	 * Until we have a better way of doing pack validation,
5899 	 * don't treat UAs as errors.
5900 	 */
5901 	sense_flags |= SF_RETRY_UA;
5902 
5903 	if (softc->quirks & DA_Q_RETRY_BUSY)
5904 		sense_flags |= SF_RETRY_BUSY;
5905 	return(cam_periph_error(ccb, cam_flags, sense_flags));
5906 }
5907 
5908 static void
5909 damediapoll(void *arg)
5910 {
5911 	struct cam_periph *periph = arg;
5912 	struct da_softc *softc = periph->softc;
5913 
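	/*
	 * Only schedule a TEST UNIT READY if the periph is otherwise
	 * idle: no TUR already queued or pending and no outstanding
	 * I/O that would serve the same purpose.
	 */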
5914 	if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
5915 	    (softc->flags & DA_FLAG_TUR_PENDING) == 0 &&
5916 	    LIST_EMPTY(&softc->pending_ccbs)) {
5917 		if (da_periph_acquire(periph, DA_REF_TUR) == 0) {
5918 			cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
5919 			daschedule(periph);
5920 		}
5921 	}
5922 	/* Queue us up again */
5923 	if (da_poll_period != 0)
5924 		callout_schedule(&softc->mediapoll_c, da_poll_period * hz);
5925 }
5926 
5927 static void
5928 daprevent(struct cam_periph *periph, int action)
5929 {
5930 	struct	da_softc *softc;
5931 	union	ccb *ccb;
5932 	int	error;
5933 
5934 	cam_periph_assert(periph, MA_OWNED);
5935 	softc = (struct da_softc *)periph->softc;
5936 
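	/* Nothing to do if the pack lock state already matches. */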
5937 	if (((action == PR_ALLOW)
5938 	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
5939 	 || ((action == PR_PREVENT)
5940 	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
5941 		return;
5942 	}
5943 
5944 	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
5945 
5946 	scsi_prevent(&ccb->csio,
5947 		     /*retries*/1,
5948 		     /*cbcfp*/NULL,
5949 		     MSG_SIMPLE_Q_TAG,
5950 		     action,
5951 		     SSD_FULL_SIZE,
5952 		     5000);
5953 
5954 	error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO,
5955 	    SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat);
5956 
5957 	if (error == 0) {
5958 		if (action == PR_ALLOW)
5959 			softc->flags &= ~DA_FLAG_PACK_LOCKED;
5960 		else
5961 			softc->flags |= DA_FLAG_PACK_LOCKED;
5962 	}
5963 
5964 	xpt_release_ccb(ccb);
5965 }
5966 
5967 static void
5968 dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector,
5969 	  struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len)
5970 {
5971 	struct ccb_calc_geometry ccg;
5972 	struct da_softc *softc;
5973 	struct disk_params *dp;
5974 	u_int lbppbe, lalba;
5975 	int error;
5976 
5977 	softc = (struct da_softc *)periph->softc;
5978 
5979 	dp = &softc->params;
5980 	dp->secsize = block_len;
5981 	dp->sectors = maxsector + 1;
5982 	if (rcaplong != NULL) {
5983 		lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE;
5984 		lalba = scsi_2btoul(rcaplong->lalba_lbp);
5985 		lalba &= SRC16_LALBA_A;
5986 	} else {
5987 		lbppbe = 0;
5988 		lalba = 0;
5989 	}
5990 
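	/*
	 * Derive the stripe size and offset reported to GEOM: lbppbe is
	 * the logical-blocks-per-physical-block exponent and lalba the
	 * lowest aligned LBA from READ CAPACITY(16).  Failing that, fall
	 * back to the 4K quirk or to the UNMAP granularity, if any.
	 */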
5991 	if (lbppbe > 0) {
5992 		dp->stripesize = block_len << lbppbe;
5993 		dp->stripeoffset = (dp->stripesize - block_len * lalba) %
5994 		    dp->stripesize;
5995 	} else if (softc->quirks & DA_Q_4K) {
5996 		dp->stripesize = 4096;
5997 		dp->stripeoffset = 0;
5998 	} else if (softc->unmap_gran != 0) {
5999 		dp->stripesize = block_len * softc->unmap_gran;
6000 		dp->stripeoffset = (dp->stripesize - block_len *
6001 		    softc->unmap_gran_align) % dp->stripesize;
6002 	} else {
6003 		dp->stripesize = 0;
6004 		dp->stripeoffset = 0;
6005 	}
6006 	/*
6007 	 * Have the controller provide us with a geometry
6008 	 * for this disk.  The only time the geometry
6009 	 * matters is when we boot and the controller
6010 	 * is the only one knowledgeable enough to come
6011 	 * up with something that will make this a bootable
6012 	 * device.
6013 	 */
6014 	xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
6015 	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
6016 	ccg.block_size = dp->secsize;
6017 	ccg.volume_size = dp->sectors;
6018 	ccg.heads = 0;
6019 	ccg.secs_per_track = 0;
6020 	ccg.cylinders = 0;
6021 	xpt_action((union ccb*)&ccg);
6022 	if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6023 		/*
6024 		 * We don't know what went wrong here, but just pick
6025 		 * a geometry so we don't have nasty things like divide
6026 		 * by zero.
6027 		 */
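		/*
		 * For example, a drive reporting 1953125000 512-byte
		 * sectors (~1 TB) falls back to 255 heads, 255 sectors
		 * per track and 30036 cylinders here.
		 */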
6028 		dp->heads = 255;
6029 		dp->secs_per_track = 255;
6030 		dp->cylinders = dp->sectors / (255 * 255);
6031 		if (dp->cylinders == 0) {
6032 			dp->cylinders = 1;
6033 		}
6034 	} else {
6035 		dp->heads = ccg.heads;
6036 		dp->secs_per_track = ccg.secs_per_track;
6037 		dp->cylinders = ccg.cylinders;
6038 	}
6039 
6040 	/*
6041 	 * If the user supplied a read capacity buffer, and if it is
6042 	 * different than the previous buffer, update the data in the EDT.
6043 	 * If it's the same, we don't bother.  This avoids sending an
6044 	 * update every time someone opens this device.
6045 	 */
6046 	if ((rcaplong != NULL)
6047 	 && (bcmp(rcaplong, &softc->rcaplong,
6048 		  min(sizeof(softc->rcaplong), rcap_len)) != 0)) {
6049 		struct ccb_dev_advinfo cdai;
6050 
6051 		xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
6052 		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
6053 		cdai.buftype = CDAI_TYPE_RCAPLONG;
6054 		cdai.flags = CDAI_FLAG_STORE;
6055 		cdai.bufsiz = rcap_len;
6056 		cdai.buf = (uint8_t *)rcaplong;
6057 		xpt_action((union ccb *)&cdai);
6058 		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
6059 			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
6060 		if (cdai.ccb_h.status != CAM_REQ_CMP) {
6061 			xpt_print(periph->path, "%s: failed to set read "
6062 				  "capacity advinfo\n", __func__);
6063 			/* Use cam_error_print() to decode the status */
6064 			cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS,
6065 					CAM_EPF_ALL);
6066 		} else {
6067 			bcopy(rcaplong, &softc->rcaplong,
6068 			      min(sizeof(softc->rcaplong), rcap_len));
6069 		}
6070 	}
6071 
6072 	softc->disk->d_sectorsize = softc->params.secsize;
6073 	softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors;
6074 	softc->disk->d_stripesize = softc->params.stripesize;
6075 	softc->disk->d_stripeoffset = softc->params.stripeoffset;
6076 	/* XXX: these are not actually "firmware" values, so they may be wrong */
6077 	softc->disk->d_fwsectors = softc->params.secs_per_track;
6078 	softc->disk->d_fwheads = softc->params.heads;
6079 	softc->disk->d_devstat->block_size = softc->params.secsize;
6080 	softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;
6081 
6082 	error = disk_resize(softc->disk, M_NOWAIT);
6083 	if (error != 0)
6084 		xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error);
6085 }
6086 
6087 static void
6088 dasendorderedtag(void *arg)
6089 {
6090 	struct cam_periph *periph = arg;
6091 	struct da_softc *softc = periph->softc;
6092 
6093 	cam_periph_assert(periph, MA_OWNED);
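	/*
	 * If I/O is still outstanding and no ordered tag has gone out
	 * since the last tick, flag that the next queued command should
	 * be issued with an ordered tag.
	 */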
6094 	if (da_send_ordered) {
6095 		if (!LIST_EMPTY(&softc->pending_ccbs)) {
6096 			if ((softc->flags & DA_FLAG_WAS_OTAG) == 0)
6097 				softc->flags |= DA_FLAG_NEED_OTAG;
6098 			softc->flags &= ~DA_FLAG_WAS_OTAG;
6099 		}
6100 	}
6101 
6102 	/* Queue us up again */
6103 	callout_reset(&softc->sendordered_c,
6104 	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
6105 	    dasendorderedtag, periph);
6106 }
6107 
6108 /*
6109  * Step through all DA peripheral drivers, and if the device is still open,
6110  * sync the disk cache to physical media.
6111  */
6112 static void
6113 dashutdown(void * arg, int howto)
6114 {
6115 	struct cam_periph *periph;
6116 	struct da_softc *softc;
6117 	union ccb *ccb;
6118 	int error;
6119 
6120 	CAM_PERIPH_FOREACH(periph, &dadriver) {
6121 		softc = (struct da_softc *)periph->softc;
6122 		if (SCHEDULER_STOPPED()) {
6123 			/* If we panicked with the lock held, do not recurse. */
6124 			if (!cam_periph_owned(periph) &&
6125 			    (softc->flags & DA_FLAG_OPEN)) {
6126 				dadump(softc->disk, NULL, 0, 0, 0);
6127 			}
6128 			continue;
6129 		}
6130 		cam_periph_lock(periph);
6131 
6132 		/*
6133 		 * We only sync the cache if the drive is still open, and
6134 		 * if the drive is capable of it.
6135 		 */
6136 		if (((softc->flags & DA_FLAG_OPEN) == 0)
6137 		 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
6138 			cam_periph_unlock(periph);
6139 			continue;
6140 		}
6141 
6142 		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
6143 		scsi_synchronize_cache(&ccb->csio,
6144 				       /*retries*/0,
6145 				       /*cbfcnp*/NULL,
6146 				       MSG_SIMPLE_Q_TAG,
6147 				       /*begin_lba*/0, /* whole disk */
6148 				       /*lb_count*/0,
6149 				       SSD_FULL_SIZE,
6150 				       60 * 60 * 1000);
6151 
6152 		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
6153 		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR,
6154 		    softc->disk->d_devstat);
6155 		if (error != 0)
6156 			xpt_print(periph->path, "Synchronize cache failed\n");
6157 		xpt_release_ccb(ccb);
6158 		cam_periph_unlock(periph);
6159 	}
6160 }
6161 
6162 #else /* !_KERNEL */
6163 
6164 /*
6165  * XXX These are only left out of the kernel build to silence warnings.  If,
6166  * for some reason these functions are used in the kernel, the ifdefs should
6167  * be moved so they are included both in the kernel and userland.
6168  */
6169 void
6170 scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
6171 		 void (*cbfcnp)(struct cam_periph *, union ccb *),
6172 		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
6173 		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
6174 		 u_int32_t timeout)
6175 {
6176 	struct scsi_format_unit *scsi_cmd;
6177 
6178 	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
6179 	scsi_cmd->opcode = FORMAT_UNIT;
6180 	scsi_cmd->byte2 = byte2;
6181 	scsi_ulto2b(ileave, scsi_cmd->interleave);
6182 
6183 	cam_fill_csio(csio,
6184 		      retries,
6185 		      cbfcnp,
6186 		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6187 		      tag_action,
6188 		      data_ptr,
6189 		      dxfer_len,
6190 		      sense_len,
6191 		      sizeof(*scsi_cmd),
6192 		      timeout);
6193 }
6194 
6195 void
6196 scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries,
6197 		  void (*cbfcnp)(struct cam_periph *, union ccb *),
6198 		  uint8_t tag_action, uint8_t list_format,
6199 		  uint32_t addr_desc_index, uint8_t *data_ptr,
6200 		  uint32_t dxfer_len, int minimum_cmd_size,
6201 		  uint8_t sense_len, uint32_t timeout)
6202 {
6203 	uint8_t cdb_len;
6204 
6205 	/*
6206 	 * These conditions allow using the 10 byte command.  Otherwise we
6207 	 * need to use the 12 byte command.
6208 	 */
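	/*
	 * The 10 byte CDB has no address descriptor index field and only
	 * a 2 byte allocation length, hence the index and length checks
	 * below.
	 */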
6209 	if ((minimum_cmd_size <= 10)
6210 	 && (addr_desc_index == 0)
6211 	 && (dxfer_len <= SRDD10_MAX_LENGTH)) {
6212 		struct scsi_read_defect_data_10 *cdb10;
6213 
6214 		cdb10 = (struct scsi_read_defect_data_10 *)
6215 			&csio->cdb_io.cdb_bytes;
6216 
6217 		cdb_len = sizeof(*cdb10);
6218 		bzero(cdb10, cdb_len);
6219 		cdb10->opcode = READ_DEFECT_DATA_10;
6220 		cdb10->format = list_format;
6221 		scsi_ulto2b(dxfer_len, cdb10->alloc_length);
6222 	} else {
6223 		struct scsi_read_defect_data_12 *cdb12;
6224 
6225 		cdb12 = (struct scsi_read_defect_data_12 *)
6226 			&csio->cdb_io.cdb_bytes;
6227 
6228 		cdb_len = sizeof(*cdb12);
6229 		bzero(cdb12, cdb_len);
6230 		cdb12->opcode = READ_DEFECT_DATA_12;
6231 		cdb12->format = list_format;
6232 		scsi_ulto4b(dxfer_len, cdb12->alloc_length);
6233 		scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index);
6234 	}
6235 
6236 	cam_fill_csio(csio,
6237 		      retries,
6238 		      cbfcnp,
6239 		      /*flags*/ CAM_DIR_IN,
6240 		      tag_action,
6241 		      data_ptr,
6242 		      dxfer_len,
6243 		      sense_len,
6244 		      cdb_len,
6245 		      timeout);
6246 }
6247 
6248 void
6249 scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
6250 	      void (*cbfcnp)(struct cam_periph *, union ccb *),
6251 	      u_int8_t tag_action, u_int8_t byte2, u_int16_t control,
6252 	      u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
6253 	      u_int32_t timeout)
6254 {
6255 	struct scsi_sanitize *scsi_cmd;
6256 
6257 	scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes;
6258 	scsi_cmd->opcode = SANITIZE;
6259 	scsi_cmd->byte2 = byte2;
6260 	scsi_cmd->control = control;
6261 	scsi_ulto2b(dxfer_len, scsi_cmd->length);
6262 
6263 	cam_fill_csio(csio,
6264 		      retries,
6265 		      cbfcnp,
6266 		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6267 		      tag_action,
6268 		      data_ptr,
6269 		      dxfer_len,
6270 		      sense_len,
6271 		      sizeof(*scsi_cmd),
6272 		      timeout);
6273 }
6274 
6275 #endif /* _KERNEL */
6276 
6277 void
6278 scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries,
6279 	     void (*cbfcnp)(struct cam_periph *, union ccb *),
6280 	     uint8_t tag_action, uint8_t service_action, uint64_t zone_id,
6281 	     uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
6282 	     uint8_t sense_len, uint32_t timeout)
6283 {
6284 	struct scsi_zbc_out *scsi_cmd;
6285 
6286 	scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes;
6287 	scsi_cmd->opcode = ZBC_OUT;
6288 	scsi_cmd->service_action = service_action;
6289 	scsi_u64to8b(zone_id, scsi_cmd->zone_id);
6290 	scsi_cmd->zone_flags = zone_flags;
6291 
6292 	cam_fill_csio(csio,
6293 		      retries,
6294 		      cbfcnp,
6295 		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6296 		      tag_action,
6297 		      data_ptr,
6298 		      dxfer_len,
6299 		      sense_len,
6300 		      sizeof(*scsi_cmd),
6301 		      timeout);
6302 }
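/*
 * Illustrative (hypothetical) sketch of scsi_zbc_out() use: resetting the
 * write pointer of one zone on a ccb the caller already owns.  The retry
 * count and timeout are examples only, and ZBC_OUT_SA_RWP is assumed here
 * to name the reset write pointer service action from scsi_da.h.
 *
 *	scsi_zbc_out(&ccb->csio, 1, cbfcnp, MSG_SIMPLE_Q_TAG,
 *	    ZBC_OUT_SA_RWP, zone_lba, 0, NULL, 0, SSD_FULL_SIZE,
 *	    60 * 1000);
 */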
6303 
6304 void
6305 scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries,
6306 	    void (*cbfcnp)(struct cam_periph *, union ccb *),
6307 	    uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba,
6308 	    uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len,
6309 	    uint8_t sense_len, uint32_t timeout)
6310 {
6311 	struct scsi_zbc_in *scsi_cmd;
6312 
6313 	scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes;
6314 	scsi_cmd->opcode = ZBC_IN;
6315 	scsi_cmd->service_action = service_action;
6316 	scsi_ulto4b(dxfer_len, scsi_cmd->length);
6317 	scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba);
6318 	scsi_cmd->zone_options = zone_options;
6319 
6320 	cam_fill_csio(csio,
6321 		      retries,
6322 		      cbfcnp,
6323 		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_IN : CAM_DIR_NONE,
6324 		      tag_action,
6325 		      data_ptr,
6326 		      dxfer_len,
6327 		      sense_len,
6328 		      sizeof(*scsi_cmd),
6329 		      timeout);
6330 
6331 }
6332 
6333 int
6334 scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
6335 		      void (*cbfcnp)(struct cam_periph *, union ccb *),
6336 		      uint8_t tag_action, int use_ncq,
6337 		      uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
6338 		      uint8_t *data_ptr, uint32_t dxfer_len,
6339 		      uint8_t *cdb_storage, size_t cdb_storage_len,
6340 		      uint8_t sense_len, uint32_t timeout)
6341 {
6342 	uint8_t command_out, protocol, ata_flags;
6343 	uint16_t features_out;
6344 	uint32_t sectors_out, auxiliary;
6345 	int retval;
6346 
6347 	retval = 0;
6348 
6349 	if (use_ncq == 0) {
6350 		command_out = ATA_ZAC_MANAGEMENT_OUT;
6351 		features_out = (zm_action & 0xf) | (zone_flags << 8);
6352 		ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
6353 		if (dxfer_len == 0) {
6354 			protocol = AP_PROTO_NON_DATA;
6355 			ata_flags |= AP_FLAG_TLEN_NO_DATA;
6356 			sectors_out = 0;
6357 		} else {
6358 			protocol = AP_PROTO_DMA;
6359 			ata_flags |= AP_FLAG_TLEN_SECT_CNT |
6360 				     AP_FLAG_TDIR_TO_DEV;
6361 			sectors_out = ((dxfer_len >> 9) & 0xffff);
6362 		}
6363 		auxiliary = 0;
6364 	} else {
6365 		ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
6366 		if (dxfer_len == 0) {
6367 			command_out = ATA_NCQ_NON_DATA;
6368 			features_out = ATA_NCQ_ZAC_MGMT_OUT;
6369 			/*
6370 			 * We're assuming the SCSI to ATA translation layer
6371 			 * will set the NCQ tag number in the tag field.
6372 			 * That isn't clear from the SAT-4 spec (as of rev 05).
6373 			 */
6374 			sectors_out = 0;
6375 			ata_flags |= AP_FLAG_TLEN_NO_DATA;
6376 		} else {
6377 			command_out = ATA_SEND_FPDMA_QUEUED;
6378 			/*
6379 			 * Note that we're defaulting to normal priority,
6380 			 * and assuming that the SCSI to ATA translation
6381 			 * layer will insert the NCQ tag number in the tag
6382 			 * field.  That isn't clear in the SAT-4 spec (as
6383 			 * of rev 05).
6384 			 */
6385 			sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8;
6386 
6387 			ata_flags |= AP_FLAG_TLEN_FEAT |
6388 				     AP_FLAG_TDIR_TO_DEV;
6389 
6390 			/*
6391 			 * For SEND FPDMA QUEUED, the transfer length is
6392 			 * encoded in the FEATURE register, and 0 means
6393 			 * that 65536 512 byte blocks are to be transferred.
6394 			 * In practice, it seems unlikely that we'll see
6395 			 * a transfer that large, and it may confuse the
6396 			 * SAT layer, because generally that means that
6397 			 * 0 bytes should be transferred.
6398 			 */
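			/*
			 * For example, a 128 KiB buffer (256 blocks) is
			 * encoded below as features_out == 0x100.
			 */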
6399 			if (dxfer_len == (65536 * 512)) {
6400 				features_out = 0;
6401 			} else if (dxfer_len <= (65535 * 512)) {
6402 				features_out = ((dxfer_len >> 9) & 0xffff);
6403 			} else {
6404 				/* The transfer is too big. */
6405 				retval = 1;
6406 				goto bailout;
6407 			}
6408 
6409 		}
6410 
6411 		auxiliary = (zm_action & 0xf) | (zone_flags << 8);
6412 		protocol = AP_PROTO_FPDMA;
6413 	}
6414 
6415 	protocol |= AP_EXTEND;
6416 
6417 	retval = scsi_ata_pass(csio,
6418 	    retries,
6419 	    cbfcnp,
6420 	    /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6421 	    tag_action,
6422 	    /*protocol*/ protocol,
6423 	    /*ata_flags*/ ata_flags,
6424 	    /*features*/ features_out,
6425 	    /*sector_count*/ sectors_out,
6426 	    /*lba*/ zone_id,
6427 	    /*command*/ command_out,
6428 	    /*device*/ 0,
6429 	    /*icc*/ 0,
6430 	    /*auxiliary*/ auxiliary,
6431 	    /*control*/ 0,
6432 	    /*data_ptr*/ data_ptr,
6433 	    /*dxfer_len*/ dxfer_len,
6434 	    /*cdb_storage*/ cdb_storage,
6435 	    /*cdb_storage_len*/ cdb_storage_len,
6436 	    /*minimum_cmd_size*/ 0,
6437 	    /*sense_len*/ SSD_FULL_SIZE,
6438 	    /*timeout*/ timeout);
6439 
6440 bailout:
6441 
6442 	return (retval);
6443 }
6444 
6445 int
6446 scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries,
6447 		     void (*cbfcnp)(struct cam_periph *, union ccb *),
6448 		     uint8_t tag_action, int use_ncq,
6449 		     uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
6450 		     uint8_t *data_ptr, uint32_t dxfer_len,
6451 		     uint8_t *cdb_storage, size_t cdb_storage_len,
6452 		     uint8_t sense_len, uint32_t timeout)
6453 {
6454 	uint8_t command_out, protocol;
6455 	uint16_t features_out, sectors_out;
6456 	uint32_t auxiliary;
6457 	int ata_flags;
6458 	int retval;
6459 
6460 	retval = 0;
6461 	ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS;
6462 
6463 	if (use_ncq == 0) {
6464 		command_out = ATA_ZAC_MANAGEMENT_IN;
6465 		/* XXX KDM put a macro here */
6466 		features_out = (zm_action & 0xf) | (zone_flags << 8);
6467 		sectors_out = dxfer_len >> 9; /* XXX KDM macro */
6468 		protocol = AP_PROTO_DMA;
6469 		ata_flags |= AP_FLAG_TLEN_SECT_CNT;
6470 		auxiliary = 0;
6471 	} else {
6472 		ata_flags |= AP_FLAG_TLEN_FEAT;
6473 
6474 		command_out = ATA_RECV_FPDMA_QUEUED;
6475 		sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8;
6476 
6477 		/*
6478 		 * For RECEIVE FPDMA QUEUED, the transfer length is
6479 		 * encoded in the FEATURE register, and 0 means
6480 		 * that 65536 512 byte blocks are to be transferred.
6481 		 * In practice, it seems unlikely that we'll see
6482 		 * a transfer that large, and it may confuse the
6483 		 * SAT layer, because generally that means that
6484 		 * 0 bytes should be transferred.
6485 		 */
6486 		if (dxfer_len == (65536 * 512)) {
6487 			features_out = 0;
6488 		} else if (dxfer_len <= (65535 * 512)) {
6489 			features_out = ((dxfer_len >> 9) & 0xffff);
6490 		} else {
6491 			/* The transfer is too big. */
6492 			retval = 1;
6493 			goto bailout;
6494 		}
6495 		auxiliary = (zm_action & 0xf) | (zone_flags << 8);
6496 		protocol = AP_PROTO_FPDMA;
6497 	}
6498 
6499 	protocol |= AP_EXTEND;
6500 
6501 	retval = scsi_ata_pass(csio,
6502 	    retries,
6503 	    cbfcnp,
6504 	    /*flags*/ CAM_DIR_IN,
6505 	    tag_action,
6506 	    /*protocol*/ protocol,
6507 	    /*ata_flags*/ ata_flags,
6508 	    /*features*/ features_out,
6509 	    /*sector_count*/ sectors_out,
6510 	    /*lba*/ zone_id,
6511 	    /*command*/ command_out,
6512 	    /*device*/ 0,
6513 	    /*icc*/ 0,
6514 	    /*auxiliary*/ auxiliary,
6515 	    /*control*/ 0,
6516 	    /*data_ptr*/ data_ptr,
6517 	    /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */
6518 	    /*cdb_storage*/ cdb_storage,
6519 	    /*cdb_storage_len*/ cdb_storage_len,
6520 	    /*minimum_cmd_size*/ 0,
6521 	    /*sense_len*/ SSD_FULL_SIZE,
6522 	    /*timeout*/ timeout);
6523 
6524 bailout:
6525 	return (retval);
6526 }
6527