xref: /titanic_52/usr/src/uts/intel/io/dktp/dcdev/dadk.c (revision 67e3a03ed4a2813074d36330f062ed6e593a4937)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Direct Attached Disk
31  */
32 
33 #include <sys/file.h>
34 #include <sys/scsi/scsi.h>
35 #include <sys/var.h>
36 #include <sys/proc.h>
37 #include <sys/dktp/cm.h>
38 #include <sys/vtoc.h>
39 #include <sys/dkio.h>
40 #include <sys/policy.h>
41 #include <sys/priv.h>
42 
43 #include <sys/dktp/dadev.h>
44 #include <sys/dktp/fctypes.h>
45 #include <sys/dktp/flowctrl.h>
46 #include <sys/dktp/tgcom.h>
47 #include <sys/dktp/tgdk.h>
48 #include <sys/dktp/bbh.h>
49 #include <sys/dktp/dadkio.h>
50 #include <sys/dktp/dadk.h>
51 #include <sys/cdio.h>
52 
53 /*
54  * Local Function Prototypes
55  */
56 static void dadk_restart(void *pktp);
57 static void dadk_pktcb(struct cmpkt *pktp);
58 static void dadk_iodone(struct buf *bp);
59 static void dadk_polldone(struct buf *bp);
60 static void dadk_setcap(struct dadk *dadkp);
61 static void dadk_create_errstats(struct dadk *dadkp, int instance);
62 static void dadk_destroy_errstats(struct dadk *dadkp);
63 
64 static int dadk_chkerr(struct cmpkt *pktp);
65 static int dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp);
66 static int dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp);
67 static int dadk_ioretry(struct cmpkt *pktp, int action);
68 
69 static struct cmpkt *dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp,
70     struct buf *bp, void (*cb_func)(struct buf *), int (*func)(caddr_t),
71     caddr_t arg);
72 
73 static int  dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t),
74     caddr_t arg);
75 static void dadk_transport(opaque_t com_data, struct buf *bp);
76 static int dadk_ctl_ioctl(struct dadk *, uint32_t, uintptr_t, int);
77 
/*
 * tgcom interface handed to the flow-control object via dadk_init():
 * packet allocation (dadk_pkt) and packet transport (dadk_transport).
 * The first two slots are unused here and stubbed with nodev.
 */
struct tgcom_objops dadk_com_ops = {
	nodev,
	nodev,
	dadk_pkt,
	dadk_transport,
	0, 0
};
85 
86 /*
87  * architecture dependent allocation restrictions for dadk_iob_alloc(). For
88  * x86, we'll set dma_attr_addr_hi to dadk_max_phys_addr and dma_attr_sgllen
89  * to dadk_sgl_size during _init().
90  */
#if defined(__sparc)
/* DMA attributes used by dadk_iob_alloc() on sparc (fully static). */
static ddi_dma_attr_t dadk_alloc_attr = {
	DMA_ATTR_V0,	/* version number */
	0x0,		/* lowest usable address */
	0xFFFFFFFFull,	/* high DMA address range */
	0xFFFFFFFFull,	/* DMA counter register */
	1,		/* DMA address alignment */
	1,		/* DMA burstsizes */
	1,		/* min effective DMA size */
	0xFFFFFFFFull,	/* max DMA xfer size */
	0xFFFFFFFFull,	/* segment boundary */
	1,		/* s/g list length */
	512,		/* granularity of device */
	0,		/* DMA transfer flags */
};
#elif defined(__x86)
/*
 * DMA attributes used by dadk_iob_alloc() on x86.  The address-high
 * limit and s/g list length are patched in at _init() time from the
 * tunables below.
 */
static ddi_dma_attr_t dadk_alloc_attr = {
	DMA_ATTR_V0,	/* version number */
	0x0,		/* lowest usable address */
	0x0,		/* high DMA address range [set in _init()] */
	0xFFFFull,	/* DMA counter register */
	512,		/* DMA address alignment */
	1,		/* DMA burstsizes */
	1,		/* min effective DMA size */
	0xFFFFFFFFull,	/* max DMA xfer size */
	0xFFFFFFFFull,	/* segment boundary */
	0,		/* s/g list length [set in _init()] */
	512,		/* granularity of device */
	0,		/* DMA transfer flags */
};

/* Tunables copied into dadk_alloc_attr by _init() (x86 only). */
uint64_t dadk_max_phys_addr = 0xFFFFFFFFull;
int dadk_sgl_size = 0xFF;
#endif
125 
126 static int dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags,
127     int silent);
128 static void dadk_rmb_iodone(struct buf *bp);
129 
130 static int dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp,
131     dev_t dev, enum uio_seg dataspace, int rw);
132 static void dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *scmdp,
133     struct buf *bp);
134 static void dadkmin(struct buf *bp);
135 static int dadk_dk_strategy(struct buf *bp);
136 static void dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp);
137 
/*
 * tgdk entry-point vector exported through dadk_create(); callers
 * invoke these via the TGDK_* macros on the returned tgdk object.
 */
struct tgdk_objops dadk_ops = {
	dadk_init,
	dadk_free,
	dadk_probe,
	dadk_attach,
	dadk_open,
	dadk_close,
	dadk_ioctl,
	dadk_strategy,
	dadk_setgeom,
	dadk_getgeom,
	dadk_iob_alloc,
	dadk_iob_free,
	dadk_iob_htoc,
	dadk_iob_xfer,
	dadk_dump,
	dadk_getphygeom,
	dadk_set_bbhobj,
	dadk_check_media,
	dadk_inquiry,
	dadk_cleanup,
	0
};
161 
/*
 * Local static data
 */

#ifdef	DADK_DEBUG
/* Bit flags for the dadk_debug trace mask below. */
#define	DENT	0x0001	/* function entry */
#define	DERR	0x0002	/* errors */
#define	DIO	0x0004	/* I/O */
#define	DGEOM	0x0010	/* geometry */
#define	DSTATE  0x0020	/* state changes */
static	int	dadk_debug = DGEOM;

#endif	/* DADK_DEBUG */

static int dadk_check_media_time = 3000000;	/* 3 Second State Check */
static int dadk_dk_maxphys = 0x80000;
178 
/*
 * Human-readable command names indexed by DCMD_* code; the first
 * (octal-escaped) byte of each string is the command number itself.
 */
static char	*dadk_cmds[] = {
	"\000Unknown",			/* unknown 		*/
	"\001read sector",		/* DCMD_READ 1		*/
	"\002write sector",		/* DCMD_WRITE 2		*/
	"\003format track",		/* DCMD_FMTTRK 3	*/
	"\004format whole drive",	/* DCMD_FMTDRV 4	*/
	"\005recalibrate",		/* DCMD_RECAL  5	*/
	"\006seek sector",		/* DCMD_SEEK   6	*/
	"\007read verify",		/* DCMD_RDVER  7	*/
	"\010read defect list",		/* DCMD_GETDEF 8	*/
	"\011lock door",		/* DCMD_LOCK   9	*/
	"\012unlock door",		/* DCMD_UNLOCK 10	*/
	"\013start motor",		/* DCMD_START_MOTOR 11	*/
	"\014stop motor",		/* DCMD_STOP_MOTOR 12	*/
	"\015eject",			/* DCMD_EJECT  13	*/
	"\016update geometry",		/* DCMD_UPDATE_GEOM  14	*/
	"\017get state",		/* DCMD_GET_STATE  15	*/
	"\020cdrom pause",		/* DCMD_PAUSE  16	*/
	"\021cdrom resume",		/* DCMD_RESUME  17	*/
	"\022cdrom play track index",	/* DCMD_PLAYTRKIND  18	*/
	"\023cdrom play msf",		/* DCMD_PLAYMSF  19	*/
	"\024cdrom sub channel",	/* DCMD_SUBCHNL  20	*/
	"\025cdrom read mode 1",	/* DCMD_READMODE1  21	*/
	"\026cdrom read toc header",	/* DCMD_READTOCHDR  22	*/
	"\027cdrom read toc entry",	/* DCMD_READTOCENT  23	*/
	"\030cdrom read offset",	/* DCMD_READOFFSET  24	*/
	"\031cdrom read mode 2",	/* DCMD_READMODE2  25	*/
	"\032cdrom volume control",	/* DCMD_VOLCTRL  26	*/
	"\033flush cache",		/* DCMD_FLUSH_CACHE  27	*/
	NULL
};
210 
/*
 * Human-readable sense/error descriptions indexed by DERR_* code;
 * as with dadk_cmds[], the leading octal byte is the code number.
 */
static char *dadk_sense[] = {
	"\000Success",			/* DERR_SUCCESS		*/
	"\001address mark not found",	/* DERR_AMNF		*/
	"\002track 0 not found",	/* DERR_TKONF		*/
	"\003aborted command",		/* DERR_ABORT		*/
	"\004write fault",		/* DERR_DWF		*/
	"\005ID not found",		/* DERR_IDNF		*/
	"\006drive busy",		/* DERR_BUSY		*/
	"\007uncorrectable data error",	/* DERR_UNC		*/
	"\010bad block detected",	/* DERR_BBK		*/
	"\011invalid command",		/* DERR_INVCDB		*/
	"\012device hard error",	/* DERR_HARD		*/
	"\013illegal length indicated", /* DERR_ILI		*/
	"\014end of media",		/* DERR_EOM		*/
	"\015media change requested",	/* DERR_MCR		*/
	"\016recovered from error",	/* DERR_RECOVER		*/
	"\017device not ready",		/* DERR_NOTREADY	*/
	"\020medium error",		/* DERR_MEDIUM		*/
	"\021hardware error",		/* DERR_HW		*/
	"\022illegal request",		/* DERR_ILL		*/
	"\023unit attention",		/* DERR_UNIT_ATTN	*/
	"\024data protection",		/* DERR_DATA_PROT	*/
	"\025miscompare",		/* DERR_MISCOMPARE	*/
	"\026ICRC error during UDMA",	/* DERR_ICRC		*/
	"\027reserved",			/* DERR_RESV		*/
	NULL
};
238 
239 static char *dadk_name = "Disk";
240 
241 /*
242  *	This is the loadable module wrapper
243  */
244 #include <sys/modctl.h>
245 
246 extern struct mod_ops mod_miscops;
247 
/* Loadable-module linkage: this is a misc module (no dev_ops). */
static struct modlmisc modlmisc = {
	&mod_miscops,	/* Type of module */
	"Direct Attached Disk %I%"
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};
256 
/*
 * Module load entry point.  On x86, finalize the DMA attributes used
 * by dadk_iob_alloc() from the dadk_max_phys_addr/dadk_sgl_size
 * tunables before installing the module.
 */
int
_init(void)
{
#ifdef DADK_DEBUG
	if (dadk_debug & DENT)
		PRF("dadk_init: call\n");
#endif

#if defined(__x86)
	/* set the max physical address for iob allocs on x86 */
	dadk_alloc_attr.dma_attr_addr_hi = dadk_max_phys_addr;

	/*
	 * set the sgllen for iob allocs on x86. If this is set less than
	 * the number of pages the buffer will take (taking into account
	 * alignment), it would force the allocator to try and allocate
	 * contiguous pages.
	 */
	dadk_alloc_attr.dma_attr_sgllen = dadk_sgl_size;
#endif

	return (mod_install(&modlinkage));
}
280 
/*
 * Module unload entry point.
 */
int
_fini(void)
{
#ifdef DADK_DEBUG
	if (dadk_debug & DENT)
		PRF("dadk_fini: call\n");
#endif

	return (mod_remove(&modlinkage));
}
291 
292 int
293 _info(struct modinfo *modinfop)
294 {
295 	return (mod_info(&modlinkage, modinfop));
296 }
297 
/*
 * Allocate and initialize a tgdk object with its dadk instance data.
 * Both are carved from one allocation (dadk immediately follows the
 * tgdk_obj); dadk_free() releases them together.  Returns NULL on
 * allocation failure (KM_NOSLEEP).
 */
struct tgdk_obj *
dadk_create()
{
	struct tgdk_obj *dkobjp;
	struct dadk *dadkp;

	dkobjp = kmem_zalloc((sizeof (*dkobjp) + sizeof (*dadkp)), KM_NOSLEEP);
	if (!dkobjp)
		return (NULL);
	/* dadk instance lives immediately after the tgdk object */
	dadkp = (struct dadk *)(dkobjp+1);

	dkobjp->tg_ops  = (struct  tgdk_objops *)&dadk_ops;
	dkobjp->tg_data = (opaque_t)dadkp;
	dkobjp->tg_ext = &(dkobjp->tg_extblk);
	dadkp->dad_extp = &(dkobjp->tg_extblk);

#ifdef DADK_DEBUG
	if (dadk_debug & DENT)
		PRF("dadk_create: tgdkobjp= 0x%x dadkp= 0x%x\n", dkobjp, dadkp);
#endif
	return (dkobjp);
}
320 
/*
 * Initialize a dadk instance: bind it to the scsi_device and the HBA
 * transport handle, set up the tgcom communication object, install
 * the bad-block object, and initialize flow control.  Returns the
 * FLC_INIT() result.
 */
int
dadk_init(opaque_t objp, opaque_t devp, opaque_t flcobjp, opaque_t queobjp,
	opaque_t bbhobjp, void *lkarg)
{
	struct dadk *dadkp = (struct dadk *)objp;
	struct scsi_device *sdevp = (struct scsi_device *)devp;

	dadkp->dad_sd = devp;
	/* the controller object is the HBA transport handle */
	dadkp->dad_ctlobjp = (opaque_t)sdevp->sd_address.a_hba_tran;
	sdevp->sd_private = (caddr_t)dadkp;

	/* initialize the communication object */
	dadkp->dad_com.com_data = (opaque_t)dadkp;
	dadkp->dad_com.com_ops  = &dadk_com_ops;

	dadkp->dad_bbhobjp = bbhobjp;
	BBH_INIT(bbhobjp);

	dadkp->dad_flcobjp = flcobjp;
	mutex_init(&dadkp->dad_cmd_mutex, NULL, MUTEX_DRIVER, NULL);
	dadkp->dad_cmd_count = 0;
	return (FLC_INIT(flcobjp, &(dadkp->dad_com), queobjp, lkarg));
}
344 
/*
 * Free a tgdk object created by dadk_create(): run TGDK_CLEANUP
 * (dadk_cleanup()) first, then release the combined
 * tgdk_obj + dadk allocation.
 */
int
dadk_free(struct tgdk_obj *dkobjp)
{
	TGDK_CLEANUP(dkobjp);
	kmem_free(dkobjp, (sizeof (*dkobjp) + sizeof (struct dadk)));

	return (DDI_SUCCESS);
}
353 
/*
 * Release resources held by a dadk instance: detach from the
 * scsi_device, free the bad-block and flow-control objects, and
 * destroy the command-count mutex.  Called via TGDK_CLEANUP from
 * dadk_free().
 */
void
dadk_cleanup(struct tgdk_obj *dkobjp)
{
	struct dadk *dadkp;

	dadkp = (struct dadk *)(dkobjp->tg_data);
	if (dadkp->dad_sd)
		dadkp->dad_sd->sd_private = NULL;
	if (dadkp->dad_bbhobjp) {
		BBH_FREE(dadkp->dad_bbhobjp);
		dadkp->dad_bbhobjp = NULL;
	}
	if (dadkp->dad_flcobjp) {
		FLC_FREE(dadkp->dad_flcobjp);
		dadkp->dad_flcobjp = NULL;
	}
	mutex_destroy(&dadkp->dad_cmd_mutex);
}
372 
/*
 * Probe: classify the device from its INQUIRY data and record
 * controller type, node type and removable/read-only flags.  Only
 * direct-access disks and CD-ROMs are claimed; WORM, optical and
 * unknown device types fail the probe.  Logs the vendor/product
 * identity on success.
 */
/* ARGSUSED */
int
dadk_probe(opaque_t objp, int kmsflg)
{
	struct dadk *dadkp = (struct dadk *)objp;
	struct scsi_device *devp;
	char   name[80];

	devp = dadkp->dad_sd;
	if (!devp->sd_inq || (devp->sd_inq->inq_dtype == DTYPE_NOTPRESENT) ||
	    (devp->sd_inq->inq_dtype == DTYPE_UNKNOWN)) {
		return (DDI_PROBE_FAILURE);
	}

	switch (devp->sd_inq->inq_dtype) {
		case DTYPE_DIRECT:
			dadkp->dad_ctype = DKC_DIRECT;
			dadkp->dad_extp->tg_nodetype = DDI_NT_BLOCK;
			dadkp->dad_extp->tg_ctype = DKC_DIRECT;
			break;
		case DTYPE_RODIRECT: /* eg cdrom */
			dadkp->dad_ctype = DKC_CDROM;
			dadkp->dad_extp->tg_rdonly = 1;
			dadkp->dad_rdonly = 1;
			dadkp->dad_cdrom = 1;
			dadkp->dad_extp->tg_nodetype = DDI_NT_CD;
			dadkp->dad_extp->tg_ctype = DKC_CDROM;
			break;
		case DTYPE_WORM:
		case DTYPE_OPTICAL:
		default:
			return (DDI_PROBE_FAILURE);
	}

	dadkp->dad_extp->tg_rmb = dadkp->dad_rmb = devp->sd_inq->inq_rmb;

	/* default to 512-byte sectors; refined later by dadk_setcap() */
	dadkp->dad_secshf = SCTRSHFT;
	dadkp->dad_blkshf = 0;

	/* display the device name */
	(void) strcpy(name, "Vendor '");
	gda_inqfill((caddr_t)devp->sd_inq->inq_vid, 8, &name[strlen(name)]);
	(void) strcat(name, "' Product '");
	gda_inqfill((caddr_t)devp->sd_inq->inq_pid, 16, &name[strlen(name)]);
	(void) strcat(name, "'");
	gda_log(devp->sd_dev, dadk_name, CE_NOTE, "!<%s>\n", name);

	return (DDI_PROBE_SUCCESS);
}
422 
423 
424 /* ARGSUSED */
425 int
426 dadk_attach(opaque_t objp)
427 {
428 	return (DDI_SUCCESS);
429 }
430 
431 int
432 dadk_set_bbhobj(opaque_t objp, opaque_t bbhobjp)
433 {
434 	struct dadk *dadkp = (struct dadk *)objp;
435 	/* free the old bbh object */
436 	if (dadkp->dad_bbhobjp)
437 		BBH_FREE(dadkp->dad_bbhobjp);
438 
439 	/* initialize the new bbh object */
440 	dadkp->dad_bbhobjp = bbhobjp;
441 	BBH_INIT(bbhobjp);
442 
443 	return (DDI_SUCCESS);
444 }
445 
/*
 * Open: for fixed disks whose geometry is already known, just start
 * kstat profiling.  Otherwise (re)read logical and physical geometry,
 * record the write-cache-enable state, compute sector-size shift
 * factors, create error kstats, and start profiling.  For removable
 * media the drive is first spun up, door-locked and its geometry
 * updated; any failure there fails the open.
 */
/* ARGSUSED */
int
dadk_open(opaque_t objp, int flag)
{
	struct dadk *dadkp = (struct dadk *)objp;
	int error;
	int wce;

	if (!dadkp->dad_rmb) {
		/* nonzero capacity means geometry was read on a prior open */
		if (dadkp->dad_phyg.g_cap) {
			FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
			    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));
			return (DDI_SUCCESS);
		}
	} else {
		mutex_enter(&dadkp->dad_mutex);
		dadkp->dad_iostate = DKIO_NONE;
		cv_broadcast(&dadkp->dad_state_cv);
		mutex_exit(&dadkp->dad_mutex);

		if (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0, 0,
		    DADK_SILENT) ||
		    dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT) ||
		    dadk_rmb_ioctl(dadkp, DCMD_UPDATE_GEOM, 0, 0,
		    DADK_SILENT)) {
			return (DDI_FAILURE);
		}

		mutex_enter(&dadkp->dad_mutex);
		dadkp->dad_iostate = DKIO_INSERTED;
		cv_broadcast(&dadkp->dad_state_cv);
		mutex_exit(&dadkp->dad_mutex);
	}

	/*
	 * get write cache enable state
	 * If there is an error, must assume that write cache
	 * is enabled.
	 * NOTE: Since there is currently no Solaris mechanism to
	 * change the state of the Write Cache Enable feature,
	 * this code just checks the value of the WCE bit
	 * obtained at device init time.  If a mechanism
	 * is added to the driver to change WCE, dad_wce
	 * must be updated appropriately.
	 */
	error = dadk_ctl_ioctl(dadkp, DIOCTL_GETWCE,
	    (uintptr_t)&wce, FKIOCTL | FNATIVE);
	mutex_enter(&dadkp->dad_mutex);
	dadkp->dad_wce = (error != 0) || (wce != 0);
	mutex_exit(&dadkp->dad_mutex);

	/* logical disk geometry */
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETGEOM,
	    (uintptr_t)&dadkp->dad_logg, FKIOCTL | FNATIVE);
	if (dadkp->dad_logg.g_cap == 0)
		return (DDI_FAILURE);

	/* get physical disk geometry */
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETPHYGEOM,
	    (uintptr_t)&dadkp->dad_phyg, FKIOCTL | FNATIVE);
	if (dadkp->dad_phyg.g_cap == 0)
		return (DDI_FAILURE);

	/* derive sector-size and shift factors from the geometry */
	dadk_setcap(dadkp);

	dadk_create_errstats(dadkp,
	    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));

	/* start profiling */
	FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
	    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));

	return (DDI_SUCCESS);
}
520 
521 static void
522 dadk_setcap(struct dadk *dadkp)
523 {
524 	int	 totsize;
525 	int	 i;
526 
527 	totsize = dadkp->dad_phyg.g_secsiz;
528 
529 	if (totsize == 0) {
530 		if (dadkp->dad_cdrom) {
531 			totsize = 2048;
532 		} else {
533 			totsize = NBPSCTR;
534 		}
535 	} else {
536 		/* Round down sector size to multiple of 512B */
537 		totsize &= ~(NBPSCTR-1);
538 	}
539 	dadkp->dad_phyg.g_secsiz = totsize;
540 
541 	/* set sec,block shift factor - (512->0, 1024->1, 2048->2, etc.) */
542 	totsize >>= SCTRSHFT;
543 	for (i = 0; totsize != 1; i++, totsize >>= 1)
544 		;
545 	dadkp->dad_blkshf = i;
546 	dadkp->dad_secshf = i + SCTRSHFT;
547 }
548 
549 
550 static void
551 dadk_create_errstats(struct dadk *dadkp, int instance)
552 {
553 	dadk_errstats_t *dep;
554 	char kstatname[KSTAT_STRLEN];
555 	dadk_ioc_string_t dadk_ioc_string;
556 
557 	if (dadkp->dad_errstats)
558 		return;
559 
560 	(void) sprintf(kstatname, "cmdk%d,error", instance);
561 	dadkp->dad_errstats = kstat_create("cmdkerror", instance,
562 	    kstatname, "device_error", KSTAT_TYPE_NAMED,
563 	    sizeof (dadk_errstats_t) / sizeof (kstat_named_t),
564 	    KSTAT_FLAG_PERSISTENT);
565 
566 	if (!dadkp->dad_errstats)
567 		return;
568 
569 	dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;
570 
571 	kstat_named_init(&dep->dadk_softerrs,
572 	    "Soft Errors", KSTAT_DATA_UINT32);
573 	kstat_named_init(&dep->dadk_harderrs,
574 	    "Hard Errors", KSTAT_DATA_UINT32);
575 	kstat_named_init(&dep->dadk_transerrs,
576 	    "Transport Errors", KSTAT_DATA_UINT32);
577 	kstat_named_init(&dep->dadk_model,
578 	    "Model", KSTAT_DATA_CHAR);
579 	kstat_named_init(&dep->dadk_revision,
580 	    "Revision", KSTAT_DATA_CHAR);
581 	kstat_named_init(&dep->dadk_serial,
582 	    "Serial No", KSTAT_DATA_CHAR);
583 	kstat_named_init(&dep->dadk_capacity,
584 	    "Size", KSTAT_DATA_ULONGLONG);
585 	kstat_named_init(&dep->dadk_rq_media_err,
586 	    "Media Error", KSTAT_DATA_UINT32);
587 	kstat_named_init(&dep->dadk_rq_ntrdy_err,
588 	    "Device Not Ready", KSTAT_DATA_UINT32);
589 	kstat_named_init(&dep->dadk_rq_nodev_err,
590 	    "No Device", KSTAT_DATA_UINT32);
591 	kstat_named_init(&dep->dadk_rq_recov_err,
592 	    "Recoverable", KSTAT_DATA_UINT32);
593 	kstat_named_init(&dep->dadk_rq_illrq_err,
594 	    "Illegal Request", KSTAT_DATA_UINT32);
595 
596 	dadkp->dad_errstats->ks_private = dep;
597 	dadkp->dad_errstats->ks_update = nulldev;
598 	kstat_install(dadkp->dad_errstats);
599 
600 	/* get model */
601 	dep->dadk_model.value.c[0] = 0;
602 	dadk_ioc_string.is_buf = &dep->dadk_model.value.c[0];
603 	dadk_ioc_string.is_size = sizeof (dep->dadk_model.value.c);
604 	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETMODEL,
605 	    (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);
606 
607 	/* get serial */
608 	dep->dadk_serial.value.c[0] = 0;
609 	dadk_ioc_string.is_buf = &dep->dadk_serial.value.c[0];
610 	dadk_ioc_string.is_size = sizeof (dep->dadk_serial.value.c);
611 	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETSERIAL,
612 	    (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);
613 
614 	/* Get revision */
615 	dep->dadk_revision.value.c[0] = 0;
616 
617 	/* Get capacity */
618 
619 	dep->dadk_capacity.value.ui64 =
620 	    (uint64_t)dadkp->dad_logg.g_cap *
621 	    (uint64_t)dadkp->dad_logg.g_secsiz;
622 }
623 
624 
/*
 * Close: for removable media, spin the drive down and unlock the door
 * (failures ignored); then stop flow-control kstat profiling and tear
 * down the error kstats created at open time.
 */
int
dadk_close(opaque_t objp)
{
	struct dadk *dadkp = (struct dadk *)objp;

	if (dadkp->dad_rmb) {
		(void) dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0, 0,
		    DADK_SILENT);
		(void) dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT);
	}
	FLC_STOP_KSTAT(dadkp->dad_flcobjp);

	dadk_destroy_errstats(dadkp);

	return (DDI_SUCCESS);
}
641 
642 static void
643 dadk_destroy_errstats(struct dadk *dadkp)
644 {
645 	if (!dadkp->dad_errstats)
646 		return;
647 
648 	kstat_delete(dadkp->dad_errstats);
649 	dadkp->dad_errstats = NULL;
650 }
651 
652 
/*
 * Block I/O entry point.  Rejects writes to read-only devices (EROFS)
 * and requests that are not a whole number of device sectors (ENXIO),
 * converts the logical block number to a physical sector, then hands
 * the buf to the flow-control object for transport.
 */
int
dadk_strategy(opaque_t objp, struct buf *bp)
{
	struct dadk *dadkp = (struct dadk *)objp;

	if (dadkp->dad_rdonly && !(bp->b_flags & B_READ)) {
		bioerror(bp, EROFS);
		return (DDI_FAILURE);
	}

	/* byte count must be sector-aligned */
	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
		bioerror(bp, ENXIO);
		return (DDI_FAILURE);
	}

	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));
	mutex_enter(&dadkp->dad_cmd_mutex);
	dadkp->dad_cmd_count++;	/* outstanding-command accounting */
	mutex_exit(&dadkp->dad_cmd_mutex);
	FLC_ENQUE(dadkp->dad_flcobjp, bp);

	return (DDI_SUCCESS);
}
676 
/*
 * Crash-dump entry point: write 'bp' to disk using polled transport
 * (CPF_NOINTR, dadk_polldone), issuing the request in controller-sized
 * chunks until it completes or an error is posted on the buf.
 */
int
dadk_dump(opaque_t objp, struct buf *bp)
{
	struct dadk *dadkp = (struct dadk *)objp;
	struct cmpkt *pktp;

	if (dadkp->dad_rdonly) {
		bioerror(bp, EROFS);
		return (DDI_FAILURE);
	}

	/* byte count must be sector-aligned */
	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
		bioerror(bp, ENXIO);
		return (DDI_FAILURE);
	}

	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));

	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_polldone, NULL, NULL);
	if (!pktp) {
		cmn_err(CE_WARN, "no resources for dumping");
		bioerror(bp, EIO);
		return (DDI_FAILURE);
	}
	pktp->cp_flags |= CPF_NOINTR;	/* polled completion */

	(void) dadk_ioprep(dadkp, pktp);
	dadk_transport(dadkp, bp);
	pktp->cp_byteleft -= pktp->cp_bytexfer;

	/* issue remaining chunks until done or bp carries an error */
	while (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
		(void) dadk_iosetup(dadkp, pktp);
		dadk_transport(dadkp, bp);
		pktp->cp_byteleft -= pktp->cp_bytexfer;
	}

	if (pktp->cp_private)
		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
	return (DDI_SUCCESS);
}
718 
/*
 * ioctl entry point.  Handles DKIOCGETDEF (read a defect list sector),
 * DIOCTL_RWCMD read/write (queued asynchronously via flow control),
 * DKIOC_UPDATEFW (privileged firmware update) and
 * DKIOCFLUSHWRITECACHE (sync or callback-driven cache flush) directly.
 * Everything else goes to the controller for fixed disks, or — for
 * removable media — is mapped to a DCMD_* command and issued through
 * dadk_rmb_ioctl().
 */
/* ARGSUSED  */
int
dadk_ioctl(opaque_t objp, dev_t dev, int cmd, intptr_t arg, int flag,
	cred_t *cred_p, int *rval_p)
{
	struct dadk *dadkp = (struct dadk *)objp;

	switch (cmd) {
	case DKIOCGETDEF:
		{
		struct buf	*bp;
		int		err, head;
		unsigned char	*secbuf;
		STRUCT_DECL(defect_header, adh);

		STRUCT_INIT(adh, flag & FMODELS);

		/*
		 * copyin header ....
		 * yields head number and buffer address
		 */
		if (ddi_copyin((caddr_t)arg, STRUCT_BUF(adh), STRUCT_SIZE(adh),
		    flag))
			return (EFAULT);
		head = STRUCT_FGET(adh, head);
		if (head < 0 || head >= dadkp->dad_phyg.g_head)
			return (ENXIO);
		secbuf = kmem_zalloc(NBPSCTR, KM_SLEEP);
		if (!secbuf)
			return (ENOMEM);
		bp = getrbuf(KM_SLEEP);
		if (!bp) {
			kmem_free(secbuf, NBPSCTR);
			return (ENOMEM);
		}

		bp->b_edev = dev;
		bp->b_dev  = cmpdev(dev);
		bp->b_flags = B_BUSY;
		bp->b_resid = 0;
		bp->b_bcount = NBPSCTR;
		bp->b_un.b_addr = (caddr_t)secbuf;
		bp->b_blkno = head; /* head number smuggled via b_blkno */
		bp->b_forw = (struct buf *)dadkp;
		bp->b_back = (struct buf *)DCMD_GETDEF;

		/* queue the request and wait for completion */
		mutex_enter(&dadkp->dad_cmd_mutex);
		dadkp->dad_cmd_count++;
		mutex_exit(&dadkp->dad_cmd_mutex);
		FLC_ENQUE(dadkp->dad_flcobjp, bp);
		err = biowait(bp);
		if (!err) {
			if (ddi_copyout((caddr_t)secbuf,
			    STRUCT_FGETP(adh, buffer), NBPSCTR, flag))
				err = ENXIO;
		}
		kmem_free(secbuf, NBPSCTR);
		freerbuf(bp);
		return (err);
		}
	case DIOCTL_RWCMD:
		{
		struct dadkio_rwcmd *rwcmdp;
		int status, rw;

		/*
		 * copied in by cmdk and, if necessary, converted to the
		 * correct datamodel
		 */
		rwcmdp = (struct dadkio_rwcmd *)(intptr_t)arg;

		/*
		 * handle the complex cases here; we pass these
		 * through to the driver, which will queue them and
		 * handle the requests asynchronously.  The simpler
		 * cases, which can return immediately, fail here, and
		 * the request reverts to the dadk_ioctl routine, which
		 * will reroute them directly to the ata driver.
		 */
		switch (rwcmdp->cmd) {
			case DADKIO_RWCMD_READ :
				/*FALLTHROUGH*/
			case DADKIO_RWCMD_WRITE:
				rw = ((rwcmdp->cmd == DADKIO_RWCMD_WRITE) ?
				    B_WRITE : B_READ);
				status = dadk_dk_buf_setup(dadkp,
				    (opaque_t)rwcmdp, dev, ((flag &FKIOCTL) ?
				    UIO_SYSSPACE : UIO_USERSPACE), rw);
				return (status);
			default:
				return (EINVAL);
			}
		}
	case DKIOC_UPDATEFW:

		/*
		 * Require PRIV_ALL privilege to invoke DKIOC_UPDATEFW
		 * to protect the firmware update from malicious use
		 */
		if (PRIV_POLICY(cred_p, PRIV_ALL, B_FALSE, EPERM, NULL) != 0)
			return (EPERM);
		else
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));

	case DKIOCFLUSHWRITECACHE:
		{
			struct buf *bp;
			int err = 0;
			struct dk_callback *dkc = (struct dk_callback *)arg;
			struct cmpkt *pktp;
			int is_sync = 1;

			mutex_enter(&dadkp->dad_mutex);
			/* nothing to flush if no write cache (or flush unsupported) */
			if (dadkp->dad_noflush || !  dadkp->dad_wce) {
				err = dadkp->dad_noflush ? ENOTSUP : 0;
				mutex_exit(&dadkp->dad_mutex);
				/*
				 * If a callback was requested: a
				 * callback will always be done if the
				 * caller saw the DKIOCFLUSHWRITECACHE
				 * ioctl return 0, and never done if the
				 * caller saw the ioctl return an error.
				 */
				if ((flag & FKIOCTL) && dkc != NULL &&
				    dkc->dkc_callback != NULL) {
					(*dkc->dkc_callback)(dkc->dkc_cookie,
					    err);
					/*
					 * Did callback and reported error.
					 * Since we did a callback, ioctl
					 * should return 0.
					 */
					err = 0;
				}
				return (err);
			}
			mutex_exit(&dadkp->dad_mutex);

			bp = getrbuf(KM_SLEEP);

			bp->b_edev = dev;
			bp->b_dev  = cmpdev(dev);
			bp->b_flags = B_BUSY;
			bp->b_resid = 0;
			bp->b_bcount = 0;
			SET_BP_SEC(bp, 0);

			/* asynchronous flush: completion via dadk_flushdone */
			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				struct dk_callback *dkc2 =
				    (struct dk_callback *)kmem_zalloc(
				    sizeof (struct dk_callback), KM_SLEEP);

				bcopy(dkc, dkc2, sizeof (*dkc2));
				/*
				 * Borrow b_list to carry private data
				 * to the b_iodone func.
				 */
				bp->b_list = (struct buf *)dkc2;
				bp->b_iodone = dadk_flushdone;
				is_sync = 0;
			}

			/*
			 * Setup command pkt
			 * dadk_pktprep() can't fail since DDI_DMA_SLEEP set
			 */
			pktp = dadk_pktprep(dadkp, NULL, bp,
			    dadk_iodone, DDI_DMA_SLEEP, NULL);

			pktp->cp_time = DADK_FLUSH_CACHE_TIME;

			*((char *)(pktp->cp_cdbp)) = DCMD_FLUSH_CACHE;
			pktp->cp_byteleft = 0;
			pktp->cp_private = NULL;
			pktp->cp_secleft = 0;
			pktp->cp_srtsec = -1;
			pktp->cp_bytexfer = 0;

			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);

			mutex_enter(&dadkp->dad_cmd_mutex);
			dadkp->dad_cmd_count++;
			mutex_exit(&dadkp->dad_cmd_mutex);
			FLC_ENQUE(dadkp->dad_flcobjp, bp);

			if (is_sync) {
				err = biowait(bp);
				freerbuf(bp);
			}
			return (err);
		}
	default:
		/* fixed disks: pass everything else to the controller */
		if (!dadkp->dad_rmb)
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));
	}

	/* removable-media commands from here on */
	switch (cmd) {
	case CDROMSTOP:
		return (dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0,
		    0, DADK_SILENT));
	case CDROMSTART:
		return (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0,
		    0, DADK_SILENT));
	case DKIOCLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT));
	case DKIOCUNLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT));
	case DKIOCEJECT:
	case CDROMEJECT:
		{
			int ret;

			if (ret = dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0,
			    DADK_SILENT)) {
				return (ret);
			}
			if (ret = dadk_rmb_ioctl(dadkp, DCMD_EJECT, 0, 0,
			    DADK_SILENT)) {
				return (ret);
			}
			mutex_enter(&dadkp->dad_mutex);
			dadkp->dad_iostate = DKIO_EJECTED;
			cv_broadcast(&dadkp->dad_state_cv);
			mutex_exit(&dadkp->dad_mutex);

			return (0);

		}
	default:
		return (ENOTTY);
	/*
	 * cdrom audio commands
	 * (case labels below the default are still matched normally;
	 * they map ioctl commands to DCMD_* codes and fall through to
	 * the dadk_rmb_ioctl() call after the switch)
	 */
	case CDROMPAUSE:
		cmd = DCMD_PAUSE;
		break;
	case CDROMRESUME:
		cmd = DCMD_RESUME;
		break;
	case CDROMPLAYMSF:
		cmd = DCMD_PLAYMSF;
		break;
	case CDROMPLAYTRKIND:
		cmd = DCMD_PLAYTRKIND;
		break;
	case CDROMREADTOCHDR:
		cmd = DCMD_READTOCHDR;
		break;
	case CDROMREADTOCENTRY:
		cmd = DCMD_READTOCENT;
		break;
	case CDROMVOLCTRL:
		cmd = DCMD_VOLCTRL;
		break;
	case CDROMSUBCHNL:
		cmd = DCMD_SUBCHNL;
		break;
	case CDROMREADMODE2:
		cmd = DCMD_READMODE2;
		break;
	case CDROMREADMODE1:
		cmd = DCMD_READMODE1;
		break;
	case CDROMREADOFFSET:
		cmd = DCMD_READOFFSET;
		break;
	}
	return (dadk_rmb_ioctl(dadkp, cmd, arg, flag, 0));
}
989 
990 int
991 dadk_flushdone(struct buf *bp)
992 {
993 	struct dk_callback *dkc = (struct dk_callback *)bp->b_list;
994 
995 	ASSERT(dkc != NULL && dkc->dkc_callback != NULL);
996 
997 	(*dkc->dkc_callback)(dkc->dkc_cookie, geterror(bp));
998 
999 	kmem_free(dkc, sizeof (*dkc));
1000 	freerbuf(bp);
1001 	return (0);
1002 }
1003 
1004 int
1005 dadk_getphygeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
1006 {
1007 	struct dadk *dadkp = (struct dadk *)objp;
1008 
1009 	bcopy((caddr_t)&dadkp->dad_phyg, (caddr_t)dkgeom_p,
1010 	    sizeof (struct tgdk_geom));
1011 	return (DDI_SUCCESS);
1012 }
1013 
1014 int
1015 dadk_getgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
1016 {
1017 	struct dadk *dadkp = (struct dadk *)objp;
1018 	bcopy((caddr_t)&dadkp->dad_logg, (caddr_t)dkgeom_p,
1019 	    sizeof (struct tgdk_geom));
1020 	return (DDI_SUCCESS);
1021 }
1022 
1023 int
1024 dadk_setgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
1025 {
1026 	struct dadk *dadkp = (struct dadk *)objp;
1027 
1028 	dadkp->dad_logg.g_cyl = dkgeom_p->g_cyl;
1029 	dadkp->dad_logg.g_head = dkgeom_p->g_head;
1030 	dadkp->dad_logg.g_sec = dkgeom_p->g_sec;
1031 	dadkp->dad_logg.g_cap = dkgeom_p->g_cap;
1032 	return (DDI_SUCCESS);
1033 }
1034 
1035 
1036 tgdk_iob_handle
1037 dadk_iob_alloc(opaque_t objp, daddr_t blkno, ssize_t xfer, int kmsflg)
1038 {
1039 	struct dadk *dadkp = (struct dadk *)objp;
1040 	struct buf *bp;
1041 	struct tgdk_iob *iobp;
1042 	size_t rlen;
1043 
1044 	iobp = kmem_zalloc(sizeof (*iobp), kmsflg);
1045 	if (iobp == NULL)
1046 		return (NULL);
1047 	if ((bp = getrbuf(kmsflg)) == NULL) {
1048 		kmem_free(iobp, sizeof (*iobp));
1049 		return (NULL);
1050 	}
1051 
1052 	iobp->b_psec  = LBLK2SEC(blkno, dadkp->dad_blkshf);
1053 	iobp->b_pbyteoff = (blkno & ((1<<dadkp->dad_blkshf) - 1)) << SCTRSHFT;
1054 	iobp->b_pbytecnt = ((iobp->b_pbyteoff + xfer + dadkp->DAD_SECSIZ - 1)
1055 	    >> dadkp->dad_secshf) << dadkp->dad_secshf;
1056 
1057 	bp->b_un.b_addr = 0;
1058 	/*
1059 	 * use i_ddi_mem_alloc() for now until we have an interface to allocate
1060 	 * memory for DMA which doesn't require a DMA handle. ddi_iopb_alloc()
1061 	 * is obsolete and we want more flexibility in controlling the DMA
1062 	 * address constraints..
1063 	 */
1064 	if (i_ddi_mem_alloc((dadkp->dad_sd)->sd_dev, &dadk_alloc_attr,
1065 	    (size_t)iobp->b_pbytecnt, ((kmsflg == KM_SLEEP) ? 1 : 0), 0, NULL,
1066 	    &bp->b_un.b_addr, &rlen, NULL) != DDI_SUCCESS) {
1067 		freerbuf(bp);
1068 		kmem_free(iobp, sizeof (*iobp));
1069 		return (NULL);
1070 	}
1071 	iobp->b_flag |= IOB_BPALLOC | IOB_BPBUFALLOC;
1072 	iobp->b_bp = bp;
1073 	iobp->b_lblk = blkno;
1074 	iobp->b_xfer = xfer;
1075 	iobp->b_lblk = blkno;
1076 	iobp->b_xfer = xfer;
1077 	return (iobp);
1078 }
1079 
1080 /* ARGSUSED */
1081 int
1082 dadk_iob_free(opaque_t objp, struct tgdk_iob *iobp)
1083 {
1084 	struct buf *bp;
1085 
1086 	if (iobp) {
1087 		if (iobp->b_bp && (iobp->b_flag & IOB_BPALLOC)) {
1088 			bp = iobp->b_bp;
1089 			if (bp->b_un.b_addr && (iobp->b_flag & IOB_BPBUFALLOC))
1090 				i_ddi_mem_free((caddr_t)bp->b_un.b_addr, NULL);
1091 			freerbuf(bp);
1092 		}
1093 		kmem_free(iobp, sizeof (*iobp));
1094 	}
1095 	return (DDI_SUCCESS);
1096 }
1097 
1098 /* ARGSUSED */
1099 caddr_t
1100 dadk_iob_htoc(opaque_t objp, struct tgdk_iob *iobp)
1101 {
1102 	return (iobp->b_bp->b_un.b_addr+iobp->b_pbyteoff);
1103 }
1104 
1105 
1106 caddr_t
1107 dadk_iob_xfer(opaque_t objp, struct tgdk_iob *iobp, int rw)
1108 {
1109 	struct dadk	*dadkp = (struct dadk *)objp;
1110 	struct buf	*bp;
1111 	int		err;
1112 
1113 	bp = iobp->b_bp;
1114 	if (dadkp->dad_rdonly && !(rw & B_READ)) {
1115 		bioerror(bp, EROFS);
1116 		return (NULL);
1117 	}
1118 
1119 	bp->b_flags |= (B_BUSY | rw);
1120 	bp->b_bcount = iobp->b_pbytecnt;
1121 	SET_BP_SEC(bp, iobp->b_psec);
1122 	bp->av_back = (struct buf *)0;
1123 	bp->b_resid = 0;
1124 
1125 	/* call flow control */
1126 	mutex_enter(&dadkp->dad_cmd_mutex);
1127 	dadkp->dad_cmd_count++;
1128 	mutex_exit(&dadkp->dad_cmd_mutex);
1129 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
1130 	err = biowait(bp);
1131 
1132 	bp->b_bcount = iobp->b_xfer;
1133 	bp->b_flags &= ~(B_DONE|B_BUSY);
1134 
1135 	if (err)
1136 		return (NULL);
1137 
1138 	return (bp->b_un.b_addr+iobp->b_pbyteoff);
1139 }
1140 
1141 static void
1142 dadk_transport(opaque_t com_data, struct buf *bp)
1143 {
1144 	struct dadk *dadkp = (struct dadk *)com_data;
1145 
1146 	if (CTL_TRANSPORT(dadkp->dad_ctlobjp, GDA_BP_PKT(bp)) ==
1147 	    CTL_SEND_SUCCESS)
1148 		return;
1149 	dadk_restart((void*)GDA_BP_PKT(bp));
1150 }
1151 
1152 static int
1153 dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t), caddr_t arg)
1154 {
1155 	struct cmpkt *pktp;
1156 	struct dadk *dadkp = (struct dadk *)com_data;
1157 
1158 	if (GDA_BP_PKT(bp))
1159 		return (DDI_SUCCESS);
1160 
1161 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, func, arg);
1162 	if (!pktp)
1163 		return (DDI_FAILURE);
1164 
1165 	return (dadk_ioprep(dadkp, pktp));
1166 }
1167 
1168 /*
1169  * Read, Write preparation
1170  */
1171 static int
1172 dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp)
1173 {
1174 	struct buf *bp;
1175 
1176 	bp = pktp->cp_bp;
1177 	if (bp->b_forw == (struct buf *)dadkp)
1178 		*((char *)(pktp->cp_cdbp)) = (char)(intptr_t)bp->b_back;
1179 
1180 	else if (bp->b_flags & B_READ)
1181 		*((char *)(pktp->cp_cdbp)) = DCMD_READ;
1182 	else
1183 		*((char *)(pktp->cp_cdbp)) = DCMD_WRITE;
1184 	pktp->cp_byteleft = bp->b_bcount;
1185 
1186 	/* setup the bad block list handle */
1187 	pktp->cp_private = BBH_GETHANDLE(dadkp->dad_bbhobjp, bp);
1188 	return (dadk_iosetup(dadkp, pktp));
1189 }
1190 
1191 static int
1192 dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp)
1193 {
1194 	struct buf	*bp;
1195 	bbh_cookie_t	bbhckp;
1196 	int		seccnt;
1197 
1198 	seccnt = pktp->cp_bytexfer >> dadkp->dad_secshf;
1199 	pktp->cp_secleft -= seccnt;
1200 
1201 	if (pktp->cp_secleft) {
1202 		pktp->cp_srtsec += seccnt;
1203 	} else {
1204 		/* get the first cookie from the bad block list */
1205 		if (!pktp->cp_private) {
1206 			bp = pktp->cp_bp;
1207 			pktp->cp_srtsec  = GET_BP_SEC(bp);
1208 			pktp->cp_secleft = (bp->b_bcount >> dadkp->dad_secshf);
1209 		} else {
1210 			bbhckp = BBH_HTOC(dadkp->dad_bbhobjp,
1211 			    pktp->cp_private);
1212 			pktp->cp_srtsec = BBH_GETCK_SECTOR(dadkp->dad_bbhobjp,
1213 			    bbhckp);
1214 			pktp->cp_secleft = BBH_GETCK_SECLEN(dadkp->dad_bbhobjp,
1215 			    bbhckp);
1216 		}
1217 	}
1218 
1219 	pktp->cp_bytexfer = pktp->cp_secleft << dadkp->dad_secshf;
1220 
1221 	if (CTL_IOSETUP(dadkp->dad_ctlobjp, pktp)) {
1222 		return (DDI_SUCCESS);
1223 	} else {
1224 		return (DDI_FAILURE);
1225 	}
1226 
1227 
1228 
1229 
1230 }
1231 
1232 static struct cmpkt *
1233 dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp, struct buf *bp,
1234     void (*cb_func)(struct buf *), int (*func)(caddr_t), caddr_t arg)
1235 {
1236 	struct cmpkt *pktp;
1237 
1238 	pktp = gda_pktprep(dadkp->dad_ctlobjp, in_pktp, (opaque_t)bp, func,
1239 	    arg);
1240 
1241 	if (pktp) {
1242 		pktp->cp_callback = dadk_pktcb;
1243 		pktp->cp_time = DADK_IO_TIME;
1244 		pktp->cp_flags = 0;
1245 		pktp->cp_iodone = cb_func;
1246 		pktp->cp_dev_private = (opaque_t)dadkp;
1247 
1248 	}
1249 
1250 	return (pktp);
1251 }
1252 
1253 
1254 static void
1255 dadk_restart(void *vpktp)
1256 {
1257 	struct cmpkt *pktp = (struct cmpkt *)vpktp;
1258 
1259 	if (dadk_ioretry(pktp, QUE_COMMAND) == JUST_RETURN)
1260 		return;
1261 	pktp->cp_iodone(pktp->cp_bp);
1262 }
1263 
/*
 * Retry/termination policy for a failed packet.
 *
 * QUE_COMMAND: attempt to re-transport the packet up to
 * DADK_RETRY_COUNT times; on success return JUST_RETURN (the command
 * is in flight again).  If retries are exhausted or transport fails,
 * mark the buf ENXIO and fall through to the error-completion path.
 *
 * COMMAND_DONE_ERROR: account the untransferred bytes in b_resid and,
 * if no error is set yet, classify it: an aborted DCMD_FLUSH_CACHE is
 * treated as "not supported" (ENOTSUP, dad_noflush latched so we stop
 * issuing flushes); anything else becomes EIO.
 *
 * Returns COMMAND_DONE when the caller should complete the request,
 * JUST_RETURN when the packet has been re-queued.
 */
static int
dadk_ioretry(struct cmpkt *pktp, int action)
{
	struct buf *bp;
	struct dadk *dadkp = PKT2DADK(pktp);

	switch (action) {
	case QUE_COMMAND:
		if (pktp->cp_retry++ < DADK_RETRY_COUNT) {
			/* Re-prepare the transfer leg and resubmit. */
			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);
			if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) ==
			    CTL_SEND_SUCCESS) {
				return (JUST_RETURN);
			}
			gda_log(dadkp->dad_sd->sd_dev, dadk_name,
			    CE_WARN, "transport of command fails\n");
		} else
			gda_log(dadkp->dad_sd->sd_dev,
			    dadk_name, CE_WARN,
			    "exceeds maximum number of retries\n");
		bioerror(pktp->cp_bp, ENXIO);
		/*FALLTHROUGH*/
	case COMMAND_DONE_ERROR:
		/* Credit back everything that was not transferred. */
		bp = pktp->cp_bp;
		bp->b_resid += pktp->cp_byteleft - pktp->cp_bytexfer +
		    pktp->cp_resid;
		if (geterror(bp) == 0) {
			if ((*((char *)(pktp->cp_cdbp)) == DCMD_FLUSH_CACHE) &&
			    (pktp->cp_dev_private == (opaque_t)dadkp) &&
			    ((int)(*(char *)pktp->cp_scbp) == DERR_ABORT)) {
				/*
				 * Flag "unimplemented" responses for
				 * DCMD_FLUSH_CACHE as ENOTSUP
				 */
				bioerror(bp, ENOTSUP);
				mutex_enter(&dadkp->dad_mutex);
				dadkp->dad_noflush = 1;
				mutex_exit(&dadkp->dad_mutex);
			} else {
				bioerror(bp, EIO);
			}
		}
		/*FALLTHROUGH*/
	case COMMAND_DONE:
	default:
		return (COMMAND_DONE);
	}
}
1312 
1313 
1314 static void
1315 dadk_pktcb(struct cmpkt *pktp)
1316 {
1317 	int action;
1318 	struct dadkio_rwcmd *rwcmdp;
1319 
1320 	rwcmdp = (struct dadkio_rwcmd *)pktp->cp_passthru;  /* ioctl packet */
1321 
1322 	if (pktp->cp_reason == CPS_SUCCESS) {
1323 		if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT))
1324 			rwcmdp->status.status = DADKIO_STAT_NO_ERROR;
1325 		pktp->cp_iodone(pktp->cp_bp);
1326 		return;
1327 	}
1328 
1329 	if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT)) {
1330 		if (pktp->cp_reason == CPS_CHKERR)
1331 			dadk_recorderr(pktp, rwcmdp);
1332 		dadk_iodone(pktp->cp_bp);
1333 		return;
1334 	}
1335 
1336 	if (pktp->cp_reason == CPS_CHKERR)
1337 		action = dadk_chkerr(pktp);
1338 	else
1339 		action = COMMAND_DONE_ERROR;
1340 
1341 	if (action == JUST_RETURN)
1342 		return;
1343 
1344 	if (action != COMMAND_DONE) {
1345 		if ((dadk_ioretry(pktp, action)) == JUST_RETURN)
1346 			return;
1347 	}
1348 	pktp->cp_iodone(pktp->cp_bp);
1349 }
1350 
1351 
1352 
/*
 * Error-disposition table indexed by the DERR_* status byte returned
 * by the device.  d_action selects the handling in dadk_pktcb() /
 * dadk_ioretry() (retry, complete with error, complete, or just
 * return); d_severity selects the gda_errmsg() log severity used by
 * dadk_chkerr() and dadk_recorderr().
 */
static struct dadkio_derr dadk_errtab[] = {
	{COMMAND_DONE, GDA_INFORMATIONAL},	/*  0 DERR_SUCCESS	*/
	{QUE_COMMAND, GDA_FATAL},		/*  1 DERR_AMNF		*/
	{QUE_COMMAND, GDA_FATAL},		/*  2 DERR_TKONF	*/
	{COMMAND_DONE_ERROR, GDA_INFORMATIONAL}, /* 3 DERR_ABORT	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/*  4 DERR_DWF		*/
	{QUE_COMMAND, GDA_FATAL},		/*  5 DERR_IDNF		*/
	{JUST_RETURN, GDA_INFORMATIONAL},	/*  6 DERR_BUSY		*/
	{QUE_COMMAND, GDA_FATAL},		/*  7 DERR_UNC		*/
	{QUE_COMMAND, GDA_RETRYABLE},		/*  8 DERR_BBK		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/*  9 DERR_INVCDB	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 10 DERR_HARD		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 11 DERR_ILI		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 12 DERR_EOM		*/
	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 13 DERR_MCR		*/
	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 14 DERR_RECOVER	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 15 DERR_NOTREADY	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/* 16 DERR_MEDIUM	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 17 DERR_HW		*/
	{COMMAND_DONE, GDA_FATAL},		/* 18 DERR_ILL		*/
	{COMMAND_DONE, GDA_FATAL},		/* 19 DERR_UNIT_ATTN	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 20 DERR_DATA_PROT	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 21 DERR_MISCOMPARE	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/* 22 DERR_ICRC		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 23 DERR_RESV		*/
};
1379 
/*
 * Classify a check-condition failure.  Updates the unit's error
 * kstats, logs the error (unless the packet is a silent passthru),
 * schedules a delayed restart for DERR_BUSY, and returns the
 * disposition (COMMAND_DONE / QUE_COMMAND / COMMAND_DONE_ERROR /
 * JUST_RETURN) from dadk_errtab.
 *
 * NOTE(review): 'scb' indexes dadk_errtab without a bounds check —
 * this assumes the device status byte is always a valid DERR_* value
 * (0..23); confirm against the controller layer that fills cp_scbp.
 */
static int
dadk_chkerr(struct cmpkt *pktp)
{
	int err_blkno;
	struct dadk *dadkp = PKT2DADK(pktp);
	dadk_errstats_t *dep;
	int scb = *(char *)pktp->cp_scbp;

	if (scb == DERR_SUCCESS) {
		/* Succeeded after retries: count a recovered error. */
		if (pktp->cp_retry != 0 && dadkp->dad_errstats != NULL) {
			dep = (dadk_errstats_t *)
			    dadkp->dad_errstats->ks_data;
			dep->dadk_rq_recov_err.value.ui32++;
		}
		return (COMMAND_DONE);
	}

	/* Best-effort failing block number; -1 on the first attempt. */
	if (pktp->cp_retry) {
		err_blkno = pktp->cp_srtsec + ((pktp->cp_bytexfer -
		    pktp->cp_resid) >> dadkp->dad_secshf);
	} else
		err_blkno = -1;

	if (dadkp->dad_errstats != NULL) {
		dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;

		/* Soft/hard counters keyed off the table severity. */
		switch (dadk_errtab[scb].d_severity) {
			case GDA_RETRYABLE:
				dep->dadk_softerrs.value.ui32++;
				break;

			case GDA_FATAL:
				dep->dadk_harderrs.value.ui32++;
				break;

			default:
				break;
		}

		/* Category counters keyed off the specific status byte. */
		switch (scb) {
			case DERR_INVCDB:
			case DERR_ILI:
			case DERR_EOM:
			case DERR_HW:
			case DERR_ICRC:
				dep->dadk_transerrs.value.ui32++;
				break;

			case DERR_AMNF:
			case DERR_TKONF:
			case DERR_DWF:
			case DERR_BBK:
			case DERR_UNC:
			case DERR_HARD:
			case DERR_MEDIUM:
			case DERR_DATA_PROT:
			case DERR_MISCOMP:
				dep->dadk_rq_media_err.value.ui32++;
				break;

			case DERR_NOTREADY:
				dep->dadk_rq_ntrdy_err.value.ui32++;
				break;

			case DERR_IDNF:
			case DERR_UNIT_ATTN:
				dep->dadk_rq_nodev_err.value.ui32++;
				break;

			case DERR_ILL:
			case DERR_RESV:
				dep->dadk_rq_illrq_err.value.ui32++;
				break;

			default:
				break;
		}
	}

	/* if attempting to read a sector from a cdrom audio disk */
	if ((dadkp->dad_cdrom) &&
	    (*((char *)(pktp->cp_cdbp)) == DCMD_READ) &&
	    (scb == DERR_ILL)) {
		return (COMMAND_DONE);
	}
	if (pktp->cp_passthru == NULL) {
		gda_errmsg(dadkp->dad_sd, pktp, dadk_name,
		    dadk_errtab[scb].d_severity, pktp->cp_srtsec,
		    err_blkno, dadk_cmds, dadk_sense);
	}

	/* Busy device: retry later via a timeout rather than immediately. */
	if (scb == DERR_BUSY) {
		(void) timeout(dadk_restart, (void *)pktp, DADK_BSY_TIMEOUT);
	}

	return (dadk_errtab[scb].d_action);
}
1477 
1478 static void
1479 dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp)
1480 {
1481 	struct dadk *dadkp;
1482 	int scb;
1483 
1484 	dadkp = PKT2DADK(pktp);
1485 	scb = (int)(*(char *)pktp->cp_scbp);
1486 
1487 
1488 	rwcmdp->status.failed_blk = rwcmdp->blkaddr +
1489 	    ((pktp->cp_bytexfer - pktp->cp_resid) >> dadkp->dad_secshf);
1490 
1491 	rwcmdp->status.resid = pktp->cp_bp->b_resid +
1492 	    pktp->cp_byteleft - pktp->cp_bytexfer + pktp->cp_resid;
1493 	switch ((int)(* (char *)pktp->cp_scbp)) {
1494 	case DERR_AMNF:
1495 	case DERR_ABORT:
1496 		rwcmdp->status.status = DADKIO_STAT_ILLEGAL_REQUEST;
1497 		break;
1498 	case DERR_DWF:
1499 	case DERR_IDNF:
1500 		rwcmdp->status.status = DADKIO_STAT_ILLEGAL_ADDRESS;
1501 		break;
1502 	case DERR_TKONF:
1503 	case DERR_UNC:
1504 	case DERR_BBK:
1505 		rwcmdp->status.status = DADKIO_STAT_MEDIUM_ERROR;
1506 		rwcmdp->status.failed_blk_is_valid = 1;
1507 		rwcmdp->status.resid = 0;
1508 		break;
1509 	case DERR_BUSY:
1510 		rwcmdp->status.status = DADKIO_STAT_NOT_READY;
1511 		break;
1512 	case DERR_INVCDB:
1513 	case DERR_HARD:
1514 		rwcmdp->status.status = DADKIO_STAT_HARDWARE_ERROR;
1515 		break;
1516 	case DERR_ICRC:
1517 	default:
1518 		rwcmdp->status.status = DADKIO_STAT_NOT_SUPPORTED;
1519 	}
1520 
1521 	if (rwcmdp->flags & DADKIO_FLAG_SILENT)
1522 		return;
1523 	gda_errmsg(dadkp->dad_sd, pktp, dadk_name, dadk_errtab[scb].d_severity,
1524 	    rwcmdp->blkaddr, rwcmdp->status.failed_blk,
1525 	    dadk_cmds, dadk_sense);
1526 }
1527 
1528 /*ARGSUSED*/
1529 static void
1530 dadk_polldone(struct buf *bp)
1531 {
1532 	struct cmpkt *pktp;
1533 	struct dadk *dadkp;
1534 
1535 	pktp  = GDA_BP_PKT(bp);
1536 	dadkp = PKT2DADK(pktp);
1537 	mutex_enter(&dadkp->dad_cmd_mutex);
1538 	dadkp->dad_cmd_count--;
1539 	mutex_exit(&dadkp->dad_cmd_mutex);
1540 }
1541 
/*
 * Completion routine for normal I/O packets.  If the request still has
 * bytes outstanding (multi-extent transfer) and no error occurred, set
 * up and transport the next leg.  Otherwise release the packet and its
 * bad-block handle, dequeue the buf from flow control, drop the
 * outstanding-command count, and signal completion via biodone().
 */
static void
dadk_iodone(struct buf *bp)
{
	struct cmpkt *pktp;
	struct dadk *dadkp;

	pktp  = GDA_BP_PKT(bp);
	dadkp = PKT2DADK(pktp);

	/* check for all iodone */
	pktp->cp_byteleft -= pktp->cp_bytexfer;
	if (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
		/* more to do: fresh retry budget for the next leg */
		pktp->cp_retry = 0;
		(void) dadk_iosetup(dadkp, pktp);


	/* 	transport the next one */
		if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) == CTL_SEND_SUCCESS)
			return;
		if ((dadk_ioretry(pktp, QUE_COMMAND)) == JUST_RETURN)
			return;
	}

	/* start next one */
	FLC_DEQUE(dadkp->dad_flcobjp, bp);

	/* free pkt */
	if (pktp->cp_private)
		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
	mutex_enter(&dadkp->dad_cmd_mutex);
	dadkp->dad_cmd_count--;
	mutex_exit(&dadkp->dad_cmd_mutex);
	biodone(bp);
}
1577 
/*
 * Block the caller until the removable-media state differs from
 * *state; on return *state holds the new state.  Starts the per-device
 * watch thread on first use (dad_thread_cnt tracks waiters; the watch
 * thread exits when it drops to zero).  Returns ENXIO for
 * non-removable devices, EINTR if the wait is interrupted by a signal.
 */
int
dadk_check_media(opaque_t objp, int *state)
{
	struct dadk *dadkp = (struct dadk *)objp;

	if (!dadkp->dad_rmb) {
		return (ENXIO);
	}
#ifdef DADK_DEBUG
	if (dadk_debug & DSTATE)
		PRF("dadk_check_media: user state %x disk state %x\n",
		    *state, dadkp->dad_iostate);
#endif
	/*
	 * If state already changed just return
	 */
	if (*state != dadkp->dad_iostate) {
		*state = dadkp->dad_iostate;
		return (0);
	}

	/*
	 * Startup polling on thread state
	 */
	mutex_enter(&dadkp->dad_mutex);
	if (dadkp->dad_thread_cnt == 0) {
		/*
		 * One thread per removable dadk device
		 */
		(void) thread_create(NULL, 0, dadk_watch_thread, dadkp, 0, &p0,
		    TS_RUN, v.v_maxsyspri - 2);
	}
	dadkp->dad_thread_cnt++;

	/*
	 * Wait for state to change
	 */
	do {
		/* cv_wait_sig() returns 0 when interrupted by a signal */
		if (cv_wait_sig(&dadkp->dad_state_cv, &dadkp->dad_mutex) == 0) {
			dadkp->dad_thread_cnt--;
			mutex_exit(&dadkp->dad_mutex);
			return (EINTR);
		}
	} while (*state == dadkp->dad_iostate);
	*state = dadkp->dad_iostate;
	dadkp->dad_thread_cnt--;
	mutex_exit(&dadkp->dad_mutex);
	return (0);
}
1627 
1628 
1629 #define	MEDIA_ACCESS_DELAY 2000000
1630 
/*
 * Per-device media watch thread started by dadk_check_media().  Polls
 * the device state every dadk_check_media_time microseconds and wakes
 * waiters (dad_state_cv) when the state changes.  Exits once no
 * waiters remain (dad_thread_cnt reaches zero).
 *
 * NOTE(review): dad_thread_cnt and dad_iostate are read here without
 * dad_mutex — presumably tolerated as a benign race; confirm.
 */
static void
dadk_watch_thread(struct dadk *dadkp)
{
	enum dkio_state state;
	int interval;

	interval = drv_usectohz(dadk_check_media_time);

	do {
		if (dadk_rmb_ioctl(dadkp, DCMD_GET_STATE, (intptr_t)&state, 0,
		    DADK_SILENT)) {
			/*
			 * Assume state remained the same
			 */
			state = dadkp->dad_iostate;
		}

		/*
		 * now signal the waiting thread if this is *not* the
		 * specified state;
		 * delay the signal if the state is DKIO_INSERTED
		 * to allow the target to recover
		 */
		if (state != dadkp->dad_iostate) {

			dadkp->dad_iostate = state;
			if (state == DKIO_INSERTED) {
				/*
				 * delay the signal to give the drive a chance
				 * to do what it apparently needs to do
				 */
				(void) timeout((void(*)(void *))cv_broadcast,
				    (void *)&dadkp->dad_state_cv,
				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
			} else {
				cv_broadcast(&dadkp->dad_state_cv);
			}
		}
		delay(interval);
	} while (dadkp->dad_thread_cnt);
}
1672 
1673 int
1674 dadk_inquiry(opaque_t objp, opaque_t *inqpp)
1675 {
1676 	struct dadk *dadkp = (struct dadk *)objp;
1677 	struct scsi_inquiry **sinqpp = (struct scsi_inquiry **)inqpp;
1678 
1679 	if (dadkp && dadkp->dad_sd && dadkp->dad_sd->sd_inq) {
1680 		*sinqpp = dadkp->dad_sd->sd_inq;
1681 		return (DDI_SUCCESS);
1682 	}
1683 
1684 	return (DDI_FAILURE);
1685 }
1686 
1687 static int
1688 dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags, int silent)
1689 
1690 {
1691 	struct buf *bp;
1692 	int err;
1693 	struct cmpkt *pktp;
1694 
1695 	if ((bp = getrbuf(KM_SLEEP)) == NULL) {
1696 		return (ENOMEM);
1697 	}
1698 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_rmb_iodone, NULL, NULL);
1699 	if (!pktp) {
1700 		freerbuf(bp);
1701 		return (ENOMEM);
1702 	}
1703 	bp->b_back  = (struct buf *)arg;
1704 	bp->b_forw  = (struct buf *)dadkp->dad_flcobjp;
1705 	pktp->cp_passthru = (opaque_t)(intptr_t)silent;
1706 
1707 	err = dadk_ctl_ioctl(dadkp, cmd, (uintptr_t)pktp, flags);
1708 	freerbuf(bp);
1709 	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
1710 	return (err);
1711 
1712 
1713 }
1714 
1715 static void
1716 dadk_rmb_iodone(struct buf *bp)
1717 {
1718 	struct cmpkt *pktp;
1719 	struct dadk *dadkp;
1720 
1721 	pktp  = GDA_BP_PKT(bp);
1722 	dadkp = PKT2DADK(pktp);
1723 
1724 	bp->b_flags &= ~(B_DONE|B_BUSY);
1725 
1726 	/* Start next one */
1727 	FLC_DEQUE(dadkp->dad_flcobjp, bp);
1728 
1729 	mutex_enter(&dadkp->dad_cmd_mutex);
1730 	dadkp->dad_cmd_count--;
1731 	mutex_exit(&dadkp->dad_cmd_mutex);
1732 	biodone(bp);
1733 }
1734 
1735 static int
1736 dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp, dev_t dev,
1737 	enum uio_seg dataspace, int rw)
1738 {
1739 	struct dadkio_rwcmd *rwcmdp = (struct dadkio_rwcmd *)cmdp;
1740 	struct buf	*bp;
1741 	struct iovec	aiov;
1742 	struct uio	auio;
1743 	struct uio	*uio = &auio;
1744 	int		status;
1745 
1746 	bp = getrbuf(KM_SLEEP);
1747 
1748 	bp->av_forw = bp->b_forw = (struct buf *)dadkp;
1749 	bp->b_back  = (struct buf *)rwcmdp;	/* ioctl packet */
1750 
1751 	bzero((caddr_t)&auio, sizeof (struct uio));
1752 	bzero((caddr_t)&aiov, sizeof (struct iovec));
1753 	aiov.iov_base = rwcmdp->bufaddr;
1754 	aiov.iov_len = rwcmdp->buflen;
1755 	uio->uio_iov = &aiov;
1756 
1757 	uio->uio_iovcnt = 1;
1758 	uio->uio_resid = rwcmdp->buflen;
1759 	uio->uio_segflg = dataspace;
1760 
1761 	/* Let physio do the rest... */
1762 	status = physio(dadk_dk_strategy, bp, dev, rw, dadkmin, uio);
1763 
1764 	freerbuf(bp);
1765 	return (status);
1766 
1767 }
1768 
/* Do not let a user gendisk request get too big or */
/* else we could use too many resources.	    */
1771 
/*
 * physio() min-phys routine for user gendisk transfers: clamp each
 * buf to dadk_dk_maxphys so one request cannot consume too many
 * resources.
 */
static void
dadkmin(struct buf *bp)
{
	if (bp->b_bcount > dadk_dk_maxphys)
		bp->b_bcount = dadk_dk_maxphys;
}
1778 
1779 static int
1780 dadk_dk_strategy(struct buf *bp)
1781 {
1782 	dadk_dk((struct dadk *)bp->av_forw, (struct dadkio_rwcmd *)bp->b_back,
1783 	    bp);
1784 	return (0);
1785 }
1786 
1787 static void
1788 dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *rwcmdp, struct buf *bp)
1789 {
1790 	struct  cmpkt *pktp;
1791 
1792 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, NULL, NULL);
1793 	if (!pktp) {
1794 		bioerror(bp, ENOMEM);
1795 		biodone(bp);
1796 		return;
1797 	}
1798 
1799 	pktp->cp_passthru = rwcmdp;
1800 
1801 	(void) dadk_ioprep(dadkp, pktp);
1802 
1803 	mutex_enter(&dadkp->dad_cmd_mutex);
1804 	dadkp->dad_cmd_count++;
1805 	mutex_exit(&dadkp->dad_cmd_mutex);
1806 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
1807 }
1808 
1809 /*
1810  * There is no existing way to notify cmdk module
1811  * when the command completed, so add this function
1812  * to calculate how many on-going commands.
1813  */
1814 int
1815 dadk_getcmds(opaque_t objp)
1816 {
1817 	struct dadk *dadkp = (struct dadk *)objp;
1818 	int count;
1819 
1820 	mutex_enter(&dadkp->dad_cmd_mutex);
1821 	count = dadkp->dad_cmd_count;
1822 	mutex_exit(&dadkp->dad_cmd_mutex);
1823 	return (count);
1824 }
1825 
1826 /*
1827  * this function was used to calc the cmd for CTL_IOCTL
1828  */
1829 static int
1830 dadk_ctl_ioctl(struct dadk *dadkp, uint32_t cmd, uintptr_t arg, int flag)
1831 {
1832 	int error;
1833 	mutex_enter(&dadkp->dad_cmd_mutex);
1834 	dadkp->dad_cmd_count++;
1835 	mutex_exit(&dadkp->dad_cmd_mutex);
1836 	error = CTL_IOCTL(dadkp->dad_ctlobjp, cmd, arg, flag);
1837 	mutex_enter(&dadkp->dad_cmd_mutex);
1838 	dadkp->dad_cmd_count--;
1839 	mutex_exit(&dadkp->dad_cmd_mutex);
1840 	return (error);
1841 }
1842