xref: /illumos-gate/usr/src/uts/intel/io/dktp/dcdev/dadk.c (revision 89b2a9fbeabf42fa54594df0e5927bcc50a07cc9)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Direct Attached Disk
29  */
30 
31 #include <sys/file.h>
32 #include <sys/scsi/scsi.h>
33 #include <sys/var.h>
34 #include <sys/proc.h>
35 #include <sys/dktp/cm.h>
36 #include <sys/vtoc.h>
37 #include <sys/dkio.h>
38 #include <sys/policy.h>
39 #include <sys/priv.h>
40 
41 #include <sys/dktp/dadev.h>
42 #include <sys/dktp/fctypes.h>
43 #include <sys/dktp/flowctrl.h>
44 #include <sys/dktp/tgcom.h>
45 #include <sys/dktp/tgdk.h>
46 #include <sys/dktp/bbh.h>
47 #include <sys/dktp/dadkio.h>
48 #include <sys/dktp/dadk.h>
49 #include <sys/cdio.h>
50 
51 /*
52  * Local Function Prototypes
53  */
54 static void dadk_restart(void *pktp);
55 static void dadk_pktcb(struct cmpkt *pktp);
56 static void dadk_iodone(struct buf *bp);
57 static void dadk_polldone(struct buf *bp);
58 static void dadk_setcap(struct dadk *dadkp);
59 static void dadk_create_errstats(struct dadk *dadkp, int instance);
60 static void dadk_destroy_errstats(struct dadk *dadkp);
61 
62 static int dadk_chkerr(struct cmpkt *pktp);
63 static int dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp);
64 static int dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp);
65 static int dadk_ioretry(struct cmpkt *pktp, int action);
66 
67 static struct cmpkt *dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp,
68     struct buf *bp, void (*cb_func)(struct buf *), int (*func)(caddr_t),
69     caddr_t arg);
70 
71 static int  dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t),
72     caddr_t arg);
73 static void dadk_transport(opaque_t com_data, struct buf *bp);
74 static int dadk_ctl_ioctl(struct dadk *, uint32_t, uintptr_t, int);
75 
76 struct tgcom_objops dadk_com_ops = {
77 	nodev,
78 	nodev,
79 	dadk_pkt,
80 	dadk_transport,
81 	0, 0
82 };
83 
84 /*
85  * architecture dependent allocation restrictions for dadk_iob_alloc(). For
86  * x86, we'll set dma_attr_addr_hi to dadk_max_phys_addr and dma_attr_sgllen
87  * to dadk_sgl_size during _init().
88  */
89 #if defined(__sparc)
90 static ddi_dma_attr_t dadk_alloc_attr = {
91 	DMA_ATTR_V0,	/* version number */
92 	0x0,		/* lowest usable address */
93 	0xFFFFFFFFull,	/* high DMA address range */
94 	0xFFFFFFFFull,	/* DMA counter register */
95 	1,		/* DMA address alignment */
96 	1,		/* DMA burstsizes */
97 	1,		/* min effective DMA size */
98 	0xFFFFFFFFull,	/* max DMA xfer size */
99 	0xFFFFFFFFull,	/* segment boundary */
100 	1,		/* s/g list length */
101 	512,		/* granularity of device */
102 	0,		/* DMA transfer flags */
103 };
104 #elif defined(__x86)
105 static ddi_dma_attr_t dadk_alloc_attr = {
106 	DMA_ATTR_V0,	/* version number */
107 	0x0,		/* lowest usable address */
108 	0x0,		/* high DMA address range [set in _init()] */
109 	0xFFFFull,	/* DMA counter register */
110 	512,		/* DMA address alignment */
111 	1,		/* DMA burstsizes */
112 	1,		/* min effective DMA size */
113 	0xFFFFFFFFull,	/* max DMA xfer size */
114 	0xFFFFFFFFull,	/* segment boundary */
115 	0,		/* s/g list length [set in _init()] */
116 	512,		/* granularity of device */
117 	0,		/* DMA transfer flags */
118 };
119 
120 uint64_t dadk_max_phys_addr = 0xFFFFFFFFull;
121 int dadk_sgl_size = 0xFF;
122 #endif
123 
124 static int dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags,
125     int silent);
126 static void dadk_rmb_iodone(struct buf *bp);
127 
128 static int dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp,
129     dev_t dev, enum uio_seg dataspace, int rw);
130 static void dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *scmdp,
131     struct buf *bp);
132 static void dadkmin(struct buf *bp);
133 static int dadk_dk_strategy(struct buf *bp);
134 static void dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp);
135 
136 struct tgdk_objops dadk_ops = {
137 	dadk_init,
138 	dadk_free,
139 	dadk_probe,
140 	dadk_attach,
141 	dadk_open,
142 	dadk_close,
143 	dadk_ioctl,
144 	dadk_strategy,
145 	dadk_setgeom,
146 	dadk_getgeom,
147 	dadk_iob_alloc,
148 	dadk_iob_free,
149 	dadk_iob_htoc,
150 	dadk_iob_xfer,
151 	dadk_dump,
152 	dadk_getphygeom,
153 	dadk_set_bbhobj,
154 	dadk_check_media,
155 	dadk_inquiry,
156 	dadk_cleanup,
157 	0
158 };
159 
160 /*
161  * Local static data
162  */
163 
164 #ifdef	DADK_DEBUG
165 #define	DENT	0x0001
166 #define	DERR	0x0002
167 #define	DIO	0x0004
168 #define	DGEOM	0x0010
169 #define	DSTATE  0x0020
170 static	int	dadk_debug = DGEOM;
171 
172 #endif	/* DADK_DEBUG */
173 
174 static int dadk_check_media_time = 3000000;	/* 3 Second State Check */
175 static int dadk_dk_maxphys = 0x80000;
176 
177 static char	*dadk_cmds[] = {
178 	"\000Unknown",			/* unknown 		*/
179 	"\001read sector",		/* DCMD_READ 1		*/
180 	"\002write sector",		/* DCMD_WRITE 2		*/
181 	"\003format track",		/* DCMD_FMTTRK 3	*/
182 	"\004format whole drive",	/* DCMD_FMTDRV 4	*/
183 	"\005recalibrate",		/* DCMD_RECAL  5	*/
184 	"\006seek sector",		/* DCMD_SEEK   6	*/
185 	"\007read verify",		/* DCMD_RDVER  7	*/
186 	"\010read defect list",		/* DCMD_GETDEF 8	*/
187 	"\011lock door",		/* DCMD_LOCK   9	*/
188 	"\012unlock door",		/* DCMD_UNLOCK 10	*/
189 	"\013start motor",		/* DCMD_START_MOTOR 11	*/
190 	"\014stop motor",		/* DCMD_STOP_MOTOR 12	*/
191 	"\015eject",			/* DCMD_EJECT  13	*/
192 	"\016update geometry",		/* DCMD_UPDATE_GEOM  14	*/
193 	"\017get state",		/* DCMD_GET_STATE  15	*/
194 	"\020cdrom pause",		/* DCMD_PAUSE  16	*/
195 	"\021cdrom resume",		/* DCMD_RESUME  17	*/
196 	"\022cdrom play track index",	/* DCMD_PLAYTRKIND  18	*/
197 	"\023cdrom play msf",		/* DCMD_PLAYMSF  19	*/
198 	"\024cdrom sub channel",	/* DCMD_SUBCHNL  20	*/
199 	"\025cdrom read mode 1",	/* DCMD_READMODE1  21	*/
200 	"\026cdrom read toc header",	/* DCMD_READTOCHDR  22	*/
201 	"\027cdrom read toc entry",	/* DCMD_READTOCENT  23	*/
202 	"\030cdrom read offset",	/* DCMD_READOFFSET  24	*/
203 	"\031cdrom read mode 2",	/* DCMD_READMODE2  25	*/
204 	"\032cdrom volume control",	/* DCMD_VOLCTRL  26	*/
205 	"\033flush cache",		/* DCMD_FLUSH_CACHE  27	*/
206 	NULL
207 };
208 
209 static char *dadk_sense[] = {
210 	"\000Success",			/* DERR_SUCCESS		*/
211 	"\001address mark not found",	/* DERR_AMNF		*/
212 	"\002track 0 not found",	/* DERR_TKONF		*/
213 	"\003aborted command",		/* DERR_ABORT		*/
214 	"\004write fault",		/* DERR_DWF		*/
215 	"\005ID not found",		/* DERR_IDNF		*/
216 	"\006drive busy",		/* DERR_BUSY		*/
217 	"\007uncorrectable data error",	/* DERR_UNC		*/
218 	"\010bad block detected",	/* DERR_BBK		*/
219 	"\011invalid command",		/* DERR_INVCDB		*/
220 	"\012device hard error",	/* DERR_HARD		*/
221 	"\013illegal length indicated", /* DERR_ILI		*/
222 	"\014end of media",		/* DERR_EOM		*/
223 	"\015media change requested",	/* DERR_MCR		*/
224 	"\016recovered from error",	/* DERR_RECOVER		*/
225 	"\017device not ready",		/* DERR_NOTREADY	*/
226 	"\020medium error",		/* DERR_MEDIUM		*/
227 	"\021hardware error",		/* DERR_HW		*/
228 	"\022illegal request",		/* DERR_ILL		*/
229 	"\023unit attention",		/* DERR_UNIT_ATTN	*/
230 	"\024data protection",		/* DERR_DATA_PROT	*/
231 	"\025miscompare",		/* DERR_MISCOMPARE	*/
232 	"\026ICRC error during UDMA",	/* DERR_ICRC		*/
233 	"\027reserved",			/* DERR_RESV		*/
234 	NULL
235 };
236 
237 static char *dadk_name = "Disk";
238 
239 /*
240  *	This is the loadable module wrapper
241  */
242 #include <sys/modctl.h>
243 
244 extern struct mod_ops mod_miscops;
245 
246 static struct modlmisc modlmisc = {
247 	&mod_miscops,	/* Type of module */
248 	"Direct Attached Disk"
249 };
250 
251 static struct modlinkage modlinkage = {
252 	MODREV_1, (void *)&modlmisc, NULL
253 };
254 
/*
 * Loadable module entry point.  On x86 the DMA attributes used by
 * dadk_iob_alloc() are patched with the dadk_max_phys_addr and
 * dadk_sgl_size tunables before the module is installed.
 */
int
_init(void)
{
#ifdef DADK_DEBUG
	if (dadk_debug & DENT)
		PRF("dadk_init: call\n");
#endif

#if defined(__x86)
	/* set the max physical address for iob allocs on x86 */
	dadk_alloc_attr.dma_attr_addr_hi = dadk_max_phys_addr;

	/*
	 * set the sgllen for iob allocs on x86. If this is set less than
	 * the number of pages the buffer will take (taking into account
	 * alignment), it would force the allocator to try and allocate
	 * contiguous pages.
	 */
	dadk_alloc_attr.dma_attr_sgllen = dadk_sgl_size;
#endif

	return (mod_install(&modlinkage));
}
278 
/*
 * Loadable module exit point: unregister the misc module.
 */
int
_fini(void)
{
#ifdef DADK_DEBUG
	if (dadk_debug & DENT)
		PRF("dadk_fini: call\n");
#endif

	return (mod_remove(&modlinkage));
}
289 
/*
 * Report module information via the standard modctl interface.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
295 
296 struct tgdk_obj *
297 dadk_create()
298 {
299 	struct tgdk_obj *dkobjp;
300 	struct dadk *dadkp;
301 
302 	dkobjp = kmem_zalloc((sizeof (*dkobjp) + sizeof (*dadkp)), KM_NOSLEEP);
303 	if (!dkobjp)
304 		return (NULL);
305 	dadkp = (struct dadk *)(dkobjp+1);
306 
307 	dkobjp->tg_ops  = (struct  tgdk_objops *)&dadk_ops;
308 	dkobjp->tg_data = (opaque_t)dadkp;
309 	dkobjp->tg_ext = &(dkobjp->tg_extblk);
310 	dadkp->dad_extp = &(dkobjp->tg_extblk);
311 
312 #ifdef DADK_DEBUG
313 	if (dadk_debug & DENT)
314 		PRF("dadk_create: tgdkobjp= 0x%x dadkp= 0x%x\n", dkobjp, dadkp);
315 #endif
316 	return (dkobjp);
317 }
318 
/*
 * Initialize a dadk instance: bind it to its scsi_device and to the
 * controller object, publish it through sd_private, set up the
 * communication object used by flow control, and initialize the
 * bad-block-handling and flow-control sub-objects.
 *
 * Returns the result of FLC_INIT().
 */
int
dadk_init(opaque_t objp, opaque_t devp, opaque_t flcobjp, opaque_t queobjp,
	opaque_t bbhobjp, void *lkarg)
{
	struct dadk *dadkp = (struct dadk *)objp;
	struct scsi_device *sdevp = (struct scsi_device *)devp;

	dadkp->dad_sd = devp;
	/* the HBA transport handle doubles as the controller object */
	dadkp->dad_ctlobjp = (opaque_t)sdevp->sd_address.a_hba_tran;
	sdevp->sd_private = (caddr_t)dadkp;

	/* initialize the communication object */
	dadkp->dad_com.com_data = (opaque_t)dadkp;
	dadkp->dad_com.com_ops  = &dadk_com_ops;

	dadkp->dad_bbhobjp = bbhobjp;
	BBH_INIT(bbhobjp);

	dadkp->dad_flcobjp = flcobjp;
	mutex_init(&dadkp->dad_cmd_mutex, NULL, MUTEX_DRIVER, NULL);
	dadkp->dad_cmd_count = 0;
	return (FLC_INIT(flcobjp, &(dadkp->dad_com), queobjp, lkarg));
}
342 
/*
 * Release a tgdk/dadk object pair created by dadk_create(): tear down
 * the sub-objects first, then free the combined allocation.
 */
int
dadk_free(struct tgdk_obj *dkobjp)
{
	TGDK_CLEANUP(dkobjp);
	kmem_free(dkobjp, (sizeof (*dkobjp) + sizeof (struct dadk)));

	return (DDI_SUCCESS);
}
351 
352 void
353 dadk_cleanup(struct tgdk_obj *dkobjp)
354 {
355 	struct dadk *dadkp;
356 
357 	dadkp = (struct dadk *)(dkobjp->tg_data);
358 	if (dadkp->dad_sd)
359 		dadkp->dad_sd->sd_private = NULL;
360 	if (dadkp->dad_bbhobjp) {
361 		BBH_FREE(dadkp->dad_bbhobjp);
362 		dadkp->dad_bbhobjp = NULL;
363 	}
364 	if (dadkp->dad_flcobjp) {
365 		FLC_FREE(dadkp->dad_flcobjp);
366 		dadkp->dad_flcobjp = NULL;
367 	}
368 	mutex_destroy(&dadkp->dad_cmd_mutex);
369 }
370 
371 /* ARGSUSED */
372 int
373 dadk_probe(opaque_t objp, int kmsflg)
374 {
375 	struct dadk *dadkp = (struct dadk *)objp;
376 	struct scsi_device *devp;
377 	char   name[80];
378 
379 	devp = dadkp->dad_sd;
380 	if (!devp->sd_inq || (devp->sd_inq->inq_dtype == DTYPE_NOTPRESENT) ||
381 	    (devp->sd_inq->inq_dtype == DTYPE_UNKNOWN)) {
382 		return (DDI_PROBE_FAILURE);
383 	}
384 
385 	switch (devp->sd_inq->inq_dtype) {
386 		case DTYPE_DIRECT:
387 			dadkp->dad_ctype = DKC_DIRECT;
388 			dadkp->dad_extp->tg_nodetype = DDI_NT_BLOCK;
389 			dadkp->dad_extp->tg_ctype = DKC_DIRECT;
390 			break;
391 		case DTYPE_RODIRECT: /* eg cdrom */
392 			dadkp->dad_ctype = DKC_CDROM;
393 			dadkp->dad_extp->tg_rdonly = 1;
394 			dadkp->dad_rdonly = 1;
395 			dadkp->dad_cdrom = 1;
396 			dadkp->dad_extp->tg_nodetype = DDI_NT_CD;
397 			dadkp->dad_extp->tg_ctype = DKC_CDROM;
398 			break;
399 		case DTYPE_WORM:
400 		case DTYPE_OPTICAL:
401 		default:
402 			return (DDI_PROBE_FAILURE);
403 	}
404 
405 	dadkp->dad_extp->tg_rmb = dadkp->dad_rmb = devp->sd_inq->inq_rmb;
406 
407 	dadkp->dad_secshf = SCTRSHFT;
408 	dadkp->dad_blkshf = 0;
409 
410 	/* display the device name */
411 	(void) strcpy(name, "Vendor '");
412 	gda_inqfill((caddr_t)devp->sd_inq->inq_vid, 8, &name[strlen(name)]);
413 	(void) strcat(name, "' Product '");
414 	gda_inqfill((caddr_t)devp->sd_inq->inq_pid, 16, &name[strlen(name)]);
415 	(void) strcat(name, "'");
416 	gda_log(devp->sd_dev, dadk_name, CE_NOTE, "!<%s>\n", name);
417 
418 	return (DDI_PROBE_SUCCESS);
419 }
420 
421 
/*
 * Attach entry point; the dadk layer needs no per-attach work.
 */
/* ARGSUSED */
int
dadk_attach(opaque_t objp)
{
	return (DDI_SUCCESS);
}
428 
429 int
430 dadk_set_bbhobj(opaque_t objp, opaque_t bbhobjp)
431 {
432 	struct dadk *dadkp = (struct dadk *)objp;
433 	/* free the old bbh object */
434 	if (dadkp->dad_bbhobjp)
435 		BBH_FREE(dadkp->dad_bbhobjp);
436 
437 	/* initialize the new bbh object */
438 	dadkp->dad_bbhobjp = bbhobjp;
439 	BBH_INIT(bbhobjp);
440 
441 	return (DDI_SUCCESS);
442 }
443 
/*
 * Open the device.
 *
 * Fixed media that has already been sized (dad_phyg.g_cap != 0) only
 * gets its flow-control kstats restarted.  Removable media is spun up,
 * locked and has its geometry refreshed first (failure of any of those
 * steps fails the open).  Otherwise the write-cache state and the
 * logical/physical geometry are (re)read from the controller; a zero
 * capacity in either geometry fails the open.
 */
/* ARGSUSED */
int
dadk_open(opaque_t objp, int flag)
{
	struct dadk *dadkp = (struct dadk *)objp;
	int error;
	int wce;

	if (!dadkp->dad_rmb) {
		/* already sized on a prior open: just restart kstats */
		if (dadkp->dad_phyg.g_cap) {
			FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
			    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));
			return (DDI_SUCCESS);
		}
	} else {
		/* announce a state change while the media is spun up */
		mutex_enter(&dadkp->dad_mutex);
		dadkp->dad_iostate = DKIO_NONE;
		cv_broadcast(&dadkp->dad_state_cv);
		mutex_exit(&dadkp->dad_mutex);

		if (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0, 0,
		    DADK_SILENT) ||
		    dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT) ||
		    dadk_rmb_ioctl(dadkp, DCMD_UPDATE_GEOM, 0, 0,
		    DADK_SILENT)) {
			return (DDI_FAILURE);
		}

		mutex_enter(&dadkp->dad_mutex);
		dadkp->dad_iostate = DKIO_INSERTED;
		cv_broadcast(&dadkp->dad_state_cv);
		mutex_exit(&dadkp->dad_mutex);
	}

	/*
	 * get write cache enable state
	 * If there is an error, must assume that write cache
	 * is enabled.
	 * NOTE: Since there is currently no Solaris mechanism to
	 * change the state of the Write Cache Enable feature,
	 * this code just checks the value of the WCE bit
	 * obtained at device init time.  If a mechanism
	 * is added to the driver to change WCE, dad_wce
	 * must be updated appropriately.
	 */
	error = dadk_ctl_ioctl(dadkp, DIOCTL_GETWCE,
	    (uintptr_t)&wce, FKIOCTL | FNATIVE);
	mutex_enter(&dadkp->dad_mutex);
	dadkp->dad_wce = (error != 0) || (wce != 0);
	mutex_exit(&dadkp->dad_mutex);

	/* logical disk geometry */
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETGEOM,
	    (uintptr_t)&dadkp->dad_logg, FKIOCTL | FNATIVE);
	if (dadkp->dad_logg.g_cap == 0)
		return (DDI_FAILURE);

	/* get physical disk geometry */
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETPHYGEOM,
	    (uintptr_t)&dadkp->dad_phyg, FKIOCTL | FNATIVE);
	if (dadkp->dad_phyg.g_cap == 0)
		return (DDI_FAILURE);

	/* derive the sector-size shift factors from the geometry */
	dadk_setcap(dadkp);

	dadk_create_errstats(dadkp,
	    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));

	/* start profiling */
	FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
	    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));

	return (DDI_SUCCESS);
}
518 
519 static void
520 dadk_setcap(struct dadk *dadkp)
521 {
522 	int	 totsize;
523 	int	 i;
524 
525 	totsize = dadkp->dad_phyg.g_secsiz;
526 
527 	if (totsize == 0) {
528 		if (dadkp->dad_cdrom) {
529 			totsize = 2048;
530 		} else {
531 			totsize = NBPSCTR;
532 		}
533 	} else {
534 		/* Round down sector size to multiple of 512B */
535 		totsize &= ~(NBPSCTR-1);
536 	}
537 	dadkp->dad_phyg.g_secsiz = totsize;
538 
539 	/* set sec,block shift factor - (512->0, 1024->1, 2048->2, etc.) */
540 	totsize >>= SCTRSHFT;
541 	for (i = 0; totsize != 1; i++, totsize >>= 1)
542 		;
543 	dadkp->dad_blkshf = i;
544 	dadkp->dad_secshf = i + SCTRSHFT;
545 }
546 
547 
/*
 * Create and install the "cmdkN,error" named kstat for this instance
 * and seed it with the model/serial strings and capacity obtained from
 * the controller.  Does nothing if error stats already exist, and
 * returns silently if the kstat cannot be created.
 */
static void
dadk_create_errstats(struct dadk *dadkp, int instance)
{
	dadk_errstats_t *dep;
	char kstatname[KSTAT_STRLEN];
	dadk_ioc_string_t dadk_ioc_string;

	if (dadkp->dad_errstats)
		return;

	(void) sprintf(kstatname, "cmdk%d,error", instance);
	dadkp->dad_errstats = kstat_create("cmdkerror", instance,
	    kstatname, "device_error", KSTAT_TYPE_NAMED,
	    sizeof (dadk_errstats_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_PERSISTENT);

	if (!dadkp->dad_errstats)
		return;

	dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;

	/* initialize every named statistic before installing the kstat */
	kstat_named_init(&dep->dadk_softerrs,
	    "Soft Errors", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_harderrs,
	    "Hard Errors", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_transerrs,
	    "Transport Errors", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_model,
	    "Model", KSTAT_DATA_CHAR);
	kstat_named_init(&dep->dadk_revision,
	    "Revision", KSTAT_DATA_CHAR);
	kstat_named_init(&dep->dadk_serial,
	    "Serial No", KSTAT_DATA_CHAR);
	kstat_named_init(&dep->dadk_capacity,
	    "Size", KSTAT_DATA_ULONGLONG);
	kstat_named_init(&dep->dadk_rq_media_err,
	    "Media Error", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_ntrdy_err,
	    "Device Not Ready", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_nodev_err,
	    "No Device", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_recov_err,
	    "Recoverable", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_illrq_err,
	    "Illegal Request", KSTAT_DATA_UINT32);

	dadkp->dad_errstats->ks_private = dep;
	dadkp->dad_errstats->ks_update = nulldev;
	kstat_install(dadkp->dad_errstats);

	/* get model */
	dep->dadk_model.value.c[0] = 0;
	dadk_ioc_string.is_buf = &dep->dadk_model.value.c[0];
	dadk_ioc_string.is_size = sizeof (dep->dadk_model.value.c);
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETMODEL,
	    (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);

	/* get serial */
	dep->dadk_serial.value.c[0] = 0;
	dadk_ioc_string.is_buf = &dep->dadk_serial.value.c[0];
	dadk_ioc_string.is_size = sizeof (dep->dadk_serial.value.c);
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETSERIAL,
	    (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);

	/* Get revision (not reported by the controller; left empty) */
	dep->dadk_revision.value.c[0] = 0;

	/* Get capacity */

	dep->dadk_capacity.value.ui64 =
	    (uint64_t)dadkp->dad_logg.g_cap *
	    (uint64_t)dadkp->dad_logg.g_secsiz;
}
621 
622 
623 int
624 dadk_close(opaque_t objp)
625 {
626 	struct dadk *dadkp = (struct dadk *)objp;
627 
628 	if (dadkp->dad_rmb) {
629 		(void) dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0, 0,
630 		    DADK_SILENT);
631 		(void) dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT);
632 	}
633 	FLC_STOP_KSTAT(dadkp->dad_flcobjp);
634 
635 	dadk_destroy_errstats(dadkp);
636 
637 	return (DDI_SUCCESS);
638 }
639 
640 static void
641 dadk_destroy_errstats(struct dadk *dadkp)
642 {
643 	if (!dadkp->dad_errstats)
644 		return;
645 
646 	kstat_delete(dadkp->dad_errstats);
647 	dadkp->dad_errstats = NULL;
648 }
649 
650 
651 int
652 dadk_strategy(opaque_t objp, struct buf *bp)
653 {
654 	struct dadk *dadkp = (struct dadk *)objp;
655 
656 	if (dadkp->dad_rdonly && !(bp->b_flags & B_READ)) {
657 		bioerror(bp, EROFS);
658 		return (DDI_FAILURE);
659 	}
660 
661 	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
662 		bioerror(bp, ENXIO);
663 		return (DDI_FAILURE);
664 	}
665 
666 	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));
667 	mutex_enter(&dadkp->dad_cmd_mutex);
668 	dadkp->dad_cmd_count++;
669 	mutex_exit(&dadkp->dad_cmd_mutex);
670 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
671 
672 	return (DDI_SUCCESS);
673 }
674 
/*
 * Polled dump entry point (crash dump path).  Builds a single
 * CPF_NOINTR packet for the buf and drives it to completion without
 * interrupts, issuing further chunks via dadk_iosetup() until the
 * whole transfer is done or an error is recorded on the buf.
 */
int
dadk_dump(opaque_t objp, struct buf *bp)
{
	struct dadk *dadkp = (struct dadk *)objp;
	struct cmpkt *pktp;

	if (dadkp->dad_rdonly) {
		bioerror(bp, EROFS);
		return (DDI_FAILURE);
	}

	/* the transfer must be a whole number of sectors */
	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
		bioerror(bp, ENXIO);
		return (DDI_FAILURE);
	}

	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));

	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_polldone, NULL, NULL);
	if (!pktp) {
		cmn_err(CE_WARN, "no resources for dumping");
		bioerror(bp, EIO);
		return (DDI_FAILURE);
	}
	/* no interrupts: the transport layer must poll for completion */
	pktp->cp_flags |= CPF_NOINTR;

	(void) dadk_ioprep(dadkp, pktp);
	dadk_transport(dadkp, bp);
	pktp->cp_byteleft -= pktp->cp_bytexfer;

	/* issue the remaining chunks, stopping on the first error */
	while (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
		(void) dadk_iosetup(dadkp, pktp);
		dadk_transport(dadkp, bp);
		pktp->cp_byteleft -= pktp->cp_bytexfer;
	}

	if (pktp->cp_private)
		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
	return (DDI_SUCCESS);
}
716 
/*
 * Ioctl entry point.
 *
 * The cases handled directly here either require driver-level queuing
 * (DKIOCGETDEF, DIOCTL_RWCMD read/write, DKIOCFLUSHWRITECACHE) or a
 * privilege check (DKIOC_UPDATEFW).  For fixed disks every other cmd
 * is passed straight to the controller via dadk_ctl_ioctl(); for
 * removable media the remaining cmds are mapped onto DCMD_* removable
 * media commands and issued through dadk_rmb_ioctl().
 */
/* ARGSUSED  */
int
dadk_ioctl(opaque_t objp, dev_t dev, int cmd, intptr_t arg, int flag,
	cred_t *cred_p, int *rval_p)
{
	struct dadk *dadkp = (struct dadk *)objp;

	switch (cmd) {
	case DKIOCGETDEF:
		{
		struct buf	*bp;
		int		err, head;
		unsigned char	*secbuf;
		STRUCT_DECL(defect_header, adh);

		STRUCT_INIT(adh, flag & FMODELS);

		/*
		 * copyin header ....
		 * yields head number and buffer address
		 */
		if (ddi_copyin((caddr_t)arg, STRUCT_BUF(adh), STRUCT_SIZE(adh),
		    flag))
			return (EFAULT);
		head = STRUCT_FGET(adh, head);
		if (head < 0 || head >= dadkp->dad_phyg.g_head)
			return (ENXIO);
		secbuf = kmem_zalloc(NBPSCTR, KM_SLEEP);
		if (!secbuf)
			return (ENOMEM);
		bp = getrbuf(KM_SLEEP);
		if (!bp) {
			kmem_free(secbuf, NBPSCTR);
			return (ENOMEM);
		}

		bp->b_edev = dev;
		bp->b_dev  = cmpdev(dev);
		bp->b_flags = B_BUSY;
		bp->b_resid = 0;
		bp->b_bcount = NBPSCTR;
		bp->b_un.b_addr = (caddr_t)secbuf;
		bp->b_blkno = head; /* I had to put it somewhere! */
		bp->b_forw = (struct buf *)dadkp;
		bp->b_back = (struct buf *)DCMD_GETDEF;

		/* queue the request through flow control and wait */
		mutex_enter(&dadkp->dad_cmd_mutex);
		dadkp->dad_cmd_count++;
		mutex_exit(&dadkp->dad_cmd_mutex);
		FLC_ENQUE(dadkp->dad_flcobjp, bp);
		err = biowait(bp);
		if (!err) {
			if (ddi_copyout((caddr_t)secbuf,
			    STRUCT_FGETP(adh, buffer), NBPSCTR, flag))
				err = ENXIO;
		}
		kmem_free(secbuf, NBPSCTR);
		freerbuf(bp);
		return (err);
		}
	case DIOCTL_RWCMD:
		{
		struct dadkio_rwcmd *rwcmdp;
		int status, rw;

		/*
		 * copied in by cmdk and, if necessary, converted to the
		 * correct datamodel
		 */
		rwcmdp = (struct dadkio_rwcmd *)(intptr_t)arg;

		/*
		 * handle the complex cases here; we pass these
		 * through to the driver, which will queue them and
		 * handle the requests asynchronously.  The simpler
		 * cases, which can return immediately, fail here, and
		 * the request reverts to the dadk_ioctl routine, which
		 * will reroute them directly to the ata driver.
		 */
		switch (rwcmdp->cmd) {
			case DADKIO_RWCMD_READ :
				/*FALLTHROUGH*/
			case DADKIO_RWCMD_WRITE:
				rw = ((rwcmdp->cmd == DADKIO_RWCMD_WRITE) ?
				    B_WRITE : B_READ);
				status = dadk_dk_buf_setup(dadkp,
				    (opaque_t)rwcmdp, dev, ((flag &FKIOCTL) ?
				    UIO_SYSSPACE : UIO_USERSPACE), rw);
				return (status);
			default:
				return (EINVAL);
			}
		}
	case DKIOC_UPDATEFW:

		/*
		 * Require PRIV_ALL privilege to invoke DKIOC_UPDATEFW
		 * to protect the firmware update from malicious use
		 */
		if (PRIV_POLICY(cred_p, PRIV_ALL, B_FALSE, EPERM, NULL) != 0)
			return (EPERM);
		else
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));

	case DKIOCFLUSHWRITECACHE:
		{
			struct buf *bp;
			int err = 0;
			struct dk_callback *dkc = (struct dk_callback *)arg;
			struct cmpkt *pktp;
			int is_sync = 1;

			mutex_enter(&dadkp->dad_mutex);
			/* nothing to do if flushing is unsupported or WCE off */
			if (dadkp->dad_noflush || !  dadkp->dad_wce) {
				err = dadkp->dad_noflush ? ENOTSUP : 0;
				mutex_exit(&dadkp->dad_mutex);
				/*
				 * If a callback was requested: a
				 * callback will always be done if the
				 * caller saw the DKIOCFLUSHWRITECACHE
				 * ioctl return 0, and never done if the
				 * caller saw the ioctl return an error.
				 */
				if ((flag & FKIOCTL) && dkc != NULL &&
				    dkc->dkc_callback != NULL) {
					(*dkc->dkc_callback)(dkc->dkc_cookie,
					    err);
					/*
					 * Did callback and reported error.
					 * Since we did a callback, ioctl
					 * should return 0.
					 */
					err = 0;
				}
				return (err);
			}
			mutex_exit(&dadkp->dad_mutex);

			bp = getrbuf(KM_SLEEP);

			bp->b_edev = dev;
			bp->b_dev  = cmpdev(dev);
			bp->b_flags = B_BUSY;
			bp->b_resid = 0;
			bp->b_bcount = 0;
			SET_BP_SEC(bp, 0);

			/*
			 * Kernel callers supplying a callback get an
			 * asynchronous flush; dadk_flushdone() will run
			 * the callback and free bp and the copy of dkc.
			 */
			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				struct dk_callback *dkc2 =
				    (struct dk_callback *)kmem_zalloc(
				    sizeof (struct dk_callback), KM_SLEEP);

				bcopy(dkc, dkc2, sizeof (*dkc2));
				/*
				 * Borrow b_list to carry private data
				 * to the b_iodone func.
				 */
				bp->b_list = (struct buf *)dkc2;
				bp->b_iodone = dadk_flushdone;
				is_sync = 0;
			}

			/*
			 * Setup command pkt
			 * dadk_pktprep() can't fail since DDI_DMA_SLEEP set
			 */
			pktp = dadk_pktprep(dadkp, NULL, bp,
			    dadk_iodone, DDI_DMA_SLEEP, NULL);

			pktp->cp_time = DADK_FLUSH_CACHE_TIME;

			*((char *)(pktp->cp_cdbp)) = DCMD_FLUSH_CACHE;
			pktp->cp_byteleft = 0;
			pktp->cp_private = NULL;
			pktp->cp_secleft = 0;
			pktp->cp_srtsec = -1;
			pktp->cp_bytexfer = 0;

			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);

			mutex_enter(&dadkp->dad_cmd_mutex);
			dadkp->dad_cmd_count++;
			mutex_exit(&dadkp->dad_cmd_mutex);
			FLC_ENQUE(dadkp->dad_flcobjp, bp);

			if (is_sync) {
				err = biowait(bp);
				freerbuf(bp);
			}
			return (err);
		}
	default:
		/*
		 * Fixed media: hand everything else to the controller.
		 * Removable media falls through to the switch below.
		 */
		if (!dadkp->dad_rmb)
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));
	}

	/* removable-media commands */
	switch (cmd) {
	case CDROMSTOP:
		return (dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0,
		    0, DADK_SILENT));
	case CDROMSTART:
		return (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0,
		    0, DADK_SILENT));
	case DKIOCLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT));
	case DKIOCUNLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT));
	case DKIOCEJECT:
	case CDROMEJECT:
		{
			int ret;

			/* the door must be unlocked before ejecting */
			if (ret = dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0,
			    DADK_SILENT)) {
				return (ret);
			}
			if (ret = dadk_rmb_ioctl(dadkp, DCMD_EJECT, 0, 0,
			    DADK_SILENT)) {
				return (ret);
			}
			mutex_enter(&dadkp->dad_mutex);
			dadkp->dad_iostate = DKIO_EJECTED;
			cv_broadcast(&dadkp->dad_state_cv);
			mutex_exit(&dadkp->dad_mutex);

			return (0);

		}
	default:
		return (ENOTTY);
	/*
	 * cdrom audio commands
	 */
	case CDROMPAUSE:
		cmd = DCMD_PAUSE;
		break;
	case CDROMRESUME:
		cmd = DCMD_RESUME;
		break;
	case CDROMPLAYMSF:
		cmd = DCMD_PLAYMSF;
		break;
	case CDROMPLAYTRKIND:
		cmd = DCMD_PLAYTRKIND;
		break;
	case CDROMREADTOCHDR:
		cmd = DCMD_READTOCHDR;
		break;
	case CDROMREADTOCENTRY:
		cmd = DCMD_READTOCENT;
		break;
	case CDROMVOLCTRL:
		cmd = DCMD_VOLCTRL;
		break;
	case CDROMSUBCHNL:
		cmd = DCMD_SUBCHNL;
		break;
	case CDROMREADMODE2:
		cmd = DCMD_READMODE2;
		break;
	case CDROMREADMODE1:
		cmd = DCMD_READMODE1;
		break;
	case CDROMREADOFFSET:
		cmd = DCMD_READOFFSET;
		break;
	}
	/* mapped cdrom audio commands, issued non-silently */
	return (dadk_rmb_ioctl(dadkp, cmd, arg, flag, 0));
}
987 
988 int
989 dadk_flushdone(struct buf *bp)
990 {
991 	struct dk_callback *dkc = (struct dk_callback *)bp->b_list;
992 
993 	ASSERT(dkc != NULL && dkc->dkc_callback != NULL);
994 
995 	(*dkc->dkc_callback)(dkc->dkc_cookie, geterror(bp));
996 
997 	kmem_free(dkc, sizeof (*dkc));
998 	freerbuf(bp);
999 	return (0);
1000 }
1001 
1002 int
1003 dadk_getphygeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
1004 {
1005 	struct dadk *dadkp = (struct dadk *)objp;
1006 
1007 	bcopy((caddr_t)&dadkp->dad_phyg, (caddr_t)dkgeom_p,
1008 	    sizeof (struct tgdk_geom));
1009 	return (DDI_SUCCESS);
1010 }
1011 
1012 int
1013 dadk_getgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
1014 {
1015 	struct dadk *dadkp = (struct dadk *)objp;
1016 	bcopy((caddr_t)&dadkp->dad_logg, (caddr_t)dkgeom_p,
1017 	    sizeof (struct tgdk_geom));
1018 	return (DDI_SUCCESS);
1019 }
1020 
1021 int
1022 dadk_setgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
1023 {
1024 	struct dadk *dadkp = (struct dadk *)objp;
1025 
1026 	dadkp->dad_logg.g_cyl = dkgeom_p->g_cyl;
1027 	dadkp->dad_logg.g_head = dkgeom_p->g_head;
1028 	dadkp->dad_logg.g_sec = dkgeom_p->g_sec;
1029 	dadkp->dad_logg.g_cap = dkgeom_p->g_cap;
1030 	return (DDI_SUCCESS);
1031 }
1032 
1033 
1034 tgdk_iob_handle
1035 dadk_iob_alloc(opaque_t objp, daddr_t blkno, ssize_t xfer, int kmsflg)
1036 {
1037 	struct dadk *dadkp = (struct dadk *)objp;
1038 	struct buf *bp;
1039 	struct tgdk_iob *iobp;
1040 	size_t rlen;
1041 
1042 	iobp = kmem_zalloc(sizeof (*iobp), kmsflg);
1043 	if (iobp == NULL)
1044 		return (NULL);
1045 	if ((bp = getrbuf(kmsflg)) == NULL) {
1046 		kmem_free(iobp, sizeof (*iobp));
1047 		return (NULL);
1048 	}
1049 
1050 	iobp->b_psec  = LBLK2SEC(blkno, dadkp->dad_blkshf);
1051 	iobp->b_pbyteoff = (blkno & ((1<<dadkp->dad_blkshf) - 1)) << SCTRSHFT;
1052 	iobp->b_pbytecnt = ((iobp->b_pbyteoff + xfer + dadkp->DAD_SECSIZ - 1)
1053 	    >> dadkp->dad_secshf) << dadkp->dad_secshf;
1054 
1055 	bp->b_un.b_addr = 0;
1056 	/*
1057 	 * use i_ddi_mem_alloc() for now until we have an interface to allocate
1058 	 * memory for DMA which doesn't require a DMA handle. ddi_iopb_alloc()
1059 	 * is obsolete and we want more flexibility in controlling the DMA
1060 	 * address constraints..
1061 	 */
1062 	if (i_ddi_mem_alloc((dadkp->dad_sd)->sd_dev, &dadk_alloc_attr,
1063 	    (size_t)iobp->b_pbytecnt, ((kmsflg == KM_SLEEP) ? 1 : 0), 0, NULL,
1064 	    &bp->b_un.b_addr, &rlen, NULL) != DDI_SUCCESS) {
1065 		freerbuf(bp);
1066 		kmem_free(iobp, sizeof (*iobp));
1067 		return (NULL);
1068 	}
1069 	iobp->b_flag |= IOB_BPALLOC | IOB_BPBUFALLOC;
1070 	iobp->b_bp = bp;
1071 	iobp->b_lblk = blkno;
1072 	iobp->b_xfer = xfer;
1073 	iobp->b_lblk = blkno;
1074 	iobp->b_xfer = xfer;
1075 	return (iobp);
1076 }
1077 
1078 /* ARGSUSED */
1079 int
1080 dadk_iob_free(opaque_t objp, struct tgdk_iob *iobp)
1081 {
1082 	struct buf *bp;
1083 
1084 	if (iobp) {
1085 		if (iobp->b_bp && (iobp->b_flag & IOB_BPALLOC)) {
1086 			bp = iobp->b_bp;
1087 			if (bp->b_un.b_addr && (iobp->b_flag & IOB_BPBUFALLOC))
1088 				i_ddi_mem_free((caddr_t)bp->b_un.b_addr, NULL);
1089 			freerbuf(bp);
1090 		}
1091 		kmem_free(iobp, sizeof (*iobp));
1092 	}
1093 	return (DDI_SUCCESS);
1094 }
1095 
1096 /* ARGSUSED */
1097 caddr_t
1098 dadk_iob_htoc(opaque_t objp, struct tgdk_iob *iobp)
1099 {
1100 	return (iobp->b_bp->b_un.b_addr+iobp->b_pbyteoff);
1101 }
1102 
1103 
1104 caddr_t
1105 dadk_iob_xfer(opaque_t objp, struct tgdk_iob *iobp, int rw)
1106 {
1107 	struct dadk	*dadkp = (struct dadk *)objp;
1108 	struct buf	*bp;
1109 	int		err;
1110 
1111 	bp = iobp->b_bp;
1112 	if (dadkp->dad_rdonly && !(rw & B_READ)) {
1113 		bioerror(bp, EROFS);
1114 		return (NULL);
1115 	}
1116 
1117 	bp->b_flags |= (B_BUSY | rw);
1118 	bp->b_bcount = iobp->b_pbytecnt;
1119 	SET_BP_SEC(bp, iobp->b_psec);
1120 	bp->av_back = (struct buf *)0;
1121 	bp->b_resid = 0;
1122 
1123 	/* call flow control */
1124 	mutex_enter(&dadkp->dad_cmd_mutex);
1125 	dadkp->dad_cmd_count++;
1126 	mutex_exit(&dadkp->dad_cmd_mutex);
1127 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
1128 	err = biowait(bp);
1129 
1130 	bp->b_bcount = iobp->b_xfer;
1131 	bp->b_flags &= ~(B_DONE|B_BUSY);
1132 
1133 	if (err)
1134 		return (NULL);
1135 
1136 	return (bp->b_un.b_addr+iobp->b_pbyteoff);
1137 }
1138 
1139 static void
1140 dadk_transport(opaque_t com_data, struct buf *bp)
1141 {
1142 	struct dadk *dadkp = (struct dadk *)com_data;
1143 
1144 	if (CTL_TRANSPORT(dadkp->dad_ctlobjp, GDA_BP_PKT(bp)) ==
1145 	    CTL_SEND_SUCCESS)
1146 		return;
1147 	dadk_restart((void*)GDA_BP_PKT(bp));
1148 }
1149 
1150 static int
1151 dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t), caddr_t arg)
1152 {
1153 	struct cmpkt *pktp;
1154 	struct dadk *dadkp = (struct dadk *)com_data;
1155 
1156 	if (GDA_BP_PKT(bp))
1157 		return (DDI_SUCCESS);
1158 
1159 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, func, arg);
1160 	if (!pktp)
1161 		return (DDI_FAILURE);
1162 
1163 	return (dadk_ioprep(dadkp, pktp));
1164 }
1165 
1166 /*
1167  * Read, Write preparation
1168  */
1169 static int
1170 dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp)
1171 {
1172 	struct buf *bp;
1173 
1174 	bp = pktp->cp_bp;
1175 	if (bp->b_forw == (struct buf *)dadkp)
1176 		*((char *)(pktp->cp_cdbp)) = (char)(intptr_t)bp->b_back;
1177 
1178 	else if (bp->b_flags & B_READ)
1179 		*((char *)(pktp->cp_cdbp)) = DCMD_READ;
1180 	else
1181 		*((char *)(pktp->cp_cdbp)) = DCMD_WRITE;
1182 	pktp->cp_byteleft = bp->b_bcount;
1183 
1184 	/* setup the bad block list handle */
1185 	pktp->cp_private = BBH_GETHANDLE(dadkp->dad_bbhobjp, bp);
1186 	return (dadk_iosetup(dadkp, pktp));
1187 }
1188 
1189 static int
1190 dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp)
1191 {
1192 	struct buf	*bp;
1193 	bbh_cookie_t	bbhckp;
1194 	int		seccnt;
1195 
1196 	seccnt = pktp->cp_bytexfer >> dadkp->dad_secshf;
1197 	pktp->cp_secleft -= seccnt;
1198 
1199 	if (pktp->cp_secleft) {
1200 		pktp->cp_srtsec += seccnt;
1201 	} else {
1202 		/* get the first cookie from the bad block list */
1203 		if (!pktp->cp_private) {
1204 			bp = pktp->cp_bp;
1205 			pktp->cp_srtsec  = GET_BP_SEC(bp);
1206 			pktp->cp_secleft = (bp->b_bcount >> dadkp->dad_secshf);
1207 		} else {
1208 			bbhckp = BBH_HTOC(dadkp->dad_bbhobjp,
1209 			    pktp->cp_private);
1210 			pktp->cp_srtsec = BBH_GETCK_SECTOR(dadkp->dad_bbhobjp,
1211 			    bbhckp);
1212 			pktp->cp_secleft = BBH_GETCK_SECLEN(dadkp->dad_bbhobjp,
1213 			    bbhckp);
1214 		}
1215 	}
1216 
1217 	pktp->cp_bytexfer = pktp->cp_secleft << dadkp->dad_secshf;
1218 
1219 	if (CTL_IOSETUP(dadkp->dad_ctlobjp, pktp)) {
1220 		return (DDI_SUCCESS);
1221 	} else {
1222 		return (DDI_FAILURE);
1223 	}
1224 
1225 
1226 
1227 
1228 }
1229 
1230 static struct cmpkt *
1231 dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp, struct buf *bp,
1232     void (*cb_func)(struct buf *), int (*func)(caddr_t), caddr_t arg)
1233 {
1234 	struct cmpkt *pktp;
1235 
1236 	pktp = gda_pktprep(dadkp->dad_ctlobjp, in_pktp, (opaque_t)bp, func,
1237 	    arg);
1238 
1239 	if (pktp) {
1240 		pktp->cp_callback = dadk_pktcb;
1241 		pktp->cp_time = DADK_IO_TIME;
1242 		pktp->cp_flags = 0;
1243 		pktp->cp_iodone = cb_func;
1244 		pktp->cp_dev_private = (opaque_t)dadkp;
1245 
1246 	}
1247 
1248 	return (pktp);
1249 }
1250 
1251 
1252 static void
1253 dadk_restart(void *vpktp)
1254 {
1255 	struct cmpkt *pktp = (struct cmpkt *)vpktp;
1256 
1257 	if (dadk_ioretry(pktp, QUE_COMMAND) == JUST_RETURN)
1258 		return;
1259 	pktp->cp_iodone(pktp->cp_bp);
1260 }
1261 
/*
 * Retry/failure disposition for a command packet.
 *
 * QUE_COMMAND: re-issue the command, up to DADK_RETRY_COUNT attempts;
 * returns JUST_RETURN when the retransport is accepted (completion
 * arrives through the normal callback).  On retry exhaustion or
 * transport failure the buf is failed with ENXIO and handling falls
 * through to COMMAND_DONE_ERROR.
 *
 * COMMAND_DONE_ERROR: add the untransferred bytes to b_resid and, if
 * no error is recorded yet, set one: an aborted DCMD_FLUSH_CACHE on a
 * non-passthru packet becomes ENOTSUP (and dad_noflush suppresses
 * future flushes); anything else becomes EIO.
 *
 * Returns COMMAND_DONE unless the command was successfully re-queued.
 */
static int
dadk_ioretry(struct cmpkt *pktp, int action)
{
	struct buf *bp;
	struct dadk *dadkp = PKT2DADK(pktp);

	switch (action) {
	case QUE_COMMAND:
		if (pktp->cp_retry++ < DADK_RETRY_COUNT) {
			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);
			if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) ==
			    CTL_SEND_SUCCESS) {
				return (JUST_RETURN);
			}
			gda_log(dadkp->dad_sd->sd_dev, dadk_name,
			    CE_WARN, "transport of command fails\n");
		} else
			gda_log(dadkp->dad_sd->sd_dev,
			    dadk_name, CE_WARN,
			    "exceeds maximum number of retries\n");
		bioerror(pktp->cp_bp, ENXIO);
		/*FALLTHROUGH*/
	case COMMAND_DONE_ERROR:
		bp = pktp->cp_bp;
		bp->b_resid += pktp->cp_byteleft - pktp->cp_bytexfer +
		    pktp->cp_resid;
		if (geterror(bp) == 0) {
			if ((*((char *)(pktp->cp_cdbp)) == DCMD_FLUSH_CACHE) &&
			    (pktp->cp_dev_private == (opaque_t)dadkp) &&
			    ((int)(*(char *)pktp->cp_scbp) == DERR_ABORT)) {
				/*
				 * Flag "unimplemented" responses for
				 * DCMD_FLUSH_CACHE as ENOTSUP
				 */
				bioerror(bp, ENOTSUP);
				mutex_enter(&dadkp->dad_mutex);
				dadkp->dad_noflush = 1;
				mutex_exit(&dadkp->dad_mutex);
			} else {
				bioerror(bp, EIO);
			}
		}
		/*FALLTHROUGH*/
	case COMMAND_DONE:
	default:
		return (COMMAND_DONE);
	}
}
1310 
1311 
/*
 * Packet completion callback, invoked by the controller for every
 * transported command.  Successful packets are completed directly.
 * Failed passthru (ioctl) packets get their status recorded via
 * dadk_recorderr() and are completed without retry.  Other failures
 * are classified by dadk_chkerr() and then retried, deferred
 * (JUST_RETURN) or completed in error.
 */
static void
dadk_pktcb(struct cmpkt *pktp)
{
	int action;
	struct dadkio_rwcmd *rwcmdp;

	rwcmdp = (struct dadkio_rwcmd *)pktp->cp_passthru;  /* ioctl packet */

	if (pktp->cp_reason == CPS_SUCCESS) {
		if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT))
			rwcmdp->status.status = DADKIO_STAT_NO_ERROR;
		pktp->cp_iodone(pktp->cp_bp);
		return;
	}

	/* failed passthru: record status for the ioctl caller, no retry */
	if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT)) {
		if (pktp->cp_reason == CPS_CHKERR)
			dadk_recorderr(pktp, rwcmdp);
		dadk_iodone(pktp->cp_bp);
		return;
	}

	if (pktp->cp_reason == CPS_CHKERR)
		action = dadk_chkerr(pktp);
	else
		action = COMMAND_DONE_ERROR;

	/* JUST_RETURN: a retry has been scheduled (e.g. busy timeout) */
	if (action == JUST_RETURN)
		return;

	/*
	 * If we are panicking don't retry the command
	 * just fail it so we can go down completing all
	 * of the buffers.
	 */
	if (ddi_in_panic() && action == QUE_COMMAND)
		action = COMMAND_DONE_ERROR;

	if (action != COMMAND_DONE) {
		if ((dadk_ioretry(pktp, action)) == JUST_RETURN)
			return;
	}
	pktp->cp_iodone(pktp->cp_bp);
}
1356 
1357 
1358 
/*
 * Disposition table indexed by the DERR_* status byte returned by the
 * controller: d_action selects the retry/failure handling applied by
 * dadk_chkerr()/dadk_ioretry(), d_severity the log level used when
 * reporting the error.
 */
static struct dadkio_derr dadk_errtab[] = {
	{COMMAND_DONE, GDA_INFORMATIONAL},	/*  0 DERR_SUCCESS	*/
	{QUE_COMMAND, GDA_FATAL},		/*  1 DERR_AMNF		*/
	{QUE_COMMAND, GDA_FATAL},		/*  2 DERR_TKONF	*/
	{COMMAND_DONE_ERROR, GDA_INFORMATIONAL}, /* 3 DERR_ABORT	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/*  4 DERR_DWF		*/
	{QUE_COMMAND, GDA_FATAL},		/*  5 DERR_IDNF		*/
	{JUST_RETURN, GDA_INFORMATIONAL},	/*  6 DERR_BUSY		*/
	{QUE_COMMAND, GDA_FATAL},		/*  7 DERR_UNC		*/
	{QUE_COMMAND, GDA_RETRYABLE},		/*  8 DERR_BBK		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/*  9 DERR_INVCDB	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 10 DERR_HARD		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 11 DERR_ILI		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 12 DERR_EOM		*/
	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 13 DERR_MCR		*/
	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 14 DERR_RECOVER	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 15 DERR_NOTREADY	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/* 16 DERR_MEDIUM	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 17 DERR_HW		*/
	{COMMAND_DONE, GDA_FATAL},		/* 18 DERR_ILL		*/
	{COMMAND_DONE, GDA_FATAL},		/* 19 DERR_UNIT_ATTN	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 20 DERR_DATA_PROT	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 21 DERR_MISCOMPARE	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/* 22 DERR_ICRC		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 23 DERR_RESV		*/
};
1385 
/*
 * Classify a CPS_CHKERR completion by its DERR_* status byte: update
 * the per-disk error kstats, log the error for non-passthru packets,
 * and schedule a delayed retry for DERR_BUSY.  Returns the d_action
 * from dadk_errtab.
 *
 * NOTE(review): scb indexes dadk_errtab without a range check -- this
 * assumes the controller only reports status bytes 0..DERR_RESV;
 * verify against the controller implementations.
 */
static int
dadk_chkerr(struct cmpkt *pktp)
{
	daddr_t err_blkno;
	struct dadk *dadkp = PKT2DADK(pktp);
	dadk_errstats_t *dep;
	int scb = *(char *)pktp->cp_scbp;

	if (scb == DERR_SUCCESS) {
		/* a retry eventually succeeded: count a recovered error */
		if (pktp->cp_retry != 0 && dadkp->dad_errstats != NULL) {
			dep = (dadk_errstats_t *)
			    dadkp->dad_errstats->ks_data;
			dep->dadk_rq_recov_err.value.ui32++;
		}
		return (COMMAND_DONE);
	}

	/* report a failing block only once a transfer has been attempted */
	if (pktp->cp_retry) {
		err_blkno = pktp->cp_srtsec + ((pktp->cp_bytexfer -
		    pktp->cp_resid) >> dadkp->dad_secshf);
	} else
		err_blkno = -1;

	if (dadkp->dad_errstats != NULL) {
		dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;

		/* soft vs. hard error counters, by table severity */
		switch (dadk_errtab[scb].d_severity) {
			case GDA_RETRYABLE:
				dep->dadk_softerrs.value.ui32++;
				break;

			case GDA_FATAL:
				dep->dadk_harderrs.value.ui32++;
				break;

			default:
				break;
		}

		/* error-class counters, by specific status byte */
		switch (scb) {
			case DERR_INVCDB:
			case DERR_ILI:
			case DERR_EOM:
			case DERR_HW:
			case DERR_ICRC:
				dep->dadk_transerrs.value.ui32++;
				break;

			case DERR_AMNF:
			case DERR_TKONF:
			case DERR_DWF:
			case DERR_BBK:
			case DERR_UNC:
			case DERR_HARD:
			case DERR_MEDIUM:
			case DERR_DATA_PROT:
			case DERR_MISCOMP:
				dep->dadk_rq_media_err.value.ui32++;
				break;

			case DERR_NOTREADY:
				dep->dadk_rq_ntrdy_err.value.ui32++;
				break;

			case DERR_IDNF:
			case DERR_UNIT_ATTN:
				dep->dadk_rq_nodev_err.value.ui32++;
				break;

			case DERR_ILL:
			case DERR_RESV:
				dep->dadk_rq_illrq_err.value.ui32++;
				break;

			default:
				break;
		}
	}

	/* if attempting to read a sector from a cdrom audio disk */
	if ((dadkp->dad_cdrom) &&
	    (*((char *)(pktp->cp_cdbp)) == DCMD_READ) &&
	    (scb == DERR_ILL)) {
		return (COMMAND_DONE);
	}
	if (pktp->cp_passthru == NULL) {
		gda_errmsg(dadkp->dad_sd, pktp, dadk_name,
		    dadk_errtab[scb].d_severity, pktp->cp_srtsec,
		    err_blkno, dadk_cmds, dadk_sense);
	}

	/* device busy: back off and retry from timeout context */
	if (scb == DERR_BUSY) {
		(void) timeout(dadk_restart, (void *)pktp, DADK_BSY_TIMEOUT);
	}

	return (dadk_errtab[scb].d_action);
}
1483 
1484 static void
1485 dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp)
1486 {
1487 	struct dadk *dadkp;
1488 	int scb;
1489 
1490 	dadkp = PKT2DADK(pktp);
1491 	scb = (int)(*(char *)pktp->cp_scbp);
1492 
1493 
1494 	rwcmdp->status.failed_blk = rwcmdp->blkaddr +
1495 	    ((pktp->cp_bytexfer - pktp->cp_resid) >> dadkp->dad_secshf);
1496 
1497 	rwcmdp->status.resid = pktp->cp_bp->b_resid +
1498 	    pktp->cp_byteleft - pktp->cp_bytexfer + pktp->cp_resid;
1499 	switch ((int)(* (char *)pktp->cp_scbp)) {
1500 	case DERR_AMNF:
1501 	case DERR_ABORT:
1502 		rwcmdp->status.status = DADKIO_STAT_ILLEGAL_REQUEST;
1503 		break;
1504 	case DERR_DWF:
1505 	case DERR_IDNF:
1506 		rwcmdp->status.status = DADKIO_STAT_ILLEGAL_ADDRESS;
1507 		break;
1508 	case DERR_TKONF:
1509 	case DERR_UNC:
1510 	case DERR_BBK:
1511 		rwcmdp->status.status = DADKIO_STAT_MEDIUM_ERROR;
1512 		rwcmdp->status.failed_blk_is_valid = 1;
1513 		rwcmdp->status.resid = 0;
1514 		break;
1515 	case DERR_BUSY:
1516 		rwcmdp->status.status = DADKIO_STAT_NOT_READY;
1517 		break;
1518 	case DERR_INVCDB:
1519 	case DERR_HARD:
1520 		rwcmdp->status.status = DADKIO_STAT_HARDWARE_ERROR;
1521 		break;
1522 	case DERR_ICRC:
1523 	default:
1524 		rwcmdp->status.status = DADKIO_STAT_NOT_SUPPORTED;
1525 	}
1526 
1527 	if (rwcmdp->flags & DADKIO_FLAG_SILENT)
1528 		return;
1529 	gda_errmsg(dadkp->dad_sd, pktp, dadk_name, dadk_errtab[scb].d_severity,
1530 	    rwcmdp->blkaddr, rwcmdp->status.failed_blk,
1531 	    dadk_cmds, dadk_sense);
1532 }
1533 
1534 /*ARGSUSED*/
1535 static void
1536 dadk_polldone(struct buf *bp)
1537 {
1538 	struct cmpkt *pktp;
1539 	struct dadk *dadkp;
1540 
1541 	pktp  = GDA_BP_PKT(bp);
1542 	dadkp = PKT2DADK(pktp);
1543 	mutex_enter(&dadkp->dad_cmd_mutex);
1544 	dadkp->dad_cmd_count--;
1545 	mutex_exit(&dadkp->dad_cmd_mutex);
1546 }
1547 
/*
 * Normal I/O completion callback.  While bytes remain and no error
 * has been recorded, set up and transport the next sub-transfer
 * (request breakup / bad-block walking); once the request is fully
 * done or failed, release the packet and bad-block handle, notify
 * flow control, and complete the buf.
 */
static void
dadk_iodone(struct buf *bp)
{
	struct cmpkt *pktp;
	struct dadk *dadkp;

	pktp  = GDA_BP_PKT(bp);
	dadkp = PKT2DADK(pktp);

	/* check for all iodone */
	pktp->cp_byteleft -= pktp->cp_bytexfer;
	if (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
		pktp->cp_retry = 0;
		(void) dadk_iosetup(dadkp, pktp);


		/* transport the next sub-transfer */
		if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) == CTL_SEND_SUCCESS)
			return;
		if ((dadk_ioretry(pktp, QUE_COMMAND)) == JUST_RETURN)
			return;
	}

	/* start next one */
	FLC_DEQUE(dadkp->dad_flcobjp, bp);

	/* free pkt */
	if (pktp->cp_private)
		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
	mutex_enter(&dadkp->dad_cmd_mutex);
	dadkp->dad_cmd_count--;
	mutex_exit(&dadkp->dad_cmd_mutex);
	biodone(bp);
}
1583 
/*
 * DKIOCSTATE support for removable media: if the caller's notion of
 * the media state already differs from the driver's, return at once;
 * otherwise start (if necessary) the per-device polling thread and
 * block until the state changes or the wait is interrupted.
 * Returns 0 with *state updated, EINTR on signal, ENXIO if the
 * device is not removable.
 */
int
dadk_check_media(opaque_t objp, int *state)
{
	struct dadk *dadkp = (struct dadk *)objp;

	if (!dadkp->dad_rmb) {
		return (ENXIO);
	}
#ifdef DADK_DEBUG
	if (dadk_debug & DSTATE)
		PRF("dadk_check_media: user state %x disk state %x\n",
		    *state, dadkp->dad_iostate);
#endif
	/*
	 * If state already changed just return
	 */
	if (*state != dadkp->dad_iostate) {
		*state = dadkp->dad_iostate;
		return (0);
	}

	/*
	 * Startup polling on thread state
	 */
	mutex_enter(&dadkp->dad_mutex);
	if (dadkp->dad_thread_cnt == 0) {
		/*
		 * One thread per removable dadk device
		 */
		(void) thread_create(NULL, 0, dadk_watch_thread, dadkp, 0, &p0,
		    TS_RUN, v.v_maxsyspri - 2);
	}
	/* thread_cnt also keeps the watch thread running; see its loop */
	dadkp->dad_thread_cnt++;

	/*
	 * Wait for state to change
	 */
	do {
		/* cv_wait_sig() returns 0 when interrupted by a signal */
		if (cv_wait_sig(&dadkp->dad_state_cv, &dadkp->dad_mutex) == 0) {
			dadkp->dad_thread_cnt--;
			mutex_exit(&dadkp->dad_mutex);
			return (EINTR);
		}
	} while (*state == dadkp->dad_iostate);
	*state = dadkp->dad_iostate;
	dadkp->dad_thread_cnt--;
	mutex_exit(&dadkp->dad_mutex);
	return (0);
}
1633 
1634 
/* settle time before signalling a DKIO_INSERTED transition (usec) */
#define	MEDIA_ACCESS_DELAY 2000000

/*
 * Media-state polling thread, one per removable device while waiters
 * exist.  Periodically queries the drive state and broadcasts on
 * dad_state_cv when it changes; DKIO_INSERTED transitions are
 * signalled after a delay so the drive can settle.
 *
 * NOTE(review): dad_iostate and dad_thread_cnt are read/written here
 * without holding dad_mutex, while dadk_check_media() accesses them
 * under the mutex -- confirm this relaxed access is intentional.
 */
static void
dadk_watch_thread(struct dadk *dadkp)
{
	enum dkio_state state;
	int interval;

	interval = drv_usectohz(dadk_check_media_time);

	do {
		if (dadk_rmb_ioctl(dadkp, DCMD_GET_STATE, (intptr_t)&state, 0,
		    DADK_SILENT)) {
			/*
			 * Assume state remained the same
			 */
			state = dadkp->dad_iostate;
		}

		/*
		 * now signal the waiting thread if this is *not* the
		 * specified state;
		 * delay the signal if the state is DKIO_INSERTED
		 * to allow the target to recover
		 */
		if (state != dadkp->dad_iostate) {

			dadkp->dad_iostate = state;
			if (state == DKIO_INSERTED) {
				/*
				 * delay the signal to give the drive a chance
				 * to do what it apparently needs to do
				 */
				(void) timeout((void(*)(void *))cv_broadcast,
				    (void *)&dadkp->dad_state_cv,
				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
			} else {
				cv_broadcast(&dadkp->dad_state_cv);
			}
		}
		delay(interval);
	} while (dadkp->dad_thread_cnt);
}
1678 
1679 int
1680 dadk_inquiry(opaque_t objp, opaque_t *inqpp)
1681 {
1682 	struct dadk *dadkp = (struct dadk *)objp;
1683 	struct scsi_inquiry **sinqpp = (struct scsi_inquiry **)inqpp;
1684 
1685 	if (dadkp && dadkp->dad_sd && dadkp->dad_sd->sd_inq) {
1686 		*sinqpp = dadkp->dad_sd->sd_inq;
1687 		return (DDI_SUCCESS);
1688 	}
1689 
1690 	return (DDI_FAILURE);
1691 }
1692 
1693 static int
1694 dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags, int silent)
1695 
1696 {
1697 	struct buf *bp;
1698 	int err;
1699 	struct cmpkt *pktp;
1700 
1701 	if ((bp = getrbuf(KM_SLEEP)) == NULL) {
1702 		return (ENOMEM);
1703 	}
1704 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_rmb_iodone, NULL, NULL);
1705 	if (!pktp) {
1706 		freerbuf(bp);
1707 		return (ENOMEM);
1708 	}
1709 	bp->b_back  = (struct buf *)arg;
1710 	bp->b_forw  = (struct buf *)dadkp->dad_flcobjp;
1711 	pktp->cp_passthru = (opaque_t)(intptr_t)silent;
1712 
1713 	err = dadk_ctl_ioctl(dadkp, cmd, (uintptr_t)pktp, flags);
1714 	freerbuf(bp);
1715 	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
1716 	return (err);
1717 
1718 
1719 }
1720 
1721 static void
1722 dadk_rmb_iodone(struct buf *bp)
1723 {
1724 	struct cmpkt *pktp;
1725 	struct dadk *dadkp;
1726 
1727 	pktp  = GDA_BP_PKT(bp);
1728 	dadkp = PKT2DADK(pktp);
1729 
1730 	bp->b_flags &= ~(B_DONE|B_BUSY);
1731 
1732 	/* Start next one */
1733 	FLC_DEQUE(dadkp->dad_flcobjp, bp);
1734 
1735 	mutex_enter(&dadkp->dad_cmd_mutex);
1736 	dadkp->dad_cmd_count--;
1737 	mutex_exit(&dadkp->dad_cmd_mutex);
1738 	biodone(bp);
1739 }
1740 
1741 static int
1742 dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp, dev_t dev,
1743 	enum uio_seg dataspace, int rw)
1744 {
1745 	struct dadkio_rwcmd *rwcmdp = (struct dadkio_rwcmd *)cmdp;
1746 	struct buf	*bp;
1747 	struct iovec	aiov;
1748 	struct uio	auio;
1749 	struct uio	*uio = &auio;
1750 	int		status;
1751 
1752 	bp = getrbuf(KM_SLEEP);
1753 
1754 	bp->av_forw = bp->b_forw = (struct buf *)dadkp;
1755 	bp->b_back  = (struct buf *)rwcmdp;	/* ioctl packet */
1756 
1757 	bzero((caddr_t)&auio, sizeof (struct uio));
1758 	bzero((caddr_t)&aiov, sizeof (struct iovec));
1759 	aiov.iov_base = rwcmdp->bufaddr;
1760 	aiov.iov_len = rwcmdp->buflen;
1761 	uio->uio_iov = &aiov;
1762 
1763 	uio->uio_iovcnt = 1;
1764 	uio->uio_resid = rwcmdp->buflen;
1765 	uio->uio_segflg = dataspace;
1766 
1767 	/* Let physio do the rest... */
1768 	status = physio(dadk_dk_strategy, bp, dev, rw, dadkmin, uio);
1769 
1770 	freerbuf(bp);
1771 	return (status);
1772 
1773 }
1774 
/* Do not let a user gendisk request get too big or */
/* else we could use too many resources.	    */
1777 
/*
 * physio() minphys routine: clamp each transfer to the driver's
 * per-request maximum, dadk_dk_maxphys.
 */
static void
dadkmin(struct buf *bp)
{
	if (bp->b_bcount > dadk_dk_maxphys)
		bp->b_bcount = dadk_dk_maxphys;
}
1784 
1785 static int
1786 dadk_dk_strategy(struct buf *bp)
1787 {
1788 	dadk_dk((struct dadk *)bp->av_forw, (struct dadkio_rwcmd *)bp->b_back,
1789 	    bp);
1790 	return (0);
1791 }
1792 
1793 static void
1794 dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *rwcmdp, struct buf *bp)
1795 {
1796 	struct  cmpkt *pktp;
1797 
1798 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, NULL, NULL);
1799 	if (!pktp) {
1800 		bioerror(bp, ENOMEM);
1801 		biodone(bp);
1802 		return;
1803 	}
1804 
1805 	pktp->cp_passthru = rwcmdp;
1806 
1807 	(void) dadk_ioprep(dadkp, pktp);
1808 
1809 	mutex_enter(&dadkp->dad_cmd_mutex);
1810 	dadkp->dad_cmd_count++;
1811 	mutex_exit(&dadkp->dad_cmd_mutex);
1812 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
1813 }
1814 
1815 /*
1816  * There is no existing way to notify cmdk module
1817  * when the command completed, so add this function
1818  * to calculate how many on-going commands.
1819  */
1820 int
1821 dadk_getcmds(opaque_t objp)
1822 {
1823 	struct dadk *dadkp = (struct dadk *)objp;
1824 	int count;
1825 
1826 	mutex_enter(&dadkp->dad_cmd_mutex);
1827 	count = dadkp->dad_cmd_count;
1828 	mutex_exit(&dadkp->dad_cmd_mutex);
1829 	return (count);
1830 }
1831 
1832 /*
1833  * this function was used to calc the cmd for CTL_IOCTL
1834  */
1835 static int
1836 dadk_ctl_ioctl(struct dadk *dadkp, uint32_t cmd, uintptr_t arg, int flag)
1837 {
1838 	int error;
1839 	mutex_enter(&dadkp->dad_cmd_mutex);
1840 	dadkp->dad_cmd_count++;
1841 	mutex_exit(&dadkp->dad_cmd_mutex);
1842 	error = CTL_IOCTL(dadkp->dad_ctlobjp, cmd, arg, flag);
1843 	mutex_enter(&dadkp->dad_cmd_mutex);
1844 	dadkp->dad_cmd_count--;
1845 	mutex_exit(&dadkp->dad_cmd_mutex);
1846 	return (error);
1847 }
1848