xref: /titanic_52/usr/src/uts/intel/io/dktp/dcdev/dadk.c (revision 1e49577a7fcde812700ded04431b49d67cc57d6d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 /*
27  * Direct Attached Disk
28  */
29 
30 #include <sys/file.h>
31 #include <sys/scsi/scsi.h>
32 #include <sys/var.h>
33 #include <sys/proc.h>
34 #include <sys/dktp/cm.h>
35 #include <sys/vtoc.h>
36 #include <sys/dkio.h>
37 #include <sys/policy.h>
38 #include <sys/priv.h>
39 
40 #include <sys/dktp/dadev.h>
41 #include <sys/dktp/fctypes.h>
42 #include <sys/dktp/flowctrl.h>
43 #include <sys/dktp/tgcom.h>
44 #include <sys/dktp/tgdk.h>
45 #include <sys/dktp/bbh.h>
46 #include <sys/dktp/dadkio.h>
47 #include <sys/dktp/dadk.h>
48 #include <sys/cdio.h>
49 
50 /*
51  * Local Function Prototypes
52  */
53 static void dadk_restart(void *pktp);
54 static void dadk_pktcb(struct cmpkt *pktp);
55 static void dadk_iodone(struct buf *bp);
56 static void dadk_polldone(struct buf *bp);
57 static void dadk_setcap(struct dadk *dadkp);
58 static void dadk_create_errstats(struct dadk *dadkp, int instance);
59 static void dadk_destroy_errstats(struct dadk *dadkp);
60 
61 static int dadk_chkerr(struct cmpkt *pktp);
62 static int dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp);
63 static int dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp);
64 static int dadk_ioretry(struct cmpkt *pktp, int action);
65 
66 static struct cmpkt *dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp,
67     struct buf *bp, void (*cb_func)(struct buf *), int (*func)(caddr_t),
68     caddr_t arg);
69 
70 static int  dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t),
71     caddr_t arg);
72 static void dadk_transport(opaque_t com_data, struct buf *bp);
73 static int dadk_ctl_ioctl(struct dadk *, uint32_t, uintptr_t, int);
74 
/*
 * Communication object ops vector handed to the flow-control layer via
 * dadk_init().  Only packet preparation (dadk_pkt) and transport
 * (dadk_transport) are implemented; the remaining slots are unused.
 */
struct tgcom_objops dadk_com_ops = {
	nodev,
	nodev,
	dadk_pkt,
	dadk_transport,
	0, 0
};
82 
83 /*
84  * architecture dependent allocation restrictions for dadk_iob_alloc(). For
85  * x86, we'll set dma_attr_addr_hi to dadk_max_phys_addr and dma_attr_sgllen
86  * to dadk_sgl_size during _init().
87  */
88 #if defined(__sparc)
89 static ddi_dma_attr_t dadk_alloc_attr = {
90 	DMA_ATTR_V0,	/* version number */
91 	0x0,		/* lowest usable address */
92 	0xFFFFFFFFull,	/* high DMA address range */
93 	0xFFFFFFFFull,	/* DMA counter register */
94 	1,		/* DMA address alignment */
95 	1,		/* DMA burstsizes */
96 	1,		/* min effective DMA size */
97 	0xFFFFFFFFull,	/* max DMA xfer size */
98 	0xFFFFFFFFull,	/* segment boundary */
99 	1,		/* s/g list length */
100 	512,		/* granularity of device */
101 	0,		/* DMA transfer flags */
102 };
103 #elif defined(__x86)
104 static ddi_dma_attr_t dadk_alloc_attr = {
105 	DMA_ATTR_V0,	/* version number */
106 	0x0,		/* lowest usable address */
107 	0x0,		/* high DMA address range [set in _init()] */
108 	0xFFFFull,	/* DMA counter register */
109 	512,		/* DMA address alignment */
110 	1,		/* DMA burstsizes */
111 	1,		/* min effective DMA size */
112 	0xFFFFFFFFull,	/* max DMA xfer size */
113 	0xFFFFFFFFull,	/* segment boundary */
114 	0,		/* s/g list length [set in _init()] */
115 	512,		/* granularity of device */
116 	0,		/* DMA transfer flags */
117 };
118 
119 uint64_t dadk_max_phys_addr = 0xFFFFFFFFull;
120 int dadk_sgl_size = 0xFF;
121 #endif
122 
123 static int dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags,
124     int silent);
125 static void dadk_rmb_iodone(struct buf *bp);
126 
127 static int dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp,
128     dev_t dev, enum uio_seg dataspace, int rw);
129 static void dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *scmdp,
130     struct buf *bp);
131 static void dadkmin(struct buf *bp);
132 static int dadk_dk_strategy(struct buf *bp);
133 static void dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp);
134 
/*
 * Target-disk ops vector exported through dadk_create(); this is the
 * public interface of the dadk layer used by cmdk and friends.
 */
struct tgdk_objops dadk_ops = {
	dadk_init,
	dadk_free,
	dadk_probe,
	dadk_attach,
	dadk_open,
	dadk_close,
	dadk_ioctl,
	dadk_strategy,
	dadk_setgeom,
	dadk_getgeom,
	dadk_iob_alloc,
	dadk_iob_free,
	dadk_iob_htoc,
	dadk_iob_xfer,
	dadk_dump,
	dadk_getphygeom,
	dadk_set_bbhobj,
	dadk_check_media,
	dadk_inquiry,
	dadk_cleanup,
	0
};
158 
159 /*
160  * Local static data
161  */
162 
163 #ifdef	DADK_DEBUG
164 #define	DENT	0x0001
165 #define	DERR	0x0002
166 #define	DIO	0x0004
167 #define	DGEOM	0x0010
168 #define	DSTATE  0x0020
169 static	int	dadk_debug = DGEOM;
170 
171 #endif	/* DADK_DEBUG */
172 
173 static int dadk_check_media_time = 3000000;	/* 3 Second State Check */
174 static int dadk_dk_maxphys = 0x80000;
175 
/*
 * Command names for error logging; each string's leading octal byte is
 * the DCMD_* command number, so the table can be indexed/matched by it.
 */
static char	*dadk_cmds[] = {
	"\000Unknown",			/* unknown 		*/
	"\001read sector",		/* DCMD_READ 1		*/
	"\002write sector",		/* DCMD_WRITE 2		*/
	"\003format track",		/* DCMD_FMTTRK 3	*/
	"\004format whole drive",	/* DCMD_FMTDRV 4	*/
	"\005recalibrate",		/* DCMD_RECAL  5	*/
	"\006seek sector",		/* DCMD_SEEK   6	*/
	"\007read verify",		/* DCMD_RDVER  7	*/
	"\010read defect list",		/* DCMD_GETDEF 8	*/
	"\011lock door",		/* DCMD_LOCK   9	*/
	"\012unlock door",		/* DCMD_UNLOCK 10	*/
	"\013start motor",		/* DCMD_START_MOTOR 11	*/
	"\014stop motor",		/* DCMD_STOP_MOTOR 12	*/
	"\015eject",			/* DCMD_EJECT  13	*/
	"\016update geometry",		/* DCMD_UPDATE_GEOM  14	*/
	"\017get state",		/* DCMD_GET_STATE  15	*/
	"\020cdrom pause",		/* DCMD_PAUSE  16	*/
	"\021cdrom resume",		/* DCMD_RESUME  17	*/
	"\022cdrom play track index",	/* DCMD_PLAYTRKIND  18	*/
	"\023cdrom play msf",		/* DCMD_PLAYMSF  19	*/
	"\024cdrom sub channel",	/* DCMD_SUBCHNL  20	*/
	"\025cdrom read mode 1",	/* DCMD_READMODE1  21	*/
	"\026cdrom read toc header",	/* DCMD_READTOCHDR  22	*/
	"\027cdrom read toc entry",	/* DCMD_READTOCENT  23	*/
	"\030cdrom read offset",	/* DCMD_READOFFSET  24	*/
	"\031cdrom read mode 2",	/* DCMD_READMODE2  25	*/
	"\032cdrom volume control",	/* DCMD_VOLCTRL  26	*/
	"\033flush cache",		/* DCMD_FLUSH_CACHE  27	*/
	NULL
};
207 
/*
 * Sense/error descriptions for logging; each string's leading octal
 * byte is the DERR_* error number.
 */
static char *dadk_sense[] = {
	"\000Success",			/* DERR_SUCCESS		*/
	"\001address mark not found",	/* DERR_AMNF		*/
	"\002track 0 not found",	/* DERR_TKONF		*/
	"\003aborted command",		/* DERR_ABORT		*/
	"\004write fault",		/* DERR_DWF		*/
	"\005ID not found",		/* DERR_IDNF		*/
	"\006drive busy",		/* DERR_BUSY		*/
	"\007uncorrectable data error",	/* DERR_UNC		*/
	"\010bad block detected",	/* DERR_BBK		*/
	"\011invalid command",		/* DERR_INVCDB		*/
	"\012device hard error",	/* DERR_HARD		*/
	"\013illegal length indicated", /* DERR_ILI		*/
	"\014end of media",		/* DERR_EOM		*/
	"\015media change requested",	/* DERR_MCR		*/
	"\016recovered from error",	/* DERR_RECOVER		*/
	"\017device not ready",		/* DERR_NOTREADY	*/
	"\020medium error",		/* DERR_MEDIUM		*/
	"\021hardware error",		/* DERR_HW		*/
	"\022illegal request",		/* DERR_ILL		*/
	"\023unit attention",		/* DERR_UNIT_ATTN	*/
	"\024data protection",		/* DERR_DATA_PROT	*/
	"\025miscompare",		/* DERR_MISCOMPARE	*/
	"\026ICRC error during UDMA",	/* DERR_ICRC		*/
	"\027reserved",			/* DERR_RESV		*/
	NULL
};
235 
236 static char *dadk_name = "Disk";
237 
238 /*
239  *	This is the loadable module wrapper
240  */
241 #include <sys/modctl.h>
242 
243 extern struct mod_ops mod_miscops;
244 
245 static struct modlmisc modlmisc = {
246 	&mod_miscops,	/* Type of module */
247 	"Direct Attached Disk"
248 };
249 
250 static struct modlinkage modlinkage = {
251 	MODREV_1, (void *)&modlmisc, NULL
252 };
253 
/*
 * Module load entry point: finalize the x86 DMA attributes from their
 * tunables, then register with the module framework.
 */
int
_init(void)
{
#ifdef DADK_DEBUG
	if (dadk_debug & DENT)
		PRF("dadk_init: call\n");
#endif

#if defined(__x86)
	/* set the max physical address for iob allocs on x86 */
	dadk_alloc_attr.dma_attr_addr_hi = dadk_max_phys_addr;

	/*
	 * set the sgllen for iob allocs on x86. If this is set less than
	 * the number of pages the buffer will take (taking into account
	 * alignment), it would force the allocator to try and allocate
	 * contiguous pages.
	 */
	dadk_alloc_attr.dma_attr_sgllen = dadk_sgl_size;
#endif

	return (mod_install(&modlinkage));
}
277 
/*
 * Module unload entry point.
 */
int
_fini(void)
{
#ifdef DADK_DEBUG
	if (dadk_debug & DENT)
		PRF("dadk_fini: call\n");
#endif

	return (mod_remove(&modlinkage));
}
288 
/*
 * Module info entry point.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
294 
295 struct tgdk_obj *
296 dadk_create()
297 {
298 	struct tgdk_obj *dkobjp;
299 	struct dadk *dadkp;
300 
301 	dkobjp = kmem_zalloc((sizeof (*dkobjp) + sizeof (*dadkp)), KM_NOSLEEP);
302 	if (!dkobjp)
303 		return (NULL);
304 	dadkp = (struct dadk *)(dkobjp+1);
305 
306 	dkobjp->tg_ops  = (struct  tgdk_objops *)&dadk_ops;
307 	dkobjp->tg_data = (opaque_t)dadkp;
308 	dkobjp->tg_ext = &(dkobjp->tg_extblk);
309 	dadkp->dad_extp = &(dkobjp->tg_extblk);
310 
311 #ifdef DADK_DEBUG
312 	if (dadk_debug & DENT)
313 		PRF("dadk_create: tgdkobjp= 0x%x dadkp= 0x%x\n", dkobjp, dadkp);
314 #endif
315 	return (dkobjp);
316 }
317 
/*
 * Initialize a dadk instance: link it to its scsi_device and controller,
 * set up the communication object, install the bad-block-handling and
 * flow-control objects, and initialize the outstanding-command counter.
 * Returns the flow-control object's init status.
 */
int
dadk_init(opaque_t objp, opaque_t devp, opaque_t flcobjp, opaque_t queobjp,
	opaque_t bbhobjp, void *lkarg)
{
	struct dadk *dadkp = (struct dadk *)objp;
	struct scsi_device *sdevp = (struct scsi_device *)devp;

	dadkp->dad_sd = devp;
	dadkp->dad_ctlobjp = (opaque_t)sdevp->sd_address.a_hba_tran;
	sdevp->sd_private = (caddr_t)dadkp;	/* back-pointer for the HBA */

	/* initialize the communication object */
	dadkp->dad_com.com_data = (opaque_t)dadkp;
	dadkp->dad_com.com_ops  = &dadk_com_ops;

	dadkp->dad_bbhobjp = bbhobjp;
	BBH_INIT(bbhobjp);

	dadkp->dad_flcobjp = flcobjp;
	mutex_init(&dadkp->dad_cmd_mutex, NULL, MUTEX_DRIVER, NULL);
	dadkp->dad_cmd_count = 0;
	return (FLC_INIT(flcobjp, &(dadkp->dad_com), queobjp, lkarg));
}
341 
342 int
343 dadk_free(struct tgdk_obj *dkobjp)
344 {
345 	TGDK_CLEANUP(dkobjp);
346 	kmem_free(dkobjp, (sizeof (*dkobjp) + sizeof (struct dadk)));
347 
348 	return (DDI_SUCCESS);
349 }
350 
351 void
352 dadk_cleanup(struct tgdk_obj *dkobjp)
353 {
354 	struct dadk *dadkp;
355 
356 	dadkp = (struct dadk *)(dkobjp->tg_data);
357 	if (dadkp->dad_sd)
358 		dadkp->dad_sd->sd_private = NULL;
359 	if (dadkp->dad_bbhobjp) {
360 		BBH_FREE(dadkp->dad_bbhobjp);
361 		dadkp->dad_bbhobjp = NULL;
362 	}
363 	if (dadkp->dad_flcobjp) {
364 		FLC_FREE(dadkp->dad_flcobjp);
365 		dadkp->dad_flcobjp = NULL;
366 	}
367 	mutex_destroy(&dadkp->dad_cmd_mutex);
368 }
369 
/* ARGSUSED */
/*
 * Probe the device using its SCSI inquiry data.  Only direct-access
 * disks and CD-ROMs are supported; WORM/optical and unknown device
 * types fail the probe.  On success the instance's device class,
 * node type and removable-media flag are recorded and the identity
 * string is logged.
 */
int
dadk_probe(opaque_t objp, int kmsflg)
{
	struct dadk *dadkp = (struct dadk *)objp;
	struct scsi_device *devp;
	char   name[80];

	devp = dadkp->dad_sd;
	if (!devp->sd_inq || (devp->sd_inq->inq_dtype == DTYPE_NOTPRESENT) ||
	    (devp->sd_inq->inq_dtype == DTYPE_UNKNOWN)) {
		return (DDI_PROBE_FAILURE);
	}

	switch (devp->sd_inq->inq_dtype) {
		case DTYPE_DIRECT:
			dadkp->dad_ctype = DKC_DIRECT;
			dadkp->dad_extp->tg_nodetype = DDI_NT_BLOCK;
			dadkp->dad_extp->tg_ctype = DKC_DIRECT;
			break;
		case DTYPE_RODIRECT: /* eg cdrom */
			dadkp->dad_ctype = DKC_CDROM;
			dadkp->dad_extp->tg_rdonly = 1;
			dadkp->dad_rdonly = 1;
			dadkp->dad_cdrom = 1;
			dadkp->dad_extp->tg_nodetype = DDI_NT_CD;
			dadkp->dad_extp->tg_ctype = DKC_CDROM;
			break;
		case DTYPE_WORM:
		case DTYPE_OPTICAL:
		default:
			return (DDI_PROBE_FAILURE);
	}

	dadkp->dad_extp->tg_rmb = dadkp->dad_rmb = devp->sd_inq->inq_rmb;

	/* default to 512-byte sectors; dadk_setcap() refines this at open */
	dadkp->dad_secshf = SCTRSHFT;
	dadkp->dad_blkshf = 0;

	/* display the device name */
	(void) strcpy(name, "Vendor '");
	gda_inqfill((caddr_t)devp->sd_inq->inq_vid, 8, &name[strlen(name)]);
	(void) strcat(name, "' Product '");
	gda_inqfill((caddr_t)devp->sd_inq->inq_pid, 16, &name[strlen(name)]);
	(void) strcat(name, "'");
	gda_log(devp->sd_dev, dadk_name, CE_NOTE, "!<%s>\n", name);

	return (DDI_PROBE_SUCCESS);
}
419 

/* ARGSUSED */
/* Attach entry point; dadk needs no per-attach work. */
int
dadk_attach(opaque_t objp)
{
	return (DDI_SUCCESS);
}
427 
428 int
429 dadk_set_bbhobj(opaque_t objp, opaque_t bbhobjp)
430 {
431 	struct dadk *dadkp = (struct dadk *)objp;
432 	/* free the old bbh object */
433 	if (dadkp->dad_bbhobjp)
434 		BBH_FREE(dadkp->dad_bbhobjp);
435 
436 	/* initialize the new bbh object */
437 	dadkp->dad_bbhobjp = bbhobjp;
438 	BBH_INIT(bbhobjp);
439 
440 	return (DDI_SUCCESS);
441 }
442 
/* ARGSUSED */
/*
 * Open entry point.  For removable media: spin up, lock the door and
 * refresh geometry, updating the media-state machine around the ioctls.
 * For fixed media that already has a known capacity, this is a repeat
 * open and only the kstats are (re)started.  The first successful open
 * also latches the write-cache-enable state, validates both logical
 * and physical geometry, derives the sector-size shift factors and
 * creates the error kstats.
 */
int
dadk_open(opaque_t objp, int flag)
{
	struct dadk *dadkp = (struct dadk *)objp;
	int error;
	int wce;

	if (!dadkp->dad_rmb) {
		if (dadkp->dad_phyg.g_cap) {
			/* geometry already known: repeat open */
			FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
			    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));
			return (DDI_SUCCESS);
		}
	} else {
		/* announce "no media" while we manipulate the drive */
		mutex_enter(&dadkp->dad_mutex);
		dadkp->dad_iostate = DKIO_NONE;
		cv_broadcast(&dadkp->dad_state_cv);
		mutex_exit(&dadkp->dad_mutex);

		if (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0, 0,
		    DADK_SILENT) ||
		    dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT) ||
		    dadk_rmb_ioctl(dadkp, DCMD_UPDATE_GEOM, 0, 0,
		    DADK_SILENT)) {
			return (DDI_FAILURE);
		}

		mutex_enter(&dadkp->dad_mutex);
		dadkp->dad_iostate = DKIO_INSERTED;
		cv_broadcast(&dadkp->dad_state_cv);
		mutex_exit(&dadkp->dad_mutex);
	}

	/*
	 * get write cache enable state
	 * If there is an error, must assume that write cache
	 * is enabled.
	 * NOTE: Since there is currently no Solaris mechanism to
	 * change the state of the Write Cache Enable feature,
	 * this code just checks the value of the WCE bit
	 * obtained at device init time.  If a mechanism
	 * is added to the driver to change WCE, dad_wce
	 * must be updated appropriately.
	 */
	error = dadk_ctl_ioctl(dadkp, DIOCTL_GETWCE,
	    (uintptr_t)&wce, FKIOCTL | FNATIVE);
	mutex_enter(&dadkp->dad_mutex);
	dadkp->dad_wce = (error != 0) || (wce != 0);
	mutex_exit(&dadkp->dad_mutex);

	/* logical disk geometry */
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETGEOM,
	    (uintptr_t)&dadkp->dad_logg, FKIOCTL | FNATIVE);
	if (dadkp->dad_logg.g_cap == 0)
		return (DDI_FAILURE);

	/* get physical disk geometry */
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETPHYGEOM,
	    (uintptr_t)&dadkp->dad_phyg, FKIOCTL | FNATIVE);
	if (dadkp->dad_phyg.g_cap == 0)
		return (DDI_FAILURE);

	/* derive sector-size and shift factors from the geometry */
	dadk_setcap(dadkp);

	dadk_create_errstats(dadkp,
	    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));

	/* start profiling */
	FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
	    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));

	return (DDI_SUCCESS);
}
517 
518 static void
519 dadk_setcap(struct dadk *dadkp)
520 {
521 	int	 totsize;
522 	int	 i;
523 
524 	totsize = dadkp->dad_phyg.g_secsiz;
525 
526 	if (totsize == 0) {
527 		if (dadkp->dad_cdrom) {
528 			totsize = 2048;
529 		} else {
530 			totsize = NBPSCTR;
531 		}
532 	} else {
533 		/* Round down sector size to multiple of 512B */
534 		totsize &= ~(NBPSCTR-1);
535 	}
536 	dadkp->dad_phyg.g_secsiz = totsize;
537 
538 	/* set sec,block shift factor - (512->0, 1024->1, 2048->2, etc.) */
539 	totsize >>= SCTRSHFT;
540 	for (i = 0; totsize != 1; i++, totsize >>= 1)
541 		;
542 	dadkp->dad_blkshf = i;
543 	dadkp->dad_secshf = i + SCTRSHFT;
544 }
545 
546 
/*
 * Create and install the per-instance "cmdkN,error" named kstat and
 * populate its static identity fields (model, serial, capacity) via
 * controller ioctls.  A no-op if the kstat already exists or cannot
 * be created.
 */
static void
dadk_create_errstats(struct dadk *dadkp, int instance)
{
	dadk_errstats_t *dep;
	char kstatname[KSTAT_STRLEN];
	dadk_ioc_string_t dadk_ioc_string;

	if (dadkp->dad_errstats)
		return;

	(void) sprintf(kstatname, "cmdk%d,error", instance);
	dadkp->dad_errstats = kstat_create("cmdkerror", instance,
	    kstatname, "device_error", KSTAT_TYPE_NAMED,
	    sizeof (dadk_errstats_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_PERSISTENT);

	if (!dadkp->dad_errstats)
		return;

	dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;

	kstat_named_init(&dep->dadk_softerrs,
	    "Soft Errors", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_harderrs,
	    "Hard Errors", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_transerrs,
	    "Transport Errors", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_model,
	    "Model", KSTAT_DATA_CHAR);
	kstat_named_init(&dep->dadk_revision,
	    "Revision", KSTAT_DATA_CHAR);
	kstat_named_init(&dep->dadk_serial,
	    "Serial No", KSTAT_DATA_CHAR);
	kstat_named_init(&dep->dadk_capacity,
	    "Size", KSTAT_DATA_ULONGLONG);
	kstat_named_init(&dep->dadk_rq_media_err,
	    "Media Error", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_ntrdy_err,
	    "Device Not Ready", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_nodev_err,
	    "No Device", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_recov_err,
	    "Recoverable", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_illrq_err,
	    "Illegal Request", KSTAT_DATA_UINT32);

	dadkp->dad_errstats->ks_private = dep;
	dadkp->dad_errstats->ks_update = nulldev;
	kstat_install(dadkp->dad_errstats);

	/* get model */
	dep->dadk_model.value.c[0] = 0;
	dadk_ioc_string.is_buf = &dep->dadk_model.value.c[0];
	dadk_ioc_string.is_size = sizeof (dep->dadk_model.value.c);
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETMODEL,
	    (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);

	/* get serial */
	dep->dadk_serial.value.c[0] = 0;
	dadk_ioc_string.is_buf = &dep->dadk_serial.value.c[0];
	dadk_ioc_string.is_size = sizeof (dep->dadk_serial.value.c);
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETSERIAL,
	    (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);

	/* Get revision (not reported by the controller; left empty) */
	dep->dadk_revision.value.c[0] = 0;

	/* Get capacity */

	dep->dadk_capacity.value.ui64 =
	    (uint64_t)dadkp->dad_logg.g_cap *
	    (uint64_t)dadkp->dad_logg.g_secsiz;
}
620 
621 
622 int
623 dadk_close(opaque_t objp)
624 {
625 	struct dadk *dadkp = (struct dadk *)objp;
626 
627 	if (dadkp->dad_rmb) {
628 		(void) dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0, 0,
629 		    DADK_SILENT);
630 		(void) dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT);
631 	}
632 	FLC_STOP_KSTAT(dadkp->dad_flcobjp);
633 
634 	dadk_destroy_errstats(dadkp);
635 
636 	return (DDI_SUCCESS);
637 }
638 
639 static void
640 dadk_destroy_errstats(struct dadk *dadkp)
641 {
642 	if (!dadkp->dad_errstats)
643 		return;
644 
645 	kstat_delete(dadkp->dad_errstats);
646 	dadkp->dad_errstats = NULL;
647 }
648 
649 
650 int
651 dadk_strategy(opaque_t objp, struct buf *bp)
652 {
653 	struct dadk *dadkp = (struct dadk *)objp;
654 
655 	if (dadkp->dad_rdonly && !(bp->b_flags & B_READ)) {
656 		bioerror(bp, EROFS);
657 		return (DDI_FAILURE);
658 	}
659 
660 	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
661 		bioerror(bp, ENXIO);
662 		return (DDI_FAILURE);
663 	}
664 
665 	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));
666 	mutex_enter(&dadkp->dad_cmd_mutex);
667 	dadkp->dad_cmd_count++;
668 	mutex_exit(&dadkp->dad_cmd_mutex);
669 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
670 
671 	return (DDI_SUCCESS);
672 }
673 
/*
 * Dump entry point: write the buffer synchronously with interrupts
 * disabled (CPF_NOINTR, polled completion via dadk_polldone), looping
 * until the whole transfer is done or an error is recorded on the buf.
 */
int
dadk_dump(opaque_t objp, struct buf *bp)
{
	struct dadk *dadkp = (struct dadk *)objp;
	struct cmpkt *pktp;

	if (dadkp->dad_rdonly) {
		bioerror(bp, EROFS);
		return (DDI_FAILURE);
	}

	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
		bioerror(bp, ENXIO);
		return (DDI_FAILURE);
	}

	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));

	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_polldone, NULL, NULL);
	if (!pktp) {
		cmn_err(CE_WARN, "no resources for dumping");
		bioerror(bp, EIO);
		return (DDI_FAILURE);
	}
	pktp->cp_flags |= CPF_NOINTR;	/* polled; no interrupts at dump time */

	(void) dadk_ioprep(dadkp, pktp);
	dadk_transport(dadkp, bp);
	pktp->cp_byteleft -= pktp->cp_bytexfer;

	/* keep issuing partial transfers until done or an error is seen */
	while (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
		(void) dadk_iosetup(dadkp, pktp);
		dadk_transport(dadkp, bp);
		pktp->cp_byteleft -= pktp->cp_bytexfer;
	}

	if (pktp->cp_private)
		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
	return (DDI_SUCCESS);
}
715 
/* ARGSUSED  */
/*
 * Ioctl entry point.  The first switch services the commands dadk
 * implements itself (defect-list read, raw r/w commands, firmware
 * update, write-cache flush); any other command on fixed media is
 * passed straight through to the controller via dadk_ctl_ioctl().
 * The second switch services removable-media and CD-ROM commands
 * through dadk_rmb_ioctl().
 */
int
dadk_ioctl(opaque_t objp, dev_t dev, int cmd, intptr_t arg, int flag,
	cred_t *cred_p, int *rval_p)
{
	struct dadk *dadkp = (struct dadk *)objp;

	switch (cmd) {
	case DKIOCGETDEF:
		{
		struct buf	*bp;
		int		err, head;
		unsigned char	*secbuf;
		STRUCT_DECL(defect_header, adh);

		STRUCT_INIT(adh, flag & FMODELS);

		/*
		 * copyin header ....
		 * yields head number and buffer address
		 */
		if (ddi_copyin((caddr_t)arg, STRUCT_BUF(adh), STRUCT_SIZE(adh),
		    flag))
			return (EFAULT);
		head = STRUCT_FGET(adh, head);
		if (head < 0 || head >= dadkp->dad_phyg.g_head)
			return (ENXIO);
		secbuf = kmem_zalloc(NBPSCTR, KM_SLEEP);
		if (!secbuf)
			return (ENOMEM);
		bp = getrbuf(KM_SLEEP);
		if (!bp) {
			kmem_free(secbuf, NBPSCTR);
			return (ENOMEM);
		}

		bp->b_edev = dev;
		bp->b_dev  = cmpdev(dev);
		bp->b_flags = B_BUSY;
		bp->b_resid = 0;
		bp->b_bcount = NBPSCTR;
		bp->b_un.b_addr = (caddr_t)secbuf;
		bp->b_blkno = head; /* the head number rides in b_blkno */
		bp->b_forw = (struct buf *)dadkp;
		bp->b_back = (struct buf *)DCMD_GETDEF;

		mutex_enter(&dadkp->dad_cmd_mutex);
		dadkp->dad_cmd_count++;
		mutex_exit(&dadkp->dad_cmd_mutex);
		FLC_ENQUE(dadkp->dad_flcobjp, bp);
		err = biowait(bp);
		if (!err) {
			if (ddi_copyout((caddr_t)secbuf,
			    STRUCT_FGETP(adh, buffer), NBPSCTR, flag))
				err = ENXIO;
		}
		kmem_free(secbuf, NBPSCTR);
		freerbuf(bp);
		return (err);
		}
	case DIOCTL_RWCMD:
		{
		struct dadkio_rwcmd *rwcmdp;
		int status, rw;

		/*
		 * copied in by cmdk and, if necessary, converted to the
		 * correct datamodel
		 */
		rwcmdp = (struct dadkio_rwcmd *)(intptr_t)arg;

		/*
		 * handle the complex cases here; we pass these
		 * through to the driver, which will queue them and
		 * handle the requests asynchronously.  The simpler
		 * cases, which can return immediately, fail here, and
		 * the request reverts to the dadk_ctl_ioctl() path,
		 * which will reroute them directly to the ata driver.
		 */
		switch (rwcmdp->cmd) {
			case DADKIO_RWCMD_READ :
				/*FALLTHROUGH*/
			case DADKIO_RWCMD_WRITE:
				rw = ((rwcmdp->cmd == DADKIO_RWCMD_WRITE) ?
				    B_WRITE : B_READ);
				status = dadk_dk_buf_setup(dadkp,
				    (opaque_t)rwcmdp, dev, ((flag &FKIOCTL) ?
				    UIO_SYSSPACE : UIO_USERSPACE), rw);
				return (status);
			default:
				return (EINVAL);
			}
		}
	case DKIOC_UPDATEFW:

		/*
		 * Require PRIV_ALL privilege to invoke DKIOC_UPDATEFW
		 * to protect the firmware update from malicious use
		 */
		if (PRIV_POLICY(cred_p, PRIV_ALL, B_FALSE, EPERM, NULL) != 0)
			return (EPERM);
		else
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));

	case DKIOCFLUSHWRITECACHE:
		{
			struct buf *bp;
			int err = 0;
			struct dk_callback *dkc = (struct dk_callback *)arg;
			struct cmpkt *pktp;
			int is_sync = 1;

			mutex_enter(&dadkp->dad_mutex);
			if (dadkp->dad_noflush || !  dadkp->dad_wce) {
				err = dadkp->dad_noflush ? ENOTSUP : 0;
				mutex_exit(&dadkp->dad_mutex);
				/*
				 * If a callback was requested: a
				 * callback will always be done if the
				 * caller saw the DKIOCFLUSHWRITECACHE
				 * ioctl return 0, and never done if the
				 * caller saw the ioctl return an error.
				 */
				if ((flag & FKIOCTL) && dkc != NULL &&
				    dkc->dkc_callback != NULL) {
					(*dkc->dkc_callback)(dkc->dkc_cookie,
					    err);
					/*
					 * Did callback and reported error.
					 * Since we did a callback, ioctl
					 * should return 0.
					 */
					err = 0;
				}
				return (err);
			}
			mutex_exit(&dadkp->dad_mutex);

			bp = getrbuf(KM_SLEEP);

			bp->b_edev = dev;
			bp->b_dev  = cmpdev(dev);
			bp->b_flags = B_BUSY;
			bp->b_resid = 0;
			bp->b_bcount = 0;
			SET_BP_SEC(bp, 0);

			/*
			 * Kernel callers may request an asynchronous
			 * flush: copy the callback so dadk_flushdone()
			 * can invoke and free it at completion time.
			 */
			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				struct dk_callback *dkc2 =
				    (struct dk_callback *)kmem_zalloc(
				    sizeof (struct dk_callback), KM_SLEEP);

				bcopy(dkc, dkc2, sizeof (*dkc2));
				bp->b_private = dkc2;
				bp->b_iodone = dadk_flushdone;
				is_sync = 0;
			}

			/*
			 * Setup command pkt
			 * dadk_pktprep() can't fail since DDI_DMA_SLEEP set
			 */
			pktp = dadk_pktprep(dadkp, NULL, bp,
			    dadk_iodone, DDI_DMA_SLEEP, NULL);

			pktp->cp_time = DADK_FLUSH_CACHE_TIME;

			*((char *)(pktp->cp_cdbp)) = DCMD_FLUSH_CACHE;
			pktp->cp_byteleft = 0;
			pktp->cp_private = NULL;
			pktp->cp_secleft = 0;
			pktp->cp_srtsec = -1;
			pktp->cp_bytexfer = 0;

			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);

			mutex_enter(&dadkp->dad_cmd_mutex);
			dadkp->dad_cmd_count++;
			mutex_exit(&dadkp->dad_cmd_mutex);
			FLC_ENQUE(dadkp->dad_flcobjp, bp);

			if (is_sync) {
				err = biowait(bp);
				freerbuf(bp);
			}
			return (err);
		}
	default:
		/* fixed media: pass anything else through to the controller */
		if (!dadkp->dad_rmb)
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));
	}

	/* removable-media and CD-ROM commands */
	switch (cmd) {
	case CDROMSTOP:
		return (dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0,
		    0, DADK_SILENT));
	case CDROMSTART:
		return (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0,
		    0, DADK_SILENT));
	case DKIOCLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT));
	case DKIOCUNLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT));
	case DKIOCEJECT:
	case CDROMEJECT:
		{
			int ret;

			/* the door must be unlocked before ejecting */
			if (ret = dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0,
			    DADK_SILENT)) {
				return (ret);
			}
			if (ret = dadk_rmb_ioctl(dadkp, DCMD_EJECT, 0, 0,
			    DADK_SILENT)) {
				return (ret);
			}
			mutex_enter(&dadkp->dad_mutex);
			dadkp->dad_iostate = DKIO_EJECTED;
			cv_broadcast(&dadkp->dad_state_cv);
			mutex_exit(&dadkp->dad_mutex);

			return (0);

		}
	default:
		return (ENOTTY);
	/*
	 * cdrom audio commands
	 * (these cases map the ioctl to its DCMD_* equivalent and fall
	 * out of the switch to the dadk_rmb_ioctl() call below)
	 */
	case CDROMPAUSE:
		cmd = DCMD_PAUSE;
		break;
	case CDROMRESUME:
		cmd = DCMD_RESUME;
		break;
	case CDROMPLAYMSF:
		cmd = DCMD_PLAYMSF;
		break;
	case CDROMPLAYTRKIND:
		cmd = DCMD_PLAYTRKIND;
		break;
	case CDROMREADTOCHDR:
		cmd = DCMD_READTOCHDR;
		break;
	case CDROMREADTOCENTRY:
		cmd = DCMD_READTOCENT;
		break;
	case CDROMVOLCTRL:
		cmd = DCMD_VOLCTRL;
		break;
	case CDROMSUBCHNL:
		cmd = DCMD_SUBCHNL;
		break;
	case CDROMREADMODE2:
		cmd = DCMD_READMODE2;
		break;
	case CDROMREADMODE1:
		cmd = DCMD_READMODE1;
		break;
	case CDROMREADOFFSET:
		cmd = DCMD_READOFFSET;
		break;
	}
	return (dadk_rmb_ioctl(dadkp, cmd, arg, flag, 0));
}
982 
/*
 * b_iodone handler for asynchronous DKIOCFLUSHWRITECACHE: report the
 * flush result to the saved callback, then free the callback copy and
 * the buf allocated in dadk_ioctl().
 */
int
dadk_flushdone(struct buf *bp)
{
	struct dk_callback *dkc = bp->b_private;

	ASSERT(dkc != NULL && dkc->dkc_callback != NULL);

	(*dkc->dkc_callback)(dkc->dkc_cookie, geterror(bp));

	kmem_free(dkc, sizeof (*dkc));
	freerbuf(bp);
	return (0);
}
996 
/*
 * Copy the cached physical geometry out to the caller.
 */
int
dadk_getphygeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
{
	struct dadk *dadkp = (struct dadk *)objp;

	bcopy((caddr_t)&dadkp->dad_phyg, (caddr_t)dkgeom_p,
	    sizeof (struct tgdk_geom));
	return (DDI_SUCCESS);
}
1006 
/*
 * Copy the cached logical geometry out to the caller.
 */
int
dadk_getgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
{
	struct dadk *dadkp = (struct dadk *)objp;
	bcopy((caddr_t)&dadkp->dad_logg, (caddr_t)dkgeom_p,
	    sizeof (struct tgdk_geom));
	return (DDI_SUCCESS);
}
1015 
1016 int
1017 dadk_setgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
1018 {
1019 	struct dadk *dadkp = (struct dadk *)objp;
1020 
1021 	dadkp->dad_logg.g_cyl = dkgeom_p->g_cyl;
1022 	dadkp->dad_logg.g_head = dkgeom_p->g_head;
1023 	dadkp->dad_logg.g_sec = dkgeom_p->g_sec;
1024 	dadkp->dad_logg.g_cap = dkgeom_p->g_cap;
1025 	return (DDI_SUCCESS);
1026 }
1027 
1028 
1029 tgdk_iob_handle
1030 dadk_iob_alloc(opaque_t objp, daddr_t blkno, ssize_t xfer, int kmsflg)
1031 {
1032 	struct dadk *dadkp = (struct dadk *)objp;
1033 	struct buf *bp;
1034 	struct tgdk_iob *iobp;
1035 	size_t rlen;
1036 
1037 	iobp = kmem_zalloc(sizeof (*iobp), kmsflg);
1038 	if (iobp == NULL)
1039 		return (NULL);
1040 	if ((bp = getrbuf(kmsflg)) == NULL) {
1041 		kmem_free(iobp, sizeof (*iobp));
1042 		return (NULL);
1043 	}
1044 
1045 	iobp->b_psec  = LBLK2SEC(blkno, dadkp->dad_blkshf);
1046 	iobp->b_pbyteoff = (blkno & ((1<<dadkp->dad_blkshf) - 1)) << SCTRSHFT;
1047 	iobp->b_pbytecnt = ((iobp->b_pbyteoff + xfer + dadkp->DAD_SECSIZ - 1)
1048 	    >> dadkp->dad_secshf) << dadkp->dad_secshf;
1049 
1050 	bp->b_un.b_addr = 0;
1051 	/*
1052 	 * use i_ddi_mem_alloc() for now until we have an interface to allocate
1053 	 * memory for DMA which doesn't require a DMA handle. ddi_iopb_alloc()
1054 	 * is obsolete and we want more flexibility in controlling the DMA
1055 	 * address constraints..
1056 	 */
1057 	if (i_ddi_mem_alloc((dadkp->dad_sd)->sd_dev, &dadk_alloc_attr,
1058 	    (size_t)iobp->b_pbytecnt, ((kmsflg == KM_SLEEP) ? 1 : 0), 0, NULL,
1059 	    &bp->b_un.b_addr, &rlen, NULL) != DDI_SUCCESS) {
1060 		freerbuf(bp);
1061 		kmem_free(iobp, sizeof (*iobp));
1062 		return (NULL);
1063 	}
1064 	iobp->b_flag |= IOB_BPALLOC | IOB_BPBUFALLOC;
1065 	iobp->b_bp = bp;
1066 	iobp->b_lblk = blkno;
1067 	iobp->b_xfer = xfer;
1068 	iobp->b_lblk = blkno;
1069 	iobp->b_xfer = xfer;
1070 	return (iobp);
1071 }
1072 
1073 /* ARGSUSED */
1074 int
1075 dadk_iob_free(opaque_t objp, struct tgdk_iob *iobp)
1076 {
1077 	struct buf *bp;
1078 
1079 	if (iobp) {
1080 		if (iobp->b_bp && (iobp->b_flag & IOB_BPALLOC)) {
1081 			bp = iobp->b_bp;
1082 			if (bp->b_un.b_addr && (iobp->b_flag & IOB_BPBUFALLOC))
1083 				i_ddi_mem_free((caddr_t)bp->b_un.b_addr, NULL);
1084 			freerbuf(bp);
1085 		}
1086 		kmem_free(iobp, sizeof (*iobp));
1087 	}
1088 	return (DDI_SUCCESS);
1089 }
1090 
1091 /* ARGSUSED */
1092 caddr_t
1093 dadk_iob_htoc(opaque_t objp, struct tgdk_iob *iobp)
1094 {
1095 	return (iobp->b_bp->b_un.b_addr+iobp->b_pbyteoff);
1096 }
1097 
1098 
/*
 * Perform a synchronous transfer on an I/O buffer allocated by
 * dadk_iob_alloc().  Returns a pointer to the transferred data on
 * success, NULL on failure (the errno is recorded on the buf via
 * bioerror()).
 */
caddr_t
dadk_iob_xfer(opaque_t objp, struct tgdk_iob *iobp, int rw)
{
	struct dadk	*dadkp = (struct dadk *)objp;
	struct buf	*bp;
	int		err;

	bp = iobp->b_bp;
	/* reject writes to a read-only device */
	if (dadkp->dad_rdonly && !(rw & B_READ)) {
		bioerror(bp, EROFS);
		return (NULL);
	}

	/* set up the buf for the full, sector-rounded physical transfer */
	bp->b_flags |= (B_BUSY | rw);
	bp->b_bcount = iobp->b_pbytecnt;
	SET_BP_SEC(bp, iobp->b_psec);
	bp->av_back = (struct buf *)0;
	bp->b_resid = 0;

	/* count the command, hand it to flow control, wait for completion */
	mutex_enter(&dadkp->dad_cmd_mutex);
	dadkp->dad_cmd_count++;
	mutex_exit(&dadkp->dad_cmd_mutex);
	FLC_ENQUE(dadkp->dad_flcobjp, bp);
	err = biowait(bp);

	/* restore the caller's logical byte count */
	bp->b_bcount = iobp->b_xfer;
	bp->b_flags &= ~(B_DONE|B_BUSY);

	if (err)
		return (NULL);

	/* skip the leading partial-sector offset within the buffer */
	return (bp->b_un.b_addr+iobp->b_pbyteoff);
}
1133 
1134 static void
1135 dadk_transport(opaque_t com_data, struct buf *bp)
1136 {
1137 	struct dadk *dadkp = (struct dadk *)com_data;
1138 
1139 	if (CTL_TRANSPORT(dadkp->dad_ctlobjp, GDA_BP_PKT(bp)) ==
1140 	    CTL_SEND_SUCCESS)
1141 		return;
1142 	dadk_restart((void*)GDA_BP_PKT(bp));
1143 }
1144 
1145 static int
1146 dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t), caddr_t arg)
1147 {
1148 	struct cmpkt *pktp;
1149 	struct dadk *dadkp = (struct dadk *)com_data;
1150 
1151 	if (GDA_BP_PKT(bp))
1152 		return (DDI_SUCCESS);
1153 
1154 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, func, arg);
1155 	if (!pktp)
1156 		return (DDI_FAILURE);
1157 
1158 	return (dadk_ioprep(dadkp, pktp));
1159 }
1160 
1161 /*
1162  * Read, Write preparation
1163  */
1164 static int
1165 dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp)
1166 {
1167 	struct buf *bp;
1168 
1169 	bp = pktp->cp_bp;
1170 	if (bp->b_forw == (struct buf *)dadkp)
1171 		*((char *)(pktp->cp_cdbp)) = (char)(intptr_t)bp->b_back;
1172 
1173 	else if (bp->b_flags & B_READ)
1174 		*((char *)(pktp->cp_cdbp)) = DCMD_READ;
1175 	else
1176 		*((char *)(pktp->cp_cdbp)) = DCMD_WRITE;
1177 	pktp->cp_byteleft = bp->b_bcount;
1178 
1179 	/* setup the bad block list handle */
1180 	pktp->cp_private = BBH_GETHANDLE(dadkp->dad_bbhobjp, bp);
1181 	return (dadk_iosetup(dadkp, pktp));
1182 }
1183 
1184 static int
1185 dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp)
1186 {
1187 	struct buf	*bp;
1188 	bbh_cookie_t	bbhckp;
1189 	int		seccnt;
1190 
1191 	seccnt = pktp->cp_bytexfer >> dadkp->dad_secshf;
1192 	pktp->cp_secleft -= seccnt;
1193 
1194 	if (pktp->cp_secleft) {
1195 		pktp->cp_srtsec += seccnt;
1196 	} else {
1197 		/* get the first cookie from the bad block list */
1198 		if (!pktp->cp_private) {
1199 			bp = pktp->cp_bp;
1200 			pktp->cp_srtsec  = GET_BP_SEC(bp);
1201 			pktp->cp_secleft = (bp->b_bcount >> dadkp->dad_secshf);
1202 		} else {
1203 			bbhckp = BBH_HTOC(dadkp->dad_bbhobjp,
1204 			    pktp->cp_private);
1205 			pktp->cp_srtsec = BBH_GETCK_SECTOR(dadkp->dad_bbhobjp,
1206 			    bbhckp);
1207 			pktp->cp_secleft = BBH_GETCK_SECLEN(dadkp->dad_bbhobjp,
1208 			    bbhckp);
1209 		}
1210 	}
1211 
1212 	pktp->cp_bytexfer = pktp->cp_secleft << dadkp->dad_secshf;
1213 
1214 	if (CTL_IOSETUP(dadkp->dad_ctlobjp, pktp)) {
1215 		return (DDI_SUCCESS);
1216 	} else {
1217 		return (DDI_FAILURE);
1218 	}
1219 
1220 
1221 
1222 
1223 }
1224 
1225 static struct cmpkt *
1226 dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp, struct buf *bp,
1227     void (*cb_func)(struct buf *), int (*func)(caddr_t), caddr_t arg)
1228 {
1229 	struct cmpkt *pktp;
1230 
1231 	pktp = gda_pktprep(dadkp->dad_ctlobjp, in_pktp, (opaque_t)bp, func,
1232 	    arg);
1233 
1234 	if (pktp) {
1235 		pktp->cp_callback = dadk_pktcb;
1236 		pktp->cp_time = DADK_IO_TIME;
1237 		pktp->cp_flags = 0;
1238 		pktp->cp_iodone = cb_func;
1239 		pktp->cp_dev_private = (opaque_t)dadkp;
1240 
1241 	}
1242 
1243 	return (pktp);
1244 }
1245 
1246 
1247 static void
1248 dadk_restart(void *vpktp)
1249 {
1250 	struct cmpkt *pktp = (struct cmpkt *)vpktp;
1251 
1252 	if (dadk_ioretry(pktp, QUE_COMMAND) == JUST_RETURN)
1253 		return;
1254 	pktp->cp_iodone(pktp->cp_bp);
1255 }
1256 
/*
 * Retry or fail a command.
 *
 * QUE_COMMAND: re-setup and re-transport the packet; returns JUST_RETURN
 * while the retry is in flight.  Once DADK_RETRY_COUNT is exhausted (or
 * the transport fails) the request gets ENXIO and falls through to the
 * error accounting.  COMMAND_DONE_ERROR: account the residual and set
 * an error on the buf.  COMMAND_DONE/default: nothing more to do.
 *
 * Returns JUST_RETURN when the caller must not complete the buf,
 * otherwise COMMAND_DONE.
 */
static int
dadk_ioretry(struct cmpkt *pktp, int action)
{
	struct buf *bp;
	struct dadk *dadkp = PKT2DADK(pktp);

	switch (action) {
	case QUE_COMMAND:
		if (pktp->cp_retry++ < DADK_RETRY_COUNT) {
			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);
			if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) ==
			    CTL_SEND_SUCCESS) {
				return (JUST_RETURN);
			}
			gda_log(dadkp->dad_sd->sd_dev, dadk_name,
			    CE_WARN, "transport of command fails\n");
		} else
			gda_log(dadkp->dad_sd->sd_dev,
			    dadk_name, CE_WARN,
			    "exceeds maximum number of retries\n");
		bioerror(pktp->cp_bp, ENXIO);
		/*FALLTHROUGH*/
	case COMMAND_DONE_ERROR:
		bp = pktp->cp_bp;
		/* everything not transferred becomes residual */
		bp->b_resid += pktp->cp_byteleft - pktp->cp_bytexfer +
		    pktp->cp_resid;
		if (geterror(bp) == 0) {
			if ((*((char *)(pktp->cp_cdbp)) == DCMD_FLUSH_CACHE) &&
			    (pktp->cp_dev_private == (opaque_t)dadkp) &&
			    ((int)(*(char *)pktp->cp_scbp) == DERR_ABORT)) {
				/*
				 * Flag "unimplemented" responses for
				 * DCMD_FLUSH_CACHE as ENOTSUP and remember
				 * (dad_noflush) that this device cannot
				 * flush its cache.
				 */
				bioerror(bp, ENOTSUP);
				mutex_enter(&dadkp->dad_mutex);
				dadkp->dad_noflush = 1;
				mutex_exit(&dadkp->dad_mutex);
			} else {
				bioerror(bp, EIO);
			}
		}
		/*FALLTHROUGH*/
	case COMMAND_DONE:
	default:
		return (COMMAND_DONE);
	}
}
1305 
1306 
/*
 * Packet completion callback invoked by the controller object.
 * Dispatches on the completion reason and on whether the request is a
 * user passthru (cp_passthru carries the dadkio_rwcmd, or DADK_SILENT
 * for internal silent requests).
 */
static void
dadk_pktcb(struct cmpkt *pktp)
{
	int action;
	struct dadkio_rwcmd *rwcmdp;

	rwcmdp = (struct dadkio_rwcmd *)pktp->cp_passthru;  /* ioctl packet */

	if (pktp->cp_reason == CPS_SUCCESS) {
		if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT))
			rwcmdp->status.status = DADKIO_STAT_NO_ERROR;
		pktp->cp_iodone(pktp->cp_bp);
		return;
	}

	/* passthru requests are not retried: record the error and finish */
	if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT)) {
		if (pktp->cp_reason == CPS_CHKERR)
			dadk_recorderr(pktp, rwcmdp);
		dadk_iodone(pktp->cp_bp);
		return;
	}

	/* classify the failure and pick a disposition */
	if (pktp->cp_reason == CPS_CHKERR)
		action = dadk_chkerr(pktp);
	else
		action = COMMAND_DONE_ERROR;

	if (action == JUST_RETURN)
		return;

	/*
	 * If we are panicking don't retry the command
	 * just fail it so we can go down completing all
	 * of the buffers.
	 */
	if (ddi_in_panic() && action == QUE_COMMAND)
		action = COMMAND_DONE_ERROR;

	if (action != COMMAND_DONE) {
		if ((dadk_ioretry(pktp, action)) == JUST_RETURN)
			return;
	}
	pktp->cp_iodone(pktp->cp_bp);
}
1351 
1352 
1353 
/*
 * Error disposition table, indexed directly by the DERR_* status byte
 * of a completed packet (see dadk_chkerr()).  Each entry pairs the
 * retry action with the severity used for kstats and logging; entries
 * must remain in DERR_* numeric order.
 */
static struct dadkio_derr dadk_errtab[] = {
	{COMMAND_DONE, GDA_INFORMATIONAL},	/*  0 DERR_SUCCESS	*/
	{QUE_COMMAND, GDA_FATAL},		/*  1 DERR_AMNF		*/
	{QUE_COMMAND, GDA_FATAL},		/*  2 DERR_TKONF	*/
	{COMMAND_DONE_ERROR, GDA_INFORMATIONAL}, /* 3 DERR_ABORT	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/*  4 DERR_DWF		*/
	{QUE_COMMAND, GDA_FATAL},		/*  5 DERR_IDNF		*/
	{JUST_RETURN, GDA_INFORMATIONAL},	/*  6 DERR_BUSY		*/
	{QUE_COMMAND, GDA_FATAL},		/*  7 DERR_UNC		*/
	{QUE_COMMAND, GDA_RETRYABLE},		/*  8 DERR_BBK		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/*  9 DERR_INVCDB	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 10 DERR_HARD		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 11 DERR_ILI		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 12 DERR_EOM		*/
	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 13 DERR_MCR		*/
	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 14 DERR_RECOVER	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 15 DERR_NOTREADY	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/* 16 DERR_MEDIUM	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 17 DERR_HW		*/
	{COMMAND_DONE, GDA_FATAL},		/* 18 DERR_ILL		*/
	{COMMAND_DONE, GDA_FATAL},		/* 19 DERR_UNIT_ATTN	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 20 DERR_DATA_PROT	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 21 DERR_MISCOMPARE	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/* 22 DERR_ICRC		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 23 DERR_RESV		*/
};
1380 
/*
 * Examine the DERR_* status byte of a packet that completed with
 * CPS_CHKERR: update the error kstats, log the failure (non-passthru
 * only), schedule a delayed retry on DERR_BUSY, and return the
 * disposition from dadk_errtab.
 *
 * NOTE(review): scb indexes dadk_errtab without a range check; this
 * assumes the controller only reports valid DERR_* codes (0..23) --
 * confirm against the producers of cp_scbp.
 */
static int
dadk_chkerr(struct cmpkt *pktp)
{
	daddr_t err_blkno;
	struct dadk *dadkp = PKT2DADK(pktp);
	dadk_errstats_t *dep;
	int scb = *(char *)pktp->cp_scbp;

	if (scb == DERR_SUCCESS) {
		/* success after one or more retries counts as recovered */
		if (pktp->cp_retry != 0 && dadkp->dad_errstats != NULL) {
			dep = (dadk_errstats_t *)
			    dadkp->dad_errstats->ks_data;
			dep->dadk_rq_recov_err.value.ui32++;
		}
		return (COMMAND_DONE);
	}

	/* block at which the transfer stopped (-1 on the first attempt) */
	if (pktp->cp_retry) {
		err_blkno = pktp->cp_srtsec + ((pktp->cp_bytexfer -
		    pktp->cp_resid) >> dadkp->dad_secshf);
	} else
		err_blkno = -1;

	if (dadkp->dad_errstats != NULL) {
		dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;

		/* soft/hard accounting follows the table severity */
		switch (dadk_errtab[scb].d_severity) {
			case GDA_RETRYABLE:
				dep->dadk_softerrs.value.ui32++;
				break;

			case GDA_FATAL:
				dep->dadk_harderrs.value.ui32++;
				break;

			default:
				break;
		}

		/* classify the error for the request-sense style kstats */
		switch (scb) {
			case DERR_INVCDB:
			case DERR_ILI:
			case DERR_EOM:
			case DERR_HW:
			case DERR_ICRC:
				dep->dadk_transerrs.value.ui32++;
				break;

			case DERR_AMNF:
			case DERR_TKONF:
			case DERR_DWF:
			case DERR_BBK:
			case DERR_UNC:
			case DERR_HARD:
			case DERR_MEDIUM:
			case DERR_DATA_PROT:
			case DERR_MISCOMP:
				dep->dadk_rq_media_err.value.ui32++;
				break;

			case DERR_NOTREADY:
				dep->dadk_rq_ntrdy_err.value.ui32++;
				break;

			case DERR_IDNF:
			case DERR_UNIT_ATTN:
				dep->dadk_rq_nodev_err.value.ui32++;
				break;

			case DERR_ILL:
			case DERR_RESV:
				dep->dadk_rq_illrq_err.value.ui32++;
				break;

			default:
				break;
		}
	}

	/* if attempting to read a sector from a cdrom audio disk */
	if ((dadkp->dad_cdrom) &&
	    (*((char *)(pktp->cp_cdbp)) == DCMD_READ) &&
	    (scb == DERR_ILL)) {
		return (COMMAND_DONE);
	}
	/* passthru errors are reported via dadk_recorderr() instead */
	if (pktp->cp_passthru == NULL) {
		gda_errmsg(dadkp->dad_sd, pktp, dadk_name,
		    dadk_errtab[scb].d_severity, pktp->cp_srtsec,
		    err_blkno, dadk_cmds, dadk_sense);
	}

	/* device busy: retry later from a timeout */
	if (scb == DERR_BUSY) {
		(void) timeout(dadk_restart, (void *)pktp, DADK_BSY_TIMEOUT);
	}

	return (dadk_errtab[scb].d_action);
}
1478 
/*
 * Record a CPS_CHKERR completion in the user's dadkio_rwcmd status:
 * failing block, residual byte count and a DADKIO_STAT_* code; log the
 * error unless the caller requested silence (DADKIO_FLAG_SILENT).
 */
static void
dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp)
{
	struct dadk *dadkp;
	int scb;

	dadkp = PKT2DADK(pktp);
	scb = (int)(*(char *)pktp->cp_scbp);

	/* block at which the transfer stopped */
	rwcmdp->status.failed_blk = rwcmdp->blkaddr +
	    ((pktp->cp_bytexfer - pktp->cp_resid) >> dadkp->dad_secshf);

	/* bytes of the request that were never transferred */
	rwcmdp->status.resid = pktp->cp_bp->b_resid +
	    pktp->cp_byteleft - pktp->cp_bytexfer + pktp->cp_resid;
	switch ((int)(* (char *)pktp->cp_scbp)) {
	case DERR_AMNF:
	case DERR_ABORT:
		rwcmdp->status.status = DADKIO_STAT_ILLEGAL_REQUEST;
		break;
	case DERR_DWF:
	case DERR_IDNF:
		rwcmdp->status.status = DADKIO_STAT_ILLEGAL_ADDRESS;
		break;
	case DERR_TKONF:
	case DERR_UNC:
	case DERR_BBK:
		/* media errors pinpoint the failing block exactly */
		rwcmdp->status.status = DADKIO_STAT_MEDIUM_ERROR;
		rwcmdp->status.failed_blk_is_valid = 1;
		rwcmdp->status.resid = 0;
		break;
	case DERR_BUSY:
		rwcmdp->status.status = DADKIO_STAT_NOT_READY;
		break;
	case DERR_INVCDB:
	case DERR_HARD:
		rwcmdp->status.status = DADKIO_STAT_HARDWARE_ERROR;
		break;
	case DERR_ICRC:
	default:
		rwcmdp->status.status = DADKIO_STAT_NOT_SUPPORTED;
	}

	if (rwcmdp->flags & DADKIO_FLAG_SILENT)
		return;
	gda_errmsg(dadkp->dad_sd, pktp, dadk_name, dadk_errtab[scb].d_severity,
	    rwcmdp->blkaddr, rwcmdp->status.failed_blk,
	    dadk_cmds, dadk_sense);
}
1528 
1529 /*ARGSUSED*/
1530 static void
1531 dadk_polldone(struct buf *bp)
1532 {
1533 	struct cmpkt *pktp;
1534 	struct dadk *dadkp;
1535 
1536 	pktp  = GDA_BP_PKT(bp);
1537 	dadkp = PKT2DADK(pktp);
1538 	mutex_enter(&dadkp->dad_cmd_mutex);
1539 	dadkp->dad_cmd_count--;
1540 	mutex_exit(&dadkp->dad_cmd_mutex);
1541 }
1542 
/*
 * Normal completion routine for queued commands.  If the request still
 * has bytes left and no error occurred, set up and transport the next
 * extent; otherwise release the packet, update the command count and
 * complete the buf.
 */
static void
dadk_iodone(struct buf *bp)
{
	struct cmpkt *pktp;
	struct dadk *dadkp;

	pktp  = GDA_BP_PKT(bp);
	dadkp = PKT2DADK(pktp);

	/* check for all iodone */
	pktp->cp_byteleft -= pktp->cp_bytexfer;
	if (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
		pktp->cp_retry = 0;
		(void) dadk_iosetup(dadkp, pktp);

		/* transport the next extent of this request */
		if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) == CTL_SEND_SUCCESS)
			return;
		/* transport failed: retry, or fall through to fail the buf */
		if ((dadk_ioretry(pktp, QUE_COMMAND)) == JUST_RETURN)
			return;
	}

	/* start next one */
	FLC_DEQUE(dadkp->dad_flcobjp, bp);

	/* free pkt */
	if (pktp->cp_private)
		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
	mutex_enter(&dadkp->dad_cmd_mutex);
	dadkp->dad_cmd_count--;
	mutex_exit(&dadkp->dad_cmd_mutex);
	biodone(bp);
}
1578 
/*
 * Block until the removable-media state differs from *state, then
 * return the new state in *state.  Starts the per-device watch thread
 * on first use.  Returns ENXIO for non-removable devices, EINTR if the
 * wait is interrupted by a signal, 0 otherwise.
 */
int
dadk_check_media(opaque_t objp, int *state)
{
	struct dadk *dadkp = (struct dadk *)objp;

	if (!dadkp->dad_rmb) {
		return (ENXIO);
	}
#ifdef DADK_DEBUG
	if (dadk_debug & DSTATE)
		PRF("dadk_check_media: user state %x disk state %x\n",
		    *state, dadkp->dad_iostate);
#endif
	/*
	 * If state already changed just return
	 */
	if (*state != dadkp->dad_iostate) {
		*state = dadkp->dad_iostate;
		return (0);
	}

	/*
	 * Startup polling on thread state
	 */
	mutex_enter(&dadkp->dad_mutex);
	if (dadkp->dad_thread_cnt == 0) {
		/*
		 * One thread per removable dadk device
		 */
		(void) thread_create(NULL, 0, dadk_watch_thread, dadkp, 0, &p0,
		    TS_RUN, v.v_maxsyspri - 2);
	}
	dadkp->dad_thread_cnt++;

	/*
	 * Wait for state to change; the watch thread broadcasts
	 * dad_state_cv whenever dad_iostate changes.
	 */
	do {
		if (cv_wait_sig(&dadkp->dad_state_cv, &dadkp->dad_mutex) == 0) {
			/* interrupted by a signal */
			dadkp->dad_thread_cnt--;
			mutex_exit(&dadkp->dad_mutex);
			return (EINTR);
		}
	} while (*state == dadkp->dad_iostate);
	*state = dadkp->dad_iostate;
	dadkp->dad_thread_cnt--;
	mutex_exit(&dadkp->dad_mutex);
	return (0);
}
1628 
1629 
1630 #define	MEDIA_ACCESS_DELAY 2000000
1631 
/*
 * Per-device media watch thread (started by dadk_check_media).  Polls
 * the drive state every dadk_check_media_time microseconds, broadcasts
 * dad_state_cv when the state changes, and exits once no waiters
 * remain (dad_thread_cnt drops to zero).
 */
static void
dadk_watch_thread(struct dadk *dadkp)
{
	enum dkio_state state;
	int interval;

	interval = drv_usectohz(dadk_check_media_time);

	do {
		if (dadk_rmb_ioctl(dadkp, DCMD_GET_STATE, (intptr_t)&state, 0,
		    DADK_SILENT)) {
			/*
			 * Poll failed: assume state remained the same
			 */
			state = dadkp->dad_iostate;
		}

		/*
		 * now signal the waiting thread if this is *not* the
		 * specified state;
		 * delay the signal if the state is DKIO_INSERTED
		 * to allow the target to recover
		 */
		if (state != dadkp->dad_iostate) {

			dadkp->dad_iostate = state;
			if (state == DKIO_INSERTED) {
				/*
				 * delay the signal to give the drive a chance
				 * to do what it apparently needs to do
				 */
				(void) timeout((void(*)(void *))cv_broadcast,
				    (void *)&dadkp->dad_state_cv,
				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
			} else {
				cv_broadcast(&dadkp->dad_state_cv);
			}
		}
		delay(interval);
	} while (dadkp->dad_thread_cnt);
}
1673 
1674 int
1675 dadk_inquiry(opaque_t objp, opaque_t *inqpp)
1676 {
1677 	struct dadk *dadkp = (struct dadk *)objp;
1678 	struct scsi_inquiry **sinqpp = (struct scsi_inquiry **)inqpp;
1679 
1680 	if (dadkp && dadkp->dad_sd && dadkp->dad_sd->sd_inq) {
1681 		*sinqpp = dadkp->dad_sd->sd_inq;
1682 		return (DDI_SUCCESS);
1683 	}
1684 
1685 	return (DDI_FAILURE);
1686 }
1687 
1688 static int
1689 dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags, int silent)
1690 
1691 {
1692 	struct buf *bp;
1693 	int err;
1694 	struct cmpkt *pktp;
1695 
1696 	if ((bp = getrbuf(KM_SLEEP)) == NULL) {
1697 		return (ENOMEM);
1698 	}
1699 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_rmb_iodone, NULL, NULL);
1700 	if (!pktp) {
1701 		freerbuf(bp);
1702 		return (ENOMEM);
1703 	}
1704 	bp->b_back  = (struct buf *)arg;
1705 	bp->b_forw  = (struct buf *)dadkp->dad_flcobjp;
1706 	pktp->cp_passthru = (opaque_t)(intptr_t)silent;
1707 
1708 	err = dadk_ctl_ioctl(dadkp, cmd, (uintptr_t)pktp, flags);
1709 	freerbuf(bp);
1710 	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
1711 	return (err);
1712 
1713 
1714 }
1715 
1716 static void
1717 dadk_rmb_iodone(struct buf *bp)
1718 {
1719 	struct cmpkt *pktp;
1720 	struct dadk *dadkp;
1721 
1722 	pktp  = GDA_BP_PKT(bp);
1723 	dadkp = PKT2DADK(pktp);
1724 
1725 	bp->b_flags &= ~(B_DONE|B_BUSY);
1726 
1727 	/* Start next one */
1728 	FLC_DEQUE(dadkp->dad_flcobjp, bp);
1729 
1730 	mutex_enter(&dadkp->dad_cmd_mutex);
1731 	dadkp->dad_cmd_count--;
1732 	mutex_exit(&dadkp->dad_cmd_mutex);
1733 	biodone(bp);
1734 }
1735 
1736 static int
1737 dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp, dev_t dev,
1738 	enum uio_seg dataspace, int rw)
1739 {
1740 	struct dadkio_rwcmd *rwcmdp = (struct dadkio_rwcmd *)cmdp;
1741 	struct buf	*bp;
1742 	struct iovec	aiov;
1743 	struct uio	auio;
1744 	struct uio	*uio = &auio;
1745 	int		status;
1746 
1747 	bp = getrbuf(KM_SLEEP);
1748 
1749 	bp->av_forw = bp->b_forw = (struct buf *)dadkp;
1750 	bp->b_back  = (struct buf *)rwcmdp;	/* ioctl packet */
1751 
1752 	bzero((caddr_t)&auio, sizeof (struct uio));
1753 	bzero((caddr_t)&aiov, sizeof (struct iovec));
1754 	aiov.iov_base = rwcmdp->bufaddr;
1755 	aiov.iov_len = rwcmdp->buflen;
1756 	uio->uio_iov = &aiov;
1757 
1758 	uio->uio_iovcnt = 1;
1759 	uio->uio_resid = rwcmdp->buflen;
1760 	uio->uio_segflg = dataspace;
1761 
1762 	/* Let physio do the rest... */
1763 	status = physio(dadk_dk_strategy, bp, dev, rw, dadkmin, uio);
1764 
1765 	freerbuf(bp);
1766 	return (status);
1767 
1768 }
1769 
/* Do not let a user gendisk request get too big or */
/* else we could use too many resources.	    */
1772 
/*
 * physio() minphys routine for gendisk passthru requests: clamp the
 * transfer size to dadk_dk_maxphys.
 */
static void
dadkmin(struct buf *bp)
{
	if (bp->b_bcount > dadk_dk_maxphys)
		bp->b_bcount = dadk_dk_maxphys;
}
1779 
1780 static int
1781 dadk_dk_strategy(struct buf *bp)
1782 {
1783 	dadk_dk((struct dadk *)bp->av_forw, (struct dadkio_rwcmd *)bp->b_back,
1784 	    bp);
1785 	return (0);
1786 }
1787 
1788 static void
1789 dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *rwcmdp, struct buf *bp)
1790 {
1791 	struct  cmpkt *pktp;
1792 
1793 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, NULL, NULL);
1794 	if (!pktp) {
1795 		bioerror(bp, ENOMEM);
1796 		biodone(bp);
1797 		return;
1798 	}
1799 
1800 	pktp->cp_passthru = rwcmdp;
1801 
1802 	(void) dadk_ioprep(dadkp, pktp);
1803 
1804 	mutex_enter(&dadkp->dad_cmd_mutex);
1805 	dadkp->dad_cmd_count++;
1806 	mutex_exit(&dadkp->dad_cmd_mutex);
1807 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
1808 }
1809 
1810 /*
1811  * There is no existing way to notify cmdk module
1812  * when the command completed, so add this function
1813  * to calculate how many on-going commands.
1814  */
1815 int
1816 dadk_getcmds(opaque_t objp)
1817 {
1818 	struct dadk *dadkp = (struct dadk *)objp;
1819 	int count;
1820 
1821 	mutex_enter(&dadkp->dad_cmd_mutex);
1822 	count = dadkp->dad_cmd_count;
1823 	mutex_exit(&dadkp->dad_cmd_mutex);
1824 	return (count);
1825 }
1826 
1827 /*
1828  * this function was used to calc the cmd for CTL_IOCTL
1829  */
1830 static int
1831 dadk_ctl_ioctl(struct dadk *dadkp, uint32_t cmd, uintptr_t arg, int flag)
1832 {
1833 	int error;
1834 	mutex_enter(&dadkp->dad_cmd_mutex);
1835 	dadkp->dad_cmd_count++;
1836 	mutex_exit(&dadkp->dad_cmd_mutex);
1837 	error = CTL_IOCTL(dadkp->dad_ctlobjp, cmd, arg, flag);
1838 	mutex_enter(&dadkp->dad_cmd_mutex);
1839 	dadkp->dad_cmd_count--;
1840 	mutex_exit(&dadkp->dad_cmd_mutex);
1841 	return (error);
1842 }
1843