xref: /titanic_44/usr/src/uts/intel/io/dktp/dcdev/dadk.c (revision e4b86885570d77af552e9cf94f142f4d744fb8c8)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Direct Attached Disk
31  */
32 
33 #include <sys/file.h>
34 #include <sys/scsi/scsi.h>
35 #include <sys/var.h>
36 #include <sys/proc.h>
37 #include <sys/dktp/cm.h>
38 #include <sys/vtoc.h>
39 #include <sys/dkio.h>
40 #include <sys/policy.h>
41 #include <sys/priv.h>
42 
43 #include <sys/dktp/dadev.h>
44 #include <sys/dktp/fctypes.h>
45 #include <sys/dktp/flowctrl.h>
46 #include <sys/dktp/tgcom.h>
47 #include <sys/dktp/tgdk.h>
48 #include <sys/dktp/bbh.h>
49 #include <sys/dktp/dadkio.h>
50 #include <sys/dktp/dadk.h>
51 #include <sys/cdio.h>
52 
53 /*
54  * Local Function Prototypes
55  */
56 static void dadk_restart(void *pktp);
57 static void dadk_pktcb(struct cmpkt *pktp);
58 static void dadk_iodone(struct buf *bp);
59 static void dadk_polldone(struct buf *bp);
60 static void dadk_setcap(struct dadk *dadkp);
61 static void dadk_create_errstats(struct dadk *dadkp, int instance);
62 static void dadk_destroy_errstats(struct dadk *dadkp);
63 
64 static int dadk_chkerr(struct cmpkt *pktp);
65 static int dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp);
66 static int dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp);
67 static int dadk_ioretry(struct cmpkt *pktp, int action);
68 
69 static struct cmpkt *dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp,
70     struct buf *bp, void (*cb_func)(struct buf *), int (*func)(caddr_t),
71     caddr_t arg);
72 
73 static int  dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t),
74     caddr_t arg);
75 static void dadk_transport(opaque_t com_data, struct buf *bp);
76 static int dadk_ctl_ioctl(struct dadk *, uint32_t, uintptr_t, int);
77 
/*
 * Communication object operations vector handed to the flow-control
 * object (see dadk_init): only packet allocation (dadk_pkt) and
 * transport (dadk_transport) are provided; the unused slots are nodev.
 */
struct tgcom_objops dadk_com_ops = {
	nodev,
	nodev,
	dadk_pkt,
	dadk_transport,
	0, 0
};
85 
86 /*
87  * architecture dependent allocation restrictions for dadk_iob_alloc(). For
88  * x86, we'll set dma_attr_addr_hi to dadk_max_phys_addr and dma_attr_sgllen
89  * to dadk_sgl_size during _init().
90  */
#if defined(__sparc)
static ddi_dma_attr_t dadk_alloc_attr = {
	DMA_ATTR_V0,	/* version number */
	0x0,		/* lowest usable address */
	0xFFFFFFFFull,	/* high DMA address range */
	0xFFFFFFFFull,	/* DMA counter register */
	1,		/* DMA address alignment */
	1,		/* DMA burstsizes */
	1,		/* min effective DMA size */
	0xFFFFFFFFull,	/* max DMA xfer size */
	0xFFFFFFFFull,	/* segment boundary */
	1,		/* s/g list length */
	512,		/* granularity of device */
	0,		/* DMA transfer flags */
};
#elif defined(__x86)
static ddi_dma_attr_t dadk_alloc_attr = {
	DMA_ATTR_V0,	/* version number */
	0x0,		/* lowest usable address */
	0x0,		/* high DMA address range [set in _init()] */
	0xFFFFull,	/* DMA counter register */
	512,		/* DMA address alignment */
	1,		/* DMA burstsizes */
	1,		/* min effective DMA size */
	0xFFFFFFFFull,	/* max DMA xfer size */
	0xFFFFFFFFull,	/* segment boundary */
	0,		/* s/g list length [set in _init()] */
	512,		/* granularity of device */
	0,		/* DMA transfer flags */
};

/* tunables copied into dadk_alloc_attr by _init() on x86 */
uint64_t dadk_max_phys_addr = 0xFFFFFFFFull;
int dadk_sgl_size = 0xFF;
#endif
125 
126 static int dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags,
127     int silent);
128 static void dadk_rmb_iodone(struct buf *bp);
129 
130 static int dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp,
131     dev_t dev, enum uio_seg dataspace, int rw);
132 static void dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *scmdp,
133     struct buf *bp);
134 static void dadkmin(struct buf *bp);
135 static int dadk_dk_strategy(struct buf *bp);
136 static void dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp);
137 
/*
 * Target-disk operations vector exported through the tgdk_obj
 * created by dadk_create(); callers reach every dadk entry point
 * through this table.
 */
struct tgdk_objops dadk_ops = {
	dadk_init,
	dadk_free,
	dadk_probe,
	dadk_attach,
	dadk_open,
	dadk_close,
	dadk_ioctl,
	dadk_strategy,
	dadk_setgeom,
	dadk_getgeom,
	dadk_iob_alloc,
	dadk_iob_free,
	dadk_iob_htoc,
	dadk_iob_xfer,
	dadk_dump,
	dadk_getphygeom,
	dadk_set_bbhobj,
	dadk_check_media,
	dadk_inquiry,
	dadk_cleanup,
	0
};
161 
162 /*
163  * Local static data
164  */
165 
/* debug-mask bits for dadk_debug (compiled in only with DADK_DEBUG) */
#ifdef	DADK_DEBUG
#define	DENT	0x0001
#define	DERR	0x0002
#define	DIO	0x0004
#define	DGEOM	0x0010
#define	DSTATE  0x0020
static	int	dadk_debug = DGEOM;

#endif	/* DADK_DEBUG */

static int dadk_check_media_time = 3000000;	/* 3 Second State Check */
/* presumably caps DIOCTL_RWCMD transfer size (used by dadkmin) — not visible here */
static int dadk_dk_maxphys = 0x80000;
178 
/*
 * Command-name table used for error reporting: the entry at index N
 * names command DCMD_* == N, and each string's first byte is that
 * command number (in octal).
 */
static char	*dadk_cmds[] = {
	"\000Unknown",			/* unknown 		*/
	"\001read sector",		/* DCMD_READ 1		*/
	"\002write sector",		/* DCMD_WRITE 2		*/
	"\003format track",		/* DCMD_FMTTRK 3	*/
	"\004format whole drive",	/* DCMD_FMTDRV 4	*/
	"\005recalibrate",		/* DCMD_RECAL  5	*/
	"\006seek sector",		/* DCMD_SEEK   6	*/
	"\007read verify",		/* DCMD_RDVER  7	*/
	"\010read defect list",		/* DCMD_GETDEF 8	*/
	"\011lock door",		/* DCMD_LOCK   9	*/
	"\012unlock door",		/* DCMD_UNLOCK 10	*/
	"\013start motor",		/* DCMD_START_MOTOR 11	*/
	"\014stop motor",		/* DCMD_STOP_MOTOR 12	*/
	"\015eject",			/* DCMD_EJECT  13	*/
	"\016update geometry",		/* DCMD_UPDATE_GEOM  14	*/
	"\017get state",		/* DCMD_GET_STATE  15	*/
	"\020cdrom pause",		/* DCMD_PAUSE  16	*/
	"\021cdrom resume",		/* DCMD_RESUME  17	*/
	"\022cdrom play track index",	/* DCMD_PLAYTRKIND  18	*/
	"\023cdrom play msf",		/* DCMD_PLAYMSF  19	*/
	"\024cdrom sub channel",	/* DCMD_SUBCHNL  20	*/
	"\025cdrom read mode 1",	/* DCMD_READMODE1  21	*/
	"\026cdrom read toc header",	/* DCMD_READTOCHDR  22	*/
	"\027cdrom read toc entry",	/* DCMD_READTOCENT  23	*/
	"\030cdrom read offset",	/* DCMD_READOFFSET  24	*/
	"\031cdrom read mode 2",	/* DCMD_READMODE2  25	*/
	"\032cdrom volume control",	/* DCMD_VOLCTRL  26	*/
	"\033flush cache",		/* DCMD_FLUSH_CACHE  27	*/
	NULL
};
210 
/*
 * Sense-key name table used for error reporting: the entry at index N
 * names error DERR_* == N, and each string's first byte is that error
 * number (in octal).
 */
static char *dadk_sense[] = {
	"\000Success",			/* DERR_SUCCESS		*/
	"\001address mark not found",	/* DERR_AMNF		*/
	"\002track 0 not found",	/* DERR_TKONF		*/
	"\003aborted command",		/* DERR_ABORT		*/
	"\004write fault",		/* DERR_DWF		*/
	"\005ID not found",		/* DERR_IDNF		*/
	"\006drive busy",		/* DERR_BUSY		*/
	"\007uncorrectable data error",	/* DERR_UNC		*/
	"\010bad block detected",	/* DERR_BBK		*/
	"\011invalid command",		/* DERR_INVCDB		*/
	"\012device hard error",	/* DERR_HARD		*/
	"\013illegal length indicated", /* DERR_ILI		*/
	"\014end of media",		/* DERR_EOM		*/
	"\015media change requested",	/* DERR_MCR		*/
	"\016recovered from error",	/* DERR_RECOVER		*/
	"\017device not ready",		/* DERR_NOTREADY	*/
	"\020medium error",		/* DERR_MEDIUM		*/
	"\021hardware error",		/* DERR_HW		*/
	"\022illegal request",		/* DERR_ILL		*/
	"\023unit attention",		/* DERR_UNIT_ATTN	*/
	"\024data protection",		/* DERR_DATA_PROT	*/
	"\025miscompare",		/* DERR_MISCOMPARE	*/
	"\026ICRC error during UDMA",	/* DERR_ICRC		*/
	"\027reserved",			/* DERR_RESV		*/
	NULL
};
238 
239 static char *dadk_name = "Disk";
240 
241 /*
242  *	This is the loadable module wrapper
243  */
244 #include <sys/modctl.h>
245 
246 extern struct mod_ops mod_miscops;
247 
/* misc-type module linkage: this module exports no dev_ops of its own */
static struct modlmisc modlmisc = {
	&mod_miscops,	/* Type of module */
	"Direct Attached Disk"
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};
256 
/*
 * _init - loadable-module entry point.
 * On x86, patch the iob DMA attributes with the tunable maximum
 * physical address and scatter/gather length before registering
 * the module with mod_install().
 */
int
_init(void)
{
#ifdef DADK_DEBUG
	if (dadk_debug & DENT)
		PRF("dadk_init: call\n");
#endif

#if defined(__x86)
	/* set the max physical address for iob allocs on x86 */
	dadk_alloc_attr.dma_attr_addr_hi = dadk_max_phys_addr;

	/*
	 * set the sgllen for iob allocs on x86. If this is set less than
	 * the number of pages the buffer will take (taking into account
	 * alignment), it would force the allocator to try and allocate
	 * contiguous pages.
	 */
	dadk_alloc_attr.dma_attr_sgllen = dadk_sgl_size;
#endif

	return (mod_install(&modlinkage));
}
280 
/*
 * _fini - loadable-module exit point; unload via mod_remove().
 */
int
_fini(void)
{
#ifdef DADK_DEBUG
	if (dadk_debug & DENT)
		PRF("dadk_fini: call\n");
#endif

	return (mod_remove(&modlinkage));
}
291 
/*
 * _info - return module information via mod_info().
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
297 
298 struct tgdk_obj *
299 dadk_create()
300 {
301 	struct tgdk_obj *dkobjp;
302 	struct dadk *dadkp;
303 
304 	dkobjp = kmem_zalloc((sizeof (*dkobjp) + sizeof (*dadkp)), KM_NOSLEEP);
305 	if (!dkobjp)
306 		return (NULL);
307 	dadkp = (struct dadk *)(dkobjp+1);
308 
309 	dkobjp->tg_ops  = (struct  tgdk_objops *)&dadk_ops;
310 	dkobjp->tg_data = (opaque_t)dadkp;
311 	dkobjp->tg_ext = &(dkobjp->tg_extblk);
312 	dadkp->dad_extp = &(dkobjp->tg_extblk);
313 
314 #ifdef DADK_DEBUG
315 	if (dadk_debug & DENT)
316 		PRF("dadk_create: tgdkobjp= 0x%x dadkp= 0x%x\n", dkobjp, dadkp);
317 #endif
318 	return (dkobjp);
319 }
320 
321 int
322 dadk_init(opaque_t objp, opaque_t devp, opaque_t flcobjp, opaque_t queobjp,
323 	opaque_t bbhobjp, void *lkarg)
324 {
325 	struct dadk *dadkp = (struct dadk *)objp;
326 	struct scsi_device *sdevp = (struct scsi_device *)devp;
327 
328 	dadkp->dad_sd = devp;
329 	dadkp->dad_ctlobjp = (opaque_t)sdevp->sd_address.a_hba_tran;
330 	sdevp->sd_private = (caddr_t)dadkp;
331 
332 	/* initialize the communication object */
333 	dadkp->dad_com.com_data = (opaque_t)dadkp;
334 	dadkp->dad_com.com_ops  = &dadk_com_ops;
335 
336 	dadkp->dad_bbhobjp = bbhobjp;
337 	BBH_INIT(bbhobjp);
338 
339 	dadkp->dad_flcobjp = flcobjp;
340 	mutex_init(&dadkp->dad_cmd_mutex, NULL, MUTEX_DRIVER, NULL);
341 	dadkp->dad_cmd_count = 0;
342 	return (FLC_INIT(flcobjp, &(dadkp->dad_com), queobjp, lkarg));
343 }
344 
345 int
346 dadk_free(struct tgdk_obj *dkobjp)
347 {
348 	TGDK_CLEANUP(dkobjp);
349 	kmem_free(dkobjp, (sizeof (*dkobjp) + sizeof (struct dadk)));
350 
351 	return (DDI_SUCCESS);
352 }
353 
354 void
355 dadk_cleanup(struct tgdk_obj *dkobjp)
356 {
357 	struct dadk *dadkp;
358 
359 	dadkp = (struct dadk *)(dkobjp->tg_data);
360 	if (dadkp->dad_sd)
361 		dadkp->dad_sd->sd_private = NULL;
362 	if (dadkp->dad_bbhobjp) {
363 		BBH_FREE(dadkp->dad_bbhobjp);
364 		dadkp->dad_bbhobjp = NULL;
365 	}
366 	if (dadkp->dad_flcobjp) {
367 		FLC_FREE(dadkp->dad_flcobjp);
368 		dadkp->dad_flcobjp = NULL;
369 	}
370 	mutex_destroy(&dadkp->dad_cmd_mutex);
371 }
372 
373 /* ARGSUSED */
374 int
375 dadk_probe(opaque_t objp, int kmsflg)
376 {
377 	struct dadk *dadkp = (struct dadk *)objp;
378 	struct scsi_device *devp;
379 	char   name[80];
380 
381 	devp = dadkp->dad_sd;
382 	if (!devp->sd_inq || (devp->sd_inq->inq_dtype == DTYPE_NOTPRESENT) ||
383 	    (devp->sd_inq->inq_dtype == DTYPE_UNKNOWN)) {
384 		return (DDI_PROBE_FAILURE);
385 	}
386 
387 	switch (devp->sd_inq->inq_dtype) {
388 		case DTYPE_DIRECT:
389 			dadkp->dad_ctype = DKC_DIRECT;
390 			dadkp->dad_extp->tg_nodetype = DDI_NT_BLOCK;
391 			dadkp->dad_extp->tg_ctype = DKC_DIRECT;
392 			break;
393 		case DTYPE_RODIRECT: /* eg cdrom */
394 			dadkp->dad_ctype = DKC_CDROM;
395 			dadkp->dad_extp->tg_rdonly = 1;
396 			dadkp->dad_rdonly = 1;
397 			dadkp->dad_cdrom = 1;
398 			dadkp->dad_extp->tg_nodetype = DDI_NT_CD;
399 			dadkp->dad_extp->tg_ctype = DKC_CDROM;
400 			break;
401 		case DTYPE_WORM:
402 		case DTYPE_OPTICAL:
403 		default:
404 			return (DDI_PROBE_FAILURE);
405 	}
406 
407 	dadkp->dad_extp->tg_rmb = dadkp->dad_rmb = devp->sd_inq->inq_rmb;
408 
409 	dadkp->dad_secshf = SCTRSHFT;
410 	dadkp->dad_blkshf = 0;
411 
412 	/* display the device name */
413 	(void) strcpy(name, "Vendor '");
414 	gda_inqfill((caddr_t)devp->sd_inq->inq_vid, 8, &name[strlen(name)]);
415 	(void) strcat(name, "' Product '");
416 	gda_inqfill((caddr_t)devp->sd_inq->inq_pid, 16, &name[strlen(name)]);
417 	(void) strcat(name, "'");
418 	gda_log(devp->sd_dev, dadk_name, CE_NOTE, "!<%s>\n", name);
419 
420 	return (DDI_PROBE_SUCCESS);
421 }
422 
423 
/*
 * dadk_attach - target-disk attach entry point.
 * No per-attach work is required for dadk; always succeeds.
 */
/* ARGSUSED */
int
dadk_attach(opaque_t objp)
{
	return (DDI_SUCCESS);
}
430 
431 int
432 dadk_set_bbhobj(opaque_t objp, opaque_t bbhobjp)
433 {
434 	struct dadk *dadkp = (struct dadk *)objp;
435 	/* free the old bbh object */
436 	if (dadkp->dad_bbhobjp)
437 		BBH_FREE(dadkp->dad_bbhobjp);
438 
439 	/* initialize the new bbh object */
440 	dadkp->dad_bbhobjp = bbhobjp;
441 	BBH_INIT(bbhobjp);
442 
443 	return (DDI_SUCCESS);
444 }
445 
/*
 * dadk_open - prepare the disk for use on open(9E).
 *
 * Fixed disks with an already-known capacity just (re)start kstat
 * profiling.  Removable media are spun up, door-locked, and have their
 * geometry refreshed first.  Otherwise the write-cache state and the
 * logical and physical geometries are fetched from the controller; a
 * zero capacity in either geometry fails the open.
 */
/* ARGSUSED */
int
dadk_open(opaque_t objp, int flag)
{
	struct dadk *dadkp = (struct dadk *)objp;
	int error;
	int wce;

	if (!dadkp->dad_rmb) {
		/* fixed disk: geometry already valid -> just restart stats */
		if (dadkp->dad_phyg.g_cap) {
			FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
			    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));
			return (DDI_SUCCESS);
		}
	} else {
		/* removable: publish "no media" while the drive spins up */
		mutex_enter(&dadkp->dad_mutex);
		dadkp->dad_iostate = DKIO_NONE;
		cv_broadcast(&dadkp->dad_state_cv);
		mutex_exit(&dadkp->dad_mutex);

		if (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0, 0,
		    DADK_SILENT) ||
		    dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT) ||
		    dadk_rmb_ioctl(dadkp, DCMD_UPDATE_GEOM, 0, 0,
		    DADK_SILENT)) {
			return (DDI_FAILURE);
		}

		/* media present; wake any waiters watching the media state */
		mutex_enter(&dadkp->dad_mutex);
		dadkp->dad_iostate = DKIO_INSERTED;
		cv_broadcast(&dadkp->dad_state_cv);
		mutex_exit(&dadkp->dad_mutex);
	}

	/*
	 * get write cache enable state
	 * If there is an error, must assume that write cache
	 * is enabled.
	 * NOTE: Since there is currently no Solaris mechanism to
	 * change the state of the Write Cache Enable feature,
	 * this code just checks the value of the WCE bit
	 * obtained at device init time.  If a mechanism
	 * is added to the driver to change WCE, dad_wce
	 * must be updated appropriately.
	 */
	error = dadk_ctl_ioctl(dadkp, DIOCTL_GETWCE,
	    (uintptr_t)&wce, FKIOCTL | FNATIVE);
	mutex_enter(&dadkp->dad_mutex);
	dadkp->dad_wce = (error != 0) || (wce != 0);
	mutex_exit(&dadkp->dad_mutex);

	/* logical disk geometry */
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETGEOM,
	    (uintptr_t)&dadkp->dad_logg, FKIOCTL | FNATIVE);
	if (dadkp->dad_logg.g_cap == 0)
		return (DDI_FAILURE);

	/* get physical disk geometry */
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETPHYGEOM,
	    (uintptr_t)&dadkp->dad_phyg, FKIOCTL | FNATIVE);
	if (dadkp->dad_phyg.g_cap == 0)
		return (DDI_FAILURE);

	/* derive sector/block shift factors from the physical geometry */
	dadk_setcap(dadkp);

	dadk_create_errstats(dadkp,
	    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));

	/* start profiling */
	FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
	    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));

	return (DDI_SUCCESS);
}
520 
521 static void
522 dadk_setcap(struct dadk *dadkp)
523 {
524 	int	 totsize;
525 	int	 i;
526 
527 	totsize = dadkp->dad_phyg.g_secsiz;
528 
529 	if (totsize == 0) {
530 		if (dadkp->dad_cdrom) {
531 			totsize = 2048;
532 		} else {
533 			totsize = NBPSCTR;
534 		}
535 	} else {
536 		/* Round down sector size to multiple of 512B */
537 		totsize &= ~(NBPSCTR-1);
538 	}
539 	dadkp->dad_phyg.g_secsiz = totsize;
540 
541 	/* set sec,block shift factor - (512->0, 1024->1, 2048->2, etc.) */
542 	totsize >>= SCTRSHFT;
543 	for (i = 0; totsize != 1; i++, totsize >>= 1)
544 		;
545 	dadkp->dad_blkshf = i;
546 	dadkp->dad_secshf = i + SCTRSHFT;
547 }
548 
549 
/*
 * dadk_create_errstats - create and install the "cmdkN,error" named
 * kstat for this instance and seed it with the device identity
 * (model, serial) and capacity.  Silently does nothing if the kstat
 * already exists or cannot be created.
 */
static void
dadk_create_errstats(struct dadk *dadkp, int instance)
{
	dadk_errstats_t *dep;
	char kstatname[KSTAT_STRLEN];
	dadk_ioc_string_t dadk_ioc_string;

	/* already created for this instance */
	if (dadkp->dad_errstats)
		return;

	(void) sprintf(kstatname, "cmdk%d,error", instance);
	dadkp->dad_errstats = kstat_create("cmdkerror", instance,
	    kstatname, "device_error", KSTAT_TYPE_NAMED,
	    sizeof (dadk_errstats_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_PERSISTENT);

	if (!dadkp->dad_errstats)
		return;

	dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;

	kstat_named_init(&dep->dadk_softerrs,
	    "Soft Errors", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_harderrs,
	    "Hard Errors", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_transerrs,
	    "Transport Errors", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_model,
	    "Model", KSTAT_DATA_CHAR);
	kstat_named_init(&dep->dadk_revision,
	    "Revision", KSTAT_DATA_CHAR);
	kstat_named_init(&dep->dadk_serial,
	    "Serial No", KSTAT_DATA_CHAR);
	kstat_named_init(&dep->dadk_capacity,
	    "Size", KSTAT_DATA_ULONGLONG);
	kstat_named_init(&dep->dadk_rq_media_err,
	    "Media Error", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_ntrdy_err,
	    "Device Not Ready", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_nodev_err,
	    "No Device", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_recov_err,
	    "Recoverable", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_illrq_err,
	    "Illegal Request", KSTAT_DATA_UINT32);

	dadkp->dad_errstats->ks_private = dep;
	dadkp->dad_errstats->ks_update = nulldev;
	kstat_install(dadkp->dad_errstats);

	/* get model */
	dep->dadk_model.value.c[0] = 0;
	dadk_ioc_string.is_buf = &dep->dadk_model.value.c[0];
	dadk_ioc_string.is_size = sizeof (dep->dadk_model.value.c);
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETMODEL,
	    (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);

	/* get serial */
	dep->dadk_serial.value.c[0] = 0;
	dadk_ioc_string.is_buf = &dep->dadk_serial.value.c[0];
	dadk_ioc_string.is_size = sizeof (dep->dadk_serial.value.c);
	(void) dadk_ctl_ioctl(dadkp, DIOCTL_GETSERIAL,
	    (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);

	/* Get revision (not fetched from the device; left empty) */
	dep->dadk_revision.value.c[0] = 0;

	/* Get capacity */

	dep->dadk_capacity.value.ui64 =
	    (uint64_t)dadkp->dad_logg.g_cap *
	    (uint64_t)dadkp->dad_logg.g_secsiz;
}
623 
624 
625 int
626 dadk_close(opaque_t objp)
627 {
628 	struct dadk *dadkp = (struct dadk *)objp;
629 
630 	if (dadkp->dad_rmb) {
631 		(void) dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0, 0,
632 		    DADK_SILENT);
633 		(void) dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT);
634 	}
635 	FLC_STOP_KSTAT(dadkp->dad_flcobjp);
636 
637 	dadk_destroy_errstats(dadkp);
638 
639 	return (DDI_SUCCESS);
640 }
641 
642 static void
643 dadk_destroy_errstats(struct dadk *dadkp)
644 {
645 	if (!dadkp->dad_errstats)
646 		return;
647 
648 	kstat_delete(dadkp->dad_errstats);
649 	dadkp->dad_errstats = NULL;
650 }
651 
652 
653 int
654 dadk_strategy(opaque_t objp, struct buf *bp)
655 {
656 	struct dadk *dadkp = (struct dadk *)objp;
657 
658 	if (dadkp->dad_rdonly && !(bp->b_flags & B_READ)) {
659 		bioerror(bp, EROFS);
660 		return (DDI_FAILURE);
661 	}
662 
663 	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
664 		bioerror(bp, ENXIO);
665 		return (DDI_FAILURE);
666 	}
667 
668 	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));
669 	mutex_enter(&dadkp->dad_cmd_mutex);
670 	dadkp->dad_cmd_count++;
671 	mutex_exit(&dadkp->dad_cmd_mutex);
672 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
673 
674 	return (DDI_SUCCESS);
675 }
676 
/*
 * dadk_dump - polled write path used for crash dumps.
 *
 * Runs with interrupts unusable (CPF_NOINTR): the request is split
 * into controller-sized chunks, each transported and polled to
 * completion (dadk_polldone) before the next is set up.  Early
 * validation failures return DDI_FAILURE with the error in the buf;
 * transfer errors simply terminate the loop and are left in the buf.
 */
int
dadk_dump(opaque_t objp, struct buf *bp)
{
	struct dadk *dadkp = (struct dadk *)objp;
	struct cmpkt *pktp;

	if (dadkp->dad_rdonly) {
		bioerror(bp, EROFS);
		return (DDI_FAILURE);
	}

	/* the transfer must be a whole number of sectors */
	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
		bioerror(bp, ENXIO);
		return (DDI_FAILURE);
	}

	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));

	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_polldone, NULL, NULL);
	if (!pktp) {
		cmn_err(CE_WARN, "no resources for dumping");
		bioerror(bp, EIO);
		return (DDI_FAILURE);
	}
	/* polled mode: no interrupts available during a crash dump */
	pktp->cp_flags |= CPF_NOINTR;

	(void) dadk_ioprep(dadkp, pktp);
	dadk_transport(dadkp, bp);
	pktp->cp_byteleft -= pktp->cp_bytexfer;

	/* continue chunk by chunk until done or an error is recorded */
	while (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
		(void) dadk_iosetup(dadkp, pktp);
		dadk_transport(dadkp, bp);
		pktp->cp_byteleft -= pktp->cp_bytexfer;
	}

	if (pktp->cp_private)
		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
	return (DDI_SUCCESS);
}
718 
/*
 * dadk_ioctl - ioctl(9E) dispatch for the target disk.
 *
 * The first switch handles commands common to all device types
 * (defect-list read, raw read/write pass-through, firmware update,
 * write-cache flush); its default clause forwards any other command
 * straight to the controller for non-removable disks.  Removable
 * devices fall through to the second switch, which services
 * door-lock/eject/motor commands directly and maps CD-ROM audio
 * ioctls onto DCMD_* codes for dadk_rmb_ioctl().
 */
/* ARGSUSED  */
int
dadk_ioctl(opaque_t objp, dev_t dev, int cmd, intptr_t arg, int flag,
	cred_t *cred_p, int *rval_p)
{
	struct dadk *dadkp = (struct dadk *)objp;

	switch (cmd) {
	case DKIOCGETDEF:
		{
		struct buf	*bp;
		int		err, head;
		unsigned char	*secbuf;
		STRUCT_DECL(defect_header, adh);

		STRUCT_INIT(adh, flag & FMODELS);

		/*
		 * copyin header ....
		 * yields head number and buffer address
		 */
		if (ddi_copyin((caddr_t)arg, STRUCT_BUF(adh), STRUCT_SIZE(adh),
		    flag))
			return (EFAULT);
		head = STRUCT_FGET(adh, head);
		if (head < 0 || head >= dadkp->dad_phyg.g_head)
			return (ENXIO);
		secbuf = kmem_zalloc(NBPSCTR, KM_SLEEP);
		if (!secbuf)
			return (ENOMEM);
		bp = getrbuf(KM_SLEEP);
		if (!bp) {
			kmem_free(secbuf, NBPSCTR);
			return (ENOMEM);
		}

		/*
		 * Build a one-sector read; b_forw/b_back smuggle the
		 * dadk pointer and the DCMD_GETDEF command to the
		 * strategy path.
		 */
		bp->b_edev = dev;
		bp->b_dev  = cmpdev(dev);
		bp->b_flags = B_BUSY;
		bp->b_resid = 0;
		bp->b_bcount = NBPSCTR;
		bp->b_un.b_addr = (caddr_t)secbuf;
		bp->b_blkno = head; /* I had to put it somwhere! */
		bp->b_forw = (struct buf *)dadkp;
		bp->b_back = (struct buf *)DCMD_GETDEF;

		mutex_enter(&dadkp->dad_cmd_mutex);
		dadkp->dad_cmd_count++;
		mutex_exit(&dadkp->dad_cmd_mutex);
		FLC_ENQUE(dadkp->dad_flcobjp, bp);
		err = biowait(bp);
		if (!err) {
			if (ddi_copyout((caddr_t)secbuf,
			    STRUCT_FGETP(adh, buffer), NBPSCTR, flag))
				err = ENXIO;
		}
		kmem_free(secbuf, NBPSCTR);
		freerbuf(bp);
		return (err);
		}
	case DIOCTL_RWCMD:
		{
		struct dadkio_rwcmd *rwcmdp;
		int status, rw;

		/*
		 * copied in by cmdk and, if necessary, converted to the
		 * correct datamodel
		 */
		rwcmdp = (struct dadkio_rwcmd *)(intptr_t)arg;

		/*
		 * handle the complex cases here; we pass these
		 * through to the driver, which will queue them and
		 * handle the requests asynchronously.  The simpler
		 * cases ,which can return immediately, fail here, and
		 * the request reverts to the dadk_ioctl routine, while
		 *  will reroute them directly to the ata driver.
		 */
		switch (rwcmdp->cmd) {
			case DADKIO_RWCMD_READ :
				/*FALLTHROUGH*/
			case DADKIO_RWCMD_WRITE:
				rw = ((rwcmdp->cmd == DADKIO_RWCMD_WRITE) ?
				    B_WRITE : B_READ);
				status = dadk_dk_buf_setup(dadkp,
				    (opaque_t)rwcmdp, dev, ((flag &FKIOCTL) ?
				    UIO_SYSSPACE : UIO_USERSPACE), rw);
				return (status);
			default:
				return (EINVAL);
			}
		}
	case DKIOC_UPDATEFW:

		/*
		 * Require PRIV_ALL privilege to invoke DKIOC_UPDATEFW
		 * to protect the firmware update from malicious use
		 */
		if (PRIV_POLICY(cred_p, PRIV_ALL, B_FALSE, EPERM, NULL) != 0)
			return (EPERM);
		else
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));

	case DKIOCFLUSHWRITECACHE:
		{
			struct buf *bp;
			int err = 0;
			struct dk_callback *dkc = (struct dk_callback *)arg;
			struct cmpkt *pktp;
			int is_sync = 1;

			/*
			 * Nothing to flush if the device does not
			 * support flushing (dad_noflush) or the write
			 * cache is not enabled.
			 */
			mutex_enter(&dadkp->dad_mutex);
			if (dadkp->dad_noflush || !  dadkp->dad_wce) {
				err = dadkp->dad_noflush ? ENOTSUP : 0;
				mutex_exit(&dadkp->dad_mutex);
				/*
				 * If a callback was requested: a
				 * callback will always be done if the
				 * caller saw the DKIOCFLUSHWRITECACHE
				 * ioctl return 0, and never done if the
				 * caller saw the ioctl return an error.
				 */
				if ((flag & FKIOCTL) && dkc != NULL &&
				    dkc->dkc_callback != NULL) {
					(*dkc->dkc_callback)(dkc->dkc_cookie,
					    err);
					/*
					 * Did callback and reported error.
					 * Since we did a callback, ioctl
					 * should return 0.
					 */
					err = 0;
				}
				return (err);
			}
			mutex_exit(&dadkp->dad_mutex);

			bp = getrbuf(KM_SLEEP);

			bp->b_edev = dev;
			bp->b_dev  = cmpdev(dev);
			bp->b_flags = B_BUSY;
			bp->b_resid = 0;
			bp->b_bcount = 0;
			SET_BP_SEC(bp, 0);

			/*
			 * A kernel caller with a completion callback
			 * gets an asynchronous flush: copy the
			 * callback so it survives until b_iodone.
			 */
			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				struct dk_callback *dkc2 =
				    (struct dk_callback *)kmem_zalloc(
				    sizeof (struct dk_callback), KM_SLEEP);

				bcopy(dkc, dkc2, sizeof (*dkc2));
				/*
				 * Borrow b_list to carry private data
				 * to the b_iodone func.
				 */
				bp->b_list = (struct buf *)dkc2;
				bp->b_iodone = dadk_flushdone;
				is_sync = 0;
			}

			/*
			 * Setup command pkt
			 * dadk_pktprep() can't fail since DDI_DMA_SLEEP set
			 */
			pktp = dadk_pktprep(dadkp, NULL, bp,
			    dadk_iodone, DDI_DMA_SLEEP, NULL);

			pktp->cp_time = DADK_FLUSH_CACHE_TIME;

			*((char *)(pktp->cp_cdbp)) = DCMD_FLUSH_CACHE;
			pktp->cp_byteleft = 0;
			pktp->cp_private = NULL;
			pktp->cp_secleft = 0;
			pktp->cp_srtsec = -1;
			pktp->cp_bytexfer = 0;

			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);

			mutex_enter(&dadkp->dad_cmd_mutex);
			dadkp->dad_cmd_count++;
			mutex_exit(&dadkp->dad_cmd_mutex);
			FLC_ENQUE(dadkp->dad_flcobjp, bp);

			/* synchronous flush: wait here; async frees in iodone */
			if (is_sync) {
				err = biowait(bp);
				freerbuf(bp);
			}
			return (err);
		}
	default:
		/*
		 * Non-removable disks: pass anything else straight
		 * through to the controller driver.
		 */
		if (!dadkp->dad_rmb)
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));
	}

	/* removable-media / CD-ROM commands only from here on */
	switch (cmd) {
	case CDROMSTOP:
		return (dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0,
		    0, DADK_SILENT));
	case CDROMSTART:
		return (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0,
		    0, DADK_SILENT));
	case DKIOCLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT));
	case DKIOCUNLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT));
	case DKIOCEJECT:
	case CDROMEJECT:
		{
			int ret;

			/* the door must be unlocked before ejecting */
			if (ret = dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0,
			    DADK_SILENT)) {
				return (ret);
			}
			if (ret = dadk_rmb_ioctl(dadkp, DCMD_EJECT, 0, 0,
			    DADK_SILENT)) {
				return (ret);
			}
			mutex_enter(&dadkp->dad_mutex);
			dadkp->dad_iostate = DKIO_EJECTED;
			cv_broadcast(&dadkp->dad_state_cv);
			mutex_exit(&dadkp->dad_mutex);

			return (0);

		}
	/*
	 * NOTE: the case labels below the default are still matched
	 * normally; the default only catches commands listed nowhere
	 * in this switch.
	 */
	default:
		return (ENOTTY);
	/*
	 * cdrom audio commands
	 */
	case CDROMPAUSE:
		cmd = DCMD_PAUSE;
		break;
	case CDROMRESUME:
		cmd = DCMD_RESUME;
		break;
	case CDROMPLAYMSF:
		cmd = DCMD_PLAYMSF;
		break;
	case CDROMPLAYTRKIND:
		cmd = DCMD_PLAYTRKIND;
		break;
	case CDROMREADTOCHDR:
		cmd = DCMD_READTOCHDR;
		break;
	case CDROMREADTOCENTRY:
		cmd = DCMD_READTOCENT;
		break;
	case CDROMVOLCTRL:
		cmd = DCMD_VOLCTRL;
		break;
	case CDROMSUBCHNL:
		cmd = DCMD_SUBCHNL;
		break;
	case CDROMREADMODE2:
		cmd = DCMD_READMODE2;
		break;
	case CDROMREADMODE1:
		cmd = DCMD_READMODE1;
		break;
	case CDROMREADOFFSET:
		cmd = DCMD_READOFFSET;
		break;
	}
	return (dadk_rmb_ioctl(dadkp, cmd, arg, flag, 0));
}
989 
990 int
991 dadk_flushdone(struct buf *bp)
992 {
993 	struct dk_callback *dkc = (struct dk_callback *)bp->b_list;
994 
995 	ASSERT(dkc != NULL && dkc->dkc_callback != NULL);
996 
997 	(*dkc->dkc_callback)(dkc->dkc_cookie, geterror(bp));
998 
999 	kmem_free(dkc, sizeof (*dkc));
1000 	freerbuf(bp);
1001 	return (0);
1002 }
1003 
1004 int
1005 dadk_getphygeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
1006 {
1007 	struct dadk *dadkp = (struct dadk *)objp;
1008 
1009 	bcopy((caddr_t)&dadkp->dad_phyg, (caddr_t)dkgeom_p,
1010 	    sizeof (struct tgdk_geom));
1011 	return (DDI_SUCCESS);
1012 }
1013 
1014 int
1015 dadk_getgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
1016 {
1017 	struct dadk *dadkp = (struct dadk *)objp;
1018 	bcopy((caddr_t)&dadkp->dad_logg, (caddr_t)dkgeom_p,
1019 	    sizeof (struct tgdk_geom));
1020 	return (DDI_SUCCESS);
1021 }
1022 
1023 int
1024 dadk_setgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
1025 {
1026 	struct dadk *dadkp = (struct dadk *)objp;
1027 
1028 	dadkp->dad_logg.g_cyl = dkgeom_p->g_cyl;
1029 	dadkp->dad_logg.g_head = dkgeom_p->g_head;
1030 	dadkp->dad_logg.g_sec = dkgeom_p->g_sec;
1031 	dadkp->dad_logg.g_cap = dkgeom_p->g_cap;
1032 	return (DDI_SUCCESS);
1033 }
1034 
1035 
1036 tgdk_iob_handle
1037 dadk_iob_alloc(opaque_t objp, daddr_t blkno, ssize_t xfer, int kmsflg)
1038 {
1039 	struct dadk *dadkp = (struct dadk *)objp;
1040 	struct buf *bp;
1041 	struct tgdk_iob *iobp;
1042 	size_t rlen;
1043 
1044 	iobp = kmem_zalloc(sizeof (*iobp), kmsflg);
1045 	if (iobp == NULL)
1046 		return (NULL);
1047 	if ((bp = getrbuf(kmsflg)) == NULL) {
1048 		kmem_free(iobp, sizeof (*iobp));
1049 		return (NULL);
1050 	}
1051 
1052 	iobp->b_psec  = LBLK2SEC(blkno, dadkp->dad_blkshf);
1053 	iobp->b_pbyteoff = (blkno & ((1<<dadkp->dad_blkshf) - 1)) << SCTRSHFT;
1054 	iobp->b_pbytecnt = ((iobp->b_pbyteoff + xfer + dadkp->DAD_SECSIZ - 1)
1055 	    >> dadkp->dad_secshf) << dadkp->dad_secshf;
1056 
1057 	bp->b_un.b_addr = 0;
1058 	/*
1059 	 * use i_ddi_mem_alloc() for now until we have an interface to allocate
1060 	 * memory for DMA which doesn't require a DMA handle. ddi_iopb_alloc()
1061 	 * is obsolete and we want more flexibility in controlling the DMA
1062 	 * address constraints..
1063 	 */
1064 	if (i_ddi_mem_alloc((dadkp->dad_sd)->sd_dev, &dadk_alloc_attr,
1065 	    (size_t)iobp->b_pbytecnt, ((kmsflg == KM_SLEEP) ? 1 : 0), 0, NULL,
1066 	    &bp->b_un.b_addr, &rlen, NULL) != DDI_SUCCESS) {
1067 		freerbuf(bp);
1068 		kmem_free(iobp, sizeof (*iobp));
1069 		return (NULL);
1070 	}
1071 	iobp->b_flag |= IOB_BPALLOC | IOB_BPBUFALLOC;
1072 	iobp->b_bp = bp;
1073 	iobp->b_lblk = blkno;
1074 	iobp->b_xfer = xfer;
1075 	iobp->b_lblk = blkno;
1076 	iobp->b_xfer = xfer;
1077 	return (iobp);
1078 }
1079 
1080 /* ARGSUSED */
1081 int
1082 dadk_iob_free(opaque_t objp, struct tgdk_iob *iobp)
1083 {
1084 	struct buf *bp;
1085 
1086 	if (iobp) {
1087 		if (iobp->b_bp && (iobp->b_flag & IOB_BPALLOC)) {
1088 			bp = iobp->b_bp;
1089 			if (bp->b_un.b_addr && (iobp->b_flag & IOB_BPBUFALLOC))
1090 				i_ddi_mem_free((caddr_t)bp->b_un.b_addr, NULL);
1091 			freerbuf(bp);
1092 		}
1093 		kmem_free(iobp, sizeof (*iobp));
1094 	}
1095 	return (DDI_SUCCESS);
1096 }
1097 
1098 /* ARGSUSED */
1099 caddr_t
1100 dadk_iob_htoc(opaque_t objp, struct tgdk_iob *iobp)
1101 {
1102 	return (iobp->b_bp->b_un.b_addr+iobp->b_pbyteoff);
1103 }
1104 
1105 
/*
 * Perform a synchronous transfer on an I/O buffer handle.  Returns the
 * kernel address of the caller's data on success, NULL on error (the
 * error is recorded on the buf via bioerror()).
 */
caddr_t
dadk_iob_xfer(opaque_t objp, struct tgdk_iob *iobp, int rw)
{
	struct dadk	*dadkp = (struct dadk *)objp;
	struct buf	*bp;
	int		err;

	bp = iobp->b_bp;
	/* Refuse writes to a read-only device. */
	if (dadkp->dad_rdonly && !(rw & B_READ)) {
		bioerror(bp, EROFS);
		return (NULL);
	}

	/* Set up the buf for the full sector-padded physical extent. */
	bp->b_flags |= (B_BUSY | rw);
	bp->b_bcount = iobp->b_pbytecnt;
	SET_BP_SEC(bp, iobp->b_psec);
	bp->av_back = (struct buf *)0;
	bp->b_resid = 0;

	/* call flow control */
	mutex_enter(&dadkp->dad_cmd_mutex);
	dadkp->dad_cmd_count++;
	mutex_exit(&dadkp->dad_cmd_mutex);
	FLC_ENQUE(dadkp->dad_flcobjp, bp);
	err = biowait(bp);

	/* Restore the caller's logical byte count and mark the buf idle. */
	bp->b_bcount = iobp->b_xfer;
	bp->b_flags &= ~(B_DONE|B_BUSY);

	if (err)
		return (NULL);

	/* Caller's data starts at the byte offset within the padded extent. */
	return (bp->b_un.b_addr+iobp->b_pbyteoff);
}
1140 
1141 static void
1142 dadk_transport(opaque_t com_data, struct buf *bp)
1143 {
1144 	struct dadk *dadkp = (struct dadk *)com_data;
1145 
1146 	if (CTL_TRANSPORT(dadkp->dad_ctlobjp, GDA_BP_PKT(bp)) ==
1147 	    CTL_SEND_SUCCESS)
1148 		return;
1149 	dadk_restart((void*)GDA_BP_PKT(bp));
1150 }
1151 
1152 static int
1153 dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t), caddr_t arg)
1154 {
1155 	struct cmpkt *pktp;
1156 	struct dadk *dadkp = (struct dadk *)com_data;
1157 
1158 	if (GDA_BP_PKT(bp))
1159 		return (DDI_SUCCESS);
1160 
1161 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, func, arg);
1162 	if (!pktp)
1163 		return (DDI_FAILURE);
1164 
1165 	return (dadk_ioprep(dadkp, pktp));
1166 }
1167 
1168 /*
1169  * Read, Write preparation
1170  */
1171 static int
1172 dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp)
1173 {
1174 	struct buf *bp;
1175 
1176 	bp = pktp->cp_bp;
1177 	if (bp->b_forw == (struct buf *)dadkp)
1178 		*((char *)(pktp->cp_cdbp)) = (char)(intptr_t)bp->b_back;
1179 
1180 	else if (bp->b_flags & B_READ)
1181 		*((char *)(pktp->cp_cdbp)) = DCMD_READ;
1182 	else
1183 		*((char *)(pktp->cp_cdbp)) = DCMD_WRITE;
1184 	pktp->cp_byteleft = bp->b_bcount;
1185 
1186 	/* setup the bad block list handle */
1187 	pktp->cp_private = BBH_GETHANDLE(dadkp->dad_bbhobjp, bp);
1188 	return (dadk_iosetup(dadkp, pktp));
1189 }
1190 
1191 static int
1192 dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp)
1193 {
1194 	struct buf	*bp;
1195 	bbh_cookie_t	bbhckp;
1196 	int		seccnt;
1197 
1198 	seccnt = pktp->cp_bytexfer >> dadkp->dad_secshf;
1199 	pktp->cp_secleft -= seccnt;
1200 
1201 	if (pktp->cp_secleft) {
1202 		pktp->cp_srtsec += seccnt;
1203 	} else {
1204 		/* get the first cookie from the bad block list */
1205 		if (!pktp->cp_private) {
1206 			bp = pktp->cp_bp;
1207 			pktp->cp_srtsec  = GET_BP_SEC(bp);
1208 			pktp->cp_secleft = (bp->b_bcount >> dadkp->dad_secshf);
1209 		} else {
1210 			bbhckp = BBH_HTOC(dadkp->dad_bbhobjp,
1211 			    pktp->cp_private);
1212 			pktp->cp_srtsec = BBH_GETCK_SECTOR(dadkp->dad_bbhobjp,
1213 			    bbhckp);
1214 			pktp->cp_secleft = BBH_GETCK_SECLEN(dadkp->dad_bbhobjp,
1215 			    bbhckp);
1216 		}
1217 	}
1218 
1219 	pktp->cp_bytexfer = pktp->cp_secleft << dadkp->dad_secshf;
1220 
1221 	if (CTL_IOSETUP(dadkp->dad_ctlobjp, pktp)) {
1222 		return (DDI_SUCCESS);
1223 	} else {
1224 		return (DDI_FAILURE);
1225 	}
1226 
1227 
1228 
1229 
1230 }
1231 
1232 static struct cmpkt *
1233 dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp, struct buf *bp,
1234     void (*cb_func)(struct buf *), int (*func)(caddr_t), caddr_t arg)
1235 {
1236 	struct cmpkt *pktp;
1237 
1238 	pktp = gda_pktprep(dadkp->dad_ctlobjp, in_pktp, (opaque_t)bp, func,
1239 	    arg);
1240 
1241 	if (pktp) {
1242 		pktp->cp_callback = dadk_pktcb;
1243 		pktp->cp_time = DADK_IO_TIME;
1244 		pktp->cp_flags = 0;
1245 		pktp->cp_iodone = cb_func;
1246 		pktp->cp_dev_private = (opaque_t)dadkp;
1247 
1248 	}
1249 
1250 	return (pktp);
1251 }
1252 
1253 
1254 static void
1255 dadk_restart(void *vpktp)
1256 {
1257 	struct cmpkt *pktp = (struct cmpkt *)vpktp;
1258 
1259 	if (dadk_ioretry(pktp, QUE_COMMAND) == JUST_RETURN)
1260 		return;
1261 	pktp->cp_iodone(pktp->cp_bp);
1262 }
1263 
/*
 * Dispose of a failed packet according to 'action'.  QUE_COMMAND retries
 * the transport up to DADK_RETRY_COUNT times and, when the retry is in
 * flight, returns JUST_RETURN (completion will arrive via the packet
 * callback).  All other outcomes fall through to error accounting and
 * return COMMAND_DONE, telling the caller to complete the buf.
 */
static int
dadk_ioretry(struct cmpkt *pktp, int action)
{
	struct buf *bp;
	struct dadk *dadkp = PKT2DADK(pktp);

	switch (action) {
	case QUE_COMMAND:
		if (pktp->cp_retry++ < DADK_RETRY_COUNT) {
			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);
			if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) ==
			    CTL_SEND_SUCCESS) {
				/* Retry is in flight; don't complete yet. */
				return (JUST_RETURN);
			}
			gda_log(dadkp->dad_sd->sd_dev, dadk_name,
			    CE_WARN, "transport of command fails\n");
		} else
			gda_log(dadkp->dad_sd->sd_dev,
			    dadk_name, CE_WARN,
			    "exceeds maximum number of retries\n");
		bioerror(pktp->cp_bp, ENXIO);
		/*FALLTHROUGH*/
	case COMMAND_DONE_ERROR:
		/* Charge the untransferred bytes back to the buf. */
		bp = pktp->cp_bp;
		bp->b_resid += pktp->cp_byteleft - pktp->cp_bytexfer +
		    pktp->cp_resid;
		if (geterror(bp) == 0) {
			if ((*((char *)(pktp->cp_cdbp)) == DCMD_FLUSH_CACHE) &&
			    (pktp->cp_dev_private == (opaque_t)dadkp) &&
			    ((int)(*(char *)pktp->cp_scbp) == DERR_ABORT)) {
				/*
				 * Flag "unimplemented" responses for
				 * DCMD_FLUSH_CACHE as ENOTSUP, and remember
				 * that the drive can't flush so we stop
				 * issuing the command.
				 */
				bioerror(bp, ENOTSUP);
				mutex_enter(&dadkp->dad_mutex);
				dadkp->dad_noflush = 1;
				mutex_exit(&dadkp->dad_mutex);
			} else {
				bioerror(bp, EIO);
			}
		}
		/*FALLTHROUGH*/
	case COMMAND_DONE:
	default:
		return (COMMAND_DONE);
	}
}
1312 
1313 
/*
 * Packet completion callback.  Routes the result three ways: success
 * completes immediately; an ioctl (passthru) packet records its error
 * and completes; a normal packet consults dadk_chkerr()/dadk_ioretry()
 * to decide between retry and failure.
 */
static void
dadk_pktcb(struct cmpkt *pktp)
{
	int action;
	struct dadkio_rwcmd *rwcmdp;

	rwcmdp = (struct dadkio_rwcmd *)pktp->cp_passthru;  /* ioctl packet */

	if (pktp->cp_reason == CPS_SUCCESS) {
		/* DADK_SILENT marks an internal command with no rwcmd. */
		if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT))
			rwcmdp->status.status = DADKIO_STAT_NO_ERROR;
		pktp->cp_iodone(pktp->cp_bp);
		return;
	}

	if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT)) {
		/* ioctl path: record the error detail, no retries. */
		if (pktp->cp_reason == CPS_CHKERR)
			dadk_recorderr(pktp, rwcmdp);
		dadk_iodone(pktp->cp_bp);
		return;
	}

	if (pktp->cp_reason == CPS_CHKERR)
		action = dadk_chkerr(pktp);
	else
		action = COMMAND_DONE_ERROR;

	/* JUST_RETURN means a retry was scheduled (e.g. busy timeout). */
	if (action == JUST_RETURN)
		return;

	/*
	 * If we are panicking don't retry the command
	 * just fail it so we can go down completing all
	 * of the buffers.
	 */
	if (ddi_in_panic() && action == QUE_COMMAND)
		action = COMMAND_DONE_ERROR;

	if (action != COMMAND_DONE) {
		if ((dadk_ioretry(pktp, action)) == JUST_RETURN)
			return;
	}
	pktp->cp_iodone(pktp->cp_bp);
}
1358 
1359 
1360 
/*
 * Error disposition table, indexed by the DERR_* code found in the
 * packet's status completion block.  Each entry gives the action for
 * dadk_ioretry() (retry, fail, complete, or just return) and the
 * severity used when logging the error via gda_errmsg().
 */
static struct dadkio_derr dadk_errtab[] = {
	{COMMAND_DONE, GDA_INFORMATIONAL},	/*  0 DERR_SUCCESS	*/
	{QUE_COMMAND, GDA_FATAL},		/*  1 DERR_AMNF		*/
	{QUE_COMMAND, GDA_FATAL},		/*  2 DERR_TKONF	*/
	{COMMAND_DONE_ERROR, GDA_INFORMATIONAL}, /* 3 DERR_ABORT	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/*  4 DERR_DWF		*/
	{QUE_COMMAND, GDA_FATAL},		/*  5 DERR_IDNF		*/
	{JUST_RETURN, GDA_INFORMATIONAL},	/*  6 DERR_BUSY		*/
	{QUE_COMMAND, GDA_FATAL},		/*  7 DERR_UNC		*/
	{QUE_COMMAND, GDA_RETRYABLE},		/*  8 DERR_BBK		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/*  9 DERR_INVCDB	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 10 DERR_HARD		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 11 DERR_ILI		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 12 DERR_EOM		*/
	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 13 DERR_MCR		*/
	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 14 DERR_RECOVER	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 15 DERR_NOTREADY	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/* 16 DERR_MEDIUM	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 17 DERR_HW		*/
	{COMMAND_DONE, GDA_FATAL},		/* 18 DERR_ILL		*/
	{COMMAND_DONE, GDA_FATAL},		/* 19 DERR_UNIT_ATTN	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 20 DERR_DATA_PROT	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 21 DERR_MISCOMPARE	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/* 22 DERR_ICRC		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 23 DERR_RESV		*/
};
1387 
/*
 * Examine a packet that completed with CPS_CHKERR.  Updates the error
 * kstats, logs the failure, schedules a delayed retry for DERR_BUSY,
 * and returns the disposition action from dadk_errtab[].
 *
 * NOTE(review): scb is taken straight from the status completion block
 * and used to index dadk_errtab[] without a range check — this relies
 * on the controller only producing valid DERR_* codes; confirm.
 */
static int
dadk_chkerr(struct cmpkt *pktp)
{
	int err_blkno;
	struct dadk *dadkp = PKT2DADK(pktp);
	dadk_errstats_t *dep;
	int scb = *(char *)pktp->cp_scbp;

	if (scb == DERR_SUCCESS) {
		/* Success after a retry counts as a recovered error. */
		if (pktp->cp_retry != 0 && dadkp->dad_errstats != NULL) {
			dep = (dadk_errstats_t *)
			    dadkp->dad_errstats->ks_data;
			dep->dadk_rq_recov_err.value.ui32++;
		}
		return (COMMAND_DONE);
	}

	/* Only a retried packet has a meaningful failing block number. */
	if (pktp->cp_retry) {
		err_blkno = pktp->cp_srtsec + ((pktp->cp_bytexfer -
		    pktp->cp_resid) >> dadkp->dad_secshf);
	} else
		err_blkno = -1;

	if (dadkp->dad_errstats != NULL) {
		dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;

		/* Soft/hard error counters, by table severity. */
		switch (dadk_errtab[scb].d_severity) {
			case GDA_RETRYABLE:
				dep->dadk_softerrs.value.ui32++;
				break;

			case GDA_FATAL:
				dep->dadk_harderrs.value.ui32++;
				break;

			default:
				break;
		}

		/* Categorized counters, by the specific error code. */
		switch (scb) {
			case DERR_INVCDB:
			case DERR_ILI:
			case DERR_EOM:
			case DERR_HW:
			case DERR_ICRC:
				dep->dadk_transerrs.value.ui32++;
				break;

			case DERR_AMNF:
			case DERR_TKONF:
			case DERR_DWF:
			case DERR_BBK:
			case DERR_UNC:
			case DERR_HARD:
			case DERR_MEDIUM:
			case DERR_DATA_PROT:
			case DERR_MISCOMP:
				dep->dadk_rq_media_err.value.ui32++;
				break;

			case DERR_NOTREADY:
				dep->dadk_rq_ntrdy_err.value.ui32++;
				break;

			case DERR_IDNF:
			case DERR_UNIT_ATTN:
				dep->dadk_rq_nodev_err.value.ui32++;
				break;

			case DERR_ILL:
			case DERR_RESV:
				dep->dadk_rq_illrq_err.value.ui32++;
				break;

			default:
				break;
		}
	}

	/* if attempting to read a sector from a cdrom audio disk */
	if ((dadkp->dad_cdrom) &&
	    (*((char *)(pktp->cp_cdbp)) == DCMD_READ) &&
	    (scb == DERR_ILL)) {
		return (COMMAND_DONE);
	}
	/* Only non-passthru (regular I/O) errors are logged here. */
	if (pktp->cp_passthru == NULL) {
		gda_errmsg(dadkp->dad_sd, pktp, dadk_name,
		    dadk_errtab[scb].d_severity, pktp->cp_srtsec,
		    err_blkno, dadk_cmds, dadk_sense);
	}

	/* Busy device: retry later via timeout rather than immediately. */
	if (scb == DERR_BUSY) {
		(void) timeout(dadk_restart, (void *)pktp, DADK_BSY_TIMEOUT);
	}

	return (dadk_errtab[scb].d_action);
}
1485 
1486 static void
1487 dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp)
1488 {
1489 	struct dadk *dadkp;
1490 	int scb;
1491 
1492 	dadkp = PKT2DADK(pktp);
1493 	scb = (int)(*(char *)pktp->cp_scbp);
1494 
1495 
1496 	rwcmdp->status.failed_blk = rwcmdp->blkaddr +
1497 	    ((pktp->cp_bytexfer - pktp->cp_resid) >> dadkp->dad_secshf);
1498 
1499 	rwcmdp->status.resid = pktp->cp_bp->b_resid +
1500 	    pktp->cp_byteleft - pktp->cp_bytexfer + pktp->cp_resid;
1501 	switch ((int)(* (char *)pktp->cp_scbp)) {
1502 	case DERR_AMNF:
1503 	case DERR_ABORT:
1504 		rwcmdp->status.status = DADKIO_STAT_ILLEGAL_REQUEST;
1505 		break;
1506 	case DERR_DWF:
1507 	case DERR_IDNF:
1508 		rwcmdp->status.status = DADKIO_STAT_ILLEGAL_ADDRESS;
1509 		break;
1510 	case DERR_TKONF:
1511 	case DERR_UNC:
1512 	case DERR_BBK:
1513 		rwcmdp->status.status = DADKIO_STAT_MEDIUM_ERROR;
1514 		rwcmdp->status.failed_blk_is_valid = 1;
1515 		rwcmdp->status.resid = 0;
1516 		break;
1517 	case DERR_BUSY:
1518 		rwcmdp->status.status = DADKIO_STAT_NOT_READY;
1519 		break;
1520 	case DERR_INVCDB:
1521 	case DERR_HARD:
1522 		rwcmdp->status.status = DADKIO_STAT_HARDWARE_ERROR;
1523 		break;
1524 	case DERR_ICRC:
1525 	default:
1526 		rwcmdp->status.status = DADKIO_STAT_NOT_SUPPORTED;
1527 	}
1528 
1529 	if (rwcmdp->flags & DADKIO_FLAG_SILENT)
1530 		return;
1531 	gda_errmsg(dadkp->dad_sd, pktp, dadk_name, dadk_errtab[scb].d_severity,
1532 	    rwcmdp->blkaddr, rwcmdp->status.failed_blk,
1533 	    dadk_cmds, dadk_sense);
1534 }
1535 
1536 /*ARGSUSED*/
1537 static void
1538 dadk_polldone(struct buf *bp)
1539 {
1540 	struct cmpkt *pktp;
1541 	struct dadk *dadkp;
1542 
1543 	pktp  = GDA_BP_PKT(bp);
1544 	dadkp = PKT2DADK(pktp);
1545 	mutex_enter(&dadkp->dad_cmd_mutex);
1546 	dadkp->dad_cmd_count--;
1547 	mutex_exit(&dadkp->dad_cmd_mutex);
1548 }
1549 
/*
 * Normal I/O completion callback.  If the buf still has bytes left and
 * no error, set up and transport the next extent; otherwise tear down
 * the packet, release flow control, and complete the buf.
 */
static void
dadk_iodone(struct buf *bp)
{
	struct cmpkt *pktp;
	struct dadk *dadkp;

	pktp  = GDA_BP_PKT(bp);
	dadkp = PKT2DADK(pktp);

	/* check for all iodone */
	pktp->cp_byteleft -= pktp->cp_bytexfer;
	if (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
		/* More extents remain: reset retries and set up the next. */
		pktp->cp_retry = 0;
		(void) dadk_iosetup(dadkp, pktp);


	/* 	transport the next one */
		if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) == CTL_SEND_SUCCESS)
			return;
		if ((dadk_ioretry(pktp, QUE_COMMAND)) == JUST_RETURN)
			return;
	}

	/* start next one */
	FLC_DEQUE(dadkp->dad_flcobjp, bp);

	/* free pkt */
	if (pktp->cp_private)
		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
	mutex_enter(&dadkp->dad_cmd_mutex);
	dadkp->dad_cmd_count--;
	mutex_exit(&dadkp->dad_cmd_mutex);
	biodone(bp);
}
1585 
/*
 * DKIOCSTATE support for removable media: block the caller until the
 * media state differs from *state, then return the new state.  Returns
 * ENXIO for non-removable devices and EINTR if the wait is signalled.
 * A watch thread (one per device) polls the drive and broadcasts on
 * dad_state_cv when the state changes.
 */
int
dadk_check_media(opaque_t objp, int *state)
{
	struct dadk *dadkp = (struct dadk *)objp;

	if (!dadkp->dad_rmb) {
		return (ENXIO);
	}
#ifdef DADK_DEBUG
	if (dadk_debug & DSTATE)
		PRF("dadk_check_media: user state %x disk state %x\n",
		    *state, dadkp->dad_iostate);
#endif
	/*
	 * If state already changed just return
	 */
	if (*state != dadkp->dad_iostate) {
		*state = dadkp->dad_iostate;
		return (0);
	}

	/*
	 * Startup polling on thread state
	 */
	mutex_enter(&dadkp->dad_mutex);
	if (dadkp->dad_thread_cnt == 0) {
		/*
		 * One thread per removable dadk device
		 */
		(void) thread_create(NULL, 0, dadk_watch_thread, dadkp, 0, &p0,
		    TS_RUN, v.v_maxsyspri - 2);
	}
	/* Keep the watch thread alive while at least one waiter exists. */
	dadkp->dad_thread_cnt++;

	/*
	 * Wait for state to change
	 */
	do {
		/* cv_wait_sig() returns 0 when interrupted by a signal. */
		if (cv_wait_sig(&dadkp->dad_state_cv, &dadkp->dad_mutex) == 0) {
			dadkp->dad_thread_cnt--;
			mutex_exit(&dadkp->dad_mutex);
			return (EINTR);
		}
	} while (*state == dadkp->dad_iostate);
	*state = dadkp->dad_iostate;
	dadkp->dad_thread_cnt--;
	mutex_exit(&dadkp->dad_mutex);
	return (0);
}
1635 
1636 
1637 #define	MEDIA_ACCESS_DELAY 2000000
1638 
/*
 * Media state watch thread (one per removable device, created by
 * dadk_check_media()).  Polls the drive state every
 * dadk_check_media_time microseconds and wakes waiters when it
 * changes; exits when no waiters remain (dad_thread_cnt == 0).
 */
static void
dadk_watch_thread(struct dadk *dadkp)
{
	enum dkio_state state;
	int interval;

	interval = drv_usectohz(dadk_check_media_time);

	do {
		if (dadk_rmb_ioctl(dadkp, DCMD_GET_STATE, (intptr_t)&state, 0,
		    DADK_SILENT)) {
			/*
			 * Assume state remained the same
			 */
			state = dadkp->dad_iostate;
		}

		/*
		 * now signal the waiting thread if this is *not* the
		 * specified state;
		 * delay the signal if the state is DKIO_INSERTED
		 * to allow the target to recover
		 */
		if (state != dadkp->dad_iostate) {

			dadkp->dad_iostate = state;
			if (state == DKIO_INSERTED) {
				/*
				 * delay the signal to give the drive a chance
				 * to do what it apparently needs to do
				 */
				(void) timeout((void(*)(void *))cv_broadcast,
				    (void *)&dadkp->dad_state_cv,
				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
			} else {
				cv_broadcast(&dadkp->dad_state_cv);
			}
		}
		delay(interval);
	} while (dadkp->dad_thread_cnt);
}
1680 
1681 int
1682 dadk_inquiry(opaque_t objp, opaque_t *inqpp)
1683 {
1684 	struct dadk *dadkp = (struct dadk *)objp;
1685 	struct scsi_inquiry **sinqpp = (struct scsi_inquiry **)inqpp;
1686 
1687 	if (dadkp && dadkp->dad_sd && dadkp->dad_sd->sd_inq) {
1688 		*sinqpp = dadkp->dad_sd->sd_inq;
1689 		return (DDI_SUCCESS);
1690 	}
1691 
1692 	return (DDI_FAILURE);
1693 }
1694 
1695 static int
1696 dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags, int silent)
1697 
1698 {
1699 	struct buf *bp;
1700 	int err;
1701 	struct cmpkt *pktp;
1702 
1703 	if ((bp = getrbuf(KM_SLEEP)) == NULL) {
1704 		return (ENOMEM);
1705 	}
1706 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_rmb_iodone, NULL, NULL);
1707 	if (!pktp) {
1708 		freerbuf(bp);
1709 		return (ENOMEM);
1710 	}
1711 	bp->b_back  = (struct buf *)arg;
1712 	bp->b_forw  = (struct buf *)dadkp->dad_flcobjp;
1713 	pktp->cp_passthru = (opaque_t)(intptr_t)silent;
1714 
1715 	err = dadk_ctl_ioctl(dadkp, cmd, (uintptr_t)pktp, flags);
1716 	freerbuf(bp);
1717 	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
1718 	return (err);
1719 
1720 
1721 }
1722 
1723 static void
1724 dadk_rmb_iodone(struct buf *bp)
1725 {
1726 	struct cmpkt *pktp;
1727 	struct dadk *dadkp;
1728 
1729 	pktp  = GDA_BP_PKT(bp);
1730 	dadkp = PKT2DADK(pktp);
1731 
1732 	bp->b_flags &= ~(B_DONE|B_BUSY);
1733 
1734 	/* Start next one */
1735 	FLC_DEQUE(dadkp->dad_flcobjp, bp);
1736 
1737 	mutex_enter(&dadkp->dad_cmd_mutex);
1738 	dadkp->dad_cmd_count--;
1739 	mutex_exit(&dadkp->dad_cmd_mutex);
1740 	biodone(bp);
1741 }
1742 
1743 static int
1744 dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp, dev_t dev,
1745 	enum uio_seg dataspace, int rw)
1746 {
1747 	struct dadkio_rwcmd *rwcmdp = (struct dadkio_rwcmd *)cmdp;
1748 	struct buf	*bp;
1749 	struct iovec	aiov;
1750 	struct uio	auio;
1751 	struct uio	*uio = &auio;
1752 	int		status;
1753 
1754 	bp = getrbuf(KM_SLEEP);
1755 
1756 	bp->av_forw = bp->b_forw = (struct buf *)dadkp;
1757 	bp->b_back  = (struct buf *)rwcmdp;	/* ioctl packet */
1758 
1759 	bzero((caddr_t)&auio, sizeof (struct uio));
1760 	bzero((caddr_t)&aiov, sizeof (struct iovec));
1761 	aiov.iov_base = rwcmdp->bufaddr;
1762 	aiov.iov_len = rwcmdp->buflen;
1763 	uio->uio_iov = &aiov;
1764 
1765 	uio->uio_iovcnt = 1;
1766 	uio->uio_resid = rwcmdp->buflen;
1767 	uio->uio_segflg = dataspace;
1768 
1769 	/* Let physio do the rest... */
1770 	status = physio(dadk_dk_strategy, bp, dev, rw, dadkmin, uio);
1771 
1772 	freerbuf(bp);
1773 	return (status);
1774 
1775 }
1776 
/* Do not let a user gendisk request get too big or */
/* else we could use too many resources.	    */
1779 
/*
 * minphys(9F)-style clamp applied by physio() to each chunk of a user
 * gendisk transfer.
 */
static void
dadkmin(struct buf *bp)
{
	if (bp->b_bcount > dadk_dk_maxphys)
		bp->b_bcount = dadk_dk_maxphys;
}
1786 
1787 static int
1788 dadk_dk_strategy(struct buf *bp)
1789 {
1790 	dadk_dk((struct dadk *)bp->av_forw, (struct dadkio_rwcmd *)bp->b_back,
1791 	    bp);
1792 	return (0);
1793 }
1794 
/*
 * Prepare and enqueue one gendisk command: build a packet for the buf,
 * attach the ioctl packet as passthru data, and hand it to flow
 * control.  On packet allocation failure the buf is completed with
 * ENOMEM.
 */
static void
dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *rwcmdp, struct buf *bp)
{
	struct  cmpkt *pktp;

	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, NULL, NULL);
	if (!pktp) {
		bioerror(bp, ENOMEM);
		biodone(bp);
		return;
	}

	pktp->cp_passthru = rwcmdp;

	(void) dadk_ioprep(dadkp, pktp);

	/* Count the command before flow control can start it. */
	mutex_enter(&dadkp->dad_cmd_mutex);
	dadkp->dad_cmd_count++;
	mutex_exit(&dadkp->dad_cmd_mutex);
	FLC_ENQUE(dadkp->dad_flcobjp, bp);
}
1816 
1817 /*
1818  * There is no existing way to notify cmdk module
1819  * when the command completed, so add this function
1820  * to calculate how many on-going commands.
1821  */
1822 int
1823 dadk_getcmds(opaque_t objp)
1824 {
1825 	struct dadk *dadkp = (struct dadk *)objp;
1826 	int count;
1827 
1828 	mutex_enter(&dadkp->dad_cmd_mutex);
1829 	count = dadkp->dad_cmd_count;
1830 	mutex_exit(&dadkp->dad_cmd_mutex);
1831 	return (count);
1832 }
1833 
1834 /*
1835  * this function was used to calc the cmd for CTL_IOCTL
1836  */
1837 static int
1838 dadk_ctl_ioctl(struct dadk *dadkp, uint32_t cmd, uintptr_t arg, int flag)
1839 {
1840 	int error;
1841 	mutex_enter(&dadkp->dad_cmd_mutex);
1842 	dadkp->dad_cmd_count++;
1843 	mutex_exit(&dadkp->dad_cmd_mutex);
1844 	error = CTL_IOCTL(dadkp->dad_ctlobjp, cmd, arg, flag);
1845 	mutex_enter(&dadkp->dad_cmd_mutex);
1846 	dadkp->dad_cmd_count--;
1847 	mutex_exit(&dadkp->dad_cmd_mutex);
1848 	return (error);
1849 }
1850