xref: /titanic_41/usr/src/uts/intel/io/dktp/dcdev/dadk.c (revision 8275a87e46b79352e8c1a918b91373159c477438)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Direct Attached Disk
31  */
32 
33 #include <sys/file.h>
34 #include <sys/scsi/scsi.h>
35 #include <sys/var.h>
36 #include <sys/proc.h>
37 #include <sys/dktp/cm.h>
38 #include <sys/vtoc.h>
39 #include <sys/dkio.h>
40 #include <sys/policy.h>
41 #include <sys/priv.h>
42 
43 #include <sys/dktp/dadev.h>
44 #include <sys/dktp/fctypes.h>
45 #include <sys/dktp/flowctrl.h>
46 #include <sys/dktp/tgcom.h>
47 #include <sys/dktp/tgdk.h>
48 #include <sys/dktp/bbh.h>
49 #include <sys/dktp/dadkio.h>
50 #include <sys/dktp/dadk.h>
51 #include <sys/cdio.h>
52 
53 /*
54  * Local Function Prototypes
55  */
56 static void dadk_restart(void *pktp);
57 static void dadk_pktcb(struct cmpkt *pktp);
58 static void dadk_iodone(struct buf *bp);
59 static void dadk_polldone(struct buf *bp);
60 static void dadk_setcap(struct dadk *dadkp);
61 static void dadk_create_errstats(struct dadk *dadkp, int instance);
62 static void dadk_destroy_errstats(struct dadk *dadkp);
63 
64 static int dadk_chkerr(struct cmpkt *pktp);
65 static int dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp);
66 static int dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp);
67 static int dadk_ioretry(struct cmpkt *pktp, int action);
68 
69 static struct cmpkt *dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp,
70     struct buf *bp, void (*cb_func)(struct buf *), int (*func)(caddr_t),
71     caddr_t arg);
72 
73 static int  dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t),
74     caddr_t arg);
75 static void dadk_transport(opaque_t com_data, struct buf *bp);
76 
/*
 * tgcom (target communication) vector handed to the flow-control layer:
 * packet preparation is dadk_pkt and transmission is dadk_transport; the
 * first two slots are unused here (nodev).
 */
struct tgcom_objops dadk_com_ops = {
	nodev,
	nodev,
	dadk_pkt,
	dadk_transport,
	0, 0
};
84 
85 /*
86  * architecture dependent allocation restrictions for dadk_iob_alloc(). For
87  * x86, we'll set dma_attr_addr_hi to dadk_max_phys_addr and dma_attr_sgllen
88  * to dadk_sgl_size during _init().
89  */
#if defined(__sparc)
static ddi_dma_attr_t dadk_alloc_attr = {
	DMA_ATTR_V0,	/* version number */
	0x0,		/* lowest usable address */
	0xFFFFFFFFull,	/* high DMA address range */
	0xFFFFFFFFull,	/* DMA counter register */
	1,		/* DMA address alignment */
	1,		/* DMA burstsizes */
	1,		/* min effective DMA size */
	0xFFFFFFFFull,	/* max DMA xfer size */
	0xFFFFFFFFull,	/* segment boundary */
	1,		/* s/g list length */
	512,		/* granularity of device */
	0,		/* DMA transfer flags */
};
#elif defined(__x86)
static ddi_dma_attr_t dadk_alloc_attr = {
	DMA_ATTR_V0,	/* version number */
	0x0,		/* lowest usable address */
	0x0,		/* high DMA address range [set in _init()] */
	0xFFFFull,	/* DMA counter register */
	512,		/* DMA address alignment */
	1,		/* DMA burstsizes */
	1,		/* min effective DMA size */
	0xFFFFFFFFull,	/* max DMA xfer size */
	0xFFFFFFFFull,	/* segment boundary */
	0,		/* s/g list length [set in _init()] */
	512,		/* granularity of device */
	0,		/* DMA transfer flags */
};

/* Tunables folded into dadk_alloc_attr by _init(); patchable before load */
uint64_t dadk_max_phys_addr = 0xFFFFFFFFull;
int dadk_sgl_size = 0xFF;
#endif
124 
125 static int dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags,
126     int silent);
127 static void dadk_rmb_iodone(struct buf *bp);
128 
129 static int dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp,
130     dev_t dev, enum uio_seg dataspace, int rw);
131 static void dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *scmdp,
132     struct buf *bp);
133 static void dadkmin(struct buf *bp);
134 static int dadk_dk_strategy(struct buf *bp);
135 static void dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp);
136 
/*
 * tgdk (target disk) entry-point vector exported via dadk_create();
 * upper layers (e.g. cmdk) drive the disk object only through these ops.
 */
struct tgdk_objops dadk_ops = {
	dadk_init,
	dadk_free,
	dadk_probe,
	dadk_attach,
	dadk_open,
	dadk_close,
	dadk_ioctl,
	dadk_strategy,
	dadk_setgeom,
	dadk_getgeom,
	dadk_iob_alloc,
	dadk_iob_free,
	dadk_iob_htoc,
	dadk_iob_xfer,
	dadk_dump,
	dadk_getphygeom,
	dadk_set_bbhobj,
	dadk_check_media,
	dadk_inquiry,
	dadk_cleanup,
	0
};
160 
161 /*
162  * Local static data
163  */
164 
165 #ifdef	DADK_DEBUG
166 #define	DENT	0x0001
167 #define	DERR	0x0002
168 #define	DIO	0x0004
169 #define	DGEOM	0x0010
170 #define	DSTATE  0x0020
171 static	int	dadk_debug = DGEOM;
172 
173 #endif	/* DADK_DEBUG */
174 
175 static int dadk_check_media_time = 3000000;	/* 3 Second State Check */
176 static int dadk_dk_maxphys = 0x80000;
177 
/*
 * Command names indexed by DCMD_* opcode; the first (octal escape) byte
 * of each string repeats the opcode for logging routines.
 */
static char	*dadk_cmds[] = {
	"\000Unknown",			/* unknown 		*/
	"\001read sector",		/* DCMD_READ 1		*/
	"\002write sector",		/* DCMD_WRITE 2		*/
	"\003format track",		/* DCMD_FMTTRK 3	*/
	"\004format whole drive",	/* DCMD_FMTDRV 4	*/
	"\005recalibrate",		/* DCMD_RECAL  5	*/
	"\006seek sector",		/* DCMD_SEEK   6	*/
	"\007read verify",		/* DCMD_RDVER  7	*/
	"\010read defect list",		/* DCMD_GETDEF 8	*/
	"\011lock door",		/* DCMD_LOCK   9	*/
	"\012unlock door",		/* DCMD_UNLOCK 10	*/
	"\013start motor",		/* DCMD_START_MOTOR 11	*/
	"\014stop motor",		/* DCMD_STOP_MOTOR 12	*/
	"\015eject",			/* DCMD_EJECT  13	*/
	"\016update geometry",		/* DCMD_UPDATE_GEOM  14	*/
	"\017get state",		/* DCMD_GET_STATE  15	*/
	"\020cdrom pause",		/* DCMD_PAUSE  16	*/
	"\021cdrom resume",		/* DCMD_RESUME  17	*/
	"\022cdrom play track index",	/* DCMD_PLAYTRKIND  18	*/
	"\023cdrom play msf",		/* DCMD_PLAYMSF  19	*/
	"\024cdrom sub channel",	/* DCMD_SUBCHNL  20	*/
	"\025cdrom read mode 1",	/* DCMD_READMODE1  21	*/
	"\026cdrom read toc header",	/* DCMD_READTOCHDR  22	*/
	"\027cdrom read toc entry",	/* DCMD_READTOCENT  23	*/
	"\030cdrom read offset",	/* DCMD_READOFFSET  24	*/
	"\031cdrom read mode 2",	/* DCMD_READMODE2  25	*/
	"\032cdrom volume control",	/* DCMD_VOLCTRL  26	*/
	"\033flush cache",		/* DCMD_FLUSH_CACHE  27	*/
	NULL
};
209 
/*
 * Sense/error descriptions indexed by DERR_* code; first byte mirrors
 * the code, as in dadk_cmds[] above.
 */
static char *dadk_sense[] = {
	"\000Success",			/* DERR_SUCCESS		*/
	"\001address mark not found",	/* DERR_AMNF		*/
	"\002track 0 not found",	/* DERR_TKONF		*/
	"\003aborted command",		/* DERR_ABORT		*/
	"\004write fault",		/* DERR_DWF		*/
	"\005ID not found",		/* DERR_IDNF		*/
	"\006drive busy",		/* DERR_BUSY		*/
	"\007uncorrectable data error",	/* DERR_UNC		*/
	"\010bad block detected",	/* DERR_BBK		*/
	"\011invalid command",		/* DERR_INVCDB		*/
	"\012device hard error",	/* DERR_HARD		*/
	"\013illegal length indicated", /* DERR_ILI		*/
	"\014end of media",		/* DERR_EOM		*/
	"\015media change requested",	/* DERR_MCR		*/
	"\016recovered from error",	/* DERR_RECOVER		*/
	"\017device not ready",		/* DERR_NOTREADY	*/
	"\020medium error",		/* DERR_MEDIUM		*/
	"\021hardware error",		/* DERR_HW		*/
	"\022illegal request",		/* DERR_ILL		*/
	"\023unit attention",		/* DERR_UNIT_ATTN	*/
	"\024data protection",		/* DERR_DATA_PROT	*/
	"\025miscompare",		/* DERR_MISCOMPARE	*/
	"\026ICRC error during UDMA",	/* DERR_ICRC		*/
	"\027reserved",			/* DERR_RESV		*/
	NULL
};
237 
238 static char *dadk_name = "Disk";
239 
240 /*
241  *	This is the loadable module wrapper
242  */
243 #include <sys/modctl.h>
244 
245 extern struct mod_ops mod_miscops;
246 
/* Miscellaneous (library-style) module; it exports no dev_ops of its own. */
static struct modlmisc modlmisc = {
	&mod_miscops,	/* Type of module */
	"Direct Attached Disk %I%"
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};
255 
/*
 * _init: loadable-module entry point.  On x86, fold the tunables
 * dadk_max_phys_addr and dadk_sgl_size into dadk_alloc_attr before
 * registering with the module framework.
 */
int
_init(void)
{
#ifdef DADK_DEBUG
	if (dadk_debug & DENT)
		PRF("dadk_init: call\n");
#endif

#if defined(__x86)
	/* set the max physical address for iob allocs on x86 */
	dadk_alloc_attr.dma_attr_addr_hi = dadk_max_phys_addr;

	/*
	 * set the sgllen for iob allocs on x86. If this is set less than
	 * the number of pages the buffer will take (taking into account
	 * alignment), it would force the allocator to try and allocate
	 * contiguous pages.
	 */
	dadk_alloc_attr.dma_attr_sgllen = dadk_sgl_size;
#endif

	return (mod_install(&modlinkage));
}
279 
/* _fini: loadable-module unload entry; fails if the module is busy. */
int
_fini(void)
{
#ifdef DADK_DEBUG
	if (dadk_debug & DENT)
		PRF("dadk_fini: call\n");
#endif

	return (mod_remove(&modlinkage));
}
290 
/* _info: report module information to the framework. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
296 
297 struct tgdk_obj *
298 dadk_create()
299 {
300 	struct tgdk_obj *dkobjp;
301 	struct dadk *dadkp;
302 
303 	dkobjp = kmem_zalloc((sizeof (*dkobjp) + sizeof (*dadkp)), KM_NOSLEEP);
304 	if (!dkobjp)
305 		return (NULL);
306 	dadkp = (struct dadk *)(dkobjp+1);
307 
308 	dkobjp->tg_ops  = (struct  tgdk_objops *)&dadk_ops;
309 	dkobjp->tg_data = (opaque_t)dadkp;
310 	dkobjp->tg_ext = &(dkobjp->tg_extblk);
311 	dadkp->dad_extp = &(dkobjp->tg_extblk);
312 
313 #ifdef DADK_DEBUG
314 	if (dadk_debug & DENT)
315 		PRF("dadk_create: tgdkobjp= 0x%x dadkp= 0x%x\n", dkobjp, dadkp);
316 #endif
317 	return (dkobjp);
318 }
319 
/*
 * dadk_init: wire a freshly created disk object to its scsi_device,
 * controller handle, bad-block-handling object and flow-control object.
 * Returns the status of FLC_INIT on the flow-control object.
 */
int
dadk_init(opaque_t objp, opaque_t devp, opaque_t flcobjp, opaque_t queobjp,
	opaque_t bbhobjp, void *lkarg)
{
	struct dadk *dadkp = (struct dadk *)objp;
	struct scsi_device *sdevp = (struct scsi_device *)devp;

	/* the HBA transport handle doubles as the controller object here */
	dadkp->dad_sd = devp;
	dadkp->dad_ctlobjp = (opaque_t)sdevp->sd_address.a_hba_tran;
	sdevp->sd_private = (caddr_t)dadkp;

	/* initialize the communication object */
	dadkp->dad_com.com_data = (opaque_t)dadkp;
	dadkp->dad_com.com_ops  = &dadk_com_ops;

	dadkp->dad_bbhobjp = bbhobjp;
	BBH_INIT(bbhobjp);

	dadkp->dad_flcobjp = flcobjp;
	return (FLC_INIT(flcobjp, &(dadkp->dad_com), queobjp, lkarg));
}
341 
342 int
343 dadk_free(struct tgdk_obj *dkobjp)
344 {
345 	TGDK_CLEANUP(dkobjp);
346 	kmem_free(dkobjp, (sizeof (*dkobjp) + sizeof (struct dadk)));
347 
348 	return (DDI_SUCCESS);
349 }
350 
351 void
352 dadk_cleanup(struct tgdk_obj *dkobjp)
353 {
354 	struct dadk *dadkp;
355 
356 	dadkp = (struct dadk *)(dkobjp->tg_data);
357 	if (dadkp->dad_sd)
358 		dadkp->dad_sd->sd_private = NULL;
359 	if (dadkp->dad_bbhobjp) {
360 		BBH_FREE(dadkp->dad_bbhobjp);
361 		dadkp->dad_bbhobjp = NULL;
362 	}
363 	if (dadkp->dad_flcobjp) {
364 		FLC_FREE(dadkp->dad_flcobjp);
365 		dadkp->dad_flcobjp = NULL;
366 	}
367 }
368 
/* ARGSUSED */
/*
 * dadk_probe: classify the device from its cached INQUIRY data.  Only
 * direct-access disks and CD-ROMs are claimed; anything else (WORM,
 * optical, unknown) fails the probe.  On success the node type, rmb
 * flag and default shift factors are recorded and the identity logged.
 */
int
dadk_probe(opaque_t objp, int kmsflg)
{
	struct dadk *dadkp = (struct dadk *)objp;
	struct scsi_device *devp;
	char   name[80];

	devp = dadkp->dad_sd;
	if (!devp->sd_inq || (devp->sd_inq->inq_dtype == DTYPE_NOTPRESENT) ||
		(devp->sd_inq->inq_dtype == DTYPE_UNKNOWN)) {
		return (DDI_PROBE_FAILURE);
	}

	switch (devp->sd_inq->inq_dtype) {
		case DTYPE_DIRECT:
			dadkp->dad_ctype = DKC_DIRECT;
			dadkp->dad_extp->tg_nodetype = DDI_NT_BLOCK;
			dadkp->dad_extp->tg_ctype = DKC_DIRECT;
			break;
		case DTYPE_RODIRECT: /* eg cdrom */
			dadkp->dad_ctype = DKC_CDROM;
			dadkp->dad_extp->tg_rdonly = 1;
			dadkp->dad_rdonly = 1;
			dadkp->dad_cdrom = 1;
			dadkp->dad_extp->tg_nodetype = DDI_NT_CD;
			dadkp->dad_extp->tg_ctype = DKC_CDROM;
			break;
		case DTYPE_WORM:
		case DTYPE_OPTICAL:
		default:
			return (DDI_PROBE_FAILURE);
	}

	dadkp->dad_extp->tg_rmb = dadkp->dad_rmb = devp->sd_inq->inq_rmb;

	/* defaults; refined later by dadk_setcap() at open time */
	dadkp->dad_secshf = SCTRSHFT;
	dadkp->dad_blkshf = 0;

	/* display the device name */
	(void) strcpy(name, "Vendor '");
	gda_inqfill((caddr_t)devp->sd_inq->inq_vid, 8, &name[strlen(name)]);
	(void) strcat(name, "' Product '");
	gda_inqfill((caddr_t)devp->sd_inq->inq_pid, 16, &name[strlen(name)]);
	(void) strcat(name, "'");
	gda_log(devp->sd_dev, dadk_name, CE_NOTE, "!<%s>\n", name);

	return (DDI_PROBE_SUCCESS);
}
418 
419 
420 /* ARGSUSED */
421 int
422 dadk_attach(opaque_t objp)
423 {
424 	return (DDI_SUCCESS);
425 }
426 
427 int
428 dadk_set_bbhobj(opaque_t objp, opaque_t bbhobjp)
429 {
430 	struct dadk *dadkp = (struct dadk *)objp;
431 	/* free the old bbh object */
432 	if (dadkp->dad_bbhobjp)
433 		BBH_FREE(dadkp->dad_bbhobjp);
434 
435 	/* initialize the new bbh object */
436 	dadkp->dad_bbhobjp = bbhobjp;
437 	BBH_INIT(bbhobjp);
438 
439 	return (DDI_SUCCESS);
440 }
441 
/* ARGSUSED */
/*
 * dadk_open: per-open initialization.  For fixed disks that were already
 * opened (capacity known) only the kstats are (re)started.  For removable
 * media the drive is spun up, locked and its geometry refreshed, with the
 * media-state cv broadcast around the transition.  Then the write-cache
 * state and logical/physical geometry are fetched and the error kstats
 * created.  Returns DDI_FAILURE if the media has no capacity.
 */
int
dadk_open(opaque_t objp, int flag)
{
	struct dadk *dadkp = (struct dadk *)objp;
	int error;
	int wce;

	if (!dadkp->dad_rmb) {
		if (dadkp->dad_phyg.g_cap) {
			FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
			    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));
			return (DDI_SUCCESS);
		}
	} else {
	    mutex_enter(&dadkp->dad_mutex);
	    dadkp->dad_iostate = DKIO_NONE;
	    cv_broadcast(&dadkp->dad_state_cv);
	    mutex_exit(&dadkp->dad_mutex);

	    if (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0, 0, DADK_SILENT) ||
		dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT) ||
		dadk_rmb_ioctl(dadkp, DCMD_UPDATE_GEOM, 0, 0, DADK_SILENT)) {
		    return (DDI_FAILURE);
	    }

	    mutex_enter(&dadkp->dad_mutex);
	    dadkp->dad_iostate = DKIO_INSERTED;
	    cv_broadcast(&dadkp->dad_state_cv);
	    mutex_exit(&dadkp->dad_mutex);
	}

	/*
	 * get write cache enable state
	 * If there is an error, must assume that write cache
	 * is enabled.
	 * NOTE: Since there is currently no Solaris mechanism to
	 * change the state of the Write Cache Enable feature,
	 * this code just checks the value of the WCE bit
	 * obtained at device init time.  If a mechanism
	 * is added to the driver to change WCE, dad_wce
	 * must be updated appropriately.
	 */
	error = CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETWCE,
	    (uintptr_t)&wce, FKIOCTL | FNATIVE);
	mutex_enter(&dadkp->dad_mutex);
	dadkp->dad_wce = (error != 0) || (wce != 0);
	mutex_exit(&dadkp->dad_mutex);

	/* logical disk geometry */
	CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETGEOM,
	    (uintptr_t)&dadkp->dad_logg, FKIOCTL | FNATIVE);
	if (dadkp->dad_logg.g_cap == 0)
		return (DDI_FAILURE);

	/* get physical disk geometry */
	CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETPHYGEOM,
	    (uintptr_t)&dadkp->dad_phyg, FKIOCTL | FNATIVE);
	if (dadkp->dad_phyg.g_cap == 0)
		return (DDI_FAILURE);

	/* derive sector/block shift factors from the physical geometry */
	dadk_setcap(dadkp);

	dadk_create_errstats(dadkp,
	    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));

	/* start profiling */
	FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
		ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));

	return (DDI_SUCCESS);
}
514 
515 static void
516 dadk_setcap(struct dadk *dadkp)
517 {
518 	int	 totsize;
519 	int	 i;
520 
521 	totsize = dadkp->dad_phyg.g_secsiz;
522 
523 	if (totsize == 0) {
524 		if (dadkp->dad_cdrom) {
525 			totsize = 2048;
526 		} else {
527 			totsize = NBPSCTR;
528 		}
529 	} else {
530 		/* Round down sector size to multiple of 512B */
531 		totsize &= ~(NBPSCTR-1);
532 	}
533 	dadkp->dad_phyg.g_secsiz = totsize;
534 
535 	/* set sec,block shift factor - (512->0, 1024->1, 2048->2, etc.) */
536 	totsize >>= SCTRSHFT;
537 	for (i = 0; totsize != 1; i++, totsize >>= 1);
538 	dadkp->dad_blkshf = i;
539 	dadkp->dad_secshf = i + SCTRSHFT;
540 }
541 
542 
/*
 * dadk_create_errstats: create and install the "cmdk<N>,error" named
 * kstat, then seed the static identity fields (model, serial, revision,
 * capacity) from the controller.  A no-op if the kstat already exists
 * or cannot be created.
 */
static void
dadk_create_errstats(struct dadk *dadkp, int instance)
{
	dadk_errstats_t *dep;
	char kstatname[KSTAT_STRLEN];
	dadk_ioc_string_t dadk_ioc_string;

	if (dadkp->dad_errstats)
		return;

	(void) sprintf(kstatname, "cmdk%d,error", instance);
	dadkp->dad_errstats = kstat_create("cmdkerror", instance,
	    kstatname, "device_error", KSTAT_TYPE_NAMED,
	    sizeof (dadk_errstats_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_PERSISTENT);

	if (!dadkp->dad_errstats)
		return;

	dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;

	kstat_named_init(&dep->dadk_softerrs,
	    "Soft Errors", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_harderrs,
	    "Hard Errors", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_transerrs,
	    "Transport Errors", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_model,
	    "Model", KSTAT_DATA_CHAR);
	kstat_named_init(&dep->dadk_revision,
	    "Revision", KSTAT_DATA_CHAR);
	kstat_named_init(&dep->dadk_serial,
	    "Serial No", KSTAT_DATA_CHAR);
	kstat_named_init(&dep->dadk_capacity,
	    "Size", KSTAT_DATA_ULONGLONG);
	kstat_named_init(&dep->dadk_rq_media_err,
	    "Media Error", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_ntrdy_err,
	    "Device Not Ready", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_nodev_err,
	    "No Device", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_recov_err,
	    "Recoverable", KSTAT_DATA_UINT32);
	kstat_named_init(&dep->dadk_rq_illrq_err,
	    "Illegal Request", KSTAT_DATA_UINT32);

	/* counters are bumped directly; no ks_update work is needed */
	dadkp->dad_errstats->ks_private = dep;
	dadkp->dad_errstats->ks_update = nulldev;
	kstat_install(dadkp->dad_errstats);

	/* get model */
	dep->dadk_model.value.c[0] = 0;
	dadk_ioc_string.is_buf = &dep->dadk_model.value.c[0];
	dadk_ioc_string.is_size = sizeof (dep->dadk_model.value.c);
	CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETMODEL,
	    (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);

	/* get serial */
	dep->dadk_serial.value.c[0] = 0;
	dadk_ioc_string.is_buf = &dep->dadk_serial.value.c[0];
	dadk_ioc_string.is_size = sizeof (dep->dadk_serial.value.c);
	CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETSERIAL,
	    (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);

	/* Get revision */
	dep->dadk_revision.value.c[0] = 0;

	/* Get capacity */

	dep->dadk_capacity.value.ui64 =
	    (uint64_t)dadkp->dad_logg.g_cap *
	    (uint64_t)dadkp->dad_logg.g_secsiz;
}
616 
617 
618 int
619 dadk_close(opaque_t objp)
620 {
621 	struct dadk *dadkp = (struct dadk *)objp;
622 
623 	if (dadkp->dad_rmb) {
624 		(void) dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0, 0,
625 		    DADK_SILENT);
626 		(void) dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT);
627 	}
628 	FLC_STOP_KSTAT(dadkp->dad_flcobjp);
629 
630 	dadk_destroy_errstats(dadkp);
631 
632 	return (DDI_SUCCESS);
633 }
634 
635 static void
636 dadk_destroy_errstats(struct dadk *dadkp)
637 {
638 	if (!dadkp->dad_errstats)
639 		return;
640 
641 	kstat_delete(dadkp->dad_errstats);
642 	dadkp->dad_errstats = NULL;
643 }
644 
645 
646 int
647 dadk_strategy(opaque_t objp, struct buf *bp)
648 {
649 	struct dadk *dadkp = (struct dadk *)objp;
650 
651 	if (dadkp->dad_rdonly && !(bp->b_flags & B_READ)) {
652 		bioerror(bp, EROFS);
653 		return (DDI_FAILURE);
654 	}
655 
656 	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
657 		bioerror(bp, ENXIO);
658 		return (DDI_FAILURE);
659 	}
660 
661 	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));
662 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
663 
664 	return (DDI_SUCCESS);
665 }
666 
/*
 * dadk_dump: polled write path used for crash dumps.  Bypasses the
 * flow-control queue entirely: a CPF_NOINTR packet is built and driven
 * synchronously through dadk_transport() until all bytes are moved or
 * an error is flagged on the buf.
 */
int
dadk_dump(opaque_t objp, struct buf *bp)
{
	struct dadk *dadkp = (struct dadk *)objp;
	struct cmpkt *pktp;

	if (dadkp->dad_rdonly) {
		bioerror(bp, EROFS);
		return (DDI_FAILURE);
	}

	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
		bioerror(bp, ENXIO);
		return (DDI_FAILURE);
	}

	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));

	/* polled completion; no interrupt-driven callback */
	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_polldone, NULL, NULL);
	if (!pktp) {
		cmn_err(CE_WARN, "no resources for dumping");
		bioerror(bp, EIO);
		return (DDI_FAILURE);
	}
	pktp->cp_flags |= CPF_NOINTR;

	(void) dadk_ioprep(dadkp, pktp);
	dadk_transport(dadkp, bp);
	pktp->cp_byteleft -= pktp->cp_bytexfer;

	/* keep issuing partial transfers until done or the buf errors */
	while (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
		(void) dadk_iosetup(dadkp, pktp);
		dadk_transport(dadkp, bp);
		pktp->cp_byteleft -= pktp->cp_bytexfer;
	}

	if (pktp->cp_private)
		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
	return (DDI_SUCCESS);
}
708 
/* ARGSUSED  */
/*
 * dadk_ioctl: ioctl dispatch for the disk object.
 *
 * DKIOCGETDEF, DIOCTL_RWCMD, DKIOC_UPDATEFW and DKIOCFLUSHWRITECACHE are
 * handled here; any other command on non-removable media is passed
 * straight to the controller.  For removable media, the remaining
 * commands are translated to DCMD_* codes and routed through
 * dadk_rmb_ioctl().
 */
int
dadk_ioctl(opaque_t objp, dev_t dev, int cmd, intptr_t arg, int flag,
	cred_t *cred_p, int *rval_p)
{
	struct dadk *dadkp = (struct dadk *)objp;

	switch (cmd) {
	case DKIOCGETDEF:
	    {
		struct buf	*bp;
		int		err, head;
		unsigned char	*secbuf;
		STRUCT_DECL(defect_header, adh);

		STRUCT_INIT(adh, flag & FMODELS);

		/*
		 * copyin header ....
		 * yields head number and buffer address
		 */
		if (ddi_copyin((caddr_t)arg, STRUCT_BUF(adh), STRUCT_SIZE(adh),
		    flag))
			return (EFAULT);
		head = STRUCT_FGET(adh, head);
		if (head < 0 || head >= dadkp->dad_phyg.g_head)
			return (ENXIO);
		secbuf = kmem_zalloc(NBPSCTR, KM_SLEEP);
		if (!secbuf)
			return (ENOMEM);
		bp = getrbuf(KM_SLEEP);
		if (!bp) {
			kmem_free(secbuf, NBPSCTR);
			return (ENOMEM);
		}

		bp->b_edev = dev;
		bp->b_dev  = cmpdev(dev);
		bp->b_flags = B_BUSY;
		bp->b_resid = 0;
		bp->b_bcount = NBPSCTR;
		bp->b_un.b_addr = (caddr_t)secbuf;
		bp->b_blkno = head; /* I had to put it somewhere! */
		/* b_forw/b_back smuggle the dadk and command to dadk_ioprep */
		bp->b_forw = (struct buf *)dadkp;
		bp->b_back = (struct buf *)DCMD_GETDEF;

		FLC_ENQUE(dadkp->dad_flcobjp, bp);
		err = biowait(bp);
		if (!err) {
			if (ddi_copyout((caddr_t)secbuf,
			    STRUCT_FGETP(adh, buffer), NBPSCTR, flag))
				err = ENXIO;
		}
		kmem_free(secbuf, NBPSCTR);
		freerbuf(bp);
		return (err);
	    }
	case DIOCTL_RWCMD:
	    {
		struct dadkio_rwcmd *rwcmdp;
		int status, rw;

		/*
		 * copied in by cmdk and, if necessary, converted to the
		 * correct datamodel
		 */
		rwcmdp = (struct dadkio_rwcmd *)(intptr_t)arg;

		/*
		 * handle the complex cases here; we pass these
		 * through to the driver, which will queue them and
		 * handle the requests asynchronously.  The simpler
		 * cases, which can return immediately, fail here, and
		 * the request reverts to the dadk_ioctl routine, which
		 * will reroute them directly to the ata driver.
		 */
		switch (rwcmdp->cmd) {
			case DADKIO_RWCMD_READ :
				/*FALLTHROUGH*/
			case DADKIO_RWCMD_WRITE:
				rw = ((rwcmdp->cmd == DADKIO_RWCMD_WRITE) ?
				    B_WRITE : B_READ);
				status = dadk_dk_buf_setup(dadkp,
				    (opaque_t)rwcmdp, dev, ((flag &FKIOCTL) ?
				    UIO_SYSSPACE : UIO_USERSPACE), rw);
				return (status);
			default:
				return (EINVAL);
		}
	    }
	case DKIOC_UPDATEFW:

		/*
		 * Require PRIV_ALL privilege to invoke DKIOC_UPDATEFW
		 * to protect the firmware update from malicious use
		 */
		if (PRIV_POLICY(cred_p, PRIV_ALL, B_FALSE, EPERM, NULL) != 0)
			return (EPERM);
		else
			return (CTL_IOCTL(dadkp->dad_ctlobjp, cmd, arg, flag));

	case DKIOCFLUSHWRITECACHE:
		{
			struct buf *bp;
			int err = 0;
			struct dk_callback *dkc = (struct dk_callback *)arg;
			struct cmpkt *pktp;
			int is_sync = 1;

			mutex_enter(&dadkp->dad_mutex);
			if (dadkp->dad_noflush || !  dadkp->dad_wce) {
				err = dadkp->dad_noflush ? ENOTSUP : 0;
				mutex_exit(&dadkp->dad_mutex);
				/*
				 * If a callback was requested: a
				 * callback will always be done if the
				 * caller saw the DKIOCFLUSHWRITECACHE
				 * ioctl return 0, and never done if the
				 * caller saw the ioctl return an error.
				 */
				if ((flag & FKIOCTL) && dkc != NULL &&
				    dkc->dkc_callback != NULL) {
					(*dkc->dkc_callback)(dkc->dkc_cookie,
					    err);
					/*
					 * Did callback and reported error.
					 * Since we did a callback, ioctl
					 * should return 0.
					 */
					err = 0;
				}
				return (err);
			}
			mutex_exit(&dadkp->dad_mutex);

			bp = getrbuf(KM_SLEEP);

			bp->b_edev = dev;
			bp->b_dev  = cmpdev(dev);
			bp->b_flags = B_BUSY;
			bp->b_resid = 0;
			bp->b_bcount = 0;
			SET_BP_SEC(bp, 0);

			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				struct dk_callback *dkc2 =
				    (struct dk_callback *)kmem_zalloc(
				    sizeof (struct dk_callback), KM_SLEEP);

				bcopy(dkc, dkc2, sizeof (*dkc2));
				/*
				 * Borrow b_list to carry private data
				 * to the b_iodone func.
				 */
				bp->b_list = (struct buf *)dkc2;
				bp->b_iodone = dadk_flushdone;
				is_sync = 0;
			}

			/*
			 * Setup command pkt
			 * dadk_pktprep() can't fail since DDI_DMA_SLEEP set
			 */
			pktp = dadk_pktprep(dadkp, NULL, bp,
			    dadk_iodone, DDI_DMA_SLEEP, NULL);

			pktp->cp_time = DADK_FLUSH_CACHE_TIME;

			*((char *)(pktp->cp_cdbp)) = DCMD_FLUSH_CACHE;
			pktp->cp_byteleft = 0;
			pktp->cp_private = NULL;
			pktp->cp_secleft = 0;
			pktp->cp_srtsec = -1;
			pktp->cp_bytexfer = 0;

			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);

			FLC_ENQUE(dadkp->dad_flcobjp, bp);

			/* async case: dadk_flushdone frees bp and dkc2 */
			if (is_sync) {
				err = biowait(bp);
				freerbuf(bp);
			}
			return (err);
		}
	default:
		if (!dadkp->dad_rmb)
			return (CTL_IOCTL(dadkp->dad_ctlobjp, cmd, arg, flag));
	}

	/* removable media: translate to DCMD_* and run via dadk_rmb_ioctl */
	switch (cmd) {
	case CDROMSTOP:
		return (dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0,
			0, DADK_SILENT));
	case CDROMSTART:
		return (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0,
			0, DADK_SILENT));
	case DKIOCLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT));
	case DKIOCUNLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT));
	case DKIOCEJECT:
	case CDROMEJECT:
		{
			int ret;

			if (ret = dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0,
				DADK_SILENT)) {
				return (ret);
			}
			if (ret = dadk_rmb_ioctl(dadkp, DCMD_EJECT, 0, 0,
				DADK_SILENT)) {
				return (ret);
			}
			mutex_enter(&dadkp->dad_mutex);
			dadkp->dad_iostate = DKIO_EJECTED;
			cv_broadcast(&dadkp->dad_state_cv);
			mutex_exit(&dadkp->dad_mutex);

			return (0);

		}
	default:
		return (ENOTTY);
	/*
	 * cdrom audio commands
	 */
	case CDROMPAUSE:
		cmd = DCMD_PAUSE;
		break;
	case CDROMRESUME:
		cmd = DCMD_RESUME;
		break;
	case CDROMPLAYMSF:
		cmd = DCMD_PLAYMSF;
		break;
	case CDROMPLAYTRKIND:
		cmd = DCMD_PLAYTRKIND;
		break;
	case CDROMREADTOCHDR:
		cmd = DCMD_READTOCHDR;
		break;
	case CDROMREADTOCENTRY:
		cmd = DCMD_READTOCENT;
		break;
	case CDROMVOLCTRL:
		cmd = DCMD_VOLCTRL;
		break;
	case CDROMSUBCHNL:
		cmd = DCMD_SUBCHNL;
		break;
	case CDROMREADMODE2:
		cmd = DCMD_READMODE2;
		break;
	case CDROMREADMODE1:
		cmd = DCMD_READMODE1;
		break;
	case CDROMREADOFFSET:
		cmd = DCMD_READOFFSET;
		break;
	}
	return (dadk_rmb_ioctl(dadkp, cmd, arg, flag, 0));
}
973 
974 int
975 dadk_flushdone(struct buf *bp)
976 {
977 	struct dk_callback *dkc = (struct dk_callback *)bp->b_list;
978 
979 	ASSERT(dkc != NULL && dkc->dkc_callback != NULL);
980 
981 	(*dkc->dkc_callback)(dkc->dkc_cookie, geterror(bp));
982 
983 	kmem_free(dkc, sizeof (*dkc));
984 	freerbuf(bp);
985 	return (0);
986 }
987 
988 int
989 dadk_getphygeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
990 {
991 	struct dadk *dadkp = (struct dadk *)objp;
992 
993 	bcopy((caddr_t)&dadkp->dad_phyg, (caddr_t)dkgeom_p,
994 	    sizeof (struct tgdk_geom));
995 	return (DDI_SUCCESS);
996 }
997 
998 int
999 dadk_getgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
1000 {
1001 	struct dadk *dadkp = (struct dadk *)objp;
1002 	bcopy((caddr_t)&dadkp->dad_logg, (caddr_t)dkgeom_p,
1003 	    sizeof (struct tgdk_geom));
1004 	return (DDI_SUCCESS);
1005 }
1006 
1007 int
1008 dadk_setgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
1009 {
1010 	struct dadk *dadkp = (struct dadk *)objp;
1011 
1012 	dadkp->dad_logg.g_cyl = dkgeom_p->g_cyl;
1013 	dadkp->dad_logg.g_head = dkgeom_p->g_head;
1014 	dadkp->dad_logg.g_sec = dkgeom_p->g_sec;
1015 	dadkp->dad_logg.g_cap = dkgeom_p->g_cap;
1016 	return (DDI_SUCCESS);
1017 }
1018 
1019 
1020 tgdk_iob_handle
1021 dadk_iob_alloc(opaque_t objp, daddr_t blkno, ssize_t xfer, int kmsflg)
1022 {
1023 	struct dadk *dadkp = (struct dadk *)objp;
1024 	struct buf *bp;
1025 	struct tgdk_iob *iobp;
1026 	size_t rlen;
1027 
1028 	iobp = kmem_zalloc(sizeof (*iobp), kmsflg);
1029 	if (iobp == NULL)
1030 		return (NULL);
1031 	if ((bp = getrbuf(kmsflg)) == NULL) {
1032 		kmem_free(iobp, sizeof (*iobp));
1033 		return (NULL);
1034 	}
1035 
1036 	iobp->b_psec  = LBLK2SEC(blkno, dadkp->dad_blkshf);
1037 	iobp->b_pbyteoff = (blkno & ((1<<dadkp->dad_blkshf) - 1)) << SCTRSHFT;
1038 	iobp->b_pbytecnt = ((iobp->b_pbyteoff + xfer + dadkp->DAD_SECSIZ - 1)
1039 				>> dadkp->dad_secshf) << dadkp->dad_secshf;
1040 
1041 	bp->b_un.b_addr = 0;
1042 	/*
1043 	 * use i_ddi_mem_alloc() for now until we have an interface to allocate
1044 	 * memory for DMA which doesn't require a DMA handle. ddi_iopb_alloc()
1045 	 * is obsolete and we want more flexibility in controlling the DMA
1046 	 * address constraints..
1047 	 */
1048 	if (i_ddi_mem_alloc((dadkp->dad_sd)->sd_dev, &dadk_alloc_attr,
1049 	    (size_t)iobp->b_pbytecnt, ((kmsflg == KM_SLEEP) ? 1 : 0), 0, NULL,
1050 	    &bp->b_un.b_addr, &rlen, NULL) != DDI_SUCCESS) {
1051 		freerbuf(bp);
1052 		kmem_free(iobp, sizeof (*iobp));
1053 		return (NULL);
1054 	}
1055 	iobp->b_flag |= IOB_BPALLOC | IOB_BPBUFALLOC;
1056 	iobp->b_bp = bp;
1057 	iobp->b_lblk = blkno;
1058 	iobp->b_xfer = xfer;
1059 	iobp->b_lblk = blkno;
1060 	iobp->b_xfer = xfer;
1061 	return (iobp);
1062 }
1063 
1064 /* ARGSUSED */
1065 int
1066 dadk_iob_free(opaque_t objp, struct tgdk_iob *iobp)
1067 {
1068 	struct buf *bp;
1069 
1070 	if (iobp) {
1071 		if (iobp->b_bp && (iobp->b_flag & IOB_BPALLOC)) {
1072 			bp = iobp->b_bp;
1073 			if (bp->b_un.b_addr && (iobp->b_flag & IOB_BPBUFALLOC))
1074 				i_ddi_mem_free((caddr_t)bp->b_un.b_addr, NULL);
1075 			freerbuf(bp);
1076 		}
1077 		kmem_free(iobp, sizeof (*iobp));
1078 	}
1079 	return (DDI_SUCCESS);
1080 }
1081 
1082 /* ARGSUSED */
1083 caddr_t
1084 dadk_iob_htoc(opaque_t objp, struct tgdk_iob *iobp)
1085 {
1086 	return (iobp->b_bp->b_un.b_addr+iobp->b_pbyteoff);
1087 }
1088 
1089 
1090 caddr_t
1091 dadk_iob_xfer(opaque_t objp, struct tgdk_iob *iobp, int rw)
1092 {
1093 	struct dadk	*dadkp = (struct dadk *)objp;
1094 	struct buf	*bp;
1095 	int		err;
1096 
1097 	bp = iobp->b_bp;
1098 	if (dadkp->dad_rdonly && !(rw & B_READ)) {
1099 		bioerror(bp, EROFS);
1100 		return (NULL);
1101 	}
1102 
1103 	bp->b_flags |= (B_BUSY | rw);
1104 	bp->b_bcount = iobp->b_pbytecnt;
1105 	SET_BP_SEC(bp, iobp->b_psec);
1106 	bp->av_back = (struct buf *)0;
1107 	bp->b_resid = 0;
1108 
1109 	/* call flow control */
1110 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
1111 	err = biowait(bp);
1112 
1113 	bp->b_bcount = iobp->b_xfer;
1114 	bp->b_flags &= ~(B_DONE|B_BUSY);
1115 
1116 	if (err)
1117 		return (NULL);
1118 
1119 	return (bp->b_un.b_addr+iobp->b_pbyteoff);
1120 }
1121 
1122 static void
1123 dadk_transport(opaque_t com_data, struct buf *bp)
1124 {
1125 	struct dadk *dadkp = (struct dadk *)com_data;
1126 
1127 	if (CTL_TRANSPORT(dadkp->dad_ctlobjp, GDA_BP_PKT(bp)) ==
1128 	    CTL_SEND_SUCCESS)
1129 		return;
1130 	dadk_restart((void*)GDA_BP_PKT(bp));
1131 }
1132 
1133 static int
1134 dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t), caddr_t arg)
1135 {
1136 	struct cmpkt *pktp;
1137 	struct dadk *dadkp = (struct dadk *)com_data;
1138 
1139 	if (GDA_BP_PKT(bp))
1140 		return (DDI_SUCCESS);
1141 
1142 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, func, arg);
1143 	if (!pktp)
1144 		return (DDI_FAILURE);
1145 
1146 	return (dadk_ioprep(dadkp, pktp));
1147 }
1148 
1149 /*
1150  * Read, Write preparation
1151  */
1152 static int
1153 dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp)
1154 {
1155 	struct buf *bp;
1156 
1157 	bp = pktp->cp_bp;
1158 	if (bp->b_forw == (struct buf *)dadkp)
1159 		*((char *)(pktp->cp_cdbp)) = (char)(intptr_t)bp->b_back;
1160 
1161 	else if (bp->b_flags & B_READ)
1162 		*((char *)(pktp->cp_cdbp)) = DCMD_READ;
1163 	else
1164 		*((char *)(pktp->cp_cdbp)) = DCMD_WRITE;
1165 	pktp->cp_byteleft = bp->b_bcount;
1166 
1167 	/* setup the bad block list handle */
1168 	pktp->cp_private = BBH_GETHANDLE(dadkp->dad_bbhobjp, bp);
1169 	return (dadk_iosetup(dadkp, pktp));
1170 }
1171 
1172 static int
1173 dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp)
1174 {
1175 	struct buf	*bp;
1176 	bbh_cookie_t	bbhckp;
1177 	int		seccnt;
1178 
1179 	seccnt = pktp->cp_bytexfer >> dadkp->dad_secshf;
1180 	pktp->cp_secleft -= seccnt;
1181 
1182 	if (pktp->cp_secleft) {
1183 		pktp->cp_srtsec += seccnt;
1184 	} else {
1185 		/* get the first cookie from the bad block list */
1186 		if (!pktp->cp_private) {
1187 			bp = pktp->cp_bp;
1188 			pktp->cp_srtsec  = GET_BP_SEC(bp);
1189 			pktp->cp_secleft = (bp->b_bcount >> dadkp->dad_secshf);
1190 		} else {
1191 			bbhckp = BBH_HTOC(dadkp->dad_bbhobjp,
1192 			    pktp->cp_private);
1193 			pktp->cp_srtsec = BBH_GETCK_SECTOR(dadkp->dad_bbhobjp,
1194 			    bbhckp);
1195 			pktp->cp_secleft = BBH_GETCK_SECLEN(dadkp->dad_bbhobjp,
1196 			    bbhckp);
1197 		}
1198 	}
1199 
1200 	pktp->cp_bytexfer = pktp->cp_secleft << dadkp->dad_secshf;
1201 
1202 	if (CTL_IOSETUP(dadkp->dad_ctlobjp, pktp)) {
1203 		return (DDI_SUCCESS);
1204 	} else {
1205 		return (DDI_FAILURE);
1206 	}
1207 
1208 
1209 
1210 
1211 }
1212 
1213 static struct cmpkt *
1214 dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp, struct buf *bp,
1215     void (*cb_func)(struct buf *), int (*func)(caddr_t), caddr_t arg)
1216 {
1217 	struct cmpkt *pktp;
1218 
1219 	pktp = gda_pktprep(dadkp->dad_ctlobjp, in_pktp, (opaque_t)bp, func,
1220 	    arg);
1221 
1222 	if (pktp) {
1223 		pktp->cp_callback = dadk_pktcb;
1224 		pktp->cp_time = DADK_IO_TIME;
1225 		pktp->cp_flags = 0;
1226 		pktp->cp_iodone = cb_func;
1227 		pktp->cp_dev_private = (opaque_t)dadkp;
1228 
1229 	}
1230 
1231 	return (pktp);
1232 }
1233 
1234 
1235 static void
1236 dadk_restart(void *vpktp)
1237 {
1238 	struct cmpkt *pktp = (struct cmpkt *)vpktp;
1239 
1240 	if (dadk_ioretry(pktp, QUE_COMMAND) == JUST_RETURN)
1241 		return;
1242 	pktp->cp_iodone(pktp->cp_bp);
1243 }
1244 
/*
 * Retry/error disposition for a failed packet.
 *
 * action:
 *   QUE_COMMAND        - resend the packet (up to DADK_RETRY_COUNT
 *                        attempts); on acceptance return JUST_RETURN
 *                        and let the controller callback re-enter the
 *                        completion path.  On exhaustion/refusal the
 *                        buf is marked ENXIO and we fall through.
 *   COMMAND_DONE_ERROR - account the untransferred residual on the
 *                        buf and set an error if none is pending.
 *   COMMAND_DONE       - nothing further to do.
 *
 * Returns JUST_RETURN when the caller must NOT complete the buf now,
 * otherwise COMMAND_DONE.
 */
static int
dadk_ioretry(struct cmpkt *pktp, int action)
{
	struct buf *bp;
	struct dadk *dadkp = PKT2DADK(pktp);

	switch (action) {
	case QUE_COMMAND:
		if (pktp->cp_retry++ < DADK_RETRY_COUNT) {
			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);
			if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) ==
				CTL_SEND_SUCCESS) {
				return (JUST_RETURN);
			}
			gda_log(dadkp->dad_sd->sd_dev, dadk_name,
				CE_WARN,
				"transport of command fails\n");
		} else
			gda_log(dadkp->dad_sd->sd_dev,
				dadk_name, CE_WARN,
				"exceeds maximum number of retries\n");
		bioerror(pktp->cp_bp, ENXIO);
		/*FALLTHROUGH*/
	case COMMAND_DONE_ERROR:
		/* charge the bytes that never transferred back to the buf */
		bp = pktp->cp_bp;
		bp->b_resid += pktp->cp_byteleft - pktp->cp_bytexfer +
		    pktp->cp_resid;
		if (geterror(bp) == 0) {
			if ((*((char *)(pktp->cp_cdbp)) == DCMD_FLUSH_CACHE) &&
			    (pktp->cp_dev_private == (opaque_t)dadkp) &&
			    ((int)(*(char *)pktp->cp_scbp) == DERR_ABORT)) {
				/*
				 * Flag "unimplemented" responses for
				 * DCMD_FLUSH_CACHE as ENOTSUP and remember
				 * (dad_noflush) not to issue it again.
				 */
				bioerror(bp, ENOTSUP);
				mutex_enter(&dadkp->dad_mutex);
				dadkp->dad_noflush = 1;
				mutex_exit(&dadkp->dad_mutex);
			} else {
				bioerror(bp, EIO);
			}
		}
		/*FALLTHROUGH*/
	case COMMAND_DONE:
	default:
		return (COMMAND_DONE);
	}
}
1294 
1295 
/*
 * Controller completion callback for every dadk command packet.
 * Dispatches on cp_reason and on whether the packet is a DADKIO
 * passthru (cp_passthru holds the ioctl rwcmd pointer, or the
 * DADK_SILENT sentinel for quiet internal commands).
 */
static void
dadk_pktcb(struct cmpkt *pktp)
{
	int action;
	struct dadkio_rwcmd *rwcmdp;

	rwcmdp = (struct dadkio_rwcmd *)pktp->cp_passthru;  /* ioctl packet */

	if (pktp->cp_reason == CPS_SUCCESS) {
		if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT))
			rwcmdp->status.status = DADKIO_STAT_NO_ERROR;
		pktp->cp_iodone(pktp->cp_bp);
		return;
	}

	/* failed passthru: record status for the ioctl caller, no retry */
	if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT)) {
		if (pktp->cp_reason == CPS_CHKERR)
			dadk_recorderr(pktp, rwcmdp);
		dadk_iodone(pktp->cp_bp);
		return;
	}

	if (pktp->cp_reason == CPS_CHKERR)
		action = dadk_chkerr(pktp);
	else
		action = COMMAND_DONE_ERROR;

	/* chkerr scheduled a delayed restart (e.g. DERR_BUSY timeout) */
	if (action == JUST_RETURN)
		return;

	if (action != COMMAND_DONE) {
		if ((dadk_ioretry(pktp, action)) == JUST_RETURN)
			return;
	}
	pktp->cp_iodone(pktp->cp_bp);
}
1332 
1333 
1334 
/*
 * Error disposition table, indexed by the DERR_* value from the
 * packet's status completion block: the retry/completion action to
 * take and the severity used when logging the failure.
 */
static struct dadkio_derr dadk_errtab[] = {
	{COMMAND_DONE, GDA_INFORMATIONAL},	/*  0 DERR_SUCCESS	*/
	{QUE_COMMAND, GDA_FATAL},		/*  1 DERR_AMNF		*/
	{QUE_COMMAND, GDA_FATAL},		/*  2 DERR_TKONF	*/
	{COMMAND_DONE_ERROR, GDA_INFORMATIONAL}, /* 3 DERR_ABORT	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/*  4 DERR_DWF		*/
	{QUE_COMMAND, GDA_FATAL},		/*  5 DERR_IDNF		*/
	{JUST_RETURN, GDA_INFORMATIONAL},	/*  6 DERR_BUSY		*/
	{QUE_COMMAND, GDA_FATAL},		/*  7 DERR_UNC		*/
	{QUE_COMMAND, GDA_RETRYABLE},		/*  8 DERR_BBK		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/*  9 DERR_INVCDB	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 10 DERR_HARD		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 11 DERR_ILI		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 12 DERR_EOM		*/
	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 13 DERR_MCR		*/
	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 14 DERR_RECOVER	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 15 DERR_NOTREADY	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/* 16 DERR_MEDIUM	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 17 DERR_HW		*/
	{COMMAND_DONE, GDA_FATAL},		/* 18 DERR_ILL		*/
	{COMMAND_DONE, GDA_FATAL},		/* 19 DERR_UNIT_ATTN	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 20 DERR_DATA_PROT	*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 21 DERR_MISCOMPARE	*/
	{QUE_COMMAND, GDA_RETRYABLE},		/* 22 DERR_ICRC		*/
	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 23 DERR_RESV		*/
};
1361 
/*
 * Examine the status completion block of a failed packet, update the
 * per-disk error kstats, log the failure (unless it is a passthru),
 * and return the dadk_errtab action for the caller (COMMAND_DONE,
 * COMMAND_DONE_ERROR, QUE_COMMAND, or JUST_RETURN for DERR_BUSY,
 * in which case a delayed dadk_restart() has been scheduled).
 *
 * NOTE(review): scb indexes dadk_errtab without a range check; this
 * relies on the bottom-layer controller only ever reporting DERR_*
 * values covered by the table -- confirm against those drivers.
 */
static int
dadk_chkerr(struct cmpkt *pktp)
{
	int err_blkno;
	struct dadk *dadkp = PKT2DADK(pktp);
	dadk_errstats_t *dep;
	int scb = *(char *)pktp->cp_scbp;

	if (scb == DERR_SUCCESS) {
		/* the command eventually succeeded after retries */
		if (pktp->cp_retry != 0 && dadkp->dad_errstats != NULL) {
			dep = (dadk_errstats_t *)
			    dadkp->dad_errstats->ks_data;
			dep->dadk_rq_recov_err.value.ui32++;
		}
		return (COMMAND_DONE);
	}

	/* only a retried command has a meaningful failing block number */
	if (pktp->cp_retry) {
		err_blkno = pktp->cp_srtsec + ((pktp->cp_bytexfer -
			pktp->cp_resid) >> dadkp->dad_secshf);
	} else
		err_blkno = -1;

	if (dadkp->dad_errstats != NULL) {
		dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;

		/* soft vs hard error counters, by table severity */
		switch (dadk_errtab[scb].d_severity) {
			case GDA_RETRYABLE:
				dep->dadk_softerrs.value.ui32++;
				break;

			case GDA_FATAL:
				dep->dadk_harderrs.value.ui32++;
				break;

			default:
				break;
		}

		/* categorized error counters, by specific status code */
		switch (scb) {
			case DERR_INVCDB:
			case DERR_ILI:
			case DERR_EOM:
			case DERR_HW:
			case DERR_ICRC:
				dep->dadk_transerrs.value.ui32++;
				break;

			case DERR_AMNF:
			case DERR_TKONF:
			case DERR_DWF:
			case DERR_BBK:
			case DERR_UNC:
			case DERR_HARD:
			case DERR_MEDIUM:
			case DERR_DATA_PROT:
			case DERR_MISCOMP:
				dep->dadk_rq_media_err.value.ui32++;
				break;

			case DERR_NOTREADY:
				dep->dadk_rq_ntrdy_err.value.ui32++;
				break;

			case DERR_IDNF:
			case DERR_UNIT_ATTN:
				dep->dadk_rq_nodev_err.value.ui32++;
				break;

			case DERR_ILL:
			case DERR_RESV:
				dep->dadk_rq_illrq_err.value.ui32++;
				break;

			default:
				break;
		}
	}

	/* if attempting to read a sector from a cdrom audio disk */
	if ((dadkp->dad_cdrom) &&
	    (*((char *)(pktp->cp_cdbp)) == DCMD_READ) &&
	    (scb == DERR_ILL)) {
		return (COMMAND_DONE);
	}
	if (pktp->cp_passthru == NULL) {
		gda_errmsg(dadkp->dad_sd, pktp, dadk_name,
		    dadk_errtab[scb].d_severity, pktp->cp_srtsec,
		    err_blkno, dadk_cmds, dadk_sense);
	}

	/* device busy: come back later rather than retrying immediately */
	if (scb == DERR_BUSY) {
		(void) timeout(dadk_restart, (void *)pktp, DADK_BSY_TIMEOUT);
	}

	return (dadk_errtab[scb].d_action);
}
1459 
1460 static void
1461 dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp)
1462 {
1463 	struct dadk *dadkp;
1464 	int scb;
1465 
1466 	dadkp = PKT2DADK(pktp);
1467 	scb = (int)(*(char *)pktp->cp_scbp);
1468 
1469 
1470 	rwcmdp->status.failed_blk = rwcmdp->blkaddr +
1471 		((pktp->cp_bytexfer -
1472 		pktp->cp_resid) >> dadkp->dad_secshf);
1473 
1474 	rwcmdp->status.resid = pktp->cp_bp->b_resid +
1475 		pktp->cp_byteleft - pktp->cp_bytexfer + pktp->cp_resid;
1476 	switch ((int)(* (char *)pktp->cp_scbp)) {
1477 	case DERR_AMNF:
1478 	case DERR_ABORT:
1479 		rwcmdp->status.status = DADKIO_STAT_ILLEGAL_REQUEST;
1480 		break;
1481 	case DERR_DWF:
1482 	case DERR_IDNF:
1483 		rwcmdp->status.status = DADKIO_STAT_ILLEGAL_ADDRESS;
1484 		break;
1485 	case DERR_TKONF:
1486 	case DERR_UNC:
1487 	case DERR_BBK:
1488 		rwcmdp->status.status = DADKIO_STAT_MEDIUM_ERROR;
1489 		rwcmdp->status.failed_blk_is_valid = 1;
1490 		rwcmdp->status.resid = 0;
1491 		break;
1492 	case DERR_BUSY:
1493 		rwcmdp->status.status = DADKIO_STAT_NOT_READY;
1494 		break;
1495 	case DERR_INVCDB:
1496 	case DERR_HARD:
1497 		rwcmdp->status.status = DADKIO_STAT_HARDWARE_ERROR;
1498 		break;
1499 	case DERR_ICRC:
1500 	default:
1501 		rwcmdp->status.status = DADKIO_STAT_NOT_SUPPORTED;
1502 	}
1503 
1504 	if (rwcmdp->flags & DADKIO_FLAG_SILENT)
1505 		return;
1506 	gda_errmsg(dadkp->dad_sd, pktp, dadk_name, dadk_errtab[scb].d_severity,
1507 		rwcmdp->blkaddr, rwcmdp->status.failed_blk,
1508 		dadk_cmds, dadk_sense);
1509 }
1510 
/*
 * Completion callback for polled packets.  Intentionally empty:
 * completion is presumably observed elsewhere by the polling caller
 * (not visible in this chunk) -- confirm against the poll path.
 */
/*ARGSUSED*/
static void
dadk_polldone(struct buf *bp)
{
}
1516 
/*
 * Per-sub-transfer completion routine.  If the request has more
 * bytes outstanding and no error, set up and transport the next
 * sub-transfer; otherwise release flow control, free the bad-block
 * handle and the packet, and complete the buf.
 */
static void
dadk_iodone(struct buf *bp)
{
	struct cmpkt *pktp;
	struct dadk *dadkp;

	pktp  = GDA_BP_PKT(bp);
	dadkp = PKT2DADK(pktp);

	/* check for all iodone */
	pktp->cp_byteleft -= pktp->cp_bytexfer;
	if (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
		/* fresh retry budget for the next sub-transfer */
		pktp->cp_retry = 0;
		(void) dadk_iosetup(dadkp, pktp);


	/* 	transport the next one */
		if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) == CTL_SEND_SUCCESS)
			return;
		/* transport refused: fall back to the retry path */
		if ((dadk_ioretry(pktp, QUE_COMMAND)) == JUST_RETURN)
			return;
	}

	/* start next one */
	FLC_DEQUE(dadkp->dad_flcobjp, bp);

	/* free pkt */
	if (pktp->cp_private)
		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
	biodone(bp);
}
1549 
/*
 * Block the caller until the removable-media state differs from
 * *state, then return the new state through *state.
 *
 * Returns ENXIO for non-removable devices, EINTR if the wait is
 * interrupted by a signal, 0 on a state change.  The first waiter
 * starts dadk_watch_thread(); dad_thread_cnt (under dad_mutex)
 * tracks how many waiters keep that thread alive.
 */
int
dadk_check_media(opaque_t objp, int *state)
{
	struct dadk *dadkp = (struct dadk *)objp;

	if (!dadkp->dad_rmb) {
		return (ENXIO);
	}
#ifdef DADK_DEBUG
	if (dadk_debug & DSTATE)
		PRF("dadk_check_media: user state %x disk state %x\n",
			*state, dadkp->dad_iostate);
#endif
	/*
	 * If state already changed just return
	 */
	if (*state != dadkp->dad_iostate) {
		*state = dadkp->dad_iostate;
		return (0);
	}

	/*
	 * Startup polling on thread state
	 */
	mutex_enter(&dadkp->dad_mutex);
	if (dadkp->dad_thread_cnt == 0) {
		/*
		 * One thread per removable dadk device
		 */
		(void) thread_create(NULL, 0, dadk_watch_thread, dadkp, 0, &p0,
		    TS_RUN, v.v_maxsyspri - 2);
	}
	dadkp->dad_thread_cnt++;

	/*
	 * Wait for state to change
	 */
	do {
		/* cv_wait_sig() returns 0 when interrupted by a signal */
		if (cv_wait_sig(&dadkp->dad_state_cv, &dadkp->dad_mutex) == 0) {
			dadkp->dad_thread_cnt--;
			mutex_exit(&dadkp->dad_mutex);
			return (EINTR);
		}
	} while (*state == dadkp->dad_iostate);
	*state = dadkp->dad_iostate;
	dadkp->dad_thread_cnt--;
	mutex_exit(&dadkp->dad_mutex);
	return (0);
}
1599 
1600 
/* microseconds to let the drive settle after media insertion */
#define	MEDIA_ACCESS_DELAY 2000000

/*
 * Per-device media watch thread: poll the drive's media state every
 * dadk_check_media_time microseconds and broadcast dad_state_cv when
 * it changes.  Runs while dad_thread_cnt is nonzero, i.e. while at
 * least one dadk_check_media() caller is waiting.
 */
static void
dadk_watch_thread(struct dadk *dadkp)
{
	enum dkio_state state;
	int interval;

	interval = drv_usectohz(dadk_check_media_time);

	do {
		if (dadk_rmb_ioctl(dadkp, DCMD_GET_STATE, (intptr_t)&state, 0,
		    DADK_SILENT)) {
			/*
			 * Assume state remained the same
			 */
			state = dadkp->dad_iostate;
		}

		/*
		 * now signal the waiting thread if this is *not* the
		 * specified state;
		 * delay the signal if the state is DKIO_INSERTED
		 * to allow the target to recover
		 */
		if (state != dadkp->dad_iostate) {

			dadkp->dad_iostate = state;
			if (state == DKIO_INSERTED) {
				/*
				 * delay the signal to give the drive a chance
				 * to do what it apparently needs to do
				 */
				(void) timeout((void(*)(void *))cv_broadcast,
				    (void *)&dadkp->dad_state_cv,
				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
			} else {
				cv_broadcast(&dadkp->dad_state_cv);
			}
		}
		delay(interval);
	} while (dadkp->dad_thread_cnt);
}
1644 
1645 int
1646 dadk_inquiry(opaque_t objp, opaque_t *inqpp)
1647 {
1648 	struct dadk *dadkp = (struct dadk *)objp;
1649 	struct scsi_inquiry **sinqpp = (struct scsi_inquiry **)inqpp;
1650 
1651 	if (dadkp && dadkp->dad_sd && dadkp->dad_sd->sd_inq) {
1652 		*sinqpp = dadkp->dad_sd->sd_inq;
1653 		return (DDI_SUCCESS);
1654 	}
1655 
1656 	return (DDI_FAILURE);
1657 }
1658 
1659 static int
1660 dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags, int silent)
1661 
1662 {
1663 	struct buf *bp;
1664 	int err;
1665 	struct cmpkt *pktp;
1666 
1667 	if ((bp = getrbuf(KM_SLEEP)) == NULL) {
1668 		return (ENOMEM);
1669 	}
1670 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_rmb_iodone, NULL, NULL);
1671 	if (!pktp) {
1672 		freerbuf(bp);
1673 		return (ENOMEM);
1674 	}
1675 	bp->b_back  = (struct buf *)arg;
1676 	bp->b_forw  = (struct buf *)dadkp->dad_flcobjp;
1677 	pktp->cp_passthru = (opaque_t)(intptr_t)silent;
1678 
1679 	err = CTL_IOCTL(dadkp->dad_ctlobjp, cmd, (uintptr_t)pktp, flags);
1680 	freerbuf(bp);
1681 	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
1682 	return (err);
1683 
1684 
1685 }
1686 
1687 static void
1688 dadk_rmb_iodone(struct buf *bp)
1689 {
1690 	struct cmpkt *pktp;
1691 	struct dadk *dadkp;
1692 
1693 	pktp  = GDA_BP_PKT(bp);
1694 	dadkp = PKT2DADK(pktp);
1695 
1696 	bp->b_flags &= ~(B_DONE|B_BUSY);
1697 
1698 	/* Start next one */
1699 	FLC_DEQUE(dadkp->dad_flcobjp, bp);
1700 
1701 	biodone(bp);
1702 }
1703 
1704 static int
1705 dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp, dev_t dev,
1706 	enum uio_seg dataspace, int rw)
1707 {
1708 	struct dadkio_rwcmd *rwcmdp = (struct dadkio_rwcmd *)cmdp;
1709 	struct buf	*bp;
1710 	struct iovec	aiov;
1711 	struct uio	auio;
1712 	struct uio	*uio = &auio;
1713 	int		status;
1714 
1715 	bp = getrbuf(KM_SLEEP);
1716 
1717 	bp->av_forw = bp->b_forw = (struct buf *)dadkp;
1718 	bp->b_back  = (struct buf *)rwcmdp;	/* ioctl packet */
1719 
1720 	bzero((caddr_t)&auio, sizeof (struct uio));
1721 	bzero((caddr_t)&aiov, sizeof (struct iovec));
1722 	aiov.iov_base = rwcmdp->bufaddr;
1723 	aiov.iov_len = rwcmdp->buflen;
1724 	uio->uio_iov = &aiov;
1725 
1726 	uio->uio_iovcnt = 1;
1727 	uio->uio_resid = rwcmdp->buflen;
1728 	uio->uio_segflg = dataspace;
1729 
1730 	/* Let physio do the rest... */
1731 	status = physio(dadk_dk_strategy, bp, dev, rw, dadkmin, uio);
1732 
1733 	freerbuf(bp);
1734 	return (status);
1735 
1736 }
1737 
/* Do not let a user gendisk request get too big or */
/* else we could use too many resources.	    */
1740 
/*
 * physio() minphys routine: clamp each segment of a user gendisk
 * request to dadk_dk_maxphys bytes.
 */
static void
dadkmin(struct buf *bp)
{
	if (bp->b_bcount > dadk_dk_maxphys)
		bp->b_bcount = dadk_dk_maxphys;
}
1747 
1748 static int
1749 dadk_dk_strategy(struct buf *bp)
1750 {
1751 	dadk_dk((struct dadk *)bp->av_forw, (struct dadkio_rwcmd *)bp->b_back,
1752 	    bp);
1753 	return (0);
1754 }
1755 
1756 static void
1757 dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *rwcmdp, struct buf *bp)
1758 {
1759 	struct  cmpkt *pktp;
1760 
1761 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, NULL, NULL);
1762 	if (!pktp) {
1763 		bioerror(bp, ENOMEM);
1764 		biodone(bp);
1765 		return;
1766 	}
1767 
1768 	pktp->cp_passthru = rwcmdp;
1769 
1770 	(void) dadk_ioprep(dadkp, pktp);
1771 
1772 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
1773 }
1774