xref: /titanic_41/usr/src/uts/intel/io/dktp/dcdev/dadk.c (revision 5ce5f3670f7934e376808da0d1309924ecf8f9e5)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Direct Attached Disk
31  */
32 
33 #include <sys/file.h>
34 #include <sys/scsi/scsi.h>
35 #include <sys/var.h>
36 #include <sys/proc.h>
37 #include <sys/dktp/cm.h>
38 #include <sys/vtoc.h>
39 #include <sys/dkio.h>
40 
41 #include <sys/dktp/dadev.h>
42 #include <sys/dktp/fctypes.h>
43 #include <sys/dktp/flowctrl.h>
44 #include <sys/dktp/tgcom.h>
45 #include <sys/dktp/tgdk.h>
46 #include <sys/dktp/bbh.h>
47 #include <sys/dktp/dadkio.h>
48 #include <sys/dktp/dadk.h>
49 #include <sys/cdio.h>
50 
51 /*
52  * Local Function Prototypes
53  */
54 static void dadk_restart(void *pktp);
55 static void dadk_pktcb(struct cmpkt *pktp);
56 static void dadk_iodone(struct buf *bp);
57 static void dadk_polldone(struct buf *bp);
58 static void dadk_setcap(struct dadk *dadkp);
59 static void dadk_create_errstats(struct dadk *dadkp, int instance);
60 static void dadk_destroy_errstats(struct dadk *dadkp);
61 
62 static int dadk_chkerr(struct cmpkt *pktp);
63 static int dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp);
64 static int dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp);
65 static int dadk_ioretry(struct cmpkt *pktp, int action);
66 
67 static struct cmpkt *dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp,
68     struct buf *bp, void (*cb_func)(struct buf *), int (*func)(caddr_t),
69     caddr_t arg);
70 
71 static int  dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t),
72     caddr_t arg);
73 static void dadk_transport(opaque_t com_data, struct buf *bp);
74 
75 struct tgcom_objops dadk_com_ops = {
76 	nodev,
77 	nodev,
78 	dadk_pkt,
79 	dadk_transport,
80 	0, 0
81 };
82 
83 /*
84  * architecture-dependent allocation restrictions for dadk_iob_alloc(). For
85  * x86, we'll set dma_attr_addr_hi to dadk_max_phys_addr and dma_attr_sgllen
86  * to dadk_sgl_size during _init().
87  */
88 #if defined(__sparc)
89 static ddi_dma_attr_t dadk_alloc_attr = {
90 	DMA_ATTR_V0,	/* version number */
91 	0x0,		/* lowest usable address */
92 	0xFFFFFFFFull,	/* high DMA address range */
93 	0xFFFFFFFFull,	/* DMA counter register */
94 	1,		/* DMA address alignment */
95 	1,		/* DMA burstsizes */
96 	1,		/* min effective DMA size */
97 	0xFFFFFFFFull,	/* max DMA xfer size */
98 	0xFFFFFFFFull,	/* segment boundary */
99 	1,		/* s/g list length */
100 	512,		/* granularity of device */
101 	0,		/* DMA transfer flags */
102 };
103 #elif defined(__x86)
104 static ddi_dma_attr_t dadk_alloc_attr = {
105 	DMA_ATTR_V0,	/* version number */
106 	0x0,		/* lowest usable address */
107 	0x0,		/* high DMA address range [set in _init()] */
108 	0xFFFFull,	/* DMA counter register */
109 	512,		/* DMA address alignment */
110 	1,		/* DMA burstsizes */
111 	1,		/* min effective DMA size */
112 	0xFFFFFFFFull,	/* max DMA xfer size */
113 	0xFFFFFFFFull,	/* segment boundary */
114 	0,		/* s/g list length [set in _init()] */
115 	512,		/* granularity of device */
116 	0,		/* DMA transfer flags */
117 };
118 
119 uint64_t dadk_max_phys_addr = 0xFFFFFFFFull;
120 int dadk_sgl_size = 0xFF;
121 #endif
122 
123 static int dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags,
124     int silent);
125 static void dadk_rmb_iodone(struct buf *bp);
126 
127 static int dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp,
128     dev_t dev, enum uio_seg dataspace, int rw);
129 static void dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *scmdp,
130     struct buf *bp);
131 static void dadkmin(struct buf *bp);
132 static int dadk_dk_strategy(struct buf *bp);
133 static void dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp);
134 
135 struct tgdk_objops dadk_ops = {
136 	dadk_init,
137 	dadk_free,
138 	dadk_probe,
139 	dadk_attach,
140 	dadk_open,
141 	dadk_close,
142 	dadk_ioctl,
143 	dadk_strategy,
144 	dadk_setgeom,
145 	dadk_getgeom,
146 	dadk_iob_alloc,
147 	dadk_iob_free,
148 	dadk_iob_htoc,
149 	dadk_iob_xfer,
150 	dadk_dump,
151 	dadk_getphygeom,
152 	dadk_set_bbhobj,
153 	dadk_check_media,
154 	dadk_inquiry,
155 	dadk_cleanup,
156 	0
157 };
158 
159 /*
160  * Local static data
161  */
162 
163 #ifdef	DADK_DEBUG
164 #define	DENT	0x0001
165 #define	DERR	0x0002
166 #define	DIO	0x0004
167 #define	DGEOM	0x0010
168 #define	DSTATE  0x0020
169 static	int	dadk_debug = DGEOM;
170 
171 #endif	/* DADK_DEBUG */
172 
173 static int dadk_check_media_time = 3000000;	/* 3 Second State Check */
174 static int dadk_dk_maxphys = 0x80000;
175 
176 static char	*dadk_cmds[] = {
177 	"\000Unknown",			/* unknown 		*/
178 	"\001read sector",		/* DCMD_READ 1		*/
179 	"\002write sector",		/* DCMD_WRITE 2		*/
180 	"\003format track",		/* DCMD_FMTTRK 3	*/
181 	"\004format whole drive",	/* DCMD_FMTDRV 4	*/
182 	"\005recalibrate",		/* DCMD_RECAL  5	*/
183 	"\006seek sector",		/* DCMD_SEEK   6	*/
184 	"\007read verify",		/* DCMD_RDVER  7	*/
185 	"\010read defect list",		/* DCMD_GETDEF 8	*/
186 	"\011lock door",		/* DCMD_LOCK   9	*/
187 	"\012unlock door",		/* DCMD_UNLOCK 10	*/
188 	"\013start motor",		/* DCMD_START_MOTOR 11	*/
189 	"\014stop motor",		/* DCMD_STOP_MOTOR 12	*/
190 	"\015eject",			/* DCMD_EJECT  13	*/
191 	"\016update geometry",		/* DCMD_UPDATE_GEOM  14	*/
192 	"\017get state",		/* DCMD_GET_STATE  15	*/
193 	"\020cdrom pause",		/* DCMD_PAUSE  16	*/
194 	"\021cdrom resume",		/* DCMD_RESUME  17	*/
195 	"\022cdrom play track index",	/* DCMD_PLAYTRKIND  18	*/
196 	"\023cdrom play msf",		/* DCMD_PLAYMSF  19	*/
197 	"\024cdrom sub channel",	/* DCMD_SUBCHNL  20	*/
198 	"\025cdrom read mode 1",	/* DCMD_READMODE1  21	*/
199 	"\026cdrom read toc header",	/* DCMD_READTOCHDR  22	*/
200 	"\027cdrom read toc entry",	/* DCMD_READTOCENT  23	*/
201 	"\030cdrom read offset",	/* DCMD_READOFFSET  24	*/
202 	"\031cdrom read mode 2",	/* DCMD_READMODE2  25	*/
203 	"\032cdrom volume control",	/* DCMD_VOLCTRL  26	*/
204 	"\033flush cache",		/* DCMD_FLUSH_CACHE  27	*/
205 	NULL
206 };
207 
208 static char *dadk_sense[] = {
209 	"\000Success",			/* DERR_SUCCESS		*/
210 	"\001address mark not found",	/* DERR_AMNF		*/
211 	"\002track 0 not found",	/* DERR_TKONF		*/
212 	"\003aborted command",		/* DERR_ABORT		*/
213 	"\004write fault",		/* DERR_DWF		*/
214 	"\005ID not found",		/* DERR_IDNF		*/
215 	"\006drive busy",		/* DERR_BUSY		*/
216 	"\007uncorrectable data error",	/* DERR_UNC		*/
217 	"\010bad block detected",	/* DERR_BBK		*/
218 	"\011invalid command",		/* DERR_INVCDB		*/
219 	"\012device hard error",	/* DERR_HARD		*/
220 	"\013illegal length indicated", /* DERR_ILI		*/
221 	"\014end of media",		/* DERR_EOM		*/
222 	"\015media change requested",	/* DERR_MCR		*/
223 	"\016recovered from error",	/* DERR_RECOVER		*/
224 	"\017device not ready",		/* DERR_NOTREADY	*/
225 	"\020medium error",		/* DERR_MEDIUM		*/
226 	"\021hardware error",		/* DERR_HW		*/
227 	"\022illegal request",		/* DERR_ILL		*/
228 	"\023unit attention",		/* DERR_UNIT_ATTN	*/
229 	"\024data protection",		/* DERR_DATA_PROT	*/
230 	"\025miscompare",		/* DERR_MISCOMPARE	*/
231 	"\026ICRC error during UDMA",	/* DERR_ICRC		*/
232 	"\027reserved",			/* DERR_RESV		*/
233 	NULL
234 };
235 
236 static char *dadk_name = "Disk";
237 
238 /*
239  *	This is the loadable module wrapper
240  */
241 #include <sys/modctl.h>
242 
243 extern struct mod_ops mod_miscops;
244 
245 static struct modlmisc modlmisc = {
246 	&mod_miscops,	/* Type of module */
247 	"Direct Attached Disk %I%"
248 };
249 
250 static struct modlinkage modlinkage = {
251 	MODREV_1, (void *)&modlmisc, NULL
252 };
253 
254 int
255 _init(void)
256 {
257 #ifdef DADK_DEBUG
258 	if (dadk_debug & DENT)
259 		PRF("dadk_init: call\n");
260 #endif
261 
262 #if defined(__x86)
263 	/* set the max physical address for iob allocs on x86 */
264 	dadk_alloc_attr.dma_attr_addr_hi = dadk_max_phys_addr;
265 
266 	/*
267 	 * set the sgllen for iob allocs on x86. If this is set less than
268 	 * the number of pages the buffer will take (taking into account
269 	 * alignment), it forces the allocator to try to allocate
270 	 * contiguous pages.
271 	 */
272 	dadk_alloc_attr.dma_attr_sgllen = dadk_sgl_size;
273 #endif
274 
275 	return (mod_install(&modlinkage));
276 }
277 
278 int
279 _fini(void)
280 {
281 #ifdef DADK_DEBUG
282 	if (dadk_debug & DENT)
283 		PRF("dadk_fini: call\n");
284 #endif
285 
286 	return (mod_remove(&modlinkage));
287 }
288 
289 int
290 _info(struct modinfo *modinfop)
291 {
292 	return (mod_info(&modlinkage, modinfop));
293 }
294 
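/*
 * dadk_create: allocate the tgdk object and its embedded dadk soft state
 * in a single KM_NOSLEEP allocation and wire up the ops vector, private
 * data, and extended-block pointers.  Returns NULL if memory is not
 * available; dadk_free() releases the same single allocation.
 */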
295 struct tgdk_obj *
296 dadk_create()
297 {
298 	struct tgdk_obj *dkobjp;
299 	struct dadk *dadkp;
300 
301 	dkobjp = kmem_zalloc((sizeof (*dkobjp) + sizeof (*dadkp)), KM_NOSLEEP);
302 	if (!dkobjp)
303 		return (NULL);
304 	dadkp = (struct dadk *)(dkobjp+1);
305 
306 	dkobjp->tg_ops  = (struct  tgdk_objops *)&dadk_ops;
307 	dkobjp->tg_data = (opaque_t)dadkp;
308 	dkobjp->tg_ext = &(dkobjp->tg_extblk);
309 	dadkp->dad_extp = &(dkobjp->tg_extblk);
310 
311 #ifdef DADK_DEBUG
312 	if (dadk_debug & DENT)
313 		PRF("dadk_create: tgdkobjp= 0x%x dadkp= 0x%x\n", dkobjp, dadkp);
314 #endif
315 	return (dkobjp);
316 }
317 
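/*
 * dadk_init: record the target device and its controller object (taken
 * from the HBA transport field), point sd_private back at this dadk,
 * set up the tgcom object that the flow control layer uses to issue
 * packets, initialize the bad block handling object, and let FLC_INIT()
 * take ownership of the queuing object.
 */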
318 int
319 dadk_init(opaque_t objp, opaque_t devp, opaque_t flcobjp, opaque_t queobjp,
320 	opaque_t bbhobjp, void *lkarg)
321 {
322 	struct dadk *dadkp = (struct dadk *)objp;
323 	struct scsi_device *sdevp = (struct scsi_device *)devp;
324 
325 	dadkp->dad_sd = devp;
326 	dadkp->dad_ctlobjp = (opaque_t)sdevp->sd_address.a_hba_tran;
327 	sdevp->sd_private = (caddr_t)dadkp;
328 
329 	/* initialize the communication object */
330 	dadkp->dad_com.com_data = (opaque_t)dadkp;
331 	dadkp->dad_com.com_ops  = &dadk_com_ops;
332 
333 	dadkp->dad_bbhobjp = bbhobjp;
334 	BBH_INIT(bbhobjp);
335 
336 	dadkp->dad_flcobjp = flcobjp;
337 	return (FLC_INIT(flcobjp, &(dadkp->dad_com), queobjp, lkarg));
338 }
339 
340 int
341 dadk_free(struct tgdk_obj *dkobjp)
342 {
343 	TGDK_CLEANUP(dkobjp);
344 	kmem_free(dkobjp, (sizeof (*dkobjp) + sizeof (struct dadk)));
345 
346 	return (DDI_SUCCESS);
347 }
348 
349 void
350 dadk_cleanup(struct tgdk_obj *dkobjp)
351 {
352 	struct dadk *dadkp;
353 
354 	dadkp = (struct dadk *)(dkobjp->tg_data);
355 	if (dadkp->dad_sd)
356 		dadkp->dad_sd->sd_private = NULL;
357 	if (dadkp->dad_bbhobjp) {
358 		BBH_FREE(dadkp->dad_bbhobjp);
359 		dadkp->dad_bbhobjp = NULL;
360 	}
361 	if (dadkp->dad_flcobjp) {
362 		FLC_FREE(dadkp->dad_flcobjp);
363 		dadkp->dad_flcobjp = NULL;
364 	}
365 }
366 
367 /* ARGSUSED */
368 int
369 dadk_probe(opaque_t objp, int kmsflg)
370 {
371 	struct dadk *dadkp = (struct dadk *)objp;
372 	struct scsi_device *devp;
373 	char   name[80];
374 
375 	devp = dadkp->dad_sd;
376 	if (!devp->sd_inq || (devp->sd_inq->inq_dtype == DTYPE_NOTPRESENT) ||
377 		(devp->sd_inq->inq_dtype == DTYPE_UNKNOWN)) {
378 		return (DDI_PROBE_FAILURE);
379 	}
380 
381 	switch (devp->sd_inq->inq_dtype) {
382 		case DTYPE_DIRECT:
383 			dadkp->dad_ctype = DKC_DIRECT;
384 			dadkp->dad_extp->tg_nodetype = DDI_NT_BLOCK;
385 			dadkp->dad_extp->tg_ctype = DKC_DIRECT;
386 			break;
387 		case DTYPE_RODIRECT: /* eg cdrom */
388 			dadkp->dad_ctype = DKC_CDROM;
389 			dadkp->dad_extp->tg_rdonly = 1;
390 			dadkp->dad_rdonly = 1;
391 			dadkp->dad_cdrom = 1;
392 			dadkp->dad_extp->tg_nodetype = DDI_NT_CD;
393 			dadkp->dad_extp->tg_ctype = DKC_CDROM;
394 			break;
395 		case DTYPE_WORM:
396 		case DTYPE_OPTICAL:
397 		default:
398 			return (DDI_PROBE_FAILURE);
399 	}
400 
401 	dadkp->dad_extp->tg_rmb = dadkp->dad_rmb = devp->sd_inq->inq_rmb;
402 
403 	dadkp->dad_secshf = SCTRSHFT;
404 	dadkp->dad_blkshf = 0;
405 
406 	/* display the device name */
407 	(void) strcpy(name, "Vendor '");
408 	gda_inqfill((caddr_t)devp->sd_inq->inq_vid, 8, &name[strlen(name)]);
409 	(void) strcat(name, "' Product '");
410 	gda_inqfill((caddr_t)devp->sd_inq->inq_pid, 16, &name[strlen(name)]);
411 	(void) strcat(name, "'");
412 	gda_log(devp->sd_dev, dadk_name, CE_NOTE, "!<%s>\n", name);
413 
414 	return (DDI_PROBE_SUCCESS);
415 }
416 
417 
418 /* ARGSUSED */
419 int
420 dadk_attach(opaque_t objp)
421 {
422 	return (DDI_SUCCESS);
423 }
424 
425 int
426 dadk_set_bbhobj(opaque_t objp, opaque_t bbhobjp)
427 {
428 	struct dadk *dadkp = (struct dadk *)objp;
429 	/* free the old bbh object */
430 	if (dadkp->dad_bbhobjp)
431 		BBH_FREE(dadkp->dad_bbhobjp);
432 
433 	/* initialize the new bbh object */
434 	dadkp->dad_bbhobjp = bbhobjp;
435 	BBH_INIT(bbhobjp);
436 
437 	return (DDI_SUCCESS);
438 }
439 
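/*
 * dadk_open: for removable media, spin up the drive, lock the door and
 * refresh the geometry before marking the media inserted.  For fixed
 * disks that have already been sized (dad_phyg.g_cap != 0) only the
 * kstats are restarted.  The remainder queries the write cache enable
 * bit and the logical/physical geometry, derives the sector shift
 * factors, and creates the error and performance kstats.
 */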
440 /* ARGSUSED */
441 int
442 dadk_open(opaque_t objp, int flag)
443 {
444 	struct dadk *dadkp = (struct dadk *)objp;
445 	int error;
446 	int wce;
447 
448 	if (!dadkp->dad_rmb) {
449 		if (dadkp->dad_phyg.g_cap) {
450 			FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
451 			    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));
452 			return (DDI_SUCCESS);
453 		}
454 	} else {
455 	    mutex_enter(&dadkp->dad_mutex);
456 	    dadkp->dad_iostate = DKIO_NONE;
457 	    cv_broadcast(&dadkp->dad_state_cv);
458 	    mutex_exit(&dadkp->dad_mutex);
459 
460 	    if (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0, 0, DADK_SILENT) ||
461 		dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT) ||
462 		dadk_rmb_ioctl(dadkp, DCMD_UPDATE_GEOM, 0, 0, DADK_SILENT)) {
463 		    return (DDI_FAILURE);
464 	    }
465 
466 	    mutex_enter(&dadkp->dad_mutex);
467 	    dadkp->dad_iostate = DKIO_INSERTED;
468 	    cv_broadcast(&dadkp->dad_state_cv);
469 	    mutex_exit(&dadkp->dad_mutex);
470 	}
471 
472 	/*
473 	 * get write cache enable state
474 	 * If there is an error, we must assume that the write cache
475 	 * is enabled.
476 	 * NOTE: Since there is currently no Solaris mechanism to
477 	 * change the state of the Write Cache Enable feature,
478 	 * this code just checks the value of the WCE bit
479 	 * obtained at device init time.  If a mechanism
480 	 * is added to the driver to change WCE, dad_wce
481 	 * must be updated appropriately.
482 	 */
483 	error = CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETWCE,
484 	    (uintptr_t)&wce, FKIOCTL | FNATIVE);
485 	mutex_enter(&dadkp->dad_mutex);
486 	dadkp->dad_wce = (error != 0) || (wce != 0);
487 	mutex_exit(&dadkp->dad_mutex);
488 
489 	/* logical disk geometry */
490 	CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETGEOM,
491 	    (uintptr_t)&dadkp->dad_logg, FKIOCTL | FNATIVE);
492 	if (dadkp->dad_logg.g_cap == 0)
493 		return (DDI_FAILURE);
494 
495 	/* get physical disk geometry */
496 	CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETPHYGEOM,
497 	    (uintptr_t)&dadkp->dad_phyg, FKIOCTL | FNATIVE);
498 	if (dadkp->dad_phyg.g_cap == 0)
499 		return (DDI_FAILURE);
500 
501 	dadk_setcap(dadkp);
502 
503 	dadk_create_errstats(dadkp,
504 	    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));
505 
506 	/* start profiling */
507 	FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
508 		ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));
509 
510 	return (DDI_SUCCESS);
511 }
512 
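/*
 * dadk_setcap: normalize the physical sector size (default 2048 bytes
 * for CD-ROM, 512 otherwise) and derive the shift factors used for
 * block/byte conversions.  For example, a 2048-byte sector yields
 * dad_blkshf = 2 and dad_secshf = SCTRSHFT + 2 = 11, so byte counts can
 * be converted to sectors with a right shift instead of a divide.
 */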
513 static void
514 dadk_setcap(struct dadk *dadkp)
515 {
516 	int	 totsize;
517 	int	 i;
518 
519 	totsize = dadkp->dad_phyg.g_secsiz;
520 
521 	if (totsize == 0) {
522 		if (dadkp->dad_cdrom) {
523 			totsize = 2048;
524 		} else {
525 			totsize = NBPSCTR;
526 		}
527 	} else {
528 		/* Round down sector size to multiple of 512B */
529 		totsize &= ~(NBPSCTR-1);
530 	}
531 	dadkp->dad_phyg.g_secsiz = totsize;
532 
533 	/* set sec,block shift factor - (512->0, 1024->1, 2048->2, etc.) */
534 	totsize >>= SCTRSHFT;
535 	for (i = 0; totsize != 1; i++, totsize >>= 1);
536 	dadkp->dad_blkshf = i;
537 	dadkp->dad_secshf = i + SCTRSHFT;
538 }
539 
540 
541 static void
542 dadk_create_errstats(struct dadk *dadkp, int instance)
543 {
544 	dadk_errstats_t *dep;
545 	char kstatname[KSTAT_STRLEN];
546 	dadk_ioc_string_t dadk_ioc_string;
547 
548 	if (dadkp->dad_errstats)
549 		return;
550 
551 	(void) sprintf(kstatname, "cmdk%d,error", instance);
552 	dadkp->dad_errstats = kstat_create("cmdkerror", instance,
553 	    kstatname, "device_error", KSTAT_TYPE_NAMED,
554 	    sizeof (dadk_errstats_t) / sizeof (kstat_named_t),
555 	    KSTAT_FLAG_PERSISTENT);
556 
557 	if (!dadkp->dad_errstats)
558 		return;
559 
560 	dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;
561 
562 	kstat_named_init(&dep->dadk_softerrs,
563 	    "Soft Errors", KSTAT_DATA_UINT32);
564 	kstat_named_init(&dep->dadk_harderrs,
565 	    "Hard Errors", KSTAT_DATA_UINT32);
566 	kstat_named_init(&dep->dadk_transerrs,
567 	    "Transport Errors", KSTAT_DATA_UINT32);
568 	kstat_named_init(&dep->dadk_model,
569 	    "Model", KSTAT_DATA_CHAR);
570 	kstat_named_init(&dep->dadk_revision,
571 	    "Revision", KSTAT_DATA_CHAR);
572 	kstat_named_init(&dep->dadk_serial,
573 	    "Serial No", KSTAT_DATA_CHAR);
574 	kstat_named_init(&dep->dadk_capacity,
575 	    "Size", KSTAT_DATA_ULONGLONG);
576 	kstat_named_init(&dep->dadk_rq_media_err,
577 	    "Media Error", KSTAT_DATA_UINT32);
578 	kstat_named_init(&dep->dadk_rq_ntrdy_err,
579 	    "Device Not Ready", KSTAT_DATA_UINT32);
580 	kstat_named_init(&dep->dadk_rq_nodev_err,
581 	    "No Device", KSTAT_DATA_UINT32);
582 	kstat_named_init(&dep->dadk_rq_recov_err,
583 	    "Recoverable", KSTAT_DATA_UINT32);
584 	kstat_named_init(&dep->dadk_rq_illrq_err,
585 	    "Illegal Request", KSTAT_DATA_UINT32);
586 
587 	dadkp->dad_errstats->ks_private = dep;
588 	dadkp->dad_errstats->ks_update = nulldev;
589 	kstat_install(dadkp->dad_errstats);
590 
591 	/* get model */
592 	dep->dadk_model.value.c[0] = 0;
593 	dadk_ioc_string.is_buf = &dep->dadk_model.value.c[0];
594 	dadk_ioc_string.is_size = sizeof (dep->dadk_model.value.c);
595 	CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETMODEL,
596 	    (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);
597 
598 	/* get serial */
599 	dep->dadk_serial.value.c[0] = 0;
600 	dadk_ioc_string.is_buf = &dep->dadk_serial.value.c[0];
601 	dadk_ioc_string.is_size = sizeof (dep->dadk_serial.value.c);
602 	CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETSERIAL,
603 	    (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);
604 
605 	/* Get revision */
606 	dep->dadk_revision.value.c[0] = 0;
607 
608 	/* Get capacity */
609 
610 	dep->dadk_capacity.value.ui64 =
611 	    (uint64_t)dadkp->dad_logg.g_cap *
612 	    (uint64_t)dadkp->dad_logg.g_secsiz;
613 }
614 
615 
616 int
617 dadk_close(opaque_t objp)
618 {
619 	struct dadk *dadkp = (struct dadk *)objp;
620 
621 	if (dadkp->dad_rmb) {
622 		(void) dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0, 0,
623 		    DADK_SILENT);
624 		(void) dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT);
625 	}
626 	FLC_STOP_KSTAT(dadkp->dad_flcobjp);
627 
628 	dadk_destroy_errstats(dadkp);
629 
630 	return (DDI_SUCCESS);
631 }
632 
633 static void
634 dadk_destroy_errstats(struct dadk *dadkp)
635 {
636 	if (!dadkp->dad_errstats)
637 		return;
638 
639 	kstat_delete(dadkp->dad_errstats);
640 	dadkp->dad_errstats = NULL;
641 }
642 
643 
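/*
 * dadk_strategy: reject writes to read-only media and any transfer that
 * is not a multiple of the sector size, convert the logical block number
 * in the buf to a physical sector with LBLK2SEC(), and hand the request
 * to the flow control object for queuing.
 */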
644 int
645 dadk_strategy(opaque_t objp, struct buf *bp)
646 {
647 	struct dadk *dadkp = (struct dadk *)objp;
648 
649 	if (dadkp->dad_rdonly && !(bp->b_flags & B_READ)) {
650 		bioerror(bp, EROFS);
651 		return (DDI_FAILURE);
652 	}
653 
654 	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
655 		bioerror(bp, ENXIO);
656 		return (DDI_FAILURE);
657 	}
658 
659 	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));
660 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
661 
662 	return (DDI_SUCCESS);
663 }
664 
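/*
 * dadk_dump: polled write path used for crash dumps.  A single packet is
 * prepared with CPF_NOINTR set, then dadk_iosetup()/dadk_transport() are
 * looped until the whole buffer has been transferred or an error is
 * posted on the buf; no interrupts or flow control queuing are involved.
 */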
665 int
666 dadk_dump(opaque_t objp, struct buf *bp)
667 {
668 	struct dadk *dadkp = (struct dadk *)objp;
669 	struct cmpkt *pktp;
670 
671 	if (dadkp->dad_rdonly) {
672 		bioerror(bp, EROFS);
673 		return (DDI_FAILURE);
674 	}
675 
676 	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
677 		bioerror(bp, ENXIO);
678 		return (DDI_FAILURE);
679 	}
680 
681 	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));
682 
683 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_polldone, NULL, NULL);
684 	if (!pktp) {
685 		cmn_err(CE_WARN, "no resources for dumping");
686 		bioerror(bp, EIO);
687 		return (DDI_FAILURE);
688 	}
689 	pktp->cp_flags |= CPF_NOINTR;
690 
691 	(void) dadk_ioprep(dadkp, pktp);
692 	dadk_transport(dadkp, bp);
693 	pktp->cp_byteleft -= pktp->cp_bytexfer;
694 
695 	while (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
696 		(void) dadk_iosetup(dadkp, pktp);
697 		dadk_transport(dadkp, bp);
698 		pktp->cp_byteleft -= pktp->cp_bytexfer;
699 	}
700 
701 	if (pktp->cp_private)
702 		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
703 	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
704 	return (DDI_SUCCESS);
705 }
706 
707 /* ARGSUSED  */
708 int
709 dadk_ioctl(opaque_t objp, dev_t dev, int cmd, intptr_t arg, int flag,
710 	cred_t *cred_p, int *rval_p)
711 {
712 	struct dadk *dadkp = (struct dadk *)objp;
713 
714 	switch (cmd) {
715 	case DKIOCGETDEF:
716 	    {
717 		struct buf	*bp;
718 		int		err, head;
719 		unsigned char	*secbuf;
720 		STRUCT_DECL(defect_header, adh);
721 
722 		STRUCT_INIT(adh, flag & FMODELS);
723 
724 		/*
725 		 * copyin header ....
726 		 * yields head number and buffer address
727 		 */
728 		if (ddi_copyin((caddr_t)arg, STRUCT_BUF(adh), STRUCT_SIZE(adh),
729 		    flag))
730 			return (EFAULT);
731 		head = STRUCT_FGET(adh, head);
732 		if (head < 0 || head >= dadkp->dad_phyg.g_head)
733 			return (ENXIO);
734 		secbuf = kmem_zalloc(NBPSCTR, KM_SLEEP);
735 		if (!secbuf)
736 			return (ENOMEM);
737 		bp = getrbuf(KM_SLEEP);
738 		if (!bp) {
739 			kmem_free(secbuf, NBPSCTR);
740 			return (ENOMEM);
741 		}
742 
743 		bp->b_edev = dev;
744 		bp->b_dev  = cmpdev(dev);
745 		bp->b_flags = B_BUSY;
746 		bp->b_resid = 0;
747 		bp->b_bcount = NBPSCTR;
748 		bp->b_un.b_addr = (caddr_t)secbuf;
749 		bp->b_blkno = head; /* I had to put it somewhere! */
750 		bp->b_forw = (struct buf *)dadkp;
751 		bp->b_back = (struct buf *)DCMD_GETDEF;
752 
753 		FLC_ENQUE(dadkp->dad_flcobjp, bp);
754 		err = biowait(bp);
755 		if (!err) {
756 			if (ddi_copyout((caddr_t)secbuf,
757 			    STRUCT_FGETP(adh, buffer), NBPSCTR, flag))
758 				err = ENXIO;
759 		}
760 		kmem_free(secbuf, NBPSCTR);
761 		freerbuf(bp);
762 		return (err);
763 	    }
764 	case DIOCTL_RWCMD:
765 	    {
766 		struct dadkio_rwcmd *rwcmdp;
767 		int status, rw;
768 
769 		/*
770 		 * copied in by cmdk and, if necessary, converted to the
771 		 * correct datamodel
772 		 */
773 		rwcmdp = (struct dadkio_rwcmd *)(intptr_t)arg;
774 
775 		/*
776 		 * handle the complex cases here; we pass these
777 		 * through to the driver, which will queue them and
778 		 * handle the requests asynchronously.  The simpler
779 		 * cases, which can return immediately, fail here, and
780 		 * the request reverts to the dadk_ioctl routine, which
781 		 * will reroute them directly to the ata driver.
782 		 */
783 		switch (rwcmdp->cmd) {
784 			case DADKIO_RWCMD_READ :
785 				/*FALLTHROUGH*/
786 			case DADKIO_RWCMD_WRITE:
787 				rw = ((rwcmdp->cmd == DADKIO_RWCMD_WRITE) ?
788 				    B_WRITE : B_READ);
789 				status = dadk_dk_buf_setup(dadkp,
790 				    (opaque_t)rwcmdp, dev, ((flag &FKIOCTL) ?
791 				    UIO_SYSSPACE : UIO_USERSPACE), rw);
792 				return (status);
793 			default:
794 				return (EINVAL);
795 		}
796 	    }
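	/*
	 * DKIOCFLUSHWRITECACHE issues a DCMD_FLUSH_CACHE packet.  If the
	 * caller is in-kernel (FKIOCTL) and supplied a dk_callback, the
	 * flush completes asynchronously and the callback is invoked from
	 * dadk_flushdone(); otherwise the ioctl waits in biowait().  If
	 * flushing is known to be unsupported or the write cache is
	 * disabled, the ioctl returns immediately.
	 */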
797 	case DKIOCFLUSHWRITECACHE:
798 		{
799 			struct buf *bp;
800 			int err = 0;
801 			struct dk_callback *dkc = (struct dk_callback *)arg;
802 			struct cmpkt *pktp;
803 			int is_sync = 1;
804 
805 			mutex_enter(&dadkp->dad_mutex);
806 			if (dadkp->dad_noflush || !dadkp->dad_wce) {
807 				err = dadkp->dad_noflush ? ENOTSUP : 0;
808 				mutex_exit(&dadkp->dad_mutex);
809 				/*
810 				 * If a callback was requested: a
811 				 * callback will always be done if the
812 				 * caller saw the DKIOCFLUSHWRITECACHE
813 				 * ioctl return 0, and never done if the
814 				 * caller saw the ioctl return an error.
815 				 */
816 				if ((flag & FKIOCTL) && dkc != NULL &&
817 				    dkc->dkc_callback != NULL) {
818 					(*dkc->dkc_callback)(dkc->dkc_cookie,
819 					    err);
820 					/*
821 					 * Did callback and reported error.
822 					 * Since we did a callback, ioctl
823 					 * should return 0.
824 					 */
825 					err = 0;
826 				}
827 				return (err);
828 			}
829 			mutex_exit(&dadkp->dad_mutex);
830 
831 			bp = getrbuf(KM_SLEEP);
832 
833 			bp->b_edev = dev;
834 			bp->b_dev  = cmpdev(dev);
835 			bp->b_flags = B_BUSY;
836 			bp->b_resid = 0;
837 			bp->b_bcount = 0;
838 			SET_BP_SEC(bp, 0);
839 
840 			if ((flag & FKIOCTL) && dkc != NULL &&
841 			    dkc->dkc_callback != NULL) {
842 				struct dk_callback *dkc2 =
843 				    (struct dk_callback *)kmem_zalloc(
844 				    sizeof (struct dk_callback), KM_SLEEP);
845 
846 				bcopy(dkc, dkc2, sizeof (*dkc2));
847 				/*
848 				 * Borrow b_list to carry private data
849 				 * to the b_iodone func.
850 				 */
851 				bp->b_list = (struct buf *)dkc2;
852 				bp->b_iodone = dadk_flushdone;
853 				is_sync = 0;
854 			}
855 
856 			/*
857 			 * Setup command pkt
858 			 * dadk_pktprep() can't fail since DDI_DMA_SLEEP set
859 			 */
860 			pktp = dadk_pktprep(dadkp, NULL, bp,
861 			    dadk_iodone, DDI_DMA_SLEEP, NULL);
862 
863 			pktp->cp_time = DADK_FLUSH_CACHE_TIME;
864 
865 			*((char *)(pktp->cp_cdbp)) = DCMD_FLUSH_CACHE;
866 			pktp->cp_byteleft = 0;
867 			pktp->cp_private = NULL;
868 			pktp->cp_secleft = 0;
869 			pktp->cp_srtsec = -1;
870 			pktp->cp_bytexfer = 0;
871 
872 			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);
873 
874 			FLC_ENQUE(dadkp->dad_flcobjp, bp);
875 
876 			if (is_sync) {
877 				err = biowait(bp);
878 				freerbuf(bp);
879 			}
880 			return (err);
881 		}
882 	default:
883 		if (!dadkp->dad_rmb)
884 			return (CTL_IOCTL(dadkp->dad_ctlobjp, cmd, arg, flag));
885 	}
886 
887 	switch (cmd) {
888 	case CDROMSTOP:
889 		return (dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0,
890 			0, DADK_SILENT));
891 	case CDROMSTART:
892 		return (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0,
893 			0, DADK_SILENT));
894 	case DKIOCLOCK:
895 		return (dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT));
896 	case DKIOCUNLOCK:
897 		return (dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT));
898 	case DKIOCEJECT:
899 	case CDROMEJECT:
900 		{
901 			int ret;
902 
903 			if (ret = dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0,
904 				DADK_SILENT)) {
905 				return (ret);
906 			}
907 			if (ret = dadk_rmb_ioctl(dadkp, DCMD_EJECT, 0, 0,
908 				DADK_SILENT)) {
909 				return (ret);
910 			}
911 			mutex_enter(&dadkp->dad_mutex);
912 			dadkp->dad_iostate = DKIO_EJECTED;
913 			cv_broadcast(&dadkp->dad_state_cv);
914 			mutex_exit(&dadkp->dad_mutex);
915 
916 			return (0);
917 
918 		}
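	/*
	 * Note: this default case is intentionally placed ahead of the
	 * cdrom audio cases below.  Anything that is not a recognized
	 * cdrom audio ioctl returns ENOTTY here; the audio cases simply
	 * map the ioctl to its DCMD and fall out of the switch to
	 * dadk_rmb_ioctl().
	 */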
919 	default:
920 		return (ENOTTY);
921 	/*
922 	 * cdrom audio commands
923 	 */
924 	case CDROMPAUSE:
925 		cmd = DCMD_PAUSE;
926 		break;
927 	case CDROMRESUME:
928 		cmd = DCMD_RESUME;
929 		break;
930 	case CDROMPLAYMSF:
931 		cmd = DCMD_PLAYMSF;
932 		break;
933 	case CDROMPLAYTRKIND:
934 		cmd = DCMD_PLAYTRKIND;
935 		break;
936 	case CDROMREADTOCHDR:
937 		cmd = DCMD_READTOCHDR;
938 		break;
939 	case CDROMREADTOCENTRY:
940 		cmd = DCMD_READTOCENT;
941 		break;
942 	case CDROMVOLCTRL:
943 		cmd = DCMD_VOLCTRL;
944 		break;
945 	case CDROMSUBCHNL:
946 		cmd = DCMD_SUBCHNL;
947 		break;
948 	case CDROMREADMODE2:
949 		cmd = DCMD_READMODE2;
950 		break;
951 	case CDROMREADMODE1:
952 		cmd = DCMD_READMODE1;
953 		break;
954 	case CDROMREADOFFSET:
955 		cmd = DCMD_READOFFSET;
956 		break;
957 	}
958 	return (dadk_rmb_ioctl(dadkp, cmd, arg, flag, 0));
959 }
960 
961 int
962 dadk_flushdone(struct buf *bp)
963 {
964 	struct dk_callback *dkc = (struct dk_callback *)bp->b_list;
965 
966 	ASSERT(dkc != NULL && dkc->dkc_callback != NULL);
967 
968 	(*dkc->dkc_callback)(dkc->dkc_cookie, geterror(bp));
969 
970 	kmem_free(dkc, sizeof (*dkc));
971 	freerbuf(bp);
972 	return (0);
973 }
974 
975 int
976 dadk_getphygeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
977 {
978 	struct dadk *dadkp = (struct dadk *)objp;
979 
980 	bcopy((caddr_t)&dadkp->dad_phyg, (caddr_t)dkgeom_p,
981 	    sizeof (struct tgdk_geom));
982 	return (DDI_SUCCESS);
983 }
984 
985 int
986 dadk_getgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
987 {
988 	struct dadk *dadkp = (struct dadk *)objp;
989 	bcopy((caddr_t)&dadkp->dad_logg, (caddr_t)dkgeom_p,
990 	    sizeof (struct tgdk_geom));
991 	return (DDI_SUCCESS);
992 }
993 
994 int
995 dadk_setgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
996 {
997 	struct dadk *dadkp = (struct dadk *)objp;
998 
999 	dadkp->dad_logg.g_cyl = dkgeom_p->g_cyl;
1000 	dadkp->dad_logg.g_head = dkgeom_p->g_head;
1001 	dadkp->dad_logg.g_sec = dkgeom_p->g_sec;
1002 	dadkp->dad_logg.g_cap = dkgeom_p->g_cap;
1003 	return (DDI_SUCCESS);
1004 }
1005 
1006 
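/*
 * dadk_iob_alloc: allocate an I/O buffer handle for a transfer starting
 * at logical block blkno.  The requested range is rounded out to whole
 * physical sectors (b_psec/b_pbyteoff/b_pbytecnt) and DMA-able memory is
 * obtained through i_ddi_mem_alloc() using dadk_alloc_attr; the data
 * address is later recovered with dadk_iob_htoc() or dadk_iob_xfer().
 */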
1007 tgdk_iob_handle
1008 dadk_iob_alloc(opaque_t objp, daddr_t blkno, ssize_t xfer, int kmsflg)
1009 {
1010 	struct dadk *dadkp = (struct dadk *)objp;
1011 	struct buf *bp;
1012 	struct tgdk_iob *iobp;
1013 	size_t rlen;
1014 
1015 	iobp = kmem_zalloc(sizeof (*iobp), kmsflg);
1016 	if (iobp == NULL)
1017 		return (NULL);
1018 	if ((bp = getrbuf(kmsflg)) == NULL) {
1019 		kmem_free(iobp, sizeof (*iobp));
1020 		return (NULL);
1021 	}
1022 
1023 	iobp->b_psec  = LBLK2SEC(blkno, dadkp->dad_blkshf);
1024 	iobp->b_pbyteoff = (blkno & ((1<<dadkp->dad_blkshf) - 1)) << SCTRSHFT;
1025 	iobp->b_pbytecnt = ((iobp->b_pbyteoff + xfer + dadkp->DAD_SECSIZ - 1)
1026 				>> dadkp->dad_secshf) << dadkp->dad_secshf;
1027 
1028 	bp->b_un.b_addr = 0;
1029 	/*
1030 	 * use i_ddi_mem_alloc() for now until we have an interface to allocate
1031 	 * memory for DMA which doesn't require a DMA handle. ddi_iopb_alloc()
1032 	 * is obsolete and we want more flexibility in controlling the DMA
1033 	 * address constraints..
1034 	 */
1035 	if (i_ddi_mem_alloc((dadkp->dad_sd)->sd_dev, &dadk_alloc_attr,
1036 	    (size_t)iobp->b_pbytecnt, ((kmsflg == KM_SLEEP) ? 1 : 0), 0, NULL,
1037 	    &bp->b_un.b_addr, &rlen, NULL) != DDI_SUCCESS) {
1038 		freerbuf(bp);
1039 		kmem_free(iobp, sizeof (*iobp));
1040 		return (NULL);
1041 	}
1042 	iobp->b_flag |= IOB_BPALLOC | IOB_BPBUFALLOC;
1043 	iobp->b_bp = bp;
1044 	iobp->b_lblk = blkno;
1045 	iobp->b_xfer = xfer;
1048 	return (iobp);
1049 }
1050 
1051 /* ARGSUSED */
1052 int
1053 dadk_iob_free(opaque_t objp, struct tgdk_iob *iobp)
1054 {
1055 	struct buf *bp;
1056 
1057 	if (iobp) {
1058 		if (iobp->b_bp && (iobp->b_flag & IOB_BPALLOC)) {
1059 			bp = iobp->b_bp;
1060 			if (bp->b_un.b_addr && (iobp->b_flag & IOB_BPBUFALLOC))
1061 				i_ddi_mem_free((caddr_t)bp->b_un.b_addr, NULL);
1062 			freerbuf(bp);
1063 		}
1064 		kmem_free(iobp, sizeof (*iobp));
1065 	}
1066 	return (DDI_SUCCESS);
1067 }
1068 
1069 /* ARGSUSED */
1070 caddr_t
1071 dadk_iob_htoc(opaque_t objp, struct tgdk_iob *iobp)
1072 {
1073 	return (iobp->b_bp->b_un.b_addr+iobp->b_pbyteoff);
1074 }
1075 
1076 
1077 caddr_t
1078 dadk_iob_xfer(opaque_t objp, struct tgdk_iob *iobp, int rw)
1079 {
1080 	struct dadk	*dadkp = (struct dadk *)objp;
1081 	struct buf	*bp;
1082 	int		err;
1083 
1084 	bp = iobp->b_bp;
1085 	if (dadkp->dad_rdonly && !(rw & B_READ)) {
1086 		bioerror(bp, EROFS);
1087 		return (NULL);
1088 	}
1089 
1090 	bp->b_flags |= (B_BUSY | rw);
1091 	bp->b_bcount = iobp->b_pbytecnt;
1092 	SET_BP_SEC(bp, iobp->b_psec);
1093 	bp->av_back = (struct buf *)0;
1094 	bp->b_resid = 0;
1095 
1096 	/* call flow control */
1097 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
1098 	err = biowait(bp);
1099 
1100 	bp->b_bcount = iobp->b_xfer;
1101 	bp->b_flags &= ~(B_DONE|B_BUSY);
1102 
1103 	if (err)
1104 		return (NULL);
1105 
1106 	return (bp->b_un.b_addr+iobp->b_pbyteoff);
1107 }
1108 
1109 static void
1110 dadk_transport(opaque_t com_data, struct buf *bp)
1111 {
1112 	struct dadk *dadkp = (struct dadk *)com_data;
1113 
1114 	if (CTL_TRANSPORT(dadkp->dad_ctlobjp, GDA_BP_PKT(bp)) ==
1115 	    CTL_SEND_SUCCESS)
1116 		return;
1117 	dadk_restart((void*)GDA_BP_PKT(bp));
1118 }
1119 
1120 static int
1121 dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t), caddr_t arg)
1122 {
1123 	struct cmpkt *pktp;
1124 	struct dadk *dadkp = (struct dadk *)com_data;
1125 
1126 	if (GDA_BP_PKT(bp))
1127 		return (DDI_SUCCESS);
1128 
1129 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, func, arg);
1130 	if (!pktp)
1131 		return (DDI_FAILURE);
1132 
1133 	return (dadk_ioprep(dadkp, pktp));
1134 }
1135 
1136 /*
1137  * Read, Write preparation
1138  */
1139 static int
1140 dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp)
1141 {
1142 	struct buf *bp;
1143 
1144 	bp = pktp->cp_bp;
1145 	if (bp->b_forw == (struct buf *)dadkp)
1146 		*((char *)(pktp->cp_cdbp)) = (char)(intptr_t)bp->b_back;
1147 
1148 	else if (bp->b_flags & B_READ)
1149 		*((char *)(pktp->cp_cdbp)) = DCMD_READ;
1150 	else
1151 		*((char *)(pktp->cp_cdbp)) = DCMD_WRITE;
1152 	pktp->cp_byteleft = bp->b_bcount;
1153 
1154 	/* setup the bad block list handle */
1155 	pktp->cp_private = BBH_GETHANDLE(dadkp->dad_bbhobjp, bp);
1156 	return (dadk_iosetup(dadkp, pktp));
1157 }
1158 
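/*
 * dadk_iosetup: set up the next chunk of a transfer.  Within the current
 * sector range the start sector simply advances; when the range is
 * exhausted a new one is taken either directly from the buf (no bad
 * block handle) or from the bad block list cookie.  CTL_IOSETUP() then
 * prepares the packet, possibly limiting cp_bytexfer to what the
 * controller can move in one request.
 */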
1159 static int
1160 dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp)
1161 {
1162 	struct buf	*bp;
1163 	bbh_cookie_t	bbhckp;
1164 	int		seccnt;
1165 
1166 	seccnt = pktp->cp_bytexfer >> dadkp->dad_secshf;
1167 	pktp->cp_secleft -= seccnt;
1168 
1169 	if (pktp->cp_secleft) {
1170 		pktp->cp_srtsec += seccnt;
1171 	} else {
1172 		/* get the first cookie from the bad block list */
1173 		if (!pktp->cp_private) {
1174 			bp = pktp->cp_bp;
1175 			pktp->cp_srtsec  = GET_BP_SEC(bp);
1176 			pktp->cp_secleft = (bp->b_bcount >> dadkp->dad_secshf);
1177 		} else {
1178 			bbhckp = BBH_HTOC(dadkp->dad_bbhobjp,
1179 			    pktp->cp_private);
1180 			pktp->cp_srtsec = BBH_GETCK_SECTOR(dadkp->dad_bbhobjp,
1181 			    bbhckp);
1182 			pktp->cp_secleft = BBH_GETCK_SECLEN(dadkp->dad_bbhobjp,
1183 			    bbhckp);
1184 		}
1185 	}
1186 
1187 	pktp->cp_bytexfer = pktp->cp_secleft << dadkp->dad_secshf;
1188 
1189 	if (CTL_IOSETUP(dadkp->dad_ctlobjp, pktp)) {
1190 		return (DDI_SUCCESS);
1191 	} else {
1192 		return (DDI_FAILURE);
1193 	}
1198 }
1199 
1200 static struct cmpkt *
1201 dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp, struct buf *bp,
1202     void (*cb_func)(struct buf *), int (*func)(caddr_t), caddr_t arg)
1203 {
1204 	struct cmpkt *pktp;
1205 
1206 	pktp = gda_pktprep(dadkp->dad_ctlobjp, in_pktp, (opaque_t)bp, func,
1207 	    arg);
1208 
1209 	if (pktp) {
1210 		pktp->cp_callback = dadk_pktcb;
1211 		pktp->cp_time = DADK_IO_TIME;
1212 		pktp->cp_flags = 0;
1213 		pktp->cp_iodone = cb_func;
1214 		pktp->cp_dev_private = (opaque_t)dadkp;
1215 
1216 	}
1217 
1218 	return (pktp);
1219 }
1220 
1221 
1222 static void
1223 dadk_restart(void *vpktp)
1224 {
1225 	struct cmpkt *pktp = (struct cmpkt *)vpktp;
1226 
1227 	if (dadk_ioretry(pktp, QUE_COMMAND) == JUST_RETURN)
1228 		return;
1229 	pktp->cp_iodone(pktp->cp_bp);
1230 }
1231 
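/*
 * dadk_ioretry: central retry/error disposition.  QUE_COMMAND requeues
 * the packet up to DADK_RETRY_COUNT times and returns JUST_RETURN while
 * a retry is in flight; once retries are exhausted (or for
 * COMMAND_DONE_ERROR) the residual count is fixed up and an error is
 * posted on the buf.  An aborted DCMD_FLUSH_CACHE is treated as
 * unsupported and marks the device with dad_noflush.
 */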
1232 static int
1233 dadk_ioretry(struct cmpkt *pktp, int action)
1234 {
1235 	struct buf *bp;
1236 	struct dadk *dadkp = PKT2DADK(pktp);
1237 
1238 	switch (action) {
1239 	case QUE_COMMAND:
1240 		if (pktp->cp_retry++ < DADK_RETRY_COUNT) {
1241 			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);
1242 			if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) ==
1243 				CTL_SEND_SUCCESS) {
1244 				return (JUST_RETURN);
1245 			}
1246 			gda_log(dadkp->dad_sd->sd_dev, dadk_name,
1247 				CE_WARN,
1248 				"transport of command fails\n");
1249 		} else
1250 			gda_log(dadkp->dad_sd->sd_dev,
1251 				dadk_name, CE_WARN,
1252 				"exceeds maximum number of retries\n");
1253 		bioerror(pktp->cp_bp, ENXIO);
1254 		/*FALLTHROUGH*/
1255 	case COMMAND_DONE_ERROR:
1256 		bp = pktp->cp_bp;
1257 		bp->b_resid += pktp->cp_byteleft - pktp->cp_bytexfer +
1258 		    pktp->cp_resid;
1259 		if (geterror(bp) == 0) {
1260 			if ((*((char *)(pktp->cp_cdbp)) == DCMD_FLUSH_CACHE) &&
1261 			    (pktp->cp_dev_private == (opaque_t)dadkp) &&
1262 			    ((int)(*(char *)pktp->cp_scbp) == DERR_ABORT)) {
1263 				/*
1264 				 * Flag "unimplemented" responses for
1265 				 * DCMD_FLUSH_CACHE as ENOTSUP
1266 				 */
1267 				bioerror(bp, ENOTSUP);
1268 				mutex_enter(&dadkp->dad_mutex);
1269 				dadkp->dad_noflush = 1;
1270 				mutex_exit(&dadkp->dad_mutex);
1271 			} else {
1272 				bioerror(bp, EIO);
1273 			}
1274 		}
1275 		/*FALLTHROUGH*/
1276 	case COMMAND_DONE:
1277 	default:
1278 		return (COMMAND_DONE);
1279 	}
1280 }
1281 
1282 
1283 static void
1284 dadk_pktcb(struct cmpkt *pktp)
1285 {
1286 	int action;
1287 	struct dadkio_rwcmd *rwcmdp;
1288 
1289 	rwcmdp = (struct dadkio_rwcmd *)pktp->cp_passthru;  /* ioctl packet */
1290 
1291 	if (pktp->cp_reason == CPS_SUCCESS) {
1292 		if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT))
1293 			rwcmdp->status.status = DADKIO_STAT_NO_ERROR;
1294 		pktp->cp_iodone(pktp->cp_bp);
1295 		return;
1296 	}
1297 
1298 	if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT)) {
1299 		if (pktp->cp_reason == CPS_CHKERR)
1300 			dadk_recorderr(pktp, rwcmdp);
1301 		dadk_iodone(pktp->cp_bp);
1302 		return;
1303 	}
1304 
1305 	if (pktp->cp_reason == CPS_CHKERR)
1306 		action = dadk_chkerr(pktp);
1307 	else
1308 		action = COMMAND_DONE_ERROR;
1309 
1310 	if (action == JUST_RETURN)
1311 		return;
1312 
1313 	if (action != COMMAND_DONE) {
1314 		if ((dadk_ioretry(pktp, action)) == JUST_RETURN)
1315 			return;
1316 	}
1317 	pktp->cp_iodone(pktp->cp_bp);
1318 }
1319 
1320 
1321 
1322 static struct dadkio_derr dadk_errtab[] = {
1323 	{COMMAND_DONE, GDA_INFORMATIONAL},	/*  0 DERR_SUCCESS	*/
1324 	{QUE_COMMAND, GDA_FATAL},		/*  1 DERR_AMNF		*/
1325 	{QUE_COMMAND, GDA_FATAL},		/*  2 DERR_TKONF	*/
1326 	{COMMAND_DONE_ERROR, GDA_INFORMATIONAL}, /* 3 DERR_ABORT	*/
1327 	{QUE_COMMAND, GDA_RETRYABLE},		/*  4 DERR_DWF		*/
1328 	{QUE_COMMAND, GDA_FATAL},		/*  5 DERR_IDNF		*/
1329 	{JUST_RETURN, GDA_INFORMATIONAL},	/*  6 DERR_BUSY		*/
1330 	{QUE_COMMAND, GDA_FATAL},		/*  7 DERR_UNC		*/
1331 	{QUE_COMMAND, GDA_RETRYABLE},		/*  8 DERR_BBK		*/
1332 	{COMMAND_DONE_ERROR, GDA_FATAL},	/*  9 DERR_INVCDB	*/
1333 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 10 DERR_HARD		*/
1334 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 11 DERR_ILI		*/
1335 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 12 DERR_EOM		*/
1336 	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 13 DERR_MCR		*/
1337 	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 14 DERR_RECOVER	*/
1338 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 15 DERR_NOTREADY	*/
1339 	{QUE_COMMAND, GDA_RETRYABLE},		/* 16 DERR_MEDIUM	*/
1340 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 17 DERR_HW		*/
1341 	{COMMAND_DONE, GDA_FATAL},		/* 18 DERR_ILL		*/
1342 	{COMMAND_DONE, GDA_FATAL},		/* 19 DERR_UNIT_ATTN	*/
1343 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 20 DERR_DATA_PROT	*/
1344 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 21 DERR_MISCOMPARE	*/
1345 	{QUE_COMMAND, GDA_RETRYABLE},		/* 22 DERR_ICRC		*/
1346 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 23 DERR_RESV		*/
1347 };
1348 
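/*
 * dadk_chkerr: classify a completion-status (scb) byte by indexing
 * dadk_errtab above.  Error kstats are bumped according to the severity
 * and the specific sense code, the failure is logged unless the packet
 * is a passthru request, and DERR_BUSY schedules dadk_restart() via
 * timeout().  The returned action feeds dadk_ioretry().
 */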
1349 static int
1350 dadk_chkerr(struct cmpkt *pktp)
1351 {
1352 	int err_blkno;
1353 	struct dadk *dadkp = PKT2DADK(pktp);
1354 	dadk_errstats_t *dep;
1355 	int scb = *(char *)pktp->cp_scbp;
1356 
1357 	if (scb == DERR_SUCCESS) {
1358 		if (pktp->cp_retry != 0 && dadkp->dad_errstats != NULL) {
1359 			dep = (dadk_errstats_t *)
1360 			    dadkp->dad_errstats->ks_data;
1361 			dep->dadk_rq_recov_err.value.ui32++;
1362 		}
1363 		return (COMMAND_DONE);
1364 	}
1365 
1366 	if (pktp->cp_retry) {
1367 		err_blkno = pktp->cp_srtsec + ((pktp->cp_bytexfer -
1368 			pktp->cp_resid) >> dadkp->dad_secshf);
1369 	} else
1370 		err_blkno = -1;
1371 
1372 	if (dadkp->dad_errstats != NULL) {
1373 		dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;
1374 
1375 		switch (dadk_errtab[scb].d_severity) {
1376 			case GDA_RETRYABLE:
1377 				dep->dadk_softerrs.value.ui32++;
1378 				break;
1379 
1380 			case GDA_FATAL:
1381 				dep->dadk_harderrs.value.ui32++;
1382 				break;
1383 
1384 			default:
1385 				break;
1386 		}
1387 
1388 		switch (scb) {
1389 			case DERR_INVCDB:
1390 			case DERR_ILI:
1391 			case DERR_EOM:
1392 			case DERR_HW:
1393 			case DERR_ICRC:
1394 				dep->dadk_transerrs.value.ui32++;
1395 				break;
1396 
1397 			case DERR_AMNF:
1398 			case DERR_TKONF:
1399 			case DERR_DWF:
1400 			case DERR_BBK:
1401 			case DERR_UNC:
1402 			case DERR_HARD:
1403 			case DERR_MEDIUM:
1404 			case DERR_DATA_PROT:
1405 			case DERR_MISCOMP:
1406 				dep->dadk_rq_media_err.value.ui32++;
1407 				break;
1408 
1409 			case DERR_NOTREADY:
1410 				dep->dadk_rq_ntrdy_err.value.ui32++;
1411 				break;
1412 
1413 			case DERR_IDNF:
1414 			case DERR_UNIT_ATTN:
1415 				dep->dadk_rq_nodev_err.value.ui32++;
1416 				break;
1417 
1418 			case DERR_ILL:
1419 			case DERR_RESV:
1420 				dep->dadk_rq_illrq_err.value.ui32++;
1421 				break;
1422 
1423 			default:
1424 				break;
1425 		}
1426 	}
1427 
1428 	/* if attempting to read a sector from a cdrom audio disk */
1429 	if ((dadkp->dad_cdrom) &&
1430 	    (*((char *)(pktp->cp_cdbp)) == DCMD_READ) &&
1431 	    (scb == DERR_ILL)) {
1432 		return (COMMAND_DONE);
1433 	}
1434 	if (pktp->cp_passthru == NULL) {
1435 		gda_errmsg(dadkp->dad_sd, pktp, dadk_name,
1436 		    dadk_errtab[scb].d_severity, pktp->cp_srtsec,
1437 		    err_blkno, dadk_cmds, dadk_sense);
1438 	}
1439 
1440 	if (scb == DERR_BUSY) {
1441 		(void) timeout(dadk_restart, (void *)pktp, DADK_BSY_TIMEOUT);
1442 	}
1443 
1444 	return (dadk_errtab[scb].d_action);
1445 }
1446 
1447 static void
1448 dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp)
1449 {
1450 	struct dadk *dadkp;
1451 	int scb;
1452 
1453 	dadkp = PKT2DADK(pktp);
1454 	scb = (int)(*(char *)pktp->cp_scbp);
1455 
1456 
1457 	rwcmdp->status.failed_blk = rwcmdp->blkaddr +
1458 		((pktp->cp_bytexfer -
1459 		pktp->cp_resid) >> dadkp->dad_secshf);
1460 
1461 	rwcmdp->status.resid = pktp->cp_bp->b_resid +
1462 		pktp->cp_byteleft - pktp->cp_bytexfer + pktp->cp_resid;
1463 	switch ((int)(* (char *)pktp->cp_scbp)) {
1464 	case DERR_AMNF:
1465 	case DERR_ABORT:
1466 		rwcmdp->status.status = DADKIO_STAT_ILLEGAL_REQUEST;
1467 		break;
1468 	case DERR_DWF:
1469 	case DERR_IDNF:
1470 		rwcmdp->status.status = DADKIO_STAT_ILLEGAL_ADDRESS;
1471 		break;
1472 	case DERR_TKONF:
1473 	case DERR_UNC:
1474 	case DERR_BBK:
1475 		rwcmdp->status.status = DADKIO_STAT_MEDIUM_ERROR;
1476 		rwcmdp->status.failed_blk_is_valid = 1;
1477 		rwcmdp->status.resid = 0;
1478 		break;
1479 	case DERR_BUSY:
1480 		rwcmdp->status.status = DADKIO_STAT_NOT_READY;
1481 		break;
1482 	case DERR_INVCDB:
1483 	case DERR_HARD:
1484 		rwcmdp->status.status = DADKIO_STAT_HARDWARE_ERROR;
1485 		break;
1486 	case DERR_ICRC:
1487 	default:
1488 		rwcmdp->status.status = DADKIO_STAT_NOT_SUPPORTED;
1489 	}
1490 
1491 	if (rwcmdp->flags & DADKIO_FLAG_SILENT)
1492 		return;
1493 	gda_errmsg(dadkp->dad_sd, pktp, dadk_name, dadk_errtab[scb].d_severity,
1494 		rwcmdp->blkaddr, rwcmdp->status.failed_blk,
1495 		dadk_cmds, dadk_sense);
1496 }
1497 
1498 /*ARGSUSED*/
1499 static void
1500 dadk_polldone(struct buf *bp)
1501 {
1502 }
1503 
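/*
 * dadk_iodone: interrupt-time completion.  If the transfer still has
 * bytes left and no error has been posted, the next chunk is set up and
 * transported without completing the buf.  Otherwise the request is
 * dequeued from the flow control object, the packet and any bad block
 * handle are freed, and biodone() is called.
 */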
1504 static void
1505 dadk_iodone(struct buf *bp)
1506 {
1507 	struct cmpkt *pktp;
1508 	struct dadk *dadkp;
1509 
1510 	pktp  = GDA_BP_PKT(bp);
1511 	dadkp = PKT2DADK(pktp);
1512 
1513 	/* check for all iodone */
1514 	pktp->cp_byteleft -= pktp->cp_bytexfer;
1515 	if (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
1516 		pktp->cp_retry = 0;
1517 		(void) dadk_iosetup(dadkp, pktp);
1518 
1519 
1520 		/* transport the next one */
1521 		if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) == CTL_SEND_SUCCESS)
1522 			return;
1523 		if ((dadk_ioretry(pktp, QUE_COMMAND)) == JUST_RETURN)
1524 			return;
1525 	}
1526 
1527 	/* start next one */
1528 	FLC_DEQUE(dadkp->dad_flcobjp, bp);
1529 
1530 	/* free pkt */
1531 	if (pktp->cp_private)
1532 		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
1533 	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
1534 	biodone(bp);
1535 }
1536 
1537 int
1538 dadk_check_media(opaque_t objp, int *state)
1539 {
1540 	struct dadk *dadkp = (struct dadk *)objp;
1541 
1542 	if (!dadkp->dad_rmb) {
1543 		return (ENXIO);
1544 	}
1545 #ifdef DADK_DEBUG
1546 	if (dadk_debug & DSTATE)
1547 		PRF("dadk_check_media: user state %x disk state %x\n",
1548 			*state, dadkp->dad_iostate);
1549 #endif
1550 	/*
1551 	 * If the state has already changed, just return
1552 	 */
1553 	if (*state != dadkp->dad_iostate) {
1554 		*state = dadkp->dad_iostate;
1555 		return (0);
1556 	}
1557 
1558 	/*
1559 	 * Start up a thread to poll for media state changes
1560 	 */
1561 	mutex_enter(&dadkp->dad_mutex);
1562 	if (dadkp->dad_thread_cnt == 0) {
1563 		/*
1564 		 * One thread per removable dadk device
1565 		 */
1566 		(void) thread_create(NULL, 0, dadk_watch_thread, dadkp, 0, &p0,
1567 		    TS_RUN, v.v_maxsyspri - 2);
1568 	}
1569 	dadkp->dad_thread_cnt++;
1570 
1571 	/*
1572 	 * Wait for state to change
1573 	 */
1574 	do {
1575 		if (cv_wait_sig(&dadkp->dad_state_cv, &dadkp->dad_mutex) == 0) {
1576 			dadkp->dad_thread_cnt--;
1577 			mutex_exit(&dadkp->dad_mutex);
1578 			return (EINTR);
1579 		}
1580 	} while (*state == dadkp->dad_iostate);
1581 	*state = dadkp->dad_iostate;
1582 	dadkp->dad_thread_cnt--;
1583 	mutex_exit(&dadkp->dad_mutex);
1584 	return (0);
1585 }
1586 
1587 
1588 #define	MEDIA_ACCESS_DELAY 2000000
1589 
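/*
 * dadk_watch_thread: one watcher thread per open removable device (see
 * dadk_check_media()).  It polls the drive state with DCMD_GET_STATE
 * every dadk_check_media_time microseconds and broadcasts on
 * dad_state_cv when the state changes; a newly inserted medium is
 * reported only after MEDIA_ACCESS_DELAY to give the drive time to
 * become ready.  The thread exits when dad_thread_cnt drops to zero.
 */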
1590 static void
1591 dadk_watch_thread(struct dadk *dadkp)
1592 {
1593 	enum dkio_state state;
1594 	int interval;
1595 
1596 	interval = drv_usectohz(dadk_check_media_time);
1597 
1598 	do {
1599 		if (dadk_rmb_ioctl(dadkp, DCMD_GET_STATE, (intptr_t)&state, 0,
1600 		    DADK_SILENT)) {
1601 			/*
1602 			 * Assume state remained the same
1603 			 */
1604 			state = dadkp->dad_iostate;
1605 		}
1606 
1607 		/*
1608 		 * now signal the waiting thread if this is *not* the
1609 		 * specified state;
1610 		 * delay the signal if the state is DKIO_INSERTED
1611 		 * to allow the target to recover
1612 		 */
1613 		if (state != dadkp->dad_iostate) {
1614 
1615 			dadkp->dad_iostate = state;
1616 			if (state == DKIO_INSERTED) {
1617 				/*
1618 				 * delay the signal to give the drive a chance
1619 				 * to do what it apparently needs to do
1620 				 */
1621 				(void) timeout((void(*)(void *))cv_broadcast,
1622 				    (void *)&dadkp->dad_state_cv,
1623 				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
1624 			} else {
1625 				cv_broadcast(&dadkp->dad_state_cv);
1626 			}
1627 		}
1628 		delay(interval);
1629 	} while (dadkp->dad_thread_cnt);
1630 }
1631 
1632 int
1633 dadk_inquiry(opaque_t objp, opaque_t *inqpp)
1634 {
1635 	struct dadk *dadkp = (struct dadk *)objp;
1636 	struct scsi_inquiry **sinqpp = (struct scsi_inquiry **)inqpp;
1637 
1638 	if (dadkp && dadkp->dad_sd && dadkp->dad_sd->sd_inq) {
1639 		*sinqpp = dadkp->dad_sd->sd_inq;
1640 		return (DDI_SUCCESS);
1641 	}
1642 
1643 	return (DDI_FAILURE);
1644 }
1645 
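/*
 * dadk_rmb_ioctl: issue a removable-media command (start/stop motor,
 * lock/unlock, eject, get state, ...) as a pass-through packet.  The
 * "silent" flag is carried in cp_passthru so that dadk_pktcb() and
 * dadk_chkerr() suppress error reporting, and the command is carried
 * out by the controller's CTL_IOCTL() entry point before the buf and
 * packet are freed.
 */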
1646 static int
1647 dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags, int silent)
1648 
1649 {
1650 	struct buf *bp;
1651 	int err;
1652 	struct cmpkt *pktp;
1653 
1654 	if ((bp = getrbuf(KM_SLEEP)) == NULL) {
1655 		return (ENOMEM);
1656 	}
1657 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_rmb_iodone, NULL, NULL);
1658 	if (!pktp) {
1659 		freerbuf(bp);
1660 		return (ENOMEM);
1661 	}
1662 	bp->b_back  = (struct buf *)arg;
1663 	bp->b_forw  = (struct buf *)dadkp->dad_flcobjp;
1664 	pktp->cp_passthru = (opaque_t)(intptr_t)silent;
1665 
1666 	err = CTL_IOCTL(dadkp->dad_ctlobjp, cmd, (uintptr_t)pktp, flags);
1667 	freerbuf(bp);
1668 	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
1669 	return (err);
1672 }
1673 
1674 static void
1675 dadk_rmb_iodone(struct buf *bp)
1676 {
1677 	struct cmpkt *pktp;
1678 	struct dadk *dadkp;
1679 
1680 	pktp  = GDA_BP_PKT(bp);
1681 	dadkp = PKT2DADK(pktp);
1682 
1683 	bp->b_flags &= ~(B_DONE|B_BUSY);
1684 
1685 	/* Start next one */
1686 	FLC_DEQUE(dadkp->dad_flcobjp, bp);
1687 
1688 	biodone(bp);
1689 }
1690 
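/*
 * dadk_dk_buf_setup: service a DIOCTL_RWCMD read/write by building a uio
 * around the caller's buffer and letting physio() drive the transfer
 * through dadk_dk_strategy(), with dadkmin() clamping each request to
 * dadk_dk_maxphys.  The dadk pointer and the rwcmd are passed to the
 * strategy routine in b_forw/av_forw and b_back.
 */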
1691 static int
1692 dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp, dev_t dev,
1693 	enum uio_seg dataspace, int rw)
1694 {
1695 	struct dadkio_rwcmd *rwcmdp = (struct dadkio_rwcmd *)cmdp;
1696 	struct buf	*bp;
1697 	struct iovec	aiov;
1698 	struct uio	auio;
1699 	struct uio	*uio = &auio;
1700 	int		status;
1701 
1702 	bp = getrbuf(KM_SLEEP);
1703 
1704 	bp->av_forw = bp->b_forw = (struct buf *)dadkp;
1705 	bp->b_back  = (struct buf *)rwcmdp;	/* ioctl packet */
1706 
1707 	bzero((caddr_t)&auio, sizeof (struct uio));
1708 	bzero((caddr_t)&aiov, sizeof (struct iovec));
1709 	aiov.iov_base = rwcmdp->bufaddr;
1710 	aiov.iov_len = rwcmdp->buflen;
1711 	uio->uio_iov = &aiov;
1712 
1713 	uio->uio_iovcnt = 1;
1714 	uio->uio_resid = rwcmdp->buflen;
1715 	uio->uio_segflg = dataspace;
1716 
1717 	/* Let physio do the rest... */
1718 	status = physio(dadk_dk_strategy, bp, dev, rw, dadkmin, uio);
1719 
1720 	freerbuf(bp);
1721 	return (status);
1722 
1723 }
1724 
1725 /* Do not let a user gendisk request get too big or */
1726 /* else we could use to many resources.		    */
1727 
1728 static void
1729 dadkmin(struct buf *bp)
1730 {
1731 	if (bp->b_bcount > dadk_dk_maxphys)
1732 		bp->b_bcount = dadk_dk_maxphys;
1733 }
1734 
1735 static int
1736 dadk_dk_strategy(struct buf *bp)
1737 {
1738 	dadk_dk((struct dadk *)bp->av_forw, (struct dadkio_rwcmd *)bp->b_back,
1739 	    bp);
1740 	return (0);
1741 }
1742 
1743 static void
1744 dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *rwcmdp, struct buf *bp)
1745 {
1746 	struct  cmpkt *pktp;
1747 
1748 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, NULL, NULL);
1749 	if (!pktp) {
1750 		bioerror(bp, ENOMEM);
1751 		biodone(bp);
1752 		return;
1753 	}
1754 
1755 	pktp->cp_passthru = rwcmdp;
1756 
1757 	(void) dadk_ioprep(dadkp, pktp);
1758 
1759 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
1760 }
1761