xref: /titanic_44/usr/src/uts/intel/io/dktp/dcdev/dadk.c (revision 942214a9d3873106f26cc86dd4aef6ac6176b830)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Direct Attached Disk
31  */
32 
33 #include <sys/file.h>
34 #include <sys/scsi/scsi.h>
35 #include <sys/var.h>
36 #include <sys/proc.h>
37 #include <sys/dktp/cm.h>
38 #include <sys/vtoc.h>
39 #include <sys/dkio.h>
40 
41 #include <sys/dktp/dadev.h>
42 #include <sys/dktp/fctypes.h>
43 #include <sys/dktp/flowctrl.h>
44 #include <sys/dktp/tgcom.h>
45 #include <sys/dktp/tgdk.h>
46 #include <sys/dktp/bbh.h>
47 #include <sys/dktp/dadkio.h>
48 #include <sys/dktp/dadk.h>
49 #include <sys/cdio.h>
50 
51 /*
52  * Local Function Prototypes
53  */
54 static void dadk_restart(void *pktp);
55 static void dadk_pktcb(struct cmpkt *pktp);
56 static void dadk_iodone(struct buf *bp);
57 static void dadk_polldone(struct buf *bp);
58 static void dadk_setcap(struct dadk *dadkp);
59 static void dadk_create_errstats(struct dadk *dadkp, int instance);
60 static void dadk_destroy_errstats(struct dadk *dadkp);
61 
62 static int dadk_chkerr(struct cmpkt *pktp);
63 static int dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp);
64 static int dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp);
65 static int dadk_ioretry(struct cmpkt *pktp, int action);
66 
67 static struct cmpkt *dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp,
68     struct buf *bp, void (*cb_func)(struct buf *), int (*func)(caddr_t),
69     caddr_t arg);
70 
71 static int  dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t),
72     caddr_t arg);
73 static void dadk_transport(opaque_t com_data, struct buf *bp);
74 
75 struct tgcom_objops dadk_com_ops = {
76 	nodev,
77 	nodev,
78 	dadk_pkt,
79 	dadk_transport,
80 	0, 0
81 };
82 
83 /*
84  * architecture dependent allocation restrictions for dadk_iob_alloc(). For
85  * x86, we'll set dma_attr_addr_hi to dadk_max_phys_addr and dma_attr_sgllen
86  * to dadk_sgl_size during _init().
87  */
88 #if defined(__sparc)
89 static ddi_dma_attr_t dadk_alloc_attr = {
90 	DMA_ATTR_V0,	/* version number */
91 	0x0,		/* lowest usable address */
92 	0xFFFFFFFFull,	/* high DMA address range */
93 	0xFFFFFFFFull,	/* DMA counter register */
94 	1,		/* DMA address alignment */
95 	1,		/* DMA burstsizes */
96 	1,		/* min effective DMA size */
97 	0xFFFFFFFFull,	/* max DMA xfer size */
98 	0xFFFFFFFFull,	/* segment boundary */
99 	1,		/* s/g list length */
100 	512,		/* granularity of device */
101 	0,		/* DMA transfer flags */
102 };
103 #elif defined(__x86)
104 static ddi_dma_attr_t dadk_alloc_attr = {
105 	DMA_ATTR_V0,	/* version number */
106 	0x0,		/* lowest usable address */
107 	0x0,		/* high DMA address range [set in _init()] */
108 	0xFFFFull,	/* DMA counter register */
109 	512,		/* DMA address alignment */
110 	1,		/* DMA burstsizes */
111 	1,		/* min effective DMA size */
112 	0xFFFFFFFFull,	/* max DMA xfer size */
113 	0xFFFFFFFFull,	/* segment boundary */
114 	0,		/* s/g list length [set in _init()] */
115 	512,		/* granularity of device */
116 	0,		/* DMA transfer flags */
117 };
118 
119 uint64_t dadk_max_phys_addr = 0xFFFFFFFFull;
120 int dadk_sgl_size = 0xFF;
121 #endif
122 
123 static int dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags,
124     int silent);
125 static void dadk_rmb_iodone(struct buf *bp);
126 
127 static int dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp,
128     dev_t dev, enum uio_seg dataspace, int rw);
129 static void dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *scmdp,
130     struct buf *bp);
131 static void dadkmin(struct buf *bp);
132 static int dadk_dk_strategy(struct buf *bp);
133 static void dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp);
134 
135 struct tgdk_objops dadk_ops = {
136 	dadk_init,
137 	dadk_free,
138 	dadk_probe,
139 	dadk_attach,
140 	dadk_open,
141 	dadk_close,
142 	dadk_ioctl,
143 	dadk_strategy,
144 	dadk_setgeom,
145 	dadk_getgeom,
146 	dadk_iob_alloc,
147 	dadk_iob_free,
148 	dadk_iob_htoc,
149 	dadk_iob_xfer,
150 	dadk_dump,
151 	dadk_getphygeom,
152 	dadk_set_bbhobj,
153 	dadk_check_media,
154 	dadk_inquiry,
155 	dadk_cleanup,
156 	0
157 };
158 
159 /*
160  * Local static data
161  */
162 
163 #ifdef	DADK_DEBUG
164 #define	DENT	0x0001
165 #define	DERR	0x0002
166 #define	DIO	0x0004
167 #define	DGEOM	0x0010
168 #define	DSTATE  0x0020
169 static	int	dadk_debug = DGEOM;
170 
171 #endif	/* DADK_DEBUG */
172 
173 static int dadk_check_media_time = 3000000;	/* 3 Second State Check */
174 static int dadk_dk_maxphys = 0x80000;
175 
176 static char	*dadk_cmds[] = {
177 	"\000Unknown",			/* unknown 		*/
178 	"\001read sector",		/* DCMD_READ 1		*/
179 	"\002write sector",		/* DCMD_WRITE 2		*/
180 	"\003format track",		/* DCMD_FMTTRK 3	*/
181 	"\004format whole drive",	/* DCMD_FMTDRV 4	*/
182 	"\005recalibrate",		/* DCMD_RECAL  5	*/
183 	"\006seek sector",		/* DCMD_SEEK   6	*/
184 	"\007read verify",		/* DCMD_RDVER  7	*/
185 	"\010read defect list",		/* DCMD_GETDEF 8	*/
186 	"\011lock door",		/* DCMD_LOCK   9	*/
187 	"\012unlock door",		/* DCMD_UNLOCK 10	*/
188 	"\013start motor",		/* DCMD_START_MOTOR 11	*/
189 	"\014stop motor",		/* DCMD_STOP_MOTOR 12	*/
190 	"\015eject",			/* DCMD_EJECT  13	*/
191 	"\016update geometry",		/* DCMD_UPDATE_GEOM  14	*/
192 	"\017get state",		/* DCMD_GET_STATE  15	*/
193 	"\020cdrom pause",		/* DCMD_PAUSE  16	*/
194 	"\021cdrom resume",		/* DCMD_RESUME  17	*/
195 	"\022cdrom play track index",	/* DCMD_PLAYTRKIND  18	*/
196 	"\023cdrom play msf",		/* DCMD_PLAYMSF  19	*/
197 	"\024cdrom sub channel",	/* DCMD_SUBCHNL  20	*/
198 	"\025cdrom read mode 1",	/* DCMD_READMODE1  21	*/
199 	"\026cdrom read toc header",	/* DCMD_READTOCHDR  22	*/
200 	"\027cdrom read toc entry",	/* DCMD_READTOCENT  23	*/
201 	"\030cdrom read offset",	/* DCMD_READOFFSET  24	*/
202 	"\031cdrom read mode 2",	/* DCMD_READMODE2  25	*/
203 	"\032cdrom volume control",	/* DCMD_VOLCTRL  26	*/
204 	"\033flush cache",		/* DCMD_FLUSH_CACHE  27	*/
205 	NULL
206 };
207 
208 static char *dadk_sense[] = {
209 	"\000Success",			/* DERR_SUCCESS		*/
210 	"\001address mark not found",	/* DERR_AMNF		*/
211 	"\002track 0 not found",	/* DERR_TKONF		*/
212 	"\003aborted command",		/* DERR_ABORT		*/
213 	"\004write fault",		/* DERR_DWF		*/
214 	"\005ID not found",		/* DERR_IDNF		*/
215 	"\006drive busy",		/* DERR_BUSY		*/
216 	"\007uncorrectable data error",	/* DERR_UNC		*/
217 	"\010bad block detected",	/* DERR_BBK		*/
218 	"\011invalid command",		/* DERR_INVCDB		*/
219 	"\012device hard error",	/* DERR_HARD		*/
220 	"\013illegal length indicated", /* DERR_ILI		*/
221 	"\014end of media",		/* DERR_EOM		*/
222 	"\015media change requested",	/* DERR_MCR		*/
223 	"\016recovered from error",	/* DERR_RECOVER		*/
224 	"\017device not ready",		/* DERR_NOTREADY	*/
225 	"\020medium error",		/* DERR_MEDIUM		*/
226 	"\021hardware error",		/* DERR_HW		*/
227 	"\022illegal request",		/* DERR_ILL		*/
228 	"\023unit attention",		/* DERR_UNIT_ATTN	*/
229 	"\024data protection",		/* DERR_DATA_PROT	*/
230 	"\025miscompare",		/* DERR_MISCOMPARE	*/
231 	"\026ICRC error during UDMA",	/* DERR_ICRC		*/
232 	"\027reserved",			/* DERR_RESV		*/
233 	NULL
234 };
235 
236 static char *dadk_name = "Disk";
237 
238 /*
239  *	This is the loadable module wrapper
240  */
241 #include <sys/modctl.h>
242 
243 extern struct mod_ops mod_miscops;
244 
245 static struct modlmisc modlmisc = {
246 	&mod_miscops,	/* Type of module */
247 	"Direct Attached Disk %I%"
248 };
249 
250 static struct modlinkage modlinkage = {
251 	MODREV_1, (void *)&modlmisc, NULL
252 };
253 
254 int
255 _init(void)
256 {
257 #ifdef DADK_DEBUG
258 	if (dadk_debug & DENT)
259 		PRF("dadk_init: call\n");
260 #endif
261 
262 #if defined(__x86)
263 	/* set the max physical address for iob allocs on x86 */
264 	dadk_alloc_attr.dma_attr_addr_hi = dadk_max_phys_addr;
265 
266 	/*
267 	 * Set the sgllen for iob allocs on x86.  If this is set to less
268 	 * than the number of pages the buffer will take (taking alignment
269 	 * into account), it will force the allocator to try to allocate
270 	 * contiguous pages.
271 	 */
272 	dadk_alloc_attr.dma_attr_sgllen = dadk_sgl_size;
273 #endif
274 
275 	return (mod_install(&modlinkage));
276 }
277 
278 int
279 _fini(void)
280 {
281 #ifdef DADK_DEBUG
282 	if (dadk_debug & DENT)
283 		PRF("dadk_fini: call\n");
284 #endif
285 
286 	return (mod_remove(&modlinkage));
287 }
288 
289 int
290 _info(struct modinfo *modinfop)
291 {
292 	return (mod_info(&modlinkage, modinfop));
293 }
294 
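/*
 * Allocate a tgdk_obj with its dadk soft state in a single allocation
 * and wire up the ops vector, private data and extension block pointers.
 */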
295 struct tgdk_obj *
296 dadk_create()
297 {
298 	struct tgdk_obj *dkobjp;
299 	struct dadk *dadkp;
300 
301 	dkobjp = kmem_zalloc((sizeof (*dkobjp) + sizeof (*dadkp)), KM_NOSLEEP);
302 	if (!dkobjp)
303 		return (NULL);
304 	dadkp = (struct dadk *)(dkobjp+1);
305 
306 	dkobjp->tg_ops  = (struct  tgdk_objops *)&dadk_ops;
307 	dkobjp->tg_data = (opaque_t)dadkp;
308 	dkobjp->tg_ext = &(dkobjp->tg_extblk);
309 	dadkp->dad_extp = &(dkobjp->tg_extblk);
310 
311 #ifdef DADK_DEBUG
312 	if (dadk_debug & DENT)
313 		PRF("dadk_create: tgdkobjp= 0x%x dadkp= 0x%x\n", dkobjp, dadkp);
314 #endif
315 	return (dkobjp);
316 }
317 
318 int
319 dadk_init(opaque_t objp, opaque_t devp, opaque_t flcobjp, opaque_t queobjp,
320 	opaque_t bbhobjp, void *lkarg)
321 {
322 	struct dadk *dadkp = (struct dadk *)objp;
323 	struct scsi_device *sdevp = (struct scsi_device *)devp;
324 
325 	dadkp->dad_sd = devp;
326 	dadkp->dad_ctlobjp = (opaque_t)sdevp->sd_address.a_hba_tran;
327 	sdevp->sd_private = (caddr_t)dadkp;
328 
329 	/* initialize the communication object */
330 	dadkp->dad_com.com_data = (opaque_t)dadkp;
331 	dadkp->dad_com.com_ops  = &dadk_com_ops;
332 
333 	dadkp->dad_bbhobjp = bbhobjp;
334 	BBH_INIT(bbhobjp);
335 
336 	dadkp->dad_flcobjp = flcobjp;
337 	return (FLC_INIT(flcobjp, &(dadkp->dad_com), queobjp, lkarg));
338 }
339 
340 int
341 dadk_free(struct tgdk_obj *dkobjp)
342 {
343 	TGDK_CLEANUP(dkobjp);
344 	kmem_free(dkobjp, (sizeof (*dkobjp) + sizeof (struct dadk)));
345 
346 	return (DDI_SUCCESS);
347 }
348 
349 void
350 dadk_cleanup(struct tgdk_obj *dkobjp)
351 {
352 	struct dadk *dadkp;
353 
354 	dadkp = (struct dadk *)(dkobjp->tg_data);
355 	if (dadkp->dad_sd)
356 		dadkp->dad_sd->sd_private = NULL;
357 	if (dadkp->dad_bbhobjp) {
358 		BBH_FREE(dadkp->dad_bbhobjp);
359 		dadkp->dad_bbhobjp = NULL;
360 	}
361 	if (dadkp->dad_flcobjp) {
362 		FLC_FREE(dadkp->dad_flcobjp);
363 		dadkp->dad_flcobjp = NULL;
364 	}
365 }
366 
367 /* ARGSUSED */
368 int
369 dadk_probe(opaque_t objp, int kmsflg)
370 {
371 	struct dadk *dadkp = (struct dadk *)objp;
372 	struct scsi_device *devp;
373 	char   name[80];
374 
375 	devp = dadkp->dad_sd;
376 	if (!devp->sd_inq || (devp->sd_inq->inq_dtype == DTYPE_NOTPRESENT) ||
377 		(devp->sd_inq->inq_dtype == DTYPE_UNKNOWN)) {
378 		return (DDI_PROBE_FAILURE);
379 	}
380 
381 	switch (devp->sd_inq->inq_dtype) {
382 		case DTYPE_DIRECT:
383 			dadkp->dad_ctype = DKC_DIRECT;
384 			dadkp->dad_extp->tg_nodetype = DDI_NT_BLOCK;
385 			dadkp->dad_extp->tg_ctype = DKC_DIRECT;
386 			break;
387 		case DTYPE_RODIRECT: /* eg cdrom */
388 			dadkp->dad_ctype = DKC_CDROM;
389 			dadkp->dad_extp->tg_rdonly = 1;
390 			dadkp->dad_rdonly = 1;
391 			dadkp->dad_cdrom = 1;
392 			dadkp->dad_extp->tg_nodetype = DDI_NT_CD;
393 			dadkp->dad_extp->tg_ctype = DKC_CDROM;
394 			break;
395 		case DTYPE_WORM:
396 		case DTYPE_OPTICAL:
397 		default:
398 			return (DDI_PROBE_FAILURE);
399 	}
400 
401 	dadkp->dad_extp->tg_rmb = dadkp->dad_rmb = devp->sd_inq->inq_rmb;
402 
403 	dadkp->dad_secshf = SCTRSHFT;
404 	dadkp->dad_blkshf = 0;
405 
406 	/* display the device name */
407 	(void) strcpy(name, "Vendor '");
408 	gda_inqfill((caddr_t)devp->sd_inq->inq_vid, 8, &name[strlen(name)]);
409 	(void) strcat(name, "' Product '");
410 	gda_inqfill((caddr_t)devp->sd_inq->inq_pid, 16, &name[strlen(name)]);
411 	(void) strcat(name, "'");
412 	gda_log(devp->sd_dev, dadk_name, CE_NOTE, "!<%s>\n", name);
413 
414 	return (DDI_PROBE_SUCCESS);
415 }
416 
417 
418 /* ARGSUSED */
419 int
420 dadk_attach(opaque_t objp)
421 {
422 	return (DDI_SUCCESS);
423 }
424 
425 int
426 dadk_set_bbhobj(opaque_t objp, opaque_t bbhobjp)
427 {
428 	struct dadk *dadkp = (struct dadk *)objp;
429 	/* free the old bbh object */
430 	if (dadkp->dad_bbhobjp)
431 		BBH_FREE(dadkp->dad_bbhobjp);
432 
433 	/* initialize the new bbh object */
434 	dadkp->dad_bbhobjp = bbhobjp;
435 	BBH_INIT(bbhobjp);
436 
437 	return (DDI_SUCCESS);
438 }
439 
440 /* ARGSUSED */
441 int
442 dadk_open(opaque_t objp, int flag)
443 {
444 	struct dadk *dadkp = (struct dadk *)objp;
445 	int error;
446 	int wce;
447 
448 	if (!dadkp->dad_rmb) {
449 		if (dadkp->dad_phyg.g_cap) {
450 			FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
451 			    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));
452 			return (DDI_SUCCESS);
453 		}
454 	} else {
455 	    mutex_enter(&dadkp->dad_mutex);
456 	    dadkp->dad_iostate = DKIO_NONE;
457 	    cv_broadcast(&dadkp->dad_state_cv);
458 	    mutex_exit(&dadkp->dad_mutex);
459 
460 	    if (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0, 0, DADK_SILENT) ||
461 		dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT) ||
462 		dadk_rmb_ioctl(dadkp, DCMD_UPDATE_GEOM, 0, 0, DADK_SILENT)) {
463 		    return (DDI_FAILURE);
464 	    }
465 
466 	    mutex_enter(&dadkp->dad_mutex);
467 	    dadkp->dad_iostate = DKIO_INSERTED;
468 	    cv_broadcast(&dadkp->dad_state_cv);
469 	    mutex_exit(&dadkp->dad_mutex);
470 	}
471 
472 	/*
473 	 * Get the write cache enable state.  If there is an error,
474 	 * we must assume that the write cache is enabled.
475 	 *
476 	 * NOTE: Since there is currently no Solaris mechanism to
477 	 * change the state of the Write Cache Enable feature,
478 	 * this code just checks the value of the WCE bit
479 	 * obtained at device init time.  If a mechanism
480 	 * is added to the driver to change WCE, dad_wce
481 	 * must be updated appropriately.
482 	 */
483 	error = CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETWCE,
484 	    (uintptr_t)&wce, FKIOCTL | FNATIVE);
485 	mutex_enter(&dadkp->dad_mutex);
486 	dadkp->dad_wce = (error != 0) || (wce != 0);
487 	mutex_exit(&dadkp->dad_mutex);
488 
489 	/* logical disk geometry */
490 	CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETGEOM,
491 	    (uintptr_t)&dadkp->dad_logg, FKIOCTL | FNATIVE);
492 	if (dadkp->dad_logg.g_cap == 0)
493 		return (DDI_FAILURE);
494 
495 	/* get physical disk geometry */
496 	CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETPHYGEOM,
497 	    (uintptr_t)&dadkp->dad_phyg, FKIOCTL | FNATIVE);
498 	if (dadkp->dad_phyg.g_cap == 0)
499 		return (DDI_FAILURE);
500 
501 	dadk_setcap(dadkp);
502 
503 	dadk_create_errstats(dadkp,
504 	    ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));
505 
506 	/* start profiling */
507 	FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
508 		ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));
509 
510 	return (DDI_SUCCESS);
511 }
512 
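/*
 * Normalize the physical sector size (defaulting to 2048 bytes for
 * CD-ROM, 512 otherwise) and derive the block/sector shift factors used
 * to convert logical block numbers to device sectors.
 */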
513 static void
514 dadk_setcap(struct dadk *dadkp)
515 {
516 	int	 totsize;
517 	int	 i;
518 
519 	totsize = dadkp->dad_phyg.g_secsiz;
520 
521 	if (totsize == 0) {
522 		if (dadkp->dad_cdrom) {
523 			totsize = 2048;
524 		} else {
525 			totsize = NBPSCTR;
526 		}
527 	} else {
528 		/* Round down sector size to multiple of 512B */
529 		totsize &= ~(NBPSCTR-1);
530 	}
531 	dadkp->dad_phyg.g_secsiz = totsize;
532 
533 	/* set sec,block shift factor - (512->0, 1024->1, 2048->2, etc.) */
534 	totsize >>= SCTRSHFT;
535 	for (i = 0; totsize != 1; i++, totsize >>= 1);
536 	dadkp->dad_blkshf = i;
537 	dadkp->dad_secshf = i + SCTRSHFT;
538 }
539 
540 
541 static void
542 dadk_create_errstats(struct dadk *dadkp, int instance)
543 {
544 	dadk_errstats_t *dep;
545 	char kstatname[KSTAT_STRLEN];
546 	dadk_ioc_string_t dadk_ioc_string;
547 
548 	if (dadkp->dad_errstats)
549 		return;
550 
551 	(void) sprintf(kstatname, "cmdk%d,error", instance);
552 	dadkp->dad_errstats = kstat_create("cmdkerror", instance,
553 	    kstatname, "device_error", KSTAT_TYPE_NAMED,
554 	    sizeof (dadk_errstats_t) / sizeof (kstat_named_t),
555 	    KSTAT_FLAG_PERSISTENT);
556 
557 	if (!dadkp->dad_errstats)
558 		return;
559 
560 	dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;
561 
562 	kstat_named_init(&dep->dadk_softerrs,
563 	    "Soft Errors", KSTAT_DATA_UINT32);
564 	kstat_named_init(&dep->dadk_harderrs,
565 	    "Hard Errors", KSTAT_DATA_UINT32);
566 	kstat_named_init(&dep->dadk_transerrs,
567 	    "Transport Errors", KSTAT_DATA_UINT32);
568 	kstat_named_init(&dep->dadk_model,
569 	    "Model", KSTAT_DATA_CHAR);
570 	kstat_named_init(&dep->dadk_revision,
571 	    "Revision", KSTAT_DATA_CHAR);
572 	kstat_named_init(&dep->dadk_serial,
573 	    "Serial No", KSTAT_DATA_CHAR);
574 	kstat_named_init(&dep->dadk_capacity,
575 	    "Size", KSTAT_DATA_ULONGLONG);
576 	kstat_named_init(&dep->dadk_rq_media_err,
577 	    "Media Error", KSTAT_DATA_UINT32);
578 	kstat_named_init(&dep->dadk_rq_ntrdy_err,
579 	    "Device Not Ready", KSTAT_DATA_UINT32);
580 	kstat_named_init(&dep->dadk_rq_nodev_err,
581 	    "No Device", KSTAT_DATA_UINT32);
582 	kstat_named_init(&dep->dadk_rq_recov_err,
583 	    "Recoverable", KSTAT_DATA_UINT32);
584 	kstat_named_init(&dep->dadk_rq_illrq_err,
585 	    "Illegal Request", KSTAT_DATA_UINT32);
586 
587 	dadkp->dad_errstats->ks_private = dep;
588 	dadkp->dad_errstats->ks_update = nulldev;
589 	kstat_install(dadkp->dad_errstats);
590 
591 	/* get model */
592 	dep->dadk_model.value.c[0] = 0;
593 	dadk_ioc_string.is_buf = &dep->dadk_model.value.c[0];
594 	dadk_ioc_string.is_size = 16;
595 	CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETMODEL,
596 	    (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);
597 
598 	/* get serial */
599 	dep->dadk_serial.value.c[0] = 0;
600 	dadk_ioc_string.is_buf = &dep->dadk_serial.value.c[0];
601 	dadk_ioc_string.is_size = 16;
602 	CTL_IOCTL(dadkp->dad_ctlobjp, DIOCTL_GETSERIAL,
603 	    (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);
604 
605 	/* Get revision */
606 	dep->dadk_revision.value.c[0] = 0;
607 
608 	/* Get capacity */
609 
610 	dep->dadk_capacity.value.ui64 =
611 	    (uint64_t)dadkp->dad_logg.g_cap *
612 	    (uint64_t)dadkp->dad_logg.g_secsiz;
613 }
614 
615 
616 int
617 dadk_close(opaque_t objp)
618 {
619 	struct dadk *dadkp = (struct dadk *)objp;
620 
621 	if (dadkp->dad_rmb) {
622 		(void) dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0, 0,
623 		    DADK_SILENT);
624 		(void) dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT);
625 	}
626 	FLC_STOP_KSTAT(dadkp->dad_flcobjp);
627 
628 	dadk_destroy_errstats(dadkp);
629 
630 	return (DDI_SUCCESS);
631 }
632 
633 static void
634 dadk_destroy_errstats(struct dadk *dadkp)
635 {
636 	if (!dadkp->dad_errstats)
637 		return;
638 
639 	kstat_delete(dadkp->dad_errstats);
640 	dadkp->dad_errstats = NULL;
641 }
642 
643 
644 int
645 dadk_strategy(opaque_t objp, struct buf *bp)
646 {
647 	struct dadk *dadkp = (struct dadk *)objp;
648 
649 	if (dadkp->dad_rdonly && !(bp->b_flags & B_READ)) {
650 		bioerror(bp, EROFS);
651 		return (DDI_FAILURE);
652 	}
653 
654 	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
655 		bioerror(bp, ENXIO);
656 		return (DDI_FAILURE);
657 	}
658 
659 	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));
660 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
661 
662 	return (DDI_SUCCESS);
663 }
664 
665 int
666 dadk_dump(opaque_t objp, struct buf *bp)
667 {
668 	struct dadk *dadkp = (struct dadk *)objp;
669 	struct cmpkt *pktp;
670 
671 	if (dadkp->dad_rdonly) {
672 		bioerror(bp, EROFS);
673 		return (DDI_FAILURE);
674 	}
675 
676 	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
677 		bioerror(bp, ENXIO);
678 		return (DDI_FAILURE);
679 	}
680 
681 	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));
682 
683 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_polldone, NULL, NULL);
684 	if (!pktp) {
685 		cmn_err(CE_WARN, "no resources for dumping");
686 		bioerror(bp, EIO);
687 		return (DDI_FAILURE);
688 	}
689 	pktp->cp_flags |= CPF_NOINTR;
690 
691 	(void) dadk_ioprep(dadkp, pktp);
692 	dadk_transport(dadkp, bp);
693 	pktp->cp_byteleft -= pktp->cp_bytexfer;
694 
695 	while (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
696 		(void) dadk_iosetup(dadkp, pktp);
697 		dadk_transport(dadkp, bp);
698 		pktp->cp_byteleft -= pktp->cp_bytexfer;
699 	}
700 
701 	if (pktp->cp_private)
702 		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
703 	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
704 	return (DDI_SUCCESS);
705 }
706 
707 /* ARGSUSED  */
708 int
709 dadk_ioctl(opaque_t objp, dev_t dev, int cmd, intptr_t arg, int flag,
710 	cred_t *cred_p, int *rval_p)
711 {
712 	struct dadk *dadkp = (struct dadk *)objp;
713 
714 	switch (cmd) {
715 	case DKIOCGETDEF:
716 	    {
717 		struct buf	*bp;
718 		int		err, head;
719 		unsigned char	*secbuf;
720 		STRUCT_DECL(defect_header, adh);
721 
722 		STRUCT_INIT(adh, flag & FMODELS);
723 
724 		/*
725 		 * copyin header ....
726 		 * yields head number and buffer address
727 		 */
728 		if (ddi_copyin((caddr_t)arg, STRUCT_BUF(adh), STRUCT_SIZE(adh),
729 		    flag))
730 			return (EFAULT);
731 		head = STRUCT_FGET(adh, head);
732 		if (head < 0 || head >= dadkp->dad_phyg.g_head)
733 			return (ENXIO);
734 		secbuf = kmem_zalloc(NBPSCTR, KM_SLEEP);
735 		if (!secbuf)
736 			return (ENOMEM);
737 		bp = getrbuf(KM_SLEEP);
738 		if (!bp) {
739 			kmem_free(secbuf, NBPSCTR);
740 			return (ENOMEM);
741 		}
742 
743 		bp->b_edev = dev;
744 		bp->b_dev  = cmpdev(dev);
745 		bp->b_flags = B_BUSY;
746 		bp->b_resid = 0;
747 		bp->b_bcount = NBPSCTR;
748 		bp->b_un.b_addr = (caddr_t)secbuf;
749 		bp->b_blkno = head; /* I had to put it somewhere! */
750 		bp->b_forw = (struct buf *)dadkp;
751 		bp->b_back = (struct buf *)DCMD_GETDEF;
752 
753 		FLC_ENQUE(dadkp->dad_flcobjp, bp);
754 		err = biowait(bp);
755 		if (!err) {
756 			if (ddi_copyout((caddr_t)secbuf,
757 			    STRUCT_FGETP(adh, buffer), NBPSCTR, flag))
758 				err = ENXIO;
759 		}
760 		kmem_free(secbuf, NBPSCTR);
761 		freerbuf(bp);
762 		return (err);
763 	    }
764 	case DIOCTL_RWCMD:
765 	    {
766 		struct dadkio_rwcmd *rwcmdp;
767 		int status, rw;
768 
769 		/*
770 		 * copied in by cmdk and, if necessary, converted to the
771 		 * correct datamodel
772 		 */
773 		rwcmdp = (struct dadkio_rwcmd *)(intptr_t)arg;
774 
775 		/*
776 		 * handle the complex cases here; we pass these
777 		 * through to the driver, which will queue them and
778 		 * handle the requests asynchronously.  The simpler
779 		 * cases, which can return immediately, fail here, and
780 		 * the request reverts to the dadk_ioctl routine, which
781 		 * will reroute them directly to the ata driver.
782 		 */
783 		switch (rwcmdp->cmd) {
784 			case DADKIO_RWCMD_READ :
785 				/*FALLTHROUGH*/
786 			case DADKIO_RWCMD_WRITE:
787 				rw = ((rwcmdp->cmd == DADKIO_RWCMD_WRITE) ?
788 				    B_WRITE : B_READ);
789 				status = dadk_dk_buf_setup(dadkp,
790 				    (opaque_t)rwcmdp, dev, ((flag &FKIOCTL) ?
791 				    UIO_SYSSPACE : UIO_USERSPACE), rw);
792 				return (status);
793 			default:
794 				return (EINVAL);
795 		}
796 	    }
797 	case DKIOCFLUSHWRITECACHE:
798 		{
799 			struct buf *bp;
800 			int err = 0;
801 			struct dk_callback *dkc = (struct dk_callback *)arg;
802 			struct cmpkt *pktp;
803 			int is_sync = 1;
804 
805 			mutex_enter(&dadkp->dad_mutex);
806 			if (dadkp->dad_noflush || !dadkp->dad_wce) {
807 				err = dadkp->dad_noflush ? ENOTSUP : 0;
808 				mutex_exit(&dadkp->dad_mutex);
809 				/*
810 				 * If a callback was requested: a
811 				 * callback will always be done if the
812 				 * caller saw the DKIOCFLUSHWRITECACHE
813 				 * ioctl return 0, and never done if the
814 				 * caller saw the ioctl return an error.
815 				 */
816 				if ((flag & FKIOCTL) && dkc != NULL &&
817 				    dkc->dkc_callback != NULL) {
818 					(*dkc->dkc_callback)(dkc->dkc_cookie,
819 					    err);
820 					/*
821 					 * Did callback and reported error.
822 					 * Since we did a callback, ioctl
823 					 * should return 0.
824 					 */
825 					err = 0;
826 				}
827 				return (err);
828 			}
829 			mutex_exit(&dadkp->dad_mutex);
830 
831 			bp = getrbuf(KM_SLEEP);
832 
833 			bp->b_edev = dev;
834 			bp->b_dev  = cmpdev(dev);
835 			bp->b_flags = B_BUSY;
836 			bp->b_resid = 0;
837 			bp->b_bcount = 0;
838 			SET_BP_SEC(bp, 0);
839 
840 			if ((flag & FKIOCTL) && dkc != NULL &&
841 			    dkc->dkc_callback != NULL) {
842 				struct dk_callback *dkc2 =
843 				    (struct dk_callback *)kmem_zalloc(
844 				    sizeof (struct dk_callback), KM_SLEEP);
845 
846 				bcopy(dkc, dkc2, sizeof (*dkc2));
847 				/*
848 				 * Borrow b_list to carry private data
849 				 * to the b_iodone func.
850 				 */
851 				bp->b_list = (struct buf *)dkc2;
852 				bp->b_iodone = dadk_flushdone;
853 				is_sync = 0;
854 			}
855 
856 			/*
857 			 * Setup command pkt
858 			 * dadk_pktprep() can't fail since DDI_DMA_SLEEP set
859 			 */
860 			pktp = dadk_pktprep(dadkp, NULL, bp,
861 			    dadk_iodone, DDI_DMA_SLEEP, NULL);
862 
863 			pktp->cp_time = DADK_FLUSH_CACHE_TIME;
864 
865 			*((char *)(pktp->cp_cdbp)) = DCMD_FLUSH_CACHE;
866 			pktp->cp_byteleft = 0;
867 			pktp->cp_private = NULL;
868 			pktp->cp_secleft = 0;
869 			pktp->cp_srtsec = -1;
870 			pktp->cp_bytexfer = 0;
871 
872 			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);
873 
874 			FLC_ENQUE(dadkp->dad_flcobjp, bp);
875 
876 			if (is_sync) {
877 				err = biowait(bp);
878 				freerbuf(bp);
879 			}
880 			return (err);
881 		}
882 	default:
883 		if (!dadkp->dad_rmb)
884 			return (CTL_IOCTL(dadkp->dad_ctlobjp, cmd, arg, flag));
885 	}
886 
887 	switch (cmd) {
888 	case CDROMSTOP:
889 		return (dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0,
890 			0, DADK_SILENT));
891 	case CDROMSTART:
892 		return (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0,
893 			0, DADK_SILENT));
894 	case DKIOCLOCK:
895 		return (dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT));
896 	case DKIOCUNLOCK:
897 		return (dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT));
898 	case DKIOCEJECT:
899 	case CDROMEJECT:
900 		{
901 			int ret;
902 
903 			if (ret = dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0,
904 				DADK_SILENT)) {
905 				return (ret);
906 			}
907 			if (ret = dadk_rmb_ioctl(dadkp, DCMD_EJECT, 0, 0,
908 				DADK_SILENT)) {
909 				return (ret);
910 			}
911 			mutex_enter(&dadkp->dad_mutex);
912 			dadkp->dad_iostate = DKIO_EJECTED;
913 			cv_broadcast(&dadkp->dad_state_cv);
914 			mutex_exit(&dadkp->dad_mutex);
915 
916 			return (0);
917 
918 		}
919 	default:
920 		return (ENOTTY);
921 	/*
922 	 * cdrom audio commands
923 	 */
924 	case CDROMPAUSE:
925 		cmd = DCMD_PAUSE;
926 		break;
927 	case CDROMRESUME:
928 		cmd = DCMD_RESUME;
929 		break;
930 	case CDROMPLAYMSF:
931 		cmd = DCMD_PLAYMSF;
932 		break;
933 	case CDROMPLAYTRKIND:
934 		cmd = DCMD_PLAYTRKIND;
935 		break;
936 	case CDROMREADTOCHDR:
937 		cmd = DCMD_READTOCHDR;
938 		break;
939 	case CDROMREADTOCENTRY:
940 		cmd = DCMD_READTOCENT;
941 		break;
942 	case CDROMVOLCTRL:
943 		cmd = DCMD_VOLCTRL;
944 		break;
945 	case CDROMSUBCHNL:
946 		cmd = DCMD_SUBCHNL;
947 		break;
948 	case CDROMREADMODE2:
949 		cmd = DCMD_READMODE2;
950 		break;
951 	case CDROMREADMODE1:
952 		cmd = DCMD_READMODE1;
953 		break;
954 	case CDROMREADOFFSET:
955 		cmd = DCMD_READOFFSET;
956 		break;
957 	}
958 	return (dadk_rmb_ioctl(dadkp, cmd, arg, flag, 0));
959 }
960 
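/*
 * b_iodone handler for an asynchronous DKIOCFLUSHWRITECACHE request:
 * invoke the saved dk_callback with the buf's error status, then free
 * the callback copy and the buf.
 */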
961 int
962 dadk_flushdone(struct buf *bp)
963 {
964 	struct dk_callback *dkc = (struct dk_callback *)bp->b_list;
965 
966 	ASSERT(dkc != NULL && dkc->dkc_callback != NULL);
967 
968 	(*dkc->dkc_callback)(dkc->dkc_cookie, geterror(bp));
969 
970 	kmem_free(dkc, sizeof (*dkc));
971 	freerbuf(bp);
972 	return (0);
973 }
974 
975 int
976 dadk_getphygeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
977 {
978 	struct dadk *dadkp = (struct dadk *)objp;
979 
980 	bcopy((caddr_t)&dadkp->dad_phyg, (caddr_t)dkgeom_p,
981 	    sizeof (struct tgdk_geom));
982 	return (DDI_SUCCESS);
983 }
984 
985 int
986 dadk_getgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
987 {
988 	struct dadk *dadkp = (struct dadk *)objp;
989 	bcopy((caddr_t)&dadkp->dad_logg, (caddr_t)dkgeom_p,
990 	    sizeof (struct tgdk_geom));
991 	return (DDI_SUCCESS);
992 }
993 
994 int
995 dadk_setgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
996 {
997 	struct dadk *dadkp = (struct dadk *)objp;
998 
999 	dadkp->dad_logg.g_cyl = dkgeom_p->g_cyl;
1000 	dadkp->dad_logg.g_head = dkgeom_p->g_head;
1001 	dadkp->dad_logg.g_sec = dkgeom_p->g_sec;
1002 	dadkp->dad_logg.g_cap = dkgeom_p->g_cap;
1003 	return (DDI_SUCCESS);
1004 }
1005 
1006 
1007 tgdk_iob_handle
1008 dadk_iob_alloc(opaque_t objp, daddr_t blkno, ssize_t xfer, int kmsflg)
1009 {
1010 	struct dadk *dadkp = (struct dadk *)objp;
1011 	struct buf *bp;
1012 	struct tgdk_iob *iobp;
1013 	size_t rlen;
1014 
1015 	iobp = kmem_zalloc(sizeof (*iobp), kmsflg);
1016 	if (iobp == NULL)
1017 		return (NULL);
1018 	if ((bp = getrbuf(kmsflg)) == NULL) {
1019 		kmem_free(iobp, sizeof (*iobp));
1020 		return (NULL);
1021 	}
1022 
1023 	iobp->b_psec  = LBLK2SEC(blkno, dadkp->dad_blkshf);
1024 	iobp->b_pbyteoff = (blkno & ((1<<dadkp->dad_blkshf) - 1)) << SCTRSHFT;
1025 	iobp->b_pbytecnt = ((iobp->b_pbyteoff + xfer + dadkp->DAD_SECSIZ - 1)
1026 				>> dadkp->dad_secshf) << dadkp->dad_secshf;
1027 
1028 	bp->b_un.b_addr = 0;
1029 	/*
1030 	 * use i_ddi_mem_alloc() for now until we have an interface to allocate
1031 	 * memory for DMA which doesn't require a DMA handle. ddi_iopb_alloc()
1032 	 * is obsolete and we want more flexibility in controlling the DMA
1033 	 * address constraints.
1034 	 */
1035 	if (i_ddi_mem_alloc((dadkp->dad_sd)->sd_dev, &dadk_alloc_attr,
1036 	    (size_t)iobp->b_pbytecnt, ((kmsflg == KM_SLEEP) ? 1 : 0), 0, NULL,
1037 	    &bp->b_un.b_addr, &rlen, NULL) != DDI_SUCCESS) {
1038 		freerbuf(bp);
1039 		kmem_free(iobp, sizeof (*iobp));
1040 		return (NULL);
1041 	}
1042 	iobp->b_flag |= IOB_BPALLOC | IOB_BPBUFALLOC;
1043 	iobp->b_bp = bp;
1044 	iobp->b_lblk = blkno;
1045 	iobp->b_xfer = xfer;
1048 	return (iobp);
1049 }
1050 
1051 /* ARGSUSED */
1052 int
1053 dadk_iob_free(opaque_t objp, struct tgdk_iob *iobp)
1054 {
1055 	struct buf *bp;
1056 
1057 	if (iobp) {
1058 		if (iobp->b_bp && (iobp->b_flag & IOB_BPALLOC)) {
1059 			bp = iobp->b_bp;
1060 			if (bp->b_un.b_addr && (iobp->b_flag & IOB_BPBUFALLOC))
1061 				i_ddi_mem_free((caddr_t)bp->b_un.b_addr, NULL);
1062 			freerbuf(bp);
1063 		}
1064 		kmem_free(iobp, sizeof (*iobp));
1065 	}
1066 	return (DDI_SUCCESS);
1067 }
1068 
1069 /* ARGSUSED */
1070 caddr_t
1071 dadk_iob_htoc(opaque_t objp, struct tgdk_iob *iobp)
1072 {
1073 	return (iobp->b_bp->b_un.b_addr+iobp->b_pbyteoff);
1074 }
1075 
1076 
1077 caddr_t
1078 dadk_iob_xfer(opaque_t objp, struct tgdk_iob *iobp, int rw)
1079 {
1080 	struct dadk	*dadkp = (struct dadk *)objp;
1081 	struct buf	*bp;
1082 	int		err;
1083 
1084 	bp = iobp->b_bp;
1085 	if (dadkp->dad_rdonly && !(rw & B_READ)) {
1086 		bioerror(bp, EROFS);
1087 		return (NULL);
1088 	}
1089 
1090 	bp->b_flags |= (B_BUSY | rw);
1091 	bp->b_bcount = iobp->b_pbytecnt;
1092 	SET_BP_SEC(bp, iobp->b_psec);
1093 	bp->av_back = (struct buf *)0;
1094 	bp->b_resid = 0;
1095 
1096 	/* call flow control */
1097 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
1098 	err = biowait(bp);
1099 
1100 	bp->b_bcount = iobp->b_xfer;
1101 	bp->b_flags &= ~(B_DONE|B_BUSY);
1102 
1103 	if (err)
1104 		return (NULL);
1105 
1106 	return (bp->b_un.b_addr+iobp->b_pbyteoff);
1107 }
1108 
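/*
 * tgcom transport entry point: hand the command packet attached to bp
 * to the controller; if it cannot be sent, retry via dadk_restart().
 */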
1109 static void
1110 dadk_transport(opaque_t com_data, struct buf *bp)
1111 {
1112 	struct dadk *dadkp = (struct dadk *)com_data;
1113 
1114 	if (CTL_TRANSPORT(dadkp->dad_ctlobjp, GDA_BP_PKT(bp)) ==
1115 	    CTL_SEND_SUCCESS)
1116 		return;
1117 	dadk_restart((void*)GDA_BP_PKT(bp));
1118 }
1119 
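/*
 * tgcom packet entry point: if bp does not already carry a command
 * packet, allocate one and prepare it for I/O.
 */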
1120 static int
1121 dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t), caddr_t arg)
1122 {
1123 	struct cmpkt *pktp;
1124 	struct dadk *dadkp = (struct dadk *)com_data;
1125 
1126 	if (GDA_BP_PKT(bp))
1127 		return (DDI_SUCCESS);
1128 
1129 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, func, arg);
1130 	if (!pktp)
1131 		return (DDI_FAILURE);
1132 
1133 	return (dadk_ioprep(dadkp, pktp));
1134 }
1135 
1136 /*
1137  * Read, Write preparation
1138  */
1139 static int
1140 dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp)
1141 {
1142 	struct buf *bp;
1143 
1144 	bp = pktp->cp_bp;
1145 	if (bp->b_forw == (struct buf *)dadkp)
1146 		*((char *)(pktp->cp_cdbp)) = (char)(intptr_t)bp->b_back;
1147 
1148 	else if (bp->b_flags & B_READ)
1149 		*((char *)(pktp->cp_cdbp)) = DCMD_READ;
1150 	else
1151 		*((char *)(pktp->cp_cdbp)) = DCMD_WRITE;
1152 	pktp->cp_byteleft = bp->b_bcount;
1153 
1154 	/* setup the bad block list handle */
1155 	pktp->cp_private = BBH_GETHANDLE(dadkp->dad_bbhobjp, bp);
1156 	return (dadk_iosetup(dadkp, pktp));
1157 }
1158 
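/*
 * Set up the next chunk of a transfer: advance the starting sector and
 * remaining sector count, taking the next extent from the bad block
 * list cookie when one is attached, then let the controller bound the
 * actual transfer size.
 */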
1159 static int
1160 dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp)
1161 {
1162 	struct buf	*bp;
1163 	bbh_cookie_t	bbhckp;
1164 	int		seccnt;
1165 
1166 	seccnt = pktp->cp_bytexfer >> dadkp->dad_secshf;
1167 	pktp->cp_secleft -= seccnt;
1168 
1169 	if (pktp->cp_secleft) {
1170 		pktp->cp_srtsec += seccnt;
1171 	} else {
1172 		/* get the first cookie from the bad block list */
1173 		if (!pktp->cp_private) {
1174 			bp = pktp->cp_bp;
1175 			pktp->cp_srtsec  = GET_BP_SEC(bp);
1176 			pktp->cp_secleft = (bp->b_bcount >> dadkp->dad_secshf);
1177 		} else {
1178 			bbhckp = BBH_HTOC(dadkp->dad_bbhobjp,
1179 			    pktp->cp_private);
1180 			pktp->cp_srtsec = BBH_GETCK_SECTOR(dadkp->dad_bbhobjp,
1181 			    bbhckp);
1182 			pktp->cp_secleft = BBH_GETCK_SECLEN(dadkp->dad_bbhobjp,
1183 			    bbhckp);
1184 		}
1185 	}
1186 
1187 	pktp->cp_bytexfer = pktp->cp_secleft << dadkp->dad_secshf;
1188 
1189 	if (CTL_IOSETUP(dadkp->dad_ctlobjp, pktp)) {
1190 		return (DDI_SUCCESS);
1191 	} else {
1192 		return (DDI_FAILURE);
1193 	}
1198 }
1199 
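/*
 * Allocate a command packet for bp via gda_pktprep() and fill in the
 * dadk defaults (completion callback, timeout, iodone routine and
 * private data).
 */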
1200 static struct cmpkt *
1201 dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp, struct buf *bp,
1202     void (*cb_func)(struct buf *), int (*func)(caddr_t), caddr_t arg)
1203 {
1204 	struct cmpkt *pktp;
1205 
1206 	pktp = gda_pktprep(dadkp->dad_ctlobjp, in_pktp, (opaque_t)bp, func,
1207 	    arg);
1208 
1209 	if (pktp) {
1210 		pktp->cp_callback = dadk_pktcb;
1211 		pktp->cp_time = DADK_IO_TIME;
1212 		pktp->cp_flags = 0;
1213 		pktp->cp_iodone = cb_func;
1214 		pktp->cp_dev_private = (opaque_t)dadkp;
1215 
1217 
1218 	return (pktp);
1219 }
1220 
1221 
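/*
 * Retry entry point used by timeouts and failed transports: requeue the
 * command and, if it cannot be reissued, complete the request.
 */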
1222 static void
1223 dadk_restart(void *vpktp)
1224 {
1225 	struct cmpkt *pktp = (struct cmpkt *)vpktp;
1226 
1227 	if (dadk_ioretry(pktp, QUE_COMMAND) == JUST_RETURN)
1228 		return;
1229 	pktp->cp_iodone(pktp->cp_bp);
1230 }
1231 
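/*
 * Handle a failed command according to 'action': requeue it up to
 * DADK_RETRY_COUNT times, or finish it with an error, adjusting b_resid
 * and mapping an aborted DCMD_FLUSH_CACHE to ENOTSUP.
 */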
1232 static int
1233 dadk_ioretry(struct cmpkt *pktp, int action)
1234 {
1235 	struct buf *bp;
1236 	struct dadk *dadkp = PKT2DADK(pktp);
1237 
1238 	switch (action) {
1239 	case QUE_COMMAND:
1240 		if (pktp->cp_retry++ < DADK_RETRY_COUNT) {
1241 			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);
1242 			if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) ==
1243 				CTL_SEND_SUCCESS) {
1244 				return (JUST_RETURN);
1245 			}
1246 			gda_log(dadkp->dad_sd->sd_dev, dadk_name,
1247 				CE_WARN,
1248 				"transport of command fails\n");
1249 		} else
1250 			gda_log(dadkp->dad_sd->sd_dev,
1251 				dadk_name, CE_WARN,
1252 				"exceeds maximum number of retries\n");
1253 		bioerror(pktp->cp_bp, ENXIO);
1254 		/*FALLTHROUGH*/
1255 	case COMMAND_DONE_ERROR:
1256 		bp = pktp->cp_bp;
1257 		bp->b_resid += pktp->cp_byteleft - pktp->cp_bytexfer +
1258 		    pktp->cp_resid;
1259 		if (geterror(bp) == 0) {
1260 			if ((*((char *)(pktp->cp_cdbp)) == DCMD_FLUSH_CACHE) &&
1261 			    (pktp->cp_dev_private == (opaque_t)dadkp) &&
1262 			    ((int)(*(char *)pktp->cp_scbp) == DERR_ABORT)) {
1263 				/*
1264 				 * Flag "unimplemented" responses for
1265 				 * DCMD_FLUSH_CACHE as ENOTSUP
1266 				 */
1267 				bioerror(bp, ENOTSUP);
1268 				mutex_enter(&dadkp->dad_mutex);
1269 				dadkp->dad_noflush = 1;
1270 				mutex_exit(&dadkp->dad_mutex);
1271 			} else {
1272 				bioerror(bp, EIO);
1273 			}
1274 		}
1275 		/*FALLTHROUGH*/
1276 	case COMMAND_DONE:
1277 	default:
1278 		return (COMMAND_DONE);
1279 	}
1280 }
1281 
1282 
1283 static void
1284 dadk_pktcb(struct cmpkt *pktp)
1285 {
1286 	int action;
1287 	struct dadkio_rwcmd *rwcmdp;
1288 
1289 	rwcmdp = (struct dadkio_rwcmd *)pktp->cp_passthru;  /* ioctl packet */
1290 
1291 	if (pktp->cp_reason == CPS_SUCCESS) {
1292 		if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT))
1293 			rwcmdp->status.status = DADKIO_STAT_NO_ERROR;
1294 		pktp->cp_iodone(pktp->cp_bp);
1295 		return;
1296 	}
1297 
1298 	if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT)) {
1299 		if (pktp->cp_reason == CPS_CHKERR)
1300 			dadk_recorderr(pktp, rwcmdp);
1301 		dadk_iodone(pktp->cp_bp);
1302 		return;
1303 	}
1304 
1305 	if (pktp->cp_reason == CPS_CHKERR)
1306 		action = dadk_chkerr(pktp);
1307 	else
1308 		action = COMMAND_DONE_ERROR;
1309 
1310 	if (action == JUST_RETURN)
1311 		return;
1312 
1313 	if (action != COMMAND_DONE) {
1314 		if ((dadk_ioretry(pktp, action)) == JUST_RETURN)
1315 			return;
1316 	}
1317 	pktp->cp_iodone(pktp->cp_bp);
1318 }
1319 
1320 
1321 
1322 static struct dadkio_derr dadk_errtab[] = {
1323 	{COMMAND_DONE, GDA_INFORMATIONAL},	/*  0 DERR_SUCCESS	*/
1324 	{QUE_COMMAND, GDA_FATAL},		/*  1 DERR_AMNF		*/
1325 	{QUE_COMMAND, GDA_FATAL},		/*  2 DERR_TKONF	*/
1326 	{COMMAND_DONE_ERROR, GDA_INFORMATIONAL}, /* 3 DERR_ABORT	*/
1327 	{QUE_COMMAND, GDA_RETRYABLE},		/*  4 DERR_DWF		*/
1328 	{QUE_COMMAND, GDA_FATAL},		/*  5 DERR_IDNF		*/
1329 	{JUST_RETURN, GDA_INFORMATIONAL},	/*  6 DERR_BUSY		*/
1330 	{QUE_COMMAND, GDA_FATAL},		/*  7 DERR_UNC		*/
1331 	{QUE_COMMAND, GDA_RETRYABLE},		/*  8 DERR_BBK		*/
1332 	{COMMAND_DONE_ERROR, GDA_FATAL},	/*  9 DERR_INVCDB	*/
1333 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 10 DERR_HARD		*/
1334 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 11 DERR_ILI		*/
1335 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 12 DERR_EOM		*/
1336 	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 13 DERR_MCR		*/
1337 	{COMMAND_DONE, GDA_INFORMATIONAL},	/* 14 DERR_RECOVER	*/
1338 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 15 DERR_NOTREADY	*/
1339 	{QUE_COMMAND, GDA_RETRYABLE},		/* 16 DERR_MEDIUM	*/
1340 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 17 DERR_HW		*/
1341 	{COMMAND_DONE, GDA_FATAL},		/* 18 DERR_ILL		*/
1342 	{COMMAND_DONE, GDA_FATAL},		/* 19 DERR_UNIT_ATTN	*/
1343 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 20 DERR_DATA_PROT	*/
1344 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 21 DERR_MISCOMPARE	*/
1345 	{QUE_COMMAND, GDA_RETRYABLE},		/* 22 DERR_ICRC		*/
1346 	{COMMAND_DONE_ERROR, GDA_FATAL},	/* 23 DERR_RESV		*/
1347 };
1348 
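/*
 * Decode the controller status byte: update the error kstats, log the
 * failure when appropriate and return the recovery action from
 * dadk_errtab (a delayed retry is scheduled for DERR_BUSY).
 */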
1349 static int
1350 dadk_chkerr(struct cmpkt *pktp)
1351 {
1352 	int err_blkno;
1353 	struct dadk *dadkp = PKT2DADK(pktp);
1354 	dadk_errstats_t *dep;
1355 	int scb = *(char *)pktp->cp_scbp;
1356 	int action;
1357 
1358 	if (scb == DERR_SUCCESS) {
1359 		if (pktp->cp_retry != 0 && dadkp->dad_errstats != NULL) {
1360 			dep = (dadk_errstats_t *)
1361 			    dadkp->dad_errstats->ks_data;
1362 			dep->dadk_rq_recov_err.value.ui32++;
1363 		}
1364 		return (COMMAND_DONE);
1365 	}
1366 
1367 	/* check error code table */
1368 	action = dadk_errtab[scb].d_action;
1369 
1370 	if (pktp->cp_retry) {
1371 		err_blkno = pktp->cp_srtsec + ((pktp->cp_bytexfer -
1372 			pktp->cp_resid) >> dadkp->dad_secshf);
1373 	} else
1374 		err_blkno = -1;
1375 
1376 	if (dadkp->dad_errstats != NULL) {
1377 		dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;
1378 
1379 		if (action == GDA_RETRYABLE)
1380 			dep->dadk_softerrs.value.ui32++;
1381 		else if (action == GDA_FATAL)
1382 			dep->dadk_harderrs.value.ui32++;
1383 
1384 		switch (scb) {
1385 			case DERR_INVCDB:
1386 			case DERR_ILI:
1387 			case DERR_EOM:
1388 			case DERR_HW:
1389 			case DERR_ICRC:
1390 				dep->dadk_transerrs.value.ui32++;
1391 				break;
1392 
1393 			case DERR_AMNF:
1394 			case DERR_TKONF:
1395 			case DERR_DWF:
1396 			case DERR_BBK:
1397 			case DERR_UNC:
1398 			case DERR_HARD:
1399 			case DERR_MEDIUM:
1400 			case DERR_DATA_PROT:
1401 			case DERR_MISCOMP:
1402 				dep->dadk_rq_media_err.value.ui32++;
1403 				break;
1404 
1405 			case DERR_NOTREADY:
1406 				dep->dadk_rq_ntrdy_err.value.ui32++;
1407 				break;
1408 
1409 			case DERR_IDNF:
1410 			case DERR_UNIT_ATTN:
1411 				dep->dadk_rq_nodev_err.value.ui32++;
1412 				break;
1413 
1414 			case DERR_ILL:
1415 			case DERR_RESV:
1416 				dep->dadk_rq_illrq_err.value.ui32++;
1417 				break;
1418 
1419 			default:
1420 				break;
1421 		}
1422 	}
1423 
1424 	/* if attempting to read a sector from a cdrom audio disk */
1425 	if ((dadkp->dad_cdrom) &&
1426 	    (*((char *)(pktp->cp_cdbp)) == DCMD_READ) &&
1427 	    (scb == DERR_ILL)) {
1428 		return (COMMAND_DONE);
1429 	}
1430 	if (pktp->cp_passthru == NULL) {
1431 		gda_errmsg(dadkp->dad_sd, pktp, dadk_name,
1432 		    dadk_errtab[scb].d_severity, pktp->cp_srtsec,
1433 		    err_blkno, dadk_cmds, dadk_sense);
1434 	}
1435 
1436 	if (scb == DERR_BUSY) {
1437 		(void) timeout(dadk_restart, (void *)pktp, DADK_BSY_TIMEOUT);
1438 	}
1439 
1440 	return (action);
1441 }
1442 
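/*
 * Translate the error status of a failed pass-through (DIOCTL_RWCMD)
 * packet into the caller-visible dadkio_rwcmd status block.
 */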
1443 static void
1444 dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp)
1445 {
1446 	struct dadk *dadkp;
1447 	int scb;
1448 
1449 	dadkp = PKT2DADK(pktp);
1450 	scb = (int)(*(char *)pktp->cp_scbp);
1451 
1453 	rwcmdp->status.failed_blk = rwcmdp->blkaddr +
1454 		((pktp->cp_bytexfer -
1455 		pktp->cp_resid) >> dadkp->dad_secshf);
1456 
1457 	rwcmdp->status.resid = pktp->cp_bp->b_resid +
1458 		pktp->cp_byteleft - pktp->cp_bytexfer + pktp->cp_resid;
1459 	switch (scb) {
1460 	case DERR_AMNF:
1461 	case DERR_ABORT:
1462 		rwcmdp->status.status = DADKIO_STAT_ILLEGAL_REQUEST;
1463 		break;
1464 	case DERR_DWF:
1465 	case DERR_IDNF:
1466 		rwcmdp->status.status = DADKIO_STAT_ILLEGAL_ADDRESS;
1467 		break;
1468 	case DERR_TKONF:
1469 	case DERR_UNC:
1470 	case DERR_BBK:
1471 		rwcmdp->status.status = DADKIO_STAT_MEDIUM_ERROR;
1472 		rwcmdp->status.failed_blk_is_valid = 1;
1473 		rwcmdp->status.resid = 0;
1474 		break;
1475 	case DERR_BUSY:
1476 		rwcmdp->status.status = DADKIO_STAT_NOT_READY;
1477 		break;
1478 	case DERR_INVCDB:
1479 	case DERR_HARD:
1480 		rwcmdp->status.status = DADKIO_STAT_HARDWARE_ERROR;
1481 		break;
1482 	case DERR_ICRC:
1483 	default:
1484 		rwcmdp->status.status = DADKIO_STAT_NOT_SUPPORTED;
1485 	}
1486 
1487 	if (rwcmdp->flags & DADKIO_FLAG_SILENT)
1488 		return;
1489 	gda_errmsg(dadkp->dad_sd, pktp, dadk_name, dadk_errtab[scb].d_severity,
1490 		rwcmdp->blkaddr, rwcmdp->status.failed_blk,
1491 		dadk_cmds, dadk_sense);
1492 }
1493 
1494 /*ARGSUSED*/
1495 static void
1496 dadk_polldone(struct buf *bp)
1497 {
1498 }
1499 
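/*
 * Completion handler for normal I/O: continue a multi-chunk transfer if
 * bytes remain, otherwise dequeue the buf from flow control, free the
 * packet and call biodone().
 */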
1500 static void
1501 dadk_iodone(struct buf *bp)
1502 {
1503 	struct cmpkt *pktp;
1504 	struct dadk *dadkp;
1505 
1506 	pktp  = GDA_BP_PKT(bp);
1507 	dadkp = PKT2DADK(pktp);
1508 
1509 	/* check for all iodone */
1510 	pktp->cp_byteleft -= pktp->cp_bytexfer;
1511 	if (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
1512 		pktp->cp_retry = 0;
1513 		(void) dadk_iosetup(dadkp, pktp);
1514 
1516 		/* transport the next one */
1517 		if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) == CTL_SEND_SUCCESS)
1518 			return;
1519 		if ((dadk_ioretry(pktp, QUE_COMMAND)) == JUST_RETURN)
1520 			return;
1521 	}
1522 
1523 	/* start next one */
1524 	FLC_DEQUE(dadkp->dad_flcobjp, bp);
1525 
1526 	/* free pkt */
1527 	if (pktp->cp_private)
1528 		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
1529 	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
1530 	biodone(bp);
1531 }
1532 
1533 int
1534 dadk_check_media(opaque_t objp, int *state)
1535 {
1536 	struct dadk *dadkp = (struct dadk *)objp;
1537 
1538 	if (!dadkp->dad_rmb) {
1539 		return (ENXIO);
1540 	}
1541 #ifdef DADK_DEBUG
1542 	if (dadk_debug & DSTATE)
1543 		PRF("dadk_check_media: user state %x disk state %x\n",
1544 			*state, dadkp->dad_iostate);
1545 #endif
1546 	/*
1547 	 * If state already changed just return
1548 	 */
1549 	if (*state != dadkp->dad_iostate) {
1550 		*state = dadkp->dad_iostate;
1551 		return (0);
1552 	}
1553 
1554 	/*
1555 	 * Start up the media state polling thread
1556 	 */
1557 	mutex_enter(&dadkp->dad_mutex);
1558 	if (dadkp->dad_thread_cnt == 0) {
1559 		/*
1560 		 * One thread per removable dadk device
1561 		 */
1562 		(void) thread_create(NULL, 0, dadk_watch_thread, dadkp, 0, &p0,
1563 		    TS_RUN, v.v_maxsyspri - 2);
1564 	}
1565 	dadkp->dad_thread_cnt++;
1566 
1567 	/*
1568 	 * Wait for state to change
1569 	 */
1570 	do {
1571 		if (cv_wait_sig(&dadkp->dad_state_cv, &dadkp->dad_mutex) == 0) {
1572 			dadkp->dad_thread_cnt--;
1573 			mutex_exit(&dadkp->dad_mutex);
1574 			return (EINTR);
1575 		}
1576 	} while (*state == dadkp->dad_iostate);
1577 	*state = dadkp->dad_iostate;
1578 	dadkp->dad_thread_cnt--;
1579 	mutex_exit(&dadkp->dad_mutex);
1580 	return (0);
1581 }
1582 
1583 
1584 #define	MEDIA_ACCESS_DELAY 2000000
1585 
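/*
 * Removable-media watch thread: poll the drive state every
 * dadk_check_media_time microseconds and wake up dadk_check_media()
 * waiters when it changes; the wakeup is delayed after an insertion to
 * give the drive time to settle.
 */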
1586 static void
1587 dadk_watch_thread(struct dadk *dadkp)
1588 {
1589 	enum dkio_state state;
1590 	int interval;
1591 
1592 	interval = drv_usectohz(dadk_check_media_time);
1593 
1594 	do {
1595 		if (dadk_rmb_ioctl(dadkp, DCMD_GET_STATE, (intptr_t)&state, 0,
1596 		    DADK_SILENT)) {
1597 			/*
1598 			 * Assume state remained the same
1599 			 */
1600 			state = dadkp->dad_iostate;
1601 		}
1602 
1603 		/*
1604 		 * now signal the waiting thread if this is *not* the
1605 		 * specified state;
1606 		 * delay the signal if the state is DKIO_INSERTED
1607 		 * to allow the target to recover
1608 		 */
1609 		if (state != dadkp->dad_iostate) {
1610 
1611 			dadkp->dad_iostate = state;
1612 			if (state == DKIO_INSERTED) {
1613 				/*
1614 				 * delay the signal to give the drive a chance
1615 				 * to do what it apparently needs to do
1616 				 */
1617 				(void) timeout((void(*)(void *))cv_broadcast,
1618 				    (void *)&dadkp->dad_state_cv,
1619 				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
1620 			} else {
1621 				cv_broadcast(&dadkp->dad_state_cv);
1622 			}
1623 		}
1624 		delay(interval);
1625 	} while (dadkp->dad_thread_cnt);
1626 }
1627 
1628 int
1629 dadk_inquiry(opaque_t objp, opaque_t *inqpp)
1630 {
1631 	struct dadk *dadkp = (struct dadk *)objp;
1632 	struct scsi_inquiry **sinqpp = (struct scsi_inquiry **)inqpp;
1633 
1634 	if (dadkp && dadkp->dad_sd && dadkp->dad_sd->sd_inq) {
1635 		*sinqpp = dadkp->dad_sd->sd_inq;
1636 		return (DDI_SUCCESS);
1637 	}
1638 
1639 	return (DDI_FAILURE);
1640 }
1641 
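/*
 * Issue a removable-media command (motor, lock, eject, ...) by building
 * a temporary buf and packet and passing them synchronously through the
 * controller ioctl path.
 */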
1642 static int
1643 dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags, int silent)
1644 
1645 {
1646 	struct buf *bp;
1647 	int err;
1648 	struct cmpkt *pktp;
1649 
1650 	if ((bp = getrbuf(KM_SLEEP)) == NULL) {
1651 		return (ENOMEM);
1652 	}
1653 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_rmb_iodone, NULL, NULL);
1654 	if (!pktp) {
1655 		freerbuf(bp);
1656 		return (ENOMEM);
1657 	}
1658 	bp->b_back  = (struct buf *)arg;
1659 	bp->b_forw  = (struct buf *)dadkp->dad_flcobjp;
1660 	pktp->cp_passthru = (opaque_t)(intptr_t)silent;
1661 
1662 	err = CTL_IOCTL(dadkp->dad_ctlobjp, cmd, (uintptr_t)pktp, flags);
1663 	freerbuf(bp);
1664 	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
1665 	return (err);
1668 }
1669 
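/*
 * Completion handler for removable-media commands: clear the buf's
 * busy/done flags, release it from flow control and call biodone().
 */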
1670 static void
1671 dadk_rmb_iodone(struct buf *bp)
1672 {
1673 	struct cmpkt *pktp;
1674 	struct dadk *dadkp;
1675 
1676 	pktp  = GDA_BP_PKT(bp);
1677 	dadkp = PKT2DADK(pktp);
1678 
1679 	bp->b_flags &= ~(B_DONE|B_BUSY);
1680 
1681 	/* Start next one */
1682 	FLC_DEQUE(dadkp->dad_flcobjp, bp);
1683 
1684 	biodone(bp);
1685 }
1686 
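/*
 * Build a uio describing the DIOCTL_RWCMD user buffer and let physio()
 * drive the transfer through dadk_dk_strategy().
 */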
1687 static int
1688 dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp, dev_t dev,
1689 	enum uio_seg dataspace, int rw)
1690 {
1691 	struct dadkio_rwcmd *rwcmdp = (struct dadkio_rwcmd *)cmdp;
1692 	struct buf	*bp;
1693 	struct iovec	aiov;
1694 	struct uio	auio;
1695 	struct uio	*uio = &auio;
1696 	int		status;
1697 
1698 	bp = getrbuf(KM_SLEEP);
1699 
1700 	bp->av_forw = bp->b_forw = (struct buf *)dadkp;
1701 	bp->b_back  = (struct buf *)rwcmdp;	/* ioctl packet */
1702 
1703 	bzero((caddr_t)&auio, sizeof (struct uio));
1704 	bzero((caddr_t)&aiov, sizeof (struct iovec));
1705 	aiov.iov_base = rwcmdp->bufaddr;
1706 	aiov.iov_len = rwcmdp->buflen;
1707 	uio->uio_iov = &aiov;
1708 
1709 	uio->uio_iovcnt = 1;
1710 	uio->uio_resid = rwcmdp->buflen;
1711 	uio->uio_segflg = dataspace;
1712 
1713 	/* Let physio do the rest... */
1714 	status = physio(dadk_dk_strategy, bp, dev, rw, dadkmin, uio);
1715 
1716 	freerbuf(bp);
1717 	return (status);
1719 }
1720 
1721 /* Do not let a user gendisk request get too big or */
1722 /* else we could use too many resources.		    */
1723 
1724 static void
1725 dadkmin(struct buf *bp)
1726 {
1727 	if (bp->b_bcount > dadk_dk_maxphys)
1728 		bp->b_bcount = dadk_dk_maxphys;
1729 }
1730 
1731 static int
1732 dadk_dk_strategy(struct buf *bp)
1733 {
1734 	dadk_dk((struct dadk *)bp->av_forw, (struct dadkio_rwcmd *)bp->b_back,
1735 	    bp);
1736 	return (0);
1737 }
1738 
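/*
 * Prepare a command packet for a pass-through read/write, attach the
 * rwcmd as cp_passthru and queue the buf through flow control.
 */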
1739 static void
1740 dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *rwcmdp, struct buf *bp)
1741 {
1742 	struct  cmpkt *pktp;
1743 
1744 	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, NULL, NULL);
1745 	if (!pktp) {
1746 		bioerror(bp, ENOMEM);
1747 		biodone(bp);
1748 		return;
1749 	}
1750 
1751 	pktp->cp_passthru = rwcmdp;
1752 
1753 	(void) dadk_ioprep(dadkp, pktp);
1754 
1755 	FLC_ENQUE(dadkp->dad_flcobjp, bp);
1756 }
1757