1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Direct Attached disk driver for SPARC machines.
31  */
32 
33 /*
34  * Includes, Declarations and Local Data
35  */
36 #include <sys/dada/dada.h>
37 #include <sys/dkbad.h>
38 #include <sys/dklabel.h>
39 #include <sys/dkio.h>
40 #include <sys/cdio.h>
41 #include <sys/vtoc.h>
42 #include <sys/dada/targets/daddef.h>
43 #include <sys/dada/targets/dadpriv.h>
44 #include <sys/file.h>
45 #include <sys/stat.h>
46 #include <sys/kstat.h>
47 #include <sys/vtrace.h>
48 #include <sys/aio_req.h>
49 #include <sys/note.h>
50 #include <sys/cmlb.h>
51 
52 /*
53  * Global Error Levels for Error Reporting
54  */
55 int dcd_error_level	= DCD_ERR_RETRYABLE;
56 /*
57  * Local Static Data
58  */
59 
60 static int dcd_io_time		= DCD_IO_TIME;
61 static int dcd_retry_count	= DCD_RETRY_COUNT;
62 #ifndef lint
63 static int dcd_report_pfa = 1;
64 #endif
65 static int dcd_rot_delay = 4;
66 static int dcd_poll_busycnt = DCD_POLL_TIMEOUT;
67 
68 /*
69  * Local Function Prototypes
70  */
71 
72 static int dcdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
73 static int dcdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
74 static int dcdstrategy(struct buf *bp);
75 static int dcddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
76 static int dcdioctl(dev_t, int, intptr_t, int, cred_t *, int *);
77 static int dcdread(dev_t dev, struct uio *uio, cred_t *cred_p);
78 static int dcdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
79 static int dcd_prop_op(dev_t, dev_info_t *, ddi_prop_op_t, int,
80     char *, caddr_t, int *);
81 static int dcdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
82 static int dcdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);
83 
84 
85 static void dcd_free_softstate(struct dcd_disk *un, dev_info_t *devi);
86 static int dcd_doattach(dev_info_t *devi, int (*f)());
87 static int dcd_validate_geometry(struct dcd_disk *un);
88 static ddi_devid_t dcd_get_devid(struct dcd_disk *un);
89 static ddi_devid_t  dcd_create_devid(struct dcd_disk *un);
90 static int dcd_make_devid_from_serial(struct dcd_disk *un);
91 static void dcd_validate_model_serial(char *str, int *retlen, int totallen);
92 static int dcd_read_deviceid(struct dcd_disk *un);
93 static int dcd_write_deviceid(struct dcd_disk *un);
94 static int dcd_poll(struct dcd_pkt *pkt);
95 static char *dcd_rname(int reason);
96 static void dcd_flush_cache(struct dcd_disk *un);
97 
98 static int dcd_compute_dk_capacity(struct dcd_device *devp,
99     diskaddr_t *capacity);
100 static int dcd_send_lb_rw_cmd(dev_info_t *devinfo, void *bufaddr,
101     diskaddr_t start_block, size_t reqlength, uchar_t cmd);
102 
103 static void dcdmin(struct buf *bp);
104 
105 static int dcdioctl_cmd(dev_t, struct udcd_cmd *,
106     enum uio_seg, enum uio_seg);
107 
108 static void dcdstart(struct dcd_disk *un);
109 static void dcddone_and_mutex_exit(struct dcd_disk *un, struct buf *bp);
110 static void make_dcd_cmd(struct dcd_disk *un, struct buf *bp, int (*f)());
111 static void dcdudcdmin(struct buf *bp);
112 
113 static int dcdrunout(caddr_t);
114 static int dcd_check_wp(dev_t dev);
115 static int dcd_unit_ready(dev_t dev);
116 static void dcd_handle_tran_busy(struct buf *bp, struct diskhd *dp,
117     struct dcd_disk *un);
118 static void dcdintr(struct dcd_pkt *pkt);
119 static int dcd_handle_incomplete(struct dcd_disk *un, struct buf *bp);
120 static void dcd_offline(struct dcd_disk *un, int bechatty);
121 static int dcd_ready_and_valid(dev_t dev, struct dcd_disk *un);
122 static void dcd_reset_disk(struct dcd_disk *un, struct dcd_pkt *pkt);
123 static void dcd_translate(struct dadkio_status32 *statp, struct udcd_cmd *cmdp);
124 static int dcdflushdone(struct buf *bp);
125 
126 /* Function prototypes for cmlb */
127 
128 static int dcd_lb_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
129     diskaddr_t start_block, size_t reqlength, void *tg_cookie);
130 
131 static int dcd_lb_getphygeom(dev_info_t *devi, cmlb_geom_t *phygeomp);
132 static int dcd_lb_getinfo(dev_info_t *devi, int cmd, void *arg,
133     void *tg_cookie);
134 
135 
136 static cmlb_tg_ops_t dcd_lb_ops = {
137 	TG_DK_OPS_VERSION_1,
138 	dcd_lb_rdwr,
139 	dcd_lb_getinfo
140 };
141 
142 /*
143  * Error and Logging Functions
144  */
145 #ifndef lint
146 static void clean_print(dev_info_t *dev, char *label, uint_t level,
147     char *title, char *data, int len);
148 static void dcdrestart(void *arg);
149 #endif /* lint */
150 
151 static int dcd_check_error(struct dcd_disk *un, struct buf *bp);
152 
153 /*
154  * Error statistics create/update functions
155  */
156 static int dcd_create_errstats(struct dcd_disk *, int);
157 
158 
159 
160 /*PRINTFLIKE4*/
161 extern void dcd_log(dev_info_t *, char *, uint_t, const char *, ...)
162     __KPRINTFLIKE(4);
163 extern void makecommand(struct dcd_pkt *, int, uchar_t, uint32_t,
164     uchar_t, uint32_t, uchar_t, uchar_t);
165 
166 
167 /*
168  * Configuration Routines
169  */
170 static int dcdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
171     void **result);
172 static int dcdprobe(dev_info_t *devi);
173 static int dcdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
174 static int dcddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
175 static int dcdreset(dev_info_t *dip, ddi_reset_cmd_t cmd);
176 static int dcd_dr_detach(dev_info_t *devi);
177 static int dcdpower(dev_info_t *devi, int component, int level);
178 
179 static void *dcd_state;
180 static int dcd_max_instance;
181 static char *dcd_label = "dad";
182 
183 static char *diskokay = "disk okay\n";
184 
185 #if DEBUG || lint
186 #define	DCDDEBUG
187 #endif
188 
189 int dcd_test_flag = 0;
190 /*
191  * Debugging macros
192  */
193 #ifdef	DCDDEBUG
194 static int dcddebug = 0;
195 #define	DEBUGGING	(dcddebug > 1)
196 #define	DAD_DEBUG	if (dcddebug == 1) dcd_log
197 #define	DAD_DEBUG2	if (dcddebug > 1) dcd_log
198 #else	/* DCDDEBUG */
199 #define	dcddebug		(0)
200 #define	DEBUGGING	(0)
201 #define	DAD_DEBUG	if (0) dcd_log
202 #define	DAD_DEBUG2	if (0) dcd_log
203 #endif
204 
205 /*
206  * We use the pkt_private area for storing the bp and the retry counts.
207  * XXX: Is this really useful?
208  */
209 struct dcd_pkt_private {
210 	struct buf	*dcdpp_bp;
211 	short		 dcdpp_retry_count;
212 	short		 dcdpp_victim_retry_count;
213 };
214 
215 
216 _NOTE(SCHEME_PROTECTS_DATA("Unique per pkt", dcd_pkt_private buf))
217 
218 #define	PP_LEN	(sizeof (struct dcd_pkt_private))
219 
220 #define	PKT_SET_BP(pkt, bp)	\
221 	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_bp = bp
222 #define	PKT_GET_BP(pkt) \
223 	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_bp)
224 
225 
226 #define	PKT_SET_RETRY_CNT(pkt, n) \
227 	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count = n
228 
229 #define	PKT_GET_RETRY_CNT(pkt) \
230 	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count)
231 
232 #define	PKT_INCR_RETRY_CNT(pkt, n) \
233 	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count += n
234 
235 #define	PKT_SET_VICTIM_RETRY_CNT(pkt, n) \
236 	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count \
237 			= n
238 
239 #define	PKT_GET_VICTIM_RETRY_CNT(pkt) \
240 	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count)
241 #define	PKT_INCR_VICTIM_RETRY_CNT(pkt, n) \
242 	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count \
243 			+= n
244 
245 #define	DISK_NOT_READY_RETRY_COUNT	(dcd_retry_count / 2)
246 
247 
248 /*
249  * Urk!
250  */
251 #define	SET_BP_ERROR(bp, err)	\
252 	bioerror(bp, err);
253 
254 #define	IOSP			KSTAT_IO_PTR(un->un_stats)
255 #define	IO_PARTITION_STATS	un->un_pstats[DCDPART(bp->b_edev)]
256 #define	IOSP_PARTITION		KSTAT_IO_PTR(IO_PARTITION_STATS)
257 
258 #define	DCD_DO_KSTATS(un, kstat_function, bp) \
259 	ASSERT(mutex_owned(DCD_MUTEX)); \
260 	if (bp != un->un_sbufp) { \
261 		if (un->un_stats) { \
262 			kstat_function(IOSP); \
263 		} \
264 		if (IO_PARTITION_STATS) { \
265 			kstat_function(IOSP_PARTITION); \
266 		} \
267 	}
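/*
 * Annotation (not in the original source): DCD_DO_KSTATS must be invoked
 * with DCD_MUTEX held (it asserts this), and it deliberately skips the
 * special buffer un_sbufp, whose I/O is not accounted in the unit or
 * partition kstats.
 */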
268 
269 #define	DCD_DO_ERRSTATS(un, x) \
270 	if (un->un_errstats) { \
271 		struct dcd_errstats *dtp; \
272 		dtp = (struct dcd_errstats *)un->un_errstats->ks_data; \
273 		dtp->x.value.ui32++; \
274 	}
275 
276 #define	GET_SOFT_STATE(dev)						\
277 	struct dcd_disk *un;					\
278 	int instance, part;					\
279 	minor_t minor = getminor(dev);				\
280 									\
281 	part = minor & DCDPART_MASK;					\
282 	instance = minor >> DCDUNIT_SHIFT;				\
283 	if ((un = ddi_get_soft_state(dcd_state, instance)) == NULL)	\
284 		return (ENXIO);
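/*
 * Worked example of the minor-number decoding done by GET_SOFT_STATE,
 * assuming the conventional DCDPART_MASK of 7 and DCDUNIT_SHIFT of 3
 * (the actual values come from the dada headers):
 *
 *	minor 0x1a -> part = 0x1a & 7 = 2, instance = 0x1a >> 3 = 3
 *
 * i.e. minor 26 would name partition 2 of instance 3.
 */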
285 
286 #define	LOGICAL_BLOCK_ALIGN(blkno, blknoshift) \
287 		(((blkno) & ((1 << (blknoshift)) - 1)) == 0)
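/*
 * Example: with blknoshift == 2 (a 2048-byte logical block made up of
 * four DEV_BSIZE blocks), LOGICAL_BLOCK_ALIGN(8, 2) is true
 * (8 & 3 == 0) while LOGICAL_BLOCK_ALIGN(6, 2) is false (6 & 3 == 2).
 */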
288 
289 /*
290  * After the following number of sectors, the cylinder number spills over
291  * 0xFFFF if sectors = 63 and heads = 16.
292  */
293 #define	NUM_SECTORS_32G	0x3EFFC10
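/*
 * Sanity check of the constant above: with 63 sectors/track and 16 heads
 * there are 63 * 16 = 1008 sectors per cylinder, so the last sector
 * addressable with a 16-bit cylinder number is
 *
 *	1008 * 0xFFFF = 66,059,280 = 0x3EFFC10 sectors,
 *
 * which at 512 bytes per sector is roughly 32 GB (about 31.5 GiB).
 */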
294 
295 /*
296  * Configuration Data
297  */
298 
299 /*
300  * Device driver ops vector
301  */
302 
303 static struct cb_ops dcd_cb_ops = {
304 	dcdopen,		/* open */
305 	dcdclose,		/* close */
306 	dcdstrategy,		/* strategy */
307 	nodev,			/* print */
308 	dcddump,		/* dump */
309 	dcdread,		/* read */
310 	dcdwrite,		/* write */
311 	dcdioctl,		/* ioctl */
312 	nodev,			/* devmap */
313 	nodev,			/* mmap */
314 	nodev,			/* segmap */
315 	nochpoll,		/* poll */
316 	dcd_prop_op,		/* cb_prop_op */
317 	0,			/* streamtab  */
318 	D_64BIT | D_MP | D_NEW,	/* Driver compatibility flag */
319 	CB_REV,			/* cb_rev */
320 	dcdaread, 		/* async I/O read entry point */
321 	dcdawrite		/* async I/O write entry point */
322 };
323 
324 static struct dev_ops dcd_ops = {
325 	DEVO_REV,		/* devo_rev, */
326 	0,			/* refcnt  */
327 	dcdinfo,		/* info */
328 	nulldev,		/* identify */
329 	dcdprobe,		/* probe */
330 	dcdattach,		/* attach */
331 	dcddetach,		/* detach */
332 	dcdreset,		/* reset */
333 	&dcd_cb_ops,		/* driver operations */
334 	(struct bus_ops *)0,	/* bus operations */
335 	dcdpower		/* power */
336 };
337 
338 
339 /*
340  * This is the loadable module wrapper.
341  */
342 #include <sys/modctl.h>
343 
344 static struct modldrv modldrv = {
345 	&mod_driverops,		/* Type of module. This one is a driver */
346 	"DAD Disk Driver %I%",	/* Name of the module. */
347 	&dcd_ops,	/* driver ops */
348 };
349 
350 
351 
352 static struct modlinkage modlinkage = {
353 	MODREV_1, &modldrv, NULL
354 };
355 
356 /*
357  * the dcd_attach_mutex only protects dcd_max_instance in multi-threaded
358  * attach situations
359  */
360 static kmutex_t dcd_attach_mutex;
361 
362 int
363 _init(void)
364 {
365 	int e;
366 
367 	if ((e = ddi_soft_state_init(&dcd_state, sizeof (struct dcd_disk),
368 	    DCD_MAXUNIT)) != 0)
369 		return (e);
370 
371 	mutex_init(&dcd_attach_mutex, NULL, MUTEX_DRIVER, NULL);
372 	e = mod_install(&modlinkage);
373 	if (e != 0) {
374 		mutex_destroy(&dcd_attach_mutex);
375 		ddi_soft_state_fini(&dcd_state);
376 		return (e);
377 	}
378 
379 	return (e);
380 }
381 
382 int
383 _fini(void)
384 {
385 	int e;
386 
387 	if ((e = mod_remove(&modlinkage)) != 0)
388 		return (e);
389 
390 	ddi_soft_state_fini(&dcd_state);
391 	mutex_destroy(&dcd_attach_mutex);
392 
393 	return (e);
394 }
395 
396 int
397 _info(struct modinfo *modinfop)
398 {
399 
400 	return (mod_info(&modlinkage, modinfop));
401 }
402 
403 static int
404 dcdprobe(dev_info_t *devi)
405 {
406 	struct dcd_device *devp;
407 	int rval = DDI_PROBE_PARTIAL;
408 	int instance;
409 
410 	devp = ddi_get_driver_private(devi);
411 	instance = ddi_get_instance(devi);
412 
413 	/*
414 	 * Keep a count of how many disks (i.e. the highest instance number)
415 	 * we have. XXX: currently not used, but may be useful again later.
416 	 */
417 	mutex_enter(&dcd_attach_mutex);
418 	if (instance > dcd_max_instance)
419 		dcd_max_instance = instance;
420 	mutex_exit(&dcd_attach_mutex);
421 
422 	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
423 		    "dcdprobe:\n");
424 
425 	if (ddi_get_soft_state(dcd_state, instance) != NULL)
426 		return (DDI_PROBE_PARTIAL);
427 
428 	/*
429 	 * Turn around and call the utility probe routine
430 	 * to see whether we actually have a disk at this address.
431 	 */
432 
433 	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
434 	    "dcdprobe: %x\n", dcd_probe(devp, NULL_FUNC));
435 
436 	switch (dcd_probe(devp, NULL_FUNC)) {
437 	default:
438 	case DCDPROBE_NORESP:
439 	case DCDPROBE_NONCCS:
440 	case DCDPROBE_NOMEM:
441 	case DCDPROBE_FAILURE:
442 	case DCDPROBE_BUSY:
443 		break;
444 
445 	case DCDPROBE_EXISTS:
446 		/*
447 		 * Check whether it is an ATA device and then
448 		 * return SUCCESS.
449 		 */
450 		DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
451 		    "config %x\n", devp->dcd_ident->dcd_config);
452 		if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
453 			if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
454 				rval = DDI_PROBE_SUCCESS;
455 			} else
456 				rval = DDI_PROBE_FAILURE;
457 		} else {
458 			rval = DDI_PROBE_FAILURE;
459 		}
460 		break;
461 	}
462 	dcd_unprobe(devp);
463 
464 	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
465 	    "dcdprobe returns %x\n", rval);
466 
467 	return (rval);
468 }
469 
470 
471 /*ARGSUSED*/
472 static int
473 dcdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
474 {
475 	int instance, rval;
476 	struct dcd_device *devp;
477 	struct dcd_disk *un;
478 	struct diskhd *dp;
479 	char	*pm_comp[] =
480 	    { "NAME=ide-disk", "0=standby", "1=idle", "2=active" };
481 
482 	/* CONSTCOND */
483 	ASSERT(NO_COMPETING_THREADS);
484 
485 
486 	devp = ddi_get_driver_private(devi);
487 	instance = ddi_get_instance(devi);
488 	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG, "Attach Started\n");
489 
490 	switch (cmd) {
491 	case DDI_ATTACH:
492 		break;
493 
494 	case DDI_RESUME:
495 		if (!(un = ddi_get_soft_state(dcd_state, instance)))
496 			return (DDI_FAILURE);
497 		mutex_enter(DCD_MUTEX);
498 		Restore_state(un);
499 		/*
500 		 * Restore the state which was saved, to give the
501 		 * right state in un_last_state.
502 		 */
503 		un->un_last_state = un->un_save_state;
504 		un->un_throttle = 2;
505 		cv_broadcast(&un->un_suspend_cv);
506 		/*
507 		 * Raise the power level of the device to active.
508 		 */
509 		mutex_exit(DCD_MUTEX);
510 		(void) pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE);
511 		mutex_enter(DCD_MUTEX);
512 
513 		/*
514 		 * start unit - if this is a low-activity device
515 		 * commands in queue will have to wait until new
516 		 * commands come in, which may take a while.
517 		 * Also, we specifically don't check un_ncmds
518 		 * because we know that there really are no
519 		 * commands in progress after the unit was suspended
520 		 * and we could have reached the throttle level, been
521 		 * suspended, and have no new commands coming in for
522 		 * a while.  Highly unlikely, but so is the low-
523 		 * activity disk scenario.
524 		 */
525 		dp = &un->un_utab;
526 		if (dp->b_actf && (dp->b_forw == NULL)) {
527 			dcdstart(un);
528 		}
529 
530 		mutex_exit(DCD_MUTEX);
531 		return (DDI_SUCCESS);
532 
533 	default:
534 		return (DDI_FAILURE);
535 	}
536 
537 	if (dcd_doattach(devi, SLEEP_FUNC) == DDI_FAILURE) {
538 		return (DDI_FAILURE);
539 	}
540 
541 	if (!(un = (struct dcd_disk *)
542 	    ddi_get_soft_state(dcd_state, instance))) {
543 		return (DDI_FAILURE);
544 	}
545 	devp->dcd_private = (ataopaque_t)un;
546 
547 	/*
548 	 * Add a zero-length attribute to tell the world we support
549 	 * kernel ioctls (for layered drivers)
550 	 */
551 	(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
552 	    DDI_KERNEL_IOCTL, NULL, 0);
553 
554 	/*
555 	 * Since the dad device does not have the 'reg' property,
556 	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
557 	 * The following code is to tell cpr that this device
558 	 * does need to be suspended and resumed.
559 	 */
560 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
561 	    "pm-hardware-state", (caddr_t)"needs-suspend-resume");
562 
563 	/*
564 	 * Initialize power management bookkeeping;
565 	 * Create components - In IDE case there are 3 levels and one
566 	 * component. The levels being - active, idle, standby.
567 	 */
568 
569 	rval = ddi_prop_update_string_array(DDI_DEV_T_NONE,
570 	    devi, "pm-components", pm_comp, 4);
571 	if (rval == DDI_PROP_SUCCESS) {
572 		/*
573 		 * Ignore the return value of pm_raise_power
574 		 * Even if we check the return values and
575 		 * remove the property created above, PM
576 		 * framework will not honour the change after
577 		 * first call to pm_raise_power. Hence, the
578 		 * removal of that property does not help if
579 		 * pm_raise_power fails.
580 		 */
581 		(void) pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE);
582 	}
583 
584 	ddi_report_dev(devi);
585 
586 	cmlb_alloc_handle(&un->un_dklbhandle);
587 
588 	if (cmlb_attach(devi,
589 		&dcd_lb_ops,
590 		0,
591 		0,
592 		0,
593 		DDI_NT_BLOCK_CHAN,
594 		CMLB_FAKE_GEOM_LABEL_IOCTLS_VTOC8,
595 		un->un_dklbhandle,
596 		0) != 0) {
597 		cmlb_free_handle(&un->un_dklbhandle);
598 		dcd_free_softstate(un, devi);
599 		return (DDI_FAILURE);
600 	}
601 
602 	mutex_enter(DCD_MUTEX);
603 	(void) dcd_validate_geometry(un);
604 
605 	/* Get the devid; create a devid ONLY IF we could not get one */
606 	if (dcd_get_devid(un) == NULL) {
607 		/* Create the fab'd devid */
608 		(void) dcd_create_devid(un);
609 	}
610 	mutex_exit(DCD_MUTEX);
611 
612 	return (DDI_SUCCESS);
613 }
614 
615 static void
616 dcd_free_softstate(struct dcd_disk *un, dev_info_t *devi)
617 {
618 	struct dcd_device		*devp;
619 	int instance = ddi_get_instance(devi);
620 
621 	devp = ddi_get_driver_private(devi);
622 
623 	if (un) {
624 		sema_destroy(&un->un_semoclose);
625 		cv_destroy(&un->un_sbuf_cv);
626 		cv_destroy(&un->un_state_cv);
627 		cv_destroy(&un->un_disk_busy_cv);
628 		cv_destroy(&un->un_suspend_cv);
629 
630 		/*
631 		 * Deallocate command packet resources.
632 		 */
633 		if (un->un_sbufp)
634 			freerbuf(un->un_sbufp);
635 		if (un->un_dp) {
636 			kmem_free((caddr_t)un->un_dp, sizeof (*un->un_dp));
637 		}
638 		/*
639 		 * Unregister the devid and free devid resources allocated
640 		 */
641 		ddi_devid_unregister(DCD_DEVINFO);
642 		if (un->un_devid) {
643 			ddi_devid_free(un->un_devid);
644 			un->un_devid = NULL;
645 		}
646 
647 		/*
648 		 * Delete kstats. Kstats for non-CD devices are deleted
649 		 * in dcdclose.
650 		 */
651 		if (un->un_stats) {
652 			kstat_delete(un->un_stats);
653 		}
654 
655 	}
656 
657 	/*
658 	 * Clean up dcd_device resources.
659 	 */
660 	ddi_soft_state_free(dcd_state, instance);
661 	devp->dcd_private = (ataopaque_t)0;
662 	/* Unprobe the dcd device */
663 	dcd_unprobe(devp);
664 
665 	/* Remove properties created during attach */
666 	ddi_prop_remove_all(devi);
667 }
668 
669 static int
670 dcddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
671 {
672 	int instance;
673 	struct dcd_disk *un;
674 	clock_t	wait_cmds_complete;
675 	instance = ddi_get_instance(devi);
676 
677 	if (!(un = ddi_get_soft_state(dcd_state, instance)))
678 		return (DDI_FAILURE);
679 
680 	switch (cmd) {
681 	case DDI_DETACH:
682 		return (dcd_dr_detach(devi));
683 
684 	case DDI_SUSPEND:
685 		mutex_enter(DCD_MUTEX);
686 		if (un->un_state == DCD_STATE_SUSPENDED) {
687 			mutex_exit(DCD_MUTEX);
688 			return (DDI_SUCCESS);
689 		}
690 		un->un_throttle = 0;
691 		/*
692 		 * Save the last state first
693 		 */
694 		un->un_save_state = un->un_last_state;
695 
696 		New_state(un, DCD_STATE_SUSPENDED);
697 
698 		/*
699 		 * Wait till the current operation completes. If we are
700 		 * in the resource wait state (with an intr outstanding)
701 		 * then we need to wait till the intr completes and
702 		 * starts the next cmd. We wait for
703 		 * DCD_WAIT_CMDS_COMPLETE seconds before failing the
704 		 * DDI_SUSPEND.
705 		 */
706 		wait_cmds_complete = ddi_get_lbolt();
707 		wait_cmds_complete +=
708 			DCD_WAIT_CMDS_COMPLETE * drv_usectohz(1000000);
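		/*
		 * Note: drv_usectohz(1000000) is the number of clock ticks
		 * in one second (e.g. 100 ticks with the common hz of 100),
		 * so the statement above yields an absolute lbolt deadline
		 * DCD_WAIT_CMDS_COMPLETE seconds from now.
		 */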
709 
710 		while (un->un_ncmds) {
711 			if (cv_timedwait(&un->un_disk_busy_cv,
712 			    DCD_MUTEX, wait_cmds_complete) == -1) {
713 				/*
714 				 * Commands didn't finish in the
715 				 * specified time; fail the DDI_SUSPEND.
716 				 */
717 				DAD_DEBUG2(DCD_DEVINFO, dcd_label,
718 				    DCD_DEBUG, "dcddetach: SUSPEND "
719 				    "failed due to outstanding cmds\n");
720 				Restore_state(un);
721 				mutex_exit(DCD_MUTEX);
722 				return (DDI_FAILURE);
723 			}
724 		}
725 		mutex_exit(DCD_MUTEX);
726 		return (DDI_SUCCESS);
727 	}
728 	return (DDI_FAILURE);
729 }
730 
731 /*
732  * The reset entry point gets invoked at system shutdown time or through
733  * the CPR code at system suspend.
734  * It flushes the cache and expects this to be the last I/O operation to the
735  * disk before system reset/power off.
736  */
737 /*ARGSUSED*/
738 static int
739 dcdreset(dev_info_t *dip, ddi_reset_cmd_t cmd)
740 {
741 	struct dcd_disk *un;
742 	int instance;
743 
744 	instance = ddi_get_instance(dip);
745 
746 	if (!(un = ddi_get_soft_state(dcd_state, instance)))
747 		return (DDI_FAILURE);
748 
749 	dcd_flush_cache(un);
750 
751 	return (DDI_SUCCESS);
752 }
753 
754 
755 static int
756 dcd_dr_detach(dev_info_t *devi)
757 {
758 	struct dcd_device	*devp;
759 	struct dcd_disk		*un;
760 
761 	/*
762 	 * Get the dcd_device structure for this instance.
763 	 */
764 	if ((devp = ddi_get_driver_private(devi)) == NULL)
765 		return (DDI_FAILURE);
766 
767 	/*
768 	 * Get dcd_disk structure containing target 'private' information
769 	 */
770 	un = (struct dcd_disk *)devp->dcd_private;
771 
772 	/*
773 	 * Verify there are NO outstanding commands issued to this device.
774 	 * i.e. un_ncmds == 0.
775 	 * It's possible to have outstanding commands through the physio
776 	 * code path, even though everything's closed.
777 	 */
778 #ifndef lint
779 	_NOTE(COMPETING_THREADS_NOW);
780 #endif
781 	mutex_enter(DCD_MUTEX);
782 	if (un->un_ncmds) {
783 		mutex_exit(DCD_MUTEX);
784 		_NOTE(NO_COMPETING_THREADS_NOW);
785 		return (DDI_FAILURE);
786 	}
787 
788 	mutex_exit(DCD_MUTEX);
789 
790 	cmlb_detach(un->un_dklbhandle, 0);
791 	cmlb_free_handle(&un->un_dklbhandle);
792 
793 
794 	/*
795 	 * Lower the power state of the device,
796 	 * i.e. to the minimum power consumption state - standby.
797 	 */
798 	(void) pm_lower_power(DCD_DEVINFO, 0, DCD_DEVICE_STANDBY);
799 
800 	_NOTE(NO_COMPETING_THREADS_NOW);
801 
802 	/*
803 	 * At this point there are no competing threads anymore;
804 	 * release active MT locks and all device resources.
805 	 */
806 	dcd_free_softstate(un, devi);
807 
808 	return (DDI_SUCCESS);
809 }
810 
811 static int
812 dcdpower(dev_info_t *devi, int component, int level)
813 {
814 	struct dcd_pkt *pkt;
815 	struct dcd_disk *un;
816 	int	instance;
817 	uchar_t	cmd;
818 
819 
820 	instance = ddi_get_instance(devi);
821 
822 	if (!(un = ddi_get_soft_state(dcd_state, instance)) ||
823 		(DCD_DEVICE_STANDBY > level) || (level > DCD_DEVICE_ACTIVE) ||
824 		component != 0) {
825 		return (DDI_FAILURE);
826 	}
827 
828 	mutex_enter(DCD_MUTEX);
829 	/*
830 	 * If there are active commands for the device, or the device will
831 	 * be active soon, and at the same time there is a request to lower
832 	 * power, return failure.
833 	 */
834 	if ((un->un_ncmds) && (level != DCD_DEVICE_ACTIVE)) {
835 		mutex_exit(DCD_MUTEX);
836 		return (DDI_FAILURE);
837 	}
838 
839 	if ((un->un_state == DCD_STATE_OFFLINE) ||
840 	    (un->un_state == DCD_STATE_FATAL)) {
841 		mutex_exit(DCD_MUTEX);
842 		return (DDI_FAILURE);
843 	}
844 
845 	if (level == DCD_DEVICE_ACTIVE) {
846 		/*
847 		 * No need to fire any command, just set the state structure
848 		 * to indicate previous state and set the level to active
849 		 */
850 		un->un_power_level = DCD_DEVICE_ACTIVE;
851 		if (un->un_state == DCD_STATE_PM_SUSPENDED)
852 			Restore_state(un);
853 		mutex_exit(DCD_MUTEX);
854 	} else {
855 		pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
856 		    NULL, (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
857 		    PKT_CONSISTENT, NULL_FUNC, NULL);
858 
859 		if (pkt == (struct dcd_pkt *)NULL) {
860 			mutex_exit(DCD_MUTEX);
861 			return (DDI_FAILURE);
862 		}
863 
864 		switch (level) {
865 		case DCD_DEVICE_IDLE:
866 			cmd = ATA_IDLE_IMMEDIATE;
867 			break;
868 
869 		case DCD_DEVICE_STANDBY:
870 			cmd = ATA_STANDBY_IMMEDIATE;
871 			break;
872 		}
873 
874 		makecommand(pkt, 0, cmd, 0, 0, 0, NO_DATA_XFER, 0);
875 		mutex_exit(DCD_MUTEX);
876 		/*
877 		 * Issue the appropriate command
878 		 */
879 		if ((dcd_poll(pkt)) || (SCBP_C(pkt) != STATUS_GOOD)) {
880 			dcd_destroy_pkt(pkt);
881 			return (DDI_FAILURE);
882 		}
883 		dcd_destroy_pkt(pkt);
884 		mutex_enter(DCD_MUTEX);
885 		if (un->un_state != DCD_STATE_PM_SUSPENDED)
886 			New_state(un, DCD_STATE_PM_SUSPENDED);
887 		un->un_power_level = level;
888 		mutex_exit(DCD_MUTEX);
889 	}
890 
891 	return (DDI_SUCCESS);
892 }
893 
894 static int
895 dcd_doattach(dev_info_t *devi, int (*canwait)())
896 {
897 	struct dcd_device *devp;
898 	struct dcd_disk *un = (struct dcd_disk *)0;
899 	int instance;
900 	int km_flags = (canwait != NULL_FUNC)? KM_SLEEP : KM_NOSLEEP;
901 	int rval;
902 	char *prop_template = "target%x-dcd-options";
903 	int options;
904 	char    prop_str[32];
905 	int target;
906 	diskaddr_t capacity;
907 
908 	devp = ddi_get_driver_private(devi);
909 
910 	/*
911 	 * Call the routine dcd_probe to do some of the dirty work.
912 	 * If the IDENTIFY command succeeds, the field dcd_ident in the
913 	 * device structure will be filled in with the identify data,
914 	 * which is used below.
915 	 */
916 
917 	switch (dcd_probe(devp, canwait)) {
918 	default:
919 		return (DDI_FAILURE);
920 
921 	case DCDPROBE_EXISTS:
922 		if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
923 			if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
924 				rval = DDI_SUCCESS;
925 			} else {
926 				rval = DDI_FAILURE;
927 				goto error;
928 			}
929 		} else {
930 			rval = DDI_FAILURE;
931 			goto error;
932 		}
933 	}
934 
935 
936 	instance = ddi_get_instance(devp->dcd_dev);
937 
938 	if (ddi_soft_state_zalloc(dcd_state, instance) != DDI_SUCCESS) {
939 		rval = DDI_FAILURE;
940 		goto error;
941 	}
942 
943 	un = ddi_get_soft_state(dcd_state, instance);
944 
945 	un->un_sbufp = getrbuf(km_flags);
946 	if (un->un_sbufp == (struct buf *)NULL) {
947 		rval = DDI_FAILURE;
948 		goto error;
949 	}
950 
951 
952 	un->un_dcd = devp;
953 	un->un_power_level = -1;
954 	un->un_tgattribute.media_is_writable = 1;
955 
956 	sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);
957 	cv_init(&un->un_sbuf_cv, NULL, CV_DRIVER, NULL);
958 	cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
959 	/* Initialize power management conditional variable */
960 	cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);
961 	cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);
962 
963 	if (un->un_dp == 0) {
964 		/*
965 		 * Assume CCS drive, assume parity, but call
966 		 * it a CDROM if it is a RODIRECT device.
967 		 */
968 		un->un_dp = (struct dcd_drivetype *)
969 		    kmem_zalloc(sizeof (struct dcd_drivetype), km_flags);
970 		if (!un->un_dp) {
971 			rval = DDI_FAILURE;
972 			goto error;
973 		}
974 		if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
975 			if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
976 				un->un_dp->ctype = CTYPE_DISK;
977 			}
978 		} else  {
979 			rval = DDI_FAILURE;
980 			goto error;
981 		}
982 		un->un_dp->name = "CCS";
983 		un->un_dp->options = 0;
984 	}
985 
986 	/*
987 	 * Allow I/O requests at un_secsize-aligned offsets, in multiples of un_secsize.
988 	 */
989 	un->un_secsize = DEV_BSIZE;
990 
991 	/*
992 	 * If the device is not a removable media device, we could make sure
993 	 * it is ready by issuing another identify, but that is not needed.
994 	 * Get the capacity from the identify data and store it here.
995 	 */
996 	if (dcd_compute_dk_capacity(devp, &capacity) == 0) {
997 		un->un_diskcapacity = capacity;
998 		un->un_lbasize = DEV_BSIZE;
999 	}
1000 
1001 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "Geometry Data\n");
1002 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "cyls %x, heads %x",
1003 	    devp->dcd_ident->dcd_fixcyls,
1004 	    devp->dcd_ident->dcd_heads);
1005 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "sectors %x,",
1006 	    devp->dcd_ident->dcd_sectors);
1007 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "capacity %llx\n",
1008 	    capacity);
1009 
1010 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1011 	    "dcdprobe: drive selected\n");
1012 
1013 	/*
1014 	 * Check for the property target<n>-dcd-options to find the options
1015 	 * set by the HBA driver for this target, and record them in the
1016 	 * unit structure so that we can send commands accordingly.
1017 	 */
1018 	target = devp->dcd_address->a_target;
1019 	(void) sprintf(prop_str, prop_template, target);
1020 	options = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_NOTPROM,
1021 	    prop_str, -1);
1022 	if (options < 0) {
1023 		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1024 		    "No per target properties");
1025 	} else {
1026 		if ((options & DCD_DMA_MODE) == DCD_DMA_MODE) {
1027 			un->un_dp->options |= DMA_SUPPORTTED;
1028 			un->un_dp->dma_mode = (options >> 3) & 0x03;
1029 			DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1030 			    "mode %x\n", un->un_dp->dma_mode);
1031 		} else {
1032 			un->un_dp->options &= ~DMA_SUPPORTTED;
1033 			un->un_dp->pio_mode = options & 0x7;
1034 			if (options & DCD_BLOCK_MODE)
1035 				un->un_dp->options |= BLOCK_MODE;
1036 			DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1037 			    "mode %x\n", un->un_dp->pio_mode);
1038 		}
1039 		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1040 		    "options %x,", un->un_dp->options);
1041 	}
1042 
1043 	un->un_throttle = 2;
1044 	/*
1045 	 * Set the default max_xfer_size. This should depend on whether
1046 	 * block mode is supported by the device or not.
1047 	 */
1048 	un->un_max_xfer_size = MAX_ATA_XFER_SIZE;
1049 
1050 	/*
1051 	 * Set write cache enable softstate
1052 	 *
1053 	 * WCE is only supported in ATAPI-4 or higher; for
1054 	 * lower rev devices, must assume write cache is
1055 	 * enabled.
1056 	 */
1057 	mutex_enter(DCD_MUTEX);
1058 	un->un_write_cache_enabled = (devp->dcd_ident->dcd_majvers == 0xffff) ||
1059 	    ((devp->dcd_ident->dcd_majvers & IDENTIFY_80_ATAPI_4) == 0) ||
1060 	    (devp->dcd_ident->dcd_features85 & IDENTIFY_85_WCE) != 0;
1061 	mutex_exit(DCD_MUTEX);
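	/*
	 * Restating the expression above: write caching is assumed enabled
	 * when the major-version word is invalid (0xffff), when the device
	 * predates ATAPI-4 (so it cannot report WCE), or when the ATAPI-4+
	 * device actually reports WCE in word 85 of the identify data.
	 */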
1062 
1063 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1064 	    "dcd_doattach returns good\n");
1065 
1066 	return (rval);
1067 
1068 error:
1069 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcd_doattach failed\n");
1070 	dcd_free_softstate(un, devi);
1071 	return (rval);
1072 }
1073 
1074 #ifdef NOTNEEDED
1075 /*
1076  * This routine is used to set the block mode of operation by issuing the
1077  * Set Block mode ata command with the maximum block mode possible
1078  */
1079 dcd_set_multiple(struct dcd_disk *un)
1080 {
1081 	int status;
1082 	struct udcd_cmd ucmd;
1083 	struct dcd_cmd cdb;
1084 	dev_t	dev;
1085 
1086 
1087 	/* Zero all the required structure */
1088 	(void) bzero((caddr_t)&ucmd, sizeof (ucmd));
1089 
1090 	(void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));
1091 
1092 	cdb.cmd = ATA_SET_MULTIPLE;
1093 	/*
1094 	 * Here we should pass what needs to go into sector count REGISTER.
1095 	 * Even though this field indicates the number of bytes to read, we
1096 	 * need to specify the block factor in terms of bytes so that it
1097 	 * will be programmed by the HBA driver into the sector count register.
1098 	 */
1099 	cdb.size = un->un_lbasize * un->un_dp->block_factor;
1100 
1101 	cdb.sector_num.lba_num = 0;
1102 	cdb.address_mode = ADD_LBA_MODE;
1103 	cdb.direction = NO_DATA_XFER;
1104 
1105 	ucmd.udcd_flags = 0;
1106 	ucmd.udcd_cmd = &cdb;
1107 	ucmd.udcd_bufaddr = NULL;
1108 	ucmd.udcd_buflen = 0;
1109 	ucmd.udcd_flags |= UDCD_SILENT;
1110 
1111 	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
1112 	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);
1113 
1114 
1115 	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);
1116 
1117 	return (status);
1118 }
1119 /*
1120  * The following routine is used only for setting the transfer mode
1121  * and it is not designed for issuing any other features subcommand.
1122  */
1123 dcd_set_features(struct dcd_disk *un, uchar_t mode)
1124 {
1125 	int status;
1126 	struct udcd_cmd ucmd;
1127 	struct dcd_cmd cdb;
1128 	dev_t	dev;
1129 
1130 
1131 	/* Zero all the required structure */
1132 	(void) bzero((caddr_t)&ucmd, sizeof (ucmd));
1133 
1134 	(void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));
1135 
1136 	cdb.cmd = ATA_SET_FEATURES;
1137 	/*
1138 	 * Here we need to pass what needs to go into the sector count register
1139 	 * But in the case of SET FEATURES command the value taken in the
1140 	 * sector count register depends on what type of subcommand is
1141 	 * passed in the features register. Since we have defined the size to
1142 	 * be the size in bytes, in this context it does not indicate bytes;
1143 	 * instead it indicates the mode to be programmed.
1144 	 */
1145 	cdb.size = un->un_lbasize * mode;
1146 
1147 	cdb.sector_num.lba_num = 0;
1148 	cdb.address_mode = ADD_LBA_MODE;
1149 	cdb.direction = NO_DATA_XFER;
1150 	cdb.features = ATA_FEATURE_SET_MODE;
1151 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1152 	    "size %x, features %x, cmd %x\n",
1153 	    cdb.size, cdb.features, cdb.cmd);
1154 
1155 	ucmd.udcd_flags = 0;
1156 	ucmd.udcd_cmd = &cdb;
1157 	ucmd.udcd_bufaddr = NULL;
1158 	ucmd.udcd_buflen = 0;
1159 	ucmd.udcd_flags |= UDCD_SILENT;
1160 
1161 	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
1162 	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);
1163 
1164 	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);
1165 
1166 	return (status);
1167 }
1168 #endif
1169 
1170 /*
1171  * Validate the geometry for this disk, e.g.,
1172  * see whether it has a valid label.
1173  */
1174 static int
1175 dcd_validate_geometry(struct dcd_disk *un)
1176 {
1177 	int secsize = 0;
1178 	struct  dcd_device *devp;
1179 	int secdiv;
1180 	int rval;
1181 
1182 	ASSERT(mutex_owned(DCD_MUTEX));
1183 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1184 	    "dcd_validate_geometry: started \n");
1185 
1186 	if (un->un_lbasize < 0) {
1187 		return (DCD_BAD_LABEL);
1188 	}
1189 
1190 	if (un->un_state == DCD_STATE_PM_SUSPENDED) {
1191 		mutex_exit(DCD_MUTEX);
1192 		if (pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE)
1193 			!= DDI_SUCCESS) {
1194 			mutex_enter(DCD_MUTEX);
1195 			return (DCD_BAD_LABEL);
1196 		}
1197 		mutex_enter(DCD_MUTEX);
1198 	}
1199 
1200 	secsize = un->un_secsize;
1201 
1202 	/*
1203 	 * take a log base 2 of sector size (sorry)
1204 	 */
1205 	for (secdiv = 0; secsize = secsize >> 1; secdiv++)
1206 		;
1207 	un->un_secdiv = secdiv;
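	/*
	 * For the usual un_secsize of 512 (DEV_BSIZE) the loop above
	 * leaves secdiv == 9, since 512 == 1 << 9.
	 */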
1208 
1209 	/*
1210 	 * Only DIRECT ACCESS devices will have Sun labels.
1211 	 * CD's supposedly have a Sun label, too
1212 	 */
1213 
1214 	devp = un->un_dcd;
1215 
1216 	if (((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) &&
1217 	    (devp->dcd_ident->dcd_config & ATANON_REMOVABLE)) {
1218 		mutex_exit(DCD_MUTEX);
1219 		rval = cmlb_validate(un->un_dklbhandle, 0, 0);
1220 		mutex_enter(DCD_MUTEX);
1221 		if (rval == ENOMEM)
1222 			return (DCD_NO_MEM_FOR_LABEL);
1223 		else if (rval != 0)
1224 			return (DCD_BAD_LABEL);
1225 	} else {
1226 		/* it should never get here. */
1227 		return (DCD_BAD_LABEL);
1228 	}
1229 
1230 	/*
1231 	 * take a log base 2 of logical block size
1232 	 */
1233 	secsize = un->un_lbasize;
1234 	for (secdiv = 0; secsize = secsize >> 1; secdiv++)
1235 		;
1236 	un->un_lbadiv = secdiv;
1237 
1238 	/*
1239 	 * take a log base 2 of the multiple of DEV_BSIZE blocks that
1240 	 * make up one logical block
1241 	 */
1242 	secsize = un->un_lbasize >> DEV_BSHIFT;
1243 	for (secdiv = 0; secsize = secsize >> 1; secdiv++)
1244 		;
1245 	un->un_blknoshift = secdiv;
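	/*
	 * Example: a hypothetical un_lbasize of 2048 would give
	 * un_lbadiv == 11 (2048 == 1 << 11) and un_blknoshift == 2
	 * (2048 >> DEV_BSHIFT == 4 == 1 << 2).
	 */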
1246 	return (0);
1247 }
1248 
1249 /*
1250  * Unix Entry Points
1251  */
1252 
1253 /* ARGSUSED3 */
1254 static int
1255 dcdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
1256 {
1257 	dev_t dev = *dev_p;
1258 	int rval = EIO;
1259 	int partmask;
1260 	int nodelay = (flag & (FNDELAY | FNONBLOCK));
1261 	int i;
1262 	char kstatname[KSTAT_STRLEN];
1263 	diskaddr_t lblocks;
1264 	char *partname;
1265 
1266 	GET_SOFT_STATE(dev);
1267 
1268 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1269 	    "Inside Open flag %x, otyp %x\n", flag, otyp);
1270 
1271 	if (otyp >= OTYPCNT) {
1272 		return (EINVAL);
1273 	}
1274 
1275 	partmask = 1 << part;
1276 
1277 	/*
1278 	 * We use a semaphore here in order to serialize
1279 	 * open and close requests on the device.
1280 	 */
1281 	sema_p(&un->un_semoclose);
1282 
1283 	mutex_enter(DCD_MUTEX);
1284 
1285 	if ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL) {
1286 		rval = ENXIO;
1287 		goto done;
1288 	}
1289 
1290 	while (un->un_state == DCD_STATE_SUSPENDED) {
1291 		cv_wait(&un->un_suspend_cv, DCD_MUTEX);
1292 	}
1293 
1294 	if ((un->un_state == DCD_STATE_PM_SUSPENDED) && (!nodelay)) {
1295 		mutex_exit(DCD_MUTEX);
1296 		if (pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE)
1297 		    != DDI_SUCCESS) {
1298 			mutex_enter(DCD_MUTEX);
1299 			rval = EIO;
1300 			goto done;
1301 		}
1302 		mutex_enter(DCD_MUTEX);
1303 	}
1304 
1305 	/*
1306 	 * set make_dcd_cmd() flags and stat_size here since these
1307 	 * are unlikely to change
1308 	 */
1309 	un->un_cmd_flags = 0;
1310 
1311 	un->un_cmd_stat_size = 2;
1312 
1313 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdopen un=0x%p\n",
1314 	    (void *)un);
1315 	/*
1316 	 * check for previous exclusive open
1317 	 */
1318 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1319 	    "exclopen=%x, flag=%x, regopen=%x\n",
1320 	    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
1321 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1322 	    "Exclusive open flag %x, partmask %x\n",
1323 	    un->un_exclopen, partmask);
1324 
1325 	if (un->un_exclopen & (partmask)) {
1326 failed_exclusive:
1327 		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1328 		    "exclusive open fails\n");
1329 		rval = EBUSY;
1330 		goto done;
1331 	}
1332 
1333 	if (flag & FEXCL) {
1334 		int i;
1335 		if (un->un_ocmap.lyropen[part]) {
1336 			goto failed_exclusive;
1337 		}
1338 		for (i = 0; i < (OTYPCNT - 1); i++) {
1339 			if (un->un_ocmap.regopen[i] & (partmask)) {
1340 				goto failed_exclusive;
1341 			}
1342 		}
1343 	}
1344 	if (flag & FWRITE) {
1345 		mutex_exit(DCD_MUTEX);
1346 		if (dcd_check_wp(dev)) {
1347 			sema_v(&un->un_semoclose);
1348 			return (EROFS);
1349 		}
1350 		mutex_enter(DCD_MUTEX);
1351 	}
1352 
1353 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1354 	    "Check Write Protect handled\n");
1355 
1356 	if (!nodelay) {
1357 		mutex_exit(DCD_MUTEX);
1358 		if ((rval = dcd_ready_and_valid(dev, un)) != 0) {
1359 			rval = EIO;
1360 		}
1361 		(void) pm_idle_component(DCD_DEVINFO, 0);
1362 		/*
1363 		 * Fail if device is not ready or if the number of disk
1364 		 * blocks is zero or negative for non-CD devices.
1365 		 */
1366 		if (rval || cmlb_partinfo(un->un_dklbhandle,
1367 		    part, &lblocks, NULL, &partname, NULL, 0) ||
1368 		    lblocks <= 0) {
1369 			rval = EIO;
1370 			mutex_enter(DCD_MUTEX);
1371 			goto done;
1372 		}
1373 		mutex_enter(DCD_MUTEX);
1374 	}
1375 
1376 	if (otyp == OTYP_LYR) {
1377 		un->un_ocmap.lyropen[part]++;
1378 	} else {
1379 		un->un_ocmap.regopen[otyp] |= partmask;
1380 	}
1381 
1382 	/*
1383 	 * set up open and exclusive open flags
1384 	 */
1385 	if (flag & FEXCL) {
1386 		un->un_exclopen |= (partmask);
1387 	}
1388 
1389 
1390 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1391 	    "open of part %d type %d\n",
1392 	    part, otyp);
1393 
1394 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1395 	    "Kstats getting updated\n");
1396 	/*
1397 	 * only create kstats for disks, CD kstats created in dcdattach
1398 	 */
1399 	_NOTE(NO_COMPETING_THREADS_NOW);
1400 	mutex_exit(DCD_MUTEX);
1401 	if (un->un_stats == (kstat_t *)0) {
1402 		un->un_stats = kstat_create("dad", instance,
1403 		    NULL, "disk", KSTAT_TYPE_IO, 1,
1404 		    KSTAT_FLAG_PERSISTENT);
1405 		if (un->un_stats) {
1406 			un->un_stats->ks_lock = DCD_MUTEX;
1407 			kstat_install(un->un_stats);
1408 		}
1409 
1410 		/*
1411 		 * set up partition statistics for each partition
1412 		 * with number of blocks > 0
1413 		 */
1414 		if (!nodelay) {
1415 			for (i = 0; i < NDKMAP; i++) {
1416 				if ((un->un_pstats[i] == (kstat_t *)0) &&
1417 				    (cmlb_partinfo(un->un_dklbhandle,
1418 				    i, &lblocks, NULL, &partname,
1419 				    NULL, 0) == 0) && lblocks > 0) {
1420 					(void) sprintf(kstatname, "dad%d,%s",
1421 					    instance, partname);
1422 					un->un_pstats[i] = kstat_create("dad",
1423 					    instance,
1424 					    kstatname,
1425 					    "partition",
1426 					    KSTAT_TYPE_IO,
1427 					    1,
1428 					    KSTAT_FLAG_PERSISTENT);
1429 					if (un->un_pstats[i]) {
1430 						un->un_pstats[i]->ks_lock =
1431 						    DCD_MUTEX;
1432 						kstat_install(un->un_pstats[i]);
1433 					}
1434 				}
1435 			}
1436 		}
1437 		/*
1438 		 * set up error kstats
1439 		 */
1440 		(void) dcd_create_errstats(un, instance);
1441 	}
1442 #ifndef lint
1443 	_NOTE(COMPETING_THREADS_NOW);
1444 #endif
1445 
1446 	sema_v(&un->un_semoclose);
1447 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "Open success\n");
1448 	return (0);
1449 
1450 done:
1451 	mutex_exit(DCD_MUTEX);
1452 	sema_v(&un->un_semoclose);
1453 	return (rval);
1454 
1455 }
1456 
1457 /*
1458  * Test if disk is ready and has a valid geometry.
1459  */
1460 static int
1461 dcd_ready_and_valid(dev_t dev, struct dcd_disk *un)
1462 {
1463 	int rval = 1;
1464 	int g_error = 0;
1465 
1466 	mutex_enter(DCD_MUTEX);
1467 	/*
1468 	 * cmds outstanding
1469 	 */
1470 	if (un->un_ncmds == 0) {
1471 		(void) dcd_unit_ready(dev);
1472 	}
1473 
1474 	/*
1475 	 * If the device is not yet ready here, mark it offline.
1476 	 */
1477 	if (un->un_state == DCD_STATE_NORMAL) {
1478 		rval = dcd_unit_ready(dev);
1479 		if (rval != 0 && rval != EACCES) {
1480 			dcd_offline(un, 1);
1481 			goto done;
1482 		}
1483 	}
1484 
1485 	if (un->un_format_in_progress == 0) {
1486 		g_error = dcd_validate_geometry(un);
1487 	}
1488 
1489 	/*
1490 	 * check if geometry was valid. We don't check the validity of
1491 	 * geometry for CDROMS.
1492 	 */
1493 
1494 	if (g_error == DCD_BAD_LABEL) {
1495 		rval = 1;
1496 		goto done;
1497 	}
1498 
1499 
1500 	/*
1501 	 * the state has changed; inform the media watch routines
1502 	 */
1503 	un->un_mediastate = DKIO_INSERTED;
1504 	cv_broadcast(&un->un_state_cv);
1505 	rval = 0;
1506 
1507 done:
1508 	mutex_exit(DCD_MUTEX);
1509 	return (rval);
1510 }
1511 
1512 
1513 /*ARGSUSED*/
1514 static int
1515 dcdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
1516 {
1517 	uchar_t *cp;
1518 	int i;
1519 
1520 	GET_SOFT_STATE(dev);
1521 
1522 
1523 	if (otyp >= OTYPCNT)
1524 		return (ENXIO);
1525 
1526 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1527 	    "close of part %d type %d\n",
1528 	    part, otyp);
1529 	sema_p(&un->un_semoclose);
1530 
1531 	mutex_enter(DCD_MUTEX);
1532 
1533 	if (un->un_exclopen & (1<<part)) {
1534 		un->un_exclopen &= ~(1<<part);
1535 	}
1536 
1537 	if (otyp == OTYP_LYR) {
1538 		un->un_ocmap.lyropen[part] -= 1;
1539 	} else {
1540 		un->un_ocmap.regopen[otyp] &= ~(1<<part);
1541 	}
1542 
1543 	cp = &un->un_ocmap.chkd[0];
1544 	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
1545 		if (*cp != (uchar_t)0) {
1546 			break;
1547 		}
1548 		cp++;
1549 	}
1550 
1551 	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
1552 		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "last close\n");
1553 		if (un->un_state == DCD_STATE_OFFLINE) {
1554 			dcd_offline(un, 1);
1555 		}
1556 
1557 		mutex_exit(DCD_MUTEX);
1558 		(void) cmlb_close(un->un_dklbhandle, 0);
1559 
1560 		_NOTE(NO_COMPETING_THREADS_NOW);
1561 		if (un->un_stats) {
1562 			kstat_delete(un->un_stats);
1563 			un->un_stats = 0;
1564 		}
1565 		for (i = 0; i < NDKMAP; i++) {
1566 			if (un->un_pstats[i]) {
1567 				kstat_delete(un->un_pstats[i]);
1568 				un->un_pstats[i] = (kstat_t *)0;
1569 			}
1570 		}
1571 
1572 		if (un->un_errstats) {
1573 			kstat_delete(un->un_errstats);
1574 			un->un_errstats = (kstat_t *)0;
1575 		}
1576 		mutex_enter(DCD_MUTEX);
1577 
1578 #ifndef lint
1579 		_NOTE(COMPETING_THREADS_NOW);
1580 #endif
1581 	}
1582 
1583 	mutex_exit(DCD_MUTEX);
1584 	sema_v(&un->un_semoclose);
1585 	return (0);
1586 }
1587 
1588 static void
1589 dcd_offline(struct dcd_disk *un, int bechatty)
1590 {
1591 	if (bechatty)
1592 		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, "offline\n");
1593 
1594 	mutex_exit(DCD_MUTEX);
1595 	cmlb_invalidate(un->un_dklbhandle, 0);
1596 	mutex_enter(DCD_MUTEX);
1597 }
1598 
1599 /*
1600  * Given the device number return the devinfo pointer
1601  * from the dcd_device structure.
1602  */
1603 /*ARGSUSED*/
1604 static int
1605 dcdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
1606 {
1607 	dev_t dev;
1608 	struct dcd_disk *un;
1609 	int instance, error;
1610 
1611 
1612 	switch (infocmd) {
1613 	case DDI_INFO_DEVT2DEVINFO:
1614 		dev = (dev_t)arg;
1615 		instance = DCDUNIT(dev);
1616 		if ((un = ddi_get_soft_state(dcd_state, instance)) == NULL)
1617 			return (DDI_FAILURE);
1618 		*result = (void *) DCD_DEVINFO;
1619 		error = DDI_SUCCESS;
1620 		break;
1621 	case DDI_INFO_DEVT2INSTANCE:
1622 		dev = (dev_t)arg;
1623 		instance = DCDUNIT(dev);
1624 		*result = (void *)(uintptr_t)instance;
1625 		error = DDI_SUCCESS;
1626 		break;
1627 	default:
1628 		error = DDI_FAILURE;
1629 	}
1630 	return (error);
1631 }
1632 
1633 /*
1634  * Property operation routine: return the number of blocks for the partition
1635  * in question, or forward the request to the property facilities.
1636  */
1637 static int
1638 dcd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1639     char *name, caddr_t valuep, int *lengthp)
1640 {
1641 	int		instance = ddi_get_instance(dip);
1642 	struct dcd_disk	*un;
1643 	uint64_t	nblocks64;
1644 	diskaddr_t lblocks;
1645 
1646 	/*
1647 	 * Our dynamic properties are all device specific and size oriented.
1648 	 * Requests issued under conditions where size is valid are passed
1649 	 * to ddi_prop_op_nblocks with the size information, otherwise the
1650 	 * request is passed to ddi_prop_op. Size depends on valid geometry.
1651 	 */
1652 	un = ddi_get_soft_state(dcd_state, instance);
1653 	if ((dev == DDI_DEV_T_ANY) || (un == NULL)) {
1654 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1655 		    name, valuep, lengthp));
1656 	} else {
1657 		if (cmlb_partinfo(
1658 		    un->un_dklbhandle,
1659 		    DCDPART(dev),
1660 		    &lblocks,
1661 		    NULL,
1662 		    NULL,
1663 		    NULL,
1664 		    0)) {
1665 			return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1666 			    name, valuep, lengthp));
1667 		}
1668 
1669 		/* get nblocks value */
1670 		nblocks64 = (uint64_t)lblocks;
1671 
1672 		return (ddi_prop_op_nblocks(dev, dip, prop_op, mod_flags,
1673 		    name, valuep, lengthp, nblocks64));
1674 	}
1675 }
1676 
1677 /*
1678  * These routines perform raw i/o operations.
1679  */
1680 /*ARGSUSED*/
1681 void
1682 dcduscsimin(struct buf *bp)
1683 {
1684 
1685 }
1686 
1687 
1688 static void
1689 dcdmin(struct buf *bp)
1690 {
1691 	struct dcd_disk *un;
1692 	int instance;
1693 	minor_t minor = getminor(bp->b_edev);
1694 	instance = minor >> DCDUNIT_SHIFT;
1695 	un = ddi_get_soft_state(dcd_state, instance);
1696 
1697 	if (bp->b_bcount > un->un_max_xfer_size)
1698 		bp->b_bcount = un->un_max_xfer_size;
1699 }
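/*
 * Annotation (not in the original source): dcdmin() is handed to
 * physio()/aphysio() below as the minphys routine; it clamps each
 * transfer to the unit's un_max_xfer_size so that a single buf never
 * exceeds what the target driver can move.
 */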
1700 
1701 
1702 /* ARGSUSED2 */
1703 static int
1704 dcdread(dev_t dev, struct uio *uio, cred_t *cred_p)
1705 {
1706 	int secmask;
1707 	GET_SOFT_STATE(dev);
1708 #ifdef lint
1709 	part = part;
1710 #endif /* lint */
1711 	secmask = un->un_secsize - 1;
1712 
1713 	if (uio->uio_loffset & ((offset_t)(secmask))) {
1714 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1715 		    "file offset not modulo %d\n",
1716 		    un->un_secsize);
1717 		return (EINVAL);
1718 	} else if (uio->uio_iov->iov_len & (secmask)) {
1719 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1720 		    "transfer length not modulo %d\n", un->un_secsize);
1721 		return (EINVAL);
1722 	}
1723 	return (physio(dcdstrategy, (struct buf *)0, dev, B_READ, dcdmin, uio));
1724 }
1725 
1726 /* ARGSUSED2 */
1727 static int
1728 dcdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
1729 {
1730 	int secmask;
1731 	struct uio *uio = aio->aio_uio;
1732 	GET_SOFT_STATE(dev);
1733 #ifdef lint
1734 	part = part;
1735 #endif /* lint */
1736 	secmask = un->un_secsize - 1;
1737 
1738 	if (uio->uio_loffset & ((offset_t)(secmask))) {
1739 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1740 		    "file offset not modulo %d\n",
1741 		    un->un_secsize);
1742 		return (EINVAL);
1743 	} else if (uio->uio_iov->iov_len & (secmask)) {
1744 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1745 		    "transfer length not modulo %d\n", un->un_secsize);
1746 		return (EINVAL);
1747 	}
1748 	return (aphysio(dcdstrategy, anocancel, dev, B_READ, dcdmin, aio));
1749 }
1750 
1751 /* ARGSUSED2 */
1752 static int
1753 dcdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
1754 {
1755 	int secmask;
1756 	GET_SOFT_STATE(dev);
1757 #ifdef lint
1758 	part = part;
1759 #endif /* lint */
1760 	secmask = un->un_secsize - 1;
1761 
1762 	if (uio->uio_loffset & ((offset_t)(secmask))) {
1763 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1764 		    "file offset not modulo %d\n",
1765 		    un->un_secsize);
1766 		return (EINVAL);
1767 	} else if (uio->uio_iov->iov_len & (secmask)) {
1768 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1769 		    "transfer length not modulo %d\n", un->un_secsize);
1770 		return (EINVAL);
1771 	}
1772 	return (physio(dcdstrategy, (struct buf *)0, dev, B_WRITE, dcdmin,
1773 	    uio));
1774 }
1775 
1776 /* ARGSUSED2 */
1777 static int
1778 dcdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
1779 {
1780 	int secmask;
1781 	struct uio *uio = aio->aio_uio;
1782 	GET_SOFT_STATE(dev);
1783 #ifdef lint
1784 	part = part;
1785 #endif /* lint */
1786 	secmask = un->un_secsize - 1;
1787 
1788 	if (uio->uio_loffset & ((offset_t)(secmask))) {
1789 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1790 		    "file offset not modulo %d\n",
1791 		    un->un_secsize);
1792 		return (EINVAL);
1793 	} else if (uio->uio_iov->iov_len & (secmask)) {
1794 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1795 		    "transfer length not modulo %d\n", un->un_secsize);
1796 		return (EINVAL);
1797 	}
1798 	return (aphysio(dcdstrategy, anocancel, dev, B_WRITE, dcdmin, aio));
1799 }
1800 
1801 /*
1802  * strategy routine
1803  */
1804 static int
1805 dcdstrategy(struct buf *bp)
1806 {
1807 	struct dcd_disk *un;
1808 	struct diskhd *dp;
1809 	int i;
1810 	minor_t minor = getminor(bp->b_edev);
1811 	diskaddr_t p_lblksrt;
1812 	diskaddr_t lblocks;
1813 	diskaddr_t bn;
1814 
1815 	if ((un = ddi_get_soft_state(dcd_state,
1816 	    minor >> DCDUNIT_SHIFT)) == NULL ||
1817 	    un->un_state == DCD_STATE_DUMPING ||
1818 	    ((un->un_state  & DCD_STATE_FATAL) == DCD_STATE_FATAL)) {
1819 		SET_BP_ERROR(bp, ((un) ? ENXIO : EIO));
1820 error:
1821 		bp->b_resid = bp->b_bcount;
1822 		biodone(bp);
1823 		return (0);
1824 	}
1825 
1826 	/*
1827 	 * If the request size (buf->b_bcount) is greater than the size
1828 	 * (un->un_max_xfer_size) supported by the target driver, fail
1829 	 * the request with an EINVAL error code.
1830 	 *
1831 	 * We are not supposed to receive requests exceeding
1832 	 * un->un_max_xfer_size because the caller is expected to
1833 	 * check the maximum size that is supported by this
1834 	 * driver, either through an ioctl or the dcdmin routine (which is
1835 	 * private to this driver).
1836 	 * But we have seen cases (like the meta driver (md)) where dcdstrategy
1837 	 * is called with more than the supported size, causing data corruption.
1838 	 */
1839 
1840 	if (bp->b_bcount > un->un_max_xfer_size) {
1841 		SET_BP_ERROR(bp, EINVAL);
1842 		goto error;
1843 	}
1844 
1845 	TRACE_2(TR_FAC_DADA, TR_DCDSTRATEGY_START,
1846 	    "dcdstrategy_start: bp 0x%p un 0x%p", bp, un);
1847 
1848 	/*
1849 	 * Commands may sneak in while we released the mutex in
1850 	 * DDI_SUSPEND; we should block new commands.
1851 	 */
1852 	mutex_enter(DCD_MUTEX);
1853 	while (un->un_state == DCD_STATE_SUSPENDED) {
1854 		cv_wait(&un->un_suspend_cv, DCD_MUTEX);
1855 	}
1856 
1857 	if (un->un_state == DCD_STATE_PM_SUSPENDED) {
1858 		mutex_exit(DCD_MUTEX);
1859 		(void) pm_idle_component(DCD_DEVINFO, 0);
1860 		if (pm_raise_power(DCD_DEVINFO, 0,
1861 			DCD_DEVICE_ACTIVE) !=  DDI_SUCCESS) {
1862 			SET_BP_ERROR(bp, EIO);
1863 			goto error;
1864 		}
1865 		mutex_enter(DCD_MUTEX);
1866 	}
1867 	mutex_exit(DCD_MUTEX);
1868 
1869 	/*
1870 	 * Map-in the buffer in case starting address is not word aligned.
1871 	 */
1872 
1873 	if (((uintptr_t)bp->b_un.b_addr) & 0x1)
1874 		bp_mapin(bp);
1875 
1876 	bp->b_flags &= ~(B_DONE|B_ERROR);
1877 	bp->b_resid = 0;
1878 	bp->av_forw = 0;
1879 
1880 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1881 	    "bp->b_bcount %lx\n", bp->b_bcount);
1882 
1883 	if (bp != un->un_sbufp) {
1884 validated:	if (cmlb_partinfo(un->un_dklbhandle,
1885 		    minor & DCDPART_MASK,
1886 		    &lblocks,
1887 		    &p_lblksrt,
1888 		    NULL,
1889 		    NULL,
1890 		    0) == 0) {
1891 
1892 			bn = dkblock(bp);
1893 
1894 			DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1895 			    "dkblock(bp) is %llu\n", bn);
1896 
1897 			i = 0;
1898 			if (bn < 0) {
1899 				i = -1;
1900 			} else if (bn >= lblocks) {
1901 				/*
1902 				 * For proper comparison, file system block
1903 				 * number has to be scaled to actual CD
1904 				 * transfer size.
1905 				 * Since all the CDROM operations
1906 				 * that have Sun Labels are in the correct
1907 				 * block size this will work for CD's.	This
1908 				 * will have to change when we have different
1909 				 * sector sizes.
1910 				 *
1911 				 * if bn == lblocks,
1912 				 * Not an error, resid == count
1913 				 */
1914 				if (bn > lblocks) {
1915 					i = -1;
1916 				} else {
1917 					i = 1;
1918 				}
1919 			} else if (bp->b_bcount & (un->un_secsize-1)) {
1920 				/*
1921 				 * This should really be:
1922 				 *
1923 				 * ... if (bp->b_bcount & (un->un_lbasize-1))
1924 				 *
1925 				 */
1926 				i = -1;
1927 			} else {
1928 				if (!bp->b_bcount) {
1929 					printf("Waring : Zero read or Write\n");
1930 					goto error;
1931 				}
1932 				/*
1933 				 * sort by absolute block number.
1934 				 */
1935 				bp->b_resid = bn;
1936 				bp->b_resid += p_lblksrt;
1937 				/*
1938 				 * zero out av_back - this will be a signal
1939 				 * to dcdstart to go and fetch the resources
1940 				 */
1941 				bp->av_back = NO_PKT_ALLOCATED;
1942 			}
1943 
1944 			/*
1945 			 * Check to see whether or not we are done
1946 			 * (with or without errors).
1947 			 */
1948 
1949 			if (i != 0) {
1950 				if (i < 0) {
1951 					bp->b_flags |= B_ERROR;
1952 				}
1953 				goto error;
1954 			}
1955 		} else {
1956 			/*
1957 			 * opened in NDELAY/NONBLOCK mode?
1958 			 * Check if disk is ready and has a valid geometry
1959 			 */
1960 			if (dcd_ready_and_valid(bp->b_edev, un) == 0) {
1961 				goto validated;
1962 			} else {
1963 				dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
1964 				    "i/o to invalid geometry\n");
1965 				SET_BP_ERROR(bp, EIO);
1966 				goto error;
1967 			}
1968 		}
1969 	} else if (BP_HAS_NO_PKT(bp)) {
1970 		struct udcd_cmd *tscmdp;
1971 		struct dcd_cmd *tcmdp;
1972 		/*
1973 		 * This indicates that it is a special buffer.
1974 		 * It could be a udcd command, so call bp_mapin just
1975 		 * in case it is a PIO command being issued.
1976 		 */
1977 		tscmdp = (struct udcd_cmd *)bp->b_forw;
1978 		tcmdp = tscmdp->udcd_cmd;
1979 		if ((tcmdp->cmd != ATA_READ_DMA) && (tcmdp->cmd != 0xc9) &&
1980 		    (tcmdp->cmd != ATA_WRITE_DMA) && (tcmdp->cmd != 0xcb) &&
1981 		    (tcmdp->cmd != IDENTIFY_DMA) &&
1982 		    (tcmdp->cmd != ATA_FLUSH_CACHE)) {
1983 			bp_mapin(bp);
1984 		}
1985 	}
1986 
1987 	/*
1988 	 * We are doing it a bit non-standard. That is, the
1989 	 * head of the b_actf chain is *not* the active command;
1990 	 * it is just the head of the wait queue. The reason
1991 	 * we do this is that the head of the b_actf chain is
1992 	 * guaranteed to not be moved by disksort(), so that
1993 	 * our restart command (pointed to by
1994 	 * b_forw) and the head of the wait queue (b_actf) can
1995 	 * have resources granted without it getting lost in
1996 	 * the queue at some later point (where we would have
1997 	 * to go and look for it).
1998 	 */
1999 	mutex_enter(DCD_MUTEX);
2000 
2001 	DCD_DO_KSTATS(un, kstat_waitq_enter, bp);
2002 
2003 	dp = &un->un_utab;
2004 
2005 	if (dp->b_actf == NULL) {
2006 		dp->b_actf = bp;
2007 		dp->b_actl = bp;
2008 	} else if ((un->un_state == DCD_STATE_SUSPENDED) &&
2009 	    bp == un->un_sbufp) {
2010 		bp->b_actf = dp->b_actf;
2011 		dp->b_actf = bp;
2012 	} else {
2013 		TRACE_3(TR_FAC_DADA, TR_DCDSTRATEGY_DISKSORT_START,
2014 		    "dcdstrategy_disksort_start: dp 0x%p bp 0x%p un 0x%p",
2015 		    dp, bp, un);
2016 		disksort(dp, bp);
2017 		TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_DISKSORT_END,
2018 		    "dcdstrategy_disksort_end");
2019 	}
2020 
2021 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2022 	    "ncmd %x , throttle %x, forw 0x%p\n",
2023 	    un->un_ncmds, un->un_throttle, (void *)dp->b_forw);
2024 	ASSERT(un->un_ncmds >= 0);
2025 	ASSERT(un->un_throttle >= 0);
2026 	if ((un->un_ncmds < un->un_throttle) && (dp->b_forw == NULL)) {
2027 		dcdstart(un);
2028 	} else if (BP_HAS_NO_PKT(dp->b_actf)) {
2029 		struct buf *cmd_bp;
2030 
2031 		cmd_bp = dp->b_actf;
2032 		cmd_bp->av_back = ALLOCATING_PKT;
2033 		mutex_exit(DCD_MUTEX);
2034 		/*
2035 		 * try and map this one
2036 		 */
2037 		TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_SMALL_WINDOW_START,
2038 		    "dcdstrategy_small_window_call (begin)");
2039 
2040 		make_dcd_cmd(un, cmd_bp, NULL_FUNC);
2041 
2042 		TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_SMALL_WINDOW_END,
2043 		    "dcdstrategy_small_window_call (end)");
2044 
2045 		/*
2046 		 * there is a small window where the active cmd
2047 		 * completes before make_dcd_cmd returns.
2048 		 * consequently, this cmd never gets started so
2049 		 * we start it from here
2050 		 */
2051 		mutex_enter(DCD_MUTEX);
2052 		if ((un->un_ncmds < un->un_throttle) &&
2053 		    (dp->b_forw == NULL)) {
2054 			dcdstart(un);
2055 		}
2056 	}
2057 	mutex_exit(DCD_MUTEX);
2058 
2059 done:
2060 	TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_END, "dcdstrategy_end");
2061 	return (0);
2062 }
2063 
2064 
2065 /*
2066  * Unit start and Completion
2067  * NOTE: we assume that the caller has at least checked for:
2068  *		(un->un_ncmds < un->un_throttle)
2069  *	if not, there is no real harm done, dcd_transport() will
2070  *	return BUSY
2071  */
2072 static void
2073 dcdstart(struct dcd_disk *un)
2074 {
2075 	int status, sort_key;
2076 	struct buf *bp;
2077 	struct diskhd *dp;
2078 	uchar_t state = un->un_last_state;
2079 
2080 	TRACE_1(TR_FAC_DADA, TR_DCDSTART_START, "dcdstart_start: un 0x%p", un);
2081 
2082 retry:
2083 	ASSERT(mutex_owned(DCD_MUTEX));
2084 
2085 	dp = &un->un_utab;
2086 	if (((bp = dp->b_actf) == NULL) || (bp->av_back == ALLOCATING_PKT) ||
2087 	    (dp->b_forw != NULL)) {
2088 		TRACE_0(TR_FAC_DADA, TR_DCDSTART_NO_WORK_END,
2089 		    "dcdstart_end (no work)");
2090 		return;
2091 	}
2092 
2093 	/*
2094 	 * remove from active queue
2095 	 */
2096 	dp->b_actf = bp->b_actf;
2097 	bp->b_actf = 0;
2098 
2099 	/*
2100 	 * increment ncmds before calling dcd_transport because dcdintr
2101 	 * may be called before we return from dcd_transport!
2102 	 */
2103 	un->un_ncmds++;
2104 
2105 	/*
2106 	 * If measuring stats, mark exit from wait queue and
2107 	 * entrance into run 'queue' if and only if we are
2108 	 * going to actually start a command.
2109 	 * Normally the bp already has a packet at this point
2110 	 */
2111 	DCD_DO_KSTATS(un, kstat_waitq_to_runq, bp);
2112 
2113 	mutex_exit(DCD_MUTEX);
2114 
2115 	if (BP_HAS_NO_PKT(bp)) {
2116 		make_dcd_cmd(un, bp, dcdrunout);
2117 		if (BP_HAS_NO_PKT(bp) && !(bp->b_flags & B_ERROR)) {
2118 			mutex_enter(DCD_MUTEX);
2119 			DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
2120 
2121 			bp->b_actf = dp->b_actf;
2122 			dp->b_actf = bp;
2123 			New_state(un, DCD_STATE_RWAIT);
2124 			un->un_ncmds--;
2125 			TRACE_0(TR_FAC_DADA, TR_DCDSTART_NO_RESOURCES_END,
2126 			    "dcdstart_end (No Resources)");
2127 			goto done;
2128 
2129 		} else if (bp->b_flags & B_ERROR) {
2130 			mutex_enter(DCD_MUTEX);
2131 			DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2132 
2133 			un->un_ncmds--;
2134 			bp->b_resid = bp->b_bcount;
2135 			if (bp->b_error == 0) {
2136 				SET_BP_ERROR(bp, EIO);
2137 			}
2138 
2139 			/*
2140 			 * restore old state
2141 			 */
2142 			un->un_state = un->un_last_state;
2143 			un->un_last_state = state;
2144 
2145 			mutex_exit(DCD_MUTEX);
2146 
2147 			biodone(bp);
2148 			mutex_enter(DCD_MUTEX);
2149 			if (un->un_state == DCD_STATE_SUSPENDED) {
2150 				cv_broadcast(&un->un_disk_busy_cv);
2151 			}
2152 
2153 			if ((un->un_ncmds < un->un_throttle) &&
2154 			    (dp->b_forw == NULL)) {
2155 				goto retry;
2156 			} else {
2157 				goto done;
2158 			}
2159 		}
2160 	}
2161 
2162 	/*
2163 	 * Restore resid from the packet; b_resid had been the
2164 	 * disksort key.
2165 	 */
2166 	sort_key = bp->b_resid;
2167 	bp->b_resid = BP_PKT(bp)->pkt_resid;
2168 	BP_PKT(bp)->pkt_resid = 0;
2169 
2170 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2171 	    "bp->b_resid %lx, pkt_resid %lx\n",
2172 	    bp->b_resid, BP_PKT(bp)->pkt_resid);
2173 
2174 	/*
2175 	 * We used to check whether or not to try and link commands here.
2176 	 * Since we found no performance improvement with linked
2177 	 * commands, that check no longer made sense and was removed.
2178 	 */
2179 	if ((status = dcd_transport((struct dcd_pkt *)BP_PKT(bp)))
2180 	    != TRAN_ACCEPT) {
2181 		mutex_enter(DCD_MUTEX);
2182 		un->un_ncmds--;
2183 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2184 		    "transport returned %x\n", status);
2185 		if (status == TRAN_BUSY) {
2186 			DCD_DO_ERRSTATS(un, dcd_transerrs);
2187 			DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
2188 			dcd_handle_tran_busy(bp, dp, un);
2189 			if (un->un_ncmds > 0) {
2190 				bp->b_resid = sort_key;
2191 			}
2192 		} else {
2193 			DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2194 			mutex_exit(DCD_MUTEX);
2195 
2196 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2197 			    "transport rejected (%d)\n",
2198 			    status);
2199 			SET_BP_ERROR(bp, EIO);
2200 			bp->b_resid = bp->b_bcount;
2201 			if (bp != un->un_sbufp) {
2202 				dcd_destroy_pkt(BP_PKT(bp));
2203 			}
2204 			biodone(bp);
2205 
2206 			mutex_enter(DCD_MUTEX);
2207 			if (un->un_state == DCD_STATE_SUSPENDED) {
2208 				cv_broadcast(&un->un_disk_busy_cv);
2209 			}
2210 			if ((un->un_ncmds < un->un_throttle) &&
2211 			    (dp->b_forw == NULL)) {
2212 				goto retry;
2213 			}
2214 		}
2215 	} else {
2216 		mutex_enter(DCD_MUTEX);
2217 
2218 		if (dp->b_actf && BP_HAS_NO_PKT(dp->b_actf)) {
2219 			struct buf *cmd_bp;
2220 
2221 			cmd_bp = dp->b_actf;
2222 			cmd_bp->av_back = ALLOCATING_PKT;
2223 			mutex_exit(DCD_MUTEX);
2224 			/*
2225 			 * try and map this one
2226 			 */
2227 			TRACE_0(TR_FAC_DADA, TR_DCASTART_SMALL_WINDOW_START,
2228 			    "dcdstart_small_window_start");
2229 
2230 			make_dcd_cmd(un, cmd_bp, NULL_FUNC);
2231 
2232 			TRACE_0(TR_FAC_DADA, TR_DCDSTART_SMALL_WINDOW_END,
2233 			    "dcdstart_small_window_end");
2234 			/*
2235 			 * there is a small window where the active cmd
2236 			 * completes before make_dcd_cmd returns.
2237 			 * consequently, this cmd never gets started so
2238 			 * we start it from here
2239 			 */
2240 			mutex_enter(DCD_MUTEX);
2241 			if ((un->un_ncmds < un->un_throttle) &&
2242 			    (dp->b_forw == NULL)) {
2243 				goto retry;
2244 			}
2245 		}
2246 	}
2247 
2248 done:
2249 	ASSERT(mutex_owned(DCD_MUTEX));
2250 	TRACE_0(TR_FAC_DADA, TR_DCDSTART_END, "dcdstart_end");
2251 }
2252 
2253 /*
2254  * make_dcd_cmd: create a pkt
2255  */
2256 static void
2257 make_dcd_cmd(struct dcd_disk *un, struct buf *bp, int (*func)())
2258 {
2259 	auto int count, com, direction;
2260 	struct dcd_pkt *pkt;
2261 	int flags, tval;
2262 
2263 	_NOTE(DATA_READABLE_WITHOUT_LOCK(dcd_disk::un_dp))
2264 	TRACE_3(TR_FAC_DADA, TR_MAKE_DCD_CMD_START,
2265 	    "make_dcd_cmd_start: un 0x%p bp 0x%p un 0x%p", un, bp, un);
2266 
2267 
2268 	flags = un->un_cmd_flags;
2269 
2270 	if (bp != un->un_sbufp) {
2271 		int partition = DCDPART(bp->b_edev);
2272 		diskaddr_t p_lblksrt;
2273 		diskaddr_t lblocks;
2274 		long secnt;
2275 		uint32_t blkno;
2276 		int dkl_nblk, delta;
2277 		long resid;
2278 
2279 		if (cmlb_partinfo(un->un_dklbhandle,
2280 		    partition,
2281 		    &lblocks,
2282 		    &p_lblksrt,
2283 		    NULL,
2284 		    NULL,
2285 		    0) != 0) {
2286 			lblocks = 0;
2287 			p_lblksrt = 0;
2288 		}
2289 
2290 		dkl_nblk = (int)lblocks;
2291 
2292 		/*
2293 		 * Make sure we don't run off the end of a partition.
2294 		 *
2295 		 * Put this test here so that we can adjust b_bcount
2296 		 * to accurately reflect the actual amount we are
2297 		 * going to transfer.
2298 		 */
2299 
2300 		/*
2301 		 * First, compute partition-relative block number
2302 		 */
2303 		blkno = dkblock(bp);
2304 		secnt = (bp->b_bcount + (un->un_secsize - 1)) >> un->un_secdiv;
2305 		count = MIN(secnt, dkl_nblk - blkno);
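		/*
		 * A worked example (illustrative, assuming 512-byte
		 * sectors, i.e. un_secsize == 512 and un_secdiv == 9):
		 * a 4608-byte request (secnt == 9) starting two blocks
		 * before the end of the partition yields count == 2,
		 * so the 7 surplus sectors (3584 bytes) become the
		 * resid computed below.
		 */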
2306 		if (count != secnt) {
2307 			/*
2308 			 * We have an overrun
2309 			 */
2310 			resid = (secnt - count) << un->un_secdiv;
2311 			DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2312 			    "overrun by %ld sectors\n",
2313 			    secnt - count);
2314 			bp->b_bcount -= resid;
2315 		} else {
2316 			resid = 0;
2317 		}
2318 
2319 		/*
2320 		 * Adjust block number to absolute
2321 		 */
2322 		delta = (int)p_lblksrt;
2323 		blkno += delta;
2324 
2325 		mutex_enter(DCD_MUTEX);
2326 		/*
2327 		 * This is for devices having block size different from
2328 		 * DEV_BSIZE (e.g. 2K CDROMs).
2329 		 */
2330 		if (un->un_lbasize != un->un_secsize) {
2331 			blkno >>= un->un_blknoshift;
2332 			count >>= un->un_blknoshift;
2333 		}
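		/*
		 * For instance (hypothetical 2K device): with
		 * un_lbasize == 2048 and un_secsize == 512,
		 * un_blknoshift would be 2, converting the 512-byte
		 * block number and count into 2K device blocks.
		 */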
2334 		mutex_exit(DCD_MUTEX);
2335 
2336 		TRACE_0(TR_FAC_DADA, TR_MAKE_DCD_CMD_INIT_PKT_START,
2337 		    "make_dcd_cmd_init_pkt_call (begin)");
2338 		pkt = dcd_init_pkt(ROUTE, NULL, bp,
2339 		    (uint32_t)sizeof (struct dcd_cmd),
2340 		    un->un_cmd_stat_size, PP_LEN, PKT_CONSISTENT,
2341 		    func, (caddr_t)un);
2342 		TRACE_1(TR_FAC_DADA, TR_MAKE_DCD_CMD_INIT_PKT_END,
2343 		    "make_dcd_cmd_init_pkt_call (end): pkt 0x%p", pkt);
2344 		if (!pkt) {
2345 			bp->b_bcount += resid;
2346 			bp->av_back = NO_PKT_ALLOCATED;
2347 			TRACE_0(TR_FAC_DADA,
2348 			    TR_MAKE_DCD_CMD_NO_PKT_ALLOCATED1_END,
2349 			    "make_dcd_cmd_end (NO_PKT_ALLOCATED1)");
2350 			return;
2351 		}
2352 		if (bp->b_flags & B_READ) {
2353 			if ((un->un_dp->options & DMA_SUPPORTTED) ==
2354 			    DMA_SUPPORTTED) {
2355 				com = ATA_READ_DMA;
2356 			} else {
2357 				if (un->un_dp->options & BLOCK_MODE)
2358 					com = ATA_READ_MULTIPLE;
2359 				else
2360 					com = ATA_READ;
2361 			}
2362 			direction = DATA_READ;
2363 		} else {
2364 			if ((un->un_dp->options & DMA_SUPPORTTED) ==
2365 			    DMA_SUPPORTTED) {
2366 				com = ATA_WRITE_DMA;
2367 			} else {
2368 				if (un->un_dp->options & BLOCK_MODE)
2369 					com = ATA_WRITE_MULTIPLE;
2370 				else
2371 					com = ATA_WRITE;
2372 			}
2373 			direction = DATA_WRITE;
2374 		}
2375 
2376 		/*
2377 		 * Save the resid in the packet, temporarily until
2378 		 * we transport the command.
2379 		 */
2380 		pkt->pkt_resid = resid;
2381 
2382 		makecommand(pkt, flags, com, blkno, ADD_LBA_MODE,
2383 		    bp->b_bcount, direction, 0);
2384 		tval = dcd_io_time;
2385 	} else {
2386 
2387 		struct udcd_cmd *scmd = (struct udcd_cmd *)bp->b_forw;
2388 
2389 		/*
2390 		 * set options
2391 		 */
2392 		if ((scmd->udcd_flags & UDCD_SILENT) && !(DEBUGGING)) {
2393 			flags |= FLAG_SILENT;
2394 		}
2395 		if (scmd->udcd_flags &  UDCD_DIAGNOSE)
2396 			flags |= FLAG_DIAGNOSE;
2397 
2398 		if (scmd->udcd_flags & UDCD_NOINTR)
2399 			flags |= FLAG_NOINTR;
2400 
2401 		pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
2402 		    (bp->b_bcount)? bp: NULL,
2403 		    (uint32_t)sizeof (struct dcd_cmd),
2404 		    2, PP_LEN, PKT_CONSISTENT, func, (caddr_t)un);
2405 
2406 		if (!pkt) {
2407 			bp->av_back = NO_PKT_ALLOCATED;
2408 			return;
2409 		}
2410 
2411 		makecommand(pkt, 0, scmd->udcd_cmd->cmd,
2412 		    scmd->udcd_cmd->sector_num.lba_num,
2413 		    scmd->udcd_cmd->address_mode,
2414 		    scmd->udcd_cmd->size,
2415 		    scmd->udcd_cmd->direction, scmd->udcd_cmd->features);
2416 
2417 		pkt->pkt_flags = flags;
2418 		if (scmd->udcd_timeout == 0)
2419 			tval = dcd_io_time;
2420 		else
2421 			tval = scmd->udcd_timeout;
2422 		/* The UDCD interface is yet to be finalized. */
2423 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2424 		    "udcd interface\n");
2425 	}
2426 
2427 	pkt->pkt_comp = dcdintr;
2428 	pkt->pkt_time = tval;
2429 	PKT_SET_BP(pkt, bp);
2430 	bp->av_back = (struct buf *)pkt;
2431 
2432 	TRACE_0(TR_FAC_DADA, TR_MAKE_DCD_CMD_END, "make_dcd_cmd_end");
2433 }
2434 
2435 /*
2436  * Command completion processing
2437  */
2438 static void
2439 dcdintr(struct dcd_pkt *pkt)
2440 {
2441 	struct dcd_disk *un;
2442 	struct buf *bp;
2443 	int action = COMMAND_DONE_ERROR; /* for CMD_CMPLT with bad status */
2444 	int status;
2445 
2446 	bp = PKT_GET_BP(pkt);
2447 	un = ddi_get_soft_state(dcd_state, DCDUNIT(bp->b_edev));
2448 
2449 	TRACE_1(TR_FAC_DADA, TR_DCDINTR_START, "dcdintr_start: un 0x%p", un);
2450 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdintr\n");
2451 
2452 	mutex_enter(DCD_MUTEX);
2453 	un->un_ncmds--;
2454 	DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2455 	ASSERT(un->un_ncmds >= 0);
2456 
2457 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2458 	    "reason %x and Status %x\n", pkt->pkt_reason, SCBP_C(pkt));
2459 
2460 	/*
2461 	 * do most common case first
2462 	 */
2463 	if ((pkt->pkt_reason == CMD_CMPLT) && (SCBP_C(pkt) == 0)) {
2464 		int com = GETATACMD((struct dcd_cmd *)pkt->pkt_cdbp);
2465 
2466 		if (un->un_state == DCD_STATE_OFFLINE) {
2467 			un->un_state = un->un_last_state;
2468 			dcd_log(DCD_DEVINFO, dcd_label, CE_NOTE,
2469 			    (const char *) diskokay);
2470 		}
2471 		/*
2472 		 * If the command is a read or a write, and we have
2473 		 * a non-zero pkt_resid, that is an error. We should
2474 		 * attempt to retry the operation if possible.
2475 		 */
2476 		action = COMMAND_DONE;
2477 		if (pkt->pkt_resid && (com == ATA_READ || com == ATA_WRITE)) {
2478 			DCD_DO_ERRSTATS(un, dcd_harderrs);
2479 			if ((int)PKT_GET_RETRY_CNT(pkt) < dcd_retry_count) {
2480 				PKT_INCR_RETRY_CNT(pkt, 1);
2481 				action = QUE_COMMAND;
2482 			} else {
2483 				/*
2484 				 * if we have exhausted retries
2485 				 * a command with a residual is in error in
2486 				 * this case.
2487 				 */
2488 				action = COMMAND_DONE_ERROR;
2489 			}
2490 			dcd_log(DCD_DEVINFO, dcd_label,
2491 			    CE_WARN, "incomplete %s- %s\n",
2492 			    (bp->b_flags & B_READ)? "read" : "write",
2493 			    (action == QUE_COMMAND)? "retrying" :
2494 			    "giving up");
2495 		}
2496 
2497 		/*
2498 		 * At this point, pkt_resid reflects how many bytes of
2499 		 * the actual command were left untransferred. Add this
2500 		 * to b_resid, i.e. the amount this driver never even
2501 		 * attempted to transfer, to get the total number of
2502 		 * bytes not transferred.
2503 		 */
2504 		if (action != QUE_COMMAND) {
2505 			bp->b_resid += pkt->pkt_resid;
2506 		}
2507 
2508 	} else if (pkt->pkt_reason != CMD_CMPLT) {
2509 		action = dcd_handle_incomplete(un, bp);
2510 	}
2511 
2512 	/*
2513 	 * If we are in the middle of syncing or dumping, we have got
2514 	 * here because dcd_transport has called us explicitly after
2515 	 * completing the command in a polled mode. We don't want to
2516 	 * have a recursive call into dcd_transport again.
2517 	 */
2518 	if (ddi_in_panic() && (action == QUE_COMMAND)) {
2519 		action = COMMAND_DONE_ERROR;
2520 	}
2521 
2522 	/*
2523 	 * Save the pkt reason; consecutive failures are not reported
2524 	 * unless fatal.
2525 	 * Do not reset last_pkt_reason when a retried command succeeds,
2526 	 * because there may be more commands coming back with the old
2527 	 * last_pkt_reason.
2528 	 */
2529 	if ((un->un_last_pkt_reason != pkt->pkt_reason) &&
2530 	    ((pkt->pkt_reason != CMD_CMPLT) ||
2531 	    (PKT_GET_RETRY_CNT(pkt) == 0))) {
2532 		un->un_last_pkt_reason = pkt->pkt_reason;
2533 	}
2534 
2535 	switch (action) {
2536 	case COMMAND_DONE_ERROR:
2537 error:
2538 		if (bp->b_resid == 0) {
2539 			bp->b_resid = bp->b_bcount;
2540 		}
2541 		if (bp->b_error == 0) {
2542 			struct	dcd_cmd *cdbp = (struct dcd_cmd *)pkt->pkt_cdbp;
2543 			if (cdbp->cmd == ATA_FLUSH_CACHE &&
2544 			    (pkt->pkt_scbp[0] & STATUS_ATA_ERR) &&
2545 			    (pkt->pkt_scbp[1] & ERR_ABORT)) {
2546 				SET_BP_ERROR(bp, ENOTSUP);
2547 				un->un_flush_not_supported = 1;
2548 			} else {
2549 				SET_BP_ERROR(bp, EIO);
2550 			}
2551 		}
2552 		bp->b_flags |= B_ERROR;
2553 		/*FALLTHROUGH*/
2554 	case COMMAND_DONE:
2555 		dcddone_and_mutex_exit(un, bp);
2556 
2557 		TRACE_0(TR_FAC_DADA, TR_DCDINTR_COMMAND_DONE_END,
2558 		    "dcdintr_end (COMMAND_DONE)");
2559 		return;
2560 
2561 	case QUE_COMMAND:
2562 		if (un->un_ncmds >= un->un_throttle) {
2563 			struct diskhd *dp = &un->un_utab;
2564 
2565 			bp->b_actf = dp->b_actf;
2566 			dp->b_actf = bp;
2567 
2568 			DCD_DO_KSTATS(un, kstat_waitq_enter, bp);
2569 
2570 			mutex_exit(DCD_MUTEX);
2571 			goto exit;
2572 		}
2573 
2574 		un->un_ncmds++;
2575 		/* reset the pkt reason again */
2576 		pkt->pkt_reason = 0;
2577 		DCD_DO_KSTATS(un, kstat_runq_enter, bp);
2578 		mutex_exit(DCD_MUTEX);
2579 		if ((status = dcd_transport(BP_PKT(bp))) != TRAN_ACCEPT) {
2580 			struct diskhd *dp = &un->un_utab;
2581 
2582 			mutex_enter(DCD_MUTEX);
2583 			un->un_ncmds--;
2584 			if (status == TRAN_BUSY) {
2585 				DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
2586 				dcd_handle_tran_busy(bp, dp, un);
2587 				mutex_exit(DCD_MUTEX);
2588 				goto exit;
2589 			}
2590 			DCD_DO_ERRSTATS(un, dcd_transerrs);
2591 			DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2592 
2593 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2594 			    "requeue of command fails (%x)\n", status);
2595 			SET_BP_ERROR(bp, EIO);
2596 			bp->b_resid = bp->b_bcount;
2597 
2598 			dcddone_and_mutex_exit(un, bp);
2599 			goto exit;
2600 		}
2601 		break;
2602 
2603 	case JUST_RETURN:
2604 	default:
2605 		DCD_DO_KSTATS(un, kstat_waitq_enter, bp);
2606 		mutex_exit(DCD_MUTEX);
2607 		break;
2608 	}
2609 
2610 exit:
2611 	TRACE_0(TR_FAC_DADA, TR_DCDINTR_END, "dcdintr_end");
2612 }
2613 
2614 
2615 /*
2616  * Done with a command.
2617  */
2618 static void
2619 dcddone_and_mutex_exit(struct dcd_disk *un, register struct buf *bp)
2620 {
2621 	struct diskhd *dp;
2622 
2623 	TRACE_1(TR_FAC_DADA, TR_DCDONE_START, "dcddone_start: un 0x%p", un);
2624 
2625 	_NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(&un->un_dcd->dcd_mutex));
2626 
2627 	dp = &un->un_utab;
2628 	if (bp == dp->b_forw) {
2629 		dp->b_forw = NULL;
2630 	}
2631 
2632 	if (un->un_stats) {
2633 		ulong_t n_done = bp->b_bcount - bp->b_resid;
2634 		if (bp->b_flags & B_READ) {
2635 			IOSP->reads++;
2636 			IOSP->nread += n_done;
2637 		} else {
2638 			IOSP->writes++;
2639 			IOSP->nwritten += n_done;
2640 		}
2641 	}
2642 	if (IO_PARTITION_STATS) {
2643 		ulong_t n_done = bp->b_bcount - bp->b_resid;
2644 		if (bp->b_flags & B_READ) {
2645 			IOSP_PARTITION->reads++;
2646 			IOSP_PARTITION->nread += n_done;
2647 		} else {
2648 			IOSP_PARTITION->writes++;
2649 			IOSP_PARTITION->nwritten += n_done;
2650 		}
2651 	}
2652 
2653 	/*
2654 	 * Start the next one before releasing resources on this one
2655 	 */
2656 	if (un->un_state == DCD_STATE_SUSPENDED) {
2657 		cv_broadcast(&un->un_disk_busy_cv);
2658 	} else if (dp->b_actf && (un->un_ncmds < un->un_throttle) &&
2659 	    (dp->b_forw == NULL && un->un_state != DCD_STATE_SUSPENDED)) {
2660 		dcdstart(un);
2661 	}
2662 
2663 	mutex_exit(DCD_MUTEX);
2664 
2665 	if (bp != un->un_sbufp) {
2666 		dcd_destroy_pkt(BP_PKT(bp));
2667 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2668 		    "regular done: resid %ld\n", bp->b_resid);
2669 	} else {
2670 		ASSERT(un->un_sbuf_busy);
2671 	}
2672 	TRACE_0(TR_FAC_DADA, TR_DCDDONE_BIODONE_CALL, "dcddone_biodone_call");
2673 
2674 	biodone(bp);
2675 
2676 	(void) pm_idle_component(DCD_DEVINFO, 0);
2677 
2678 	TRACE_0(TR_FAC_DADA, TR_DCDDONE_END, "dcddone end");
2679 }
2680 
2681 
2682 /*
2683  * reset the disk unless the transport layer has already
2684  * cleared the problem
2685  */
2686 #define	C1	(STAT_ATA_BUS_RESET|STAT_ATA_DEV_RESET|STAT_ATA_ABORTED)
2687 static void
2688 dcd_reset_disk(struct dcd_disk *un, struct dcd_pkt *pkt)
2689 {
2690 
2691 	if ((pkt->pkt_statistics & C1) == 0) {
2692 		mutex_exit(DCD_MUTEX);
2693 		if (!dcd_reset(ROUTE, RESET_ALL)) {
2694 			DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2695 			    "Reset failed");
2696 		}
2697 		mutex_enter(DCD_MUTEX);
2698 	}
2699 }
2700 
2701 static int
2702 dcd_handle_incomplete(struct dcd_disk *un, struct buf *bp)
2703 {
2704 	static char *fail = "ATA transport failed: reason '%s': %s\n";
2705 	static char *notresp = "disk not responding to selection\n";
2706 	int rval = COMMAND_DONE_ERROR;
2707 	int action = COMMAND_SOFT_ERROR;
2708 	struct dcd_pkt *pkt = BP_PKT(bp);
2709 	int be_chatty = (un->un_state != DCD_STATE_SUSPENDED) &&
2710 	    (bp != un->un_sbufp || !(pkt->pkt_flags & FLAG_SILENT));
2711 
2712 	ASSERT(mutex_owned(DCD_MUTEX));
2713 
2714 	switch (pkt->pkt_reason) {
2715 
2716 	case CMD_TIMEOUT:
2717 		/*
2718 		 * This indicates that the HBA has already reset the
2719 		 * device, so just indicate that the command be retried.
2720 		 */
2721 		break;
2722 
2723 	case CMD_INCOMPLETE:
2724 		action = dcd_check_error(un, bp);
2725 		DCD_DO_ERRSTATS(un, dcd_transerrs);
2726 		if (action == COMMAND_HARD_ERROR) {
2727 			(void) dcd_reset_disk(un, pkt);
2728 		}
2729 		break;
2730 
2731 	case CMD_FATAL:
2732 		/*
2733 		 * Something drastic has gone wrong
2734 		 */
2735 		break;
2736 	case CMD_DMA_DERR:
2737 	case CMD_DATA_OVR:
2738 		/* FALLTHROUGH */
2739 
2740 	default:
2741 		/*
2742 		 * the target may still be running the	command,
2743 		 * so we should try and reset that target.
2744 		 */
2745 		DCD_DO_ERRSTATS(un, dcd_transerrs);
2746 		if ((pkt->pkt_reason != CMD_RESET) &&
2747 		    (pkt->pkt_reason != CMD_ABORTED)) {
2748 			(void) dcd_reset_disk(un, pkt);
2749 		}
2750 		break;
2751 	}
2752 
2753 	/*
2754 	 * If pkt_reason is CMD_RESET/ABORTED, chances are that this pkt got
2755 	 * reset/aborted because another disk on this bus caused it.
2756 	 * The disk that caused it, should get CMD_TIMEOUT with pkt_statistics
2757 	 * of STAT_TIMEOUT/STAT_DEV_RESET
2758 	 */
2759 	if ((pkt->pkt_reason == CMD_RESET) ||(pkt->pkt_reason == CMD_ABORTED)) {
2760 		/* To be written : XXX */
2761 		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2762 		    "Command aborted\n");
2763 	}
2764 
2765 	if (bp == un->un_sbufp && (pkt->pkt_flags & FLAG_DIAGNOSE)) {
2766 		rval = COMMAND_DONE_ERROR;
2767 	} else {
2768 		if ((rval == COMMAND_DONE_ERROR) &&
2769 		    (action == COMMAND_SOFT_ERROR) &&
2770 		    ((int)PKT_GET_RETRY_CNT(pkt) < dcd_retry_count)) {
2771 			PKT_INCR_RETRY_CNT(pkt, 1);
2772 			rval = QUE_COMMAND;
2773 		}
2774 	}
2775 
2776 	if (pkt->pkt_reason == CMD_INCOMPLETE && rval == COMMAND_DONE_ERROR) {
2777 		/*
2778 		 * Looks like someone turned off this shoebox.
2779 		 */
2780 		if (un->un_state != DCD_STATE_OFFLINE) {
2781 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2782 			    (const char *) notresp);
2783 			New_state(un, DCD_STATE_OFFLINE);
2784 		}
2785 	} else if (pkt->pkt_reason == CMD_FATAL) {
2786 		/*
2787 		 * Suppressing the following message for the time being
2788 		 * dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2789 		 * (const char *) notresp);
2790 		 */
2791 		PKT_INCR_RETRY_CNT(pkt, 6);
2792 		rval = COMMAND_DONE_ERROR;
2793 		New_state(un, DCD_STATE_FATAL);
2794 	} else if (be_chatty) {
2795 		int in_panic = ddi_in_panic();
2796 		if (!in_panic || (rval == COMMAND_DONE_ERROR)) {
2797 			if (((pkt->pkt_reason != un->un_last_pkt_reason) &&
2798 			    (pkt->pkt_reason != CMD_RESET)) ||
2799 			    (rval == COMMAND_DONE_ERROR) ||
2800 			    (dcd_error_level == DCD_ERR_ALL)) {
2801 				dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2802 				    fail, dcd_rname(pkt->pkt_reason),
2803 				    (rval == COMMAND_DONE_ERROR) ?
2804 				    "giving up": "retrying command");
2805 				DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2806 				    "retrycount=%x\n",
2807 				    PKT_GET_RETRY_CNT(pkt));
2808 			}
2809 		}
2810 	}
2811 error:
2812 	return (rval);
2813 }
2814 
2815 static int
2816 dcd_check_error(struct dcd_disk *un, struct buf *bp)
2817 {
2818 	struct diskhd *dp = &un->un_utab;
2819 	struct dcd_pkt *pkt = BP_PKT(bp);
2820 	int rval = 0;
2821 	unsigned char status;
2822 	unsigned char error;
2823 
2824 	TRACE_0(TR_FAC_DADA, TR_DCD_CHECK_ERROR_START, "dcd_check_error_start");
2825 	ASSERT(mutex_owned(DCD_MUTEX));
2826 
2827 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2828 	    "Pkt: 0x%p dp: 0x%p\n", (void *)pkt, (void *)dp);
2829 
2830 	/*
2831 	 * Check the status register first; if it indicates an error,
2832 	 * then examine the error register.
2833 	 */
2834 
2835 	status = (pkt->pkt_scbp)[0];
2836 	if ((status & STATUS_ATA_DWF) == STATUS_ATA_DWF) {
2837 		/*
2838 		 * There has been a Device Fault; the reason for such an
2839 		 * error is vendor specific. The action to take is to
2840 		 * indicate the error and reset the device.
2841 		 */
2842 
2843 		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, "Device Fault\n");
2844 		rval = COMMAND_HARD_ERROR;
2845 	} else if ((status & STATUS_ATA_CORR) == STATUS_ATA_CORR) {
2846 
2847 		/*
2848 		 * The sector read or written is marginal and hence ECC
2849 		 * correction has been applied. Indicate that it should
2850 		 * be repaired; we should probably re-assign the block
2851 		 * based on the bad-block mapping.
2852 		 */
2853 
2854 		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2855 		    "Soft Error on block %x\n",
2856 		    ((struct dcd_cmd *)pkt->pkt_cdbp)->sector_num.lba_num);
2857 		rval = COMMAND_SOFT_ERROR;
2858 	} else if ((status & STATUS_ATA_ERR) == STATUS_ATA_ERR) {
2859 		error = pkt->pkt_scbp[1];
2860 
2861 		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2862 		    "Command:0x%x,Error:0x%x,Status:0x%x\n",
2863 		    GETATACMD((struct dcd_cmd *)pkt->pkt_cdbp),
2864 		    error, status);
2865 		if ((error & ERR_AMNF) == ERR_AMNF) {
2866 			/* Address mark not found */
2867 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2868 			    "Address Mark Not Found");
2869 		} else if ((error & ERR_TKONF) == ERR_TKONF) {
2870 			/* Track 0 not found */
2871 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2872 			    "Track 0 not found\n");
2873 		} else if ((error & ERR_IDNF) == ERR_IDNF) {
2874 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2875 			    "ID not found\n");
2876 		} else if ((error & ERR_UNC) == ERR_UNC) {
2877 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2878 			    "Uncorrectable data error: Block %x\n",
2879 			((struct dcd_cmd *)pkt->pkt_cdbp)->sector_num.lba_num);
2880 		} else if ((error & ERR_BBK) == ERR_BBK) {
2881 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2882 			    "Bad block detected: Block %x\n",
2883 			((struct dcd_cmd *)pkt->pkt_cdbp)->sector_num.lba_num);
2884 		} else if ((error & ERR_ABORT) == ERR_ABORT) {
2885 			/* Aborted command */
2886 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2887 			    "Aborted command\n");
2888 		}
2889 		/*
2890 		 * Return the soft error so that the command
2891 		 * will be retried.
2892 		 */
2893 		rval = COMMAND_SOFT_ERROR;
2894 	}
2895 
2896 	TRACE_0(TR_FAC_DADA, TR_DCD_CHECK_ERROR_END, "dcd_check_error_end");
2897 	return (rval);
2898 }
2899 
2900 
2901 /*
2902  *	System Crash Dump routine
2903  */
2904 
2905 #define	NDUMP_RETRIES	5
2906 
2907 static int
2908 dcddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
2909 {
2910 	struct dcd_pkt *pkt;
2911 	int i;
2912 	struct buf local, *bp;
2913 	int err;
2914 	unsigned char com;
2915 	diskaddr_t p_lblksrt;
2916 	diskaddr_t lblocks;
2917 
2918 	GET_SOFT_STATE(dev);
2919 #ifdef lint
2920 	part = part;
2921 #endif /* lint */
2922 
2923 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))
2924 
2925 	if ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL)
2926 		return (ENXIO);
2927 
2928 	if (cmlb_partinfo(un->un_dklbhandle, DCDPART(dev),
2929 	    &lblocks, &p_lblksrt, NULL, NULL, 0))
2930 		return (ENXIO);
2931 
2932 	if (blkno + nblk > lblocks) {
2933 		return (EINVAL);
2934 	}
2935 
2936 
2937 	if ((un->un_state == DCD_STATE_SUSPENDED) ||
2938 	    (un->un_state == DCD_STATE_PM_SUSPENDED)) {
2939 		if (pm_raise_power(DCD_DEVINFO, 0,
2940 		    DCD_DEVICE_ACTIVE) != DDI_SUCCESS) {
2941 			return (EIO);
2942 		}
2943 	}
2944 
2945 	/*
2946 	 * When cpr calls dcddump, we know that dad is in a
2947 	 * good state, so no bus reset is required.
2948 	 */
2949 	un->un_throttle = 0;
2950 
2951 	if ((un->un_state != DCD_STATE_SUSPENDED) &&
2952 	    (un->un_state != DCD_STATE_DUMPING)) {
2953 
2954 		New_state(un, DCD_STATE_DUMPING);
2955 
2956 		/*
2957 		 * Reset the bus. I'd like to not have to do this,
2958 		 * but this is the safest thing to do...
2959 		 */
2960 
2961 		if (dcd_reset(ROUTE, RESET_ALL) == 0) {
2962 			return (EIO);
2963 		}
2964 
2965 	}
2966 
2967 	blkno += p_lblksrt;
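	/*
	 * blkno is now absolute: the partition-relative dump block
	 * plus the partition's starting block (p_lblksrt), which is
	 * the LBA handed to makecommand() below.
	 */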
2968 
2969 	/*
2970 	 * It should be safe to call the allocator here without
2971 	 * worrying about being locked for DVMA mapping because
2972 	 * the address we're passed is already a DVMA mapping
2973 	 *
2974 	 * We are also not going to worry about semaphore ownership
2975 	 * in the dump buffer. Dumping is single threaded at present.
2976 	 */
2977 
2978 	bp = &local;
2979 	bzero((caddr_t)bp, sizeof (*bp));
2980 	bp->b_flags = B_BUSY;
2981 	bp->b_un.b_addr = addr;
2982 	bp->b_bcount = nblk << DEV_BSHIFT;
2983 	bp->b_resid = 0;
2984 
2985 	for (i = 0; i < NDUMP_RETRIES; i++) {
2986 		bp->b_flags &= ~B_ERROR;
2987 		if ((pkt = dcd_init_pkt(ROUTE, NULL, bp,
2988 		    (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
2989 		    PKT_CONSISTENT, NULL_FUNC, NULL)) != NULL) {
2990 			break;
2991 		}
2992 		if (i == 0) {
2993 			if (bp->b_flags & B_ERROR) {
2994 				dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2995 				    "no resources for dumping; "
2996 				    "error code: 0x%x, retrying",
2997 				    geterror(bp));
2998 			} else {
2999 				dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
3000 				    "no resources for dumping; retrying");
3001 			}
3002 		} else if (i != (NDUMP_RETRIES - 1)) {
3003 			if (bp->b_flags & B_ERROR) {
3004 				dcd_log(DCD_DEVINFO, dcd_label, CE_CONT, "no "
3005 				    "resources for dumping; error code: 0x%x, "
3006 				    "retrying\n", geterror(bp));
3007 			}
3008 		} else {
3009 			if (bp->b_flags & B_ERROR) {
3010 				dcd_log(DCD_DEVINFO, dcd_label, CE_CONT,
3011 				    "no resources for dumping; "
3012 				    "error code: 0x%x, retries failed, "
3013 				    "giving up.\n", geterror(bp));
3014 			} else {
3015 				dcd_log(DCD_DEVINFO, dcd_label, CE_CONT,
3016 				    "no resources for dumping; "
3017 				    "retries failed, giving up.\n");
3018 			}
3019 			return (EIO);
3020 		}
3021 		delay(10);
3022 	}
3023 	if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
3024 		com = ATA_WRITE_DMA;
3025 	} else {
3026 		if (un->un_dp->options & BLOCK_MODE)
3027 			com = ATA_WRITE_MULTIPLE;
3028 		else
3029 			com = ATA_WRITE;
3030 	}
3031 
3032 	makecommand(pkt, 0, com, blkno, ADD_LBA_MODE,
3033 	    (int)nblk * un->un_secsize, DATA_WRITE, 0);
3034 
3035 	for (err = EIO, i = 0; i < NDUMP_RETRIES && err == EIO; i++) {
3036 
3037 		if (dcd_poll(pkt) == 0) {
3038 			switch (SCBP_C(pkt)) {
3039 			case STATUS_GOOD:
3040 				if (pkt->pkt_resid == 0) {
3041 					err = 0;
3042 				}
3043 				break;
3044 			case STATUS_ATA_BUSY:
3045 				(void) dcd_reset(ROUTE, RESET_TARGET);
3046 				break;
3047 			default:
3048 				mutex_enter(DCD_MUTEX);
3049 				(void) dcd_reset_disk(un, pkt);
3050 				mutex_exit(DCD_MUTEX);
3051 				break;
3052 			}
3053 		} else if (i > NDUMP_RETRIES/2) {
3054 			(void) dcd_reset(ROUTE, RESET_ALL);
3055 		}
3056 
3057 	}
3058 	dcd_destroy_pkt(pkt);
3059 	return (err);
3060 }
3061 
3062 /*
3063  * This routine implements the ioctl calls.  It is called
3064  * from the device switch at normal priority.
3065  */
3066 /* ARGSUSED3 */
3067 static int
3068 dcdioctl(dev_t dev, int cmd, intptr_t arg, int flag,
3069 	cred_t *cred_p, int *rval_p)
3070 {
3071 	auto int32_t data[512 / (sizeof (int32_t))];
3072 	struct dk_cinfo *info;
3073 	struct dk_minfo media_info;
3074 	struct udcd_cmd *scmd;
3075 	int i, err;
3076 	enum uio_seg uioseg = 0;
3077 	enum dkio_state state = 0;
3078 #ifdef _MULTI_DATAMODEL
3079 	struct dadkio_rwcmd rwcmd;
3080 #endif
3081 	struct dadkio_rwcmd32 rwcmd32;
3082 	struct dcd_cmd dcdcmd;
3083 
3084 	GET_SOFT_STATE(dev);
3085 #ifdef lint
3086 	part = part;
3087 	state = state;
3088 	uioseg = uioseg;
3089 #endif  /* lint */
3090 
3091 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3092 	    "dcd_ioctl : cmd %x, arg %lx\n", cmd, arg);
3093 
3094 	bzero((caddr_t)data, sizeof (data));
3095 
3096 	switch (cmd) {
3097 
3098 #ifdef DCDDEBUG
3099 /*
3100  * The following ioctls are for testing RESET/ABORT handling.
3101  */
3102 #define	DKIOCRESET	(DKIOC|14)
3103 #define	DKIOCABORT	(DKIOC|15)
3104 
3105 	case DKIOCRESET:
3106 		if (ddi_copyin((caddr_t)arg, (caddr_t)data, 4, flag))
3107 			return (EFAULT);
3108 		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3109 		    "DKIOCRESET: data = 0x%x\n", data[0]);
3110 		if (dcd_reset(ROUTE, data[0])) {
3111 			return (0);
3112 		} else {
3113 			return (EIO);
3114 		}
3115 	case DKIOCABORT:
3116 		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3117 		    "DKIOCABORT:\n");
3118 		if (dcd_abort(ROUTE, (struct dcd_pkt *)0)) {
3119 			return (0);
3120 		} else {
3121 			return (EIO);
3122 		}
3123 #endif
3124 
3125 	case DKIOCINFO:
3126 		/*
3127 		 * Controller Information
3128 		 */
3129 		info = (struct dk_cinfo *)data;
3130 
3131 		mutex_enter(DCD_MUTEX);
3132 		switch (un->un_dp->ctype) {
3133 		default:
3134 			info->dki_ctype = DKC_DIRECT;
3135 			break;
3136 		}
3137 		mutex_exit(DCD_MUTEX);
3138 		info->dki_cnum = ddi_get_instance(ddi_get_parent(DCD_DEVINFO));
3139 		(void) strcpy(info->dki_cname,
3140 		    ddi_get_name(ddi_get_parent(DCD_DEVINFO)));
3141 		/*
3142 		 * Unit Information
3143 		 */
3144 		info->dki_unit = ddi_get_instance(DCD_DEVINFO);
3145 		info->dki_slave = (Tgt(DCD_DCD_DEVP)<<3);
3146 		(void) strcpy(info->dki_dname, ddi_driver_name(DCD_DEVINFO));
3147 		info->dki_flags = DKI_FMTVOL;
3148 		info->dki_partition = DCDPART(dev);
3149 
3150 		/*
3151 		 * Max Transfer size of this device in blocks
3152 		 */
3153 		info->dki_maxtransfer = un->un_max_xfer_size / DEV_BSIZE;
3154 
3155 		/*
3156 		 * We can't get from here to there yet
3157 		 */
3158 		info->dki_addr = 0;
3159 		info->dki_space = 0;
3160 		info->dki_prio = 0;
3161 		info->dki_vec = 0;
3162 
3163 		i = sizeof (struct dk_cinfo);
3164 		if (ddi_copyout((caddr_t)data, (caddr_t)arg, i, flag))
3165 			return (EFAULT);
3166 		else
3167 			return (0);
3168 
3169 	case DKIOCGMEDIAINFO:
3170 		/*
3171 		 * Since the dad target driver is used only for IDE disks,
3172 		 * the media type can be hardcoded to DK_FIXED_DISK.
3173 		 */
3174 		media_info.dki_media_type = DK_FIXED_DISK;
3175 
3176 		mutex_enter(DCD_MUTEX);
3177 		media_info.dki_lbsize = un->un_lbasize;
3178 		media_info.dki_capacity = un->un_diskcapacity;
3179 		mutex_exit(DCD_MUTEX);
3180 
3181 		if (ddi_copyout(&media_info, (caddr_t)arg,
3182 		    sizeof (struct dk_minfo), flag))
3183 			return (EFAULT);
3184 		else
3185 			return (0);
3186 
3187 	case DKIOCGGEOM:
3188 	case DKIOCGVTOC:
3189 	case DKIOCGETEFI:
3190 
3191 		mutex_enter(DCD_MUTEX);
3192 		if (un->un_ncmds == 0) {
3193 			if ((err = dcd_unit_ready(dev)) != 0) {
3194 				mutex_exit(DCD_MUTEX);
3195 				return (err);
3196 			}
3197 		}
3198 
3199 		mutex_exit(DCD_MUTEX);
3200 		err = cmlb_ioctl(un->un_dklbhandle, dev, cmd,
3201 		    arg, flag, cred_p, rval_p, 0);
3202 		return (err);
3203 
3204 	case DKIOCGAPART:
3205 	case DKIOCSAPART:
3206 	case DKIOCSGEOM:
3207 	case DKIOCSVTOC:
3208 	case DKIOCSETEFI:
3209 	case DKIOCPARTITION:
3210 	case DKIOCPARTINFO:
3211 	case DKIOCGMBOOT:
3212 	case DKIOCSMBOOT:
3213 
3214 		err = cmlb_ioctl(un->un_dklbhandle, dev, cmd,
3215 		    arg, flag, cred_p, rval_p, 0);
3216 		return (err);
3217 
3218 	case DIOCTL_RWCMD:
3219 		if (drv_priv(cred_p) != 0) {
3220 			return (EPERM);
3221 		}
3222 
3223 #ifdef _MULTI_DATAMODEL
3224 		switch (ddi_model_convert_from(flag & FMODELS)) {
3225 		case DDI_MODEL_NONE:
3226 			if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd,
3227 			    sizeof (struct dadkio_rwcmd), flag)) {
3228 				return (EFAULT);
3229 			}
3230 			rwcmd32.cmd = rwcmd.cmd;
3231 			rwcmd32.flags = rwcmd.flags;
3232 			rwcmd32.blkaddr = rwcmd.blkaddr;
3233 			rwcmd32.buflen = rwcmd.buflen;
3234 			rwcmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmd.bufaddr;
3235 			break;
3236 		case DDI_MODEL_ILP32:
3237 			if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd32,
3238 			    sizeof (struct dadkio_rwcmd32), flag)) {
3239 				return (EFAULT);
3240 			}
3241 			break;
3242 		}
3243 #else
3244 		if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd32,
3245 		    sizeof (struct dadkio_rwcmd32), flag)) {
3246 			return (EFAULT);
3247 		}
3248 #endif
3249 		mutex_enter(DCD_MUTEX);
3250 
3251 		uioseg  = UIO_SYSSPACE;
3252 		scmd = (struct udcd_cmd *)data;
3253 		scmd->udcd_cmd = &dcdcmd;
3254 		/*
3255 		 * Convert the dadkio_rwcmd structure to udcd_cmd so that
3256 		 * it can take the normal path to get the io done
3257 		 */
3258 		if (rwcmd32.cmd == DADKIO_RWCMD_READ) {
3259 			if ((un->un_dp->options & DMA_SUPPORTTED) ==
3260 			    DMA_SUPPORTTED)
3261 				scmd->udcd_cmd->cmd = ATA_READ_DMA;
3262 			else
3263 				scmd->udcd_cmd->cmd = ATA_READ;
3264 			scmd->udcd_cmd->address_mode = ADD_LBA_MODE;
3265 			scmd->udcd_cmd->direction = DATA_READ;
3266 			scmd->udcd_flags |= UDCD_READ|UDCD_SILENT;
3267 		} else if (rwcmd32.cmd == DADKIO_RWCMD_WRITE) {
3268 			if ((un->un_dp->options & DMA_SUPPORTTED) ==
3269 			    DMA_SUPPORTTED)
3270 				scmd->udcd_cmd->cmd = ATA_WRITE_DMA;
3271 			else
3272 				scmd->udcd_cmd->cmd = ATA_WRITE;
3273 			scmd->udcd_cmd->direction = DATA_WRITE;
3274 			scmd->udcd_flags |= UDCD_WRITE|UDCD_SILENT;
3275 		} else {
3276 			mutex_exit(DCD_MUTEX);
3277 			return (EINVAL);
3278 		}
3279 
3280 		scmd->udcd_cmd->address_mode = ADD_LBA_MODE;
3281 		scmd->udcd_cmd->features = 0;
3282 		scmd->udcd_cmd->size = rwcmd32.buflen;
3283 		scmd->udcd_cmd->sector_num.lba_num = rwcmd32.blkaddr;
3284 		scmd->udcd_bufaddr = (caddr_t)(uintptr_t)rwcmd32.bufaddr;
3285 		scmd->udcd_buflen = rwcmd32.buflen;
3286 		scmd->udcd_timeout = (ushort_t)dcd_io_time;
3287 		scmd->udcd_resid = 0ULL;
3288 		scmd->udcd_status = 0;
3289 		scmd->udcd_error_reg = 0;
3290 		scmd->udcd_status_reg = 0;
3291 
3292 		mutex_exit(DCD_MUTEX);
3293 
3294 		i = dcdioctl_cmd(dev, scmd, UIO_SYSSPACE, UIO_USERSPACE);
3295 		mutex_enter(DCD_MUTEX);
3296 		/*
3297 		 * After return convert the status from scmd to
3298 		 * dadkio_status
3299 		 */
3300 		(void) dcd_translate(&(rwcmd32.status), scmd);
3301 		rwcmd32.status.resid = scmd->udcd_resid;
3302 		mutex_exit(DCD_MUTEX);
3303 
3304 #ifdef _MULTI_DATAMODEL
3305 		switch (ddi_model_convert_from(flag & FMODELS)) {
3306 		case DDI_MODEL_NONE: {
3307 			int counter;
3308 			rwcmd.status.status = rwcmd32.status.status;
3309 			rwcmd.status.resid  = rwcmd32.status.resid;
3310 			rwcmd.status.failed_blk_is_valid =
3311 			    rwcmd32.status.failed_blk_is_valid;
3312 			rwcmd.status.failed_blk = rwcmd32.status.failed_blk;
3313 			rwcmd.status.fru_code_is_valid =
3314 			    rwcmd32.status.fru_code_is_valid;
3315 			rwcmd.status.fru_code = rwcmd32.status.fru_code;
3316 			for (counter = 0;
3317 			    counter < DADKIO_ERROR_INFO_LEN; counter++)
3318 				rwcmd.status.add_error_info[counter] =
3319 				    rwcmd32.status.add_error_info[counter];
3320 			}
3321 			/* Copy out the result back to the user program */
3322 			if (ddi_copyout((caddr_t)&rwcmd, (caddr_t)arg,
3323 			    sizeof (struct dadkio_rwcmd), flag)) {
3324 				if (i != 0) {
3325 					i = EFAULT;
3326 				}
3327 			}
3328 			break;
3329 		case DDI_MODEL_ILP32:
3330 			/* Copy out the result back to the user program */
3331 			if (ddi_copyout((caddr_t)&rwcmd32, (caddr_t)arg,
3332 			    sizeof (struct dadkio_rwcmd32), flag)) {
3333 				if (i != 0) {
3334 					i = EFAULT;
3335 				}
3336 			}
3337 			break;
3338 		}
3339 #else
3340 		/* Copy out the result back to the user program  */
3341 		if (ddi_copyout((caddr_t)&rwcmd32, (caddr_t)arg,
3342 		    sizeof (struct dadkio_rwcmd32), flag)) {
3343 			if (i != 0)
3344 				i = EFAULT;
3345 		}
3346 #endif
3347 		return (i);
3348 
3349 	case UDCDCMD:	{
3350 #ifdef	_MULTI_DATAMODEL
3351 		/*
3352 		 * For use when a 32-bit application calls into the
3353 		 * ioctl of a 64-bit kernel.
3354 		 */
3355 		struct udcd_cmd32	udcd_cmd_32_for_64;
3356 		struct udcd_cmd32	*ucmd32 = &udcd_cmd_32_for_64;
3357 		model_t			model;
3358 #endif /* _MULTI_DATAMODEL */
3359 
3360 		if (drv_priv(cred_p) != 0) {
3361 			return (EPERM);
3362 		}
3363 
3364 		scmd = (struct udcd_cmd *)data;
3365 
3366 #ifdef _MULTI_DATAMODEL
3367 		switch (model = ddi_model_convert_from(flag & FMODELS)) {
3368 		case DDI_MODEL_ILP32:
3369 			if (ddi_copyin((caddr_t)arg, ucmd32,
3370 			    sizeof (struct udcd_cmd32), flag)) {
3371 				return (EFAULT);
3372 			}
3373 			/*
3374 			 * Convert the ILP32 udcd data from the
3375 			 * application to LP64 for internal use.
3376 			 */
3377 			udcd_cmd32toudcd_cmd(ucmd32, scmd);
3378 			break;
3379 		case DDI_MODEL_NONE:
3380 			if (ddi_copyin((caddr_t)arg, scmd, sizeof (*scmd),
3381 			    flag)) {
3382 				return (EFAULT);
3383 			}
3384 			break;
3385 		}
3386 #else /* ! _MULTI_DATAMODEL */
3387 		if (ddi_copyin((caddr_t)arg, (caddr_t)scmd,
3388 		    sizeof (*scmd), flag)) {
3389 			return (EFAULT);
3390 		}
3391 #endif /* ! _MULTI_DATAMODEL */
3392 
3393 		scmd->udcd_flags &= ~UDCD_NOINTR;
3394 		uioseg = (flag & FKIOCTL)? UIO_SYSSPACE: UIO_USERSPACE;
3395 
3396 		i = dcdioctl_cmd(dev, scmd, uioseg, uioseg);
3397 #ifdef _MULTI_DATAMODEL
3398 		switch (model) {
3399 		case DDI_MODEL_ILP32:
3400 			/*
3401 			 * Convert back to ILP32 before copyout to the
3402 			 * application
3403 			 */
3404 			udcd_cmdtoudcd_cmd32(scmd, ucmd32);
3405 			if (ddi_copyout(ucmd32, (caddr_t)arg,
3406 			    sizeof (*ucmd32), flag)) {
3407 				if (i != 0)
3408 					i = EFAULT;
3409 			}
3410 			break;
3411 		case DDI_MODEL_NONE:
3412 			if (ddi_copyout(scmd, (caddr_t)arg, sizeof (*scmd),
3413 			    flag)) {
3414 				if (i != 0)
3415 					i = EFAULT;
3416 			}
3417 			break;
3418 		}
3419 #else /* ! _MULTI_DATAMODEL */
3420 		if (ddi_copyout((caddr_t)scmd, (caddr_t)arg,
3421 		    sizeof (*scmd), flag)) {
3422 			if (i != 0)
3423 				i = EFAULT;
3424 		}
3425 #endif
3426 		return (i);
3427 	}
3428 	case DKIOCFLUSHWRITECACHE:	{
3429 		struct dk_callback *dkc = (struct dk_callback *)arg;
3430 		struct dcd_pkt *pkt;
3431 		struct buf *bp;
3432 		int is_sync = 1;
3433 
3434 		mutex_enter(DCD_MUTEX);
3435 		if (un->un_flush_not_supported ||
3436 		    ! un->un_write_cache_enabled) {
3437 			i = un->un_flush_not_supported ? ENOTSUP : 0;
3438 			mutex_exit(DCD_MUTEX);
3439 			/*
3440 			 * If a callback was requested: a callback will
3441 			 * always be done if the caller saw the
3442 			 * DKIOCFLUSHWRITECACHE ioctl return 0, and
3443 			 * never done if the caller saw the ioctl return
3444 			 * an error.
3445 			 */
3446 			if ((flag & FKIOCTL) && dkc != NULL &&
3447 			    dkc->dkc_callback != NULL) {
3448 				(*dkc->dkc_callback)(dkc->dkc_cookie, i);
3449 				/*
3450 				 * Did callback and reported error.
3451 				 * Since we did a callback, ioctl
3452 				 * should return 0.
3453 				 */
3454 				i = 0;
3455 			}
3456 			return (i);
3457 		}
3458 
3459 		/*
3460 		 * Get the special buffer
3461 		 */
3462 		while (un->un_sbuf_busy) {
3463 			cv_wait(&un->un_sbuf_cv, DCD_MUTEX);
3464 		}
3465 		un->un_sbuf_busy = 1;
3466 		bp  = un->un_sbufp;
3467 		mutex_exit(DCD_MUTEX);
3468 
3469 		pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
3470 		    NULL, (uint32_t)sizeof (struct dcd_cmd),
3471 		    2, PP_LEN, PKT_CONSISTENT, SLEEP_FUNC, (caddr_t)un);
3472 		ASSERT(pkt != NULL);
3473 
3474 		makecommand(pkt, un->un_cmd_flags | FLAG_SILENT,
3475 		    ATA_FLUSH_CACHE, 0, ADD_LBA_MODE, 0, NO_DATA_XFER, 0);
3476 
3477 		pkt->pkt_comp = dcdintr;
3478 		pkt->pkt_time = DCD_FLUSH_TIME;
3479 		PKT_SET_BP(pkt, bp);
3480 
3481 		bp->av_back = (struct buf *)pkt;
3482 		bp->b_forw = NULL;
3483 		bp->b_flags = B_BUSY;
3484 		bp->b_error = 0;
3485 		bp->b_edev = dev;
3486 		bp->b_dev = cmpdev(dev);
3487 		bp->b_bcount = 0;
3488 		bp->b_blkno = 0;
3489 		bp->b_un.b_addr = 0;
3490 		bp->b_iodone = NULL;
3491 		bp->b_list = NULL;
3492 
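		/*
		 * Asynchronous flush, for in-kernel (FKIOCTL) callers
		 * only: hand a private copy of the caller's dk_callback
		 * to dcdflushdone(), which invokes the callback and
		 * frees the copy when the FLUSH CACHE command completes.
		 */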
3493 		if ((flag & FKIOCTL) && dkc != NULL &&
3494 		    dkc->dkc_callback != NULL) {
3495 			struct dk_callback *dkc2 = (struct dk_callback *)
3496 			    kmem_zalloc(sizeof (*dkc2), KM_SLEEP);
3497 			bcopy(dkc, dkc2, sizeof (*dkc2));
3498 
3499 			bp->b_list = (struct buf *)dkc2;
3500 			bp->b_iodone = dcdflushdone;
3501 			is_sync = 0;
3502 		}
3503 
3504 		(void) dcdstrategy(bp);
3505 
3506 		i = 0;
3507 		if (is_sync) {
3508 			i = biowait(bp);
3509 			(void) dcdflushdone(bp);
3510 		}
3511 
3512 		return (i);
3513 	}
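	/*
	 * Illustrative user-level usage (a sketch, not part of the
	 * driver; the device path is hypothetical):
	 *
	 *	fd = open("/dev/rdsk/c0d0s0", O_RDONLY);
	 *	err = ioctl(fd, DKIOCFLUSHWRITECACHE, NULL);
	 *
	 * This takes the synchronous path above (arg == NULL, so no
	 * dk_callback) and fails with ENOTSUP if the drive does not
	 * implement FLUSH CACHE; the callback path is reachable only
	 * for FKIOCTL callers.
	 */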
3514 	default:
3515 		break;
3516 	}
3517 	return (ENOTTY);
3518 }
3519 
3520 
3521 static int
3522 dcdflushdone(struct buf *bp)
3523 {
3524 	struct dcd_disk *un = ddi_get_soft_state(dcd_state,
3525 	    DCDUNIT(bp->b_edev));
3526 	struct dcd_pkt *pkt = BP_PKT(bp);
3527 	struct dk_callback *dkc = (struct dk_callback *)bp->b_list;
3528 
3529 	ASSERT(un != NULL);
3530 	ASSERT(bp == un->un_sbufp);
3531 	ASSERT(pkt != NULL);
3532 
3533 	dcd_destroy_pkt(pkt);
3534 	bp->av_back = NO_PKT_ALLOCATED;
3535 
3536 	if (dkc != NULL) {
3537 		ASSERT(bp->b_iodone != NULL);
3538 		(*dkc->dkc_callback)(dkc->dkc_cookie, geterror(bp));
3539 		kmem_free(dkc, sizeof (*dkc));
3540 		bp->b_iodone = NULL;
3541 		bp->b_list = NULL;
3542 	}
3543 
3544 	/*
3545 	 * Tell anybody who cares that the buffer is now free
3546 	 */
3547 	mutex_enter(DCD_MUTEX);
3548 	un->un_sbuf_busy = 0;
3549 	cv_signal(&un->un_sbuf_cv);
3550 	mutex_exit(DCD_MUTEX);
3551 	return (0);
3552 }
3553 
3554 /*
3555  * dcdrunout:
3556  *	the callback function for resource allocation
3557  *
3558  * XXX it would be preferable for dcdrunout() to scan the whole
3559  *	list for possible candidates for dcdstart(); this would
3560  *	avoid retrying, again and again, a bp at the head of the
3561  *	list whose request cannot be satisfied
3562  */
3563 /*ARGSUSED*/
3564 static int
3565 dcdrunout(caddr_t arg)
3566 {
3567 	int serviced;
3568 	struct dcd_disk *un;
3569 	struct diskhd *dp;
3570 
3571 	TRACE_1(TR_FAC_DADA, TR_DCDRUNOUT_START, "dcdrunout_start: arg 0x%p",
3572 	    arg);
3573 	serviced = 1;
3574 
3575 	un = (struct dcd_disk *)arg;
3576 	dp = &un->un_utab;
3577 
3578 	/*
3579 	 * We now support passing a structure to the callback
3580 	 * routine.
3581 	 */
3582 	ASSERT(un != NULL);
3583 	mutex_enter(DCD_MUTEX);
3584 	if ((un->un_ncmds < un->un_throttle) && (dp->b_forw == NULL)) {
3585 		dcdstart(un);
3586 	}
3587 	if (un->un_state == DCD_STATE_RWAIT) {
3588 		serviced = 0;
3589 	}
3590 	mutex_exit(DCD_MUTEX);
3591 	TRACE_1(TR_FAC_DADA, TR_DCDRUNOUT_END,
3592 	    "dcdrunout_end: serviced %d", serviced);
3593 	return (serviced);
3594 }
3595 
3596 
3597 /*
3598  * This routine called to see whether unit is (still) there. Must not
3599  * This routine is called to see whether the unit is (still) there.
3600  * It must not be called when un->un_sbufp is in use, nor with an
3601  * unattached disk. The soft state of the disk is restored to what
3602  * it was upon entry; it is up to the caller to set the correct state.
3603  * We enter with the disk mutex held.
3604  */
3605 
3606 /* ARGSUSED0 */
3607 static int
3608 dcd_unit_ready(dev_t dev)
3609 {
3610 #ifndef lint
3611 	auto struct udcd_cmd dcmd, *com = &dcmd;
3612 	auto struct dcd_cmd cmdblk;
3613 #endif
3614 	int error;
3615 #ifndef lint
3616 	GET_SOFT_STATE(dev);
3617 #endif
3618 
3619 	/*
3620 	 * Now that we protect the special buffer with
3621 	 * a mutex, we could probably do a mutex_tryenter
3622 	 * on it here and return failure if it were held...
3623 	 */
3624 
3625 	error = 0;
3626 	return (error);
3627 }
3628 
3629 /* ARGSUSED0 */
3630 int
3631 dcdioctl_cmd(dev_t devp, struct udcd_cmd *in, enum uio_seg cdbspace,
3632     enum uio_seg dataspace)
3633 {
3634 
3635 	struct buf *bp;
3636 	struct	udcd_cmd *scmd;
3637 	struct dcd_pkt *pkt;
3638 	int	err, rw;
3639 	caddr_t	cdb;
3640 	int	flags = 0;
3641 
3642 	GET_SOFT_STATE(devp);
3643 
3644 #ifdef lint
3645 	part = part;
3646 #endif
3647 
3648 	/*
3649 	 * Is this a request to reset the bus?
3650 	 * If so, we need to do the reset.
3651 	 */
3652 
3653 	if (in->udcd_flags & UDCD_RESET) {
3654 		int flag = RESET_TARGET;
3655 		err = dcd_reset(ROUTE, flag) ? 0 : EIO;
3656 		return (err);
3657 	}
3658 
3659 	scmd = in;
3660 
3661 
3662 	/* Do some sanity checks */
3663 	if (scmd->udcd_buflen <= 0) {
3664 		if (scmd->udcd_flags & (UDCD_READ | UDCD_WRITE)) {
3665 			return (EINVAL);
3666 		} else {
3667 			scmd->udcd_buflen = 0;
3668 		}
3669 	}
3670 
3671 	/* Make a copy of the dcd_cmd passed  */
3672 	cdb = kmem_zalloc(sizeof (struct dcd_cmd), KM_SLEEP);
3673 	if (cdbspace == UIO_SYSSPACE) {
3674 		flags |= FKIOCTL;
3675 	}
3676 
3677 	if (ddi_copyin((void *)scmd->udcd_cmd, cdb, sizeof (struct dcd_cmd),
3678 	    flags)) {
3679 		kmem_free(cdb, sizeof (struct dcd_cmd));
3680 		return (EFAULT);
3681 	}
3682 	scmd = (struct udcd_cmd *)kmem_alloc(sizeof (*scmd), KM_SLEEP);
3683 	bcopy((caddr_t)in, (caddr_t)scmd, sizeof (*scmd));
3684 	scmd->udcd_cmd = (struct dcd_cmd *)cdb;
3685 	rw = (scmd->udcd_flags & UDCD_READ) ? B_READ: B_WRITE;
3686 
3687 
3688 	/*
3689 	 * Get the special buffer
3690 	 */
3691 
3692 	mutex_enter(DCD_MUTEX);
3693 	while (un->un_sbuf_busy) {
3694 		if (cv_wait_sig(&un->un_sbuf_cv, DCD_MUTEX) == 0) {
3695 			kmem_free(scmd->udcd_cmd, sizeof (struct dcd_cmd));
3696 			kmem_free((caddr_t)scmd, sizeof (*scmd));
3697 			mutex_exit(DCD_MUTEX);
3698 			return (EINTR);
3699 		}
3700 	}
3701 
3702 	un->un_sbuf_busy = 1;
3703 	bp  = un->un_sbufp;
3704 	mutex_exit(DCD_MUTEX);
3705 
3706 
3707 	/*
3708 	 * If we are going to do actual I/O, let physio do all the
3709 	 * work.
3710 	 */
3711 	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3712 	    "dcdioctl_cmd : buflen %x\n", scmd->udcd_buflen);
3713 
3714 	if (scmd->udcd_buflen) {
3715 		auto struct iovec aiov;
3716 		auto struct uio auio;
3717 		struct uio *uio = &auio;
3718 
3719 		bzero((caddr_t)&auio, sizeof (struct uio));
3720 		bzero((caddr_t)&aiov, sizeof (struct iovec));
3721 
3722 		aiov.iov_base = scmd->udcd_bufaddr;
3723 		aiov.iov_len = scmd->udcd_buflen;
3724 
3725 		uio->uio_iov = &aiov;
3726 		uio->uio_iovcnt = 1;
3727 		uio->uio_resid = scmd->udcd_buflen;
3728 		uio->uio_segflg = dataspace;
3729 
3730 		/*
3731 		 * Let physio do the rest...
3732 		 */
3733 		bp->av_back = NO_PKT_ALLOCATED;
3734 		bp->b_forw = (struct buf *)scmd;
3735 		err = physio(dcdstrategy, bp, devp, rw, dcdudcdmin, uio);
3736 	} else {
3737 		/*
3738 		 * We have to mimic what physio would do here.
3739 		 */
3740 		bp->av_back = NO_PKT_ALLOCATED;
3741 		bp->b_forw = (struct buf *)scmd;
3742 		bp->b_flags = B_BUSY | rw;
3743 		bp->b_edev = devp;
3744 		bp->b_dev = cmpdev(devp);
3745 		bp->b_bcount = bp->b_blkno = 0;
3746 		(void) dcdstrategy(bp);
3747 		err = biowait(bp);
3748 	}
3749 
3750 done:
3751 	if ((pkt = BP_PKT(bp)) != NULL) {
3752 		bp->av_back = NO_PKT_ALLOCATED;
3753 		/* we need to update the completion status of udcd command */
3754 		in->udcd_resid = bp->b_resid;
3755 		in->udcd_status_reg = SCBP_C(pkt);
3756 		/* XXX: we need to give error_reg also */
3757 		dcd_destroy_pkt(pkt);
3758 	}
3759 	/*
3760 	 * Tell anybody who cares that the buffer is now free
3761 	 */
3762 	mutex_enter(DCD_MUTEX);
3763 	un->un_sbuf_busy = 0;
3764 	cv_signal(&un->un_sbuf_cv);
3765 	mutex_exit(DCD_MUTEX);
3766 
3767 	kmem_free(scmd->udcd_cmd, sizeof (struct dcd_cmd));
3768 	kmem_free((caddr_t)scmd, sizeof (*scmd));
3769 	return (err);
3770 }
3771 
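/*
 * Dummy minphys routine handed to physio() for udcd commands: it
 * leaves b_bcount untouched, so the transfer is not clamped or
 * broken up on its way to dcdstrategy().
 */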
3772 static void
3773 dcdudcdmin(struct buf *bp)
3774 {
3775 
3776 #ifdef lint
3777 	bp = bp;
3778 #endif
3779 
3780 }
3781 
3782 /*
3783  * restart a cmd from timeout() context
3784  *
3785  * the cmd is expected to be in un_utab.b_forw. If this pointer is non-zero
3786  * a restart timeout request has been issued and no new timeouts should
3787  * be requested. b_forw is reset when the cmd eventually completes in
3788  * dcddone_and_mutex_exit()
3789  */
3790 void
3791 dcdrestart(void *arg)
3792 {
3793 	struct dcd_disk *un = (struct dcd_disk *)arg;
3794 	struct buf *bp;
3795 	int status;
3796 
3797 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdrestart\n");
3798 
3799 	mutex_enter(DCD_MUTEX);
3800 	bp = un->un_utab.b_forw;
3801 	if (bp) {
3802 		un->un_ncmds++;
3803 		DCD_DO_KSTATS(un, kstat_waitq_to_runq, bp);
3804 	}
3805 
3806 
3807 	if (bp) {
3808 		struct dcd_pkt *pkt = BP_PKT(bp);
3809 
3810 		mutex_exit(DCD_MUTEX);
3811 
3812 		pkt->pkt_flags = 0;
3813 
3814 		if ((status = dcd_transport(pkt)) != TRAN_ACCEPT) {
3815 			mutex_enter(DCD_MUTEX);
3816 			DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
3817 			un->un_ncmds--;
3818 			if (status == TRAN_BUSY) {
3819 				/* XXX : To be checked */
3820 				/*
3821 				 * if (un->un_throttle > 1) {
3822 				 *	ASSERT(un->un_ncmds >= 0);
3823 				 *	un->un_throttle = un->un_ncmds;
3824 				 * }
3825 				 */
3826 				un->un_reissued_timeid =
3827 				    timeout(dcdrestart, (caddr_t)un,
3828 				    DCD_BSY_TIMEOUT/500);
3829 				mutex_exit(DCD_MUTEX);
3830 				return;
3831 			}
3832 			DCD_DO_ERRSTATS(un, dcd_transerrs);
3833 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
3834 			    "dcdrestart transport failed (%x)\n", status);
3835 			bp->b_resid = bp->b_bcount;
3836 			SET_BP_ERROR(bp, EIO);
3837 
3838 			DCD_DO_KSTATS(un, kstat_waitq_exit, bp);
3839 			un->un_reissued_timeid = 0L;
3840 			dcddone_and_mutex_exit(un, bp);
3841 			return;
3842 		}
3843 		mutex_enter(DCD_MUTEX);
3844 	}
3845 	un->un_reissued_timeid = 0L;
3846 	mutex_exit(DCD_MUTEX);
3847 	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdrestart done\n");
3848 }
3849 
3850 /*
3851  * This routine gets called to reset the throttle to its saved
3852  * value whenever we lower the throttle.
3853  */
3854 void
3855 dcd_reset_throttle(caddr_t arg)
3856 {
3857 	struct dcd_disk *un = (struct dcd_disk *)arg;
3858 	struct diskhd *dp;
3859 
3860 	mutex_enter(DCD_MUTEX);
3861 	dp = &un->un_utab;
3862 
3863 	/*
3864 	 * start any commands that didn't start while throttling.
3865 	 */
3866 	if (dp->b_actf && (un->un_ncmds < un->un_throttle) &&
3867 	    (dp->b_forw == NULL)) {
3868 		dcdstart(un);
3869 	}
3870 	mutex_exit(DCD_MUTEX);
3871 }
3872 
3873 
3874 /*
3875  * This routine handles the case when TRAN_BUSY is
3876  * returned by the HBA.
3877  *
3878  * If there are already commands in the transport, the
3879  * bp can be put back on the queue and it will be
3880  * retried when the queue is drained after a command
3881  * completes. But if there is no command in the transport
3882  * and it still returns busy, we have to retry the command
3883  * after a short delay, on the order of 10ms.
3884  */
3885 /* ARGSUSED0 */
3886 static void
3887 dcd_handle_tran_busy(struct buf *bp, struct diskhd *dp, struct dcd_disk *un)
3888 {
3889 	ASSERT(mutex_owned(DCD_MUTEX));
3890 
3891 
3892 	if (dp->b_forw == NULL || dp->b_forw == bp) {
3893 		dp->b_forw = bp;
3894 	} else {
3895 		bp->b_actf = dp->b_actf;
3896 		dp->b_actf = bp;
3897 	}
3899 	if (!un->un_reissued_timeid) {
3900 		un->un_reissued_timeid =
3901 		    timeout(dcdrestart, (caddr_t)un, DCD_BSY_TIMEOUT/500);
3902 	}
3903 }
3904 
3905 static int
3906 dcd_write_deviceid(struct dcd_disk *un)
3907 {
	int	status;
3910 	diskaddr_t blk;
3911 	struct udcd_cmd ucmd;
3912 	struct dcd_cmd cdb;
3913 	struct dk_devid	*dkdevid;
3914 	uint_t *ip, chksum;
3915 	int	i;
3916 	dev_t	dev;
3917 
3918 	mutex_exit(DCD_MUTEX);
3919 	if (cmlb_get_devid_block(un->un_dklbhandle, &blk, 0)) {
3920 		mutex_enter(DCD_MUTEX);
3921 		return (EINVAL);
3922 	}
3923 	mutex_enter(DCD_MUTEX);
3924 
3925 	/* Allocate the buffer */
3926 	dkdevid = kmem_zalloc(un->un_secsize, KM_SLEEP);
3927 
3928 	/* Fill in the revision */
3929 	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
3930 	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;
3931 
3932 	/* Copy in the device id */
3933 	bcopy(un->un_devid, &dkdevid->dkd_devid,
3934 	    ddi_devid_sizeof(un->un_devid));
3935 
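	/*
	 * On-disk layout of the devid sector: two revision bytes, the
	 * device id payload, and a 4-byte XOR checksum stored in the
	 * last word of the sector (filled in by DKD_FORMCHKSUM below).
	 */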
	/* Calculate the checksum */
3937 	chksum = 0;
3938 	ip = (uint_t *)dkdevid;
3939 	for (i = 0; i < ((un->un_secsize - sizeof (int))/sizeof (int)); i++)
3940 		chksum ^= ip[i];
3941 
3942 	/* Fill in the checksum */
3943 	DKD_FORMCHKSUM(chksum, dkdevid);
3944 
3945 	(void) bzero((caddr_t)&ucmd, sizeof (ucmd));
3946 	(void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));
3947 
3948 	if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
3949 		cdb.cmd = ATA_WRITE_DMA;
3950 	} else {
3951 		if (un->un_dp->options & BLOCK_MODE)
3952 			cdb.cmd = ATA_WRITE_MULTIPLE;
3953 		else
3954 			cdb.cmd = ATA_WRITE;
3955 	}
3956 	cdb.size = un->un_secsize;
3957 	cdb.sector_num.lba_num = blk;
3958 	cdb.address_mode = ADD_LBA_MODE;
3959 	cdb.direction = DATA_WRITE;
3960 
3961 	ucmd.udcd_flags = UDCD_WRITE;
3962 	ucmd.udcd_cmd =  &cdb;
3963 	ucmd.udcd_bufaddr = (caddr_t)dkdevid;
3964 	ucmd.udcd_buflen = un->un_secsize;
3965 	ucmd.udcd_flags |= UDCD_SILENT;
3966 	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
3967 	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);
3968 	mutex_exit(DCD_MUTEX);
3969 	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);
3970 	mutex_enter(DCD_MUTEX);
3971 
3972 	kmem_free(dkdevid, un->un_secsize);
3973 	return (status);
3974 }
3975 
3976 static int
3977 dcd_read_deviceid(struct dcd_disk *un)
3978 {
3979 	int status;
3980 	diskaddr_t blk;
3981 	struct udcd_cmd ucmd;
3982 	struct dcd_cmd cdb;
3983 	struct dk_devid *dkdevid;
3984 	uint_t *ip;
3985 	int chksum;
3986 	int i, sz;
3987 	dev_t dev;
3988 
3989 	mutex_exit(DCD_MUTEX);
3990 	if (cmlb_get_devid_block(un->un_dklbhandle, &blk, 0)) {
3991 		mutex_enter(DCD_MUTEX);
3992 		return (EINVAL);
3993 	}
3994 	mutex_enter(DCD_MUTEX);
3995 
3996 	dkdevid = kmem_alloc(un->un_secsize, KM_SLEEP);
3997 
3998 	(void) bzero((caddr_t)&ucmd, sizeof (ucmd));
3999 	(void) bzero((caddr_t)&cdb, sizeof (cdb));
4000 
4001 	if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
4002 		cdb.cmd = ATA_READ_DMA;
4003 	} else {
4004 		if (un->un_dp->options & BLOCK_MODE)
4005 			cdb.cmd = ATA_READ_MULTIPLE;
4006 		else
4007 			cdb.cmd = ATA_READ;
4008 	}
4009 	cdb.size = un->un_secsize;
4010 	cdb.sector_num.lba_num = blk;
4011 	cdb.address_mode = ADD_LBA_MODE;
4012 	cdb.direction = DATA_READ;
4013 
4014 	ucmd.udcd_flags = UDCD_READ;
4015 	ucmd.udcd_cmd =  &cdb;
4016 	ucmd.udcd_bufaddr = (caddr_t)dkdevid;
4017 	ucmd.udcd_buflen = un->un_secsize;
4018 	ucmd.udcd_flags |= UDCD_SILENT;
4019 	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
4020 	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);
4021 	mutex_exit(DCD_MUTEX);
4022 	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);
4023 	mutex_enter(DCD_MUTEX);
4024 
4025 	if (status != 0) {
4026 		kmem_free((caddr_t)dkdevid, un->un_secsize);
4027 		return (status);
4028 	}
4029 
4030 	/* Validate the revision */
4031 
4032 	if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
4033 	    (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
4034 		kmem_free((caddr_t)dkdevid, un->un_secsize);
4035 		return (EINVAL);
4036 	}
4037 
4038 	/* Calculate the checksum */
4039 	chksum = 0;
4040 	ip = (uint_t *)dkdevid;
4041 	for (i = 0; i < ((un->un_secsize - sizeof (int))/sizeof (int)); i++)
4042 		chksum ^= ip[i];
4043 
4044 	/* Compare the checksums */
4045 
4046 	if (DKD_GETCHKSUM(dkdevid) != chksum) {
4047 		kmem_free((caddr_t)dkdevid, un->un_secsize);
4048 		return (EINVAL);
4049 	}
4050 
	/* Validate the device id */
4052 	if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
4053 		kmem_free((caddr_t)dkdevid, un->un_secsize);
4054 		return (EINVAL);
4055 	}
4056 
4057 	/* return a copy of the device id */
4058 	sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
4059 	un->un_devid = (ddi_devid_t)kmem_alloc(sz, KM_SLEEP);
4060 	bcopy(&dkdevid->dkd_devid, un->un_devid, sz);
4061 	kmem_free((caddr_t)dkdevid, un->un_secsize);
4062 
4063 	return (0);
4064 }
4065 
4066 /*
 * Return the device id for the device.
 * 1. If the device ID exists then just return it - nothing to do in that case.
 * 2. Build one from the drive's model number and serial number.
 * 3. If there is a problem building it from the serial/model number, then
 *    try to read it from the acyl region of the disk.
 * Note: If this function is unable to return a valid ID, the caller will
 * invoke the routine to create a fabricated ID and store it in the acyl
 * region of the disk.
4075  */
4076 static ddi_devid_t
4077 dcd_get_devid(struct dcd_disk *un)
4078 {
4079 	int		rc;
4080 
4081 	/* If already registered, return that value */
4082 	if (un->un_devid != NULL)
4083 		return (un->un_devid);
4084 
4085 	/* Build a devid from model and serial number, if present */
4086 	rc = dcd_make_devid_from_serial(un);
4087 
4088 	if (rc != DDI_SUCCESS) {
4089 		/* Read the devid from the disk. */
4090 		if (dcd_read_deviceid(un))
4091 			return (NULL);
4092 	}
4093 
4094 	(void) ddi_devid_register(DCD_DEVINFO, un->un_devid);
4095 	return (un->un_devid);
4096 }
4097 
4098 
4099 static ddi_devid_t
4100 dcd_create_devid(struct dcd_disk *un)
4101 {
4102 	if (ddi_devid_init(DCD_DEVINFO, DEVID_FAB, 0, NULL, (ddi_devid_t *)
4103 	    &un->un_devid) == DDI_FAILURE)
4104 		return (NULL);
4105 
4106 	if (dcd_write_deviceid(un)) {
4107 		ddi_devid_free(un->un_devid);
4108 		un->un_devid = NULL;
4109 		return (NULL);
4110 	}
4111 
4112 	(void) ddi_devid_register(DCD_DEVINFO, un->un_devid);
4113 	return (un->un_devid);
4114 }
4115 
4116 /*
4117  * Build a devid from the model and serial number, if present
4118  * Return DDI_SUCCESS or DDI_FAILURE.
4119  */
4120 static int
4121 dcd_make_devid_from_serial(struct dcd_disk *un)
4122 {
4123 	int	rc = DDI_SUCCESS;
4124 	char	*hwid;
4125 	char	*model;
4126 	int	model_len;
4127 	char	*serno;
4128 	int	serno_len;
4129 	int	total_len;
4130 
4131 	/* initialize the model and serial number information */
4132 	model = un->un_dcd->dcd_ident->dcd_model;
4133 	model_len = DCD_MODEL_NUMBER_LENGTH;
4134 	serno = un->un_dcd->dcd_ident->dcd_drvser;
4135 	serno_len = DCD_SERIAL_NUMBER_LENGTH;
4136 
4137 	/* Verify the model and serial number */
4138 	dcd_validate_model_serial(model, &model_len, model_len);
4139 	if (model_len == 0) {
4140 		rc = DDI_FAILURE;
4141 		goto out;
4142 	}
4143 	dcd_validate_model_serial(serno, &serno_len, serno_len);
4144 	if (serno_len == 0) {
4145 		rc = DDI_FAILURE;
4146 		goto out;
4147 	}
4148 
4149 	/*
	 * The device ID will be a concatenation of the model number,
	 * the '=' separator, and the serial number. Allocate
4152 	 * the string and concatenate the components.
4153 	 */
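	/*
	 * For example (hypothetical values): model "ST34342A" and serial
	 * "XYZ1234" produce the devid payload "ST34342A=XYZ1234".
	 */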
4154 	total_len = model_len + 1 + serno_len;
4155 	hwid = kmem_alloc(total_len, KM_SLEEP);
4156 	bcopy((caddr_t)model, (caddr_t)hwid, model_len);
4157 	bcopy((caddr_t)"=", (caddr_t)&hwid[model_len], 1);
4158 	bcopy((caddr_t)serno, (caddr_t)&hwid[model_len + 1], serno_len);
4159 
4160 	/* Initialize the device ID, trailing NULL not included */
4161 	rc = ddi_devid_init(DCD_DEVINFO, DEVID_ATA_SERIAL, total_len,
4162 	    hwid, (ddi_devid_t *)&un->un_devid);
4163 
4164 	/* Free the allocated string */
4165 	kmem_free(hwid, total_len);
4166 
4167 out:	return (rc);
4168 }
4169 
4170 /*
4171  * Test for a valid model or serial number. Assume that a valid representation
 * contains at least one character that is neither a space, a '0' digit, nor
 * a NUL. Trim trailing blanks and NULs from the returned length.
4174  */
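/*
 * For example (hypothetical values): "ABC 12  " yields a length of 6
 * (the two trailing blanks are trimmed), while "0000    " yields 0.
 */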
4175 static void
4176 dcd_validate_model_serial(char *str, int *retlen, int totallen)
4177 {
4178 	char		ch;
4179 	boolean_t	ret = B_FALSE;
4180 	int		i;
4181 	int		tb;
4182 
4183 	for (i = 0, tb = 0; i < totallen; i++) {
4184 		ch = *str++;
4185 		if ((ch != ' ') && (ch != '\0') && (ch != '0'))
4186 			ret = B_TRUE;
4187 		if ((ch == ' ') || (ch == '\0'))
4188 			tb++;
4189 		else
4190 			tb = 0;
4191 	}
4192 
4193 	if (ret == B_TRUE) {
		/* At least one non-'0', non-blank character. */
4195 		*retlen = totallen - tb;
4196 	} else {
4197 		*retlen = 0;
4198 	}
4199 }
4200 
4201 #ifndef lint
4202 void
4203 clean_print(dev_info_t *dev, char *label, uint_t level,
4204 	char *title, char *data, int len)
4205 {
4206 	int	i;
4207 	char	buf[256];
4208 
4209 	(void) sprintf(buf, "%s:", title);
4210 	for (i = 0; i < len; i++) {
4211 		(void) sprintf(&buf[strlen(buf)], "0x%x ", (data[i] & 0xff));
4212 	}
4213 	(void) sprintf(&buf[strlen(buf)], "\n");
4214 
4215 	dcd_log(dev, label, level, "%s", buf);
4216 }
4217 #endif /* Not lint */
4218 
4219 #ifndef lint
4220 /*
 * Print a piece of inquiry data, cleaned up for non-printable characters,
 * stopping at the first space character after the beginning of the
 * passed string.
4224  */
4225 
4226 void
4227 inq_fill(char *p, int l, char *s)
4228 {
4229 	unsigned i = 0;
4230 	char c;
4231 
4232 	while (i++ < l) {
4233 		if ((c = *p++) < ' ' || c >= 0177) {
4234 			c = '*';
4235 		} else if (i != 1 && c == ' ') {
4236 			break;
4237 		}
4238 		*s++ = c;
4239 	}
4240 	*s++ = 0;
4241 }
4242 #endif /* Not lint */
4243 
4244 char *
4245 dcd_sname(uchar_t status)
4246 {
4247 	switch (status & STATUS_ATA_MASK) {
4248 	case STATUS_GOOD:
4249 		return ("good status");
4250 
4251 	case STATUS_ATA_BUSY:
4252 		return ("busy");
4253 
4254 	default:
4255 		return ("<unknown status>");
4256 	}
4257 }
4258 
4259 /* ARGSUSED0 */
4260 char *
4261 dcd_rname(int reason)
4262 {
4263 	static char *rnames[] = {
4264 		"cmplt",
4265 		"incomplete",
4266 		"dma_derr",
4267 		"tran_err",
4268 		"reset",
4269 		"aborted",
4270 		"timeout",
4271 		"data_ovr",
4272 	};
4273 	if (reason > CMD_DATA_OVR) {
4274 		return ("<unknown reason>");
4275 	} else {
4276 		return (rnames[reason]);
4277 	}
4278 }
4279 
4280 
4281 
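/*
 * Check whether the device is write protected. ATA disks have no
 * write-protect status to report here, so this always returns 0
 * (writable).
 */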
4282 /* ARGSUSED0 */
4283 int
4284 dcd_check_wp(dev_t dev)
4285 {
4286 
4287 	return (0);
4288 }
4289 
4290 /*
4291  * Create device error kstats
4292  */
4293 static int
4294 dcd_create_errstats(struct dcd_disk *un, int instance)
4295 {
4296 
4297 	char kstatname[KSTAT_STRLEN];
4298 
4299 	if (un->un_errstats == (kstat_t *)0) {
4300 		(void) sprintf(kstatname, "dad%d,error", instance);
4301 		un->un_errstats = kstat_create("daderror", instance, kstatname,
4302 		    "device_error", KSTAT_TYPE_NAMED,
4303 		    sizeof (struct dcd_errstats)/ sizeof (kstat_named_t),
4304 		    KSTAT_FLAG_PERSISTENT);
4305 
4306 		if (un->un_errstats) {
4307 			struct dcd_errstats *dtp;
4308 
4309 			dtp = (struct dcd_errstats *)un->un_errstats->ks_data;
4310 			kstat_named_init(&dtp->dcd_softerrs, "Soft Errors",
4311 			    KSTAT_DATA_UINT32);
4312 			kstat_named_init(&dtp->dcd_harderrs, "Hard Errors",
4313 			    KSTAT_DATA_UINT32);
4314 			kstat_named_init(&dtp->dcd_transerrs,
4315 			    "Transport Errors", KSTAT_DATA_UINT32);
4316 			kstat_named_init(&dtp->dcd_model, "Model",
4317 			    KSTAT_DATA_CHAR);
4318 			kstat_named_init(&dtp->dcd_revision, "Revision",
4319 			    KSTAT_DATA_CHAR);
4320 			kstat_named_init(&dtp->dcd_serial, "Serial No",
4321 			    KSTAT_DATA_CHAR);
4322 			kstat_named_init(&dtp->dcd_capacity, "Size",
4323 			    KSTAT_DATA_ULONGLONG);
4324 			kstat_named_init(&dtp->dcd_rq_media_err, "Media Error",
4325 			    KSTAT_DATA_UINT32);
4326 			kstat_named_init(&dtp->dcd_rq_ntrdy_err,
4327 			    "Device Not Ready", KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_rq_nodev_err, "No Device",
4329 			    KSTAT_DATA_UINT32);
4330 			kstat_named_init(&dtp->dcd_rq_recov_err, "Recoverable",
4331 			    KSTAT_DATA_UINT32);
4332 			kstat_named_init(&dtp->dcd_rq_illrq_err,
4333 			    "Illegal Request", KSTAT_DATA_UINT32);
4334 
4335 			un->un_errstats->ks_private = un;
4336 			un->un_errstats->ks_update = nulldev;
4337 			kstat_install(un->un_errstats);
4338 
4339 			(void) strncpy(&dtp->dcd_model.value.c[0],
4340 			    un->un_dcd->dcd_ident->dcd_model, 16);
4341 			(void) strncpy(&dtp->dcd_serial.value.c[0],
4342 			    un->un_dcd->dcd_ident->dcd_drvser, 16);
4343 			(void) strncpy(&dtp->dcd_revision.value.c[0],
4344 			    un->un_dcd->dcd_ident->dcd_fw, 8);
4345 			dtp->dcd_capacity.value.ui64 =
4346 			    (uint64_t)((uint64_t)un->un_diskcapacity *
4347 			    (uint64_t)un->un_lbasize);
4348 		}
4349 	}
4350 	return (0);
4351 }
4352 
4353 
4354 /*
 * This has been moved from the DADA layer, as it does nothing other than
 * retry the command when the device is busy or the command does not complete.
4357  */
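/*
 * The packet is sent with FLAG_NOINTR set, so dcd_transport() is
 * expected to complete the command synchronously (polled, with no
 * completion interrupt) before returning.
 */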
4358 int
4359 dcd_poll(struct dcd_pkt *pkt)
4360 {
4361 	int	busy_count, rval = -1, savef;
4362 	clock_t	savet;
4363 	void	(*savec)();
4364 
4365 
4366 	/*
4367 	 * Save old flags
4368 	 */
4369 	savef = pkt->pkt_flags;
4370 	savec = pkt->pkt_comp;
4371 	savet = pkt->pkt_time;
4372 
4373 	pkt->pkt_flags |= FLAG_NOINTR;
4374 
4375 
4376 	/*
4377 	 * Set the Pkt_comp to NULL
4378 	 */
4379 
4380 	pkt->pkt_comp = 0;
4381 
4382 	/*
4383 	 * Set the Pkt time for the polled command
4384 	 */
4385 	if (pkt->pkt_time == 0) {
4386 		pkt->pkt_time = DCD_POLL_TIMEOUT;
4387 	}
4388 
4389 
4390 	/* Now transport the command */
4391 	for (busy_count = 0; busy_count < dcd_poll_busycnt; busy_count++) {
4392 		if ((rval = dcd_transport(pkt)) == TRAN_ACCEPT) {
4393 			if (pkt->pkt_reason == CMD_INCOMPLETE &&
4394 			    pkt->pkt_state == 0) {
4395 				delay(100);
4396 			} else if (pkt->pkt_reason  == CMD_CMPLT) {
4397 				rval = 0;
4398 				break;
4399 			}
4400 		}
4401 		if (rval == TRAN_BUSY)  {
4402 			delay(100);
4403 			continue;
4404 		}
4405 	}
4406 
4407 	pkt->pkt_flags = savef;
4408 	pkt->pkt_comp = savec;
4409 	pkt->pkt_time = savet;
4410 	return (rval);
4411 }
4412 
4413 
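/*
 * Translate the ATA status and error register contents in a udcd_cmd
 * into the corresponding DADKIO status code.
 */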
4414 void
4415 dcd_translate(struct dadkio_status32 *statp, struct udcd_cmd *cmdp)
4416 {
4417 	if (cmdp->udcd_status_reg & STATUS_ATA_BUSY)
4418 		statp->status = DADKIO_STAT_NOT_READY;
4419 	else if (cmdp->udcd_status_reg & STATUS_ATA_DWF)
4420 		statp->status = DADKIO_STAT_HARDWARE_ERROR;
4421 	else if (cmdp->udcd_status_reg & STATUS_ATA_CORR)
4422 		statp->status = DADKIO_STAT_SOFT_ERROR;
4423 	else if (cmdp->udcd_status_reg & STATUS_ATA_ERR) {
4424 		/*
		 * The error register is valid only when BSY and DRQ are not
		 * set. It is assumed that the HBA has checked this before
		 * passing back the data.
4427 		 */
4428 		if (cmdp->udcd_error_reg & ERR_AMNF)
4429 			statp->status = DADKIO_STAT_NOT_FORMATTED;
4430 		else if (cmdp->udcd_error_reg & ERR_TKONF)
4431 			statp->status = DADKIO_STAT_NOT_FORMATTED;
4432 		else if (cmdp->udcd_error_reg & ERR_ABORT)
4433 			statp->status = DADKIO_STAT_ILLEGAL_REQUEST;
4434 		else if (cmdp->udcd_error_reg & ERR_IDNF)
4435 			statp->status = DADKIO_STAT_NOT_FORMATTED;
4436 		else if (cmdp->udcd_error_reg & ERR_UNC)
4437 			statp->status = DADKIO_STAT_BUS_ERROR;
4438 		else if (cmdp->udcd_error_reg & ERR_BBK)
4439 			statp->status = DADKIO_STAT_MEDIUM_ERROR;
4440 	} else
4441 		statp->status = DADKIO_STAT_NO_ERROR;
4442 }
4443 
4444 static void
4445 dcd_flush_cache(struct dcd_disk *un)
4446 {
4447 	struct dcd_pkt *pkt;
4448 	int retry_count;
4449 
4450 
4451 	if ((pkt = dcd_init_pkt(ROUTE, NULL, NULL,
4452 	    (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
4453 	    PKT_CONSISTENT, NULL_FUNC, NULL)) == NULL) {
4454 		return;
4455 	}
4456 
4457 	makecommand(pkt, 0, ATA_FLUSH_CACHE, 0, ADD_LBA_MODE, 0,
4458 	    NO_DATA_XFER, 0);
4459 
4460 	/*
	 * Send the command. It may fail on some disks, since FLUSH CACHE
	 * is not a mandatory command per ATA-4. Retry up to 3 times if it
	 * fails; the retry count was chosen arbitrarily. The retry is
	 * needed because, per the spec, FLUSH CACHE can fail as a result
	 * of an unrecoverable error encountered while writing data, and a
	 * subsequent command should continue flushing the cache.
4468 	 */
4469 	for (retry_count = 0; retry_count < 3; retry_count++) {
4470 		/*
4471 		 * Set the packet fields.
4472 		 */
4473 		pkt->pkt_comp = 0;
4474 		pkt->pkt_time = DCD_POLL_TIMEOUT;
4475 		pkt->pkt_flags |= FLAG_FORCENOINTR;
4476 		pkt->pkt_flags |= FLAG_NOINTR;
4477 		if (dcd_transport(pkt) == TRAN_ACCEPT) {
4478 			if (pkt->pkt_reason  == CMD_CMPLT) {
4479 				break;
4480 			}
4481 		}
4482 		/*
		 * Note: the one-second wait below matches the delay(100)
		 * used in the dcd_poll routine (100 ticks, i.e. one second
		 * at the default 100Hz clock).
4485 		 */
4486 		drv_usecwait(1000000);
4487 	}
4488 	(void) dcd_destroy_pkt(pkt);
4489 }
4490 
4491 static int
4492 dcd_send_lb_rw_cmd(dev_info_t *devi, void *bufaddr,
4493     diskaddr_t start_block, size_t reqlength, uchar_t cmd)
4494 {
4495 	struct dcd_pkt *pkt;
4496 	struct buf *bp;
4497 	diskaddr_t real_addr = start_block;
4498 	size_t buffer_size = reqlength;
4499 	uchar_t command, tmp;
4500 	int i, rval = 0;
4501 	struct dcd_disk *un;
4502 
4503 	un = ddi_get_soft_state(dcd_state, ddi_get_instance(devi));
4504 	if (un == NULL)
4505 		return (ENXIO);
4506 
4507 	bp = dcd_alloc_consistent_buf(ROUTE, (struct buf *)NULL,
4508 	    buffer_size, B_READ, NULL_FUNC, NULL);
4509 	if (!bp) {
4510 		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
4511 		    "no bp for disk label\n");
4512 		return (ENOMEM);
4513 	}
4514 
4515 	pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
4516 	    bp, (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
4517 	    PKT_CONSISTENT, NULL_FUNC, NULL);
4518 
4519 	if (!pkt) {
4520 		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
4521 		    "no memory for disk label\n");
4522 		dcd_free_consistent_buf(bp);
4523 		return (ENOMEM);
4524 	}
4525 
4526 	if (cmd == TG_READ) {
4527 		bzero(bp->b_un.b_addr, buffer_size);
4528 		tmp = DATA_READ;
4529 	} else {
4530 		bcopy((caddr_t)bufaddr, bp->b_un.b_addr, buffer_size);
4531 		tmp = DATA_WRITE;
4532 	}
4533 
4534 	mutex_enter(DCD_MUTEX);
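	/*
	 * Select the ATA opcode: DMA-capable drives use the DMA
	 * variants; otherwise the block-mode (MULTIPLE) or single-sector
	 * opcodes are used.
	 */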
4535 	if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
4536 		if (cmd == TG_READ) {
4537 			command = ATA_READ_DMA;
4538 		} else {
4539 			command = ATA_WRITE_DMA;
4540 		}
4541 	} else {
4542 		if (cmd == TG_READ) {
4543 			if (un->un_dp->options & BLOCK_MODE)
4544 				command = ATA_READ_MULTIPLE;
4545 			else
4546 				command = ATA_READ;
4547 		} else {
4548 			if (un->un_dp->options & BLOCK_MODE)
				command = ATA_WRITE_MULTIPLE;
4550 			else
4551 				command = ATA_WRITE;
4552 		}
4553 	}
4554 	mutex_exit(DCD_MUTEX);
4555 	(void) makecommand(pkt, 0, command, real_addr, ADD_LBA_MODE,
4556 	    buffer_size, tmp, 0);
4557 
4558 	for (i = 0; i < 3; i++) {
4559 		if (dcd_poll(pkt) || SCBP_C(pkt) != STATUS_GOOD ||
4560 		    (pkt->pkt_state & STATE_XFERRED_DATA) == 0 ||
4561 		    (pkt->pkt_resid != 0)) {
4562 			DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
4563 			    "Status %x, state %x, resid %lx\n",
4564 			    SCBP_C(pkt), pkt->pkt_state, pkt->pkt_resid);
4565 			rval = EIO;
4566 		} else {
4567 			break;
4568 		}
4569 	}
4570 
4571 	if (rval != 0) {
4572 		dcd_destroy_pkt(pkt);
4573 		dcd_free_consistent_buf(bp);
4574 		return (EIO);
4575 	}
4576 
4577 	if (cmd == TG_READ) {
4578 		bcopy(bp->b_un.b_addr, bufaddr, reqlength);
4579 		rval = 0;
4580 	}
4581 
4582 	dcd_destroy_pkt(pkt);
4583 	dcd_free_consistent_buf(bp);
4584 	return (rval);
4585 }
4586 
static int
dcd_compute_dk_capacity(struct dcd_device *devp, diskaddr_t *capacity)
4589 {
4590 	diskaddr_t cap;
4591 	diskaddr_t no_of_lbasec;
4592 
4593 	cap = devp->dcd_ident->dcd_fixcyls *
4594 	    devp->dcd_ident->dcd_heads *
4595 	    devp->dcd_ident->dcd_sectors;
4596 	no_of_lbasec = devp->dcd_ident->dcd_addrsec[1];
4597 	no_of_lbasec = no_of_lbasec << 16;
4598 	no_of_lbasec = no_of_lbasec | devp->dcd_ident->dcd_addrsec[0];
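	/*
	 * For example (hypothetical words): dcd_addrsec[1] == 0x0001 and
	 * dcd_addrsec[0] == 0x5f90 combine to 0x00015f90, i.e. 90000 LBA
	 * sectors.
	 */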
4599 
4600 	if (no_of_lbasec > cap) {
4601 		cap = no_of_lbasec;
4602 	}
4603 
4604 	if (cap != ((uint32_t)-1))
4605 		*capacity = cap;
4606 	else
4607 		return (EINVAL);
4608 	return (0);
4609 }
4610 
4611 /*ARGSUSED5*/
4612 static int
4613 dcd_lb_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
4614     diskaddr_t start_block, size_t reqlength, void *tg_cookie)
4615 {
4616 	if (cmd != TG_READ && cmd != TG_WRITE)
4617 		return (EINVAL);
4618 
4619 	return (dcd_send_lb_rw_cmd(devi, bufaddr, start_block,
4620 	    reqlength, cmd));
4621 }
4622 
4623 static int
4624 dcd_lb_getphygeom(dev_info_t *devi, cmlb_geom_t *phygeomp)
4625 {
4626 	struct dcd_device *devp;
4627 	uint32_t no_of_lbasec, capacity, calculated_cylinders;
4628 
4629 	devp = ddi_get_driver_private(devi);
4630 
4631 	if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
4632 		if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
4633 			phygeomp->g_ncyl = devp->dcd_ident->dcd_fixcyls - 2;
4634 			phygeomp->g_acyl = 2;
4635 			phygeomp->g_nhead = devp->dcd_ident->dcd_heads;
4636 			phygeomp->g_nsect = devp->dcd_ident->dcd_sectors;
4637 
4638 			no_of_lbasec = devp->dcd_ident->dcd_addrsec[1];
4639 			no_of_lbasec = no_of_lbasec << 16;
4640 			no_of_lbasec = no_of_lbasec |
4641 			    devp->dcd_ident->dcd_addrsec[0];
4642 			capacity = devp->dcd_ident->dcd_fixcyls *
4643 			    devp->dcd_ident->dcd_heads *
4644 			    devp->dcd_ident->dcd_sectors;
4645 			if (no_of_lbasec > capacity) {
4646 				capacity = no_of_lbasec;
4647 				if (capacity > NUM_SECTORS_32G) {
4648 					/*
					 * If the capacity is greater than
					 * 32GB, use 255 sectors per track.
					 * This should be good up to the 128GB
					 * disk capacity that is the current
					 * ATA-4 limitation.
4654 					 */
4655 					phygeomp->g_nsect = 255;
4656 				}
4657 
4658 				/*
				 * If the disk capacity is >= 128GB, the
				 * number of addressable sectors is reported
				 * as 0xfffffff in the IDENTIFY info. In that
				 * case set the number of pcyl to the maximum
				 * 16-bit value.
4663 				 */
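				/*
				 * For example (hypothetical): an IDENTIFY
				 * capacity of 0xfffffff sectors with 16 heads
				 * and 255 sectors/track computes to 65793
				 * cylinders, which exceeds USHRT_MAX and is
				 * clamped below.
				 */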
4664 
4665 				calculated_cylinders = (capacity) /
4666 				    (phygeomp->g_nhead * phygeomp->g_nsect);
4667 				if (calculated_cylinders >= USHRT_MAX) {
4668 					phygeomp->g_ncyl = USHRT_MAX - 2;
4669 				} else {
4670 					phygeomp->g_ncyl =
4671 					    calculated_cylinders - 2;
4672 				}
4673 			}
4674 
4675 			phygeomp->g_capacity = capacity;
4676 			phygeomp->g_intrlv = 0;
4677 			phygeomp->g_rpm = 5400;
4678 			phygeomp->g_secsize = devp->dcd_ident->dcd_secsiz;
4679 
4680 			return (0);
4681 		} else
4682 			return (ENOTSUP);
4683 	} else {
4684 		return (EINVAL);
4685 	}
4686 }
4687 
4688 
4689 /*ARGSUSED3*/
4690 static int
4691 dcd_lb_getinfo(dev_info_t *devi, int cmd,  void *arg, void *tg_cookie)
4692 {
4693 	struct dcd_disk *un;
4694 
4695 	un = ddi_get_soft_state(dcd_state, ddi_get_instance(devi));
4696 
4697 	if (un == NULL)
4698 		return (ENXIO);
4699 
4700 	switch (cmd) {
4701 	case TG_GETPHYGEOM:
4702 		return (dcd_lb_getphygeom(devi, (cmlb_geom_t *)arg));
4703 
4704 	case TG_GETVIRTGEOM:
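		/* No virtual geometry is provided for these devices. */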
4705 		return (-1);
4706 
4707 	case TG_GETCAPACITY:
4708 	case TG_GETBLOCKSIZE:
4709 		mutex_enter(DCD_MUTEX);
4710 		if (un->un_diskcapacity <= 0) {
4711 			mutex_exit(DCD_MUTEX);
4712 			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
			    "invalid disk capacity\n");
4714 			return (EIO);
4715 		}
4716 		if (cmd == TG_GETCAPACITY)
4717 			*(diskaddr_t *)arg = un->un_diskcapacity;
4718 		else
4719 			*(uint32_t *)arg = DEV_BSIZE;
4720 
		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "capacity %llx\n",
		    (unsigned long long)un->un_diskcapacity);
4723 		mutex_exit(DCD_MUTEX);
4724 		return (0);
4725 
4726 	case TG_GETATTR:
4727 		mutex_enter(DCD_MUTEX);
4728 		*(tg_attribute_t *)arg = un->un_tgattribute;
4729 		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
4730 		    "media_is_writable %x\n",
4731 		    un->un_tgattribute.media_is_writable);
4732 		mutex_exit(DCD_MUTEX);
4733 		return (0);
4734 	default:
4735 		return (ENOTTY);
4736 	}
4737 }
4738