xref: /illumos-gate/usr/src/uts/intel/io/dktp/disk/cmdk.c (revision cf327f5a61bfa78d5cf81410e439640e480f850b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/scsi/scsi.h>
28 #include <sys/dktp/cm.h>
29 #include <sys/dktp/quetypes.h>
30 #include <sys/dktp/queue.h>
31 #include <sys/dktp/fctypes.h>
32 #include <sys/dktp/flowctrl.h>
33 #include <sys/dktp/cmdev.h>
34 #include <sys/dkio.h>
35 #include <sys/dktp/tgdk.h>
36 #include <sys/dktp/dadk.h>
37 #include <sys/dktp/bbh.h>
38 #include <sys/dktp/altsctr.h>
39 #include <sys/dktp/cmdk.h>
40 
41 #include <sys/stat.h>
42 #include <sys/vtoc.h>
43 #include <sys/file.h>
44 #include <sys/dktp/dadkio.h>
45 #include <sys/aio_req.h>
46 
47 #include <sys/cmlb.h>
48 
49 /*
50  * Local Static Data
51  */
52 #ifdef CMDK_DEBUG
53 #define	DENT	0x0001
54 #define	DIO	0x0002
55 
56 static	int	cmdk_debug = DIO;
57 #endif
58 
59 #ifndef	TRUE
60 #define	TRUE	1
61 #endif
62 
63 #ifndef	FALSE
64 #define	FALSE	0
65 #endif
66 
67 /*
68  * NDKMAP is the base number for accessing the fdisk partitions.
69  * c?d?p0 --> cmdk@?,?:q
70  */
71 #define	PARTITION0_INDEX	(NDKMAP + 0)
72 
73 #define	DKTP_DATA		(dkp->dk_tgobjp)->tg_data
74 #define	DKTP_EXT		(dkp->dk_tgobjp)->tg_ext
75 
76 static void *cmdk_state;
77 
78 /*
79  * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
80  * attach situations
81  */
82 static kmutex_t cmdk_attach_mutex;
83 static int cmdk_max_instance = 0;
84 
85 /*
86  * Panic dumpsys state
87  * There is only a single flag that is not mutex locked since
88  * the system is prevented from thread switching and cmdk_dump
89  * will only be called in a single threaded operation.
90  */
91 static int	cmdk_indump;
92 
93 /*
94  * Local Function Prototypes
95  */
96 static int cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp);
97 static void cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp);
98 static void cmdkmin(struct buf *bp);
99 static int cmdkrw(dev_t dev, struct uio *uio, int flag);
100 static int cmdkarw(dev_t dev, struct aio_req *aio, int flag);
101 
102 /*
103  * Bad Block Handling Functions Prototypes
104  */
105 static void cmdk_bbh_reopen(struct cmdk *dkp);
106 static opaque_t cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp);
107 static bbh_cookie_t cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle);
108 static void cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle);
109 static void cmdk_bbh_close(struct cmdk *dkp);
110 static void cmdk_bbh_setalts_idx(struct cmdk *dkp);
111 static int cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key);
112 
/*
 * Bad-block-handling object vector handed to the target driver.
 * The two leading nulldev entries are unused entry points; the real
 * work happens in the gethandle/htoc/freehandle callbacks below.
 */
static struct bbh_objops cmdk_bbh_ops = {
	nulldev,		/* unused entry point */
	nulldev,		/* unused entry point */
	cmdk_bbh_gethandle,
	cmdk_bbh_htoc,
	cmdk_bbh_freehandle,
	0, 0
};
121 
122 static int cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp);
123 static int cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp);
124 static int cmdkstrategy(struct buf *bp);
125 static int cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
126 static int cmdkioctl(dev_t, int, intptr_t, int, cred_t *, int *);
127 static int cmdkread(dev_t dev, struct uio *uio, cred_t *credp);
128 static int cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp);
129 static int cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
130     int mod_flags, char *name, caddr_t valuep, int *lengthp);
131 static int cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp);
132 static int cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp);
133 
134 /*
135  * Device driver ops vector
136  */
137 
/* character/block entry points (cb_ops(9S)) for the cmdk driver */
static struct cb_ops cmdk_cb_ops = {
	cmdkopen, 		/* open */
	cmdkclose, 		/* close */
	cmdkstrategy, 		/* strategy */
	nodev, 			/* print */
	cmdkdump, 		/* dump */
	cmdkread, 		/* read */
	cmdkwrite, 		/* write */
	cmdkioctl, 		/* ioctl */
	nodev, 			/* devmap */
	nodev, 			/* mmap */
	nodev, 			/* segmap */
	nochpoll, 		/* poll */
	cmdk_prop_op, 		/* cb_prop_op */
	0, 			/* streamtab  */
	D_64BIT | D_MP | D_NEW,	/* Driver compatibility flag */
	CB_REV,			/* cb_rev */
	cmdkaread,		/* async read */
	cmdkawrite		/* async write */
};
158 
159 static int cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
160     void **result);
161 static int cmdkprobe(dev_info_t *dip);
162 static int cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd);
163 static int cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd);
164 
165 static void cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp);
166 static int cmdkresume(dev_info_t *dip);
167 static int cmdksuspend(dev_info_t *dip);
168 static int cmdkpower(dev_info_t *dip, int component, int level);
169 
/*
 * dev_ops(9S) vector: autoconfiguration entry points plus the
 * cb_ops table above.  Not static because the modldrv references it.
 */
struct dev_ops cmdk_ops = {
	DEVO_REV, 		/* devo_rev, */
	0, 			/* refcnt  */
	cmdkinfo,		/* info */
	nulldev, 		/* identify */
	cmdkprobe, 		/* probe */
	cmdkattach, 		/* attach */
	cmdkdetach,		/* detach */
	nodev, 			/* reset */
	&cmdk_cb_ops, 		/* driver operations */
	(struct bus_ops *)0,	/* bus operations */
	cmdkpower		/* power */
};
183 
184 /*
185  * This is the loadable module wrapper.
186  */
187 #include <sys/modctl.h>
188 
189 extern struct mod_ops mod_driverops;
190 
static struct modldrv modldrv = {
	&mod_driverops, 	/* Type of module. This one is a driver */
	"Common Direct Access Disk",
	&cmdk_ops, 				/* driver ops 		*/
};

/* single-driver linkage passed to mod_install()/mod_remove() */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};
200 
201 /* Function prototypes for cmlb callbacks */
202 
203 static int cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
204     diskaddr_t start, size_t length, void *tg_cookie);
205 
206 static int cmdk_lb_getinfo(dev_info_t *dip, int cmd,  void *arg,
207     void *tg_cookie);
208 
209 static void cmdk_devid_setup(struct cmdk *dkp);
210 static int cmdk_devid_modser(struct cmdk *dkp);
211 static int cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len);
212 static int cmdk_devid_fabricate(struct cmdk *dkp);
213 static int cmdk_devid_read(struct cmdk *dkp);
214 
/*
 * Target-disk ops handed to cmlb: cmlb calls back through these to
 * read/write media blocks and query geometry while handling labels.
 */
static cmlb_tg_ops_t cmdk_lb_ops = {
	TG_DK_OPS_VERSION_1,
	cmdk_lb_rdwr,
	cmdk_lb_getinfo
};
220 
221 static boolean_t
222 cmdk_isopen(struct cmdk *dkp, dev_t dev)
223 {
224 	int		part, otyp;
225 	ulong_t		partbit;
226 
227 	ASSERT(MUTEX_HELD((&dkp->dk_mutex)));
228 
229 	part = CMDKPART(dev);
230 	partbit = 1 << part;
231 
232 	/* account for close */
233 	if (dkp->dk_open_lyr[part] != 0)
234 		return (B_TRUE);
235 	for (otyp = 0; otyp < OTYPCNT; otyp++)
236 		if (dkp->dk_open_reg[otyp] & partbit)
237 			return (B_TRUE);
238 	return (B_FALSE);
239 }
240 
241 int
242 _init(void)
243 {
244 	int 	rval;
245 
246 	if (rval = ddi_soft_state_init(&cmdk_state, sizeof (struct cmdk), 7))
247 		return (rval);
248 
249 	mutex_init(&cmdk_attach_mutex, NULL, MUTEX_DRIVER, NULL);
250 	if ((rval = mod_install(&modlinkage)) != 0) {
251 		mutex_destroy(&cmdk_attach_mutex);
252 		ddi_soft_state_fini(&cmdk_state);
253 	}
254 	return (rval);
255 }
256 
/*
 * Loadable-module fini: deliberately always refuses to unload by
 * returning EBUSY; see the comment below for why the real teardown
 * path is disabled.
 */
int
_fini(void)
{
	return (EBUSY);

	/*
	 * This has been commented out until cmdk is a true
	 * unloadable module. Right now x86's are panicking on
	 * a diskless reconfig boot.
	 */

#if 0 	/* bugid 1186679 */
	int	rval;

	rval = mod_remove(&modlinkage);
	if (rval != 0)
		return (rval);

	mutex_destroy(&cmdk_attach_mutex);
	ddi_soft_state_fini(&cmdk_state);

	return (0);
#endif
}
281 
/* Report module information to the module framework. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
287 
288 /*
289  * Autoconfiguration Routines
290  */
/*
 * probe(9E): allocate per-instance soft state, create the dadk
 * target-object linkage, and ask dadk whether a disk is actually
 * present.  Every failure path fully tears down what was created
 * here, so a failed probe leaves no residue.
 */
static int
cmdkprobe(dev_info_t *dip)
{
	int 	instance;
	int	status;
	struct	cmdk	*dkp;

	instance = ddi_get_instance(dip);

	/* soft state already exists: instance was probed before */
	if (ddi_get_soft_state(cmdk_state, instance))
		return (DDI_PROBE_PARTIAL);

	if ((ddi_soft_state_zalloc(cmdk_state, instance) != DDI_SUCCESS) ||
	    ((dkp = ddi_get_soft_state(cmdk_state, instance)) == NULL))
		return (DDI_PROBE_PARTIAL);

	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&dkp->dk_bbh_mutex, NULL, RW_DRIVER, NULL);
	dkp->dk_dip = dip;
	mutex_enter(&dkp->dk_mutex);

	/* base dev_t for this unit; partitions live in the low minor bits */
	dkp->dk_dev = makedevice(ddi_driver_major(dip),
	    ddi_get_instance(dip) << CMDK_UNITSHF);

	/* linkage to dadk and strategy */
	if (cmdk_create_obj(dip, dkp) != DDI_SUCCESS) {
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (DDI_PROBE_PARTIAL);
	}

	/* ask the target layer whether the device responds */
	status = dadk_probe(DKTP_DATA, KM_NOSLEEP);
	if (status != DDI_PROBE_SUCCESS) {
		cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage  */
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (status);
	}

	mutex_exit(&dkp->dk_mutex);
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkprobe: instance= %d name= `%s`\n",
		    instance, ddi_get_name_addr(dip));
#endif
	return (status);
}
342 
/*
 * attach(9E): open the target disk, attach cmlb so it can read the
 * label and create minor nodes, start bad-block handling, fabricate
 * or read the devid, and bring up power management.  On failure the
 * fail1/fail2 labels unwind in reverse order of setup.
 */
static int
cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int 		instance;
	struct		cmdk *dkp;
	char 		*node_type;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (cmdkresume(dip));
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	/* soft state was allocated by cmdkprobe(); fail if it's gone */
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);

	dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
	/*
	 * NOTE(review): dk_mutex was already initialized in cmdkprobe();
	 * re-initializing it here looks redundant — confirm intent.
	 */
	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);

	mutex_enter(&dkp->dk_mutex);

	/* dadk_attach is an empty function that only returns SUCCESS */
	(void) dadk_attach(DKTP_DATA);

	node_type = (DKTP_EXT->tg_nodetype);

	/*
	 * this open allows cmlb to read the device
	 * and determine the label types
	 * so that cmlb can create minor nodes for device
	 */

	/* open the target disk	 */
	if (dadk_open(DKTP_DATA, 0) != DDI_SUCCESS)
		goto fail2;

#ifdef _ILP32
	{
		/* 32-bit kernels cannot address disks beyond DK_MAX_BLOCKS */
		struct  tgdk_geom phyg;
		(void) dadk_getphygeom(DKTP_DATA, &phyg);
		if ((phyg.g_cap - 1) > DK_MAX_BLOCKS) {
			(void) dadk_close(DKTP_DATA);
			goto fail2;
		}
	}
#endif


	/* mark as having opened target */
	dkp->dk_flag |= CMDK_TGDK_OPEN;

	cmlb_alloc_handle((cmlb_handle_t *)&dkp->dk_cmlbhandle);

	if (cmlb_attach(dip,
	    &cmdk_lb_ops,
	    DTYPE_DIRECT,		/* device_type */
	    0,				/* removable */
	    0,				/* hot pluggable XXX */
	    node_type,
	    CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT,	/* alter_behaviour */
	    dkp->dk_cmlbhandle,
	    0) != 0)
		goto fail1;

	/* Calling validate will create minor nodes according to disk label */
	(void) cmlb_validate(dkp->dk_cmlbhandle, 0, 0);

	/* set bbh (Bad Block Handling) */
	cmdk_bbh_reopen(dkp);

	/* setup devid string */
	cmdk_devid_setup(dkp);

	/* remember the highest attached instance for cmdkdetach's scan */
	mutex_enter(&cmdk_attach_mutex);
	if (instance > cmdk_max_instance)
		cmdk_max_instance = instance;
	mutex_exit(&cmdk_attach_mutex);

	mutex_exit(&dkp->dk_mutex);

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers)
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);
	ddi_report_dev(dip);

	/*
	 * Initialize power management
	 */
	mutex_init(&dkp->dk_pm_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&dkp->dk_suspend_cv,   NULL, CV_DRIVER, NULL);
	cmdk_setup_pm(dip, dkp);

	return (DDI_SUCCESS);

fail1:
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	(void) dadk_close(DKTP_DATA);
fail2:
	cmdk_destroy_obj(dip, dkp);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	ddi_soft_state_free(cmdk_state, instance);
	return (DDI_FAILURE);
}
455 
456 
/*
 * detach(9E): refuse to detach while any cmdk instance is open, then
 * close the target disk if attach left it open, detach cmlb, and free
 * all per-instance state.
 */
static int
cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct cmdk	*dkp;
	int 		instance;
	int		max_instance;

	switch (cmd) {
	case DDI_DETACH:
		/* return (DDI_FAILURE); */
		break;
	case DDI_SUSPEND:
		return (cmdksuspend(dip));
	default:
#ifdef CMDK_DEBUG
		if (cmdk_debug & DIO) {
			PRF("cmdkdetach: cmd = %d unknown\n", cmd);
		}
#endif
		return (DDI_FAILURE);
	}

	mutex_enter(&cmdk_attach_mutex);
	max_instance = cmdk_max_instance;
	mutex_exit(&cmdk_attach_mutex);

	/* check if any instance of driver is open */
	/*
	 * NOTE(review): '<' never examines instance == cmdk_max_instance,
	 * which looks like an off-by-one in this open-instance scan —
	 * confirm against the intended semantics of cmdk_max_instance.
	 */
	for (instance = 0; instance < max_instance; instance++) {
		dkp = ddi_get_soft_state(cmdk_state, instance);
		if (!dkp)
			continue;
		if (dkp->dk_flag & CMDK_OPEN)
			return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	/* no soft state means nothing to tear down */
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_SUCCESS);

	mutex_enter(&dkp->dk_mutex);

	/*
	 * The cmdk_part_info call at the end of cmdkattach may have
	 * caused cmdk_reopen to do a TGDK_OPEN, make sure we close on
	 * detach for case when cmdkopen/cmdkclose never occurs.
	 */
	if (dkp->dk_flag & CMDK_TGDK_OPEN) {
		dkp->dk_flag &= ~CMDK_TGDK_OPEN;
		(void) dadk_close(DKTP_DATA);
	}

	cmlb_detach(dkp->dk_cmlbhandle, 0);
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	ddi_prop_remove_all(dip);

	cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage  */
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_destroy(&dkp->dk_pm_mutex);
	cv_destroy(&dkp->dk_suspend_cv);
	ddi_soft_state_free(cmdk_state, instance);

	return (DDI_SUCCESS);
}
522 
523 static int
524 cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
525 {
526 	dev_t		dev = (dev_t)arg;
527 	int 		instance;
528 	struct	cmdk	*dkp;
529 
530 #ifdef lint
531 	dip = dip;	/* no one ever uses this */
532 #endif
533 #ifdef CMDK_DEBUG
534 	if (cmdk_debug & DENT)
535 		PRF("cmdkinfo: call\n");
536 #endif
537 	instance = CMDKUNIT(dev);
538 
539 	switch (infocmd) {
540 		case DDI_INFO_DEVT2DEVINFO:
541 			if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
542 				return (DDI_FAILURE);
543 			*result = (void *) dkp->dk_dip;
544 			break;
545 		case DDI_INFO_DEVT2INSTANCE:
546 			*result = (void *)(intptr_t)instance;
547 			break;
548 		default:
549 			return (DDI_FAILURE);
550 	}
551 	return (DDI_SUCCESS);
552 }
553 
554 /*
555  * Initialize the power management components
556  */
557 static void
558 cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp)
559 {
560 	char *pm_comp[] = { "NAME=cmdk", "0=off", "1=on", NULL };
561 
562 	/*
563 	 * Since the cmdk device does not the 'reg' property,
564 	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
565 	 * The following code is to tell cpr that this device
566 	 * DOES need to be suspended and resumed.
567 	 */
568 	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
569 	    "pm-hardware-state", "needs-suspend-resume");
570 
571 	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
572 	    "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
573 		if (pm_raise_power(dip, 0, CMDK_SPINDLE_ON) == DDI_SUCCESS) {
574 			mutex_enter(&dkp->dk_pm_mutex);
575 			dkp->dk_pm_level = CMDK_SPINDLE_ON;
576 			dkp->dk_pm_is_enabled = 1;
577 			mutex_exit(&dkp->dk_pm_mutex);
578 		} else {
579 			mutex_enter(&dkp->dk_pm_mutex);
580 			dkp->dk_pm_level = CMDK_SPINDLE_OFF;
581 			dkp->dk_pm_is_enabled = 0;
582 			mutex_exit(&dkp->dk_pm_mutex);
583 		}
584 	} else {
585 		mutex_enter(&dkp->dk_pm_mutex);
586 		dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
587 		dkp->dk_pm_is_enabled = 0;
588 		mutex_exit(&dkp->dk_pm_mutex);
589 	}
590 }
591 
592 /*
593  * suspend routine, it will be run when get the command
594  * DDI_SUSPEND at detach(9E) from system power management
595  */
/*
 * suspend routine, it will be run when get the command
 * DDI_SUSPEND at detach(9E) from system power management.
 * Sets CMDK_SUSPEND and then waits (about a minute at most) for the
 * target's outstanding commands to drain before agreeing to suspend.
 */
static int
cmdksuspend(dev_info_t *dip)
{
	struct cmdk	*dkp;
	int		instance;
	clock_t		count = 0;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);
	mutex_enter(&dkp->dk_mutex);
	/* already suspended: nothing to do */
	if (dkp->dk_flag & CMDK_SUSPEND) {
		mutex_exit(&dkp->dk_mutex);
		return (DDI_SUCCESS);
	}
	dkp->dk_flag |= CMDK_SUSPEND;

	/*
	 * need to wait a while; one-second delays, giving up after
	 * roughly 60 tries.  NOTE(review): the delay happens while
	 * dk_mutex is held — presumably acceptable on this path, but
	 * confirm nothing else needs the mutex during suspend.
	 */
	while (dadk_getcmds(DKTP_DATA) != 0) {
		delay(drv_usectohz(1000000));
		if (count > 60) {
			/* give up: undo the suspend mark and wake waiters */
			dkp->dk_flag &= ~CMDK_SUSPEND;
			cv_broadcast(&dkp->dk_suspend_cv);
			mutex_exit(&dkp->dk_mutex);
			return (DDI_FAILURE);
		}
		count++;
	}
	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);
}
627 
628 /*
629  * resume routine, it will be run when get the command
630  * DDI_RESUME at attach(9E) from system power management
631  */
632 static int
633 cmdkresume(dev_info_t *dip)
634 {
635 	struct cmdk	*dkp;
636 	int		instance;
637 
638 	instance = ddi_get_instance(dip);
639 	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
640 		return (DDI_FAILURE);
641 	mutex_enter(&dkp->dk_mutex);
642 	if (!(dkp->dk_flag & CMDK_SUSPEND)) {
643 		mutex_exit(&dkp->dk_mutex);
644 		return (DDI_FAILURE);
645 	}
646 	dkp->dk_pm_level = CMDK_SPINDLE_ON;
647 	dkp->dk_flag &= ~CMDK_SUSPEND;
648 	cv_broadcast(&dkp->dk_suspend_cv);
649 	mutex_exit(&dkp->dk_mutex);
650 	return (DDI_SUCCESS);
651 
652 }
653 
654 /*
655  * power management entry point, it was used to
656  * change power management component.
657  * Actually, the real hard drive suspend/resume
658  * was handled in ata, so this function is not
659  * doing any real work other than verifying that
660  * the disk is idle.
661  */
662 static int
663 cmdkpower(dev_info_t *dip, int component, int level)
664 {
665 	struct cmdk	*dkp;
666 	int		instance;
667 
668 	instance = ddi_get_instance(dip);
669 	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
670 	    component != 0 || level > CMDK_SPINDLE_ON ||
671 	    level < CMDK_SPINDLE_OFF) {
672 		return (DDI_FAILURE);
673 	}
674 
675 	mutex_enter(&dkp->dk_pm_mutex);
676 	if (dkp->dk_pm_is_enabled && dkp->dk_pm_level == level) {
677 		mutex_exit(&dkp->dk_pm_mutex);
678 		return (DDI_SUCCESS);
679 	}
680 	mutex_exit(&dkp->dk_pm_mutex);
681 
682 	if ((level == CMDK_SPINDLE_OFF) &&
683 	    (dadk_getcmds(DKTP_DATA) != 0)) {
684 		return (DDI_FAILURE);
685 	}
686 
687 	mutex_enter(&dkp->dk_pm_mutex);
688 	dkp->dk_pm_level = level;
689 	mutex_exit(&dkp->dk_pm_mutex);
690 	return (DDI_SUCCESS);
691 }
692 
693 static int
694 cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
695     char *name, caddr_t valuep, int *lengthp)
696 {
697 	struct	cmdk	*dkp;
698 
699 #ifdef CMDK_DEBUG
700 	if (cmdk_debug & DENT)
701 		PRF("cmdk_prop_op: call\n");
702 #endif
703 
704 	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
705 	if (dkp == NULL)
706 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
707 		    name, valuep, lengthp));
708 
709 	return (cmlb_prop_op(dkp->dk_cmlbhandle,
710 	    dev, dip, prop_op, mod_flags, name, valuep, lengthp,
711 	    CMDKPART(dev), NULL));
712 }
713 
714 /*
715  * dump routine
716  */
/*
 * dump(9E): write `nblk` blocks from `addr` at partition-relative
 * block `blkno` during a panic dump.  Runs single-threaded; the
 * cmdk_indump flag tells the target layer not to take locks.
 */
static int
cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int 		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	struct	buf	local;
	struct	buf	*bp;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkdump: call\n");
#endif
	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || (blkno < 0))
		return (ENXIO);

	/* look up the partition's start block and length */
	if (cmlb_partinfo(
	    dkp->dk_cmlbhandle,
	    CMDKPART(dev),
	    &p_lblkcnt,
	    &p_lblksrt,
	    NULL,
	    NULL,
	    0)) {
		return (ENXIO);
	}

	/* refuse a dump that would run past the end of the partition */
	if ((blkno+nblk) > p_lblkcnt)
		return (EINVAL);

	cmdk_indump = 1;	/* Tell disk targets we are panic dumpping */

	/* build a throwaway buf on the stack; no biowait/biodone here */
	bp = &local;
	bzero(bp, sizeof (*bp));
	bp->b_flags = B_BUSY;
	bp->b_un.b_addr = addr;
	bp->b_bcount = nblk << SCTRSHFT;
	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + blkno)));

	(void) dadk_dump(DKTP_DATA, bp);
	return (bp->b_error);
}
761 
762 /*
763  * Copy in the dadkio_rwcmd according to the user's data model.  If needed,
764  * convert it for our internal use.
765  */
766 static int
767 rwcmd_copyin(struct dadkio_rwcmd *rwcmdp, caddr_t inaddr, int flag)
768 {
769 	switch (ddi_model_convert_from(flag)) {
770 		case DDI_MODEL_ILP32: {
771 			struct dadkio_rwcmd32 cmd32;
772 
773 			if (ddi_copyin(inaddr, &cmd32,
774 			    sizeof (struct dadkio_rwcmd32), flag)) {
775 				return (EFAULT);
776 			}
777 
778 			rwcmdp->cmd = cmd32.cmd;
779 			rwcmdp->flags = cmd32.flags;
780 			rwcmdp->blkaddr = (blkaddr_t)cmd32.blkaddr;
781 			rwcmdp->buflen = cmd32.buflen;
782 			rwcmdp->bufaddr = (caddr_t)(intptr_t)cmd32.bufaddr;
783 			/*
784 			 * Note: we do not convert the 'status' field,
785 			 * as it should not contain valid data at this
786 			 * point.
787 			 */
788 			bzero(&rwcmdp->status, sizeof (rwcmdp->status));
789 			break;
790 		}
791 		case DDI_MODEL_NONE: {
792 			if (ddi_copyin(inaddr, rwcmdp,
793 			    sizeof (struct dadkio_rwcmd), flag)) {
794 				return (EFAULT);
795 			}
796 		}
797 	}
798 	return (0);
799 }
800 
801 /*
802  * If necessary, convert the internal rwcmdp and status to the appropriate
803  * data model and copy it out to the user.
804  */
805 static int
806 rwcmd_copyout(struct dadkio_rwcmd *rwcmdp, caddr_t outaddr, int flag)
807 {
808 	switch (ddi_model_convert_from(flag)) {
809 		case DDI_MODEL_ILP32: {
810 			struct dadkio_rwcmd32 cmd32;
811 
812 			cmd32.cmd = rwcmdp->cmd;
813 			cmd32.flags = rwcmdp->flags;
814 			cmd32.blkaddr = rwcmdp->blkaddr;
815 			cmd32.buflen = rwcmdp->buflen;
816 			ASSERT64(((uintptr_t)rwcmdp->bufaddr >> 32) == 0);
817 			cmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmdp->bufaddr;
818 
819 			cmd32.status.status = rwcmdp->status.status;
820 			cmd32.status.resid = rwcmdp->status.resid;
821 			cmd32.status.failed_blk_is_valid =
822 			    rwcmdp->status.failed_blk_is_valid;
823 			cmd32.status.failed_blk = rwcmdp->status.failed_blk;
824 			cmd32.status.fru_code_is_valid =
825 			    rwcmdp->status.fru_code_is_valid;
826 			cmd32.status.fru_code = rwcmdp->status.fru_code;
827 
828 			bcopy(rwcmdp->status.add_error_info,
829 			    cmd32.status.add_error_info, DADKIO_ERROR_INFO_LEN);
830 
831 			if (ddi_copyout(&cmd32, outaddr,
832 			    sizeof (struct dadkio_rwcmd32), flag))
833 				return (EFAULT);
834 			break;
835 		}
836 		case DDI_MODEL_NONE: {
837 			if (ddi_copyout(rwcmdp, outaddr,
838 			    sizeof (struct dadkio_rwcmd), flag))
839 			return (EFAULT);
840 		}
841 	}
842 	return (0);
843 }
844 
845 /*
846  * ioctl routine
847  */
848 static int
849 cmdkioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp)
850 {
851 	int 		instance;
852 	struct scsi_device *devp;
853 	struct cmdk	*dkp;
854 	char 		data[NBPSCTR];
855 
856 	instance = CMDKUNIT(dev);
857 	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
858 		return (ENXIO);
859 
860 	mutex_enter(&dkp->dk_mutex);
861 	while (dkp->dk_flag & CMDK_SUSPEND) {
862 		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
863 	}
864 	mutex_exit(&dkp->dk_mutex);
865 
866 	bzero(data, sizeof (data));
867 
868 	switch (cmd) {
869 
870 	case DKIOCGMEDIAINFO: {
871 		struct dk_minfo	media_info;
872 		struct  tgdk_geom phyg;
873 
874 		/* dadk_getphygeom always returns success */
875 		(void) dadk_getphygeom(DKTP_DATA, &phyg);
876 
877 		media_info.dki_lbsize = phyg.g_secsiz;
878 		media_info.dki_capacity = phyg.g_cap;
879 		media_info.dki_media_type = DK_FIXED_DISK;
880 
881 		if (ddi_copyout(&media_info, (void *)arg,
882 		    sizeof (struct dk_minfo), flag)) {
883 			return (EFAULT);
884 		} else {
885 			return (0);
886 		}
887 	}
888 
889 	case DKIOCINFO: {
890 		struct dk_cinfo *info = (struct dk_cinfo *)data;
891 
892 		/* controller information */
893 		info->dki_ctype = (DKTP_EXT->tg_ctype);
894 		info->dki_cnum = ddi_get_instance(ddi_get_parent(dkp->dk_dip));
895 		(void) strcpy(info->dki_cname,
896 		    ddi_get_name(ddi_get_parent(dkp->dk_dip)));
897 
898 		/* Unit Information */
899 		info->dki_unit = ddi_get_instance(dkp->dk_dip);
900 		devp = ddi_get_driver_private(dkp->dk_dip);
901 		info->dki_slave = (CMDEV_TARG(devp)<<3) | CMDEV_LUN(devp);
902 		(void) strcpy(info->dki_dname, ddi_driver_name(dkp->dk_dip));
903 		info->dki_flags = DKI_FMTVOL;
904 		info->dki_partition = CMDKPART(dev);
905 
906 		info->dki_maxtransfer = maxphys / DEV_BSIZE;
907 		info->dki_addr = 1;
908 		info->dki_space = 0;
909 		info->dki_prio = 0;
910 		info->dki_vec = 0;
911 
912 		if (ddi_copyout(data, (void *)arg, sizeof (*info), flag))
913 			return (EFAULT);
914 		else
915 			return (0);
916 	}
917 
918 	case DKIOCSTATE: {
919 		int	state;
920 		int	rval;
921 		diskaddr_t	p_lblksrt;
922 		diskaddr_t	p_lblkcnt;
923 
924 		if (ddi_copyin((void *)arg, &state, sizeof (int), flag))
925 			return (EFAULT);
926 
927 		/* dadk_check_media blocks until state changes */
928 		if (rval = dadk_check_media(DKTP_DATA, &state))
929 			return (rval);
930 
931 		if (state == DKIO_INSERTED) {
932 
933 			if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0)
934 				return (ENXIO);
935 
936 			if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(dev),
937 			    &p_lblkcnt, &p_lblksrt, NULL, NULL, 0))
938 				return (ENXIO);
939 
940 			if (p_lblkcnt <= 0)
941 				return (ENXIO);
942 		}
943 
944 		if (ddi_copyout(&state, (caddr_t)arg, sizeof (int), flag))
945 			return (EFAULT);
946 
947 		return (0);
948 	}
949 
950 	/*
951 	 * is media removable?
952 	 */
953 	case DKIOCREMOVABLE: {
954 		int i;
955 
956 		i = (DKTP_EXT->tg_rmb) ? 1 : 0;
957 
958 		if (ddi_copyout(&i, (caddr_t)arg, sizeof (int), flag))
959 			return (EFAULT);
960 
961 		return (0);
962 	}
963 
964 	case DKIOCADDBAD:
965 		/*
966 		 * This is not an update mechanism to add bad blocks
967 		 * to the bad block structures stored on disk.
968 		 *
969 		 * addbadsec(1M) will update the bad block data on disk
970 		 * and use this ioctl to force the driver to re-initialize
971 		 * the list of bad blocks in the driver.
972 		 */
973 
974 		/* start BBH */
975 		cmdk_bbh_reopen(dkp);
976 		return (0);
977 
978 	case DKIOCG_PHYGEOM:
979 	case DKIOCG_VIRTGEOM:
980 	case DKIOCGGEOM:
981 	case DKIOCSGEOM:
982 	case DKIOCGAPART:
983 	case DKIOCSAPART:
984 	case DKIOCGVTOC:
985 	case DKIOCSVTOC:
986 	case DKIOCPARTINFO:
987 	case DKIOCGEXTVTOC:
988 	case DKIOCSEXTVTOC:
989 	case DKIOCEXTPARTINFO:
990 	case DKIOCGMBOOT:
991 	case DKIOCSMBOOT:
992 	case DKIOCGETEFI:
993 	case DKIOCSETEFI:
994 	case DKIOCPARTITION:
995 	{
996 		int rc;
997 
998 		rc = cmlb_ioctl(dkp->dk_cmlbhandle, dev, cmd, arg, flag,
999 		    credp, rvalp, 0);
1000 		if (cmd == DKIOCSVTOC)
1001 			cmdk_devid_setup(dkp);
1002 		return (rc);
1003 	}
1004 
1005 	case DIOCTL_RWCMD: {
1006 		struct	dadkio_rwcmd *rwcmdp;
1007 		int	status;
1008 
1009 		rwcmdp = kmem_alloc(sizeof (struct dadkio_rwcmd), KM_SLEEP);
1010 
1011 		status = rwcmd_copyin(rwcmdp, (caddr_t)arg, flag);
1012 
1013 		if (status == 0) {
1014 			bzero(&(rwcmdp->status), sizeof (struct dadkio_status));
1015 			status = dadk_ioctl(DKTP_DATA,
1016 			    dev,
1017 			    cmd,
1018 			    (uintptr_t)rwcmdp,
1019 			    flag,
1020 			    credp,
1021 			    rvalp);
1022 		}
1023 		if (status == 0)
1024 			status = rwcmd_copyout(rwcmdp, (caddr_t)arg, flag);
1025 
1026 		kmem_free(rwcmdp, sizeof (struct dadkio_rwcmd));
1027 		return (status);
1028 	}
1029 
1030 	default:
1031 		return (dadk_ioctl(DKTP_DATA,
1032 		    dev,
1033 		    cmd,
1034 		    arg,
1035 		    flag,
1036 		    credp,
1037 		    rvalp));
1038 	}
1039 }
1040 
/*ARGSUSED1*/
/*
 * close(9E): drop the open accounting for this partition/otyp and,
 * on the last close of the whole unit, invalidate the cmlb label so
 * it is re-read on the next open.
 * NOTE(review): success is returned as DDI_SUCCESS (0), which happens
 * to equal the errno-style 0 the framework expects — confirm this is
 * intentional rather than a mixed convention.
 */
static int
cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int		part;
	ulong_t		partbit;
	int 		instance;
	struct cmdk	*dkp;
	int		lastclose = 1;
	int		i;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (otyp >= OTYPCNT))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);

	/* check if device has been opened */
	ASSERT(cmdk_isopen(dkp, dev));
	if (!(dkp->dk_flag & CMDK_OPEN)) {
		mutex_exit(&dkp->dk_mutex);
		return (ENXIO);
	}

	/* wait out a suspend in progress before changing open state */
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* account for close */
	if (otyp == OTYP_LYR) {
		/* layered opens are counted per partition */
		ASSERT(dkp->dk_open_lyr[part] > 0);
		if (dkp->dk_open_lyr[part])
			dkp->dk_open_lyr[part]--;
	} else {
		/* regular opens are a per-otyp bitmask of partitions */
		ASSERT((dkp->dk_open_reg[otyp] & partbit) != 0);
		dkp->dk_open_reg[otyp] &= ~partbit;
	}
	dkp->dk_open_exl &= ~partbit;

	/* any remaining layered or regular open keeps the unit open */
	for (i = 0; i < CMDK_MAXPART; i++)
		if (dkp->dk_open_lyr[i] != 0) {
			lastclose = 0;
			break;
		}

	if (lastclose)
		for (i = 0; i < OTYPCNT; i++)
			if (dkp->dk_open_reg[i] != 0) {
				lastclose = 0;
				break;
			}

	mutex_exit(&dkp->dk_mutex);

	if (lastclose)
		cmlb_invalidate(dkp->dk_cmlbhandle, 0);

	return (DDI_SUCCESS);
}
1104 
/*ARGSUSED3*/
/*
 * open(9E): validate the label/partition (unless a nodelay open
 * allows skipping that), enforce read-only and exclusive-open
 * semantics, then record the open in the per-partition accounting.
 */
static int
cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp)
{
	dev_t		dev = *dev_p;
	int 		part;
	ulong_t		partbit;
	int 		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	int		i;
	int		nodelay;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	if (otyp >= OTYPCNT)
		return (EINVAL);

	/* wait out a suspend in progress */
	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	part = CMDKPART(dev);
	partbit = 1 << part;
	nodelay = (flag & (FNDELAY | FNONBLOCK));

	mutex_enter(&dkp->dk_mutex);

	if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0) {

		/* fail if not doing non block open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else if (cmlb_partinfo(dkp->dk_cmlbhandle, part, &p_lblkcnt,
	    &p_lblksrt, NULL, NULL, 0) == 0) {

		/* empty partition: only a nodelay char open may proceed */
		if (p_lblkcnt <= 0 && (!nodelay || otyp != OTYP_CHR)) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else {
		/* fail if not doing non block open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	}

	/* refuse write access to a read-only target */
	if ((DKTP_EXT->tg_rdonly) && (flag & FWRITE)) {
		mutex_exit(&dkp->dk_mutex);
		return (EROFS);
	}

	/* check for part already opened exclusively */
	if (dkp->dk_open_exl & partbit)
		goto excl_open_fail;

	/* check if we can establish exclusive open */
	if (flag & FEXCL) {
		if (dkp->dk_open_lyr[part])
			goto excl_open_fail;
		for (i = 0; i < OTYPCNT; i++) {
			if (dkp->dk_open_reg[i] & partbit)
				goto excl_open_fail;
		}
	}

	/* open will succeed, account for open */
	dkp->dk_flag |= CMDK_OPEN;
	if (otyp == OTYP_LYR)
		dkp->dk_open_lyr[part]++;
	else
		dkp->dk_open_reg[otyp] |= partbit;
	if (flag & FEXCL)
		dkp->dk_open_exl |= partbit;

	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);

excl_open_fail:
	mutex_exit(&dkp->dk_mutex);
	return (EBUSY);
}
1195 
1196 /*
1197  * read routine
1198  */
/*ARGSUSED2*/
static int
cmdkread(dev_t dev, struct uio *uio, cred_t *credp)
{
	/* Delegate to the common read/write path with read direction. */
	return (cmdkrw(dev, uio, B_READ));
}
1205 
1206 /*
1207  * async read routine
1208  */
/*ARGSUSED2*/
static int
cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	/* Delegate to the common async read/write path, read direction. */
	return (cmdkarw(dev, aio, B_READ));
}
1215 
1216 /*
1217  * write routine
1218  */
/*ARGSUSED2*/
static int
cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp)
{
	/* Delegate to the common read/write path with write direction. */
	return (cmdkrw(dev, uio, B_WRITE));
}
1225 
1226 /*
1227  * async write routine
1228  */
/*ARGSUSED2*/
static int
cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	/* Delegate to the common async read/write path, write direction. */
	return (cmdkarw(dev, aio, B_WRITE));
}
1235 
1236 static void
1237 cmdkmin(struct buf *bp)
1238 {
1239 	if (bp->b_bcount > DK_MAXRECSIZE)
1240 		bp->b_bcount = DK_MAXRECSIZE;
1241 }
1242 
1243 static int
1244 cmdkrw(dev_t dev, struct uio *uio, int flag)
1245 {
1246 	int 		instance;
1247 	struct	cmdk	*dkp;
1248 
1249 	instance = CMDKUNIT(dev);
1250 	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
1251 		return (ENXIO);
1252 
1253 	mutex_enter(&dkp->dk_mutex);
1254 	while (dkp->dk_flag & CMDK_SUSPEND) {
1255 		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
1256 	}
1257 	mutex_exit(&dkp->dk_mutex);
1258 
1259 	return (physio(cmdkstrategy, (struct buf *)0, dev, flag, cmdkmin, uio));
1260 }
1261 
1262 static int
1263 cmdkarw(dev_t dev, struct aio_req *aio, int flag)
1264 {
1265 	int 		instance;
1266 	struct	cmdk	*dkp;
1267 
1268 	instance = CMDKUNIT(dev);
1269 	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
1270 		return (ENXIO);
1271 
1272 	mutex_enter(&dkp->dk_mutex);
1273 	while (dkp->dk_flag & CMDK_SUSPEND) {
1274 		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
1275 	}
1276 	mutex_exit(&dkp->dk_mutex);
1277 
1278 	return (aphysio(cmdkstrategy, anocancel, dev, flag, cmdkmin, aio));
1279 }
1280 
1281 /*
1282  * strategy routine
1283  */
/*
 * strategy(9E) entry point: validate the request against the partition
 * map, clip it at the end of the partition, translate the partition-
 * relative block to an absolute sector, and pass it to dadk.
 * Always returns 0; errors are reported through bioerror/biodone.
 */
static int
cmdkstrategy(struct buf *bp)
{
	int 		instance;
	struct	cmdk 	*dkp;
	long		d_cnt;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;

	instance = CMDKUNIT(bp->b_edev);
	/* reject I/O while dumping, on a missing unit, or a negative block */
	if (cmdk_indump || !(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (dkblock(bp) < 0)) {
		bp->b_resid = bp->b_bcount;
		SETBPERR(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	/* block while the device is suspended */
	mutex_enter(&dkp->dk_mutex);
	ASSERT(cmdk_isopen(dkp, bp->b_edev));
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	bp->b_flags &= ~(B_DONE|B_ERROR);
	bp->b_resid = 0;
	bp->av_back = NULL;

	/*
	 * only re-read the vtoc if necessary (force == FALSE)
	 *
	 * NOTE(review): if cmlb_partinfo() fails, p_lblkcnt/p_lblksrt are
	 * left uninitialized; the comparisons below then read indeterminate
	 * values, though the B_ERROR flag set here still terminates the
	 * request before it is issued — confirm.
	 */
	if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(bp->b_edev),
	    &p_lblkcnt, &p_lblksrt, NULL, NULL, 0)) {
		SETBPERR(bp, ENXIO);
	}

	/* require sector-aligned byte count and a start within the slice */
	if ((bp->b_bcount & (NBPSCTR-1)) || (dkblock(bp) > p_lblkcnt))
		SETBPERR(bp, ENXIO);

	/* a start exactly at end-of-partition is EOF, not an error */
	if ((bp->b_flags & B_ERROR) || (dkblock(bp) == p_lblkcnt)) {
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	/* clip the transfer at the end of the partition */
	d_cnt = bp->b_bcount >> SCTRSHFT;
	if ((dkblock(bp) + d_cnt) > p_lblkcnt) {
		bp->b_resid = ((dkblock(bp) + d_cnt) - p_lblkcnt) << SCTRSHFT;
		bp->b_bcount -= bp->b_resid;
	}

	/* translate to an absolute sector and hand off to the target layer */
	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + dkblock(bp))));
	if (dadk_strategy(DKTP_DATA, bp) != DDI_SUCCESS) {
		bp->b_resid += bp->b_bcount;
		biodone(bp);
	}
	return (0);
}
1343 
1344 static int
1345 cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp)
1346 {
1347 	struct scsi_device *devp;
1348 	opaque_t	queobjp = NULL;
1349 	opaque_t	flcobjp = NULL;
1350 	char		que_keyvalp[64];
1351 	int		que_keylen;
1352 	char		flc_keyvalp[64];
1353 	int		flc_keylen;
1354 
1355 	ASSERT(mutex_owned(&dkp->dk_mutex));
1356 
1357 	/* Create linkage to queueing routines based on property */
1358 	que_keylen = sizeof (que_keyvalp);
1359 	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
1360 	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
1361 	    DDI_PROP_SUCCESS) {
1362 		cmn_err(CE_WARN, "cmdk_create_obj: queue property undefined");
1363 		return (DDI_FAILURE);
1364 	}
1365 	que_keyvalp[que_keylen] = (char)0;
1366 
1367 	if (strcmp(que_keyvalp, "qfifo") == 0) {
1368 		queobjp = (opaque_t)qfifo_create();
1369 	} else if (strcmp(que_keyvalp, "qsort") == 0) {
1370 		queobjp = (opaque_t)qsort_create();
1371 	} else {
1372 		return (DDI_FAILURE);
1373 	}
1374 
1375 	/* Create linkage to dequeueing routines based on property */
1376 	flc_keylen = sizeof (flc_keyvalp);
1377 	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
1378 	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
1379 	    DDI_PROP_SUCCESS) {
1380 		cmn_err(CE_WARN,
1381 		    "cmdk_create_obj: flow-control property undefined");
1382 		return (DDI_FAILURE);
1383 	}
1384 
1385 	flc_keyvalp[flc_keylen] = (char)0;
1386 
1387 	if (strcmp(flc_keyvalp, "dsngl") == 0) {
1388 		flcobjp = (opaque_t)dsngl_create();
1389 	} else if (strcmp(flc_keyvalp, "dmult") == 0) {
1390 		flcobjp = (opaque_t)dmult_create();
1391 	} else {
1392 		return (DDI_FAILURE);
1393 	}
1394 
1395 	/* populate bbh_obj object stored in dkp */
1396 	dkp->dk_bbh_obj.bbh_data = dkp;
1397 	dkp->dk_bbh_obj.bbh_ops = &cmdk_bbh_ops;
1398 
1399 	/* create linkage to dadk */
1400 	dkp->dk_tgobjp = (opaque_t)dadk_create();
1401 
1402 	devp = ddi_get_driver_private(dip);
1403 	(void) dadk_init(DKTP_DATA, devp, flcobjp, queobjp, &dkp->dk_bbh_obj,
1404 	    NULL);
1405 
1406 	return (DDI_SUCCESS);
1407 }
1408 
1409 static void
1410 cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp)
1411 {
1412 	char		que_keyvalp[64];
1413 	int		que_keylen;
1414 	char		flc_keyvalp[64];
1415 	int		flc_keylen;
1416 
1417 	ASSERT(mutex_owned(&dkp->dk_mutex));
1418 
1419 	(void) dadk_free((dkp->dk_tgobjp));
1420 	dkp->dk_tgobjp = NULL;
1421 
1422 	que_keylen = sizeof (que_keyvalp);
1423 	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
1424 	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
1425 	    DDI_PROP_SUCCESS) {
1426 		cmn_err(CE_WARN, "cmdk_destroy_obj: queue property undefined");
1427 		return;
1428 	}
1429 	que_keyvalp[que_keylen] = (char)0;
1430 
1431 	flc_keylen = sizeof (flc_keyvalp);
1432 	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
1433 	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
1434 	    DDI_PROP_SUCCESS) {
1435 		cmn_err(CE_WARN,
1436 		    "cmdk_destroy_obj: flow-control property undefined");
1437 		return;
1438 	}
1439 	flc_keyvalp[flc_keylen] = (char)0;
1440 }
1441 /*ARGSUSED5*/
1442 static int
1443 cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
1444     diskaddr_t start, size_t count, void *tg_cookie)
1445 {
1446 	struct cmdk	*dkp;
1447 	opaque_t	handle;
1448 	int		rc = 0;
1449 	char		*bufa;
1450 
1451 	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
1452 	if (dkp == NULL)
1453 		return (ENXIO);
1454 
1455 	if (cmd != TG_READ && cmd != TG_WRITE)
1456 		return (EINVAL);
1457 
1458 	/* count must be multiple of 512 */
1459 	count = (count + NBPSCTR - 1) & -NBPSCTR;
1460 	handle = dadk_iob_alloc(DKTP_DATA, start, count, KM_SLEEP);
1461 	if (!handle)
1462 		return (ENOMEM);
1463 
1464 	if (cmd == TG_READ) {
1465 		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_READ);
1466 		if (!bufa)
1467 			rc = EIO;
1468 		else
1469 			bcopy(bufa, bufaddr, count);
1470 	} else {
1471 		bufa = dadk_iob_htoc(DKTP_DATA, handle);
1472 		bcopy(bufaddr, bufa, count);
1473 		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
1474 		if (!bufa)
1475 			rc = EIO;
1476 	}
1477 	(void) dadk_iob_free(DKTP_DATA, handle);
1478 
1479 	return (rc);
1480 }
1481 
/*
 * cmlb information callback: report physical/virtual geometry, capacity,
 * block size and media attributes from the dadk target layer.
 * Returns 0 on success, EINVAL for an unrepresentable virtual geometry,
 * ENOTTY for unknown commands.
 */
/*ARGSUSED3*/
static int
cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
{

	struct cmdk		*dkp;
	struct tgdk_geom	phyg;


	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM: {
		cmlb_geom_t *phygeomp = (cmlb_geom_t *)arg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		/* copy the physical geometry into the caller's structure */
		phygeomp->g_capacity	= phyg.g_cap;
		phygeomp->g_nsect	= phyg.g_sec;
		phygeomp->g_nhead	= phyg.g_head;
		phygeomp->g_acyl	= phyg.g_acyl;
		phygeomp->g_ncyl	= phyg.g_cyl;
		phygeomp->g_secsize	= phyg.g_secsiz;
		phygeomp->g_intrlv	= 1;
		phygeomp->g_rpm		= 3600;

		return (0);
	}

	case TG_GETVIRTGEOM: {
		cmlb_geom_t *virtgeomp = (cmlb_geom_t *)arg;
		diskaddr_t		capacity;

		(void) dadk_getgeom(DKTP_DATA, &phyg);
		capacity = phyg.g_cap;

		/*
		 * If the controller returned us something that doesn't
		 * really fit into an Int 13/function 8 geometry
		 * result, just fail the ioctl.  See PSARC 1998/313.
		 *
		 * NOTE(review): diskaddr_t is typically an unsigned
		 * 64-bit type, which would make `capacity < 0' always
		 * false — confirm against the typedef.
		 */
		if (capacity < 0 || capacity >= 63 * 254 * 1024)
			return (EINVAL);

		/* fixed 63 sectors x 254 heads BIOS-style fake geometry */
		virtgeomp->g_capacity	= capacity;
		virtgeomp->g_nsect	= 63;
		virtgeomp->g_nhead	= 254;
		virtgeomp->g_ncyl	= capacity / (63 * 254);
		virtgeomp->g_acyl	= 0;
		virtgeomp->g_secsize	= 512;
		virtgeomp->g_intrlv	= 1;
		virtgeomp->g_rpm	= 3600;

		return (0);
	}

	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
	{

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);
		if (cmd == TG_GETCAPACITY)
			*(diskaddr_t *)arg = phyg.g_cap;
		else
			*(uint32_t *)arg = (uint32_t)phyg.g_secsiz;

		return (0);
	}

	case TG_GETATTR: {
		tg_attribute_t *tgattribute = (tg_attribute_t *)arg;
		/* writability mirrors the target's read-only flag */
		if ((DKTP_EXT->tg_rdonly))
			tgattribute->media_is_writable = FALSE;
		else
			tgattribute->media_is_writable = TRUE;

		return (0);
	}

	default:
		return (ENOTTY);
	}
}
1569 
1570 
1571 
1572 
1573 
1574 /*
1575  * Create and register the devid.
1576  * There are 4 different ways we can get a device id:
1577  *    1. Already have one - nothing to do
1578  *    2. Build one from the drive's model and serial numbers
1579  *    3. Read one from the disk (first sector of last track)
1580  *    4. Fabricate one and write it on the disk.
1581  * If any of these succeeds, register the deviceid
1582  */
1583 static void
1584 cmdk_devid_setup(struct cmdk *dkp)
1585 {
1586 	int	rc;
1587 
1588 	/* Try options until one succeeds, or all have failed */
1589 
1590 	/* 1. All done if already registered */
1591 	if (dkp->dk_devid != NULL)
1592 		return;
1593 
1594 	/* 2. Build a devid from the model and serial number */
1595 	rc = cmdk_devid_modser(dkp);
1596 	if (rc != DDI_SUCCESS) {
1597 		/* 3. Read devid from the disk, if present */
1598 		rc = cmdk_devid_read(dkp);
1599 
1600 		/* 4. otherwise make one up and write it on the disk */
1601 		if (rc != DDI_SUCCESS)
1602 			rc = cmdk_devid_fabricate(dkp);
1603 	}
1604 
1605 	/* If we managed to get a devid any of the above ways, register it */
1606 	if (rc == DDI_SUCCESS)
1607 		(void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid);
1608 
1609 }
1610 
1611 /*
1612  * Build a devid from the model and serial number
1613  * Return DDI_SUCCESS or DDI_FAILURE.
1614  */
1615 static int
1616 cmdk_devid_modser(struct cmdk *dkp)
1617 {
1618 	int	rc = DDI_FAILURE;
1619 	char	*hwid;
1620 	int	modlen;
1621 	int	serlen;
1622 
1623 	/*
1624 	 * device ID is a concatenation of model number, '=', serial number.
1625 	 */
1626 	hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP);
1627 	modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN);
1628 	if (modlen == 0) {
1629 		rc = DDI_FAILURE;
1630 		goto err;
1631 	}
1632 	hwid[modlen++] = '=';
1633 	serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL,
1634 	    hwid + modlen, CMDK_HWIDLEN - modlen);
1635 	if (serlen == 0) {
1636 		rc = DDI_FAILURE;
1637 		goto err;
1638 	}
1639 	hwid[modlen + serlen] = 0;
1640 
1641 	/* Initialize the device ID, trailing NULL not included */
1642 	rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen,
1643 	    hwid, (ddi_devid_t *)&dkp->dk_devid);
1644 	if (rc != DDI_SUCCESS) {
1645 		rc = DDI_FAILURE;
1646 		goto err;
1647 	}
1648 
1649 	rc = DDI_SUCCESS;
1650 
1651 err:
1652 	kmem_free(hwid, CMDK_HWIDLEN);
1653 	return (rc);
1654 }
1655 
/*
 * Fetch the model or serial-number string (per `ioccmd') from the dadk
 * layer into `buf' and return its trimmed length, or 0 if the ioctl
 * fails or the string contains nothing meaningful.
 */
static int
cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len)
{
	dadk_ioc_string_t strarg;
	int		rval;
	char		*s;
	char		ch;
	boolean_t	ret;
	int		i;
	int		tb;

	strarg.is_buf = buf;
	strarg.is_size = len;
	if (dadk_ioctl(DKTP_DATA,
	    dkp->dk_dev,
	    ioccmd,
	    (uintptr_t)&strarg,
	    FNATIVE | FKIOCTL,
	    NULL,
	    &rval) != 0)
		return (0);

	/*
	 * valid model/serial string must contain a non-zero non-space
	 * trim trailing spaces/NULL
	 *
	 * tb tracks the index just past the last character that is not
	 * space/NUL (the trimmed length).  ret records whether any
	 * character other than space, NUL, or '0' was seen, i.e. an
	 * all-'0' string is rejected as meaningless.  tb is only read
	 * below when ret is B_TRUE, which guarantees it was assigned.
	 */
	ret = B_FALSE;
	s = buf;
	for (i = 0; i < strarg.is_size; i++) {
		ch = *s++;
		if (ch != ' ' && ch != '\0')
			tb = i + 1;
		if (ch != ' ' && ch != '\0' && ch != '0')
			ret = B_TRUE;
	}

	if (ret == B_FALSE)
		return (0);

	return (tb);
}
1697 
1698 /*
1699  * Read a devid from on the first block of the last track of
1700  * the last cylinder.  Make sure what we read is a valid devid.
1701  * Return DDI_SUCCESS or DDI_FAILURE.
1702  */
static int
cmdk_devid_read(struct cmdk *dkp)
{
	diskaddr_t	blk;
	struct dk_devid *dkdevidp;
	uint_t		*ip;
	int		chksum;
	int		i, sz;
	tgdk_iob_handle	handle = NULL;
	int		rc = DDI_FAILURE;

	/* locate the devid sector via cmlb */
	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0))
		goto err;

	/* read the devid */
	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (handle == NULL)
		goto err;

	dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (dkdevidp == NULL)
		goto err;

	/* Validate the revision */
	if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB))
		goto err;

	/*
	 * Calculate the checksum: XOR of every word in the sector except
	 * the last, which holds the stored checksum itself.
	 */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];
	if (DKD_GETCHKSUM(dkdevidp) != chksum)
		goto err;

	/* Validate the device id */
	if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS)
		goto err;

	/* keep a copy of the device id; the iob buffer is freed below */
	sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid);
	dkp->dk_devid = kmem_alloc(sz, KM_SLEEP);
	bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz);

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);
	return (rc);
}
1755 
1756 /*
1757  * Create a devid and write it on the first block of the last track of
1758  * the last cylinder.
1759  * Return DDI_SUCCESS or DDI_FAILURE.
1760  */
1761 static int
1762 cmdk_devid_fabricate(struct cmdk *dkp)
1763 {
1764 	ddi_devid_t	devid = NULL;	/* devid made by ddi_devid_init  */
1765 	struct dk_devid	*dkdevidp;	/* devid struct stored on disk */
1766 	diskaddr_t	blk;
1767 	tgdk_iob_handle	handle = NULL;
1768 	uint_t		*ip, chksum;
1769 	int		i;
1770 	int		rc;
1771 
1772 	rc = ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid);
1773 	if (rc != DDI_SUCCESS)
1774 		goto err;
1775 
1776 	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) {
1777 		/* no device id block address */
1778 		return (DDI_FAILURE);
1779 	}
1780 
1781 	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
1782 	if (!handle)
1783 		goto err;
1784 
1785 	/* Locate the buffer */
1786 	dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle);
1787 
1788 	/* Fill in the revision */
1789 	bzero(dkdevidp, NBPSCTR);
1790 	dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB;
1791 	dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB;
1792 
1793 	/* Copy in the device id */
1794 	i = ddi_devid_sizeof(devid);
1795 	if (i > DK_DEVID_SIZE)
1796 		goto err;
1797 	bcopy(devid, dkdevidp->dkd_devid, i);
1798 
1799 	/* Calculate the chksum */
1800 	chksum = 0;
1801 	ip = (uint_t *)dkdevidp;
1802 	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
1803 		chksum ^= ip[i];
1804 
1805 	/* Fill in the checksum */
1806 	DKD_FORMCHKSUM(chksum, dkdevidp);
1807 
1808 	/* write the devid */
1809 	(void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
1810 
1811 	dkp->dk_devid = devid;
1812 
1813 	rc = DDI_SUCCESS;
1814 
1815 err:
1816 	if (handle != NULL)
1817 		(void) dadk_iob_free(DKTP_DATA, handle);
1818 
1819 	if (rc != DDI_SUCCESS && devid != NULL)
1820 		ddi_devid_free(devid);
1821 
1822 	return (rc);
1823 }
1824 
1825 static void
1826 cmdk_bbh_free_alts(struct cmdk *dkp)
1827 {
1828 	if (dkp->dk_alts_hdl) {
1829 		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
1830 		kmem_free(dkp->dk_slc_cnt,
1831 		    NDKMAP * (sizeof (uint32_t) + sizeof (struct alts_ent *)));
1832 		dkp->dk_alts_hdl = NULL;
1833 	}
1834 }
1835 
1836 static void
1837 cmdk_bbh_reopen(struct cmdk *dkp)
1838 {
1839 	tgdk_iob_handle 	handle = NULL;
1840 	diskaddr_t		slcb, slcn, slce;
1841 	struct	alts_parttbl	*ap;
1842 	struct	alts_ent	*enttblp;
1843 	uint32_t		altused;
1844 	uint32_t		altbase;
1845 	uint32_t		altlast;
1846 	int			alts;
1847 	uint16_t		vtoctag;
1848 	int			i, j;
1849 
1850 	/* find slice with V_ALTSCTR tag */
1851 	for (alts = 0; alts < NDKMAP; alts++) {
1852 		if (cmlb_partinfo(
1853 		    dkp->dk_cmlbhandle,
1854 		    alts,
1855 		    &slcn,
1856 		    &slcb,
1857 		    NULL,
1858 		    &vtoctag,
1859 		    0)) {
1860 			goto empty;	/* no partition table exists */
1861 		}
1862 
1863 		if (vtoctag == V_ALTSCTR && slcn > 1)
1864 			break;
1865 	}
1866 	if (alts >= NDKMAP) {
1867 		goto empty;	/* no V_ALTSCTR slice defined */
1868 	}
1869 
1870 	/* read in ALTS label block */
1871 	handle = dadk_iob_alloc(DKTP_DATA, slcb, NBPSCTR, KM_SLEEP);
1872 	if (!handle) {
1873 		goto empty;
1874 	}
1875 
1876 	ap = (struct alts_parttbl *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
1877 	if (!ap || (ap->alts_sanity != ALTS_SANITY)) {
1878 		goto empty;
1879 	}
1880 
1881 	altused = ap->alts_ent_used;	/* number of BB entries */
1882 	altbase = ap->alts_ent_base;	/* blk offset from begin slice */
1883 	altlast = ap->alts_ent_end;	/* blk offset to last block */
1884 	/* ((altused * sizeof (struct alts_ent) + NBPSCTR - 1) & ~NBPSCTR) */
1885 
1886 	if (altused == 0 ||
1887 	    altbase < 1 ||
1888 	    altbase > altlast ||
1889 	    altlast >= slcn) {
1890 		goto empty;
1891 	}
1892 	(void) dadk_iob_free(DKTP_DATA, handle);
1893 
1894 	/* read in ALTS remapping table */
1895 	handle = dadk_iob_alloc(DKTP_DATA,
1896 	    slcb + altbase,
1897 	    (altlast - altbase + 1) << SCTRSHFT, KM_SLEEP);
1898 	if (!handle) {
1899 		goto empty;
1900 	}
1901 
1902 	enttblp = (struct alts_ent *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
1903 	if (!enttblp) {
1904 		goto empty;
1905 	}
1906 
1907 	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
1908 
1909 	/* allocate space for dk_slc_cnt and dk_slc_ent tables */
1910 	if (dkp->dk_slc_cnt == NULL) {
1911 		dkp->dk_slc_cnt = kmem_alloc(NDKMAP *
1912 		    (sizeof (long) + sizeof (struct alts_ent *)), KM_SLEEP);
1913 	}
1914 	dkp->dk_slc_ent = (struct alts_ent **)(dkp->dk_slc_cnt + NDKMAP);
1915 
1916 	/* free previous BB table (if any) */
1917 	if (dkp->dk_alts_hdl) {
1918 		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
1919 		dkp->dk_alts_hdl = NULL;
1920 		dkp->dk_altused = 0;
1921 	}
1922 
1923 	/* save linkage to new BB table */
1924 	dkp->dk_alts_hdl = handle;
1925 	dkp->dk_altused = altused;
1926 
1927 	/*
1928 	 * build indexes to BB table by slice
1929 	 * effectively we have
1930 	 *	struct alts_ent *enttblp[altused];
1931 	 *
1932 	 *	uint32_t	dk_slc_cnt[NDKMAP];
1933 	 *	struct alts_ent *dk_slc_ent[NDKMAP];
1934 	 */
1935 	for (i = 0; i < NDKMAP; i++) {
1936 		if (cmlb_partinfo(
1937 		    dkp->dk_cmlbhandle,
1938 		    i,
1939 		    &slcn,
1940 		    &slcb,
1941 		    NULL,
1942 		    NULL,
1943 		    0)) {
1944 			goto empty1;
1945 		}
1946 
1947 		dkp->dk_slc_cnt[i] = 0;
1948 		if (slcn == 0)
1949 			continue;	/* slice is not allocated */
1950 
1951 		/* last block in slice */
1952 		slce = slcb + slcn - 1;
1953 
1954 		/* find first remap entry in after beginnning of slice */
1955 		for (j = 0; j < altused; j++) {
1956 			if (enttblp[j].bad_start + enttblp[j].bad_end >= slcb)
1957 				break;
1958 		}
1959 		dkp->dk_slc_ent[i] = enttblp + j;
1960 
1961 		/* count remap entrys until end of slice */
1962 		for (; j < altused && enttblp[j].bad_start <= slce; j++) {
1963 			dkp->dk_slc_cnt[i] += 1;
1964 		}
1965 	}
1966 
1967 	rw_exit(&dkp->dk_bbh_mutex);
1968 	return;
1969 
1970 empty:
1971 	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
1972 empty1:
1973 	if (handle && handle != dkp->dk_alts_hdl)
1974 		(void) dadk_iob_free(DKTP_DATA, handle);
1975 
1976 	if (dkp->dk_alts_hdl) {
1977 		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
1978 		dkp->dk_alts_hdl = NULL;
1979 	}
1980 
1981 	rw_exit(&dkp->dk_bbh_mutex);
1982 }
1983 
1984 /*ARGSUSED*/
1985 static bbh_cookie_t
1986 cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle)
1987 {
1988 	struct	bbh_handle *hp;
1989 	bbh_cookie_t ckp;
1990 
1991 	hp = (struct  bbh_handle *)handle;
1992 	ckp = hp->h_cktab + hp->h_idx;
1993 	hp->h_idx++;
1994 	return (ckp);
1995 }
1996 
1997 /*ARGSUSED*/
1998 static void
1999 cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle)
2000 {
2001 	struct	bbh_handle *hp;
2002 
2003 	hp = (struct  bbh_handle *)handle;
2004 	kmem_free(handle, (sizeof (struct bbh_handle) +
2005 	    (hp->h_totck * (sizeof (struct bbh_cookie)))));
2006 }
2007 
2008 
2009 /*
2010  *	cmdk_bbh_gethandle remaps the bad sectors to alternates.
2011  *	There are 7 different cases when the comparison is made
2012  *	between the bad sector cluster and the disk section.
2013  *
2014  *	bad sector cluster	gggggggggggbbbbbbbggggggggggg
2015  *	case 1:			   ddddd
2016  *	case 2:				   -d-----
2017  *	case 3:					     ddddd
2018  *	case 4:			         dddddddddddd
2019  *	case 5:			      ddddddd-----
2020  *	case 6:			           ---ddddddd
2021  *	case 7:			           ddddddd
2022  *
2023  *	where:  g = good sector,	b = bad sector
2024  *		d = sector in disk section
2025  *		- = disk section may be extended to cover those disk area
2026  */
2027 
static opaque_t
cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp)
{
	struct cmdk		*dkp = (struct cmdk *)bbh_data;
	struct bbh_handle	*hp;
	struct bbh_cookie	*ckp;
	struct alts_ent		*altp;
	uint32_t		alts_used;
	uint32_t		part = CMDKPART(bp->b_edev);
	daddr32_t		lastsec;
	long			d_count;
	int			i;
	int			idx;
	int			cnt;

	if (part >= V_NUMPAR)
		return (NULL);

	/*
	 * This if statement is atomic and it will succeed
	 * if there are no bad blocks (almost always)
	 *
	 * so this if is performed outside of the rw_enter for speed
	 * and then repeated inside the rw_enter for safety
	 */
	if (!dkp->dk_alts_hdl) {
		return (NULL);
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_READER);

	if (dkp->dk_alts_hdl == NULL) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	/* no remapped sectors in this slice: nothing to do */
	alts_used = dkp->dk_slc_cnt[part];
	if (alts_used == 0) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	altp = dkp->dk_slc_ent[part];

	/*
	 * binary search for the largest bad sector index in the alternate
	 * entry table which overlaps or larger than the starting d_sec
	 */
	i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp));
	/* if starting sector is > the largest bad sector, return */
	if (i == -1) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	/* i is the starting index.  Set altp to the starting entry addr */
	altp += i;

	d_count = bp->b_bcount >> SCTRSHFT;
	lastsec = GET_BP_SEC(bp) + d_count - 1;

	/* calculate the number of bad sectors */
	for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) {
		if (lastsec < altp->bad_start)
			break;
	}

	/* no bad-sector cluster actually overlaps the request */
	if (!cnt) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	/*
	 * calculate the maximum number of reserved cookies: each
	 * overlapping cluster can split a run into at most two more
	 * pieces (2 * cnt), plus one for the initial run.
	 */
	cnt <<= 1;
	cnt++;

	/* allocate the handle with the cookie table appended after it */
	hp = (struct bbh_handle *)kmem_zalloc((sizeof (*hp) +
	    (cnt * sizeof (*ckp))), KM_SLEEP);

	hp->h_idx = 0;
	hp->h_totck = cnt;
	ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1);
	ckp[0].ck_sector = GET_BP_SEC(bp);
	ckp[0].ck_seclen = d_count;

	/*
	 * Walk the overlapping clusters, splitting/redirecting the request
	 * per the 7 cases documented above.  idx indexes the cookie being
	 * built while i walks the alternate-entry table (idx advances only
	 * when a split produces an additional cookie).
	 */
	altp = dkp->dk_slc_ent[part];
	altp += i;
	for (idx = 0; i < alts_used; i++, altp++) {
		/* CASE 1: */
		if (lastsec < altp->bad_start)
			break;

		/* CASE 3: */
		if (ckp[idx].ck_sector > altp->bad_end)
			continue;

		/* CASE 2 and 7: */
		if ((ckp[idx].ck_sector >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			break;
		}

		/* at least one bad sector in our section.  break it. */
		/* CASE 5: */
		if ((lastsec >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx+1].ck_seclen = lastsec - altp->bad_start + 1;
			ckp[idx].ck_seclen -= ckp[idx+1].ck_seclen;
			ckp[idx+1].ck_sector = altp->good_start;
			break;
		}
		/* CASE 6: */
		if ((ckp[idx].ck_sector <= altp->bad_end) &&
		    (ckp[idx].ck_sector >= altp->bad_start)) {
			ckp[idx+1].ck_seclen = ckp[idx].ck_seclen;
			ckp[idx].ck_seclen = altp->bad_end -
			    ckp[idx].ck_sector + 1;
			ckp[idx+1].ck_seclen -= ckp[idx].ck_seclen;
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			idx++;
			ckp[idx].ck_sector = altp->bad_end + 1;
			continue;	/* check rest of section */
		}

		/* CASE 4: */
		ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector;
		ckp[idx+1].ck_sector = altp->good_start;
		ckp[idx+1].ck_seclen = altp->bad_end - altp->bad_start + 1;
		idx += 2;
		ckp[idx].ck_sector = altp->bad_end + 1;
		ckp[idx].ck_seclen = lastsec - altp->bad_end;
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return ((opaque_t)hp);
}
2166 
/*
 * Binary search over the sorted bad-sector entry table.
 * Returns the index of the entry whose [bad_start, bad_end] range
 * contains `key', or else the index of the smallest entry whose
 * bad_start exceeds `key' (so the caller can scan forward for
 * overlaps), or -1 when key is beyond every entry.
 */
static int
cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key)
{
	int	i;
	int	ind;
	int	interval;
	int	mystatus = -1;

	if (!cnt)
		return (mystatus);

	/* start ind at the largest power of two <= cnt (1-based probe) */
	ind = 1; /* compiler complains about possible uninitialized var	*/
	for (i = 1; i <= cnt; i <<= 1)
		ind = i;

	/* halve the probe interval each round until it reaches zero */
	for (interval = ind; interval; ) {
		if ((key >= buf[ind-1].bad_start) &&
		    (key <= buf[ind-1].bad_end)) {
			return (ind-1);
		} else {
			interval >>= 1;
			if (key < buf[ind-1].bad_start) {
				/* record the largest bad sector index */
				mystatus = ind-1;
				if (!interval)
					break;
				ind = ind - interval;
			} else {
				/*
				 * if key is larger than the last element
				 * then break
				 */
				if ((ind == cnt) || !interval)
					break;
				if ((ind+interval) <= cnt)
					ind += interval;
			}
		}
	}
	return (mystatus);
}
2208