xref: /titanic_44/usr/src/uts/sun4u/ngdr/io/dr.c (revision 587032cf0967234b39ccb50adca936a367841063)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * PIM-DR layer of DR driver.  Provides interface between user
30  * level applications and the PSM-DR layer.
31  */
32 
33 #include <sys/note.h>
34 #include <sys/debug.h>
35 #include <sys/types.h>
36 #include <sys/errno.h>
37 #include <sys/cred.h>
38 #include <sys/dditypes.h>
39 #include <sys/devops.h>
40 #include <sys/modctl.h>
41 #include <sys/poll.h>
42 #include <sys/conf.h>
43 #include <sys/ddi.h>
44 #include <sys/sunddi.h>
45 #include <sys/sunndi.h>
46 #include <sys/stat.h>
47 #include <sys/kmem.h>
48 #include <sys/processor.h>
49 #include <sys/cpuvar.h>
50 #include <sys/mem_config.h>
51 
52 #include <sys/autoconf.h>
53 #include <sys/cmn_err.h>
54 
55 #include <sys/ddi_impldefs.h>
56 #include <sys/promif.h>
57 #include <sys/machsystm.h>
58 
59 #include <sys/dr.h>
60 #include <sys/drmach.h>
61 #include <sys/dr_util.h>
62 
63 extern int		 nulldev();
64 extern int		 nodev();
65 extern struct memlist	*phys_install;
66 
67 #ifdef DEBUG
68 uint_t	dr_debug = 0;			/* dr.h for bit values */
69 #endif /* DEBUG */
70 
71 /*
72  * NOTE: state_str, nt_str and SBD_CMD_STR are only used in a debug
73  * kernel.  They are, however, referenced during both debug and non-debug
74  * compiles.
75  */
76 
77 static char *state_str[] = {
78 	"EMPTY", "OCCUPIED", "CONNECTED", "UNCONFIGURED",
79 	"PARTIAL", "CONFIGURED", "RELEASE", "UNREFERENCED",
80 	"FATAL"
81 };
82 
83 #define	SBD_CMD_STR(c) \
84 	(((c) == SBD_CMD_ASSIGN)	? "ASSIGN"	: \
85 	((c) == SBD_CMD_UNASSIGN)	? "UNASSIGN"	: \
86 	((c) == SBD_CMD_POWERON)	? "POWERON"	: \
87 	((c) == SBD_CMD_POWEROFF)	? "POWEROFF"	: \
88 	((c) == SBD_CMD_TEST)		? "TEST"	: \
89 	((c) == SBD_CMD_CONNECT)	? "CONNECT"	: \
90 	((c) == SBD_CMD_DISCONNECT)	? "DISCONNECT"	: \
91 	((c) == SBD_CMD_CONFIGURE)	? "CONFIGURE"	: \
92 	((c) == SBD_CMD_UNCONFIGURE)	? "UNCONFIGURE"	: \
93 	((c) == SBD_CMD_GETNCM)		? "GETNCM"	: \
94 	((c) == SBD_CMD_PASSTHRU)	? "PASSTHRU"	: \
95 	((c) == SBD_CMD_STATUS)		? "STATUS"	: "unknown")
96 
97 #define	DR_GET_BOARD_DEVUNIT(sb, ut, un) (&((sb)->b_dev[NIX(ut)][un]))
98 
99 #define	DR_MAKE_MINOR(i, b)	(((i) << 16) | (b))
100 #define	DR_MINOR2INST(m)	(((m) >> 16) & 0xffff)
101 #define	DR_MINOR2BNUM(m)	((m) & 0xffff)
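/*
 * For example, with instance 1 and board 3, DR_MAKE_MINOR(1, 3) yields
 * 0x10003; DR_MINOR2INST(0x10003) recovers 1 and DR_MINOR2BNUM(0x10003)
 * recovers 3.
 */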
102 
103 /* for the DR*INTERNAL_ERROR macros.  see sys/dr.h. */
104 static char *dr_ie_fmt = "dr.c %d";
105 
106 /* struct for drmach device name to sbd_comp_type_t mapping */
107 typedef	struct {
108 	char		*s_devtype;
109 	sbd_comp_type_t	s_nodetype;
110 } dr_devname_t;
111 
112 /* table mapping drmach device-type names to sbd_comp_type_t */
113 static	dr_devname_t	dr_devattr[] = {
114 	{ DRMACH_DEVTYPE_MEM,	SBD_COMP_MEM },
115 	{ DRMACH_DEVTYPE_CPU,	SBD_COMP_CPU },
116 	{ DRMACH_DEVTYPE_PCI,	SBD_COMP_IO },
117 #if defined(DRMACH_DEVTYPE_SBUS)
118 	{ DRMACH_DEVTYPE_SBUS,	SBD_COMP_IO },
119 #endif
120 #if defined(DRMACH_DEVTYPE_WCI)
121 	{ DRMACH_DEVTYPE_WCI,	SBD_COMP_IO },
122 #endif
123 	/* last s_devtype must be NULL, s_nodetype must be SBD_COMP_UNKNOWN */
124 	{ NULL,			SBD_COMP_UNKNOWN }
125 };
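/*
 * dr_dev_type_to_nt() (declared within dr_ioctl() below) presumably walks
 * this table to translate a canonical device-type name into its
 * sbd_comp_type_t; the NULL sentinel entry terminates that search.
 */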
126 
127 /*
128  * Per instance soft-state structure.
129  */
130 typedef struct dr_softstate {
131 	dev_info_t	*dip;
132 	dr_board_t	*boards;
133 	kmutex_t	i_lock;
134 	int		 dr_initialized;
135 } dr_softstate_t;
136 
137 /*
138  * dr Global data elements
139  */
140 struct dr_global {
141 	dr_softstate_t	*softsp;	/* opaque handle for per-instance soft state */
142 	kmutex_t	lock;
143 } dr_g;
144 
145 dr_unsafe_devs_t	dr_unsafe_devs;
146 
147 /*
148  * Table of known passthru commands.
149  */
150 
151 struct {
152 	char	*pt_name;
153 	int	(*pt_func)(dr_handle_t *);
154 } pt_arr[] = {
155 	{ "quiesce",		dr_pt_test_suspend },
156 };
157 
158 int dr_modunload_okay = 0;		/* set to non-zero to allow unload */
159 
160 /*
161  * State transition table.  Lists the valid transitions for "board" state.
162  * Recall that a non-zero return value terminates the operation; however,
163  * the h_err value is what really indicates an error, if any.
164  */
165 static int
166 _cmd2index(int c)
167 {
168 	/*
169 	 * Translate DR CMD to index into dr_state_transition.
170 	 */
171 	switch (c) {
172 	case SBD_CMD_CONNECT:		return (0);
173 	case SBD_CMD_DISCONNECT:	return (1);
174 	case SBD_CMD_CONFIGURE:		return (2);
175 	case SBD_CMD_UNCONFIGURE:	return (3);
176 	case SBD_CMD_ASSIGN:		return (4);
177 	case SBD_CMD_UNASSIGN:		return (5);
178 	case SBD_CMD_POWERON:		return (6);
179 	case SBD_CMD_POWEROFF:		return (7);
180 	case SBD_CMD_TEST:		return (8);
181 	default:			return (-1);
182 	}
183 }
184 
185 #define	CMD2INDEX(c)	_cmd2index(c)
186 
187 static struct dr_state_trans {
188 	int	x_cmd;
189 	struct {
190 		int	x_rv;		/* return value of pre_op */
191 		int	x_err;		/* error, if any */
192 	} x_op[DR_STATE_MAX];
193 } dr_state_transition[] = {
194 	{ SBD_CMD_CONNECT,
195 		{
196 			{ 0, 0 },			/* empty */
197 			{ 0, 0 },			/* occupied */
198 			{ -1, ESBD_STATE },		/* connected */
199 			{ -1, ESBD_STATE },		/* unconfigured */
200 			{ -1, ESBD_STATE },		/* partial */
201 			{ -1, ESBD_STATE },		/* configured */
202 			{ -1, ESBD_STATE },		/* release */
203 			{ -1, ESBD_STATE },		/* unreferenced */
204 			{ -1, ESBD_FATAL_STATE },	/* fatal */
205 		}
206 	},
207 	{ SBD_CMD_DISCONNECT,
208 		{
209 			{ -1, ESBD_STATE },		/* empty */
210 			{ 0, 0 },			/* occupied */
211 			{ 0, 0 },			/* connected */
212 			{ 0, 0 },			/* unconfigured */
213 			{ -1, ESBD_STATE },		/* partial */
214 			{ -1, ESBD_STATE },		/* configured */
215 			{ -1, ESBD_STATE },		/* release */
216 			{ -1, ESBD_STATE },		/* unreferenced */
217 			{ -1, ESBD_FATAL_STATE },	/* fatal */
218 		}
219 	},
220 	{ SBD_CMD_CONFIGURE,
221 		{
222 			{ -1, ESBD_STATE },		/* empty */
223 			{ -1, ESBD_STATE },		/* occupied */
224 			{ 0, 0 },			/* connected */
225 			{ 0, 0 },			/* unconfigured */
226 			{ 0, 0 },			/* partial */
227 			{ 0, 0 },			/* configured */
228 			{ -1, ESBD_STATE },		/* release */
229 			{ -1, ESBD_STATE },		/* unreferenced */
230 			{ -1, ESBD_FATAL_STATE },	/* fatal */
231 		}
232 	},
233 	{ SBD_CMD_UNCONFIGURE,
234 		{
235 			{ -1, ESBD_STATE },		/* empty */
236 			{ -1, ESBD_STATE },		/* occupied */
237 			{ -1, ESBD_STATE },		/* connected */
238 			{ -1, ESBD_STATE },		/* unconfigured */
239 			{ 0, 0 },			/* partial */
240 			{ 0, 0 },			/* configured */
241 			{ 0, 0 },			/* release */
242 			{ 0, 0 },			/* unreferenced */
243 			{ -1, ESBD_FATAL_STATE },	/* fatal */
244 		}
245 	},
246 	{ SBD_CMD_ASSIGN,
247 		{
248 			{ 0, 0 },			/* empty */
249 			{ 0, 0 },			/* occupied */
250 			{ -1, ESBD_STATE },		/* connected */
251 			{ -1, ESBD_STATE },		/* unconfigured */
252 			{ -1, ESBD_STATE },		/* partial */
253 			{ -1, ESBD_STATE },		/* configured */
254 			{ -1, ESBD_STATE },		/* release */
255 			{ -1, ESBD_STATE },		/* unreferenced */
256 			{ -1, ESBD_FATAL_STATE },	/* fatal */
257 		}
258 	},
259 	{ SBD_CMD_UNASSIGN,
260 		{
261 			{ 0, 0 },			/* empty */
262 			{ 0, 0 },			/* occupied */
263 			{ -1, ESBD_STATE },		/* connected */
264 			{ -1, ESBD_STATE },		/* unconfigured */
265 			{ -1, ESBD_STATE },		/* partial */
266 			{ -1, ESBD_STATE },		/* configured */
267 			{ -1, ESBD_STATE },		/* release */
268 			{ -1, ESBD_STATE },		/* unreferenced */
269 			{ -1, ESBD_FATAL_STATE },	/* fatal */
270 		}
271 	},
272 	{ SBD_CMD_POWERON,
273 		{
274 			{ 0, 0 },			/* empty */
275 			{ 0, 0 },			/* occupied */
276 			{ -1, ESBD_STATE },		/* connected */
277 			{ -1, ESBD_STATE },		/* unconfigured */
278 			{ -1, ESBD_STATE },		/* partial */
279 			{ -1, ESBD_STATE },		/* configured */
280 			{ -1, ESBD_STATE },		/* release */
281 			{ -1, ESBD_STATE },		/* unreferenced */
282 			{ -1, ESBD_FATAL_STATE },	/* fatal */
283 		}
284 	},
285 	{ SBD_CMD_POWEROFF,
286 		{
287 			{ 0, 0 },			/* empty */
288 			{ 0, 0 },			/* occupied */
289 			{ -1, ESBD_STATE },		/* connected */
290 			{ -1, ESBD_STATE },		/* unconfigured */
291 			{ -1, ESBD_STATE },		/* partial */
292 			{ -1, ESBD_STATE },		/* configured */
293 			{ -1, ESBD_STATE },		/* release */
294 			{ -1, ESBD_STATE },		/* unreferenced */
295 			{ -1, ESBD_FATAL_STATE },	/* fatal */
296 		}
297 	},
298 	{ SBD_CMD_TEST,
299 		{
300 			{ 0, 0 },			/* empty */
301 			{ 0, 0 },			/* occupied */
302 			{ -1, ESBD_STATE },		/* connected */
303 			{ -1, ESBD_STATE },		/* unconfigured */
304 			{ -1, ESBD_STATE },		/* partial */
305 			{ -1, ESBD_STATE },		/* configured */
306 			{ -1, ESBD_STATE },		/* release */
307 			{ -1, ESBD_STATE },		/* unreferenced */
308 			{ -1, ESBD_FATAL_STATE },	/* fatal */
309 		}
310 	},
311 };
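/*
 * Example: the SBD_CMD_CONNECT row above maps the CONNECTED board state to
 * { -1, ESBD_STATE }, so dr_pre_op() fails a connect attempt on an already
 * connected board and reports ESBD_STATE.
 */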
312 
313 /*
314  * Global R/W lock to synchronize access across
315  * multiple boards.  Users wanting multi-board access
316  * must grab WRITE lock, others must grab READ lock.
317  */
318 krwlock_t	dr_grwlock;
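/*
 * For example, dr_ioctl() below enters dr_grwlock as RW_WRITER and then
 * downgrades to a reader via rw_downgrade() when the requested devset does
 * not involve memory (and hence only one board is affected).
 */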
319 
320 /*
321  * Head of the boardlist used as a reference point for
322  * locating board structs.
323  * TODO: eliminate dr_boardlist
324  */
325 dr_board_t	*dr_boardlist;
326 
327 /*
328  * DR support functions.
329  */
330 static dr_devset_t	dr_dev2devset(sbd_comp_id_t *cid);
331 static int		dr_check_transition(dr_board_t *bp,
332 					dr_devset_t *devsetp,
333 					struct dr_state_trans *transp,
334 					int cmd);
335 static int		dr_check_unit_attached(dr_common_unit_t *dp);
336 static sbd_error_t	*dr_init_devlists(dr_board_t *bp);
337 static void		dr_board_discovery(dr_board_t *bp);
338 static int		dr_board_init(dr_board_t *bp, dev_info_t *dip,
339 					int bd);
340 static void		dr_board_destroy(dr_board_t *bp);
341 static void		dr_board_transition(dr_board_t *bp, dr_state_t st);
342 
343 /*
344  * DR driver (DDI) entry points.
345  */
346 static int	dr_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,
347 				void *arg, void **result);
348 static int	dr_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
349 static int	dr_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
350 static int	dr_probe(dev_info_t *dip);
351 static int	dr_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
352 				cred_t *cred_p, int *rval_p);
353 static int	dr_close(dev_t dev, int flag, int otyp, cred_t *cred_p);
354 static int	dr_open(dev_t *dev, int flag, int otyp, cred_t *cred_p);
355 
356 /*
357  * DR command processing operations.
358  */
359 
360 static int	dr_copyin_iocmd(dr_handle_t *hp);
361 static int	dr_copyout_iocmd(dr_handle_t *hp);
362 static int	dr_copyout_errs(dr_handle_t *hp);
363 static int	dr_pre_op(dr_handle_t *hp);
364 static int	dr_post_op(dr_handle_t *hp);
365 static int	dr_exec_op(dr_handle_t *hp);
366 static void	dr_assign_board(dr_handle_t *hp);
367 static void	dr_unassign_board(dr_handle_t *hp);
368 static void	dr_connect(dr_handle_t *hp);
369 static int	dr_disconnect(dr_handle_t *hp);
370 static void	dr_dev_configure(dr_handle_t *hp);
371 static void	dr_dev_release(dr_handle_t *hp);
372 static int	dr_dev_unconfigure(dr_handle_t *hp);
373 static void	dr_dev_cancel(dr_handle_t *hp);
374 static int	dr_dev_status(dr_handle_t *hp);
375 static int	dr_get_ncm(dr_handle_t *hp);
376 static int	dr_pt_ioctl(dr_handle_t *hp);
377 static void	dr_poweron_board(dr_handle_t *hp);
378 static void	dr_poweroff_board(dr_handle_t *hp);
379 static void	dr_test_board(dr_handle_t *hp);
380 
381 
382 
383 /*
384  * Autoconfiguration data structures
385  */
386 
387 struct cb_ops dr_cb_ops = {
388 	dr_open,	/* open */
389 	dr_close,	/* close */
390 	nodev,		/* strategy */
391 	nodev,		/* print */
392 	nodev,		/* dump */
393 	nodev,		/* read */
394 	nodev,		/* write */
395 	dr_ioctl,	/* ioctl */
396 	nodev,		/* devmap */
397 	nodev,		/* mmap */
398 	nodev,		/* segmap */
399 	nochpoll,	/* chpoll */
400 	ddi_prop_op,	/* cb_prop_op */
401 	NULL,		/* struct streamtab */
402 	D_NEW | D_MP | D_MTSAFE,	/* compatibility flags */
403 	CB_REV,		/* Rev */
404 	nodev,		/* cb_aread */
405 	nodev		/* cb_awrite */
406 };
407 
408 struct dev_ops dr_dev_ops = {
409 	DEVO_REV,	/* build version */
410 	0,		/* dev ref count */
411 	dr_getinfo,	/* getinfo */
412 	nulldev,	/* identify */
413 	dr_probe,	/* probe */
414 	dr_attach,	/* attach */
415 	dr_detach,	/* detach */
416 	nodev,		/* reset */
417 	&dr_cb_ops,	/* cb_ops */
418 	(struct bus_ops *)NULL, /* bus ops */
419 	NULL		/* power */
420 };
421 
422 extern struct mod_ops mod_driverops;
423 
424 static struct modldrv modldrv = {
425 	&mod_driverops,
426 	"Dynamic Reconfiguration %I%",
427 	&dr_dev_ops
428 };
429 
430 static struct modlinkage modlinkage = {
431 	MODREV_1,
432 	(void *)&modldrv,
433 	NULL
434 };
435 
436 /*
437  * Driver entry points.
438  */
439 int
440 _init(void)
441 {
442 	int	err;
443 
444 	/*
445 	 * If you need to support multiple nodes (instances), then
446 	 * whatever the maximum number of supported nodes is would
447 	 * need to be passed as the third parameter to ddi_soft_state_init().
448 	 * An alternative would be to dynamically fini and re-init the
449 	 * soft state structure each time a node is attached.
450 	 */
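	/*
	 * A minimal sketch of the multi-node case (max_instances is a
	 * hypothetical value, not defined by this driver):
	 *
	 *	err = ddi_soft_state_init((void **)&dr_g.softsp,
	 *		sizeof (dr_softstate_t), max_instances);
	 */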
451 	err = ddi_soft_state_init((void **)&dr_g.softsp,
452 		sizeof (dr_softstate_t), 1);
453 	if (err)
454 		return (err);
455 
456 	mutex_init(&dr_g.lock, NULL, MUTEX_DRIVER, NULL);
457 	rw_init(&dr_grwlock, NULL, RW_DEFAULT, NULL);
458 
459 	return (mod_install(&modlinkage));
460 }
461 
462 int
463 _fini(void)
464 {
465 	int	err;
466 
467 	if ((err = mod_remove(&modlinkage)) != 0)
468 		return (err);
469 
470 	mutex_destroy(&dr_g.lock);
471 	rw_destroy(&dr_grwlock);
472 
473 	ddi_soft_state_fini((void **)&dr_g.softsp);
474 
475 	return (0);
476 }
477 
478 int
479 _info(struct modinfo *modinfop)
480 {
481 	return (mod_info(&modlinkage, modinfop));
482 }
483 
484 /*ARGSUSED1*/
485 static int
486 dr_open(dev_t *dev, int flag, int otyp, cred_t *cred_p)
487 {
488 	int		 instance;
489 	dr_softstate_t	*softsp;
490 	dr_board_t	*bp;
491 	/*
492 	 * Don't open unless we've attached.
493 	 */
494 	instance = DR_MINOR2INST(getminor(*dev));
495 	softsp = ddi_get_soft_state(dr_g.softsp, instance);
496 	if (softsp == NULL)
497 		return (ENXIO);
498 
499 	mutex_enter(&softsp->i_lock);
500 	if (!softsp->dr_initialized) {
501 		int		 bd;
502 		int		 rv = 0;
503 
504 		bp = softsp->boards;
505 
506 		/* initialize each array element */
507 		for (bd = 0; bd < MAX_BOARDS; bd++, bp++) {
508 			rv = dr_board_init(bp, softsp->dip, bd);
509 			if (rv)
510 				break;
511 		}
512 
513 		if (rv == 0) {
514 			softsp->dr_initialized = 1;
515 		} else {
516 			/* destroy elements initialized thus far */
517 			while (--bp >= softsp->boards)
518 				dr_board_destroy(bp);
519 
520 
521 			/* TODO: should this be another errno val ? */
522 			mutex_exit(&softsp->i_lock);
523 			return (ENXIO);
524 		}
525 	}
526 	mutex_exit(&softsp->i_lock);
527 
528 	bp = &softsp->boards[DR_MINOR2BNUM(getminor(*dev))];
529 
530 	/*
531 	 * prevent opening of a dyn-ap for a board
532 	 * that does not exist
533 	 */
534 	if (!bp->b_assigned) {
535 		if (drmach_board_lookup(bp->b_num, &bp->b_id) != 0)
536 			return (ENODEV);
537 	}
538 
539 	return (0);
540 }
541 
542 /*ARGSUSED*/
543 static int
544 dr_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
545 {
546 	return (0);
547 }
548 
549 /*
550  * Enable/disable DR features.
551  */
552 int dr_enable = 1;
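/*
 * When dr_enable is cleared (e.g., via a hypothetical /etc/system entry
 * such as "set dr:dr_enable = 0"), dr_ioctl() below honors only the
 * SBD_CMD_STATUS, SBD_CMD_GETNCM and SBD_CMD_PASSTHRU commands.
 */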
553 
554 /*ARGSUSED3*/
555 static int
556 dr_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
557 	cred_t *cred_p, int *rval_p)
558 {
559 	static int	dr_dev_type_to_nt(char *);
560 
561 	int		rv = 0;
562 	int		instance;
563 	int		bd;
564 	dr_handle_t	*hp;
565 	dr_softstate_t	*softsp;
566 	static fn_t	f = "dr_ioctl";
567 
568 	PR_ALL("%s...\n", f);
569 
570 	instance = DR_MINOR2INST(getminor(dev));
571 	softsp = ddi_get_soft_state(dr_g.softsp, instance);
572 	if (softsp == NULL) {
573 		cmn_err(CE_WARN, "dr%d: module not yet attached", instance);
574 		return (ENXIO);
575 	}
576 
577 	if (!dr_enable) {
578 		switch (cmd) {
579 			case SBD_CMD_STATUS:
580 			case SBD_CMD_GETNCM:
581 			case SBD_CMD_PASSTHRU:
582 				break;
583 			default:
584 				return (ENOTSUP);
585 		}
586 	}
587 
588 	bd = DR_MINOR2BNUM(getminor(dev));
589 	if (bd >= MAX_BOARDS)
590 		return (ENXIO);
591 
592 	/* get and initialize storage for new handle */
593 	hp = GETSTRUCT(dr_handle_t, 1);
594 	hp->h_bd = &softsp->boards[bd];
595 	hp->h_err = NULL;
596 	hp->h_dev = getminor(dev);
597 	hp->h_cmd = cmd;
598 	hp->h_mode = mode;
599 	hp->h_iap = (sbd_ioctl_arg_t *)arg;
600 
601 	/* copy sbd command into handle */
602 	rv = dr_copyin_iocmd(hp);
603 	if (rv) {
604 		FREESTRUCT(hp, dr_handle_t, 1);
605 		return (EINVAL);
606 	}
607 
608 	/* translate canonical name to component type */
609 	if (hp->h_sbdcmd.cmd_cm.c_id.c_name[0] != '\0') {
610 		hp->h_sbdcmd.cmd_cm.c_id.c_type =
611 			dr_dev_type_to_nt(hp->h_sbdcmd.cmd_cm.c_id.c_name);
612 
613 		PR_ALL("%s: c_name = %s, c_type = %d\n",
614 			f,
615 			hp->h_sbdcmd.cmd_cm.c_id.c_name,
616 			hp->h_sbdcmd.cmd_cm.c_id.c_type);
617 	} else {
618 		/*EMPTY*/
619 		PR_ALL("%s: c_name is NULL\n", f);
620 	}
621 
622 	/* determine scope of operation */
623 	hp->h_devset = dr_dev2devset(&hp->h_sbdcmd.cmd_cm.c_id);
624 
625 	switch (hp->h_cmd) {
626 	case SBD_CMD_STATUS:
627 	case SBD_CMD_GETNCM:
628 		/* no locks needed for these commands */
629 		break;
630 
631 	default:
632 		rw_enter(&dr_grwlock, RW_WRITER);
633 		mutex_enter(&hp->h_bd->b_lock);
634 
635 		/*
636 		 * If we're dealing with memory at all, then we have
637 		 * to keep the "exclusive" global lock held.  This is
638 		 * necessary since we will probably need to look at
639 		 * multiple board structs.  Otherwise, we only have
640 		 * to deal with the board in question and so can drop
641 		 * the global lock to "shared".
642 		 */
643 		rv = DEVSET_IN_SET(hp->h_devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
644 		if (rv == 0)
645 			rw_downgrade(&dr_grwlock);
646 		break;
647 	}
648 	rv = 0;
649 
650 	if (rv == 0)
651 		rv = dr_pre_op(hp);
652 	if (rv == 0)
653 		rv = dr_exec_op(hp);
654 	if (rv == 0)
655 		rv = dr_post_op(hp);
656 
657 	if (rv == -1)
658 		rv = EIO;
659 
660 	if (hp->h_err != NULL)
661 		if (!(rv = dr_copyout_errs(hp)))
662 			rv = EIO;
663 
664 	/* undo locking, if any, done before dr_pre_op */
665 	switch (hp->h_cmd) {
666 	case SBD_CMD_STATUS:
667 	case SBD_CMD_GETNCM:
668 		break;
669 
670 	case SBD_CMD_ASSIGN:
671 	case SBD_CMD_UNASSIGN:
672 	case SBD_CMD_POWERON:
673 	case SBD_CMD_POWEROFF:
674 	case SBD_CMD_CONNECT:
675 	case SBD_CMD_CONFIGURE:
676 	case SBD_CMD_UNCONFIGURE:
677 	case SBD_CMD_DISCONNECT:
678 		/* Board changed state. Log a sysevent. */
679 		if (rv == 0)
680 			(void) drmach_log_sysevent(hp->h_bd->b_num, "",
681 				SE_SLEEP, 1);
682 		/* Fall through */
683 
684 	default:
685 		mutex_exit(&hp->h_bd->b_lock);
686 		rw_exit(&dr_grwlock);
687 	}
688 
689 	if (hp->h_opts.size != 0)
690 		FREESTRUCT(hp->h_opts.copts, char, hp->h_opts.size);
691 
692 	FREESTRUCT(hp, dr_handle_t, 1);
693 
694 	return (rv);
695 }
696 
697 /*ARGSUSED*/
698 static int
699 dr_probe(dev_info_t *dip)
700 {
701 	return (DDI_PROBE_SUCCESS);
702 }
703 
704 static int
705 dr_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
706 {
707 	int		rv, rv2;
708 	int		bd;
709 	int 		instance;
710 	sbd_error_t	*err;
711 	dr_softstate_t	*softsp;
712 
713 	instance = ddi_get_instance(dip);
714 
715 	switch (cmd) {
716 
717 	case DDI_ATTACH:
718 
719 		rw_enter(&dr_grwlock, RW_WRITER);
720 
721 		rv = ddi_soft_state_zalloc(dr_g.softsp, instance);
722 		if (rv != DDI_SUCCESS) {
723 			cmn_err(CE_WARN, "dr%d: failed to alloc soft-state",
724 				instance);
725 			return (DDI_FAILURE);
726 		}
727 
728 		/* initialize softstate structure */
729 		softsp = ddi_get_soft_state(dr_g.softsp, instance);
730 		softsp->dip = dip;
731 
732 		mutex_init(&softsp->i_lock, NULL, MUTEX_DRIVER, NULL);
733 
734 		/* allocate board array (aka boardlist) */
735 		softsp->boards = GETSTRUCT(dr_board_t, MAX_BOARDS);
736 
737 		/* TODO: eliminate dr_boardlist */
738 		dr_boardlist = softsp->boards;
739 
740 		/* initialize each array element */
741 		rv = DDI_SUCCESS;
742 		for (bd = 0; bd < MAX_BOARDS; bd++) {
743 			dr_board_t	*bp = &softsp->boards[bd];
744 			char		*p, *name;
745 			int		 l, minor_num;
746 
747 			/*
748 			 * Initialize the board attachment point path
749 			 * (relative to pseudo) in a form immediately
750 			 * reusable as a cfgadm command argument.
751 			 * TODO: clean this up
752 			 */
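			/*
			 * For example, assuming drmach_board_name() returns
			 * a name such as "SB0", instance 0 would yield the
			 * attachment point path "dr@0:SB0".
			 */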
753 			p = bp->b_path;
754 			l = sizeof (bp->b_path);
755 			(void) snprintf(p, l, "dr@%d:", instance);
756 			while (*p != '\0') {
757 				l--;
758 				p++;
759 			}
760 
761 			name = p;
762 			err = drmach_board_name(bd, p, l);
763 			if (err) {
764 				sbd_err_clear(&err);
765 				rv = DDI_FAILURE;
766 				break;
767 			}
768 
769 			minor_num = DR_MAKE_MINOR(instance, bd);
770 			rv = ddi_create_minor_node(dip, name, S_IFCHR,
771 				minor_num, DDI_NT_SBD_ATTACHMENT_POINT, NULL);
772 			if (rv != DDI_SUCCESS)
773 				rv = DDI_FAILURE;
774 		}
775 
776 		if (rv == DDI_SUCCESS) {
777 			/*
778 			 * Announce the node's presence.
779 			 */
780 			ddi_report_dev(dip);
781 		} else {
782 			ddi_remove_minor_node(dip, NULL);
783 		}
784 		/*
785 		 * Init registered unsafe devs.
786 		 */
787 		dr_unsafe_devs.devnames = NULL;
788 		rv2 = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
789 			DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
790 			"unsupported-io-drivers", &dr_unsafe_devs.devnames,
791 			&dr_unsafe_devs.ndevs);
792 
793 		if (rv2 != DDI_PROP_SUCCESS)
794 			dr_unsafe_devs.ndevs = 0;
795 
796 		rw_exit(&dr_grwlock);
797 		return (rv);
798 
799 	default:
800 		return (DDI_FAILURE);
801 	}
802 
803 	/*NOTREACHED*/
804 }
805 
806 static int
807 dr_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
808 {
809 	int 		instance;
810 	dr_softstate_t	*softsp;
811 
812 	switch (cmd) {
813 	case DDI_DETACH:
814 		if (!dr_modunload_okay)
815 			return (DDI_FAILURE);
816 
817 		rw_enter(&dr_grwlock, RW_WRITER);
818 
819 		instance = ddi_get_instance(dip);
820 		softsp = ddi_get_soft_state(dr_g.softsp, instance);
821 
822 		/* TODO: eliminate dr_boardlist */
823 		ASSERT(softsp->boards == dr_boardlist);
824 
825 		/* remove all minor nodes */
826 		ddi_remove_minor_node(dip, NULL);
827 
828 		if (softsp->dr_initialized) {
829 			int bd;
830 
831 			for (bd = 0; bd < MAX_BOARDS; bd++)
832 				dr_board_destroy(&softsp->boards[bd]);
833 		}
834 
835 		FREESTRUCT(softsp->boards, dr_board_t, MAX_BOARDS);
836 		mutex_destroy(&softsp->i_lock);
837 		ddi_soft_state_free(dr_g.softsp, instance);
838 
839 		rw_exit(&dr_grwlock);
840 		return (DDI_SUCCESS);
841 
842 	default:
843 		return (DDI_FAILURE);
844 	}
845 	/*NOTREACHED*/
846 }
847 
848 static int
849 dr_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
850 {
851 	_NOTE(ARGUNUSED(dip))
852 
853 	dev_t		dev = (dev_t)arg;
854 	int		instance, error;
855 	dr_softstate_t	*softsp;
856 
857 	*result = NULL;
858 	error = DDI_SUCCESS;
859 	instance = DR_MINOR2INST(getminor(dev));
860 
861 	switch (cmd) {
862 	case DDI_INFO_DEVT2DEVINFO:
863 		softsp = ddi_get_soft_state(dr_g.softsp, instance);
864 		if (softsp == NULL)
865 			return (DDI_FAILURE);
866 		*result = (void *)softsp->dip;
867 		break;
868 
869 	case DDI_INFO_DEVT2INSTANCE:
870 		*result = (void *)(uintptr_t)instance;
871 		break;
872 
873 	default:
874 		error = DDI_FAILURE;
875 		break;
876 	}
877 
878 	return (error);
879 }
880 
881 /*
882  * DR operations.
883  */
884 
885 static int
886 dr_copyin_iocmd(dr_handle_t *hp)
887 {
888 	static fn_t	f = "dr_copyin_iocmd";
889 	sbd_cmd_t	*scp = &hp->h_sbdcmd;
890 
891 	if (hp->h_iap == NULL)
892 		return (EINVAL);
893 
894 	bzero((caddr_t)scp, sizeof (sbd_cmd_t));
895 
896 #ifdef _MULTI_DATAMODEL
897 	if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
898 		sbd_cmd32_t	scmd32;
899 
900 		bzero((caddr_t)&scmd32, sizeof (sbd_cmd32_t));
901 
902 		if (ddi_copyin((void *)hp->h_iap, (void *)&scmd32,
903 			sizeof (sbd_cmd32_t), hp->h_mode)) {
904 			cmn_err(CE_WARN,
905 				"%s: (32bit) failed to copyin "
906 					"sbdcmd-struct", f);
907 			return (EFAULT);
908 		}
909 		scp->cmd_cm.c_id.c_type = scmd32.cmd_cm.c_id.c_type;
910 		scp->cmd_cm.c_id.c_unit = scmd32.cmd_cm.c_id.c_unit;
911 		bcopy(&scmd32.cmd_cm.c_id.c_name[0],
912 			&scp->cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
913 		scp->cmd_cm.c_flags = scmd32.cmd_cm.c_flags;
914 		scp->cmd_cm.c_len = scmd32.cmd_cm.c_len;
915 		scp->cmd_cm.c_opts = (caddr_t)(uintptr_t)scmd32.cmd_cm.c_opts;
916 
917 		switch (hp->h_cmd) {
918 		case SBD_CMD_STATUS:
919 			scp->cmd_stat.s_nbytes = scmd32.cmd_stat.s_nbytes;
920 			scp->cmd_stat.s_statp =
921 				(caddr_t)(uintptr_t)scmd32.cmd_stat.s_statp;
922 			break;
923 		default:
924 			break;
925 
926 		}
927 	} else
928 #endif /* _MULTI_DATAMODEL */
929 	if (ddi_copyin((void *)hp->h_iap, (void *)scp,
930 		sizeof (sbd_cmd_t), hp->h_mode) != 0) {
931 		cmn_err(CE_WARN,
932 			"%s: failed to copyin sbdcmd-struct", f);
933 		return (EFAULT);
934 	}
935 
936 	if ((hp->h_opts.size = scp->cmd_cm.c_len) != 0) {
937 		hp->h_opts.copts = GETSTRUCT(char, scp->cmd_cm.c_len + 1);
938 		++hp->h_opts.size;
939 		if (ddi_copyin((void *)scp->cmd_cm.c_opts,
940 			(void *)hp->h_opts.copts,
941 			scp->cmd_cm.c_len, hp->h_mode) != 0) {
942 			cmn_err(CE_WARN, "%s: failed to copyin options", f);
943 			return (EFAULT);
944 		}
945 	}
946 	return (0);
947 }
948 
949 static int
950 dr_copyout_iocmd(dr_handle_t *hp)
951 {
952 	static fn_t	f = "dr_copyout_iocmd";
953 	sbd_cmd_t	*scp = &hp->h_sbdcmd;
954 
955 	if (hp->h_iap == NULL)
956 		return (EINVAL);
957 
958 #ifdef _MULTI_DATAMODEL
959 	if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
960 		sbd_cmd32_t	scmd32;
961 
962 		scmd32.cmd_cm.c_id.c_type = scp->cmd_cm.c_id.c_type;
963 		scmd32.cmd_cm.c_id.c_unit = scp->cmd_cm.c_id.c_unit;
964 		bcopy(&scp->cmd_cm.c_id.c_name[0],
965 			&scmd32.cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
966 
967 		scmd32.cmd_cm.c_flags = scp->cmd_cm.c_flags;
968 		scmd32.cmd_cm.c_len = scp->cmd_cm.c_len;
969 		scmd32.cmd_cm.c_opts = (caddr32_t)(uintptr_t)scp->cmd_cm.c_opts;
970 
971 		switch (hp->h_cmd) {
972 		case SBD_CMD_GETNCM:
973 			scmd32.cmd_getncm.g_ncm = scp->cmd_getncm.g_ncm;
974 			break;
975 		default:
976 			break;
977 		}
978 
979 		if (ddi_copyout((void *)&scmd32, (void *)hp->h_iap,
980 			sizeof (sbd_cmd32_t), hp->h_mode)) {
981 			cmn_err(CE_WARN,
982 				"%s: (32bit) failed to copyout "
983 					"sbdcmd-struct", f);
984 			return (EFAULT);
985 		}
986 	} else
987 #endif /* _MULTI_DATAMODEL */
988 	if (ddi_copyout((void *)scp, (void *)hp->h_iap,
989 		sizeof (sbd_cmd_t), hp->h_mode) != 0) {
990 		cmn_err(CE_WARN,
991 			"%s: failed to copyout sbdcmd-struct", f);
992 		return (EFAULT);
993 	}
994 
995 	return (0);
996 }
997 
998 static int
999 dr_copyout_errs(dr_handle_t *hp)
1000 {
1001 	static fn_t	f = "dr_copyout_errs";
1002 
1003 	if (hp->h_err == NULL)
1004 		return (0);
1005 
1006 	if (hp->h_err->e_code) {
1007 		PR_ALL("%s: error %d %s",
1008 			f, hp->h_err->e_code, hp->h_err->e_rsc);
1009 	}
1010 
1011 #ifdef _MULTI_DATAMODEL
1012 	if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
1013 		sbd_error32_t	*serr32p;
1014 
1015 		serr32p = GETSTRUCT(sbd_error32_t, 1);
1016 
1017 		serr32p->e_code = hp->h_err->e_code;
1018 		bcopy(&hp->h_err->e_rsc[0], &serr32p->e_rsc[0],
1019 			MAXPATHLEN);
1020 		if (ddi_copyout((void *)serr32p,
1021 			(void *)&((sbd_ioctl_arg32_t *)hp->h_iap)->i_err,
1022 			sizeof (sbd_error32_t), hp->h_mode)) {
1023 			cmn_err(CE_WARN,
1024 				"%s: (32bit) failed to copyout", f);
1025 			return (EFAULT);
1026 		}
1027 		FREESTRUCT(serr32p, sbd_error32_t, 1);
1028 	} else
1029 #endif /* _MULTI_DATAMODEL */
1030 	if (ddi_copyout((void *)hp->h_err,
1031 		(void *)&hp->h_iap->i_err,
1032 		sizeof (sbd_error_t), hp->h_mode)) {
1033 		cmn_err(CE_WARN,
1034 			"%s: failed to copyout", f);
1035 		return (EFAULT);
1036 	}
1037 
1038 	sbd_err_clear(&hp->h_err);
1039 
1040 	return (0);
1041 
1042 }
1043 
1044 /*
1045  * pre-op entry point must call sbd_err_set_c(), if needed.
1046  * Return value of non-zero indicates failure.
1047  */
1048 static int
1049 dr_pre_op(dr_handle_t *hp)
1050 {
1051 	int		rv = 0, t;
1052 	int		cmd, serr = 0;
1053 	dr_devset_t	devset;
1054 	dr_board_t	*bp = hp->h_bd;
1055 	dr_handle_t	*shp = hp;
1056 	static fn_t	f = "dr_pre_op";
1057 
1058 	cmd = hp->h_cmd;
1059 	devset = shp->h_devset;
1060 
1061 	PR_ALL("%s (cmd = %s)...\n", f, SBD_CMD_STR(cmd));
1062 
1063 	hp->h_err = drmach_pre_op(cmd, bp->b_id, &hp->h_opts);
1064 	if (hp->h_err != NULL) {
1065 		PR_ALL("drmach_pre_op failed for cmd %s(%d)\n",
1066 			SBD_CMD_STR(cmd), cmd);
1067 		return (-1);
1068 	}
1069 
1070 	/*
1071 	 * Check for valid state transitions.
1072 	 */
1073 	if ((t = CMD2INDEX(cmd)) != -1) {
1074 		struct dr_state_trans	*transp;
1075 		int			state_err;
1076 
1077 		transp = &dr_state_transition[t];
1078 		ASSERT(transp->x_cmd == cmd);
1079 
1080 		state_err = dr_check_transition(bp, &devset, transp, cmd);
1081 
1082 		if (state_err < 0) {
1083 			/*
1084 			 * Invalidate device.
1085 			 */
1086 			dr_op_err(CE_IGNORE, hp, ESBD_INVAL, NULL);
1087 			serr = -1;
1088 			PR_ALL("%s: invalid devset (0x%x)\n",
1089 				f, (uint_t)devset);
1090 		} else if (state_err != 0) {
1091 			/*
1092 			 * State transition is not a valid one.
1093 			 */
1094 			dr_op_err(CE_IGNORE, hp,
1095 				transp->x_op[state_err].x_err, NULL);
1096 
1097 			serr = transp->x_op[state_err].x_rv;
1098 
1099 			PR_ALL("%s: invalid state %s(%d) for cmd %s(%d)\n",
1100 				f, state_str[state_err], state_err,
1101 				SBD_CMD_STR(cmd), cmd);
1102 		} else {
1103 			shp->h_devset = devset;
1104 		}
1105 	}
1106 
1107 	if (serr) {
1108 		rv = -1;
1109 	}
1110 
1111 	return (rv);
1112 }
1113 
1114 static int
1115 dr_post_op(dr_handle_t *hp)
1116 {
1117 	int		rv = 0;
1118 	int		cmd;
1119 	dr_board_t	*bp = hp->h_bd;
1120 	static fn_t	f = "dr_post_op";
1121 
1122 	cmd = hp->h_cmd;
1123 
1124 	PR_ALL("%s (cmd = %s)...\n", f, SBD_CMD_STR(cmd));
1125 
1126 	/* errors should have been caught by now */
1127 	ASSERT(hp->h_err == NULL);
1128 
1129 	hp->h_err = drmach_post_op(cmd, bp->b_id, &hp->h_opts);
1130 	if (hp->h_err != NULL) {
1131 		PR_ALL("drmach_post_op failed for cmd %s(%d)\n",
1132 			SBD_CMD_STR(cmd), cmd);
1133 		return (-1);
1134 	}
1135 
1136 	switch (cmd) {
1137 	case SBD_CMD_CONFIGURE:
1138 	case SBD_CMD_UNCONFIGURE:
1139 	case SBD_CMD_CONNECT:
1140 	case SBD_CMD_DISCONNECT:
1141 	case SBD_CMD_GETNCM:
1142 	case SBD_CMD_STATUS:
1143 		break;
1144 
1145 	default:
1146 		break;
1147 	}
1148 
1149 	return (rv);
1150 }
1151 
1152 static int
1153 dr_exec_op(dr_handle_t *hp)
1154 {
1155 	int		rv = 0;
1156 	static fn_t	f = "dr_exec_op";
1157 
1158 	/* errors should have been caught by now */
1159 	ASSERT(hp->h_err == NULL);
1160 
1161 	switch (hp->h_cmd) {
1162 	case SBD_CMD_ASSIGN:
1163 		dr_assign_board(hp);
1164 		break;
1165 
1166 	case SBD_CMD_UNASSIGN:
1167 		dr_unassign_board(hp);
1168 		break;
1169 
1170 	case SBD_CMD_POWEROFF:
1171 		dr_poweroff_board(hp);
1172 		break;
1173 
1174 	case SBD_CMD_POWERON:
1175 		dr_poweron_board(hp);
1176 		break;
1177 
1178 	case SBD_CMD_TEST:
1179 		dr_test_board(hp);
1180 		break;
1181 
1182 	case SBD_CMD_CONNECT:
1183 		dr_connect(hp);
1184 		break;
1185 
1186 	case SBD_CMD_CONFIGURE:
1187 		dr_dev_configure(hp);
1188 		break;
1189 
1190 	case SBD_CMD_UNCONFIGURE:
1191 		dr_dev_release(hp);
1192 		if (hp->h_err == NULL)
1193 			rv = dr_dev_unconfigure(hp);
1194 		else
1195 			dr_dev_cancel(hp);
1196 		break;
1197 
1198 	case SBD_CMD_DISCONNECT:
1199 		rv = dr_disconnect(hp);
1200 		break;
1201 
1202 	case SBD_CMD_STATUS:
1203 		rv = dr_dev_status(hp);
1204 		break;
1205 
1206 	case SBD_CMD_GETNCM:
1207 		hp->h_sbdcmd.cmd_getncm.g_ncm = dr_get_ncm(hp);
1208 		rv = dr_copyout_iocmd(hp);
1209 		break;
1210 
1211 	case SBD_CMD_PASSTHRU:
1212 		rv = dr_pt_ioctl(hp);
1213 		break;
1214 
1215 	default:
1216 		cmn_err(CE_WARN,
1217 			"%s: unknown command (%d)",
1218 			f, hp->h_cmd);
1219 		break;
1220 	}
1221 
1222 	if (hp->h_err != NULL) {
1223 		rv = -1;
1224 	}
1225 
1226 	return (rv);
1227 }
1228 
1229 static void
1230 dr_assign_board(dr_handle_t *hp)
1231 {
1232 	dr_board_t *bp = hp->h_bd;
1233 
1234 	hp->h_err = drmach_board_assign(bp->b_num, &bp->b_id);
1235 	if (hp->h_err == NULL) {
1236 		bp->b_assigned = 1;
1237 	}
1238 }
1239 
1240 static void
1241 dr_unassign_board(dr_handle_t *hp)
1242 {
1243 	dr_board_t *bp = hp->h_bd;
1244 
1245 	/*
1246 	 * Block out status during unassign.
1247 	 * Not doing cv_wait_sig here as starfire SSP software
1248 	 * ignores unassign failure and removes board from
1249 	 * domain mask causing system panic.
1250 	 * TODO: Change cv_wait to cv_wait_sig when SSP software
1251 	 * handles unassign failure.
1252 	 */
1253 	dr_lock_status(bp);
1254 
1255 	hp->h_err = drmach_board_unassign(bp->b_id);
1256 	if (hp->h_err == NULL) {
1257 		/*
1258 		 * clear drmachid_t handle; not valid after board unassign
1259 		 */
1260 		bp->b_id = 0;
1261 		bp->b_assigned = 0;
1262 	}
1263 
1264 	dr_unlock_status(bp);
1265 }
1266 
1267 static void
1268 dr_poweron_board(dr_handle_t *hp)
1269 {
1270 	dr_board_t *bp = hp->h_bd;
1271 
1272 	hp->h_err = drmach_board_poweron(bp->b_id);
1273 }
1274 
1275 static void
1276 dr_poweroff_board(dr_handle_t *hp)
1277 {
1278 	dr_board_t *bp = hp->h_bd;
1279 
1280 	hp->h_err = drmach_board_poweroff(bp->b_id);
1281 }
1282 
1283 static void
1284 dr_test_board(dr_handle_t *hp)
1285 {
1286 	dr_board_t *bp = hp->h_bd;
1287 	hp->h_err = drmach_board_test(bp->b_id, &hp->h_opts,
1288 	    dr_cmd_flags(hp) & SBD_FLAG_FORCE);
1289 }
1290 
1291 /*
1292  * Create and populate the component nodes for a board.  Assumes that the
1293  * devlists for the board have been initialized.
1294  */
1295 static void
1296 dr_make_comp_nodes(dr_board_t *bp)
1297 {
1298 	int	i;
1299 
1300 	/*
1301 	 * Make nodes for the individual components on the board.
1302 	 * First we need to initialize memory unit data structures of board
1303 	 * structure.
1304 	 */
1305 	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
1306 		dr_mem_unit_t *mp;
1307 
1308 		mp = dr_get_mem_unit(bp, i);
1309 		dr_init_mem_unit(mp);
1310 	}
1311 
1312 	/*
1313 	 * Initialize cpu unit data structures.
1314 	 */
1315 	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
1316 		dr_cpu_unit_t *cp;
1317 
1318 		cp = dr_get_cpu_unit(bp, i);
1319 		dr_init_cpu_unit(cp);
1320 	}
1321 
1322 	/*
1323 	 * Initialize io unit data structures.
1324 	 */
1325 	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
1326 		dr_io_unit_t *ip;
1327 
1328 		ip = dr_get_io_unit(bp, i);
1329 		dr_init_io_unit(ip);
1330 	}
1331 
1332 	dr_board_transition(bp, DR_STATE_CONNECTED);
1333 
1334 	bp->b_rstate = SBD_STAT_CONNECTED;
1335 	bp->b_ostate = SBD_STAT_UNCONFIGURED;
1336 	bp->b_cond = SBD_COND_OK;
1337 	(void) drv_getparm(TIME, (void *)&bp->b_time);
1338 
1339 }
1340 
1341 /*
1342  * Only do work if called to operate on an entire board
1343  * which doesn't already have components present.
1344  */
1345 static void
1346 dr_connect(dr_handle_t *hp)
1347 {
1348 	dr_board_t	*bp = hp->h_bd;
1349 	static fn_t	f = "dr_connect";
1350 
1351 	PR_ALL("%s...\n", f);
1352 
1353 	if (DR_DEVS_PRESENT(bp)) {
1354 		/*
1355 		 * Board already has devices present.
1356 		 */
1357 		PR_ALL("%s: devices already present (0x%lx)\n",
1358 			f, DR_DEVS_PRESENT(bp));
1359 		return;
1360 	}
1361 
1362 	hp->h_err = drmach_board_connect(bp->b_id, &hp->h_opts);
1363 	if (hp->h_err)
1364 		return;
1365 
1366 	hp->h_err = dr_init_devlists(bp);
1367 	if (hp->h_err)
1368 		return;
1369 	else if (bp->b_ndev == 0) {
1370 		dr_op_err(CE_WARN, hp, ESBD_EMPTY_BD, bp->b_path);
1371 		return;
1372 	} else {
1373 		dr_make_comp_nodes(bp);
1374 		return;
1375 	}
1376 	/*NOTREACHED*/
1377 }
1378 
1379 static int
1380 dr_disconnect(dr_handle_t *hp)
1381 {
1382 	int		i;
1383 	dr_devset_t	devset;
1384 	dr_board_t	*bp = hp->h_bd;
1385 	static fn_t	f = "dr_disconnect";
1386 
1387 	PR_ALL("%s...\n", f);
1388 
1389 	/*
1390 	 * Only devices which are present, but
1391 	 * unattached can be disconnected.
1392 	 */
1393 	devset = hp->h_devset & DR_DEVS_PRESENT(bp) &
1394 		DR_DEVS_UNATTACHED(bp);
1395 
1396 	if ((devset == 0) && DR_DEVS_PRESENT(bp)) {
1397 		dr_op_err(CE_IGNORE, hp, ESBD_EMPTY_BD, bp->b_path);
1398 		return (0);
1399 	}
1400 
1401 	/*
1402 	 * Block out status during disconnect.
1403 	 */
1404 	mutex_enter(&bp->b_slock);
1405 	while (bp->b_sflags & DR_BSLOCK) {
1406 		if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
1407 			mutex_exit(&bp->b_slock);
1408 			return (EINTR);
1409 		}
1410 	}
1411 	bp->b_sflags |= DR_BSLOCK;
1412 	mutex_exit(&bp->b_slock);
1413 
1414 	hp->h_err = drmach_board_disconnect(bp->b_id, &hp->h_opts);
1415 
1416 	DR_DEVS_DISCONNECT(bp, devset);
1417 
1418 	ASSERT((DR_DEVS_ATTACHED(bp) & devset) == 0);
1419 
1420 	/*
1421 	 * Update per-device state transitions.
1422 	 */
1423 	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
1424 		dr_cpu_unit_t *cp;
1425 
1426 		if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
1427 			continue;
1428 
1429 		cp = dr_get_cpu_unit(bp, i);
1430 		if (dr_disconnect_cpu(cp) == 0)
1431 			dr_device_transition(&cp->sbc_cm, DR_STATE_EMPTY);
1432 		else if (cp->sbc_cm.sbdev_error != NULL)
1433 			DRERR_SET_C(&hp->h_err, &cp->sbc_cm.sbdev_error);
1434 
1435 		ASSERT(cp->sbc_cm.sbdev_error == NULL);
1436 	}
1437 
1438 	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
1439 		dr_mem_unit_t *mp;
1440 
1441 		if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
1442 			continue;
1443 
1444 		mp = dr_get_mem_unit(bp, i);
1445 		if (dr_disconnect_mem(mp) == 0)
1446 			dr_device_transition(&mp->sbm_cm, DR_STATE_EMPTY);
1447 		else if (mp->sbm_cm.sbdev_error != NULL)
1448 			DRERR_SET_C(&hp->h_err, &mp->sbm_cm.sbdev_error);
1449 
1450 		ASSERT(mp->sbm_cm.sbdev_error == NULL);
1451 	}
1452 
1453 	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
1454 		dr_io_unit_t *ip;
1455 
1456 		if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
1457 			continue;
1458 
1459 		ip = dr_get_io_unit(bp, i);
1460 		if (dr_disconnect_io(ip) == 0)
1461 			dr_device_transition(&ip->sbi_cm, DR_STATE_EMPTY);
1462 		else if (ip->sbi_cm.sbdev_error != NULL)
1463 			DRERR_SET_C(&hp->h_err, &ip->sbi_cm.sbdev_error);
1464 
1465 		ASSERT(ip->sbi_cm.sbdev_error == NULL);
1466 	}
1467 	if (hp->h_err) {
1468 		/*
1469 		 * For certain errors, drmach_board_disconnect will mark
1470 		 * the board as unusable; in these cases the devtree must
1471 		 * be purged so that status calls will succeed.
1472 		 * XXX
1473 		 * This implementation checks for discrete error codes -
1474 		 * someday, the i/f to drmach_board_disconnect should be
1475 		 * changed to avoid the e_code testing.
1476 		 */
1477 		if ((hp->h_err->e_code == ESTC_MBXRPLY) ||
1478 			(hp->h_err->e_code == ESTC_MBXRQST) ||
1479 			(hp->h_err->e_code == ESTC_SMS_ERR_UNRECOVERABLE) ||
1480 			(hp->h_err->e_code == ESTC_SMS_ERR_RECOVERABLE) ||
1481 			(hp->h_err->e_code == ESTC_DEPROBE)) {
1482 			bp->b_ostate = SBD_STAT_UNCONFIGURED;
1483 			bp->b_busy = 0;
1484 			(void) drv_getparm(TIME, (void *)&bp->b_time);
1485 
1486 			if (drmach_board_deprobe(bp->b_id))
1487 				goto disconnect_done;
1488 			else
1489 				bp->b_ndev = 0;
1490 		}
1491 
1492 		/*
1493 		 * If the disconnect failed in a recoverable way,
1494 		 * more work is required.
1495 		 * XXX
1496 		 * This implementation checks for discrete error codes -
1497 		 * someday, the i/f to drmach_board_disconnect should be
1498 		 * changed to avoid the e_code testing.
1499 		 */
1500 		if ((hp->h_err->e_code == ESTC_MBXRQST) ||
1501 		    (hp->h_err->e_code == ESTC_SMS_ERR_RECOVERABLE) ||
1502 		    (hp->h_err->e_code == ESTC_DEPROBE)) {
1503 			/*
1504 			 * With this failure, the board has been deprobed
1505 			 * by IKP, and reprobed.  We've already gotten rid
1506 			 * of the old devtree, now we need to reconstruct it
1507 			 * based on the new IKP probe
1508 			 */
1509 			if (dr_init_devlists(bp) || (bp->b_ndev == 0))
1510 				goto disconnect_done;
1511 
1512 			dr_make_comp_nodes(bp);
1513 		}
1514 	}
1515 	/*
1516 	 * Once all the components on a board have been disconnected,
1517 	 * the board's state can transition to disconnected and
1518 	 * we can allow the deprobe to take place.
1519 	 */
1520 	if (hp->h_err == NULL && DR_DEVS_PRESENT(bp) == 0) {
1521 		dr_board_transition(bp, DR_STATE_OCCUPIED);
1522 		bp->b_rstate = SBD_STAT_DISCONNECTED;
1523 		bp->b_ostate = SBD_STAT_UNCONFIGURED;
1524 		bp->b_busy = 0;
1525 		(void) drv_getparm(TIME, (void *)&bp->b_time);
1526 
1527 		hp->h_err = drmach_board_deprobe(bp->b_id);
1528 
1529 		if (hp->h_err == NULL) {
1530 			bp->b_ndev = 0;
1531 			dr_board_transition(bp, DR_STATE_EMPTY);
1532 			bp->b_rstate = SBD_STAT_EMPTY;
1533 			(void) drv_getparm(TIME, (void *)&bp->b_time);
1534 		}
1535 	}
1536 
1537 disconnect_done:
1538 	dr_unlock_status(bp);
1539 
1540 	return (0);
1541 }
1542 
1543 /*
1544  * Check if a particular device is a valid target of the current
1545  * operation. Return 1 if it is a valid target, and 0 otherwise.
1546  */
1547 static int
1548 dr_dev_is_target(dr_dev_unit_t *dp, int present_only, uint_t uset)
1549 {
1550 	dr_common_unit_t *cp;
1551 	int		 is_present;
1552 	int		 is_attached;
1553 
1554 	cp = &dp->du_common;
1555 
1556 	/* check if the user requested this device */
1557 	if ((uset & (1 << cp->sbdev_unum)) == 0) {
1558 		return (0);
1559 	}
1560 
1561 	is_present = DR_DEV_IS_PRESENT(cp) ? 1 : 0;
1562 	is_attached = DR_DEV_IS_ATTACHED(cp) ? 1 : 0;
1563 
1564 	/*
1565 	 * If the present_only flag is set, a valid target
1566 	 * must be present but not attached. Otherwise, it
1567 	 * must be both present and attached.
1568 	 */
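	/*
	 * Both requirements collapse into the single test below:
	 * (present_only ^ is_attached) is nonzero exactly when the
	 * attached state differs from the present_only flag.
	 */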
1569 	if (is_present && (present_only ^ is_attached)) {
1570 		/* sanity check */
1571 		ASSERT(cp->sbdev_id != (drmachid_t)0);
1572 
1573 		return (1);
1574 	}
1575 
1576 	return (0);
1577 }
1578 
1579 static void
1580 dr_dev_make_list(dr_handle_t *hp, sbd_comp_type_t type, int present_only,
1581 	dr_common_unit_t ***devlist, int *devnum)
1582 {
1583 	dr_board_t	*bp = hp->h_bd;
1584 	int		 unum;
1585 	int		 nunits;
1586 	uint_t		 uset;
1587 	int		 len;
1588 	dr_common_unit_t **list, **wp;
1589 
1590 	switch (type) {
1591 	case SBD_COMP_CPU:
1592 		nunits = MAX_CPU_UNITS_PER_BOARD;
1593 		break;
1594 	case SBD_COMP_MEM:
1595 		nunits = MAX_MEM_UNITS_PER_BOARD;
1596 		break;
1597 	case SBD_COMP_IO:
1598 		nunits = MAX_IO_UNITS_PER_BOARD;
1599 		break;
1600 	default:
1601 		/* catch this in debug kernels */
1602 		ASSERT(0);
1603 		break;
1604 	}
1605 
1606 	/* allocate list storage. */
1607 	len = sizeof (dr_common_unit_t *) * (nunits + 1);
1608 	list = kmem_zalloc(len, KM_SLEEP);
1609 
1610 	/* record length of storage in first element */
1611 	*list++ = (dr_common_unit_t *)(uintptr_t)len;
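	/*
	 * For example, for CPUs list[-1] now holds
	 * (MAX_CPU_UNITS_PER_BOARD + 1) * sizeof (dr_common_unit_t *);
	 * dr_dev_clean_up() reads this value back to kmem_free() the
	 * allocation.
	 */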
1612 
1613 	/* get bit array signifying which units are to be involved */
1614 	uset = DEVSET_GET_UNITSET(hp->h_devset, type);
1615 
1616 	/*
1617 	 * Adjust the loop count for CPU devices since all cores
1618 	 * in a CMP will be examined in a single iteration.
1619 	 */
1620 	if (type == SBD_COMP_CPU) {
1621 		nunits = MAX_CMP_UNITS_PER_BOARD;
1622 	}
1623 
1624 	/* populate list */
1625 	for (wp = list, unum = 0; unum < nunits; unum++) {
1626 
1627 		dr_dev_unit_t	*dp;
1628 		int		core;
1629 		int		cunum;
1630 
1631 		dp = DR_GET_BOARD_DEVUNIT(bp, type, unum);
1632 		if (dr_dev_is_target(dp, present_only, uset)) {
1633 			*wp++ = &dp->du_common;
1634 		}
1635 
1636 		/* further processing is only required for CPUs */
1637 		if (type != SBD_COMP_CPU) {
1638 			continue;
1639 		}
1640 
1641 		/*
1642 		 * Add any additional cores from the current CPU
1643 		 * device. This is to ensure that all the cores
1644 		 * are grouped together in the device list, and
1645 		 * consequently sequenced together during the actual
1646 		 * operation.
1647 		 */
1648 		for (core = 1; core < MAX_CORES_PER_CMP; core++) {
1649 
1650 			cunum = DR_CMP_CORE_UNUM(unum, core);
1651 			dp = DR_GET_BOARD_DEVUNIT(bp, type, cunum);
1652 
1653 			if (dr_dev_is_target(dp, present_only, uset)) {
1654 				*wp++ = &dp->du_common;
1655 			}
1656 		}
1657 	}
1658 
1659 	/* calculate number of units in list, return result and list pointer */
1660 	*devnum = wp - list;
1661 	*devlist = list;
1662 }
1663 
1664 static void
1665 dr_dev_clean_up(dr_handle_t *hp, dr_common_unit_t **list, int devnum)
1666 {
1667 	int len;
1668 	int n = 0;
1669 	dr_common_unit_t *cp, **rp = list;
1670 
1671 	/*
1672 	 * move first encountered unit error to handle if handle
1673 	 * does not yet have a recorded error.
1674 	 */
1675 	if (hp->h_err == NULL) {
1676 		while (n++ < devnum) {
1677 			cp = *rp++;
1678 			if (cp->sbdev_error != NULL) {
1679 				hp->h_err = cp->sbdev_error;
1680 				cp->sbdev_error = NULL;
1681 				break;
1682 			}
1683 		}
1684 	}
1685 
1686 	/* free remaining unit errors */
1687 	while (n++ < devnum) {
1688 		cp = *rp++;
1689 		if (cp->sbdev_error != NULL) {
1690 			sbd_err_clear(&cp->sbdev_error);
1691 			cp->sbdev_error = NULL;
1692 		}
1693 	}
1694 
1695 	/* free list */
1696 	list -= 1;
1697 	len = (int)(uintptr_t)list[0];
1698 	kmem_free(list, len);
1699 }
1700 
1701 static int
1702 dr_dev_walk(dr_handle_t *hp, sbd_comp_type_t type, int present_only,
1703 		int (*pre_op)(dr_handle_t *, dr_common_unit_t **, int),
1704 		void (*op)(dr_handle_t *, dr_common_unit_t *),
1705 		int (*post_op)(dr_handle_t *, dr_common_unit_t **, int),
1706 		void (*board_op)(dr_handle_t *, dr_common_unit_t **, int))
1707 {
1708 	int			  devnum, rv;
1709 	dr_common_unit_t	**devlist;
1710 
1711 	dr_dev_make_list(hp, type, present_only, &devlist, &devnum);
1712 
1713 	rv = 0;
1714 	if (devnum > 0) {
1715 		rv = (*pre_op)(hp, devlist, devnum);
1716 		if (rv == 0) {
1717 			int n;
1718 
1719 			for (n = 0; n < devnum; n++)
1720 				(*op)(hp, devlist[n]);
1721 
1722 			rv = (*post_op)(hp, devlist, devnum);
1723 
1724 			(*board_op)(hp, devlist, devnum);
1725 		}
1726 	}
1727 
1728 	dr_dev_clean_up(hp, devlist, devnum);
1729 	return (rv);
1730 }
1731 
1732 /*ARGSUSED*/
1733 static int
1734 dr_dev_noop(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
1735 {
1736 	return (0);
1737 }
1738 
1739 static void
1740 dr_attach_update_state(dr_handle_t *hp,
1741 	dr_common_unit_t **devlist, int devnum)
1742 {
1743 	dr_board_t	*bp = hp->h_bd;
1744 	int		i;
1745 	dr_devset_t	devs_unattached, devs_present;
1746 	static fn_t	f = "dr_attach_update_state";
1747 
1748 	for (i = 0; i < devnum; i++) {
1749 		dr_common_unit_t *cp = devlist[i];
1750 
1751 		if (dr_check_unit_attached(cp) == -1) {
1752 			PR_ALL("%s: ERROR %s not attached\n",
1753 				f, cp->sbdev_path);
1754 			continue;
1755 		}
1756 
1757 		DR_DEV_SET_ATTACHED(cp);
1758 
1759 		dr_device_transition(cp, DR_STATE_CONFIGURED);
1760 		cp->sbdev_cond = SBD_COND_OK;
1761 	}
1762 
1763 	devs_present = DR_DEVS_PRESENT(bp);
1764 	devs_unattached = DR_DEVS_UNATTACHED(bp);
1765 
1766 	switch (bp->b_state) {
1767 	case DR_STATE_CONNECTED:
1768 	case DR_STATE_UNCONFIGURED:
1769 		ASSERT(devs_present);
1770 
1771 		if (devs_unattached == 0) {
1772 			/*
1773 			 * All devices finally attached.
1774 			 */
1775 			dr_board_transition(bp, DR_STATE_CONFIGURED);
1776 			hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1777 			hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1778 			hp->h_bd->b_cond = SBD_COND_OK;
1779 			hp->h_bd->b_busy = 0;
1780 			(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1781 		} else if (devs_present != devs_unattached) {
1782 			/*
1783 			 * Only some devices are fully attached.
1784 			 */
1785 			dr_board_transition(bp, DR_STATE_PARTIAL);
1786 			hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1787 			hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1788 			(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1789 		}
1790 		break;
1791 
1792 	case DR_STATE_PARTIAL:
1793 		ASSERT(devs_present);
1794 		/*
1795 		 * All devices finally attached.
1796 		 */
1797 		if (devs_unattached == 0) {
1798 			dr_board_transition(bp, DR_STATE_CONFIGURED);
1799 			hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1800 			hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1801 			hp->h_bd->b_cond = SBD_COND_OK;
1802 			hp->h_bd->b_busy = 0;
1803 			(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1804 		}
1805 		break;
1806 
1807 	default:
1808 		break;
1809 	}
1810 }
1811 
1812 static void
1813 dr_dev_configure(dr_handle_t *hp)
1814 {
1815 	int rv;
1816 
1817 	rv = dr_dev_walk(hp, SBD_COMP_CPU, 1,
1818 		dr_pre_attach_cpu,
1819 		dr_attach_cpu,
1820 		dr_post_attach_cpu,
1821 		dr_attach_update_state);
1822 
1823 	if (rv >= 0) {
1824 		rv = dr_dev_walk(hp, SBD_COMP_MEM, 1,
1825 			dr_pre_attach_mem,
1826 			dr_attach_mem,
1827 			dr_post_attach_mem,
1828 			dr_attach_update_state);
1829 	}
1830 
1831 	if (rv >= 0) {
1832 		(void) dr_dev_walk(hp, SBD_COMP_IO, 1,
1833 			dr_pre_attach_io,
1834 			dr_attach_io,
1835 			dr_post_attach_io,
1836 			dr_attach_update_state);
1837 	}
1838 }
1839 
1840 static void
1841 dr_release_update_state(dr_handle_t *hp,
1842 	dr_common_unit_t **devlist, int devnum)
1843 {
1844 	_NOTE(ARGUNUSED(devlist))
1845 	_NOTE(ARGUNUSED(devnum))
1846 
1847 	dr_board_t *bp = hp->h_bd;
1848 
1849 	/*
1850 	 * If all attached components on the board have now been released,
1851 	 * transfer the board to the RELEASE state.
1852 	 */
1853 	if ((bp->b_state != DR_STATE_RELEASE) &&
1854 		(DR_DEVS_RELEASED(bp) == DR_DEVS_ATTACHED(bp))) {
1855 		dr_board_transition(bp, DR_STATE_RELEASE);
1856 		hp->h_bd->b_busy = 1;
1857 	}
1858 }
1859 
1860 /* called by dr_release_done [below] and dr_release_mem_done [dr_mem.c] */
1861 int
1862 dr_release_dev_done(dr_common_unit_t *cp)
1863 {
1864 	if (cp->sbdev_state == DR_STATE_RELEASE) {
1865 		ASSERT(DR_DEV_IS_RELEASED(cp));
1866 
1867 		DR_DEV_SET_UNREFERENCED(cp);
1868 
1869 		dr_device_transition(cp, DR_STATE_UNREFERENCED);
1870 
1871 		return (0);
1872 	} else {
1873 		return (-1);
1874 	}
1875 }
1876 
1877 static void
1878 dr_release_done(dr_handle_t *hp, dr_common_unit_t *cp)
1879 {
1880 	_NOTE(ARGUNUSED(hp))
1881 
1882 	dr_board_t		*bp;
1883 	static fn_t		f = "dr_release_done";
1884 
1885 	PR_ALL("%s...\n", f);
1886 
1887 	/* get board pointer & sanity check */
1888 	bp = cp->sbdev_bp;
1889 	ASSERT(bp == hp->h_bd);
1890 
1891 	/*
1892 	 * Transfer the device which just completed its release
1893 	 * to the UNREFERENCED state.
1894 	 */
1895 	switch (cp->sbdev_type) {
1896 	case SBD_COMP_MEM:
1897 		dr_release_mem_done(cp);
1898 		break;
1899 
1900 	default:
1901 		DR_DEV_SET_RELEASED(cp);
1902 
1903 		dr_device_transition(cp, DR_STATE_RELEASE);
1904 
1905 		(void) dr_release_dev_done(cp);
1906 		break;
1907 	}
1908 
1909 	/*
1910 	 * If the board is in the RELEASE state and all of its released
1911 	 * devices are now unreferenced, transfer the board to the
1912 	 * UNREFERENCED state.
1913 	 */
1914 	if ((bp->b_state == DR_STATE_RELEASE) &&
1915 		(DR_DEVS_RELEASED(bp) == DR_DEVS_UNREFERENCED(bp))) {
1916 		dr_board_transition(bp, DR_STATE_UNREFERENCED);
1917 		bp->b_busy = 1;
1918 		(void) drv_getparm(TIME, (void *)&bp->b_time);
1919 	}
1920 }
1921 
1922 static void
1923 dr_dev_release_mem(dr_handle_t *hp, dr_common_unit_t *dv)
1924 {
1925 	dr_release_mem(dv);
1926 	dr_release_done(hp, dv);
1927 }
1928 
1929 static void
1930 dr_dev_release(dr_handle_t *hp)
1931 {
1932 	int rv;
1933 
1934 	hp->h_bd->b_busy = 1;
1935 
1936 	rv = dr_dev_walk(hp, SBD_COMP_CPU, 0,
1937 		dr_pre_release_cpu,
1938 		dr_release_done,
1939 		dr_dev_noop,
1940 		dr_release_update_state);
1941 
1942 	if (rv >= 0) {
1943 		rv = dr_dev_walk(hp, SBD_COMP_MEM, 0,
1944 			dr_pre_release_mem,
1945 			dr_dev_release_mem,
1946 			dr_dev_noop,
1947 			dr_release_update_state);
1948 	}
1949 
1950 	if (rv >= 0) {
1951 		rv = dr_dev_walk(hp, SBD_COMP_IO, 0,
1952 			dr_pre_release_io,
1953 			dr_release_done,
1954 			dr_dev_noop,
1955 			dr_release_update_state);
1956 
1957 	}
1958 
1959 	if (rv < 0)
1960 		hp->h_bd->b_busy = 0;
1961 	/* else, b_busy will be cleared in dr_detach_update_state() */
1962 }
1963 
1964 static void
1965 dr_detach_update_state(dr_handle_t *hp,
1966 	dr_common_unit_t **devlist, int devnum)
1967 {
1968 	dr_board_t	*bp = hp->h_bd;
1969 	int		i;
1970 	dr_state_t	bstate;
1971 	static fn_t	f = "dr_detach_update_state";
1972 
1973 	for (i = 0; i < devnum; i++) {
1974 		dr_common_unit_t *cp = devlist[i];
1975 
1976 		if (dr_check_unit_attached(cp) >= 0) {
1977 			/*
1978 			 * Device is still attached probably due
1979 			 * to an error.  Need to keep track of it.
1980 			 */
1981 			PR_ALL("%s: ERROR %s not detached\n",
1982 				f, cp->sbdev_path);
1983 
1984 			continue;
1985 		}
1986 
1987 		DR_DEV_CLR_ATTACHED(cp);
1988 		DR_DEV_CLR_RELEASED(cp);
1989 		DR_DEV_CLR_UNREFERENCED(cp);
1990 		dr_device_transition(cp, DR_STATE_UNCONFIGURED);
1991 	}
1992 
1993 	bstate = bp->b_state;
1994 	if (bstate != DR_STATE_UNCONFIGURED) {
1995 		if (DR_DEVS_PRESENT(bp) == DR_DEVS_UNATTACHED(bp)) {
1996 			/*
1997 			 * All devices are finally detached.
1998 			 */
1999 			dr_board_transition(bp, DR_STATE_UNCONFIGURED);
2000 			hp->h_bd->b_ostate = SBD_STAT_UNCONFIGURED;
2001 			(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
2002 		} else if ((bp->b_state != DR_STATE_PARTIAL) &&
2003 			(DR_DEVS_ATTACHED(bp) !=
2004 			DR_DEVS_PRESENT(bp))) {
2005 			/*
2006 			 * Some devices remain attached.
2007 			 */
2008 			dr_board_transition(bp, DR_STATE_PARTIAL);
2009 			(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
2010 		}
2011 
2012 		if ((hp->h_devset & DR_DEVS_UNATTACHED(bp)) == hp->h_devset)
2013 			hp->h_bd->b_busy = 0;
2014 	}
2015 }
2016 
2017 static int
2018 dr_dev_unconfigure(dr_handle_t *hp)
2019 {
2020 	dr_board_t	*bp = hp->h_bd;
2021 
2022 	/*
2023 	 * Block out status during IO unconfig.
2024 	 */
2025 	mutex_enter(&bp->b_slock);
2026 	while (bp->b_sflags & DR_BSLOCK) {
2027 		if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
2028 			mutex_exit(&bp->b_slock);
2029 			return (EINTR);
2030 		}
2031 	}
2032 	bp->b_sflags |= DR_BSLOCK;
2033 	mutex_exit(&bp->b_slock);
2034 
2035 	(void) dr_dev_walk(hp, SBD_COMP_IO, 0,
2036 		dr_pre_detach_io,
2037 		dr_detach_io,
2038 		dr_post_detach_io,
2039 		dr_detach_update_state);
2040 
2041 	dr_unlock_status(bp);
2042 
2043 	(void) dr_dev_walk(hp, SBD_COMP_CPU, 0,
2044 		dr_pre_detach_cpu,
2045 		dr_detach_cpu,
2046 		dr_post_detach_cpu,
2047 		dr_detach_update_state);
2048 
2049 	(void) dr_dev_walk(hp, SBD_COMP_MEM, 0,
2050 		dr_pre_detach_mem,
2051 		dr_detach_mem,
2052 		dr_post_detach_mem,
2053 		dr_detach_update_state);
2054 
2055 	return (0);
2056 }
2057 
2058 static void
2059 dr_dev_cancel(dr_handle_t *hp)
2060 {
2061 	int		i;
2062 	dr_devset_t	devset;
2063 	dr_board_t	*bp = hp->h_bd;
2064 	static fn_t	f = "dr_dev_cancel";
2065 
2066 	PR_ALL("%s...\n", f);
2067 
2068 	/*
2069 	 * Only devices which have been "released" are
2070 	 * subject to cancellation.
2071 	 */
2072 	devset = hp->h_devset & DR_DEVS_RELEASED(bp);
2073 
2074 	/*
2075 	 * Nothing to do for CPUs or IO other than change back
2076 	 * their state.
2077 	 */
2078 	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
2079 		dr_cpu_unit_t	*cp;
2080 		dr_state_t	nstate;
2081 
2082 		if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
2083 			continue;
2084 
2085 		cp = dr_get_cpu_unit(bp, i);
2086 		if (dr_cancel_cpu(cp) == 0)
2087 			nstate = DR_STATE_CONFIGURED;
2088 		else
2089 			nstate = DR_STATE_FATAL;
2090 
2091 		dr_device_transition(&cp->sbc_cm, nstate);
2092 	}
2093 
2094 	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
2095 		dr_io_unit_t *ip;
2096 
2097 		if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
2098 			continue;
2099 		ip = dr_get_io_unit(bp, i);
2100 		dr_device_transition(&ip->sbi_cm, DR_STATE_CONFIGURED);
2101 	}
2102 	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
2103 		dr_mem_unit_t	*mp;
2104 		dr_state_t	nstate;
2105 
2106 		if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
2107 			continue;
2108 
2109 		mp = dr_get_mem_unit(bp, i);
2110 		if (dr_cancel_mem(mp) == 0)
2111 			nstate = DR_STATE_CONFIGURED;
2112 		else
2113 			nstate = DR_STATE_FATAL;
2114 
2115 		dr_device_transition(&mp->sbm_cm, nstate);
2116 	}
2117 
2118 	PR_ALL("%s: unreleasing devset (0x%x)\n", f, (uint_t)devset);
2119 
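	/* take the cancelled devices back out of the board's released set */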
2120 	DR_DEVS_CANCEL(bp, devset);
2121 
2122 	if (DR_DEVS_RELEASED(bp) == 0) {
2123 		dr_state_t	new_state;
2124 		/*
2125 		 * If the board no longer has any released devices,
2126 		 * then transition it back to the CONFIGURED/PARTIAL state.
2127 		 */
2128 		if (DR_DEVS_ATTACHED(bp) == DR_DEVS_PRESENT(bp))
2129 			new_state = DR_STATE_CONFIGURED;
2130 		else
2131 			new_state = DR_STATE_PARTIAL;
2132 		if (bp->b_state != new_state) {
2133 			dr_board_transition(bp, new_state);
2134 		}
2135 		hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
2136 		hp->h_bd->b_busy = 0;
2137 		(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
2138 	}
2139 }
2140 
2141 static int
2142 dr_dev_status(dr_handle_t *hp)
2143 {
2144 	int		nstat, mode, ncm, sz, pbsz, pnstat;
2145 	dr_handle_t	*shp;
2146 	dr_devset_t	devset = 0;
2147 	sbd_stat_t	*dstatp = NULL;
2148 	sbd_dev_stat_t	*devstatp;
2149 	dr_board_t	*bp;
2150 	drmach_status_t	 pstat;
2151 	int		rv = 0;
2152 
2153 #ifdef _MULTI_DATAMODEL
2154 	int sz32 = 0;
2155 #endif /* _MULTI_DATAMODEL */
2156 
2157 	static fn_t	f = "dr_status";
2158 
2159 	PR_ALL("%s...\n", f);
2160 
2161 	mode = hp->h_mode;
2162 	shp = hp;
2163 	devset = shp->h_devset;
2164 	bp = hp->h_bd;
2165 
2166 	/*
2167 	 * Block out disconnect, unassign, IO unconfigure and
2168 	 * devinfo branch creation during status.
2169 	 */
2170 	mutex_enter(&bp->b_slock);
2171 	while (bp->b_sflags & DR_BSLOCK) {
2172 		if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
2173 			mutex_exit(&bp->b_slock);
2174 			return (EINTR);
2175 		}
2176 	}
2177 	bp->b_sflags |= DR_BSLOCK;
2178 	mutex_exit(&bp->b_slock);
2179 
2180 	ncm = 1;
2181 	if (hp->h_sbdcmd.cmd_cm.c_id.c_type == SBD_COMP_NONE) {
2182 		if (dr_cmd_flags(hp) & SBD_FLAG_ALLCMP) {
2183 			/*
2184 			 * Calculate the maximum number of components possible
2185 			 * for a board.  This number will be used to size the
2186 			 * status scratch buffer used by board and component
2187 			 * status functions.
2188 			 * This buffer may differ in size from what is provided
2189 			 * by the plugin, since the known component set on the
2190 			 * board may change between the plugin's GETNCM call
2191 			 * and the status call.  Sizing will be adjusted to the
2192 			 * plugin's receptacle buffer at copyout time.
2193 			 */
2194 			ncm = MAX_CPU_UNITS_PER_BOARD +
2195 				MAX_MEM_UNITS_PER_BOARD +
2196 				MAX_IO_UNITS_PER_BOARD;
2197 
2198 		} else {
2199 			/*
2200 			 * In the case of c_type == SBD_COMP_NONE, and
2201 			 * SBD_FLAG_ALLCMP not specified, only the board
2202 			 * info is to be returned, no components.
2203 			 */
2204 			ncm = 0;
2205 			devset = 0;
2206 		}
2207 	}
2208 
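	/*
	 * sbd_stat_t already embeds one sbd_dev_stat_t, so only
	 * ncm - 1 additional entries need to be allocated.
	 */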
2209 	sz = sizeof (sbd_stat_t);
2210 	if (ncm > 1)
2211 		sz += sizeof (sbd_dev_stat_t) * (ncm - 1);
2212 
2213 
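	/*
	 * pnstat is the number of sbd_dev_stat_t entries that fit in the
	 * plugin-supplied buffer beyond the leading sbd_stat_t.
	 */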
2214 	pbsz = (int)hp->h_sbdcmd.cmd_stat.s_nbytes;
2215 	pnstat = (pbsz - sizeof (sbd_stat_t))/sizeof (sbd_dev_stat_t);
2216 
2217 	/*
2218 	 * s_nbytes describes the size of the preallocated user
2219 	 * buffer into which the application is expecting to
2220 	 * receive the sbd_stat_t and sbd_dev_stat_t structures.
2221 	 */
2222 
2223 #ifdef _MULTI_DATAMODEL
2224 
2225 	/*
2226 	 * More buffer space is required for the 64bit to 32bit
2227 	 * conversion of data structures.
2228 	 */
2229 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
2230 		sz32 = sizeof (sbd_stat32_t);
2231 		if (ncm > 1)
2232 			sz32  += sizeof (sbd_dev_stat32_t) * (ncm - 1);
2233 		pnstat = (pbsz - sizeof (sbd_stat32_t))/
2234 			sizeof (sbd_dev_stat32_t);
2235 	}
2236 
2237 	sz += sz32;
2238 #endif
2239 	/*
2240 	 * Since one sbd_dev_stat_t is included in the sbd_stat_t,
2241 	 * increment the plugin's nstat count.
2242 	 */
2243 	++pnstat;
2244 
2245 	if (bp->b_id == 0) {
2246 		bzero(&pstat, sizeof (pstat));
2247 	} else {
2248 		sbd_error_t *err;
2249 
2250 		err = drmach_status(bp->b_id, &pstat);
2251 		if (err) {
2252 			DRERR_SET_C(&hp->h_err, &err);
2253 			rv = EIO;
2254 			goto status_done;
2255 		}
2256 	}
2257 
2258 	dstatp = (sbd_stat_t *)GETSTRUCT(char, sz);
2259 
2260 	devstatp = &dstatp->s_stat[0];
2261 
2262 	dstatp->s_board = bp->b_num;
2263 
2264 	/*
2265 	 * Detect transitions between empty and disconnected.
2266 	 */
2267 	if (!pstat.empty && (bp->b_rstate == SBD_STAT_EMPTY))
2268 		bp->b_rstate = SBD_STAT_DISCONNECTED;
2269 	else if (pstat.empty && (bp->b_rstate == SBD_STAT_DISCONNECTED))
2270 		bp->b_rstate = SBD_STAT_EMPTY;
2271 
2272 	dstatp->s_rstate = bp->b_rstate;
2273 	dstatp->s_ostate = bp->b_ostate;
2274 	dstatp->s_cond = bp->b_cond = pstat.cond;
2275 	dstatp->s_busy = bp->b_busy | pstat.busy;
2276 	dstatp->s_time = bp->b_time;
2277 	dstatp->s_power = pstat.powered;
2278 	dstatp->s_assigned = bp->b_assigned = pstat.assigned;
2279 	dstatp->s_nstat = nstat = 0;
2280 	bcopy(&pstat.type[0], &dstatp->s_type[0], SBD_TYPE_LEN);
2281 	bcopy(&pstat.info[0], &dstatp->s_info[0], SBD_MAX_INFO);
2282 
2283 	devset &= DR_DEVS_PRESENT(bp);
2284 	if (devset == 0) {
2285 		/*
2286 		 * No device chosen.
2287 		 */
2288 		PR_ALL("%s: no device present\n", f);
2289 	}
2290 
2291 	if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT))
2292 		if ((nstat = dr_cpu_status(hp, devset, devstatp)) > 0) {
2293 			dstatp->s_nstat += nstat;
2294 			devstatp += nstat;
2295 		}
2296 
2297 	if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT))
2298 		if ((nstat = dr_mem_status(hp, devset, devstatp)) > 0) {
2299 			dstatp->s_nstat += nstat;
2300 			devstatp += nstat;
2301 		}
2302 
2303 	if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT))
2304 		if ((nstat = dr_io_status(hp, devset, devstatp)) > 0) {
2305 			dstatp->s_nstat += nstat;
2306 			devstatp += nstat;
2307 		}
2308 
2309 	/*
2310 	 * Due to a possible change in the number of components between
2311 	 * the time of the plugin's GETNCM call and now, there may be
2312 	 * more or fewer components than the plugin's buffer can
2313 	 * hold.  Adjust s_nstat accordingly.
2314 	 */
2315 
2316 	dstatp->s_nstat = dstatp->s_nstat > pnstat ? pnstat : dstatp->s_nstat;
2317 
2318 
2319 #ifdef _MULTI_DATAMODEL
2320 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
2321 		int		i, j;
2322 		sbd_stat32_t	*dstat32p;
2323 
2324 		dstat32p = (sbd_stat32_t *)devstatp;
2325 
2326 		/* Alignment Paranoia */
2327 		if ((ulong_t)dstat32p & 0x1) {
2328 			PR_ALL("%s: alignment: sz=0x%lx dstat32p=0x%p\n",
2329 				f, sizeof (sbd_stat32_t), dstat32p);
2330 			DR_OP_INTERNAL_ERROR(hp);
2331 			rv = EINVAL;
2332 			goto status_done;
2333 		}
2334 
2335 		/* paranoia: detect buffer overrun */
2336 		if ((caddr_t)&dstat32p->s_stat[dstatp->s_nstat] >
2337 				((caddr_t)dstatp) + sz) {
2338 			DR_OP_INTERNAL_ERROR(hp);
2339 			rv = EINVAL;
2340 			goto status_done;
2341 		}
2342 
2343 		/* copy sbd_stat_t structure members */
2344 #define	_SBD_STAT(t, m) dstat32p->m = (t)dstatp->m
2345 		_SBD_STAT(int32_t, s_board);
2346 		_SBD_STAT(int32_t, s_rstate);
2347 		_SBD_STAT(int32_t, s_ostate);
2348 		_SBD_STAT(int32_t, s_cond);
2349 		_SBD_STAT(int32_t, s_busy);
2350 		_SBD_STAT(time32_t, s_time);
2351 		_SBD_STAT(uint32_t, s_power);
2352 		_SBD_STAT(uint32_t, s_assigned);
2353 		_SBD_STAT(int32_t, s_nstat);
2354 		bcopy(&dstatp->s_type[0], &dstat32p->s_type[0],
2355 			SBD_TYPE_LEN);
2356 		bcopy(&dstatp->s_info[0], &dstat32p->s_info[0],
2357 			SBD_MAX_INFO);
2358 #undef _SBD_STAT
2359 
2360 		for (i = 0; i < dstatp->s_nstat; i++) {
2361 			sbd_dev_stat_t		*dsp = &dstatp->s_stat[i];
2362 			sbd_dev_stat32_t	*ds32p = &dstat32p->s_stat[i];
2363 #define	_SBD_DEV_STAT(t, m) ds32p->m = (t)dsp->m
2364 
2365 			/* copy sbd_cm_stat_t structure members */
2366 			_SBD_DEV_STAT(int32_t, ds_type);
2367 			_SBD_DEV_STAT(int32_t, ds_unit);
2368 			_SBD_DEV_STAT(int32_t, ds_ostate);
2369 			_SBD_DEV_STAT(int32_t, ds_cond);
2370 			_SBD_DEV_STAT(int32_t, ds_busy);
2371 			_SBD_DEV_STAT(int32_t, ds_suspend);
2372 			_SBD_DEV_STAT(time32_t, ds_time);
2373 			bcopy(&dsp->ds_name[0], &ds32p->ds_name[0],
2374 			    OBP_MAXPROPNAME);
2375 
2376 			switch (dsp->ds_type) {
2377 			case SBD_COMP_CPU:
2378 				/* copy sbd_cpu_stat_t structure members */
2379 				_SBD_DEV_STAT(int32_t, d_cpu.cs_isbootproc);
2380 				_SBD_DEV_STAT(int32_t, d_cpu.cs_cpuid);
2381 				_SBD_DEV_STAT(int32_t, d_cpu.cs_speed);
2382 				_SBD_DEV_STAT(int32_t, d_cpu.cs_ecache);
2383 				break;
2384 
2385 			case SBD_COMP_MEM:
2386 				/* copy sbd_mem_stat_t structure members */
2387 				_SBD_DEV_STAT(int32_t, d_mem.ms_interleave);
2388 				_SBD_DEV_STAT(uint32_t, d_mem.ms_basepfn);
2389 				_SBD_DEV_STAT(uint32_t, d_mem.ms_totpages);
2390 				_SBD_DEV_STAT(uint32_t, d_mem.ms_detpages);
2391 				_SBD_DEV_STAT(int32_t, d_mem.ms_pageslost);
2392 				_SBD_DEV_STAT(uint32_t, d_mem.ms_managed_pages);
2393 				_SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_pages);
2394 				_SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_first);
2395 				_SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_last);
2396 				_SBD_DEV_STAT(int32_t, d_mem.ms_cage_enabled);
2397 				_SBD_DEV_STAT(int32_t, d_mem.ms_peer_is_target);
2398 				bcopy(&dsp->d_mem.ms_peer_ap_id[0],
2399 					&ds32p->d_mem.ms_peer_ap_id[0],
2400 					sizeof (ds32p->d_mem.ms_peer_ap_id));
2401 				break;
2402 
2403 			case SBD_COMP_IO:
2404 				/* copy sbd_io_stat_t structure members */
2405 				_SBD_DEV_STAT(int32_t, d_io.is_referenced);
2406 				_SBD_DEV_STAT(int32_t, d_io.is_unsafe_count);
2407 
2408 				for (j = 0; j < SBD_MAX_UNSAFE; j++)
2409 					_SBD_DEV_STAT(int32_t,
2410 						d_io.is_unsafe_list[j]);
2411 
2412 				bcopy(&dsp->d_io.is_pathname[0],
2413 				    &ds32p->d_io.is_pathname[0], MAXPATHLEN);
2414 				break;
2415 
2416 			case SBD_COMP_CMP:
2417 				/* copy sbd_cmp_stat_t structure members */
2418 				bcopy(&dsp->d_cmp.ps_cpuid[0],
2419 					&ds32p->d_cmp.ps_cpuid[0],
2420 					sizeof (ds32p->d_cmp.ps_cpuid));
2421 				_SBD_DEV_STAT(int32_t, d_cmp.ps_ncores);
2422 				_SBD_DEV_STAT(int32_t, d_cmp.ps_speed);
2423 				_SBD_DEV_STAT(int32_t, d_cmp.ps_ecache);
2424 				break;
2425 
2426 			default:
2427 				cmn_err(CE_WARN, "%s: unknown dev type (%d)",
2428 				    f, (int)dsp->ds_type);
2429 				rv = EFAULT;
2430 				goto status_done;
2431 			}
2432 #undef _SBD_DEV_STAT
2433 		}
2434 
2435 
2436 		if (ddi_copyout((void *)dstat32p,
2437 			hp->h_sbdcmd.cmd_stat.s_statp, pbsz, mode) != 0) {
2438 			cmn_err(CE_WARN,
2439 				"%s: failed to copyout status "
2440 				"for board %d", f, bp->b_num);
2441 			rv = EFAULT;
2442 			goto status_done;
2443 		}
2444 	} else
2445 #endif /* _MULTI_DATAMODEL */
2446 
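	/* native-format copyout, used when no ILP32 conversion is required */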
2447 	if (ddi_copyout((void *)dstatp, hp->h_sbdcmd.cmd_stat.s_statp,
2448 		pbsz, mode) != 0) {
2449 		cmn_err(CE_WARN,
2450 			"%s: failed to copyout status for board %d",
2451 			f, bp->b_num);
2452 		rv = EFAULT;
2453 		goto status_done;
2454 	}
2455 
2456 status_done:
2457 	if (dstatp != NULL)
2458 		FREESTRUCT(dstatp, char, sz);
2459 
2460 	dr_unlock_status(bp);
2461 
2462 	return (rv);
2463 }
2464 
2465 static int
2466 dr_get_ncm(dr_handle_t *hp)
2467 {
2468 	int		i;
2469 	int		ncm = 0;
2470 	dr_devset_t	devset;
2471 
2472 	devset = DR_DEVS_PRESENT(hp->h_bd);
2473 	if (hp->h_sbdcmd.cmd_cm.c_id.c_type != SBD_COMP_NONE)
2474 		devset &= DEVSET(hp->h_sbdcmd.cmd_cm.c_id.c_type,
2475 			DEVSET_ANYUNIT);
2476 
2477 	/*
2478 	 * Handle CPUs first to deal with possible CMP
2479 	 * devices. If the CPU is a CMP, we need to only
2480 	 * increment ncm once even if there are multiple
2481 	 * cores for that CMP present in the devset.
2482 	 */
2483 	for (i = 0; i < MAX_CMP_UNITS_PER_BOARD; i++) {
2484 		if (devset & DEVSET(SBD_COMP_CMP, i)) {
2485 			ncm++;
2486 		}
2487 	}
2488 
2489 	/* eliminate the CPU information from the devset */
2490 	devset &= ~(DEVSET(SBD_COMP_CMP, DEVSET_ANYUNIT));
2491 
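	/* count the remaining set bits; each represents a present mem or io unit */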
2492 	for (i = 0; i < (sizeof (dr_devset_t) * 8); i++) {
2493 		ncm += devset & 0x1;
2494 		devset >>= 1;
2495 	}
2496 
2497 	return (ncm);
2498 }
2499 
2500 /* used by dr_mem.c */
2501 /* TODO: eliminate dr_boardlist */
2502 dr_board_t *
2503 dr_lookup_board(int board_num)
2504 {
2505 	dr_board_t *bp;
2506 
2507 	ASSERT(board_num >= 0 && board_num < MAX_BOARDS);
2508 
2509 	bp = &dr_boardlist[board_num];
2510 	ASSERT(bp->b_num == board_num);
2511 
2512 	return (bp);
2513 }
2514 
2515 static dr_dev_unit_t *
2516 dr_get_dev_unit(dr_board_t *bp, sbd_comp_type_t nt, int unit_num)
2517 {
2518 	dr_dev_unit_t	*dp;
2519 
2520 	dp = DR_GET_BOARD_DEVUNIT(bp, nt, unit_num);
2521 	ASSERT(dp->du_common.sbdev_bp == bp);
2522 	ASSERT(dp->du_common.sbdev_unum == unit_num);
2523 	ASSERT(dp->du_common.sbdev_type == nt);
2524 
2525 	return (dp);
2526 }
2527 
2528 dr_cpu_unit_t *
2529 dr_get_cpu_unit(dr_board_t *bp, int unit_num)
2530 {
2531 	dr_dev_unit_t	*dp;
2532 
2533 	ASSERT(unit_num >= 0 && unit_num < MAX_CPU_UNITS_PER_BOARD);
2534 
2535 	dp = dr_get_dev_unit(bp, SBD_COMP_CPU, unit_num);
2536 	return (&dp->du_cpu);
2537 }
2538 
2539 dr_mem_unit_t *
2540 dr_get_mem_unit(dr_board_t *bp, int unit_num)
2541 {
2542 	dr_dev_unit_t	*dp;
2543 
2544 	ASSERT(unit_num >= 0 && unit_num < MAX_MEM_UNITS_PER_BOARD);
2545 
2546 	dp = dr_get_dev_unit(bp, SBD_COMP_MEM, unit_num);
2547 	return (&dp->du_mem);
2548 }
2549 
2550 dr_io_unit_t *
2551 dr_get_io_unit(dr_board_t *bp, int unit_num)
2552 {
2553 	dr_dev_unit_t	*dp;
2554 
2555 	ASSERT(unit_num >= 0 && unit_num < MAX_IO_UNITS_PER_BOARD);
2556 
2557 	dp = dr_get_dev_unit(bp, SBD_COMP_IO, unit_num);
2558 	return (&dp->du_io);
2559 }
2560 
2561 dr_common_unit_t *
2562 dr_get_common_unit(dr_board_t *bp, sbd_comp_type_t nt, int unum)
2563 {
2564 	dr_dev_unit_t	*dp;
2565 
2566 	dp = dr_get_dev_unit(bp, nt, unum);
2567 	return (&dp->du_common);
2568 }
2569 
2570 static dr_devset_t
2571 dr_dev2devset(sbd_comp_id_t *cid)
2572 {
2573 	static fn_t	f = "dr_dev2devset";
2574 
2575 	dr_devset_t	devset;
2576 	int		unit = cid->c_unit;
2577 
2578 	switch (cid->c_type) {
2579 		case SBD_COMP_NONE:
2580 			devset =  DEVSET(SBD_COMP_CPU, DEVSET_ANYUNIT);
2581 			devset |= DEVSET(SBD_COMP_MEM, DEVSET_ANYUNIT);
2582 			devset |= DEVSET(SBD_COMP_IO,  DEVSET_ANYUNIT);
2583 			PR_ALL("%s: COMP_NONE devset = 0x%lx\n", f, devset);
2584 			break;
2585 
2586 		case SBD_COMP_CPU:
2587 			if ((unit >= MAX_CPU_UNITS_PER_BOARD) || (unit < 0)) {
2588 				cmn_err(CE_WARN,
2589 					"%s: invalid cpu unit# = %d",
2590 					f, unit);
2591 				devset = 0;
2592 			} else {
2593 				/*
2594 				 * Generate a devset that includes all the
2595 				 * cores of a CMP device. If this is not a
2596 				 * CMP, the extra cores will be eliminated
2597 				 * later since they are not present. This is
2598 				 * also true for CMP devices that do not have
2599 				 * all cores active.
2600 				 */
2601 				devset = DEVSET(SBD_COMP_CMP, unit);
2602 			}
2603 
2604 			PR_ALL("%s: CPU devset = 0x%lx\n", f, devset);
2605 			break;
2606 
2607 		case SBD_COMP_MEM:
2608 			if (unit == SBD_NULL_UNIT) {
2609 				unit = 0;
2610 				cid->c_unit = 0;
2611 			}
2612 
2613 			if ((unit >= MAX_MEM_UNITS_PER_BOARD) || (unit < 0)) {
2614 				cmn_err(CE_WARN,
2615 					"%s: invalid mem unit# = %d",
2616 					f, unit);
2617 				devset = 0;
2618 			} else
2619 				devset = DEVSET(cid->c_type, unit);
2620 
2621 			PR_ALL("%s: MEM devset = 0x%lx\n", f, devset);
2622 			break;
2623 
2624 		case SBD_COMP_IO:
2625 			if ((unit >= MAX_IO_UNITS_PER_BOARD) || (unit < 0)) {
2626 				cmn_err(CE_WARN,
2627 					"%s: invalid io unit# = %d",
2628 					f, unit);
2629 				devset = 0;
2630 			} else
2631 				devset = DEVSET(cid->c_type, unit);
2632 
2633 			PR_ALL("%s: IO devset = 0x%lx\n", f, devset);
2634 			break;
2635 
2636 		default:
2637 		case SBD_COMP_UNKNOWN:
2638 			devset = 0;
2639 			break;
2640 	}
2641 
2642 	return (devset);
2643 }
2644 
2645 /*
2646  * Converts a dynamic attachment point name to a SBD_COMP_* type.
2647  * Returns SBD_COMP_UNKNOWN if name is not recognized.
2648  */
2649 static int
2650 dr_dev_type_to_nt(char *type)
2651 {
2652 	int i;
2653 
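	/* dr_devattr[] ends with an SBD_COMP_UNKNOWN sentinel, returned on no match */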
2654 	for (i = 0; dr_devattr[i].s_nodetype != SBD_COMP_UNKNOWN; i++)
2655 		if (strcmp(dr_devattr[i].s_devtype, type) == 0)
2656 			break;
2657 
2658 	return (dr_devattr[i].s_nodetype);
2659 }
2660 
2661 /*
2662  * Converts a SBD_COMP_* type to a dynamic attachment point name.
2663  * Returns NULL if the SBD_COMP_* type is not recognized.
2664  */
2665 char *
2666 dr_nt_to_dev_type(int nt)
2667 {
2668 	int i;
2669 
2670 	for (i = 0; dr_devattr[i].s_nodetype != SBD_COMP_UNKNOWN; i++)
2671 		if (dr_devattr[i].s_nodetype == nt)
2672 			break;
2673 
2674 	return (dr_devattr[i].s_devtype);
2675 }
2676 
2677 
2678 /*
2679  * State transition policy is that if there is some component for which
2680  * the state transition is valid, then let it through. The exception is
2681  * SBD_CMD_DISCONNECT. On disconnect, the state transition must be valid
2682  * for ALL components.
2683  * Returns the state that is in error, if any.
2684  */
2685 static int
2686 dr_check_transition(dr_board_t *bp, dr_devset_t *devsetp,
2687 			struct dr_state_trans *transp, int cmd)
2688 {
2689 	int			s, ut;
2690 	int			state_err = 0;
2691 	dr_devset_t		devset;
2692 	dr_common_unit_t	*cp;
2693 	static fn_t		f = "dr_check_transition";
2694 
2695 	devset = *devsetp;
2696 
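	/*
	 * Walk each device type, dropping from the devset any unit that is
	 * not present or whose current state does not permit the requested
	 * transition; the first offending state is recorded in state_err.
	 */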
2697 	if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
2698 		for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
2699 			if (DEVSET_IN_SET(devset, SBD_COMP_CPU, ut) == 0)
2700 				continue;
2701 
2702 			cp = dr_get_common_unit(bp, SBD_COMP_CPU, ut);
2703 			s = (int)cp->sbdev_state;
2704 			if (!DR_DEV_IS_PRESENT(cp)) {
2705 				DEVSET_DEL(devset, SBD_COMP_CPU, ut);
2706 			} else {
2707 				if (transp->x_op[s].x_rv) {
2708 					if (!state_err)
2709 						state_err = s;
2710 					DEVSET_DEL(devset, SBD_COMP_CPU, ut);
2711 				}
2712 			}
2713 		}
2714 	}
2715 	if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
2716 		for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
2717 			if (DEVSET_IN_SET(devset, SBD_COMP_MEM, ut) == 0)
2718 				continue;
2719 
2720 			cp = dr_get_common_unit(bp, SBD_COMP_MEM, ut);
2721 			s = (int)cp->sbdev_state;
2722 			if (!DR_DEV_IS_PRESENT(cp)) {
2723 				DEVSET_DEL(devset, SBD_COMP_MEM, ut);
2724 			} else {
2725 				if (transp->x_op[s].x_rv) {
2726 					if (!state_err)
2727 						state_err = s;
2728 					DEVSET_DEL(devset, SBD_COMP_MEM, ut);
2729 				}
2730 			}
2731 		}
2732 	}
2733 	if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
2734 		for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
2735 			if (DEVSET_IN_SET(devset, SBD_COMP_IO, ut) == 0)
2736 				continue;
2737 
2738 			cp = dr_get_common_unit(bp, SBD_COMP_IO, ut);
2739 			s = (int)cp->sbdev_state;
2740 			if (!DR_DEV_IS_PRESENT(cp)) {
2741 				DEVSET_DEL(devset, SBD_COMP_IO, ut);
2742 			} else {
2743 				if (transp->x_op[s].x_rv) {
2744 					if (!state_err)
2745 						state_err = s;
2746 					DEVSET_DEL(devset, SBD_COMP_IO, ut);
2747 				}
2748 			}
2749 		}
2750 	}
2751 
2752 	PR_ALL("%s: requested devset = 0x%x, final devset = 0x%x\n",
2753 		f, (uint_t)*devsetp, (uint_t)devset);
2754 
2755 	*devsetp = devset;
2756 	/*
2757 	 * If there are some remaining components for which
2758 	 * this state transition is valid, then allow them
2759 	 * through; otherwise, if none are left, return
2760 	 * the state error.  The exception is SBD_CMD_DISCONNECT.
2761 	 * On disconnect, the state transition must be valid for ALL
2762 	 * components.
2763 	 */
2764 	if (cmd == SBD_CMD_DISCONNECT)
2765 		return (state_err);
2766 	return (devset ? 0 : state_err);
2767 }
2768 
2769 void
2770 dr_device_transition(dr_common_unit_t *cp, dr_state_t st)
2771 {
2772 	PR_STATE("%s STATE %s(%d) -> %s(%d)\n",
2773 		cp->sbdev_path,
2774 		state_str[cp->sbdev_state], cp->sbdev_state,
2775 		state_str[st], st);
2776 
2777 	cp->sbdev_state = st;
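	/* keep the occupant state and timestamps in sync with the new state */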
2778 	if (st == DR_STATE_CONFIGURED) {
2779 		cp->sbdev_ostate = SBD_STAT_CONFIGURED;
2780 		if (cp->sbdev_bp->b_ostate != SBD_STAT_CONFIGURED) {
2781 			cp->sbdev_bp->b_ostate = SBD_STAT_CONFIGURED;
2782 			(void) drv_getparm(TIME,
2783 				(void *) &cp->sbdev_bp->b_time);
2784 		}
2785 	} else
2786 		cp->sbdev_ostate = SBD_STAT_UNCONFIGURED;
2787 
2788 	(void) drv_getparm(TIME, (void *) &cp->sbdev_time);
2789 }
2790 
2791 static void
2792 dr_board_transition(dr_board_t *bp, dr_state_t st)
2793 {
2794 	PR_STATE("BOARD %d STATE: %s(%d) -> %s(%d)\n",
2795 		bp->b_num,
2796 		state_str[bp->b_state], bp->b_state,
2797 		state_str[st], st);
2798 
2799 	bp->b_state = st;
2800 }
2801 
2802 void
2803 dr_op_err(int ce, dr_handle_t *hp, int code, char *fmt, ...)
2804 {
2805 	sbd_error_t	*err;
2806 	va_list		args;
2807 
2808 	va_start(args, fmt);
2809 	err = drerr_new_v(code, fmt, args);
2810 	va_end(args);
2811 
2812 	if (ce != CE_IGNORE)
2813 		sbd_err_log(err, ce);
2814 
2815 	DRERR_SET_C(&hp->h_err, &err);
2816 }
2817 
2818 void
2819 dr_dev_err(int ce, dr_common_unit_t *cp, int code)
2820 {
2821 	sbd_error_t	*err;
2822 
2823 	err = drerr_new(0, code, cp->sbdev_path, NULL);
2824 
2825 	if (ce != CE_IGNORE)
2826 		sbd_err_log(err, ce);
2827 
2828 	DRERR_SET_C(&cp->sbdev_error, &err);
2829 }
2830 
2831 /*
2832  * A callback routine.  Called from the drmach layer as a result of
2833  * call to drmach_board_find_devices from dr_init_devlists.
2834  */
2835 static sbd_error_t *
2836 dr_dev_found(void *data, const char *name, int unum, drmachid_t id)
2837 {
2838 	dr_board_t	*bp = data;
2839 	dr_dev_unit_t	*dp;
2840 	int		 nt;
2841 	static fn_t	f = "dr_dev_found";
2842 
2843 	PR_ALL("%s (board = %d, name = %s, unum = %d, id = %p)...\n",
2844 		f, bp->b_num, name, unum, id);
2845 
2846 	nt = dr_dev_type_to_nt((char *)name);
2847 	if (nt == SBD_COMP_UNKNOWN) {
2848 		/*
2849 		 * This should not happen.  When it does, it indicates
2850 		 * a mismatch between the devices supported by the drmach
2851 		 * layer and those supported by this layer.
2852 		 */
2853 		return (DR_INTERNAL_ERROR());
2854 	}
2855 
2856 	dp = DR_GET_BOARD_DEVUNIT(bp, nt, unum);
2857 
2858 	/* sanity check */
2859 	ASSERT(dp->du_common.sbdev_bp == bp);
2860 	ASSERT(dp->du_common.sbdev_unum == unum);
2861 	ASSERT(dp->du_common.sbdev_type == nt);
2862 
2863 	/* render dynamic attachment point path of this unit */
2864 	(void) snprintf(dp->du_common.sbdev_path,
2865 		sizeof (dp->du_common.sbdev_path),
2866 		(nt == SBD_COMP_MEM ? "%s::%s" : "%s::%s%d"),
2867 		bp->b_path, name, DR_UNUM2SBD_UNUM(unum, nt));
2868 
2869 	dp->du_common.sbdev_id = id;
2870 	DR_DEV_SET_PRESENT(&dp->du_common);
2871 
2872 	bp->b_ndev++;
2873 
2874 	return (NULL);
2875 }
2876 
2877 static sbd_error_t *
2878 dr_init_devlists(dr_board_t *bp)
2879 {
2880 	int		i;
2881 	sbd_error_t	*err;
2882 	dr_dev_unit_t	*dp;
2883 	static fn_t	f = "dr_init_devlists";
2884 
2885 	PR_ALL("%s (%s)...\n", f, bp->b_path);
2886 
2887 	/* sanity check */
2888 	ASSERT(bp->b_ndev == 0);
2889 
2890 	DR_DEVS_DISCONNECT(bp, (uint_t)-1);
2891 
2892 	/*
2893 	 * This routine builds the board's devlist and initializes
2894 	 * the common portion of the unit data structures.
2895 	 * Note: because the common portion is considered
2896 	 * uninitialized, the dr_get_*_unit() routines can not
2897 	 * be used.
2898 	 */
2899 
2900 	/*
2901 	 * Clear out old entries, if any.
2902 	 */
2903 	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
2904 		dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_CPU, i);
2905 
2906 		bzero(dp, sizeof (*dp));
2907 		dp->du_common.sbdev_bp = bp;
2908 		dp->du_common.sbdev_unum = i;
2909 		dp->du_common.sbdev_type = SBD_COMP_CPU;
2910 	}
2911 
2912 	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
2913 		dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_MEM, i);
2914 
2915 		bzero(dp, sizeof (*dp));
2916 		dp->du_common.sbdev_bp = bp;
2917 		dp->du_common.sbdev_unum = i;
2918 		dp->du_common.sbdev_type = SBD_COMP_MEM;
2919 	}
2920 
2921 	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
2922 		dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_IO, i);
2923 
2924 		bzero(dp, sizeof (*dp));
2925 		dp->du_common.sbdev_bp = bp;
2926 		dp->du_common.sbdev_unum = i;
2927 		dp->du_common.sbdev_type = SBD_COMP_IO;
2928 	}
2929 
2930 	err = NULL;
2931 	if (bp->b_id) {
2932 		/* find devices on this board */
2933 		err = drmach_board_find_devices(
2934 			bp->b_id, bp, dr_dev_found);
2935 	}
2936 
2937 	return (err);
2938 }
2939 
2940 /*
2941  * Return 0 if the device identified by the given drmachid is
2942  * attached, or -1 if it is not attached or cannot be determined.
2943  */
2944 static int
2945 dr_check_unit_attached(dr_common_unit_t *cp)
2946 {
2947 	int		rv = 0;
2948 	processorid_t	cpuid;
2949 	uint64_t	basepa, endpa;
2950 	struct memlist	*ml;
2951 	extern struct memlist	*phys_install;
2952 	sbd_error_t	*err;
2953 	int		yes;
2954 	static fn_t	f = "dr_check_unit_attached";
2955 
2956 	switch (cp->sbdev_type) {
2957 	case SBD_COMP_CPU:
2958 		err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
2959 		if (err) {
2960 			DRERR_SET_C(&cp->sbdev_error, &err);
2961 			rv = -1;
2962 			break;
2963 		}
2964 		mutex_enter(&cpu_lock);
2965 		if (cpu_get(cpuid) == NULL)
2966 			rv = -1;
2967 		mutex_exit(&cpu_lock);
2968 		break;
2969 
2970 	case SBD_COMP_MEM:
2971 		err = drmach_mem_get_base_physaddr(cp->sbdev_id, &basepa);
2972 		if (err) {
2973 			DRERR_SET_C(&cp->sbdev_error, &err);
2974 			rv = -1;
2975 			break;
2976 		}
2977 
2978 		/*
2979 		 * basepa may not be on an alignment boundary; make it so.
2980 		 */
2981 		err = drmach_mem_get_slice_size(cp->sbdev_id, &endpa);
2982 		if (err) {
2983 			DRERR_SET_C(&cp->sbdev_error, &err);
2984 			rv = -1;
2985 			break;
2986 		}
2987 
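		/*
		 * endpa currently holds the slice size; align basepa down to
		 * a slice boundary and compute the end address of the slice.
		 */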
2988 		basepa &= ~(endpa - 1);
2989 		endpa += basepa;
2990 
2991 		/*
2992 		 * Check if base address is in phys_install.
2993 		 */
2994 		memlist_read_lock();
2995 		for (ml = phys_install; ml; ml = ml->next)
2996 			if ((endpa <= ml->address) ||
2997 				(basepa >= (ml->address + ml->size)))
2998 				continue;
2999 			else
3000 				break;
3001 		memlist_read_unlock();
3002 		if (ml == NULL)
3003 			rv = -1;
3004 		break;
3005 
3006 	case SBD_COMP_IO:
3007 		err = drmach_io_is_attached(cp->sbdev_id, &yes);
3008 		if (err) {
3009 			DRERR_SET_C(&cp->sbdev_error, &err);
3010 			rv = -1;
3011 			break;
3012 		} else if (!yes)
3013 			rv = -1;
3014 		break;
3015 
3016 	default:
3017 		PR_ALL("%s: unexpected nodetype(%d) for id 0x%p\n",
3018 			f, cp->sbdev_type, cp->sbdev_id);
3019 		rv = -1;
3020 		break;
3021 	}
3022 
3023 	return (rv);
3024 }
3025 
3026 /*
3027  * See if drmach recognizes the passthru command.  DRMACH expects the
3028  * id to identify the thing to which the command is being applied.  Using
3029  * nonsense SBD terms, that information has been perversely encoded in the
3030  * c_id member of the sbd_cmd_t structure.  This logic reads those tea
3031  * leaves, finds the associated drmach id, then calls drmach to process
3032  * the passthru command.
3033  */
3034 static int
3035 dr_pt_try_drmach(dr_handle_t *hp)
3036 {
3037 	dr_board_t	*bp = hp->h_bd;
3038 	sbd_comp_id_t	*comp_id = &hp->h_sbdcmd.cmd_cm.c_id;
3039 	drmachid_t	 id;
3040 
3041 	if (comp_id->c_type == SBD_COMP_NONE) {
3042 		id = bp->b_id;
3043 	} else {
3044 		sbd_comp_type_t	 nt;
3045 
3046 		nt = dr_dev_type_to_nt(comp_id->c_name);
3047 		if (nt == SBD_COMP_UNKNOWN) {
3048 			dr_op_err(CE_IGNORE, hp, ESBD_INVAL, comp_id->c_name);
3049 			id = 0;
3050 		} else {
3051 			/* pt command applied to dynamic attachment point */
3052 			dr_common_unit_t *cp;
3053 			cp = dr_get_common_unit(bp, nt, comp_id->c_unit);
3054 			id = cp->sbdev_id;
3055 		}
3056 	}
3057 
3058 	if (hp->h_err == NULL)
3059 		hp->h_err = drmach_passthru(id, &hp->h_opts);
3060 
3061 	return (hp->h_err == NULL ? 0 : -1);
3062 }
3063 
3064 static int
3065 dr_pt_ioctl(dr_handle_t *hp)
3066 {
3067 	int		cmd, rv, len;
3068 	int32_t		sz;
3069 	int		found;
3070 	char		*copts;
3071 	static fn_t	f = "dr_pt_ioctl";
3072 
3073 	PR_ALL("%s...\n", f);
3074 
3075 	sz = hp->h_opts.size;
3076 	copts = hp->h_opts.copts;
3077 
3078 	if (sz == 0 || copts == (char *)NULL) {
3079 		cmn_err(CE_WARN, "%s: invalid passthru args", f);
3080 		return (EINVAL);
3081 	}
3082 
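	/* look for a driver-level passthru command matching the option string */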
3083 	found = 0;
3084 	for (cmd = 0; cmd < (sizeof (pt_arr) / sizeof (pt_arr[0])); cmd++) {
3085 		len = strlen(pt_arr[cmd].pt_name);
3086 		found = (strncmp(pt_arr[cmd].pt_name, copts, len) == 0);
3087 		if (found)
3088 			break;
3089 	}
3090 
3091 	if (found)
3092 		rv = (*pt_arr[cmd].pt_func)(hp);
3093 	else
3094 		rv = dr_pt_try_drmach(hp);
3095 
3096 	return (rv);
3097 }
3098 
3099 /*
3100  * Called at driver load time to determine the state and condition
3101  * of an existing board in the system.
3102  */
3103 static void
3104 dr_board_discovery(dr_board_t *bp)
3105 {
3106 	int			i;
3107 	dr_devset_t		devs_lost, devs_attached = 0;
3108 	dr_cpu_unit_t		*cp;
3109 	dr_mem_unit_t		*mp;
3110 	dr_io_unit_t		*ip;
3111 	static fn_t		f = "dr_board_discovery";
3112 
3113 	if (DR_DEVS_PRESENT(bp) == 0) {
3114 		PR_ALL("%s: board %d has no devices present\n",
3115 			f, bp->b_num);
3116 		return;
3117 	}
3118 
3119 	/*
3120 	 * Check for existence of cpus.
3121 	 */
3122 	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
3123 		cp = dr_get_cpu_unit(bp, i);
3124 
3125 		if (!DR_DEV_IS_PRESENT(&cp->sbc_cm))
3126 			continue;
3127 
3128 		if (dr_check_unit_attached(&cp->sbc_cm) >= 0) {
3129 			DR_DEV_SET_ATTACHED(&cp->sbc_cm);
3130 			DEVSET_ADD(devs_attached, SBD_COMP_CPU, i);
3131 			PR_ALL("%s: board %d, cpu-unit %d - attached\n",
3132 				f, bp->b_num, i);
3133 		}
3134 		dr_init_cpu_unit(cp);
3135 	}
3136 
3137 	/*
3138 	 * Check for existence of memory.
3139 	 */
3140 	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
3141 		mp = dr_get_mem_unit(bp, i);
3142 
3143 		if (!DR_DEV_IS_PRESENT(&mp->sbm_cm))
3144 			continue;
3145 
3146 		if (dr_check_unit_attached(&mp->sbm_cm) >= 0) {
3147 			DR_DEV_SET_ATTACHED(&mp->sbm_cm);
3148 			DEVSET_ADD(devs_attached, SBD_COMP_MEM, i);
3149 			PR_ALL("%s: board %d, mem-unit %d - attached\n",
3150 				f, bp->b_num, i);
3151 		}
3152 		dr_init_mem_unit(mp);
3153 	}
3154 
3155 	/*
3156 	 * Check for i/o state.
3157 	 */
3158 	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
3159 		ip = dr_get_io_unit(bp, i);
3160 
3161 		if (!DR_DEV_IS_PRESENT(&ip->sbi_cm))
3162 			continue;
3163 
3164 		if (dr_check_unit_attached(&ip->sbi_cm) >= 0) {
3165 			/*
3166 			 * Found it!
3167 			 */
3168 			DR_DEV_SET_ATTACHED(&ip->sbi_cm);
3169 			DEVSET_ADD(devs_attached, SBD_COMP_IO, i);
3170 			PR_ALL("%s: board %d, io-unit %d - attached\n",
3171 				f, bp->b_num, i);
3172 		}
3173 		dr_init_io_unit(ip);
3174 	}
3175 
3176 	DR_DEVS_CONFIGURE(bp, devs_attached);
3177 	if (devs_attached && ((devs_lost = DR_DEVS_UNATTACHED(bp)) != 0)) {
3178 		int		ut;
3179 		/*
3180 		 * It is not legal on board discovery to have a
3181 		 * board that is only partially attached.  A board
3182 		 * is either all attached or all connected.  If a
3183 		 * board has at least one attached device, then
3184 		 * the remaining devices, if any, must have
3185 		 * been lost or disconnected.  These devices can
3186 		 * only be recovered by a full attach from scratch.
3187 		 * Note that devices previously in the unreferenced
3188 		 * state are subsequently lost until the next full
3189 		 * attach.  This is necessary since the driver unload
3190 		 * that must have occurred would have wiped out the
3191 		 * information necessary to re-configure the device
3192 		 * back online, e.g. memlist.
3193 		 */
3194 		PR_ALL("%s: some devices LOST (0x%lx)...\n", f, devs_lost);
3195 
3196 		for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
3197 			if (!DEVSET_IN_SET(devs_lost, SBD_COMP_CPU, ut))
3198 				continue;
3199 
3200 			cp = dr_get_cpu_unit(bp, ut);
3201 			dr_device_transition(&cp->sbc_cm, DR_STATE_EMPTY);
3202 		}
3203 
3204 		for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
3205 			if (!DEVSET_IN_SET(devs_lost, SBD_COMP_MEM, ut))
3206 				continue;
3207 
3208 			mp = dr_get_mem_unit(bp, ut);
3209 			dr_device_transition(&mp->sbm_cm, DR_STATE_EMPTY);
3210 		}
3211 
3212 		for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
3213 			if (!DEVSET_IN_SET(devs_lost, SBD_COMP_IO, ut))
3214 				continue;
3215 
3216 			ip = dr_get_io_unit(bp, ut);
3217 			dr_device_transition(&ip->sbi_cm, DR_STATE_EMPTY);
3218 		}
3219 
3220 		DR_DEVS_DISCONNECT(bp, devs_lost);
3221 	}
3222 }
3223 
3224 static int
3225 dr_board_init(dr_board_t *bp, dev_info_t *dip, int bd)
3226 {
3227 	sbd_error_t	*err;
3228 
3229 	mutex_init(&bp->b_lock, NULL, MUTEX_DRIVER, NULL);
3230 	mutex_init(&bp->b_slock, NULL, MUTEX_DRIVER, NULL);
3231 	cv_init(&bp->b_scv, NULL, CV_DRIVER, NULL);
3232 	bp->b_rstate = SBD_STAT_EMPTY;
3233 	bp->b_ostate = SBD_STAT_UNCONFIGURED;
3234 	bp->b_cond = SBD_COND_UNKNOWN;
3235 	(void) drv_getparm(TIME, (void *)&bp->b_time);
3236 
3237 	(void) drmach_board_lookup(bd, &bp->b_id);
3238 	bp->b_num = bd;
3239 	bp->b_dip = dip;
3240 
3241 	bp->b_dev[NIX(SBD_COMP_CPU)] = GETSTRUCT(dr_dev_unit_t,
3242 		MAX_CPU_UNITS_PER_BOARD);
3243 
3244 	bp->b_dev[NIX(SBD_COMP_MEM)] = GETSTRUCT(dr_dev_unit_t,
3245 		MAX_MEM_UNITS_PER_BOARD);
3246 
3247 	bp->b_dev[NIX(SBD_COMP_IO)] = GETSTRUCT(dr_dev_unit_t,
3248 		MAX_IO_UNITS_PER_BOARD);
3249 
3250 	/*
3251 	 * Initialize the devlists
3252 	 */
3253 	err = dr_init_devlists(bp);
3254 	if (err) {
3255 		sbd_err_clear(&err);
3256 		dr_board_destroy(bp);
3257 		return (-1);
3258 	} else if (bp->b_ndev == 0) {
3259 		dr_board_transition(bp, DR_STATE_EMPTY);
3260 	} else {
3261 		/*
3262 		 * Couldn't have made it down here without
3263 		 * having found at least one device.
3264 		 */
3265 		ASSERT(DR_DEVS_PRESENT(bp) != 0);
3266 		/*
3267 		 * Check the state of any possible devices on the
3268 		 * board.
3269 		 */
3270 		dr_board_discovery(bp);
3271 
3272 		bp->b_assigned = 1;
3273 
3274 		if (DR_DEVS_UNATTACHED(bp) == 0) {
3275 			/*
3276 			 * The board has no unattached devices, therefore
3277 			 * by reason of insanity it must be configured!
3278 			 */
3279 			dr_board_transition(bp, DR_STATE_CONFIGURED);
3280 			bp->b_ostate = SBD_STAT_CONFIGURED;
3281 			bp->b_rstate = SBD_STAT_CONNECTED;
3282 			bp->b_cond = SBD_COND_OK;
3283 			(void) drv_getparm(TIME, (void *)&bp->b_time);
3284 		} else if (DR_DEVS_ATTACHED(bp)) {
3285 			dr_board_transition(bp, DR_STATE_PARTIAL);
3286 			bp->b_ostate = SBD_STAT_CONFIGURED;
3287 			bp->b_rstate = SBD_STAT_CONNECTED;
3288 			bp->b_cond = SBD_COND_OK;
3289 			(void) drv_getparm(TIME, (void *)&bp->b_time);
3290 		} else {
3291 			dr_board_transition(bp, DR_STATE_CONNECTED);
3292 			bp->b_rstate = SBD_STAT_CONNECTED;
3293 			(void) drv_getparm(TIME, (void *)&bp->b_time);
3294 		}
3295 	}
3296 
3297 	return (0);
3298 }
3299 
3300 static void
3301 dr_board_destroy(dr_board_t *bp)
3302 {
3303 	PR_ALL("dr_board_destroy: num %d, path %s\n",
3304 		bp->b_num, bp->b_path);
3305 
3306 	dr_board_transition(bp, DR_STATE_EMPTY);
3307 	bp->b_rstate = SBD_STAT_EMPTY;
3308 	(void) drv_getparm(TIME, (void *)&bp->b_time);
3309 
3310 	/*
3311 	 * Free up MEM unit structs.
3312 	 */
3313 	FREESTRUCT(bp->b_dev[NIX(SBD_COMP_MEM)],
3314 		dr_dev_unit_t, MAX_MEM_UNITS_PER_BOARD);
3315 	bp->b_dev[NIX(SBD_COMP_MEM)] = NULL;
3316 	/*
3317 	 * Free up CPU unit structs.
3318 	 */
3319 	FREESTRUCT(bp->b_dev[NIX(SBD_COMP_CPU)],
3320 		dr_dev_unit_t, MAX_CPU_UNITS_PER_BOARD);
3321 	bp->b_dev[NIX(SBD_COMP_CPU)] = NULL;
3322 	/*
3323 	 * Free up IO unit structs.
3324 	 */
3325 	FREESTRUCT(bp->b_dev[NIX(SBD_COMP_IO)],
3326 		dr_dev_unit_t, MAX_IO_UNITS_PER_BOARD);
3327 	bp->b_dev[NIX(SBD_COMP_IO)] = NULL;
3328 
3329 	mutex_destroy(&bp->b_lock);
3330 	mutex_destroy(&bp->b_slock);
3331 	cv_destroy(&bp->b_scv);
3332 }
3333 
3334 void
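/*
 * Acquire the board status lock (DR_BSLOCK).  Status operations are
 * serialized against disconnect, unassign, IO unconfigure and devinfo
 * branch creation via the b_slock mutex and b_scv condition variable.
 */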
3335 dr_lock_status(dr_board_t *bp)
3336 {
3337 	mutex_enter(&bp->b_slock);
3338 	while (bp->b_sflags & DR_BSLOCK)
3339 		cv_wait(&bp->b_scv, &bp->b_slock);
3340 	bp->b_sflags |= DR_BSLOCK;
3341 	mutex_exit(&bp->b_slock);
3342 }
3343 
3344 void
3345 dr_unlock_status(dr_board_t *bp)
3346 {
3347 	mutex_enter(&bp->b_slock);
3348 	bp->b_sflags &= ~DR_BSLOCK;
3349 	cv_signal(&bp->b_scv);
3350 	mutex_exit(&bp->b_slock);
3351 }
3352 
3353 /*
3354  * Extract flags passed via ioctl.
3355  */
3356 int
3357 dr_cmd_flags(dr_handle_t *hp)
3358 {
3359 	return (hp->h_sbdcmd.cmd_cm.c_flags);
3360 }
3361