xref: /titanic_52/usr/src/uts/sun4u/ngdr/io/dr.c (revision d58fda4376e4bf67072ce2e69f6f47036f9dbb68)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * PIM-DR layer of DR driver.  Provides interface between user
31  * level applications and the PSM-DR layer.
32  */
33 
34 #include <sys/note.h>
35 #include <sys/debug.h>
36 #include <sys/types.h>
37 #include <sys/errno.h>
38 #include <sys/cred.h>
39 #include <sys/dditypes.h>
40 #include <sys/devops.h>
41 #include <sys/modctl.h>
42 #include <sys/poll.h>
43 #include <sys/conf.h>
44 #include <sys/ddi.h>
45 #include <sys/sunddi.h>
46 #include <sys/sunndi.h>
47 #include <sys/stat.h>
48 #include <sys/kmem.h>
49 #include <sys/processor.h>
50 #include <sys/cpuvar.h>
51 #include <sys/mem_config.h>
52 
53 #include <sys/autoconf.h>
54 #include <sys/cmn_err.h>
55 
56 #include <sys/ddi_impldefs.h>
57 #include <sys/promif.h>
58 #include <sys/machsystm.h>
59 
60 #include <sys/dr.h>
61 #include <sys/drmach.h>
62 #include <sys/dr_util.h>
63 
64 extern int		 nulldev();
65 extern int		 nodev();
66 extern struct memlist	*phys_install;
67 
68 #ifdef DEBUG
69 uint_t	dr_debug = 0;			/* dr.h for bit values */
70 #endif /* DEBUG */
71 
72 /*
73  * NOTE: state_str, nt_str and SBD_CMD_STR are only used in a debug
74  * kernel.  They are, however, referenced during both debug and non-debug
75  * compiles.
76  */
77 
78 static char *state_str[] = {
79 	"EMPTY", "OCCUPIED", "CONNECTED", "UNCONFIGURED",
80 	"PARTIAL", "CONFIGURED", "RELEASE", "UNREFERENCED",
81 	"FATAL"
82 };
83 
84 #define	SBD_CMD_STR(c) \
85 	(((c) == SBD_CMD_ASSIGN)	? "ASSIGN"	: \
86 	((c) == SBD_CMD_UNASSIGN)	? "UNASSIGN"	: \
87 	((c) == SBD_CMD_POWERON)	? "POWERON"	: \
88 	((c) == SBD_CMD_POWEROFF)	? "POWEROFF"	: \
89 	((c) == SBD_CMD_TEST)		? "TEST"	: \
90 	((c) == SBD_CMD_CONNECT)	? "CONNECT"	: \
91 	((c) == SBD_CMD_DISCONNECT)	? "DISCONNECT"	: \
92 	((c) == SBD_CMD_CONFIGURE)	? "CONFIGURE"	: \
93 	((c) == SBD_CMD_UNCONFIGURE)	? "UNCONFIGURE"	: \
94 	((c) == SBD_CMD_GETNCM)		? "GETNCM"	: \
95 	((c) == SBD_CMD_PASSTHRU)	? "PASSTHRU"	: \
96 	((c) == SBD_CMD_STATUS)		? "STATUS"	: "unknown")
97 
98 #define	DR_GET_BOARD_DEVUNIT(sb, ut, un) (&((sb)->b_dev[NIX(ut)][un]))
99 
100 #define	DR_MAKE_MINOR(i, b)	(((i) << 16) | (b))
101 #define	DR_MINOR2INST(m)	(((m) >> 16) & 0xffff)
102 #define	DR_MINOR2BNUM(m)	((m) & 0xffff)
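/*
 * For example, with instance 2 and board 5:
 *	DR_MAKE_MINOR(2, 5)	== (2 << 16) | 5 == 0x20005
 *	DR_MINOR2INST(0x20005)	== 2
 *	DR_MINOR2BNUM(0x20005)	== 5
 */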
103 
104 /* for the DR*INTERNAL_ERROR macros.  see sys/dr.h. */
105 static char *dr_ie_fmt = "dr.c %d";
106 
107 /* struct for drmach device name to sbd_comp_type_t mapping */
108 typedef	struct {
109 	char		*s_devtype;
110 	sbd_comp_type_t	s_nodetype;
111 } dr_devname_t;
112 
113 /* struct to map starfire device attributes - name:sbd_comp_type_t */
114 static	dr_devname_t	dr_devattr[] = {
115 	{ DRMACH_DEVTYPE_MEM,	SBD_COMP_MEM },
116 	{ DRMACH_DEVTYPE_CPU,	SBD_COMP_CPU },
117 	{ DRMACH_DEVTYPE_PCI,	SBD_COMP_IO },
118 	{ DRMACH_DEVTYPE_SBUS,	SBD_COMP_IO },
119 #if defined(DRMACH_DEVTYPE_WCI)
120 	{ DRMACH_DEVTYPE_WCI,	SBD_COMP_IO },
121 #endif
122 	/* last s_devtype must be NULL, s_nodetype must be SBD_COMP_UNKNOWN */
123 	{ NULL,			SBD_COMP_UNKNOWN }
124 };
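/*
 * This table is presumably what dr_dev_type_to_nt() (declared within
 * dr_ioctl() below) searches to translate a canonical device-type name
 * into its sbd_comp_type_t.
 */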
125 
126 /*
127  * Per instance soft-state structure.
128  */
129 typedef struct dr_softstate {
130 	dev_info_t	*dip;
131 	dr_board_t	*boards;
132 	kmutex_t	i_lock;
133 	int		 dr_initialized;
134 } dr_softstate_t;
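/*
 * i_lock serializes the lazy, first-open initialization of the boards[]
 * array (see dr_open()); dr_initialized records that this has been done.
 */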
135 
136 /*
137  * dr Global data elements
138  */
139 struct dr_global {
140 	dr_softstate_t	*softsp;	/* opaque handle from ddi_soft_state_init() */
141 	kmutex_t	lock;
142 } dr_g;
143 
144 dr_unsafe_devs_t	dr_unsafe_devs;
145 
146 /*
147  * Table of known passthru commands.
148  */
149 
150 struct {
151 	char	*pt_name;
152 	int	(*pt_func)(dr_handle_t *);
153 } pt_arr[] = {
154 	"quiesce",		dr_pt_test_suspend,
155 };
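/*
 * An SBD_CMD_PASSTHRU request is presumably dispatched by matching its
 * option string against pt_name and invoking the matching pt_func
 * (see dr_pt_ioctl(), called from dr_exec_op() below).
 */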
156 
157 int dr_modunload_okay = 0;		/* set to non-zero to allow unload */
158 
159 /*
160  * State transition table.  Lists the valid transitions for each "board"
161  * state.  Recall that a non-zero return value terminates the operation;
162  * however, the h_err value is what actually indicates the error, if any.
163  */
164 static int
165 _cmd2index(int c)
166 {
167 	/*
168 	 * Translate DR CMD to index into dr_state_transition.
169 	 */
170 	switch (c) {
171 	case SBD_CMD_CONNECT:		return (0);
172 	case SBD_CMD_DISCONNECT:	return (1);
173 	case SBD_CMD_CONFIGURE:		return (2);
174 	case SBD_CMD_UNCONFIGURE:	return (3);
175 	case SBD_CMD_ASSIGN:		return (4);
176 	case SBD_CMD_UNASSIGN:		return (5);
177 	case SBD_CMD_POWERON:		return (6);
178 	case SBD_CMD_POWEROFF:		return (7);
179 	case SBD_CMD_TEST:		return (8);
180 	default:			return (-1);
181 	}
182 }
183 
184 #define	CMD2INDEX(c)	_cmd2index(c)
185 
186 static struct dr_state_trans {
187 	int	x_cmd;
188 	struct {
189 		int	x_rv;		/* return value of pre_op */
190 		int	x_err;		/* error, if any */
191 	} x_op[DR_STATE_MAX];
192 } dr_state_transition[] = {
193 	{ SBD_CMD_CONNECT,
194 		{
195 			{ 0, 0 },			/* empty */
196 			{ 0, 0 },			/* occupied */
197 			{ -1, ESBD_STATE },		/* connected */
198 			{ -1, ESBD_STATE },		/* unconfigured */
199 			{ -1, ESBD_STATE },		/* partial */
200 			{ -1, ESBD_STATE },		/* configured */
201 			{ -1, ESBD_STATE },		/* release */
202 			{ -1, ESBD_STATE },		/* unreferenced */
203 			{ -1, ESBD_FATAL_STATE },	/* fatal */
204 		}
205 	},
206 	{ SBD_CMD_DISCONNECT,
207 		{
208 			{ -1, ESBD_STATE },		/* empty */
209 			{ 0, 0 },			/* occupied */
210 			{ 0, 0 },			/* connected */
211 			{ 0, 0 },			/* unconfigured */
212 			{ -1, ESBD_STATE },		/* partial */
213 			{ -1, ESBD_STATE },		/* configured */
214 			{ -1, ESBD_STATE },		/* release */
215 			{ -1, ESBD_STATE },		/* unreferenced */
216 			{ -1, ESBD_FATAL_STATE },	/* fatal */
217 		}
218 	},
219 	{ SBD_CMD_CONFIGURE,
220 		{
221 			{ -1, ESBD_STATE },		/* empty */
222 			{ -1, ESBD_STATE },		/* occupied */
223 			{ 0, 0 },			/* connected */
224 			{ 0, 0 },			/* unconfigured */
225 			{ 0, 0 },			/* partial */
226 			{ 0, 0 },			/* configured */
227 			{ -1, ESBD_STATE },		/* release */
228 			{ -1, ESBD_STATE },		/* unreferenced */
229 			{ -1, ESBD_FATAL_STATE },	/* fatal */
230 		}
231 	},
232 	{ SBD_CMD_UNCONFIGURE,
233 		{
234 			{ -1, ESBD_STATE },		/* empty */
235 			{ -1, ESBD_STATE },		/* occupied */
236 			{ -1, ESBD_STATE },		/* connected */
237 			{ -1, ESBD_STATE },		/* unconfigured */
238 			{ 0, 0 },			/* partial */
239 			{ 0, 0 },			/* configured */
240 			{ 0, 0 },			/* release */
241 			{ 0, 0 },			/* unreferenced */
242 			{ -1, ESBD_FATAL_STATE },	/* fatal */
243 		}
244 	},
245 	{ SBD_CMD_ASSIGN,
246 		{
247 			{ 0, 0 },			/* empty */
248 			{ 0, 0 },			/* occupied */
249 			{ -1, ESBD_STATE },		/* connected */
250 			{ -1, ESBD_STATE },		/* unconfigured */
251 			{ -1, ESBD_STATE },		/* partial */
252 			{ -1, ESBD_STATE },		/* configured */
253 			{ -1, ESBD_STATE },		/* release */
254 			{ -1, ESBD_STATE },		/* unreferenced */
255 			{ -1, ESBD_FATAL_STATE },	/* fatal */
256 		}
257 	},
258 	{ SBD_CMD_UNASSIGN,
259 		{
260 			{ 0, 0 },			/* empty */
261 			{ 0, 0 },			/* occupied */
262 			{ -1, ESBD_STATE },		/* connected */
263 			{ -1, ESBD_STATE },		/* unconfigured */
264 			{ -1, ESBD_STATE },		/* partial */
265 			{ -1, ESBD_STATE },		/* configured */
266 			{ -1, ESBD_STATE },		/* release */
267 			{ -1, ESBD_STATE },		/* unreferenced */
268 			{ -1, ESBD_FATAL_STATE },	/* fatal */
269 		}
270 	},
271 	{ SBD_CMD_POWERON,
272 		{
273 			{ 0, 0 },			/* empty */
274 			{ 0, 0 },			/* occupied */
275 			{ -1, ESBD_STATE },		/* connected */
276 			{ -1, ESBD_STATE },		/* unconfigured */
277 			{ -1, ESBD_STATE },		/* partial */
278 			{ -1, ESBD_STATE },		/* configured */
279 			{ -1, ESBD_STATE },		/* release */
280 			{ -1, ESBD_STATE },		/* unreferenced */
281 			{ -1, ESBD_FATAL_STATE },	/* fatal */
282 		}
283 	},
284 	{ SBD_CMD_POWEROFF,
285 		{
286 			{ 0, 0 },			/* empty */
287 			{ 0, 0 },			/* occupied */
288 			{ -1, ESBD_STATE },		/* connected */
289 			{ -1, ESBD_STATE },		/* unconfigured */
290 			{ -1, ESBD_STATE },		/* partial */
291 			{ -1, ESBD_STATE },		/* configured */
292 			{ -1, ESBD_STATE },		/* release */
293 			{ -1, ESBD_STATE },		/* unreferenced */
294 			{ -1, ESBD_FATAL_STATE },	/* fatal */
295 		}
296 	},
297 	{ SBD_CMD_TEST,
298 		{
299 			{ 0, 0 },			/* empty */
300 			{ 0, 0 },			/* occupied */
301 			{ -1, ESBD_STATE },		/* connected */
302 			{ -1, ESBD_STATE },		/* unconfigured */
303 			{ -1, ESBD_STATE },		/* partial */
304 			{ -1, ESBD_STATE },		/* configured */
305 			{ -1, ESBD_STATE },		/* release */
306 			{ -1, ESBD_STATE },		/* unreferenced */
307 			{ -1, ESBD_FATAL_STATE },	/* fatal */
308 		}
309 	},
310 };
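/*
 * A sketch of how this table is consulted (see dr_pre_op() and
 * dr_check_transition() below): index first by command, then by the
 * board's current state, e.g.
 *
 *	struct dr_state_trans *tp = &dr_state_transition[CMD2INDEX(cmd)];
 *	int allowed = (tp->x_op[bp->b_state].x_rv == 0);
 *	int err = tp->x_op[bp->b_state].x_err;
 */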
311 
312 /*
313  * Global R/W lock to synchronize access across
314  * multiple boards.  Users wanting multi-board access
315  * must grab WRITE lock, others must grab READ lock.
316  */
317 krwlock_t	dr_grwlock;
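/*
 * See dr_ioctl(): the lock is taken as WRITER and then downgraded to
 * READER unless the command's devset includes memory (which may span
 * boards); dr_attach() and dr_detach() also hold it as WRITER.
 */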
318 
319 /*
320  * Head of the boardlist used as a reference point for
321  * locating board structs.
322  * TODO: eliminate dr_boardlist
323  */
324 dr_board_t	*dr_boardlist;
325 
326 /*
327  * DR support functions.
328  */
329 static dr_devset_t	dr_dev2devset(sbd_comp_id_t *cid);
330 static int		dr_check_transition(dr_board_t *bp,
331 					dr_devset_t *devsetp,
332 					struct dr_state_trans *transp,
333 					int cmd);
334 static int		dr_check_unit_attached(dr_common_unit_t *dp);
335 static sbd_error_t	*dr_init_devlists(dr_board_t *bp);
336 static void		dr_board_discovery(dr_board_t *bp);
337 static int		dr_board_init(dr_board_t *bp, dev_info_t *dip,
338 					int bd);
339 static void		dr_board_destroy(dr_board_t *bp);
340 static void		dr_board_transition(dr_board_t *bp, dr_state_t st);
341 
342 /*
343  * DR driver (DDI) entry points.
344  */
345 static int	dr_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,
346 				void *arg, void **result);
347 static int	dr_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
348 static int	dr_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
349 static int	dr_probe(dev_info_t *dip);
350 static int	dr_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
351 				cred_t *cred_p, int *rval_p);
352 static int	dr_close(dev_t dev, int flag, int otyp, cred_t *cred_p);
353 static int	dr_open(dev_t *dev, int flag, int otyp, cred_t *cred_p);
354 
355 /*
356  * DR command processing operations.
357  */
358 
359 static int	dr_copyin_iocmd(dr_handle_t *hp);
360 static int	dr_copyout_iocmd(dr_handle_t *hp);
361 static int	dr_copyout_errs(dr_handle_t *hp);
362 static int	dr_pre_op(dr_handle_t *hp);
363 static int	dr_post_op(dr_handle_t *hp);
364 static int	dr_exec_op(dr_handle_t *hp);
365 static void	dr_assign_board(dr_handle_t *hp);
366 static void	dr_unassign_board(dr_handle_t *hp);
367 static void	dr_connect(dr_handle_t *hp);
368 static int	dr_disconnect(dr_handle_t *hp);
369 static void	dr_dev_configure(dr_handle_t *hp);
370 static void	dr_dev_release(dr_handle_t *hp);
371 static int	dr_dev_unconfigure(dr_handle_t *hp);
372 static void	dr_dev_cancel(dr_handle_t *hp);
373 static int	dr_dev_status(dr_handle_t *hp);
374 static int	dr_get_ncm(dr_handle_t *hp);
375 static int	dr_pt_ioctl(dr_handle_t *hp);
376 static void	dr_poweron_board(dr_handle_t *hp);
377 static void	dr_poweroff_board(dr_handle_t *hp);
378 static void	dr_test_board(dr_handle_t *hp);
379 
380 
381 
382 /*
383  * Autoconfiguration data structures
384  */
385 
386 struct cb_ops dr_cb_ops = {
387 	dr_open,	/* open */
388 	dr_close,	/* close */
389 	nodev,		/* strategy */
390 	nodev,		/* print */
391 	nodev,		/* dump */
392 	nodev,		/* read */
393 	nodev,		/* write */
394 	dr_ioctl,	/* ioctl */
395 	nodev,		/* devmap */
396 	nodev,		/* mmap */
397 	nodev,		/* segmap */
398 	nochpoll,	/* chpoll */
399 	ddi_prop_op,	/* cb_prop_op */
400 	NULL,		/* struct streamtab */
401 	D_NEW | D_MP | D_MTSAFE,	/* compatibility flags */
402 	CB_REV,		/* Rev */
403 	nodev,		/* cb_aread */
404 	nodev		/* cb_awrite */
405 };
406 
407 struct dev_ops dr_dev_ops = {
408 	DEVO_REV,	/* build version */
409 	0,		/* dev ref count */
410 	dr_getinfo,	/* getinfo */
411 	nulldev,	/* identify */
412 	dr_probe,	/* probe */
413 	dr_attach,	/* attach */
414 	dr_detach,	/* detach */
415 	nodev,		/* reset */
416 	&dr_cb_ops,	/* cb_ops */
417 	(struct bus_ops *)NULL, /* bus ops */
418 	NULL		/* power */
419 };
420 
421 extern struct mod_ops mod_driverops;
422 
423 static struct modldrv modldrv = {
424 	&mod_driverops,
425 	"Dynamic Reconfiguration %I%",
426 	&dr_dev_ops
427 };
428 
429 static struct modlinkage modlinkage = {
430 	MODREV_1,
431 	(void *)&modldrv,
432 	NULL
433 };
434 
435 /*
436  * Driver entry points.
437  */
438 int
439 _init(void)
440 {
441 	int	err;
442 
443 	/*
444 	 * If you need to support multiple nodes (instances), then
445 	 * whatever the maximum number of supported nodes is would
446 	 * need to be passed as the third parameter to ddi_soft_state_init().
447 	 * An alternative would be to dynamically fini and re-init the
448 	 * soft state structure each time a node is attached.
449 	 */
450 	err = ddi_soft_state_init((void **)&dr_g.softsp,
451 					sizeof (dr_softstate_t), 1);
452 	if (err)
453 		return (err);
454 
455 	mutex_init(&dr_g.lock, NULL, MUTEX_DRIVER, NULL);
456 	rw_init(&dr_grwlock, NULL, RW_DEFAULT, NULL);
457 
458 	return (mod_install(&modlinkage));
459 }
460 
461 int
462 _fini(void)
463 {
464 	int	err;
465 
466 	if ((err = mod_remove(&modlinkage)) != 0)
467 		return (err);
468 
469 	mutex_destroy(&dr_g.lock);
470 	rw_destroy(&dr_grwlock);
471 
472 	ddi_soft_state_fini((void **)&dr_g.softsp);
473 
474 	return (0);
475 }
476 
477 int
478 _info(struct modinfo *modinfop)
479 {
480 	return (mod_info(&modlinkage, modinfop));
481 }
482 
483 /*ARGSUSED1*/
484 static int
485 dr_open(dev_t *dev, int flag, int otyp, cred_t *cred_p)
486 {
487 	int		 instance;
488 	dr_softstate_t	*softsp;
489 	dr_board_t	*bp;
490 	/*
491 	 * Don't open unless we've attached.
492 	 */
493 	instance = DR_MINOR2INST(getminor(*dev));
494 	softsp = ddi_get_soft_state(dr_g.softsp, instance);
495 	if (softsp == NULL)
496 		return (ENXIO);
497 
498 	mutex_enter(&softsp->i_lock);
499 	if (!softsp->dr_initialized) {
500 		int		 bd;
501 		int		 rv = 0;
502 
503 		bp = softsp->boards;
504 
505 		/* initialize each array element */
506 		for (bd = 0; bd < MAX_BOARDS; bd++, bp++) {
507 			rv = dr_board_init(bp, softsp->dip, bd);
508 			if (rv)
509 				break;
510 		}
511 
512 		if (rv == 0) {
513 			softsp->dr_initialized = 1;
514 		} else {
515 			/* destroy elements initialized thus far */
516 			while (--bp >= softsp->boards)
517 				dr_board_destroy(bp);
518 
519 
520 			/* TODO: should this be another errno value? */
521 			mutex_exit(&softsp->i_lock);
522 			return (ENXIO);
523 		}
524 	}
525 	mutex_exit(&softsp->i_lock);
526 
527 	bp = &softsp->boards[DR_MINOR2BNUM(getminor(*dev))];
528 
529 	/*
530 	 * prevent opening of a dyn-ap for a board
531 	 * that does not exist
532 	 */
533 	if (!bp->b_assigned) {
534 		if (drmach_board_lookup(bp->b_num, &bp->b_id) != 0)
535 			return (ENODEV);
536 	}
537 
538 	return (0);
539 }
540 
541 /*ARGSUSED*/
542 static int
543 dr_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
544 {
545 	return (0);
546 }
547 
548 /*
549  * Enable/disable Starcat DR features.
550  */
551 #ifndef _STARFIRE
552 int dr_enable = 1;
553 int slot1_dr_enable = 1;
554 #endif /* _STARFIRE */
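/*
 * As a sketch only (assuming this driver's module name is "dr"), these
 * tunables could be overridden from /etc/system, e.g.:
 *
 *	set dr:dr_enable = 0
 *	set dr:slot1_dr_enable = 0
 */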
555 
556 /*ARGSUSED3*/
557 static int
558 dr_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
559 	cred_t *cred_p, int *rval_p)
560 {
561 	static int	dr_dev_type_to_nt(char *);
562 
563 	int		rv = 0;
564 	int		instance;
565 	int		bd;
566 	dr_handle_t	*hp;
567 	dr_softstate_t	*softsp;
568 	static fn_t	f = "dr_ioctl";
569 
570 	PR_ALL("%s...\n", f);
571 
572 	instance = DR_MINOR2INST(getminor(dev));
573 	softsp = ddi_get_soft_state(dr_g.softsp, instance);
574 	if (softsp == NULL) {
575 		cmn_err(CE_WARN, "dr%d: module not yet attached", instance);
576 		return (ENXIO);
577 	}
578 
579 #ifndef _STARFIRE
580 	if (!dr_enable) {
581 		switch (cmd) {
582 			case SBD_CMD_STATUS:
583 			case SBD_CMD_GETNCM:
584 			case SBD_CMD_PASSTHRU:
585 				break;
586 			default:
587 				return (ENOTSUP);
588 		}
589 	}
590 #endif /* _STARFIRE */
591 
592 	bd = DR_MINOR2BNUM(getminor(dev));
593 	if (bd >= MAX_BOARDS)
594 		return (ENXIO);
595 
596 #ifndef _STARFIRE
597 	if (!slot1_dr_enable && (bd & 0x1)) {
598 		switch (cmd) {
599 			case SBD_CMD_STATUS:
600 			case SBD_CMD_GETNCM:
601 			case SBD_CMD_PASSTHRU:
602 				break;
603 			default:
604 				return (ENOTSUP);
605 		}
606 	}
607 #endif /* _STARFIRE */
608 
609 	/* get and initialize storage for new handle */
610 	hp = GETSTRUCT(dr_handle_t, 1);
611 	hp->h_bd = &softsp->boards[bd];
612 	hp->h_err = NULL;
613 	hp->h_dev = getminor(dev);
614 	hp->h_cmd = cmd;
615 	hp->h_mode = mode;
616 	hp->h_iap = (sbd_ioctl_arg_t *)arg;
617 
618 	/* copy sbd command into handle */
619 	rv = dr_copyin_iocmd(hp);
620 	if (rv) {
621 		FREESTRUCT(hp, dr_handle_t, 1);
622 		return (EINVAL);
623 	}
624 
625 	/* translate canonical name to component type */
626 	if (hp->h_sbdcmd.cmd_cm.c_id.c_name[0] != '\0') {
627 		hp->h_sbdcmd.cmd_cm.c_id.c_type =
628 			dr_dev_type_to_nt(hp->h_sbdcmd.cmd_cm.c_id.c_name);
629 
630 		PR_ALL("%s: c_name = %s, c_type = %d\n",
631 			f,
632 			hp->h_sbdcmd.cmd_cm.c_id.c_name,
633 			hp->h_sbdcmd.cmd_cm.c_id.c_type);
634 	} else {
635 		/*EMPTY*/
636 		PR_ALL("%s: c_name is NULL\n", f);
637 	}
638 
639 	/* determine scope of operation */
640 	hp->h_devset = dr_dev2devset(&hp->h_sbdcmd.cmd_cm.c_id);
641 
642 	switch (hp->h_cmd) {
643 	case SBD_CMD_STATUS:
644 	case SBD_CMD_GETNCM:
645 		/* no locks needed for these commands */
646 		break;
647 
648 	default:
649 		rw_enter(&dr_grwlock, RW_WRITER);
650 		mutex_enter(&hp->h_bd->b_lock);
651 
652 		/*
653 		 * If we're dealing with memory at all, then we have
654 		 * to keep the "exclusive" global lock held.  This is
655 		 * necessary since we will probably need to look at
656 		 * multiple board structs.  Otherwise, we only have
657 		 * to deal with the board in question and so can drop
658 		 * the global lock to "shared".
659 		 */
660 		rv = DEVSET_IN_SET(hp->h_devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
661 		if (rv == 0)
662 			rw_downgrade(&dr_grwlock);
663 		break;
664 	}
665 	rv = 0;
666 
667 	if (rv == 0)
668 		rv = dr_pre_op(hp);
669 	if (rv == 0)
670 		rv = dr_exec_op(hp);
671 	if (rv == 0)
672 		rv = dr_post_op(hp);
673 
674 	if (rv == -1)
675 		rv = EIO;
676 
677 	if (hp->h_err != NULL)
678 		if (!(rv = dr_copyout_errs(hp)))
679 			rv = EIO;
680 
681 	/* undo locking, if any, done before dr_pre_op */
682 	switch (hp->h_cmd) {
683 	case SBD_CMD_STATUS:
684 	case SBD_CMD_GETNCM:
685 		break;
686 
687 	case SBD_CMD_ASSIGN:
688 	case SBD_CMD_UNASSIGN:
689 	case SBD_CMD_POWERON:
690 	case SBD_CMD_POWEROFF:
691 	case SBD_CMD_CONNECT:
692 	case SBD_CMD_CONFIGURE:
693 	case SBD_CMD_UNCONFIGURE:
694 	case SBD_CMD_DISCONNECT:
695 		/* Board changed state. Log a sysevent. */
696 		if (rv == 0)
697 			(void) drmach_log_sysevent(hp->h_bd->b_num, "",
698 						    SE_SLEEP, 1);
699 		/* Fall through */
700 
701 	default:
702 		mutex_exit(&hp->h_bd->b_lock);
703 		rw_exit(&dr_grwlock);
704 	}
705 
706 	if (hp->h_opts.size != 0)
707 		FREESTRUCT(hp->h_opts.copts, char, hp->h_opts.size);
708 
709 	FREESTRUCT(hp, dr_handle_t, 1);
710 
711 	return (rv);
712 }
713 
714 /*ARGSUSED*/
715 static int
716 dr_probe(dev_info_t *dip)
717 {
718 	return (DDI_PROBE_SUCCESS);
719 }
720 
721 static int
722 dr_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
723 {
724 	int		rv, rv2;
725 	int		bd;
726 	int 		instance;
727 	sbd_error_t	*err;
728 	dr_softstate_t	*softsp;
729 
730 	instance = ddi_get_instance(dip);
731 
732 	switch (cmd) {
733 
734 	case DDI_ATTACH:
735 
736 		rw_enter(&dr_grwlock, RW_WRITER);
737 
738 		rv = ddi_soft_state_zalloc(dr_g.softsp, instance);
739 		if (rv != DDI_SUCCESS) {
740 			cmn_err(CE_WARN, "dr%d: failed to alloc soft-state",
741 				instance);
742 			return (DDI_FAILURE);
743 		}
744 
745 		/* initialize softstate structure */
746 		softsp = ddi_get_soft_state(dr_g.softsp, instance);
747 		softsp->dip = dip;
748 
749 		mutex_init(&softsp->i_lock, NULL, MUTEX_DRIVER, NULL);
750 
751 		/* allocate board array (aka boardlist) */
752 		softsp->boards = GETSTRUCT(dr_board_t, MAX_BOARDS);
753 
754 		/* TODO: eliminate dr_boardlist */
755 		dr_boardlist = softsp->boards;
756 
757 		/* initialize each array element */
758 		rv = DDI_SUCCESS;
759 		for (bd = 0; bd < MAX_BOARDS; bd++) {
760 			dr_board_t	*bp = &softsp->boards[bd];
761 			char		*p, *name;
762 			int		 l, minor_num;
763 
764 			/*
765 			 * Initialize the board attachment point path
766 			 * (relative to pseudo) in a form immediately
767 			 * reusable as a cfgadm command argument.
768 			 * TODO: clean this up
769 			 */
770 			p = bp->b_path;
771 			l = sizeof (bp->b_path);
772 			(void) snprintf(p, l, "dr@%d:", instance);
773 			while (*p != '\0') {
774 				l--;
775 				p++;
776 			}
777 
778 			name = p;
779 			err = drmach_board_name(bd, p, l);
780 			if (err) {
781 				sbd_err_clear(&err);
782 				rv = DDI_FAILURE;
783 				break;
784 			}
785 
786 			minor_num = DR_MAKE_MINOR(instance, bd);
787 			rv = ddi_create_minor_node(dip, name, S_IFCHR,
788 				minor_num, DDI_NT_SBD_ATTACHMENT_POINT, NULL);
789 			if (rv != DDI_SUCCESS)
790 				rv = DDI_FAILURE;
791 		}
792 
793 		if (rv == DDI_SUCCESS) {
794 			/*
795 			 * Announce the node's presence.
796 			 */
797 			ddi_report_dev(dip);
798 		} else {
799 			ddi_remove_minor_node(dip, NULL);
800 		}
801 		/*
802 		 * Init registered unsafe devs.
803 		 */
804 		dr_unsafe_devs.devnames = NULL;
805 		rv2 = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
806 			DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
807 			"unsupported-io-drivers", &dr_unsafe_devs.devnames,
808 			&dr_unsafe_devs.ndevs);
809 
810 		if (rv2 != DDI_PROP_SUCCESS)
811 			dr_unsafe_devs.ndevs = 0;
812 
813 		rw_exit(&dr_grwlock);
814 		return (rv);
815 
816 	default:
817 		return (DDI_FAILURE);
818 	}
819 
820 	/*NOTREACHED*/
821 }
822 
823 static int
824 dr_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
825 {
826 	int 		instance;
827 	dr_softstate_t	*softsp;
828 
829 	switch (cmd) {
830 	case DDI_DETACH:
831 		if (!dr_modunload_okay)
832 			return (DDI_FAILURE);
833 
834 		rw_enter(&dr_grwlock, RW_WRITER);
835 
836 		instance = ddi_get_instance(dip);
837 		softsp = ddi_get_soft_state(dr_g.softsp, instance);
838 
839 		/* TODO: eliminate dr_boardlist */
840 		ASSERT(softsp->boards == dr_boardlist);
841 
842 		/* remove all minor nodes */
843 		ddi_remove_minor_node(dip, NULL);
844 
845 		if (softsp->dr_initialized) {
846 			int bd;
847 
848 			for (bd = 0; bd < MAX_BOARDS; bd++)
849 				dr_board_destroy(&softsp->boards[bd]);
850 		}
851 
852 		FREESTRUCT(softsp->boards, dr_board_t, MAX_BOARDS);
853 		mutex_destroy(&softsp->i_lock);
854 		ddi_soft_state_free(dr_g.softsp, instance);
855 
856 		rw_exit(&dr_grwlock);
857 		return (DDI_SUCCESS);
858 
859 	default:
860 		return (DDI_FAILURE);
861 	}
862 	/*NOTREACHED*/
863 }
864 
865 static int
866 dr_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
867 {
868 	_NOTE(ARGUNUSED(dip))
869 
870 	dev_t		dev = (dev_t)arg;
871 	int		instance, error;
872 	dr_softstate_t	*softsp;
873 
874 	*result = NULL;
875 	error = DDI_SUCCESS;
876 	instance = DR_MINOR2INST(getminor(dev));
877 
878 	switch (cmd) {
879 	case DDI_INFO_DEVT2DEVINFO:
880 		softsp = ddi_get_soft_state(dr_g.softsp, instance);
881 		if (softsp == NULL)
882 			return (DDI_FAILURE);
883 		*result = (void *)softsp->dip;
884 		break;
885 
886 	case DDI_INFO_DEVT2INSTANCE:
887 		*result = (void *)instance;
888 		break;
889 
890 	default:
891 		error = DDI_FAILURE;
892 		break;
893 	}
894 
895 	return (error);
896 }
897 
898 /*
899  * DR operations.
900  */
901 
902 static int
903 dr_copyin_iocmd(dr_handle_t *hp)
904 {
905 	static fn_t	f = "dr_copyin_iocmd";
906 	sbd_cmd_t	*scp = &hp->h_sbdcmd;
907 
908 	if (hp->h_iap == NULL)
909 		return (EINVAL);
910 
911 	bzero((caddr_t)scp, sizeof (sbd_cmd_t));
912 
913 #ifdef _MULTI_DATAMODEL
914 	if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
915 		sbd_cmd32_t	scmd32;
916 
917 		bzero((caddr_t)&scmd32, sizeof (sbd_cmd32_t));
918 
919 		if (ddi_copyin((void *)hp->h_iap, (void *)&scmd32,
920 				sizeof (sbd_cmd32_t), hp->h_mode)) {
921 			cmn_err(CE_WARN,
922 				"%s: (32bit) failed to copyin "
923 					"sbdcmd-struct", f);
924 			return (EFAULT);
925 		}
926 		scp->cmd_cm.c_id.c_type = scmd32.cmd_cm.c_id.c_type;
927 		scp->cmd_cm.c_id.c_unit = scmd32.cmd_cm.c_id.c_unit;
928 		bcopy(&scmd32.cmd_cm.c_id.c_name[0],
929 			&scp->cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
930 		scp->cmd_cm.c_flags = scmd32.cmd_cm.c_flags;
931 		scp->cmd_cm.c_len = scmd32.cmd_cm.c_len;
932 		scp->cmd_cm.c_opts = (caddr_t)scmd32.cmd_cm.c_opts;
933 
934 		switch (hp->h_cmd) {
935 		case SBD_CMD_STATUS:
936 			scp->cmd_stat.s_nbytes = scmd32.cmd_stat.s_nbytes;
937 			scp->cmd_stat.s_statp =
938 				(caddr_t)scmd32.cmd_stat.s_statp;
939 			break;
940 		default:
941 			break;
942 
943 		}
944 	} else
945 #endif /* _MULTI_DATAMODEL */
946 	if (ddi_copyin((void *)hp->h_iap, (void *)scp,
947 			sizeof (sbd_cmd_t), hp->h_mode) != 0) {
948 		cmn_err(CE_WARN,
949 			"%s: failed to copyin sbdcmd-struct", f);
950 		return (EFAULT);
951 	}
952 
953 	if ((hp->h_opts.size = scp->cmd_cm.c_len) != 0) {
954 		hp->h_opts.copts = GETSTRUCT(char, scp->cmd_cm.c_len + 1);
955 		++hp->h_opts.size;
956 		if (ddi_copyin((void *)scp->cmd_cm.c_opts,
957 			(void *)hp->h_opts.copts,
958 			scp->cmd_cm.c_len, hp->h_mode) != 0) {
959 			cmn_err(CE_WARN, "%s: failed to copyin options", f);
960 			return (EFAULT);
961 		}
962 	}
963 	return (0);
964 }
965 
966 static int
967 dr_copyout_iocmd(dr_handle_t *hp)
968 {
969 	static fn_t	f = "dr_copyout_iocmd";
970 	sbd_cmd_t	*scp = &hp->h_sbdcmd;
971 
972 	if (hp->h_iap == NULL)
973 		return (EINVAL);
974 
975 #ifdef _MULTI_DATAMODEL
976 	if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
977 		sbd_cmd32_t	scmd32;
978 
979 		scmd32.cmd_cm.c_id.c_type = scp->cmd_cm.c_id.c_type;
980 		scmd32.cmd_cm.c_id.c_unit = scp->cmd_cm.c_id.c_unit;
981 		bcopy(&scp->cmd_cm.c_id.c_name[0],
982 			&scmd32.cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
983 
984 		scmd32.cmd_cm.c_flags = scp->cmd_cm.c_flags;
985 		scmd32.cmd_cm.c_len = scp->cmd_cm.c_len;
986 		scmd32.cmd_cm.c_opts = (caddr32_t)scp->cmd_cm.c_opts;
987 
988 		switch (hp->h_cmd) {
989 		case SBD_CMD_GETNCM:
990 			scmd32.cmd_getncm.g_ncm = scp->cmd_getncm.g_ncm;
991 			break;
992 		default:
993 			break;
994 		}
995 
996 		if (ddi_copyout((void *)&scmd32, (void *)hp->h_iap,
997 				sizeof (sbd_cmd32_t), hp->h_mode)) {
998 			cmn_err(CE_WARN,
999 				"%s: (32bit) failed to copyout "
1000 					"sbdcmd-struct", f);
1001 			return (EFAULT);
1002 		}
1003 	} else
1004 #endif /* _MULTI_DATAMODEL */
1005 	if (ddi_copyout((void *)scp, (void *)hp->h_iap,
1006 			sizeof (sbd_cmd_t), hp->h_mode) != 0) {
1007 		cmn_err(CE_WARN,
1008 			"%s: failed to copyout sbdcmd-struct", f);
1009 		return (EFAULT);
1010 	}
1011 
1012 	return (0);
1013 }
1014 
1015 static int
1016 dr_copyout_errs(dr_handle_t *hp)
1017 {
1018 	static fn_t	f = "dr_copyout_errs";
1019 
1020 	if (hp->h_err == NULL)
1021 		return (0);
1022 
1023 	if (hp->h_err->e_code) {
1024 		PR_ALL("%s: error %d %s",
1025 			f, hp->h_err->e_code, hp->h_err->e_rsc);
1026 	}
1027 
1028 #ifdef _MULTI_DATAMODEL
1029 	if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
1030 		sbd_error32_t	*serr32p;
1031 
1032 		serr32p = GETSTRUCT(sbd_error32_t, 1);
1033 
1034 		serr32p->e_code = hp->h_err->e_code;
1035 		bcopy(&hp->h_err->e_rsc[0], &serr32p->e_rsc[0],
1036 			MAXPATHLEN);
1037 		if (ddi_copyout((void *)serr32p,
1038 			(void *)&((sbd_ioctl_arg32_t *)hp->h_iap)->i_err,
1039 				sizeof (sbd_error32_t), hp->h_mode)) {
1040 			cmn_err(CE_WARN,
1041 				"%s: (32bit) failed to copyout", f);
1042 			return (EFAULT);
1043 		}
1044 		FREESTRUCT(serr32p, sbd_error32_t, 1);
1045 	} else
1046 #endif /* _MULTI_DATAMODEL */
1047 	if (ddi_copyout((void *)hp->h_err,
1048 		(void *)&hp->h_iap->i_err,
1049 			sizeof (sbd_error_t), hp->h_mode)) {
1050 		cmn_err(CE_WARN,
1051 			"%s: failed to copyout", f);
1052 		return (EFAULT);
1053 	}
1054 
1055 	sbd_err_clear(&hp->h_err);
1056 
1057 	return (0);
1058 
1059 }
1060 
1061 /*
1062  * The pre-op entry point must call sbd_err_set_c(), if needed.
1063  * A non-zero return value indicates failure.
1064  */
1065 static int
1066 dr_pre_op(dr_handle_t *hp)
1067 {
1068 	int		rv = 0, t;
1069 	int		cmd, serr = 0;
1070 	dr_devset_t	devset;
1071 	dr_board_t	*bp = hp->h_bd;
1072 	dr_handle_t	*shp = hp;
1073 	static fn_t	f = "dr_pre_op";
1074 
1075 	cmd = hp->h_cmd;
1076 	devset = shp->h_devset;
1077 
1078 	PR_ALL("%s (cmd = %s)...\n", f, (uint_t)SBD_CMD_STR(cmd));
1079 
1080 	hp->h_err = drmach_pre_op(cmd, bp->b_id, &hp->h_opts);
1081 	if (hp->h_err != NULL) {
1082 		PR_ALL("drmach_pre_op failed for cmd %s(%d)\n",
1083 			SBD_CMD_STR(cmd), cmd);
1084 		return (-1);
1085 	}
1086 
1087 	/*
1088 	 * Check for valid state transitions.
1089 	 */
1090 	if ((t = CMD2INDEX(cmd)) != -1) {
1091 		struct dr_state_trans	*transp;
1092 		int			state_err;
1093 
1094 		transp = &dr_state_transition[t];
1095 		ASSERT(transp->x_cmd == cmd);
1096 
1097 		state_err = dr_check_transition(bp, &devset, transp, cmd);
1098 
1099 		if (state_err < 0) {
1100 			/*
1101 			 * Invalid device set (devset).
1102 			 */
1103 			dr_op_err(CE_IGNORE, hp, ESBD_INVAL, NULL);
1104 			serr = -1;
1105 			PR_ALL("%s: invalid devset (0x%x)\n",
1106 				f, (uint_t)devset);
1107 		} else if (state_err != 0) {
1108 			/*
1109 			 * State transition is not a valid one.
1110 			 */
1111 			dr_op_err(CE_IGNORE, hp,
1112 				transp->x_op[state_err].x_err, NULL);
1113 
1114 			serr = transp->x_op[state_err].x_rv;
1115 
1116 			PR_ALL("%s: invalid state %s(%d) for cmd %s(%d)\n",
1117 				f, state_str[state_err], state_err,
1118 				SBD_CMD_STR(cmd), cmd);
1119 		} else {
1120 			shp->h_devset = devset;
1121 		}
1122 	}
1123 
1124 	if (serr) {
1125 		rv = -1;
1126 	}
1127 
1128 	return (rv);
1129 }
1130 
1131 static int
1132 dr_post_op(dr_handle_t *hp)
1133 {
1134 	int		rv = 0;
1135 	int		cmd;
1136 	dr_board_t	*bp = hp->h_bd;
1137 	static fn_t	f = "dr_post_op";
1138 
1139 	cmd = hp->h_cmd;
1140 
1141 	PR_ALL("%s (cmd = %s)...\n", f, (uint_t)SBD_CMD_STR(cmd));
1142 
1143 	/* errors should have been caught by now */
1144 	ASSERT(hp->h_err == NULL);
1145 
1146 	hp->h_err = drmach_post_op(cmd, bp->b_id, &hp->h_opts);
1147 	if (hp->h_err != NULL) {
1148 		PR_ALL("drmach_post_op failed for cmd %s(%d)\n",
1149 			SBD_CMD_STR(cmd), cmd);
1150 		return (-1);
1151 	}
1152 
1153 	switch (cmd) {
1154 	case SBD_CMD_CONFIGURE:
1155 	case SBD_CMD_UNCONFIGURE:
1156 	case SBD_CMD_CONNECT:
1157 	case SBD_CMD_DISCONNECT:
1158 	case SBD_CMD_GETNCM:
1159 	case SBD_CMD_STATUS:
1160 		break;
1161 
1162 	default:
1163 		break;
1164 	}
1165 
1166 	return (rv);
1167 }
1168 
1169 static int
1170 dr_exec_op(dr_handle_t *hp)
1171 {
1172 	int		rv = 0;
1173 	static fn_t	f = "dr_exec_op";
1174 
1175 	/* errors should have been caught by now */
1176 	ASSERT(hp->h_err == NULL);
1177 
1178 	switch (hp->h_cmd) {
1179 	case SBD_CMD_ASSIGN:
1180 		dr_assign_board(hp);
1181 		break;
1182 
1183 	case SBD_CMD_UNASSIGN:
1184 		dr_unassign_board(hp);
1185 		break;
1186 
1187 	case SBD_CMD_POWEROFF:
1188 		dr_poweroff_board(hp);
1189 		break;
1190 
1191 	case SBD_CMD_POWERON:
1192 		dr_poweron_board(hp);
1193 		break;
1194 
1195 	case SBD_CMD_TEST:
1196 		dr_test_board(hp);
1197 		break;
1198 
1199 	case SBD_CMD_CONNECT:
1200 		dr_connect(hp);
1201 		break;
1202 
1203 	case SBD_CMD_CONFIGURE:
1204 		dr_dev_configure(hp);
1205 		break;
1206 
1207 	case SBD_CMD_UNCONFIGURE:
1208 		dr_dev_release(hp);
1209 		if (hp->h_err == NULL)
1210 			rv = dr_dev_unconfigure(hp);
1211 		else
1212 			dr_dev_cancel(hp);
1213 		break;
1214 
1215 	case SBD_CMD_DISCONNECT:
1216 		rv = dr_disconnect(hp);
1217 		break;
1218 
1219 	case SBD_CMD_STATUS:
1220 		rv = dr_dev_status(hp);
1221 		break;
1222 
1223 	case SBD_CMD_GETNCM:
1224 		hp->h_sbdcmd.cmd_getncm.g_ncm = dr_get_ncm(hp);
1225 		rv = dr_copyout_iocmd(hp);
1226 		break;
1227 
1228 	case SBD_CMD_PASSTHRU:
1229 		rv = dr_pt_ioctl(hp);
1230 		break;
1231 
1232 	default:
1233 		cmn_err(CE_WARN,
1234 			"%s: unknown command (%d)",
1235 			f, hp->h_cmd);
1236 		break;
1237 	}
1238 
1239 	if (hp->h_err != NULL) {
1240 		rv = -1;
1241 	}
1242 
1243 	return (rv);
1244 }
1245 
1246 static void
1247 dr_assign_board(dr_handle_t *hp)
1248 {
1249 	dr_board_t *bp = hp->h_bd;
1250 
1251 	hp->h_err = drmach_board_assign(bp->b_num, &bp->b_id);
1252 	if (hp->h_err == NULL) {
1253 		bp->b_assigned = 1;
1254 	}
1255 }
1256 
1257 static void
1258 dr_unassign_board(dr_handle_t *hp)
1259 {
1260 	dr_board_t *bp = hp->h_bd;
1261 
1262 	/*
1263 	 * Block out status during unassign.
1264 	 * Not doing cv_wait_sig here because the Starfire SSP software
1265 	 * ignores an unassign failure and removes the board from the
1266 	 * domain mask, causing a system panic.
1267 	 * TODO: Change cv_wait to cv_wait_sig when SSP software
1268 	 * handles unassign failure.
1269 	 */
1270 	dr_lock_status(bp);
1271 
1272 	hp->h_err = drmach_board_unassign(bp->b_id);
1273 	if (hp->h_err == NULL) {
1274 		/*
1275 		 * clear drmachid_t handle; not valid after board unassign
1276 		 */
1277 		bp->b_id = 0;
1278 		bp->b_assigned = 0;
1279 	}
1280 
1281 	dr_unlock_status(bp);
1282 }
1283 
1284 static void
1285 dr_poweron_board(dr_handle_t *hp)
1286 {
1287 	dr_board_t *bp = hp->h_bd;
1288 
1289 	hp->h_err = drmach_board_poweron(bp->b_id);
1290 }
1291 
1292 static void
1293 dr_poweroff_board(dr_handle_t *hp)
1294 {
1295 	dr_board_t *bp = hp->h_bd;
1296 
1297 	hp->h_err = drmach_board_poweroff(bp->b_id);
1298 }
1299 
1300 static void
1301 dr_test_board(dr_handle_t *hp)
1302 {
1303 	dr_board_t *bp = hp->h_bd;
1304 	hp->h_err = drmach_board_test(bp->b_id, &hp->h_opts,
1305 	    dr_cmd_flags(hp) & SBD_FLAG_FORCE);
1306 }
1307 
1308 /*
1309  * Create and populate the component nodes for a board.  Assumes that the
1310  * devlists for the board have been initialized.
1311  */
1312 static void
1313 dr_make_comp_nodes(dr_board_t *bp)
1314 {
1315 	int	i;
1316 
1317 	/*
1318 	 * Make nodes for the individual components on the board.
1319 	 * First we need to initialize the memory unit data structures of
1320 	 * the board structure.
1321 	 */
1322 	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
1323 		dr_mem_unit_t *mp;
1324 
1325 		mp = dr_get_mem_unit(bp, i);
1326 		dr_init_mem_unit(mp);
1327 	}
1328 
1329 	/*
1330 	 * Initialize cpu unit data structures.
1331 	 */
1332 	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
1333 		dr_cpu_unit_t *cp;
1334 
1335 		cp = dr_get_cpu_unit(bp, i);
1336 		dr_init_cpu_unit(cp);
1337 	}
1338 
1339 	/*
1340 	 * Initialize io unit data structures.
1341 	 */
1342 	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
1343 		dr_io_unit_t *ip;
1344 
1345 		ip = dr_get_io_unit(bp, i);
1346 		dr_init_io_unit(ip);
1347 	}
1348 
1349 	dr_board_transition(bp, DR_STATE_CONNECTED);
1350 
1351 	bp->b_rstate = SBD_STAT_CONNECTED;
1352 	bp->b_ostate = SBD_STAT_UNCONFIGURED;
1353 	bp->b_cond = SBD_COND_OK;
1354 	(void) drv_getparm(TIME, (void *)&bp->b_time);
1355 
1356 }
1357 
1358 /*
1359  * Only do work if called to operate on an entire board
1360  * which doesn't already have components present.
1361  */
1362 static void
1363 dr_connect(dr_handle_t *hp)
1364 {
1365 	dr_board_t	*bp = hp->h_bd;
1366 	static fn_t	f = "dr_connect";
1367 
1368 	PR_ALL("%s...\n", f);
1369 
1370 	if (DR_DEVS_PRESENT(bp)) {
1371 		/*
1372 		 * Board already has devices present.
1373 		 */
1374 		PR_ALL("%s: devices already present (0x%x)\n",
1375 			f, DR_DEVS_PRESENT(bp));
1376 		return;
1377 	}
1378 
1379 	hp->h_err = drmach_board_connect(bp->b_id, &hp->h_opts);
1380 	if (hp->h_err)
1381 		return;
1382 
1383 	hp->h_err = dr_init_devlists(bp);
1384 	if (hp->h_err)
1385 		return;
1386 	else if (bp->b_ndev == 0) {
1387 		dr_op_err(CE_WARN, hp, ESBD_EMPTY_BD, bp->b_path);
1388 		return;
1389 	} else {
1390 		dr_make_comp_nodes(bp);
1391 		return;
1392 	}
1393 	/*NOTREACHED*/
1394 }
1395 
1396 static int
1397 dr_disconnect(dr_handle_t *hp)
1398 {
1399 	int		i;
1400 	dr_devset_t	devset;
1401 	dr_board_t	*bp = hp->h_bd;
1402 	static fn_t	f = "dr_disconnect";
1403 
1404 	PR_ALL("%s...\n", f);
1405 
1406 	/*
1407 	 * Only devices which are present but
1408 	 * unattached can be disconnected.
1409 	 */
1410 	devset = hp->h_devset & DR_DEVS_PRESENT(bp) &
1411 			DR_DEVS_UNATTACHED(bp);
1412 
1413 	if ((devset == 0) && DR_DEVS_PRESENT(bp)) {
1414 		dr_op_err(CE_IGNORE, hp, ESBD_EMPTY_BD, bp->b_path);
1415 		return (0);
1416 	}
1417 
1418 	/*
1419 	 * Block out status during disconnect.
1420 	 */
1421 	mutex_enter(&bp->b_slock);
1422 	while (bp->b_sflags & DR_BSLOCK) {
1423 		if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
1424 			mutex_exit(&bp->b_slock);
1425 			return (EINTR);
1426 		}
1427 	}
1428 	bp->b_sflags |= DR_BSLOCK;
1429 	mutex_exit(&bp->b_slock);
1430 
1431 	hp->h_err = drmach_board_disconnect(bp->b_id, &hp->h_opts);
1432 
1433 	DR_DEVS_DISCONNECT(bp, devset);
1434 
1435 	ASSERT((DR_DEVS_ATTACHED(bp) & devset) == 0);
1436 
1437 	/*
1438 	 * Update per-device state transitions.
1439 	 */
1440 	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
1441 		dr_cpu_unit_t *cp;
1442 
1443 		if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
1444 			continue;
1445 
1446 		cp = dr_get_cpu_unit(bp, i);
1447 		if (dr_disconnect_cpu(cp) == 0)
1448 			dr_device_transition(&cp->sbc_cm, DR_STATE_EMPTY);
1449 		else if (cp->sbc_cm.sbdev_error != NULL)
1450 			DRERR_SET_C(&hp->h_err, &cp->sbc_cm.sbdev_error);
1451 
1452 		ASSERT(cp->sbc_cm.sbdev_error == NULL);
1453 	}
1454 
1455 	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
1456 		dr_mem_unit_t *mp;
1457 
1458 		if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
1459 			continue;
1460 
1461 		mp = dr_get_mem_unit(bp, i);
1462 		if (dr_disconnect_mem(mp) == 0)
1463 			dr_device_transition(&mp->sbm_cm, DR_STATE_EMPTY);
1464 		else if (mp->sbm_cm.sbdev_error != NULL)
1465 			DRERR_SET_C(&hp->h_err, &mp->sbm_cm.sbdev_error);
1466 
1467 		ASSERT(mp->sbm_cm.sbdev_error == NULL);
1468 	}
1469 
1470 	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
1471 		dr_io_unit_t *ip;
1472 
1473 		if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
1474 			continue;
1475 
1476 		ip = dr_get_io_unit(bp, i);
1477 		if (dr_disconnect_io(ip) == 0)
1478 			dr_device_transition(&ip->sbi_cm, DR_STATE_EMPTY);
1479 		else if (ip->sbi_cm.sbdev_error != NULL)
1480 			DRERR_SET_C(&hp->h_err, &ip->sbi_cm.sbdev_error);
1481 
1482 		ASSERT(ip->sbi_cm.sbdev_error == NULL);
1483 	}
1484 	if (hp->h_err) {
1485 		/*
1486 		 * For certain errors, drmach_board_disconnect will mark
1487 		 * the board as unusable; in these cases the devtree must
1488 		 * be purged so that status calls will succeed.
1489 		 * XXX
1490 		 * This implementation checks for discrete error codes -
1491 		 * someday, the i/f to drmach_board_disconnect should be
1492 		 * changed to avoid the e_code testing.
1493 		 */
1494 		if ((hp->h_err->e_code == ESTC_MBXRPLY) ||
1495 			(hp->h_err->e_code == ESTC_MBXRQST) ||
1496 			(hp->h_err->e_code == ESTC_SMS_ERR_UNRECOVERABLE) ||
1497 			(hp->h_err->e_code == ESTC_SMS_ERR_RECOVERABLE) ||
1498 			(hp->h_err->e_code == ESTC_DEPROBE)) {
1499 			bp->b_ostate = SBD_STAT_UNCONFIGURED;
1500 			bp->b_busy = 0;
1501 			(void) drv_getparm(TIME, (void *)&bp->b_time);
1502 
1503 			if (drmach_board_deprobe(bp->b_id))
1504 				goto disconnect_done;
1505 			else
1506 				bp->b_ndev = 0;
1507 		}
1508 
1509 		/*
1510 		 * If the disconnect failed in a recoverable way,
1511 		 * more work is required.
1512 		 * XXX
1513 		 * This implementation checks for discrete error codes -
1514 		 * someday, the i/f to drmach_board_disconnect should be
1515 		 * changed to avoid the e_code testing.
1516 		 */
1517 		if ((hp->h_err->e_code == ESTC_MBXRQST) ||
1518 		    (hp->h_err->e_code == ESTC_SMS_ERR_RECOVERABLE) ||
1519 		    (hp->h_err->e_code == ESTC_DEPROBE)) {
1520 			/*
1521 			 * With this failure, the board has been deprobed
1522 			 * by IKP, and reprobed.  We've already gotten rid
1523 			 * of the old devtree, now we need to reconstruct it
1524 			 * based on the new IKP probe
1525 			 */
1526 			if (dr_init_devlists(bp) || (bp->b_ndev == 0))
1527 				goto disconnect_done;
1528 
1529 			dr_make_comp_nodes(bp);
1530 		}
1531 	}
1532 	/*
1533 	 * Once all the components on a board have been disconnected,
1534 	 * the board's state can transition to disconnected and
1535 	 * we can allow the deprobe to take place.
1536 	 */
1537 	if (hp->h_err == NULL && DR_DEVS_PRESENT(bp) == 0) {
1538 		dr_board_transition(bp, DR_STATE_OCCUPIED);
1539 		bp->b_rstate = SBD_STAT_DISCONNECTED;
1540 		bp->b_ostate = SBD_STAT_UNCONFIGURED;
1541 		bp->b_busy = 0;
1542 		(void) drv_getparm(TIME, (void *)&bp->b_time);
1543 
1544 		hp->h_err = drmach_board_deprobe(bp->b_id);
1545 
1546 		if (hp->h_err == NULL) {
1547 			bp->b_ndev = 0;
1548 			dr_board_transition(bp, DR_STATE_EMPTY);
1549 			bp->b_rstate = SBD_STAT_EMPTY;
1550 			(void) drv_getparm(TIME, (void *)&bp->b_time);
1551 		}
1552 	}
1553 
1554 disconnect_done:
1555 	dr_unlock_status(bp);
1556 
1557 	return (0);
1558 }
1559 
1560 /*
1561  * Check if a particular device is a valid target of the current
1562  * operation. Return 1 if it is a valid target, and 0 otherwise.
1563  */
1564 static int
1565 dr_dev_is_target(dr_dev_unit_t *dp, int present_only, uint_t uset)
1566 {
1567 	dr_common_unit_t *cp;
1568 	int		 is_present;
1569 	int		 is_attached;
1570 
1571 	cp = &dp->du_common;
1572 
1573 	/* check if the user requested this device */
1574 	if ((uset & (1 << cp->sbdev_unum)) == 0) {
1575 		return (0);
1576 	}
1577 
1578 	is_present = DR_DEV_IS_PRESENT(cp) ? 1 : 0;
1579 	is_attached = DR_DEV_IS_ATTACHED(cp) ? 1 : 0;
1580 
1581 	/*
1582 	 * If the present_only flag is set, a valid target
1583 	 * must be present but not attached. Otherwise, it
1584 	 * must be both present and attached.
1585 	 */
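	/*
	 * Equivalently (given that the device is present):
	 *
	 *	present_only	is_attached	valid target?
	 *	     1		     0		    yes
	 *	     1		     1		    no
	 *	     0		     1		    yes
	 *	     0		     0		    no
	 */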
1586 	if (is_present && (present_only ^ is_attached)) {
1587 		/* sanity check */
1588 		ASSERT(cp->sbdev_id != (drmachid_t)0);
1589 
1590 		return (1);
1591 	}
1592 
1593 	return (0);
1594 }
1595 
1596 static void
1597 dr_dev_make_list(dr_handle_t *hp, sbd_comp_type_t type, int present_only,
1598 	dr_common_unit_t ***devlist, int *devnum)
1599 {
1600 	dr_board_t	*bp = hp->h_bd;
1601 	int		 unum;
1602 	int		 nunits;
1603 	uint_t		 uset;
1604 	int		 len;
1605 	dr_common_unit_t **list, **wp;
1606 
1607 	switch (type) {
1608 	case SBD_COMP_CPU:
1609 		nunits = MAX_CPU_UNITS_PER_BOARD;
1610 		break;
1611 	case SBD_COMP_MEM:
1612 		nunits = MAX_MEM_UNITS_PER_BOARD;
1613 		break;
1614 	case SBD_COMP_IO:
1615 		nunits = MAX_IO_UNITS_PER_BOARD;
1616 		break;
1617 	default:
1618 		/* catch this in debug kernels */
1619 		ASSERT(0);
1620 		break;
1621 	}
1622 
1623 	/* allocate list storage. */
1624 	len = sizeof (dr_common_unit_t *) * (nunits + 1);
1625 	list = kmem_zalloc(len, KM_SLEEP);
1626 
1627 	/* record length of storage in first element */
1628 	*list++ = (dr_common_unit_t *)len;
1629 
1630 	/* get bit array signifying which units are to be involved */
1631 	uset = DEVSET_GET_UNITSET(hp->h_devset, type);
1632 
1633 	/*
1634 	 * Adjust the loop count for CPU devices since all cores
1635 	 * in a CMP will be examined in a single iteration.
1636 	 */
1637 	if (type == SBD_COMP_CPU) {
1638 		nunits = MAX_CMP_UNITS_PER_BOARD;
1639 	}
1640 
1641 	/* populate list */
1642 	for (wp = list, unum = 0; unum < nunits; unum++) {
1643 
1644 		dr_dev_unit_t	*dp;
1645 		int		core;
1646 		int		cunum;
1647 
1648 		dp = DR_GET_BOARD_DEVUNIT(bp, type, unum);
1649 		if (dr_dev_is_target(dp, present_only, uset)) {
1650 			*wp++ = &dp->du_common;
1651 		}
1652 
1653 		/* further processing is only required for CPUs */
1654 		if (type != SBD_COMP_CPU) {
1655 			continue;
1656 		}
1657 
1658 		/*
1659 		 * Add any additional cores from the current CPU
1660 		 * device. This is to ensure that all the cores
1661 		 * are grouped together in the device list, and
1662 		 * consequently sequenced together during the actual
1663 		 * operation.
1664 		 */
1665 		for (core = 1; core < MAX_CORES_PER_CMP; core++) {
1666 
1667 			cunum = DR_CMP_CORE_UNUM(unum, core);
1668 			dp = DR_GET_BOARD_DEVUNIT(bp, type, cunum);
1669 
1670 			if (dr_dev_is_target(dp, present_only, uset)) {
1671 				*wp++ = &dp->du_common;
1672 			}
1673 		}
1674 	}
1675 
1676 	/* calculate number of units in list, return result and list pointer */
1677 	*devnum = wp - list;
1678 	*devlist = list;
1679 }
1680 
1681 static void
1682 dr_dev_clean_up(dr_handle_t *hp, dr_common_unit_t **list, int devnum)
1683 {
1684 	int len;
1685 	int n = 0;
1686 	dr_common_unit_t *cp, **rp = list;
1687 
1688 	/*
1689 	 * Move the first encountered unit error to the handle if the
1690 	 * handle does not yet have a recorded error.
1691 	 */
1692 	if (hp->h_err == NULL) {
1693 		while (n++ < devnum) {
1694 			cp = *rp++;
1695 			if (cp->sbdev_error != NULL) {
1696 				hp->h_err = cp->sbdev_error;
1697 				cp->sbdev_error = NULL;
1698 				break;
1699 			}
1700 		}
1701 	}
1702 
1703 	/* free remaining unit errors */
1704 	while (n++ < devnum) {
1705 		cp = *rp++;
1706 		if (cp->sbdev_error != NULL) {
1707 			sbd_err_clear(&cp->sbdev_error);
1708 			cp->sbdev_error = NULL;
1709 		}
1710 	}
1711 
1712 	/* free list */
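	/* step back to the hidden length word stored by dr_dev_make_list() */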
1713 	list -= 1;
1714 	len = (int)list[0];
1715 	kmem_free(list, len);
1716 }
1717 
1718 static int
1719 dr_dev_walk(dr_handle_t *hp, sbd_comp_type_t type, int present_only,
1720 		int (*pre_op)(dr_handle_t *, dr_common_unit_t **, int),
1721 		void (*op)(dr_handle_t *, dr_common_unit_t *),
1722 		int (*post_op)(dr_handle_t *, dr_common_unit_t **, int),
1723 		void (*board_op)(dr_handle_t *, dr_common_unit_t **, int))
1724 {
1725 	int			  devnum, rv;
1726 	dr_common_unit_t	**devlist;
1727 
1728 	dr_dev_make_list(hp, type, present_only, &devlist, &devnum);
1729 
1730 	rv = 0;
1731 	if (devnum > 0) {
1732 		rv = (*pre_op)(hp, devlist, devnum);
1733 		if (rv == 0) {
1734 			int n;
1735 
1736 			for (n = 0; n < devnum; n++)
1737 				(*op)(hp, devlist[n]);
1738 
1739 			rv = (*post_op)(hp, devlist, devnum);
1740 
1741 			(*board_op)(hp, devlist, devnum);
1742 		}
1743 	}
1744 
1745 	dr_dev_clean_up(hp, devlist, devnum);
1746 	return (rv);
1747 }
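/*
 * dr_dev_configure(), dr_dev_release() and dr_dev_unconfigure() below
 * drive this walker, supplying per-device-type pre/op/post callbacks and
 * a board-level state-update callback.
 */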
1748 
1749 /*ARGSUSED*/
1750 static int
1751 dr_dev_noop(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
1752 {
1753 	return (0);
1754 }
1755 
1756 static void
1757 dr_attach_update_state(dr_handle_t *hp,
1758 	dr_common_unit_t **devlist, int devnum)
1759 {
1760 	dr_board_t	*bp = hp->h_bd;
1761 	int		i;
1762 	dr_devset_t	devs_unattached, devs_present;
1763 	static fn_t	f = "dr_post_attach_devlist";
1764 
1765 	for (i = 0; i < devnum; i++) {
1766 		dr_common_unit_t *cp = devlist[i];
1767 
1768 		if (dr_check_unit_attached(cp) == -1) {
1769 			PR_ALL("%s: ERROR %s not attached\n",
1770 				f, cp->sbdev_path);
1771 			continue;
1772 		}
1773 
1774 		DR_DEV_SET_ATTACHED(cp);
1775 
1776 		dr_device_transition(cp, DR_STATE_CONFIGURED);
1777 		cp->sbdev_cond = SBD_COND_OK;
1778 	}
1779 
1780 	devs_present = DR_DEVS_PRESENT(bp);
1781 	devs_unattached = DR_DEVS_UNATTACHED(bp);
1782 
1783 	switch (bp->b_state) {
1784 	case DR_STATE_CONNECTED:
1785 	case DR_STATE_UNCONFIGURED:
1786 		ASSERT(devs_present);
1787 
1788 		if (devs_unattached == 0) {
1789 			/*
1790 			 * All devices finally attached.
1791 			 */
1792 			dr_board_transition(bp, DR_STATE_CONFIGURED);
1793 			hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1794 			hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1795 			hp->h_bd->b_cond = SBD_COND_OK;
1796 			hp->h_bd->b_busy = 0;
1797 			(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1798 		} else if (devs_present != devs_unattached) {
1799 			/*
1800 			 * Only some devices are fully attached.
1801 			 */
1802 			dr_board_transition(bp, DR_STATE_PARTIAL);
1803 			hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1804 			hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1805 			(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1806 		}
1807 		break;
1808 
1809 	case DR_STATE_PARTIAL:
1810 		ASSERT(devs_present);
1811 		/*
1812 		 * All devices finally attached.
1813 		 */
1814 		if (devs_unattached == 0) {
1815 			dr_board_transition(bp, DR_STATE_CONFIGURED);
1816 			hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1817 			hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1818 			hp->h_bd->b_cond = SBD_COND_OK;
1819 			hp->h_bd->b_busy = 0;
1820 			(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1821 		}
1822 		break;
1823 
1824 	default:
1825 		break;
1826 	}
1827 }
1828 
1829 static void
1830 dr_dev_configure(dr_handle_t *hp)
1831 {
1832 	int rv;
1833 
1834 	rv = dr_dev_walk(hp, SBD_COMP_CPU, 1,
1835 		dr_pre_attach_cpu,
1836 		dr_attach_cpu,
1837 		dr_post_attach_cpu,
1838 		dr_attach_update_state);
1839 
1840 	if (rv >= 0) {
1841 		rv = dr_dev_walk(hp, SBD_COMP_MEM, 1,
1842 			dr_pre_attach_mem,
1843 			dr_attach_mem,
1844 			dr_post_attach_mem,
1845 			dr_attach_update_state);
1846 	}
1847 
1848 	if (rv >= 0) {
1849 		(void) dr_dev_walk(hp, SBD_COMP_IO, 1,
1850 			dr_pre_attach_io,
1851 			dr_attach_io,
1852 			dr_post_attach_io,
1853 			dr_attach_update_state);
1854 	}
1855 }
1856 
1857 static void
1858 dr_release_update_state(dr_handle_t *hp,
1859 	dr_common_unit_t **devlist, int devnum)
1860 {
1861 	_NOTE(ARGUNUSED(devlist))
1862 	_NOTE(ARGUNUSED(devnum))
1863 
1864 	dr_board_t *bp = hp->h_bd;
1865 
1866 	/*
1867 	 * If the board is not already in RELEASE and all attached devices
1868 	 * have been released, transfer it to the RELEASE state.
1869 	 */
1870 	if ((bp->b_state != DR_STATE_RELEASE) &&
1871 		(DR_DEVS_RELEASED(bp) == DR_DEVS_ATTACHED(bp))) {
1872 		dr_board_transition(bp, DR_STATE_RELEASE);
1873 		hp->h_bd->b_busy = 1;
1874 	}
1875 }
1876 
1877 /* called by dr_release_done [below] and dr_release_mem_done [dr_mem.c] */
1878 int
1879 dr_release_dev_done(dr_common_unit_t *cp)
1880 {
1881 	if (cp->sbdev_state == DR_STATE_RELEASE) {
1882 		ASSERT(DR_DEV_IS_RELEASED(cp));
1883 
1884 		DR_DEV_SET_UNREFERENCED(cp);
1885 
1886 		dr_device_transition(cp, DR_STATE_UNREFERENCED);
1887 
1888 		return (0);
1889 	} else {
1890 		return (-1);
1891 	}
1892 }
1893 
1894 static void
1895 dr_release_done(dr_handle_t *hp, dr_common_unit_t *cp)
1896 {
1897 	_NOTE(ARGUNUSED(hp))
1898 
1899 	dr_board_t		*bp;
1900 	static fn_t		f = "dr_release_done";
1901 
1902 	PR_ALL("%s...\n", f);
1903 
1904 	/* get board pointer & sanity check */
1905 	bp = cp->sbdev_bp;
1906 	ASSERT(bp == hp->h_bd);
1907 
1908 	/*
1909 	 * Transfer the device which just completed its release
1910 	 * to the UNREFERENCED state.
1911 	 */
1912 	switch (cp->sbdev_type) {
1913 	case SBD_COMP_MEM:
1914 		dr_release_mem_done(cp);
1915 		break;
1916 
1917 	default:
1918 		DR_DEV_SET_RELEASED(cp);
1919 
1920 		dr_device_transition(cp, DR_STATE_RELEASE);
1921 
1922 		(void) dr_release_dev_done(cp);
1923 		break;
1924 	}
1925 
1926 	/*
1927 	 * If the board is in the RELEASE state and every device that
1928 	 * has been released is now unreferenced, then transfer the
1929 	 * board to the UNREFERENCED state.
1930 	 */
1931 	if ((bp->b_state == DR_STATE_RELEASE) &&
1932 		(DR_DEVS_RELEASED(bp) == DR_DEVS_UNREFERENCED(bp))) {
1933 		dr_board_transition(bp, DR_STATE_UNREFERENCED);
1934 		bp->b_busy = 1;
1935 		(void) drv_getparm(TIME, (void *)&bp->b_time);
1936 	}
1937 }
1938 
1939 static void
1940 dr_dev_release_mem(dr_handle_t *hp, dr_common_unit_t *dv)
1941 {
1942 	dr_release_mem(dv);
1943 	dr_release_done(hp, dv);
1944 }
1945 
1946 static void
1947 dr_dev_release(dr_handle_t *hp)
1948 {
1949 	int rv;
1950 
1951 	hp->h_bd->b_busy = 1;
1952 
1953 	rv = dr_dev_walk(hp, SBD_COMP_CPU, 0,
1954 		dr_pre_release_cpu,
1955 		dr_release_done,
1956 		dr_dev_noop,
1957 		dr_release_update_state);
1958 
1959 	if (rv >= 0) {
1960 		rv = dr_dev_walk(hp, SBD_COMP_MEM, 0,
1961 			dr_pre_release_mem,
1962 			dr_dev_release_mem,
1963 			dr_dev_noop,
1964 			dr_release_update_state);
1965 	}
1966 
1967 	if (rv >= 0) {
1968 		rv = dr_dev_walk(hp, SBD_COMP_IO, 0,
1969 			dr_pre_release_io,
1970 			dr_release_done,
1971 			dr_dev_noop,
1972 			dr_release_update_state);
1973 
1974 	}
1975 
1976 	if (rv < 0)
1977 		hp->h_bd->b_busy = 0;
1978 	/* else, b_busy will be cleared in dr_detach_update_state() */
1979 }
1980 
1981 static void
1982 dr_detach_update_state(dr_handle_t *hp,
1983 	dr_common_unit_t **devlist, int devnum)
1984 {
1985 	dr_board_t	*bp = hp->h_bd;
1986 	int		i;
1987 	dr_state_t	bstate;
1988 	static fn_t	f = "dr_detach_update_state";
1989 
1990 	for (i = 0; i < devnum; i++) {
1991 		dr_common_unit_t *cp = devlist[i];
1992 
1993 		if (dr_check_unit_attached(cp) >= 0) {
1994 			/*
1995 			 * Device is still attached probably due
1996 			 * to an error.  Need to keep track of it.
1997 			 */
1998 			PR_ALL("%s: ERROR %s not detached\n",
1999 				f, cp->sbdev_path);
2000 
2001 			continue;
2002 		}
2003 
2004 		DR_DEV_CLR_ATTACHED(cp);
2005 		DR_DEV_CLR_RELEASED(cp);
2006 		DR_DEV_CLR_UNREFERENCED(cp);
2007 		dr_device_transition(cp, DR_STATE_UNCONFIGURED);
2008 	}
2009 
2010 	bstate = bp->b_state;
2011 	if (bstate != DR_STATE_UNCONFIGURED) {
2012 		if (DR_DEVS_PRESENT(bp) == DR_DEVS_UNATTACHED(bp)) {
2013 			/*
2014 			 * All devices are finally detached.
2015 			 */
2016 			dr_board_transition(bp, DR_STATE_UNCONFIGURED);
2017 			hp->h_bd->b_ostate = SBD_STAT_UNCONFIGURED;
2018 			(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
2019 		} else if ((bp->b_state != DR_STATE_PARTIAL) &&
2020 				(DR_DEVS_ATTACHED(bp) !=
2021 					DR_DEVS_PRESENT(bp))) {
2022 			/*
2023 			 * Some devices remain attached.
2024 			 */
2025 			dr_board_transition(bp, DR_STATE_PARTIAL);
2026 			(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
2027 		}
2028 
2029 		if ((hp->h_devset & DR_DEVS_UNATTACHED(bp)) == hp->h_devset)
2030 			hp->h_bd->b_busy = 0;
2031 	}
2032 }
2033 
2034 static int
2035 dr_dev_unconfigure(dr_handle_t *hp)
2036 {
2037 	dr_board_t	*bp = hp->h_bd;
2038 
2039 	/*
2040 	 * Block out status during IO unconfig.
2041 	 */
2042 	mutex_enter(&bp->b_slock);
2043 	while (bp->b_sflags & DR_BSLOCK) {
2044 		if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
2045 			mutex_exit(&bp->b_slock);
2046 			return (EINTR);
2047 		}
2048 	}
2049 	bp->b_sflags |= DR_BSLOCK;
2050 	mutex_exit(&bp->b_slock);
2051 
2052 	(void) dr_dev_walk(hp, SBD_COMP_IO, 0,
2053 		dr_pre_detach_io,
2054 		dr_detach_io,
2055 		dr_post_detach_io,
2056 		dr_detach_update_state);
2057 
2058 	dr_unlock_status(bp);
2059 
2060 	(void) dr_dev_walk(hp, SBD_COMP_CPU, 0,
2061 		dr_pre_detach_cpu,
2062 		dr_detach_cpu,
2063 		dr_post_detach_cpu,
2064 		dr_detach_update_state);
2065 
2066 	(void) dr_dev_walk(hp, SBD_COMP_MEM, 0,
2067 		dr_pre_detach_mem,
2068 		dr_detach_mem,
2069 		dr_post_detach_mem,
2070 		dr_detach_update_state);
2071 
2072 	return (0);
2073 }
2074 
2075 static void
2076 dr_dev_cancel(dr_handle_t *hp)
2077 {
2078 	int		i;
2079 	dr_devset_t	devset;
2080 	dr_board_t	*bp = hp->h_bd;
2081 	static fn_t	f = "dr_dev_cancel";
2082 
2083 	PR_ALL("%s...\n", f);
2084 
2085 	/*
2086 	 * Only devices which have been "released" are
2087 	 * subject to cancellation.
2088 	 */
2089 	devset = hp->h_devset & DR_DEVS_RELEASED(bp);
2090 
2091 	/*
2092 	 * Nothing to do for CPUs or IO other than changing
2093 	 * their state back.
2094 	 */
2095 	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
2096 		dr_cpu_unit_t	*cp;
2097 		dr_state_t	nstate;
2098 
2099 		if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
2100 			continue;
2101 
2102 		cp = dr_get_cpu_unit(bp, i);
2103 		if (dr_cancel_cpu(cp) == 0)
2104 			nstate = DR_STATE_CONFIGURED;
2105 		else
2106 			nstate = DR_STATE_FATAL;
2107 
2108 		dr_device_transition(&cp->sbc_cm, nstate);
2109 	}
2110 
2111 	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
2112 		dr_io_unit_t *ip;
2113 
2114 		if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
2115 			continue;
2116 		ip = dr_get_io_unit(bp, i);
2117 		dr_device_transition(&ip->sbi_cm, DR_STATE_CONFIGURED);
2118 	}
2119 	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
2120 		dr_mem_unit_t	*mp;
2121 		dr_state_t	nstate;
2122 
2123 		if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
2124 			continue;
2125 
2126 		mp = dr_get_mem_unit(bp, i);
2127 		if (dr_cancel_mem(mp) == 0)
2128 			nstate = DR_STATE_CONFIGURED;
2129 		else
2130 			nstate = DR_STATE_FATAL;
2131 
2132 		dr_device_transition(&mp->sbm_cm, nstate);
2133 	}
2134 
2135 	PR_ALL("%s: unreleasing devset (0x%x)\n", f, (uint_t)devset);
2136 
2137 	DR_DEVS_CANCEL(bp, devset);
2138 
2139 	if (DR_DEVS_RELEASED(bp) == 0) {
2140 		dr_state_t	new_state;
2141 		/*
2142 		 * If the board no longer has any released devices
2143 		 * then transition it back to the CONFIGURED/PARTIAL state.
2144 		 */
2145 		if (DR_DEVS_ATTACHED(bp) == DR_DEVS_PRESENT(bp))
2146 			new_state = DR_STATE_CONFIGURED;
2147 		else
2148 			new_state = DR_STATE_PARTIAL;
2149 		if (bp->b_state != new_state) {
2150 			dr_board_transition(bp, new_state);
2151 		}
2152 		hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
2153 		hp->h_bd->b_busy = 0;
2154 		(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
2155 	}
2156 }
2157 
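/*
 * Collect status for the board and the requested components and copy it
 * out to the caller's preallocated buffer (cmd_stat.s_statp, s_nbytes
 * bytes long), truncating the component list if that buffer is too
 * small.  Returns 0 on success or an errno (EINTR, EIO, EINVAL, EFAULT).
 *
 * Shape of the copied-out data, shown as an illustrative sketch only
 * (the sbd_stat_t/sbd_dev_stat_t definitions are authoritative):
 *
 *	sbd_stat_t			board-level status
 *	    .s_nstat = N		number of component entries
 *	    .s_stat[0 .. N-1]		contiguous sbd_dev_stat_t entries
 *					(the first is embedded in sbd_stat_t)
 */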
2158 static int
2159 dr_dev_status(dr_handle_t *hp)
2160 {
2161 	int		nstat, mode, ncm, sz, pbsz, pnstat;
2162 	dr_handle_t	*shp;
2163 	dr_devset_t	devset = 0;
2164 	sbd_stat_t	*dstatp = NULL;
2165 	sbd_dev_stat_t	*devstatp;
2166 	dr_board_t	*bp;
2167 	drmach_status_t	 pstat;
2168 	int		rv = 0;
2169 
2170 #ifdef _MULTI_DATAMODEL
2171 	int sz32 = 0;
2172 #endif /* _MULTI_DATAMODEL */
2173 
2174 	static fn_t	f = "dr_dev_status";
2175 
2176 	PR_ALL("%s...\n", f);
2177 
2178 	mode = hp->h_mode;
2179 	shp = hp;
2180 	devset = shp->h_devset;
2181 	bp = hp->h_bd;
2182 
2183 	/*
2184 	 * Block out disconnect, unassign, IO unconfigure and
2185 	 * devinfo branch creation during status.
2186 	 */
2187 	mutex_enter(&bp->b_slock);
2188 	while (bp->b_sflags & DR_BSLOCK) {
2189 		if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
2190 			mutex_exit(&bp->b_slock);
2191 			return (EINTR);
2192 		}
2193 	}
2194 	bp->b_sflags |= DR_BSLOCK;
2195 	mutex_exit(&bp->b_slock);
2196 
2197 	ncm = 1;
2198 	if (hp->h_sbdcmd.cmd_cm.c_id.c_type == SBD_COMP_NONE) {
2199 		if (dr_cmd_flags(hp) & SBD_FLAG_ALLCMP) {
2200 		/*
2201 		 * Calculate the maximum number of components possible
2202 		 * for a board.  This number will be used to size the
2203 		 * status scratch buffer used by board and component
2204 		 * status functions.
2205 		 * This buffer may differ in size from what is provided
2206 		 * by the plugin, since the known component set on the
2207 		 * board may change between the plugin's GETNCM call, and
2208 		 * board may change between the plugin's GETNCM call and
2209 		 * receptacle buffer at copyout time.
2210 		 */
2211 			ncm = MAX_CPU_UNITS_PER_BOARD +
2212 				MAX_MEM_UNITS_PER_BOARD +
2213 				MAX_IO_UNITS_PER_BOARD;
2214 
2215 		} else {
2216 			/*
2217 			 * In the case of c_type == SBD_COMP_NONE, and
2218 			 * SBD_FLAG_ALLCMP not specified, only the board
2219 			 * info is to be returned, no components.
2220 			 */
2221 			ncm = 0;
2222 			devset = 0;
2223 		}
2224 	}
2225 
2226 	sz = sizeof (sbd_stat_t);
2227 	if (ncm > 1)
2228 		sz += sizeof (sbd_dev_stat_t) * (ncm - 1);
2229 
2230 
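	/*
	 * pbsz is the size of the caller's buffer; pnstat is how many
	 * sbd_dev_stat_t entries that buffer can hold beyond the leading
	 * sbd_stat_t (one more is added below to account for the entry
	 * embedded in sbd_stat_t itself).
	 */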
2231 	pbsz = (int)hp->h_sbdcmd.cmd_stat.s_nbytes;
2232 	pnstat = (pbsz - sizeof (sbd_stat_t))/sizeof (sbd_dev_stat_t);
2233 
2234 	/*
2235 	 * s_nbytes describes the size of the preallocated user
2236 	 * buffer into which the application is expecting to
2237 	 * receive the sbd_stat_t and sbd_dev_stat_t structures.
2238 	 */
2239 
2240 #ifdef _MULTI_DATAMODEL
2241 
2242 	/*
2243 	 * More buffer space is required for the 64bit to 32bit
2244 	 * conversion of data structures.
2245 	 */
2246 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
2247 		sz32 = sizeof (sbd_stat32_t);
2248 		if (ncm > 1)
2249 			sz32  += sizeof (sbd_dev_stat32_t) * (ncm - 1);
2250 		pnstat = (pbsz - sizeof (sbd_stat32_t))/
2251 				sizeof (sbd_dev_stat32_t);
2252 	}
2253 
2254 	sz += sz32;
2255 #endif
2256 	/*
2257 	 * Since one sbd_dev_stat_t is included in the sbd_stat_t,
2258 	 * increment the plugin's nstat count.
2259 	 */
2260 	++pnstat;
2261 
2262 	if (bp->b_id == 0) {
2263 		bzero(&pstat, sizeof (pstat));
2264 	} else {
2265 		sbd_error_t *err;
2266 
2267 		err = drmach_status(bp->b_id, &pstat);
2268 		if (err) {
2269 			DRERR_SET_C(&hp->h_err, &err);
2270 			rv = EIO;
2271 			goto status_done;
2272 		}
2273 	}
2274 
2275 	dstatp = (sbd_stat_t *)GETSTRUCT(char, sz);
2276 
2277 	devstatp = &dstatp->s_stat[0];
2278 
2279 	dstatp->s_board = bp->b_num;
2280 
2281 	/*
2282 	 * Detect transitions between empty and disconnected.
2283 	 */
2284 	if (!pstat.empty && (bp->b_rstate == SBD_STAT_EMPTY))
2285 		bp->b_rstate = SBD_STAT_DISCONNECTED;
2286 	else if (pstat.empty && (bp->b_rstate == SBD_STAT_DISCONNECTED))
2287 		bp->b_rstate = SBD_STAT_EMPTY;
2288 
2289 	dstatp->s_rstate = bp->b_rstate;
2290 	dstatp->s_ostate = bp->b_ostate;
2291 	dstatp->s_cond = bp->b_cond = pstat.cond;
2292 	dstatp->s_busy = bp->b_busy | pstat.busy;
2293 	dstatp->s_time = bp->b_time;
2294 	dstatp->s_power = pstat.powered;
2295 	dstatp->s_assigned = bp->b_assigned = pstat.assigned;
2296 	dstatp->s_nstat = nstat = 0;
2297 	bcopy(&pstat.type[0], &dstatp->s_type[0], SBD_TYPE_LEN);
2298 	bcopy(&pstat.info[0], &dstatp->s_info[0], SBD_MAX_INFO);
2299 
2300 	devset &= DR_DEVS_PRESENT(bp);
2301 	if (devset == 0) {
2302 		/*
2303 		 * No requested device is present on the board.
2304 		 */
2305 		PR_ALL("%s: no device present\n", f);
2306 	}
2307 
2308 	if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT))
2309 		if ((nstat = dr_cpu_status(hp, devset, devstatp)) > 0) {
2310 			dstatp->s_nstat += nstat;
2311 			devstatp += nstat;
2312 		}
2313 
2314 	if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT))
2315 		if ((nstat = dr_mem_status(hp, devset, devstatp)) > 0) {
2316 			dstatp->s_nstat += nstat;
2317 			devstatp += nstat;
2318 		}
2319 
2320 	if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT))
2321 		if ((nstat = dr_io_status(hp, devset, devstatp)) > 0) {
2322 			dstatp->s_nstat += nstat;
2323 			devstatp += nstat;
2324 		}
2325 
2326 	/*
2327 	 * Due to a possible change in the number of components between
2328 	 * the time of the plugin's GETNCM call and now, there may be
2329 	 * more or fewer components than the plugin's buffer can
2330 	 * hold.  Adjust s_nstat accordingly.
2331 	 */
2332 
2333 	dstatp->s_nstat = dstatp->s_nstat > pnstat ? pnstat : dstatp->s_nstat;
2334 
2335 
2336 #ifdef _MULTI_DATAMODEL
2337 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
2338 		int		i, j;
2339 		sbd_stat32_t	*dstat32p;
2340 
2341 		dstat32p = (sbd_stat32_t *)devstatp;
2342 
2343 		/* Alignment Paranoia */
2344 		if ((ulong_t)dstat32p & 0x1) {
2345 			PR_ALL("%s: alignment: sz=0x%lx dstat32p=%p\n",
2346 				f, sizeof (sbd_stat32_t), (void *)dstat32p);
2347 			DR_OP_INTERNAL_ERROR(hp);
2348 			rv = EINVAL;
2349 			goto status_done;
2350 		}
2351 
2352 		/* paranoia: detect buffer overrun */
2353 		if ((caddr_t)&dstat32p->s_stat[dstatp->s_nstat] >
2354 				((caddr_t)dstatp) + sz) {
2355 			DR_OP_INTERNAL_ERROR(hp);
2356 			rv = EINVAL;
2357 			goto status_done;
2358 		}
2359 
2360 		/* copy sbd_stat_t structure members */
2361 #define	_SBD_STAT(t, m) dstat32p->m = (t)dstatp->m
2362 		_SBD_STAT(int32_t, s_board);
2363 		_SBD_STAT(int32_t, s_rstate);
2364 		_SBD_STAT(int32_t, s_ostate);
2365 		_SBD_STAT(int32_t, s_cond);
2366 		_SBD_STAT(int32_t, s_busy);
2367 		_SBD_STAT(time32_t, s_time);
2368 		_SBD_STAT(uint32_t, s_power);
2369 		_SBD_STAT(uint32_t, s_assigned);
2370 		_SBD_STAT(int32_t, s_nstat);
2371 		bcopy(&dstatp->s_type[0], &dstat32p->s_type[0],
2372 			SBD_TYPE_LEN);
2373 		bcopy(&dstatp->s_info[0], &dstat32p->s_info[0],
2374 			SBD_MAX_INFO);
2375 #undef _SBD_STAT
2376 
2377 		for (i = 0; i < dstatp->s_nstat; i++) {
2378 			sbd_dev_stat_t		*dsp = &dstatp->s_stat[i];
2379 			sbd_dev_stat32_t	*ds32p = &dstat32p->s_stat[i];
2380 #define	_SBD_DEV_STAT(t, m) ds32p->m = (t)dsp->m
2381 
2382 			/* copy sbd_cm_stat_t structure members */
2383 			_SBD_DEV_STAT(int32_t, ds_type);
2384 			_SBD_DEV_STAT(int32_t, ds_unit);
2385 			_SBD_DEV_STAT(int32_t, ds_ostate);
2386 			_SBD_DEV_STAT(int32_t, ds_cond);
2387 			_SBD_DEV_STAT(int32_t, ds_busy);
2388 			_SBD_DEV_STAT(int32_t, ds_suspend);
2389 			_SBD_DEV_STAT(time32_t, ds_time);
2390 			bcopy(&dsp->ds_name[0], &ds32p->ds_name[0],
2391 			    OBP_MAXPROPNAME);
2392 
2393 			switch (dsp->ds_type) {
2394 			case SBD_COMP_CPU:
2395 				/* copy sbd_cpu_stat_t structure members */
2396 				_SBD_DEV_STAT(int32_t, d_cpu.cs_isbootproc);
2397 				_SBD_DEV_STAT(int32_t, d_cpu.cs_cpuid);
2398 				_SBD_DEV_STAT(int32_t, d_cpu.cs_speed);
2399 				_SBD_DEV_STAT(int32_t, d_cpu.cs_ecache);
2400 				break;
2401 
2402 			case SBD_COMP_MEM:
2403 				/* copy sbd_mem_stat_t structure members */
2404 				_SBD_DEV_STAT(int32_t, d_mem.ms_interleave);
2405 				_SBD_DEV_STAT(uint32_t, d_mem.ms_basepfn);
2406 				_SBD_DEV_STAT(uint32_t, d_mem.ms_totpages);
2407 				_SBD_DEV_STAT(uint32_t, d_mem.ms_detpages);
2408 				_SBD_DEV_STAT(int32_t, d_mem.ms_pageslost);
2409 				_SBD_DEV_STAT(uint32_t, d_mem.ms_managed_pages);
2410 				_SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_pages);
2411 				_SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_first);
2412 				_SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_last);
2413 				_SBD_DEV_STAT(int32_t, d_mem.ms_cage_enabled);
2414 				_SBD_DEV_STAT(int32_t, d_mem.ms_peer_is_target);
2415 				bcopy(&dsp->d_mem.ms_peer_ap_id[0],
2416 					&ds32p->d_mem.ms_peer_ap_id[0],
2417 					sizeof (ds32p->d_mem.ms_peer_ap_id));
2418 				break;
2419 
2420 			case SBD_COMP_IO:
2421 				/* copy sbd_io_stat_t structure members */
2422 				_SBD_DEV_STAT(int32_t, d_io.is_referenced);
2423 				_SBD_DEV_STAT(int32_t, d_io.is_unsafe_count);
2424 
2425 				for (j = 0; j < SBD_MAX_UNSAFE; j++)
2426 					_SBD_DEV_STAT(int32_t,
2427 						d_io.is_unsafe_list[j]);
2428 
2429 				bcopy(&dsp->d_io.is_pathname[0],
2430 				    &ds32p->d_io.is_pathname[0], MAXPATHLEN);
2431 				break;
2432 
2433 			case SBD_COMP_CMP:
2434 				/* copy sbd_cmp_stat_t structure members */
2435 				bcopy(&dsp->d_cmp.ps_cpuid[0],
2436 					&ds32p->d_cmp.ps_cpuid[0],
2437 					sizeof (ds32p->d_cmp.ps_cpuid));
2438 				_SBD_DEV_STAT(int32_t, d_cmp.ps_ncores);
2439 				_SBD_DEV_STAT(int32_t, d_cmp.ps_speed);
2440 				_SBD_DEV_STAT(int32_t, d_cmp.ps_ecache);
2441 				break;
2442 
2443 			default:
2444 				cmn_err(CE_WARN, "%s: unknown dev type (%d)",
2445 				    f, (int)dsp->ds_type);
2446 				rv = EFAULT;
2447 				goto status_done;
2448 			}
2449 #undef _SBD_DEV_STAT
2450 		}
2451 
2452 
2453 		if (ddi_copyout((void *)dstat32p,
2454 			hp->h_sbdcmd.cmd_stat.s_statp, pbsz, mode) != 0) {
2455 			cmn_err(CE_WARN,
2456 				"%s: failed to copyout status "
2457 				"for board %d", f, bp->b_num);
2458 			rv = EFAULT;
2459 			goto status_done;
2460 		}
2461 	} else
2462 #endif /* _MULTI_DATAMODEL */
2463 
2464 	if (ddi_copyout((void *)dstatp, hp->h_sbdcmd.cmd_stat.s_statp,
2465 		pbsz, mode) != 0) {
2466 		cmn_err(CE_WARN,
2467 			"%s: failed to copyout status for board %d",
2468 			f, bp->b_num);
2469 		rv = EFAULT;
2470 		goto status_done;
2471 	}
2472 
2473 status_done:
2474 	if (dstatp != NULL)
2475 		FREESTRUCT(dstatp, char, sz);
2476 
2477 	dr_unlock_status(bp);
2478 
2479 	return (rv);
2480 }
2481 
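/*
 * Count the components present on the board, restricted to the
 * component type named in the command, if any.  A CMP is counted as a
 * single component regardless of how many of its cores are present.
 */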
2482 static int
2483 dr_get_ncm(dr_handle_t *hp)
2484 {
2485 	int		i;
2486 	int		ncm = 0;
2487 	dr_devset_t	devset;
2488 
2489 	devset = DR_DEVS_PRESENT(hp->h_bd);
2490 	if (hp->h_sbdcmd.cmd_cm.c_id.c_type != SBD_COMP_NONE)
2491 		devset &= DEVSET(hp->h_sbdcmd.cmd_cm.c_id.c_type,
2492 				DEVSET_ANYUNIT);
2493 
2494 	/*
2495 	 * Handle CPUs first to deal with possible CMP
2496 	 * devices. If the CPU is a CMP, we only need to
2497 	 * increment ncm once even if there are multiple
2498 	 * cores for that CMP present in the devset.
2499 	 */
2500 	for (i = 0; i < MAX_CMP_UNITS_PER_BOARD; i++) {
2501 		if (devset & DEVSET(SBD_COMP_CMP, i)) {
2502 			ncm++;
2503 		}
2504 	}
2505 
2506 	/* eliminate the CPU information from the devset */
2507 	devset &= ~(DEVSET(SBD_COMP_CMP, DEVSET_ANYUNIT));
2508 
2509 	for (i = 0; i < (sizeof (dr_devset_t) * 8); i++) {
2510 		ncm += devset & 0x1;
2511 		devset >>= 1;
2512 	}
2513 
2514 	return (ncm);
2515 }
2516 
2517 /* used by dr_mem.c */
2518 /* TODO: eliminate dr_boardlist */
2519 dr_board_t *
2520 dr_lookup_board(int board_num)
2521 {
2522 	dr_board_t *bp;
2523 
2524 	ASSERT(board_num >= 0 && board_num < MAX_BOARDS);
2525 
2526 	bp = &dr_boardlist[board_num];
2527 	ASSERT(bp->b_num == board_num);
2528 
2529 	return (bp);
2530 }
2531 
2532 static dr_dev_unit_t *
2533 dr_get_dev_unit(dr_board_t *bp, sbd_comp_type_t nt, int unit_num)
2534 {
2535 	dr_dev_unit_t	*dp;
2536 
2537 	dp = DR_GET_BOARD_DEVUNIT(bp, nt, unit_num);
2538 	ASSERT(dp->du_common.sbdev_bp == bp);
2539 	ASSERT(dp->du_common.sbdev_unum == unit_num);
2540 	ASSERT(dp->du_common.sbdev_type == nt);
2541 
2542 	return (dp);
2543 }
2544 
2545 dr_cpu_unit_t *
2546 dr_get_cpu_unit(dr_board_t *bp, int unit_num)
2547 {
2548 	dr_dev_unit_t	*dp;
2549 
2550 	ASSERT(unit_num >= 0 && unit_num < MAX_CPU_UNITS_PER_BOARD);
2551 
2552 	dp = dr_get_dev_unit(bp, SBD_COMP_CPU, unit_num);
2553 	return (&dp->du_cpu);
2554 }
2555 
2556 dr_mem_unit_t *
2557 dr_get_mem_unit(dr_board_t *bp, int unit_num)
2558 {
2559 	dr_dev_unit_t	*dp;
2560 
2561 	ASSERT(unit_num >= 0 && unit_num < MAX_MEM_UNITS_PER_BOARD);
2562 
2563 	dp = dr_get_dev_unit(bp, SBD_COMP_MEM, unit_num);
2564 	return (&dp->du_mem);
2565 }
2566 
2567 dr_io_unit_t *
2568 dr_get_io_unit(dr_board_t *bp, int unit_num)
2569 {
2570 	dr_dev_unit_t	*dp;
2571 
2572 	ASSERT(unit_num >= 0 && unit_num < MAX_IO_UNITS_PER_BOARD);
2573 
2574 	dp = dr_get_dev_unit(bp, SBD_COMP_IO, unit_num);
2575 	return (&dp->du_io);
2576 }
2577 
2578 dr_common_unit_t *
2579 dr_get_common_unit(dr_board_t *bp, sbd_comp_type_t nt, int unum)
2580 {
2581 	dr_dev_unit_t	*dp;
2582 
2583 	dp = dr_get_dev_unit(bp, nt, unum);
2584 	return (&dp->du_common);
2585 }
2586 
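/*
 * Convert a command's component id into a devset (bit mask) of
 * candidate units.  SBD_COMP_NONE selects every CPU, memory and IO unit
 * on the board; a CPU id is expanded to its whole CMP so all sibling
 * cores are covered; an out-of-range unit yields an empty devset.
 */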
2587 static dr_devset_t
2588 dr_dev2devset(sbd_comp_id_t *cid)
2589 {
2590 	static fn_t	f = "dr_dev2devset";
2591 
2592 	dr_devset_t	devset;
2593 	int		unit = cid->c_unit;
2594 
2595 	switch (cid->c_type) {
2596 		case SBD_COMP_NONE:
2597 			devset =  DEVSET(SBD_COMP_CPU, DEVSET_ANYUNIT);
2598 			devset |= DEVSET(SBD_COMP_MEM, DEVSET_ANYUNIT);
2599 			devset |= DEVSET(SBD_COMP_IO,  DEVSET_ANYUNIT);
2600 			PR_ALL("%s: COMP_NONE devset = 0x%x\n", f, devset);
2601 			break;
2602 
2603 		case SBD_COMP_CPU:
2604 			if ((unit >= MAX_CPU_UNITS_PER_BOARD) || (unit < 0)) {
2605 				cmn_err(CE_WARN,
2606 					"%s: invalid cpu unit# = %d",
2607 					f, unit);
2608 				devset = 0;
2609 			} else {
2610 				/*
2611 				 * Generate a devset that includes all the
2612 				 * cores of a CMP device. If this is not a
2613 				 * CMP, the extra cores will be eliminated
2614 				 * later since they are not present. This is
2615 				 * also true for CMP devices that do not have
2616 				 * all cores active.
2617 				 */
2618 				devset = DEVSET(SBD_COMP_CMP, unit);
2619 			}
2620 
2621 			PR_ALL("%s: CPU devset = 0x%x\n", f, devset);
2622 			break;
2623 
2624 		case SBD_COMP_MEM:
2625 			if (unit == SBD_NULL_UNIT) {
2626 				unit = 0;
2627 				cid->c_unit = 0;
2628 			}
2629 
2630 			if ((unit >= MAX_MEM_UNITS_PER_BOARD) || (unit < 0)) {
2631 				cmn_err(CE_WARN,
2632 					"%s: invalid mem unit# = %d",
2633 					f, unit);
2634 				devset = 0;
2635 			} else
2636 				devset = DEVSET(cid->c_type, unit);
2637 
2638 			PR_ALL("%s: MEM devset = 0x%x\n", f, devset);
2639 			break;
2640 
2641 		case SBD_COMP_IO:
2642 			if ((unit >= MAX_IO_UNITS_PER_BOARD) || (unit < 0)) {
2643 				cmn_err(CE_WARN,
2644 					"%s: invalid io unit# = %d",
2645 					f, unit);
2646 				devset = 0;
2647 			} else
2648 				devset = DEVSET(cid->c_type, unit);
2649 
2650 			PR_ALL("%s: IO devset = 0x%x\n", f, devset);
2651 			break;
2652 
2653 		default:
2654 		case SBD_COMP_UNKNOWN:
2655 			devset = 0;
2656 			break;
2657 	}
2658 
2659 	return (devset);
2660 }
2661 
2662 /*
2663  * Converts a dynamic attachment point name to an SBD_COMP_* type.
2664  * Returns SBD_COMP_UNKNOWN if the name is not recognized.
2665  */
2666 static int
2667 dr_dev_type_to_nt(char *type)
2668 {
2669 	int i;
2670 
2671 	for (i = 0; dr_devattr[i].s_nodetype != SBD_COMP_UNKNOWN; i++)
2672 		if (strcmp(dr_devattr[i].s_devtype, type) == 0)
2673 			break;
2674 
2675 	return (dr_devattr[i].s_nodetype);
2676 }
2677 
2678 /*
2679  * Converts an SBD_COMP_* type to a dynamic attachment point name.
2680  * Returns NULL if the SBD_COMP_* type is not recognized.
2681  */
2682 char *
2683 dr_nt_to_dev_type(int nt)
2684 {
2685 	int i;
2686 
2687 	for (i = 0; dr_devattr[i].s_nodetype != SBD_COMP_UNKNOWN; i++)
2688 		if (dr_devattr[i].s_nodetype == nt)
2689 			break;
2690 
2691 	return (dr_devattr[i].s_devtype);
2692 }
2693 
2694 
2695 /*
2696  * State transition policy is that if there is some component for which
2697  * the state transition is valid, then let it through. The exception is
2698  * SBD_CMD_DISCONNECT. On disconnect, the state transition must be valid
2699  * for ALL components.
2700  * Returns the state that is in error, if any.
2701  */
2702 static int
2703 dr_check_transition(dr_board_t *bp, dr_devset_t *devsetp,
2704 			struct dr_state_trans *transp, int cmd)
2705 {
2706 	int			s, ut;
2707 	int			state_err = 0;
2708 	dr_devset_t		devset;
2709 	dr_common_unit_t	*cp;
2710 	static fn_t		f = "dr_check_transition";
2711 
2712 	devset = *devsetp;
2713 
2714 	if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
2715 		for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
2716 			if (DEVSET_IN_SET(devset, SBD_COMP_CPU, ut) == 0)
2717 				continue;
2718 
2719 			cp = dr_get_common_unit(bp, SBD_COMP_CPU, ut);
2720 			s = (int)cp->sbdev_state;
2721 			if (!DR_DEV_IS_PRESENT(cp)) {
2722 				DEVSET_DEL(devset, SBD_COMP_CPU, ut);
2723 			} else {
2724 				if (transp->x_op[s].x_rv) {
2725 					if (!state_err)
2726 						state_err = s;
2727 					DEVSET_DEL(devset, SBD_COMP_CPU, ut);
2728 				}
2729 			}
2730 		}
2731 	}
2732 	if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
2733 		for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
2734 			if (DEVSET_IN_SET(devset, SBD_COMP_MEM, ut) == 0)
2735 				continue;
2736 
2737 			cp = dr_get_common_unit(bp, SBD_COMP_MEM, ut);
2738 			s = (int)cp->sbdev_state;
2739 			if (!DR_DEV_IS_PRESENT(cp)) {
2740 				DEVSET_DEL(devset, SBD_COMP_MEM, ut);
2741 			} else {
2742 				if (transp->x_op[s].x_rv) {
2743 					if (!state_err)
2744 						state_err = s;
2745 					DEVSET_DEL(devset, SBD_COMP_MEM, ut);
2746 				}
2747 			}
2748 		}
2749 	}
2750 	if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
2751 		for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
2752 			if (DEVSET_IN_SET(devset, SBD_COMP_IO, ut) == 0)
2753 				continue;
2754 
2755 			cp = dr_get_common_unit(bp, SBD_COMP_IO, ut);
2756 			s = (int)cp->sbdev_state;
2757 			if (!DR_DEV_IS_PRESENT(cp)) {
2758 				DEVSET_DEL(devset, SBD_COMP_IO, ut);
2759 			} else {
2760 				if (transp->x_op[s].x_rv) {
2761 					if (!state_err)
2762 						state_err = s;
2763 					DEVSET_DEL(devset, SBD_COMP_IO, ut);
2764 				}
2765 			}
2766 		}
2767 	}
2768 
2769 	PR_ALL("%s: requested devset = 0x%x, final devset = 0x%x\n",
2770 		f, (uint_t)*devsetp, (uint_t)devset);
2771 
2772 	*devsetp = devset;
2773 	/*
2774 	 * If there are some remaining components for which
2775 	 * this state transition is valid, then allow them
2776 	 * through; otherwise, if none are left, return
2777 	 * the state error. The exception is SBD_CMD_DISCONNECT.
2778 	 * On disconnect, the state transition must be valid for ALL
2779 	 * components.
2780 	 */
2781 	if (cmd == SBD_CMD_DISCONNECT)
2782 		return (state_err);
2783 	return (devset ? 0 : state_err);
2784 }
2785 
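/*
 * Move a component to a new state, keeping its occupant state and
 * timestamp in sync.  A transition to CONFIGURED also marks the owning
 * board's occupant state configured.
 */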
2786 void
2787 dr_device_transition(dr_common_unit_t *cp, dr_state_t st)
2788 {
2789 	PR_STATE("%s STATE %s(%d) -> %s(%d)\n",
2790 		cp->sbdev_path,
2791 		state_str[cp->sbdev_state], cp->sbdev_state,
2792 		state_str[st], st);
2793 
2794 	cp->sbdev_state = st;
2795 	if (st == DR_STATE_CONFIGURED) {
2796 		cp->sbdev_ostate = SBD_STAT_CONFIGURED;
2797 		if (cp->sbdev_bp->b_ostate != SBD_STAT_CONFIGURED) {
2798 			cp->sbdev_bp->b_ostate = SBD_STAT_CONFIGURED;
2799 			(void) drv_getparm(TIME,
2800 				(void *) &cp->sbdev_bp->b_time);
2801 		}
2802 	} else
2803 		cp->sbdev_ostate = SBD_STAT_UNCONFIGURED;
2804 
2805 	(void) drv_getparm(TIME, (void *) &cp->sbdev_time);
2806 }
2807 
2808 static void
2809 dr_board_transition(dr_board_t *bp, dr_state_t st)
2810 {
2811 	PR_STATE("BOARD %d STATE: %s(%d) -> %s(%d)\n",
2812 		bp->b_num,
2813 		state_str[bp->b_state], bp->b_state,
2814 		state_str[st], st);
2815 
2816 	bp->b_state = st;
2817 }
2818 
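/*
 * Build an sbd_error_t from the given code and format arguments,
 * optionally log it at the given cmn_err() level (CE_IGNORE suppresses
 * logging), and attach it to the handle's h_err.
 */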
2819 void
2820 dr_op_err(int ce, dr_handle_t *hp, int code, char *fmt, ...)
2821 {
2822 	sbd_error_t	*err;
2823 	va_list		args;
2824 
2825 	va_start(args, fmt);
2826 	err = drerr_new_v(code, fmt, args);
2827 	va_end(args);
2828 
2829 	if (ce != CE_IGNORE)
2830 		sbd_err_log(err, ce);
2831 
2832 	DRERR_SET_C(&hp->h_err, &err);
2833 }
2834 
2835 void
2836 dr_dev_err(int ce, dr_common_unit_t *cp, int code)
2837 {
2838 	sbd_error_t	*err;
2839 
2840 	err = drerr_new(0, code, cp->sbdev_path, NULL);
2841 
2842 	if (ce != CE_IGNORE)
2843 		sbd_err_log(err, ce);
2844 
2845 	DRERR_SET_C(&cp->sbdev_error, &err);
2846 }
2847 
2848 /*
2849  * A callback routine.  Called from the drmach layer as a result of
2850  * a call to drmach_board_find_devices from dr_init_devlists.
2851  */
2852 static sbd_error_t *
2853 dr_dev_found(void *data, const char *name, int unum, drmachid_t id)
2854 {
2855 	dr_board_t	*bp = data;
2856 	dr_dev_unit_t	*dp;
2857 	int		 nt;
2858 	static fn_t	f = "dr_dev_found";
2859 
2860 	PR_ALL("%s (board = %d, name = %s, unum = %d, id = %p)...\n",
2861 		f, bp->b_num, name, unum, id);
2862 
2863 	nt = dr_dev_type_to_nt((char *)name);
2864 	if (nt == SBD_COMP_UNKNOWN) {
2865 		/*
2866 		 * This should not happen.  When it does, it indicates a
2867 		 * mismatch between the devices supported by the drmach
2868 		 * layer and those supported by this layer.
2869 		 */
2870 		return (DR_INTERNAL_ERROR());
2871 	}
2872 
2873 	dp = DR_GET_BOARD_DEVUNIT(bp, nt, unum);
2874 
2875 	/* sanity check */
2876 	ASSERT(dp->du_common.sbdev_bp == bp);
2877 	ASSERT(dp->du_common.sbdev_unum == unum);
2878 	ASSERT(dp->du_common.sbdev_type == nt);
2879 
2880 	/* render dynamic attachment point path of this unit */
2881 	(void) snprintf(dp->du_common.sbdev_path,
2882 			sizeof (dp->du_common.sbdev_path),
2883 			(nt == SBD_COMP_MEM ? "%s::%s" : "%s::%s%d"),
2884 			bp->b_path, name, DR_UNUM2SBD_UNUM(unum));
2885 
2886 	dp->du_common.sbdev_id = id;
2887 	DR_DEV_SET_PRESENT(&dp->du_common);
2888 
2889 	bp->b_ndev++;
2890 
2891 	return (NULL);
2892 }
2893 
2894 static sbd_error_t *
2895 dr_init_devlists(dr_board_t *bp)
2896 {
2897 	int		i;
2898 	sbd_error_t	*err;
2899 	dr_dev_unit_t	*dp;
2900 	static fn_t	f = "dr_init_devlists";
2901 
2902 	PR_ALL("%s (%s)...\n", f, bp->b_path);
2903 
2904 	/* sanity check */
2905 	ASSERT(bp->b_ndev == 0);
2906 
2907 	DR_DEVS_DISCONNECT(bp, (uint_t)-1);
2908 
2909 	/*
2910 	 * This routine builds the board's devlist and initializes
2911 	 * the common portion of the unit data structures.
2912 	 * Note: because the common portion is considered
2913 	 * uninitialized, the dr_get_*_unit() routines cannot
2914 	 * be used.
2915 	 */
2916 
2917 	/*
2918 	 * Clear out old entries, if any.
2919 	 */
2920 	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
2921 		dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_CPU, i);
2922 
2923 		bzero(dp, sizeof (*dp));
2924 		dp->du_common.sbdev_bp = bp;
2925 		dp->du_common.sbdev_unum = i;
2926 		dp->du_common.sbdev_type = SBD_COMP_CPU;
2927 	}
2928 
2929 	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
2930 		dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_MEM, i);
2931 
2932 		bzero(dp, sizeof (*dp));
2933 		dp->du_common.sbdev_bp = bp;
2934 		dp->du_common.sbdev_unum = i;
2935 		dp->du_common.sbdev_type = SBD_COMP_MEM;
2936 	}
2937 
2938 	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
2939 		dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_IO, i);
2940 
2941 		bzero(dp, sizeof (*dp));
2942 		dp->du_common.sbdev_bp = bp;
2943 		dp->du_common.sbdev_unum = i;
2944 		dp->du_common.sbdev_type = SBD_COMP_IO;
2945 	}
2946 
2947 	err = NULL;
2948 	if (bp->b_id) {
2949 		/* find devices on this board */
2950 		err = drmach_board_find_devices(
2951 			bp->b_id, bp, dr_dev_found);
2952 	}
2953 
2954 	return (err);
2955 }
2956 
2957 /*
2958  * Check whether the device backing the given unit is attached.
2959  * Returns 0 if it is attached, -1 otherwise (including on error).
2960  */
2961 static int
2962 dr_check_unit_attached(dr_common_unit_t *cp)
2963 {
2964 	int		rv = 0;
2965 	processorid_t	cpuid;
2966 	uint64_t	basepa, endpa;
2967 	struct memlist	*ml;
2968 	extern struct memlist	*phys_install;
2969 	sbd_error_t	*err;
2970 	int		yes;
2971 	static fn_t	f = "dr_check_unit_attached";
2972 
2973 	switch (cp->sbdev_type) {
2974 	case SBD_COMP_CPU:
2975 		err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
2976 		if (err) {
2977 			DRERR_SET_C(&cp->sbdev_error, &err);
2978 			rv = -1;
2979 			break;
2980 		}
2981 		mutex_enter(&cpu_lock);
2982 		if (cpu_get(cpuid) == NULL)
2983 			rv = -1;
2984 		mutex_exit(&cpu_lock);
2985 		break;
2986 
2987 	case SBD_COMP_MEM:
2988 		err = drmach_mem_get_base_physaddr(cp->sbdev_id, &basepa);
2989 		if (err) {
2990 			DRERR_SET_C(&cp->sbdev_error, &err);
2991 			rv = -1;
2992 			break;
2993 		}
2994 
2995 		/*
2996 		 * basepa may not be aligned on a slice boundary; make it so.
2997 		 */
2998 		err = drmach_mem_get_slice_size(cp->sbdev_id, &endpa);
2999 		if (err) {
3000 			DRERR_SET_C(&cp->sbdev_error, &err);
3001 			rv = -1;
3002 			break;
3003 		}
3004 
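		/*
		 * endpa currently holds the slice size returned above;
		 * round basepa down to its slice boundary and turn endpa
		 * into the exclusive end address of that slice.
		 */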
3005 		basepa &= ~(endpa - 1);
3006 		endpa += basepa;
3007 
3008 		/*
3009 		 * Check whether the memory slice overlaps phys_install.
3010 		 */
3011 		memlist_read_lock();
3012 		for (ml = phys_install; ml; ml = ml->next) {
3013 			if ((endpa > ml->address) &&
3014 			    (basepa < (ml->address + ml->size)))
3015 				break;
3016 		}
3018 		memlist_read_unlock();
3019 		if (ml == NULL)
3020 			rv = -1;
3021 		break;
3022 
3023 	case SBD_COMP_IO:
3024 		err = drmach_io_is_attached(cp->sbdev_id, &yes);
3025 		if (err) {
3026 			DRERR_SET_C(&cp->sbdev_error, &err);
3027 			rv = -1;
3028 			break;
3029 		} else if (!yes)
3030 			rv = -1;
3031 		break;
3032 
3033 	default:
3034 		PR_ALL("%s: unexpected nodetype(%d) for id %p\n",
3035 			f, cp->sbdev_type, cp->sbdev_id);
3036 		rv = -1;
3037 		break;
3038 	}
3039 
3040 	return (rv);
3041 }
3042 
3043 /*
3044  * See if drmach recognizes the passthru command.  DRMACH expects the
3045  * id to identify the object to which the command is being applied.
3046  * In SBD terms, that information is encoded in the c_id member of
3047  * the sbd_cmd_t structure.  This routine decodes c_id, finds the
3048  * associated drmach id, then calls drmach to process the passthru
3049  * command.
3050  */
3051 static int
3052 dr_pt_try_drmach(dr_handle_t *hp)
3053 {
3054 	dr_board_t	*bp = hp->h_bd;
3055 	sbd_comp_id_t	*comp_id = &hp->h_sbdcmd.cmd_cm.c_id;
3056 	drmachid_t	 id;
3057 
3058 	if (comp_id->c_type == SBD_COMP_NONE) {
3059 		id = bp->b_id;
3060 	} else {
3061 		sbd_comp_type_t	 nt;
3062 
3063 		nt = dr_dev_type_to_nt(comp_id->c_name);
3064 		if (nt == SBD_COMP_UNKNOWN) {
3065 			dr_op_err(CE_IGNORE, hp, ESBD_INVAL, comp_id->c_name);
3066 			id = 0;
3067 		} else {
3068 			/* pt command applied to dynamic attachment point */
3069 			dr_common_unit_t *cp;
3070 			cp = dr_get_common_unit(bp, nt, comp_id->c_unit);
3071 			id = cp->sbdev_id;
3072 		}
3073 	}
3074 
3075 	if (hp->h_err == NULL)
3076 		hp->h_err = drmach_passthru(id, &hp->h_opts);
3077 
3078 	return (hp->h_err == NULL ? 0 : -1);
3079 }
3080 
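/*
 * Handle an SBD passthru request.  If the option string names one of
 * the locally implemented passthru commands in pt_arr[], invoke its
 * handler; otherwise hand the request to the drmach layer.
 */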
3081 static int
3082 dr_pt_ioctl(dr_handle_t *hp)
3083 {
3084 	int		cmd, rv, len;
3085 	int32_t		sz;
3086 	int		found;
3087 	char		*copts;
3088 	static fn_t	f = "dr_pt_ioctl";
3089 
3090 	PR_ALL("%s...\n", f);
3091 
3092 	sz = hp->h_opts.size;
3093 	copts = hp->h_opts.copts;
3094 
3095 	if (sz == 0 || copts == (char *)NULL) {
3096 		cmn_err(CE_WARN, "%s: invalid passthru args", f);
3097 		return (EINVAL);
3098 	}
3099 
3100 	found = 0;
3101 	for (cmd = 0; cmd < (sizeof (pt_arr) / sizeof (pt_arr[0])); cmd++) {
3102 		len = strlen(pt_arr[cmd].pt_name);
3103 		found = (strncmp(pt_arr[cmd].pt_name, copts, len) == 0);
3104 		if (found)
3105 			break;
3106 	}
3107 
3108 	if (found)
3109 		rv = (*pt_arr[cmd].pt_func)(hp);
3110 	else
3111 		rv = dr_pt_try_drmach(hp);
3112 
3113 	return (rv);
3114 }
3115 
3116 /*
3117  * Called at driver load time to determine the state and condition
3118  * of an existing board in the system.
3119  */
3120 static void
3121 dr_board_discovery(dr_board_t *bp)
3122 {
3123 	int			i;
3124 	dr_devset_t		devs_lost, devs_attached = 0;
3125 	dr_cpu_unit_t		*cp;
3126 	dr_mem_unit_t		*mp;
3127 	dr_io_unit_t		*ip;
3128 	static fn_t		f = "dr_board_discovery";
3129 
3130 	if (DR_DEVS_PRESENT(bp) == 0) {
3131 		PR_ALL("%s: board %d has no devices present\n",
3132 			f, bp->b_num);
3133 		return;
3134 	}
3135 
3136 	/*
3137 	 * Check for existence of cpus.
3138 	 */
3139 	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
3140 		cp = dr_get_cpu_unit(bp, i);
3141 
3142 		if (!DR_DEV_IS_PRESENT(&cp->sbc_cm))
3143 			continue;
3144 
3145 		if (dr_check_unit_attached(&cp->sbc_cm) >= 0) {
3146 			DR_DEV_SET_ATTACHED(&cp->sbc_cm);
3147 			DEVSET_ADD(devs_attached, SBD_COMP_CPU, i);
3148 			PR_ALL("%s: board %d, cpu-unit %d - attached\n",
3149 				f, bp->b_num, i);
3150 		}
3151 		dr_init_cpu_unit(cp);
3152 	}
3153 
3154 	/*
3155 	 * Check for existence of memory.
3156 	 */
3157 	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
3158 		mp = dr_get_mem_unit(bp, i);
3159 
3160 		if (!DR_DEV_IS_PRESENT(&mp->sbm_cm))
3161 			continue;
3162 
3163 		if (dr_check_unit_attached(&mp->sbm_cm) >= 0) {
3164 			DR_DEV_SET_ATTACHED(&mp->sbm_cm);
3165 			DEVSET_ADD(devs_attached, SBD_COMP_MEM, i);
3166 			PR_ALL("%s: board %d, mem-unit %d - attached\n",
3167 				f, bp->b_num, i);
3168 		}
3169 		dr_init_mem_unit(mp);
3170 	}
3171 
3172 	/*
3173 	 * Check for i/o state.
3174 	 */
3175 	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
3176 		ip = dr_get_io_unit(bp, i);
3177 
3178 		if (!DR_DEV_IS_PRESENT(&ip->sbi_cm))
3179 			continue;
3180 
3181 		if (dr_check_unit_attached(&ip->sbi_cm) >= 0) {
3182 			/*
3183 			 * Found it!
3184 			 */
3185 			DR_DEV_SET_ATTACHED(&ip->sbi_cm);
3186 			DEVSET_ADD(devs_attached, SBD_COMP_IO, i);
3187 			PR_ALL("%s: board %d, io-unit %d - attached\n",
3188 				f, bp->b_num, i);
3189 		}
3190 		dr_init_io_unit(ip);
3191 	}
3192 
3193 	DR_DEVS_CONFIGURE(bp, devs_attached);
3194 	if (devs_attached && ((devs_lost = DR_DEVS_UNATTACHED(bp)) != 0)) {
3195 		int		ut;
3196 		/*
3197 		 * It is not legal on board discovery to have a
3198 		 * board that is only partially attached.  A board
3199 		 * is either all attached or all connected.  If a
3200 		 * board has at least one attached device, then
3201 		 * the remaining devices, if any, must have
3202 		 * been lost or disconnected.  These devices can
3203 		 * only be recovered by a full attach from scratch.
3204 		 * Note that devices previously in the unreferenced
3205 		 * state are subsequently lost until the next full
3206 		 * attach.  This is necessary since the driver unload
3207 		 * that must have occurred would have wiped out the
3208 		 * information necessary to re-configure the device
3209 		 * back online, e.g. memlist.
3210 		 */
3211 		PR_ALL("%s: some devices LOST (0x%x)...\n", f, devs_lost);
3212 
3213 		for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
3214 			if (!DEVSET_IN_SET(devs_lost, SBD_COMP_CPU, ut))
3215 				continue;
3216 
3217 			cp = dr_get_cpu_unit(bp, ut);
3218 			dr_device_transition(&cp->sbc_cm, DR_STATE_EMPTY);
3219 		}
3220 
3221 		for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
3222 			if (!DEVSET_IN_SET(devs_lost, SBD_COMP_MEM, ut))
3223 				continue;
3224 
3225 			mp = dr_get_mem_unit(bp, ut);
3226 			dr_device_transition(&mp->sbm_cm, DR_STATE_EMPTY);
3227 		}
3228 
3229 		for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
3230 			if (!DEVSET_IN_SET(devs_lost, SBD_COMP_IO, ut))
3231 				continue;
3232 
3233 			ip = dr_get_io_unit(bp, ut);
3234 			dr_device_transition(&ip->sbi_cm, DR_STATE_EMPTY);
3235 		}
3236 
3237 		DR_DEVS_DISCONNECT(bp, devs_lost);
3238 	}
3239 }
3240 
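/*
 * One-time setup of a dr_board_t: initialize its locks and status
 * fields, look up the corresponding drmach board id, allocate the
 * per-board CPU, memory and IO unit arrays, build the device lists,
 * and derive the board's initial state from whatever is already
 * attached.  Returns 0 on success, or -1 if the device lists cannot
 * be initialized (the board is destroyed in that case).
 */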
3241 static int
3242 dr_board_init(dr_board_t *bp, dev_info_t *dip, int bd)
3243 {
3244 	sbd_error_t	*err;
3245 
3246 	mutex_init(&bp->b_lock, NULL, MUTEX_DRIVER, NULL);
3247 	mutex_init(&bp->b_slock, NULL, MUTEX_DRIVER, NULL);
3248 	cv_init(&bp->b_scv, NULL, CV_DRIVER, NULL);
3249 	bp->b_rstate = SBD_STAT_EMPTY;
3250 	bp->b_ostate = SBD_STAT_UNCONFIGURED;
3251 	bp->b_cond = SBD_COND_UNKNOWN;
3252 	(void) drv_getparm(TIME, (void *)&bp->b_time);
3253 
3254 	(void) drmach_board_lookup(bd, &bp->b_id);
3255 	bp->b_num = bd;
3256 	bp->b_dip = dip;
3257 
3258 	bp->b_dev[NIX(SBD_COMP_CPU)] = GETSTRUCT(dr_dev_unit_t,
3259 						MAX_CPU_UNITS_PER_BOARD);
3260 
3261 	bp->b_dev[NIX(SBD_COMP_MEM)] = GETSTRUCT(dr_dev_unit_t,
3262 						MAX_MEM_UNITS_PER_BOARD);
3263 
3264 	bp->b_dev[NIX(SBD_COMP_IO)] = GETSTRUCT(dr_dev_unit_t,
3265 						MAX_IO_UNITS_PER_BOARD);
3266 
3267 	/*
3268 	 * Initialize the devlists
3269 	 */
3270 	err = dr_init_devlists(bp);
3271 	if (err) {
3272 		sbd_err_clear(&err);
3273 		dr_board_destroy(bp);
3274 		return (-1);
3275 	} else if (bp->b_ndev == 0) {
3276 		dr_board_transition(bp, DR_STATE_EMPTY);
3277 	} else {
3278 		/*
3279 		 * Couldn't have made it down here without
3280 		 * having found at least one device.
3281 		 */
3282 		ASSERT(DR_DEVS_PRESENT(bp) != 0);
3283 		/*
3284 		 * Check the state of any possible devices on the
3285 		 * board.
3286 		 */
3287 		dr_board_discovery(bp);
3288 
3289 		bp->b_assigned = 1;
3290 
3291 		if (DR_DEVS_UNATTACHED(bp) == 0) {
3292 			/*
3293 			 * The board has no unattached devices; it must
3294 			 * therefore already be fully configured.
3295 			 */
3296 			dr_board_transition(bp, DR_STATE_CONFIGURED);
3297 			bp->b_ostate = SBD_STAT_CONFIGURED;
3298 			bp->b_rstate = SBD_STAT_CONNECTED;
3299 			bp->b_cond = SBD_COND_OK;
3300 			(void) drv_getparm(TIME, (void *)&bp->b_time);
3301 		} else if (DR_DEVS_ATTACHED(bp)) {
3302 			dr_board_transition(bp, DR_STATE_PARTIAL);
3303 			bp->b_ostate = SBD_STAT_CONFIGURED;
3304 			bp->b_rstate = SBD_STAT_CONNECTED;
3305 			bp->b_cond = SBD_COND_OK;
3306 			(void) drv_getparm(TIME, (void *)&bp->b_time);
3307 		} else {
3308 			dr_board_transition(bp, DR_STATE_CONNECTED);
3309 			bp->b_rstate = SBD_STAT_CONNECTED;
3310 			(void) drv_getparm(TIME, (void *)&bp->b_time);
3311 		}
3312 	}
3313 
3314 	return (0);
3315 }
3316 
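/*
 * Undo dr_board_init(): mark the board empty, free the per-board unit
 * arrays, and destroy the board's locks and condition variable.
 */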
3317 static void
3318 dr_board_destroy(dr_board_t *bp)
3319 {
3320 	PR_ALL("dr_board_destroy: num %d, path %s\n",
3321 		bp->b_num, bp->b_path);
3322 
3323 	dr_board_transition(bp, DR_STATE_EMPTY);
3324 	bp->b_rstate = SBD_STAT_EMPTY;
3325 	(void) drv_getparm(TIME, (void *)&bp->b_time);
3326 
3327 	/*
3328 	 * Free up MEM unit structs.
3329 	 */
3330 	FREESTRUCT(bp->b_dev[NIX(SBD_COMP_MEM)],
3331 			dr_dev_unit_t, MAX_MEM_UNITS_PER_BOARD);
3332 	bp->b_dev[NIX(SBD_COMP_MEM)] = NULL;
3333 	/*
3334 	 * Free up CPU unit structs.
3335 	 */
3336 	FREESTRUCT(bp->b_dev[NIX(SBD_COMP_CPU)],
3337 			dr_dev_unit_t, MAX_CPU_UNITS_PER_BOARD);
3338 	bp->b_dev[NIX(SBD_COMP_CPU)] = NULL;
3339 	/*
3340 	 * Free up IO unit structs.
3341 	 */
3342 	FREESTRUCT(bp->b_dev[NIX(SBD_COMP_IO)],
3343 			dr_dev_unit_t, MAX_IO_UNITS_PER_BOARD);
3344 	bp->b_dev[NIX(SBD_COMP_IO)] = NULL;
3345 
3346 	mutex_destroy(&bp->b_lock);
3347 	mutex_destroy(&bp->b_slock);
3348 	cv_destroy(&bp->b_scv);
3349 }
3350 
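/*
 * dr_lock_status() and dr_unlock_status() serialize status collection
 * against operations such as disconnect, unassign and IO unconfigure
 * by means of the DR_BSLOCK flag protected by b_slock.  Typical usage,
 * sketched for illustration only:
 *
 *	dr_lock_status(bp);
 *	...examine or update status-visible board state...
 *	dr_unlock_status(bp);
 *
 * dr_lock_status() waits uninterruptibly; the ioctl paths above use an
 * interruptible cv_wait_sig() loop on the same flag instead.
 */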
3351 void
3352 dr_lock_status(dr_board_t *bp)
3353 {
3354 	mutex_enter(&bp->b_slock);
3355 	while (bp->b_sflags & DR_BSLOCK)
3356 		cv_wait(&bp->b_scv, &bp->b_slock);
3357 	bp->b_sflags |= DR_BSLOCK;
3358 	mutex_exit(&bp->b_slock);
3359 }
3360 
3361 void
3362 dr_unlock_status(dr_board_t *bp)
3363 {
3364 	mutex_enter(&bp->b_slock);
3365 	bp->b_sflags &= ~DR_BSLOCK;
3366 	cv_signal(&bp->b_scv);
3367 	mutex_exit(&bp->b_slock);
3368 }
3369 
3370 /*
3371  * Extract flags passed via ioctl.
3372  */
3373 int
3374 dr_cmd_flags(dr_handle_t *hp)
3375 {
3376 	return (hp->h_sbdcmd.cmd_cm.c_flags);
3377 }
3378