xref: /titanic_50/usr/src/uts/sun4u/io/sbd.c (revision 0a0e9771ca0211c15f3ac4466b661c145feeb9e4)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * safari system board DR module.
30  */
31 
32 #include <sys/debug.h>
33 #include <sys/types.h>
34 #include <sys/errno.h>
35 #include <sys/cred.h>
36 #include <sys/dditypes.h>
37 #include <sys/devops.h>
38 #include <sys/modctl.h>
39 #include <sys/poll.h>
40 #include <sys/conf.h>
41 #include <sys/ddi.h>
42 #include <sys/sunddi.h>
43 #include <sys/sunndi.h>
44 #include <sys/ndi_impldefs.h>
45 #include <sys/stat.h>
46 #include <sys/kmem.h>
47 #include <sys/cpuvar.h>
48 #include <sys/mem_config.h>
49 #include <sys/mem_cage.h>
50 
51 #include <sys/autoconf.h>
52 #include <sys/cmn_err.h>
53 
54 #include <sys/ddi_impldefs.h>
55 #include <sys/machsystm.h>
56 #include <sys/param.h>
57 
58 #include <sys/sbdpriv.h>
59 #include <sys/sbd_io.h>
60 
61 /* start sbd includes */
62 
63 #include <sys/systm.h>
64 #include <sys/sysmacros.h>
65 #include <sys/x_call.h>
66 #include <sys/membar.h>
67 #include <vm/seg_kmem.h>
68 
69 extern int nulldev();
70 extern int nodev();
71 
typedef struct {		/* arg to sbd_get_handle */
	dev_t	dev;		/* device the ioctl arrived on */
	int	cmd;		/* SBD_CMD_* ioctl command code */
	int	mode;		/* ioctl mode, for ddi_copyin/copyout */
	sbd_ioctl_arg_t *ioargp;	/* user-supplied ioctl argument block */
} sbd_init_arg_t;
78 
79 
80 /*
81  * sbd support operations.
82  */
83 static void	sbd_exec_op(sbd_handle_t *hp);
84 static void	sbd_dev_configure(sbd_handle_t *hp);
85 static int	sbd_dev_release(sbd_handle_t *hp);
86 static int	sbd_dev_unconfigure(sbd_handle_t *hp);
87 static void	sbd_attach_cpu(sbd_handle_t *hp, sbderror_t *ep,
88 				dev_info_t *dip, int unit);
89 static void	sbd_detach_cpu(sbd_handle_t *hp, sbderror_t *ep,
90 				dev_info_t *dip, int unit);
91 static int	sbd_detach_mem(sbd_handle_t *hp, sbderror_t *ep, int unit);
92 static void	sbd_cancel(sbd_handle_t *hp);
93 void 	sbd_errno_decode(int err, sbderror_t *ep, dev_info_t *dip);
94 int		sbd_dealloc_instance(sbd_board_t *sbp, int max_boards);
95 int		sbd_errno2ecode(int error);
96 #pragma weak sbdp_cpu_get_impl
97 
#ifdef DEBUG
/* debug-trace flags; 0 disables sbd debug output (consumed by PR_* macros) */
uint_t	sbd_debug	=	(uint_t)0x0;

#ifdef SBD_DEBUG_ERRS
/* controls which errors are injected */
uint_t	sbd_err_debug	=	(uint_t)0x0;

/* controls printing about error injection */
uint_t	sbd_print_errs	=	(uint_t)0x0;

#endif /* SBD_DEBUG_ERRS */

#endif /* DEBUG */
111 
/* printable board-state names, indexed by board state code */
char	*sbd_state_str[] = {
	"EMPTY", "OCCUPIED", "CONNECTED", "UNCONFIGURED",
	"PARTIAL", "CONFIGURED", "RELEASE", "UNREFERENCED",
	"FATAL"
};
117 
/*	Note: this must be changed in tandem with sbd_ioctl.h	*/
/* printable component (node) type names, indexed by sbd_comp_type_t */
char	*sbd_ct_str[] = {
	"NONE", "CPU", "MEM", "IO", "UNKNOWN"
};
122 
/*	Note: this must also be changed in tandem with sbd_ioctl.h */
/* Map an SBD_CMD_* code to its printable name (diagnostics only). */
#define	SBD_CMD_STR(c) \
	(((c) == SBD_CMD_ASSIGN)	? "ASSIGN"	: \
	((c) == SBD_CMD_UNASSIGN)	? "UNASSIGN"	: \
	((c) == SBD_CMD_POWERON)	? "POWERON"	: \
	((c) == SBD_CMD_POWEROFF)	? "POWEROFF"	: \
	((c) == SBD_CMD_TEST)		? "TEST"	: \
	((c) == SBD_CMD_CONNECT)	? "CONNECT"	: \
	((c) == SBD_CMD_CONFIGURE)	? "CONFIGURE"	: \
	((c) == SBD_CMD_UNCONFIGURE)	? "UNCONFIGURE"	: \
	((c) == SBD_CMD_DISCONNECT)	? "DISCONNECT"	: \
	((c) == SBD_CMD_STATUS)		? "STATUS"	: \
	((c) == SBD_CMD_GETNCM)		? "GETNCM"	: \
	((c) == SBD_CMD_PASSTHRU)	? "PASSTHRU"	: "unknown")
137 
138 /*
139  * Defines and structures for device tree naming and mapping
140  * to node types
141  */
142 
/* platform-supplied device attribute table; set in _init() via sbdp_get_devattr() */
sbd_devattr_t *sbd_devattr;

/* defines to access the attribute struct */
#define	SBD_DEVNAME(i)		sbd_devattr[i].s_devname
#define	SBD_OTYPE(i)		sbd_devattr[(i)].s_obp_type
#define	SBD_COMP(i)		sbd_devattr[i].s_dnodetype
149 
150 /*
151  * State transition table.  States valid transitions for "board" state.
152  * Recall that non-zero return value terminates operation, however
153  * the herrno value is what really indicates an error , if any.
154  */
155 static int
156 _cmd2index(int c)
157 {
158 	/*
159 	 * Translate DR CMD to index into sbd_state_transition.
160 	 */
161 	switch (c) {
162 	case SBD_CMD_CONNECT:		return (0);
163 	case SBD_CMD_DISCONNECT:	return (1);
164 	case SBD_CMD_CONFIGURE:		return (2);
165 	case SBD_CMD_UNCONFIGURE:	return (3);
166 	case SBD_CMD_POWEROFF:		return (4);
167 	case SBD_CMD_POWERON:		return (5);
168 	case SBD_CMD_UNASSIGN:		return (6);
169 	case SBD_CMD_ASSIGN:		return (7);
170 	case SBD_CMD_TEST:		return (8);
171 	default:			return (-1);
172 	}
173 }
174 
175 #define	CMD2INDEX(c)	_cmd2index(c)
176 
/*
 * Per-command table of valid board-state transitions, indexed first by
 * CMD2INDEX(cmd) and then by board state.  A non-zero x_rv terminates
 * the operation in pre_op; x_err is the errno to report.  An entry of
 * { 1, 0 } terminates without error (e.g. CONFIGURE of an already
 * configured board is a no-op).
 */
static struct sbd_state_trans {
	int	x_cmd;
	struct {
		int	x_rv;		/* return value of pre_op */
		int	x_err;		/* errno, if any */
	} x_op[SBD_NUM_STATES];
} sbd_state_transition[] = {
	{ SBD_CMD_CONNECT,
		{
			{ 0, 0 },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_DISCONNECT,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 0, 0 },	/* connected */
			{ 0, 0 },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_CONFIGURE,
		{
			{ 1, EIO },	/* empty */
			{ 1, EIO },	/* occupied */
			{ 0, 0 },	/* connected */
			{ 0, 0 },	/* unconfigured */
			{ 0, 0 },	/* partial */
			{ 1, 0 },	/* configured */
			{ 0, 0 },	/* release */
			{ 0, 0 },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_UNCONFIGURE,
		{
			{ 1, EIO },	/* empty */
			{ 1, EIO },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 0, 0 },	/* configured */
			{ 0, 0 },	/* release */
			{ 0, 0 },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_POWEROFF,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_POWERON,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_UNASSIGN,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_ASSIGN,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_TEST,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
};
302 
303 /*
304  * Global R/W lock to synchronize access across
305  * multiple boards.  Users wanting multi-board access
306  * must grab WRITE lock, others must grab READ lock.
307  */
308 krwlock_t	sbd_grwlock;
309 
310 /*
311  * Global to determine if an event needs to be sent
312  */
313 char send_event = 0;
314 
315 /*
316  * Required/Expected functions.
317  */
318 
319 static sbd_handle_t	*sbd_get_handle(dev_t dev, sbd_softstate_t *softsp,
320 				intptr_t arg, sbd_init_arg_t *iap);
321 static void		sbd_release_handle(sbd_handle_t *hp);
322 static int		sbd_pre_op(sbd_handle_t *hp);
323 static void		sbd_post_op(sbd_handle_t *hp);
324 static int		sbd_probe_board(sbd_handle_t *hp);
325 static int		sbd_deprobe_board(sbd_handle_t *hp);
326 static void		sbd_connect(sbd_handle_t *hp);
327 static void		sbd_assign_board(sbd_handle_t *hp);
328 static void		sbd_unassign_board(sbd_handle_t *hp);
329 static void		sbd_poweron_board(sbd_handle_t *hp);
330 static void		sbd_poweroff_board(sbd_handle_t *hp);
331 static void		sbd_test_board(sbd_handle_t *hp);
332 
333 static int		sbd_disconnect(sbd_handle_t *hp);
334 static sbd_devlist_t	*sbd_get_attach_devlist(sbd_handle_t *hp,
335 					int32_t *devnump, int32_t pass);
336 static int		sbd_pre_attach_devlist(sbd_handle_t *hp,
337 					sbd_devlist_t *devlist, int32_t devnum);
338 static int		sbd_post_attach_devlist(sbd_handle_t *hp,
339 					sbd_devlist_t *devlist, int32_t devnum);
340 static sbd_devlist_t	*sbd_get_release_devlist(sbd_handle_t *hp,
341 					int32_t *devnump, int32_t pass);
342 static int		sbd_pre_release_devlist(sbd_handle_t *hp,
343 					sbd_devlist_t *devlist, int32_t devnum);
344 static int		sbd_post_release_devlist(sbd_handle_t *hp,
345 					sbd_devlist_t *devlist, int32_t devnum);
346 static void		sbd_release_done(sbd_handle_t *hp,
347 					sbd_comp_type_t nodetype,
348 					dev_info_t *dip);
349 static sbd_devlist_t	*sbd_get_detach_devlist(sbd_handle_t *hp,
350 					int32_t *devnump, int32_t pass);
351 static int		sbd_pre_detach_devlist(sbd_handle_t *hp,
352 					sbd_devlist_t *devlist, int32_t devnum);
353 static int		sbd_post_detach_devlist(sbd_handle_t *hp,
354 					sbd_devlist_t *devlist, int32_t devnum);
355 static void		sbd_status(sbd_handle_t *hp);
356 static void		sbd_get_ncm(sbd_handle_t *hp);
357 
358 
359 /*
360  * Support functions.
361  */
362 static sbd_devset_t	sbd_dev2devset(sbd_comp_id_t *cid);
363 static int		sbd_copyin_ioarg(sbd_handle_t *hp, int mode, int cmd,
364 				sbd_cmd_t *cmdp, sbd_ioctl_arg_t *iap);
365 static int		sbd_copyout_errs(int mode, sbd_ioctl_arg_t *iap,
366 					void *arg);
367 static int		sbd_copyout_ioarg(int mode, int cmd, sbd_cmd_t *scp,
368 				sbd_ioctl_arg_t *iap);
369 static int		sbd_check_transition(sbd_board_t *sbp,
370 					sbd_devset_t *devsetp,
371 					struct sbd_state_trans *transp);
372 static sbd_devlist_t	*sbd_get_devlist(sbd_handle_t *hp,
373 					sbd_board_t *sbp,
374 					sbd_comp_type_t nodetype,
375 					int max_units, uint_t uset,
376 					int *count, int present_only);
377 static int		sbd_mem_status(sbd_handle_t *hp, sbd_devset_t devset,
378 					sbd_dev_stat_t *dsp);
379 
380 static int		sbd_init_devlists(sbd_board_t *sbp);
381 static int		sbd_name_to_idx(char *name);
382 static int		sbd_otype_to_idx(char *otpye);
383 static int		sbd_setup_devlists(dev_info_t *dip, void *arg);
384 static void		sbd_init_mem_devlists(sbd_board_t *sbp);
385 static void		sbd_init_cpu_unit(sbd_board_t *sbp, int unit);
386 static void		sbd_board_discovery(sbd_board_t *sbp);
387 static void		sbd_board_init(sbd_board_t *sbp,
388 				sbd_softstate_t *softsp,
389 				int bd, dev_info_t *dip, int wnode);
390 static void		sbd_board_destroy(sbd_board_t *sbp);
391 static int		sbd_check_unit_attached(sbd_board_t *sbp,
392 				dev_info_t *dip, int unit,
393 				sbd_comp_type_t nodetype, sbderror_t *ep);
394 
395 static sbd_state_t 	rstate_cvt(sbd_istate_t state);
396 
397 /*
398  * Autoconfiguration data structures
399  */
400 
401 extern struct mod_ops mod_miscops;
402 
/* Loadable-module linkage; registered as a miscellaneous module. */
static struct modlmisc modlmisc = {
	&mod_miscops,
	"System Board DR"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};
413 
/* count of live instances set up via sbd_setup_instance() */
static int sbd_instances = 0;

/*
 * dr Global data elements
 */
sbd_global sbd_g;

/*
 * We want to be able to unload the module when we wish to do so, but we don't
 * want anything else to unload it.  Unloading cannot occur until
 * sbd_teardown_instance is called by an explicit IOCTL into the parent node.
 * This support is for debugging purposes and should it be expected to work
 * on the field, it should be enhanced:
 * Currently, there is still a window where sbd_teardow_instance gets called,
 * sbd_prevent_unloading now = 0, the driver doesn't get unloaded, and
 * sbd_setup_instance gets called.  This may cause a panic.
 */
int sbd_prevent_unloading = 1;
432 
433 /*
434  * Driver entry points.
435  */
436 int
437 _init(void)
438 {
439 	int	err;
440 
441 	/*
442 	 * If you need to support multiple nodes (instances), then
443 	 * whatever the maximum number of supported nodes is would
444 	 * need to passed as the third parameter to ddi_soft_state_init().
445 	 * Alternative would be to dynamically fini and re-init the
446 	 * soft state structure each time a node is attached.
447 	 */
448 	err = ddi_soft_state_init((void **)&sbd_g.softsp,
449 		sizeof (sbd_softstate_t), SBD_MAX_INSTANCES);
450 	if (err)
451 		return (err);
452 
453 	if ((err = mod_install(&modlinkage)) != 0) {
454 		ddi_soft_state_fini((void **)&sbd_g.softsp);
455 		return (err);
456 	}
457 
458 	/* Get the array of names from platform helper routine */
459 	sbd_devattr = sbdp_get_devattr();
460 
461 	return (err);
462 }
463 
464 int
465 _fini(void)
466 {
467 	int	err;
468 
469 	if (sbd_prevent_unloading)
470 		return (DDI_FAILURE);
471 
472 	ASSERT(sbd_instances == 0);
473 
474 	if ((err = mod_remove(&modlinkage)) != 0)
475 		return (err);
476 
477 	ddi_soft_state_fini((void **)&sbd_g.softsp);
478 
479 	return (0);
480 }
481 
/* Report module information via the standard modlinkage. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
487 
/*
 * Main DR ioctl entry point.  Validates the command, looks up the
 * soft state for this minor's instance, takes the locks appropriate
 * to the command, and runs it through sbd_pre_op()/sbd_exec_op()/
 * sbd_post_op().  On return, *event is non-zero if a DR event needs
 * to be sent, and the return value is the operation's errno (0 = OK).
 */
int
sbd_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, char *event)
{
	int		rv = 0, instance;
	sbd_handle_t	*hp;
	sbd_softstate_t	*softsp;
	sbd_init_arg_t	init_arg;
	static fn_t	f = "sbd_ioctl";
	int		dr_avail;

	PR_BYP("sbd_ioctl cmd=%x, arg=%lx\n", cmd, arg);

	/* Note: this must also be changed in tandem with sbd_ioctl.h */
	switch (cmd) {
		case SBD_CMD_ASSIGN:
		case SBD_CMD_UNASSIGN:
		case SBD_CMD_POWERON:
		case SBD_CMD_POWEROFF:
		case SBD_CMD_TEST:
		case SBD_CMD_CONNECT:
		case SBD_CMD_CONFIGURE:
		case SBD_CMD_UNCONFIGURE:
		case SBD_CMD_DISCONNECT:
		case SBD_CMD_STATUS:
		case SBD_CMD_GETNCM:
		case SBD_CMD_PASSTHRU:
			break;
		default:
			return (ENOTTY);
	}

	instance = SBD_GET_MINOR2INST(getminor(dev));
	if ((softsp = (sbd_softstate_t *)GET_SOFTC(instance)) == NULL) {
		cmn_err(CE_WARN,
			"sbd:%s:%d: module not yet attached",
			f, instance);
		return (ENXIO);
	}

	init_arg.dev = dev;
	init_arg.cmd = cmd;
	init_arg.mode = mode;
	init_arg.ioargp = (sbd_ioctl_arg_t *)arg;

	/* NOTE(review): hp is dereferenced below without a NULL check - */
	/* presumably sbd_get_handle() cannot fail for a valid command; confirm. */
	hp = sbd_get_handle(dev, softsp, arg, &init_arg);
	/* Check to see if we support dr */
	dr_avail = sbdp_dr_avail();
	if (dr_avail != 1) {
		/* only query-style commands work when DR is unavailable */
		switch (hp->h_cmd) {
			case SBD_CMD_STATUS:
			case SBD_CMD_GETNCM:
			case SBD_CMD_PASSTHRU:
				break;
			default:
				sbd_release_handle(hp);
				return (ENOTSUP);
		}
	}

	switch (hp->h_cmd) {
	case SBD_CMD_STATUS:
	case SBD_CMD_GETNCM:
	case SBD_CMD_PASSTHRU:
		/* no locks needed for these commands */
		break;

	default:
		rw_enter(&sbd_grwlock, RW_WRITER);
		mutex_enter(&SBDH2BD(hp->h_sbd)->sb_mutex);

		/*
		 * If we're dealing with memory at all, then we have
		 * to keep the "exclusive" global lock held.  This is
		 * necessary since we will probably need to look at
		 * multiple board structs.  Otherwise, we only have
		 * to deal with the board in question and so can drop
		 * the global lock to "shared".
		 */
		/*
		 * XXX This is incorrect. The sh_devset has not
		 * been set at this point - it is 0.
		 */
		rv = DEVSET_IN_SET(HD2MACHHD(hp)->sh_devset,
		    SBD_COMP_MEM, DEVSET_ANYUNIT);
		if (rv == 0)
			rw_downgrade(&sbd_grwlock);
		break;
	}

	/*
	 * Before any operations happen, reset the event flag
	 */
	send_event = 0;

	if (sbd_pre_op(hp) == 0) {
		sbd_exec_op(hp);
		sbd_post_op(hp);
	}

	rv = SBD_GET_ERRNO(SBD_HD2ERR(hp));
	*event = send_event;

	/* undo locking, if any, done before sbd_pre_op */
	switch (hp->h_cmd) {
	case SBD_CMD_STATUS:
	case SBD_CMD_GETNCM:
	case SBD_CMD_PASSTHRU:
		break;
	default:
		mutex_exit(&SBDH2BD(hp->h_sbd)->sb_mutex);
		rw_exit(&sbd_grwlock);
	}

	sbd_release_handle(hp);

	return (rv);
}
605 
/*
 * Allocate and initialize the soft state and per-board structures for
 * one driver instance covering max_boards boards rooted at 'root'.
 * Returns DDI_SUCCESS, or DDI_FAILURE with all partial state undone.
 */
int
sbd_setup_instance(int instance, dev_info_t *root, int max_boards, int wnode,
		caddr_t sbdp_arg)
{
	int 		b;
	sbd_softstate_t	*softsp;
	sbd_board_t	*sbd_boardlist;
	static fn_t	f = "sbd_setup_instance";

	sbd_instances++;

	if (sbdp_setup_instance(sbdp_arg) != DDI_SUCCESS) {
		sbd_instances--;
		return (DDI_FAILURE);
	}

	if (ALLOC_SOFTC(instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
			"sbd:%s:%d: failed to alloc soft-state",
			f, instance);
		/* soft state was never allocated, so only undo the platform setup */
		sbdp_teardown_instance(sbdp_arg);
		sbd_instances--;
		return (DDI_FAILURE);
	}

	softsp = (sbd_softstate_t *)GET_SOFTC(instance);

	if (softsp == NULL) {
		cmn_err(CE_WARN,
			"sbd:%s:%d: failed to get soft-state instance",
			f, instance);
		goto exit;
	}

	sbd_boardlist = GETSTRUCT(sbd_board_t, max_boards);
	if (sbd_boardlist == NULL) {
		cmn_err(CE_WARN,
			"sbd:%s: failed to alloc board list %d",
			f, instance);
		goto exit;
	}


	softsp->sbd_boardlist  = (void *)sbd_boardlist;
	softsp->max_boards  = max_boards;
	softsp->wnode  = wnode;


	/* initialize every board structure covered by this instance */
	for (b = 0; b < max_boards; b++) {
		sbd_board_init(sbd_boardlist++, softsp, b, root, wnode);
	}


	return (DDI_SUCCESS);
exit:
	/* common failure path: undo platform setup and soft-state alloc */
	(void) sbdp_teardown_instance(sbdp_arg);
	FREE_SOFTC(instance);
	sbd_instances--;
	return (DDI_FAILURE);
}
666 
/*
 * Undo sbd_setup_instance(): tear down the platform layer, free the
 * per-board structures and the soft state, and clear
 * sbd_prevent_unloading so the module may subsequently be unloaded.
 */
int
sbd_teardown_instance(int instance, caddr_t sbdp_arg)
{
	sbd_softstate_t	*softsp;

	if (sbdp_teardown_instance(sbdp_arg) != DDI_SUCCESS)
		return (DDI_FAILURE);

	softsp = (sbd_softstate_t *)GET_SOFTC(instance);
	if (softsp == NULL) {
		return (DDI_FAILURE);
	}

	(void) sbd_dealloc_instance((sbd_board_t *)softsp->sbd_boardlist,
		softsp->max_boards);

	FREE_SOFTC(instance);
	sbd_instances--;
	/* allow _fini() to succeed from now on */
	sbd_prevent_unloading = 0;

	return (DDI_SUCCESS);
}
689 
/*
 * Dispatch the DR command carried in the handle to its implementation
 * routine.  Errors are recorded in the handle's error structure
 * rather than returned.
 */
static void
sbd_exec_op(sbd_handle_t *hp)
{
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	static fn_t	f = "sbd_exec_op";

	switch (hp->h_cmd) {
		/* switch-scoped; used only by the UNCONFIGURE case below */
		int	dev_canceled;

	case SBD_CMD_CONNECT:
		if (sbd_probe_board(hp))
			break;

		sbd_connect(hp);
		break;

	case SBD_CMD_CONFIGURE:
		sbd_dev_configure(hp);
		break;

	case SBD_CMD_UNCONFIGURE:
		/* release first; only unconfigure if nothing failed or canceled */
		if (((dev_canceled = sbd_dev_release(hp)) == 0) &&
		    (SBD_GET_ERRNO(SBD_HD2ERR(hp)) == 0 &&
		    SBD_GET_ERR(SBD_HD2ERR(hp)) == 0))
			dev_canceled = sbd_dev_unconfigure(hp);

		if (dev_canceled)
			sbd_cancel(hp);
		break;

	case SBD_CMD_DISCONNECT:
		mutex_enter(&sbp->sb_slock);
		if (sbd_disconnect(hp) == 0)
			(void) sbd_deprobe_board(hp);
		mutex_exit(&sbp->sb_slock);
		break;

	case SBD_CMD_STATUS:
		sbd_status(hp);
		break;

	case SBD_CMD_GETNCM:
		sbd_get_ncm(hp);
		break;

	case SBD_CMD_ASSIGN:
		sbd_assign_board(hp);
		break;

	case SBD_CMD_UNASSIGN:
		sbd_unassign_board(hp);
		break;

	case SBD_CMD_POWEROFF:
		sbd_poweroff_board(hp);
		break;

	case SBD_CMD_POWERON:
		sbd_poweron_board(hp);
		break;

	case SBD_CMD_TEST:
		sbd_test_board(hp);
		break;

	case SBD_CMD_PASSTHRU:
	{
		/* forward the raw ioctl to the platform (sbdp) layer */
		int			rv;
		sbdp_handle_t		*hdp;
		sbderror_t		*ep = SBD_HD2ERR(hp);
		sbdp_ioctl_arg_t	ia, *iap;

		iap = &ia;

		iap->h_dev = hp->h_dev;
		iap->h_cmd = hp->h_cmd;
		iap->h_iap = (intptr_t)hp->h_iap;
		iap->h_mode = hp->h_mode;

		hdp = sbd_get_sbdp_handle(sbp, hp);
		rv = sbdp_ioctl(hdp, iap);
		if (rv != 0) {
			SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
			ep->e_errno = rv;
		}
		sbd_release_sbdp_handle(hdp);
		break;
	}

	default:
		SBD_SET_ERRNO(SBD_HD2ERR(hp), ENOTTY);
		cmn_err(CE_WARN,
			"sbd:%s: unknown command (%d)",
			f, hp->h_cmd);
		break;

	}

	if (SBD_GET_ERR(SBD_HD2ERR(hp)))
		PR_BYP("XXX e_code=%d", SBD_GET_ERR(SBD_HD2ERR(hp)));
	if (SBD_GET_ERRNO(SBD_HD2ERR(hp)))
		PR_BYP("XXX errno=%d", SBD_GET_ERRNO(SBD_HD2ERR(hp)));
}
793 
/*
 * Determine the component type (MEM, CPU, IO) of the given dip.
 * If the board is connected/configured, search the board's cached
 * devlists; otherwise fall back to matching the node's OBP
 * "device_type" property against the platform attribute table.
 * Returns SBD_COMP_UNKNOWN if no match is found.
 */
sbd_comp_type_t
sbd_get_devtype(sbd_handle_t *hp, dev_info_t *dip)
{
	sbd_board_t	*sbp = hp ? SBDH2BD(hp->h_sbd) : NULL;
	sbd_istate_t	bstate;
	dev_info_t	**devlist;
	int		i;
	char		device[OBP_MAXDRVNAME];
	int		devicelen;

	devicelen = sizeof (device);

	bstate = sbp ? SBD_BOARD_STATE(sbp) : SBD_STATE_EMPTY;
	/*
	 * if the board's connected or configured, search the
	 * devlists.  Otherwise check the device tree
	 */
	switch (bstate) {

	case SBD_STATE_CONNECTED:
	case SBD_STATE_CONFIGURED:
	case SBD_STATE_UNREFERENCED:
	case SBD_STATE_UNCONFIGURED:
		devlist = sbp->sb_devlist[NIX(SBD_COMP_MEM)];
		for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++)
			if (devlist[i] == dip)
				return (SBD_COMP_MEM);

		devlist = sbp->sb_devlist[NIX(SBD_COMP_CPU)];
		for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++)
			if (devlist[i] == dip)
				return (SBD_COMP_CPU);

		devlist = sbp->sb_devlist[NIX(SBD_COMP_IO)];
		for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++)
			if (devlist[i] == dip)
				return (SBD_COMP_IO);
		/*FALLTHROUGH*/

	default:
		/* look up the OBP device_type property and match the table */
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    OBP_DEVICETYPE,  (caddr_t)device, &devicelen))
			break;

		for (i = 0; SBD_COMP(i) != SBD_COMP_UNKNOWN; i++) {
			if (strcmp(device, SBD_OTYPE(i)) != 0)
				continue;
			return (SBD_COMP(i));
		}

		break;
	}
	return (SBD_COMP_UNKNOWN);
}
848 
/*
 * Attach all devices on the board's attach devlists, pass by pass,
 * recording per-device errors in the handle.  An ESBD_CPUONLINE
 * error from memory attach aborts the whole operation immediately.
 */
static void
sbd_dev_configure(sbd_handle_t *hp)
{
	int		n, unit;
	int32_t		pass, devnum;
	dev_info_t	*dip;
	sbd_devlist_t	*devlist;
	sbdp_handle_t	*hdp;
	sbd_comp_type_t	nodetype;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);

	pass = 1;

	hdp = sbd_get_sbdp_handle(sbp, hp);
	while ((devlist = sbd_get_attach_devlist(hp, &devnum, pass)) != NULL) {
		int	err;

		err = sbd_pre_attach_devlist(hp, devlist, devnum);
		if (err < 0) {
			break;
		} else if (err > 0) {
			/* positive return: skip this pass, go to the next */
			pass++;
			continue;
		}

		for (n = 0; n < devnum; n++) {
			sbderror_t	*ep;

			ep = &devlist[n].dv_error;
			SBD_SET_ERRNO(ep, 0);
			SBD_SET_ERR(ep, 0);
			dip = devlist[n].dv_dip;
			nodetype = sbd_get_devtype(hp, dip);

			unit = sbdp_get_unit_num(hdp, dip);
			if (unit < 0) {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}

			switch (nodetype) {
			case SBD_COMP_MEM:
				sbd_attach_mem(hp, ep);
				/* ESBD_CPUONLINE aborts the entire configure */
				if (SBD_GET_ERR(ep) == ESBD_CPUONLINE) {
					FREESTRUCT(devlist, sbd_devlist_t,
						MAX_MEM_UNITS_PER_BOARD);
					sbd_release_sbdp_handle(hdp);
					return;
				}
				break;

			case SBD_COMP_CPU:
				sbd_attach_cpu(hp, ep, dip, unit);
				break;

			case SBD_COMP_IO:
				sbd_attach_io(hp, ep, dip, unit);
				break;

			default:
				SBD_SET_ERRNO(ep, ENOTTY);
				break;
			}

			if (sbd_set_err_in_hdl(hp, ep) == 0)
				continue;
		}

		err = sbd_post_attach_devlist(hp, devlist, devnum);
		if (err < 0)
			break;

		pass++;
	}
	sbd_release_sbdp_handle(hdp);
}
925 
926 static int
927 sbd_dev_release(sbd_handle_t *hp)
928 {
929 	int		n, unit;
930 	int32_t		pass, devnum;
931 	dev_info_t	*dip;
932 	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
933 	sbdp_handle_t	*hdp;
934 	sbd_devlist_t	*devlist;
935 	sbd_comp_type_t	nodetype;
936 	int		err = 0;
937 	int		dev_canceled;
938 
939 	pass = 1;
940 	hdp = sbd_get_sbdp_handle(sbp, hp);
941 
942 	sbp->sb_busy = 1;
943 	while ((devlist =
944 		sbd_get_release_devlist(hp, &devnum, pass)) != NULL) {
945 
946 		err = sbd_pre_release_devlist(hp, devlist, devnum);
947 		if (err < 0) {
948 			dev_canceled = 1;
949 			break;
950 		} else if (err > 0) {
951 			pass++;
952 			continue;
953 		}
954 
955 		dev_canceled = 0;
956 		for (n = 0; n < devnum; n++) {
957 			dip = devlist[n].dv_dip;
958 			nodetype = sbd_get_devtype(hp, dip);
959 
960 			unit = sbdp_get_unit_num(hdp, dip);
961 			if (unit < 0) {
962 				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
963 				break;
964 			}
965 
966 			if ((nodetype == SBD_COMP_MEM) &&
967 			    sbd_release_mem(hp, dip, unit)) {
968 
969 				dev_canceled++;
970 			}
971 
972 			sbd_release_done(hp, nodetype, dip);
973 		}
974 
975 		err = sbd_post_release_devlist(hp, devlist, devnum);
976 
977 		if (err < 0)
978 			break;
979 
980 		if (dev_canceled)
981 			break;
982 
983 		pass++;
984 	}
985 	sbp->sb_busy = 0;
986 
987 	sbd_release_sbdp_handle(hdp);
988 
989 	if (dev_canceled)
990 		return (dev_canceled);
991 
992 	return (err);
993 }
994 
/*
 * Detach all devices on the board's detach devlists, pass by pass.
 * Returns non-zero if the operation was canceled; only memory
 * detach (or a pre-detach failure on a memory node) can cancel.
 */
static int
sbd_dev_unconfigure(sbd_handle_t *hp)
{
	int		n, unit;
	int32_t		pass, devnum;
	dev_info_t	*dip;
	sbd_devlist_t	*devlist;
	sbdp_handle_t	*hdp;
	sbd_comp_type_t	nodetype;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	int		dev_canceled = 0;
	static fn_t	f = "sbd_dev_unconfigure";

	PR_ALL("%s...\n", f);

	pass = 1;
	hdp = sbd_get_sbdp_handle(sbp, hp);

	while ((devlist = sbd_get_detach_devlist(hp, &devnum, pass)) != NULL) {
		int	err, detach_err = 0;

		err = sbd_pre_detach_devlist(hp, devlist, devnum);
		if (err) {
			/*
			 * Only cancel the operation for memory in
			 * case of failure.
			 */
			nodetype = sbd_get_devtype(hp, devlist->dv_dip);
			if (nodetype == SBD_COMP_MEM)
				dev_canceled = 1;
			(void) sbd_post_detach_devlist(hp, devlist, devnum);
			break;
		}

		for (n = 0; n < devnum; n++) {
			sbderror_t	*ep;

			ep = &devlist[n].dv_error;
			SBD_SET_ERRNO(ep, 0);
			SBD_SET_ERR(ep, 0);
			dip = devlist[n].dv_dip;
			nodetype = sbd_get_devtype(hp, dip);

			unit = sbdp_get_unit_num(hdp, dip);
			if (unit < 0) {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}

			switch (nodetype) {
			case SBD_COMP_MEM:
				dev_canceled = sbd_detach_mem(hp, ep, unit);
				break;

			case SBD_COMP_CPU:
				sbd_detach_cpu(hp, ep, dip, unit);
				break;

			case SBD_COMP_IO:
				sbd_detach_io(hp, ep, dip, unit);
				break;

			default:
				SBD_SET_ERRNO(ep, ENOTTY);
				break;
			}

			/* propagate the per-device error into the handle */
			if (sbd_set_err_in_hdl(hp, ep) == 0) {
				detach_err = -1;
				break;
			}

		}
		err = sbd_post_detach_devlist(hp, devlist, devnum);
		if ((err < 0) || (detach_err < 0))
			break;

		pass++;
	}

	sbd_release_sbdp_handle(hdp);
	return (dev_canceled);
}
1078 
1079 int
1080 sbd_errno2ecode(int error)
1081 {
1082 	int	rv;
1083 
1084 	switch (error) {
1085 	case EBUSY:
1086 		rv = ESBD_BUSY;
1087 		break;
1088 	case EINVAL:
1089 		rv = ESBD_INVAL;
1090 		break;
1091 	case EALREADY:
1092 		rv = ESBD_ALREADY;
1093 		break;
1094 	case ENODEV:
1095 		rv = ESBD_NODEV;
1096 		break;
1097 	case ENOMEM:
1098 		rv = ESBD_NOMEM;
1099 		break;
1100 	default:
1101 		rv = ESBD_INVAL;
1102 	}
1103 
1104 	return (rv);
1105 }
1106 
/*
 * Configure the CPU corresponding to dip into the system and, on
 * success, record its device path in the board's sb_cpupath array.
 * Errors are reported through ep.  Caller must hold cpu_lock.
 */
static void
sbd_attach_cpu(sbd_handle_t *hp, sbderror_t *ep, dev_info_t *dip, int unit)
{
	int rv = 0;
	processorid_t	cpuid;
	sbdp_handle_t	*hdp;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	static fn_t	f = "sbd_attach_cpu";
	char		*pathname;

	ASSERT(MUTEX_HELD(&cpu_lock));

	ASSERT(dip);

	/*
	 * With the introduction of CMP devices, the CPU nodes
	 * are no longer directly under the top node. Since
	 * there is no plan to support CPU attach in the near
	 * future, a branch configure operation is not required.
	 */

	hdp = sbd_get_sbdp_handle(sbp, hp);
	/* a negative cpuid indicates a platform-layer error */
	cpuid = sbdp_get_cpuid(hdp, dip);
	if (cpuid < 0) {
		rv = -1;
		SBD_GET_PERR(hdp->h_err, ep);
	} else if ((rv = cpu_configure(cpuid)) != 0) {
		cmn_err(CE_WARN,
			"sbd:%s: cpu_configure for cpuid %d failed",
			f, cpuid);
		SBD_SET_ERR(ep, sbd_errno2ecode(rv));
	}
	sbd_release_sbdp_handle(hdp);

	/* on success, cache this CPU's device path for later error reporting */
	if (rv == 0) {
		ASSERT(sbp->sb_cpupath[unit] != NULL);
		pathname = sbp->sb_cpupath[unit];
		(void) ddi_pathname(dip, pathname);
	}
}
1147 
1148 /*
1149  *	translate errno
1150  */
1151 void
1152 sbd_errno_decode(int err, sbderror_t *ep, dev_info_t *dip)
1153 {
1154 	ASSERT(err != 0);
1155 
1156 	switch (err) {
1157 	case ENOMEM:
1158 		SBD_SET_ERR(ep, ESBD_NOMEM);
1159 		break;
1160 
1161 	case EBUSY:
1162 		SBD_SET_ERR(ep, ESBD_BUSY);
1163 		break;
1164 
1165 	case EIO:
1166 		SBD_SET_ERR(ep, ESBD_IO);
1167 		break;
1168 
1169 	case ENXIO:
1170 		SBD_SET_ERR(ep, ESBD_NODEV);
1171 		break;
1172 
1173 	case EINVAL:
1174 		SBD_SET_ERR(ep, ESBD_INVAL);
1175 		break;
1176 
1177 	case EFAULT:
1178 	default:
1179 		SBD_SET_ERR(ep, ESBD_FAULT);
1180 		break;
1181 	}
1182 
1183 	(void) ddi_pathname(dip, SBD_GET_ERRSTR(ep));
1184 }
1185 
/*
 * Unconfigure the CPU corresponding to dip from the system.
 * Errors are reported through ep.  Caller must hold cpu_lock.
 */
static void
sbd_detach_cpu(sbd_handle_t *hp, sbderror_t *ep, dev_info_t *dip, int unit)
{
	processorid_t	cpuid;
	int		rv;
	sbdp_handle_t	*hdp;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbd_error_t	*spe;
	static fn_t	f = "sbd_detach_cpu";

	ASSERT(MUTEX_HELD(&cpu_lock));

	ASSERT(dip);
	hdp = sbd_get_sbdp_handle(sbp, hp);
	spe = hdp->h_err;
	/* a negative cpuid indicates a platform-layer error */
	cpuid = sbdp_get_cpuid(hdp, dip);
	if (cpuid < 0) {
		SBD_GET_PERR(spe, ep);
		sbd_release_sbdp_handle(hdp);
		return;
	}

	if ((rv = cpu_unconfigure(cpuid)) != 0) {
		SBD_SET_ERR(ep, sbd_errno2ecode(rv));
		/* attach cached the cpu path; reuse it for the error string */
		SBD_SET_ERRSTR(ep, sbp->sb_cpupath[unit]);
		cmn_err(CE_WARN,
			"sbd:%s: cpu_unconfigure for cpu %d failed",
			f, cpuid);
		sbd_release_sbdp_handle(hdp);
		return;
	}
	sbd_release_sbdp_handle(hdp);

	/*
	 * Since CPU nodes are no longer configured in CPU
	 * attach, the corresponding branch unconfigure
	 * operation that would be performed here is also
	 * no longer required.
	 */
}
1226 
1227 
/*
 * Detach all memory on the board owning the handle, then unconfigure
 * the memory-controller devinfo branches.  Returns -1 if the memory
 * detach itself fails; returns 0 otherwise, recording any branch
 * unconfigure error in *ep without failing the call.
 */
int
sbd_detach_mem(sbd_handle_t *hp, sbderror_t *ep, int unit)
{
	sbd_mem_unit_t	*mp;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	int		i, rv;
	static fn_t	f = "sbd_detach_mem";

	mp = SBD_GET_BOARD_MEMUNIT(sbp, unit);

	/* First detach the memory itself; bail out on failure. */
	if (sbd_detach_memory(hp, ep, mp, unit)) {
		cmn_err(CE_WARN, "%s: detach fail", f);
		return (-1);
	}

	/*
	 * Now detach mem devinfo nodes with status lock held.
	 */
	for (i = 0; i < SBD_NUM_MC_PER_BOARD; i++) {
		dev_info_t	*fdip = NULL;

		/* Not every MC slot is populated. */
		if (mp->sbm_dip[i] == NULL)
			continue;
		ASSERT(e_ddi_branch_held(mp->sbm_dip[i]));
		mutex_enter(&sbp->sb_slock);
		rv = e_ddi_branch_unconfigure(mp->sbm_dip[i], &fdip,
		    DEVI_BRANCH_EVENT);
		mutex_exit(&sbp->sb_slock);
		if (rv) {
			/*
			 * If non-NULL, fdip is returned held and must be
			 * released.
			 */
			if (fdip != NULL) {
				sbd_errno_decode(rv, ep, fdip);
				ddi_release_devi(fdip);
			} else {
				sbd_errno_decode(rv, ep, mp->sbm_dip[i]);
			}
		}
	}

	return (0);
}
1272 
1273 /* start beginning of sbd.c */
1274 
1275 /*
1276  * MDR          memory support - somewhat disabled for now.
1277  * UNSAFE       unsafe driver code - I don't think we want this.
1278  *              need to check.
1279  * DEVNODE      This driver creates attachment points for individual
1280  *              components as well as boards.  We only need board
1281  *              support.
1282  * DEV2DEVSET   Put only present devices in devset.
1283  */
1284 
1285 
1286 static sbd_state_t
1287 rstate_cvt(sbd_istate_t state)
1288 {
1289 	sbd_state_t cs;
1290 
1291 	switch (state) {
1292 	case SBD_STATE_EMPTY:
1293 		cs = SBD_STAT_EMPTY;
1294 		break;
1295 	case SBD_STATE_OCCUPIED:
1296 	case SBD_STATE_FATAL:
1297 		cs = SBD_STAT_DISCONNECTED;
1298 		break;
1299 	case SBD_STATE_CONFIGURED:
1300 	case SBD_STATE_CONNECTED:
1301 	case SBD_STATE_UNCONFIGURED:
1302 	case SBD_STATE_PARTIAL:
1303 	case SBD_STATE_RELEASE:
1304 	case SBD_STATE_UNREFERENCED:
1305 		cs = SBD_STAT_CONNECTED;
1306 		break;
1307 	default:
1308 		cs = SBD_STAT_NONE;
1309 		break;
1310 	}
1311 
1312 	return (cs);
1313 }
1314 
1315 
1316 sbd_state_t
1317 ostate_cvt(sbd_istate_t state)
1318 {
1319 	sbd_state_t cs;
1320 
1321 	switch (state) {
1322 	case SBD_STATE_EMPTY:
1323 	case SBD_STATE_OCCUPIED:
1324 	case SBD_STATE_UNCONFIGURED:
1325 	case SBD_STATE_CONNECTED:
1326 	case SBD_STATE_FATAL:
1327 		cs = SBD_STAT_UNCONFIGURED;
1328 		break;
1329 	case SBD_STATE_PARTIAL:
1330 	case SBD_STATE_CONFIGURED:
1331 	case SBD_STATE_RELEASE:
1332 	case SBD_STATE_UNREFERENCED:
1333 		cs = SBD_STAT_CONFIGURED;
1334 		break;
1335 	default:
1336 		cs = SBD_STAT_NONE;
1337 		break;
1338 	}
1339 
1340 	return (cs);
1341 }
1342 
1343 int
1344 sbd_dealloc_instance(sbd_board_t *sbp, int max_boards)
1345 {
1346 	int		b;
1347 	sbd_board_t    *list = sbp;
1348 	static fn_t	f = "sbd_dealloc_instance";
1349 
1350 	PR_ALL("%s...\n", f);
1351 
1352 	if (sbp == NULL) {
1353 		return (-1);
1354 	}
1355 
1356 	for (b = 0; b < max_boards; b++) {
1357 		sbd_board_destroy(sbp++);
1358 	}
1359 
1360 	FREESTRUCT(list, sbd_board_t, max_boards);
1361 
1362 	return (0);
1363 }
1364 
1365 static sbd_devset_t
1366 sbd_dev2devset(sbd_comp_id_t *cid)
1367 {
1368 	static fn_t	f = "sbd_dev2devset";
1369 
1370 	sbd_devset_t	devset;
1371 	int		unit = cid->c_unit;
1372 
1373 	switch (cid->c_type) {
1374 		case SBD_COMP_NONE:
1375 			devset =  DEVSET(SBD_COMP_CPU, DEVSET_ANYUNIT);
1376 			devset |= DEVSET(SBD_COMP_MEM, DEVSET_ANYUNIT);
1377 			devset |= DEVSET(SBD_COMP_IO,  DEVSET_ANYUNIT);
1378 			break;
1379 
1380 		case SBD_COMP_CPU:
1381 			if ((unit > MAX_CPU_UNITS_PER_BOARD) || (unit < 0)) {
1382 				PR_ALL("%s: invalid cpu unit# = %d",
1383 					f, unit);
1384 				devset = 0;
1385 			} else
1386 				/*
1387 				 * Generate a devset that includes all the
1388 				 * cores of a CMP device. If this is not a
1389 				 * CMP, the extra cores will be eliminated
1390 				 * later since they are not present. This is
1391 				 * also true for CMP devices that do not have
1392 				 * all cores active.
1393 				 */
1394 				devset = DEVSET(SBD_COMP_CMP, unit);
1395 
1396 			break;
1397 
1398 		case SBD_COMP_MEM:
1399 
1400 			if ((unit > MAX_MEM_UNITS_PER_BOARD) || (unit < 0)) {
1401 #ifdef XXX_jeffco
1402 				PR_ALL("%s: invalid mem unit# = %d",
1403 					f, unit);
1404 				devset = 0;
1405 #endif
1406 				devset = DEVSET(cid->c_type, 0);
1407 				PR_ALL("%s: adjusted MEM devset = 0x%x\n",
1408 					f, devset);
1409 			} else
1410 				devset = DEVSET(cid->c_type, unit);
1411 			break;
1412 
1413 		case SBD_COMP_IO:
1414 			if ((unit > MAX_IO_UNITS_PER_BOARD) || (unit < 0)) {
1415 				PR_ALL("%s: invalid io unit# = %d",
1416 					f, unit);
1417 				devset = 0;
1418 			} else
1419 				devset = DEVSET(cid->c_type, unit);
1420 
1421 			break;
1422 
1423 		default:
1424 		case SBD_COMP_UNKNOWN:
1425 			devset = 0;
1426 			break;
1427 	}
1428 
1429 	return (devset);
1430 }
1431 
1432 /*
1433  * Simple mutex for covering handle list ops as it is only
1434  * used "infrequently". No need to add another mutex to the sbd_board_t.
1435  */
1436 static kmutex_t sbd_handle_list_mutex;
1437 
1438 static sbd_handle_t *
1439 sbd_get_handle(dev_t dev, sbd_softstate_t *softsp, intptr_t arg,
1440 	sbd_init_arg_t *iap)
1441 {
1442 	sbd_handle_t		*hp;
1443 	sbderror_t		*ep;
1444 	sbd_priv_handle_t	*shp;
1445 	sbd_board_t		*sbp = softsp->sbd_boardlist;
1446 	int			board;
1447 
1448 	board = SBDGETSLOT(dev);
1449 	ASSERT(board < softsp->max_boards);
1450 	sbp += board;
1451 
1452 	/*
1453 	 * Brand-new handle.
1454 	 */
1455 	shp = kmem_zalloc(sizeof (sbd_priv_handle_t), KM_SLEEP);
1456 	shp->sh_arg = (void *)arg;
1457 
1458 	hp = MACHHD2HD(shp);
1459 
1460 	ep = &shp->sh_err;
1461 
1462 	hp->h_err = ep;
1463 	hp->h_sbd = (void *) sbp;
1464 	hp->h_dev = iap->dev;
1465 	hp->h_cmd = iap->cmd;
1466 	hp->h_mode = iap->mode;
1467 	sbd_init_err(ep);
1468 
1469 	mutex_enter(&sbd_handle_list_mutex);
1470 	shp->sh_next = sbp->sb_handle;
1471 	sbp->sb_handle = shp;
1472 	mutex_exit(&sbd_handle_list_mutex);
1473 
1474 	return (hp);
1475 }
1476 
1477 void
1478 sbd_init_err(sbderror_t *ep)
1479 {
1480 	ep->e_errno = 0;
1481 	ep->e_code = 0;
1482 	ep->e_rsc[0] = '\0';
1483 }
1484 
1485 int
1486 sbd_set_err_in_hdl(sbd_handle_t *hp, sbderror_t *ep)
1487 {
1488 	sbderror_t	*hep = SBD_HD2ERR(hp);
1489 
1490 	/*
1491 	 * If there is an error logged already, don't rewrite it
1492 	 */
1493 	if (SBD_GET_ERR(hep) || SBD_GET_ERRNO(hep)) {
1494 		return (0);
1495 	}
1496 
1497 	if (SBD_GET_ERR(ep) || SBD_GET_ERRNO(ep)) {
1498 		SBD_SET_ERR(hep, SBD_GET_ERR(ep));
1499 		SBD_SET_ERRNO(hep, SBD_GET_ERRNO(ep));
1500 		SBD_SET_ERRSTR(hep, SBD_GET_ERRSTR(ep));
1501 		return (0);
1502 	}
1503 
1504 	return (-1);
1505 }
1506 
1507 static void
1508 sbd_release_handle(sbd_handle_t *hp)
1509 {
1510 	sbd_priv_handle_t	*shp, **shpp;
1511 	sbd_board_t		*sbp;
1512 	static fn_t		f = "sbd_release_handle";
1513 
1514 	if (hp == NULL)
1515 		return;
1516 
1517 	sbp = SBDH2BD(hp->h_sbd);
1518 
1519 	shp = HD2MACHHD(hp);
1520 
1521 	mutex_enter(&sbd_handle_list_mutex);
1522 	/*
1523 	 * Locate the handle in the board's reference list.
1524 	 */
1525 	for (shpp = &sbp->sb_handle; (*shpp) && ((*shpp) != shp);
1526 	    shpp = &((*shpp)->sh_next))
1527 		/* empty */;
1528 
1529 	if (*shpp == NULL) {
1530 		cmn_err(CE_PANIC,
1531 			"sbd:%s: handle not found in board %d",
1532 			f, sbp->sb_num);
1533 		/*NOTREACHED*/
1534 	} else {
1535 		*shpp = shp->sh_next;
1536 	}
1537 	mutex_exit(&sbd_handle_list_mutex);
1538 
1539 	if (hp->h_opts.copts != NULL) {
1540 		FREESTRUCT(hp->h_opts.copts, char, hp->h_opts.size);
1541 	}
1542 
1543 	FREESTRUCT(shp, sbd_priv_handle_t, 1);
1544 }
1545 
1546 sbdp_handle_t *
1547 sbd_get_sbdp_handle(sbd_board_t *sbp, sbd_handle_t *hp)
1548 {
1549 	sbdp_handle_t		*hdp;
1550 
1551 	hdp = kmem_zalloc(sizeof (sbdp_handle_t), KM_SLEEP);
1552 	hdp->h_err = kmem_zalloc(sizeof (sbd_error_t), KM_SLEEP);
1553 	if (sbp == NULL) {
1554 		hdp->h_board = -1;
1555 		hdp->h_wnode = -1;
1556 	} else {
1557 		hdp->h_board = sbp->sb_num;
1558 		hdp->h_wnode = sbp->sb_wnode;
1559 	}
1560 
1561 	if (hp == NULL) {
1562 		hdp->h_flags = 0;
1563 		hdp->h_opts = NULL;
1564 	} else {
1565 		hdp->h_flags = SBD_2_SBDP_FLAGS(hp->h_flags);
1566 		hdp->h_opts = &hp->h_opts;
1567 	}
1568 
1569 	return (hdp);
1570 }
1571 
1572 void
1573 sbd_release_sbdp_handle(sbdp_handle_t *hdp)
1574 {
1575 	if (hdp == NULL)
1576 		return;
1577 
1578 	kmem_free(hdp->h_err, sizeof (sbd_error_t));
1579 	kmem_free(hdp, sizeof (sbdp_handle_t));
1580 }
1581 
1582 void
1583 sbd_reset_error_sbdph(sbdp_handle_t *hdp)
1584 {
1585 	if ((hdp != NULL) && (hdp->h_err != NULL)) {
1586 		bzero(hdp->h_err, sizeof (sbd_error_t));
1587 	}
1588 }
1589 
/*
 * Copy the user's ioctl argument into *cmdp, converting from the
 * 32-bit layout when the caller is an ILP32 process.  For non-STATUS
 * commands with c_len > 0, also copy in the platform-specific option
 * string into hp->h_opts (freed later by sbd_release_handle()).
 * Returns 0 on success or an errno (EINVAL/EFAULT).
 */
static int
sbd_copyin_ioarg(sbd_handle_t *hp, int mode, int cmd, sbd_cmd_t *cmdp,
	sbd_ioctl_arg_t *iap)
{
	static fn_t	f = "sbd_copyin_ioarg";

	if (iap == NULL)
		return (EINVAL);

	bzero((caddr_t)cmdp, sizeof (sbd_cmd_t));

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		sbd_cmd32_t	scmd32;

		bzero((caddr_t)&scmd32, sizeof (sbd_cmd32_t));

		if (ddi_copyin((void *)iap, (void *)&scmd32,
				sizeof (sbd_cmd32_t), mode)) {
			cmn_err(CE_WARN,
				"sbd:%s: (32bit) failed to copyin "
					"sbdcmd-struct", f);
			return (EFAULT);
		}
		/* Widen the 32-bit command into the native sbd_cmd_t. */
		cmdp->cmd_cm.c_id.c_type = scmd32.cmd_cm.c_id.c_type;
		cmdp->cmd_cm.c_id.c_unit = scmd32.cmd_cm.c_id.c_unit;
		bcopy(&scmd32.cmd_cm.c_id.c_name[0],
			&cmdp->cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
		cmdp->cmd_cm.c_flags = scmd32.cmd_cm.c_flags;
		cmdp->cmd_cm.c_len = scmd32.cmd_cm.c_len;
		cmdp->cmd_cm.c_opts = (caddr_t)(uintptr_t)scmd32.cmd_cm.c_opts;

		if (cmd == SBD_CMD_PASSTHRU) {
			PR_BYP("passthru copyin: iap=%p, sz=%ld", (void *)iap,
				sizeof (sbd_cmd32_t));
			PR_BYP("passthru copyin: c_opts=%x, c_len=%d",
				scmd32.cmd_cm.c_opts,
				scmd32.cmd_cm.c_len);
		}

		/* Only the STATUS command carries extra fields to widen. */
		switch (cmd) {
		case SBD_CMD_STATUS:
			cmdp->cmd_stat.s_nbytes = scmd32.cmd_stat.s_nbytes;
			cmdp->cmd_stat.s_statp =
				(caddr_t)(uintptr_t)scmd32.cmd_stat.s_statp;
			break;
		default:
			break;

		}
	} else
#endif /* _MULTI_DATAMODEL */
	if (ddi_copyin((void *)iap, (void *)cmdp,
			sizeof (sbd_cmd_t), mode) != 0) {
		cmn_err(CE_WARN,
			"sbd:%s: failed to copyin sbd cmd_t struct", f);
		return (EFAULT);
	}
	/*
	 * A user may set platform specific options so we need to
	 * copy them in
	 */
	if ((cmd != SBD_CMD_STATUS) && ((hp->h_opts.size = cmdp->cmd_cm.c_len)
	    > 0)) {
		hp->h_opts.size += 1;	/* For null termination of string. */
		hp->h_opts.copts = GETSTRUCT(char, hp->h_opts.size);
		if (ddi_copyin((void *)cmdp->cmd_cm.c_opts,
		    (void *)hp->h_opts.copts,
		    cmdp->cmd_cm.c_len, hp->h_mode) != 0) {
			/* copts is freed in sbd_release_handle(). */
			cmn_err(CE_WARN,
			    "sbd:%s: failed to copyin options", f);
			return (EFAULT);
		}
	}

	return (0);
}
1668 
/*
 * Copy the (possibly updated) command structure back out to the user,
 * narrowing to the 32-bit layout for ILP32 callers.  Only GETNCM has
 * a command-specific result field to narrow.  Returns 0 on success or
 * an errno (EINVAL/EFAULT).
 */
static int
sbd_copyout_ioarg(int mode, int cmd, sbd_cmd_t *scp, sbd_ioctl_arg_t *iap)
{
	static fn_t	f = "sbd_copyout_ioarg";

	if ((iap == NULL) || (scp == NULL))
		return (EINVAL);

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		sbd_cmd32_t	scmd32;

		/* Narrow the common fields into the 32-bit layout. */
		scmd32.cmd_cm.c_id.c_type = scp->cmd_cm.c_id.c_type;
		scmd32.cmd_cm.c_id.c_unit = scp->cmd_cm.c_id.c_unit;
		bcopy(scp->cmd_cm.c_id.c_name,
			scmd32.cmd_cm.c_id.c_name, OBP_MAXPROPNAME);

		scmd32.cmd_cm.c_flags = scp->cmd_cm.c_flags;

		switch (cmd) {
		case SBD_CMD_GETNCM:
			scmd32.cmd_getncm.g_ncm = scp->cmd_getncm.g_ncm;
			break;
		default:
			break;
		}

		if (ddi_copyout((void *)&scmd32, (void *)iap,
				sizeof (sbd_cmd32_t), mode)) {
			cmn_err(CE_WARN,
				"sbd:%s: (32bit) failed to copyout "
					"sbdcmd struct", f);
			return (EFAULT);
		}
	} else
#endif /* _MULTI_DATAMODEL */
	if (ddi_copyout((void *)scp, (void *)iap,
			sizeof (sbd_cmd_t), mode) != 0) {
		cmn_err(CE_WARN,
			"sbd:%s: failed to copyout sbdcmd struct", f);
		return (EFAULT);
	}

	return (0);
}
1714 
/*
 * Copy the error portion of the ioctl argument back out to the user
 * buffer 'arg', using the 32-bit error layout for ILP32 callers.
 * Returns 0 on success or EFAULT.
 */
static int
sbd_copyout_errs(int mode, sbd_ioctl_arg_t *iap, void *arg)
{
	static fn_t	f = "sbd_copyout_errs";
	sbd_ioctl_arg_t	*uap;

	uap = (sbd_ioctl_arg_t *)arg;

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		sbd_error32_t err32;
		sbd_ioctl_arg32_t *uap32;

		uap32 = (sbd_ioctl_arg32_t *)arg;

		/* Narrow the error code and resource string. */
		err32.e_code = iap->ie_code;
		(void) strcpy(err32.e_rsc, iap->ie_rsc);

		if (ddi_copyout((void *)&err32, (void *)&uap32->i_err,
				sizeof (sbd_error32_t), mode)) {
			cmn_err(CE_WARN,
				"sbd:%s: failed to copyout ioctl32 errs",
				f);
			return (EFAULT);
		}
	} else
#endif /* _MULTI_DATAMODEL */
	if (ddi_copyout((void *)&iap->i_err, (void *)&uap->i_err,
			sizeof (sbd_error_t), mode) != 0) {
		cmn_err(CE_WARN,
			"sbd:%s: failed to copyout ioctl errs", f);
		return (EFAULT);
	}

	return (0);
}
1751 
1752 /*
1753  * State transition policy is that if at least one
1754  * device cannot make the transition, then none of
1755  * the requested devices are allowed to transition.
1756  *
1757  * Returns the state that is in error, if any.
1758  */
static int
sbd_check_transition(sbd_board_t *sbp, sbd_devset_t *devsetp,
			struct sbd_state_trans *transp)
{
	int	s, ut;
	int	state_err = 0;
	sbd_devset_t	devset;
	static fn_t	f = "sbd_check_transition";

	devset = *devsetp;

	if (!devset) {
		/*
		 * Transition does not deal with any components.
		 * This is the case for addboard/deleteboard.
		 */
		PR_ALL("%s: no devs: requested devset = 0x%x,"
			" final devset = 0x%x\n",
			f, (uint_t)*devsetp, (uint_t)devset);

		return (0);
	}

	/*
	 * For each device class, drop from the devset every unit whose
	 * current state does not permit this transition; remember the
	 * first offending state in state_err.
	 */
	if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
		for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
			if (DEVSET_IN_SET(devset, SBD_COMP_MEM, ut) == 0)
				continue;
			s = (int)SBD_DEVICE_STATE(sbp, SBD_COMP_MEM, ut);
			if (transp->x_op[s].x_rv) {
				if (!state_err)
					state_err = s;
				DEVSET_DEL(devset, SBD_COMP_MEM, ut);
			}
		}
	}

	if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
		for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
			if (DEVSET_IN_SET(devset, SBD_COMP_CPU, ut) == 0)
				continue;
			s = (int)SBD_DEVICE_STATE(sbp, SBD_COMP_CPU, ut);
			if (transp->x_op[s].x_rv) {
				if (!state_err)
					state_err = s;
				DEVSET_DEL(devset, SBD_COMP_CPU, ut);
			}
		}
	}

	if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
		for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
			if (DEVSET_IN_SET(devset, SBD_COMP_IO, ut) == 0)
				continue;
			s = (int)SBD_DEVICE_STATE(sbp, SBD_COMP_IO, ut);
			if (transp->x_op[s].x_rv) {
				if (!state_err)
					state_err = s;
				DEVSET_DEL(devset, SBD_COMP_IO, ut);
			}
		}
	}

	PR_ALL("%s: requested devset = 0x%x, final devset = 0x%x\n",
		f, (uint_t)*devsetp, (uint_t)devset);

	*devsetp = devset;
	/*
	 * If there are some remaining components for which
	 * this state transition is valid, then allow them
	 * through, otherwise if none are left then return
	 * the state error.
	 */
	return (devset ? 0 : state_err);
}
1833 
1834 /*
1835  * pre-op entry point must SET_ERRNO(), if needed.
1836  * Return value of non-zero indicates failure.
1837  */
static int
sbd_pre_op(sbd_handle_t *hp)
{
	int		rv = 0, t;
	int		cmd, serr = 0;
	sbd_devset_t	devset;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbd_priv_handle_t	*shp = HD2MACHHD(hp);
	sbderror_t	*ep = SBD_HD2ERR(hp);
	sbd_cmd_t	*cmdp;
	static fn_t	f = "sbd_pre_op";

	cmd = hp->h_cmd;
	devset = shp->sh_devset;

	/*
	 * State-changing commands require FWRITE on the open; the listed
	 * cases fall through to default when the check passes.
	 */
	switch (cmd) {
		case SBD_CMD_CONNECT:
		case SBD_CMD_DISCONNECT:
		case SBD_CMD_UNCONFIGURE:
		case SBD_CMD_CONFIGURE:
		case SBD_CMD_ASSIGN:
		case SBD_CMD_UNASSIGN:
		case SBD_CMD_POWERON:
		case SBD_CMD_POWEROFF:
		case SBD_CMD_TEST:
		/* ioctls allowed if caller has write permission */
		if (!(hp->h_mode & FWRITE)) {
			SBD_SET_ERRNO(ep, EPERM);
			return (-1);
		}
		/* FALLTHROUGH */

		default:
		break;
	}

	/* Copy in the user's ioctl argument; freed in sbd_post_op(). */
	hp->h_iap = GETSTRUCT(sbd_ioctl_arg_t, 1);
	rv = sbd_copyin_ioarg(hp, hp->h_mode, cmd,
		(sbd_cmd_t *)hp->h_iap, shp->sh_arg);
	if (rv) {
		SBD_SET_ERRNO(ep, rv);
		FREESTRUCT(hp->h_iap, sbd_ioctl_arg_t, 1);
		hp->h_iap = NULL;
		cmn_err(CE_WARN, "%s: copyin fail", f);
		return (-1);
	} else {
		/*
		 * A component name overrides c_type; memory defaults to
		 * unit 0 since a board's memory is handled as one unit.
		 */
		cmdp =  (sbd_cmd_t *)hp->h_iap;
		if (cmdp->cmd_cm.c_id.c_name[0] != '\0') {

			cmdp->cmd_cm.c_id.c_type = SBD_COMP(sbd_name_to_idx(
				cmdp->cmd_cm.c_id.c_name));
			if (cmdp->cmd_cm.c_id.c_type == SBD_COMP_MEM) {
				if (cmdp->cmd_cm.c_id.c_unit == -1)
					cmdp->cmd_cm.c_id.c_unit = 0;
			}
		}
		devset = shp->sh_orig_devset = shp->sh_devset =
		    sbd_dev2devset(&cmdp->cmd_cm.c_id);
		if (devset == 0) {
			SBD_SET_ERRNO(ep, EINVAL);
			FREESTRUCT(hp->h_iap, sbd_ioctl_arg_t, 1);
			hp->h_iap = NULL;
			return (-1);
		}
	}

	/*
	 * Always turn on these bits ala Sunfire DR.
	 */
	hp->h_flags |= SBD_FLAG_DEVI_FORCE;

	if (cmdp->cmd_cm.c_flags & SBD_FLAG_FORCE)
		hp->h_flags |= SBD_IOCTL_FLAG_FORCE;

	/*
	 * Check for valid state transitions.
	 */
	if (!serr && ((t = CMD2INDEX(cmd)) != -1)) {
		struct sbd_state_trans	*transp;
		int			state_err;

		transp = &sbd_state_transition[t];
		ASSERT(transp->x_cmd == cmd);

		state_err = sbd_check_transition(sbp, &devset, transp);

		if (state_err < 0) {
			/*
			 * Invalidate device.
			 */
			SBD_SET_ERRNO(ep, ENOTTY);
			serr = -1;
			PR_ALL("%s: invalid devset (0x%x)\n",
				f, (uint_t)devset);
		} else if (state_err != 0) {
			/*
			 * State transition is not a valid one.
			 */
			SBD_SET_ERRNO(ep, transp->x_op[state_err].x_err);
			serr = transp->x_op[state_err].x_rv;
			PR_ALL("%s: invalid state %s(%d) for cmd %s(%d)\n",
				f, sbd_state_str[state_err], state_err,
				SBD_CMD_STR(cmd), cmd);
		}
		if (serr && SBD_GET_ERRNO(ep) != 0) {
			/*
			 * A state transition error occurred.
			 */
			if (serr < 0) {
				SBD_SET_ERR(ep, ESBD_INVAL);
			} else {
				SBD_SET_ERR(ep, ESBD_STATE);
			}
			PR_ALL("%s: invalid state transition\n", f);
		} else {
			shp->sh_devset = devset;
		}
	}

	if (serr && !rv && hp->h_iap) {

		/*
		 * There was a state error.  We successfully copied
		 * in the ioctl argument, so let's fill in the
		 * error and copy it back out.
		 */

		if (SBD_GET_ERR(ep) && SBD_GET_ERRNO(ep) == 0)
			SBD_SET_ERRNO(ep, EIO);

		SBD_SET_IOCTL_ERR(&hp->h_iap->i_err,
			ep->e_code,
			ep->e_rsc);
		(void) sbd_copyout_errs(hp->h_mode, hp->h_iap, shp->sh_arg);
		FREESTRUCT(hp->h_iap, sbd_ioctl_arg_t, 1);
		hp->h_iap = NULL;
		rv = -1;
	}

	return (rv);
}
1978 
/*
 * Common post-operation processing: timestamp state-changing commands,
 * normalize the handle's error, copy any error back to the user and
 * free the copied-in ioctl argument.
 */
static void
sbd_post_op(sbd_handle_t *hp)
{
	int		cmd;
	sbderror_t	*ep = SBD_HD2ERR(hp);
	sbd_priv_handle_t	*shp = HD2MACHHD(hp);
	sbd_board_t    *sbp = SBDH2BD(hp->h_sbd);

	cmd = hp->h_cmd;

	/* Record when the board last changed state. */
	switch (cmd) {
		case SBD_CMD_CONFIGURE:
		case SBD_CMD_UNCONFIGURE:
		case SBD_CMD_CONNECT:
		case SBD_CMD_DISCONNECT:
			sbp->sb_time = gethrestime_sec();
			break;

		default:
			break;
	}

	/* An ESBD_* code with no errno gets a generic EIO. */
	if (SBD_GET_ERR(ep) && SBD_GET_ERRNO(ep) == 0) {
		SBD_SET_ERRNO(ep, EIO);
	}

	if (shp->sh_arg != NULL) {

		/*
		 * NOTE(review): h_iap is dereferenced without a NULL
		 * check here; presumably sbd_pre_op() guarantees it is
		 * non-NULL whenever an error is pending -- verify.
		 */
		if (SBD_GET_ERR(ep) != ESBD_NOERROR) {

			SBD_SET_IOCTL_ERR(&hp->h_iap->i_err,
				ep->e_code,
				ep->e_rsc);

			(void) sbd_copyout_errs(hp->h_mode, hp->h_iap,
					shp->sh_arg);
		}

		if (hp->h_iap != NULL) {
			FREESTRUCT(hp->h_iap, sbd_ioctl_arg_t, 1);
			hp->h_iap = NULL;
		}
	}
}
2023 
2024 static int
2025 sbd_probe_board(sbd_handle_t *hp)
2026 {
2027 	int		rv;
2028 	sbd_board_t    *sbp;
2029 	sbdp_handle_t	*hdp;
2030 	static fn_t	f = "sbd_probe_board";
2031 
2032 	sbp = SBDH2BD(hp->h_sbd);
2033 
2034 	ASSERT(sbp != NULL);
2035 	PR_ALL("%s for board %d", f, sbp->sb_num);
2036 
2037 
2038 	hdp = sbd_get_sbdp_handle(sbp, hp);
2039 
2040 	if ((rv = sbdp_connect_board(hdp)) != 0) {
2041 		sbderror_t	*ep = SBD_HD2ERR(hp);
2042 
2043 		SBD_GET_PERR(hdp->h_err, ep);
2044 	}
2045 
2046 	/*
2047 	 * We need to force a recache after the connect.  The cached
2048 	 * info may be incorrect
2049 	 */
2050 	mutex_enter(&sbp->sb_flags_mutex);
2051 	sbp->sb_flags &= ~SBD_BOARD_STATUS_CACHED;
2052 	mutex_exit(&sbp->sb_flags_mutex);
2053 
2054 	SBD_INJECT_ERR(SBD_PROBE_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2055 		ESGT_PROBE, NULL);
2056 
2057 	sbd_release_sbdp_handle(hdp);
2058 
2059 	return (rv);
2060 }
2061 
2062 static int
2063 sbd_deprobe_board(sbd_handle_t *hp)
2064 {
2065 	int		rv;
2066 	sbdp_handle_t	*hdp;
2067 	sbd_board_t	*sbp;
2068 	static fn_t	f = "sbd_deprobe_board";
2069 
2070 	PR_ALL("%s...\n", f);
2071 
2072 	sbp = SBDH2BD(hp->h_sbd);
2073 
2074 	hdp = sbd_get_sbdp_handle(sbp, hp);
2075 
2076 	if ((rv = sbdp_disconnect_board(hdp)) != 0) {
2077 		sbderror_t	*ep = SBD_HD2ERR(hp);
2078 
2079 		SBD_GET_PERR(hdp->h_err, ep);
2080 	}
2081 
2082 	mutex_enter(&sbp->sb_flags_mutex);
2083 	sbp->sb_flags &= ~SBD_BOARD_STATUS_CACHED;
2084 	mutex_exit(&sbp->sb_flags_mutex);
2085 
2086 	SBD_INJECT_ERR(SBD_DEPROBE_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2087 		ESGT_DEPROBE, NULL);
2088 
2089 	sbd_release_sbdp_handle(hdp);
2090 	return (rv);
2091 }
2092 
2093 /*
2094  * Check if a CPU node is part of a CMP.
2095  */
2096 int
2097 sbd_is_cmp_child(dev_info_t *dip)
2098 {
2099 	dev_info_t *pdip;
2100 
2101 	if (strcmp(ddi_node_name(dip), "cpu") != 0) {
2102 		return (0);
2103 	}
2104 
2105 	pdip = ddi_get_parent(dip);
2106 
2107 	ASSERT(pdip);
2108 
2109 	if (strcmp(ddi_node_name(pdip), "cmp") == 0) {
2110 		return (1);
2111 	}
2112 
2113 	return (0);
2114 }
2115 
2116 /*
2117  * Returns the nodetype if dip is a top dip on the board of
2118  * interest or SBD_COMP_UNKNOWN otherwise
2119  */
2120 static sbd_comp_type_t
2121 get_node_type(sbd_board_t *sbp, dev_info_t *dip, int *unitp)
2122 {
2123 	int		idx, unit;
2124 	sbd_handle_t	*hp;
2125 	sbdp_handle_t	*hdp;
2126 	char		otype[OBP_MAXDRVNAME];
2127 	int		otypelen;
2128 
2129 	ASSERT(sbp);
2130 
2131 	if (unitp)
2132 		*unitp = -1;
2133 
2134 	hp = MACHBD2HD(sbp);
2135 
2136 	hdp = sbd_get_sbdp_handle(sbp, hp);
2137 	if (sbdp_get_board_num(hdp, dip) != sbp->sb_num) {
2138 		sbd_release_sbdp_handle(hdp);
2139 		return (SBD_COMP_UNKNOWN);
2140 	}
2141 
2142 	/*
2143 	 * sbdp_get_unit_num will return (-1) for cmp as there
2144 	 * is no "device_type" property associated with cmp.
2145 	 * Therefore we will just skip getting unit number for
2146 	 * cmp.  Callers of this function need to check the
2147 	 * value set in unitp before using it to dereference
2148 	 * an array.
2149 	 */
2150 	if (strcmp(ddi_node_name(dip), "cmp") == 0) {
2151 		sbd_release_sbdp_handle(hdp);
2152 		return (SBD_COMP_CMP);
2153 	}
2154 
2155 	otypelen = sizeof (otype);
2156 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
2157 	    OBP_DEVICETYPE,  (caddr_t)otype, &otypelen)) {
2158 		sbd_release_sbdp_handle(hdp);
2159 		return (SBD_COMP_UNKNOWN);
2160 	}
2161 
2162 	idx = sbd_otype_to_idx(otype);
2163 
2164 	if (SBD_COMP(idx) == SBD_COMP_UNKNOWN) {
2165 		sbd_release_sbdp_handle(hdp);
2166 		return (SBD_COMP_UNKNOWN);
2167 	}
2168 
2169 	unit = sbdp_get_unit_num(hdp, dip);
2170 	if (unit == -1) {
2171 		cmn_err(CE_WARN,
2172 			"get_node_type: %s unit fail %p", otype, (void *)dip);
2173 		sbd_release_sbdp_handle(hdp);
2174 		return (SBD_COMP_UNKNOWN);
2175 	}
2176 
2177 	sbd_release_sbdp_handle(hdp);
2178 
2179 	if (unitp)
2180 		*unitp = unit;
2181 
2182 	return (SBD_COMP(idx));
2183 }
2184 
/*
 * State carried through the ddi_walk_devs() callback
 * (sbd_setup_devlists()) while building a board's device lists.
 */
typedef struct {
	sbd_board_t	*sbp;	/* board whose devlists are being built */
	int		nmc;	/* memory-controller dips found so far */
	int		hold;	/* not referenced in this chunk; presumably a branch-hold flag -- verify */
} walk_tree_t;
2190 
/*
 * ddi_walk_devs() callback: for each dip belonging to the board being
 * walked, record its pathname and devinfo pointer in the board's
 * per-type device lists, mark the device present, and collect
 * memory-controller dips into the board's memory unit.
 */
static int
sbd_setup_devlists(dev_info_t *dip, void *arg)
{
	walk_tree_t	*wp;
	dev_info_t	**devlist = NULL;
	char		*pathname = NULL;
	sbd_mem_unit_t	*mp;
	static fn_t	f = "sbd_setup_devlists";
	sbd_board_t	*sbp;
	int		unit;
	sbd_comp_type_t nodetype;

	ASSERT(dip);

	wp = (walk_tree_t *)arg;

	if (wp == NULL) {
		PR_ALL("%s:bad arg\n", f);
		return (DDI_WALK_TERMINATE);
	}

	sbp = wp->sbp;

	nodetype = get_node_type(sbp, dip, &unit);

	/* Pick the path buffer matching the component type. */
	switch (nodetype) {

	case SBD_COMP_CPU:
		pathname = sbp->sb_cpupath[unit];
		break;

	case SBD_COMP_MEM:
		pathname = sbp->sb_mempath[unit];
		break;

	case SBD_COMP_IO:
		pathname = sbp->sb_iopath[unit];
		break;

	case SBD_COMP_CMP:
	case SBD_COMP_UNKNOWN:
		/*
		 * This dip is not of interest to us
		 */
		return (DDI_WALK_CONTINUE);

	default:
		ASSERT(0);
		return (DDI_WALK_CONTINUE);
	}

	/*
	 * dip's parent is being held busy by ddi_walk_devs(),
	 * so dip doesn't have to be held while calling ddi_pathname()
	 */
	if (pathname) {
		(void) ddi_pathname(dip, pathname);
	}

	devlist = sbp->sb_devlist[NIX(nodetype)];

	/*
	 * The branch rooted at dip should already be held,
	 * unless we are dealing with a core of a CMP.
	 */
	ASSERT(sbd_is_cmp_child(dip) || e_ddi_branch_held(dip));
	devlist[unit] = dip;

	/*
	 * This test is required if multiple devices are considered
	 * as one. This is the case for memory-controller nodes.
	 */
	if (!SBD_DEV_IS_PRESENT(sbp, nodetype, unit)) {
		sbp->sb_ndev++;
		SBD_DEV_SET_PRESENT(sbp, nodetype, unit);
	}

	/* All MC dips for the board accumulate on memory unit 0. */
	if (nodetype == SBD_COMP_MEM) {
		mp = SBD_GET_BOARD_MEMUNIT(sbp, unit);
		ASSERT(wp->nmc < SBD_NUM_MC_PER_BOARD);
		mp->sbm_dip[wp->nmc++] = dip;
	}

	return (DDI_WALK_CONTINUE);
}
2276 
2277 /*
2278  * This routine is used to construct the memory devlist.
2279  * In Starcat and Serengeti platforms, a system board can contain up to
2280  * four memory controllers (MC).  The MCs have been programmed by POST for
2281  * optimum memory interleaving amongst their peers on the same board.
2282  * This DR driver does not support deinterleaving.  Therefore, the smallest
2283  * unit of memory that can be manipulated by this driver is all of the
2284  * memory on a board.  Because of this restriction, a board's memory devlist
2285  * is populated with only one of the four (possible) MC dnodes on that board.
2286  * Care must be taken to ensure that the selected MC dnode represents the
2287  * lowest physical address to which memory on the board will respond to.
2288  * This is required in order to preserve the semantics of
2289  * sbdp_get_base_physaddr() when applied to a MC dnode stored in the
2290  * memory devlist.
2291  */
static void
sbd_init_mem_devlists(sbd_board_t *sbp)
{
	dev_info_t	**devlist;
	sbd_mem_unit_t	*mp;
	dev_info_t	*mc_dip;
	sbdp_handle_t	*hdp;
	uint64_t	mc_pa, lowest_pa;
	int		i;
	sbd_handle_t	*hp = MACHBD2HD(sbp);

	devlist = sbp->sb_devlist[NIX(SBD_COMP_MEM)];

	mp = SBD_GET_BOARD_MEMUNIT(sbp, 0);

	mc_dip = mp->sbm_dip[0];
	if (mc_dip == NULL)
		return;		/* No MC dips found for this board */

	hdp = sbd_get_sbdp_handle(sbp, hp);

	if (sbdphw_get_base_physaddr(hdp, mc_dip, &mc_pa)) {
		/* TODO: log complaint about dnode */

		/* NOTE: reached by falling through from the if above. */
pretend_no_mem:
		/*
		 * We are here because sbdphw_get_base_physaddr() failed.
		 * Although it is very unlikely to happen, it did.  Lucky us.
		 * Since we can no longer examine _all_ of the MCs on this
		 * board to determine which one is programmed to the lowest
		 * physical address, we cannot involve any of the MCs on
		 * this board in DR operations.  To ensure this, we pretend
		 * that this board does not contain any memory.
		 *
		 * Paranoia: clear the dev_present mask.
		 */
		if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_MEM, 0)) {
			ASSERT(sbp->sb_ndev != 0);
			SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_MEM, 0);
			sbp->sb_ndev--;
		}

		for (i = 0; i < SBD_NUM_MC_PER_BOARD; i++) {
			mp->sbm_dip[i] = NULL;
		}

		sbd_release_sbdp_handle(hdp);
		return;
	}

	/* assume this one will win. */
	devlist[0] = mc_dip;
	mp->sbm_cm.sbdev_dip = mc_dip;
	lowest_pa = mc_pa;

	/*
	 * We know the base physical address of one of the MC devices.  Now
	 * we will enumerate through all of the remaining MC devices on
	 * the board to find which of them is programmed to the lowest
	 * physical address.
	 */
	for (i = 1; i < SBD_NUM_MC_PER_BOARD; i++) {
		mc_dip = mp->sbm_dip[i];
		if (mc_dip == NULL) {
			break;
		}

		if (sbdphw_get_base_physaddr(hdp, mc_dip, &mc_pa)) {
			cmn_err(CE_NOTE, "No mem on board %d unit %d",
				sbp->sb_num, i);
			break;
		}
		/* Track the MC with the lowest base address seen so far. */
		if (mc_pa < lowest_pa) {
			mp->sbm_cm.sbdev_dip = mc_dip;
			devlist[0] = mc_dip;
			lowest_pa = mc_pa;
		}
	}

	sbd_release_sbdp_handle(hdp);
}
2373 
2374 static int
2375 sbd_name_to_idx(char *name)
2376 {
2377 	int idx;
2378 
2379 	for (idx = 0; SBD_COMP(idx) != SBD_COMP_UNKNOWN; idx++) {
2380 		if (strcmp(name, SBD_DEVNAME(idx)) == 0) {
2381 			break;
2382 		}
2383 	}
2384 
2385 	return (idx);
2386 }
2387 
2388 static int
2389 sbd_otype_to_idx(char *otype)
2390 {
2391 	int idx;
2392 
2393 	for (idx = 0; SBD_COMP(idx) != SBD_COMP_UNKNOWN; idx++) {
2394 
2395 		if (strcmp(otype, SBD_OTYPE(idx)) == 0) {
2396 			break;
2397 		}
2398 	}
2399 
2400 	return (idx);
2401 }
2402 
/*
 * (Re)build the per-board device lists by walking the device tree
 * rooted at sbp->sb_topdip.  All previously recorded unit state is
 * cleared first, then sbd_setup_devlists() records each discovered
 * CPU/memory/IO node.  Any CPUs found are initialized and started
 * (so the memory controllers can be examined), after which
 * sbd_init_mem_devlists() derives the memory-unit information.
 *
 * Returns sbp->sb_ndev, the number of devices found; 0 means the
 * board is empty (or the walk found nothing).
 */
static int
sbd_init_devlists(sbd_board_t *sbp)
{
	int		i;
	sbd_dev_unit_t	*dp;
	sbd_mem_unit_t	*mp;
	walk_tree_t	*wp, walk = {0};
	dev_info_t	*pdip;
	static fn_t	f = "sbd_init_devlists";

	PR_ALL("%s (board = %d)...\n", f, sbp->sb_num);

	wp = &walk;

	SBD_DEVS_DISCONNECT(sbp, (uint_t)-1);

	/*
	 * Clear out old entries, if any.
	 */

	/* Reset every memory unit slot and stamp its identity fields. */
	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		sbp->sb_devlist[NIX(SBD_COMP_MEM)][i] = NULL;
		dp = (sbd_dev_unit_t *)SBD_GET_BOARD_MEMUNIT(sbp, i);
		dp->u_common.sbdev_sbp = sbp;
		dp->u_common.sbdev_unum = i;
		dp->u_common.sbdev_type = SBD_COMP_MEM;
	}

	mp = SBD_GET_BOARD_MEMUNIT(sbp, 0);
	ASSERT(mp != NULL);
	/* Forget any memory-controller dips cached by a previous walk. */
	for (i = 0; i < SBD_NUM_MC_PER_BOARD; i++) {
		mp->sbm_dip[i] = NULL;
	}

	/* Reset CPU unit slots. */
	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		sbp->sb_devlist[NIX(SBD_COMP_CPU)][i] = NULL;
		dp = (sbd_dev_unit_t *)SBD_GET_BOARD_CPUUNIT(sbp, i);
		dp->u_common.sbdev_sbp = sbp;
		dp->u_common.sbdev_unum = i;
		dp->u_common.sbdev_type = SBD_COMP_CPU;
	}
	/* Reset IO unit slots. */
	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
		sbp->sb_devlist[NIX(SBD_COMP_IO)][i] = NULL;
		dp = (sbd_dev_unit_t *)SBD_GET_BOARD_IOUNIT(sbp, i);
		dp->u_common.sbdev_sbp = sbp;
		dp->u_common.sbdev_unum = i;
		dp->u_common.sbdev_type = SBD_COMP_IO;
	}

	wp->sbp = sbp;
	wp->nmc = 0;
	sbp->sb_ndev = 0;

	/*
	 * ddi_walk_devs() requires that topdip's parent be held.
	 */
	pdip = ddi_get_parent(sbp->sb_topdip);
	if (pdip) {
		ndi_hold_devi(pdip);
		/* 'i' is reused here as the circular-enter cookie. */
		ndi_devi_enter(pdip, &i);
	}
	ddi_walk_devs(sbp->sb_topdip, sbd_setup_devlists, (void *) wp);
	if (pdip) {
		ndi_devi_exit(pdip, i);
		ndi_rele_devi(pdip);
	}

	/*
	 * There is no point checking all the components if there
	 * are no devices.
	 */
	if (sbp->sb_ndev == 0) {
		sbp->sb_memaccess_ok = 0;
		return (sbp->sb_ndev);
	}

	/*
	 * Initialize cpu sections before calling sbd_init_mem_devlists
	 * which will access the mmus.
	 */
	sbp->sb_memaccess_ok = 1;
	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_CPU, i)) {
			sbd_init_cpu_unit(sbp, i);
			/* Record a CPU start failure but keep going. */
			if (sbd_connect_cpu(sbp, i)) {
				SBD_SET_ERR(HD2MACHERR(MACHBD2HD(sbp)),
					ESBD_CPUSTART);
			}

		}
	}

	if (sbp->sb_memaccess_ok) {
		sbd_init_mem_devlists(sbp);
	} else {
		cmn_err(CE_WARN, "unable to access memory on board %d",
		    sbp->sb_num);
	}

	return (sbp->sb_ndev);
}
2504 
/*
 * Initialize the board's CPU unit structure for 'unit': record its
 * cpuid, implementation, current cpu_flags and condition, then
 * transition the unit to the state implied by its attach/present
 * status (CONFIGURED, CONNECTED, or EMPTY).
 */
static void
sbd_init_cpu_unit(sbd_board_t *sbp, int unit)
{
	sbd_istate_t	new_state;
	sbd_cpu_unit_t	*cp;
	int		cpuid;
	dev_info_t	*dip;
	sbdp_handle_t	*hdp;
	sbd_handle_t	*hp = MACHBD2HD(sbp);
	extern kmutex_t	cpu_lock;

	/* Pick the target state before touching the unit. */
	if (SBD_DEV_IS_ATTACHED(sbp, SBD_COMP_CPU, unit)) {
		new_state = SBD_STATE_CONFIGURED;
	} else if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_CPU, unit)) {
		new_state = SBD_STATE_CONNECTED;
	} else {
		new_state = SBD_STATE_EMPTY;
	}

	dip = sbp->sb_devlist[NIX(SBD_COMP_CPU)][unit];

	cp = SBD_GET_BOARD_CPUUNIT(sbp, unit);

	hdp = sbd_get_sbdp_handle(sbp, hp);

	cpuid = sbdp_get_cpuid(hdp, dip);

	cp->sbc_cpu_id = cpuid;

	/*
	 * Address-of test: sbdp_cpu_get_impl may be absent from the
	 * platform module (weak/optional symbol); fall back to -1.
	 */
	if (&sbdp_cpu_get_impl)
		cp->sbc_cpu_impl = sbdp_cpu_get_impl(hdp, dip);
	else
		cp->sbc_cpu_impl = -1;

	/* cpu[] may only be inspected while holding cpu_lock. */
	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = CPU_OFFLINE | CPU_POWEROFF;
	mutex_exit(&cpu_lock);

	sbd_cpu_set_prop(cp, dip);

	cp->sbc_cm.sbdev_cond = sbd_get_comp_cond(dip);
	sbd_release_sbdp_handle(hdp);

	/*
	 * Any changes to the cpu should be performed above
	 * this call to ensure the cpu is fully initialized
	 * before transitioning to the new state.
	 */
	SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU, unit, new_state);
}
2558 
2559 /*
2560  * Only do work if called to operate on an entire board
2561  * which doesn't already have components present.
2562  */
/*
 * Only do work if called to operate on an entire board
 * which doesn't already have components present.
 *
 * On success the board transitions to CONNECTED/UNCONFIGURED after
 * its device lists and mem/io unit sections are initialized.  Errors
 * are reported through the handle's error structure (EINVAL if
 * devices are already present, ESBD_NODEV if none are found).
 */
static void
sbd_connect(sbd_handle_t *hp)
{
	sbd_board_t	*sbp;
	sbderror_t	*ep;
	static fn_t	f = "sbd_connect";

	sbp = SBDH2BD(hp->h_sbd);

	PR_ALL("%s board %d\n", f, sbp->sb_num);

	ep = HD2MACHERR(hp);

	if (SBD_DEVS_PRESENT(sbp)) {
		/*
		 * Board already has devices present.
		 */
		PR_ALL("%s: devices already present (0x%x)\n",
			f, SBD_DEVS_PRESENT(sbp));
		SBD_SET_ERRNO(ep, EINVAL);
		return;
	}

	if (sbd_init_devlists(sbp) == 0) {
		cmn_err(CE_WARN, "%s: no devices present on board %d",
			f, sbp->sb_num);
		SBD_SET_ERR(ep, ESBD_NODEV);
		return;
	} else {
		int	i;

		/*
		 * Initialize mem-unit section of board structure.
		 */
		for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++)
			if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_MEM, i))
				sbd_init_mem_unit(sbp, i, SBD_HD2ERR(hp));

		/*
		 * Initialize sb_io sections.
		 */
		for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++)
			if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_IO, i))
				sbd_init_io_unit(sbp, i);

		/* Board is now connected but not yet configured. */
		SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONNECTED);
		sbp->sb_rstate = SBD_STAT_CONNECTED;
		sbp->sb_ostate = SBD_STAT_UNCONFIGURED;
		(void) drv_getparm(TIME, (void *)&sbp->sb_time);
		SBD_INJECT_ERR(SBD_CONNECT_BOARD_PSEUDO_ERR, hp->h_err, EIO,
			ESBD_INTERNAL, NULL);
	}
}
2616 
/*
 * Disconnect the requested devices on a board.  Only devices that are
 * present but unattached are eligible; each successfully disconnected
 * unit transitions to EMPTY and is cleared from the present mask.
 * When nothing remains present the board itself transitions to
 * OCCUPIED / DISCONNECTED.
 *
 * Returns 0 when the board ends up with no devices present, -1 when
 * some devices could not be disconnected.
 */
static int
sbd_disconnect(sbd_handle_t *hp)
{
	int		i;
	sbd_devset_t	devset;
	sbd_board_t	*sbp;
	static fn_t	f = "sbd_disconnect it";

	PR_ALL("%s ...\n", f);

	sbp = SBDH2BD(hp->h_sbd);

	/*
	 * Only devices which are present, but
	 * unattached can be disconnected.
	 */
	devset = HD2MACHHD(hp)->sh_devset & SBD_DEVS_PRESENT(sbp) &
			SBD_DEVS_UNATTACHED(sbp);

	ASSERT((SBD_DEVS_ATTACHED(sbp) & devset) == 0);

	/*
	 * Update per-device state transitions.
	 */

	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++)
		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, i)) {
			if (sbd_disconnect_mem(hp, i) == 0) {
				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_MEM, i,
							SBD_STATE_EMPTY);
				SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_MEM, i);
			}
		}

	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++)
		if (DEVSET_IN_SET(devset, SBD_COMP_CPU, i)) {
			if (sbd_disconnect_cpu(hp, i) == 0) {
				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU, i,
							SBD_STATE_EMPTY);
				SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_CPU, i);
			}
		}

	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++)
		if (DEVSET_IN_SET(devset, SBD_COMP_IO, i)) {
			if (sbd_disconnect_io(hp, i) == 0) {
				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_IO, i,
							SBD_STATE_EMPTY);
				SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_IO, i);
			}
		}

	/*
	 * Once all the components on a board have been disconnect
	 * the board's state can transition to disconnected and
	 * we can allow the deprobe to take place.
	 */
	if (SBD_DEVS_PRESENT(sbp) == 0) {
		SBD_BOARD_TRANSITION(sbp, SBD_STATE_OCCUPIED);
		sbp->sb_rstate = SBD_STAT_DISCONNECTED;
		sbp->sb_ostate = SBD_STAT_UNCONFIGURED;
		(void) drv_getparm(TIME, (void *)&sbp->sb_time);
		SBD_INJECT_ERR(SBD_DISCONNECT_BOARD_PSEUDO_ERR, hp->h_err, EIO,
			ESBD_INTERNAL, NULL);
		return (0);
	} else {
		cmn_err(CE_WARN, "%s: could not disconnect devices on board %d",
			f, sbp->sb_num);
		return (-1);
	}
}
2688 
2689 static void
2690 sbd_test_board(sbd_handle_t *hp)
2691 {
2692 	sbd_board_t	*sbp;
2693 	sbdp_handle_t	*hdp;
2694 
2695 	sbp = SBDH2BD(hp->h_sbd);
2696 
2697 	PR_ALL("sbd_test_board: board %d\n", sbp->sb_num);
2698 
2699 
2700 	hdp = sbd_get_sbdp_handle(sbp, hp);
2701 
2702 	if (sbdp_test_board(hdp, &hp->h_opts) != 0) {
2703 		sbderror_t	*ep = SBD_HD2ERR(hp);
2704 
2705 		SBD_GET_PERR(hdp->h_err, ep);
2706 	}
2707 
2708 	SBD_INJECT_ERR(SBD_TEST_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2709 		ESBD_INTERNAL, NULL);
2710 
2711 	sbd_release_sbdp_handle(hdp);
2712 }
2713 
2714 static void
2715 sbd_assign_board(sbd_handle_t *hp)
2716 {
2717 	sbd_board_t	*sbp;
2718 	sbdp_handle_t	*hdp;
2719 
2720 	sbp = SBDH2BD(hp->h_sbd);
2721 
2722 	PR_ALL("sbd_assign_board: board %d\n", sbp->sb_num);
2723 
2724 	hdp = sbd_get_sbdp_handle(sbp, hp);
2725 
2726 	if (sbdp_assign_board(hdp) != 0) {
2727 		sbderror_t	*ep = SBD_HD2ERR(hp);
2728 
2729 		SBD_GET_PERR(hdp->h_err, ep);
2730 	}
2731 
2732 	SBD_INJECT_ERR(SBD_ASSIGN_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2733 		ESBD_INTERNAL, NULL);
2734 
2735 	sbd_release_sbdp_handle(hdp);
2736 }
2737 
/*
 * Unassign the board from this domain via the platform layer.  On
 * failure the platform error is copied into the caller's handle.
 */
static void
sbd_unassign_board(sbd_handle_t *hp)
{
	sbd_board_t	*sbp;
	sbdp_handle_t	*hdp;

	sbp = SBDH2BD(hp->h_sbd);

	PR_ALL("sbd_unassign_board: board %d\n", sbp->sb_num);

	hdp = sbd_get_sbdp_handle(sbp, hp);

	if (sbdp_unassign_board(hdp) != 0) {
		sbderror_t	*ep = SBD_HD2ERR(hp);

		SBD_GET_PERR(hdp->h_err, ep);
	}

	/*
	 * NOTE(review): this injects the ASSIGN pseudo-error code on the
	 * unassign path — likely a copy-paste from sbd_assign_board().
	 * Confirm whether a dedicated UNASSIGN pseudo-error constant
	 * exists before changing it.
	 */
	SBD_INJECT_ERR(SBD_ASSIGN_BOARD_PSEUDO_ERR, hp->h_err, EIO,
		ESBD_INTERNAL, NULL);

	sbd_release_sbdp_handle(hdp);
}
2761 
2762 static void
2763 sbd_poweron_board(sbd_handle_t *hp)
2764 {
2765 	sbd_board_t	*sbp;
2766 	sbdp_handle_t	*hdp;
2767 
2768 	sbp = SBDH2BD(hp->h_sbd);
2769 
2770 	PR_ALL("sbd_poweron_board: %d\n", sbp->sb_num);
2771 
2772 	hdp = sbd_get_sbdp_handle(sbp, hp);
2773 
2774 	if (sbdp_poweron_board(hdp) != 0) {
2775 		sbderror_t	*ep = SBD_HD2ERR(hp);
2776 
2777 		SBD_GET_PERR(hdp->h_err, ep);
2778 	}
2779 
2780 	SBD_INJECT_ERR(SBD_POWERON_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2781 		ESBD_INTERNAL, NULL);
2782 
2783 	sbd_release_sbdp_handle(hdp);
2784 }
2785 
2786 static void
2787 sbd_poweroff_board(sbd_handle_t *hp)
2788 {
2789 	sbd_board_t	*sbp;
2790 	sbdp_handle_t	*hdp;
2791 
2792 	sbp = SBDH2BD(hp->h_sbd);
2793 
2794 	PR_ALL("sbd_poweroff_board: %d\n", sbp->sb_num);
2795 
2796 	hdp = sbd_get_sbdp_handle(sbp, hp);
2797 
2798 	if (sbdp_poweroff_board(hdp) != 0) {
2799 		sbderror_t	*ep = SBD_HD2ERR(hp);
2800 
2801 		SBD_GET_PERR(hdp->h_err, ep);
2802 	}
2803 
2804 	SBD_INJECT_ERR(SBD_POWEROFF_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2805 		ESBD_INTERNAL, NULL);
2806 
2807 	sbd_release_sbdp_handle(hdp);
2808 }
2809 
2810 
2811 /*
2812  * Return a list of the dip's of devices that are
2813  * either present and attached, or present only but
2814  * not yet attached for the given board.
2815  */
2816 sbd_devlist_t *
2817 sbd_get_devlist(sbd_handle_t *hp, sbd_board_t *sbp, sbd_comp_type_t nodetype,
2818 		int max_units, uint_t uset, int *count, int present_only)
2819 {
2820 	int		i, ix;
2821 	sbd_devlist_t	*ret_devlist;
2822 	dev_info_t	**devlist;
2823 	sbdp_handle_t	*hdp;
2824 
2825 	*count = 0;
2826 	ret_devlist = GETSTRUCT(sbd_devlist_t, max_units);
2827 	devlist = sbp->sb_devlist[NIX(nodetype)];
2828 	/*
2829 	 * Turn into binary value since we're going
2830 	 * to be using XOR for a comparison.
2831 	 * if (present_only) then
2832 	 *	dev must be PRESENT, but NOT ATTACHED.
2833 	 * else
2834 	 *	dev must be PRESENT AND ATTACHED.
2835 	 * endif
2836 	 */
2837 	if (present_only)
2838 		present_only = 1;
2839 
2840 	hdp = sbd_get_sbdp_handle(sbp, hp);
2841 
2842 	for (i = ix = 0; (i < max_units) && uset; i++) {
2843 		int	ut, is_present, is_attached;
2844 		dev_info_t *dip;
2845 		sbderror_t *ep = SBD_HD2ERR(hp);
2846 		int	nunits, distance, j;
2847 
2848 		/*
2849 		 * For CMPs, we would like to perform DR operation on
2850 		 * all the cores before moving onto the next chip.
2851 		 * Therefore, when constructing the devlist, we process
2852 		 * all the cores together.
2853 		 */
2854 		if (nodetype == SBD_COMP_CPU) {
2855 			/*
2856 			 * Number of units to process in the inner loop
2857 			 */
2858 			nunits = MAX_CORES_PER_CMP;
2859 			/*
2860 			 * The distance between the units in the
2861 			 * board's sb_devlist structure.
2862 			 */
2863 			distance = MAX_CMP_UNITS_PER_BOARD;
2864 		} else {
2865 			nunits = 1;
2866 			distance = 0;
2867 		}
2868 
2869 		for (j = 0; j < nunits; j++) {
2870 			if ((dip = devlist[i + j * distance]) == NULL)
2871 				continue;
2872 
2873 			ut = sbdp_get_unit_num(hdp, dip);
2874 
2875 			if (ut == -1) {
2876 				SBD_GET_PERR(hdp->h_err, ep);
2877 				PR_ALL("sbd_get_devlist bad unit %d"
2878 				    " code %d errno %d",
2879 				    i, ep->e_code, ep->e_errno);
2880 			}
2881 
2882 			if ((uset & (1 << ut)) == 0)
2883 				continue;
2884 			uset &= ~(1 << ut);
2885 			is_present = SBD_DEV_IS_PRESENT(sbp, nodetype, ut) ?
2886 			    1 : 0;
2887 			is_attached = SBD_DEV_IS_ATTACHED(sbp, nodetype, ut) ?
2888 			    1 : 0;
2889 
2890 			if (is_present && (present_only ^ is_attached)) {
2891 				ret_devlist[ix].dv_dip = dip;
2892 				sbd_init_err(&ret_devlist[ix].dv_error);
2893 				ix++;
2894 			}
2895 		}
2896 	}
2897 	sbd_release_sbdp_handle(hdp);
2898 
2899 	if ((*count = ix) == 0) {
2900 		FREESTRUCT(ret_devlist, sbd_devlist_t, max_units);
2901 		ret_devlist = NULL;
2902 	}
2903 
2904 	return (ret_devlist);
2905 }
2906 
/*
 * Return the devlist of devices to attach for the given pass:
 * CPUs first, then memory, then IO.  The static next_pass variable
 * carries the pass state across calls so component types absent
 * from the board are skipped without being revisited.
 *
 * NOTE(review): next_pass is function-static, so this assumes DR
 * operations are serialized — confirm against the driver's locking.
 */
static sbd_devlist_t *
sbd_get_attach_devlist(sbd_handle_t *hp, int32_t *devnump, int32_t pass)
{
	sbd_board_t	*sbp;
	uint_t		uset;
	sbd_devset_t	devset;
	sbd_devlist_t	*attach_devlist;
	static int	next_pass = 1;
	static fn_t	f = "sbd_get_attach_devlist";

	PR_ALL("%s (pass = %d)...\n", f, pass);

	sbp = SBDH2BD(hp->h_sbd);
	devset = HD2MACHHD(hp)->sh_devset;

	*devnump = 0;
	attach_devlist = NULL;

	/*
	 * We switch on next_pass for the cases where a board
	 * does not contain a particular type of component.
	 * In these situations we don't want to return NULL
	 * prematurely.  We need to check other devices and
	 * we don't want to check the same type multiple times.
	 * For example, if there were no cpus, then on pass 1
	 * we would drop through and return the memory nodes.
	 * However, on pass 2 we would switch back to the memory
	 * nodes thereby returning them twice!  Using next_pass
	 * forces us down to the end (or next item).
	 */
	if (pass == 1)
		next_pass = 1;

	switch (next_pass) {
	case 1:
		if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_CPU);

			/* present_only = 1: want present-but-unattached */
			attach_devlist = sbd_get_devlist(hp, sbp, SBD_COMP_CPU,
						MAX_CPU_UNITS_PER_BOARD,
						uset, devnump, 1);

			DEVSET_DEL(devset, SBD_COMP_CPU, DEVSET_ANYUNIT);
			if (!devset || attach_devlist) {
				next_pass = 2;
				return (attach_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there aren't any cpus, then just
			 * fall through to check for the next component.
			 */
		}
		/*FALLTHROUGH*/

	case 2:
		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_MEM);

			attach_devlist = sbd_get_devlist(hp, sbp, SBD_COMP_MEM,
						MAX_MEM_UNITS_PER_BOARD,
						uset, devnump, 1);

			DEVSET_DEL(devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
			if (!devset || attach_devlist) {
				next_pass = 3;
				return (attach_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there isn't any memory, then
			 * just fall through to next component.
			 */
		}
		/*FALLTHROUGH*/


	case 3:
		next_pass = -1;
		if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_IO);

			attach_devlist = sbd_get_devlist(hp, sbp, SBD_COMP_IO,
						MAX_IO_UNITS_PER_BOARD,
						uset, devnump, 1);

			DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
			if (!devset || attach_devlist) {
				next_pass = 4;
				return (attach_devlist);
			}
		}
		/*FALLTHROUGH*/

	default:
		*devnump = 0;
		return (NULL);
	}
	/*NOTREACHED*/
}
3007 
/*
 * Run the per-nodetype pre-attach hook for the devlist.  On failure
 * the devlist (and any per-entry error strings) allocated by
 * sbd_get_attach_devlist() is freed here, since the post-op that
 * would normally free it will not run.  Returns 0 on success, -1
 * on failure.
 */
static int
sbd_pre_attach_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
	int32_t devnum)
{
	int		max_units = 0, rv = 0;
	sbd_comp_type_t	nodetype;
	static fn_t	f = "sbd_pre_attach_devlist";

	/*
	 * In this driver, all entries in a devlist[] are
	 * of the same nodetype.
	 */
	nodetype = sbd_get_devtype(hp, devlist->dv_dip);

	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
		f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);

	switch (nodetype) {

	case SBD_COMP_MEM:
		max_units = MAX_MEM_UNITS_PER_BOARD;
		rv = sbd_pre_attach_mem(hp, devlist, devnum);
		break;

	case SBD_COMP_CPU:
		max_units = MAX_CPU_UNITS_PER_BOARD;
		rv = sbd_pre_attach_cpu(hp, devlist, devnum);
		break;

	case SBD_COMP_IO:
		/* IO has no pre-attach work; just note the list size. */
		max_units = MAX_IO_UNITS_PER_BOARD;
		break;

	default:
		rv = -1;
		break;
	}

	if (rv && max_units) {
		int	i;
		/*
		 * Need to clean up devlist
		 * if pre-op is going to fail.
		 */
		for (i = 0; i < max_units; i++) {
			/* Error strings are contiguous; stop at first empty. */
			if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
				SBD_FREE_ERR(&devlist[i].dv_error);
			} else {
				break;
			}
		}
		FREESTRUCT(devlist, sbd_devlist_t, max_units);
	}

	/*
	 * If an error occurred, return "continue"
	 * indication so that we can continue attaching
	 * as much as possible.
	 */
	return (rv ? -1 : 0);
}
3069 
/*
 * Run the per-nodetype post-attach hook, mark each successfully
 * attached unit CONFIGURED, update the board-level state
 * (CONFIGURED / PARTIAL) accordingly, and free the devlist built by
 * sbd_get_attach_devlist().  Always returns 0: the policy is to
 * attach as much as possible, so pre/post ops report "success".
 */
static int
sbd_post_attach_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
			int32_t devnum)
{
	int		i, max_units = 0, rv = 0;
	sbd_devset_t	devs_unattached, devs_present;
	sbd_comp_type_t	nodetype;
	sbd_board_t 	*sbp = SBDH2BD(hp->h_sbd);
	sbdp_handle_t	*hdp;
	static fn_t	f = "sbd_post_attach_devlist";

	/* NOTE(review): redundant — sbp already initialized above. */
	sbp = SBDH2BD(hp->h_sbd);
	nodetype = sbd_get_devtype(hp, devlist->dv_dip);

	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
		f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);

	hdp = sbd_get_sbdp_handle(sbp, hp);

	/*
	 * Need to free up devlist[] created earlier in
	 * sbd_get_attach_devlist().
	 */
	switch (nodetype) {
	case SBD_COMP_CPU:
		max_units = MAX_CPU_UNITS_PER_BOARD;
		rv = sbd_post_attach_cpu(hp, devlist, devnum);
		break;


	case SBD_COMP_MEM:
		max_units = MAX_MEM_UNITS_PER_BOARD;

		rv = sbd_post_attach_mem(hp, devlist, devnum);
		break;

	case SBD_COMP_IO:
		max_units = MAX_IO_UNITS_PER_BOARD;
		break;

	default:
		rv = -1;
		break;
	}


	/* Walk the devlist, recording each unit that actually attached. */
	for (i = 0; i < devnum; i++) {
		int		unit;
		dev_info_t	*dip;
		sbderror_t	*ep;

		ep = &devlist[i].dv_error;

		/* Skip entries whose error was consumed into the handle. */
		if (sbd_set_err_in_hdl(hp, ep) == 0)
			continue;

		dip = devlist[i].dv_dip;
		nodetype = sbd_get_devtype(hp, dip);
		unit = sbdp_get_unit_num(hdp, dip);

		if (unit == -1) {
			SBD_GET_PERR(hdp->h_err, ep);
			continue;
		}

		unit = sbd_check_unit_attached(sbp, dip, unit, nodetype, ep);

		if (unit == -1) {
			PR_ALL("%s: ERROR (nt=%s, b=%d, u=%d) not attached\n",
				f, sbd_ct_str[(int)nodetype], sbp->sb_num, i);
			continue;
		}

		SBD_DEV_SET_ATTACHED(sbp, nodetype, unit);
		SBD_DEVICE_TRANSITION(sbp, nodetype, unit,
						SBD_STATE_CONFIGURED);
	}
	sbd_release_sbdp_handle(hdp);

	if (rv) {
		PR_ALL("%s: errno %d, ecode %d during attach\n",
			f, SBD_GET_ERRNO(SBD_HD2ERR(hp)),
			SBD_GET_ERR(HD2MACHERR(hp)));
	}

	devs_present = SBD_DEVS_PRESENT(sbp);
	devs_unattached = SBD_DEVS_UNATTACHED(sbp);

	/* Derive the new board state from what remains unattached. */
	switch (SBD_BOARD_STATE(sbp)) {
	case SBD_STATE_CONNECTED:
	case SBD_STATE_UNCONFIGURED:
		ASSERT(devs_present);

		if (devs_unattached == 0) {
			/*
			 * All devices finally attached.
			 */
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONFIGURED);
			sbp->sb_rstate = SBD_STAT_CONNECTED;
			sbp->sb_ostate = SBD_STAT_CONFIGURED;
		} else if (devs_present != devs_unattached) {
			/*
			 * Only some devices are fully attached.
			 */
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_PARTIAL);
			sbp->sb_rstate = SBD_STAT_CONNECTED;
			sbp->sb_ostate = SBD_STAT_UNCONFIGURED;
		}
		(void) drv_getparm(TIME, (void *)&sbp->sb_time);
		break;

	case SBD_STATE_PARTIAL:
		ASSERT(devs_present);
		/*
		 * All devices finally attached.
		 */
		if (devs_unattached == 0) {
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONFIGURED);
			sbp->sb_rstate = SBD_STAT_CONNECTED;
			sbp->sb_ostate = SBD_STAT_CONFIGURED;
			(void) drv_getparm(TIME, (void *)&sbp->sb_time);
		}
		break;

	default:
		break;
	}

	/* Free the devlist and its per-entry error strings. */
	if (max_units && devlist) {
		int	i;	/* NOTE(review): shadows outer 'i' */

		for (i = 0; i < max_units; i++) {
			if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
				SBD_FREE_ERR(&devlist[i].dv_error);
			} else {
				break;
			}
		}
		FREESTRUCT(devlist, sbd_devlist_t, max_units);
	}

	/*
	 * Our policy is to attach all components that are
	 * possible, thus we always return "success" on the
	 * pre and post operations.
	 */
	return (0);
}
3218 
3219 /*
3220  * We only need to "release" cpu and memory devices.
3221  */
/*
 * Return the devlist of devices to release for the given pass:
 * memory first (it must be released before the CPUs backing it),
 * then CPUs, then IO.  present_only = 0: only present-and-attached
 * devices qualify.  The static next_pass variable carries pass state
 * across calls exactly as in sbd_get_attach_devlist().
 */
static sbd_devlist_t *
sbd_get_release_devlist(sbd_handle_t *hp, int32_t *devnump, int32_t pass)
{
	sbd_board_t	*sbp;
	uint_t		uset;
	sbd_devset_t	devset;
	sbd_devlist_t	*release_devlist;
	static int	next_pass = 1;
	static fn_t	f = "sbd_get_release_devlist";

	PR_ALL("%s (pass = %d)...\n", f, pass);

	sbp = SBDH2BD(hp->h_sbd);
	devset = HD2MACHHD(hp)->sh_devset;

	*devnump = 0;
	release_devlist = NULL;

	/*
	 * We switch on next_pass for the cases where a board
	 * does not contain a particular type of component.
	 * In these situations we don't want to return NULL
	 * prematurely.  We need to check other devices and
	 * we don't want to check the same type multiple times.
	 * For example, if there were no cpus, then on pass 1
	 * we would drop through and return the memory nodes.
	 * However, on pass 2 we would switch back to the memory
	 * nodes thereby returning them twice!  Using next_pass
	 * forces us down to the end (or next item).
	 */
	if (pass == 1)
		next_pass = 1;

	switch (next_pass) {
	case 1:
		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_MEM);

			release_devlist = sbd_get_devlist(hp, sbp,
						SBD_COMP_MEM,
						MAX_MEM_UNITS_PER_BOARD,
						uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
			if (!devset || release_devlist) {
				next_pass = 2;
				return (release_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there isn't any memory, then
			 * just fall through to next component.
			 */
		}
		/*FALLTHROUGH*/


	case 2:
		if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_CPU);

			release_devlist = sbd_get_devlist(hp, sbp,
						SBD_COMP_CPU,
						MAX_CPU_UNITS_PER_BOARD,
						uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_CPU, DEVSET_ANYUNIT);
			if (!devset || release_devlist) {
				next_pass = 3;
				return (release_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there aren't any cpus, then just
			 * fall through to check for the next component.
			 */
		}
		/*FALLTHROUGH*/


	case 3:
		next_pass = -1;
		if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_IO);

			release_devlist = sbd_get_devlist(hp, sbp,
						SBD_COMP_IO,
						MAX_IO_UNITS_PER_BOARD,
						uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
			if (!devset || release_devlist) {
				next_pass = 4;
				return (release_devlist);
			}
		}
		/*FALLTHROUGH*/

	default:
		*devnump = 0;
		return (NULL);
	}
	/*NOTREACHED*/
}
3326 
/*
 * Run the per-nodetype pre-release hook.  For CPUs, refuse the
 * release if any memory unit remains attached and is not itself in
 * the caller's requested devset (memory must be unconfigured before
 * its CPUs).  On failure the devlist built by
 * sbd_get_release_devlist() is freed here.  Returns 0 on success,
 * -1 on failure.
 */
static int
sbd_pre_release_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
			int32_t devnum)
{
	int		max_units = 0, rv = 0;
	sbd_comp_type_t	nodetype;
	static fn_t	f = "sbd_pre_release_devlist";

	nodetype = sbd_get_devtype(hp, devlist->dv_dip);

	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
		f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);

	switch (nodetype) {
	case SBD_COMP_CPU: {
		int			i, mem_present = 0;
		sbd_board_t		*sbp = SBDH2BD(hp->h_sbd);
		sbd_devset_t		devset;
		sbd_priv_handle_t	*shp = HD2MACHHD(hp);

		max_units = MAX_CPU_UNITS_PER_BOARD;

		devset = shp->sh_orig_devset;

		for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
			/*
			 * if client also requested to unconfigure memory
			 * the we allow the operation. Therefore
			 * we need to warranty that memory gets unconfig
			 * before cpus
			 */

			if (DEVSET_IN_SET(devset, SBD_COMP_MEM, i)) {
				continue;
			}
			if (SBD_DEV_IS_ATTACHED(sbp, SBD_COMP_MEM, i)) {
				mem_present = 1;
				break;
			}
		}
		if (mem_present) {
			/*
			 * 'i' still indexes the offending memory unit
			 * here, because mem_present is only set on the
			 * break above.
			 */
			sbderror_t	*ep = SBD_HD2ERR(hp);
			SBD_SET_ERR(ep, ESBD_MEMONLINE);
			SBD_SET_ERRSTR(ep, sbp->sb_mempath[i]);
			rv = -1;
		} else {
			rv = sbd_pre_release_cpu(hp, devlist, devnum);
		}

		break;

	}
	case SBD_COMP_MEM:
		max_units = MAX_MEM_UNITS_PER_BOARD;
		rv = sbd_pre_release_mem(hp, devlist, devnum);
		break;


	case SBD_COMP_IO:
		max_units = MAX_IO_UNITS_PER_BOARD;
		rv = sbd_pre_release_io(hp, devlist, devnum);
		break;

	default:
		rv = -1;
		break;
	}

	if (rv && max_units) {
		int	i;

		/*
		 * the individual pre_release component routines should
		 * have set the error in the handle.  No need to set it
		 * here
		 *
		 * Need to clean up dynamically allocated devlist
		 * if pre-op is going to fail.
		 */
		for (i = 0; i < max_units; i++) {
			if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
				SBD_FREE_ERR(&devlist[i].dv_error);
			} else {
				break;
			}
		}
		FREESTRUCT(devlist, sbd_devlist_t, max_units);
	}

	return (rv ? -1 : 0);
}
3418 
/*
 * Run post-release checks for the devlist, fold any per-entry errors
 * into the handle, and free the devlist built by
 * sbd_get_release_devlist().  For IO, a remaining reference only
 * logs a debug message here (the refs check sets its own errors).
 * Returns -1 if the handle carries an errno, else 0.
 */
static int
sbd_post_release_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
			int32_t devnum)
{
	int		i, max_units = 0;
	sbd_comp_type_t	nodetype;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbdp_handle_t	*hdp;
	sbd_error_t	*spe;
	static fn_t	f = "sbd_post_release_devlist";

	nodetype = sbd_get_devtype(hp, devlist->dv_dip);
	ASSERT(nodetype >= SBD_COMP_CPU && nodetype <= SBD_COMP_IO);

	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
		f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);

	/*
	 * Need to free up devlist[] created earlier in
	 * sbd_get_release_devlist().
	 */
	switch (nodetype) {
	case SBD_COMP_CPU:
		max_units = MAX_CPU_UNITS_PER_BOARD;
		break;

	case SBD_COMP_MEM:
		max_units = MAX_MEM_UNITS_PER_BOARD;
		break;

	case SBD_COMP_IO:
		/*
		 *  Need to check if specific I/O is referenced and
		 *  fail post-op.
		 */

		if (sbd_check_io_refs(hp, devlist, devnum) > 0) {
				PR_IO("%s: error - I/O devices ref'd\n", f);
		}

		max_units = MAX_IO_UNITS_PER_BOARD;
		break;

	default:
		{
			cmn_err(CE_WARN, "%s: invalid nodetype (%d)",
				f, (int)nodetype);
			SBD_SET_ERR(HD2MACHERR(hp), ESBD_INVAL);
		}
		break;
	}
	hdp = sbd_get_sbdp_handle(sbp, hp);
	/* spe is used only for logging the platform error code below. */
	spe = hdp->h_err;

	/*
	 * Propagate per-entry errors into the handle; the unit lookup
	 * is purely a validity check here — a good unit number is not
	 * used further.
	 */
	for (i = 0; i < devnum; i++) {
		int		unit;
		sbderror_t	*ep;

		ep = &devlist[i].dv_error;

		if (sbd_set_err_in_hdl(hp, ep) == 0) {
			continue;
		}

		unit = sbdp_get_unit_num(hdp, devlist[i].dv_dip);
		if (unit == -1) {
			SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
			PR_ALL("%s bad unit num: %d code %d",
			    f, unit, spe->e_code);
			continue;
		}
	}
	sbd_release_sbdp_handle(hdp);

	if (SBD_GET_ERRNO(SBD_HD2ERR(hp))) {
		PR_ALL("%s: errno %d, ecode %d during release\n",
			f, SBD_GET_ERRNO(SBD_HD2ERR(hp)),
			SBD_GET_ERR(SBD_HD2ERR(hp)));
	}

	/* Free the devlist and its per-entry error strings. */
	if (max_units && devlist) {
		int	i;

		for (i = 0; i < max_units; i++) {
			if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
				SBD_FREE_ERR(&devlist[i].dv_error);
			} else {
				break;
			}
		}
		FREESTRUCT(devlist, sbd_devlist_t, max_units);
	}

	return (SBD_GET_ERRNO(SBD_HD2ERR(hp)) ? -1 : 0);
}
3514 
/*
 * Mark a single unit as unreferenced and transition it to the
 * UNREFERENCED state.  Common completion path for non-memory units.
 */
static void
sbd_release_dev_done(sbd_board_t *sbp, sbd_comp_type_t nodetype, int unit)
{
	SBD_DEV_SET_UNREFERENCED(sbp, nodetype, unit);
	SBD_DEVICE_TRANSITION(sbp, nodetype, unit, SBD_STATE_UNREFERENCED);
}
3521 
/*
 * Completion handler for a device release: resolve the unit number
 * from the dip, transition the unit (memory has its own done path)
 * to UNREFERENCED, and, once every released device on the board is
 * unreferenced, transition the board itself to UNREFERENCED.
 */
static void
sbd_release_done(sbd_handle_t *hp, sbd_comp_type_t nodetype, dev_info_t *dip)
{
	int		unit;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbderror_t	*ep;
	static fn_t	f = "sbd_release_done";
	sbdp_handle_t	*hdp;

	PR_ALL("%s...\n", f);

	hdp = sbd_get_sbdp_handle(sbp, hp);
	ep = SBD_HD2ERR(hp);

	if ((unit = sbdp_get_unit_num(hdp, dip)) < 0) {
		cmn_err(CE_WARN,
			"sbd:%s: unable to get unit for dip (0x%p)",
			f, (void *)dip);
		SBD_GET_PERR(hdp->h_err, ep);
		sbd_release_sbdp_handle(hdp);
		return;
	}
	sbd_release_sbdp_handle(hdp);

	/*
	 * Transfer the device which just completed its release
	 * to the UNREFERENCED state.
	 */
	switch (nodetype) {

	case SBD_COMP_MEM:
		sbd_release_mem_done((void *)hp, unit);
		break;

	default:
		sbd_release_dev_done(sbp, nodetype, unit);
		break;
	}

	/*
	 * If the entire board was released and all components
	 * unreferenced then transfer it to the UNREFERENCED state.
	 */
	if (SBD_DEVS_RELEASED(sbp) == SBD_DEVS_UNREFERENCED(sbp)) {
		SBD_BOARD_TRANSITION(sbp, SBD_STATE_UNREFERENCED);
		(void) drv_getparm(TIME, (void *)&sbp->sb_time);
	}
}
3570 
3571 static sbd_devlist_t *
3572 sbd_get_detach_devlist(sbd_handle_t *hp, int32_t *devnump, int32_t pass)
3573 {
3574 	sbd_board_t	*sbp;
3575 	uint_t		uset;
3576 	sbd_devset_t	devset;
3577 	sbd_devlist_t	*detach_devlist;
3578 	static int	next_pass = 1;
3579 	static fn_t	f = "sbd_get_detach_devlist";
3580 
3581 	PR_ALL("%s (pass = %d)...\n", f, pass);
3582 
3583 	sbp = SBDH2BD(hp->h_sbd);
3584 	devset = HD2MACHHD(hp)->sh_devset;
3585 
3586 	*devnump = 0;
3587 	detach_devlist = NULL;
3588 
3589 	/*
3590 	 * We switch on next_pass for the cases where a board
3591 	 * does not contain a particular type of component.
3592 	 * In these situations we don't want to return NULL
3593 	 * prematurely.  We need to check other devices and
3594 	 * we don't want to check the same type multiple times.
3595 	 * For example, if there were no cpus, then on pass 1
3596 	 * we would drop through and return the memory nodes.
3597 	 * However, on pass 2 we would switch back to the memory
3598 	 * nodes thereby returning them twice!  Using next_pass
3599 	 * forces us down to the end (or next item).
3600 	 */
3601 	if (pass == 1)
3602 		next_pass = 1;
3603 
3604 	switch (next_pass) {
3605 	case 1:
3606 		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
3607 			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_MEM);
3608 
3609 			detach_devlist = sbd_get_devlist(hp, sbp,
3610 						SBD_COMP_MEM,
3611 						MAX_MEM_UNITS_PER_BOARD,
3612 						uset, devnump, 0);
3613 
3614 			DEVSET_DEL(devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
3615 			if (!devset || detach_devlist) {
3616 				next_pass = 2;
3617 				return (detach_devlist);
3618 			}
3619 			/*
3620 			 * If the caller is interested in the entire
3621 			 * board, but there isn't any memory, then
3622 			 * just fall through to next component.
3623 			 */
3624 		}
3625 		/*FALLTHROUGH*/
3626 
3627 	case 2:
3628 		if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
3629 			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_CPU);
3630 
3631 			detach_devlist = sbd_get_devlist(hp, sbp,
3632 						SBD_COMP_CPU,
3633 						MAX_CPU_UNITS_PER_BOARD,
3634 						uset, devnump, 0);
3635 
3636 			DEVSET_DEL(devset, SBD_COMP_CPU, DEVSET_ANYUNIT);
3637 			if (!devset || detach_devlist) {
3638 				next_pass = 2;
3639 				return (detach_devlist);
3640 			}
3641 			/*
3642 			 * If the caller is interested in the entire
3643 			 * board, but there aren't any cpus, then just
3644 			 * fall through to check for the next component.
3645 			 */
3646 		}
3647 		/*FALLTHROUGH*/
3648 
3649 	case 3:
3650 		next_pass = -1;
3651 		if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
3652 			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_IO);
3653 
3654 			detach_devlist = sbd_get_devlist(hp, sbp,
3655 						SBD_COMP_IO,
3656 						MAX_IO_UNITS_PER_BOARD,
3657 						uset, devnump, 0);
3658 
3659 			DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
3660 			if (!devset || detach_devlist) {
3661 				next_pass = 4;
3662 				return (detach_devlist);
3663 			}
3664 		}
3665 		/*FALLTHROUGH*/
3666 
3667 	default:
3668 		*devnump = 0;
3669 		return (NULL);
3670 	}
3671 	/*NOTREACHED*/
3672 }
3673 
3674 static int
3675 sbd_pre_detach_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
3676 	int32_t devnum)
3677 {
3678 	int		rv = 0;
3679 	sbd_comp_type_t	nodetype;
3680 	static fn_t	f = "sbd_pre_detach_devlist";
3681 
3682 	nodetype = sbd_get_devtype(hp, devlist->dv_dip);
3683 
3684 	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
3685 		f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);
3686 
3687 	switch (nodetype) {
3688 	case SBD_COMP_CPU:
3689 		rv = sbd_pre_detach_cpu(hp, devlist, devnum);
3690 		break;
3691 
3692 	case SBD_COMP_MEM:
3693 		rv = sbd_pre_detach_mem(hp, devlist, devnum);
3694 		break;
3695 
3696 	case SBD_COMP_IO:
3697 		rv = sbd_pre_detach_io(hp, devlist, devnum);
3698 		break;
3699 
3700 	default:
3701 		rv = -1;
3702 		break;
3703 	}
3704 
3705 	/*
3706 	 * We want to continue attempting to detach
3707 	 * other components.
3708 	 */
3709 	return (rv);
3710 }
3711 
/*
 * Post-detach processing for a list of devices of a single component
 * type: run the type-specific post-detach routine, verify each device
 * is actually gone, update the per-device and board state machines,
 * and free the devlist allocated by sbd_get_detach_devlist().
 * Returns 0 on success, -1 if an errno was recorded in the handle.
 */
static int
sbd_post_detach_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
			int32_t devnum)
{
	int		i, max_units = 0, rv = 0;
	sbd_comp_type_t	nodetype;
	sbd_board_t	*sbp;
	sbd_istate_t	bstate;
	static fn_t	f = "sbd_post_detach_devlist";
	sbdp_handle_t	*hdp;

	sbp = SBDH2BD(hp->h_sbd);
	nodetype = sbd_get_devtype(hp, devlist->dv_dip);

	hdp = sbd_get_sbdp_handle(sbp, hp);

	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
		f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);

	/*
	 * Need to free up devlist[] created earlier in
	 * sbd_get_detach_devlist().  max_units records how many
	 * entries that allocation held for this component type.
	 */
	switch (nodetype) {
	case SBD_COMP_CPU:
		max_units = MAX_CPU_UNITS_PER_BOARD;
		rv = sbd_post_detach_cpu(hp, devlist, devnum);
		break;

	case SBD_COMP_MEM:
		max_units = MAX_MEM_UNITS_PER_BOARD;
		rv = sbd_post_detach_mem(hp, devlist, devnum);
		break;

	case SBD_COMP_IO:
		max_units = MAX_IO_UNITS_PER_BOARD;
		rv = sbd_post_detach_io(hp, devlist, devnum);
		break;

	default:
		rv = -1;
		break;
	}


	for (i = 0; i < devnum; i++) {
		int		unit;
		sbderror_t	*ep;
		dev_info_t	*dip;

		ep = &devlist[i].dv_error;

		/* skip entries whose error could not be recorded in the hdl */
		if (sbd_set_err_in_hdl(hp, ep) == 0)
			continue;

		dip = devlist[i].dv_dip;
		unit = sbdp_get_unit_num(hdp, dip);
		if (unit == -1) {
			/* with FORCE keep going; otherwise record and stop */
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE)
				continue;
			else {
				SBD_GET_PERR(hdp->h_err, ep);
				break;
			}
		}
		nodetype = sbd_get_devtype(hp, dip);

		if (sbd_check_unit_attached(sbp, dip, unit, nodetype,
		    ep) >= 0) {
			/*
			 * Device is still attached probably due
			 * to an error.  Need to keep track of it.
			 */
			PR_ALL("%s: ERROR (nt=%s, b=%d, u=%d) not detached\n",
				f, sbd_ct_str[(int)nodetype], sbp->sb_num,
				unit);
			continue;
		}

		/* device is gone: clear every state bit, mark unconfigured */
		SBD_DEV_CLR_ATTACHED(sbp, nodetype, unit);
		SBD_DEV_CLR_RELEASED(sbp, nodetype, unit);
		SBD_DEV_CLR_UNREFERENCED(sbp, nodetype, unit);
		SBD_DEVICE_TRANSITION(sbp, nodetype, unit,
						SBD_STATE_UNCONFIGURED);
	}
	sbd_release_sbdp_handle(hdp);

	/*
	 * Recompute the board state: UNCONFIGURED once everything is
	 * unattached, PARTIAL while some devices remain attached.
	 */
	bstate = SBD_BOARD_STATE(sbp);
	if (bstate != SBD_STATE_UNCONFIGURED) {
		if (SBD_DEVS_PRESENT(sbp) == SBD_DEVS_UNATTACHED(sbp)) {
			/*
			 * All devices are finally detached.
			 */
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_UNCONFIGURED);
		} else if ((SBD_BOARD_STATE(sbp) != SBD_STATE_PARTIAL) &&
				SBD_DEVS_ATTACHED(sbp)) {
			/*
			 * Some devices remain attached.
			 */
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_PARTIAL);
		}
	}

	if (rv) {
		PR_ALL("%s: errno %d, ecode %d during detach\n",
			f, SBD_GET_ERRNO(SBD_HD2ERR(hp)),
			SBD_GET_ERR(HD2MACHERR(hp)));
	}

	/* free the per-device error strings, then the devlist itself */
	if (max_units && devlist) {
		int	i;

		for (i = 0; i < max_units; i++) {
			if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
				SBD_FREE_ERR(&devlist[i].dv_error);
			} else {
				break;
			}
		}
		FREESTRUCT(devlist, sbd_devlist_t, max_units);
	}

	return (SBD_GET_ERRNO(SBD_HD2ERR(hp)) ? -1 : 0);
}
3836 
3837 /*
3838  * Return the unit number of the respective dip if
3839  * it's found to be attached.
3840  */
3841 static int
3842 sbd_check_unit_attached(sbd_board_t *sbp, dev_info_t *dip, int unit,
3843 	sbd_comp_type_t nodetype, sbderror_t *ep)
3844 {
3845 	int		rv = -1;
3846 	processorid_t	cpuid;
3847 	uint64_t	basepa, endpa;
3848 	struct memlist	*ml;
3849 	extern struct memlist	*phys_install;
3850 	sbdp_handle_t	*hdp;
3851 	sbd_handle_t	*hp = MACHBD2HD(sbp);
3852 	static fn_t	f = "sbd_check_unit_attached";
3853 
3854 	hdp = sbd_get_sbdp_handle(sbp, hp);
3855 
3856 	switch (nodetype) {
3857 
3858 	case SBD_COMP_CPU:
3859 		cpuid = sbdp_get_cpuid(hdp, dip);
3860 		if (cpuid < 0) {
3861 			break;
3862 		}
3863 		mutex_enter(&cpu_lock);
3864 		if (cpu_get(cpuid) != NULL)
3865 			rv = unit;
3866 		mutex_exit(&cpu_lock);
3867 		break;
3868 
3869 	case SBD_COMP_MEM:
3870 		if (sbdphw_get_base_physaddr(hdp, dip, &basepa)) {
3871 			break;
3872 		}
3873 		if (sbdp_get_mem_alignment(hdp, dip, &endpa)) {
3874 			cmn_err(CE_WARN, "%s sbdp_get_mem_alignment fail", f);
3875 			break;
3876 		}
3877 
3878 		basepa &= ~(endpa - 1);
3879 		endpa += basepa;
3880 		/*
3881 		 * Check if base address is in phys_install.
3882 		 */
3883 		memlist_read_lock();
3884 		for (ml = phys_install; ml; ml = ml->next)
3885 			if ((endpa <= ml->address) ||
3886 					(basepa >= (ml->address + ml->size)))
3887 				continue;
3888 			else
3889 				break;
3890 		memlist_read_unlock();
3891 		if (ml != NULL)
3892 			rv = unit;
3893 		break;
3894 
3895 	case SBD_COMP_IO:
3896 	{
3897 		dev_info_t	*tdip, *pdip;
3898 
3899 		tdip = dip;
3900 
3901 		/*
3902 		 * ddi_walk_devs() requires that topdip's parent be held.
3903 		 */
3904 		pdip = ddi_get_parent(sbp->sb_topdip);
3905 		if (pdip) {
3906 			ndi_hold_devi(pdip);
3907 			ndi_devi_enter(pdip, &rv);
3908 		}
3909 		ddi_walk_devs(sbp->sb_topdip, sbd_check_io_attached,
3910 			(void *)&tdip);
3911 		if (pdip) {
3912 			ndi_devi_exit(pdip, rv);
3913 			ndi_rele_devi(pdip);
3914 		}
3915 
3916 		if (tdip == NULL)
3917 			rv = unit;
3918 		else
3919 			rv = -1;
3920 		break;
3921 	}
3922 
3923 	default:
3924 		PR_ALL("%s: unexpected nodetype(%d) for dip 0x%p\n",
3925 			f, nodetype, (void *)dip);
3926 		rv = -1;
3927 		break;
3928 	}
3929 
3930 	/*
3931 	 * Save the error that sbdp sent us and report it
3932 	 */
3933 	if (rv == -1)
3934 		SBD_GET_PERR(hdp->h_err, ep);
3935 
3936 	sbd_release_sbdp_handle(hdp);
3937 
3938 	return (rv);
3939 }
3940 
3941 /*
3942  * Return memhandle, if in fact, this memunit is the owner of
3943  * a scheduled memory delete.
3944  */
3945 int
3946 sbd_get_memhandle(sbd_handle_t *hp, dev_info_t *dip, memhandle_t *mhp)
3947 {
3948 	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
3949 	sbd_mem_unit_t	*mp;
3950 	sbdp_handle_t	*hdp;
3951 	int		unit;
3952 	static fn_t	f = "sbd_get_memhandle";
3953 
3954 	PR_MEM("%s...\n", f);
3955 
3956 	hdp = sbd_get_sbdp_handle(sbp, hp);
3957 
3958 	unit = sbdp_get_unit_num(hdp, dip);
3959 	if (unit == -1) {
3960 		SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
3961 		sbd_release_sbdp_handle(hdp);
3962 		return (-1);
3963 	}
3964 	sbd_release_sbdp_handle(hdp);
3965 
3966 	mp = SBD_GET_BOARD_MEMUNIT(sbp, unit);
3967 
3968 	if (mp->sbm_flags & SBD_MFLAG_RELOWNER) {
3969 		*mhp = mp->sbm_memhandle;
3970 		return (0);
3971 	} else {
3972 		SBD_SET_ERR(SBD_HD2ERR(hp), ESBD_INTERNAL);
3973 		SBD_SET_ERRSTR(SBD_HD2ERR(hp), sbp->sb_mempath[unit]);
3974 		return (-1);
3975 	}
3976 	/*NOTREACHED*/
3977 }
3978 
3979 
3980 static int
3981 sbd_cpu_cnt(sbd_handle_t *hp, sbd_devset_t devset)
3982 {
3983 	int		c, cix;
3984 	sbd_board_t	*sbp;
3985 
3986 	sbp = SBDH2BD(hp->h_sbd);
3987 
3988 	/*
3989 	 * Only look for requested devices that are actually present.
3990 	 */
3991 	devset &= SBD_DEVS_PRESENT(sbp);
3992 
3993 	for (c = cix = 0; c < MAX_CMP_UNITS_PER_BOARD; c++) {
3994 		/*
3995 		 * Index for core 1 , if exists.
3996 		 * With the current implementation it is
3997 		 * MAX_CMP_UNITS_PER_BOARD off from core 0.
3998 		 * The calculation will need to change if
3999 		 * the assumption is no longer true.
4000 		 */
4001 		int		c1 = c + MAX_CMP_UNITS_PER_BOARD;
4002 
4003 		if (DEVSET_IN_SET(devset, SBD_COMP_CMP, c) == 0) {
4004 			continue;
4005 		}
4006 
4007 		/*
4008 		 * Check to see if the dip(s) exist for this chip
4009 		 */
4010 		if ((sbp->sb_devlist[NIX(SBD_COMP_CMP)][c] == NULL) &&
4011 		    (sbp->sb_devlist[NIX(SBD_COMP_CMP)][c1] == NULL))
4012 			continue;
4013 
4014 		cix++;
4015 	}
4016 
4017 	return (cix);
4018 }
4019 
4020 static int
4021 sbd_mem_cnt(sbd_handle_t *hp, sbd_devset_t devset)
4022 {
4023 	int		i, ix;
4024 	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
4025 
4026 	/*
4027 	 * Only look for requested devices that are actually present.
4028 	 */
4029 	devset &= SBD_DEVS_PRESENT(sbp);
4030 
4031 	for (i = ix = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
4032 		dev_info_t	*dip;
4033 
4034 		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, i) == 0) {
4035 			continue;
4036 		}
4037 
4038 		dip = sbp->sb_devlist[NIX(SBD_COMP_MEM)][i];
4039 		if (dip == NULL)
4040 			continue;
4041 
4042 		ix++;
4043 	}
4044 
4045 	return (ix);
4046 }
4047 
4048 /*
4049  * NOTE: This routine is only partially smart about multiple
4050  *	 mem-units.  Need to make mem-status structure smart
4051  *	 about them also.
4052  */
4053 static int
4054 sbd_mem_status(sbd_handle_t *hp, sbd_devset_t devset, sbd_dev_stat_t *dsp)
4055 {
4056 	int		m, mix, rv;
4057 	memdelstat_t	mdst;
4058 	memquery_t	mq;
4059 	sbd_board_t	*sbp;
4060 	sbd_mem_unit_t	*mp;
4061 	sbd_mem_stat_t	*msp;
4062 	extern int	kcage_on;
4063 	int		i;
4064 	static fn_t	f = "sbd_mem_status";
4065 
4066 	sbp = SBDH2BD(hp->h_sbd);
4067 
4068 	/*
4069 	 * Check the present devset and access the dip with
4070 	 * status lock held to protect agains a concurrent
4071 	 * unconfigure or disconnect thread.
4072 	 */
4073 	mutex_enter(&sbp->sb_slock);
4074 
4075 	/*
4076 	 * Only look for requested devices that are actually present.
4077 	 */
4078 	devset &= SBD_DEVS_PRESENT(sbp);
4079 
4080 	for (m = mix = 0; m < MAX_MEM_UNITS_PER_BOARD; m++) {
4081 		dev_info_t	*dip;
4082 
4083 
4084 		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, m) == 0)
4085 			continue;
4086 
4087 		/*
4088 		 * Check to make sure the memory unit is in a state
4089 		 * where its fully initialized.
4090 		 */
4091 		if (SBD_DEVICE_STATE(sbp, SBD_COMP_MEM, m) == SBD_STATE_EMPTY)
4092 			continue;
4093 
4094 		dip = sbp->sb_devlist[NIX(SBD_COMP_MEM)][m];
4095 		if (dip == NULL)
4096 			continue;
4097 
4098 		mp = SBD_GET_BOARD_MEMUNIT(sbp, m);
4099 
4100 		msp = &dsp->d_mem;
4101 
4102 		bzero((caddr_t)msp, sizeof (*msp));
4103 		msp->ms_type = SBD_COMP_MEM;
4104 
4105 		/*
4106 		 * The plugin expects -1 for the mem unit
4107 		 */
4108 		msp->ms_cm.c_id.c_unit = -1;
4109 
4110 		/*
4111 		 * Get the memory name from what sbdp gave us
4112 		 */
4113 		for (i = 0; SBD_COMP(i) != SBD_COMP_UNKNOWN; i++) {
4114 			if (SBD_COMP(i) == SBD_COMP_MEM) {
4115 				(void) strcpy(msp->ms_name, SBD_DEVNAME(i));
4116 			}
4117 		}
4118 		msp->ms_cm.c_cond = mp->sbm_cm.sbdev_cond;
4119 		msp->ms_cm.c_busy = mp->sbm_cm.sbdev_busy;
4120 		msp->ms_cm.c_time = mp->sbm_cm.sbdev_time;
4121 
4122 		/* XXX revisit this after memory conversion */
4123 		msp->ms_ostate = ostate_cvt(SBD_DEVICE_STATE(
4124 			sbp, SBD_COMP_MEM, m));
4125 
4126 		msp->ms_basepfn = mp->sbm_basepfn;
4127 		msp->ms_pageslost = mp->sbm_pageslost;
4128 		msp->ms_cage_enabled = kcage_on;
4129 		msp->ms_interleave = mp->sbm_interleave;
4130 
4131 		if (mp->sbm_flags & SBD_MFLAG_RELOWNER)
4132 			rv = kphysm_del_status(mp->sbm_memhandle, &mdst);
4133 		else
4134 			rv = KPHYSM_EHANDLE;	/* force 'if' to fail */
4135 
4136 		if (rv == KPHYSM_OK) {
4137 			msp->ms_totpages += mdst.phys_pages;
4138 
4139 			/*
4140 			 * Any pages above managed is "free",
4141 			 * i.e. it's collected.
4142 			 */
4143 			msp->ms_detpages += (uint_t)(mdst.collected +
4144 							mdst.phys_pages -
4145 							mdst.managed);
4146 		} else {
4147 			msp->ms_totpages += (uint_t)mp->sbm_npages;
4148 
4149 			/*
4150 			 * If we're UNREFERENCED or UNCONFIGURED,
4151 			 * then the number of detached pages is
4152 			 * however many pages are on the board.
4153 			 * I.e. detached = not in use by OS.
4154 			 */
4155 			switch (msp->ms_cm.c_ostate) {
4156 			/*
4157 			 * changed to use cfgadm states
4158 			 *
4159 			 * was:
4160 			 *	case SFDR_STATE_UNREFERENCED:
4161 			 *	case SFDR_STATE_UNCONFIGURED:
4162 			 */
4163 			case SBD_STAT_UNCONFIGURED:
4164 				msp->ms_detpages = msp->ms_totpages;
4165 				break;
4166 
4167 			default:
4168 				break;
4169 			}
4170 		}
4171 
4172 		rv = kphysm_del_span_query(mp->sbm_basepfn,
4173 						mp->sbm_npages, &mq);
4174 		if (rv == KPHYSM_OK) {
4175 			msp->ms_managed_pages = mq.managed;
4176 			msp->ms_noreloc_pages = mq.nonrelocatable;
4177 			msp->ms_noreloc_first = mq.first_nonrelocatable;
4178 			msp->ms_noreloc_last = mq.last_nonrelocatable;
4179 			msp->ms_cm.c_sflags = 0;
4180 			if (mq.nonrelocatable) {
4181 				SBD_SET_SUSPEND(SBD_CMD_UNCONFIGURE,
4182 				    dsp->ds_suspend);
4183 			}
4184 		} else {
4185 			PR_MEM("%s: kphysm_del_span_query() = %d\n", f, rv);
4186 		}
4187 
4188 		mix++;
4189 		dsp++;
4190 	}
4191 
4192 	mutex_exit(&sbp->sb_slock);
4193 
4194 	return (mix);
4195 }
4196 
/*
 * Undo a previous release: return the handle's "released"
 * (unreferenced) devices to the CONFIGURED state where possible, and
 * restore the board's state machine accordingly.
 */
static void
sbd_cancel(sbd_handle_t *hp)
{
	int		i;
	sbd_devset_t	devset;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	static fn_t	f = "sbd_cancel";
	int		rv;

	PR_ALL("%s...\n", f);

	/*
	 * Only devices which have been "released" are
	 * subject to cancellation.
	 */
	devset = HD2MACHHD(hp)->sh_devset & SBD_DEVS_UNREFERENCED(sbp);

	/*
	 * Nothing to do for CPUs or IO other than change back
	 * their state.
	 */
	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
			continue;
		/* a fatal cpu cancel leaves the unit in the FATAL state */
		if (sbd_cancel_cpu(hp, i) != SBD_CPUERR_FATAL) {
			SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU, i,
						SBD_STATE_CONFIGURED);
		} else {
			SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU, i,
						SBD_STATE_FATAL);
		}
	}

	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
		if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
			continue;
		SBD_DEVICE_TRANSITION(sbp, SBD_COMP_IO, i,
					SBD_STATE_CONFIGURED);
	}

	/*
	 * Memory: 0 means cancelled, -1 means fatal; any other return
	 * leaves the unit's state untouched.
	 */
	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
			continue;
		if ((rv = sbd_cancel_mem(hp, i)) == 0) {
			SBD_DEVICE_TRANSITION(sbp, SBD_COMP_MEM, i,
						SBD_STATE_CONFIGURED);
		} else if (rv == -1) {
			SBD_DEVICE_TRANSITION(sbp, SBD_COMP_MEM, i,
						SBD_STATE_FATAL);
		}
	}

	PR_ALL("%s: unreleasing devset (0x%x)\n", f, (uint_t)devset);

	SBD_DEVS_CANCEL(sbp, devset);

	if (SBD_DEVS_UNREFERENCED(sbp) == 0) {
		sbd_istate_t	new_state;
		/*
		 * If the board no longer has any released devices
		 * then transfer it back to the CONFIG/PARTIAL state.
		 */
		if (SBD_DEVS_ATTACHED(sbp) == SBD_DEVS_PRESENT(sbp))
			new_state = SBD_STATE_CONFIGURED;
		else
			new_state = SBD_STATE_PARTIAL;
		if (SBD_BOARD_STATE(sbp) != new_state) {
			SBD_BOARD_TRANSITION(sbp, new_state);
		}
		sbp->sb_ostate = SBD_STAT_CONFIGURED;
		(void) drv_getparm(TIME, (void *)&sbp->sb_time);
	}
}
4270 
4271 static void
4272 sbd_get_ncm(sbd_handle_t *hp)
4273 {
4274 	sbd_devset_t devset;
4275 	sbd_priv_handle_t	*shp = HD2MACHHD(hp);
4276 	sbd_cmd_t		*cmdp =  (sbd_cmd_t *)hp->h_iap;
4277 	int			error;
4278 
4279 	/* pre_op restricted the devices to those selected by the ioctl */
4280 	devset = shp->sh_devset;
4281 
4282 	cmdp->cmd_getncm.g_ncm = sbd_cpu_cnt(hp, devset)
4283 		+ sbd_io_cnt(hp, devset) + sbd_mem_cnt(hp, devset);
4284 
4285 	error = sbd_copyout_ioarg(hp->h_mode, hp->h_cmd, cmdp,
4286 		(sbd_ioctl_arg_t *)shp->sh_arg);
4287 
4288 	if (error != 0)
4289 		SBD_SET_ERRNO(SBD_HD2ERR(hp), error);
4290 }
4291 
4292 static void
4293 sbd_status(sbd_handle_t *hp)
4294 {
4295 	int			nstat, mode, ncm, sz, cksz;
4296 	sbd_priv_handle_t	*shp = HD2MACHHD(hp);
4297 	sbd_devset_t		devset;
4298 	sbd_board_t		*sbp = SBDH2BD(hp->h_sbd);
4299 	sbd_stat_t		*dstatp;
4300 	sbd_cmd_t		*cmdp =  (sbd_cmd_t *)hp->h_iap;
4301 	sbdp_handle_t		*hdp;
4302 	sbd_dev_stat_t		*devstatp;
4303 
4304 #ifdef _MULTI_DATAMODEL
4305 	int			sz32;
4306 	sbd_stat32_t		*dstat32p;
4307 #endif /* _MULTI_DATAMODEL */
4308 
4309 	static fn_t	f = "sbd_status";
4310 
4311 	mode = hp->h_mode;
4312 	devset = shp->sh_devset;
4313 
4314 	devset &= SBD_DEVS_PRESENT(sbp);
4315 
4316 	if (cmdp->cmd_cm.c_id.c_type == SBD_COMP_NONE) {
4317 		if (cmdp->cmd_cm.c_flags & SBD_FLAG_ALLCMP) {
4318 			/*
4319 			 * Get the number of components "ncm" on the board.
4320 			 * Calculate size of buffer required to store one
4321 			 * sbd_stat_t structure plus ncm-1 sbd_dev_stat_t
4322 			 * structures. Note that sbd_stat_t already contains
4323 			 * one sbd_dev_stat_t, so only an additional ncm-1
4324 			 * sbd_dev_stat_t structures need to be accounted for
4325 			 * in the calculation when more than one component
4326 			 * is present.
4327 			 */
4328 			ncm = sbd_cpu_cnt(hp, devset) + sbd_io_cnt(hp, devset) +
4329 			    sbd_mem_cnt(hp, devset);
4330 
4331 		} else {
4332 			/*
4333 			 * In the case of c_type == SBD_COMP_NONE, and
4334 			 * SBD_FLAG_ALLCMP not specified, only the board
4335 			 * info is to be returned, no components.
4336 			 */
4337 			ncm = 0;
4338 			devset = 0;
4339 		}
4340 	} else {
4341 		/* Confirm that only one component is selected. */
4342 		ncm = sbd_cpu_cnt(hp, devset) + sbd_io_cnt(hp, devset) +
4343 		    sbd_mem_cnt(hp, devset);
4344 		if (ncm != 1) {
4345 			PR_ALL("%s: expected ncm of 1, got %d, devset 0x%x\n",
4346 			    f, ncm, devset);
4347 			SBD_SET_ERRNO(SBD_HD2ERR(hp), EINVAL);
4348 			return;
4349 		}
4350 	}
4351 
4352 	sz = sizeof (sbd_stat_t);
4353 	if (ncm > 1)
4354 		sz += sizeof (sbd_dev_stat_t) * (ncm - 1);
4355 
4356 	cksz = sz;
4357 
4358 	/*
4359 	 * s_nbytes describes the size of the preallocated user
4360 	 * buffer into which the application is executing to
4361 	 * receive the sbd_stat_t and sbd_dev_stat_t structures.
4362 	 * This buffer must be at least the required (sz) size.
4363 	 */
4364 
4365 #ifdef _MULTI_DATAMODEL
4366 
4367 	/*
4368 	 * More buffer space is required for the 64bit to 32bit
4369 	 * conversion of data structures.
4370 	 */
4371 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
4372 		sz32 = sizeof (sbd_stat32_t);
4373 		if (ncm > 1)
4374 			sz32  += sizeof (sbd_dev_stat32_t) * (ncm - 1);
4375 		cksz = sz32;
4376 	} else
4377 		sz32 = 0;
4378 #endif
4379 
4380 	if ((int)cmdp->cmd_stat.s_nbytes < cksz) {
4381 		PR_ALL("%s: ncm=%d s_nbytes = 0x%x\n", f, ncm,
4382 		    cmdp->cmd_stat.s_nbytes);
4383 		PR_ALL("%s: expected size of 0x%x\n", f, cksz);
4384 		SBD_SET_ERRNO(SBD_HD2ERR(hp), EINVAL);
4385 		return;
4386 	}
4387 
4388 	dstatp = kmem_zalloc(sz, KM_SLEEP);
4389 	devstatp = &dstatp->s_stat[0];
4390 
4391 #ifdef _MULTI_DATAMODEL
4392 	if (sz32 != 0)
4393 		dstat32p = kmem_zalloc(sz32, KM_SLEEP);
4394 #endif
4395 
4396 	/*
4397 	 * if connected or better, provide cached status if available,
4398 	 * otherwise call sbdp for status
4399 	 */
4400 	mutex_enter(&sbp->sb_flags_mutex);
4401 	switch (sbp->sb_state) {
4402 
4403 	case	SBD_STATE_CONNECTED:
4404 	case	SBD_STATE_PARTIAL:
4405 	case	SBD_STATE_CONFIGURED:
4406 		if (sbp->sb_flags & SBD_BOARD_STATUS_CACHED) {
4407 			bcopy(&sbp->sb_stat, dstatp, sizeof (sbd_stat_t));
4408 			dstatp->s_rstate = rstate_cvt(sbp->sb_state);
4409 			dstatp->s_ostate = ostate_cvt(sbp->sb_state);
4410 			dstatp->s_busy = sbp->sb_busy;
4411 			dstatp->s_time = sbp->sb_time;
4412 			dstatp->s_cond = sbp->sb_cond;
4413 			break;
4414 		}
4415 	/*FALLTHROUGH*/
4416 
4417 	default:
4418 		sbp->sb_flags &= ~SBD_BOARD_STATUS_CACHED;
4419 		dstatp->s_board = sbp->sb_num;
4420 		dstatp->s_ostate = ostate_cvt(sbp->sb_state);
4421 		dstatp->s_time = sbp->sb_time;
4422 
4423 		hdp = sbd_get_sbdp_handle(sbp, hp);
4424 
4425 		if (sbdp_get_board_status(hdp, dstatp) != 0) {
4426 			SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
4427 			sbd_release_sbdp_handle(hdp);
4428 #ifdef _MULTI_DATAMODEL
4429 			if (sz32 != 0)
4430 				kmem_free(dstat32p, sz32);
4431 #endif
4432 			kmem_free(dstatp, sz);
4433 			mutex_exit(&sbp->sb_flags_mutex);
4434 			return;
4435 		}
4436 		/*
4437 		 * Do not cache status if the busy flag has
4438 		 * been set by the call to sbdp_get_board_status().
4439 		 */
4440 		if (!dstatp->s_busy) {
4441 			/* Can get board busy flag now */
4442 			dstatp->s_busy = sbp->sb_busy;
4443 			sbp->sb_cond = (sbd_cond_t)dstatp->s_cond;
4444 			bcopy(dstatp, &sbp->sb_stat,
4445 				sizeof (sbd_stat_t));
4446 			sbp->sb_flags |= SBD_BOARD_STATUS_CACHED;
4447 		}
4448 		sbd_release_sbdp_handle(hdp);
4449 		break;
4450 	}
4451 	mutex_exit(&sbp->sb_flags_mutex);
4452 
4453 	if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT))
4454 		if ((nstat = sbd_cpu_flags(hp, devset, devstatp)) > 0) {
4455 			dstatp->s_nstat += nstat;
4456 			devstatp += nstat;
4457 		}
4458 
4459 	if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT))
4460 		if ((nstat = sbd_mem_status(hp, devset, devstatp)) > 0) {
4461 			dstatp->s_nstat += nstat;
4462 			devstatp += nstat;
4463 		}
4464 
4465 	if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT))
4466 		if ((nstat = sbd_io_status(hp, devset, devstatp)) > 0) {
4467 			dstatp->s_nstat += nstat;
4468 			devstatp += nstat;
4469 		}
4470 
4471 	/* paranoia: detect buffer overrun */
4472 	if ((caddr_t)devstatp > ((caddr_t)dstatp) + sz) {
4473 		PR_ALL("%s: buffer overrun\n", f);
4474 #ifdef _MULTI_DATAMODEL
4475 		if (sz32 != 0)
4476 			kmem_free(dstat32p, sz32);
4477 #endif
4478 		kmem_free(dstatp, sz);
4479 		SBD_SET_ERRNO(SBD_HD2ERR(hp), EINVAL);
4480 		return;
4481 	}
4482 
4483 /* if necessary, move data into intermediate device status buffer */
4484 #ifdef _MULTI_DATAMODEL
4485 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
4486 		int		i, j;
4487 
4488 		ASSERT(sz32 != 0);
4489 		/* paranoia: detect buffer overrun */
4490 		if ((caddr_t)&dstat32p->s_stat[dstatp->s_nstat] >
4491 		    ((caddr_t)dstat32p) + sz32) {
4492 			cmn_err(CE_WARN,
4493 				"sbd:%s: buffer32 overrun", f);
4494 #ifdef _MULTI_DATAMODEL
4495 			if (sz32 != 0)
4496 				kmem_free(dstat32p, sz32);
4497 #endif
4498 			kmem_free(dstatp, sz);
4499 			SBD_SET_ERRNO(SBD_HD2ERR(hp), EINVAL);
4500 			return;
4501 		}
4502 
4503 		/*
4504 		 * initialize 32 bit sbd board status structure
4505 		 */
4506 		dstat32p->s_board = (int32_t)dstatp->s_board;
4507 		dstat32p->s_nstat = (int32_t)dstatp->s_nstat;
4508 		dstat32p->s_rstate = dstatp->s_rstate;
4509 		dstat32p->s_ostate = dstatp->s_ostate;
4510 		dstat32p->s_cond = dstatp->s_cond;
4511 		dstat32p->s_busy = dstatp->s_busy;
4512 		dstat32p->s_time = dstatp->s_time;
4513 		dstat32p->s_assigned = dstatp->s_assigned;
4514 		dstat32p->s_power = dstatp->s_power;
4515 		dstat32p->s_platopts = (int32_t)dstatp->s_platopts;
4516 		(void) strcpy(dstat32p->s_type, dstatp->s_type);
4517 
4518 		for (i = 0; i < dstatp->s_nstat; i++) {
4519 			sbd_dev_stat_t	*dsp = &dstatp->s_stat[i];
4520 			sbd_dev_stat32_t	*ds32p = &dstat32p->s_stat[i];
4521 
4522 			/*
4523 			 * copy common data for the device
4524 			 */
4525 			ds32p->d_cm.ci_type = (int32_t)dsp->d_cm.ci_type;
4526 			ds32p->d_cm.ci_unit = (int32_t)dsp->d_cm.ci_unit;
4527 			ds32p->d_cm.c_ostate = (int32_t)dsp->d_cm.c_ostate;
4528 			ds32p->d_cm.c_cond = (int32_t)dsp->d_cm.c_cond;
4529 			ds32p->d_cm.c_busy = (int32_t)dsp->d_cm.c_busy;
4530 			ds32p->d_cm.c_time = (time32_t)dsp->d_cm.c_time;
4531 			ds32p->d_cm.c_sflags = (int32_t)dsp->d_cm.c_sflags;
4532 			(void) strcpy(ds32p->d_cm.ci_name, dsp->d_cm.ci_name);
4533 
4534 			/* copy type specific data for the device */
4535 			switch (dsp->d_cm.ci_type) {
4536 
4537 			case SBD_COMP_CPU:
4538 				ds32p->d_cpu.cs_isbootproc =
4539 					(int32_t)dsp->d_cpu.cs_isbootproc;
4540 				ds32p->d_cpu.cs_cpuid =
4541 					(int32_t)dsp->d_cpu.cs_cpuid;
4542 				ds32p->d_cpu.cs_speed =
4543 					(int32_t)dsp->d_cpu.cs_speed;
4544 				ds32p->d_cpu.cs_ecache =
4545 					(int32_t)dsp->d_cpu.cs_ecache;
4546 				break;
4547 
4548 			case SBD_COMP_MEM:
4549 				ds32p->d_mem.ms_type =
4550 					(int32_t)dsp->d_mem.ms_type;
4551 				ds32p->d_mem.ms_ostate =
4552 					(int32_t)dsp->d_mem.ms_ostate;
4553 				ds32p->d_mem.ms_cond =
4554 					(int32_t)dsp->d_mem.ms_cond;
4555 				ds32p->d_mem.ms_interleave =
4556 					(uint32_t)dsp->d_mem.ms_interleave;
4557 				ds32p->d_mem.ms_basepfn =
4558 					(uint32_t)dsp->d_mem.ms_basepfn;
4559 				ds32p->d_mem.ms_totpages =
4560 					(uint32_t)dsp->d_mem.ms_totpages;
4561 				ds32p->d_mem.ms_detpages =
4562 					(uint32_t)dsp->d_mem.ms_detpages;
4563 				ds32p->d_mem.ms_pageslost =
4564 					(int32_t)dsp->d_mem.ms_pageslost;
4565 				ds32p->d_mem.ms_managed_pages =
4566 					(int32_t)dsp->d_mem.ms_managed_pages;
4567 				ds32p->d_mem.ms_noreloc_pages =
4568 					(int32_t)dsp->d_mem.ms_noreloc_pages;
4569 				ds32p->d_mem.ms_noreloc_first =
4570 					(int32_t)dsp->d_mem.ms_noreloc_first;
4571 				ds32p->d_mem.ms_noreloc_last =
4572 					(int32_t)dsp->d_mem.ms_noreloc_last;
4573 				ds32p->d_mem.ms_cage_enabled =
4574 					(int32_t)dsp->d_mem.ms_cage_enabled;
4575 				ds32p->d_mem.ms_peer_is_target =
4576 					(int32_t)dsp->d_mem.ms_peer_is_target;
4577 				(void) strcpy(ds32p->d_mem.ms_peer_ap_id,
4578 					dsp->d_mem.ms_peer_ap_id);
4579 				break;
4580 
4581 
4582 			case SBD_COMP_IO:
4583 
4584 				ds32p->d_io.is_type =
4585 					(int32_t)dsp->d_io.is_type;
4586 				ds32p->d_io.is_unsafe_count =
4587 					(int32_t)dsp->d_io.is_unsafe_count;
4588 				ds32p->d_io.is_referenced =
4589 					(int32_t)dsp->d_io.is_referenced;
4590 				for (j = 0; j < SBD_MAX_UNSAFE; j++)
4591 					ds32p->d_io.is_unsafe_list[j] =
4592 					    (int32_t)
4593 					    ds32p->d_io.is_unsafe_list[j];
4594 				bcopy(dsp->d_io.is_pathname,
4595 				    ds32p->d_io.is_pathname, MAXPATHLEN);
4596 				break;
4597 
4598 			case SBD_COMP_CMP:
4599 				/* copy sbd_cmp_stat_t structure members */
4600 				bcopy(&dsp->d_cmp.ps_cpuid[0],
4601 					&ds32p->d_cmp.ps_cpuid[0],
4602 					sizeof (ds32p->d_cmp.ps_cpuid));
4603 				ds32p->d_cmp.ps_ncores =
4604 					(int32_t)dsp->d_cmp.ps_ncores;
4605 				ds32p->d_cmp.ps_speed =
4606 					(int32_t)dsp->d_cmp.ps_speed;
4607 				ds32p->d_cmp.ps_ecache =
4608 					(int32_t)dsp->d_cmp.ps_ecache;
4609 				break;
4610 
4611 			default:
4612 				cmn_err(CE_WARN,
4613 				    "sbd:%s: unknown dev type (%d)", f,
4614 				    (int)dsp->d_cm.c_id.c_type);
4615 				break;
4616 			}
4617 		}
4618 
4619 		if (ddi_copyout((void *)dstat32p,
4620 		    cmdp->cmd_stat.s_statp, sz32, mode) != 0) {
4621 			cmn_err(CE_WARN,
4622 				"sbd:%s: failed to copyout status "
4623 				"for board %d", f, sbp->sb_num);
4624 			SBD_SET_ERRNO(SBD_HD2ERR(hp), EFAULT);
4625 		}
4626 	} else
4627 #endif /* _MULTI_DATAMODEL */
4628 	if (ddi_copyout((void *)dstatp, cmdp->cmd_stat.s_statp,
4629 	    sz, mode) != 0) {
4630 		cmn_err(CE_WARN,
4631 			"sbd:%s: failed to copyout status for board %d",
4632 			f, sbp->sb_num);
4633 		SBD_SET_ERRNO(SBD_HD2ERR(hp), EFAULT);
4634 	}
4635 
4636 #ifdef _MULTI_DATAMODEL
4637 	if (sz32 != 0)
4638 		kmem_free(dstat32p, sz32);
4639 #endif
4640 	kmem_free(dstatp, sz);
4641 }
4642 
4643 /*
4644  * Called at driver load time to determine the state and condition
4645  * of an existing board in the system.
4646  */
static void
sbd_board_discovery(sbd_board_t *sbp)
{
	int		i;
	dev_info_t	*dip;
	sbd_devset_t	devs_lost, devs_attached = 0;
	extern kmutex_t	cpu_lock;
	sbdp_handle_t	*hdp;
	static fn_t	f = "sbd_board_discovery";
	sbderror_t	error, *ep;
	sbd_handle_t	*hp = MACHBD2HD(sbp);

	/* Nothing to discover on a board with no present devices. */
	if (SBD_DEVS_PRESENT(sbp) == 0) {
		PR_ALL("%s: board %d has no devices present\n",
			f, sbp->sb_num);
		return;
	}

	/* Local error accumulator; errors are logged, not fatal (below). */
	ep = &error;
	bzero(ep, sizeof (sbderror_t));

	/*
	 * Check for existence of cpus.
	 */

	hdp = sbd_get_sbdp_handle(sbp, hp);

	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		processorid_t	cpuid;

		if (!SBD_DEV_IS_PRESENT(sbp, SBD_COMP_CPU, i))
			continue;

		dip = sbp->sb_devlist[NIX(SBD_COMP_CPU)][i];

		if (dip != NULL) {
			cpuid = sbdp_get_cpuid(hdp, dip);

			/* Record the failure in ep but keep scanning. */
			if (cpuid < 0) {
				SBD_GET_PERR(hdp->h_err,
				    ep);
				continue;
			}

			mutex_enter(&cpu_lock);	/* needed to call cpu_get() */
			if (cpu_get(cpuid)) {
				/* cpu_t exists, so this unit is attached. */
				SBD_DEV_SET_ATTACHED(sbp, SBD_COMP_CPU, i);
				DEVSET_ADD(devs_attached, SBD_COMP_CPU, i);
				PR_ALL("%s: board %d, cpuid %d - attached\n",
					f, sbp->sb_num, cpuid);
			}
			mutex_exit(&cpu_lock);
			sbd_init_cpu_unit(sbp, i);
		}
	}

	/*
	 * Check for existence of memory.
	 */
	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		uint64_t	basepa, endpa;
		struct memlist	*ml;
		extern struct memlist	*phys_install;

		if (!SBD_DEV_IS_PRESENT(sbp, SBD_COMP_MEM, i))
			continue;

		dip = sbp->sb_devlist[NIX(SBD_COMP_MEM)][i];
		if (dip == NULL)
			continue;

		if (sbdphw_get_base_physaddr(hdp, dip, &basepa)) {
			/* omit phantom memory controllers on I/O boards */
			if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_MEM, i)) {
				ASSERT(sbp->sb_ndev != 0);
				SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_MEM, i);
				sbp->sb_ndev--;
			}
			sbp->sb_devlist[NIX(SBD_COMP_MEM)][i] = NULL;
			continue;
		}

		/*
		 * basepa may not be on a alignment boundary, make it so.
		 */
		if (sbdp_get_mem_alignment(hdp, dip, &endpa)) {
			cmn_err(CE_WARN, "%s sbdp_get_mem_alignment fail", f);
			continue;
		}

		/*
		 * endpa currently holds the alignment (assumed power of
		 * two); round basepa down to it, then convert endpa into
		 * the exclusive end address of the aligned span.
		 */
		basepa &= ~(endpa - 1);
		endpa += basepa;

		/*
		 * Check if base address is in phys_install.
		 */
		memlist_read_lock();
		/* Break out with ml != NULL if [basepa, endpa) overlaps. */
		for (ml = phys_install; ml; ml = ml->next)
			if ((endpa <= ml->address) ||
					(basepa >= (ml->address + ml->size)))
				continue;
			else
				break;
		memlist_read_unlock();

		if (ml) {
			SBD_DEV_SET_ATTACHED(sbp, SBD_COMP_MEM, i);
			DEVSET_ADD(devs_attached, SBD_COMP_MEM, i);
			PR_ALL("%s: board %d, mem-unit %d - attached\n",
				f, sbp->sb_num, i);
		}
		sbd_init_mem_unit(sbp, i, ep);
	}
	sbd_release_sbdp_handle(hdp);

	/*
	 * If so far we have found an error, we just log it but continue
	 */
	if (SBD_GET_ERRNO(ep) != 0)
		cmn_err(CE_WARN, "%s errno has occurred: errno %d", f,
			SBD_GET_ERRNO(ep));

	/*
	 * Check for i/o state.
	 */
	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {

		if (!SBD_DEV_IS_PRESENT(sbp, SBD_COMP_IO, i))
			continue;

		dip = sbp->sb_devlist[NIX(SBD_COMP_IO)][i];
		if (dip == NULL)
			continue;

		/* Branch was held in sbd_board_init()'s hold walk. */
		ASSERT(e_ddi_branch_held(dip));

		/*
		 * XXX Is the devstate check needed ?
		 */
		if (i_ddi_devi_attached(dip) ||
		    ddi_get_devstate(dip) == DDI_DEVSTATE_UP) {

			/*
			 * Found it!
			 */
			SBD_DEV_SET_ATTACHED(sbp, SBD_COMP_IO, i);
			DEVSET_ADD(devs_attached, SBD_COMP_IO, i);
			PR_ALL("%s: board %d, io-unit %d - attached\n",
				f, sbp->sb_num, i);
		}
		sbd_init_io_unit(sbp, i);
	}

	SBD_DEVS_CONFIGURE(sbp, devs_attached);
	if (devs_attached && ((devs_lost = SBD_DEVS_UNATTACHED(sbp)) != 0)) {
		int		ut;
		/*
		 * A prior comment stated that a partially configured
		 * board was not permitted. The Serengeti architecture
		 * makes this possible, so the SB_DEVS_DISCONNECT
		 * at the end of this block has been removed.
		 */

		PR_ALL("%s: some devices not configured (0x%x)...\n",
			f, devs_lost);

		/* Mark every present-but-unattached unit UNCONFIGURED. */
		for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++)
			if (DEVSET_IN_SET(devs_lost, SBD_COMP_CPU, ut)) {
				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU,
					ut, SBD_STATE_UNCONFIGURED);
			}

		for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++)
			if (DEVSET_IN_SET(devs_lost, SBD_COMP_MEM, ut)) {
				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_MEM,
					ut, SBD_STATE_UNCONFIGURED);
			}

		for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++)
			if (DEVSET_IN_SET(devs_lost, SBD_COMP_IO, ut)) {
				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_IO,
					ut, SBD_STATE_UNCONFIGURED);
			}
	}
}
4832 
4833 static int
4834 hold_rele_branch(dev_info_t *rdip, void *arg)
4835 {
4836 	walk_tree_t	*wp = (walk_tree_t *)arg;
4837 
4838 	ASSERT(wp && (wp->hold == 0 || wp->hold == 1));
4839 
4840 	switch (get_node_type(wp->sbp, rdip, NULL)) {
4841 		case SBD_COMP_CMP:
4842 		case SBD_COMP_MEM:
4843 		case SBD_COMP_IO:
4844 			break;
4845 		case SBD_COMP_CPU:
4846 
4847 			/*
4848 			 * All CPU nodes under CMP nodes should have
4849 			 * gotten pruned when the CMP node was first
4850 			 * encountered.
4851 			 */
4852 			ASSERT(!sbd_is_cmp_child(rdip));
4853 
4854 			break;
4855 
4856 		case SBD_COMP_UNKNOWN:
4857 			/* Not of interest to us */
4858 			return (DDI_WALK_CONTINUE);
4859 		default:
4860 			ASSERT(0);
4861 			return (DDI_WALK_PRUNECHILD);
4862 	}
4863 
4864 	if (wp->hold) {
4865 		ASSERT(!e_ddi_branch_held(rdip));
4866 		e_ddi_branch_hold(rdip);
4867 	} else {
4868 		ASSERT(e_ddi_branch_held(rdip));
4869 		e_ddi_branch_rele(rdip);
4870 	}
4871 
4872 	return (DDI_WALK_PRUNECHILD);
4873 }
4874 
/*
 * One-time setup of a sbd_board_t: locks, identity fields, per-unit
 * device lists and state structures, and pathname buffers.  Holds the
 * branch of every board component found under top_dip, then builds the
 * devlists and derives the board's initial DR state from what is
 * already attached.  Undone by sbd_board_destroy().
 */
static void
sbd_board_init(sbd_board_t *sbp, sbd_softstate_t *softsp,
	int bd, dev_info_t *top_dip, int wnode)
{
	int		i;
	dev_info_t	*pdip;
	int		circ;
	walk_tree_t	walk = {0};

	mutex_init(&sbp->sb_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sbp->sb_flags_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sbp->sb_slock, NULL, MUTEX_DRIVER, NULL);

	sbp->sb_ref = 0;
	sbp->sb_num = bd;
	sbp->sb_time = gethrestime_sec();
	/*
	 * For serengeti, top_dip doesn't need to be held because
	 * sbp i.e. sbd_board_t will be destroyed in sbd_teardown_instance()
	 * before top_dip detaches. For Daktari, top_dip is the
	 * root node which never has to be held.
	 */
	sbp->sb_topdip = top_dip;
	sbp->sb_cpuid = -1;
	sbp->sb_softsp = (void *) softsp;
	sbp->sb_cond = SBD_COND_UNKNOWN;
	sbp->sb_wnode = wnode;
	sbp->sb_memaccess_ok = 1;

	/* The fixed-size per-board arrays must fit all unit types. */
	ASSERT(MAX_IO_UNITS_PER_BOARD <= SBD_MAX_UNITS_PER_BOARD);
	ASSERT(MAX_CPU_UNITS_PER_BOARD <= SBD_MAX_UNITS_PER_BOARD);
	ASSERT(MAX_MEM_UNITS_PER_BOARD <= SBD_MAX_UNITS_PER_BOARD);

	/*
	 * Allocate the devlist for cpus.
	 */
	sbp->sb_devlist[NIX(SBD_COMP_CPU)] = GETSTRUCT(dev_info_t *,
						MAX_CPU_UNITS_PER_BOARD);

	/*
	 * Allocate the devlist for mem.
	 */
	sbp->sb_devlist[NIX(SBD_COMP_MEM)] = GETSTRUCT(dev_info_t *,
						MAX_MEM_UNITS_PER_BOARD);

	/*
	 * Allocate the devlist for io.
	 */
	sbp->sb_devlist[NIX(SBD_COMP_IO)] = GETSTRUCT(dev_info_t *,
						MAX_IO_UNITS_PER_BOARD);


	/* Per-unit DR state structures, one array per component type. */
	sbp->sb_dev[NIX(SBD_COMP_CPU)] = GETSTRUCT(sbd_dev_unit_t,
						MAX_CPU_UNITS_PER_BOARD);

	sbp->sb_dev[NIX(SBD_COMP_MEM)] = GETSTRUCT(sbd_dev_unit_t,
						MAX_MEM_UNITS_PER_BOARD);

	sbp->sb_dev[NIX(SBD_COMP_IO)] = GETSTRUCT(sbd_dev_unit_t,
						MAX_IO_UNITS_PER_BOARD);

	/* Pathname buffers for each possible unit of each type. */
	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		sbp->sb_cpupath[i] = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	}

	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		sbp->sb_mempath[i] = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	}

	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
		sbp->sb_iopath[i] = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	}

	/*
	 * Walk the device tree, find all top dips on this board and
	 * hold the branches rooted at them
	 */
	ASSERT(sbp->sb_topdip);
	pdip = ddi_get_parent(sbp->sb_topdip);
	if (pdip)
		ndi_devi_enter(pdip, &circ);
	walk.sbp = sbp;
	walk.hold = 1;
	ddi_walk_devs(sbp->sb_topdip, hold_rele_branch, (void *)&walk);
	if (pdip)
		ndi_devi_exit(pdip, circ);

	/*
	 * Initialize the devlists
	 */
	if (sbd_init_devlists(sbp) == 0) {
		SBD_BOARD_TRANSITION(sbp, SBD_STATE_EMPTY);
	} else {
		/*
		 * Couldn't have made it down here without
		 * having found at least one device.
		 */
		ASSERT(SBD_DEVS_PRESENT(sbp) != 0);
		/*
		 * Check the state of any possible devices on the
		 * board.
		 */
		sbd_board_discovery(sbp);

		if (SBD_DEVS_UNATTACHED(sbp) == 0) {
			/*
			 * The board has no unattached devices, therefore
			 * by reason of insanity it must be configured!
			 */
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONFIGURED);
			sbp->sb_cond = SBD_COND_OK;
		} else if (SBD_DEVS_ATTACHED(sbp)) {
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_PARTIAL);
		} else {
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONNECTED);
		}
	}
}
4993 
/*
 * Tear down a sbd_board_t set up by sbd_board_init(): free the per-unit
 * state structures, pathname buffers and devlists, release every branch
 * held during init, and destroy the board locks.  Frees in roughly the
 * reverse order of allocation.
 */
static void
sbd_board_destroy(sbd_board_t *sbp)
{
	int		i;
	dev_info_t	*pdip;
	int		circ;
	walk_tree_t	walk = {0};

	SBD_BOARD_TRANSITION(sbp, SBD_STATE_EMPTY);

#ifdef DEBUG
	/* Memory unit memlists must have been released before teardown. */
	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		sbd_mem_unit_t *mp;

		mp = SBD_GET_BOARD_MEMUNIT(sbp, i);
		ASSERT(mp->sbm_mlist == NULL);
	}
#endif /* DEBUG */

	/*
	 * Free up MEM unit structs.
	 */
	FREESTRUCT(sbp->sb_dev[NIX(SBD_COMP_MEM)],
			sbd_dev_unit_t, MAX_MEM_UNITS_PER_BOARD);
	sbp->sb_dev[NIX(SBD_COMP_MEM)] = NULL;

	/*
	 * Free up CPU unit structs.
	 */
	FREESTRUCT(sbp->sb_dev[NIX(SBD_COMP_CPU)],
			sbd_dev_unit_t, MAX_CPU_UNITS_PER_BOARD);
	sbp->sb_dev[NIX(SBD_COMP_CPU)] = NULL;

	/*
	 * Free up IO unit structs.
	 */
	FREESTRUCT(sbp->sb_dev[NIX(SBD_COMP_IO)],
			sbd_dev_unit_t, MAX_IO_UNITS_PER_BOARD);
	sbp->sb_dev[NIX(SBD_COMP_IO)] = NULL;

	/*
	 * free up CPU devlists.
	 */

	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		kmem_free((caddr_t)sbp->sb_cpupath[i], MAXPATHLEN);
	}
	FREESTRUCT(sbp->sb_devlist[NIX(SBD_COMP_CPU)], dev_info_t *,
		MAX_CPU_UNITS_PER_BOARD);
	sbp->sb_devlist[NIX(SBD_COMP_CPU)] = NULL;

	/*
	 * free up MEM devlists.
	 */
	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		kmem_free((caddr_t)sbp->sb_mempath[i], MAXPATHLEN);
	}
	FREESTRUCT(sbp->sb_devlist[NIX(SBD_COMP_MEM)], dev_info_t *,
		MAX_MEM_UNITS_PER_BOARD);
	sbp->sb_devlist[NIX(SBD_COMP_MEM)] = NULL;

	/*
	 * free up IO devlists.
	 */
	for (i = 0; i <  MAX_IO_UNITS_PER_BOARD; i++) {
		kmem_free((caddr_t)sbp->sb_iopath[i], MAXPATHLEN);
	}
	FREESTRUCT(sbp->sb_devlist[NIX(SBD_COMP_IO)], dev_info_t *,
		MAX_IO_UNITS_PER_BOARD);
	sbp->sb_devlist[NIX(SBD_COMP_IO)] = NULL;

	/*
	 * Release all branches held earlier
	 */
	ASSERT(sbp->sb_topdip);
	pdip = ddi_get_parent(sbp->sb_topdip);
	if (pdip)
		ndi_devi_enter(pdip, &circ);
	walk.sbp = sbp;
	walk.hold = 0;
	ddi_walk_devs(sbp->sb_topdip, hold_rele_branch, (void *)&walk);
	if (pdip)
		ndi_devi_exit(pdip, circ);

	mutex_destroy(&sbp->sb_slock);
	mutex_destroy(&sbp->sb_flags_mutex);
	mutex_destroy(&sbp->sb_mutex);
}
5082 
5083 sbd_comp_type_t
5084 sbd_cm_type(char *name)
5085 {
5086 	sbd_comp_type_t type = SBD_COMP_UNKNOWN;
5087 	int i;
5088 
5089 	/* look up type in table */
5090 	for (i = 0; SBD_COMP(i) != SBD_COMP_UNKNOWN; i++) {
5091 		if (strcmp(name, SBD_OTYPE(i)) == 0) {
5092 			type = SBD_COMP(i);
5093 			break;
5094 		}
5095 	}
5096 
5097 	return (type);
5098 }
5099 
5100 /*
5101  * There are certain cases where obp marks components as failed
5102  * If the status is ok the node won't have any status property. It
5103  * is only there if the status is other than ok.
5104  *
5105  * The translation is as follows:
5106  * If there is no status prop, the the cond is SBD_COND_OK
5107  * If we find a status prop but can't get to it then cond is SBD_COND_UNKNOWN
5108  * if we find a stat and it is failed the cond is SBD_COND_FAILED
5109  * If the stat is disabled, the cond is SBD_COND_UNUSABLE
5110  * Otherwise we return con as SBD_COND_OK
5111  */
5112 sbd_cond_t
5113 sbd_get_comp_cond(dev_info_t *dip)
5114 {
5115 	int			len;
5116 	char			*status_buf;
5117 	static const char	*status = "status";
5118 	static const char	*failed = "fail";
5119 	static const char	*disabled = "disabled";
5120 
5121 	if (dip == NULL) {
5122 		PR_BYP("dip is NULL\n");
5123 		return (SBD_COND_UNKNOWN);
5124 	}
5125 
5126 	/*
5127 	 * If retired, return FAILED
5128 	 */
5129 	if (DEVI(dip)->devi_flags & DEVI_RETIRED) {
5130 		PR_CPU("dip is retired\n");
5131 		return (SBD_COND_FAILED);
5132 	}
5133 
5134 	if (ddi_getproplen(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5135 	    (char *)status, &len) != DDI_PROP_SUCCESS) {
5136 		PR_CPU("status in sbd is ok\n");
5137 		return (SBD_COND_OK);
5138 	}
5139 
5140 	status_buf = kmem_zalloc(sizeof (char) * OBP_MAXPROPNAME, KM_SLEEP);
5141 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5142 	    (char *)status, status_buf, &len) != DDI_PROP_SUCCESS) {
5143 		PR_CPU("status in sbd is unknown\n");
5144 		return (SBD_COND_UNKNOWN);
5145 	}
5146 
5147 	if (strncmp(status_buf, failed, strlen(failed)) == 0) {
5148 		PR_CPU("status in sbd is failed\n");
5149 		kmem_free(status_buf, sizeof (char) * OBP_MAXPROPNAME);
5150 		return (SBD_COND_FAILED);
5151 	}
5152 
5153 	if (strcmp(status_buf, disabled) == 0) {
5154 		PR_CPU("status in sbd is unusable\n");
5155 		kmem_free(status_buf, sizeof (char) * OBP_MAXPROPNAME);
5156 		return (SBD_COND_UNUSABLE);
5157 	}
5158 
5159 	kmem_free(status_buf, sizeof (char) * OBP_MAXPROPNAME);
5160 	return (SBD_COND_OK);
5161 }
5162 
5163 #ifdef SBD_DEBUG_ERRS
5164 
5165 /* function to simulate errors throughout the sbd code */
5166 void
5167 sbd_inject_err(int error, sbderror_t *ep, int Errno, int ecode,
5168 	char *rsc)
5169 {
5170 	static fn_t	f = "sbd_inject_err";
5171 
5172 	if (sbd_err_debug == 0)
5173 		return;
5174 
5175 	if (ep == NULL) {
5176 		cmn_err(CE_WARN, "%s ep is NULL", f);
5177 		return;
5178 	}
5179 
5180 	if (SBD_GET_ERRNO(ep) != 0) {
5181 		cmn_err(CE_WARN, "%s errno already set to %d", f,
5182 			SBD_GET_ERRNO(ep));
5183 		return;
5184 	}
5185 
5186 	if (SBD_GET_ERR(ep) != 0) {
5187 		cmn_err(CE_WARN, "%s code already set to %d", f,
5188 			SBD_GET_ERR(ep));
5189 		return;
5190 	}
5191 
5192 	if ((sbd_err_debug & (1 << error)) != 0) {
5193 		ep->e_errno = Errno;
5194 		ep->e_code = ecode;
5195 
5196 		if (rsc != NULL)
5197 			bcopy((caddr_t)rsc,
5198 			(caddr_t)ep->e_rsc,
5199 			sizeof (ep->e_rsc));
5200 
5201 		if (Errno != 0)
5202 			PR_ERR_ERRNO("%s set errno to %d", f, ep->e_errno);
5203 
5204 		if (ecode != 0)
5205 			PR_ERR_ECODE("%s set ecode to %d", f, ep->e_code);
5206 
5207 		if (rsc != NULL)
5208 			PR_ERR_RSC("%s set rsc to %s", f, ep->e_rsc);
5209 	}
5210 }
5211 #endif
5212