xref: /illumos-gate/usr/src/uts/sun4u/io/sbd.c (revision 8c4267180173328ebba9487634f0f232387d067f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright 2023 Oxide Computer Company
29  */
30 
31 /*
32  * safari system board DR module.
33  */
34 
35 #include <sys/debug.h>
36 #include <sys/types.h>
37 #include <sys/errno.h>
38 #include <sys/cred.h>
39 #include <sys/dditypes.h>
40 #include <sys/devops.h>
41 #include <sys/modctl.h>
42 #include <sys/poll.h>
43 #include <sys/conf.h>
44 #include <sys/ddi.h>
45 #include <sys/sunddi.h>
46 #include <sys/sunndi.h>
47 #include <sys/ndi_impldefs.h>
48 #include <sys/stat.h>
49 #include <sys/kmem.h>
50 #include <sys/cpuvar.h>
51 #include <sys/mem_config.h>
52 #include <sys/mem_cage.h>
53 
54 #include <sys/autoconf.h>
55 #include <sys/cmn_err.h>
56 
57 #include <sys/ddi_impldefs.h>
58 #include <sys/machsystm.h>
59 #include <sys/param.h>
60 
61 #include <sys/sbdpriv.h>
62 #include <sys/sbd_io.h>
63 
64 /* start sbd includes */
65 
66 #include <sys/systm.h>
67 #include <sys/sysmacros.h>
68 #include <sys/x_call.h>
69 #include <sys/membar.h>
70 #include <vm/seg_kmem.h>
71 
72 extern int nulldev();
73 extern int nodev();
74 
typedef struct {		/* arg to sbd_get_handle */
	dev_t	dev;		/* device the ioctl arrived on */
	int	cmd;		/* SBD_CMD_* ioctl command */
	int	mode;		/* ioctl mode/data-model flags */
	sbd_ioctl_arg_t *ioargp;	/* user's ioctl argument pointer */
} sbd_init_arg_t;
81 
82 
83 /*
84  * sbd support operations.
85  */
86 static void	sbd_exec_op(sbd_handle_t *hp);
87 static void	sbd_dev_configure(sbd_handle_t *hp);
88 static int	sbd_dev_release(sbd_handle_t *hp);
89 static int	sbd_dev_unconfigure(sbd_handle_t *hp);
90 static void	sbd_attach_cpu(sbd_handle_t *hp, sbderror_t *ep,
91 				dev_info_t *dip, int unit);
92 static void	sbd_detach_cpu(sbd_handle_t *hp, sbderror_t *ep,
93 				dev_info_t *dip, int unit);
94 static int	sbd_detach_mem(sbd_handle_t *hp, sbderror_t *ep, int unit);
95 static void	sbd_cancel(sbd_handle_t *hp);
96 void 	sbd_errno_decode(int err, sbderror_t *ep, dev_info_t *dip);
97 int		sbd_dealloc_instance(sbd_board_t *sbp, int max_boards);
98 int		sbd_errno2ecode(int error);
99 #pragma weak sbdp_cpu_get_impl
100 
101 #ifdef DEBUG
102 uint_t	sbd_debug	=	(uint_t)0x0;
103 
104 #ifdef SBD_DEBUG_ERRS
105 /* controls which errors are injected */
106 uint_t	sbd_err_debug	=	(uint_t)0x0;
107 
108 /* controls printing about error injection */
109 uint_t	sbd_print_errs	=	(uint_t)0x0;
110 
111 #endif /* SBD_DEBUG_ERRS */
112 
113 #endif /* DEBUG */
114 
/* Printable names for board states (used in status/debug output). */
char	*sbd_state_str[] = {
	"EMPTY", "OCCUPIED", "CONNECTED", "UNCONFIGURED",
	"PARTIAL", "CONFIGURED", "RELEASE", "UNREFERENCED",
	"FATAL"
};
120 
/*	Note: this must be changed in tandem with sbd_ioctl.h	*/
/* Printable names for component types. */
char	*sbd_ct_str[] = {
	"NONE", "CPU", "MEM", "IO", "UNKNOWN"
};
125 
/*
 * Printable name for an SBD_CMD_* code, for use in messages.
 * Note: this must also be changed in tandem with sbd_ioctl.h.
 */
#define	SBD_CMD_STR(c) \
	(((c) == SBD_CMD_ASSIGN)	? "ASSIGN"	: \
	((c) == SBD_CMD_UNASSIGN)	? "UNASSIGN"	: \
	((c) == SBD_CMD_POWERON)	? "POWERON"	: \
	((c) == SBD_CMD_POWEROFF)	? "POWEROFF"	: \
	((c) == SBD_CMD_TEST)		? "TEST"	: \
	((c) == SBD_CMD_CONNECT)	? "CONNECT"	: \
	((c) == SBD_CMD_CONFIGURE)	? "CONFIGURE"	: \
	((c) == SBD_CMD_UNCONFIGURE)	? "UNCONFIGURE"	: \
	((c) == SBD_CMD_DISCONNECT)	? "DISCONNECT"	: \
	((c) == SBD_CMD_STATUS)		? "STATUS"	: \
	((c) == SBD_CMD_GETNCM)		? "GETNCM"	: \
	((c) == SBD_CMD_PASSTHRU)	? "PASSTHRU"	: "unknown")
140 
141 /*
142  * Defines and structures for device tree naming and mapping
143  * to node types
144  */
145 
/* Platform device-attribute table; filled in by _init() from sbdp_get_devattr(). */
sbd_devattr_t *sbd_devattr;

/* defines to access the attribute struct */
#define	SBD_DEVNAME(i)		sbd_devattr[i].s_devname
#define	SBD_OTYPE(i)		sbd_devattr[(i)].s_obp_type
#define	SBD_COMP(i)		sbd_devattr[i].s_dnodetype
152 
153 /*
154  * State transition table.  States valid transitions for "board" state.
155  * Recall that non-zero return value terminates operation, however
156  * the herrno value is what really indicates an error , if any.
157  */
158 static int
159 _cmd2index(int c)
160 {
161 	/*
162 	 * Translate DR CMD to index into sbd_state_transition.
163 	 */
164 	switch (c) {
165 	case SBD_CMD_CONNECT:		return (0);
166 	case SBD_CMD_DISCONNECT:	return (1);
167 	case SBD_CMD_CONFIGURE:		return (2);
168 	case SBD_CMD_UNCONFIGURE:	return (3);
169 	case SBD_CMD_POWEROFF:		return (4);
170 	case SBD_CMD_POWERON:		return (5);
171 	case SBD_CMD_UNASSIGN:		return (6);
172 	case SBD_CMD_ASSIGN:		return (7);
173 	case SBD_CMD_TEST:		return (8);
174 	default:			return (-1);
175 	}
176 }
177 
178 #define	CMD2INDEX(c)	_cmd2index(c)
179 
/*
 * Per-command transition table, indexed by CMD2INDEX(cmd) and then by
 * the board's current state.  x_rv == 0 allows the operation to
 * proceed; x_rv == 1 terminates it with x_err as the errno to report.
 * An x_err of 0 with x_rv of 1 (e.g. CONFIGURE of an already
 * configured board) terminates the operation without an error.
 */
static struct sbd_state_trans {
	int	x_cmd;
	struct {
		int	x_rv;		/* return value of pre_op */
		int	x_err;		/* errno, if any */
	} x_op[SBD_NUM_STATES];
} sbd_state_transition[] = {
	{ SBD_CMD_CONNECT,
		{
			{ 0, 0 },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_DISCONNECT,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 0, 0 },	/* connected */
			{ 0, 0 },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_CONFIGURE,
		{
			{ 1, EIO },	/* empty */
			{ 1, EIO },	/* occupied */
			{ 0, 0 },	/* connected */
			{ 0, 0 },	/* unconfigured */
			{ 0, 0 },	/* partial */
			{ 1, 0 },	/* configured */
			{ 0, 0 },	/* release */
			{ 0, 0 },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_UNCONFIGURE,
		{
			{ 1, EIO },	/* empty */
			{ 1, EIO },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 0, 0 },	/* configured */
			{ 0, 0 },	/* release */
			{ 0, 0 },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_POWEROFF,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_POWERON,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_UNASSIGN,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_ASSIGN,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_TEST,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
};
305 
306 /*
307  * Global R/W lock to synchronize access across
308  * multiple boards.  Users wanting multi-board access
309  * must grab WRITE lock, others must grab READ lock.
310  */
311 krwlock_t	sbd_grwlock;
312 
313 /*
314  * Global to determine if an event needs to be sent
315  */
316 char send_event = 0;
317 
318 /*
319  * Required/Expected functions.
320  */
321 
322 static sbd_handle_t	*sbd_get_handle(dev_t dev, sbd_softstate_t *softsp,
323 				intptr_t arg, sbd_init_arg_t *iap);
324 static void		sbd_release_handle(sbd_handle_t *hp);
325 static int		sbd_pre_op(sbd_handle_t *hp);
326 static void		sbd_post_op(sbd_handle_t *hp);
327 static int		sbd_probe_board(sbd_handle_t *hp);
328 static int		sbd_deprobe_board(sbd_handle_t *hp);
329 static void		sbd_connect(sbd_handle_t *hp);
330 static void		sbd_assign_board(sbd_handle_t *hp);
331 static void		sbd_unassign_board(sbd_handle_t *hp);
332 static void		sbd_poweron_board(sbd_handle_t *hp);
333 static void		sbd_poweroff_board(sbd_handle_t *hp);
334 static void		sbd_test_board(sbd_handle_t *hp);
335 
336 static int		sbd_disconnect(sbd_handle_t *hp);
337 static sbd_devlist_t	*sbd_get_attach_devlist(sbd_handle_t *hp,
338 					int32_t *devnump, int32_t pass);
339 static int		sbd_pre_attach_devlist(sbd_handle_t *hp,
340 					sbd_devlist_t *devlist, int32_t devnum);
341 static int		sbd_post_attach_devlist(sbd_handle_t *hp,
342 					sbd_devlist_t *devlist, int32_t devnum);
343 static sbd_devlist_t	*sbd_get_release_devlist(sbd_handle_t *hp,
344 					int32_t *devnump, int32_t pass);
345 static int		sbd_pre_release_devlist(sbd_handle_t *hp,
346 					sbd_devlist_t *devlist, int32_t devnum);
347 static int		sbd_post_release_devlist(sbd_handle_t *hp,
348 					sbd_devlist_t *devlist, int32_t devnum);
349 static void		sbd_release_done(sbd_handle_t *hp,
350 					sbd_comp_type_t nodetype,
351 					dev_info_t *dip);
352 static sbd_devlist_t	*sbd_get_detach_devlist(sbd_handle_t *hp,
353 					int32_t *devnump, int32_t pass);
354 static int		sbd_pre_detach_devlist(sbd_handle_t *hp,
355 					sbd_devlist_t *devlist, int32_t devnum);
356 static int		sbd_post_detach_devlist(sbd_handle_t *hp,
357 					sbd_devlist_t *devlist, int32_t devnum);
358 static void		sbd_status(sbd_handle_t *hp);
359 static void		sbd_get_ncm(sbd_handle_t *hp);
360 
361 
362 /*
363  * Support functions.
364  */
365 static sbd_devset_t	sbd_dev2devset(sbd_comp_id_t *cid);
366 static int		sbd_copyin_ioarg(sbd_handle_t *hp, int mode, int cmd,
367 				sbd_cmd_t *cmdp, sbd_ioctl_arg_t *iap);
368 static int		sbd_copyout_errs(int mode, sbd_ioctl_arg_t *iap,
369 					void *arg);
370 static int		sbd_copyout_ioarg(int mode, int cmd, sbd_cmd_t *scp,
371 				sbd_ioctl_arg_t *iap);
372 static int		sbd_check_transition(sbd_board_t *sbp,
373 					sbd_devset_t *devsetp,
374 					struct sbd_state_trans *transp);
375 static sbd_devlist_t	*sbd_get_devlist(sbd_handle_t *hp,
376 					sbd_board_t *sbp,
377 					sbd_comp_type_t nodetype,
378 					int max_units, uint_t uset,
379 					int *count, int present_only);
380 static int		sbd_mem_status(sbd_handle_t *hp, sbd_devset_t devset,
381 					sbd_dev_stat_t *dsp);
382 
383 static int		sbd_init_devlists(sbd_board_t *sbp);
384 static int		sbd_name_to_idx(char *name);
385 static int		sbd_otype_to_idx(char *otpye);
386 static int		sbd_setup_devlists(dev_info_t *dip, void *arg);
387 static void		sbd_init_mem_devlists(sbd_board_t *sbp);
388 static void		sbd_init_cpu_unit(sbd_board_t *sbp, int unit);
389 static void		sbd_board_discovery(sbd_board_t *sbp);
390 static void		sbd_board_init(sbd_board_t *sbp,
391 				sbd_softstate_t *softsp,
392 				int bd, dev_info_t *dip, int wnode);
393 static void		sbd_board_destroy(sbd_board_t *sbp);
394 static int		sbd_check_unit_attached(sbd_board_t *sbp,
395 				dev_info_t *dip, int unit,
396 				sbd_comp_type_t nodetype, sbderror_t *ep);
397 
398 static sbd_state_t 	rstate_cvt(sbd_istate_t state);
399 
400 /*
401  * Autoconfiguration data structures
402  */
403 
404 extern struct mod_ops mod_miscops;
405 
/* Loadable-module linkage: sbd is a misc module, not a leaf driver. */
static struct modlmisc modlmisc = {
	&mod_miscops,
	"System Board DR"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};
416 
417 static int sbd_instances = 0;
418 
419 /*
420  * dr Global data elements
421  */
422 sbd_global sbd_g;
423 
424 /*
425  * We want to be able to unload the module when we wish to do so, but we don't
426  * want anything else to unload it.  Unloading cannot occur until
427  * sbd_teardown_instance is called by an explicit IOCTL into the parent node.
428  * This support is for debugging purposes and should it be expected to work
429  * on the field, it should be enhanced:
430  * Currently, there is still a window where sbd_teardow_instance gets called,
431  * sbd_prevent_unloading now = 0, the driver doesn't get unloaded, and
432  * sbd_setup_instance gets called.  This may cause a panic.
433  */
434 int sbd_prevent_unloading = 1;
435 
436 /*
437  * Driver entry points.
438  */
439 int
440 _init(void)
441 {
442 	int	err;
443 
444 	/*
445 	 * If you need to support multiple nodes (instances), then
446 	 * whatever the maximum number of supported nodes is would
447 	 * need to passed as the third parameter to ddi_soft_state_init().
448 	 * Alternative would be to dynamically fini and re-init the
449 	 * soft state structure each time a node is attached.
450 	 */
451 	err = ddi_soft_state_init((void **)&sbd_g.softsp,
452 		sizeof (sbd_softstate_t), SBD_MAX_INSTANCES);
453 	if (err)
454 		return (err);
455 
456 	if ((err = mod_install(&modlinkage)) != 0) {
457 		ddi_soft_state_fini((void **)&sbd_g.softsp);
458 		return (err);
459 	}
460 
461 	/* Get the array of names from platform helper routine */
462 	sbd_devattr = sbdp_get_devattr();
463 
464 	return (err);
465 }
466 
467 int
468 _fini(void)
469 {
470 	int	err;
471 
472 	if (sbd_prevent_unloading)
473 		return (DDI_FAILURE);
474 
475 	ASSERT(sbd_instances == 0);
476 
477 	if ((err = mod_remove(&modlinkage)) != 0)
478 		return (err);
479 
480 	ddi_soft_state_fini((void **)&sbd_g.softsp);
481 
482 	return (0);
483 }
484 
485 int
486 _info(struct modinfo *modinfop)
487 {
488 	return (mod_info(&modlinkage, modinfop));
489 }
490 
/*
 * Main ioctl dispatch for DR commands.
 *
 * Flow: validate cmd -> look up instance soft state -> build a handle
 * via sbd_get_handle() -> reject state-changing commands when DR is
 * unavailable (sbdp_dr_avail()) -> take locking appropriate to the
 * command (none for STATUS/GETNCM/PASSTHRU; otherwise the global
 * rwlock as writer plus the board mutex, downgrading the rwlock to
 * reader when memory is not involved) -> sbd_pre_op()/sbd_exec_op()/
 * sbd_post_op() -> drop locks and release the handle.
 *
 * *event is set from the global send_event flag so the caller can
 * decide whether to generate an event.  Returns an errno (0 = OK).
 */
int
sbd_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, char *event)
{
	int		rv = 0, instance;
	sbd_handle_t	*hp;
	sbd_softstate_t	*softsp;
	sbd_init_arg_t	init_arg;
	static fn_t	f = "sbd_ioctl";
	int		dr_avail;

	PR_BYP("sbd_ioctl cmd=%x, arg=%lx\n", cmd, arg);

	/* Note: this must also be changed in tandem with sbd_ioctl.h */
	switch (cmd) {
		case SBD_CMD_ASSIGN:
		case SBD_CMD_UNASSIGN:
		case SBD_CMD_POWERON:
		case SBD_CMD_POWEROFF:
		case SBD_CMD_TEST:
		case SBD_CMD_CONNECT:
		case SBD_CMD_CONFIGURE:
		case SBD_CMD_UNCONFIGURE:
		case SBD_CMD_DISCONNECT:
		case SBD_CMD_STATUS:
		case SBD_CMD_GETNCM:
		case SBD_CMD_PASSTHRU:
			break;
		default:
			return (ENOTTY);
	}

	instance = SBD_GET_MINOR2INST(getminor(dev));
	if ((softsp = (sbd_softstate_t *)GET_SOFTC(instance)) == NULL) {
		cmn_err(CE_WARN,
			"sbd:%s:%d: module not yet attached",
			f, instance);
		return (ENXIO);
	}

	init_arg.dev = dev;
	init_arg.cmd = cmd;
	init_arg.mode = mode;
	init_arg.ioargp = (sbd_ioctl_arg_t *)arg;

	hp = sbd_get_handle(dev, softsp, arg, &init_arg);
	/* Check to see if we support dr */
	dr_avail = sbdp_dr_avail();
	if (dr_avail != 1) {
		/* Only query-style commands are allowed without DR. */
		switch (hp->h_cmd) {
			case SBD_CMD_STATUS:
			case SBD_CMD_GETNCM:
			case SBD_CMD_PASSTHRU:
				break;
			default:
				sbd_release_handle(hp);
				return (ENOTSUP);
		}
	}

	switch (hp->h_cmd) {
	case SBD_CMD_STATUS:
	case SBD_CMD_GETNCM:
	case SBD_CMD_PASSTHRU:
		/* no locks needed for these commands */
		break;

	default:
		rw_enter(&sbd_grwlock, RW_WRITER);
		mutex_enter(&SBDH2BD(hp->h_sbd)->sb_mutex);

		/*
		 * If we're dealing with memory at all, then we have
		 * to keep the "exclusive" global lock held.  This is
		 * necessary since we will probably need to look at
		 * multiple board structs.  Otherwise, we only have
		 * to deal with the board in question and so can drop
		 * the global lock to "shared".
		 */
		/*
		 * XXX This is incorrect. The sh_devset has not
		 * been set at this point - it is 0.
		 */
		rv = DEVSET_IN_SET(HD2MACHHD(hp)->sh_devset,
		    SBD_COMP_MEM, DEVSET_ANYUNIT);
		if (rv == 0)
			rw_downgrade(&sbd_grwlock);
		break;
	}

	/*
	 * Before any operations happen, reset the event flag
	 */
	send_event = 0;

	if (sbd_pre_op(hp) == 0) {
		sbd_exec_op(hp);
		sbd_post_op(hp);
	}

	/* The operation's errno, if any, becomes the ioctl result. */
	rv = SBD_GET_ERRNO(SBD_HD2ERR(hp));
	*event = send_event;

	/* undo locking, if any, done before sbd_pre_op */
	switch (hp->h_cmd) {
	case SBD_CMD_STATUS:
	case SBD_CMD_GETNCM:
	case SBD_CMD_PASSTHRU:
		break;
	default:
		mutex_exit(&SBDH2BD(hp->h_sbd)->sb_mutex);
		rw_exit(&sbd_grwlock);
	}

	sbd_release_handle(hp);

	return (rv);
}
608 
/*
 * Per-instance setup: register with the platform layer (sbdp),
 * allocate the instance soft state and the per-board array, then
 * initialize each board via sbd_board_init().
 *
 * Returns DDI_SUCCESS or DDI_FAILURE; on failure all partial setup
 * (sbdp registration, soft state, instance count) is undone.
 */
int
sbd_setup_instance(int instance, dev_info_t *root, int max_boards, int wnode,
		caddr_t sbdp_arg)
{
	int 		b;
	sbd_softstate_t	*softsp;
	sbd_board_t	*sbd_boardlist;
	static fn_t	f = "sbd_setup_instance";

	sbd_instances++;

	if (sbdp_setup_instance(sbdp_arg) != DDI_SUCCESS) {
		sbd_instances--;
		return (DDI_FAILURE);
	}

	if (ALLOC_SOFTC(instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
			"sbd:%s:%d: failed to alloc soft-state",
			f, instance);
		(void) sbdp_teardown_instance(sbdp_arg);
		sbd_instances--;
		return (DDI_FAILURE);
	}

	softsp = (sbd_softstate_t *)GET_SOFTC(instance);

	if (softsp == NULL) {
		cmn_err(CE_WARN,
			"sbd:%s:%d: failed to get soft-state instance",
			f, instance);
		goto exit;
	}

	sbd_boardlist = GETSTRUCT(sbd_board_t, max_boards);
	if (sbd_boardlist == NULL) {
		cmn_err(CE_WARN,
			"sbd:%s: failed to alloc board list %d",
			f, instance);
		goto exit;
	}


	softsp->sbd_boardlist  = (void *)sbd_boardlist;
	softsp->max_boards  = max_boards;
	softsp->wnode  = wnode;


	/* Initialize each board's state under this instance. */
	for (b = 0; b < max_boards; b++) {
		sbd_board_init(sbd_boardlist++, softsp, b, root, wnode);
	}


	return (DDI_SUCCESS);
exit:
	/* Common failure path: unwind sbdp registration and soft state. */
	(void) sbdp_teardown_instance(sbdp_arg);
	FREE_SOFTC(instance);
	sbd_instances--;
	return (DDI_FAILURE);
}
669 
670 int
671 sbd_teardown_instance(int instance, caddr_t sbdp_arg)
672 {
673 	sbd_softstate_t	*softsp;
674 
675 	if (sbdp_teardown_instance(sbdp_arg) != DDI_SUCCESS)
676 		return (DDI_FAILURE);
677 
678 	softsp = (sbd_softstate_t *)GET_SOFTC(instance);
679 	if (softsp == NULL) {
680 		return (DDI_FAILURE);
681 	}
682 
683 	(void) sbd_dealloc_instance((sbd_board_t *)softsp->sbd_boardlist,
684 		softsp->max_boards);
685 
686 	FREE_SOFTC(instance);
687 	sbd_instances--;
688 	sbd_prevent_unloading = 0;
689 
690 	return (DDI_SUCCESS);
691 }
692 
/*
 * Execute the command recorded in the handle.  Any locking required
 * was already taken by the caller (sbd_ioctl).  Errors are reported
 * through the handle's sbderror_t rather than a return value.
 */
static void
sbd_exec_op(sbd_handle_t *hp)
{
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	static fn_t	f = "sbd_exec_op";

	switch (hp->h_cmd) {
		/* used only by the SBD_CMD_UNCONFIGURE case below */
		int	dev_canceled;

	case SBD_CMD_CONNECT:
		if (sbd_probe_board(hp))
			break;

		sbd_connect(hp);
		break;

	case SBD_CMD_CONFIGURE:
		sbd_dev_configure(hp);
		break;

	case SBD_CMD_UNCONFIGURE:
		/* Release first; only unconfigure if release succeeded. */
		if (((dev_canceled = sbd_dev_release(hp)) == 0) &&
		    (SBD_GET_ERRNO(SBD_HD2ERR(hp)) == 0 &&
		    SBD_GET_ERR(SBD_HD2ERR(hp)) == 0))
			dev_canceled = sbd_dev_unconfigure(hp);

		if (dev_canceled)
			sbd_cancel(hp);
		break;

	case SBD_CMD_DISCONNECT:
		mutex_enter(&sbp->sb_slock);
		if (sbd_disconnect(hp) == 0)
			(void) sbd_deprobe_board(hp);
		mutex_exit(&sbp->sb_slock);
		break;

	case SBD_CMD_STATUS:
		sbd_status(hp);
		break;

	case SBD_CMD_GETNCM:
		sbd_get_ncm(hp);
		break;

	case SBD_CMD_ASSIGN:
		sbd_assign_board(hp);
		break;

	case SBD_CMD_UNASSIGN:
		sbd_unassign_board(hp);
		break;

	case SBD_CMD_POWEROFF:
		sbd_poweroff_board(hp);
		break;

	case SBD_CMD_POWERON:
		sbd_poweron_board(hp);
		break;

	case SBD_CMD_TEST:
		sbd_test_board(hp);
		break;

	case SBD_CMD_PASSTHRU:
	{
		/* Forward the request verbatim to the platform layer. */
		int			rv;
		sbdp_handle_t		*hdp;
		sbderror_t		*ep = SBD_HD2ERR(hp);
		sbdp_ioctl_arg_t	ia, *iap;

		iap = &ia;

		iap->h_dev = hp->h_dev;
		iap->h_cmd = hp->h_cmd;
		iap->h_iap = (intptr_t)hp->h_iap;
		iap->h_mode = hp->h_mode;

		hdp = sbd_get_sbdp_handle(sbp, hp);
		rv = sbdp_ioctl(hdp, iap);
		if (rv != 0) {
			SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
			ep->e_errno = rv;
		}
		sbd_release_sbdp_handle(hdp);
		break;
	}

	default:
		SBD_SET_ERRNO(SBD_HD2ERR(hp), ENOTTY);
		cmn_err(CE_WARN,
			"sbd:%s: unknown command (%d)",
			f, hp->h_cmd);
		break;

	}

	if (SBD_GET_ERR(SBD_HD2ERR(hp)))
		PR_BYP("XXX e_code=%d", SBD_GET_ERR(SBD_HD2ERR(hp)));
	if (SBD_GET_ERRNO(SBD_HD2ERR(hp)))
		PR_BYP("XXX errno=%d", SBD_GET_ERRNO(SBD_HD2ERR(hp)));
}
796 
/*
 * Determine the component type (MEM/CPU/IO) that dip represents.
 *
 * When the board is connected, configured, unreferenced or
 * unconfigured, dip is matched against the board's cached devlists.
 * Otherwise -- or when no devlist entry matches (note the deliberate
 * fallthrough) -- the OBP "device_type" property is compared against
 * the platform attribute table.  Returns SBD_COMP_UNKNOWN when no
 * match is found.
 */
sbd_comp_type_t
sbd_get_devtype(sbd_handle_t *hp, dev_info_t *dip)
{
	sbd_board_t	*sbp = hp ? SBDH2BD(hp->h_sbd) : NULL;
	sbd_istate_t	bstate;
	dev_info_t	**devlist;
	int		i;
	char		device[OBP_MAXDRVNAME];
	int		devicelen;

	devicelen = sizeof (device);

	bstate = sbp ? SBD_BOARD_STATE(sbp) : SBD_STATE_EMPTY;
	/*
	 * if the board's connected or configured, search the
	 * devlists.  Otherwise check the device tree
	 */
	switch (bstate) {

	case SBD_STATE_CONNECTED:
	case SBD_STATE_CONFIGURED:
	case SBD_STATE_UNREFERENCED:
	case SBD_STATE_UNCONFIGURED:
		devlist = sbp->sb_devlist[NIX(SBD_COMP_MEM)];
		for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++)
			if (devlist[i] == dip)
				return (SBD_COMP_MEM);

		devlist = sbp->sb_devlist[NIX(SBD_COMP_CPU)];
		for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++)
			if (devlist[i] == dip)
				return (SBD_COMP_CPU);

		devlist = sbp->sb_devlist[NIX(SBD_COMP_IO)];
		for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++)
			if (devlist[i] == dip)
				return (SBD_COMP_IO);
		/*FALLTHROUGH*/

	default:
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    OBP_DEVICETYPE,  (caddr_t)device, &devicelen))
			break;

		/* Match the OBP type string against the attribute table. */
		for (i = 0; SBD_COMP(i) != SBD_COMP_UNKNOWN; i++) {
			if (strcmp(device, SBD_OTYPE(i)) != 0)
				continue;
			return (SBD_COMP(i));
		}

		break;
	}
	return (SBD_COMP_UNKNOWN);
}
851 
/*
 * Attach (configure) the board's devices, pass by pass.  Each pass:
 * fetch the attach devlist, run the pre-attach hook, attach every
 * device according to its type, then run the post-attach hook.
 * A memory attach that reports ESBD_CPUONLINE aborts the whole
 * operation immediately, freeing the devlist first.  Per-device
 * errors are folded into the handle via sbd_set_err_in_hdl().
 */
static void
sbd_dev_configure(sbd_handle_t *hp)
{
	int		n, unit;
	int32_t		pass, devnum;
	dev_info_t	*dip;
	sbd_devlist_t	*devlist;
	sbdp_handle_t	*hdp;
	sbd_comp_type_t	nodetype;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);

	pass = 1;

	hdp = sbd_get_sbdp_handle(sbp, hp);
	while ((devlist = sbd_get_attach_devlist(hp, &devnum, pass)) != NULL) {
		int	err;

		err = sbd_pre_attach_devlist(hp, devlist, devnum);
		if (err < 0) {
			break;
		} else if (err > 0) {
			/* positive means skip this pass, try the next */
			pass++;
			continue;
		}

		for (n = 0; n < devnum; n++) {
			sbderror_t	*ep;

			ep = &devlist[n].dv_error;
			SBD_SET_ERRNO(ep, 0);
			SBD_SET_ERR(ep, 0);
			dip = devlist[n].dv_dip;
			nodetype = sbd_get_devtype(hp, dip);

			unit = sbdp_get_unit_num(hdp, dip);
			if (unit < 0) {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}

			switch (nodetype) {
			case SBD_COMP_MEM:
				sbd_attach_mem(hp, ep);
				if (SBD_GET_ERR(ep) == ESBD_CPUONLINE) {
					/* abort entirely; free the list */
					FREESTRUCT(devlist, sbd_devlist_t,
						MAX_MEM_UNITS_PER_BOARD);
					sbd_release_sbdp_handle(hdp);
					return;
				}
				break;

			case SBD_COMP_CPU:
				sbd_attach_cpu(hp, ep, dip, unit);
				break;

			case SBD_COMP_IO:
				sbd_attach_io(hp, ep, dip, unit);
				break;

			default:
				SBD_SET_ERRNO(ep, ENOTTY);
				break;
			}

			if (sbd_set_err_in_hdl(hp, ep) == 0)
				continue;
		}

		err = sbd_post_attach_devlist(hp, devlist, devnum);
		if (err < 0)
			break;

		pass++;
	}
	sbd_release_sbdp_handle(hdp);
}
928 
929 static int
930 sbd_dev_release(sbd_handle_t *hp)
931 {
932 	int		n, unit;
933 	int32_t		pass, devnum;
934 	dev_info_t	*dip;
935 	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
936 	sbdp_handle_t	*hdp;
937 	sbd_devlist_t	*devlist;
938 	sbd_comp_type_t	nodetype;
939 	int		err = 0;
940 	int		dev_canceled;
941 
942 	pass = 1;
943 	hdp = sbd_get_sbdp_handle(sbp, hp);
944 
945 	sbp->sb_busy = 1;
946 	while ((devlist =
947 		sbd_get_release_devlist(hp, &devnum, pass)) != NULL) {
948 
949 		err = sbd_pre_release_devlist(hp, devlist, devnum);
950 		if (err < 0) {
951 			dev_canceled = 1;
952 			break;
953 		} else if (err > 0) {
954 			pass++;
955 			continue;
956 		}
957 
958 		dev_canceled = 0;
959 		for (n = 0; n < devnum; n++) {
960 			dip = devlist[n].dv_dip;
961 			nodetype = sbd_get_devtype(hp, dip);
962 
963 			unit = sbdp_get_unit_num(hdp, dip);
964 			if (unit < 0) {
965 				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
966 				break;
967 			}
968 
969 			if ((nodetype == SBD_COMP_MEM) &&
970 			    sbd_release_mem(hp, dip, unit)) {
971 
972 				dev_canceled++;
973 			}
974 
975 			sbd_release_done(hp, nodetype, dip);
976 		}
977 
978 		err = sbd_post_release_devlist(hp, devlist, devnum);
979 
980 		if (err < 0)
981 			break;
982 
983 		if (dev_canceled)
984 			break;
985 
986 		pass++;
987 	}
988 	sbp->sb_busy = 0;
989 
990 	sbd_release_sbdp_handle(hdp);
991 
992 	if (dev_canceled)
993 		return (dev_canceled);
994 
995 	return (err);
996 }
997 
/*
 * Detach (unconfigure) the board's devices, pass by pass, mirroring
 * sbd_dev_configure().  A pre-detach failure cancels the operation
 * only for memory devices.  Returns non-zero if the operation was
 * canceled, 0 otherwise.
 */
static int
sbd_dev_unconfigure(sbd_handle_t *hp)
{
	int		n, unit;
	int32_t		pass, devnum;
	dev_info_t	*dip;
	sbd_devlist_t	*devlist;
	sbdp_handle_t	*hdp;
	sbd_comp_type_t	nodetype;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	int		dev_canceled = 0;
	static fn_t	f = "sbd_dev_unconfigure";

	PR_ALL("%s...\n", f);

	pass = 1;
	hdp = sbd_get_sbdp_handle(sbp, hp);

	while ((devlist = sbd_get_detach_devlist(hp, &devnum, pass)) != NULL) {
		int	err, detach_err = 0;

		err = sbd_pre_detach_devlist(hp, devlist, devnum);
		if (err) {
			/*
			 * Only cancel the operation for memory in
			 * case of failure.
			 */
			nodetype = sbd_get_devtype(hp, devlist->dv_dip);
			if (nodetype == SBD_COMP_MEM)
				dev_canceled = 1;
			(void) sbd_post_detach_devlist(hp, devlist, devnum);
			break;
		}

		for (n = 0; n < devnum; n++) {
			sbderror_t	*ep;

			ep = &devlist[n].dv_error;
			SBD_SET_ERRNO(ep, 0);
			SBD_SET_ERR(ep, 0);
			dip = devlist[n].dv_dip;
			nodetype = sbd_get_devtype(hp, dip);

			unit = sbdp_get_unit_num(hdp, dip);
			if (unit < 0) {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}

			switch (nodetype) {
			case SBD_COMP_MEM:
				dev_canceled = sbd_detach_mem(hp, ep, unit);
				break;

			case SBD_COMP_CPU:
				sbd_detach_cpu(hp, ep, dip, unit);
				break;

			case SBD_COMP_IO:
				sbd_detach_io(hp, ep, dip, unit);
				break;

			default:
				SBD_SET_ERRNO(ep, ENOTTY);
				break;
			}

			/* stop detaching on the first recorded error */
			if (sbd_set_err_in_hdl(hp, ep) == 0) {
				detach_err = -1;
				break;
			}

		}
		err = sbd_post_detach_devlist(hp, devlist, devnum);
		if ((err < 0) || (detach_err < 0))
			break;

		pass++;
	}

	sbd_release_sbdp_handle(hdp);
	return (dev_canceled);
}
1081 
1082 int
1083 sbd_errno2ecode(int error)
1084 {
1085 	int	rv;
1086 
1087 	switch (error) {
1088 	case EBUSY:
1089 		rv = ESBD_BUSY;
1090 		break;
1091 	case EINVAL:
1092 		rv = ESBD_INVAL;
1093 		break;
1094 	case EALREADY:
1095 		rv = ESBD_ALREADY;
1096 		break;
1097 	case ENODEV:
1098 		rv = ESBD_NODEV;
1099 		break;
1100 	case ENOMEM:
1101 		rv = ESBD_NOMEM;
1102 		break;
1103 	default:
1104 		rv = ESBD_INVAL;
1105 	}
1106 
1107 	return (rv);
1108 }
1109 
/*
 * Configure a CPU into the system.  Caller must hold cpu_lock.
 * The cpuid is obtained from the platform layer; on success the
 * CPU's device path is recorded in sbp->sb_cpupath[unit] for later
 * error reporting.  Failures are reported through ep.
 */
static void
sbd_attach_cpu(sbd_handle_t *hp, sbderror_t *ep, dev_info_t *dip, int unit)
{
	int rv = 0;
	processorid_t	cpuid;
	sbdp_handle_t	*hdp;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	static fn_t	f = "sbd_attach_cpu";
	char		*pathname;

	ASSERT(MUTEX_HELD(&cpu_lock));

	ASSERT(dip);

	/*
	 * With the introduction of CMP devices, the CPU nodes
	 * are no longer directly under the top node. Since
	 * there is no plan to support CPU attach in the near
	 * future, a branch configure operation is not required.
	 */

	hdp = sbd_get_sbdp_handle(sbp, hp);
	cpuid = sbdp_get_cpuid(hdp, dip);
	if (cpuid < 0) {
		rv = -1;
		SBD_GET_PERR(hdp->h_err, ep);
	} else if ((rv = cpu_configure(cpuid)) != 0) {
		cmn_err(CE_WARN,
			"sbd:%s: cpu_configure for cpuid %d failed",
			f, cpuid);
		SBD_SET_ERR(ep, sbd_errno2ecode(rv));
	}
	sbd_release_sbdp_handle(hdp);

	if (rv == 0) {
		/* Remember the device path for later error strings. */
		ASSERT(sbp->sb_cpupath[unit] != NULL);
		pathname = sbp->sb_cpupath[unit];
		(void) ddi_pathname(dip, pathname);
	}
}
1150 
1151 /*
1152  *	translate errno
1153  */
1154 void
1155 sbd_errno_decode(int err, sbderror_t *ep, dev_info_t *dip)
1156 {
1157 	ASSERT(err != 0);
1158 
1159 	switch (err) {
1160 	case ENOMEM:
1161 		SBD_SET_ERR(ep, ESBD_NOMEM);
1162 		break;
1163 
1164 	case EBUSY:
1165 		SBD_SET_ERR(ep, ESBD_BUSY);
1166 		break;
1167 
1168 	case EIO:
1169 		SBD_SET_ERR(ep, ESBD_IO);
1170 		break;
1171 
1172 	case ENXIO:
1173 		SBD_SET_ERR(ep, ESBD_NODEV);
1174 		break;
1175 
1176 	case EINVAL:
1177 		SBD_SET_ERR(ep, ESBD_INVAL);
1178 		break;
1179 
1180 	case EFAULT:
1181 	default:
1182 		SBD_SET_ERR(ep, ESBD_FAULT);
1183 		break;
1184 	}
1185 
1186 	(void) ddi_pathname(dip, SBD_GET_ERRSTR(ep));
1187 }
1188 
/*
 * Unconfigure a CPU.  Caller must hold cpu_lock.  Failures are
 * reported through ep; on cpu_unconfigure() failure the saved
 * sb_cpupath[unit] is attached to the error as its path string.
 */
static void
sbd_detach_cpu(sbd_handle_t *hp, sbderror_t *ep, dev_info_t *dip, int unit)
{
	processorid_t	cpuid;
	int		rv;
	sbdp_handle_t	*hdp;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbd_error_t	*spe;
	static fn_t	f = "sbd_detach_cpu";

	ASSERT(MUTEX_HELD(&cpu_lock));

	ASSERT(dip);
	hdp = sbd_get_sbdp_handle(sbp, hp);
	spe = hdp->h_err;
	cpuid = sbdp_get_cpuid(hdp, dip);
	if (cpuid < 0) {
		SBD_GET_PERR(spe, ep);
		sbd_release_sbdp_handle(hdp);
		return;
	}

	if ((rv = cpu_unconfigure(cpuid)) != 0) {
		SBD_SET_ERR(ep, sbd_errno2ecode(rv));
		SBD_SET_ERRSTR(ep, sbp->sb_cpupath[unit]);
		cmn_err(CE_WARN,
			"sbd:%s: cpu_unconfigure for cpu %d failed",
			f, cpuid);
		sbd_release_sbdp_handle(hdp);
		return;
	}
	sbd_release_sbdp_handle(hdp);

	/*
	 * Since CPU nodes are no longer configured in CPU
	 * attach, the corresponding branch unconfigure
	 * operation that would be performed here is also
	 * no longer required.
	 */
}
1229 
1230 
/*
 * Detach all memory belonging to the board's memory unit, then
 * unconfigure the devinfo branches of its memory-controller nodes.
 * Returns -1 if the memory detach itself fails; branch-unconfigure
 * failures are recorded in ep but the function still returns 0
 * in that case.
 */
int
sbd_detach_mem(sbd_handle_t *hp, sbderror_t *ep, int unit)
{
	sbd_mem_unit_t	*mp;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	int		i, rv;
	static fn_t	f = "sbd_detach_mem";

	mp = SBD_GET_BOARD_MEMUNIT(sbp, unit);

	if (sbd_detach_memory(hp, ep, mp, unit)) {
		cmn_err(CE_WARN, "%s: detach fail", f);
		return (-1);
	}

	/*
	 * Now detach mem devinfo nodes with status lock held.
	 */
	for (i = 0; i < SBD_NUM_MC_PER_BOARD; i++) {
		dev_info_t	*fdip = NULL;

		if (mp->sbm_dip[i] == NULL)
			continue;
		ASSERT(e_ddi_branch_held(mp->sbm_dip[i]));
		mutex_enter(&sbp->sb_slock);
		rv = e_ddi_branch_unconfigure(mp->sbm_dip[i], &fdip,
		    DEVI_BRANCH_EVENT);
		mutex_exit(&sbp->sb_slock);
		if (rv) {
			/*
			 * If non-NULL, fdip is returned held and must be
			 * released.
			 */
			if (fdip != NULL) {
				sbd_errno_decode(rv, ep, fdip);
				ddi_release_devi(fdip);
			} else {
				sbd_errno_decode(rv, ep, mp->sbm_dip[i]);
			}
		}
	}

	return (0);
}
1275 
1276 /* start beginning of sbd.c */
1277 
1278 /*
1279  * MDR          memory support - somewhat disabled for now.
1280  * UNSAFE       unsafe driver code - I don't think we want this.
1281  *              need to check.
1282  * DEVNODE      This driver creates attachment points for individual
1283  *              components as well as boards.  We only need board
1284  *              support.
1285  * DEV2DEVSET   Put only present devices in devset.
1286  */
1287 
1288 
1289 static sbd_state_t
1290 rstate_cvt(sbd_istate_t state)
1291 {
1292 	sbd_state_t cs;
1293 
1294 	switch (state) {
1295 	case SBD_STATE_EMPTY:
1296 		cs = SBD_STAT_EMPTY;
1297 		break;
1298 	case SBD_STATE_OCCUPIED:
1299 	case SBD_STATE_FATAL:
1300 		cs = SBD_STAT_DISCONNECTED;
1301 		break;
1302 	case SBD_STATE_CONFIGURED:
1303 	case SBD_STATE_CONNECTED:
1304 	case SBD_STATE_UNCONFIGURED:
1305 	case SBD_STATE_PARTIAL:
1306 	case SBD_STATE_RELEASE:
1307 	case SBD_STATE_UNREFERENCED:
1308 		cs = SBD_STAT_CONNECTED;
1309 		break;
1310 	default:
1311 		cs = SBD_STAT_NONE;
1312 		break;
1313 	}
1314 
1315 	return (cs);
1316 }
1317 
1318 
1319 sbd_state_t
1320 ostate_cvt(sbd_istate_t state)
1321 {
1322 	sbd_state_t cs;
1323 
1324 	switch (state) {
1325 	case SBD_STATE_EMPTY:
1326 	case SBD_STATE_OCCUPIED:
1327 	case SBD_STATE_UNCONFIGURED:
1328 	case SBD_STATE_CONNECTED:
1329 	case SBD_STATE_FATAL:
1330 		cs = SBD_STAT_UNCONFIGURED;
1331 		break;
1332 	case SBD_STATE_PARTIAL:
1333 	case SBD_STATE_CONFIGURED:
1334 	case SBD_STATE_RELEASE:
1335 	case SBD_STATE_UNREFERENCED:
1336 		cs = SBD_STAT_CONFIGURED;
1337 		break;
1338 	default:
1339 		cs = SBD_STAT_NONE;
1340 		break;
1341 	}
1342 
1343 	return (cs);
1344 }
1345 
1346 int
1347 sbd_dealloc_instance(sbd_board_t *sbp, int max_boards)
1348 {
1349 	int		b;
1350 	sbd_board_t    *list = sbp;
1351 	static fn_t	f = "sbd_dealloc_instance";
1352 
1353 	PR_ALL("%s...\n", f);
1354 
1355 	if (sbp == NULL) {
1356 		return (-1);
1357 	}
1358 
1359 	for (b = 0; b < max_boards; b++) {
1360 		sbd_board_destroy(sbp++);
1361 	}
1362 
1363 	FREESTRUCT(list, sbd_board_t, max_boards);
1364 
1365 	return (0);
1366 }
1367 
1368 static sbd_devset_t
1369 sbd_dev2devset(sbd_comp_id_t *cid)
1370 {
1371 	static fn_t	f = "sbd_dev2devset";
1372 
1373 	sbd_devset_t	devset;
1374 	int		unit = cid->c_unit;
1375 
1376 	switch (cid->c_type) {
1377 		case SBD_COMP_NONE:
1378 			devset =  DEVSET(SBD_COMP_CPU, DEVSET_ANYUNIT);
1379 			devset |= DEVSET(SBD_COMP_MEM, DEVSET_ANYUNIT);
1380 			devset |= DEVSET(SBD_COMP_IO,  DEVSET_ANYUNIT);
1381 			break;
1382 
1383 		case SBD_COMP_CPU:
1384 			if ((unit > MAX_CPU_UNITS_PER_BOARD) || (unit < 0)) {
1385 				PR_ALL("%s: invalid cpu unit# = %d",
1386 					f, unit);
1387 				devset = 0;
1388 			} else
1389 				/*
1390 				 * Generate a devset that includes all the
1391 				 * cores of a CMP device. If this is not a
1392 				 * CMP, the extra cores will be eliminated
1393 				 * later since they are not present. This is
1394 				 * also true for CMP devices that do not have
1395 				 * all cores active.
1396 				 */
1397 				devset = DEVSET(SBD_COMP_CMP, unit);
1398 
1399 			break;
1400 
1401 		case SBD_COMP_MEM:
1402 
1403 			if ((unit > MAX_MEM_UNITS_PER_BOARD) || (unit < 0)) {
1404 #ifdef XXX_jeffco
1405 				PR_ALL("%s: invalid mem unit# = %d",
1406 					f, unit);
1407 				devset = 0;
1408 #endif
1409 				devset = DEVSET(cid->c_type, 0);
1410 				PR_ALL("%s: adjusted MEM devset = 0x%x\n",
1411 					f, devset);
1412 			} else
1413 				devset = DEVSET(cid->c_type, unit);
1414 			break;
1415 
1416 		case SBD_COMP_IO:
1417 			if ((unit > MAX_IO_UNITS_PER_BOARD) || (unit < 0)) {
1418 				PR_ALL("%s: invalid io unit# = %d",
1419 					f, unit);
1420 				devset = 0;
1421 			} else
1422 				devset = DEVSET(cid->c_type, unit);
1423 
1424 			break;
1425 
1426 		default:
1427 		case SBD_COMP_UNKNOWN:
1428 			devset = 0;
1429 			break;
1430 	}
1431 
1432 	return (devset);
1433 }
1434 
1435 /*
1436  * Simple mutex for covering handle list ops as it is only
1437  * used "infrequently". No need to add another mutex to the sbd_board_t.
1438  */
1439 static kmutex_t sbd_handle_list_mutex;
1440 
/*
 * Allocate and initialize a new operation handle for the board selected
 * by dev, seed it from the init args in iap, and link it onto that
 * board's handle list.  The handle is freed by sbd_release_handle().
 */
static sbd_handle_t *
sbd_get_handle(dev_t dev, sbd_softstate_t *softsp, intptr_t arg,
	sbd_init_arg_t *iap)
{
	sbd_handle_t		*hp;
	sbderror_t		*ep;
	sbd_priv_handle_t	*shp;
	sbd_board_t		*sbp = softsp->sbd_boardlist;
	int			board;

	/* The minor's slot number indexes the board array. */
	board = SBDGETSLOT(dev);
	ASSERT(board < softsp->max_boards);
	sbp += board;

	/*
	 * Brand-new handle.
	 */
	shp = kmem_zalloc(sizeof (sbd_priv_handle_t), KM_SLEEP);
	shp->sh_arg = (void *)arg;

	hp = MACHHD2HD(shp);

	ep = &shp->sh_err;

	hp->h_err = ep;
	hp->h_sbd = (void *) sbp;
	hp->h_dev = iap->dev;
	hp->h_cmd = iap->cmd;
	hp->h_mode = iap->mode;
	sbd_init_err(ep);

	/* Link onto the board's handle list under the global list lock. */
	mutex_enter(&sbd_handle_list_mutex);
	shp->sh_next = sbp->sb_handle;
	sbp->sb_handle = shp;
	mutex_exit(&sbd_handle_list_mutex);

	return (hp);
}
1479 
1480 void
1481 sbd_init_err(sbderror_t *ep)
1482 {
1483 	ep->e_errno = 0;
1484 	ep->e_code = 0;
1485 	ep->e_rsc[0] = '\0';
1486 }
1487 
1488 int
1489 sbd_set_err_in_hdl(sbd_handle_t *hp, sbderror_t *ep)
1490 {
1491 	sbderror_t	*hep = SBD_HD2ERR(hp);
1492 
1493 	/*
1494 	 * If there is an error logged already, don't rewrite it
1495 	 */
1496 	if (SBD_GET_ERR(hep) || SBD_GET_ERRNO(hep)) {
1497 		return (0);
1498 	}
1499 
1500 	if (SBD_GET_ERR(ep) || SBD_GET_ERRNO(ep)) {
1501 		SBD_SET_ERR(hep, SBD_GET_ERR(ep));
1502 		SBD_SET_ERRNO(hep, SBD_GET_ERRNO(ep));
1503 		SBD_SET_ERRSTR(hep, SBD_GET_ERRSTR(ep));
1504 		return (0);
1505 	}
1506 
1507 	return (-1);
1508 }
1509 
/*
 * Unlink a handle from its board's handle list and free it, along with
 * any copied-in platform options.  Panics if the handle is not found on
 * the board's list (indicates internal corruption).
 */
static void
sbd_release_handle(sbd_handle_t *hp)
{
	sbd_priv_handle_t	*shp, **shpp;
	sbd_board_t		*sbp;
	static fn_t		f = "sbd_release_handle";

	if (hp == NULL)
		return;

	sbp = SBDH2BD(hp->h_sbd);

	shp = HD2MACHHD(hp);

	mutex_enter(&sbd_handle_list_mutex);
	/*
	 * Locate the handle in the board's reference list.
	 */
	for (shpp = &sbp->sb_handle; (*shpp) && ((*shpp) != shp);
	    shpp = &((*shpp)->sh_next))
		/* empty */;

	if (*shpp == NULL) {
		cmn_err(CE_PANIC,
			"sbd:%s: handle not found in board %d",
			f, sbp->sb_num);
		/*NOTREACHED*/
	} else {
		/* Unlink by pointing the predecessor past this handle. */
		*shpp = shp->sh_next;
	}
	mutex_exit(&sbd_handle_list_mutex);

	/* Options were copied in by sbd_copyin_ioarg(); free them here. */
	if (hp->h_opts.copts != NULL) {
		FREESTRUCT(hp->h_opts.copts, char, hp->h_opts.size);
	}

	FREESTRUCT(shp, sbd_priv_handle_t, 1);
}
1548 
1549 sbdp_handle_t *
1550 sbd_get_sbdp_handle(sbd_board_t *sbp, sbd_handle_t *hp)
1551 {
1552 	sbdp_handle_t		*hdp;
1553 
1554 	hdp = kmem_zalloc(sizeof (sbdp_handle_t), KM_SLEEP);
1555 	hdp->h_err = kmem_zalloc(sizeof (sbd_error_t), KM_SLEEP);
1556 	if (sbp == NULL) {
1557 		hdp->h_board = -1;
1558 		hdp->h_wnode = -1;
1559 	} else {
1560 		hdp->h_board = sbp->sb_num;
1561 		hdp->h_wnode = sbp->sb_wnode;
1562 	}
1563 
1564 	if (hp == NULL) {
1565 		hdp->h_flags = 0;
1566 		hdp->h_opts = NULL;
1567 	} else {
1568 		hdp->h_flags = SBD_2_SBDP_FLAGS(hp->h_flags);
1569 		hdp->h_opts = &hp->h_opts;
1570 	}
1571 
1572 	return (hdp);
1573 }
1574 
1575 void
1576 sbd_release_sbdp_handle(sbdp_handle_t *hdp)
1577 {
1578 	if (hdp == NULL)
1579 		return;
1580 
1581 	kmem_free(hdp->h_err, sizeof (sbd_error_t));
1582 	kmem_free(hdp, sizeof (sbdp_handle_t));
1583 }
1584 
1585 void
1586 sbd_reset_error_sbdph(sbdp_handle_t *hdp)
1587 {
1588 	if ((hdp != NULL) && (hdp->h_err != NULL)) {
1589 		bzero(hdp->h_err, sizeof (sbd_error_t));
1590 	}
1591 }
1592 
/*
 * Copy the ioctl argument from userland into cmdp, converting from the
 * ILP32 layout when the caller is a 32-bit process.  For commands other
 * than SBD_CMD_STATUS, any platform-specific option string is also
 * copied into hp->h_opts (freed later by sbd_release_handle()).
 * Returns 0 on success or an errno (EINVAL/EFAULT).
 */
static int
sbd_copyin_ioarg(sbd_handle_t *hp, int mode, int cmd, sbd_cmd_t *cmdp,
	sbd_ioctl_arg_t *iap)
{
	static fn_t	f = "sbd_copyin_ioarg";

	if (iap == NULL)
		return (EINVAL);

	bzero((caddr_t)cmdp, sizeof (sbd_cmd_t));

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		/* 32-bit caller: copy in the ILP32 layout and widen it. */
		sbd_cmd32_t	scmd32;

		bzero((caddr_t)&scmd32, sizeof (sbd_cmd32_t));

		if (ddi_copyin((void *)iap, (void *)&scmd32,
				sizeof (sbd_cmd32_t), mode)) {
			cmn_err(CE_WARN,
				"sbd:%s: (32bit) failed to copyin "
					"sbdcmd-struct", f);
			return (EFAULT);
		}
		cmdp->cmd_cm.c_id.c_type = scmd32.cmd_cm.c_id.c_type;
		cmdp->cmd_cm.c_id.c_unit = scmd32.cmd_cm.c_id.c_unit;
		bcopy(&scmd32.cmd_cm.c_id.c_name[0],
			&cmdp->cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
		cmdp->cmd_cm.c_flags = scmd32.cmd_cm.c_flags;
		cmdp->cmd_cm.c_len = scmd32.cmd_cm.c_len;
		cmdp->cmd_cm.c_opts = (caddr_t)(uintptr_t)scmd32.cmd_cm.c_opts;

		if (cmd == SBD_CMD_PASSTHRU) {
			PR_BYP("passthru copyin: iap=%p, sz=%ld", (void *)iap,
				sizeof (sbd_cmd32_t));
			PR_BYP("passthru copyin: c_opts=%x, c_len=%d",
				scmd32.cmd_cm.c_opts,
				scmd32.cmd_cm.c_len);
		}

		switch (cmd) {
		case SBD_CMD_STATUS:
			/* Status has extra fields beyond the common part. */
			cmdp->cmd_stat.s_nbytes = scmd32.cmd_stat.s_nbytes;
			cmdp->cmd_stat.s_statp =
				(caddr_t)(uintptr_t)scmd32.cmd_stat.s_statp;
			break;
		default:
			break;

		}
	} else
#endif /* _MULTI_DATAMODEL */
	if (ddi_copyin((void *)iap, (void *)cmdp,
			sizeof (sbd_cmd_t), mode) != 0) {
		cmn_err(CE_WARN,
			"sbd:%s: failed to copyin sbd cmd_t struct", f);
		return (EFAULT);
	}
	/*
	 * A user may set platform specific options so we need to
	 * copy them in
	 */
	if ((cmd != SBD_CMD_STATUS) && ((hp->h_opts.size = cmdp->cmd_cm.c_len)
	    > 0)) {
		hp->h_opts.size += 1;	/* For null termination of string. */
		hp->h_opts.copts = GETSTRUCT(char, hp->h_opts.size);
		if (ddi_copyin((void *)cmdp->cmd_cm.c_opts,
		    (void *)hp->h_opts.copts,
		    cmdp->cmd_cm.c_len, hp->h_mode) != 0) {
			/* copts is freed in sbd_release_handle(). */
			cmn_err(CE_WARN,
			    "sbd:%s: failed to copyin options", f);
			return (EFAULT);
		}
	}

	return (0);
}
1671 
/*
 * Copy the (possibly updated) command structure back out to the
 * userland ioctl argument, converting to the ILP32 layout when the
 * caller is a 32-bit process.  Returns 0 on success or an errno
 * (EINVAL/EFAULT).
 */
static int
sbd_copyout_ioarg(int mode, int cmd, sbd_cmd_t *scp, sbd_ioctl_arg_t *iap)
{
	static fn_t	f = "sbd_copyout_ioarg";

	if ((iap == NULL) || (scp == NULL))
		return (EINVAL);

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		/* 32-bit caller: narrow into the ILP32 layout first. */
		sbd_cmd32_t	scmd32;

		scmd32.cmd_cm.c_id.c_type = scp->cmd_cm.c_id.c_type;
		scmd32.cmd_cm.c_id.c_unit = scp->cmd_cm.c_id.c_unit;
		bcopy(scp->cmd_cm.c_id.c_name,
			scmd32.cmd_cm.c_id.c_name, OBP_MAXPROPNAME);

		scmd32.cmd_cm.c_flags = scp->cmd_cm.c_flags;

		switch (cmd) {
		case SBD_CMD_GETNCM:
			/* GETNCM returns a component count to the caller. */
			scmd32.cmd_getncm.g_ncm = scp->cmd_getncm.g_ncm;
			break;
		default:
			break;
		}

		if (ddi_copyout((void *)&scmd32, (void *)iap,
				sizeof (sbd_cmd32_t), mode)) {
			cmn_err(CE_WARN,
				"sbd:%s: (32bit) failed to copyout "
					"sbdcmd struct", f);
			return (EFAULT);
		}
	} else
#endif /* _MULTI_DATAMODEL */
	if (ddi_copyout((void *)scp, (void *)iap,
			sizeof (sbd_cmd_t), mode) != 0) {
		cmn_err(CE_WARN,
			"sbd:%s: failed to copyout sbdcmd struct", f);
		return (EFAULT);
	}

	return (0);
}
1717 
/*
 * Copy the error information in iap->i_err out to the userland ioctl
 * argument pointed to by arg, converting to the ILP32 layout when the
 * caller is a 32-bit process.  Returns 0 on success or EFAULT.
 */
static int
sbd_copyout_errs(int mode, sbd_ioctl_arg_t *iap, void *arg)
{
	static fn_t	f = "sbd_copyout_errs";
	sbd_ioctl_arg_t	*uap;

	uap = (sbd_ioctl_arg_t *)arg;

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		sbd_error32_t err32;
		sbd_ioctl_arg32_t *uap32;

		uap32 = (sbd_ioctl_arg32_t *)arg;

		/* Narrow the error record into the ILP32 layout. */
		err32.e_code = iap->ie_code;
		(void) strcpy(err32.e_rsc, iap->ie_rsc);

		if (ddi_copyout((void *)&err32, (void *)&uap32->i_err,
				sizeof (sbd_error32_t), mode)) {
			cmn_err(CE_WARN,
				"sbd:%s: failed to copyout ioctl32 errs",
				f);
			return (EFAULT);
		}
	} else
#endif /* _MULTI_DATAMODEL */
	if (ddi_copyout((void *)&iap->i_err, (void *)&uap->i_err,
			sizeof (sbd_error_t), mode) != 0) {
		cmn_err(CE_WARN,
			"sbd:%s: failed to copyout ioctl errs", f);
		return (EFAULT);
	}

	return (0);
}
1754 
1755 /*
1756  * State transition policy is that if at least one
1757  * device cannot make the transition, then none of
1758  * the requested devices are allowed to transition.
1759  *
1760  * Returns the state that is in error, if any.
1761  */
static int
sbd_check_transition(sbd_board_t *sbp, sbd_devset_t *devsetp,
			struct sbd_state_trans *transp)
{
	int	s, ut;
	int	state_err = 0;
	sbd_devset_t	devset;
	static fn_t	f = "sbd_check_transition";

	devset = *devsetp;

	if (!devset) {
		/*
		 * Transition does not deal with any components.
		 * This is the case for addboard/deleteboard.
		 */
		PR_ALL("%s: no devs: requested devset = 0x%x,"
			" final devset = 0x%x\n",
			f, (uint_t)*devsetp, (uint_t)devset);

		return (0);
	}

	/*
	 * For each device class, drop from the devset any unit whose
	 * current state does not permit this transition, remembering
	 * the first offending state.
	 */
	if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
		for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
			if (DEVSET_IN_SET(devset, SBD_COMP_MEM, ut) == 0)
				continue;
			s = (int)SBD_DEVICE_STATE(sbp, SBD_COMP_MEM, ut);
			if (transp->x_op[s].x_rv) {
				if (!state_err)
					state_err = s;
				DEVSET_DEL(devset, SBD_COMP_MEM, ut);
			}
		}
	}

	if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
		for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
			if (DEVSET_IN_SET(devset, SBD_COMP_CPU, ut) == 0)
				continue;
			s = (int)SBD_DEVICE_STATE(sbp, SBD_COMP_CPU, ut);
			if (transp->x_op[s].x_rv) {
				if (!state_err)
					state_err = s;
				DEVSET_DEL(devset, SBD_COMP_CPU, ut);
			}
		}
	}

	if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
		for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
			if (DEVSET_IN_SET(devset, SBD_COMP_IO, ut) == 0)
				continue;
			s = (int)SBD_DEVICE_STATE(sbp, SBD_COMP_IO, ut);
			if (transp->x_op[s].x_rv) {
				if (!state_err)
					state_err = s;
				DEVSET_DEL(devset, SBD_COMP_IO, ut);
			}
		}
	}

	PR_ALL("%s: requested devset = 0x%x, final devset = 0x%x\n",
		f, (uint_t)*devsetp, (uint_t)devset);

	*devsetp = devset;
	/*
	 * If there are some remaining components for which
	 * this state transition is valid, then allow them
	 * through, otherwise if none are left then return
	 * the state error.
	 */
	return (devset ? 0 : state_err);
}
1836 
1837 /*
1838  * pre-op entry point must SET_ERRNO(), if needed.
1839  * Return value of non-zero indicates failure.
1840  */
static int
sbd_pre_op(sbd_handle_t *hp)
{
	int		rv = 0, t;
	int		cmd, serr = 0;
	sbd_devset_t	devset;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbd_priv_handle_t	*shp = HD2MACHHD(hp);
	sbderror_t	*ep = SBD_HD2ERR(hp);
	sbd_cmd_t	*cmdp;
	static fn_t	f = "sbd_pre_op";

	cmd = hp->h_cmd;
	devset = shp->sh_devset;

	switch (cmd) {
		case SBD_CMD_CONNECT:
		case SBD_CMD_DISCONNECT:
		case SBD_CMD_UNCONFIGURE:
		case SBD_CMD_CONFIGURE:
		case SBD_CMD_ASSIGN:
		case SBD_CMD_UNASSIGN:
		case SBD_CMD_POWERON:
		case SBD_CMD_POWEROFF:
		case SBD_CMD_TEST:
		/* ioctls allowed if caller has write permission */
		if (!(hp->h_mode & FWRITE)) {
			SBD_SET_ERRNO(ep, EPERM);
			return (-1);
		}
		/* FALLTHROUGH */

		default:
		break;
	}

	/* Copy in the ioctl argument; freed in sbd_post_op() or below. */
	hp->h_iap = GETSTRUCT(sbd_ioctl_arg_t, 1);
	rv = sbd_copyin_ioarg(hp, hp->h_mode, cmd,
		(sbd_cmd_t *)hp->h_iap, shp->sh_arg);
	if (rv) {
		SBD_SET_ERRNO(ep, rv);
		FREESTRUCT(hp->h_iap, sbd_ioctl_arg_t, 1);
		hp->h_iap = NULL;
		cmn_err(CE_WARN, "%s: copyin fail", f);
		return (-1);
	} else {
		cmdp =  (sbd_cmd_t *)hp->h_iap;
		if (cmdp->cmd_cm.c_id.c_name[0] != '\0') {

			/* Resolve a component name to its type. */
			cmdp->cmd_cm.c_id.c_type = SBD_COMP(sbd_name_to_idx(
				cmdp->cmd_cm.c_id.c_name));
			if (cmdp->cmd_cm.c_id.c_type == SBD_COMP_MEM) {
				/* All board memory is treated as unit 0. */
				if (cmdp->cmd_cm.c_id.c_unit == -1)
					cmdp->cmd_cm.c_id.c_unit = 0;
			}
		}
		devset = shp->sh_orig_devset = shp->sh_devset =
		    sbd_dev2devset(&cmdp->cmd_cm.c_id);
		if (devset == 0) {
			SBD_SET_ERRNO(ep, EINVAL);
			FREESTRUCT(hp->h_iap, sbd_ioctl_arg_t, 1);
			hp->h_iap = NULL;
			return (-1);
		}
	}

	/*
	 * Always turn on these bits ala Sunfire DR.
	 */
	hp->h_flags |= SBD_FLAG_DEVI_FORCE;

	if (cmdp->cmd_cm.c_flags & SBD_FLAG_FORCE)
		hp->h_flags |= SBD_IOCTL_FLAG_FORCE;

	/*
	 * Check for valid state transitions.
	 */
	if (!serr && ((t = CMD2INDEX(cmd)) != -1)) {
		struct sbd_state_trans	*transp;
		int			state_err;

		transp = &sbd_state_transition[t];
		ASSERT(transp->x_cmd == cmd);

		state_err = sbd_check_transition(sbp, &devset, transp);

		if (state_err < 0) {
			/*
			 * Invalidate device.
			 */
			SBD_SET_ERRNO(ep, ENOTTY);
			serr = -1;
			PR_ALL("%s: invalid devset (0x%x)\n",
				f, (uint_t)devset);
		} else if (state_err != 0) {
			/*
			 * State transition is not a valid one.
			 */
			SBD_SET_ERRNO(ep, transp->x_op[state_err].x_err);
			serr = transp->x_op[state_err].x_rv;
			PR_ALL("%s: invalid state %s(%d) for cmd %s(%d)\n",
				f, sbd_state_str[state_err], state_err,
				SBD_CMD_STR(cmd), cmd);
		}
		if (serr && SBD_GET_ERRNO(ep) != 0) {
			/*
			 * A state transition error occurred.
			 */
			if (serr < 0) {
				SBD_SET_ERR(ep, ESBD_INVAL);
			} else {
				SBD_SET_ERR(ep, ESBD_STATE);
			}
			PR_ALL("%s: invalid state transition\n", f);
		} else {
			/* Transition allowed: commit the trimmed devset. */
			shp->sh_devset = devset;
		}
	}

	if (serr && !rv && hp->h_iap) {

		/*
		 * There was a state error.  We successfully copied
		 * in the ioctl argument, so let's fill in the
		 * error and copy it back out.
		 */

		if (SBD_GET_ERR(ep) && SBD_GET_ERRNO(ep) == 0)
			SBD_SET_ERRNO(ep, EIO);

		SBD_SET_IOCTL_ERR(&hp->h_iap->i_err,
			ep->e_code,
			ep->e_rsc);
		(void) sbd_copyout_errs(hp->h_mode, hp->h_iap, shp->sh_arg);
		FREESTRUCT(hp->h_iap, sbd_ioctl_arg_t, 1);
		hp->h_iap = NULL;
		rv = -1;
	}

	return (rv);
}
1981 
/*
 * Common post-processing for an sbd command: timestamp state-changing
 * commands, normalize the error (an ESBD code with no errno becomes
 * EIO), copy any error back to the user, and free the ioctl argument.
 */
static void
sbd_post_op(sbd_handle_t *hp)
{
	int		cmd;
	sbderror_t	*ep = SBD_HD2ERR(hp);
	sbd_priv_handle_t	*shp = HD2MACHHD(hp);
	sbd_board_t    *sbp = SBDH2BD(hp->h_sbd);

	cmd = hp->h_cmd;

	switch (cmd) {
		case SBD_CMD_CONFIGURE:
		case SBD_CMD_UNCONFIGURE:
		case SBD_CMD_CONNECT:
		case SBD_CMD_DISCONNECT:
			/* Record when the board last changed state. */
			sbp->sb_time = gethrestime_sec();
			break;

		default:
			break;
	}

	if (SBD_GET_ERR(ep) && SBD_GET_ERRNO(ep) == 0) {
		SBD_SET_ERRNO(ep, EIO);
	}

	if (shp->sh_arg != NULL) {

		if (SBD_GET_ERR(ep) != ESBD_NOERROR) {

			/*
			 * NOTE(review): h_iap is dereferenced here before
			 * the NULL check below — presumably callers never
			 * reach sbd_post_op() with an error set and h_iap
			 * already freed (sbd_pre_op() returns -1 in the
			 * paths that free it); confirm against callers.
			 */
			SBD_SET_IOCTL_ERR(&hp->h_iap->i_err,
				ep->e_code,
				ep->e_rsc);

			(void) sbd_copyout_errs(hp->h_mode, hp->h_iap,
					shp->sh_arg);
		}

		if (hp->h_iap != NULL) {
			FREESTRUCT(hp->h_iap, sbd_ioctl_arg_t, 1);
			hp->h_iap = NULL;
		}
	}
}
2026 
/*
 * Connect (probe) the board via the platform layer.  Any platform error
 * is propagated into the handle's error structure.  The board's cached
 * status is invalidated so the next status request re-reads hardware.
 * Returns the sbdp_connect_board() result (0 on success).
 */
static int
sbd_probe_board(sbd_handle_t *hp)
{
	int		rv;
	sbd_board_t    *sbp;
	sbdp_handle_t	*hdp;
	static fn_t	f = "sbd_probe_board";

	sbp = SBDH2BD(hp->h_sbd);

	ASSERT(sbp != NULL);
	PR_ALL("%s for board %d", f, sbp->sb_num);


	hdp = sbd_get_sbdp_handle(sbp, hp);

	if ((rv = sbdp_connect_board(hdp)) != 0) {
		sbderror_t	*ep = SBD_HD2ERR(hp);

		SBD_GET_PERR(hdp->h_err, ep);
	}

	/*
	 * We need to force a recache after the connect.  The cached
	 * info may be incorrect
	 */
	mutex_enter(&sbp->sb_flags_mutex);
	sbp->sb_flags &= ~SBD_BOARD_STATUS_CACHED;
	mutex_exit(&sbp->sb_flags_mutex);

	/* Fault-injection hook for testing the probe error path. */
	SBD_INJECT_ERR(SBD_PROBE_BOARD_PSEUDO_ERR, hp->h_err, EIO,
		ESGT_PROBE, NULL);

	sbd_release_sbdp_handle(hdp);

	return (rv);
}
2064 
/*
 * Disconnect (deprobe) the board via the platform layer.  Any platform
 * error is propagated into the handle's error structure, and the
 * board's cached status is invalidated.  Returns the
 * sbdp_disconnect_board() result (0 on success).
 */
static int
sbd_deprobe_board(sbd_handle_t *hp)
{
	int		rv;
	sbdp_handle_t	*hdp;
	sbd_board_t	*sbp;
	static fn_t	f = "sbd_deprobe_board";

	PR_ALL("%s...\n", f);

	sbp = SBDH2BD(hp->h_sbd);

	hdp = sbd_get_sbdp_handle(sbp, hp);

	if ((rv = sbdp_disconnect_board(hdp)) != 0) {
		sbderror_t	*ep = SBD_HD2ERR(hp);

		SBD_GET_PERR(hdp->h_err, ep);
	}

	/* Force a status recache on the next status request. */
	mutex_enter(&sbp->sb_flags_mutex);
	sbp->sb_flags &= ~SBD_BOARD_STATUS_CACHED;
	mutex_exit(&sbp->sb_flags_mutex);

	/* Fault-injection hook for testing the deprobe error path. */
	SBD_INJECT_ERR(SBD_DEPROBE_BOARD_PSEUDO_ERR, hp->h_err, EIO,
		ESGT_DEPROBE, NULL);

	sbd_release_sbdp_handle(hdp);
	return (rv);
}
2095 
2096 /*
2097  * Check if a CPU node is part of a CMP.
2098  */
2099 int
2100 sbd_is_cmp_child(dev_info_t *dip)
2101 {
2102 	dev_info_t *pdip;
2103 
2104 	if (strcmp(ddi_node_name(dip), "cpu") != 0) {
2105 		return (0);
2106 	}
2107 
2108 	pdip = ddi_get_parent(dip);
2109 
2110 	ASSERT(pdip);
2111 
2112 	if (strcmp(ddi_node_name(pdip), "cmp") == 0) {
2113 		return (1);
2114 	}
2115 
2116 	return (0);
2117 }
2118 
2119 /*
2120  * Returns the nodetype if dip is a top dip on the board of
2121  * interest or SBD_COMP_UNKNOWN otherwise
2122  */
static sbd_comp_type_t
get_node_type(sbd_board_t *sbp, dev_info_t *dip, int *unitp)
{
	int		idx, unit;
	sbd_handle_t	*hp;
	sbdp_handle_t	*hdp;
	char		otype[OBP_MAXDRVNAME];
	int		otypelen;

	ASSERT(sbp);

	/* Preset to "no unit" so callers see -1 on any early return. */
	if (unitp)
		*unitp = -1;

	hp = MACHBD2HD(sbp);

	/* Reject nodes that do not belong to this board. */
	hdp = sbd_get_sbdp_handle(sbp, hp);
	if (sbdp_get_board_num(hdp, dip) != sbp->sb_num) {
		sbd_release_sbdp_handle(hdp);
		return (SBD_COMP_UNKNOWN);
	}

	/*
	 * sbdp_get_unit_num will return (-1) for cmp as there
	 * is no "device_type" property associated with cmp.
	 * Therefore we will just skip getting unit number for
	 * cmp.  Callers of this function need to check the
	 * value set in unitp before using it to dereference
	 * an array.
	 */
	if (strcmp(ddi_node_name(dip), "cmp") == 0) {
		sbd_release_sbdp_handle(hdp);
		return (SBD_COMP_CMP);
	}

	/* Classify the node by its OBP "device_type" property. */
	otypelen = sizeof (otype);
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    OBP_DEVICETYPE,  (caddr_t)otype, &otypelen)) {
		sbd_release_sbdp_handle(hdp);
		return (SBD_COMP_UNKNOWN);
	}

	idx = sbd_otype_to_idx(otype);

	if (SBD_COMP(idx) == SBD_COMP_UNKNOWN) {
		sbd_release_sbdp_handle(hdp);
		return (SBD_COMP_UNKNOWN);
	}

	unit = sbdp_get_unit_num(hdp, dip);
	if (unit == -1) {
		cmn_err(CE_WARN,
			"get_node_type: %s unit fail %p", otype, (void *)dip);
		sbd_release_sbdp_handle(hdp);
		return (SBD_COMP_UNKNOWN);
	}

	sbd_release_sbdp_handle(hdp);

	if (unitp)
		*unitp = unit;

	return (SBD_COMP(idx));
}
2187 
/*
 * Walk context handed to sbd_setup_devlists() via ddi_walk_devs().
 */
typedef struct {
	sbd_board_t	*sbp;	/* board whose devlists are being built */
	int		nmc;	/* count of memory-controller dips found */
	int		hold;	/* not referenced in this file chunk */
} walk_tree_t;
2193 
/*
 * ddi_walk_devs() callback: for each CPU, memory or I/O node belonging
 * to the board in the walk context, record its device path and devinfo
 * pointer in the board's devlists, mark the device present, and collect
 * memory-controller dips into the board's memory unit.
 */
static int
sbd_setup_devlists(dev_info_t *dip, void *arg)
{
	walk_tree_t	*wp;
	dev_info_t	**devlist = NULL;
	char		*pathname = NULL;
	sbd_mem_unit_t	*mp;
	static fn_t	f = "sbd_setup_devlists";
	sbd_board_t	*sbp;
	int		unit;
	sbd_comp_type_t nodetype;

	ASSERT(dip);

	wp = (walk_tree_t *)arg;

	if (wp == NULL) {
		PR_ALL("%s:bad arg\n", f);
		return (DDI_WALK_TERMINATE);
	}

	sbp = wp->sbp;

	nodetype = get_node_type(sbp, dip, &unit);

	/* Pick the per-unit pathname buffer for this component type. */
	switch (nodetype) {

	case SBD_COMP_CPU:
		pathname = sbp->sb_cpupath[unit];
		break;

	case SBD_COMP_MEM:
		pathname = sbp->sb_mempath[unit];
		break;

	case SBD_COMP_IO:
		pathname = sbp->sb_iopath[unit];
		break;

	case SBD_COMP_CMP:
	case SBD_COMP_UNKNOWN:
		/*
		 * This dip is not of interest to us
		 */
		return (DDI_WALK_CONTINUE);

	default:
		ASSERT(0);
		return (DDI_WALK_CONTINUE);
	}

	/*
	 * dip's parent is being held busy by ddi_walk_devs(),
	 * so dip doesn't have to be held while calling ddi_pathname()
	 */
	if (pathname) {
		(void) ddi_pathname(dip, pathname);
	}

	devlist = sbp->sb_devlist[NIX(nodetype)];

	/*
	 * The branch rooted at dip should already be held,
	 * unless we are dealing with a core of a CMP.
	 */
	ASSERT(sbd_is_cmp_child(dip) || e_ddi_branch_held(dip));
	devlist[unit] = dip;

	/*
	 * This test is required if multiple devices are considered
	 * as one. This is the case for memory-controller nodes.
	 */
	if (!SBD_DEV_IS_PRESENT(sbp, nodetype, unit)) {
		sbp->sb_ndev++;
		SBD_DEV_SET_PRESENT(sbp, nodetype, unit);
	}

	/* Memory controllers are additionally collected per-board. */
	if (nodetype == SBD_COMP_MEM) {
		mp = SBD_GET_BOARD_MEMUNIT(sbp, unit);
		ASSERT(wp->nmc < SBD_NUM_MC_PER_BOARD);
		mp->sbm_dip[wp->nmc++] = dip;
	}

	return (DDI_WALK_CONTINUE);
}
2279 
2280 /*
2281  * This routine is used to construct the memory devlist.
2282  * In Starcat and Serengeti platforms, a system board can contain up to
2283  * four memory controllers (MC).  The MCs have been programmed by POST for
2284  * optimum memory interleaving amongst their peers on the same board.
2285  * This DR driver does not support deinterleaving.  Therefore, the smallest
2286  * unit of memory that can be manipulated by this driver is all of the
2287  * memory on a board.  Because of this restriction, a board's memory devlist
2288  * is populated with only one of the four (possible) MC dnodes on that board.
2289  * Care must be taken to ensure that the selected MC dnode represents the
2290  * lowest physical address to which memory on the board will respond to.
2291  * This is required in order to preserve the semantics of
2292  * sbdp_get_base_physaddr() when applied to a MC dnode stored in the
2293  * memory devlist.
2294  */
static void
sbd_init_mem_devlists(sbd_board_t *sbp)
{
	dev_info_t	**devlist;
	sbd_mem_unit_t	*mp;
	dev_info_t	*mc_dip;
	sbdp_handle_t	*hdp;
	uint64_t	mc_pa, lowest_pa;
	int		i;
	sbd_handle_t	*hp = MACHBD2HD(sbp);

	devlist = sbp->sb_devlist[NIX(SBD_COMP_MEM)];

	mp = SBD_GET_BOARD_MEMUNIT(sbp, 0);

	mc_dip = mp->sbm_dip[0];
	if (mc_dip == NULL)
		return;		/* No MC dips found for this board */

	hdp = sbd_get_sbdp_handle(sbp, hp);

	if (sbdphw_get_base_physaddr(hdp, mc_dip, &mc_pa)) {
		/* TODO: log complaint about dnode */

pretend_no_mem:
		/*
		 * We are here because sbdphw_get_base_physaddr() failed.
		 * Although it is very unlikely to happen, it did.  Lucky us.
		 * Since we can no longer examine _all_ of the MCs on this
		 * board to determine which one is programmed to the lowest
		 * physical address, we cannot involve any of the MCs on
		 * this board in DR operations.  To ensure this, we pretend
		 * that this board does not contain any memory.
		 *
		 * Paranoia: clear the dev_present mask.
		 */
		if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_MEM, 0)) {
			ASSERT(sbp->sb_ndev != 0);
			SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_MEM, 0);
			sbp->sb_ndev--;
		}

		for (i = 0; i < SBD_NUM_MC_PER_BOARD; i++) {
			mp->sbm_dip[i] = NULL;
		}

		sbd_release_sbdp_handle(hdp);
		return;
	}

	/* assume this one will win. */
	devlist[0] = mc_dip;
	mp->sbm_cm.sbdev_dip = mc_dip;
	lowest_pa = mc_pa;

	/*
	 * We know the base physical address of one of the MC devices.  Now
	 * we will enumerate through all of the remaining MC devices on
	 * the board to find which of them is programmed to the lowest
	 * physical address.
	 */
	for (i = 1; i < SBD_NUM_MC_PER_BOARD; i++) {
		mc_dip = mp->sbm_dip[i];
		if (mc_dip == NULL) {
			/* sbm_dip[] is packed; no more MCs to look at. */
			break;
		}

		if (sbdphw_get_base_physaddr(hdp, mc_dip, &mc_pa)) {
			cmn_err(CE_NOTE, "No mem on board %d unit %d",
				sbp->sb_num, i);
			break;
		}
		if (mc_pa < lowest_pa) {
			/* New lowest-addressed MC becomes the unit's dip. */
			mp->sbm_cm.sbdev_dip = mc_dip;
			devlist[0] = mc_dip;
			lowest_pa = mc_pa;
		}
	}

	sbd_release_sbdp_handle(hdp);
}
2376 
2377 static int
2378 sbd_name_to_idx(char *name)
2379 {
2380 	int idx;
2381 
2382 	for (idx = 0; SBD_COMP(idx) != SBD_COMP_UNKNOWN; idx++) {
2383 		if (strcmp(name, SBD_DEVNAME(idx)) == 0) {
2384 			break;
2385 		}
2386 	}
2387 
2388 	return (idx);
2389 }
2390 
2391 static int
2392 sbd_otype_to_idx(char *otype)
2393 {
2394 	int idx;
2395 
2396 	for (idx = 0; SBD_COMP(idx) != SBD_COMP_UNKNOWN; idx++) {
2397 
2398 		if (strcmp(otype, SBD_OTYPE(idx)) == 0) {
2399 			break;
2400 		}
2401 	}
2402 
2403 	return (idx);
2404 }
2405 
/*
 * Rebuild the board's per-component device lists by walking the
 * device tree under sb_topdip.  Resets all mem/cpu/io unit entries,
 * runs sbd_setup_devlists() over the tree, then initializes the cpu
 * units (starting their cpus) and the memory device lists.
 *
 * Returns the number of devices found (sbp->sb_ndev); 0 means the
 * board is effectively empty.
 */
static int
sbd_init_devlists(sbd_board_t *sbp)
{
	int		i;
	sbd_dev_unit_t	*dp;
	sbd_mem_unit_t	*mp;
	walk_tree_t	*wp, walk = {0};
	dev_info_t	*pdip;
	static fn_t	f = "sbd_init_devlists";

	PR_ALL("%s (board = %d)...\n", f, sbp->sb_num);

	wp = &walk;

	/* Start from a clean slate: mark every device disconnected. */
	SBD_DEVS_DISCONNECT(sbp, (uint_t)-1);

	/*
	 * Clear out old entries, if any.
	 */

	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		sbp->sb_devlist[NIX(SBD_COMP_MEM)][i] = NULL;
		dp = (sbd_dev_unit_t *)SBD_GET_BOARD_MEMUNIT(sbp, i);
		dp->u_common.sbdev_sbp = sbp;
		dp->u_common.sbdev_unum = i;
		dp->u_common.sbdev_type = SBD_COMP_MEM;
	}

	/* Clear the cached memory-controller dips on mem unit 0. */
	mp = SBD_GET_BOARD_MEMUNIT(sbp, 0);
	ASSERT(mp != NULL);
	for (i = 0; i < SBD_NUM_MC_PER_BOARD; i++) {
		mp->sbm_dip[i] = NULL;
	}

	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		sbp->sb_devlist[NIX(SBD_COMP_CPU)][i] = NULL;
		dp = (sbd_dev_unit_t *)SBD_GET_BOARD_CPUUNIT(sbp, i);
		dp->u_common.sbdev_sbp = sbp;
		dp->u_common.sbdev_unum = i;
		dp->u_common.sbdev_type = SBD_COMP_CPU;
	}
	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
		sbp->sb_devlist[NIX(SBD_COMP_IO)][i] = NULL;
		dp = (sbd_dev_unit_t *)SBD_GET_BOARD_IOUNIT(sbp, i);
		dp->u_common.sbdev_sbp = sbp;
		dp->u_common.sbdev_unum = i;
		dp->u_common.sbdev_type = SBD_COMP_IO;
	}

	wp->sbp = sbp;
	wp->nmc = 0;
	sbp->sb_ndev = 0;

	/*
	 * ddi_walk_devs() requires that topdip's parent be held.
	 */
	pdip = ddi_get_parent(sbp->sb_topdip);
	if (pdip) {
		ndi_hold_devi(pdip);
		ndi_devi_enter(pdip);
	}
	/* sbd_setup_devlists() populates sb_devlist[] and sb_ndev. */
	ddi_walk_devs(sbp->sb_topdip, sbd_setup_devlists, (void *) wp);
	if (pdip) {
		ndi_devi_exit(pdip);
		ndi_rele_devi(pdip);
	}

	/*
	 * There is no point checking all the components if there
	 * are no devices.
	 */
	if (sbp->sb_ndev == 0) {
		sbp->sb_memaccess_ok = 0;
		return (sbp->sb_ndev);
	}

	/*
	 * Initialize cpu sections before calling sbd_init_mem_devlists
	 * which will access the mmus.
	 */
	sbp->sb_memaccess_ok = 1;
	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_CPU, i)) {
			sbd_init_cpu_unit(sbp, i);
			/* A cpu start failure is recorded, not fatal. */
			if (sbd_connect_cpu(sbp, i)) {
				SBD_SET_ERR(HD2MACHERR(MACHBD2HD(sbp)),
					ESBD_CPUSTART);
			}

		}
	}

	if (sbp->sb_memaccess_ok) {
		sbd_init_mem_devlists(sbp);
	} else {
		cmn_err(CE_WARN, "unable to access memory on board %d",
		    sbp->sb_num);
	}

	return (sbp->sb_ndev);
}
2507 
/*
 * Populate the cpu unit structure for `unit' on board `sbp' (cpu id,
 * implementation, cpu flags snapshot, properties, condition) and then
 * transition the unit to the state implied by its current
 * attached/present status.
 */
static void
sbd_init_cpu_unit(sbd_board_t *sbp, int unit)
{
	sbd_istate_t	new_state;
	sbd_cpu_unit_t	*cp;
	int		cpuid;
	dev_info_t	*dip;
	sbdp_handle_t	*hdp;
	sbd_handle_t	*hp = MACHBD2HD(sbp);
	extern kmutex_t	cpu_lock;

	/* Derive the target state from the unit's current status. */
	if (SBD_DEV_IS_ATTACHED(sbp, SBD_COMP_CPU, unit)) {
		new_state = SBD_STATE_CONFIGURED;
	} else if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_CPU, unit)) {
		new_state = SBD_STATE_CONNECTED;
	} else {
		new_state = SBD_STATE_EMPTY;
	}

	dip = sbp->sb_devlist[NIX(SBD_COMP_CPU)][unit];

	cp = SBD_GET_BOARD_CPUUNIT(sbp, unit);

	hdp = sbd_get_sbdp_handle(sbp, hp);

	cpuid = sbdp_get_cpuid(hdp, dip);

	cp->sbc_cpu_id = cpuid;

	/*
	 * NOTE(review): the address test presumably guards against
	 * sbdp_cpu_get_impl() being unresolved in some platform
	 * modules; -1 then denotes "implementation unknown".
	 */
	if (&sbdp_cpu_get_impl)
		cp->sbc_cpu_impl = sbdp_cpu_get_impl(hdp, dip);
	else
		cp->sbc_cpu_impl = -1;

	/*
	 * Snapshot cpu_flags under cpu_lock; a cpu that is absent
	 * from the cpu[] array reads as offline and powered off.
	 */
	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = CPU_OFFLINE | CPU_POWEROFF;
	mutex_exit(&cpu_lock);

	sbd_cpu_set_prop(cp, dip);

	cp->sbc_cm.sbdev_cond = sbd_get_comp_cond(dip);
	sbd_release_sbdp_handle(hdp);

	/*
	 * Any changes to the cpu should be performed above
	 * this call to ensure the cpu is fully initialized
	 * before transitioning to the new state.
	 */
	SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU, unit, new_state);
}
2561 
2562 /*
2563  * Only do work if called to operate on an entire board
2564  * which doesn't already have components present.
2565  */
2566 static void
2567 sbd_connect(sbd_handle_t *hp)
2568 {
2569 	sbd_board_t	*sbp;
2570 	sbderror_t	*ep;
2571 	static fn_t	f = "sbd_connect";
2572 
2573 	sbp = SBDH2BD(hp->h_sbd);
2574 
2575 	PR_ALL("%s board %d\n", f, sbp->sb_num);
2576 
2577 	ep = HD2MACHERR(hp);
2578 
2579 	if (SBD_DEVS_PRESENT(sbp)) {
2580 		/*
2581 		 * Board already has devices present.
2582 		 */
2583 		PR_ALL("%s: devices already present (0x%x)\n",
2584 			f, SBD_DEVS_PRESENT(sbp));
2585 		SBD_SET_ERRNO(ep, EINVAL);
2586 		return;
2587 	}
2588 
2589 	if (sbd_init_devlists(sbp) == 0) {
2590 		cmn_err(CE_WARN, "%s: no devices present on board %d",
2591 			f, sbp->sb_num);
2592 		SBD_SET_ERR(ep, ESBD_NODEV);
2593 		return;
2594 	} else {
2595 		int	i;
2596 
2597 		/*
2598 		 * Initialize mem-unit section of board structure.
2599 		 */
2600 		for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++)
2601 			if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_MEM, i))
2602 				sbd_init_mem_unit(sbp, i, SBD_HD2ERR(hp));
2603 
2604 		/*
2605 		 * Initialize sb_io sections.
2606 		 */
2607 		for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++)
2608 			if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_IO, i))
2609 				sbd_init_io_unit(sbp, i);
2610 
2611 		SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONNECTED);
2612 		sbp->sb_rstate = SBD_STAT_CONNECTED;
2613 		sbp->sb_ostate = SBD_STAT_UNCONFIGURED;
2614 		(void) drv_getparm(TIME, (void *)&sbp->sb_time);
2615 		SBD_INJECT_ERR(SBD_CONNECT_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2616 			ESBD_INTERNAL, NULL);
2617 	}
2618 }
2619 
2620 static int
2621 sbd_disconnect(sbd_handle_t *hp)
2622 {
2623 	int		i;
2624 	sbd_devset_t	devset;
2625 	sbd_board_t	*sbp;
2626 	static fn_t	f = "sbd_disconnect it";
2627 
2628 	PR_ALL("%s ...\n", f);
2629 
2630 	sbp = SBDH2BD(hp->h_sbd);
2631 
2632 	/*
2633 	 * Only devices which are present, but
2634 	 * unattached can be disconnected.
2635 	 */
2636 	devset = HD2MACHHD(hp)->sh_devset & SBD_DEVS_PRESENT(sbp) &
2637 			SBD_DEVS_UNATTACHED(sbp);
2638 
2639 	ASSERT((SBD_DEVS_ATTACHED(sbp) & devset) == 0);
2640 
2641 	/*
2642 	 * Update per-device state transitions.
2643 	 */
2644 
2645 	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++)
2646 		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, i)) {
2647 			if (sbd_disconnect_mem(hp, i) == 0) {
2648 				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_MEM, i,
2649 							SBD_STATE_EMPTY);
2650 				SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_MEM, i);
2651 			}
2652 		}
2653 
2654 	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++)
2655 		if (DEVSET_IN_SET(devset, SBD_COMP_CPU, i)) {
2656 			if (sbd_disconnect_cpu(hp, i) == 0) {
2657 				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU, i,
2658 							SBD_STATE_EMPTY);
2659 				SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_CPU, i);
2660 			}
2661 		}
2662 
2663 	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++)
2664 		if (DEVSET_IN_SET(devset, SBD_COMP_IO, i)) {
2665 			if (sbd_disconnect_io(hp, i) == 0) {
2666 				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_IO, i,
2667 							SBD_STATE_EMPTY);
2668 				SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_IO, i);
2669 			}
2670 		}
2671 
2672 	/*
2673 	 * Once all the components on a board have been disconnect
2674 	 * the board's state can transition to disconnected and
2675 	 * we can allow the deprobe to take place.
2676 	 */
2677 	if (SBD_DEVS_PRESENT(sbp) == 0) {
2678 		SBD_BOARD_TRANSITION(sbp, SBD_STATE_OCCUPIED);
2679 		sbp->sb_rstate = SBD_STAT_DISCONNECTED;
2680 		sbp->sb_ostate = SBD_STAT_UNCONFIGURED;
2681 		(void) drv_getparm(TIME, (void *)&sbp->sb_time);
2682 		SBD_INJECT_ERR(SBD_DISCONNECT_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2683 			ESBD_INTERNAL, NULL);
2684 		return (0);
2685 	} else {
2686 		cmn_err(CE_WARN, "%s: could not disconnect devices on board %d",
2687 			f, sbp->sb_num);
2688 		return (-1);
2689 	}
2690 }
2691 
2692 static void
2693 sbd_test_board(sbd_handle_t *hp)
2694 {
2695 	sbd_board_t	*sbp;
2696 	sbdp_handle_t	*hdp;
2697 
2698 	sbp = SBDH2BD(hp->h_sbd);
2699 
2700 	PR_ALL("sbd_test_board: board %d\n", sbp->sb_num);
2701 
2702 
2703 	hdp = sbd_get_sbdp_handle(sbp, hp);
2704 
2705 	if (sbdp_test_board(hdp, &hp->h_opts) != 0) {
2706 		sbderror_t	*ep = SBD_HD2ERR(hp);
2707 
2708 		SBD_GET_PERR(hdp->h_err, ep);
2709 	}
2710 
2711 	SBD_INJECT_ERR(SBD_TEST_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2712 		ESBD_INTERNAL, NULL);
2713 
2714 	sbd_release_sbdp_handle(hdp);
2715 }
2716 
2717 static void
2718 sbd_assign_board(sbd_handle_t *hp)
2719 {
2720 	sbd_board_t	*sbp;
2721 	sbdp_handle_t	*hdp;
2722 
2723 	sbp = SBDH2BD(hp->h_sbd);
2724 
2725 	PR_ALL("sbd_assign_board: board %d\n", sbp->sb_num);
2726 
2727 	hdp = sbd_get_sbdp_handle(sbp, hp);
2728 
2729 	if (sbdp_assign_board(hdp) != 0) {
2730 		sbderror_t	*ep = SBD_HD2ERR(hp);
2731 
2732 		SBD_GET_PERR(hdp->h_err, ep);
2733 	}
2734 
2735 	SBD_INJECT_ERR(SBD_ASSIGN_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2736 		ESBD_INTERNAL, NULL);
2737 
2738 	sbd_release_sbdp_handle(hdp);
2739 }
2740 
2741 static void
2742 sbd_unassign_board(sbd_handle_t *hp)
2743 {
2744 	sbd_board_t	*sbp;
2745 	sbdp_handle_t	*hdp;
2746 
2747 	sbp = SBDH2BD(hp->h_sbd);
2748 
2749 	PR_ALL("sbd_unassign_board: board %d\n", sbp->sb_num);
2750 
2751 	hdp = sbd_get_sbdp_handle(sbp, hp);
2752 
2753 	if (sbdp_unassign_board(hdp) != 0) {
2754 		sbderror_t	*ep = SBD_HD2ERR(hp);
2755 
2756 		SBD_GET_PERR(hdp->h_err, ep);
2757 	}
2758 
2759 	SBD_INJECT_ERR(SBD_ASSIGN_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2760 		ESBD_INTERNAL, NULL);
2761 
2762 	sbd_release_sbdp_handle(hdp);
2763 }
2764 
2765 static void
2766 sbd_poweron_board(sbd_handle_t *hp)
2767 {
2768 	sbd_board_t	*sbp;
2769 	sbdp_handle_t	*hdp;
2770 
2771 	sbp = SBDH2BD(hp->h_sbd);
2772 
2773 	PR_ALL("sbd_poweron_board: %d\n", sbp->sb_num);
2774 
2775 	hdp = sbd_get_sbdp_handle(sbp, hp);
2776 
2777 	if (sbdp_poweron_board(hdp) != 0) {
2778 		sbderror_t	*ep = SBD_HD2ERR(hp);
2779 
2780 		SBD_GET_PERR(hdp->h_err, ep);
2781 	}
2782 
2783 	SBD_INJECT_ERR(SBD_POWERON_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2784 		ESBD_INTERNAL, NULL);
2785 
2786 	sbd_release_sbdp_handle(hdp);
2787 }
2788 
2789 static void
2790 sbd_poweroff_board(sbd_handle_t *hp)
2791 {
2792 	sbd_board_t	*sbp;
2793 	sbdp_handle_t	*hdp;
2794 
2795 	sbp = SBDH2BD(hp->h_sbd);
2796 
2797 	PR_ALL("sbd_poweroff_board: %d\n", sbp->sb_num);
2798 
2799 	hdp = sbd_get_sbdp_handle(sbp, hp);
2800 
2801 	if (sbdp_poweroff_board(hdp) != 0) {
2802 		sbderror_t	*ep = SBD_HD2ERR(hp);
2803 
2804 		SBD_GET_PERR(hdp->h_err, ep);
2805 	}
2806 
2807 	SBD_INJECT_ERR(SBD_POWEROFF_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2808 		ESBD_INTERNAL, NULL);
2809 
2810 	sbd_release_sbdp_handle(hdp);
2811 }
2812 
2813 
2814 /*
2815  * Return a list of the dip's of devices that are
2816  * either present and attached, or present only but
2817  * not yet attached for the given board.
2818  */
sbd_devlist_t *
sbd_get_devlist(sbd_handle_t *hp, sbd_board_t *sbp, sbd_comp_type_t nodetype,
		int max_units, uint_t uset, int *count, int present_only)
{
	int		i, ix;
	sbd_devlist_t	*ret_devlist;
	dev_info_t	**devlist;
	sbdp_handle_t	*hdp;

	/* The caller frees ret_devlist via FREESTRUCT(..., max_units). */
	*count = 0;
	ret_devlist = GETSTRUCT(sbd_devlist_t, max_units);
	devlist = sbp->sb_devlist[NIX(nodetype)];
	/*
	 * Turn into binary value since we're going
	 * to be using XOR for a comparison.
	 * if (present_only) then
	 *	dev must be PRESENT, but NOT ATTACHED.
	 * else
	 *	dev must be PRESENT AND ATTACHED.
	 * endif
	 */
	if (present_only)
		present_only = 1;

	hdp = sbd_get_sbdp_handle(sbp, hp);

	/* Bits of `uset' are consumed as units are seen; stop when empty. */
	for (i = ix = 0; (i < max_units) && uset; i++) {
		int	ut, is_present, is_attached;
		dev_info_t *dip;
		sbderror_t *ep = SBD_HD2ERR(hp);
		int	nunits, distance, j;

		/*
		 * For CMPs, we would like to perform DR operation on
		 * all the cores before moving onto the next chip.
		 * Therefore, when constructing the devlist, we process
		 * all the cores together.
		 */
		if (nodetype == SBD_COMP_CPU) {
			/*
			 * Number of units to process in the inner loop
			 */
			nunits = MAX_CORES_PER_CMP;
			/*
			 * The distance between the units in the
			 * board's sb_devlist structure.
			 */
			distance = MAX_CMP_UNITS_PER_BOARD;
		} else {
			nunits = 1;
			distance = 0;
		}

		for (j = 0; j < nunits; j++) {
			if ((dip = devlist[i + j * distance]) == NULL)
				continue;

			ut = sbdp_get_unit_num(hdp, dip);

			if (ut == -1) {
				SBD_GET_PERR(hdp->h_err, ep);
				PR_ALL("sbd_get_devlist bad unit %d"
				    " code %d errno %d",
				    i, ep->e_code, ep->e_errno);
			}

			if ((uset & (1 << ut)) == 0)
				continue;
			uset &= ~(1 << ut);
			is_present = SBD_DEV_IS_PRESENT(sbp, nodetype, ut) ?
			    1 : 0;
			is_attached = SBD_DEV_IS_ATTACHED(sbp, nodetype, ut) ?
			    1 : 0;

			/*
			 * present_only ^ is_attached is 1 exactly when
			 * the unit's attach status matches the request:
			 * attached units for !present_only, unattached
			 * units for present_only.
			 */
			if (is_present && (present_only ^ is_attached)) {
				ret_devlist[ix].dv_dip = dip;
				sbd_init_err(&ret_devlist[ix].dv_error);
				ix++;
			}
		}
	}
	sbd_release_sbdp_handle(hdp);

	/* No unit qualified: free the list and return NULL. */
	if ((*count = ix) == 0) {
		FREESTRUCT(ret_devlist, sbd_devlist_t, max_units);
		ret_devlist = NULL;
	}

	return (ret_devlist);
}
2909 
/*
 * Return the next group of devices to attach: cpus on the first
 * productive pass, then memory, then I/O.  *devnump is set to the
 * number of entries in the returned list; NULL with *devnump == 0
 * means no further passes are needed.
 *
 * NOTE(review): next_pass is static, so concurrent attach operations
 * would interfere -- presumably serialized at a higher level; confirm
 * before relying on it.
 */
static sbd_devlist_t *
sbd_get_attach_devlist(sbd_handle_t *hp, int32_t *devnump, int32_t pass)
{
	sbd_board_t	*sbp;
	uint_t		uset;
	sbd_devset_t	devset;
	sbd_devlist_t	*attach_devlist;
	static int	next_pass = 1;
	static fn_t	f = "sbd_get_attach_devlist";

	PR_ALL("%s (pass = %d)...\n", f, pass);

	sbp = SBDH2BD(hp->h_sbd);
	devset = HD2MACHHD(hp)->sh_devset;

	*devnump = 0;
	attach_devlist = NULL;

	/*
	 * We switch on next_pass for the cases where a board
	 * does not contain a particular type of component.
	 * In these situations we don't want to return NULL
	 * prematurely.  We need to check other devices and
	 * we don't want to check the same type multiple times.
	 * For example, if there were no cpus, then on pass 1
	 * we would drop through and return the memory nodes.
	 * However, on pass 2 we would switch back to the memory
	 * nodes thereby returning them twice!  Using next_pass
	 * forces us down to the end (or next item).
	 */
	if (pass == 1)
		next_pass = 1;

	switch (next_pass) {
	case 1:
		if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_CPU);

			attach_devlist = sbd_get_devlist(hp, sbp, SBD_COMP_CPU,
						MAX_CPU_UNITS_PER_BOARD,
						uset, devnump, 1);

			DEVSET_DEL(devset, SBD_COMP_CPU, DEVSET_ANYUNIT);
			if (!devset || attach_devlist) {
				next_pass = 2;
				return (attach_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there aren't any cpus, then just
			 * fall through to check for the next component.
			 */
		}
		/*FALLTHROUGH*/

	case 2:
		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_MEM);

			attach_devlist = sbd_get_devlist(hp, sbp, SBD_COMP_MEM,
						MAX_MEM_UNITS_PER_BOARD,
						uset, devnump, 1);

			DEVSET_DEL(devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
			if (!devset || attach_devlist) {
				next_pass = 3;
				return (attach_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there isn't any memory, then
			 * just fall through to next component.
			 */
		}
		/*FALLTHROUGH*/


	case 3:
		next_pass = -1;
		if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_IO);

			attach_devlist = sbd_get_devlist(hp, sbp, SBD_COMP_IO,
						MAX_IO_UNITS_PER_BOARD,
						uset, devnump, 1);

			DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
			if (!devset || attach_devlist) {
				next_pass = 4;
				return (attach_devlist);
			}
		}
		/*FALLTHROUGH*/

	default:
		*devnump = 0;
		return (NULL);
	}
	/*NOTREACHED*/
}
3010 
3011 static int
3012 sbd_pre_attach_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
3013 	int32_t devnum)
3014 {
3015 	int		max_units = 0, rv = 0;
3016 	sbd_comp_type_t	nodetype;
3017 	static fn_t	f = "sbd_pre_attach_devlist";
3018 
3019 	/*
3020 	 * In this driver, all entries in a devlist[] are
3021 	 * of the same nodetype.
3022 	 */
3023 	nodetype = sbd_get_devtype(hp, devlist->dv_dip);
3024 
3025 	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
3026 		f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);
3027 
3028 	switch (nodetype) {
3029 
3030 	case SBD_COMP_MEM:
3031 		max_units = MAX_MEM_UNITS_PER_BOARD;
3032 		rv = sbd_pre_attach_mem(hp, devlist, devnum);
3033 		break;
3034 
3035 	case SBD_COMP_CPU:
3036 		max_units = MAX_CPU_UNITS_PER_BOARD;
3037 		rv = sbd_pre_attach_cpu(hp, devlist, devnum);
3038 		break;
3039 
3040 	case SBD_COMP_IO:
3041 		max_units = MAX_IO_UNITS_PER_BOARD;
3042 		break;
3043 
3044 	default:
3045 		rv = -1;
3046 		break;
3047 	}
3048 
3049 	if (rv && max_units) {
3050 		int	i;
3051 		/*
3052 		 * Need to clean up devlist
3053 		 * if pre-op is going to fail.
3054 		 */
3055 		for (i = 0; i < max_units; i++) {
3056 			if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
3057 				SBD_FREE_ERR(&devlist[i].dv_error);
3058 			} else {
3059 				break;
3060 			}
3061 		}
3062 		FREESTRUCT(devlist, sbd_devlist_t, max_units);
3063 	}
3064 
3065 	/*
3066 	 * If an error occurred, return "continue"
3067 	 * indication so that we can continue attaching
3068 	 * as much as possible.
3069 	 */
3070 	return (rv ? -1 : 0);
3071 }
3072 
3073 static int
3074 sbd_post_attach_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
3075 			int32_t devnum)
3076 {
3077 	int		i, max_units = 0, rv = 0;
3078 	sbd_devset_t	devs_unattached, devs_present;
3079 	sbd_comp_type_t	nodetype;
3080 	sbd_board_t 	*sbp = SBDH2BD(hp->h_sbd);
3081 	sbdp_handle_t	*hdp;
3082 	static fn_t	f = "sbd_post_attach_devlist";
3083 
3084 	sbp = SBDH2BD(hp->h_sbd);
3085 	nodetype = sbd_get_devtype(hp, devlist->dv_dip);
3086 
3087 	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
3088 		f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);
3089 
3090 	hdp = sbd_get_sbdp_handle(sbp, hp);
3091 
3092 	/*
3093 	 * Need to free up devlist[] created earlier in
3094 	 * sbd_get_attach_devlist().
3095 	 */
3096 	switch (nodetype) {
3097 	case SBD_COMP_CPU:
3098 		max_units = MAX_CPU_UNITS_PER_BOARD;
3099 		rv = sbd_post_attach_cpu(hp, devlist, devnum);
3100 		break;
3101 
3102 
3103 	case SBD_COMP_MEM:
3104 		max_units = MAX_MEM_UNITS_PER_BOARD;
3105 
3106 		rv = sbd_post_attach_mem(hp, devlist, devnum);
3107 		break;
3108 
3109 	case SBD_COMP_IO:
3110 		max_units = MAX_IO_UNITS_PER_BOARD;
3111 		break;
3112 
3113 	default:
3114 		rv = -1;
3115 		break;
3116 	}
3117 
3118 
3119 	for (i = 0; i < devnum; i++) {
3120 		int		unit;
3121 		dev_info_t	*dip;
3122 		sbderror_t	*ep;
3123 
3124 		ep = &devlist[i].dv_error;
3125 
3126 		if (sbd_set_err_in_hdl(hp, ep) == 0)
3127 			continue;
3128 
3129 		dip = devlist[i].dv_dip;
3130 		nodetype = sbd_get_devtype(hp, dip);
3131 		unit = sbdp_get_unit_num(hdp, dip);
3132 
3133 		if (unit == -1) {
3134 			SBD_GET_PERR(hdp->h_err, ep);
3135 			continue;
3136 		}
3137 
3138 		unit = sbd_check_unit_attached(sbp, dip, unit, nodetype, ep);
3139 
3140 		if (unit == -1) {
3141 			PR_ALL("%s: ERROR (nt=%s, b=%d, u=%d) not attached\n",
3142 				f, sbd_ct_str[(int)nodetype], sbp->sb_num, i);
3143 			continue;
3144 		}
3145 
3146 		SBD_DEV_SET_ATTACHED(sbp, nodetype, unit);
3147 		SBD_DEVICE_TRANSITION(sbp, nodetype, unit,
3148 						SBD_STATE_CONFIGURED);
3149 	}
3150 	sbd_release_sbdp_handle(hdp);
3151 
3152 	if (rv) {
3153 		PR_ALL("%s: errno %d, ecode %d during attach\n",
3154 			f, SBD_GET_ERRNO(SBD_HD2ERR(hp)),
3155 			SBD_GET_ERR(HD2MACHERR(hp)));
3156 	}
3157 
3158 	devs_present = SBD_DEVS_PRESENT(sbp);
3159 	devs_unattached = SBD_DEVS_UNATTACHED(sbp);
3160 
3161 	switch (SBD_BOARD_STATE(sbp)) {
3162 	case SBD_STATE_CONNECTED:
3163 	case SBD_STATE_UNCONFIGURED:
3164 		ASSERT(devs_present);
3165 
3166 		if (devs_unattached == 0) {
3167 			/*
3168 			 * All devices finally attached.
3169 			 */
3170 			SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONFIGURED);
3171 			sbp->sb_rstate = SBD_STAT_CONNECTED;
3172 			sbp->sb_ostate = SBD_STAT_CONFIGURED;
3173 		} else if (devs_present != devs_unattached) {
3174 			/*
3175 			 * Only some devices are fully attached.
3176 			 */
3177 			SBD_BOARD_TRANSITION(sbp, SBD_STATE_PARTIAL);
3178 			sbp->sb_rstate = SBD_STAT_CONNECTED;
3179 			sbp->sb_ostate = SBD_STAT_UNCONFIGURED;
3180 		}
3181 		(void) drv_getparm(TIME, (void *)&sbp->sb_time);
3182 		break;
3183 
3184 	case SBD_STATE_PARTIAL:
3185 		ASSERT(devs_present);
3186 		/*
3187 		 * All devices finally attached.
3188 		 */
3189 		if (devs_unattached == 0) {
3190 			SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONFIGURED);
3191 			sbp->sb_rstate = SBD_STAT_CONNECTED;
3192 			sbp->sb_ostate = SBD_STAT_CONFIGURED;
3193 			(void) drv_getparm(TIME, (void *)&sbp->sb_time);
3194 		}
3195 		break;
3196 
3197 	default:
3198 		break;
3199 	}
3200 
3201 	if (max_units && devlist) {
3202 		int	i;
3203 
3204 		for (i = 0; i < max_units; i++) {
3205 			if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
3206 				SBD_FREE_ERR(&devlist[i].dv_error);
3207 			} else {
3208 				break;
3209 			}
3210 		}
3211 		FREESTRUCT(devlist, sbd_devlist_t, max_units);
3212 	}
3213 
3214 	/*
3215 	 * Our policy is to attach all components that are
3216 	 * possible, thus we always return "success" on the
3217 	 * pre and post operations.
3218 	 */
3219 	return (0);
3220 }
3221 
3222 /*
3223  * We only need to "release" cpu and memory devices.
3224  */
/*
 * Return the next group of devices to release: memory on the first
 * productive pass, then cpus, then I/O (memory must come first so it
 * is unconfigured before its cpus).  *devnump receives the list
 * length; NULL with *devnump == 0 ends the sequence.
 *
 * NOTE(review): next_pass is static, so concurrent release operations
 * would interfere -- presumably serialized at a higher level; confirm
 * before relying on it.
 */
static sbd_devlist_t *
sbd_get_release_devlist(sbd_handle_t *hp, int32_t *devnump, int32_t pass)
{
	sbd_board_t	*sbp;
	uint_t		uset;
	sbd_devset_t	devset;
	sbd_devlist_t	*release_devlist;
	static int	next_pass = 1;
	static fn_t	f = "sbd_get_release_devlist";

	PR_ALL("%s (pass = %d)...\n", f, pass);

	sbp = SBDH2BD(hp->h_sbd);
	devset = HD2MACHHD(hp)->sh_devset;

	*devnump = 0;
	release_devlist = NULL;

	/*
	 * We switch on next_pass for the cases where a board
	 * does not contain a particular type of component.
	 * In these situations we don't want to return NULL
	 * prematurely.  We need to check other devices and
	 * we don't want to check the same type multiple times.
	 * For example, if there were no cpus, then on pass 1
	 * we would drop through and return the memory nodes.
	 * However, on pass 2 we would switch back to the memory
	 * nodes thereby returning them twice!  Using next_pass
	 * forces us down to the end (or next item).
	 */
	if (pass == 1)
		next_pass = 1;

	switch (next_pass) {
	case 1:
		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_MEM);

			release_devlist = sbd_get_devlist(hp, sbp,
						SBD_COMP_MEM,
						MAX_MEM_UNITS_PER_BOARD,
						uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
			if (!devset || release_devlist) {
				next_pass = 2;
				return (release_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there isn't any memory, then
			 * just fall through to next component.
			 */
		}
		/*FALLTHROUGH*/


	case 2:
		if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_CPU);

			release_devlist = sbd_get_devlist(hp, sbp,
						SBD_COMP_CPU,
						MAX_CPU_UNITS_PER_BOARD,
						uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_CPU, DEVSET_ANYUNIT);
			if (!devset || release_devlist) {
				next_pass = 3;
				return (release_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there aren't any cpus, then just
			 * fall through to check for the next component.
			 */
		}
		/*FALLTHROUGH*/


	case 3:
		next_pass = -1;
		if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_IO);

			release_devlist = sbd_get_devlist(hp, sbp,
						SBD_COMP_IO,
						MAX_IO_UNITS_PER_BOARD,
						uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
			if (!devset || release_devlist) {
				next_pass = 4;
				return (release_devlist);
			}
		}
		/*FALLTHROUGH*/

	default:
		*devnump = 0;
		return (NULL);
	}
	/*NOTREACHED*/
}
3329 
/*
 * Per-nodetype preparation before releasing the devices in devlist[].
 * Releasing cpus is refused while any attached memory unit is not
 * also part of the requested operation, since memory must be
 * unconfigured before its cpus.  On failure the devlist[] allocated
 * by sbd_get_release_devlist() is freed here.  Returns 0 on success,
 * -1 on failure.
 */
static int
sbd_pre_release_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
			int32_t devnum)
{
	int		max_units = 0, rv = 0;
	sbd_comp_type_t	nodetype;
	static fn_t	f = "sbd_pre_release_devlist";

	nodetype = sbd_get_devtype(hp, devlist->dv_dip);

	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
		f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);

	switch (nodetype) {
	case SBD_COMP_CPU: {
		int			i, mem_present = 0;
		sbd_board_t		*sbp = SBDH2BD(hp->h_sbd);
		sbd_devset_t		devset;
		sbd_priv_handle_t	*shp = HD2MACHHD(hp);

		max_units = MAX_CPU_UNITS_PER_BOARD;

		devset = shp->sh_orig_devset;

		for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
			/*
			 * if client also requested to unconfigure memory
			 * the we allow the operation. Therefore
			 * we need to warranty that memory gets unconfig
			 * before cpus
			 */

			if (DEVSET_IN_SET(devset, SBD_COMP_MEM, i)) {
				continue;
			}
			if (SBD_DEV_IS_ATTACHED(sbp, SBD_COMP_MEM, i)) {
				mem_present = 1;
				break;
			}
		}
		if (mem_present) {
			sbderror_t	*ep = SBD_HD2ERR(hp);
			SBD_SET_ERR(ep, ESBD_MEMONLINE);
			/*
			 * `i' still indexes the attached memory unit
			 * found above (the loop exited via break).
			 */
			SBD_SET_ERRSTR(ep, sbp->sb_mempath[i]);
			rv = -1;
		} else {
			rv = sbd_pre_release_cpu(hp, devlist, devnum);
		}

		break;

	}
	case SBD_COMP_MEM:
		max_units = MAX_MEM_UNITS_PER_BOARD;
		rv = sbd_pre_release_mem(hp, devlist, devnum);
		break;


	case SBD_COMP_IO:
		max_units = MAX_IO_UNITS_PER_BOARD;
		rv = sbd_pre_release_io(hp, devlist, devnum);
		break;

	default:
		rv = -1;
		break;
	}

	if (rv && max_units) {
		int	i;

		/*
		 * the individual pre_release component routines should
		 * have set the error in the handle.  No need to set it
		 * here
		 *
		 * Need to clean up dynamically allocated devlist
		 * if pre-op is going to fail.
		 */
		for (i = 0; i < max_units; i++) {
			if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
				SBD_FREE_ERR(&devlist[i].dv_error);
			} else {
				break;
			}
		}
		FREESTRUCT(devlist, sbd_devlist_t, max_units);
	}

	return (rv ? -1 : 0);
}
3421 
/*
 * Finish a release of the devices in devlist[]: for I/O, verify no
 * device is still referenced; propagate per-device errors into the
 * handle; then free the devlist[] allocated by
 * sbd_get_release_devlist().  Returns 0 if the handle holds no errno,
 * -1 otherwise.
 */
static int
sbd_post_release_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
			int32_t devnum)
{
	int		i, max_units = 0;
	sbd_comp_type_t	nodetype;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbdp_handle_t	*hdp;
	sbd_error_t	*spe;
	static fn_t	f = "sbd_post_release_devlist";

	nodetype = sbd_get_devtype(hp, devlist->dv_dip);
	ASSERT(nodetype >= SBD_COMP_CPU && nodetype <= SBD_COMP_IO);

	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
		f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);

	/*
	 * Need to free up devlist[] created earlier in
	 * sbd_get_release_devlist().
	 */
	switch (nodetype) {
	case SBD_COMP_CPU:
		max_units = MAX_CPU_UNITS_PER_BOARD;
		break;

	case SBD_COMP_MEM:
		max_units = MAX_MEM_UNITS_PER_BOARD;
		break;

	case SBD_COMP_IO:
		/*
		 *  Need to check if specific I/O is referenced and
		 *  fail post-op.
		 */

		if (sbd_check_io_refs(hp, devlist, devnum) > 0) {
				PR_IO("%s: error - I/O devices ref'd\n", f);
		}

		max_units = MAX_IO_UNITS_PER_BOARD;
		break;

	default:
		{
			cmn_err(CE_WARN, "%s: invalid nodetype (%d)",
				f, (int)nodetype);
			SBD_SET_ERR(HD2MACHERR(hp), ESBD_INVAL);
		}
		break;
	}
	hdp = sbd_get_sbdp_handle(sbp, hp);
	spe = hdp->h_err;

	/*
	 * Copy per-device errors into the handle and log any device
	 * whose unit number can no longer be resolved.
	 */
	for (i = 0; i < devnum; i++) {
		int		unit;
		sbderror_t	*ep;

		ep = &devlist[i].dv_error;

		if (sbd_set_err_in_hdl(hp, ep) == 0) {
			continue;
		}

		unit = sbdp_get_unit_num(hdp, devlist[i].dv_dip);
		if (unit == -1) {
			SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
			PR_ALL("%s bad unit num: %d code %d",
			    f, unit, spe->e_code);
			continue;
		}
	}
	sbd_release_sbdp_handle(hdp);

	if (SBD_GET_ERRNO(SBD_HD2ERR(hp))) {
		PR_ALL("%s: errno %d, ecode %d during release\n",
			f, SBD_GET_ERRNO(SBD_HD2ERR(hp)),
			SBD_GET_ERR(SBD_HD2ERR(hp)));
	}

	/*
	 * Error strings are allocated densely from the front of the
	 * devlist; stop freeing at the first slot without one.
	 */
	if (max_units && devlist) {
		int	i;

		for (i = 0; i < max_units; i++) {
			if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
				SBD_FREE_ERR(&devlist[i].dv_error);
			} else {
				break;
			}
		}
		FREESTRUCT(devlist, sbd_devlist_t, max_units);
	}

	return (SBD_GET_ERRNO(SBD_HD2ERR(hp)) ? -1 : 0);
}
3517 
/*
 * Mark a device unreferenced and transition it to the UNREFERENCED
 * state after its release has completed.
 */
static void
sbd_release_dev_done(sbd_board_t *sbp, sbd_comp_type_t nodetype, int unit)
{
	SBD_DEV_SET_UNREFERENCED(sbp, nodetype, unit);
	SBD_DEVICE_TRANSITION(sbp, nodetype, unit, SBD_STATE_UNREFERENCED);
}
3524 
/*
 * Complete the release of a single device.  Looks up the device's
 * unit number, runs the type-specific release completion (memory is
 * special-cased; everything else just transitions state via
 * sbd_release_dev_done()), and, once every released device on the
 * board is unreferenced, moves the whole board to UNREFERENCED.
 */
static void
sbd_release_done(sbd_handle_t *hp, sbd_comp_type_t nodetype, dev_info_t *dip)
{
	int		unit;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbderror_t	*ep;
	static fn_t	f = "sbd_release_done";
	sbdp_handle_t	*hdp;

	PR_ALL("%s...\n", f);

	hdp = sbd_get_sbdp_handle(sbp, hp);
	ep = SBD_HD2ERR(hp);

	/*
	 * Without a unit number we cannot update any unit state;
	 * propagate the sbdp error into the handle and bail out.
	 */
	if ((unit = sbdp_get_unit_num(hdp, dip)) < 0) {
		cmn_err(CE_WARN,
			"sbd:%s: unable to get unit for dip (0x%p)",
			f, (void *)dip);
		SBD_GET_PERR(hdp->h_err, ep);
		sbd_release_sbdp_handle(hdp);
		return;
	}
	sbd_release_sbdp_handle(hdp);

	/*
	 * Transfer the device which just completed its release
	 * to the UNREFERENCED state.
	 */
	switch (nodetype) {

	case SBD_COMP_MEM:
		sbd_release_mem_done((void *)hp, unit);
		break;

	default:
		sbd_release_dev_done(sbp, nodetype, unit);
		break;
	}

	/*
	 * If the entire board was released and all components
	 * unreferenced then transfer it to the UNREFERENCED state.
	 */
	if (SBD_DEVS_RELEASED(sbp) == SBD_DEVS_UNREFERENCED(sbp)) {
		SBD_BOARD_TRANSITION(sbp, SBD_STATE_UNREFERENCED);
		(void) drv_getparm(TIME, (void *)&sbp->sb_time);
	}
}
3573 
3574 static sbd_devlist_t *
3575 sbd_get_detach_devlist(sbd_handle_t *hp, int32_t *devnump, int32_t pass)
3576 {
3577 	sbd_board_t	*sbp;
3578 	uint_t		uset;
3579 	sbd_devset_t	devset;
3580 	sbd_devlist_t	*detach_devlist;
3581 	static int	next_pass = 1;
3582 	static fn_t	f = "sbd_get_detach_devlist";
3583 
3584 	PR_ALL("%s (pass = %d)...\n", f, pass);
3585 
3586 	sbp = SBDH2BD(hp->h_sbd);
3587 	devset = HD2MACHHD(hp)->sh_devset;
3588 
3589 	*devnump = 0;
3590 	detach_devlist = NULL;
3591 
3592 	/*
3593 	 * We switch on next_pass for the cases where a board
3594 	 * does not contain a particular type of component.
3595 	 * In these situations we don't want to return NULL
3596 	 * prematurely.  We need to check other devices and
3597 	 * we don't want to check the same type multiple times.
3598 	 * For example, if there were no cpus, then on pass 1
3599 	 * we would drop through and return the memory nodes.
3600 	 * However, on pass 2 we would switch back to the memory
3601 	 * nodes thereby returning them twice!  Using next_pass
3602 	 * forces us down to the end (or next item).
3603 	 */
3604 	if (pass == 1)
3605 		next_pass = 1;
3606 
3607 	switch (next_pass) {
3608 	case 1:
3609 		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
3610 			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_MEM);
3611 
3612 			detach_devlist = sbd_get_devlist(hp, sbp,
3613 						SBD_COMP_MEM,
3614 						MAX_MEM_UNITS_PER_BOARD,
3615 						uset, devnump, 0);
3616 
3617 			DEVSET_DEL(devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
3618 			if (!devset || detach_devlist) {
3619 				next_pass = 2;
3620 				return (detach_devlist);
3621 			}
3622 			/*
3623 			 * If the caller is interested in the entire
3624 			 * board, but there isn't any memory, then
3625 			 * just fall through to next component.
3626 			 */
3627 		}
3628 		/*FALLTHROUGH*/
3629 
3630 	case 2:
3631 		if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
3632 			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_CPU);
3633 
3634 			detach_devlist = sbd_get_devlist(hp, sbp,
3635 						SBD_COMP_CPU,
3636 						MAX_CPU_UNITS_PER_BOARD,
3637 						uset, devnump, 0);
3638 
3639 			DEVSET_DEL(devset, SBD_COMP_CPU, DEVSET_ANYUNIT);
3640 			if (!devset || detach_devlist) {
3641 				next_pass = 2;
3642 				return (detach_devlist);
3643 			}
3644 			/*
3645 			 * If the caller is interested in the entire
3646 			 * board, but there aren't any cpus, then just
3647 			 * fall through to check for the next component.
3648 			 */
3649 		}
3650 		/*FALLTHROUGH*/
3651 
3652 	case 3:
3653 		next_pass = -1;
3654 		if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
3655 			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_IO);
3656 
3657 			detach_devlist = sbd_get_devlist(hp, sbp,
3658 						SBD_COMP_IO,
3659 						MAX_IO_UNITS_PER_BOARD,
3660 						uset, devnump, 0);
3661 
3662 			DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
3663 			if (!devset || detach_devlist) {
3664 				next_pass = 4;
3665 				return (detach_devlist);
3666 			}
3667 		}
3668 		/*FALLTHROUGH*/
3669 
3670 	default:
3671 		*devnump = 0;
3672 		return (NULL);
3673 	}
3674 	/*NOTREACHED*/
3675 }
3676 
3677 static int
3678 sbd_pre_detach_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
3679 	int32_t devnum)
3680 {
3681 	int		rv = 0;
3682 	sbd_comp_type_t	nodetype;
3683 	static fn_t	f = "sbd_pre_detach_devlist";
3684 
3685 	nodetype = sbd_get_devtype(hp, devlist->dv_dip);
3686 
3687 	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
3688 		f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);
3689 
3690 	switch (nodetype) {
3691 	case SBD_COMP_CPU:
3692 		rv = sbd_pre_detach_cpu(hp, devlist, devnum);
3693 		break;
3694 
3695 	case SBD_COMP_MEM:
3696 		rv = sbd_pre_detach_mem(hp, devlist, devnum);
3697 		break;
3698 
3699 	case SBD_COMP_IO:
3700 		rv = sbd_pre_detach_io(hp, devlist, devnum);
3701 		break;
3702 
3703 	default:
3704 		rv = -1;
3705 		break;
3706 	}
3707 
3708 	/*
3709 	 * We want to continue attempting to detach
3710 	 * other components.
3711 	 */
3712 	return (rv);
3713 }
3714 
/*
 * Post-detach processing for a list of devices of a single type.
 * Runs the type-specific post-detach hook, transitions every device
 * that is actually gone to UNCONFIGURED, recomputes the board-level
 * state, and frees the devlist allocated by
 * sbd_get_detach_devlist().
 *
 * Returns 0 on success, -1 if an errno has been recorded in the
 * handle.
 */
static int
sbd_post_detach_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
			int32_t devnum)
{
	int		i, max_units = 0, rv = 0;
	sbd_comp_type_t	nodetype;
	sbd_board_t	*sbp;
	sbd_istate_t	bstate;
	static fn_t	f = "sbd_post_detach_devlist";
	sbdp_handle_t	*hdp;

	sbp = SBDH2BD(hp->h_sbd);
	nodetype = sbd_get_devtype(hp, devlist->dv_dip);

	hdp = sbd_get_sbdp_handle(sbp, hp);

	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
		f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);

	/*
	 * Need to free up devlist[] created earlier in
	 * sbd_get_detach_devlist().
	 */
	switch (nodetype) {
	case SBD_COMP_CPU:
		max_units = MAX_CPU_UNITS_PER_BOARD;
		rv = sbd_post_detach_cpu(hp, devlist, devnum);
		break;

	case SBD_COMP_MEM:
		max_units = MAX_MEM_UNITS_PER_BOARD;
		rv = sbd_post_detach_mem(hp, devlist, devnum);
		break;

	case SBD_COMP_IO:
		max_units = MAX_IO_UNITS_PER_BOARD;
		rv = sbd_post_detach_io(hp, devlist, devnum);
		break;

	default:
		rv = -1;
		break;
	}


	/*
	 * Walk the devlist and move every device that is truly
	 * detached to UNCONFIGURED; devices found still attached
	 * keep their current state.
	 */
	for (i = 0; i < devnum; i++) {
		int		unit;
		sbderror_t	*ep;
		dev_info_t	*dip;

		ep = &devlist[i].dv_error;

		if (sbd_set_err_in_hdl(hp, ep) == 0)
			continue;

		dip = devlist[i].dv_dip;
		unit = sbdp_get_unit_num(hdp, dip);
		if (unit == -1) {
			/*
			 * With FORCE, press on to the next device;
			 * otherwise record the sbdp error and stop.
			 */
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE)
				continue;
			else {
				SBD_GET_PERR(hdp->h_err, ep);
				break;
			}
		}
		nodetype = sbd_get_devtype(hp, dip);

		if (sbd_check_unit_attached(sbp, dip, unit, nodetype,
		    ep) >= 0) {
			/*
			 * Device is still attached probably due
			 * to an error.  Need to keep track of it.
			 */
			PR_ALL("%s: ERROR (nt=%s, b=%d, u=%d) not detached\n",
				f, sbd_ct_str[(int)nodetype], sbp->sb_num,
				unit);
			continue;
		}

		SBD_DEV_CLR_ATTACHED(sbp, nodetype, unit);
		SBD_DEV_CLR_RELEASED(sbp, nodetype, unit);
		SBD_DEV_CLR_UNREFERENCED(sbp, nodetype, unit);
		SBD_DEVICE_TRANSITION(sbp, nodetype, unit,
						SBD_STATE_UNCONFIGURED);
	}
	sbd_release_sbdp_handle(hdp);

	/*
	 * Recompute the board state now that unit states may have
	 * changed.
	 */
	bstate = SBD_BOARD_STATE(sbp);
	if (bstate != SBD_STATE_UNCONFIGURED) {
		if (SBD_DEVS_PRESENT(sbp) == SBD_DEVS_UNATTACHED(sbp)) {
			/*
			 * All devices are finally detached.
			 */
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_UNCONFIGURED);
		} else if ((SBD_BOARD_STATE(sbp) != SBD_STATE_PARTIAL) &&
				SBD_DEVS_ATTACHED(sbp)) {
			/*
			 * Some devices remain attached.
			 */
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_PARTIAL);
		}
	}

	if (rv) {
		PR_ALL("%s: errno %d, ecode %d during detach\n",
			f, SBD_GET_ERRNO(SBD_HD2ERR(hp)),
			SBD_GET_ERR(HD2MACHERR(hp)));
	}

	/*
	 * Free the per-device error strings, then the devlist itself.
	 */
	if (max_units && devlist) {
		int	i;

		for (i = 0; i < max_units; i++) {
			if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
				SBD_FREE_ERR(&devlist[i].dv_error);
			} else {
				break;
			}
		}
		FREESTRUCT(devlist, sbd_devlist_t, max_units);
	}

	return (SBD_GET_ERRNO(SBD_HD2ERR(hp)) ? -1 : 0);
}
3839 
3840 /*
3841  * Return the unit number of the respective dip if
3842  * it's found to be attached.
3843  */
3844 static int
3845 sbd_check_unit_attached(sbd_board_t *sbp, dev_info_t *dip, int unit,
3846 	sbd_comp_type_t nodetype, sbderror_t *ep)
3847 {
3848 	int		rv = -1;
3849 	processorid_t	cpuid;
3850 	uint64_t	basepa, endpa;
3851 	struct memlist	*ml;
3852 	extern struct memlist	*phys_install;
3853 	sbdp_handle_t	*hdp;
3854 	sbd_handle_t	*hp = MACHBD2HD(sbp);
3855 	static fn_t	f = "sbd_check_unit_attached";
3856 
3857 	hdp = sbd_get_sbdp_handle(sbp, hp);
3858 
3859 	switch (nodetype) {
3860 
3861 	case SBD_COMP_CPU:
3862 		cpuid = sbdp_get_cpuid(hdp, dip);
3863 		if (cpuid < 0) {
3864 			break;
3865 		}
3866 		mutex_enter(&cpu_lock);
3867 		if (cpu_get(cpuid) != NULL)
3868 			rv = unit;
3869 		mutex_exit(&cpu_lock);
3870 		break;
3871 
3872 	case SBD_COMP_MEM:
3873 		if (sbdphw_get_base_physaddr(hdp, dip, &basepa)) {
3874 			break;
3875 		}
3876 		if (sbdp_get_mem_alignment(hdp, dip, &endpa)) {
3877 			cmn_err(CE_WARN, "%s sbdp_get_mem_alignment fail", f);
3878 			break;
3879 		}
3880 
3881 		basepa &= ~(endpa - 1);
3882 		endpa += basepa;
3883 		/*
3884 		 * Check if base address is in phys_install.
3885 		 */
3886 		memlist_read_lock();
3887 		for (ml = phys_install; ml; ml = ml->ml_next)
3888 			if ((endpa <= ml->ml_address) ||
3889 			    (basepa >= (ml->ml_address + ml->ml_size)))
3890 				continue;
3891 			else
3892 				break;
3893 		memlist_read_unlock();
3894 		if (ml != NULL)
3895 			rv = unit;
3896 		break;
3897 
3898 	case SBD_COMP_IO:
3899 	{
3900 		dev_info_t	*tdip, *pdip;
3901 
3902 		tdip = dip;
3903 
3904 		/*
3905 		 * ddi_walk_devs() requires that topdip's parent be held.
3906 		 */
3907 		pdip = ddi_get_parent(sbp->sb_topdip);
3908 		if (pdip) {
3909 			ndi_hold_devi(pdip);
3910 			ndi_devi_enter(pdip);
3911 		}
3912 		ddi_walk_devs(sbp->sb_topdip, sbd_check_io_attached,
3913 			(void *)&tdip);
3914 		if (pdip) {
3915 			ndi_devi_exit(pdip);
3916 			ndi_rele_devi(pdip);
3917 		}
3918 
3919 		if (tdip == NULL)
3920 			rv = unit;
3921 		else
3922 			rv = -1;
3923 		break;
3924 	}
3925 
3926 	default:
3927 		PR_ALL("%s: unexpected nodetype(%d) for dip 0x%p\n",
3928 			f, nodetype, (void *)dip);
3929 		rv = -1;
3930 		break;
3931 	}
3932 
3933 	/*
3934 	 * Save the error that sbdp sent us and report it
3935 	 */
3936 	if (rv == -1)
3937 		SBD_GET_PERR(hdp->h_err, ep);
3938 
3939 	sbd_release_sbdp_handle(hdp);
3940 
3941 	return (rv);
3942 }
3943 
3944 /*
3945  * Return memhandle, if in fact, this memunit is the owner of
3946  * a scheduled memory delete.
3947  */
3948 int
3949 sbd_get_memhandle(sbd_handle_t *hp, dev_info_t *dip, memhandle_t *mhp)
3950 {
3951 	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
3952 	sbd_mem_unit_t	*mp;
3953 	sbdp_handle_t	*hdp;
3954 	int		unit;
3955 	static fn_t	f = "sbd_get_memhandle";
3956 
3957 	PR_MEM("%s...\n", f);
3958 
3959 	hdp = sbd_get_sbdp_handle(sbp, hp);
3960 
3961 	unit = sbdp_get_unit_num(hdp, dip);
3962 	if (unit == -1) {
3963 		SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
3964 		sbd_release_sbdp_handle(hdp);
3965 		return (-1);
3966 	}
3967 	sbd_release_sbdp_handle(hdp);
3968 
3969 	mp = SBD_GET_BOARD_MEMUNIT(sbp, unit);
3970 
3971 	if (mp->sbm_flags & SBD_MFLAG_RELOWNER) {
3972 		*mhp = mp->sbm_memhandle;
3973 		return (0);
3974 	} else {
3975 		SBD_SET_ERR(SBD_HD2ERR(hp), ESBD_INTERNAL);
3976 		SBD_SET_ERRSTR(SBD_HD2ERR(hp), sbp->sb_mempath[unit]);
3977 		return (-1);
3978 	}
3979 	/*NOTREACHED*/
3980 }
3981 
3982 
3983 static int
3984 sbd_cpu_cnt(sbd_handle_t *hp, sbd_devset_t devset)
3985 {
3986 	int		c, cix;
3987 	sbd_board_t	*sbp;
3988 
3989 	sbp = SBDH2BD(hp->h_sbd);
3990 
3991 	/*
3992 	 * Only look for requested devices that are actually present.
3993 	 */
3994 	devset &= SBD_DEVS_PRESENT(sbp);
3995 
3996 	for (c = cix = 0; c < MAX_CMP_UNITS_PER_BOARD; c++) {
3997 		/*
3998 		 * Index for core 1 , if exists.
3999 		 * With the current implementation it is
4000 		 * MAX_CMP_UNITS_PER_BOARD off from core 0.
4001 		 * The calculation will need to change if
4002 		 * the assumption is no longer true.
4003 		 */
4004 		int		c1 = c + MAX_CMP_UNITS_PER_BOARD;
4005 
4006 		if (DEVSET_IN_SET(devset, SBD_COMP_CMP, c) == 0) {
4007 			continue;
4008 		}
4009 
4010 		/*
4011 		 * Check to see if the dip(s) exist for this chip
4012 		 */
4013 		if ((sbp->sb_devlist[NIX(SBD_COMP_CMP)][c] == NULL) &&
4014 		    (sbp->sb_devlist[NIX(SBD_COMP_CMP)][c1] == NULL))
4015 			continue;
4016 
4017 		cix++;
4018 	}
4019 
4020 	return (cix);
4021 }
4022 
4023 static int
4024 sbd_mem_cnt(sbd_handle_t *hp, sbd_devset_t devset)
4025 {
4026 	int		i, ix;
4027 	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
4028 
4029 	/*
4030 	 * Only look for requested devices that are actually present.
4031 	 */
4032 	devset &= SBD_DEVS_PRESENT(sbp);
4033 
4034 	for (i = ix = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
4035 		dev_info_t	*dip;
4036 
4037 		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, i) == 0) {
4038 			continue;
4039 		}
4040 
4041 		dip = sbp->sb_devlist[NIX(SBD_COMP_MEM)][i];
4042 		if (dip == NULL)
4043 			continue;
4044 
4045 		ix++;
4046 	}
4047 
4048 	return (ix);
4049 }
4050 
4051 /*
4052  * NOTE: This routine is only partially smart about multiple
4053  *	 mem-units.  Need to make mem-status structure smart
4054  *	 about them also.
4055  */
4056 static int
4057 sbd_mem_status(sbd_handle_t *hp, sbd_devset_t devset, sbd_dev_stat_t *dsp)
4058 {
4059 	int		m, mix, rv;
4060 	memdelstat_t	mdst;
4061 	memquery_t	mq;
4062 	sbd_board_t	*sbp;
4063 	sbd_mem_unit_t	*mp;
4064 	sbd_mem_stat_t	*msp;
4065 	extern int	kcage_on;
4066 	int		i;
4067 	static fn_t	f = "sbd_mem_status";
4068 
4069 	sbp = SBDH2BD(hp->h_sbd);
4070 
4071 	/*
4072 	 * Check the present devset and access the dip with
4073 	 * status lock held to protect agains a concurrent
4074 	 * unconfigure or disconnect thread.
4075 	 */
4076 	mutex_enter(&sbp->sb_slock);
4077 
4078 	/*
4079 	 * Only look for requested devices that are actually present.
4080 	 */
4081 	devset &= SBD_DEVS_PRESENT(sbp);
4082 
4083 	for (m = mix = 0; m < MAX_MEM_UNITS_PER_BOARD; m++) {
4084 		dev_info_t	*dip;
4085 
4086 
4087 		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, m) == 0)
4088 			continue;
4089 
4090 		/*
4091 		 * Check to make sure the memory unit is in a state
4092 		 * where its fully initialized.
4093 		 */
4094 		if (SBD_DEVICE_STATE(sbp, SBD_COMP_MEM, m) == SBD_STATE_EMPTY)
4095 			continue;
4096 
4097 		dip = sbp->sb_devlist[NIX(SBD_COMP_MEM)][m];
4098 		if (dip == NULL)
4099 			continue;
4100 
4101 		mp = SBD_GET_BOARD_MEMUNIT(sbp, m);
4102 
4103 		msp = &dsp->d_mem;
4104 
4105 		bzero((caddr_t)msp, sizeof (*msp));
4106 		msp->ms_type = SBD_COMP_MEM;
4107 
4108 		/*
4109 		 * The plugin expects -1 for the mem unit
4110 		 */
4111 		msp->ms_cm.c_id.c_unit = -1;
4112 
4113 		/*
4114 		 * Get the memory name from what sbdp gave us
4115 		 */
4116 		for (i = 0; SBD_COMP(i) != SBD_COMP_UNKNOWN; i++) {
4117 			if (SBD_COMP(i) == SBD_COMP_MEM) {
4118 				(void) strcpy(msp->ms_name, SBD_DEVNAME(i));
4119 			}
4120 		}
4121 		msp->ms_cm.c_cond = mp->sbm_cm.sbdev_cond;
4122 		msp->ms_cm.c_busy = mp->sbm_cm.sbdev_busy;
4123 		msp->ms_cm.c_time = mp->sbm_cm.sbdev_time;
4124 
4125 		/* XXX revisit this after memory conversion */
4126 		msp->ms_ostate = ostate_cvt(SBD_DEVICE_STATE(
4127 			sbp, SBD_COMP_MEM, m));
4128 
4129 		msp->ms_basepfn = mp->sbm_basepfn;
4130 		msp->ms_pageslost = mp->sbm_pageslost;
4131 		msp->ms_cage_enabled = kcage_on;
4132 		msp->ms_interleave = mp->sbm_interleave;
4133 
4134 		if (mp->sbm_flags & SBD_MFLAG_RELOWNER)
4135 			rv = kphysm_del_status(mp->sbm_memhandle, &mdst);
4136 		else
4137 			rv = KPHYSM_EHANDLE;	/* force 'if' to fail */
4138 
4139 		if (rv == KPHYSM_OK) {
4140 			msp->ms_totpages += mdst.phys_pages;
4141 
4142 			/*
4143 			 * Any pages above managed is "free",
4144 			 * i.e. it's collected.
4145 			 */
4146 			msp->ms_detpages += (uint_t)(mdst.collected +
4147 							mdst.phys_pages -
4148 							mdst.managed);
4149 		} else {
4150 			msp->ms_totpages += (uint_t)mp->sbm_npages;
4151 
4152 			/*
4153 			 * If we're UNREFERENCED or UNCONFIGURED,
4154 			 * then the number of detached pages is
4155 			 * however many pages are on the board.
4156 			 * I.e. detached = not in use by OS.
4157 			 */
4158 			switch (msp->ms_cm.c_ostate) {
4159 			/*
4160 			 * changed to use cfgadm states
4161 			 *
4162 			 * was:
4163 			 *	case SFDR_STATE_UNREFERENCED:
4164 			 *	case SFDR_STATE_UNCONFIGURED:
4165 			 */
4166 			case SBD_STAT_UNCONFIGURED:
4167 				msp->ms_detpages = msp->ms_totpages;
4168 				break;
4169 
4170 			default:
4171 				break;
4172 			}
4173 		}
4174 
4175 		rv = kphysm_del_span_query(mp->sbm_basepfn,
4176 						mp->sbm_npages, &mq);
4177 		if (rv == KPHYSM_OK) {
4178 			msp->ms_managed_pages = mq.managed;
4179 			msp->ms_noreloc_pages = mq.nonrelocatable;
4180 			msp->ms_noreloc_first = mq.first_nonrelocatable;
4181 			msp->ms_noreloc_last = mq.last_nonrelocatable;
4182 			msp->ms_cm.c_sflags = 0;
4183 			if (mq.nonrelocatable) {
4184 				SBD_SET_SUSPEND(SBD_CMD_UNCONFIGURE,
4185 				    dsp->ds_suspend);
4186 			}
4187 		} else {
4188 			PR_MEM("%s: kphysm_del_span_query() = %d\n", f, rv);
4189 		}
4190 
4191 		mix++;
4192 		dsp++;
4193 	}
4194 
4195 	mutex_exit(&sbp->sb_slock);
4196 
4197 	return (mix);
4198 }
4199 
/*
 * Cancel an in-progress release: return every device the handle has
 * released (i.e. now unreferenced) to the CONFIGURED state where
 * possible, mark failures FATAL, and restore the board-level state
 * once no released devices remain.
 */
static void
sbd_cancel(sbd_handle_t *hp)
{
	int		i;
	sbd_devset_t	devset;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	static fn_t	f = "sbd_cancel";
	int		rv;

	PR_ALL("%s...\n", f);

	/*
	 * Only devices which have been "released" are
	 * subject to cancellation.
	 */
	devset = HD2MACHHD(hp)->sh_devset & SBD_DEVS_UNREFERENCED(sbp);

	/*
	 * Nothing to do for CPUs or IO other than change back
	 * their state.
	 */
	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
			continue;
		if (sbd_cancel_cpu(hp, i) != SBD_CPUERR_FATAL) {
			SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU, i,
						SBD_STATE_CONFIGURED);
		} else {
			SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU, i,
						SBD_STATE_FATAL);
		}
	}

	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
		if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
			continue;
		SBD_DEVICE_TRANSITION(sbp, SBD_COMP_IO, i,
					SBD_STATE_CONFIGURED);
	}

	/*
	 * sbd_cancel_mem(): 0 maps to CONFIGURED, -1 to FATAL;
	 * any other return leaves the unit's state unchanged.
	 */
	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
			continue;
		if ((rv = sbd_cancel_mem(hp, i)) == 0) {
			SBD_DEVICE_TRANSITION(sbp, SBD_COMP_MEM, i,
						SBD_STATE_CONFIGURED);
		} else if (rv == -1) {
			SBD_DEVICE_TRANSITION(sbp, SBD_COMP_MEM, i,
						SBD_STATE_FATAL);
		}
	}

	PR_ALL("%s: unreleasing devset (0x%x)\n", f, (uint_t)devset);

	SBD_DEVS_CANCEL(sbp, devset);

	if (SBD_DEVS_UNREFERENCED(sbp) == 0) {
		sbd_istate_t	new_state;
		/*
		 * If the board no longer has any released devices
		 * than transfer it back to the CONFIG/PARTIAL state.
		 */
		if (SBD_DEVS_ATTACHED(sbp) == SBD_DEVS_PRESENT(sbp))
			new_state = SBD_STATE_CONFIGURED;
		else
			new_state = SBD_STATE_PARTIAL;
		if (SBD_BOARD_STATE(sbp) != new_state) {
			SBD_BOARD_TRANSITION(sbp, new_state);
		}
		sbp->sb_ostate = SBD_STAT_CONFIGURED;
		(void) drv_getparm(TIME, (void *)&sbp->sb_time);
	}
}
4273 
4274 static void
4275 sbd_get_ncm(sbd_handle_t *hp)
4276 {
4277 	sbd_devset_t devset;
4278 	sbd_priv_handle_t	*shp = HD2MACHHD(hp);
4279 	sbd_cmd_t		*cmdp =  (sbd_cmd_t *)hp->h_iap;
4280 	int			error;
4281 
4282 	/* pre_op restricted the devices to those selected by the ioctl */
4283 	devset = shp->sh_devset;
4284 
4285 	cmdp->cmd_getncm.g_ncm = sbd_cpu_cnt(hp, devset)
4286 		+ sbd_io_cnt(hp, devset) + sbd_mem_cnt(hp, devset);
4287 
4288 	error = sbd_copyout_ioarg(hp->h_mode, hp->h_cmd, cmdp,
4289 		(sbd_ioctl_arg_t *)shp->sh_arg);
4290 
4291 	if (error != 0)
4292 		SBD_SET_ERRNO(SBD_HD2ERR(hp), error);
4293 }
4294 
4295 static void
4296 sbd_status(sbd_handle_t *hp)
4297 {
4298 	int			nstat, mode, ncm, sz, cksz;
4299 	sbd_priv_handle_t	*shp = HD2MACHHD(hp);
4300 	sbd_devset_t		devset;
4301 	sbd_board_t		*sbp = SBDH2BD(hp->h_sbd);
4302 	sbd_stat_t		*dstatp;
4303 	sbd_cmd_t		*cmdp =  (sbd_cmd_t *)hp->h_iap;
4304 	sbdp_handle_t		*hdp;
4305 	sbd_dev_stat_t		*devstatp;
4306 
4307 #ifdef _MULTI_DATAMODEL
4308 	int			sz32;
4309 	sbd_stat32_t		*dstat32p;
4310 #endif /* _MULTI_DATAMODEL */
4311 
4312 	static fn_t	f = "sbd_status";
4313 
4314 	mode = hp->h_mode;
4315 	devset = shp->sh_devset;
4316 
4317 	devset &= SBD_DEVS_PRESENT(sbp);
4318 
4319 	if (cmdp->cmd_cm.c_id.c_type == SBD_COMP_NONE) {
4320 		if (cmdp->cmd_cm.c_flags & SBD_FLAG_ALLCMP) {
4321 			/*
4322 			 * Get the number of components "ncm" on the board.
4323 			 * Calculate size of buffer required to store one
4324 			 * sbd_stat_t structure plus ncm-1 sbd_dev_stat_t
4325 			 * structures. Note that sbd_stat_t already contains
4326 			 * one sbd_dev_stat_t, so only an additional ncm-1
4327 			 * sbd_dev_stat_t structures need to be accounted for
4328 			 * in the calculation when more than one component
4329 			 * is present.
4330 			 */
4331 			ncm = sbd_cpu_cnt(hp, devset) + sbd_io_cnt(hp, devset) +
4332 			    sbd_mem_cnt(hp, devset);
4333 
4334 		} else {
4335 			/*
4336 			 * In the case of c_type == SBD_COMP_NONE, and
4337 			 * SBD_FLAG_ALLCMP not specified, only the board
4338 			 * info is to be returned, no components.
4339 			 */
4340 			ncm = 0;
4341 			devset = 0;
4342 		}
4343 	} else {
4344 		/* Confirm that only one component is selected. */
4345 		ncm = sbd_cpu_cnt(hp, devset) + sbd_io_cnt(hp, devset) +
4346 		    sbd_mem_cnt(hp, devset);
4347 		if (ncm != 1) {
4348 			PR_ALL("%s: expected ncm of 1, got %d, devset 0x%x\n",
4349 			    f, ncm, devset);
4350 			SBD_SET_ERRNO(SBD_HD2ERR(hp), EINVAL);
4351 			return;
4352 		}
4353 	}
4354 
4355 	sz = sizeof (sbd_stat_t);
4356 	if (ncm > 1)
4357 		sz += sizeof (sbd_dev_stat_t) * (ncm - 1);
4358 
4359 	cksz = sz;
4360 
4361 	/*
4362 	 * s_nbytes describes the size of the preallocated user
4363 	 * buffer into which the application is executing to
4364 	 * receive the sbd_stat_t and sbd_dev_stat_t structures.
4365 	 * This buffer must be at least the required (sz) size.
4366 	 */
4367 
4368 #ifdef _MULTI_DATAMODEL
4369 
4370 	/*
4371 	 * More buffer space is required for the 64bit to 32bit
4372 	 * conversion of data structures.
4373 	 */
4374 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
4375 		sz32 = sizeof (sbd_stat32_t);
4376 		if (ncm > 1)
4377 			sz32  += sizeof (sbd_dev_stat32_t) * (ncm - 1);
4378 		cksz = sz32;
4379 	} else
4380 		sz32 = 0;
4381 #endif
4382 
4383 	if ((int)cmdp->cmd_stat.s_nbytes < cksz) {
4384 		PR_ALL("%s: ncm=%d s_nbytes = 0x%x\n", f, ncm,
4385 		    cmdp->cmd_stat.s_nbytes);
4386 		PR_ALL("%s: expected size of 0x%x\n", f, cksz);
4387 		SBD_SET_ERRNO(SBD_HD2ERR(hp), EINVAL);
4388 		return;
4389 	}
4390 
4391 	dstatp = kmem_zalloc(sz, KM_SLEEP);
4392 	devstatp = &dstatp->s_stat[0];
4393 
4394 #ifdef _MULTI_DATAMODEL
4395 	if (sz32 != 0)
4396 		dstat32p = kmem_zalloc(sz32, KM_SLEEP);
4397 #endif
4398 
4399 	/*
4400 	 * if connected or better, provide cached status if available,
4401 	 * otherwise call sbdp for status
4402 	 */
4403 	mutex_enter(&sbp->sb_flags_mutex);
4404 	switch (sbp->sb_state) {
4405 
4406 	case	SBD_STATE_CONNECTED:
4407 	case	SBD_STATE_PARTIAL:
4408 	case	SBD_STATE_CONFIGURED:
4409 		if (sbp->sb_flags & SBD_BOARD_STATUS_CACHED) {
4410 			bcopy(&sbp->sb_stat, dstatp, sizeof (sbd_stat_t));
4411 			dstatp->s_rstate = rstate_cvt(sbp->sb_state);
4412 			dstatp->s_ostate = ostate_cvt(sbp->sb_state);
4413 			dstatp->s_busy = sbp->sb_busy;
4414 			dstatp->s_time = sbp->sb_time;
4415 			dstatp->s_cond = sbp->sb_cond;
4416 			break;
4417 		}
4418 	/*FALLTHROUGH*/
4419 
4420 	default:
4421 		sbp->sb_flags &= ~SBD_BOARD_STATUS_CACHED;
4422 		dstatp->s_board = sbp->sb_num;
4423 		dstatp->s_ostate = ostate_cvt(sbp->sb_state);
4424 		dstatp->s_time = sbp->sb_time;
4425 
4426 		hdp = sbd_get_sbdp_handle(sbp, hp);
4427 
4428 		if (sbdp_get_board_status(hdp, dstatp) != 0) {
4429 			SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
4430 			sbd_release_sbdp_handle(hdp);
4431 #ifdef _MULTI_DATAMODEL
4432 			if (sz32 != 0)
4433 				kmem_free(dstat32p, sz32);
4434 #endif
4435 			kmem_free(dstatp, sz);
4436 			mutex_exit(&sbp->sb_flags_mutex);
4437 			return;
4438 		}
4439 		/*
4440 		 * Do not cache status if the busy flag has
4441 		 * been set by the call to sbdp_get_board_status().
4442 		 */
4443 		if (!dstatp->s_busy) {
4444 			/* Can get board busy flag now */
4445 			dstatp->s_busy = sbp->sb_busy;
4446 			sbp->sb_cond = (sbd_cond_t)dstatp->s_cond;
4447 			bcopy(dstatp, &sbp->sb_stat,
4448 				sizeof (sbd_stat_t));
4449 			sbp->sb_flags |= SBD_BOARD_STATUS_CACHED;
4450 		}
4451 		sbd_release_sbdp_handle(hdp);
4452 		break;
4453 	}
4454 	mutex_exit(&sbp->sb_flags_mutex);
4455 
4456 	if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT))
4457 		if ((nstat = sbd_cpu_flags(hp, devset, devstatp)) > 0) {
4458 			dstatp->s_nstat += nstat;
4459 			devstatp += nstat;
4460 		}
4461 
4462 	if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT))
4463 		if ((nstat = sbd_mem_status(hp, devset, devstatp)) > 0) {
4464 			dstatp->s_nstat += nstat;
4465 			devstatp += nstat;
4466 		}
4467 
4468 	if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT))
4469 		if ((nstat = sbd_io_status(hp, devset, devstatp)) > 0) {
4470 			dstatp->s_nstat += nstat;
4471 			devstatp += nstat;
4472 		}
4473 
4474 	/* paranoia: detect buffer overrun */
4475 	if ((caddr_t)devstatp > ((caddr_t)dstatp) + sz) {
4476 		PR_ALL("%s: buffer overrun\n", f);
4477 #ifdef _MULTI_DATAMODEL
4478 		if (sz32 != 0)
4479 			kmem_free(dstat32p, sz32);
4480 #endif
4481 		kmem_free(dstatp, sz);
4482 		SBD_SET_ERRNO(SBD_HD2ERR(hp), EINVAL);
4483 		return;
4484 	}
4485 
4486 /* if necessary, move data into intermediate device status buffer */
4487 #ifdef _MULTI_DATAMODEL
4488 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
4489 		int		i, j;
4490 
4491 		ASSERT(sz32 != 0);
4492 		/* paranoia: detect buffer overrun */
4493 		if ((caddr_t)&dstat32p->s_stat[dstatp->s_nstat] >
4494 		    ((caddr_t)dstat32p) + sz32) {
4495 			cmn_err(CE_WARN,
4496 				"sbd:%s: buffer32 overrun", f);
4497 #ifdef _MULTI_DATAMODEL
4498 			if (sz32 != 0)
4499 				kmem_free(dstat32p, sz32);
4500 #endif
4501 			kmem_free(dstatp, sz);
4502 			SBD_SET_ERRNO(SBD_HD2ERR(hp), EINVAL);
4503 			return;
4504 		}
4505 
4506 		/*
4507 		 * initialize 32 bit sbd board status structure
4508 		 */
4509 		dstat32p->s_board = (int32_t)dstatp->s_board;
4510 		dstat32p->s_nstat = (int32_t)dstatp->s_nstat;
4511 		dstat32p->s_rstate = dstatp->s_rstate;
4512 		dstat32p->s_ostate = dstatp->s_ostate;
4513 		dstat32p->s_cond = dstatp->s_cond;
4514 		dstat32p->s_busy = dstatp->s_busy;
4515 		dstat32p->s_time = dstatp->s_time;
4516 		dstat32p->s_assigned = dstatp->s_assigned;
4517 		dstat32p->s_power = dstatp->s_power;
4518 		dstat32p->s_platopts = (int32_t)dstatp->s_platopts;
4519 		(void) strcpy(dstat32p->s_type, dstatp->s_type);
4520 
4521 		for (i = 0; i < dstatp->s_nstat; i++) {
4522 			sbd_dev_stat_t	*dsp = &dstatp->s_stat[i];
4523 			sbd_dev_stat32_t	*ds32p = &dstat32p->s_stat[i];
4524 
4525 			/*
4526 			 * copy common data for the device
4527 			 */
4528 			ds32p->d_cm.ci_type = (int32_t)dsp->d_cm.ci_type;
4529 			ds32p->d_cm.ci_unit = (int32_t)dsp->d_cm.ci_unit;
4530 			ds32p->d_cm.c_ostate = (int32_t)dsp->d_cm.c_ostate;
4531 			ds32p->d_cm.c_cond = (int32_t)dsp->d_cm.c_cond;
4532 			ds32p->d_cm.c_busy = (int32_t)dsp->d_cm.c_busy;
4533 			ds32p->d_cm.c_time = (time32_t)dsp->d_cm.c_time;
4534 			ds32p->d_cm.c_sflags = (int32_t)dsp->d_cm.c_sflags;
4535 			(void) strcpy(ds32p->d_cm.ci_name, dsp->d_cm.ci_name);
4536 
4537 			/* copy type specific data for the device */
4538 			switch (dsp->d_cm.ci_type) {
4539 
4540 			case SBD_COMP_CPU:
4541 				ds32p->d_cpu.cs_isbootproc =
4542 					(int32_t)dsp->d_cpu.cs_isbootproc;
4543 				ds32p->d_cpu.cs_cpuid =
4544 					(int32_t)dsp->d_cpu.cs_cpuid;
4545 				ds32p->d_cpu.cs_speed =
4546 					(int32_t)dsp->d_cpu.cs_speed;
4547 				ds32p->d_cpu.cs_ecache =
4548 					(int32_t)dsp->d_cpu.cs_ecache;
4549 				break;
4550 
4551 			case SBD_COMP_MEM:
4552 				ds32p->d_mem.ms_type =
4553 					(int32_t)dsp->d_mem.ms_type;
4554 				ds32p->d_mem.ms_ostate =
4555 					(int32_t)dsp->d_mem.ms_ostate;
4556 				ds32p->d_mem.ms_cond =
4557 					(int32_t)dsp->d_mem.ms_cond;
4558 				ds32p->d_mem.ms_interleave =
4559 					(uint32_t)dsp->d_mem.ms_interleave;
4560 				ds32p->d_mem.ms_basepfn =
4561 					(uint32_t)dsp->d_mem.ms_basepfn;
4562 				ds32p->d_mem.ms_totpages =
4563 					(uint32_t)dsp->d_mem.ms_totpages;
4564 				ds32p->d_mem.ms_detpages =
4565 					(uint32_t)dsp->d_mem.ms_detpages;
4566 				ds32p->d_mem.ms_pageslost =
4567 					(int32_t)dsp->d_mem.ms_pageslost;
4568 				ds32p->d_mem.ms_managed_pages =
4569 					(int32_t)dsp->d_mem.ms_managed_pages;
4570 				ds32p->d_mem.ms_noreloc_pages =
4571 					(int32_t)dsp->d_mem.ms_noreloc_pages;
4572 				ds32p->d_mem.ms_noreloc_first =
4573 					(int32_t)dsp->d_mem.ms_noreloc_first;
4574 				ds32p->d_mem.ms_noreloc_last =
4575 					(int32_t)dsp->d_mem.ms_noreloc_last;
4576 				ds32p->d_mem.ms_cage_enabled =
4577 					(int32_t)dsp->d_mem.ms_cage_enabled;
4578 				ds32p->d_mem.ms_peer_is_target =
4579 					(int32_t)dsp->d_mem.ms_peer_is_target;
4580 				(void) strcpy(ds32p->d_mem.ms_peer_ap_id,
4581 					dsp->d_mem.ms_peer_ap_id);
4582 				break;
4583 
4584 
4585 			case SBD_COMP_IO:
4586 
4587 				ds32p->d_io.is_type =
4588 					(int32_t)dsp->d_io.is_type;
4589 				ds32p->d_io.is_unsafe_count =
4590 					(int32_t)dsp->d_io.is_unsafe_count;
4591 				ds32p->d_io.is_referenced =
4592 					(int32_t)dsp->d_io.is_referenced;
4593 				for (j = 0; j < SBD_MAX_UNSAFE; j++)
4594 					ds32p->d_io.is_unsafe_list[j] =
4595 					    (int32_t)
4596 					    ds32p->d_io.is_unsafe_list[j];
4597 				bcopy(dsp->d_io.is_pathname,
4598 				    ds32p->d_io.is_pathname, MAXPATHLEN);
4599 				break;
4600 
4601 			case SBD_COMP_CMP:
4602 				/* copy sbd_cmp_stat_t structure members */
4603 				bcopy(&dsp->d_cmp.ps_cpuid[0],
4604 					&ds32p->d_cmp.ps_cpuid[0],
4605 					sizeof (ds32p->d_cmp.ps_cpuid));
4606 				ds32p->d_cmp.ps_ncores =
4607 					(int32_t)dsp->d_cmp.ps_ncores;
4608 				ds32p->d_cmp.ps_speed =
4609 					(int32_t)dsp->d_cmp.ps_speed;
4610 				ds32p->d_cmp.ps_ecache =
4611 					(int32_t)dsp->d_cmp.ps_ecache;
4612 				break;
4613 
4614 			default:
4615 				cmn_err(CE_WARN,
4616 				    "sbd:%s: unknown dev type (%d)", f,
4617 				    (int)dsp->d_cm.c_id.c_type);
4618 				break;
4619 			}
4620 		}
4621 
4622 		if (ddi_copyout((void *)dstat32p,
4623 		    cmdp->cmd_stat.s_statp, sz32, mode) != 0) {
4624 			cmn_err(CE_WARN,
4625 				"sbd:%s: failed to copyout status "
4626 				"for board %d", f, sbp->sb_num);
4627 			SBD_SET_ERRNO(SBD_HD2ERR(hp), EFAULT);
4628 		}
4629 	} else
4630 #endif /* _MULTI_DATAMODEL */
4631 	if (ddi_copyout((void *)dstatp, cmdp->cmd_stat.s_statp,
4632 	    sz, mode) != 0) {
4633 		cmn_err(CE_WARN,
4634 			"sbd:%s: failed to copyout status for board %d",
4635 			f, sbp->sb_num);
4636 		SBD_SET_ERRNO(SBD_HD2ERR(hp), EFAULT);
4637 	}
4638 
4639 #ifdef _MULTI_DATAMODEL
4640 	if (sz32 != 0)
4641 		kmem_free(dstat32p, sz32);
4642 #endif
4643 	kmem_free(dstatp, sz);
4644 }
4645 
4646 /*
4647  * Called at driver load time to determine the state and condition
4648  * of an existing board in the system.
4649  */
4650 static void
4651 sbd_board_discovery(sbd_board_t *sbp)
4652 {
4653 	int		i;
4654 	dev_info_t	*dip;
4655 	sbd_devset_t	devs_lost, devs_attached = 0;
4656 	extern kmutex_t	cpu_lock;
4657 	sbdp_handle_t	*hdp;
4658 	static fn_t	f = "sbd_board_discovery";
4659 	sbderror_t	error, *ep;
4660 	sbd_handle_t	*hp = MACHBD2HD(sbp);
4661 
4662 	if (SBD_DEVS_PRESENT(sbp) == 0) {
4663 		PR_ALL("%s: board %d has no devices present\n",
4664 			f, sbp->sb_num);
4665 		return;
4666 	}
4667 
4668 	ep = &error;
4669 	bzero(ep, sizeof (sbderror_t));
4670 
4671 	/*
4672 	 * Check for existence of cpus.
4673 	 */
4674 
4675 	hdp = sbd_get_sbdp_handle(sbp, hp);
4676 
4677 	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
4678 		processorid_t	cpuid;
4679 
4680 		if (!SBD_DEV_IS_PRESENT(sbp, SBD_COMP_CPU, i))
4681 			continue;
4682 
4683 		dip = sbp->sb_devlist[NIX(SBD_COMP_CPU)][i];
4684 
4685 		if (dip != NULL) {
4686 			cpuid = sbdp_get_cpuid(hdp, dip);
4687 
4688 			if (cpuid < 0) {
4689 				SBD_GET_PERR(hdp->h_err,
4690 				    ep);
4691 				continue;
4692 			}
4693 
4694 			mutex_enter(&cpu_lock);	/* needed to call cpu_get() */
4695 			if (cpu_get(cpuid)) {
4696 				SBD_DEV_SET_ATTACHED(sbp, SBD_COMP_CPU, i);
4697 				DEVSET_ADD(devs_attached, SBD_COMP_CPU, i);
4698 				PR_ALL("%s: board %d, cpuid %d - attached\n",
4699 					f, sbp->sb_num, cpuid);
4700 			}
4701 			mutex_exit(&cpu_lock);
4702 			sbd_init_cpu_unit(sbp, i);
4703 		}
4704 	}
4705 
4706 	/*
4707 	 * Check for existence of memory.
4708 	 */
4709 	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
4710 		uint64_t	basepa, endpa;
4711 		struct memlist	*ml;
4712 		extern struct memlist	*phys_install;
4713 
4714 		if (!SBD_DEV_IS_PRESENT(sbp, SBD_COMP_MEM, i))
4715 			continue;
4716 
4717 		dip = sbp->sb_devlist[NIX(SBD_COMP_MEM)][i];
4718 		if (dip == NULL)
4719 			continue;
4720 
4721 		if (sbdphw_get_base_physaddr(hdp, dip, &basepa)) {
4722 			/* omit phantom memory controllers on I/O boards */
4723 			if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_MEM, i)) {
4724 				ASSERT(sbp->sb_ndev != 0);
4725 				SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_MEM, i);
4726 				sbp->sb_ndev--;
4727 			}
4728 			sbp->sb_devlist[NIX(SBD_COMP_MEM)][i] = NULL;
4729 			continue;
4730 		}
4731 
4732 		/*
4733 		 * basepa may not be on a alignment boundary, make it so.
4734 		 */
4735 		if (sbdp_get_mem_alignment(hdp, dip, &endpa)) {
4736 			cmn_err(CE_WARN, "%s sbdp_get_mem_alignment fail", f);
4737 			continue;
4738 		}
4739 
4740 		basepa &= ~(endpa - 1);
4741 		endpa += basepa;
4742 
4743 		/*
4744 		 * Check if base address is in phys_install.
4745 		 */
4746 		memlist_read_lock();
4747 		for (ml = phys_install; ml; ml = ml->ml_next)
4748 			if ((endpa <= ml->ml_address) ||
4749 			    (basepa >= (ml->ml_address + ml->ml_size)))
4750 				continue;
4751 			else
4752 				break;
4753 		memlist_read_unlock();
4754 
4755 		if (ml) {
4756 			SBD_DEV_SET_ATTACHED(sbp, SBD_COMP_MEM, i);
4757 			DEVSET_ADD(devs_attached, SBD_COMP_MEM, i);
4758 			PR_ALL("%s: board %d, mem-unit %d - attached\n",
4759 				f, sbp->sb_num, i);
4760 		}
4761 		sbd_init_mem_unit(sbp, i, ep);
4762 	}
4763 	sbd_release_sbdp_handle(hdp);
4764 
4765 	/*
4766 	 * If so far we have found an error, we just log it but continue
4767 	 */
4768 	if (SBD_GET_ERRNO(ep) != 0)
4769 		cmn_err(CE_WARN, "%s errno has occurred: errno %d", f,
4770 			SBD_GET_ERRNO(ep));
4771 
4772 	/*
4773 	 * Check for i/o state.
4774 	 */
4775 	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
4776 
4777 		if (!SBD_DEV_IS_PRESENT(sbp, SBD_COMP_IO, i))
4778 			continue;
4779 
4780 		dip = sbp->sb_devlist[NIX(SBD_COMP_IO)][i];
4781 		if (dip == NULL)
4782 			continue;
4783 
4784 		ASSERT(e_ddi_branch_held(dip));
4785 
4786 		/*
4787 		 * XXX Is the devstate check needed ?
4788 		 */
4789 		if (i_ddi_devi_attached(dip) ||
4790 		    ddi_get_devstate(dip) == DDI_DEVSTATE_UP) {
4791 
4792 			/*
4793 			 * Found it!
4794 			 */
4795 			SBD_DEV_SET_ATTACHED(sbp, SBD_COMP_IO, i);
4796 			DEVSET_ADD(devs_attached, SBD_COMP_IO, i);
4797 			PR_ALL("%s: board %d, io-unit %d - attached\n",
4798 				f, sbp->sb_num, i);
4799 		}
4800 		sbd_init_io_unit(sbp, i);
4801 	}
4802 
4803 	SBD_DEVS_CONFIGURE(sbp, devs_attached);
4804 	if (devs_attached && ((devs_lost = SBD_DEVS_UNATTACHED(sbp)) != 0)) {
4805 		int		ut;
4806 		/*
4807 		 * A prior comment stated that a partially configured
4808 		 * board was not permitted. The Serengeti architecture
4809 		 * makes this possible, so the SB_DEVS_DISCONNECT
4810 		 * at the end of this block has been removed.
4811 		 */
4812 
4813 		PR_ALL("%s: some devices not configured (0x%x)...\n",
4814 			f, devs_lost);
4815 
4816 		for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++)
4817 			if (DEVSET_IN_SET(devs_lost, SBD_COMP_CPU, ut)) {
4818 				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU,
4819 					ut, SBD_STATE_UNCONFIGURED);
4820 			}
4821 
4822 		for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++)
4823 			if (DEVSET_IN_SET(devs_lost, SBD_COMP_MEM, ut)) {
4824 				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_MEM,
4825 					ut, SBD_STATE_UNCONFIGURED);
4826 			}
4827 
4828 		for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++)
4829 			if (DEVSET_IN_SET(devs_lost, SBD_COMP_IO, ut)) {
4830 				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_IO,
4831 					ut, SBD_STATE_UNCONFIGURED);
4832 			}
4833 	}
4834 }
4835 
4836 static int
4837 hold_rele_branch(dev_info_t *rdip, void *arg)
4838 {
4839 	walk_tree_t	*wp = (walk_tree_t *)arg;
4840 
4841 	ASSERT(wp && (wp->hold == 0 || wp->hold == 1));
4842 
4843 	switch (get_node_type(wp->sbp, rdip, NULL)) {
4844 		case SBD_COMP_CMP:
4845 		case SBD_COMP_MEM:
4846 		case SBD_COMP_IO:
4847 			break;
4848 		case SBD_COMP_CPU:
4849 
4850 			/*
4851 			 * All CPU nodes under CMP nodes should have
4852 			 * gotten pruned when the CMP node was first
4853 			 * encountered.
4854 			 */
4855 			ASSERT(!sbd_is_cmp_child(rdip));
4856 
4857 			break;
4858 
4859 		case SBD_COMP_UNKNOWN:
4860 			/* Not of interest to us */
4861 			return (DDI_WALK_CONTINUE);
4862 		default:
4863 			ASSERT(0);
4864 			return (DDI_WALK_PRUNECHILD);
4865 	}
4866 
4867 	if (wp->hold) {
4868 		ASSERT(!e_ddi_branch_held(rdip));
4869 		e_ddi_branch_hold(rdip);
4870 	} else {
4871 		ASSERT(e_ddi_branch_held(rdip));
4872 		e_ddi_branch_rele(rdip);
4873 	}
4874 
4875 	return (DDI_WALK_PRUNECHILD);
4876 }
4877 
/*
 * Initialize the sbd_board_t for board number 'bd' rooted at top_dip:
 * set up locks and bookkeeping, allocate the per-unit device lists and
 * path buffers, hold all managed device branches under top_dip, then
 * build the devlists and discover the board's initial state.
 */
static void
sbd_board_init(sbd_board_t *sbp, sbd_softstate_t *softsp,
	int bd, dev_info_t *top_dip, int wnode)
{
	int		i;
	dev_info_t	*pdip;
	walk_tree_t	walk = {0};

	mutex_init(&sbp->sb_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sbp->sb_flags_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sbp->sb_slock, NULL, MUTEX_DRIVER, NULL);

	sbp->sb_ref = 0;
	sbp->sb_num = bd;
	sbp->sb_time = gethrestime_sec();
	/*
	 * For serengeti, top_dip doesn't need to be held because
	 * sbp i.e. sbd_board_t will be destroyed in sbd_teardown_instance()
	 * before top_dip detaches. For Daktari, top_dip is the
	 * root node which never has to be held.
	 */
	sbp->sb_topdip = top_dip;
	sbp->sb_cpuid = -1;
	sbp->sb_softsp = (void *) softsp;
	sbp->sb_cond = SBD_COND_UNKNOWN;
	sbp->sb_wnode = wnode;
	sbp->sb_memaccess_ok = 1;

	/* The fixed-size unit arrays must be able to hold every class. */
	ASSERT(MAX_IO_UNITS_PER_BOARD <= SBD_MAX_UNITS_PER_BOARD);
	ASSERT(MAX_CPU_UNITS_PER_BOARD <= SBD_MAX_UNITS_PER_BOARD);
	ASSERT(MAX_MEM_UNITS_PER_BOARD <= SBD_MAX_UNITS_PER_BOARD);

	/*
	 * Allocate the devlist for cpus.
	 */
	sbp->sb_devlist[NIX(SBD_COMP_CPU)] = GETSTRUCT(dev_info_t *,
						MAX_CPU_UNITS_PER_BOARD);

	/*
	 * Allocate the devlist for mem.
	 */
	sbp->sb_devlist[NIX(SBD_COMP_MEM)] = GETSTRUCT(dev_info_t *,
						MAX_MEM_UNITS_PER_BOARD);

	/*
	 * Allocate the devlist for io.
	 */
	sbp->sb_devlist[NIX(SBD_COMP_IO)] = GETSTRUCT(dev_info_t *,
						MAX_IO_UNITS_PER_BOARD);

	/* Per-unit state structures, one array per device class. */
	sbp->sb_dev[NIX(SBD_COMP_CPU)] = GETSTRUCT(sbd_dev_unit_t,
						MAX_CPU_UNITS_PER_BOARD);

	sbp->sb_dev[NIX(SBD_COMP_MEM)] = GETSTRUCT(sbd_dev_unit_t,
						MAX_MEM_UNITS_PER_BOARD);

	sbp->sb_dev[NIX(SBD_COMP_IO)] = GETSTRUCT(sbd_dev_unit_t,
						MAX_IO_UNITS_PER_BOARD);

	/* Device-path buffers, freed in sbd_board_destroy(). */
	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		sbp->sb_cpupath[i] = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	}

	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		sbp->sb_mempath[i] = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	}

	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
		sbp->sb_iopath[i] = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	}

	/*
	 * Walk the device tree, find all top dips on this board and
	 * hold the branches rooted at them
	 */
	ASSERT(sbp->sb_topdip);
	pdip = ddi_get_parent(sbp->sb_topdip);
	if (pdip)
		ndi_devi_enter(pdip);
	walk.sbp = sbp;
	walk.hold = 1;
	ddi_walk_devs(sbp->sb_topdip, hold_rele_branch, (void *)&walk);
	if (pdip)
		ndi_devi_exit(pdip);

	/*
	 * Initialize the devlists
	 */
	if (sbd_init_devlists(sbp) == 0) {
		SBD_BOARD_TRANSITION(sbp, SBD_STATE_EMPTY);
	} else {
		/*
		 * Couldn't have made it down here without
		 * having found at least one device.
		 */
		ASSERT(SBD_DEVS_PRESENT(sbp) != 0);
		/*
		 * Check the state of any possible devices on the
		 * board.
		 */
		sbd_board_discovery(sbp);

		if (SBD_DEVS_UNATTACHED(sbp) == 0) {
			/*
			 * The board has no unattached devices, therefore
			 * by reason of insanity it must be configured!
			 */
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONFIGURED);
			sbp->sb_cond = SBD_COND_OK;
		} else if (SBD_DEVS_ATTACHED(sbp)) {
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_PARTIAL);
		} else {
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONNECTED);
		}
	}
}
4995 
/*
 * Tear down an sbd_board_t initialized by sbd_board_init(): free the
 * per-unit structures, path buffers and devlists, release the device
 * branches held at init time, and destroy the board's locks.
 */
static void
sbd_board_destroy(sbd_board_t *sbp)
{
	int		i;
	dev_info_t	*pdip;
	walk_tree_t	walk = {0};

	SBD_BOARD_TRANSITION(sbp, SBD_STATE_EMPTY);

#ifdef DEBUG
	/* Any leftover memlist indicates a leak in the mem DR path. */
	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		sbd_mem_unit_t *mp;

		mp = SBD_GET_BOARD_MEMUNIT(sbp, i);
		ASSERT(mp->sbm_mlist == NULL);
	}
#endif /* DEBUG */

	/*
	 * Free up MEM unit structs.
	 */
	FREESTRUCT(sbp->sb_dev[NIX(SBD_COMP_MEM)],
			sbd_dev_unit_t, MAX_MEM_UNITS_PER_BOARD);
	sbp->sb_dev[NIX(SBD_COMP_MEM)] = NULL;

	/*
	 * Free up CPU unit structs.
	 */
	FREESTRUCT(sbp->sb_dev[NIX(SBD_COMP_CPU)],
			sbd_dev_unit_t, MAX_CPU_UNITS_PER_BOARD);
	sbp->sb_dev[NIX(SBD_COMP_CPU)] = NULL;

	/*
	 * Free up IO unit structs.
	 */
	FREESTRUCT(sbp->sb_dev[NIX(SBD_COMP_IO)],
			sbd_dev_unit_t, MAX_IO_UNITS_PER_BOARD);
	sbp->sb_dev[NIX(SBD_COMP_IO)] = NULL;

	/*
	 * free up CPU devlists.
	 */

	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		kmem_free((caddr_t)sbp->sb_cpupath[i], MAXPATHLEN);
	}
	FREESTRUCT(sbp->sb_devlist[NIX(SBD_COMP_CPU)], dev_info_t *,
		MAX_CPU_UNITS_PER_BOARD);
	sbp->sb_devlist[NIX(SBD_COMP_CPU)] = NULL;

	/*
	 * free up MEM devlists.
	 */
	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		kmem_free((caddr_t)sbp->sb_mempath[i], MAXPATHLEN);
	}
	FREESTRUCT(sbp->sb_devlist[NIX(SBD_COMP_MEM)], dev_info_t *,
		MAX_MEM_UNITS_PER_BOARD);
	sbp->sb_devlist[NIX(SBD_COMP_MEM)] = NULL;

	/*
	 * free up IO devlists.
	 */
	for (i = 0; i <  MAX_IO_UNITS_PER_BOARD; i++) {
		kmem_free((caddr_t)sbp->sb_iopath[i], MAXPATHLEN);
	}
	FREESTRUCT(sbp->sb_devlist[NIX(SBD_COMP_IO)], dev_info_t *,
		MAX_IO_UNITS_PER_BOARD);
	sbp->sb_devlist[NIX(SBD_COMP_IO)] = NULL;

	/*
	 * Release all branches held earlier
	 */
	ASSERT(sbp->sb_topdip);
	pdip = ddi_get_parent(sbp->sb_topdip);
	if (pdip)
		ndi_devi_enter(pdip);
	walk.sbp = sbp;
	walk.hold = 0;
	ddi_walk_devs(sbp->sb_topdip, hold_rele_branch, (void *)&walk);
	if (pdip)
		ndi_devi_exit(pdip);

	mutex_destroy(&sbp->sb_slock);
	mutex_destroy(&sbp->sb_flags_mutex);
	mutex_destroy(&sbp->sb_mutex);
}
5083 
5084 sbd_comp_type_t
5085 sbd_cm_type(char *name)
5086 {
5087 	sbd_comp_type_t type = SBD_COMP_UNKNOWN;
5088 	int i;
5089 
5090 	/* look up type in table */
5091 	for (i = 0; SBD_COMP(i) != SBD_COMP_UNKNOWN; i++) {
5092 		if (strcmp(name, SBD_OTYPE(i)) == 0) {
5093 			type = SBD_COMP(i);
5094 			break;
5095 		}
5096 	}
5097 
5098 	return (type);
5099 }
5100 
5101 /*
5102  * There are certain cases where obp marks components as failed
5103  * If the status is ok the node won't have any status property. It
5104  * is only there if the status is other than ok.
5105  *
5106  * The translation is as follows:
5107  * If there is no status prop, the the cond is SBD_COND_OK
5108  * If we find a status prop but can't get to it then cond is SBD_COND_UNKNOWN
5109  * if we find a stat and it is failed the cond is SBD_COND_FAILED
5110  * If the stat is disabled, the cond is SBD_COND_UNUSABLE
5111  * Otherwise we return con as SBD_COND_OK
5112  */
5113 sbd_cond_t
5114 sbd_get_comp_cond(dev_info_t *dip)
5115 {
5116 	int			len;
5117 	char			*status_buf;
5118 	static const char	*status = "status";
5119 	static const char	*failed = "fail";
5120 	static const char	*disabled = "disabled";
5121 
5122 	if (dip == NULL) {
5123 		PR_BYP("dip is NULL\n");
5124 		return (SBD_COND_UNKNOWN);
5125 	}
5126 
5127 	/*
5128 	 * If retired, return FAILED
5129 	 */
5130 	if (DEVI(dip)->devi_flags & DEVI_RETIRED) {
5131 		PR_CPU("dip is retired\n");
5132 		return (SBD_COND_FAILED);
5133 	}
5134 
5135 	if (ddi_getproplen(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5136 	    (char *)status, &len) != DDI_PROP_SUCCESS) {
5137 		PR_CPU("status in sbd is ok\n");
5138 		return (SBD_COND_OK);
5139 	}
5140 
5141 	status_buf = kmem_zalloc(sizeof (char) * OBP_MAXPROPNAME, KM_SLEEP);
5142 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5143 	    (char *)status, status_buf, &len) != DDI_PROP_SUCCESS) {
5144 		PR_CPU("status in sbd is unknown\n");
5145 		return (SBD_COND_UNKNOWN);
5146 	}
5147 
5148 	if (strncmp(status_buf, failed, strlen(failed)) == 0) {
5149 		PR_CPU("status in sbd is failed\n");
5150 		kmem_free(status_buf, sizeof (char) * OBP_MAXPROPNAME);
5151 		return (SBD_COND_FAILED);
5152 	}
5153 
5154 	if (strcmp(status_buf, disabled) == 0) {
5155 		PR_CPU("status in sbd is unusable\n");
5156 		kmem_free(status_buf, sizeof (char) * OBP_MAXPROPNAME);
5157 		return (SBD_COND_UNUSABLE);
5158 	}
5159 
5160 	kmem_free(status_buf, sizeof (char) * OBP_MAXPROPNAME);
5161 	return (SBD_COND_OK);
5162 }
5163 
5164 #ifdef SBD_DEBUG_ERRS
5165 
5166 /* function to simulate errors throughout the sbd code */
5167 void
5168 sbd_inject_err(int error, sbderror_t *ep, int Errno, int ecode,
5169 	char *rsc)
5170 {
5171 	static fn_t	f = "sbd_inject_err";
5172 
5173 	if (sbd_err_debug == 0)
5174 		return;
5175 
5176 	if (ep == NULL) {
5177 		cmn_err(CE_WARN, "%s ep is NULL", f);
5178 		return;
5179 	}
5180 
5181 	if (SBD_GET_ERRNO(ep) != 0) {
5182 		cmn_err(CE_WARN, "%s errno already set to %d", f,
5183 			SBD_GET_ERRNO(ep));
5184 		return;
5185 	}
5186 
5187 	if (SBD_GET_ERR(ep) != 0) {
5188 		cmn_err(CE_WARN, "%s code already set to %d", f,
5189 			SBD_GET_ERR(ep));
5190 		return;
5191 	}
5192 
5193 	if ((sbd_err_debug & (1 << error)) != 0) {
5194 		ep->e_errno = Errno;
5195 		ep->e_code = ecode;
5196 
5197 		if (rsc != NULL)
5198 			bcopy((caddr_t)rsc,
5199 			(caddr_t)ep->e_rsc,
5200 			sizeof (ep->e_rsc));
5201 
5202 		if (Errno != 0)
5203 			PR_ERR_ERRNO("%s set errno to %d", f, ep->e_errno);
5204 
5205 		if (ecode != 0)
5206 			PR_ERR_ECODE("%s set ecode to %d", f, ep->e_code);
5207 
5208 		if (rsc != NULL)
5209 			PR_ERR_RSC("%s set rsc to %s", f, ep->e_rsc);
5210 	}
5211 }
5212 #endif
5213