xref: /titanic_50/usr/src/uts/sun4u/starfire/io/drmach.c (revision 9e293969c29a9c274758e70e5a7349223cef86c1)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/debug.h>
28 #include <sys/types.h>
29 #include <sys/varargs.h>
30 #include <sys/errno.h>
31 #include <sys/cred.h>
32 #include <sys/dditypes.h>
33 #include <sys/devops.h>
34 #include <sys/modctl.h>
35 #include <sys/poll.h>
36 #include <sys/conf.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/sunndi.h>
40 #include <sys/ndi_impldefs.h>
41 #include <sys/stat.h>
42 #include <sys/kmem.h>
43 #include <sys/vmem.h>
44 #include <sys/processor.h>
45 #include <sys/spitregs.h>
46 #include <sys/cpuvar.h>
47 #include <sys/cpupart.h>
48 #include <sys/mem_config.h>
49 #include <sys/ddi_impldefs.h>
50 #include <sys/systm.h>
51 #include <sys/machsystm.h>
52 #include <sys/autoconf.h>
53 #include <sys/cmn_err.h>
54 #include <sys/sysmacros.h>
55 #include <sys/x_call.h>
56 #include <sys/promif.h>
57 #include <sys/prom_plat.h>
58 #include <sys/membar.h>
59 #include <vm/seg_kmem.h>
60 #include <sys/mem_cage.h>
61 #include <sys/stack.h>
62 #include <sys/archsystm.h>
63 #include <vm/hat_sfmmu.h>
64 #include <sys/pte.h>
65 #include <sys/mmu.h>
66 #include <sys/cpu_module.h>
67 #include <sys/obpdefs.h>
68 #include <sys/note.h>
69 
70 #include <sys/starfire.h>	/* plat_max_... decls */
71 #include <sys/cvc.h>
72 #include <sys/cpu_sgnblk_defs.h>
73 #include <sys/drmach.h>
74 #include <sys/dr_util.h>
75 #include <sys/pda.h>
76 
77 #include <sys/sysevent.h>
78 #include <sys/sysevent/dr.h>
79 #include <sys/sysevent/eventdefs.h>
80 
81 
82 extern void		bcopy32_il(uint64_t, uint64_t);
83 extern void		flush_ecache_il(
84 				uint64_t physaddr, int size, int linesz);
85 extern uint_t		ldphysio_il(uint64_t physaddr);
86 extern void		stphysio_il(uint64_t physaddr, uint_t value);
87 
88 extern uint64_t		mc_get_mem_alignment(void);
89 extern uint64_t		mc_get_asr_addr(pnode_t);
90 extern uint64_t		mc_get_idle_addr(pnode_t);
91 extern uint64_t		mc_get_alignment_mask(pnode_t);
92 extern int		mc_read_asr(pnode_t, uint_t *);
93 extern int		mc_write_asr(pnode_t, uint_t);
94 extern uint64_t		mc_asr_to_pa(uint_t);
95 extern uint_t		mc_pa_to_asr(uint_t, uint64_t);
96 
97 extern int		pc_madr_add(int, int, int, int);
98 
99 typedef struct {
100 	struct drmach_node	*node;
101 	void			*data;
102 } drmach_node_walk_args_t;
103 
104 typedef struct drmach_node {
105 	void		*here;
106 
107 	pnode_t		 (*get_dnode)(struct drmach_node *node);
108 	int		 (*walk)(struct drmach_node *node, void *data,
109 				int (*cb)(drmach_node_walk_args_t *args));
110 } drmach_node_t;
111 
112 typedef struct {
113 	int		 min_index;
114 	int		 max_index;
115 	int		 arr_sz;
116 	drmachid_t	*arr;
117 } drmach_array_t;
118 
119 typedef struct {
120 	void		*isa;
121 
122 	sbd_error_t	*(*release)(drmachid_t);
123 	sbd_error_t	*(*status)(drmachid_t, drmach_status_t *);
124 
125 	char		 name[MAXNAMELEN];
126 } drmach_common_t;
127 
128 typedef struct {
129 	drmach_common_t	 cm;
130 	int		 bnum;
131 	int		 assigned;
132 	int		 powered;
133 	int		 connect_cpuid;
134 	int		 cond;
135 	drmach_node_t	*tree;
136 	drmach_array_t	*devices;
137 } drmach_board_t;
138 
139 typedef struct {
140 	drmach_common_t	 cm;
141 	drmach_board_t	*bp;
142 	int		 unum;
143 	int		 busy;
144 	int		 powered;
145 	const char	*type;
146 	drmach_node_t	*node;
147 } drmach_device_t;
148 
149 typedef struct {
150 	int		 flags;
151 	drmach_device_t	*dp;
152 	sbd_error_t	*err;
153 	dev_info_t	*dip;
154 } drmach_config_args_t;
155 
156 typedef struct {
157 	uint64_t	 idle_addr;
158 	drmach_device_t	*mem;
159 } drmach_mc_idle_script_t;
160 
161 typedef struct {
162 	uint64_t	masr_addr;
163 	uint_t		masr;
164 	uint_t		_filler;
165 } drmach_rename_script_t;
166 
167 typedef struct {
168 	void		(*run)(void *arg);
169 	caddr_t		data;
170 	pda_handle_t	*ph;
171 	struct memlist	*c_ml;
172 	uint64_t	s_copybasepa;
173 	uint64_t	t_copybasepa;
174 	drmach_device_t	*restless_mc;	/* diagnostic output */
175 } drmach_copy_rename_program_t;
176 
177 typedef enum {
178 	DO_IDLE,
179 	DO_UNIDLE,
180 	DO_PAUSE,
181 	DO_UNPAUSE
182 } drmach_iopc_op_t;
183 
184 typedef struct {
185 	drmach_board_t	*obj;
186 	int		 ndevs;
187 	void		*a;
188 	sbd_error_t	*(*found)(void *a, const char *, int, drmachid_t);
189 	sbd_error_t	*err;
190 } drmach_board_cb_data_t;
191 
192 static caddr_t		 drmach_shutdown_va;
193 
194 static int		 drmach_initialized;
195 static drmach_array_t	*drmach_boards;
196 
197 static int		 drmach_cpu_delay = 100;
198 static int		 drmach_cpu_ntries = 50000;
199 
200 volatile uchar_t	*drmach_xt_mb;
201 
202 /*
203  * Do not change the drmach_shutdown_mbox structure without
204  * considering the drmach_shutdown_asm assembly language code.
205  */
206 struct drmach_shutdown_mbox {
207 	uint64_t	estack;
208 	uint64_t	flushaddr;
209 	int		size;
210 	int		linesize;
211 	uint64_t	physaddr;
212 };
213 struct drmach_shutdown_mbox	*drmach_shutdown_asm_mbox;
214 
215 static int		drmach_fini(void);
216 static sbd_error_t	*drmach_device_new(drmach_node_t *,
217 				drmach_board_t *, drmach_device_t **);
218 static sbd_error_t	*drmach_cpu_new(drmach_device_t *);
219 static sbd_error_t	*drmach_mem_new(drmach_device_t *);
220 static sbd_error_t	*drmach_io_new(drmach_device_t *);
221 static sbd_error_t	*drmach_board_release(drmachid_t);
222 static sbd_error_t	*drmach_board_status(drmachid_t, drmach_status_t *);
223 static sbd_error_t	*drmach_cpu_release(drmachid_t);
224 static sbd_error_t	*drmach_cpu_status(drmachid_t, drmach_status_t *);
225 static sbd_error_t	*drmach_io_release(drmachid_t);
226 static sbd_error_t	*drmach_io_status(drmachid_t, drmach_status_t *);
227 static sbd_error_t	*drmach_mem_release(drmachid_t);
228 static sbd_error_t	*drmach_mem_status(drmachid_t, drmach_status_t *);
229 
230 extern struct cpu	*SIGBCPU;
231 
232 #ifdef DEBUG
233 
234 #define	DRMACH_PR		if (drmach_debug) printf
235 int drmach_debug = 0;		 /* set to non-zero to enable debug messages */
236 #else
237 
238 #define	DRMACH_PR		_NOTE(CONSTANTCONDITION) if (0) printf
239 #endif /* DEBUG */
240 
241 #define	DRMACH_OBJ(id)		((drmach_common_t *)id)
242 
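/*
 * Run-time type identification: each drmach object stores the address
 * of its constructor in the isa field of its common header (e.g.
 * drmach_board_new() records itself in bp->cm.isa).  The predicates
 * below compare against those constructor addresses to classify an
 * opaque drmachid_t.
 */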
243 #define	DRMACH_IS_BOARD_ID(id)	\
244 	((id != 0) &&		\
245 	(DRMACH_OBJ(id)->isa == (void *)drmach_board_new))
246 
247 #define	DRMACH_IS_CPU_ID(id)	\
248 	((id != 0) &&		\
249 	(DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new))
250 
251 #define	DRMACH_IS_MEM_ID(id)	\
252 	((id != 0) &&		\
253 	(DRMACH_OBJ(id)->isa == (void *)drmach_mem_new))
254 
255 #define	DRMACH_IS_IO_ID(id)	\
256 	((id != 0) &&		\
257 	(DRMACH_OBJ(id)->isa == (void *)drmach_io_new))
258 
259 #define	DRMACH_IS_DEVICE_ID(id)					\
260 	((id != 0) &&						\
261 	(DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new ||	\
262 	    DRMACH_OBJ(id)->isa == (void *)drmach_mem_new ||	\
263 	    DRMACH_OBJ(id)->isa == (void *)drmach_io_new))
264 
265 #define	DRMACH_IS_ID(id)					\
266 	((id != 0) &&						\
267 	(DRMACH_OBJ(id)->isa == (void *)drmach_board_new ||	\
268 	    DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new ||	\
269 	    DRMACH_OBJ(id)->isa == (void *)drmach_mem_new ||	\
270 	    DRMACH_OBJ(id)->isa == (void *)drmach_io_new))
271 
272 #define	DRMACH_CPUID2BNUM(cpuid) \
273 	((cpuid) / MAX_CPU_UNITS_PER_BOARD)
274 
275 #define	DRMACH_INTERNAL_ERROR() \
276 	drerr_new(1, ESTF_INTERNAL, drmach_ie_fmt, __LINE__)
277 static char		*drmach_ie_fmt = "drmach.c %d";
278 
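/*
 * Table mapping OBP node names to drmach device types and their
 * constructors.  drmach_device_new() ignores any node whose name is
 * not listed here.
 */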
279 static struct {
280 	const char	 *name;
281 	const char	 *type;
282 	sbd_error_t	 *(*new)(drmach_device_t *);
283 } name2type[] = {
284 	{ "SUNW,UltraSPARC",	DRMACH_DEVTYPE_CPU,  drmach_cpu_new },
285 	{ "mem-unit",		DRMACH_DEVTYPE_MEM,  drmach_mem_new },
286 	{ "pci",		DRMACH_DEVTYPE_PCI,  drmach_io_new  },
287 	{ "sbus",		DRMACH_DEVTYPE_SBUS, drmach_io_new  },
288 };
289 
290 /* node types to cleanup when a board is unconfigured */
291 #define	MISC_COUNTER_TIMER_DEVNAME	"counter-timer"
292 #define	MISC_PERF_COUNTER_DEVNAME	"perf-counter"
293 
294 /* utility */
295 #define	MBYTE	(1048576ull)
296 
297 /*
298  * This is necessary because the CPU support needs
299  * to call cvc_assign_iocpu.
300  */
301 #ifndef lint
302 char _depends_on[] = "drv/cvc";
303 #endif  /* lint */
304 
305 /*
306  * drmach autoconfiguration data structures and interfaces
307  */
308 
309 extern struct mod_ops mod_miscops;
310 
311 static struct modlmisc modlmisc = {
312 	&mod_miscops,
313 	"Sun Enterprise 10000 DR"
314 };
315 
316 static struct modlinkage modlinkage = {
317 	MODREV_1,
318 	(void *)&modlmisc,
319 	NULL
320 };
321 
322 static kmutex_t drmach_i_lock;
323 
324 int
325 _init(void)
326 {
327 	int err;
328 
329 	/* check that we have the correct version of obp */
330 	if (prom_test("SUNW,UE10000,add-brd") != 0) {
331 
332 		cmn_err(CE_WARN, "!OBP/SSP upgrade is required to enable "
333 		    "DR Functionality");
334 
335 		return (-1);
336 	}
337 
338 	mutex_init(&drmach_i_lock, NULL, MUTEX_DRIVER, NULL);
339 
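	/*
	 * Note: both structures below are carved from static_alloc_arena,
	 * presumably so they live in kernel static memory that is never
	 * relocated or removed by DR while cross-trap handlers and the
	 * shutdown assembly code still reference them.
	 */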
340 	drmach_xt_mb = (uchar_t *)vmem_alloc(static_alloc_arena,
341 	    NCPU * sizeof (uchar_t), VM_SLEEP);
342 	drmach_shutdown_asm_mbox = (struct drmach_shutdown_mbox *)
343 	    vmem_alloc(static_alloc_arena, sizeof (struct drmach_shutdown_mbox),
344 	    VM_SLEEP);
345 
346 	if ((err = mod_install(&modlinkage)) != 0) {
347 		mutex_destroy(&drmach_i_lock);
348 		vmem_free(static_alloc_arena, (void *)drmach_xt_mb,
349 		    NCPU * sizeof (uchar_t));
350 		vmem_free(static_alloc_arena, (void *)drmach_shutdown_asm_mbox,
351 		    sizeof (struct drmach_shutdown_mbox));
352 	}
353 
354 	return (err);
355 }
356 
357 int
358 _fini(void)
359 {
360 	if (drmach_fini())
361 		return (DDI_FAILURE);
362 	else
363 		return (mod_remove(&modlinkage));
364 }
365 
366 int
367 _info(struct modinfo *modinfop)
368 {
369 	return (mod_info(&modlinkage, modinfop));
370 }
371 
372 static pnode_t
373 drmach_node_obp_get_dnode(drmach_node_t *np)
374 {
375 	return ((pnode_t)(uintptr_t)np->here);
376 }
377 
378 static int
379 drmach_node_obp_walk(drmach_node_t *np, void *data,
380 		int (*cb)(drmach_node_walk_args_t *args))
381 {
382 	pnode_t			nodeid;
383 	int			rv;
384 	drmach_node_walk_args_t	args;
385 
386 	/* initialize args structure for callback */
387 	args.node = np;
388 	args.data = data;
389 
390 	nodeid = prom_childnode(prom_rootnode());
391 
392 	/* save our new position within the tree */
393 	np->here = (void *)(uintptr_t)nodeid;
394 
395 	rv = 0;
396 	while (nodeid != OBP_NONODE) {
397 		rv = (*cb)(&args);
398 		if (rv)
399 			break;
400 
401 		nodeid = prom_nextnode(nodeid);
402 
403 		/* save our new position within the tree */
404 		np->here = (void *)(uintptr_t)nodeid;
405 	}
406 
407 	return (rv);
408 }
409 
410 static drmach_node_t *
411 drmach_node_new(void)
412 {
413 	drmach_node_t *np;
414 
415 	np = kmem_zalloc(sizeof (drmach_node_t), KM_SLEEP);
416 
417 	np->get_dnode = drmach_node_obp_get_dnode;
418 	np->walk = drmach_node_obp_walk;
419 
420 	return (np);
421 }
422 
423 static void
424 drmach_node_dispose(drmach_node_t *np)
425 {
426 	kmem_free(np, sizeof (*np));
427 }
428 
429 static dev_info_t *
430 drmach_node_get_dip(drmach_node_t *np)
431 {
432 	pnode_t nodeid;
433 
434 	nodeid = np->get_dnode(np);
435 	if (nodeid == OBP_NONODE)
436 		return (NULL);
437 	else {
438 		dev_info_t *dip;
439 
440 		/* The root node doesn't have to be held */
441 		dip = e_ddi_nodeid_to_dip(nodeid);
442 		if (dip) {
443 			/*
444 			 * Branch rooted at dip is already held, so release
445 			 * hold acquired in e_ddi_nodeid_to_dip()
446 			 */
447 			ddi_release_devi(dip);
448 			ASSERT(e_ddi_branch_held(dip));
449 		}
450 
451 		return (dip);
452 	}
453 	/*NOTREACHED*/
454 }
455 
456 static pnode_t
457 drmach_node_get_dnode(drmach_node_t *np)
458 {
459 	return (np->get_dnode(np));
460 }
461 
462 static int
463 drmach_node_walk(drmach_node_t *np, void *param,
464 		int (*cb)(drmach_node_walk_args_t *args))
465 {
466 	return (np->walk(np, param, cb));
467 }
468 
469 static int
470 drmach_node_get_prop(drmach_node_t *np, char *name, void *buf)
471 {
472 	pnode_t	nodeid;
473 	int	rv;
474 
475 	nodeid = np->get_dnode(np);
476 	if (nodeid == OBP_NONODE)
477 		rv = -1;
478 	else if (prom_getproplen(nodeid, (caddr_t)name) < 0)
479 		rv = -1;
480 	else {
481 		(void) prom_getprop(nodeid, (caddr_t)name, (caddr_t)buf);
482 		rv = 0;
483 	}
484 
485 	return (rv);
486 }
487 
488 static int
489 drmach_node_get_proplen(drmach_node_t *np, char *name, int *len)
490 {
491 	pnode_t	 nodeid;
492 	int	 rv;
493 
494 	nodeid = np->get_dnode(np);
495 	if (nodeid == OBP_NONODE)
496 		rv = -1;
497 	else {
498 		*len = prom_getproplen(nodeid, (caddr_t)name);
499 		rv = (*len < 0 ? -1 : 0);
500 	}
501 
502 	return (rv);
503 }
504 
505 static drmachid_t
506 drmach_node_dup(drmach_node_t *np)
507 {
508 	drmach_node_t *dup;
509 
510 	dup = drmach_node_new();
511 	dup->here = np->here;
512 
513 	return (dup);
514 }
515 
516 /*
517  * drmach_array provides convenient array construction, access,
518  * bounds checking and array destruction logic.
519  */
520 
521 static drmach_array_t *
522 drmach_array_new(int min_index, int max_index)
523 {
524 	drmach_array_t *arr;
525 
526 	arr = kmem_zalloc(sizeof (drmach_array_t), KM_SLEEP);
527 
528 	arr->arr_sz = (max_index - min_index + 1) * sizeof (void *);
529 	if (arr->arr_sz > 0) {
530 		arr->min_index = min_index;
531 		arr->max_index = max_index;
532 
533 		arr->arr = kmem_zalloc(arr->arr_sz, KM_SLEEP);
534 		return (arr);
535 	} else {
536 		kmem_free(arr, sizeof (*arr));
537 		return (0);
538 	}
539 }
540 
541 static int
542 drmach_array_set(drmach_array_t *arr, int idx, drmachid_t val)
543 {
544 	if (idx < arr->min_index || idx > arr->max_index)
545 		return (-1);
546 	else {
547 		arr->arr[idx - arr->min_index] = val;
548 		return (0);
549 	}
550 	/*NOTREACHED*/
551 }
552 
553 static int
554 drmach_array_get(drmach_array_t *arr, int idx, drmachid_t *val)
555 {
556 	if (idx < arr->min_index || idx > arr->max_index)
557 		return (-1);
558 	else {
559 		*val = arr->arr[idx - arr->min_index];
560 		return (0);
561 	}
562 	/*NOTREACHED*/
563 }
564 
565 static int
566 drmach_array_first(drmach_array_t *arr, int *idx, drmachid_t *val)
567 {
568 	int rv;
569 
570 	*idx = arr->min_index;
571 	while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL)
572 		*idx += 1;
573 
574 	return (rv);
575 }
576 
577 static int
578 drmach_array_next(drmach_array_t *arr, int *idx, drmachid_t *val)
579 {
580 	int rv;
581 
582 	*idx += 1;
583 	while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL)
584 		*idx += 1;
585 
586 	return (rv);
587 }
588 
589 static void
590 drmach_array_dispose(drmach_array_t *arr, void (*disposer)(drmachid_t))
591 {
592 	drmachid_t	val;
593 	int		idx;
594 	int		rv;
595 
596 	rv = drmach_array_first(arr, &idx, &val);
597 	while (rv == 0) {
598 		(*disposer)(val);
599 		rv = drmach_array_next(arr, &idx, &val);
600 	}
601 
602 	kmem_free(arr->arr, arr->arr_sz);
603 	kmem_free(arr, sizeof (*arr));
604 }
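
/*
 * Usage sketch for the drmach_array routines (mirrors how
 * drmach_boards is managed elsewhere in this file; bnum and bp are
 * illustrative):
 *
 *	drmach_array_t *arr = drmach_array_new(0, MAX_BOARDS - 1);
 *	(void) drmach_array_set(arr, bnum, (drmachid_t)bp);
 *	...
 *	drmach_array_dispose(arr, drmach_board_dispose);
 */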
605 
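/*
 * Branch-selection callback handed to e_ddi_branch_create() by
 * drmach_configure().  It accepts only the OBP node backing the device
 * being configured, and then only if that node carries a "reg"
 * property.
 */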
606 /*ARGSUSED*/
607 static int
608 drmach_prom_select(pnode_t nodeid, void *arg, uint_t flags)
609 {
610 	int			rprop[64];
611 	pnode_t			saved;
612 	drmach_config_args_t	*ap = (drmach_config_args_t *)arg;
613 	drmach_device_t		*dp = ap->dp;
614 	sbd_error_t		*err;
615 
616 	saved = drmach_node_get_dnode(dp->node);
617 
618 	if (nodeid != saved)
619 		return (DDI_FAILURE);
620 
621 	if (saved == OBP_NONODE) {
622 		err = DRMACH_INTERNAL_ERROR();
623 		DRERR_SET_C(&ap->err, &err);
624 		return (DDI_FAILURE);
625 	}
626 
627 	if (prom_getprop(nodeid, OBP_REG, (caddr_t)rprop) <= 0) {
628 		return (DDI_FAILURE);
629 	}
630 
631 	return (DDI_SUCCESS);
632 }
633 
634 /*ARGSUSED*/
635 static void
636 drmach_branch_callback(dev_info_t *rdip, void *arg, uint_t flags)
637 {
638 	drmach_config_args_t	*ap = (drmach_config_args_t *)arg;
639 
640 	ASSERT(ap->dip == NULL);
641 
642 	ap->dip = rdip;
643 }
644 
645 sbd_error_t *
646 drmach_configure(drmachid_t id, int flags)
647 {
648 	drmach_device_t		*dp;
649 	sbd_error_t		*err;
650 	drmach_config_args_t	ca;
651 	devi_branch_t		b = {0};
652 	dev_info_t		*fdip = NULL;
653 
654 	if (!DRMACH_IS_DEVICE_ID(id))
655 		return (drerr_new(0, ESTF_INAPPROP, NULL));
656 	dp = id;
657 
658 	ca.dp = dp;
659 	ca.flags = flags;
660 	ca.err = NULL;		/* will be set if error detected */
661 	ca.dip = NULL;
662 
663 	b.arg = &ca;
664 	b.type = DEVI_BRANCH_PROM;
665 	b.create.prom_branch_select = drmach_prom_select;
666 	b.devi_branch_callback = drmach_branch_callback;
667 
668 	if (e_ddi_branch_create(ddi_root_node(), &b, &fdip,
669 	    DEVI_BRANCH_CHILD | DEVI_BRANCH_CONFIGURE) != 0) {
670 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
671 
672 		/*
673 		 * If non-NULL, fdip is returned held and must be released.
674 		 */
675 		if (fdip != NULL) {
676 			(void) ddi_pathname(fdip, path);
677 			ddi_release_devi(fdip);
678 		} else if (ca.dip != NULL) {
679 			/* safe to call ddi_pathname as dip already held */
680 			(void) ddi_pathname(ca.dip, path);
681 		} else {
682 			(void) strcpy(path, "<none>");
683 		}
684 
685 		err = drerr_new(1, ESTF_DRVFAIL, path);
686 		DRERR_SET_C(&ca.err, &err);
687 		kmem_free(path, MAXPATHLEN);
688 	}
689 
690 	return (ca.err);
691 }
692 
693 static sbd_error_t *
694 drmach_device_new(drmach_node_t *node,
695 	drmach_board_t *bp, drmach_device_t **dpp)
696 {
697 	int		 i;
698 	int		 rv;
699 	drmach_device_t	*dp;
700 	sbd_error_t	*err;
701 	char		 name[OBP_MAXDRVNAME];
702 
703 	rv = drmach_node_get_prop(node, OBP_NAME, name);
704 	if (rv) {
705 		/* every node is expected to have a name */
706 		err = drerr_new(1, ESTF_GETPROP,
707 		    "PROM Node 0x%x: property %s",
708 		    (uint_t)node->get_dnode(node), OBP_NAME);
709 
710 		return (err);
711 	}
712 
713 	/*
714 	 * Search the name2type[] table for an entry whose name matches
715 	 * this node's name.  If a match is found, a device structure of
716 	 * the corresponding type is constructed below; otherwise the
717 	 * node is of no interest to drmach.
718 	 */
719 	for (i = 0; i < sizeof (name2type) / sizeof (name2type[0]); i++)
720 		if (strcmp(name2type[i].name, name) == 0)
721 			break;
722 
723 	if (i < sizeof (name2type) / sizeof (name2type[0])) {
724 		dp = kmem_zalloc(sizeof (drmach_device_t), KM_SLEEP);
725 
726 		dp->bp = bp;
727 		dp->unum = -1;
728 		dp->node = drmach_node_dup(node);
729 		dp->type = name2type[i].type;
730 
731 		err = (name2type[i].new)(dp);
732 		if (err) {
733 			drmach_node_dispose(node);
734 			kmem_free(dp, sizeof (*dp));
735 			dp = NULL;
736 		}
737 
738 		*dpp = dp;
739 		return (err);
740 	}
741 
742 	/*
743 	 * The node currently being examined is not listed in the name2type[]
744 	 * array.  In this case, the node is of no interest to drmach.  Both
745 	 * dp and err are initialized here to yield nothing (no device or
746 	 * error structure) for this case.
747 	 */
748 	*dpp = NULL;
749 	return (NULL);
750 }
751 
752 static void
753 drmach_device_dispose(drmachid_t id)
754 {
755 	drmach_device_t *self = id;
756 
757 	if (self->node)
758 		drmach_node_dispose(self->node);
759 
760 	kmem_free(self, sizeof (*self));
761 }
762 
763 static sbd_error_t *
764 drmach_device_get_prop(drmach_device_t *dp, char *name, void *buf)
765 {
766 	sbd_error_t	*err = NULL;
767 	int		 rv;
768 
769 	rv = drmach_node_get_prop(dp->node, name, buf);
770 	if (rv) {
771 		err = drerr_new(1, ESTF_GETPROP,
772 		    "%s::%s: property %s",
773 		    dp->bp->cm.name, dp->cm.name, name);
774 	}
775 
776 	return (err);
777 }
778 
779 static sbd_error_t *
780 drmach_device_get_proplen(drmach_device_t *dp, char *name, int *len)
781 {
782 	sbd_error_t	*err = NULL;
783 	int		 rv;
784 
785 	rv = drmach_node_get_proplen(dp->node, name, len);
786 	if (rv) {
787 		err = drerr_new(1, ESTF_GETPROPLEN,
788 		    "%s::%s: property %s",
789 		    dp->bp->cm.name, dp->cm.name, name);
790 	}
791 
792 	return (err);
793 }
794 
795 static drmach_board_t *
796 drmach_board_new(int bnum)
797 {
798 	drmach_board_t	*bp;
799 
800 	bp = kmem_zalloc(sizeof (drmach_board_t), KM_SLEEP);
801 
802 	bp->cm.isa = (void *)drmach_board_new;
803 	bp->cm.release = drmach_board_release;
804 	bp->cm.status = drmach_board_status;
805 
806 	(void) drmach_board_name(bnum, bp->cm.name, sizeof (bp->cm.name));
807 
808 	bp->bnum = bnum;
809 	bp->devices = NULL;
810 	bp->connect_cpuid = -1;
811 	bp->tree = drmach_node_new();
812 	bp->assigned = !drmach_initialized;
813 	bp->powered = !drmach_initialized;
814 
815 	(void) drmach_array_set(drmach_boards, bnum, bp);
816 	return (bp);
817 }
818 
819 static void
820 drmach_board_dispose(drmachid_t id)
821 {
822 	drmach_board_t *bp;
823 
824 	ASSERT(DRMACH_IS_BOARD_ID(id));
825 	bp = id;
826 
827 	if (bp->tree)
828 		drmach_node_dispose(bp->tree);
829 
830 	if (bp->devices)
831 		drmach_array_dispose(bp->devices, drmach_device_dispose);
832 
833 	kmem_free(bp, sizeof (*bp));
834 }
835 
836 static sbd_error_t *
837 drmach_board_status(drmachid_t id, drmach_status_t *stat)
838 {
839 	sbd_error_t	*err = NULL;
840 	drmach_board_t	*bp;
841 
842 	if (!DRMACH_IS_BOARD_ID(id))
843 		return (drerr_new(0, ESTF_INAPPROP, NULL));
844 	bp = id;
845 
846 	stat->assigned = bp->assigned;
847 	stat->powered = bp->powered;
848 	stat->busy = 0;			/* assume not busy */
849 	stat->configured = 0;		/* assume not configured */
850 	stat->empty = 0;
851 	stat->cond = bp->cond = SBD_COND_OK;
852 	(void) strncpy(stat->type, "System Brd", sizeof (stat->type));
853 	stat->info[0] = '\0';
854 
855 	if (bp->devices) {
856 		int		 rv;
857 		int		 d_idx;
858 		drmachid_t	 d_id;
859 
860 		rv = drmach_array_first(bp->devices, &d_idx, &d_id);
861 		while (rv == 0) {
862 			drmach_status_t	d_stat;
863 
864 			err = drmach_status(d_id, &d_stat);
865 			if (err)
866 				break;
867 
868 			stat->busy |= d_stat.busy;
869 			stat->configured |= d_stat.configured;
870 
871 			rv = drmach_array_next(bp->devices, &d_idx, &d_id);
872 		}
873 	}
874 
875 	return (err);
876 }
877 
878 /* a simple routine to reduce redundancy of this common logic */
879 static pda_handle_t
880 drmach_pda_open(void)
881 {
882 	pda_handle_t ph;
883 
884 	ph = pda_open();
885 	if (ph == NULL) {
886 		/* catch in debug kernels */
887 		ASSERT(0);
888 		cmn_err(CE_WARN, "pda_open failed");
889 	}
890 
891 	return (ph);
892 }
893 
894 #ifdef DEBUG
895 int drmach_init_break = 0;
896 #endif
897 
898 static int
899 hold_rele_branch(dev_info_t *rdip, void *arg)
900 {
901 	int	i;
902 	int	*holdp = (int *)arg;
903 	char	*name = ddi_node_name(rdip);
904 
905 	/*
906 	 * For Starfire, we must be children of the root devinfo node
907 	 */
908 	ASSERT(ddi_get_parent(rdip) == ddi_root_node());
909 
910 	for (i = 0; i < sizeof (name2type) / sizeof (name2type[0]); i++)
911 		if (strcmp(name2type[i].name, name) == 0)
912 			break;
913 
914 	if (i == sizeof (name2type) / sizeof (name2type[0])) {
915 		/* Not of interest to us */
916 		return (DDI_WALK_PRUNECHILD);
917 	}
918 
919 	if (*holdp) {
920 		ASSERT(!e_ddi_branch_held(rdip));
921 		e_ddi_branch_hold(rdip);
922 	} else {
923 		ASSERT(e_ddi_branch_held(rdip));
924 		e_ddi_branch_rele(rdip);
925 	}
926 
927 	return (DDI_WALK_PRUNECHILD);
928 }
929 
930 static int
931 drmach_init(void)
932 {
933 	pnode_t		nodeid;
934 	dev_info_t	*rdip;
935 	int		hold, circ;
936 
937 #ifdef DEBUG
938 	if (drmach_init_break)
939 		debug_enter("drmach_init: drmach_init_break set\n");
940 #endif
941 	mutex_enter(&drmach_i_lock);
942 	if (drmach_initialized) {
943 		mutex_exit(&drmach_i_lock);
944 		return (0);
945 	}
946 
947 	drmach_boards = drmach_array_new(0, MAX_BOARDS - 1);
948 
949 	nodeid = prom_childnode(prom_rootnode());
950 	do {
951 		int		 bnum;
952 		drmachid_t	 id;
953 
954 		bnum = -1;
955 		(void) prom_getprop(nodeid, OBP_BOARDNUM, (caddr_t)&bnum);
956 		if (bnum == -1)
957 			continue;
958 
959 		if (drmach_array_get(drmach_boards, bnum, &id) == -1) {
960 			cmn_err(CE_WARN, "OBP node 0x%x has"
961 			    " invalid property value, %s=%d",
962 			    nodeid, OBP_BOARDNUM, bnum);
963 
964 			/* clean up */
965 			drmach_array_dispose(
966 			    drmach_boards, drmach_board_dispose);
967 
968 			mutex_exit(&drmach_i_lock);
969 			return (-1);
970 		} else if (id == NULL)
971 			(void) drmach_board_new(bnum);
972 	} while ((nodeid = prom_nextnode(nodeid)) != OBP_NONODE);
973 
974 	drmach_shutdown_va = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);
975 
976 	/*
977 	 * Walk immediate children of devinfo root node and hold
978 	 * all devinfo branches of interest.
979 	 */
980 	hold = 1;
981 	rdip = ddi_root_node();
982 
983 	ndi_devi_enter(rdip, &circ);
984 	ddi_walk_devs(ddi_get_child(rdip), hold_rele_branch, &hold);
985 	ndi_devi_exit(rdip, circ);
986 
987 	drmach_initialized = 1;
988 
989 	mutex_exit(&drmach_i_lock);
990 
991 	return (0);
992 }
993 
994 static int
995 drmach_fini(void)
996 {
997 	dev_info_t	*rdip;
998 	int		hold, circ;
999 
1000 	if (drmach_initialized) {
1001 		int		busy = 0;
1002 		int		rv;
1003 		int		idx;
1004 		drmachid_t	id;
1005 
1006 		ASSERT(drmach_boards != NULL);
1007 
1008 		rv = drmach_array_first(drmach_boards, &idx, &id);
1009 		while (rv == 0) {
1010 			sbd_error_t	*err;
1011 			drmach_status_t stat;
1012 
1013 			err = drmach_board_status(id, &stat);
1014 			if (err) {
1015 				/* catch in debug kernels */
1016 				ASSERT(0);
1017 				sbd_err_clear(&err);
1018 				busy = 1;
1019 			} else
1020 				busy |= stat.busy;
1021 
1022 			rv = drmach_array_next(drmach_boards, &idx, &id);
1023 		}
1024 
1025 		if (busy)
1026 			return (-1);
1027 
1028 		drmach_array_dispose(drmach_boards, drmach_board_dispose);
1029 		drmach_boards = NULL;
1030 
1031 		vmem_free(heap_arena, drmach_shutdown_va, PAGESIZE);
1032 
1033 		/*
1034 		 * Walk immediate children of the root devinfo node
1035 		 * releasing holds acquired on branches in drmach_init()
1036 		 */
1037 		hold = 0;
1038 		rdip = ddi_root_node();
1039 
1040 		ndi_devi_enter(rdip, &circ);
1041 		ddi_walk_devs(ddi_get_child(rdip), hold_rele_branch, &hold);
1042 		ndi_devi_exit(rdip, circ);
1043 
1044 		mutex_destroy(&drmach_i_lock);
1045 
1046 		drmach_initialized = 0;
1047 	}
1048 	if (drmach_xt_mb != NULL) {
1049 		vmem_free(static_alloc_arena, (void *)drmach_xt_mb,
1050 		    NCPU * sizeof (uchar_t));
1051 	}
1052 	if (drmach_shutdown_asm_mbox != NULL) {
1053 		vmem_free(static_alloc_arena, (void *)drmach_shutdown_asm_mbox,
1054 		    sizeof (struct drmach_shutdown_mbox));
1055 	}
1056 	return (0);
1057 }
1058 
1059 static sbd_error_t *
1060 drmach_get_mc_asr_addr(drmachid_t id, uint64_t *pa)
1061 {
1062 	drmach_device_t	*dp;
1063 	pnode_t		nodeid;
1064 	uint64_t	addr;
1065 
1066 	if (!DRMACH_IS_MEM_ID(id))
1067 		return (drerr_new(0, ESTF_INAPPROP, NULL));
1068 	dp = id;
1069 
1070 	nodeid = drmach_node_get_dnode(dp->node);
1071 	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE)
1072 		return (DRMACH_INTERNAL_ERROR());
1073 
1074 	addr = mc_get_asr_addr(nodeid);
1075 	if (addr == (uint64_t)-1)
1076 		return (DRMACH_INTERNAL_ERROR());
1077 
1078 	*pa = addr;
1079 	return (NULL);
1080 }
1081 
1082 static sbd_error_t *
1083 drmach_get_mc_idle_addr(drmachid_t id, uint64_t *pa)
1084 {
1085 	drmach_device_t	*dp;
1086 	pnode_t		nodeid;
1087 	uint64_t	addr;
1088 
1089 	if (!DRMACH_IS_MEM_ID(id))
1090 		return (drerr_new(0, ESTF_INAPPROP, NULL));
1091 	dp = id;
1092 
1093 	nodeid = drmach_node_get_dnode(dp->node);
1094 	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE)
1095 		return (DRMACH_INTERNAL_ERROR());
1096 
1097 	addr = mc_get_idle_addr(nodeid);
1098 	if (addr == (uint64_t)-1)
1099 		return (DRMACH_INTERNAL_ERROR());
1100 
1101 	*pa = addr;
1102 	return (NULL);
1103 }
1104 
1105 static sbd_error_t *
1106 drmach_read_mc_asr(drmachid_t id, uint_t *mcregp)
1107 {
1108 	drmach_device_t	*dp;
1109 	pnode_t		 nodeid;
1110 	sbd_error_t	*err;
1111 
1112 	if (!DRMACH_IS_MEM_ID(id))
1113 		return (drerr_new(0, ESTF_INAPPROP, NULL));
1114 	dp = id;
1115 
1116 	nodeid = drmach_node_get_dnode(dp->node);
1117 	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE)
1118 		err = DRMACH_INTERNAL_ERROR();
1119 	else if (mc_read_asr(nodeid, mcregp) == -1)
1120 		err = DRMACH_INTERNAL_ERROR();
1121 	else
1122 		err = NULL;
1123 
1124 	return (err);
1125 }
1126 
1127 static sbd_error_t *
1128 drmach_write_mc_asr(drmachid_t id, uint_t mcreg)
1129 {
1130 	drmach_device_t	*dp;
1131 	pnode_t		 nodeid;
1132 	sbd_error_t	*err;
1133 
1134 	if (!DRMACH_IS_MEM_ID(id))
1135 		return (drerr_new(0, ESTF_INAPPROP, NULL));
1136 	dp = id;
1137 
1138 	nodeid = drmach_node_get_dnode(dp->node);
1139 	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE)
1140 		err = DRMACH_INTERNAL_ERROR();
1141 	else if (mc_write_asr(nodeid, mcreg) == -1)
1142 		err = DRMACH_INTERNAL_ERROR();
1143 	else
1144 		err = NULL;
1145 
1146 	return (err);
1147 }
1148 
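/*
 * Build the copy-rename script in the caller-supplied buffer.  The
 * layout produced here is exactly what
 * drmach_copy_rename_prog__relocatable() consumes: a zero-terminated
 * array of MC idle-register addresses, immediately followed by a
 * zero-terminated array of (masr_addr, masr) rename steps.
 */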
1149 static sbd_error_t *
1150 drmach_prep_rename_script(drmach_device_t *s_mem, drmach_device_t *t_mem,
1151 	uint64_t t_slice_offset, caddr_t buf, int buflen)
1152 {
1153 	int			i, b, m;
1154 	drmach_mc_idle_script_t	*isp;
1155 	drmach_rename_script_t	*rsp;
1156 	int			s_bd, t_bd;
1157 	uint_t			s_masr, t_masr;
1158 	uint64_t		s_new_basepa, t_new_basepa;
1159 	int			b_idx, rv;
1160 	sbd_error_t		*err;
1161 	drmachid_t		 b_id;
1162 	drmach_board_t		*brd;
1163 
1164 #ifdef DEBUG
1165 	/*
1166 	 * Starfire CPU/MEM/IO boards have only one MC per board.
1167 	 * This function has been coded with that fact in mind.
1168 	 */
1169 	ASSERT(MAX_MEM_UNITS_PER_BOARD == 1);
1170 
1171 	/*
1172 	 * calculate the maximum space that could be consumed,
1173 	 * then verify the available buffer space is adequate.
1174 	 */
1175 	m  = sizeof (drmach_mc_idle_script_t *) * 2; /* two MCs */
1176 	b  = sizeof (drmach_rename_script_t *) * 3 * MAX_CPU_UNITS_PER_BOARD;
1177 	b += sizeof (drmach_rename_script_t *) * 3 * MAX_IO_UNITS_PER_BOARD;
1178 	b *= MAX_BOARDS;
1179 	b += sizeof (drmach_rename_script_t *) * 3;
1180 	b += sizeof (drmach_rename_script_t *) * 1;
1181 	ASSERT(m + b < buflen);
1182 #endif
1183 
1184 	/*
1185 	 * construct an array of MC idle register addresses of
1186 	 * both MCs.  The array is zero terminated -- as expected
1187 	 * by drmach_copy_rename_prog__relocatable().
1188 	 */
1189 	isp = (drmach_mc_idle_script_t *)buf;
1190 
1191 	/* source mc */
1192 	err = drmach_get_mc_idle_addr(s_mem, &isp->idle_addr);
1193 	if (err)
1194 		return (err);
1195 	isp->mem = s_mem;
1196 	isp += 1;
1197 
1198 	/* target mc */
1199 	err = drmach_get_mc_idle_addr(t_mem, &isp->idle_addr);
1200 	if (err)
1201 		return (err);
1202 	isp->mem = t_mem;
1203 	isp += 1;
1204 
1205 	/* terminator */
1206 	isp->idle_addr = 0;
1207 	isp->mem = NULL;
1208 	isp += 1;
1209 
1210 	/* fetch source mc asr register value */
1211 	err = drmach_read_mc_asr(s_mem, &s_masr);
1212 	if (err)
1213 		return (err);
1214 	else if (s_masr & STARFIRE_MC_INTERLEAVE_MASK) {
1215 		return (drerr_new(1, ESTF_INTERBOARD, "%s::%s",
1216 		    s_mem->bp->cm.name, s_mem->cm.name));
1217 	}
1218 
1219 	/* fetch target mc asr register value */
1220 	err = drmach_read_mc_asr(t_mem, &t_masr);
1221 	if (err)
1222 		return (err);
1223 	else if (t_masr & STARFIRE_MC_INTERLEAVE_MASK) {
1224 		return (drerr_new(1, ESTF_INTERBOARD, "%s::%s",
1225 		    t_mem->bp->cm.name, t_mem->cm.name));
1226 	}
1227 
1228 	/* get new source base pa from target's masr */
1229 	s_new_basepa = mc_asr_to_pa(t_masr);
1230 
1231 	/*
1232 	 * remove any existing slice offset to realign
1233 	 * memory with board's slice boundary
1234 	 */
1235 	s_new_basepa &= ~ (mc_get_mem_alignment() - 1);
1236 
1237 	/* get new target base pa from source's masr */
1238 	t_new_basepa  = mc_asr_to_pa(s_masr);
1239 
1240 	/* remove any existing slice offset, then apply new offset */
1241 	t_new_basepa &= ~ (mc_get_mem_alignment() - 1);
1242 	t_new_basepa += t_slice_offset;
1243 
1244 	/* encode new base pa into s_masr.  turn off mem present bit */
1245 	s_masr  = mc_pa_to_asr(s_masr, s_new_basepa);
1246 	s_masr &= ~STARFIRE_MC_MEM_PRESENT_MASK;
1247 
1248 	/* encode new base pa into t_masr.  turn on mem present bit */
1249 	t_masr  = mc_pa_to_asr(t_masr, t_new_basepa);
1250 	t_masr |= STARFIRE_MC_MEM_PRESENT_MASK;
1251 
1252 	/*
1253 	 * Step 0:	Mark source memory as not present.
1254 	 */
1255 	m = 0;
1256 	rsp = (drmach_rename_script_t *)isp;
1257 	err = drmach_get_mc_asr_addr(s_mem, &rsp[m].masr_addr);
1258 	if (err)
1259 		return (err);
1260 	rsp[m].masr = s_masr;
1261 	m++;
1262 
1263 	/*
1264 	 * Step 1:	Write source base address to target MC
1265 	 *		with present bit off.
1266 	 */
1267 	err = drmach_get_mc_asr_addr(t_mem, &rsp[m].masr_addr);
1268 	if (err)
1269 		return (err);
1270 	rsp[m].masr = t_masr & ~STARFIRE_MC_MEM_PRESENT_MASK;
1271 	m++;
1272 
1273 	/*
1274 	 * Step 2:	Now rewrite target reg with present bit on.
1275 	 */
1276 	rsp[m].masr_addr = rsp[m-1].masr_addr;
1277 	rsp[m].masr = t_masr;
1278 	m++;
1279 
1280 	s_bd = s_mem->bp->bnum;
1281 	t_bd = t_mem->bp->bnum;
1282 
1283 	DRMACH_PR("preparing script for CPU and IO units:\n");
1284 
1285 	rv = drmach_array_first(drmach_boards, &b_idx, &b_id);
1286 	if (rv) {
1287 		/* catch this in debug kernels */
1288 		ASSERT(0);
1289 		return (DRMACH_INTERNAL_ERROR());
1290 	}
1291 
1292 	do {
1293 		int			 d_idx;
1294 		drmachid_t		 d_id;
1295 		drmach_device_t		*device;
1296 
1297 		ASSERT(DRMACH_IS_BOARD_ID(b_id));
1298 		brd = b_id;
1299 		b = brd->bnum;
1300 
1301 		/*
1302 		 * Step 3:	Update PC MADR tables for CPUs.
1303 		 */
1304 		if (brd->devices == NULL) {
1305 			/* devices not initialized */
1306 			continue;
1307 		}
1308 
1309 		rv = drmach_array_first(brd->devices, &d_idx, &d_id);
1310 		if (rv) {
1311 			/* must mean no devices on this board */
1312 			break;
1313 		}
1314 
1315 		DRMACH_PR("\t%s\n", brd->cm.name);
1316 
1317 		do {
1318 			ASSERT(DRMACH_IS_DEVICE_ID(d_id));
1319 
1320 			if (!DRMACH_IS_CPU_ID(d_id))
1321 				continue;
1322 
1323 			device = d_id;
1324 			i = device->unum;
1325 
1326 			DRMACH_PR("\t\t%s\n", device->cm.name);
1327 
1328 			/*
1329 			 * Disabled detaching mem node.
1330 			 */
1331 			rsp[m].masr_addr = STARFIRE_PC_MADR_ADDR(b, s_bd, i);
1332 			rsp[m].masr = s_masr;
1333 			m++;
1334 			/*
1335 			 * Always write masr with present bit
1336 			 * off and then again with it on.
1337 			 */
1338 			rsp[m].masr_addr = STARFIRE_PC_MADR_ADDR(b, t_bd, i);
1339 			rsp[m].masr = t_masr & ~STARFIRE_MC_MEM_PRESENT_MASK;
1340 			m++;
1341 			rsp[m].masr_addr = rsp[m-1].masr_addr;
1342 			rsp[m].masr = t_masr;
1343 			m++;
1344 
1345 		} while (drmach_array_next(brd->devices, &d_idx, &d_id) == 0);
1346 
1347 		/*
1348 		 * Step 4:	Update PC MADR tables for IOs.
1349 		 */
1350 		rv = drmach_array_first(brd->devices, &d_idx, &d_id);
1351 		/* this worked for previous loop, must work here too */
1352 		ASSERT(rv == 0);
1353 
1354 		do {
1355 			ASSERT(DRMACH_IS_DEVICE_ID(d_id));
1356 
1357 			if (!DRMACH_IS_IO_ID(d_id))
1358 				continue;
1359 
1360 			device = d_id;
1361 			i = device->unum;
1362 
1363 			DRMACH_PR("\t\t%s\n", device->cm.name);
1364 
1365 			/*
1366 			 * Disabled detaching mem node.
1367 			 */
1368 			rsp[m].masr_addr = STARFIRE_PC_MADR_ADDR(b, s_bd, i+4);
1369 			rsp[m].masr = s_masr;
1370 			m++;
1371 			/*
1372 			 * Always write masr with present bit
1373 			 * off and then again with it on.
1374 			 */
1375 			rsp[m].masr_addr = STARFIRE_PC_MADR_ADDR(b, t_bd, i+4);
1376 			rsp[m].masr = t_masr & ~STARFIRE_MC_MEM_PRESENT_MASK;
1377 			m++;
1378 			rsp[m].masr_addr = rsp[m-1].masr_addr;
1379 			rsp[m].masr = t_masr;
1380 			m++;
1381 
1382 		} while (drmach_array_next(brd->devices, &d_idx, &d_id) == 0);
1383 	} while (drmach_array_next(drmach_boards, &b_idx, &b_id) == 0);
1384 
1385 	/*
1386 	 * Zero masr_addr value indicates the END.
1387 	 */
1388 	rsp[m].masr_addr = 0ull;
1389 	rsp[m].masr = 0;
1390 	DRMACH_PR("number of steps in rename script = %d\n", m);
1391 	m++;
1392 
1393 	/* paranoia */
1394 	ASSERT((caddr_t)&rsp[m] <= buf + buflen);
1395 
1396 #ifdef DEBUG
1397 	{
1398 		int	j;
1399 
1400 		DRMACH_PR("mc idle register address list:");
1401 		isp = (drmach_mc_idle_script_t *)buf;
1402 		DRMACH_PR("source mc idle addr 0x%lx, mem id %p",
1403 		    isp[0].idle_addr, (void *)isp[0].mem);
1404 		DRMACH_PR("target mc idle addr 0x%lx, mem id %p",
1405 		    isp[1].idle_addr, (void *)isp[1].mem);
1406 		ASSERT(isp[2].idle_addr == 0);
1407 
1408 		DRMACH_PR("copy-rename script:");
1409 		for (j = 0; j < m; j++) {
1410 			DRMACH_PR("0x%lx = 0x%08x",
1411 			    rsp[j].masr_addr, rsp[j].masr);
1412 		}
1413 
1414 		DELAY(1000000);
1415 	}
1416 #endif
1417 
1418 	/* compute number of bytes consumed (for the checks below) */
1419 	b = (caddr_t)&rsp[m] - buf;
1420 	DRMACH_PR("total number of bytes consumed is %d\n", b);
1421 	ASSERT(b <= buflen);
1422 
1423 #ifdef lint
1424 	buflen = buflen;
1425 #endif
1426 
1427 	return (NULL);
1428 }
1429 
1430 /*
1431  * The routine performs the necessary memory COPY and MC adr SWITCH.
1432  * Both operations MUST be at the same "level" so that the stack is
1433  * maintained correctly between the copy and switch.  The switch
1434  * portion implements a caching mechanism to guarantee the code text
1435  * is cached prior to execution.  This is to guard against possible
1436  * memory access while the MC adr's are being modified.
1437  *
1438  * IMPORTANT: The _drmach_copy_rename_end() function must immediately
1439  * follow drmach_copy_rename_prog__relocatable() so that the correct
1440  * "length" of the drmach_copy_rename_prog__relocatable can be
1441  * calculated.  This routine MUST be a LEAF function, i.e. it can
1442  * make NO function calls, primarily for two reasons:
1443  *
1444  *	1. We must keep the stack consistent across the "switch".
1445  *	2. Function calls are compiled to relative offsets, and
1446  *	   we execute this function we'll be executing it from
1447  *	   a copied version in a different area of memory, thus
1448  *	   the relative offsets will be bogus.
1449  *
1450  * Moreover, it must have the "__relocatable" suffix to inform DTrace
1451  * providers (and anything else, for that matter) that this
1452  * function's text is manually relocated elsewhere before it is
1453  * executed.  That is, it cannot be safely instrumented with any
1454  * methodology that is PC-relative.
1455  */
1456 static void
1457 drmach_copy_rename_prog__relocatable(drmach_copy_rename_program_t *prog)
1458 {
1459 	extern void drmach_exec_script_il(drmach_rename_script_t *rsp);
1460 
1461 	drmach_mc_idle_script_t		*isp;
1462 	struct memlist			*ml;
1463 	int				csize;
1464 	int				lnsize;
1465 	uint64_t			caddr;
1466 
1467 	isp = (drmach_mc_idle_script_t *)prog->data;
1468 
1469 	caddr = ecache_flushaddr;
1470 	csize = (cpunodes[CPU->cpu_id].ecache_size << 1);
1471 	lnsize = cpunodes[CPU->cpu_id].ecache_linesize;
1472 
1473 	/*
1474 	 * DO COPY.
1475 	 */
1476 	for (ml = prog->c_ml; ml; ml = ml->ml_next) {
1477 		uint64_t	s_pa, t_pa;
1478 		uint64_t	nbytes;
1479 
1480 		s_pa = prog->s_copybasepa + ml->ml_address;
1481 		t_pa = prog->t_copybasepa + ml->ml_address;
1482 		nbytes = ml->ml_size;
1483 
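		/*
		 * This loop assumes nbytes is a multiple of 32, since
		 * each bcopy32_il() pass moves exactly 32 bytes and the
		 * count is decremented in 32-byte steps.
		 */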
1484 		while (nbytes != 0ull) {
1485 			/*
1486 			 * This copy does NOT use an ASI
1487 			 * that avoids the Ecache, therefore
1488 			 * the dst_pa addresses may remain
1489 			 * in our Ecache after the dst_pa
1490 			 * has been removed from the system.
1491 			 * A subsequent write-back to memory
1492 			 * will cause an ARB-stop because the
1493 			 * physical address no longer exists
1494 			 * in the system. Therefore we must
1495 			 * flush out local Ecache after we
1496 			 * finish the copy.
1497 			 */
1498 
1499 			/* copy 32 bytes at src_pa to dst_pa */
1500 			bcopy32_il(s_pa, t_pa);
1501 
1502 			/* increment by 32 bytes */
1503 			s_pa += (4 * sizeof (uint64_t));
1504 			t_pa += (4 * sizeof (uint64_t));
1505 
1506 			/* decrement by 32 bytes */
1507 			nbytes -= (4 * sizeof (uint64_t));
1508 		}
1509 	}
1510 
1511 	/*
1512 	 * Since bcopy32_il() does NOT use an ASI to bypass
1513 	 * the Ecache, we need to flush our Ecache after
1514 	 * the copy is complete.
1515 	 */
1516 	flush_ecache_il(caddr, csize, lnsize);		/* inline version */
1517 
1518 	/*
1519 	 * Wait for MCs to go idle.
1520 	 */
1521 	do {
1522 		register int	t = 10;
1523 		register uint_t	v;
1524 
1525 		/* loop t cycles waiting for each mc to indicate it's idle */
1526 		do {
1527 			v = ldphysio_il(isp->idle_addr)
1528 			    & STARFIRE_MC_IDLE_MASK;
1529 
1530 		} while (v != STARFIRE_MC_IDLE_MASK && t-- > 0);
1531 
1532 		/* bail out if we timed out */
1533 		if (t <= 0) {
1534 			prog->restless_mc = isp->mem;
1535 			return;
1536 		}
1537 
1538 		isp += 1;
1539 
1540 		/* stop if terminating zero has been reached */
1541 	} while (isp->idle_addr != 0);
1542 
1543 	/* advance past the terminating zero */
1544 	isp += 1;
1545 
1546 	/*
1547 	 * The following inline assembly routine caches
1548 	 * the rename script and then caches the code that
1549 	 * will do the rename.  This is necessary
1550 	 * so that we don't have any memory references during
1551 	 * the reprogramming.  We accomplish this by first
1552 	 * jumping through the code to guarantee it's cached
1553 	 * before we actually execute it.
1554 	 */
1555 	drmach_exec_script_il((drmach_rename_script_t *)isp);
1556 }
1557 
1558 static void
1559 drmach_copy_rename_end(void)
1560 {
1561 	/*
1562 	 * IMPORTANT:	This function's location MUST be located immediately
1563 	 *		following drmach_copy_rename_prog__relocatable to
1564 	 *		accurately estimate its size.  Note that this assumes
1565 	 *		the compiler keeps these functions in the order in
1566 	 *		which they appear :-o
1567 	 */
1568 }
1569 
1570 sbd_error_t *
1571 drmach_copy_rename_init(drmachid_t t_id, uint64_t t_slice_offset,
1572 	drmachid_t s_id, struct memlist *c_ml, drmachid_t *pgm_id)
1573 {
1574 	drmach_device_t	*s_mem;
1575 	drmach_device_t	*t_mem;
1576 	struct memlist	*x_ml;
1577 	uint64_t	off_mask, s_copybasepa, t_copybasepa, t_basepa;
1578 	int		len;
1579 	caddr_t		bp, wp;
1580 	pda_handle_t	ph;
1581 	sbd_error_t	*err;
1582 	drmach_copy_rename_program_t *prog;
1583 
1584 	if (!DRMACH_IS_MEM_ID(s_id))
1585 		return (drerr_new(0, ESTF_INAPPROP, NULL));
1586 	if (!DRMACH_IS_MEM_ID(t_id))
1587 		return (drerr_new(0, ESTF_INAPPROP, NULL));
1588 	s_mem = s_id;
1589 	t_mem = t_id;
1590 
1591 	/* get starting physical address of target memory */
1592 	err = drmach_mem_get_base_physaddr(t_id, &t_basepa);
1593 	if (err)
1594 		return (err);
1595 
1596 	/* calculate slice offset mask from slice size */
1597 	off_mask = mc_get_mem_alignment() - 1;
1598 
1599 	/* calculate source and target base pa */
1600 	s_copybasepa = c_ml->ml_address;
1601 	t_copybasepa =
1602 	    t_basepa + ((c_ml->ml_address & off_mask) - t_slice_offset);
1603 
1604 	/* paranoia */
1605 	ASSERT((c_ml->ml_address & off_mask) >= t_slice_offset);
1606 
1607 	/* adjust copy memlist addresses to be relative to copy base pa */
1608 	x_ml = c_ml;
1609 	while (x_ml != NULL) {
1610 		x_ml->ml_address -= s_copybasepa;
1611 		x_ml = x_ml->ml_next;
1612 	}
1613 
1614 #ifdef DEBUG
1615 	{
1616 	uint64_t s_basepa, s_size, t_size;
1617 
1618 	x_ml = c_ml;
1619 	while (x_ml->ml_next != NULL)
1620 		x_ml = x_ml->ml_next;
1621 
1622 	DRMACH_PR("source copy span: base pa 0x%lx, end pa 0x%lx\n",
1623 	    s_copybasepa,
1624 	    s_copybasepa + x_ml->ml_address + x_ml->ml_size);
1625 
1626 	DRMACH_PR("target copy span: base pa 0x%lx, end pa 0x%lx\n",
1627 	    t_copybasepa,
1628 	    t_copybasepa + x_ml->ml_address + x_ml->ml_size);
1629 
1630 	DRMACH_PR("copy memlist (relative to copy base pa):\n");
1631 	MEMLIST_DUMP(c_ml);
1632 
1633 	err = drmach_mem_get_base_physaddr(s_id, &s_basepa);
1634 	ASSERT(err == NULL);
1635 
1636 	err = drmach_mem_get_size(s_id, &s_size);
1637 	ASSERT(err == NULL);
1638 
1639 	err = drmach_mem_get_size(t_id, &t_size);
1640 	ASSERT(err == NULL);
1641 
1642 	DRMACH_PR("current source base pa 0x%lx, size 0x%lx\n",
1643 	    s_basepa, s_size);
1644 	DRMACH_PR("current target base pa 0x%lx, size 0x%lx\n",
1645 	    t_basepa, t_size);
1646 
1647 	ASSERT(s_copybasepa + x_ml->ml_address + x_ml->ml_size <=
1648 	    s_basepa + s_size);
1649 	ASSERT(t_copybasepa + x_ml->ml_address + x_ml->ml_size <=
1650 	    t_basepa + t_size);
1651 	}
1652 #endif
1653 
1654 	ph = drmach_pda_open();
1655 	if (ph == NULL)
1656 		return (DRMACH_INTERNAL_ERROR());
1657 
1658 	/*
1659 	 * bp will be page aligned, since we're calling
1660 	 * kmem_zalloc() with an exact multiple of PAGESIZE.
1661 	 */
1662 	wp = bp = kmem_zalloc(PAGESIZE, KM_SLEEP);
1663 
1664 	/* allocate space for copy rename struct */
1665 	len = sizeof (drmach_copy_rename_program_t);
1666 	DRMACH_PR("prog = 0x%p, header len %d\n", (void *)wp, len);
1667 	prog = (drmach_copy_rename_program_t *)wp;
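	/*
	 * The expression below rounds len up to the next multiple of
	 * ecache_alignsize before advancing wp.  For example (values
	 * illustrative), with ecache_alignsize 64 a len of 200 advances
	 * wp by 256, keeping each region ecache-line aligned.
	 */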
1668 	wp += (len + ecache_alignsize - 1) & ~ (ecache_alignsize - 1);
1669 
1670 	/*
1671 	 * Copy the code for the copy-rename routine into
1672 	 * a page aligned piece of memory.  We do this to guarantee
1673 	 * that we're executing within the same page and thus reduce
1674 	 * the possibility of cache collisions between different
1675 	 * pages.
1676 	 */
1677 	len = (int)((ulong_t)drmach_copy_rename_end -
1678 	    (ulong_t)drmach_copy_rename_prog__relocatable);
1679 	ASSERT(wp + len < bp + PAGESIZE);
1680 	bcopy((caddr_t)drmach_copy_rename_prog__relocatable, wp, len);
1681 
1682 	DRMACH_PR("copy-rename function 0x%p, len %d\n", (void *)wp, len);
1683 	prog->run = (void (*)())wp;
1684 	wp += (len + ecache_alignsize - 1) & ~ (ecache_alignsize - 1);
1685 
1686 	/*
1687 	 * Prepare data page that will contain script of
1688 	 * operations to perform during copy-rename.
1689 	 * Allocate temporary buffer to hold script.
1690 	 */
1691 	err = drmach_prep_rename_script(s_mem, t_mem, t_slice_offset,
1692 	    wp, PAGESIZE - (wp - bp));
1693 	if (err) {
1694 		(void) drmach_copy_rename_fini(prog);
1695 		return (err);
1696 	}
1697 
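	/*
	 * Note: at this point len still holds the length of the
	 * relocated code copied above, not the script length, so the
	 * DRMACH_PR value and the final wp advance are only
	 * approximate; wp is not used after the final advance.
	 */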
1698 	DRMACH_PR("copy-rename script 0x%p, len %d\n", (void *)wp, len);
1699 	prog->data = wp;
1700 	wp += (len + ecache_alignsize - 1) & ~ (ecache_alignsize - 1);
1701 
1702 	prog->ph = ph;
1703 	prog->s_copybasepa = s_copybasepa;
1704 	prog->t_copybasepa = t_copybasepa;
1705 	prog->c_ml = c_ml;
1706 	*pgm_id = prog;
1707 
1708 	return (NULL);
1709 }
1710 
1711 sbd_error_t *
1712 drmach_copy_rename_fini(drmachid_t id)
1713 {
1714 	drmach_copy_rename_program_t	*prog = id;
1715 	sbd_error_t			*err = NULL;
1716 
1717 	if (prog->c_ml != NULL)
1718 		memlist_delete(prog->c_ml);
1719 
1720 	if (prog->ph != NULL)
1721 		pda_close(prog->ph);
1722 
1723 	if (prog->restless_mc != 0) {
1724 		cmn_err(CE_WARN, "MC did not idle; OBP Node 0x%x",
1725 		    (uint_t)drmach_node_get_dnode(prog->restless_mc->node));
1726 
1727 		err = DRMACH_INTERNAL_ERROR();
1728 	}
1729 
1730 	kmem_free(prog, PAGESIZE);
1731 
1732 	return (err);
1733 }
1734 
1735 static sbd_error_t *
1736 drmach_io_new(drmach_device_t *dp)
1737 {
1738 	sbd_error_t	*err;
1739 	int		 portid;
1740 
1741 	err = drmach_device_get_prop(dp, "upa-portid", &portid);
1742 	if (err == NULL) {
1743 		ASSERT(portid & 0x40);
1744 		dp->unum = portid & 1;
1745 	}
1746 
1747 	dp->cm.isa = (void *)drmach_io_new;
1748 	dp->cm.release = drmach_io_release;
1749 	dp->cm.status = drmach_io_status;
1750 
1751 	(void) snprintf(dp->cm.name, sizeof (dp->cm.name), "%s%d", dp->type,
1752 	    dp->unum);
1753 
1754 	return (err);
1755 }
1756 
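/*
 * Walk every board present in the PDA and apply the requested
 * pause/idle/unpause/unidle operation to the port controller of each
 * good IOC by rewriting its bus-based PC register.
 */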
1757 static void
1758 drmach_iopc_op(pda_handle_t ph, drmach_iopc_op_t op)
1759 {
1760 	register int b;
1761 
1762 	for (b = 0; b < MAX_BOARDS; b++) {
1763 		int		p;
1764 		ushort_t	bda_ioc;
1765 		board_desc_t	*bdesc;
1766 
1767 		if (pda_board_present(ph, b) == 0)
1768 			continue;
1769 
1770 		bdesc = (board_desc_t *)pda_get_board_info(ph, b);
1771 		/*
1772 		 * Update PCs for IOCs.
1773 		 */
1774 		bda_ioc = bdesc->bda_ioc;
1775 		for (p = 0; p < MAX_IOCS; p++) {
1776 			u_longlong_t	idle_addr;
1777 			uchar_t		value;
1778 
1779 			if (BDA_NBL(bda_ioc, p) != BDAN_GOOD)
1780 				continue;
1781 
1782 			idle_addr = STARFIRE_BB_PC_ADDR(b, p, 1);
1783 
1784 			switch (op) {
1785 			case DO_PAUSE:
1786 				value = STARFIRE_BB_PC_PAUSE(p);
1787 				break;
1788 
1789 			case DO_IDLE:
1790 				value = STARFIRE_BB_PC_IDLE(p);
1791 				break;
1792 
1793 			case DO_UNPAUSE:
1794 				value = ldbphysio(idle_addr);
1795 				value &= ~STARFIRE_BB_PC_PAUSE(p);
1796 				break;
1797 
1798 			case DO_UNIDLE:
1799 				value = ldbphysio(idle_addr);
1800 				value &= ~STARFIRE_BB_PC_IDLE(p);
1801 				break;
1802 
1803 			default:
1804 				cmn_err(CE_PANIC,
1805 				    "drmach_iopc_op: unknown op (%d)",
1806 				    (int)op);
1807 				/*NOTREACHED*/
1808 			}
1809 			stbphysio(idle_addr, value);
1810 		}
1811 	}
1812 }
1813 
1814 void
1815 drmach_copy_rename(drmachid_t id)
1816 {
1817 	drmach_copy_rename_program_t	*prog = id;
1818 	uint64_t			neer;
1819 
1820 	/*
1821 	 * UPA IDLE
1822 	 * Protocol = PAUSE -> IDLE -> UNPAUSE
1823 	 * In reality since we only "idle" the IOPCs it's sufficient
1824 	 * to just issue the IDLE operation since (in theory) all IOPCs
1825 	 * in the field are PC6.  However, we'll be robust and do the
1826 	 * proper workaround protocol so that we never have to worry!
1827 	 */
1828 	drmach_iopc_op(prog->ph, DO_PAUSE);
1829 	drmach_iopc_op(prog->ph, DO_IDLE);
1830 	DELAY(100);
1831 	drmach_iopc_op(prog->ph, DO_UNPAUSE);
1832 	DELAY(100);
1833 
1834 	/* disable CE reporting */
1835 	neer = get_error_enable();
1836 	set_error_enable(neer & ~EER_CEEN);
1837 
1838 	/* run the copy/rename program */
1839 	prog->run(prog);
1840 
1841 	/* enable CE reporting */
1842 	set_error_enable(neer);
1843 
1844 	/*
1845 	 * UPA UNIDLE
1846 	 * Protocol = UNIDLE
1847 	 */
1848 	drmach_iopc_op(prog->ph, DO_UNIDLE);
1849 	DELAY(100);
1850 }
1851 
1852 /*
1853  * The counter-timer and perf-counter nodes are not being cleaned
1854  * up after a board that was present at start of day is detached.
1855  * If the board has become unconfigured with this operation, walk
1856  * the prom tree and find all counter-timer and perf-counter nodes
1857  * that have the same board number as the board that was just
1858  * unconfigured and remove them.
1859  */
1860 static sbd_error_t *
1861 drmach_remove_counter_nodes(drmachid_t id)
1862 {
1863 	int		num;
1864 	char		name[OBP_MAXDRVNAME];
1865 	pnode_t		child;
1866 	dev_info_t	*dip;
1867 	sbd_error_t	*err;
1868 	drmach_status_t	stat;
1869 	drmach_board_t	*bp;
1870 
1871 	if (!DRMACH_IS_BOARD_ID(id)) {
1872 		return (drerr_new(0, ESTF_INAPPROP, NULL));
1873 	}
1874 
1875 	if ((err = drmach_board_status(id, &stat)) != NULL) {
1876 		return (err);
1877 	}
1878 
1879 	/*
1880 	 * Only clean up the counter-timer and perf-counter
1881 	 * nodes when the entire board is unconfigured.
1882 	 */
1883 	if (stat.configured) {
1884 		return (NULL);
1885 	}
1886 
1887 	bp = (drmach_board_t *)id;
1888 
1889 	err = NULL;
1890 
1891 	for (child = prom_childnode(prom_rootnode()); child != OBP_NONODE;
1892 	    child = prom_nextnode(child)) {
1893 
1894 		if (prom_getprop(child, OBP_BOARDNUM, (caddr_t)&num) == -1) {
1895 			continue;
1896 		}
1897 
1898 		if (bp->bnum != num) {
1899 			continue;
1900 		}
1901 
1902 		if (prom_getprop(child, OBP_NAME, (caddr_t)name) == -1) {
1903 			continue;
1904 		}
1905 
1906 		if (strncmp(name, MISC_COUNTER_TIMER_DEVNAME, OBP_MAXDRVNAME) &&
1907 		    strncmp(name, MISC_PERF_COUNTER_DEVNAME, OBP_MAXDRVNAME)) {
1908 			continue;
1909 		}
1910 
1911 		/* Root node doesn't have to be held */
1912 		dip = e_ddi_nodeid_to_dip(child);
1913 
1914 		/*
1915 		 * If the node is only in the OBP tree, then
1916 		 * we don't have to remove it.
1917 		 */
1918 		if (dip) {
1919 			dev_info_t *fdip = NULL;
1920 
1921 			DRMACH_PR("removing %s devinfo node\n", name);
1922 
1923 			e_ddi_branch_hold(dip);
1924 			ddi_release_devi(dip); /* held in e_ddi_nodeid_to_dip */
1925 
1926 			if (e_ddi_branch_destroy(dip, &fdip, 0)) {
1927 				char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1928 
1929 				/*
1930 				 * If non-NULL, fdip is held and must be
1931 				 * released.
1932 				 */
1933 				if (fdip != NULL) {
1934 					(void) ddi_pathname(fdip, path);
1935 					ddi_release_devi(fdip);
1936 				} else {
1937 					(void) ddi_pathname(dip, path);
1938 				}
1939 
1940 				err = drerr_new(1, ESTF_DRVFAIL, path);
1941 				kmem_free(path, MAXPATHLEN);
1942 				e_ddi_branch_rele(dip);
1943 				break;
1944 			}
1945 		}
1946 	}
1947 
1948 	return (err);
1949 }
1950 
1951 /*ARGSUSED*/
1952 sbd_error_t *
1953 drmach_pre_op(int cmd, drmachid_t id, drmach_opts_t *opts)
1954 {
1955 	/* allow status and ncm operations to always succeed */
1956 	if ((cmd == SBD_CMD_STATUS) || (cmd == SBD_CMD_GETNCM)) {
1957 		return (NULL);
1958 	}
1959 
1960 	/* check all other commands for the required option string */
1961 	if ((opts->size > 0) && (opts->copts != NULL)) {
1962 
1963 		DRMACH_PR("platform options: %s\n", opts->copts);
1964 
1965 		if (strstr(opts->copts, "xfdr") != NULL) {
1966 			return (NULL);
1967 		}
1968 	}
1969 
1970 	return (drerr_new(0, ESTF_SUPPORT, NULL));
1971 }
1972 
1973 /*ARGSUSED*/
1974 sbd_error_t *
1975 drmach_post_op(int cmd, drmachid_t id, drmach_opts_t *opts)
1976 {
1977 	sbd_error_t	*err = NULL;
1978 
1979 	switch (cmd) {
1980 	case SBD_CMD_UNCONFIGURE:
1981 
1982 		err = drmach_remove_counter_nodes(id);
1983 		break;
1984 
1985 	case SBD_CMD_CONFIGURE:
1986 	case SBD_CMD_DISCONNECT:
1987 	case SBD_CMD_CONNECT:
1988 	case SBD_CMD_GETNCM:
1989 	case SBD_CMD_STATUS:
1990 		break;
1991 
1992 	default:
1993 		break;
1994 	}
1995 
1996 	return (err);
1997 }
1998 
1999 sbd_error_t *
2000 drmach_board_assign(int bnum, drmachid_t *id)
2001 {
2002 	sbd_error_t	*err;
2003 
2004 	if (!drmach_initialized && drmach_init() == -1) {
2005 		err = DRMACH_INTERNAL_ERROR();
2006 	} else if (drmach_array_get(drmach_boards, bnum, id) == -1) {
2007 		err = drerr_new(1, ESTF_BNUM, "%d", bnum);
2008 	} else if (*id != NULL) {
2009 		err = NULL;
2010 	} else {
2011 		drmach_board_t	*bp;
2012 
2013 		*id  = (drmachid_t)drmach_board_new(bnum);
2014 		bp = *id;
2015 		bp->assigned = 1;
2016 		err = NULL;
2017 	}
2018 
2019 	return (err);
2020 }
2021 
2022 static int
2023 drmach_attach_board(void *arg)
2024 {
2025 	drmach_board_t	*obj = (drmach_board_t *)arg;
2026 	cpuset_t	cset;
2027 	int		retval;
2028 
2029 	/*
2030 	 * OBP disables traps during the board probe.
2031 	 * So, in order to prevent cross-call/cross-trap timeouts,
2032 	 * and thus panics, we effectively block anybody from
2033 	 * issuing xc's/xt's by doing a promsafe_xc_attention.
2034 	 * In the previous version of Starfire DR (2.6), a timeout
2035 	 * suspension mechanism was implemented in the send-mondo
2036 	 * assembly.  That mechanism is unnecessary with the
2037 	 * existence of xc_attention/xc_dismissed.
2038 	 */
2039 	cset = cpu_ready_set;
2040 	promsafe_xc_attention(cset);
2041 
2042 	retval = prom_starfire_add_brd(obj->connect_cpuid);
2043 
2044 	xc_dismissed(cset);
2045 
2046 	return (retval);
2047 }
2048 
2049 sbd_error_t *
2050 drmach_board_connect(drmachid_t id, drmach_opts_t *opts)
2051 {
2052 	drmach_board_t	*obj = (drmach_board_t *)id;
2053 	int		retval;
2054 	sbd_error_t	*err;
2055 	char		*cptr, *copts;
2056 
2057 	if (!DRMACH_IS_BOARD_ID(id))
2058 		return (drerr_new(0, ESTF_INAPPROP, NULL));
2059 
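	/*
	 * The connect cpuid arrives as a platform option string of the
	 * form "cpuid=<n>" (e.g. "cpuid=33"; value illustrative).
	 */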
2060 	copts = (opts->size > 0) ? opts->copts : NULL;
2061 
2062 	/* don't parse an absent option string */
2063 	if (copts != NULL && (cptr = strstr(copts, "cpuid=")) != NULL) {
2064 		int cpuid;
2065 
2066 		cptr += strlen("cpuid=");
2067 		cpuid = stoi(&cptr);
2068 
2069 		if (DRMACH_CPUID2BNUM(cpuid) == obj->bnum) {
2070 			obj->connect_cpuid = cpuid;
2071 			obj->assigned = 1;
2072 		} else
2073 			return (drerr_new(1, ESTF_SETCPUVAL, "%d", cpuid));
2074 	} else {
2075 		/* cpuid was not specified */
2076 		obj->connect_cpuid = -1;
2077 	}
2078 
2079 	if (obj->connect_cpuid == -1) {
2080 		err =  drerr_new(1, ESTF_NOCPUID, obj->cm.name);
2081 		return (err);
2082 	}
2083 
2084 	cmn_err(CE_CONT, "DRMACH: PROM attach %s CPU %d\n",
2085 	    obj->cm.name, obj->connect_cpuid);
2086 
2087 	retval = prom_tree_update(drmach_attach_board, obj);
2088 
2089 	if (retval == 0)
2090 		err = NULL;
2091 	else {
2092 		cmn_err(CE_WARN, "prom error: prom_starfire_add_brd(%d) "
2093 		    "returned %d", obj->connect_cpuid, retval);
2094 
2095 		err = drerr_new(1, ESTF_PROBE, obj->cm.name);
2096 	}
2097 
2098 	obj->connect_cpuid = -1;
2099 
2100 	return (err);
2101 }
2102 
2103 /*ARGSUSED*/
2104 sbd_error_t *
2105 drmach_board_disconnect(drmachid_t id, drmach_opts_t *opts)
2106 {
2107 	drmach_board_t		*bp;
2108 	int			rv;
2109 	int			d_idx;	/* device index */
2110 	drmachid_t		d_id;	/* device ID */
2111 	sbd_error_t		*err;
2112 
2113 	if (!DRMACH_IS_BOARD_ID(id))
2114 		return (drerr_new(0, ESTF_INAPPROP, NULL));
2115 
2116 	bp = id;
2117 
2118 	/*
2119 	 * We need to make sure all of the board's device nodes
2120 	 * have been removed from the Solaris device tree before
2121 	 * continuing with the disconnect. Otherwise, we could
2122 	 * disconnect the board and remove the OBP device tree
2123 	 * nodes with Solaris device tree nodes remaining.
2124 	 *
2125 	 * On Starfire, Solaris device tree nodes are deleted
2126 	 * during unconfigure by drmach_unconfigure(). It's
2127 	 * necessary to do this here because drmach_unconfigure()
2128 	 * failures are not handled during unconfigure.
2129 	 */
2130 	if (bp->devices) {
2131 		rv = drmach_array_first(bp->devices, &d_idx, &d_id);
2132 		while (rv == 0) {
2133 			err = drmach_unconfigure(d_id, DRMACH_DEVI_REMOVE);
2134 			if (err)
2135 				return (err);
2136 
2137 			rv = drmach_array_next(bp->devices, &d_idx, &d_id);
2138 		}
2139 	}
2140 
2141 	/*
2142 	 * Starfire board Solaris device tree counter nodes,
2143 	 * which are only present on start-of-day boards, are
2144 	 * removed in the dr_post_op() code flow after the
2145 	 * board is unconfigured. We call the counter node
2146 	 * removal function here because unconfigure errors
2147 	 * can cause the dr_post_op() function to be skipped
2148 	 * after an unconfigure operation even though all of
2149 	 * the board's devices have been transitioned to the
2150 	 * unconfigured state.
2151 	 */
2152 	err = drmach_remove_counter_nodes(id);
2153 	if (err)
2154 		return (err);
2155 
2156 	return (NULL);
2157 }
2158 
2159 static int
2160 drmach_board_find_devices_cb(drmach_node_walk_args_t *args)
2161 {
2162 	drmach_node_t			*node = args->node;
2163 	drmach_board_cb_data_t		*data = args->data;
2164 	drmach_board_t			*obj = data->obj;
2165 
2166 	int		 rv;
2167 	int		 bnum;
2168 	drmach_device_t	*device;
2169 
2170 	rv = drmach_node_get_prop(node, OBP_BOARDNUM, &bnum);
2171 	if (rv) {
2172 		/*
2173 		 * If the node does not have a board# property, that
2174 		 * fact alone tells us that drmach is not interested
2175 		 * in it.
2176 		 */
2177 		return (0);
2178 	} else if (bnum != obj->bnum)
2179 		return (0);
2180 
2181 	/*
2182 	 * Create a device data structure from this node data.
2183 	 * The call may yield nothing if the node is not of interest
2184 	 * to drmach.
2185 	 */
2186 	data->err = drmach_device_new(node, obj, &device);
2187 	if (data->err)
2188 		return (-1);
2189 	else if (device == NULL) {
2190 		/*
2191 		 * drmach_device_new examined the node we passed in
2192 		 * and determined that it is not of interest to
2193 		 * drmach, so it is skipped.
2194 		 */
2195 		return (0);
2196 	}
2197 
2198 	rv = drmach_array_set(obj->devices, data->ndevs++, device);
2199 	if (rv) {
2200 		drmach_device_dispose(device);
2201 		data->err = DRMACH_INTERNAL_ERROR();
2202 		return (-1);
2203 	}
2204 
2205 	data->err = (*data->found)(data->a, device->type, device->unum, device);
2206 	return (data->err == NULL ? 0 : -1);
2207 }
2208 
2209 sbd_error_t *
2210 drmach_board_find_devices(drmachid_t id, void *a,
2211 	sbd_error_t *(*found)(void *a, const char *, int, drmachid_t))
2212 {
2213 	extern int		 plat_max_cpu_units_per_board();
2214 	extern int		 plat_max_mem_units_per_board();
2215 	extern int		 plat_max_io_units_per_board();
2216 
2217 	drmach_board_t		*obj = (drmach_board_t *)id;
2218 	sbd_error_t		*err;
2219 	int			 max_devices;
2220 	int			 rv;
2221 	drmach_board_cb_data_t	data;
2222 
2223 	max_devices  = plat_max_cpu_units_per_board();
2224 	max_devices += plat_max_mem_units_per_board();
2225 	max_devices += plat_max_io_units_per_board();
2226 
2227 	obj->devices = drmach_array_new(0, max_devices);
2228 
2229 	data.obj = obj;
2230 	data.ndevs = 0;
2231 	data.found = found;
2232 	data.a = a;
2233 	data.err = NULL;
2234 
2235 	rv = drmach_node_walk(obj->tree, &data, drmach_board_find_devices_cb);
2236 	if (rv == 0)
2237 		err = NULL;
2238 	else {
2239 		drmach_array_dispose(obj->devices, drmach_device_dispose);
2240 		obj->devices = NULL;
2241 
2242 		if (data.err)
2243 			err = data.err;
2244 		else
2245 			err = DRMACH_INTERNAL_ERROR();
2246 	}
2247 
2248 	return (err);
2249 }
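
/*
 * Example (illustrative only): a minimal 'found' callback such as a
 * caller of drmach_board_find_devices() might supply. The name
 * my_found_cb is hypothetical; only the signature is dictated by the
 * interface above. Returning NULL lets the walk continue; a non-NULL
 * sbd_error_t is recorded by the walk callback and aborts the
 * remainder of the tree walk.
 *
 *	static sbd_error_t *
 *	my_found_cb(void *a, const char *type, int unum, drmachid_t id)
 *	{
 *		DRMACH_PR("found %s%d\n", type, unum);
 *		return (NULL);
 *	}
 */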
2250 
2251 int
2252 drmach_board_lookup(int bnum, drmachid_t *id)
2253 {
2254 	int	rv = 0;
2255 
2256 	if (!drmach_initialized && drmach_init() == -1) {
2257 		*id = 0;
2258 		rv = -1;
2259 	} else if (drmach_array_get(drmach_boards, bnum, id)) {
2260 		*id = 0;
2261 		rv = -1;
2262 	}
2263 	return (rv);
2264 }
2265 
2266 sbd_error_t *
2267 drmach_board_name(int bnum, char *buf, int buflen)
2268 {
2269 	(void) snprintf(buf, buflen, "SB%d", bnum);
2270 	return (NULL);
2271 }
2272 
2273 sbd_error_t *
2274 drmach_board_poweroff(drmachid_t id)
2275 {
2276 	drmach_board_t	*bp;
2277 	sbd_error_t	*err;
2278 	drmach_status_t	 stat;
2279 
2280 	if (!DRMACH_IS_BOARD_ID(id))
2281 		return (drerr_new(0, ESTF_INAPPROP, NULL));
2282 	bp = id;
2283 
2284 	err = drmach_board_status(id, &stat);
2285 	if (err)
2286 		return (err);
2287 	else if (stat.configured || stat.busy)
2288 		return (drerr_new(0, ESTF_CONFIGBUSY, bp->cm.name));
2289 	else {
2290 		/* board power off is essentially a noop for Starfire */
2291 		bp->powered = 0;
2292 		return (NULL);
2293 	}
2294 	/*NOTREACHED*/
2295 }
2296 
2297 sbd_error_t *
2298 drmach_board_poweron(drmachid_t id)
2299 {
2300 	drmach_board_t	*bp;
2301 
2302 	if (!DRMACH_IS_BOARD_ID(id))
2303 		return (drerr_new(0, ESTF_INAPPROP, NULL));
2304 	bp = id;
2305 
2306 	/* board power on is essentially a noop for Starfire */
2307 	bp->powered = 1;
2308 
2309 	return (NULL);
2310 }
2311 
2312 static sbd_error_t *
2313 drmach_board_release(drmachid_t id)
2314 {
2315 	if (!DRMACH_IS_BOARD_ID(id))
2316 		return (drerr_new(0, ESTF_INAPPROP, NULL));
2317 	return (NULL);
2318 }
2319 
2320 /*ARGSUSED*/
2321 sbd_error_t *
2322 drmach_board_test(drmachid_t id, drmach_opts_t *opts, int force)
2323 {
2324 	return (NULL);
2325 }
2326 
2327 sbd_error_t *
2328 drmach_board_unassign(drmachid_t id)
2329 {
2330 	drmach_board_t	*bp;
2331 	sbd_error_t	*err;
2332 	drmach_status_t	 stat;
2333 
2334 	if (!DRMACH_IS_BOARD_ID(id))
2335 		return (drerr_new(0, ESTF_INAPPROP, NULL));
2336 	bp = id;
2337 
2338 	err = drmach_board_status(id, &stat);
2339 	if (err)
2340 		return (err);
2341 	else if (stat.configured || stat.busy)
2342 		return (drerr_new(0, ESTF_CONFIGBUSY, bp->cm.name));
2343 	else if (drmach_array_set(drmach_boards, bp->bnum, 0) != 0)
2344 		return (DRMACH_INTERNAL_ERROR());
2345 	else {
2346 		drmach_board_dispose(bp);
2347 		return (NULL);
2348 	}
2349 	/*NOTREACHED*/
2350 }
2351 
2352 static sbd_error_t *
2353 drmach_cpu_new(drmach_device_t *dp)
2354 {
2355 	sbd_error_t	*err;
2356 	int		 portid;
2357 
2358 	err = drmach_device_get_prop(dp, "upa-portid", &portid);
2359 	if (err == NULL)
2360 		dp->unum = portid & 3;
2361 
2362 	dp->cm.isa = (void *)drmach_cpu_new;
2363 	dp->cm.release = drmach_cpu_release;
2364 	dp->cm.status = drmach_cpu_status;
2365 
2366 	(void) snprintf(dp->cm.name, sizeof (dp->cm.name), "%s%d", dp->type,
2367 	    dp->unum);
2368 
2369 	return (err);
2370 }
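
/*
 * Example (illustrative only): a cpu node whose upa-portid property
 * reads 22 yields unum 22 & 3 = 2 and, assuming a type of "cpu", the
 * name "cpu2".
 */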
2371 
2372 /*
2373  * drmach_cpu_obp_detach()
2374  *  This requires two steps: first, we must put the cpuid into the OBP
2375  *  idle loop (Idle in Program) state.  Then we call OBP to place the CPU
2376  *  into the "Detached" state, which does any special processing to
2377  *  actually detach the cpu, such as flushing ecache, and also ensures
2378  *  that a subsequent breakpoint won't restart the cpu (if it was just in
2379  *  Idle in Program state).
2380  */
2381 static void
2382 drmach_cpu_obp_detach(int cpuid)
2383 {
2384 	/*
2385 	 * Cpu may not be under OBP's control. E.g., if cpu exited to download
2386 	 * helper on a prior attach.
2387 	 */
2388 	if (CPU_SGN_EXISTS(cpuid) &&
2389 	    !SGN_CPU_IS_OS(cpuid) &&
2390 	    !SGN_CPU_IS_OBP(cpuid)) {
2391 		cmn_err(CE_WARN,
2392 		    "unexpected signature (0x%x) for cpu %d",
2393 		    get_cpu_sgn(cpuid), cpuid);
2394 	}
2395 
2396 	/*
2397 	 * Now we place the CPU into the "Detached" idle loop in OBP.
2398 	 * This is so that the CPU won't be restarted if we break into
2399 	 * OBP with a breakpoint or BREAK key from the console, and also
2400 	 * if we need to do any special processing, such as flushing the
2401 	 * cpu's ecache, disabling interrupts (by turning off the ET bit in
2402 	 * the PSR) and/or spinning in BBSRAM rather than global memory.
2403 	 */
2404 	DRMACH_PR("prom_starfire_rm_cpu(%d)\n", cpuid);
2405 	prom_starfire_rm_cpu(cpuid);
2406 }
2407 
2408 /*
2409  * drmach_cpu_obp_is_detached() returns TRUE if the cpu sigblock signature state
2410  * is SIGBST_DETACHED; otherwise it returns FALSE. This routine should only
2411  * be called after we have asked OBP to detach the CPU. It should NOT be
2412  * called as a check during any other flow.
2413  */
2414 static int
2415 drmach_cpu_obp_is_detached(int cpuid)
2416 {
2417 	if (!CPU_SGN_EXISTS(cpuid) ||
2418 	    (SGN_CPU_IS_OS(cpuid) && SGN_CPU_STATE_IS_DETACHED(cpuid)))
2419 		return (1);
2420 	else
2421 		return (0);
2422 }
2423 
2424 static int
2425 drmach_cpu_start(struct cpu *cp)
2426 {
2427 	int		cpuid = cp->cpu_id;
2428 	int		ntries = drmach_cpu_ntries;
2429 	extern void	restart_other_cpu(int);
2430 
2431 	ASSERT(MUTEX_HELD(&cpu_lock));
2432 	ASSERT(cpunodes[cpuid].nodeid != (pnode_t)0);
2433 
2434 	cp->cpu_flags &= ~CPU_POWEROFF;
2435 
2436 	/*
2437 	 * NOTE: restart_other_cpu pauses cpus during the
2438 	 *	 slave cpu start.  This helps to quiesce the
2439 	 *	 bus traffic a bit which makes the tick sync
2440 	 *	 routine in the prom more robust.
2441 	 */
2442 	DRMACH_PR("COLD START for cpu (%d)\n", cpuid);
2443 
2444 	prom_starfire_add_cpu(cpuid);
2445 
2446 	restart_other_cpu(cpuid);
2447 
2448 	/*
2449 	 * Wait for the cpu to reach its idle thread before
2450 	 * we zap him with a request to blow away the mappings
2451 	 * he (might) have for the drmach_shutdown_asm code
2452 	 * he may have executed on unconfigure.
2453 	 */
2454 	while ((cp->cpu_thread != cp->cpu_idle_thread) && (ntries > 0)) {
2455 		DELAY(drmach_cpu_delay);
2456 		ntries--;
2457 	}
2458 
2459 	DRMACH_PR("waited %d out of %d loops for cpu %d\n",
2460 	    drmach_cpu_ntries - ntries, drmach_cpu_ntries, cpuid);
2461 
2462 	xt_one(cpuid, vtag_flushpage_tl1,
2463 	    (uint64_t)drmach_shutdown_va, (uint64_t)ksfmmup);
2464 
2465 	return (0);
2466 }
2467 
2468 /*
2469  * A detaching CPU is xcalled with an xtrap to drmach_cpu_stop_self() after
2470  * it has been offlined. The function of this routine is to get the cpu
2471  * spinning in a safe place. The requirement is that the system will not
2472  * reference anything on the detaching board (memory and i/o is detached
2473  * elsewhere) and that the CPU not reference anything on any other board
2474  * in the system.  This isolation is required during and after the writes
2475  * to the domain masks to remove the board from the domain.
2476  *
2477  * To accomplish this isolation the following is done:
2478  *	1) Create a locked mapping to a location in BBSRAM where
2479  *	   the cpu will execute.
2480  *	2) Copy the target function (drmach_shutdown_asm) in which
2481  *	   the cpu will execute into BBSRAM.
2482  *	3) Jump into function within BBSRAM.
2483  *	   Function will:
2484  *	   3.1) Flush its Ecache (displacement).
2485  *	   3.2) Flush its Dcache with HW mechanism.
2486  *	   3.3) Flush its Icache with HW mechanism.
2487  *	   3.4) Flush all valid and _unlocked_ D-TLB entries.
2488  *	   3.5) Flush all valid and _unlocked_ I-TLB entries.
2489  *	   3.6) Clear xt_mb to signal completion. Note: cache line is
2490  *		recovered by drmach_cpu_poweroff().
2491  *	4) Jump into a tight loop.
2492  */
2493 #define	DRMACH_BBSRAM_OFFSET	0x1000
2494 
2495 static void
2496 drmach_cpu_stop_self(void)
2497 {
2498 	int		cpuid = (int)CPU->cpu_id;
2499 	tte_t		tte;
2500 	volatile uint_t	*src, *dst;
2501 	uint_t		funclen;
2502 	uint64_t	bbsram_pa, bbsram_offset;
2503 	uint_t		bbsram_pfn;
2504 	uint64_t	bbsram_addr;
2505 	void		(*bbsram_func)(uint64_t);
2506 	extern void	drmach_shutdown_asm(uint64_t);
2507 	extern void	drmach_shutdown_asm_end(void);
2508 
2509 	funclen = (uintptr_t)drmach_shutdown_asm_end - (uintptr_t)drmach_shutdown_asm;
2510 	ASSERT(funclen <= MMU_PAGESIZE);
2511 	/*
2512 	 * Locate this cpu's BBSRAM via the offset word at PSI base + 0xfe0.
2513 	 */
2514 	bbsram_pa = STARFIRE_UPAID2UPS(cpuid) | STARFIRE_PSI_BASE;
2515 	bbsram_offset = bbsram_pa | 0xfe0ULL;
2516 	bbsram_pa += ldphysio(bbsram_offset) + DRMACH_BBSRAM_OFFSET;
2517 
2518 	bbsram_pfn = (uint_t)(bbsram_pa >> MMU_PAGESHIFT);
2519 
2520 	bbsram_addr = (uint64_t)drmach_shutdown_va;
2521 	drmach_shutdown_asm_mbox->estack = bbsram_addr + (uint64_t)funclen;
2522 
2523 	tte.tte_inthi = TTE_VALID_INT | TTE_SZ_INT(TTE8K) |
2524 	    TTE_PFN_INTHI(bbsram_pfn);
2525 	tte.tte_intlo = TTE_PFN_INTLO(bbsram_pfn) |
2526 	    TTE_HWWR_INT | TTE_PRIV_INT | TTE_LCK_INT;
2527 	sfmmu_dtlb_ld_kva(drmach_shutdown_va, &tte);	/* load dtlb */
2528 	sfmmu_itlb_ld_kva(drmach_shutdown_va, &tte);	/* load itlb */
2529 
2530 	for (src = (uint_t *)drmach_shutdown_asm, dst = (uint_t *)bbsram_addr;
2531 	    src < (uint_t *)drmach_shutdown_asm_end; src++, dst++)
2532 		*dst = *src;
2533 
2534 	bbsram_func = (void (*)())bbsram_addr;
2535 	drmach_shutdown_asm_mbox->flushaddr = ecache_flushaddr;
2536 	drmach_shutdown_asm_mbox->size = (cpunodes[cpuid].ecache_size << 1);
2537 	drmach_shutdown_asm_mbox->linesize = cpunodes[cpuid].ecache_linesize;
2538 	drmach_shutdown_asm_mbox->physaddr =
2539 	    va_to_pa((void *)&drmach_xt_mb[cpuid]);
2540 
2541 	/*
2542 	 * Completion is signalled to drmach_cpu_poweroff() via
2543 	 * drmach_xt_mb, which the asm code clears.
2544 	 */
2545 
2546 	(*bbsram_func)(va_to_pa((void *)drmach_shutdown_asm_mbox));
2547 }
2548 
2549 static void
2550 drmach_cpu_shutdown_self(void)
2551 {
2552 	cpu_t		*cp = CPU;
2553 	int		cpuid = cp->cpu_id;
2554 	extern void	flush_windows(void);
2555 
2556 	flush_windows();
2557 
2558 	(void) spl8();
2559 
2560 	ASSERT(cp->cpu_intr_actv == 0);
2561 	ASSERT(cp->cpu_thread == cp->cpu_idle_thread ||
2562 	    cp->cpu_thread == cp->cpu_startup_thread);
2563 
2564 	cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;
2565 
2566 	drmach_cpu_stop_self();
2567 
2568 	cmn_err(CE_PANIC, "CPU %d FAILED TO SHUTDOWN", cpuid);
2569 }
2570 
2571 /* a helper routine to keep the math in one place */
2572 static processorid_t
2573 drmach_cpu_calc_id(drmach_device_t *dp)
2574 {
2575 	return (dp->bp->bnum * MAX_CPU_UNITS_PER_BOARD + dp->unum);
2576 }
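
/*
 * Example (illustrative only): with MAX_CPU_UNITS_PER_BOARD at 4, the
 * cpu with unum 2 on board 5 maps to processorid 5 * 4 + 2 = 22, and
 * DRMACH_CPUID2BNUM(22) recovers board 5.
 */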
2577 
2578 /*
2579  * Move bootproc (SIGBCPU) to another cpu.  If dst_cpu is NULL, a
2580  * destination cpu is chosen from the set of cpus not located on the
2581  * same board as the current bootproc cpu.
2582  */
2583 static sbd_error_t *
2584 drmach_cpu_juggle_bootproc(drmach_device_t *dst_cpu)
2585 {
2586 	processorid_t	 cpuid;
2587 	struct cpu	*cp;
2588 	sbd_error_t	*err;
2589 	int		 rv;
2590 
2591 	ASSERT(MUTEX_HELD(&cpu_lock));
2592 
2593 	/* dst_cpu is NULL when target cpu is unspecified. So, pick one. */
2594 	if (dst_cpu == NULL) {
2595 		int avoid_board = DRMACH_CPUID2BNUM(SIGBCPU->cpu_id);
2596 		int max_cpuid = MAX_BOARDS * MAX_CPU_UNITS_PER_BOARD;
2597 
2598 		for (cpuid = 0; cpuid < max_cpuid; cpuid++)
2599 			if (DRMACH_CPUID2BNUM(cpuid) != avoid_board) {
2600 				cp = cpu_get(cpuid);
2601 				if (cp != NULL && cpu_is_online(cp))
2602 					break;
2603 			}
2604 
2605 		if (cpuid == max_cpuid) {
2606 			err = drerr_new(1, ESTF_JUGGLE, NULL);
2607 			return (err);
2608 		}
2609 
2610 		/* else, cp points to the selected target cpu */
2611 	} else {
2612 		cpuid = drmach_cpu_calc_id(dst_cpu);
2613 
2614 		if ((cp = cpu_get(cpuid)) == NULL) {
2615 			err = drerr_new(1, ESTF_NODEV, "%s::%s",
2616 			    dst_cpu->bp->cm.name, dst_cpu->cm.name);
2617 			return (err);
2618 		}
2619 
2620 		if (cpuid == SIGBCPU->cpu_id) {
2621 			cmn_err(CE_WARN,
2622 			    "SIGBCPU(%d) same as new selection(%d)",
2623 			    SIGBCPU->cpu_id, cpuid);
2624 
2625 			/* technically not an error, but a no-op */
2626 			return (NULL);
2627 		}
2628 	}
2629 
2630 	cmn_err(CE_NOTE, "?relocating SIGBCPU from %d to %d",
2631 	    SIGBCPU->cpu_id, cpuid);
2632 
2633 	DRMACH_PR("moving SIGBCPU to CPU %d\n", cpuid);
2634 
2635 	/*
2636 	 * Tell OBP to initialize cvc-offset field of new CPU0
2637 	 * so that it's in sync with OBP and cvc_server
2638 	 */
2639 	prom_starfire_init_console(cpuid);
2640 
2641 	/*
2642 	 * Assign cvc to new cpu0's bbsram for I/O.  This has to be
2643 	 * done BEFORE cpu0 is moved via obp, since this logic
2644 	 * will cause obp_helper to switch to a different bbsram for
2645 	 * cvc I/O.  We don't want cvc writing to a buffer from which
2646 	 * nobody will pick up the data!
2647 	 */
2648 	cvc_assign_iocpu(cpuid);
2649 
2650 	rv = prom_starfire_move_cpu0(cpuid);
2651 
2652 	if (rv == 0) {
2653 		SIGBCPU = cp;
2654 
2655 		DRMACH_PR("successfully juggled to CPU %d\n", cpuid);
2656 		return (NULL);
2657 	} else {
2658 		DRMACH_PR("prom error: prom_starfire_move_cpu0(%d) "
2659 		    "returned %d\n", cpuid, rv);
2660 
2661 		/*
2662 		 * The move failed, hopefully obp_helper is still back
2663 		 * at the old bootproc.  Move cvc back there.
2664 		 */
2665 		cvc_assign_iocpu(SIGBCPU->cpu_id);
2666 
2668 		err = drerr_new(1, ESTF_MOVESIGB, "CPU %d", cpuid);
2669 		return (err);
2670 	}
2671 	/*NOTREACHED*/
2672 }
2673 
2674 static sbd_error_t *
2675 drmach_cpu_release(drmachid_t id)
2676 {
2677 	drmach_device_t	*dp;
2678 	processorid_t	 cpuid;
2679 	struct cpu	*cp;
2680 	sbd_error_t	*err;
2681 
2682 	if (!DRMACH_IS_CPU_ID(id))
2683 		return (drerr_new(0, ESTF_INAPPROP, NULL));
2684 	dp = id;
2685 	cpuid = drmach_cpu_calc_id(dp);
2686 
2687 	ASSERT(MUTEX_HELD(&cpu_lock));
2688 
2689 	cp = cpu_get(cpuid);
2690 	if (cp == NULL)
2691 		err = DRMACH_INTERNAL_ERROR();
2692 	else if (SIGBCPU->cpu_id == cp->cpu_id)
2693 		err = drmach_cpu_juggle_bootproc(NULL);
2694 	else
2695 		err = NULL;
2696 
2697 	return (err);
2698 }
2699 
2700 static sbd_error_t *
2701 drmach_cpu_status(drmachid_t id, drmach_status_t *stat)
2702 {
2703 	drmach_device_t *dp;
2704 
2705 	ASSERT(DRMACH_IS_CPU_ID(id));
2706 	dp = id;
2707 
2708 	stat->assigned = dp->bp->assigned;
2709 	stat->powered = dp->bp->powered;
2710 	mutex_enter(&cpu_lock);
2711 	stat->configured = (cpu_get(drmach_cpu_calc_id(dp)) != NULL);
2712 	mutex_exit(&cpu_lock);
2713 	stat->busy = dp->busy;
2714 	(void) strncpy(stat->type, dp->type, sizeof (stat->type));
2715 	stat->info[0] = '\0';
2716 
2717 	return (NULL);
2718 }
2719 
2720 sbd_error_t *
2721 drmach_cpu_disconnect(drmachid_t id)
2722 {
2723 	drmach_device_t	*cpu;
2724 	int		 cpuid;
2725 	int		 ntries;
2726 	int		 p;
2727 	u_longlong_t	 pc_addr;
2728 	uchar_t		 rvalue;
2729 
2730 	if (!DRMACH_IS_CPU_ID(id))
2731 		return (drerr_new(0, ESTF_INAPPROP, NULL));
2732 	cpu = id;
2733 
2734 	cpuid = drmach_cpu_calc_id(cpu);
2735 	if (SIGBCPU->cpu_id == cpuid) {
2736 		/* this cpu is SIGBCPU, can't disconnect */
2737 		return (drerr_new(1, ESTF_HASSIGB, "%s::%s",
2738 		    cpu->bp->cm.name, cpu->cm.name));
2739 	}
2740 
2741 	/*
2742 	 * Make sure SIGBST_DETACHED is set before
2743 	 * mapping out the sig block.
2744 	 */
2745 	ntries = drmach_cpu_ntries;
2746 	while (!drmach_cpu_obp_is_detached(cpuid) && ntries) {
2747 		DELAY(drmach_cpu_delay);
2748 		ntries--;
2749 	}
2750 	if (!drmach_cpu_obp_is_detached(cpuid)) {
2751 		cmn_err(CE_WARN, "failed to mark cpu %d detached in sigblock",
2752 		    cpuid);
2753 	}
2754 
2755 	/* map out signature block */
2756 	if (CPU_SGN_EXISTS(cpuid)) {
2757 		CPU_SGN_MAPOUT(cpuid);
2758 	}
2759 
2760 	/*
2761 	 * We now PC IDLE the processor to guarantee we
2762 	 * stop any transactions from coming from it.
2763 	 */
2764 	p = cpu->unum & 1;
2765 	pc_addr = STARFIRE_BB_PC_ADDR(cpu->bp->bnum, cpu->unum, 0);
2766 
2767 	DRMACH_PR("PC idle cpu %d (addr = 0x%llx, port = %d, p = %d)",
2768 	    drmach_cpu_calc_id(cpu), pc_addr, cpu->unum, p);
2769 
2770 	rvalue = ldbphysio(pc_addr);
2771 	rvalue |= STARFIRE_BB_PC_IDLE(p);
2772 	stbphysio(pc_addr, rvalue);
2773 	DELAY(50000);
2774 
2775 	return (NULL);
2776 }
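
/*
 * Example (illustrative only): for the cpu with unum 3 on board 2,
 * p is 3 & 1 = 1, and the read-modify-write above ORs
 * STARFIRE_BB_PC_IDLE(1) into the port controller register at
 * STARFIRE_BB_PC_ADDR(2, 3, 0), idling that processor's port.
 */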
2777 
2778 sbd_error_t *
2779 drmach_cpu_get_id(drmachid_t id, processorid_t *cpuid)
2780 {
2781 	drmach_device_t *cpu;
2782 
2783 	if (!DRMACH_IS_CPU_ID(id))
2784 		return (drerr_new(0, ESTF_INAPPROP, NULL));
2785 	cpu = id;
2786 
2787 	*cpuid = drmach_cpu_calc_id(cpu);
2788 	return (NULL);
2789 }
2790 
2791 sbd_error_t *
2792 drmach_cpu_get_impl(drmachid_t id, int *ip)
2793 {
2794 	drmach_device_t *cpu;
2795 	int		impl;
2796 
2797 	if (!DRMACH_IS_CPU_ID(id))
2798 		return (drerr_new(0, ESTF_INAPPROP, NULL));
2799 
2800 	cpu = id;
2801 
2802 	if (drmach_node_get_prop(cpu->node, "implementation#", &impl) == -1) {
2803 		return (DRMACH_INTERNAL_ERROR());
2804 	}
2805 
2806 	*ip = impl;
2807 
2808 	return (NULL);
2809 }
2810 
2811 void
2812 drmach_cpu_flush_ecache_sync(void)
2813 {
2814 	ASSERT(curthread->t_bound_cpu == CPU);
2815 
2816 	/*
2817 	 * Now let's flush our ecache thereby removing all references
2818 	 * to the target (detaching) memory from all ecache's in
2819 	 * system.
2820 	 */
2821 	cpu_flush_ecache();
2822 
2823 	/*
2824 	 * Delay 100 usec out of paranoia to ensure everything
2825 	 * (hardware queues) has drained before we start reprogramming
2826 	 * the hardware.
2827 	 */
2828 	DELAY(100);
2829 }
2830 
2831 sbd_error_t *
2832 drmach_get_dip(drmachid_t id, dev_info_t **dip)
2833 {
2834 	drmach_device_t	*dp;
2835 
2836 	if (!DRMACH_IS_DEVICE_ID(id))
2837 		return (drerr_new(0, ESTF_INAPPROP, NULL));
2838 	dp = id;
2839 
2840 	*dip = drmach_node_get_dip(dp->node);
2841 	return (NULL);
2842 }
2843 
2844 sbd_error_t *
2845 drmach_io_is_attached(drmachid_t id, int *yes)
2846 {
2847 	drmach_device_t *dp;
2848 	dev_info_t	*dip;
2849 	int		state;
2850 
2851 	if (!DRMACH_IS_IO_ID(id))
2852 		return (drerr_new(0, ESTF_INAPPROP, NULL));
2853 	dp = id;
2854 
2855 	dip = drmach_node_get_dip(dp->node);
2856 	if (dip == NULL) {
2857 		*yes = 0;
2858 		return (NULL);
2859 	}
2860 
2861 	state = ddi_get_devstate(dip);
2862 	*yes = (i_ddi_devi_attached(dip) || (state == DDI_DEVSTATE_UP));
2863 
2864 	return (NULL);
2865 }
2866 
2867 sbd_error_t *
2868 drmach_io_pre_release(drmachid_t id)
2869 {
2870 	if (!DRMACH_IS_IO_ID(id))
2871 		return (drerr_new(0, ESTF_INAPPROP, NULL));
2872 	return (NULL);
2873 }
2874 
2875 static sbd_error_t *
2876 drmach_io_release(drmachid_t id)
2877 {
2878 	if (!DRMACH_IS_IO_ID(id))
2879 		return (drerr_new(0, ESTF_INAPPROP, NULL));
2880 	return (NULL);
2881 }
2882 
2883 sbd_error_t *
2884 drmach_io_unrelease(drmachid_t id)
2885 {
2886 	if (!DRMACH_IS_IO_ID(id))
2887 		return (drerr_new(0, ESTF_INAPPROP, NULL));
2888 	return (NULL);
2889 }
2890 
2891 /*ARGSUSED*/
2892 sbd_error_t *
2893 drmach_io_post_release(drmachid_t id)
2894 {
2895 	return (NULL);
2896 }
2897 
2898 /*ARGSUSED*/
2899 sbd_error_t *
2900 drmach_io_post_attach(drmachid_t id)
2901 {
2902 	return (NULL);
2903 }
2904 
2905 static sbd_error_t *
2906 drmach_io_status(drmachid_t id, drmach_status_t *stat)
2907 {
2908 	drmach_device_t *dp;
2909 	sbd_error_t	*err;
2910 	int		 configured;
2911 
2912 	ASSERT(DRMACH_IS_IO_ID(id));
2913 	dp = id;
2914 
2915 	err = drmach_io_is_attached(id, &configured);
2916 	if (err)
2917 		return (err);
2918 
2919 	stat->assigned = dp->bp->assigned;
2920 	stat->powered = dp->bp->powered;
2921 	stat->configured = (configured != 0);
2922 	stat->busy = dp->busy;
2923 	(void) strncpy(stat->type, dp->type, sizeof (stat->type));
2924 	stat->info[0] = '\0';
2925 
2926 	return (NULL);
2927 }
2928 
2929 static sbd_error_t *
2930 drmach_mem_new(drmach_device_t *dp)
2931 {
2932 	dp->unum = 0;
2933 	dp->cm.isa = (void *)drmach_mem_new;
2934 	dp->cm.release = drmach_mem_release;
2935 	dp->cm.status = drmach_mem_status;
2936 
2937 	(void) snprintf(dp->cm.name, sizeof (dp->cm.name), "%s", dp->type);
2938 
2939 	return (NULL);
2940 }
2941 
2942 sbd_error_t *
2943 drmach_mem_add_span(drmachid_t id, uint64_t basepa, uint64_t size)
2944 {
2945 	pfn_t		basepfn = (pfn_t)(basepa >> PAGESHIFT);
2946 	pgcnt_t		npages = (pgcnt_t)(size >> PAGESHIFT);
2947 	pda_handle_t	ph;
2948 	int		rv;
2949 
2950 	ASSERT(size != 0);
2951 
2952 	if (!DRMACH_IS_MEM_ID(id))
2953 		return (drerr_new(0, ESTF_INAPPROP, NULL));
2954 
2955 	rv = kcage_range_add(basepfn, npages, KCAGE_DOWN);
2956 	if (rv == ENOMEM) {
2957 		cmn_err(CE_WARN, "%lu megabytes not available to kernel cage",
2958 		    (ulong_t)(size == 0 ? 0 : size / MBYTE));
2959 	} else if (rv != 0) {
2960 		/* catch this in debug kernels */
2961 		ASSERT(0);
2962 
2963 		cmn_err(CE_WARN, "unexpected kcage_range_add"
2964 		    " return value %d", rv);
2965 	}
2966 
2967 	/*
2968 	 * Update the PDA (post2obp) structure with the
2969 	 * range of the newly added memory.
2970 	 */
2971 	ph = drmach_pda_open();
2972 	if (ph != NULL) {
2973 		pda_mem_add_span(ph, basepa, size);
2974 		pda_close(ph);
2975 	}
2976 
2977 	return (NULL);
2978 }
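
/*
 * Example (illustrative only): adding a 256MB span based at 16GB means
 * basepa = 0x400000000 and size = 0x10000000; with 8K pages (PAGESHIFT
 * of 13) that yields basepfn 0x200000 and npages 0x8000.
 */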
2979 
2980 sbd_error_t *
2981 drmach_mem_del_span(drmachid_t id, uint64_t basepa, uint64_t size)
2982 {
2983 	drmach_device_t	*mem = id;
2984 	pfn_t		basepfn = (pfn_t)(basepa >> PAGESHIFT);
2985 	pgcnt_t		npages = (pgcnt_t)(size >> PAGESHIFT);
2986 	uint_t		mcreg;
2987 	sbd_error_t	*err;
2988 	pda_handle_t	ph;
2989 	int		rv;
2990 
2991 	err = drmach_read_mc_asr(id, &mcreg);
2992 	if (err)
2993 		return (err);
2994 	else if (mcreg & STARFIRE_MC_INTERLEAVE_MASK) {
2995 		return (drerr_new(1, ESTF_INTERBOARD, "%s::%s",
2996 		    mem->bp->cm.name, mem->cm.name));
2997 	}
2998 
2999 	if (size > 0) {
3000 		rv = kcage_range_delete_post_mem_del(basepfn, npages);
3001 		if (rv != 0) {
3002 			cmn_err(CE_WARN,
3003 			    "unexpected kcage_range_delete_post_mem_del"
3004 			    " return value %d", rv);
3005 			return (DRMACH_INTERNAL_ERROR());
3006 		}
3007 	}
3008 
3009 	/*
3010 	 * Update the PDA (post2obp) structure with the
3011 	 * range of removed memory.
3012 	 */
3013 	ph = drmach_pda_open();
3014 	if (ph != NULL) {
3015 		if (size > 0)
3016 			pda_mem_del_span(ph, basepa, size);
3017 
3018 		/* update PDA to board's new mc register settings */
3019 		pda_mem_sync(ph, mem->bp->bnum, 0);
3020 
3021 		pda_close(ph);
3022 	}
3023 
3024 	return (NULL);
3025 }
3026 
3027 /* support routine for enable and disable */
3028 static sbd_error_t *
3029 drmach_mem_update_interconnect(drmachid_t id, uint_t mcreg)
3030 {
3031 	drmach_device_t	*dp;
3032 	pda_handle_t	 ph;
3033 	int		 b;
3034 
3035 	if (!DRMACH_IS_MEM_ID(id))
3036 		return (drerr_new(0, ESTF_INAPPROP, NULL));
3037 	dp = id;
3038 
3039 	ph = drmach_pda_open();
3040 	if (ph == NULL)
3041 		return (DRMACH_INTERNAL_ERROR());
3042 
3043 	for (b = 0; b < MAX_BOARDS; b++) {
3044 		int		p;
3045 		int		rv;
3046 		ushort_t	bda_proc, bda_ioc;
3047 		board_desc_t	*bdesc;
3048 
3049 		if (pda_board_present(ph, b) == 0)
3050 			continue;
3051 
3052 		bdesc = (board_desc_t *)pda_get_board_info(ph, b);
3053 
3054 		/*
3055 		 * Update PCs for CPUs.
3056 		 */
3057 
3058 		/* make sure definition in platmod is in sync with pda */
3059 		ASSERT(MAX_PROCMODS == MAX_CPU_UNITS_PER_BOARD);
3060 
3061 		bda_proc = bdesc->bda_proc;
3062 		for (p = 0; p < MAX_PROCMODS; p++) {
3063 			if (BDA_NBL(bda_proc, p) != BDAN_GOOD)
3064 				continue;
3065 
3066 			rv = pc_madr_add(b, dp->bp->bnum, p, mcreg);
3067 			if (rv) {
3068 				pda_close(ph);
3069 				return (DRMACH_INTERNAL_ERROR());
3070 			}
3071 		}
3072 
3073 		/*
3074 		 * Update PCs for IOCs.
3075 		 */
3076 
3077 		/* make sure definition in platmod is in sync with pda */
3078 		ASSERT(MAX_IOCS == MAX_IO_UNITS_PER_BOARD);
3079 
3080 		bda_ioc = bdesc->bda_ioc;
3081 		for (p = 0; p < MAX_IOCS; p++) {
3082 			if (BDA_NBL(bda_ioc, p) != BDAN_GOOD)
3083 				continue;
3084 
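			/* IOC slots follow the 4 procmod slots in the PC */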
3085 			rv = pc_madr_add(b, dp->bp->bnum, p + 4, mcreg);
3086 			if (rv) {
3087 				pda_close(ph);
3088 				return (DRMACH_INTERNAL_ERROR());
3089 			}
3090 		}
3091 	}
3092 
3093 	pda_close(ph);
3094 	return (NULL);
3095 }
3096 
3097 sbd_error_t *
3098 drmach_mem_disable(drmachid_t id)
3099 {
3100 	sbd_error_t	*err;
3101 	uint_t		 mcreg;
3102 
3103 	err = drmach_read_mc_asr(id, &mcreg);
3104 	if (err == NULL) {
3105 		ASSERT(mcreg & STARFIRE_MC_MEM_PRESENT_MASK);
3106 
3107 		/* Turn off presence bit. */
3108 		mcreg &= ~STARFIRE_MC_MEM_PRESENT_MASK;
3109 
3110 		err = drmach_mem_update_interconnect(id, mcreg);
3111 		if (err == NULL)
3112 			err = drmach_write_mc_asr(id, mcreg);
3113 	}
3114 
3115 	return (err);
3116 }
3117 
3118 sbd_error_t *
3119 drmach_mem_enable(drmachid_t id)
3120 {
3121 	sbd_error_t	*err;
3122 	uint_t		 mcreg;
3123 
3124 	err = drmach_read_mc_asr(id, &mcreg);
3125 	if (err == NULL) {
3126 		mcreg |= STARFIRE_MC_MEM_PRESENT_MASK;
3127 
3128 		err = drmach_write_mc_asr(id, mcreg);
3129 		if (err == NULL)
3130 			err = drmach_mem_update_interconnect(id, mcreg);
3131 	}
3132 
3133 	return (err);
3134 }
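
/*
 * Note the mirror-image ordering relative to drmach_mem_disable():
 * on enable the MC ASR is written before the interconnect PCs are
 * updated, and on disable the PCs are updated before the ASR is
 * cleared. Either way, the PCs reference this memory only while the
 * MC presence bit is asserted.
 */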
3135 
3136 sbd_error_t *
3137 drmach_mem_get_alignment(drmachid_t id, uint64_t *mask)
3138 {
3139 	drmach_device_t	*mem;
3140 	sbd_error_t	*err;
3141 	pnode_t		 nodeid;
3142 
3143 	if (!DRMACH_IS_MEM_ID(id))
3144 		return (drerr_new(0, ESTF_INAPPROP, NULL));
3145 	mem = id;
3146 
3147 	nodeid = drmach_node_get_dnode(mem->node);
3148 	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE)
3149 		err = DRMACH_INTERNAL_ERROR();
3150 	else {
3151 		uint64_t size;
3152 
3153 		size = mc_get_alignment_mask(nodeid);
3154 		if (size == (uint64_t)-1)
3155 			err = DRMACH_INTERNAL_ERROR();
3156 		else {
3157 			*mask = size - 1;
3158 			err = NULL;
3159 		}
3160 	}
3161 
3162 	return (err);
3163 }
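
/*
 * Example (illustrative only): if mc_get_alignment_mask() reports an
 * alignment size of 4GB (0x100000000), the returned *mask is
 * 0xffffffff, and a base physical address is correctly aligned exactly
 * when (pa & *mask) == 0.
 */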
3164 
3165 sbd_error_t *
3166 drmach_mem_get_base_physaddr(drmachid_t id, uint64_t *pa)
3167 {
3168 	sbd_error_t	*err;
3169 	uint_t		 mcreg;
3170 
3171 	err = drmach_read_mc_asr(id, &mcreg);
3172 	if (err == NULL)
3173 		*pa = mc_asr_to_pa(mcreg);
3174 
3175 	return (err);
3176 }
3177 
3178 /*
3179  * Use of this routine after copy/rename will yield incorrect results,
3180  * because the OBP MEMAVAIL property will not correctly reflect the
3181  * programming of the MCs.
3182  */
3183 sbd_error_t *
3184 drmach_mem_get_memlist(drmachid_t id, struct memlist **ml)
3185 {
3186 	drmach_device_t	*mem;
3187 	int		rv, i, rlen, rblks;
3188 	sbd_error_t	*err;
3189 	struct memlist	*mlist;
3190 	struct sf_memunit_regspec *rlist;
3191 
3192 	if (!DRMACH_IS_MEM_ID(id))
3193 		return (drerr_new(0, ESTF_INAPPROP, NULL));
3194 	mem = id;
3195 
3196 	err = drmach_device_get_proplen(mem, "dr-available", &rlen);
3197 	if (err)
3198 		return (err);
3199 
3200 	rlist = kmem_zalloc(rlen, KM_SLEEP);
3201 
3202 	err = drmach_device_get_prop(mem, "dr-available", rlist);
3203 	if (err) {
3204 		kmem_free(rlist, rlen);
3205 		return (err);
3206 	}
3207 
3208 	mlist = NULL;
3209 	rblks = rlen / sizeof (struct sf_memunit_regspec);
3210 	for (i = 0; i < rblks; i++) {
3211 		uint64_t	addr, size;
3212 
3213 		addr  = (uint64_t)rlist[i].regspec_addr_hi << 32;
3214 		addr |= (uint64_t)rlist[i].regspec_addr_lo;
3215 		size  = (uint64_t)rlist[i].regspec_size_hi << 32;
3216 		size |= (uint64_t)rlist[i].regspec_size_lo;
3217 
3218 		mlist = memlist_add_span(mlist, addr, size);
3219 	}
3220 
3221 	kmem_free(rlist, rlen);
3222 
3223 	/*
3224 	 * Make sure the incoming memlist doesn't already
3225 	 * intersect with what's present in the system (phys_install).
3226 	 */
3227 	memlist_read_lock();
3228 	rv = memlist_intersect(phys_install, mlist);
3229 	memlist_read_unlock();
3230 	if (rv) {
3231 #ifdef DEBUG
3232 		DRMACH_PR("OBP derived memlist intersects"
3233 		    " with phys_install\n");
3234 		memlist_dump(mlist);
3235 
3236 		DRMACH_PR("phys_install memlist:\n");
3237 		memlist_dump(phys_install);
3238 #endif
3239 
3240 		memlist_delete(mlist);
3241 		return (DRMACH_INTERNAL_ERROR());
3242 	}
3243 
3244 #ifdef DEBUG
3245 	DRMACH_PR("OBP derived memlist:");
3246 	memlist_dump(mlist);
3247 #endif
3248 
3249 	*ml = mlist;
3250 	return (NULL);
3251 }
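
/*
 * Example (illustrative only): a dr-available regspec with
 * regspec_addr_hi 0x4, regspec_addr_lo 0x0, regspec_size_hi 0x0 and
 * regspec_size_lo 0x80000000 decodes to a 2GB span based at 16GB,
 * i.e. [0x400000000, 0x480000000).
 */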
3252 
3253 sbd_error_t *
3254 drmach_mem_get_size(drmachid_t id, uint64_t *bytes)
3255 {
3256 	drmach_device_t	*mem;
3257 	pda_handle_t	ph;
3258 	pgcnt_t		npages;
3259 
3260 	if (!DRMACH_IS_MEM_ID(id))
3261 		return (drerr_new(0, ESTF_INAPPROP, NULL));
3262 	mem = id;
3263 
3264 	ph = drmach_pda_open();
3265 	if (ph == NULL)
3266 		return (DRMACH_INTERNAL_ERROR());
3267 
3268 	npages = pda_get_mem_size(ph, mem->bp->bnum);
3269 	*bytes = (uint64_t)npages << PAGESHIFT;
3270 
3271 	pda_close(ph);
3272 	return (NULL);
3273 }
3274 
3275 sbd_error_t *
3276 drmach_mem_get_slice_size(drmachid_t id, uint64_t *bytes)
3277 {
3278 	if (!DRMACH_IS_MEM_ID(id))
3279 		return (drerr_new(0, ESTF_INAPPROP, NULL));
3280 
3281 	*bytes = mc_get_mem_alignment();
3282 	return (NULL);
3283 }
3284 
3285 /* field debugging tool */
3286 processorid_t drmach_mem_cpu_affinity_nail = 0;
3287 
3288 processorid_t
3289 drmach_mem_cpu_affinity(drmachid_t id)
3290 {
3291 	drmach_device_t	*mp;
3292 	drmach_board_t	*bp;
3293 	processorid_t	 cpuid;
3294 
3295 	if (!DRMACH_IS_MEM_ID(id))
3296 		return (CPU_CURRENT);
3297 
3298 	if (drmach_mem_cpu_affinity_nail) {
3299 		cpuid = drmach_mem_cpu_affinity_nail;
3300 
3301 		if (cpuid < 0 || cpuid >= NCPU)
3302 			return (CPU_CURRENT);
3303 
3304 		mutex_enter(&cpu_lock);
3305 		if (cpu[cpuid] == NULL || !CPU_ACTIVE(cpu[cpuid]))
3306 			cpuid = CPU_CURRENT;
3307 		mutex_exit(&cpu_lock);
3308 
3309 		return (cpuid);
3310 	}
3311 
3312 	/* try to choose a proc on the target board */
3313 	mp = id;
3314 	bp = mp->bp;
3315 	if (bp->devices) {
3316 		int		rv;
3317 		int		d_idx;
3318 		drmachid_t	d_id;
3319 
3320 		rv = drmach_array_first(bp->devices, &d_idx, &d_id);
3321 		while (rv == 0) {
3322 			if (DRMACH_IS_CPU_ID(d_id)) {
3323 				cpuid = drmach_cpu_calc_id(d_id);
3324 
3325 				mutex_enter(&cpu_lock);
3326 				if (cpu[cpuid] && CPU_ACTIVE(cpu[cpuid])) {
3327 					mutex_exit(&cpu_lock);
3328 					DRMACH_PR("drmach_mem_cpu_affinity: "
3329 					    "selected cpuid=%d\n", cpuid);
3330 					return (cpuid);
3331 				} else {
3332 					mutex_exit(&cpu_lock);
3333 				}
3334 			}
3335 
3336 			rv = drmach_array_next(bp->devices, &d_idx, &d_id);
3337 		}
3338 	}
3339 
3340 	/* otherwise, this proc, wherever it is */
3341 	DRMACH_PR("drmach_mem_cpu_affinity: using default CPU_CURRENT\n");
3342 
3343 	return (CPU_CURRENT);
3344 }
3345 
3346 static sbd_error_t *
3347 drmach_mem_release(drmachid_t id)
3348 {
3349 	if (!DRMACH_IS_MEM_ID(id))
3350 		return (drerr_new(0, ESTF_INAPPROP, NULL));
3351 	return (NULL);
3352 }
3353 
3354 static sbd_error_t *
3355 drmach_mem_status(drmachid_t id, drmach_status_t *stat)
3356 {
3357 	drmach_device_t *dp;
3358 	sbd_error_t	*err;
3359 	uint64_t	 pa, slice_size;
3360 	struct memlist	*ml;
3361 
3362 	ASSERT(DRMACH_IS_MEM_ID(id));
3363 	dp = id;
3364 
3365 	/* get starting physical address of target memory */
3366 	err = drmach_mem_get_base_physaddr(id, &pa);
3367 	if (err)
3368 		return (err);
3369 
3370 	/* round down to slice boundary */
3371 	slice_size = mc_get_mem_alignment();
3372 	pa &= ~ (slice_size - 1);
3373 
3374 	/* stop at first span that is in slice */
3375 	memlist_read_lock();
3376 	for (ml = phys_install; ml; ml = ml->ml_next)
3377 		if (ml->ml_address >= pa && ml->ml_address < pa + slice_size)
3378 			break;
3379 	memlist_read_unlock();
3380 
3381 	stat->assigned = dp->bp->assigned;
3382 	stat->powered = dp->bp->powered;
3383 	stat->configured = (ml != NULL);
3384 	stat->busy = dp->busy;
3385 	(void) strncpy(stat->type, dp->type, sizeof (stat->type));
3386 	stat->info[0] = '\0';
3387 
3388 	return (NULL);
3389 }
3390 
3391 static int
3392 drmach_detach_board(void *arg)
3393 {
3394 	cpuset_t	cset;
3395 	int		retval;
3396 	drmach_board_t	*bp = (drmach_board_t *)arg;
3397 
3398 	cset = cpu_ready_set;
3399 	promsafe_xc_attention(cset);
3400 
3401 	retval = prom_starfire_rm_brd(bp->bnum);
3402 
3403 	xc_dismissed(cset);
3404 
3405 	return (retval);
3406 }
3407 
3408 sbd_error_t *
3409 drmach_board_deprobe(drmachid_t id)
3410 {
3411 	drmach_board_t	*bp;
3412 	int		 retval;
3413 
3414 	if (!DRMACH_IS_BOARD_ID(id))
3415 		return (drerr_new(0, ESTF_INAPPROP, NULL));
3416 	bp = id;
3417 
3418 	cmn_err(CE_CONT, "DR: PROM detach board %d\n", bp->bnum);
3419 
3420 	retval = prom_tree_update(drmach_detach_board, bp);
3421 
3422 	if (retval == 0)
3423 		return (NULL);
3424 	else {
3425 		cmn_err(CE_WARN, "prom error: prom_starfire_rm_brd(%d) "
3426 		    "returned %d", bp->bnum, retval);
3427 		return (drerr_new(1, ESTF_DEPROBE, "%s", bp->cm.name));
3428 	}
3429 }
3430 
3431 /*ARGSUSED*/
3432 static sbd_error_t *
3433 drmach_pt_juggle_bootproc(drmachid_t id, drmach_opts_t *opts)
3434 {
3435 	drmach_device_t	*cpu;
3436 	sbd_error_t	*err;
3437 
3438 	if (!DRMACH_IS_CPU_ID(id))
3439 		return (drerr_new(0, ESTF_INAPPROP, NULL));
3440 	cpu = id;
3441 
3442 	mutex_enter(&cpu_lock);
3443 
3444 	err = drmach_cpu_juggle_bootproc(cpu);
3445 
3446 	mutex_exit(&cpu_lock);
3447 
3448 	return (err);
3449 }
3450 
3451 /*ARGSUSED*/
3452 static sbd_error_t *
3453 drmach_pt_dump_pdainfo(drmachid_t id, drmach_opts_t *opts)
3454 {
3455 	drmach_board_t	*bp;
3456 	int		board;
3457 	int		i;
3458 	pda_handle_t	ph;
3459 	board_desc_t	*bdesc;
3460 
3461 	if (!DRMACH_IS_BOARD_ID(id))
3462 		return (drerr_new(0, ESTF_INAPPROP, NULL));
3463 	bp = id;
3464 	board = bp->bnum;
3465 
3466 	ph = drmach_pda_open();
3467 	if (ph == NULL)
3468 		return (DRMACH_INTERNAL_ERROR());
3469 
3470 	if (pda_board_present(ph, board) == 0) {
3471 		cmn_err(CE_CONT, "board %d is MISSING\n", board);
3472 		pda_close(ph);
3473 		return (DRMACH_INTERNAL_ERROR());
3474 	}
3475 
3476 	cmn_err(CE_CONT, "board %d is PRESENT\n", board);
3477 
3478 	bdesc = (board_desc_t *)pda_get_board_info(ph, board);
3479 	if (bdesc == NULL) {
3480 		cmn_err(CE_CONT,
3481 		    "no board descriptor found for board %d\n",
3482 		    board);
3483 		pda_close(ph);
3484 		return (DRMACH_INTERNAL_ERROR());
3485 	}
3486 
3487 	/* make sure definition in platmod is in sync with pda */
3488 	ASSERT(MAX_PROCMODS == MAX_CPU_UNITS_PER_BOARD);
3489 
3490 	for (i = 0; i < MAX_PROCMODS; i++) {
3491 		if (BDA_NBL(bdesc->bda_proc, i) == BDAN_GOOD)
3492 			cmn_err(CE_CONT,
3493 			    "proc %d.%d PRESENT\n", board, i);
3494 		else
3495 			cmn_err(CE_CONT,
3496 			    "proc %d.%d MISSING\n", board, i);
3497 	}
3498 
3499 	for (i = 0; i < MAX_MGROUPS; i++) {
3500 		if (BDA_NBL(bdesc->bda_mgroup, i) == BDAN_GOOD)
3501 			cmn_err(CE_CONT,
3502 			    "mgroup %d.%d PRESENT\n", board, i);
3503 		else
3504 			cmn_err(CE_CONT,
3505 			    "mgroup %d.%d MISSING\n", board, i);
3506 	}
3507 
3508 	/* make sure definition in platmod is in sync with pda */
3509 	ASSERT(MAX_IOCS == MAX_IO_UNITS_PER_BOARD);
3510 
3511 	for (i = 0; i < MAX_IOCS; i++) {
3512 		int	s;
3513 
3514 		if (BDA_NBL(bdesc->bda_ioc, i) == BDAN_GOOD) {
3515 			cmn_err(CE_CONT,
3516 			    "ioc %d.%d PRESENT\n", board, i);
3517 			for (s = 0; s < MAX_SLOTS_PER_IOC; s++) {
3518 				if (BDA_NBL(bdesc->bda_ios[i], s) != BDAN_GOOD)
3519 					continue;
3520 				cmn_err(CE_CONT,
3521 				    "..scard %d.%d.%d PRESENT\n",
3522 				    board, i, s);
3523 			}
3524 		} else {
3525 			cmn_err(CE_CONT,
3526 			    "ioc %d.%d MISSING\n",
3527 			    board, i);
3528 		}
3529 	}
3530 
3531 	cmn_err(CE_CONT,
3532 	    "board %d memsize = %d pages\n",
3533 	    board, pda_get_mem_size(ph, board));
3534 
3535 	pda_close(ph);
3536 
3537 	return (NULL);
3538 }
3539 
3540 /*ARGSUSED*/
3541 sbd_error_t *
3542 drmach_pt_readmem(drmachid_t id, drmach_opts_t *opts)
3543 {
3544 	struct memlist	*ml;
3545 	uint64_t	src_pa;
3546 	uint64_t	dst_pa;
3547 	uint64_t	dst;
3548 
3549 	dst_pa = va_to_pa(&dst);
3550 
3551 	memlist_read_lock();
3552 	for (ml = phys_install; ml; ml = ml->ml_next) {
3553 		uint64_t	nbytes;
3554 
3555 		src_pa = ml->ml_address;
3556 		nbytes = ml->ml_size;
3557 
3558 		while (nbytes != 0ull) {
3559 
3560 			/* copy 32 bytes at src_pa to dst_pa */
3561 			bcopy32_il(src_pa, dst_pa);
3562 
3563 			/* increment by 32 bytes */
3564 			src_pa += (4 * sizeof (uint64_t));
3565 
3566 			/* decrement by 32 bytes */
3567 			nbytes -= (4 * sizeof (uint64_t));
3568 		}
3569 	}
3570 	memlist_read_unlock();
3571 
3572 	return (NULL);
3573 }
3574 
3575 static struct {
3576 	const char	*name;
3577 	sbd_error_t	*(*handler)(drmachid_t id, drmach_opts_t *opts);
3578 } drmach_pt_arr[] = {
3579 	{ "juggle",		drmach_pt_juggle_bootproc	},
3580 	{ "pda",		drmach_pt_dump_pdainfo		},
3581 	{ "readmem",		drmach_pt_readmem		},
3582 
3583 	/* the following line must always be last */
3584 	{ NULL,			NULL				}
3585 };
3586 
3587 /*ARGSUSED*/
3588 sbd_error_t *
3589 drmach_passthru(drmachid_t id, drmach_opts_t *opts)
3590 {
3591 	int		i;
3592 	sbd_error_t	*err;
3593 
3594 	i = 0;
3595 	while (drmach_pt_arr[i].name != NULL) {
3596 		int len = strlen(drmach_pt_arr[i].name);
3597 
3598 		if (strncmp(drmach_pt_arr[i].name, opts->copts, len) == 0)
3599 			break;
3600 
3601 		i += 1;
3602 	}
3603 
3604 	if (drmach_pt_arr[i].name == NULL)
3605 		err = drerr_new(0, ESTF_UNKPTCMD, opts->copts);
3606 	else
3607 		err = (*drmach_pt_arr[i].handler)(id, opts);
3608 
3609 	return (err);
3610 }
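
/*
 * Example (illustrative only): a passthru request arriving with
 * opts->copts set to "pda" matches the "pda" table entry by prefix
 * and dispatches to drmach_pt_dump_pdainfo(); an unrecognized string
 * such as "frobnicate" falls off the end of drmach_pt_arr and yields
 * an ESTF_UNKPTCMD error.
 */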
3611 
3612 sbd_error_t *
3613 drmach_release(drmachid_t id)
3614 {
3615 	drmach_common_t *cp;
3616 	if (!DRMACH_IS_DEVICE_ID(id))
3617 		return (drerr_new(0, ESTF_INAPPROP, NULL));
3618 	cp = id;
3619 
3620 	return (cp->release(id));
3621 }
3622 
3623 sbd_error_t *
3624 drmach_status(drmachid_t id, drmach_status_t *stat)
3625 {
3626 	drmach_common_t *cp;
3627 
3628 	if (!DRMACH_IS_ID(id))
3629 		return (drerr_new(0, ESTF_NOTID, NULL));
3630 	cp = id;
3631 
3632 	return (cp->status(id, stat));
3633 }
3634 
3635 sbd_error_t *
3636 drmach_unconfigure(drmachid_t id, int flags)
3637 {
3638 	drmach_device_t	*dp;
3639 	pnode_t		 nodeid;
3640 	dev_info_t	*dip, *fdip = NULL;
3641 
3642 	if (!DRMACH_IS_DEVICE_ID(id))
3643 		return (drerr_new(0, ESTF_INAPPROP, NULL));
3644 
3645 	dp = id;
3646 
3647 	nodeid = drmach_node_get_dnode(dp->node);
3648 	if (nodeid == OBP_NONODE)
3649 		return (DRMACH_INTERNAL_ERROR());
3650 
3651 	dip = e_ddi_nodeid_to_dip(nodeid);
3652 	if (dip == NULL)
3653 		return (NULL);
3654 
3655 	/*
3656 	 * Branch already held, so hold acquired in
3657 	 * e_ddi_nodeid_to_dip() can be released
3658 	 */
3659 	ddi_release_devi(dip);
3660 
3661 	if (flags & DEVI_BRANCH_DESTROY)
3662 		flags |= DEVI_BRANCH_EVENT;
3663 
3664 	/*
3665 	 * Force flag is no longer necessary. See starcat/io/drmach.c
3666 	 * for details.
3667 	 */
3668 	ASSERT(e_ddi_branch_held(dip));
3669 	if (e_ddi_branch_unconfigure(dip, &fdip, flags)) {
3670 		sbd_error_t	*err;
3671 		char		*path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3672 
3673 		/*
3674 		 * If non-NULL, fdip is returned held and must be released.
3675 		 */
3676 		if (fdip != NULL) {
3677 			(void) ddi_pathname(fdip, path);
3678 			ndi_rele_devi(fdip);
3679 		} else {
3680 			(void) ddi_pathname(dip, path);
3681 		}
3682 
3683 		err = drerr_new(1, ESTF_DRVFAIL, path);
3684 
3685 		kmem_free(path, MAXPATHLEN);
3686 
3687 		return (err);
3688 	}
3689 
3690 	return (NULL);
3691 }
3692 
3693 /*
3694  * drmach interfaces to legacy Starfire platmod logic
3695  * linkage via runtime symbol look up, called from plat_cpu_power*
3696  */
3697 
3698 /*
3699  * Start up a cpu.  It is possible that we're attempting to restart
3700  * the cpu after an UNCONFIGURE in which case the cpu will be
3701  * spinning in its cache.  So, all we have to do is wake him up.
3702  * Under normal circumstances the cpu will be coming from a previous
3703  * CONNECT and thus will be spinning in OBP.  In both cases, the
3704  * startup sequence is the same.
3705  */
3706 int
3707 drmach_cpu_poweron(struct cpu *cp)
3708 {
3709 	DRMACH_PR("drmach_cpu_poweron: starting cpuid %d\n", cp->cpu_id);
3710 
3711 	ASSERT(MUTEX_HELD(&cpu_lock));
3712 
3713 	if (drmach_cpu_start(cp) != 0)
3714 		return (EBUSY);
3715 	else
3716 		return (0);
3717 }
3718 
3719 int
3720 drmach_cpu_poweroff(struct cpu *cp)
3721 {
3722 	int		ntries, cnt;
3723 	processorid_t	cpuid = cp->cpu_id;
3724 	void		drmach_cpu_shutdown_self(void);
3725 
3726 	DRMACH_PR("drmach_cpu_poweroff: stopping cpuid %d\n", cp->cpu_id);
3727 
3728 	ASSERT(MUTEX_HELD(&cpu_lock));
3729 
3730 	/*
3731 	 * Capture all CPUs (except for detaching proc) to prevent
3732 	 * crosscalls to the detaching proc until it has cleared its
3733 	 * bit in cpu_ready_set.
3734 	 *
3735 	 * The CPUs remain paused and the prom_mutex is known to be free.
3736 	 * This prevents the x-trap victim from blocking when doing prom
3737 	 * IEEE-1275 calls at a high PIL level.
3738 	 */
3739 	promsafe_pause_cpus();
3740 
3741 	/*
3742 	 * Quiesce interrupts on the target CPU. We do this by setting
3743 	 * the CPU 'not ready' (i.e. removing the CPU from cpu_ready_set) to
3744 	 * prevent it from receiving cross calls and cross traps.
3745 	 * This prevents the processor from receiving any new soft interrupts.
3746 	 */
3747 	mp_cpu_quiesce(cp);
3748 
3749 	/* setup xt_mb, will be cleared by drmach_shutdown_asm when ready */
3750 	drmach_xt_mb[cpuid] = 0x80;
3751 
3752 	xt_one_unchecked(cpuid, (xcfunc_t *)idle_stop_xcall,
3753 	    (uint64_t)drmach_cpu_shutdown_self, NULL);
3754 
3755 	ntries = drmach_cpu_ntries;
3756 	cnt = 0;
3757 	while (drmach_xt_mb[cpuid] && ntries) {
3758 		DELAY(drmach_cpu_delay);
3759 		ntries--;
3760 		cnt++;
3761 	}
3762 
3763 	drmach_xt_mb[cpuid] = 0;	/* steal the cache line back */
3764 
3765 	start_cpus();
3766 
3767 	DRMACH_PR("waited %d out of %d tries for "
3768 	    "drmach_cpu_shutdown_self on cpu%d",
3769 	    drmach_cpu_ntries - ntries, drmach_cpu_ntries, cp->cpu_id);
3770 
3771 	drmach_cpu_obp_detach(cpuid);
3772 
3773 	CPU_SIGNATURE(OS_SIG, SIGST_DETACHED, SIGSUBST_NULL, cpuid);
3774 
3775 	return (0);
3776 }
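
/*
 * The drmach_xt_mb handshake above, schematically (illustrative only):
 *
 *	poweroff side (here)		victim cpu (BBSRAM asm)
 *	--------------------		-----------------------
 *	drmach_xt_mb[cpuid] = 0x80
 *	xt_one_unchecked(...)	 -->	flush caches and TLBs
 *	poll drmach_xt_mb[cpuid] <--	clear drmach_xt_mb[cpuid]
 *	drmach_xt_mb[cpuid] = 0		spin forever
 *
 * The final store on this side merely reclaims the cache line, as
 * noted in the drmach_cpu_stop_self() block comment.
 */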
3777 
3778 /*ARGSUSED*/
3779 int
3780 drmach_verify_sr(dev_info_t *dip, int sflag)
3781 {
3782 	return (0);
3783 }
3784 
3785 void
3786 drmach_suspend_last(void)
3787 {
3788 }
3789 
3790 void
3791 drmach_resume_first(void)
3792 {
3793 }
3794 
3795 /*
3796  * Log a DR sysevent.
3797  * Return value: 0 success, non-zero failure.
3798  */
3799 int
3800 drmach_log_sysevent(int board, char *hint, int flag, int verbose)
3801 {
3802 	sysevent_t			*ev;
3803 	sysevent_id_t			eid;
3804 	int				rv, km_flag;
3805 	sysevent_value_t		evnt_val;
3806 	sysevent_attr_list_t		*evnt_attr_list = NULL;
3807 	char				attach_pnt[MAXNAMELEN];
3808 
3809 	km_flag = (flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
3810 	attach_pnt[0] = '\0';
3811 	if (drmach_board_name(board, attach_pnt, MAXNAMELEN)) {
3812 		rv = -1;
3813 		goto logexit;
3814 	}
3815 	if (verbose)
3816 		DRMACH_PR("drmach_log_sysevent: %s %s, flag: %d, verbose: %d\n",
3817 		    attach_pnt, hint, flag, verbose);
3818 
3819 	if ((ev = sysevent_alloc(EC_DR, ESC_DR_AP_STATE_CHANGE,
3820 	    SUNW_KERN_PUB"dr", km_flag)) == NULL) {
3821 		rv = -2;
3822 		goto logexit;
3823 	}
3824 	evnt_val.value_type = SE_DATA_TYPE_STRING;
3825 	evnt_val.value.sv_string = attach_pnt;
3826 	if ((rv = sysevent_add_attr(&evnt_attr_list, DR_AP_ID,
3827 	    &evnt_val, km_flag)) != 0)
3828 		goto logexit;
3829 
3830 	evnt_val.value_type = SE_DATA_TYPE_STRING;
3831 	evnt_val.value.sv_string = hint;
3832 	if ((rv = sysevent_add_attr(&evnt_attr_list, DR_HINT,
3833 	    &evnt_val, km_flag)) != 0) {
3834 		sysevent_free_attr(evnt_attr_list);
3835 		goto logexit;
3836 	}
3837 
3838 	(void) sysevent_attach_attributes(ev, evnt_attr_list);
3839 
3840 	/*
3841 	 * Log the event but do not sleep waiting for its
3842 	 * delivery. This provides insulation from syseventd.
3843 	 */
3844 	rv = log_sysevent(ev, SE_NOSLEEP, &eid);
3845 
3846 logexit:
3847 	if (ev)
3848 		sysevent_free(ev);
3849 	if ((rv != 0) && verbose)
3850 		cmn_err(CE_WARN,
3851 		    "drmach_log_sysevent failed (rv %d) for %s  %s\n",
3852 		    rv, attach_pnt, hint);
3853 
3854 	return (rv);
3855 }
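
/*
 * Example (illustrative only): a caller tearing down board 3 might log
 * the transition with
 *
 *	(void) drmach_log_sysevent(3, "remove", SE_SLEEP, 1);
 *
 * posting an EC_DR / ESC_DR_AP_STATE_CHANGE event whose DR_AP_ID
 * attribute is "SB3" and whose DR_HINT attribute is the hint string
 * (here "remove", chosen purely for illustration).
 */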
3856 
3857 /*ARGSUSED*/
3858 int
3859 drmach_allow_memrange_modify(drmachid_t id)
3860 {
3861 	return (1);	/* TRUE */
3862 }
3863