xref: /titanic_50/usr/src/uts/sun4u/opl/io/drmach.c (revision a0563a48b6bba0177dc249048ea515ca080c73af)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/debug.h>
28 #include <sys/types.h>
29 #include <sys/varargs.h>
30 #include <sys/errno.h>
31 #include <sys/cred.h>
32 #include <sys/dditypes.h>
33 #include <sys/devops.h>
34 #include <sys/modctl.h>
35 #include <sys/poll.h>
36 #include <sys/conf.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/sunndi.h>
40 #include <sys/ndi_impldefs.h>
41 #include <sys/stat.h>
42 #include <sys/kmem.h>
43 #include <sys/vmem.h>
44 #include <sys/opl_olympus_regs.h>
45 #include <sys/cpuvar.h>
46 #include <sys/cpupart.h>
47 #include <sys/mem_config.h>
48 #include <sys/ddi_impldefs.h>
49 #include <sys/systm.h>
50 #include <sys/machsystm.h>
51 #include <sys/autoconf.h>
52 #include <sys/cmn_err.h>
53 #include <sys/sysmacros.h>
54 #include <sys/x_call.h>
55 #include <sys/promif.h>
56 #include <sys/prom_plat.h>
57 #include <sys/membar.h>
58 #include <vm/seg_kmem.h>
59 #include <sys/mem_cage.h>
60 #include <sys/stack.h>
61 #include <sys/archsystm.h>
62 #include <vm/hat_sfmmu.h>
63 #include <sys/pte.h>
64 #include <sys/mmu.h>
65 #include <sys/cpu_module.h>
66 #include <sys/obpdefs.h>
67 #include <sys/note.h>
68 #include <sys/ontrap.h>
69 #include <sys/cpu_sgnblk_defs.h>
70 #include <sys/opl.h>
71 #include <sys/cpu_impl.h>
72 
73 
74 #include <sys/promimpl.h>
75 #include <sys/prom_plat.h>
76 #include <sys/kobj.h>
77 
78 #include <sys/sysevent.h>
79 #include <sys/sysevent/dr.h>
80 #include <sys/sysevent/eventdefs.h>
81 
82 #include <sys/drmach.h>
83 #include <sys/dr_util.h>
84 
85 #include <sys/fcode.h>
86 #include <sys/opl_cfg.h>
87 
88 extern void		bcopy32_il(uint64_t, uint64_t);
89 extern void		flush_cache_il(void);
90 extern void		drmach_sleep_il(void);
91 
92 typedef struct {
93 	struct drmach_node	*node;
94 	void			*data;
95 } drmach_node_walk_args_t;
96 
97 typedef struct drmach_node {
98 	void		*here;
99 
100 	pnode_t		(*get_dnode)(struct drmach_node *node);
101 	int		(*walk)(struct drmach_node *node, void *data,
102 				int (*cb)(drmach_node_walk_args_t *args));
103 	dev_info_t	*(*n_getdip)(struct drmach_node *node);
104 	int		(*n_getproplen)(struct drmach_node *node, char *name,
105 				int *len);
106 	int		(*n_getprop)(struct drmach_node *node, char *name,
107 				void *buf, int len);
108 	int		(*get_parent)(struct drmach_node *node,
109 				struct drmach_node *pnode);
110 } drmach_node_t;
111 
112 typedef struct {
113 	int		 min_index;
114 	int		 max_index;
115 	int		 arr_sz;
116 	drmachid_t	*arr;
117 } drmach_array_t;
118 
119 typedef struct {
120 	void		*isa;
121 
122 	void		(*dispose)(drmachid_t);
123 	sbd_error_t	*(*release)(drmachid_t);
124 	sbd_error_t	*(*status)(drmachid_t, drmach_status_t *);
125 
126 	char		 name[MAXNAMELEN];
127 } drmach_common_t;
128 
129 typedef	struct {
130 	uint32_t	core_present;
131 	uint32_t	core_hotadded;
132 	uint32_t	core_started;
133 } drmach_cmp_t;
134 
135 typedef struct {
136 	drmach_common_t	 cm;
137 	int		 bnum;
138 	int		 assigned;
139 	int		 powered;
140 	int		 connected;
141 	int		 cond;
142 	drmach_node_t	*tree;
143 	drmach_array_t	*devices;
144 	int		boot_board;	/* if board exists on bootup */
145 	drmach_cmp_t	cores[OPL_MAX_COREID_PER_BOARD];
146 } drmach_board_t;
147 
148 typedef struct {
149 	drmach_common_t	 cm;
150 	drmach_board_t	*bp;
151 	int		 unum;
152 	int		portid;
153 	int		 busy;
154 	int		 powered;
155 	const char	*type;
156 	drmach_node_t	*node;
157 } drmach_device_t;
158 
159 typedef struct drmach_cpu {
160 	drmach_device_t  dev;
161 	processorid_t    cpuid;
162 	int		sb;
163 	int		chipid;
164 	int		coreid;
165 	int		strandid;
166 	int		status;
167 #define	OPL_CPU_HOTADDED	1
168 } drmach_cpu_t;
169 
170 typedef struct drmach_mem {
171 	drmach_device_t  dev;
172 	uint64_t	slice_base;
173 	uint64_t	slice_size;
174 	uint64_t	base_pa;	/* lowest installed memory base */
175 	uint64_t	nbytes;		/* size of installed memory */
176 	struct memlist *memlist;
177 } drmach_mem_t;
178 
179 typedef struct drmach_io {
180 	drmach_device_t  dev;
181 	int	channel;
182 	int	leaf;
183 } drmach_io_t;
184 
185 typedef struct drmach_domain_info {
186 	uint32_t	floating;
187 	int		allow_dr;
188 } drmach_domain_info_t;
189 
190 drmach_domain_info_t drmach_domain;
191 
192 typedef struct {
193 	int		 flags;
194 	drmach_device_t	*dp;
195 	sbd_error_t	*err;
196 	dev_info_t	*dip;
197 } drmach_config_args_t;
198 
199 typedef struct {
200 	drmach_board_t	*obj;
201 	int		 ndevs;
202 	void		*a;
203 	sbd_error_t	*(*found)(void *a, const char *, int, drmachid_t);
204 	sbd_error_t	*err;
205 } drmach_board_cb_data_t;
206 
207 static drmach_array_t	*drmach_boards;
208 
209 static sbd_error_t	*drmach_device_new(drmach_node_t *,
210 				drmach_board_t *, int, drmachid_t *);
211 static sbd_error_t	*drmach_cpu_new(drmach_device_t *, drmachid_t *);
212 static sbd_error_t	*drmach_mem_new(drmach_device_t *, drmachid_t *);
213 static sbd_error_t	*drmach_io_new(drmach_device_t *, drmachid_t *);
214 
215 static dev_info_t	*drmach_node_ddi_get_dip(drmach_node_t *np);
216 static int		 drmach_node_ddi_get_prop(drmach_node_t *np,
217 				char *name, void *buf, int len);
218 static int		 drmach_node_ddi_get_proplen(drmach_node_t *np,
219 				char *name, int *len);
220 
221 static int 		drmach_get_portid(drmach_node_t *);
222 static	sbd_error_t	*drmach_i_status(drmachid_t, drmach_status_t *);
223 static int		opl_check_dr_status();
224 static void		drmach_io_dispose(drmachid_t);
225 static sbd_error_t	*drmach_io_release(drmachid_t);
226 static sbd_error_t	*drmach_io_status(drmachid_t, drmach_status_t *);
227 static int 		drmach_init(void);
228 static void 		drmach_fini(void);
229 static void		drmach_swap_pa(drmach_mem_t *, drmach_mem_t *);
230 static drmach_board_t	*drmach_get_board_by_bnum(int);
231 
232 static sbd_error_t	*drmach_board_release(drmachid_t);
233 static sbd_error_t	*drmach_board_status(drmachid_t, drmach_status_t *);
234 static void		drmach_cpu_dispose(drmachid_t);
235 static sbd_error_t	*drmach_cpu_release(drmachid_t);
236 static sbd_error_t	*drmach_cpu_status(drmachid_t, drmach_status_t *);
237 static void		drmach_mem_dispose(drmachid_t);
238 static sbd_error_t	*drmach_mem_release(drmachid_t);
239 static sbd_error_t	*drmach_mem_status(drmachid_t, drmach_status_t *);
240 
241 /* options for the second argument in drmach_add_remove_cpu() */
242 #define	HOTADD_CPU	1
243 #define	HOTREMOVE_CPU	2
244 
245 #define	ON_BOARD_CORE_NUM(x)	(((uint_t)(x) / OPL_MAX_STRANDID_PER_CORE) & \
246 	(OPL_MAX_COREID_PER_BOARD - 1))
247 
248 extern struct cpu	*SIGBCPU;
249 
250 static int		drmach_name2type_idx(char *);
251 static drmach_board_t	*drmach_board_new(int, int);
252 
253 #ifdef DEBUG
254 
255 #define	DRMACH_PR		if (drmach_debug) printf
256 int drmach_debug = 1;		 /* set to non-zero to enable debug messages */
257 #else
258 
259 #define	DRMACH_PR		_NOTE(CONSTANTCONDITION) if (0) printf
260 #endif /* DEBUG */
261 
262 
263 #define	DRMACH_OBJ(id)		((drmach_common_t *)id)
264 
265 #define	DRMACH_NULL_ID(id)	((id) == 0)
266 
267 #define	DRMACH_IS_BOARD_ID(id)	\
268 	((id != 0) &&		\
269 	(DRMACH_OBJ(id)->isa == (void *)drmach_board_new))
270 
271 #define	DRMACH_IS_CPU_ID(id)	\
272 	((id != 0) &&		\
273 	(DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new))
274 
275 #define	DRMACH_IS_MEM_ID(id)	\
276 	((id != 0) &&		\
277 	(DRMACH_OBJ(id)->isa == (void *)drmach_mem_new))
278 
279 #define	DRMACH_IS_IO_ID(id)	\
280 	((id != 0) &&		\
281 	(DRMACH_OBJ(id)->isa == (void *)drmach_io_new))
282 
283 #define	DRMACH_IS_DEVICE_ID(id)					\
284 	((id != 0) &&						\
285 	(DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new ||	\
286 	    DRMACH_OBJ(id)->isa == (void *)drmach_mem_new ||	\
287 	    DRMACH_OBJ(id)->isa == (void *)drmach_io_new))
288 
289 #define	DRMACH_IS_ID(id)					\
290 	((id != 0) &&						\
291 	(DRMACH_OBJ(id)->isa == (void *)drmach_board_new ||	\
292 	    DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new ||	\
293 	    DRMACH_OBJ(id)->isa == (void *)drmach_mem_new ||	\
294 	    DRMACH_OBJ(id)->isa == (void *)drmach_io_new))
295 
296 #define	DRMACH_INTERNAL_ERROR() \
297 	drerr_new(1, EOPL_INTERNAL, drmach_ie_fmt, __LINE__)
298 
299 static char		*drmach_ie_fmt = "drmach.c %d";
300 
301 static struct {
302 	const char	*name;
303 	const char	*type;
304 	sbd_error_t	*(*new)(drmach_device_t *, drmachid_t *);
305 } drmach_name2type[] = {
306 	{ "cpu",	DRMACH_DEVTYPE_CPU,		drmach_cpu_new },
307 	{ "pseudo-mc",	DRMACH_DEVTYPE_MEM,		drmach_mem_new },
308 	{ "pci",	DRMACH_DEVTYPE_PCI,		drmach_io_new  },
309 };
310 
311 /* utility */
312 #define	MBYTE	(1048576ull)
313 
314 /*
315  * drmach autoconfiguration data structures and interfaces
316  */
317 
318 extern struct mod_ops mod_miscops;
319 
320 static struct modlmisc modlmisc = {
321 	&mod_miscops,
322 	"OPL DR 1.1"
323 };
324 
325 static struct modlinkage modlinkage = {
326 	MODREV_1,
327 	(void *)&modlmisc,
328 	NULL
329 };
330 
331 static krwlock_t drmach_boards_rwlock;
332 
333 typedef const char	*fn_t;
334 
335 int
336 _init(void)
337 {
338 	int err;
339 
340 	if ((err = drmach_init()) != 0) {
341 		return (err);
342 	}
343 
344 	if ((err = mod_install(&modlinkage)) != 0) {
345 		drmach_fini();
346 	}
347 
348 	return (err);
349 }
350 
351 int
352 _fini(void)
353 {
354 	int	err;
355 
356 	if ((err = mod_remove(&modlinkage)) == 0)
357 		drmach_fini();
358 
359 	return (err);
360 }
361 
/*
 * Module information entry point; reports via the common framework.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
367 
368 struct drmach_mc_lookup {
369 	int	bnum;
370 	drmach_board_t	*bp;
371 	dev_info_t *dip;	/* rv - set if found */
372 };
373 
374 #define	_ptob64(p) ((uint64_t)(p) << PAGESHIFT)
375 #define	_b64top(b) ((pgcnt_t)((b) >> PAGESHIFT))
376 
/*
 * Determine the memory slice and installed-memory extent for the
 * pseudo-mc device `mp' from its devinfo node `dip'.
 *
 * The slice base/size come from the node's "sb-mem-ranges" property.
 * The installed memory list comes either from the hardware descriptor
 * (board added after boot) or from phys_install clipped to the slice
 * (boot-time board).  On success mp->slice_*, mp->base_pa, mp->nbytes
 * and mp->memlist are filled in and 0 is returned; -1 on failure.
 */
static int
drmach_setup_mc_info(dev_info_t *dip, drmach_mem_t *mp)
{
	uint64_t	memory_ranges[128];
	int len;
	struct memlist	*ml;
	int rv;
	hwd_sb_t *hwd;
	hwd_memory_t *pm;

	/* slice base/size are the first two cells of sb-mem-ranges */
	len = sizeof (memory_ranges);
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "sb-mem-ranges", (caddr_t)&memory_ranges[0], &len) !=
	    DDI_PROP_SUCCESS) {
		mp->slice_base = 0;
		mp->slice_size = 0;
		return (-1);
	}
	mp->slice_base = memory_ranges[0];
	mp->slice_size = memory_ranges[1];

	if (!mp->dev.bp->boot_board) {
		int i;

		/*
		 * Board hot-added after boot: build the memlist from the
		 * hardware descriptor's memory chunk table.
		 */
		rv = opl_read_hwd(mp->dev.bp->bnum, NULL,  NULL, NULL, &hwd);

		if (rv != 0) {
			return (-1);
		}

		ml = NULL;
		pm = &hwd->sb_cmu.cmu_memory;
		for (i = 0; i < HWD_MAX_MEM_CHUNKS; i++) {
			if (pm->mem_chunks[i].chnk_size > 0) {
				ml = memlist_add_span(ml,
				    pm->mem_chunks[i].chnk_start_address,
				    pm->mem_chunks[i].chnk_size);
			}
		}
	} else {
		/*
		 * we intersect phys_install to get base_pa.
		 * This only works at bootup time.
		 */

		memlist_read_lock();
		ml = memlist_dup(phys_install);
		memlist_read_unlock();

		/* clip everything below the slice ... */
		ml = memlist_del_span(ml, 0ull, mp->slice_base);
		if (ml) {
			uint64_t basepa, endpa;
			endpa = _ptob64(physmax + 1);

			basepa = mp->slice_base + mp->slice_size;

			/* ... and everything above it */
			ml = memlist_del_span(ml, basepa, endpa - basepa);
		}
	}

	if (ml) {
		uint64_t nbytes = 0;
		struct memlist *p;
		/* total installed bytes; base_pa is the lowest span start */
		for (p = ml; p; p = p->ml_next) {
			nbytes += p->ml_size;
		}
		if ((mp->nbytes = nbytes) > 0)
			mp->base_pa = ml->ml_address;
		else
			mp->base_pa = 0;
		mp->memlist = ml;
	} else {
		mp->base_pa = 0;
		mp->nbytes = 0;
	}
	return (0);
}
454 
455 
456 struct drmach_hotcpu {
457 	drmach_board_t *bp;
458 	int	bnum;
459 	int	core_id;
460 	int 	rv;
461 	int	option;
462 };
463 
/*
 * ddi_walk_devs() callback for drmach_add_remove_cpu().  The walk is
 * pruned at "cmp" nodes belonging to other boards, so every "cpu"
 * node reached here is on the target board.  For each strand of the
 * requested core, ask the PROM to hot-add or hot-remove it and keep
 * the board's core_hotadded bitmask in sync.  On the first failure
 * p->rv is set to -1 and the walk is terminated.
 */
static int
drmach_cpu_cb(dev_info_t *dip, void *arg)
{
	struct drmach_hotcpu *p = (struct drmach_hotcpu *)arg;
	char name[OBP_MAXDRVNAME];
	int len = OBP_MAXDRVNAME;
	int bnum, core_id, strand_id;
	drmach_board_t *bp;

	if (dip == ddi_root_node()) {
		return (DDI_WALK_CONTINUE);
	}

	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "name",
	    (caddr_t)name, &len) != DDI_PROP_SUCCESS) {
		return (DDI_WALK_PRUNECHILD);
	}

	/* only cmp has board number */
	bnum = -1;
	len = sizeof (bnum);
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, OBP_BOARDNUM,
	    (caddr_t)&bnum, &len) != DDI_PROP_SUCCESS) {
		bnum = -1;
	}

	if (strcmp(name, "cmp") == 0) {
		/* descend only into the target board's cmp subtree */
		if (bnum != p->bnum)
			return (DDI_WALK_PRUNECHILD);
		return (DDI_WALK_CONTINUE);
	}
	/* we have already pruned all unwanted cores and cpu's above */
	if (strcmp(name, "core") == 0) {
		return (DDI_WALK_CONTINUE);
	}
	if (strcmp(name, "cpu") == 0) {
		processorid_t cpuid;
		len = sizeof (cpuid);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "cpuid",
		    (caddr_t)&cpuid, &len) != DDI_PROP_SUCCESS) {
			p->rv = -1;
			return (DDI_WALK_TERMINATE);
		}

		core_id = p->core_id;

		bnum = LSB_ID(cpuid);

		/* skip strands that are not on the requested core */
		if (ON_BOARD_CORE_NUM(cpuid) != core_id)
			return (DDI_WALK_CONTINUE);

		bp = p->bp;
		ASSERT(bnum == bp->bnum);

		if (p->option == HOTADD_CPU) {
			if (prom_hotaddcpu(cpuid) != 0) {
				p->rv = -1;
				return (DDI_WALK_TERMINATE);
			}
			strand_id = STRAND_ID(cpuid);
			bp->cores[core_id].core_hotadded |= (1 << strand_id);
		} else if (p->option == HOTREMOVE_CPU) {
			if (prom_hotremovecpu(cpuid) != 0) {
				p->rv = -1;
				return (DDI_WALK_TERMINATE);
			}
			strand_id = STRAND_ID(cpuid);
			bp->cores[core_id].core_hotadded &= ~(1 << strand_id);
		}
		return (DDI_WALK_CONTINUE);
	}

	return (DDI_WALK_PRUNECHILD);
}
541 
542 
543 static int
544 drmach_add_remove_cpu(int bnum, int core_id, int option)
545 {
546 	struct drmach_hotcpu arg;
547 	drmach_board_t *bp;
548 
549 	bp = drmach_get_board_by_bnum(bnum);
550 	ASSERT(bp);
551 
552 	arg.bp = bp;
553 	arg.bnum = bnum;
554 	arg.core_id = core_id;
555 	arg.rv = 0;
556 	arg.option = option;
557 	ddi_walk_devs(ddi_root_node(), drmach_cpu_cb, (void *)&arg);
558 	return (arg.rv);
559 }
560 
561 struct drmach_setup_core_arg {
562 	drmach_board_t *bp;
563 };
564 
/*
 * ddi_walk_devs() callback used by drmach_setup_core_info() to record
 * which cpu strands exist on a board.  The walk is pruned at "cmp"
 * nodes belonging to other boards, so any "cpu" node reached here
 * belongs to the board being set up; its strand bit is recorded in
 * the corresponding core_present mask.
 */
static int
drmach_setup_core_cb(dev_info_t *dip, void *arg)
{
	struct drmach_setup_core_arg *p = (struct drmach_setup_core_arg *)arg;
	char name[OBP_MAXDRVNAME];
	int len = OBP_MAXDRVNAME;
	int bnum;
	int core_id, strand_id;

	if (dip == ddi_root_node()) {
		return (DDI_WALK_CONTINUE);
	}

	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "name",
	    (caddr_t)name, &len) != DDI_PROP_SUCCESS) {
		return (DDI_WALK_PRUNECHILD);
	}

	/* only cmp has board number */
	bnum = -1;
	len = sizeof (bnum);
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, OBP_BOARDNUM,
	    (caddr_t)&bnum, &len) != DDI_PROP_SUCCESS) {
		bnum = -1;
	}

	if (strcmp(name, "cmp") == 0) {
		/* descend only into this board's cmp subtree */
		if (bnum != p->bp->bnum)
			return (DDI_WALK_PRUNECHILD);
		return (DDI_WALK_CONTINUE);
	}
	/* we have already pruned all unwanted cores and cpu's above */
	if (strcmp(name, "core") == 0) {
		return (DDI_WALK_CONTINUE);
	}
	if (strcmp(name, "cpu") == 0) {
		processorid_t cpuid;
		len = sizeof (cpuid);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "cpuid",
		    (caddr_t)&cpuid, &len) != DDI_PROP_SUCCESS) {
			return (DDI_WALK_TERMINATE);
		}
		/* mark this strand present in its core's bitmask */
		bnum = LSB_ID(cpuid);
		ASSERT(bnum == p->bp->bnum);
		core_id = ON_BOARD_CORE_NUM(cpuid);
		strand_id = STRAND_ID(cpuid);
		p->bp->cores[core_id].core_present |= (1 << strand_id);
		return (DDI_WALK_CONTINUE);
	}

	return (DDI_WALK_PRUNECHILD);
}
620 
621 
622 static void
623 drmach_setup_core_info(drmach_board_t *obj)
624 {
625 	struct drmach_setup_core_arg arg;
626 	int i;
627 
628 	for (i = 0; i < OPL_MAX_COREID_PER_BOARD; i++) {
629 		obj->cores[i].core_present = 0;
630 		obj->cores[i].core_hotadded = 0;
631 		obj->cores[i].core_started = 0;
632 	}
633 	arg.bp = obj;
634 	ddi_walk_devs(ddi_root_node(), drmach_setup_core_cb, (void *)&arg);
635 
636 	for (i = 0; i < OPL_MAX_COREID_PER_BOARD; i++) {
637 		if (obj->boot_board) {
638 			obj->cores[i].core_hotadded =
639 			    obj->cores[i].core_started =
640 			    obj->cores[i].core_present;
641 		}
642 	}
643 }
644 
645 /*
646  * drmach_node_* routines serve the purpose of separating the
647  * rest of the code from the device tree and OBP.  This is necessary
 * because of In-Kernel-Probing.  Devices probed after stod (start of
 * day) are probed by the in-kernel prober, not OBP.  These devices,
 * therefore, do not have dnode ids.
651  */
652 
653 typedef struct {
654 	drmach_node_walk_args_t	*nwargs;
655 	int 			(*cb)(drmach_node_walk_args_t *args);
656 	int			err;
657 } drmach_node_ddi_walk_args_t;
658 
659 static int
660 drmach_node_ddi_walk_cb(dev_info_t *dip, void *arg)
661 {
662 	drmach_node_ddi_walk_args_t	*nargs;
663 
664 	nargs = (drmach_node_ddi_walk_args_t *)arg;
665 
666 	/*
667 	 * dip doesn't have to be held here as we are called
668 	 * from ddi_walk_devs() which holds the dip.
669 	 */
670 	nargs->nwargs->node->here = (void *)dip;
671 
672 	nargs->err = nargs->cb(nargs->nwargs);
673 
674 
675 	/*
676 	 * Set "here" to NULL so that unheld dip is not accessible
677 	 * outside ddi_walk_devs()
678 	 */
679 	nargs->nwargs->node->here = NULL;
680 
681 	if (nargs->err)
682 		return (DDI_WALK_TERMINATE);
683 	else
684 		return (DDI_WALK_CONTINUE);
685 }
686 
687 static int
688 drmach_node_ddi_walk(drmach_node_t *np, void *data,
689 		int (*cb)(drmach_node_walk_args_t *args))
690 {
691 	drmach_node_walk_args_t		args;
692 	drmach_node_ddi_walk_args_t	nargs;
693 
694 
695 	/* initialized args structure for callback */
696 	args.node = np;
697 	args.data = data;
698 
699 	nargs.nwargs = &args;
700 	nargs.cb = cb;
701 	nargs.err = 0;
702 
703 	/*
704 	 * Root node doesn't have to be held in any way.
705 	 */
706 	ddi_walk_devs(ddi_root_node(), drmach_node_ddi_walk_cb, (void *)&nargs);
707 
708 	return (nargs.err);
709 }
710 
711 static int
712 drmach_node_ddi_get_parent(drmach_node_t *np, drmach_node_t *pp)
713 {
714 	dev_info_t	*ndip;
715 	static char	*fn = "drmach_node_ddi_get_parent";
716 
717 	ndip = np->n_getdip(np);
718 	if (ndip == NULL) {
719 		cmn_err(CE_WARN, "%s: NULL dip", fn);
720 		return (-1);
721 	}
722 
723 	bcopy(np, pp, sizeof (drmach_node_t));
724 
725 	pp->here = (void *)ddi_get_parent(ndip);
726 	if (pp->here == NULL) {
727 		cmn_err(CE_WARN, "%s: NULL parent dip", fn);
728 		return (-1);
729 	}
730 
731 	return (0);
732 }
733 
734 /*ARGSUSED*/
735 static pnode_t
736 drmach_node_ddi_get_dnode(drmach_node_t *np)
737 {
738 	return ((pnode_t)NULL);
739 }
740 
741 static drmach_node_t *
742 drmach_node_new(void)
743 {
744 	drmach_node_t *np;
745 
746 	np = kmem_zalloc(sizeof (drmach_node_t), KM_SLEEP);
747 
748 	np->get_dnode = drmach_node_ddi_get_dnode;
749 	np->walk = drmach_node_ddi_walk;
750 	np->n_getdip = drmach_node_ddi_get_dip;
751 	np->n_getproplen = drmach_node_ddi_get_proplen;
752 	np->n_getprop = drmach_node_ddi_get_prop;
753 	np->get_parent = drmach_node_ddi_get_parent;
754 
755 	return (np);
756 }
757 
758 static void
759 drmach_node_dispose(drmach_node_t *np)
760 {
761 	kmem_free(np, sizeof (*np));
762 }
763 
764 static dev_info_t *
765 drmach_node_ddi_get_dip(drmach_node_t *np)
766 {
767 	return ((dev_info_t *)np->here);
768 }
769 
770 static int
771 drmach_node_walk(drmach_node_t *np, void *param,
772 		int (*cb)(drmach_node_walk_args_t *args))
773 {
774 	return (np->walk(np, param, cb));
775 }
776 
777 static int
778 drmach_node_ddi_get_prop(drmach_node_t *np, char *name, void *buf, int len)
779 {
780 	int		rv = 0;
781 	dev_info_t	*ndip;
782 	static char	*fn = "drmach_node_ddi_get_prop";
783 
784 
785 	ndip = np->n_getdip(np);
786 	if (ndip == NULL) {
787 		cmn_err(CE_WARN, "%s: NULL dip", fn);
788 		rv = -1;
789 	} else if (ddi_getlongprop_buf(DDI_DEV_T_ANY, ndip,
790 	    DDI_PROP_DONTPASS, name,
791 	    (caddr_t)buf, &len) != DDI_PROP_SUCCESS) {
792 		rv = -1;
793 	}
794 
795 	return (rv);
796 }
797 
798 static int
799 drmach_node_ddi_get_proplen(drmach_node_t *np, char *name, int *len)
800 {
801 	int		rv = 0;
802 	dev_info_t	*ndip;
803 
804 	ndip = np->n_getdip(np);
805 	if (ndip == NULL) {
806 		rv = -1;
807 	} else if (ddi_getproplen(DDI_DEV_T_ANY, ndip, DDI_PROP_DONTPASS, name,
808 	    len) != DDI_PROP_SUCCESS) {
809 		rv = -1;
810 	}
811 
812 	return (rv);
813 }
814 
815 static drmachid_t
816 drmach_node_dup(drmach_node_t *np)
817 {
818 	drmach_node_t *dup;
819 
820 	dup = drmach_node_new();
821 	dup->here = np->here;
822 	dup->get_dnode = np->get_dnode;
823 	dup->walk = np->walk;
824 	dup->n_getdip = np->n_getdip;
825 	dup->n_getproplen = np->n_getproplen;
826 	dup->n_getprop = np->n_getprop;
827 	dup->get_parent = np->get_parent;
828 
829 	return (dup);
830 }
831 
832 /*
833  * drmach_array provides convenient array construction, access,
834  * bounds checking and array destruction logic.
835  */
836 
837 static drmach_array_t *
838 drmach_array_new(int min_index, int max_index)
839 {
840 	drmach_array_t *arr;
841 
842 	arr = kmem_zalloc(sizeof (drmach_array_t), KM_SLEEP);
843 
844 	arr->arr_sz = (max_index - min_index + 1) * sizeof (void *);
845 	if (arr->arr_sz > 0) {
846 		arr->min_index = min_index;
847 		arr->max_index = max_index;
848 
849 		arr->arr = kmem_zalloc(arr->arr_sz, KM_SLEEP);
850 		return (arr);
851 	} else {
852 		kmem_free(arr, sizeof (*arr));
853 		return (0);
854 	}
855 }
856 
857 static int
858 drmach_array_set(drmach_array_t *arr, int idx, drmachid_t val)
859 {
860 	if (idx < arr->min_index || idx > arr->max_index)
861 		return (-1);
862 	else {
863 		arr->arr[idx - arr->min_index] = val;
864 		return (0);
865 	}
866 	/*NOTREACHED*/
867 }
868 
869 static int
870 drmach_array_get(drmach_array_t *arr, int idx, drmachid_t *val)
871 {
872 	if (idx < arr->min_index || idx > arr->max_index)
873 		return (-1);
874 	else {
875 		*val = arr->arr[idx - arr->min_index];
876 		return (0);
877 	}
878 	/*NOTREACHED*/
879 }
880 
881 static int
882 drmach_array_first(drmach_array_t *arr, int *idx, drmachid_t *val)
883 {
884 	int rv;
885 
886 	*idx = arr->min_index;
887 	while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL)
888 		*idx += 1;
889 
890 	return (rv);
891 }
892 
893 static int
894 drmach_array_next(drmach_array_t *arr, int *idx, drmachid_t *val)
895 {
896 	int rv;
897 
898 	*idx += 1;
899 	while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL)
900 		*idx += 1;
901 
902 	return (rv);
903 }
904 
905 static void
906 drmach_array_dispose(drmach_array_t *arr, void (*disposer)(drmachid_t))
907 {
908 	drmachid_t	val;
909 	int		idx;
910 	int		rv;
911 
912 	rv = drmach_array_first(arr, &idx, &val);
913 	while (rv == 0) {
914 		(*disposer)(val);
915 		rv = drmach_array_next(arr, &idx, &val);
916 	}
917 
918 	kmem_free(arr->arr, arr->arr_sz);
919 	kmem_free(arr, sizeof (*arr));
920 }
921 
922 static drmach_board_t *
923 drmach_get_board_by_bnum(int bnum)
924 {
925 	drmachid_t id;
926 
927 	if (drmach_array_get(drmach_boards, bnum, &id) == 0)
928 		return ((drmach_board_t *)id);
929 	else
930 		return (NULL);
931 }
932 
933 static pnode_t
934 drmach_node_get_dnode(drmach_node_t *np)
935 {
936 	return (np->get_dnode(np));
937 }
938 
/*
 * Attach (configure) the devinfo branch rooted at device `id'.  CPU
 * ids are a no-op here.  The branch must already be held by the
 * caller (asserted below).  Returns NULL on success or an sbd_error_t
 * naming the path of the node that failed to configure.
 */
/*ARGSUSED*/
sbd_error_t *
drmach_configure(drmachid_t id, int flags)
{
	drmach_device_t		*dp;
	sbd_error_t		*err = NULL;
	dev_info_t		*rdip;
	dev_info_t		*fdip = NULL;

	/* CPU devices require no devinfo branch configuration here */
	if (DRMACH_IS_CPU_ID(id)) {
		return (NULL);
	}
	if (!DRMACH_IS_DEVICE_ID(id))
		return (drerr_new(0, EOPL_INAPPROP, NULL));
	dp = id;
	rdip = dp->node->n_getdip(dp->node);

	ASSERT(rdip);

	ASSERT(e_ddi_branch_held(rdip));

	if (e_ddi_branch_configure(rdip, &fdip, 0) != 0) {
		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		/* report the failing node if identified, else the root */
		dev_info_t *dip = (fdip != NULL) ? fdip : rdip;

		(void) ddi_pathname(dip, path);
		err = drerr_new(1,  EOPL_DRVFAIL, path);

		kmem_free(path, MAXPATHLEN);

		/* If non-NULL, fdip is returned held and must be released */
		if (fdip != NULL)
			ddi_release_devi(fdip);
	}

	return (err);
}
976 
977 
/*
 * Create a device id for devinfo node `node' on board `bp'.  The
 * node's "name" property selects a constructor from the
 * drmach_name2type[] table (cpu, pseudo-mc, pci); nodes of other
 * types yield *idp == NULL with no error.  Returns an sbd_error_t
 * only if the node lacks a name property or the constructor fails.
 */
static sbd_error_t *
drmach_device_new(drmach_node_t *node,
	drmach_board_t *bp, int portid, drmachid_t *idp)
{
	int		 i;
	int		 rv;
	drmach_device_t	proto;
	sbd_error_t	*err;
	char		 name[OBP_MAXDRVNAME];

	rv = node->n_getprop(node, "name", name, OBP_MAXDRVNAME);
	if (rv) {
		/* every node is expected to have a name */
		err = drerr_new(1, EOPL_GETPROP, "device node %s: property %s",
		    ddi_node_name(node->n_getdip(node)), "name");
		return (err);
	}

	/*
	 * If the node currently being examined is not listed in the
	 * name2type[] array, it is of no interest to drmach; yield no
	 * device id and no error structure for this case.
	 */
	i = drmach_name2type_idx(name);


	if (i < 0) {
		*idp = (drmachid_t)0;
		return (NULL);
	}

	/* device specific new function will set unum */

	bzero(&proto, sizeof (proto));
	proto.type = drmach_name2type[i].type;
	proto.bp = bp;
	proto.node = node;
	proto.portid = portid;

	return (drmach_name2type[i].new(&proto, idp));
}
1020 
1021 static void
1022 drmach_device_dispose(drmachid_t id)
1023 {
1024 	drmach_device_t *self = id;
1025 
1026 	self->cm.dispose(id);
1027 }
1028 
1029 
/*
 * Allocate and initialize a board object for board `bnum' and
 * register it in the global drmach_boards array.  `boot_board' is
 * non-zero for boards present at boot; such boards start out
 * assigned, powered and connected, and have their per-core strand
 * masks populated immediately.
 */
static drmach_board_t *
drmach_board_new(int bnum, int boot_board)
{
	drmach_board_t	*bp;

	bp = kmem_zalloc(sizeof (drmach_board_t), KM_SLEEP);

	/* isa tags this object as a board; see DRMACH_IS_BOARD_ID() */
	bp->cm.isa = (void *)drmach_board_new;
	bp->cm.release = drmach_board_release;
	bp->cm.status = drmach_board_status;

	(void) drmach_board_name(bnum, bp->cm.name, sizeof (bp->cm.name));

	bp->bnum = bnum;
	bp->devices = NULL;
	bp->connected = boot_board;
	bp->tree = drmach_node_new();
	bp->assigned = boot_board;
	bp->powered = boot_board;
	bp->boot_board = boot_board;

	/*
	 * If this is not bootup initialization, we have to wait till
	 * IKP sets up the device nodes in drmach_board_connect().
	 */
	if (boot_board)
		drmach_setup_core_info(bp);

	(void) drmach_array_set(drmach_boards, bnum, bp);
	return (bp);
}
1061 
1062 static void
1063 drmach_board_dispose(drmachid_t id)
1064 {
1065 	drmach_board_t *bp;
1066 
1067 	ASSERT(DRMACH_IS_BOARD_ID(id));
1068 	bp = id;
1069 
1070 	if (bp->tree)
1071 		drmach_node_dispose(bp->tree);
1072 
1073 	if (bp->devices)
1074 		drmach_array_dispose(bp->devices, drmach_device_dispose);
1075 
1076 	kmem_free(bp, sizeof (*bp));
1077 }
1078 
/*
 * Fill *stat with the board's DR status.  assigned/powered come from
 * the board object; busy and configured are the OR over the status of
 * every device on the board (both 0 if the board has no device
 * array).  Returns NULL on success, or the first device status error.
 */
static sbd_error_t *
drmach_board_status(drmachid_t id, drmach_status_t *stat)
{
	sbd_error_t	*err = NULL;
	drmach_board_t	*bp;

	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, EOPL_INAPPROP, NULL));
	bp = id;

	stat->assigned = bp->assigned;
	stat->powered = bp->powered;
	stat->busy = 0;			/* assume not busy */
	stat->configured = 0;		/* assume not configured */
	stat->empty = 0;
	stat->cond = bp->cond = SBD_COND_OK;
	(void) strncpy(stat->type, "System Brd", sizeof (stat->type));
	stat->info[0] = '\0';

	if (bp->devices) {
		int		 rv;
		int		 d_idx;
		drmachid_t	 d_id;

		/* aggregate busy/configured across all board devices */
		rv = drmach_array_first(bp->devices, &d_idx, &d_id);
		while (rv == 0) {
			drmach_status_t	d_stat;

			err = drmach_i_status(d_id, &d_stat);
			if (err)
				break;

			stat->busy |= d_stat.busy;
			stat->configured |= d_stat.configured;

			rv = drmach_array_next(bp->devices, &d_idx, &d_id);
		}
	}

	return (err);
}
1120 
1121 int
1122 drmach_board_is_floating(drmachid_t id)
1123 {
1124 	drmach_board_t *bp;
1125 
1126 	if (!DRMACH_IS_BOARD_ID(id))
1127 		return (0);
1128 
1129 	bp = (drmach_board_t *)id;
1130 
1131 	return ((drmach_domain.floating & (1 << bp->bnum)) ? 1 : 0);
1132 }
1133 
/*
 * One-time drmach initialization, called from _init():
 *
 *  - create the global board array and its rwlock;
 *  - decode the root "floating-boards" property into a bitmask;
 *  - create a board object for every immediate child of the devinfo
 *    root that carries a board number property;
 *  - take devtree holds and initialize the IKP framework.
 *
 * Returns 0 on success, ENXIO on failure (boards and lock undone).
 */
static int
drmach_init(void)
{
	dev_info_t	*rdip;
	int		i, rv, len;
	int		*floating;

	rw_init(&drmach_boards_rwlock, NULL, RW_DEFAULT, NULL);

	drmach_boards = drmach_array_new(0, MAX_BOARDS - 1);

	rdip = ddi_root_node();

	if (ddi_getproplen(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
	    "floating-boards", &len) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "Cannot get floating-boards proplen\n");
	} else {
		floating = (int *)kmem_alloc(len, KM_SLEEP);
		rv = ddi_prop_op(DDI_DEV_T_ANY, rdip, PROP_LEN_AND_VAL_BUF,
		    DDI_PROP_DONTPASS, "floating-boards", (caddr_t)floating,
		    &len);
		if (rv != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "Cannot get floating-boards prop\n");
		} else {
			drmach_domain.floating = 0;
			/* one int per floating board; set each board's bit */
			for (i = 0; i < len / sizeof (int); i++) {
				drmach_domain.floating |= (1 << floating[i]);
			}
		}
		kmem_free(floating, len);
	}
	drmach_domain.allow_dr = opl_check_dr_status();

	/* scan the root's children for nodes bearing a board number */
	rdip = ddi_get_child(ddi_root_node());
	do {
		int		 bnum;
		drmachid_t	 id;

		bnum = -1;
		bnum = ddi_getprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		    OBP_BOARDNUM, -1);
		if (bnum == -1)
			continue;

		if (drmach_array_get(drmach_boards, bnum, &id) == -1) {
			/* board number outside [0, MAX_BOARDS) */
			cmn_err(CE_WARN, "Device node 0x%p has invalid "
			    "property value, %s=%d", (void *)rdip,
			    OBP_BOARDNUM, bnum);
			goto error;
		} else if (id == NULL) {
			/* first node seen for this board: create it */
			(void) drmach_board_new(bnum, 1);
		}
	} while ((rdip = ddi_get_next_sibling(rdip)) != NULL);

	opl_hold_devtree();

	/*
	 * Initialize the IKP feature.
	 *
	 * This can be done only after DR has acquired a hold on all the
	 * device nodes that are interesting to IKP.
	 */
	if (opl_init_cfg() != 0) {
		cmn_err(CE_WARN, "DR - IKP initialization failed");

		opl_release_devtree();

		goto error;
	}

	return (0);
error:
	drmach_array_dispose(drmach_boards, drmach_board_dispose);
	rw_destroy(&drmach_boards_rwlock);
	return (ENXIO);
}
1210 
1211 static void
1212 drmach_fini(void)
1213 {
1214 	rw_enter(&drmach_boards_rwlock, RW_WRITER);
1215 	drmach_array_dispose(drmach_boards, drmach_board_dispose);
1216 	drmach_boards = NULL;
1217 	rw_exit(&drmach_boards_rwlock);
1218 
1219 	/*
1220 	 * Walk immediate children of the root devinfo node
1221 	 * releasing holds acquired on branches in drmach_init()
1222 	 */
1223 
1224 	opl_release_devtree();
1225 
1226 	rw_destroy(&drmach_boards_rwlock);
1227 }
1228 
1229 /*
 *	Each system board contains 2 Oberon PCI bridges and
 *	1 CMUCH.
 *	Each Oberon has 2 channels.
 *	Each channel has 2 PCI-Express leaves.
1234  *	Each CMUCH has 1 pci bus.
1235  *
1236  *
1237  *	Device Path:
1238  *	/pci@<portid>,reg
1239  *
1240  *	where
1241  *	portid[10] = 0
1242  *	portid[9:0] = LLEAF_ID[9:0] of the Oberon Channel
1243  *
1244  *	LLEAF_ID[9:8] = 0
1245  *	LLEAF_ID[8:4] = LSB_ID[4:0]
1246  *	LLEAF_ID[3:1] = IO Channel#[2:0] (0,1,2,3 for Oberon)
1247  *			channel 4 is pcicmu
1248  *	LLEAF_ID[0] = PCI Leaf Number (0 for leaf-A, 1 for leaf-B)
1249  *
1250  *	Properties:
1251  *	name = pci
1252  *	device_type = "pciex"
1253  *	board# = LSBID
1254  *	reg = int32 * 2, Oberon CSR space of the leaf and the UBC space
1255  *	portid = Jupiter Bus Device ID ((LSB_ID << 3)|pciport#)
1256  */
1257 
1258 static sbd_error_t *
1259 drmach_io_new(drmach_device_t *proto, drmachid_t *idp)
1260 {
1261 	drmach_io_t	*ip;
1262 
1263 	int		 portid;
1264 
1265 	portid = proto->portid;
1266 	ASSERT(portid != -1);
1267 	proto->unum = portid & (MAX_IO_UNITS_PER_BOARD - 1);
1268 
1269 	ip = kmem_zalloc(sizeof (drmach_io_t), KM_SLEEP);
1270 	bcopy(proto, &ip->dev, sizeof (ip->dev));
1271 	ip->dev.node = drmach_node_dup(proto->node);
1272 	ip->dev.cm.isa = (void *)drmach_io_new;
1273 	ip->dev.cm.dispose = drmach_io_dispose;
1274 	ip->dev.cm.release = drmach_io_release;
1275 	ip->dev.cm.status = drmach_io_status;
1276 	ip->channel = (portid >> 1) & 0x7;
1277 	ip->leaf = (portid & 0x1);
1278 
1279 	(void) snprintf(ip->dev.cm.name, sizeof (ip->dev.cm.name), "%s%d",
1280 	    ip->dev.type, ip->dev.unum);
1281 
1282 	*idp = (drmachid_t)ip;
1283 	return (NULL);
1284 }
1285 
1286 
1287 static void
1288 drmach_io_dispose(drmachid_t id)
1289 {
1290 	drmach_io_t *self;
1291 
1292 	ASSERT(DRMACH_IS_IO_ID(id));
1293 
1294 	self = id;
1295 	if (self->dev.node)
1296 		drmach_node_dispose(self->dev.node);
1297 
1298 	kmem_free(self, sizeof (*self));
1299 }
1300 
1301 /*ARGSUSED*/
1302 sbd_error_t *
1303 drmach_pre_op(int cmd, drmachid_t id, drmach_opts_t *opts)
1304 {
1305 	drmach_board_t	*bp = (drmach_board_t *)id;
1306 	sbd_error_t	*err = NULL;
1307 
1308 	/* allow status and ncm operations to always succeed */
1309 	if ((cmd == SBD_CMD_STATUS) || (cmd == SBD_CMD_GETNCM)) {
1310 		return (NULL);
1311 	}
1312 
1313 	/* check all other commands for the required option string */
1314 
1315 	if ((opts->size > 0) && (opts->copts != NULL)) {
1316 
1317 		DRMACH_PR("platform options: %s\n", opts->copts);
1318 
1319 		if (strstr(opts->copts, "opldr") == NULL) {
1320 			err = drerr_new(1, EOPL_SUPPORT, NULL);
1321 		}
1322 	} else {
1323 		err = drerr_new(1, EOPL_SUPPORT, NULL);
1324 	}
1325 
1326 	if (!err && id && DRMACH_IS_BOARD_ID(id)) {
1327 		switch (cmd) {
1328 			case SBD_CMD_TEST:
1329 			case SBD_CMD_STATUS:
1330 			case SBD_CMD_GETNCM:
1331 				break;
1332 			case SBD_CMD_CONNECT:
1333 				if (bp->connected)
1334 					err = drerr_new(0, ESBD_STATE, NULL);
1335 				else if (!drmach_domain.allow_dr)
1336 					err = drerr_new(1, EOPL_SUPPORT, NULL);
1337 				break;
1338 			case SBD_CMD_DISCONNECT:
1339 				if (!bp->connected)
1340 					err = drerr_new(0, ESBD_STATE, NULL);
1341 				else if (!drmach_domain.allow_dr)
1342 					err = drerr_new(1, EOPL_SUPPORT, NULL);
1343 				break;
1344 			default:
1345 				if (!drmach_domain.allow_dr)
1346 					err = drerr_new(1, EOPL_SUPPORT, NULL);
1347 				break;
1348 
1349 		}
1350 	}
1351 
1352 	return (err);
1353 }
1354 
/* Post-command hook: no platform-specific post-processing on OPL. */
/*ARGSUSED*/
sbd_error_t *
drmach_post_op(int cmd, drmachid_t id, drmach_opts_t *opts)
{
	return (NULL);
}
1361 
/*
 * Look up (or create) the board object for 'bnum' and mark it assigned.
 *
 * If the board object already exists, the boards lock is downgraded to
 * a reader before the assigned bit is set; a new board object is only
 * created while the writer lock is still held.
 */
sbd_error_t *
drmach_board_assign(int bnum, drmachid_t *id)
{
	sbd_error_t	*err = NULL;

	rw_enter(&drmach_boards_rwlock, RW_WRITER);

	if (drmach_array_get(drmach_boards, bnum, id) == -1) {
		/* bnum is outside the board array bounds */
		err = drerr_new(1, EOPL_BNUM, "%d", bnum);
	} else {
		drmach_board_t	*bp;

		if (*id)
			rw_downgrade(&drmach_boards_rwlock);

		bp = *id;
		if (!(*id))
			bp = *id  =
			    (drmachid_t)drmach_board_new(bnum, 0);
		bp->assigned = 1;
	}

	rw_exit(&drmach_boards_rwlock);

	return (err);
}
1388 
1389 /*ARGSUSED*/
1390 sbd_error_t *
1391 drmach_board_connect(drmachid_t id, drmach_opts_t *opts)
1392 {
1393 	extern int	cpu_alljupiter;
1394 	drmach_board_t	*obj = (drmach_board_t *)id;
1395 	unsigned	cpu_impl;
1396 
1397 	if (!DRMACH_IS_BOARD_ID(id))
1398 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1399 
1400 	if (opl_probe_sb(obj->bnum, &cpu_impl) != 0)
1401 		return (drerr_new(1, EOPL_PROBE, NULL));
1402 
1403 	if (cpu_alljupiter) {
1404 		if (cpu_impl & (1 << OLYMPUS_C_IMPL)) {
1405 			(void) opl_unprobe_sb(obj->bnum);
1406 			return (drerr_new(1, EOPL_MIXED_CPU, NULL));
1407 		}
1408 	}
1409 
1410 	(void) prom_attach_notice(obj->bnum);
1411 
1412 	drmach_setup_core_info(obj);
1413 
1414 	obj->connected = 1;
1415 
1416 	return (NULL);
1417 }
1418 
/* Per-CPU busy flags used to wait for cross-call cache flushes. */
static int drmach_cache_flush_flag[NCPU];
1420 
/*
 * Cross-call handler: flush this CPU's external cache, then clear the
 * per-CPU flag ('id' is the target CPU number) so the initiator can
 * stop waiting.
 */
/*ARGSUSED*/
static void
drmach_flush_cache(uint64_t id, uint64_t dummy)
{
	extern void cpu_flush_ecache(void);

	cpu_flush_ecache();
	drmach_cache_flush_flag[id] = 0;
}
1430 
1431 static void
1432 drmach_flush_all()
1433 {
1434 	cpuset_t	xc_cpuset;
1435 	int		i;
1436 
1437 	xc_cpuset = cpu_ready_set;
1438 	for (i = 0; i < NCPU; i++) {
1439 		if (CPU_IN_SET(xc_cpuset, i)) {
1440 			drmach_cache_flush_flag[i] = 1;
1441 			xc_one(i, drmach_flush_cache, i, 0);
1442 			while (drmach_cache_flush_flag[i]) {
1443 				DELAY(1000);
1444 			}
1445 		}
1446 	}
1447 }
1448 
1449 static int
1450 drmach_disconnect_cpus(drmach_board_t *bp)
1451 {
1452 	int i, bnum;
1453 
1454 	bnum = bp->bnum;
1455 
1456 	for (i = 0; i < OPL_MAX_COREID_PER_BOARD; i++) {
1457 		if (bp->cores[i].core_present) {
1458 			if (bp->cores[i].core_started)
1459 				return (-1);
1460 			if (bp->cores[i].core_hotadded) {
1461 				if (drmach_add_remove_cpu(bnum, i,
1462 				    HOTREMOVE_CPU)) {
1463 					cmn_err(CE_WARN, "Failed to remove "
1464 					    "CMP %d on board %d\n", i, bnum);
1465 					return (-1);
1466 				}
1467 			}
1468 		}
1469 	}
1470 	return (0);
1471 }
1472 
1473 /*ARGSUSED*/
1474 sbd_error_t *
1475 drmach_board_disconnect(drmachid_t id, drmach_opts_t *opts)
1476 {
1477 	drmach_board_t *obj;
1478 	int rv = 0;
1479 	sbd_error_t		*err = NULL;
1480 
1481 	if (DRMACH_NULL_ID(id))
1482 		return (NULL);
1483 
1484 	if (!DRMACH_IS_BOARD_ID(id))
1485 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1486 
1487 	obj = (drmach_board_t *)id;
1488 
1489 	if (drmach_disconnect_cpus(obj)) {
1490 		err = drerr_new(1, EOPL_DEPROBE, obj->cm.name);
1491 		return (err);
1492 	}
1493 
1494 	rv = opl_unprobe_sb(obj->bnum);
1495 
1496 	if (rv == 0) {
1497 		(void) prom_detach_notice(obj->bnum);
1498 		obj->connected = 0;
1499 
1500 	} else
1501 		err = drerr_new(1, EOPL_DEPROBE, obj->cm.name);
1502 
1503 	return (err);
1504 }
1505 
1506 static int
1507 drmach_get_portid(drmach_node_t *np)
1508 {
1509 	int		portid;
1510 	char		type[OBP_MAXPROPNAME];
1511 
1512 	if (np->n_getprop(np, "portid", &portid, sizeof (portid)) == 0)
1513 		return (portid);
1514 
1515 	/*
1516 	 * Get the device_type property to see if we should
1517 	 * continue processing this node.
1518 	 */
1519 	if (np->n_getprop(np, "device_type", &type, sizeof (type)) != 0)
1520 		return (-1);
1521 
1522 	if (strcmp(type, OPL_CPU_NODE) == 0) {
1523 		/*
1524 		 * We return cpuid because it has no portid
1525 		 */
1526 		if (np->n_getprop(np, "cpuid", &portid, sizeof (portid)) == 0)
1527 			return (portid);
1528 	}
1529 
1530 	return (-1);
1531 }
1532 
1533 /*
1534  * This is a helper function to determine if a given
1535  * node should be considered for a dr operation according
1536  * to predefined dr type nodes and the node's name.
1537  * Formal Parameter : The name of a device node.
1538  * Return Value: -1, name does not map to a valid dr type.
1539  *		 A value greater or equal to 0, name is a valid dr type.
1540  */
1541 static int
1542 drmach_name2type_idx(char *name)
1543 {
1544 	int 	index, ntypes;
1545 
1546 	if (name == NULL)
1547 		return (-1);
1548 
1549 	/*
1550 	 * Determine how many possible types are currently supported
1551 	 * for dr.
1552 	 */
1553 	ntypes = sizeof (drmach_name2type) / sizeof (drmach_name2type[0]);
1554 
1555 	/* Determine if the node's name correspond to a predefined type. */
1556 	for (index = 0; index < ntypes; index++) {
1557 		if (strcmp(drmach_name2type[index].name, name) == 0)
1558 			/* The node is an allowed type for dr. */
1559 			return (index);
1560 	}
1561 
1562 	/*
1563 	 * If the name of the node does not map to any of the
1564 	 * types in the array drmach_name2type then the node is not of
1565 	 * interest to dr.
1566 	 */
1567 	return (-1);
1568 }
1569 
1570 /*
1571  * there is some complication on OPL:
1572  * - pseudo-mc nodes do not have portid property
1573  * - portid[9:5] of cmp node is LSB #, portid[7:3] of pci is LSB#
1574  * - cmp has board#
1575  * - core and cpu nodes do not have portid and board# properties
1576  * starcat uses portid to derive the board# but that does not work
1577  * for us.  starfire reads board# property to filter the devices.
 * That does not work either.  So for these specific devices,
1579  * we use specific hard coded methods to get the board# -
1580  * cpu: LSB# = CPUID[9:5]
1581  */
1582 
/*
 * drmach_node_walk() callback: create a drmach device for each node on
 * the target board that is of interest to DR and hand it to the
 * caller's 'found' function.  Returns 0 to continue the walk, -1 to
 * stop it on error (data->err holds the reason).
 */
static int
drmach_board_find_devices_cb(drmach_node_walk_args_t *args)
{
	drmach_node_t			*node = args->node;
	drmach_board_cb_data_t		*data = args->data;
	drmach_board_t			*obj = data->obj;

	int		rv, portid;
	int		bnum;
	drmachid_t	id;
	drmach_device_t	*device;
	char name[OBP_MAXDRVNAME];

	portid = drmach_get_portid(node);
	/*
	 * core, cpu and pseudo-mc do not have portid
	 * we use cpuid as the portid of the cpu node
	 * for pseudo-mc, we do not use portid info.
	 */

	rv = node->n_getprop(node, "name", name, OBP_MAXDRVNAME);
	if (rv)
		return (0);


	rv = node->n_getprop(node, OBP_BOARDNUM, &bnum, sizeof (bnum));

	if (rv) {
		/*
		 * cpu does not have board# property.  We use
		 * CPUID[9:5]
		 */
		if (strcmp("cpu", name) == 0) {
			bnum = (portid >> 5) & 0x1f;
		} else
			return (0);
	}


	/* skip nodes that belong to some other board */
	if (bnum != obj->bnum)
		return (0);

	/* skip node types that DR does not operate on */
	if (drmach_name2type_idx(name) < 0) {
		return (0);
	}

	/*
	 * Create a device data structure from this node data.
	 * The call may yield nothing if the node is not of interest
	 * to drmach.
	 */
	data->err = drmach_device_new(node, obj, portid, &id);
	if (data->err)
		return (-1);
	else if (!id) {
		/*
		 * drmach_device_new examined the node we passed in
		 * and determined that it was one not of interest to
		 * drmach.  So, it is skipped.
		 */
		return (0);
	}

	rv = drmach_array_set(obj->devices, data->ndevs++, id);
	if (rv) {
		data->err = DRMACH_INTERNAL_ERROR();
		return (-1);
	}
	device = id;

	/* report the new device to the walk's caller */
	data->err = (*data->found)(data->a, device->type, device->unum, id);
	return (data->err == NULL ? 0 : -1);
}
1656 
1657 sbd_error_t *
1658 drmach_board_find_devices(drmachid_t id, void *a,
1659 	sbd_error_t *(*found)(void *a, const char *, int, drmachid_t))
1660 {
1661 	drmach_board_t		*bp = (drmach_board_t *)id;
1662 	sbd_error_t		*err;
1663 	int			 max_devices;
1664 	int			 rv;
1665 	drmach_board_cb_data_t	data;
1666 
1667 
1668 	if (!DRMACH_IS_BOARD_ID(id))
1669 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1670 
1671 	max_devices  = MAX_CPU_UNITS_PER_BOARD;
1672 	max_devices += MAX_MEM_UNITS_PER_BOARD;
1673 	max_devices += MAX_IO_UNITS_PER_BOARD;
1674 
1675 	bp->devices = drmach_array_new(0, max_devices);
1676 
1677 	if (bp->tree == NULL)
1678 		bp->tree = drmach_node_new();
1679 
1680 	data.obj = bp;
1681 	data.ndevs = 0;
1682 	data.found = found;
1683 	data.a = a;
1684 	data.err = NULL;
1685 
1686 	rv = drmach_node_walk(bp->tree, &data, drmach_board_find_devices_cb);
1687 	if (rv == 0)
1688 		err = NULL;
1689 	else {
1690 		drmach_array_dispose(bp->devices, drmach_device_dispose);
1691 		bp->devices = NULL;
1692 
1693 		if (data.err)
1694 			err = data.err;
1695 		else
1696 			err = DRMACH_INTERNAL_ERROR();
1697 	}
1698 
1699 	return (err);
1700 }
1701 
1702 int
1703 drmach_board_lookup(int bnum, drmachid_t *id)
1704 {
1705 	int	rv = 0;
1706 
1707 	rw_enter(&drmach_boards_rwlock, RW_READER);
1708 	if (drmach_array_get(drmach_boards, bnum, id)) {
1709 		*id = 0;
1710 		rv = -1;
1711 	}
1712 	rw_exit(&drmach_boards_rwlock);
1713 	return (rv);
1714 }
1715 
/* Format the canonical name ("SB<n>") for board 'bnum' into 'buf'. */
sbd_error_t *
drmach_board_name(int bnum, char *buf, int buflen)
{
	(void) snprintf(buf, buflen, "SB%d", bnum);
	return (NULL);
}
1722 
1723 sbd_error_t *
1724 drmach_board_poweroff(drmachid_t id)
1725 {
1726 	drmach_board_t	*bp;
1727 	sbd_error_t	*err;
1728 	drmach_status_t	 stat;
1729 
1730 	if (DRMACH_NULL_ID(id))
1731 		return (NULL);
1732 
1733 	if (!DRMACH_IS_BOARD_ID(id))
1734 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1735 	bp = id;
1736 
1737 	err = drmach_board_status(id, &stat);
1738 
1739 	if (!err) {
1740 		if (stat.configured || stat.busy)
1741 			err = drerr_new(0, EOPL_CONFIGBUSY, bp->cm.name);
1742 		else {
1743 			bp->powered = 0;
1744 		}
1745 	}
1746 	return (err);
1747 }
1748 
1749 sbd_error_t *
1750 drmach_board_poweron(drmachid_t id)
1751 {
1752 	drmach_board_t	*bp;
1753 
1754 	if (!DRMACH_IS_BOARD_ID(id))
1755 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1756 	bp = id;
1757 
1758 	bp->powered = 1;
1759 
1760 	return (NULL);
1761 }
1762 
/* Board release hook: nothing to do beyond validating the id. */
static sbd_error_t *
drmach_board_release(drmachid_t id)
{
	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, EOPL_INAPPROP, NULL));
	return (NULL);
}
1770 
/* Board self-test hook: no platform test is performed on OPL. */
/*ARGSUSED*/
sbd_error_t *
drmach_board_test(drmachid_t id, drmach_opts_t *opts, int force)
{
	return (NULL);
}
1777 
1778 sbd_error_t *
1779 drmach_board_unassign(drmachid_t id)
1780 {
1781 	drmach_board_t	*bp;
1782 	sbd_error_t	*err;
1783 	drmach_status_t	 stat;
1784 
1785 	if (DRMACH_NULL_ID(id))
1786 		return (NULL);
1787 
1788 	if (!DRMACH_IS_BOARD_ID(id)) {
1789 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1790 	}
1791 	bp = id;
1792 
1793 	rw_enter(&drmach_boards_rwlock, RW_WRITER);
1794 
1795 	err = drmach_board_status(id, &stat);
1796 	if (err) {
1797 		rw_exit(&drmach_boards_rwlock);
1798 		return (err);
1799 	}
1800 	if (stat.configured || stat.busy) {
1801 		err = drerr_new(0, EOPL_CONFIGBUSY, bp->cm.name);
1802 	} else {
1803 		if (drmach_array_set(drmach_boards, bp->bnum, 0) != 0)
1804 			err = DRMACH_INTERNAL_ERROR();
1805 		else
1806 			drmach_board_dispose(bp);
1807 	}
1808 	rw_exit(&drmach_boards_rwlock);
1809 	return (err);
1810 }
1811 
1812 /*
1813  * We have to do more on OPL - e.g. set up sram tte, read cpuid, strand id,
1814  * implementation #, etc
1815  */
1816 
1817 static sbd_error_t *
1818 drmach_cpu_new(drmach_device_t *proto, drmachid_t *idp)
1819 {
1820 	int		 portid;
1821 	drmach_cpu_t	*cp = NULL;
1822 
1823 	/* portid is CPUID of the node */
1824 	portid = proto->portid;
1825 	ASSERT(portid != -1);
1826 
1827 	/* unum = (CMP/CHIP ID) + (ON_BOARD_CORE_NUM * MAX_CMPID_PER_BOARD) */
1828 	proto->unum = ((portid/OPL_MAX_CPUID_PER_CMP) &
1829 	    (OPL_MAX_CMPID_PER_BOARD - 1)) +
1830 	    ((portid & (OPL_MAX_CPUID_PER_CMP - 1)) *
1831 	    (OPL_MAX_CMPID_PER_BOARD));
1832 
1833 	cp = kmem_zalloc(sizeof (drmach_cpu_t), KM_SLEEP);
1834 	bcopy(proto, &cp->dev, sizeof (cp->dev));
1835 	cp->dev.node = drmach_node_dup(proto->node);
1836 	cp->dev.cm.isa = (void *)drmach_cpu_new;
1837 	cp->dev.cm.dispose = drmach_cpu_dispose;
1838 	cp->dev.cm.release = drmach_cpu_release;
1839 	cp->dev.cm.status = drmach_cpu_status;
1840 
1841 	(void) snprintf(cp->dev.cm.name, sizeof (cp->dev.cm.name), "%s%d",
1842 	    cp->dev.type, cp->dev.unum);
1843 
1844 /*
1845  *	CPU ID representation
1846  *	CPUID[9:5] = SB#
1847  *	CPUID[4:3] = Chip#
1848  *	CPUID[2:1] = Core# (Only 2 core for OPL)
1849  *	CPUID[0:0] = Strand#
1850  */
1851 
1852 /*
1853  *	reg property of the strand contains strand ID
1854  *	reg property of the parent node contains core ID
1855  *	We should use them.
1856  */
1857 	cp->cpuid = portid;
1858 	cp->sb = (portid >> 5) & 0x1f;
1859 	cp->chipid = (portid >> 3) & 0x3;
1860 	cp->coreid = (portid >> 1) & 0x3;
1861 	cp->strandid = portid & 0x1;
1862 
1863 	*idp = (drmachid_t)cp;
1864 	return (NULL);
1865 }
1866 
1867 
1868 static void
1869 drmach_cpu_dispose(drmachid_t id)
1870 {
1871 	drmach_cpu_t	*self;
1872 
1873 	ASSERT(DRMACH_IS_CPU_ID(id));
1874 
1875 	self = id;
1876 	if (self->dev.node)
1877 		drmach_node_dispose(self->dev.node);
1878 
1879 	kmem_free(self, sizeof (*self));
1880 }
1881 
/*
 * Cold-start a powered-off CPU.  Caller must hold cpu_lock, and the
 * CPU must have a valid PROM node.  Always returns 0.
 */
static int
drmach_cpu_start(struct cpu *cp)
{
	int		cpuid = cp->cpu_id;
	extern int	restart_other_cpu(int);

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpunodes[cpuid].nodeid != (pnode_t)0);

	cp->cpu_flags &= ~CPU_POWEROFF;

	/*
	 * NOTE: restart_other_cpu pauses cpus during the
	 *	 slave cpu start.  This helps to quiesce the
	 *	 bus traffic a bit which makes the tick sync
	 *	 routine in the prom more robust.
	 */
	DRMACH_PR("COLD START for cpu (%d)\n", cpuid);

	(void) restart_other_cpu(cpuid);

	return (0);
}
1905 
/* CPU release hook: nothing to do beyond validating the id. */
static sbd_error_t *
drmach_cpu_release(drmachid_t id)
{
	if (!DRMACH_IS_CPU_ID(id))
		return (drerr_new(0, EOPL_INAPPROP, NULL));

	return (NULL);
}
1914 
1915 static sbd_error_t *
1916 drmach_cpu_status(drmachid_t id, drmach_status_t *stat)
1917 {
1918 	drmach_cpu_t *cp;
1919 	drmach_device_t *dp;
1920 
1921 	ASSERT(DRMACH_IS_CPU_ID(id));
1922 	cp = (drmach_cpu_t *)id;
1923 	dp = &cp->dev;
1924 
1925 	stat->assigned = dp->bp->assigned;
1926 	stat->powered = dp->bp->powered;
1927 	mutex_enter(&cpu_lock);
1928 	stat->configured = (cpu_get(cp->cpuid) != NULL);
1929 	mutex_exit(&cpu_lock);
1930 	stat->busy = dp->busy;
1931 	(void) strncpy(stat->type, dp->type, sizeof (stat->type));
1932 	stat->info[0] = '\0';
1933 
1934 	return (NULL);
1935 }
1936 
/* CPU disconnect hook: only validates the id; no work is needed. */
sbd_error_t *
drmach_cpu_disconnect(drmachid_t id)
{

	if (!DRMACH_IS_CPU_ID(id))
		return (drerr_new(0, EOPL_INAPPROP, NULL));

	return (NULL);
}
1946 
1947 sbd_error_t *
1948 drmach_cpu_get_id(drmachid_t id, processorid_t *cpuid)
1949 {
1950 	drmach_cpu_t *cpu;
1951 
1952 	if (!DRMACH_IS_CPU_ID(id))
1953 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1954 	cpu = (drmach_cpu_t *)id;
1955 
1956 	/* get from cpu directly on OPL */
1957 	*cpuid = cpu->cpuid;
1958 	return (NULL);
1959 }
1960 
1961 sbd_error_t *
1962 drmach_cpu_get_impl(drmachid_t id, int *ip)
1963 {
1964 	drmach_device_t *cpu;
1965 	drmach_node_t	*np;
1966 	drmach_node_t	pp;
1967 	int		impl;
1968 	char		type[OBP_MAXPROPNAME];
1969 
1970 	if (!DRMACH_IS_CPU_ID(id))
1971 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1972 
1973 	cpu = id;
1974 	np = cpu->node;
1975 
1976 	if (np->get_parent(np, &pp) != 0) {
1977 		return (DRMACH_INTERNAL_ERROR());
1978 	}
1979 
1980 	/* the parent should be core */
1981 
1982 	if (pp.n_getprop(&pp, "device_type", &type, sizeof (type)) != 0) {
1983 		return (drerr_new(0, EOPL_GETPROP, NULL));
1984 	}
1985 
1986 	if (strcmp(type, OPL_CORE_NODE) == 0) {
1987 		if (pp.n_getprop(&pp, "implementation#", &impl,
1988 		    sizeof (impl)) != 0) {
1989 			return (drerr_new(0, EOPL_GETPROP, NULL));
1990 		}
1991 	} else {
1992 		return (DRMACH_INTERNAL_ERROR());
1993 	}
1994 
1995 	*ip = impl;
1996 
1997 	return (NULL);
1998 }
1999 
2000 sbd_error_t *
2001 drmach_get_dip(drmachid_t id, dev_info_t **dip)
2002 {
2003 	drmach_device_t	*dp;
2004 
2005 	if (!DRMACH_IS_DEVICE_ID(id))
2006 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2007 	dp = id;
2008 
2009 	*dip = dp->node->n_getdip(dp->node);
2010 	return (NULL);
2011 }
2012 
2013 sbd_error_t *
2014 drmach_io_is_attached(drmachid_t id, int *yes)
2015 {
2016 	drmach_device_t *dp;
2017 	dev_info_t	*dip;
2018 	int		state;
2019 
2020 	if (!DRMACH_IS_IO_ID(id))
2021 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2022 	dp = id;
2023 
2024 	dip = dp->node->n_getdip(dp->node);
2025 	if (dip == NULL) {
2026 		*yes = 0;
2027 		return (NULL);
2028 	}
2029 
2030 	state = ddi_get_devstate(dip);
2031 	*yes = ((i_ddi_node_state(dip) >= DS_ATTACHED) ||
2032 	    (state == DDI_DEVSTATE_UP));
2033 
2034 	return (NULL);
2035 }
2036 
/*
 * Context for drmach_io_cb_check(): locate a child devinfo node by
 * name during a ddi_walk_devs() and record it (held) together with
 * the callback to apply to it.
 */
struct drmach_io_cb {
	char	*name;	/* name of the node to search for */
	int	(*func)(dev_info_t *);	/* callback to run on the node */
	int	rv;	/* callback return value */
	dev_info_t *dip;	/* matched node, held via ndi_hold_devi */
};
2043 
2044 #define	DRMACH_IO_POST_ATTACH	0
2045 #define	DRMACH_IO_PRE_RELEASE	1
2046 
2047 static int
2048 drmach_io_cb_check(dev_info_t *dip, void *arg)
2049 {
2050 	struct drmach_io_cb *p = (struct drmach_io_cb *)arg;
2051 	char name[OBP_MAXDRVNAME];
2052 	int len = OBP_MAXDRVNAME;
2053 
2054 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "name",
2055 	    (caddr_t)name, &len) != DDI_PROP_SUCCESS) {
2056 		return (DDI_WALK_PRUNECHILD);
2057 	}
2058 
2059 	if (strcmp(name, p->name) == 0) {
2060 		ndi_hold_devi(dip);
2061 		p->dip = dip;
2062 		return (DDI_WALK_TERMINATE);
2063 	}
2064 
2065 	return (DDI_WALK_CONTINUE);
2066 }
2067 
2068 
/*
 * Run the oplmsu console driver's DR hook on the "serial" node below a
 * pcicmu channel.  'state' selects the pre-release (detach) or
 * post-attach hook.  Returns the hook's status, 0 when nothing needed
 * doing, or -1 when the parent/"serial" node could not be found.
 */
static int
drmach_console_ops(drmachid_t *id, int state)
{
	drmach_io_t *obj = (drmach_io_t *)id;
	struct drmach_io_cb arg;
	int (*msudetp)(dev_info_t *);
	int (*msuattp)(dev_info_t *);
	dev_info_t *dip, *pdip;
	int circ;

	/* 4 is pcicmu channel */
	if (obj->channel != 4)
		return (0);

	arg.name = "serial";
	arg.func = NULL;
	if (state == DRMACH_IO_PRE_RELEASE) {
		/* resolve the hook only if the oplmsu module is loaded */
		msudetp = (int (*)(dev_info_t *))
		    modgetsymvalue("oplmsu_dr_detach", 0);
		if (msudetp != NULL)
			arg.func = msudetp;
	} else if (state == DRMACH_IO_POST_ATTACH) {
		msuattp = (int (*)(dev_info_t *))
		    modgetsymvalue("oplmsu_dr_attach", 0);
		if (msuattp != NULL)
			arg.func = msuattp;
	} else {
		return (0);
	}

	if (arg.func == NULL) {
		return (0);
	}

	arg.rv = 0;
	arg.dip = NULL;

	dip = obj->dev.node->n_getdip(obj->dev.node);
	if (pdip = ddi_get_parent(dip)) {
		ndi_hold_devi(pdip);
		ndi_devi_enter(pdip, &circ);
	} else {
		/* this cannot happen unless something bad happens */
		return (-1);
	}

	/* find and hold the "serial" node below this device */
	ddi_walk_devs(dip, drmach_io_cb_check, (void *)&arg);

	ndi_devi_exit(pdip, circ);
	ndi_rele_devi(pdip);

	if (arg.dip) {
		arg.rv = (*arg.func)(arg.dip);
		ndi_rele_devi(arg.dip);
	} else {
		arg.rv = -1;
	}

	return (arg.rv);
}
2129 
2130 sbd_error_t *
2131 drmach_io_pre_release(drmachid_t id)
2132 {
2133 	int rv;
2134 
2135 	if (!DRMACH_IS_IO_ID(id))
2136 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2137 
2138 	rv = drmach_console_ops(id, DRMACH_IO_PRE_RELEASE);
2139 
2140 	if (rv != 0)
2141 		cmn_err(CE_WARN, "IO callback failed in pre-release\n");
2142 
2143 	return (NULL);
2144 }
2145 
/* I/O release hook: nothing to do beyond validating the id. */
static sbd_error_t *
drmach_io_release(drmachid_t id)
{
	if (!DRMACH_IS_IO_ID(id))
		return (drerr_new(0, EOPL_INAPPROP, NULL));
	return (NULL);
}
2153 
/* I/O unrelease hook: nothing to do beyond validating the id. */
sbd_error_t *
drmach_io_unrelease(drmachid_t id)
{
	if (!DRMACH_IS_IO_ID(id))
		return (drerr_new(0, EOPL_INAPPROP, NULL));
	return (NULL);
}
2161 
/* I/O post-release hook: no work is needed on OPL. */
/*ARGSUSED*/
sbd_error_t *
drmach_io_post_release(drmachid_t id)
{
	return (NULL);
}
2168 
2169 /*ARGSUSED*/
2170 sbd_error_t *
2171 drmach_io_post_attach(drmachid_t id)
2172 {
2173 	int rv;
2174 
2175 	if (!DRMACH_IS_IO_ID(id))
2176 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2177 
2178 	rv = drmach_console_ops(id, DRMACH_IO_POST_ATTACH);
2179 
2180 	if (rv != 0)
2181 		cmn_err(CE_WARN, "IO callback failed in post-attach\n");
2182 
2183 	return (0);
2184 }
2185 
2186 static sbd_error_t *
2187 drmach_io_status(drmachid_t id, drmach_status_t *stat)
2188 {
2189 	drmach_device_t *dp;
2190 	sbd_error_t	*err;
2191 	int		 configured;
2192 
2193 	ASSERT(DRMACH_IS_IO_ID(id));
2194 	dp = id;
2195 
2196 	err = drmach_io_is_attached(id, &configured);
2197 	if (err)
2198 		return (err);
2199 
2200 	stat->assigned = dp->bp->assigned;
2201 	stat->powered = dp->bp->powered;
2202 	stat->configured = (configured != 0);
2203 	stat->busy = dp->busy;
2204 	(void) strncpy(stat->type, dp->type, sizeof (stat->type));
2205 	stat->info[0] = '\0';
2206 
2207 	return (NULL);
2208 }
2209 
2210 static sbd_error_t *
2211 drmach_mem_new(drmach_device_t *proto, drmachid_t *idp)
2212 {
2213 	dev_info_t *dip;
2214 	int rv;
2215 
2216 	drmach_mem_t	*mp;
2217 
2218 	rv = 0;
2219 
2220 	if ((proto->node->n_getproplen(proto->node, "mc-addr", &rv) < 0) ||
2221 	    (rv <= 0)) {
2222 		*idp = (drmachid_t)0;
2223 		return (NULL);
2224 	}
2225 
2226 	mp = kmem_zalloc(sizeof (drmach_mem_t), KM_SLEEP);
2227 	proto->unum = 0;
2228 
2229 	bcopy(proto, &mp->dev, sizeof (mp->dev));
2230 	mp->dev.node = drmach_node_dup(proto->node);
2231 	mp->dev.cm.isa = (void *)drmach_mem_new;
2232 	mp->dev.cm.dispose = drmach_mem_dispose;
2233 	mp->dev.cm.release = drmach_mem_release;
2234 	mp->dev.cm.status = drmach_mem_status;
2235 
2236 	(void) snprintf(mp->dev.cm.name, sizeof (mp->dev.cm.name), "%s",
2237 	    mp->dev.type);
2238 
2239 	dip = mp->dev.node->n_getdip(mp->dev.node);
2240 	if (drmach_setup_mc_info(dip, mp) != 0) {
2241 		return (drerr_new(1, EOPL_MC_SETUP, NULL));
2242 	}
2243 
2244 	/* make sure we do not create memoryless nodes */
2245 	if (mp->nbytes == 0) {
2246 		*idp = (drmachid_t)NULL;
2247 		kmem_free(mp, sizeof (drmach_mem_t));
2248 	} else
2249 		*idp = (drmachid_t)mp;
2250 
2251 	return (NULL);
2252 }
2253 
2254 static void
2255 drmach_mem_dispose(drmachid_t id)
2256 {
2257 	drmach_mem_t *mp;
2258 
2259 	ASSERT(DRMACH_IS_MEM_ID(id));
2260 
2261 
2262 	mp = id;
2263 
2264 	if (mp->dev.node)
2265 		drmach_node_dispose(mp->dev.node);
2266 
2267 	if (mp->memlist) {
2268 		memlist_delete(mp->memlist);
2269 		mp->memlist = NULL;
2270 	}
2271 
2272 	kmem_free(mp, sizeof (*mp));
2273 }
2274 
2275 sbd_error_t *
2276 drmach_mem_add_span(drmachid_t id, uint64_t basepa, uint64_t size)
2277 {
2278 	pfn_t		basepfn = (pfn_t)(basepa >> PAGESHIFT);
2279 	pgcnt_t		npages = (pgcnt_t)(size >> PAGESHIFT);
2280 	int		rv;
2281 
2282 	ASSERT(size != 0);
2283 
2284 	if (!DRMACH_IS_MEM_ID(id))
2285 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2286 
2287 	rv = kcage_range_add(basepfn, npages, KCAGE_DOWN);
2288 	if (rv == ENOMEM) {
2289 		cmn_err(CE_WARN, "%lu megabytes not available to kernel cage",
2290 		    (ulong_t)(size == 0 ? 0 : size / MBYTE));
2291 	} else if (rv != 0) {
2292 		/* catch this in debug kernels */
2293 		ASSERT(0);
2294 
2295 		cmn_err(CE_WARN, "unexpected kcage_range_add return value %d",
2296 		    rv);
2297 	}
2298 
2299 	if (rv) {
2300 		return (DRMACH_INTERNAL_ERROR());
2301 	}
2302 	else
2303 		return (NULL);
2304 }
2305 
2306 sbd_error_t *
2307 drmach_mem_del_span(drmachid_t id, uint64_t basepa, uint64_t size)
2308 {
2309 	pfn_t		basepfn = (pfn_t)(basepa >> PAGESHIFT);
2310 	pgcnt_t		npages = (pgcnt_t)(size >> PAGESHIFT);
2311 	int		rv;
2312 
2313 	if (!DRMACH_IS_MEM_ID(id))
2314 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2315 
2316 	if (size > 0) {
2317 		rv = kcage_range_delete_post_mem_del(basepfn, npages);
2318 		if (rv != 0) {
2319 			cmn_err(CE_WARN,
2320 			    "unexpected kcage_range_delete_post_mem_del"
2321 			    " return value %d", rv);
2322 			return (DRMACH_INTERNAL_ERROR());
2323 		}
2324 	}
2325 
2326 	return (NULL);
2327 }
2328 
2329 sbd_error_t *
2330 drmach_mem_disable(drmachid_t id)
2331 {
2332 	if (!DRMACH_IS_MEM_ID(id))
2333 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2334 	else {
2335 		drmach_flush_all();
2336 		return (NULL);
2337 	}
2338 }
2339 
/* Memory enable hook: only validates the id; no work is needed. */
sbd_error_t *
drmach_mem_enable(drmachid_t id)
{
	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, EOPL_INAPPROP, NULL));
	else
		return (NULL);
}
2348 
2349 sbd_error_t *
2350 drmach_mem_get_info(drmachid_t id, drmach_mem_info_t *mem)
2351 {
2352 	drmach_mem_t *mp;
2353 
2354 	if (!DRMACH_IS_MEM_ID(id))
2355 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2356 
2357 	mp = (drmach_mem_t *)id;
2358 
2359 	/*
2360 	 * This is only used by dr to round up/down the memory
2361 	 * for copying. Our unit of memory isolation is 64 MB.
2362 	 */
2363 
2364 	mem->mi_alignment_mask = (64 * 1024 * 1024 - 1);
2365 	mem->mi_basepa = mp->base_pa;
2366 	mem->mi_size = mp->nbytes;
2367 	mem->mi_slice_size = mp->slice_size;
2368 
2369 	return (NULL);
2370 }
2371 
2372 sbd_error_t *
2373 drmach_mem_get_base_physaddr(drmachid_t id, uint64_t *pa)
2374 {
2375 	drmach_mem_t *mp;
2376 
2377 	if (!DRMACH_IS_MEM_ID(id))
2378 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2379 
2380 	mp = (drmach_mem_t *)id;
2381 
2382 	*pa = mp->base_pa;
2383 	return (NULL);
2384 }
2385 
/*
 * Return a copy of the memory unit's memlist.  On DEBUG kernels the
 * derived list is first checked for overlap with phys_install.
 */
sbd_error_t *
drmach_mem_get_memlist(drmachid_t id, struct memlist **ml)
{
	drmach_mem_t	*mem;
#ifdef	DEBUG
	int		rv;
#endif
	struct memlist	*mlist;

	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, EOPL_INAPPROP, NULL));

	mem = (drmach_mem_t *)id;
	mlist = memlist_dup(mem->memlist);

#ifdef DEBUG
	/*
	 * Make sure the incoming memlist doesn't already
	 * intersect with what's present in the system (phys_install).
	 */
	memlist_read_lock();
	rv = memlist_intersect(phys_install, mlist);
	memlist_read_unlock();
	if (rv) {
		DRMACH_PR("Derived memlist intersects with phys_install\n");
		memlist_dump(mlist);

		DRMACH_PR("phys_install memlist:\n");
		memlist_dump(phys_install);

		memlist_delete(mlist);
		return (DRMACH_INTERNAL_ERROR());
	}

	DRMACH_PR("Derived memlist:");
	memlist_dump(mlist);
#endif
	*ml = mlist;

	return (NULL);
}
2427 
2428 sbd_error_t *
2429 drmach_mem_get_slice_size(drmachid_t id, uint64_t *bytes)
2430 {
2431 	drmach_mem_t	*mem;
2432 
2433 	if (!DRMACH_IS_MEM_ID(id))
2434 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2435 
2436 	mem = (drmach_mem_t *)id;
2437 
2438 	*bytes = mem->slice_size;
2439 
2440 	return (NULL);
2441 }
2442 
2443 
/* Memory copy has no CPU affinity preference; any current CPU will do. */
/* ARGSUSED */
processorid_t
drmach_mem_cpu_affinity(drmachid_t id)
{
	return (CPU_CURRENT);
}
2450 
/* Memory release hook: nothing to do beyond validating the id. */
static sbd_error_t *
drmach_mem_release(drmachid_t id)
{
	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, EOPL_INAPPROP, NULL));
	return (NULL);
}
2458 
/*
 * Fill in the common DR status record for a memory node.  The
 * assigned/powered flags come from the owning board; 'configured'
 * reflects whether any span of phys_install currently starts within
 * this node's memory slice.
 */
static sbd_error_t *
drmach_mem_status(drmachid_t id, drmach_status_t *stat)
{
	drmach_mem_t *dp;
	uint64_t	 pa, slice_size;
	struct memlist	*ml;

	ASSERT(DRMACH_IS_MEM_ID(id));
	dp = id;

	/* get starting physical address of target memory */
	pa = dp->base_pa;

	/* round down to slice boundary (assumes slice_size is a power of 2) */
	slice_size = dp->slice_size;
	pa &= ~(slice_size - 1);

	/* stop at first span that is in slice */
	memlist_read_lock();
	for (ml = phys_install; ml; ml = ml->ml_next)
		if (ml->ml_address >= pa && ml->ml_address < pa + slice_size)
			break;
	memlist_read_unlock();

	stat->assigned = dp->dev.bp->assigned;
	stat->powered = dp->dev.bp->powered;
	stat->configured = (ml != NULL);
	stat->busy = dp->dev.busy;
	/*
	 * NOTE(review): strncpy does not NUL-terminate if dev.type fills
	 * stat->type exactly — presumed safe given fixed type strings;
	 * confirm against sbd type-name lengths.
	 */
	(void) strncpy(stat->type, dp->dev.type, sizeof (stat->type));
	stat->info[0] = '\0';

	return (NULL);
}
2492 
2493 
2494 sbd_error_t *
2495 drmach_board_deprobe(drmachid_t id)
2496 {
2497 	drmach_board_t	*bp;
2498 
2499 	if (!DRMACH_IS_BOARD_ID(id))
2500 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2501 
2502 	bp = id;
2503 
2504 	cmn_err(CE_CONT, "DR: detach board %d\n", bp->bnum);
2505 
2506 	if (bp->tree) {
2507 		drmach_node_dispose(bp->tree);
2508 		bp->tree = NULL;
2509 	}
2510 	if (bp->devices) {
2511 		drmach_array_dispose(bp->devices, drmach_device_dispose);
2512 		bp->devices = NULL;
2513 	}
2514 
2515 	bp->boot_board = 0;
2516 
2517 	return (NULL);
2518 }
2519 
2520 /*ARGSUSED*/
2521 static sbd_error_t *
2522 drmach_pt_ikprobe(drmachid_t id, drmach_opts_t *opts)
2523 {
2524 	drmach_board_t		*bp = (drmach_board_t *)id;
2525 	sbd_error_t		*err = NULL;
2526 	int	rv;
2527 	unsigned cpu_impl;
2528 
2529 	if (!DRMACH_IS_BOARD_ID(id))
2530 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2531 
2532 	DRMACH_PR("calling opl_probe_board for bnum=%d\n", bp->bnum);
2533 	rv = opl_probe_sb(bp->bnum, &cpu_impl);
2534 	if (rv != 0) {
2535 		err = drerr_new(1, EOPL_PROBE, bp->cm.name);
2536 		return (err);
2537 	}
2538 	return (err);
2539 }
2540 
2541 /*ARGSUSED*/
2542 static sbd_error_t *
2543 drmach_pt_ikdeprobe(drmachid_t id, drmach_opts_t *opts)
2544 {
2545 	drmach_board_t	*bp;
2546 	sbd_error_t	*err = NULL;
2547 	int	rv;
2548 
2549 	if (!DRMACH_IS_BOARD_ID(id))
2550 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2551 	bp = (drmach_board_t *)id;
2552 
2553 	cmn_err(CE_CONT, "DR: in-kernel unprobe board %d\n", bp->bnum);
2554 
2555 	rv = opl_unprobe_sb(bp->bnum);
2556 	if (rv != 0) {
2557 		err = drerr_new(1, EOPL_DEPROBE, bp->cm.name);
2558 	}
2559 
2560 	return (err);
2561 }
2562 
2563 
2564 /*ARGSUSED*/
2565 sbd_error_t *
2566 drmach_pt_readmem(drmachid_t id, drmach_opts_t *opts)
2567 {
2568 	struct memlist	*ml;
2569 	uint64_t	src_pa;
2570 	uint64_t	dst_pa;
2571 	uint64_t	dst;
2572 
2573 	dst_pa = va_to_pa(&dst);
2574 
2575 	memlist_read_lock();
2576 	for (ml = phys_install; ml; ml = ml->ml_next) {
2577 		uint64_t	nbytes;
2578 
2579 		src_pa = ml->ml_address;
2580 		nbytes = ml->ml_size;
2581 
2582 		while (nbytes != 0ull) {
2583 
2584 			/* copy 32 bytes at arc_pa to dst_pa */
2585 			bcopy32_il(src_pa, dst_pa);
2586 
2587 			/* increment by 32 bytes */
2588 			src_pa += (4 * sizeof (uint64_t));
2589 
2590 			/* decrement by 32 bytes */
2591 			nbytes -= (4 * sizeof (uint64_t));
2592 		}
2593 	}
2594 	memlist_read_unlock();
2595 
2596 	return (NULL);
2597 }
2598 
2599 static struct {
2600 	const char	*name;
2601 	sbd_error_t	*(*handler)(drmachid_t id, drmach_opts_t *opts);
2602 } drmach_pt_arr[] = {
2603 	{ "readmem",		drmach_pt_readmem		},
2604 	{ "ikprobe",	drmach_pt_ikprobe	},
2605 	{ "ikdeprobe",	drmach_pt_ikdeprobe	},
2606 
2607 	/* the following line must always be last */
2608 	{ NULL,			NULL				}
2609 };
2610 
2611 /*ARGSUSED*/
2612 sbd_error_t *
2613 drmach_passthru(drmachid_t id, drmach_opts_t *opts)
2614 {
2615 	int		i;
2616 	sbd_error_t	*err;
2617 
2618 	i = 0;
2619 	while (drmach_pt_arr[i].name != NULL) {
2620 		int len = strlen(drmach_pt_arr[i].name);
2621 
2622 		if (strncmp(drmach_pt_arr[i].name, opts->copts, len) == 0)
2623 			break;
2624 
2625 		i += 1;
2626 	}
2627 
2628 	if (drmach_pt_arr[i].name == NULL)
2629 		err = drerr_new(0, EOPL_UNKPTCMD, opts->copts);
2630 	else
2631 		err = (*drmach_pt_arr[i].handler)(id, opts);
2632 
2633 	return (err);
2634 }
2635 
2636 sbd_error_t *
2637 drmach_release(drmachid_t id)
2638 {
2639 	drmach_common_t *cp;
2640 
2641 	if (!DRMACH_IS_DEVICE_ID(id))
2642 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2643 	cp = id;
2644 
2645 	return (cp->release(id));
2646 }
2647 
2648 sbd_error_t *
2649 drmach_status(drmachid_t id, drmach_status_t *stat)
2650 {
2651 	drmach_common_t *cp;
2652 	sbd_error_t	*err;
2653 
2654 	rw_enter(&drmach_boards_rwlock, RW_READER);
2655 
2656 	if (!DRMACH_IS_ID(id)) {
2657 		rw_exit(&drmach_boards_rwlock);
2658 		return (drerr_new(0, EOPL_NOTID, NULL));
2659 	}
2660 	cp = (drmach_common_t *)id;
2661 	err = cp->status(id, stat);
2662 
2663 	rw_exit(&drmach_boards_rwlock);
2664 
2665 	return (err);
2666 }
2667 
2668 static sbd_error_t *
2669 drmach_i_status(drmachid_t id, drmach_status_t *stat)
2670 {
2671 	drmach_common_t *cp;
2672 
2673 	if (!DRMACH_IS_ID(id))
2674 		return (drerr_new(0, EOPL_NOTID, NULL));
2675 	cp = id;
2676 
2677 	return (cp->status(id, stat));
2678 }
2679 
/*
 * Unconfigure the OS device tree branch rooted at this device.  CPUs
 * are handled by a separate path and are silently accepted here.  On
 * failure, the path of the devinfo node that blocked the unconfigure
 * is reported in the returned error.
 */
/*ARGSUSED*/
sbd_error_t *
drmach_unconfigure(drmachid_t id, int flags)
{
	drmach_device_t *dp;
	dev_info_t	*rdip, *fdip = NULL;
	char name[OBP_MAXDRVNAME];
	int rv;

	if (DRMACH_IS_CPU_ID(id))
		return (NULL);

	if (!DRMACH_IS_DEVICE_ID(id))
		return (drerr_new(0, EOPL_INAPPROP, NULL));

	dp = id;

	rdip = dp->node->n_getdip(dp->node);

	ASSERT(rdip);

	rv = dp->node->n_getprop(dp->node, "name", name, OBP_MAXDRVNAME);

	/* nodes without a "name" property have nothing to unconfigure */
	if (rv)
		return (NULL);

	/*
	 * Note: FORCE flag is no longer necessary under devfs
	 */

	ASSERT(e_ddi_branch_held(rdip));
	if (e_ddi_branch_unconfigure(rdip, &fdip, 0)) {
		sbd_error_t	*err;
		char		*path = kmem_alloc(MAXPATHLEN, KM_SLEEP);

		/*
		 * If non-NULL, fdip is returned held and must be released.
		 */
		if (fdip != NULL) {
			(void) ddi_pathname(fdip, path);
			ndi_rele_devi(fdip);
		} else {
			(void) ddi_pathname(rdip, path);
		}

		err = drerr_new(1, EOPL_DRVFAIL, path);

		kmem_free(path, MAXPATHLEN);

		return (err);
	}

	return (NULL);
}
2734 
2735 
/*
 * Power on (start) a CPU strand.  If no strand of the CMP core has
 * been hot-added yet, the core is hot-added first via the OPL
 * platform service.  Returns 0 on success, EIO if the hot-add fails,
 * EBUSY if the strand could not be started.  Caller holds cpu_lock.
 */
int
drmach_cpu_poweron(struct cpu *cp)
{
	int bnum, cpuid, onb_core_num, strand_id;
	drmach_board_t *bp;

	DRMACH_PR("drmach_cpu_poweron: starting cpuid %d\n", cp->cpu_id);

	cpuid = cp->cpu_id;
	bnum = LSB_ID(cpuid);
	onb_core_num = ON_BOARD_CORE_NUM(cpuid);
	strand_id = STRAND_ID(cpuid);
	bp = drmach_get_board_by_bnum(bnum);

	ASSERT(bp);
	/* hot-add the core if none of its strands has been added yet */
	if (bp->cores[onb_core_num].core_hotadded == 0) {
		if (drmach_add_remove_cpu(bnum, onb_core_num,
		    HOTADD_CPU) != 0) {
			cmn_err(CE_WARN, "Failed to add CMP %d on board %d\n",
			    onb_core_num, bnum);
			return (EIO);
		}
	}

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (drmach_cpu_start(cp) != 0) {
		if (bp->cores[onb_core_num].core_started == 0) {
			/*
			 * we must undo the hotadd or no one will do that
			 * If this fails, we will do this again in
			 * drmach_board_disconnect.
			 */
			if (drmach_add_remove_cpu(bnum, onb_core_num,
			    HOTREMOVE_CPU) != 0) {
				cmn_err(CE_WARN, "Failed to remove CMP %d "
				    "on board %d\n", onb_core_num, bnum);
			}
		}
		return (EBUSY);
	} else {
		/* record this strand as started on its core */
		bp->cores[onb_core_num].core_started |= (1 << strand_id);
		return (0);
	}
}
2781 
/*
 * Power off (stop) a CPU strand.  All other CPUs are paused while the
 * target is quiesced and stopped through the PROM; if this was the
 * last running strand of its core, the core is hot-removed.  Returns
 * 0 on success, EIO if the hot-remove fails, or the PROM stop status.
 * Caller holds cpu_lock.
 */
int
drmach_cpu_poweroff(struct cpu *cp)
{
	int 		rv = 0;
	processorid_t	cpuid = cp->cpu_id;

	DRMACH_PR("drmach_cpu_poweroff: stopping cpuid %d\n", cp->cpu_id);

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Capture all CPUs (except for detaching proc) to prevent
	 * crosscalls to the detaching proc until it has cleared its
	 * bit in cpu_ready_set.
	 *
	 * The CPU's remain paused and the prom_mutex is known to be free.
	 * This prevents the x-trap victim from blocking when doing prom
	 * IEEE-1275 calls at a high PIL level.
	 */

	promsafe_pause_cpus();

	/*
	 * Quiesce interrupts on the target CPU. We do this by setting
	 * the CPU 'not ready'- (i.e. removing the CPU from cpu_ready_set) to
	 * prevent it from receiving cross calls and cross traps.
	 * This prevents the processor from receiving any new soft interrupts.
	 */
	mp_cpu_quiesce(cp);

	rv = prom_stopcpu_bycpuid(cpuid);
	if (rv == 0)
		cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;

	/* resume the CPUs paused above */
	start_cpus();

	if (rv == 0) {
		int bnum, onb_core_num, strand_id;
		drmach_board_t *bp;

		CPU_SIGNATURE(OS_SIG, SIGST_DETACHED, SIGSUBST_NULL, cpuid);

		bnum = LSB_ID(cpuid);
		onb_core_num = ON_BOARD_CORE_NUM(cpuid);
		strand_id = STRAND_ID(cpuid);
		bp = drmach_get_board_by_bnum(bnum);
		ASSERT(bp);

		/* hot-remove the core once its last strand is stopped */
		bp->cores[onb_core_num].core_started &= ~(1 << strand_id);
		if (bp->cores[onb_core_num].core_started == 0) {
			if (drmach_add_remove_cpu(bnum, onb_core_num,
			    HOTREMOVE_CPU) != 0) {
				cmn_err(CE_WARN, "Failed to remove CMP %d LSB "
				    "%d\n", onb_core_num, bnum);
				return (EIO);
			}
		}
	}

	return (rv);
}
2843 
2844 /*ARGSUSED*/
2845 int
2846 drmach_verify_sr(dev_info_t *dip, int sflag)
2847 {
2848 	return (0);
2849 }
2850 
/*
 * DR suspend hook; intentionally a no-op on OPL.
 */
void
drmach_suspend_last(void)
{
}
2855 
/*
 * DR resume hook; intentionally a no-op on OPL.
 */
void
drmach_resume_first(void)
{
}
2860 
2861 /*
2862  * Log a DR sysevent.
2863  * Return value: 0 success, non-zero failure.
2864  */
2865 int
2866 drmach_log_sysevent(int board, char *hint, int flag, int verbose)
2867 {
2868 	sysevent_t			*ev;
2869 	sysevent_id_t			eid;
2870 	int				rv, km_flag;
2871 	sysevent_value_t		evnt_val;
2872 	sysevent_attr_list_t		*evnt_attr_list = NULL;
2873 	char				attach_pnt[MAXNAMELEN];
2874 
2875 	km_flag = (flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
2876 	attach_pnt[0] = '\0';
2877 	if (drmach_board_name(board, attach_pnt, MAXNAMELEN)) {
2878 		rv = -1;
2879 		goto logexit;
2880 	}
2881 	if (verbose) {
2882 		DRMACH_PR("drmach_log_sysevent: %s %s, flag: %d, verbose: %d\n",
2883 		    attach_pnt, hint, flag, verbose);
2884 	}
2885 
2886 	if ((ev = sysevent_alloc(EC_DR, ESC_DR_AP_STATE_CHANGE,
2887 	    SUNW_KERN_PUB"dr", km_flag)) == NULL) {
2888 		rv = -2;
2889 		goto logexit;
2890 	}
2891 	evnt_val.value_type = SE_DATA_TYPE_STRING;
2892 	evnt_val.value.sv_string = attach_pnt;
2893 	if ((rv = sysevent_add_attr(&evnt_attr_list, DR_AP_ID, &evnt_val,
2894 	    km_flag)) != 0)
2895 		goto logexit;
2896 
2897 	evnt_val.value_type = SE_DATA_TYPE_STRING;
2898 	evnt_val.value.sv_string = hint;
2899 	if ((rv = sysevent_add_attr(&evnt_attr_list, DR_HINT, &evnt_val,
2900 	    km_flag)) != 0) {
2901 		sysevent_free_attr(evnt_attr_list);
2902 		goto logexit;
2903 	}
2904 
2905 	(void) sysevent_attach_attributes(ev, evnt_attr_list);
2906 
2907 	/*
2908 	 * Log the event but do not sleep waiting for its
2909 	 * delivery. This provides insulation from syseventd.
2910 	 */
2911 	rv = log_sysevent(ev, SE_NOSLEEP, &eid);
2912 
2913 logexit:
2914 	if (ev)
2915 		sysevent_free(ev);
2916 	if ((rv != 0) && verbose)
2917 		cmn_err(CE_WARN, "drmach_log_sysevent failed (rv %d) for %s "
2918 		    " %s\n", rv, attach_pnt, hint);
2919 
2920 	return (rv);
2921 }
2922 
2923 #define	OPL_DR_STATUS_PROP "dr-status"
2924 
2925 static int
2926 opl_check_dr_status()
2927 {
2928 	pnode_t	node;
2929 	int	rtn, len;
2930 	char	*str;
2931 
2932 	node = prom_rootnode();
2933 	if (node == OBP_BADNODE) {
2934 		return (1);
2935 	}
2936 
2937 	len = prom_getproplen(node, OPL_DR_STATUS_PROP);
2938 	if (len == -1) {
2939 		/*
2940 		 * dr-status doesn't exist when DR is activated and
2941 		 * any warning messages aren't needed.
2942 		 */
2943 		return (1);
2944 	}
2945 
2946 	str = (char *)kmem_zalloc(len+1, KM_SLEEP);
2947 	rtn = prom_getprop(node, OPL_DR_STATUS_PROP, str);
2948 	kmem_free(str, len + 1);
2949 	if (rtn == -1) {
2950 		return (1);
2951 	} else {
2952 		return (0);
2953 	}
2954 }
2955 
/* we are allocating memlist from TLB locked pages to avoid tlbmisses */

/*
 * Insert the span [base, base+len) into the sorted, doubly-linked
 * memlist 'mlist', coalescing with adjacent/overlapping spans.  New
 * elements come from the program's private free pool (p->free_mlist);
 * returns the (possibly new) list head, or NULL if len is zero or the
 * free pool is exhausted.
 */
static struct memlist *
drmach_memlist_add_span(drmach_copy_rename_program_t *p,
	struct memlist *mlist, uint64_t base, uint64_t len)
{
	struct memlist	*ml, *tl, *nl;

	if (len == 0ull)
		return (NULL);

	if (mlist == NULL) {
		/* empty list: pop one element from the free pool */
		mlist = p->free_mlist;
		if (mlist == NULL)
			return (NULL);
		p->free_mlist = mlist->ml_next;
		mlist->ml_address = base;
		mlist->ml_size = len;
		mlist->ml_next = mlist->ml_prev = NULL;

		return (mlist);
	}

	/* 'tl' trails 'ml' so we can append after the last element */
	for (tl = ml = mlist; ml; tl = ml, ml = ml->ml_next) {
		if (base < ml->ml_address) {
			if ((base + len) < ml->ml_address) {
				/* disjoint: link a new element before ml */
				nl = p->free_mlist;
				if (nl == NULL)
					return (NULL);
				p->free_mlist = nl->ml_next;
				nl->ml_address = base;
				nl->ml_size = len;
				nl->ml_next = ml;
				if ((nl->ml_prev = ml->ml_prev) != NULL)
					nl->ml_prev->ml_next = nl;
				ml->ml_prev = nl;
				if (mlist == ml)
					mlist = nl;
			} else {
				/* overlaps ml from below: extend ml down */
				ml->ml_size = MAX((base + len),
				    (ml->ml_address + ml->ml_size)) - base;
				ml->ml_address = base;
			}
			break;

		} else if (base <= (ml->ml_address + ml->ml_size)) {
			/* overlaps or abuts ml from above: merge into ml */
			ml->ml_size =
			    MAX((base + len), (ml->ml_address + ml->ml_size)) -
			    MIN(ml->ml_address, base);
			ml->ml_address = MIN(ml->ml_address, base);
			break;
		}
	}
	if (ml == NULL) {
		/* ran off the end: append a new element after 'tl' */
		nl = p->free_mlist;
		if (nl == NULL)
			return (NULL);
		p->free_mlist = nl->ml_next;
		nl->ml_address = base;
		nl->ml_size = len;
		nl->ml_next = NULL;
		nl->ml_prev = tl;
		tl->ml_next = nl;
	}

	return (mlist);
}
3023 
3024 /*
3025  * The routine performs the necessary memory COPY and MC adr SWITCH.
3026  * Both operations MUST be at the same "level" so that the stack is
3027  * maintained correctly between the copy and switch.  The switch
3028  * portion implements a caching mechanism to guarantee the code text
3029  * is cached prior to execution.  This is to guard against possible
3030  * memory access while the MC adr's are being modified.
3031  *
3032  * IMPORTANT: The _drmach_copy_rename_end() function must immediately
3033  * follow drmach_copy_rename_prog__relocatable() so that the correct
3034  * "length" of the drmach_copy_rename_prog__relocatable can be
3035  * calculated.  This routine MUST be a LEAF function, i.e. it can
3036  * make NO function calls, primarily for two reasons:
3037  *
3038  *	1. We must keep the stack consistent across the "switch".
 *	2. Function calls are compiled to relative offsets, and
 *	   when we execute this function we'll be executing it from
3041  *	   a copied version in a different area of memory, thus
3042  *	   the relative offsets will be bogus.
3043  *
3044  * Moreover, it must have the "__relocatable" suffix to inform DTrace
3045  * providers (and anything else, for that matter) that this
3046  * function's text is manually relocated elsewhere before it is
3047  * executed.  That is, it cannot be safely instrumented with any
3048  * methodology that is PC-relative.
3049  */
3050 
/*
 * We multiply this to system_clock_frequency so we
 * are setting a delay of fmem_timeout second for
 * the rename command.
 *
 * FMEM command itself should complete within 15 sec.
 * We add 2 more sec to be conservative.
 *
 * Note that there is also a SCF BUSY bit checking
 * in drmach_asm.s right before FMEM command is
 * issued.  XSCF sets the SCF BUSY bit when the
 * other domain on the same PSB reboots and it
 * will not be able to service the FMEM command
 * within 15 sec.   After setting the SCF BUSY
 * bit, XSCF will wait a while before servicing
 * other reboot command so there is no race
 * condition.
 */

static int	fmem_timeout = 17;

/*
 * The empirical data on some OPL system shows that
 * we can copy 250 MB per second.  We set it to
 * 80 MB to be conservative.  In normal case,
 * this timeout does not affect anything.
 */

static int	min_copy_size_per_sec = 80 * 1024 * 1024;

/*
 * This is the timeout value for the xcall synchronization
 * to get all the CPU ready to do the parallel copying.
 * Even on a fully loaded system, 10 sec. should be long
 * enough.
 */

static int	cpu_xcall_delay = 10;

/* tunable: non-zero presumably disables the multi-CPU copy — confirm usage */
int drmach_disable_mcopy = 0;

/*
 * The following delay loop executes sleep instruction to yield the
 * CPU to other strands.  If this is not done, some strand will tie
 * up the CPU in busy loops while the other strand cannot do useful
 * work.  The copy procedure will take a much longer time without this.
 */
#define	DR_DELAY_IL(ms, freq)					\
	{							\
		uint64_t start;					\
		uint64_t nstick;				\
		volatile uint64_t now;				\
		/* number of stick ticks in 'ms' msec */	\
		nstick = ((uint64_t)ms * freq)/1000;		\
		start = drmach_get_stick_il();			\
		now = start;					\
		while ((now - start) <= nstick) {		\
			drmach_sleep_il();			\
			now = drmach_get_stick_il();		\
		}						\
	}

/* Each loop is 2ms, timeout at 1000ms */
static int drmach_copy_rename_timeout = 500;
3113 
/*
 * Relocatable copy/rename worker executed by every participating CPU
 * from locked TLB pages.  The master CPU (prog->data->cpuid) also
 * drives the FMEM script; slaves copy their memlist portion and then
 * run the loop script.  Must remain leaf-like: see the block comment
 * above about relocation constraints.
 */
static int
drmach_copy_rename_prog__relocatable(drmach_copy_rename_program_t *prog,
	int cpuid)
{
	struct memlist		*ml;
	register int		rtn;
	int			i;
	register uint64_t	curr, limit;
	extern uint64_t		drmach_get_stick_il();
	extern void		membar_sync_il();
	extern void		flush_instr_mem_il(void*);
	extern void		flush_windows_il(void);
	uint64_t		copy_start;

	/*
	 * flush_windows is moved here to make sure all
	 * registers used in the callers are flushed to
	 * memory before the copy.
	 *
	 * If flush_windows() is called too early in the
	 * calling function, the compiler might put some
	 * data in the local registers after flush_windows().
	 * After FMA, if there is any fill trap, the registers
	 * will contain stale data.
	 */

	flush_windows_il();

	prog->critical->stat[cpuid] = FMEM_LOOP_COPY_READY;
	membar_sync_il();

	if (prog->data->cpuid == cpuid) {
		/* master: wait for every slave to post COPY_READY */
		limit = drmach_get_stick_il();
		limit += cpu_xcall_delay * system_clock_freq;
		for (i = 0; i < NCPU; i++) {
			if (CPU_IN_SET(prog->data->cpu_slave_set, i)) {
				/* wait for all CPU's to be ready */
				for (;;) {
					if (prog->critical->stat[i] ==
					    FMEM_LOOP_COPY_READY) {
						break;
					}
					DR_DELAY_IL(1, prog->data->stick_freq);
				}
				curr = drmach_get_stick_il();
				if (curr > limit) {
					prog->data->fmem_status.error =
					    EOPL_FMEM_XC_TIMEOUT;
					return (EOPL_FMEM_XC_TIMEOUT);
				}
			}
		}
		prog->data->fmem_status.stat = FMEM_LOOP_COPY_READY;
		membar_sync_il();
		copy_start = drmach_get_stick_il();
	} else {
		/* slave: spin until the master signals COPY_READY */
		for (;;) {
			if (prog->data->fmem_status.stat ==
			    FMEM_LOOP_COPY_READY) {
				break;
			}
			if (prog->data->fmem_status.error) {
				prog->data->error[cpuid] = EOPL_FMEM_TERMINATE;
				return (EOPL_FMEM_TERMINATE);
			}
			DR_DELAY_IL(1, prog->data->stick_freq);
		}
	}

	/*
	 * DO COPY.
	 */
	if (CPU_IN_SET(prog->data->cpu_copy_set, cpuid)) {
		for (ml = prog->data->cpu_ml[cpuid]; ml; ml = ml->ml_next) {
			uint64_t	s_pa, t_pa;
			uint64_t	nbytes;

			s_pa = prog->data->s_copybasepa + ml->ml_address;
			t_pa = prog->data->t_copybasepa + ml->ml_address;
			nbytes = ml->ml_size;

			while (nbytes != 0ull) {
				/*
				 * If the master has detected error, we just
				 * bail out
				 */
				if (prog->data->fmem_status.error !=
				    ESBD_NOERROR) {
					prog->data->error[cpuid] =
					    EOPL_FMEM_TERMINATE;
					return (EOPL_FMEM_TERMINATE);
				}
				/*
				 * This copy does NOT use an ASI
				 * that avoids the Ecache, therefore
				 * the dst_pa addresses may remain
				 * in our Ecache after the dst_pa
				 * has been removed from the system.
				 * A subsequent write-back to memory
				 * will cause an ARB-stop because the
				 * physical address no longer exists
				 * in the system. Therefore we must
				 * flush out local Ecache after we
				 * finish the copy.
				 */

				/* copy 32 bytes at src_pa to dst_pa */
				bcopy32_il(s_pa, t_pa);

				/*
				 * increment the counter to signal that we are
				 * alive
				 */
				prog->stat->nbytes[cpuid] += 32;

				/* increment by 32 bytes */
				s_pa += (4 * sizeof (uint64_t));
				t_pa += (4 * sizeof (uint64_t));

				/* decrement by 32 bytes */
				nbytes -= (4 * sizeof (uint64_t));
			}
		}
		prog->critical->stat[cpuid] = FMEM_LOOP_COPY_DONE;
		membar_sync_il();
	}

	/*
	 * Since bcopy32_il() does NOT use an ASI to bypass
	 * the Ecache, we need to flush our Ecache after
	 * the copy is complete.
	 */
	flush_cache_il();

	/*
	 * drmach_fmem_exec_script()
	 */
	if (prog->data->cpuid == cpuid) {
		uint64_t	last, now;

		/* master: wait for slaves to finish, watching for stalls */
		limit = copy_start + prog->data->copy_delay;
		for (i = 0; i < NCPU; i++) {
			if (!CPU_IN_SET(prog->data->cpu_slave_set, i))
				continue;

			for (;;) {
				/*
				 * we get FMEM_LOOP_FMEM_READY in
				 * normal case
				 */
				if (prog->critical->stat[i] ==
				    FMEM_LOOP_FMEM_READY) {
					break;
				}
				/* got error traps */
				if (prog->data->error[i] ==
				    EOPL_FMEM_COPY_ERROR) {
					prog->data->fmem_status.error =
					    EOPL_FMEM_COPY_ERROR;
					return (EOPL_FMEM_COPY_ERROR);
				}
				/*
				 * if we have not reached limit, wait
				 * more
				 */
				curr = drmach_get_stick_il();
				if (curr <= limit)
					continue;

				prog->data->slowest_cpuid = i;
				prog->data->copy_wait_time = curr - copy_start;

				/* now check if slave is alive */
				last = prog->stat->nbytes[i];

				DR_DELAY_IL(1, prog->data->stick_freq);

				now = prog->stat->nbytes[i];
				if (now <= last) {
					/*
					 * no progress, perhaps just
					 * finished
					 */
					DR_DELAY_IL(1, prog->data->stick_freq);
					if (prog->critical->stat[i] ==
					    FMEM_LOOP_FMEM_READY)
						break;
					/* copy error */
					if (prog->data->error[i] ==
					    EOPL_FMEM_COPY_ERROR) {
						prog->data-> fmem_status.error =
						    EOPL_FMEM_COPY_ERROR;
						return (EOPL_FMEM_COPY_ERROR);
					}

					prog->data->copy_rename_count++;
					if (prog->data->copy_rename_count
					    < drmach_copy_rename_timeout) {
						continue;
					} else {
						prog->data->fmem_status.error =
						    EOPL_FMEM_COPY_TIMEOUT;
						return (EOPL_FMEM_COPY_TIMEOUT);
					}
				}
			}
		}

		prog->critical->stat[cpuid] = FMEM_LOOP_FMEM_READY;
		prog->data->fmem_status.stat  = FMEM_LOOP_FMEM_READY;

		membar_sync_il();
		flush_instr_mem_il((void*) (prog->critical));
		/*
		 * drmach_fmem_exec_script()
		 */
		rtn = prog->critical->fmem((void *)prog->critical, PAGESIZE);
		return (rtn);
	} else {
		flush_instr_mem_il((void*) (prog->critical));
		/*
		 * drmach_fmem_loop_script()
		 */
		rtn = prog->critical->loop((void *)(prog->critical), PAGESIZE,
		    (void *)&(prog->critical->stat[cpuid]));
		prog->data->error[cpuid] = rtn;
		/* slave thread does not care the rv */
		return (0);
	}
}
3344 
/*
 * Empty marker whose address bounds the relocatable routine above:
 * its byte length is computed as
 * drmach_copy_rename_end - drmach_copy_rename_prog__relocatable.
 */
static void
drmach_copy_rename_end(void)
{
	/*
	 * IMPORTANT:	This function's location MUST be located immediately
	 *		following drmach_copy_rename_prog__relocatable to
	 *		accurately estimate its size.  Note that this assumes
	 *		the compiler keeps these functions in the order in
	 *		which they appear :-o
	 */
}
3356 
3357 
3358 static int
3359 drmach_setup_memlist(drmach_copy_rename_program_t *p)
3360 {
3361 	struct memlist *ml;
3362 	caddr_t buf;
3363 	int nbytes, s, n_elements;
3364 
3365 	nbytes = PAGESIZE;
3366 	n_elements = 0;
3367 	s = roundup(sizeof (struct memlist), sizeof (void *));
3368 	p->free_mlist = NULL;
3369 	buf = p->memlist_buffer;
3370 	while (nbytes >= sizeof (struct memlist)) {
3371 		ml = (struct memlist *)buf;
3372 		ml->ml_next = p->free_mlist;
3373 		p->free_mlist = ml;
3374 		buf += s;
3375 		n_elements++;
3376 		nbytes -= s;
3377 	}
3378 	return (n_elements);
3379 }
3380 
/*
 * Map the FMEM critical pages at 'new_va' with locked 8K dTLB/iTLB
 * entries backed by the pages at 'va', so no TLB miss can occur while
 * the copy-rename code runs.  Preemption is disabled here and
 * re-enabled in drmach_unlock_critical().
 */
static void
drmach_lock_critical(caddr_t va, caddr_t new_va)
{
	tte_t tte;
	int i;

	kpreempt_disable();

	for (i = 0; i < DRMACH_FMEM_LOCKED_PAGES; i++) {
		vtag_flushpage(new_va, (uint64_t)ksfmmup);
		sfmmu_memtte(&tte, va_to_pfn(va), PROC_DATA|HAT_NOSYNC, TTE8K);
		tte.tte_intlo |= TTE_LCK_INT;	/* lock the entry in the TLB */
		sfmmu_dtlb_ld_kva(new_va, &tte);
		sfmmu_itlb_ld_kva(new_va, &tte);
		va += PAGESIZE;
		new_va += PAGESIZE;
	}
}
3399 
3400 static void
3401 drmach_unlock_critical(caddr_t va)
3402 {
3403 	int i;
3404 
3405 	for (i = 0; i < DRMACH_FMEM_LOCKED_PAGES; i++) {
3406 		vtag_flushpage(va, (uint64_t)ksfmmup);
3407 		va += PAGESIZE;
3408 	}
3409 
3410 	kpreempt_enable();
3411 }
3412 
3413 sbd_error_t *
3414 drmach_copy_rename_init(drmachid_t t_id, drmachid_t s_id,
3415 	struct memlist *c_ml, drmachid_t *pgm_id)
3416 {
3417 	drmach_mem_t	*s_mem;
3418 	drmach_mem_t	*t_mem;
3419 	struct memlist	*x_ml;
3420 	uint64_t	s_copybasepa, t_copybasepa;
3421 	uint_t		len;
3422 	caddr_t		bp, wp;
3423 	int		s_bd, t_bd, cpuid, active_cpus, i;
3424 	int		max_elms, mlist_size, rv;
3425 	uint64_t	c_addr;
3426 	size_t		c_size, copy_sz, sz;
3427 	extern void	drmach_fmem_loop_script();
3428 	extern void	drmach_fmem_loop_script_rtn();
3429 	extern int	drmach_fmem_exec_script();
3430 	extern void	drmach_fmem_exec_script_end();
3431 	sbd_error_t	*err;
3432 	drmach_copy_rename_program_t *prog = NULL;
3433 	drmach_copy_rename_program_t *prog_kmem = NULL;
3434 	void		(*mc_suspend)(void);
3435 	void		(*mc_resume)(void);
3436 	int		(*scf_fmem_start)(int, int);
3437 	int		(*scf_fmem_end)(void);
3438 	int		(*scf_fmem_cancel)(void);
3439 	uint64_t	(*scf_get_base_addr)(void);
3440 
3441 	if (!DRMACH_IS_MEM_ID(s_id))
3442 		return (drerr_new(0, EOPL_INAPPROP, NULL));
3443 	if (!DRMACH_IS_MEM_ID(t_id))
3444 		return (drerr_new(0, EOPL_INAPPROP, NULL));
3445 
3446 	for (i = 0; i < NCPU; i++) {
3447 		int lsb_id, onb_core_num, strand_id;
3448 		drmach_board_t *bp;
3449 
3450 		/*
3451 		 * this kind of CPU will spin in cache
3452 		 */
3453 		if (CPU_IN_SET(cpu_ready_set, i))
3454 			continue;
3455 
3456 		/*
3457 		 * Now check for any inactive CPU's that
3458 		 * have been hotadded.  This can only occur in
3459 		 * error condition in drmach_cpu_poweron().
3460 		 */
3461 		lsb_id = LSB_ID(i);
3462 		onb_core_num = ON_BOARD_CORE_NUM(i);
3463 		strand_id = STRAND_ID(i);
3464 		bp = drmach_get_board_by_bnum(lsb_id);
3465 		if (bp == NULL)
3466 			continue;
3467 		if (bp->cores[onb_core_num].core_hotadded &
3468 		    (1 << strand_id)) {
3469 			if (!(bp->cores[onb_core_num].core_started &
3470 			    (1 << strand_id))) {
3471 				return (drerr_new(1, EOPL_CPU_STATE, NULL));
3472 			}
3473 		}
3474 	}
3475 
3476 	mc_suspend = (void (*)(void))
3477 	    modgetsymvalue("opl_mc_suspend", 0);
3478 	mc_resume = (void (*)(void))
3479 	    modgetsymvalue("opl_mc_resume", 0);
3480 
3481 	if (mc_suspend == NULL || mc_resume == NULL) {
3482 		return (drerr_new(1, EOPL_MC_OPL, NULL));
3483 	}
3484 
3485 	scf_fmem_start = (int (*)(int, int))
3486 	    modgetsymvalue("scf_fmem_start", 0);
3487 	if (scf_fmem_start == NULL) {
3488 		return (drerr_new(1, EOPL_SCF_FMEM, NULL));
3489 	}
3490 	scf_fmem_end = (int (*)(void))
3491 	    modgetsymvalue("scf_fmem_end", 0);
3492 	if (scf_fmem_end == NULL) {
3493 		return (drerr_new(1, EOPL_SCF_FMEM, NULL));
3494 	}
3495 	scf_fmem_cancel = (int (*)(void))
3496 	    modgetsymvalue("scf_fmem_cancel", 0);
3497 	if (scf_fmem_cancel == NULL) {
3498 		return (drerr_new(1, EOPL_SCF_FMEM, NULL));
3499 	}
3500 	scf_get_base_addr = (uint64_t (*)(void))
3501 	    modgetsymvalue("scf_get_base_addr", 0);
3502 	if (scf_get_base_addr == NULL) {
3503 		return (drerr_new(1, EOPL_SCF_FMEM, NULL));
3504 	}
3505 	s_mem = s_id;
3506 	t_mem = t_id;
3507 
3508 	s_bd = s_mem->dev.bp->bnum;
3509 	t_bd = t_mem->dev.bp->bnum;
3510 
3511 	/* calculate source and target base pa */
3512 
3513 	s_copybasepa = s_mem->slice_base;
3514 	t_copybasepa = t_mem->slice_base;
3515 
3516 	/* adjust copy memlist addresses to be relative to copy base pa */
3517 	x_ml = c_ml;
3518 	mlist_size = 0;
3519 	while (x_ml != NULL) {
3520 		x_ml->ml_address -= s_copybasepa;
3521 		x_ml = x_ml->ml_next;
3522 		mlist_size++;
3523 	}
3524 
3525 	/*
3526 	 * bp will be page aligned, since we're calling
3527 	 * kmem_zalloc() with an exact multiple of PAGESIZE.
3528 	 */
3529 
3530 	prog_kmem = (drmach_copy_rename_program_t *)kmem_zalloc(
3531 	    DRMACH_FMEM_LOCKED_PAGES * PAGESIZE, KM_SLEEP);
3532 
3533 	prog_kmem->prog = prog_kmem;
3534 
3535 	/*
3536 	 * To avoid MTLB hit, we allocate a new VM space and remap
3537 	 * the kmem_alloc buffer to that address.  This solves
3538 	 * 2 problems we found:
3539 	 * - the kmem_alloc buffer can be just a chunk inside
3540 	 *   a much larger, e.g. 4MB buffer and MTLB will occur
3541 	 *   if there are both a 4MB and a 8K TLB mapping to
3542 	 *   the same VA range.
3543 	 * - the kmem mapping got dropped into the TLB by other
3544 	 *   strands, unintentionally.
3545 	 * Note that the pointers like data, critical, memlist_buffer,
3546 	 * and stat inside the copy rename structure are mapped to this
3547 	 * alternate VM space so we must make sure we lock the TLB mapping
3548 	 * whenever we access data pointed to by these pointers.
3549 	 */
3550 
3551 	prog = prog_kmem->locked_prog = vmem_alloc(heap_arena,
3552 	    DRMACH_FMEM_LOCKED_PAGES * PAGESIZE, VM_SLEEP);
3553 	wp = bp = (caddr_t)prog;
3554 
3555 	/* Now remap prog_kmem to prog */
3556 	drmach_lock_critical((caddr_t)prog_kmem, (caddr_t)prog);
3557 
3558 	/* All pointers in prog are based on the alternate mapping */
3559 	prog->data = (drmach_copy_rename_data_t *)roundup(((uint64_t)prog +
3560 	    sizeof (drmach_copy_rename_program_t)), sizeof (void *));
3561 
3562 	ASSERT(((uint64_t)prog->data + sizeof (drmach_copy_rename_data_t))
3563 	    <= ((uint64_t)prog + PAGESIZE));
3564 
3565 	prog->critical = (drmach_copy_rename_critical_t *)
3566 	    (wp + DRMACH_FMEM_CRITICAL_PAGE * PAGESIZE);
3567 
3568 	prog->memlist_buffer = (caddr_t)(wp + DRMACH_FMEM_MLIST_PAGE *
3569 	    PAGESIZE);
3570 
3571 	prog->stat = (drmach_cr_stat_t *)(wp + DRMACH_FMEM_STAT_PAGE *
3572 	    PAGESIZE);
3573 
3574 	/* LINTED */
3575 	ASSERT(sizeof (drmach_cr_stat_t) <= ((DRMACH_FMEM_LOCKED_PAGES -
3576 	    DRMACH_FMEM_STAT_PAGE) * PAGESIZE));
3577 
3578 	prog->critical->scf_reg_base = (uint64_t)-1;
3579 	prog->critical->scf_td[0] = (s_bd & 0xff);
3580 	prog->critical->scf_td[1] = (t_bd & 0xff);
3581 	for (i = 2; i < 15; i++) {
3582 		prog->critical->scf_td[i]   = 0;
3583 	}
3584 	prog->critical->scf_td[15] = ((0xaa + s_bd + t_bd) & 0xff);
3585 
3586 	bp = (caddr_t)prog->critical;
3587 	len = sizeof (drmach_copy_rename_critical_t);
3588 	wp = (caddr_t)roundup((uint64_t)bp + len, sizeof (void *));
3589 
3590 	len = (uint_t)((ulong_t)drmach_copy_rename_end -
3591 	    (ulong_t)drmach_copy_rename_prog__relocatable);
3592 
3593 	/*
3594 	 * We always leave 1K nop's to prevent the processor from
3595 	 * speculative execution that causes memory access
3596 	 */
3597 	wp = wp + len + 1024;
3598 
3599 	len = (uint_t)((ulong_t)drmach_fmem_exec_script_end -
3600 	    (ulong_t)drmach_fmem_exec_script);
3601 	/* this is the entry point of the loop script */
3602 	wp = wp + len + 1024;
3603 
3604 	len = (uint_t)((ulong_t)drmach_fmem_exec_script -
3605 	    (ulong_t)drmach_fmem_loop_script);
3606 	wp = wp + len + 1024;
3607 
3608 	/* now we make sure there is 1K extra */
3609 
3610 	if ((wp - bp) > PAGESIZE) {
3611 		err = drerr_new(1, EOPL_FMEM_SETUP, NULL);
3612 		goto out;
3613 	}
3614 
3615 	bp = (caddr_t)prog->critical;
3616 	len = sizeof (drmach_copy_rename_critical_t);
3617 	wp = (caddr_t)roundup((uint64_t)bp + len, sizeof (void *));
3618 
3619 	prog->critical->run = (int (*)())(wp);
3620 	len = (uint_t)((ulong_t)drmach_copy_rename_end -
3621 	    (ulong_t)drmach_copy_rename_prog__relocatable);
3622 
3623 	bcopy((caddr_t)drmach_copy_rename_prog__relocatable, wp, len);
3624 
3625 	wp = (caddr_t)roundup((uint64_t)wp + len, 1024);
3626 
3627 	prog->critical->fmem = (int (*)())(wp);
3628 	len = (int)((ulong_t)drmach_fmem_exec_script_end -
3629 	    (ulong_t)drmach_fmem_exec_script);
3630 	bcopy((caddr_t)drmach_fmem_exec_script, wp, len);
3631 
3632 	len = (int)((ulong_t)drmach_fmem_exec_script_end -
3633 	    (ulong_t)drmach_fmem_exec_script);
3634 	wp = (caddr_t)roundup((uint64_t)wp + len, 1024);
3635 
3636 	prog->critical->loop = (int (*)())(wp);
3637 	len = (int)((ulong_t)drmach_fmem_exec_script -
3638 	    (ulong_t)drmach_fmem_loop_script);
3639 	bcopy((caddr_t)drmach_fmem_loop_script, (void *)wp, len);
3640 	len = (int)((ulong_t)drmach_fmem_loop_script_rtn-
3641 	    (ulong_t)drmach_fmem_loop_script);
3642 	prog->critical->loop_rtn = (void (*)()) (wp+len);
3643 
3644 	prog->data->fmem_status.error = ESBD_NOERROR;
3645 
3646 	/* now we are committed, call SCF, soft suspend mac patrol */
3647 	if ((*scf_fmem_start)(s_bd, t_bd)) {
3648 		err = drerr_new(1, EOPL_SCF_FMEM_START, NULL);
3649 		goto out;
3650 	}
3651 	prog->data->scf_fmem_end = scf_fmem_end;
3652 	prog->data->scf_fmem_cancel = scf_fmem_cancel;
3653 	prog->data->scf_get_base_addr = scf_get_base_addr;
3654 	prog->data->fmem_status.op |= OPL_FMEM_SCF_START;
3655 
3656 	/* soft suspend mac patrol */
3657 	(*mc_suspend)();
3658 	prog->data->fmem_status.op |= OPL_FMEM_MC_SUSPEND;
3659 	prog->data->mc_resume = mc_resume;
3660 
3661 	prog->critical->inst_loop_ret  =
3662 	    *(uint64_t *)(prog->critical->loop_rtn);
3663 
3664 	/*
3665 	 * 0x30800000 is op code "ba,a	+0"
3666 	 */
3667 
3668 	*(uint_t *)(prog->critical->loop_rtn) = (uint_t)(0x30800000);
3669 
3670 	/*
3671 	 * set the value of SCF FMEM TIMEOUT
3672 	 */
3673 	prog->critical->delay = fmem_timeout * system_clock_freq;
3674 
3675 	prog->data->s_mem = (drmachid_t)s_mem;
3676 	prog->data->t_mem = (drmachid_t)t_mem;
3677 
3678 	cpuid = CPU->cpu_id;
3679 	prog->data->cpuid = cpuid;
3680 	prog->data->cpu_ready_set = cpu_ready_set;
3681 	prog->data->cpu_slave_set = cpu_ready_set;
3682 	prog->data->slowest_cpuid = (processorid_t)-1;
3683 	prog->data->copy_wait_time = 0;
3684 	prog->data->copy_rename_count = 0;
3685 	CPUSET_DEL(prog->data->cpu_slave_set, cpuid);
3686 
3687 	for (i = 0; i < NCPU; i++) {
3688 		prog->data->cpu_ml[i] = NULL;
3689 	}
3690 
3691 	/*
3692 	 * max_elms -	max number of memlist structures that
3693 	 * 		may be allocated for the CPU memory list.
3694 	 *		If there are too many memory span (because
3695 	 *		of fragmentation) than number of memlist
3696 	 *		available, we should return error.
3697 	 */
3698 	max_elms = drmach_setup_memlist(prog);
3699 	if (max_elms < mlist_size) {
3700 		err = drerr_new(1, EOPL_FMEM_SETUP, NULL);
3701 		goto err_out;
3702 	}
3703 
3704 	active_cpus = 0;
3705 	if (drmach_disable_mcopy) {
3706 		active_cpus = 1;
3707 		CPUSET_ADD(prog->data->cpu_copy_set, cpuid);
3708 	} else {
3709 		int max_cpu_num;
3710 		/*
3711 		 * The parallel copy procedure is going to split some
3712 		 * of the elements of the original memory copy list.
3713 		 * The number of added elements can be up to
3714 		 * (max_cpu_num - 1).  It means that max_cpu_num
3715 		 * should satisfy the following condition:
3716 		 * (max_cpu_num - 1) + mlist_size <= max_elms.
3717 		 */
3718 		max_cpu_num = max_elms - mlist_size + 1;
3719 
3720 		for (i = 0; i < NCPU; i++) {
3721 			if (CPU_IN_SET(cpu_ready_set, i) &&
3722 			    CPU_ACTIVE(cpu[i])) {
3723 				/*
3724 				 * To reduce the level-2 cache contention only
3725 				 * one strand per core will participate
3726 				 * in the copy. If the strand with even cpu_id
3727 				 * number is present in the ready set, we will
3728 				 * include this strand in the copy set. If it
3729 				 * is not present in the ready set, we check for
3730 				 * the strand with the consecutive odd cpu_id
3731 				 * and include it, provided that it is
3732 				 * present in the ready set.
3733 				 */
3734 				if (!(i & 0x1) ||
3735 				    !CPU_IN_SET(prog->data->cpu_copy_set,
3736 				    i - 1)) {
3737 					CPUSET_ADD(prog->data->cpu_copy_set, i);
3738 					active_cpus++;
3739 					/*
3740 					 * We cannot have more than
3741 					 * max_cpu_num CPUs in the copy
3742 					 * set, because each CPU has to
3743 					 * have at least one element
3744 					 * long memory copy list.
3745 					 */
3746 					if (active_cpus >= max_cpu_num)
3747 						break;
3748 
3749 				}
3750 			}
3751 		}
3752 	}
3753 
3754 	x_ml = c_ml;
3755 	sz = 0;
3756 	while (x_ml != NULL) {
3757 		sz += x_ml->ml_size;
3758 		x_ml = x_ml->ml_next;
3759 	}
3760 
3761 	copy_sz = sz/active_cpus;
3762 	copy_sz = roundup(copy_sz, MMU_PAGESIZE4M);
3763 
3764 	while (sz > copy_sz*active_cpus) {
3765 		copy_sz += MMU_PAGESIZE4M;
3766 	}
3767 
3768 	prog->data->stick_freq = system_clock_freq;
3769 	prog->data->copy_delay = ((copy_sz / min_copy_size_per_sec) + 2) *
3770 	    system_clock_freq;
3771 
3772 	x_ml = c_ml;
3773 	c_addr = x_ml->ml_address;
3774 	c_size = x_ml->ml_size;
3775 
3776 	for (i = 0; i < NCPU; i++) {
3777 		prog->stat->nbytes[i] = 0;
3778 		if (!CPU_IN_SET(prog->data->cpu_copy_set, i)) {
3779 			continue;
3780 		}
3781 		sz = copy_sz;
3782 
3783 		while (sz) {
3784 			if (c_size > sz) {
3785 				if ((prog->data->cpu_ml[i] =
3786 				    drmach_memlist_add_span(prog,
3787 				    prog->data->cpu_ml[i],
3788 				    c_addr, sz)) == NULL) {
3789 					cmn_err(CE_WARN,
3790 					    "Unexpected drmach_memlist_add_span"
3791 					    " failure.");
3792 					err = drerr_new(1, EOPL_FMEM_SETUP,
3793 					    NULL);
3794 					mc_resume();
3795 					goto out;
3796 				}
3797 				c_addr += sz;
3798 				c_size -= sz;
3799 				break;
3800 			} else {
3801 				sz -= c_size;
3802 				if ((prog->data->cpu_ml[i] =
3803 				    drmach_memlist_add_span(prog,
3804 				    prog->data->cpu_ml[i],
3805 				    c_addr, c_size)) == NULL) {
3806 					cmn_err(CE_WARN,
3807 					    "Unexpected drmach_memlist_add_span"
3808 					    " failure.");
3809 					err = drerr_new(1, EOPL_FMEM_SETUP,
3810 					    NULL);
3811 					mc_resume();
3812 					goto out;
3813 				}
3814 
3815 				x_ml = x_ml->ml_next;
3816 				if (x_ml != NULL) {
3817 					c_addr = x_ml->ml_address;
3818 					c_size = x_ml->ml_size;
3819 				} else {
3820 					goto end;
3821 				}
3822 			}
3823 		}
3824 	}
3825 end:
3826 	prog->data->s_copybasepa = s_copybasepa;
3827 	prog->data->t_copybasepa = t_copybasepa;
3828 	prog->data->c_ml = c_ml;
3829 	*pgm_id = prog_kmem;
3830 
3831 	/* Unmap the alternate space.  It will have to be remapped again */
3832 	drmach_unlock_critical((caddr_t)prog);
3833 	return (NULL);
3834 
3835 err_out:
3836 	mc_resume();
3837 	rv = (*prog->data->scf_fmem_cancel)();
3838 	if (rv) {
3839 		cmn_err(CE_WARN, "scf_fmem_cancel() failed rv=0x%x", rv);
3840 	}
3841 out:
3842 	if (prog != NULL) {
3843 		drmach_unlock_critical((caddr_t)prog);
3844 		vmem_free(heap_arena, prog, DRMACH_FMEM_LOCKED_PAGES *
3845 		    PAGESIZE);
3846 	}
3847 	if (prog_kmem != NULL) {
3848 		kmem_free(prog_kmem, DRMACH_FMEM_LOCKED_PAGES * PAGESIZE);
3849 	}
3850 	return (err);
3851 }
3852 
/*
 * Complete an FMEM copy-rename operation started by drmach_copy_rename().
 * Queries SCF for the final status, performs post-rename address swapping
 * on success (or cancels the FMEM on failure), resumes mac patrol, and
 * frees the locked program buffers.
 *
 * id is the prog_kmem pointer handed back via *pgm_id at setup time.
 * Returns NULL on success, or an sbd_error_t describing the failure.
 */
sbd_error_t *
drmach_copy_rename_fini(drmachid_t id)
{
	drmach_copy_rename_program_t	*prog = id;
	sbd_error_t			*err = NULL;
	int				rv;
	uint_t				fmem_error;

	/*
	 * Note that we have to delay calling SCF to find out the
	 * status of the FMEM operation here because SCF cannot
	 * respond while it is suspended.
	 * This creates a small window when we are not sure about the
	 * base address of the system board.
	 * If there is any call to mc-opl to get memory unum,
	 * mc-opl will return UNKNOWN as the unum.
	 */

	/*
	 * We have to remap again because all the pointers (data,
	 * critical, etc.) in prog are based on the alternate vmem
	 * space set up at program creation time.
	 */
	(void) drmach_lock_critical((caddr_t)prog, (caddr_t)prog->locked_prog);

	/* The copy memlist is no longer needed; release it. */
	if (prog->data->c_ml != NULL)
		memlist_delete(prog->data->c_ml);

	/*
	 * Both SCF_START and MC_SUSPEND were recorded during setup;
	 * anything else here means the program state is corrupt.
	 */
	if ((prog->data->fmem_status.op &
	    (OPL_FMEM_SCF_START | OPL_FMEM_MC_SUSPEND)) !=
	    (OPL_FMEM_SCF_START | OPL_FMEM_MC_SUSPEND)) {
		cmn_err(CE_PANIC, "drmach_copy_rename_fini: invalid op "
		    "code %x\n", prog->data->fmem_status.op);
	}

	fmem_error = prog->data->fmem_status.error;
	if (fmem_error != ESBD_NOERROR) {
		err = drerr_new(1, fmem_error, NULL);
	}

	/* possible ops are SCF_START, MC_SUSPEND */
	if (prog->critical->fmem_issued) {
		/*
		 * The FMEM command went to the hardware; a copy error at
		 * this point cannot be rolled back.
		 */
		if (fmem_error != ESBD_NOERROR) {
			cmn_err(CE_PANIC, "Irrecoverable FMEM error %d\n",
			    fmem_error);
		}
		rv = (*prog->data->scf_fmem_end)();
		if (rv) {
			cmn_err(CE_PANIC, "scf_fmem_end() failed rv=%d", rv);
		}
		/*
		 * If we get here, rename is successful.
		 * Do all the copy rename post processing.
		 */
		drmach_swap_pa((drmach_mem_t *)prog->data->s_mem,
		    (drmach_mem_t *)prog->data->t_mem);
	} else {
		/* FMEM never issued: best-effort cancel, warn on failure. */
		rv = (*prog->data->scf_fmem_cancel)();
		if (rv) {
			cmn_err(CE_WARN, "scf_fmem_cancel() failed rv=0x%x",
			    rv);
			if (!err) {
				err = drerr_new(1, EOPL_SCF_FMEM_CANCEL,
				    "scf_fmem_cancel() failed. rv = 0x%x", rv);
			}
		}
	}
	/* soft resume mac patrol */
	(*prog->data->mc_resume)();

	/* Drop the locked TLB mapping before freeing the alternate space. */
	drmach_unlock_critical((caddr_t)prog->locked_prog);

	vmem_free(heap_arena, prog->locked_prog,
	    DRMACH_FMEM_LOCKED_PAGES * PAGESIZE);
	kmem_free(prog, DRMACH_FMEM_LOCKED_PAGES * PAGESIZE);
	return (err);
}
3929 
/*
 * Slave-CPU entry point for copy-rename, reached via the
 * drmach_sys_trap cross-trap issued by drmach_copy_rename().
 * Runs the relocated copy-rename program for this CPU and records
 * a per-CPU exit status the master polls for.
 *
 * NOTE(review): runs in trap context; the FMEM_LOOP_EXIT store
 * followed by membar_sync_il appears to be the handshake the master
 * waits on, so statement order here is significant.
 */
/*ARGSUSED*/
static void
drmach_copy_rename_slave(struct regs *rp, drmachid_t id)
{
	drmach_copy_rename_program_t	*prog =
	    (drmach_copy_rename_program_t *)id;
	register int			cpuid;
	extern void			drmach_flush();
	extern void			membar_sync_il();
	extern void			drmach_flush_icache();
	on_trap_data_t			otd;

	cpuid = CPU->cpu_id;

	/*
	 * On a data error trap during the copy, record the copy error
	 * and the exit status, then publish them to the master.
	 */
	if (on_trap(&otd, OT_DATA_EC)) {
		no_trap();
		prog->data->error[cpuid] = EOPL_FMEM_COPY_ERROR;
		prog->critical->stat[cpuid] = FMEM_LOOP_EXIT;
		drmach_flush_icache();
		membar_sync_il();
		return;
	}


	/*
	 * jmp drmach_copy_rename_prog().
	 */

	/* Flush the page holding the relocated program before entry. */
	drmach_flush(prog->critical, PAGESIZE);
	(void) prog->critical->run(prog, cpuid);
	drmach_flush_icache();

	no_trap();

	/* Publish exit status for the master's wait loop. */
	prog->critical->stat[cpuid] = FMEM_LOOP_EXIT;

	membar_sync_il();
}
3968 
3969 static void
3970 drmach_swap_pa(drmach_mem_t *s_mem, drmach_mem_t *t_mem)
3971 {
3972 	uint64_t s_base, t_base;
3973 	drmach_board_t *s_board, *t_board;
3974 	struct memlist *ml;
3975 
3976 	s_board = s_mem->dev.bp;
3977 	t_board = t_mem->dev.bp;
3978 	if (s_board == NULL || t_board == NULL) {
3979 		cmn_err(CE_PANIC, "Cannot locate source or target board\n");
3980 		return;
3981 	}
3982 	s_base = s_mem->slice_base;
3983 	t_base = t_mem->slice_base;
3984 
3985 	s_mem->slice_base = t_base;
3986 	s_mem->base_pa = (s_mem->base_pa - s_base) + t_base;
3987 
3988 	for (ml = s_mem->memlist; ml; ml = ml->ml_next) {
3989 		ml->ml_address = ml->ml_address - s_base + t_base;
3990 	}
3991 
3992 	t_mem->slice_base = s_base;
3993 	t_mem->base_pa = (t_mem->base_pa - t_base) + s_base;
3994 
3995 	for (ml = t_mem->memlist; ml; ml = ml->ml_next) {
3996 		ml->ml_address = ml->ml_address - t_base + s_base;
3997 	}
3998 
3999 	/*
4000 	 * IKP has to update the sb-mem-ranges for mac patrol driver
4001 	 * when it resumes, it will re-read the sb-mem-range property
4002 	 * to get the new base address
4003 	 */
4004 	if (oplcfg_pa_swap(s_board->bnum, t_board->bnum) != 0)
4005 		cmn_err(CE_PANIC, "Could not update device nodes\n");
4006 }
4007 
/*
 * Master-side driver for the FMEM copy-rename operation.  Obtains the
 * SCF register base, cross-traps all other ready CPUs into
 * drmach_copy_rename_slave(), runs the relocated program on this CPU,
 * releases the slaves by restoring the patched loop instruction, waits
 * for every slave to report FMEM_LOOP_EXIT, and tears down the locked
 * TLB mappings.  The final status is left in prog->data->fmem_status;
 * drmach_copy_rename_fini() completes the operation.
 */
void
drmach_copy_rename(drmachid_t id)
{
	drmach_copy_rename_program_t	*prog_kmem = id;
	drmach_copy_rename_program_t	*prog;
	cpuset_t	cpuset;
	int		cpuid;
	uint64_t	inst;
	register int	rtn;
	extern int	in_sync;
	int		old_in_sync;
	extern void	drmach_sys_trap();
	extern void	drmach_flush();
	extern void	drmach_flush_icache();
	extern uint64_t	patch_inst(uint64_t *, uint64_t);
	on_trap_data_t	otd;


	prog = prog_kmem->locked_prog;


	/*
	 * We must immediately drop in the TLB because all pointers
	 * are based on the alternate vmem space.
	 */

	(void) drmach_lock_critical((caddr_t)prog_kmem, (caddr_t)prog);

	/*
	 * We call scf to get the base address here because if scf
	 * has not been suspended yet, the active path can be changing and
	 * sometimes it is not even mapped.  We call the interface when
	 * the OS has been quiesced.
	 */
	prog->critical->scf_reg_base = (*prog->data->scf_get_base_addr)();

	/* -1 or NULL means SCF could not supply a usable base; bail out. */
	if (prog->critical->scf_reg_base == (uint64_t)-1 ||
	    prog->critical->scf_reg_base == NULL) {
		prog->data->fmem_status.error = EOPL_FMEM_SCF_ERR;
		drmach_unlock_critical((caddr_t)prog);
		return;
	}

	cpuset = prog->data->cpu_ready_set;

	/* Initialize per-CPU status/error slots for every ready CPU. */
	for (cpuid = 0; cpuid < NCPU; cpuid++) {
		if (CPU_IN_SET(cpuset, cpuid)) {
			prog->critical->stat[cpuid] = FMEM_LOOP_START;
			prog->data->error[cpuid] = ESBD_NOERROR;
		}
	}

	old_in_sync = in_sync;
	in_sync = 1;
	cpuid = CPU->cpu_id;

	/* cpuset now holds only the slave CPUs (everyone but us). */
	CPUSET_DEL(cpuset, cpuid);

	/* Have every slave lock the critical TLB mapping for itself. */
	for (cpuid = 0; cpuid < NCPU; cpuid++) {
		if (CPU_IN_SET(cpuset, cpuid)) {
			xc_one(cpuid, (xcfunc_t *)drmach_lock_critical,
			    (uint64_t)prog_kmem, (uint64_t)prog);
		}
	}

	cpuid = CPU->cpu_id;

	/* Cross-trap the slaves into drmach_copy_rename_slave(). */
	xt_some(cpuset, (xcfunc_t *)drmach_sys_trap,
	    (uint64_t)drmach_copy_rename_slave, (uint64_t)prog);
	xt_sync(cpuset);

	/* A data error trap during our own copy aborts with COPY_ERROR. */
	if (on_trap(&otd, OT_DATA_EC)) {
		rtn = EOPL_FMEM_COPY_ERROR;
		drmach_flush_icache();
		goto done;
	}

	/*
	 * jmp drmach_copy_rename_prog().
	 */

	drmach_flush(prog->critical, PAGESIZE);
	rtn = prog->critical->run(prog, cpuid);

	drmach_flush_icache();


done:
	no_trap();
	if (rtn == EOPL_FMEM_HW_ERROR) {
		kpreempt_enable();
		prom_panic("URGENT_ERROR_TRAP is detected during FMEM.\n");
	}

	/*
	 * In normal case, all slave CPU's are still spinning in
	 * the assembly code.  The master has to patch the instruction
	 * to get them out.
	 * In error case, e.g. COPY_ERROR, some slave CPU's might
	 * have aborted and already returned and set LOOP_EXIT status.
	 * Some CPU might still be copying.
	 * In any case, some delay is necessary to give them
	 * enough time to set the LOOP_EXIT status.
	 */

	/*
	 * Restore the original instruction saved in inst_loop_ret over
	 * the "ba,a +0" spin placed at loop_rtn during setup.  patch_inst
	 * returns the previous contents; repeat until the read-back value
	 * confirms the restore has taken effect.
	 */
	for (;;) {
		inst = patch_inst((uint64_t *)prog->critical->loop_rtn,
		    prog->critical->inst_loop_ret);
		if (prog->critical->inst_loop_ret == inst) {
			break;
		}
	}

	for (cpuid = 0; cpuid < NCPU; cpuid++) {
		uint64_t	last, now;
		if (!CPU_IN_SET(cpuset, cpuid)) {
			continue;
		}
		last = prog->stat->nbytes[cpuid];
		/*
		 * Wait for all CPU to exit.
		 * However we do not want an infinite loop
		 * so we detect hangup situation here.
		 * If the slave CPU is still copying data,
		 * we will continue to wait.
		 * In error cases, the master has already set
		 * fmem_status.error to abort the copying.
		 * A 1 ms delay for them to abort copying and
		 * return to drmach_copy_rename_slave to set
		 * FMEM_LOOP_EXIT status should be enough.
		 */
		for (;;) {
			if (prog->critical->stat[cpuid] == FMEM_LOOP_EXIT)
				break;
			drmach_sleep_il();
			drv_usecwait(1000);
			now = prog->stat->nbytes[cpuid];
			/*
			 * No copy progress since the last check: give one
			 * more grace period, then declare the CPU hung.
			 */
			if (now <= last) {
				drv_usecwait(1000);
				if (prog->critical->stat[cpuid] ==
				    FMEM_LOOP_EXIT)
					break;
				cmn_err(CE_PANIC, "CPU %d hang during Copy "
				    "Rename", cpuid);
			}
			last = now;
		}
		if (prog->data->error[cpuid] == EOPL_FMEM_HW_ERROR) {
			prom_panic("URGENT_ERROR_TRAP is detected during "
			    "FMEM.\n");
		}
	}

	/*
	 * This must be done after all strands have exited.
	 * Removing the TLB entry will affect both strands
	 * in the same core.
	 */

	for (cpuid = 0; cpuid < NCPU; cpuid++) {
		if (CPU_IN_SET(cpuset, cpuid)) {
			xc_one(cpuid, (xcfunc_t *)drmach_unlock_critical,
			    (uint64_t)prog, 0);
		}
	}

	in_sync = old_in_sync;

	/*
	 * We should unlock before the following lock to keep the kpreempt
	 * count correct.
	 */
	(void) drmach_unlock_critical((caddr_t)prog);

	/*
	 * We must remap again.  The TLB entry might have been removed in
	 * the above xcall.
	 */

	(void) drmach_lock_critical((caddr_t)prog_kmem, (caddr_t)prog);

	/* Preserve any error a slave recorded; otherwise store our own. */
	if (prog->data->fmem_status.error == ESBD_NOERROR)
		prog->data->fmem_status.error = rtn;

	if (prog->data->copy_wait_time > 0) {
		DRMACH_PR("Unexpected long wait time %ld seconds "
		    "during copy rename on CPU %d\n",
		    prog->data->copy_wait_time/prog->data->stick_freq,
		    prog->data->slowest_cpuid);
	}
	drmach_unlock_critical((caddr_t)prog);
}
4199