xref: /titanic_41/usr/src/uts/sun4u/opl/io/drmach.c (revision 7b9b3bf3fd4f7bfad91fce91e3e9fba62ac85c77)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 
29 #include <sys/debug.h>
30 #include <sys/types.h>
31 #include <sys/varargs.h>
32 #include <sys/errno.h>
33 #include <sys/cred.h>
34 #include <sys/dditypes.h>
35 #include <sys/devops.h>
36 #include <sys/modctl.h>
37 #include <sys/poll.h>
38 #include <sys/conf.h>
39 #include <sys/ddi.h>
40 #include <sys/sunddi.h>
41 #include <sys/sunndi.h>
42 #include <sys/ndi_impldefs.h>
43 #include <sys/stat.h>
44 #include <sys/kmem.h>
45 #include <sys/vmem.h>
46 #include <sys/opl_olympus_regs.h>
47 #include <sys/cpuvar.h>
48 #include <sys/cpupart.h>
49 #include <sys/mem_config.h>
50 #include <sys/ddi_impldefs.h>
51 #include <sys/systm.h>
52 #include <sys/machsystm.h>
53 #include <sys/autoconf.h>
54 #include <sys/cmn_err.h>
55 #include <sys/sysmacros.h>
56 #include <sys/x_call.h>
57 #include <sys/promif.h>
58 #include <sys/prom_plat.h>
59 #include <sys/membar.h>
60 #include <vm/seg_kmem.h>
61 #include <sys/mem_cage.h>
62 #include <sys/stack.h>
63 #include <sys/archsystm.h>
64 #include <vm/hat_sfmmu.h>
65 #include <sys/pte.h>
66 #include <sys/mmu.h>
67 #include <sys/cpu_module.h>
68 #include <sys/obpdefs.h>
69 #include <sys/note.h>
70 #include <sys/ontrap.h>
71 #include <sys/cpu_sgnblk_defs.h>
72 #include <sys/opl.h>
73 #include <sys/cpu_impl.h>
74 
75 
76 #include <sys/promimpl.h>
77 #include <sys/prom_plat.h>
78 #include <sys/kobj.h>
79 
80 #include <sys/sysevent.h>
81 #include <sys/sysevent/dr.h>
82 #include <sys/sysevent/eventdefs.h>
83 
84 #include <sys/drmach.h>
85 #include <sys/dr_util.h>
86 
87 #include <sys/fcode.h>
88 #include <sys/opl_cfg.h>
89 
90 extern void		bcopy32_il(uint64_t, uint64_t);
91 extern void		flush_cache_il(void);
92 extern void		drmach_sleep_il(void);
93 
94 typedef struct {
95 	struct drmach_node	*node;
96 	void			*data;
97 } drmach_node_walk_args_t;
98 
99 typedef struct drmach_node {
100 	void		*here;
101 
102 	pnode_t		(*get_dnode)(struct drmach_node *node);
103 	int		(*walk)(struct drmach_node *node, void *data,
104 				int (*cb)(drmach_node_walk_args_t *args));
105 	dev_info_t	*(*n_getdip)(struct drmach_node *node);
106 	int		(*n_getproplen)(struct drmach_node *node, char *name,
107 				int *len);
108 	int		(*n_getprop)(struct drmach_node *node, char *name,
109 				void *buf, int len);
110 	int		(*get_parent)(struct drmach_node *node,
111 				struct drmach_node *pnode);
112 } drmach_node_t;
113 
114 typedef struct {
115 	int		 min_index;
116 	int		 max_index;
117 	int		 arr_sz;
118 	drmachid_t	*arr;
119 } drmach_array_t;
120 
121 typedef struct {
122 	void		*isa;
123 
124 	void		(*dispose)(drmachid_t);
125 	sbd_error_t	*(*release)(drmachid_t);
126 	sbd_error_t	*(*status)(drmachid_t, drmach_status_t *);
127 
128 	char		 name[MAXNAMELEN];
129 } drmach_common_t;
130 
131 typedef	struct {
132 	uint32_t	core_present;
133 	uint32_t	core_hotadded;
134 	uint32_t	core_started;
135 } drmach_cmp_t;
136 
137 typedef struct {
138 	drmach_common_t	 cm;
139 	int		 bnum;
140 	int		 assigned;
141 	int		 powered;
142 	int		 connected;
143 	int		 cond;
144 	drmach_node_t	*tree;
145 	drmach_array_t	*devices;
146 	int		boot_board;	/* if board exists on bootup */
147 	drmach_cmp_t	cores[OPL_MAX_COREID_PER_BOARD];
148 } drmach_board_t;
149 
150 typedef struct {
151 	drmach_common_t	 cm;
152 	drmach_board_t	*bp;
153 	int		 unum;
154 	int		portid;
155 	int		 busy;
156 	int		 powered;
157 	const char	*type;
158 	drmach_node_t	*node;
159 } drmach_device_t;
160 
161 typedef struct drmach_cpu {
162 	drmach_device_t  dev;
163 	processorid_t    cpuid;
164 	int		sb;
165 	int		chipid;
166 	int		coreid;
167 	int		strandid;
168 	int		status;
169 #define	OPL_CPU_HOTADDED	1
170 } drmach_cpu_t;
171 
172 typedef struct drmach_mem {
173 	drmach_device_t  dev;
174 	uint64_t	slice_base;
175 	uint64_t	slice_size;
176 	uint64_t	base_pa;	/* lowest installed memory base */
177 	uint64_t	nbytes;		/* size of installed memory */
178 	struct memlist *memlist;
179 } drmach_mem_t;
180 
181 typedef struct drmach_io {
182 	drmach_device_t  dev;
183 	int	channel;
184 	int	leaf;
185 } drmach_io_t;
186 
187 typedef struct drmach_domain_info {
188 	uint32_t	floating;
189 	int		allow_dr;
190 } drmach_domain_info_t;
191 
192 drmach_domain_info_t drmach_domain;
193 
194 typedef struct {
195 	int		 flags;
196 	drmach_device_t	*dp;
197 	sbd_error_t	*err;
198 	dev_info_t	*dip;
199 } drmach_config_args_t;
200 
201 typedef struct {
202 	drmach_board_t	*obj;
203 	int		 ndevs;
204 	void		*a;
205 	sbd_error_t	*(*found)(void *a, const char *, int, drmachid_t);
206 	sbd_error_t	*err;
207 } drmach_board_cb_data_t;
208 
209 static drmach_array_t	*drmach_boards;
210 
211 static sbd_error_t	*drmach_device_new(drmach_node_t *,
212 				drmach_board_t *, int, drmachid_t *);
213 static sbd_error_t	*drmach_cpu_new(drmach_device_t *, drmachid_t *);
214 static sbd_error_t	*drmach_mem_new(drmach_device_t *, drmachid_t *);
215 static sbd_error_t	*drmach_io_new(drmach_device_t *, drmachid_t *);
216 
217 static dev_info_t	*drmach_node_ddi_get_dip(drmach_node_t *np);
218 static int		 drmach_node_ddi_get_prop(drmach_node_t *np,
219 				char *name, void *buf, int len);
220 static int		 drmach_node_ddi_get_proplen(drmach_node_t *np,
221 				char *name, int *len);
222 
223 static int 		drmach_get_portid(drmach_node_t *);
224 static	sbd_error_t	*drmach_i_status(drmachid_t, drmach_status_t *);
225 static int		opl_check_dr_status();
226 static void		drmach_io_dispose(drmachid_t);
227 static sbd_error_t	*drmach_io_release(drmachid_t);
228 static sbd_error_t	*drmach_io_status(drmachid_t, drmach_status_t *);
229 static int 		drmach_init(void);
230 static void 		drmach_fini(void);
231 static void		drmach_swap_pa(drmach_mem_t *, drmach_mem_t *);
232 static drmach_board_t	*drmach_get_board_by_bnum(int);
233 
234 /* options for the second argument in drmach_add_remove_cpu() */
235 #define	HOTADD_CPU	1
236 #define	HOTREMOVE_CPU	2
237 
238 #define	ON_BOARD_CORE_NUM(x)	(((uint_t)(x) / OPL_MAX_STRANDID_PER_CORE) & \
239 	(OPL_MAX_COREID_PER_BOARD - 1))
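
/*
 * For illustration (a sketch, assuming OPL_MAX_STRANDID_PER_CORE == 2
 * and OPL_MAX_COREID_PER_BOARD == 16, i.e. CPUID[0] is the strand and
 * CPUID[4:1] is the on-board core number; see the CPU ID layout
 * comment in drmach_cpu_new() below):
 *
 *	ON_BOARD_CORE_NUM(0x2b) == (0x2b / 2) & 0xf == 0x5
 */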
240 
241 extern struct cpu	*SIGBCPU;
242 
243 static int		drmach_name2type_idx(char *);
244 static drmach_board_t	*drmach_board_new(int, int);
245 
246 #ifdef DEBUG
247 
248 #define	DRMACH_PR		if (drmach_debug) printf
249 int drmach_debug = 1;		 /* set to non-zero to enable debug messages */
250 #else
251 
252 #define	DRMACH_PR		_NOTE(CONSTANTCONDITION) if (0) printf
253 #endif /* DEBUG */
254 
255 
256 #define	DRMACH_OBJ(id)		((drmach_common_t *)id)
257 
258 #define	DRMACH_NULL_ID(id)	((id) == 0)
259 
260 #define	DRMACH_IS_BOARD_ID(id)	\
261 	((id != 0) &&		\
262 	(DRMACH_OBJ(id)->isa == (void *)drmach_board_new))
263 
264 #define	DRMACH_IS_CPU_ID(id)	\
265 	((id != 0) &&		\
266 	(DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new))
267 
268 #define	DRMACH_IS_MEM_ID(id)	\
269 	((id != 0) &&		\
270 	(DRMACH_OBJ(id)->isa == (void *)drmach_mem_new))
271 
272 #define	DRMACH_IS_IO_ID(id)	\
273 	((id != 0) &&		\
274 	(DRMACH_OBJ(id)->isa == (void *)drmach_io_new))
275 
276 #define	DRMACH_IS_DEVICE_ID(id)					\
277 	((id != 0) &&						\
278 	(DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new ||	\
279 	    DRMACH_OBJ(id)->isa == (void *)drmach_mem_new ||	\
280 	    DRMACH_OBJ(id)->isa == (void *)drmach_io_new))
281 
282 #define	DRMACH_IS_ID(id)					\
283 	((id != 0) &&						\
284 	(DRMACH_OBJ(id)->isa == (void *)drmach_board_new ||	\
285 	    DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new ||	\
286 	    DRMACH_OBJ(id)->isa == (void *)drmach_mem_new ||	\
287 	    DRMACH_OBJ(id)->isa == (void *)drmach_io_new))
288 
289 #define	DRMACH_INTERNAL_ERROR() \
290 	drerr_new(1, EOPL_INTERNAL, drmach_ie_fmt, __LINE__)
291 
292 static char		*drmach_ie_fmt = "drmach.c %d";
293 
294 static struct {
295 	const char	*name;
296 	const char	*type;
297 	sbd_error_t	*(*new)(drmach_device_t *, drmachid_t *);
298 } drmach_name2type[] = {
299 	{ "cpu",	DRMACH_DEVTYPE_CPU,		drmach_cpu_new },
300 	{ "pseudo-mc",	DRMACH_DEVTYPE_MEM,		drmach_mem_new },
301 	{ "pci",	DRMACH_DEVTYPE_PCI,		drmach_io_new  },
302 };
303 
304 /* utility */
305 #define	MBYTE	(1048576ull)
306 
307 /*
308  * drmach autoconfiguration data structures and interfaces
309  */
310 
311 extern struct mod_ops mod_miscops;
312 
313 static struct modlmisc modlmisc = {
314 	&mod_miscops,
315 	"OPL DR 1.1"
316 };
317 
318 static struct modlinkage modlinkage = {
319 	MODREV_1,
320 	(void *)&modlmisc,
321 	NULL
322 };
323 
324 static krwlock_t drmach_boards_rwlock;
325 
326 typedef const char	*fn_t;
327 
328 int
329 _init(void)
330 {
331 	int err;
332 
333 	if ((err = drmach_init()) != 0) {
334 		return (err);
335 	}
336 
337 	if ((err = mod_install(&modlinkage)) != 0) {
338 		drmach_fini();
339 	}
340 
341 	return (err);
342 }
343 
344 int
345 _fini(void)
346 {
347 	int	err;
348 
349 	if ((err = mod_remove(&modlinkage)) == 0)
350 		drmach_fini();
351 
352 	return (err);
353 }
354 
355 int
356 _info(struct modinfo *modinfop)
357 {
358 	return (mod_info(&modlinkage, modinfop));
359 }
360 
361 struct drmach_mc_lookup {
362 	int	bnum;
363 	drmach_board_t	*bp;
364 	dev_info_t *dip;	/* rv - set if found */
365 };
366 
367 #define	_ptob64(p) ((uint64_t)(p) << PAGESHIFT)
368 #define	_b64top(b) ((pgcnt_t)((b) >> PAGESHIFT))
369 
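/*
 * Read the "sb-mem-ranges" property to get the board's memory slice,
 * then derive base_pa/nbytes: from the IKP hardware descriptor (HWD)
 * for a hot-added board, or by intersecting phys_install with the
 * slice for a board present at boot.
 */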
370 static int
371 drmach_setup_mc_info(dev_info_t *dip, drmach_mem_t *mp)
372 {
373 	uint64_t	memory_ranges[128];
374 	int len;
375 	struct memlist	*ml;
376 	int rv;
377 	hwd_sb_t *hwd;
378 	hwd_memory_t *pm;
379 
380 	len = sizeof (memory_ranges);
381 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
382 	    "sb-mem-ranges", (caddr_t)&memory_ranges[0], &len) !=
383 	    DDI_PROP_SUCCESS) {
384 		mp->slice_base = 0;
385 		mp->slice_size = 0;
386 		return (-1);
387 	}
388 	mp->slice_base = memory_ranges[0];
389 	mp->slice_size = memory_ranges[1];
390 
391 	if (!mp->dev.bp->boot_board) {
392 		int i;
393 
394 	rv = opl_read_hwd(mp->dev.bp->bnum, NULL, NULL, NULL, &hwd);
395 
396 		if (rv != 0) {
397 			return (-1);
398 		}
399 
400 		ml = NULL;
401 		pm = &hwd->sb_cmu.cmu_memory;
402 		for (i = 0; i < HWD_MAX_MEM_CHUNKS; i++) {
403 			if (pm->mem_chunks[i].chnk_size > 0) {
404 				ml = memlist_add_span(ml,
405 				    pm->mem_chunks[i].chnk_start_address,
406 				    pm->mem_chunks[i].chnk_size);
407 			}
408 		}
409 	} else {
410 		/*
411 		 * we intersect phys_install to get base_pa.
412 		 * This only works at bootup time.
413 		 */
414 
415 		memlist_read_lock();
416 		ml = memlist_dup(phys_install);
417 		memlist_read_unlock();
418 
419 		ml = memlist_del_span(ml, 0ull, mp->slice_base);
420 		if (ml) {
421 			uint64_t basepa, endpa;
422 			endpa = _ptob64(physmax + 1);
423 
424 			basepa = mp->slice_base + mp->slice_size;
425 
426 			ml = memlist_del_span(ml, basepa, endpa - basepa);
427 		}
428 	}
429 
430 	if (ml) {
431 		uint64_t nbytes = 0;
432 		struct memlist *p;
433 		for (p = ml; p; p = p->next) {
434 			nbytes += p->size;
435 		}
436 		if ((mp->nbytes = nbytes) > 0)
437 			mp->base_pa = ml->address;
438 		else
439 			mp->base_pa = 0;
440 		mp->memlist = ml;
441 	} else {
442 		mp->base_pa = 0;
443 		mp->nbytes = 0;
444 	}
445 	return (0);
446 }
447 
448 
449 struct drmach_hotcpu {
450 	drmach_board_t *bp;
451 	int	bnum;
452 	int	core_id;
453 	int 	rv;
454 	int	option;
455 };
456 
457 static int
458 drmach_cpu_cb(dev_info_t *dip, void *arg)
459 {
460 	struct drmach_hotcpu *p = (struct drmach_hotcpu *)arg;
461 	char name[OBP_MAXDRVNAME];
462 	int len = OBP_MAXDRVNAME;
463 	int bnum, core_id, strand_id;
464 	drmach_board_t *bp;
465 
466 	if (dip == ddi_root_node()) {
467 		return (DDI_WALK_CONTINUE);
468 	}
469 
470 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
471 	    DDI_PROP_DONTPASS, "name",
472 	    (caddr_t)name, &len) != DDI_PROP_SUCCESS) {
473 		return (DDI_WALK_PRUNECHILD);
474 	}
475 
476 	/* only cmp has board number */
477 	bnum = -1;
478 	len = sizeof (bnum);
479 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
480 	    DDI_PROP_DONTPASS, OBP_BOARDNUM,
481 	    (caddr_t)&bnum, &len) != DDI_PROP_SUCCESS) {
482 		bnum = -1;
483 	}
484 
485 	if (strcmp(name, "cmp") == 0) {
486 		if (bnum != p->bnum)
487 			return (DDI_WALK_PRUNECHILD);
488 		return (DDI_WALK_CONTINUE);
489 	}
490 	/* we have already pruned all unwanted cores and cpus above */
491 	if (strcmp(name, "core") == 0) {
492 		return (DDI_WALK_CONTINUE);
493 	}
494 	if (strcmp(name, "cpu") == 0) {
495 		processorid_t cpuid;
496 		len = sizeof (cpuid);
497 		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
498 		    DDI_PROP_DONTPASS, "cpuid",
499 		    (caddr_t)&cpuid, &len) != DDI_PROP_SUCCESS) {
500 			p->rv = -1;
501 			return (DDI_WALK_TERMINATE);
502 		}
503 
504 		core_id = p->core_id;
505 
506 		bnum = LSB_ID(cpuid);
507 
508 		if (ON_BOARD_CORE_NUM(cpuid) != core_id)
509 			return (DDI_WALK_CONTINUE);
510 
511 		bp = p->bp;
512 		ASSERT(bnum == bp->bnum);
513 
514 		if (p->option == HOTADD_CPU) {
515 			if (prom_hotaddcpu(cpuid) != 0) {
516 				p->rv = -1;
517 				return (DDI_WALK_TERMINATE);
518 			}
519 			strand_id = STRAND_ID(cpuid);
520 			bp->cores[core_id].core_hotadded |= (1 << strand_id);
521 		} else if (p->option == HOTREMOVE_CPU) {
522 			if (prom_hotremovecpu(cpuid) != 0) {
523 				p->rv = -1;
524 				return (DDI_WALK_TERMINATE);
525 			}
526 			strand_id = STRAND_ID(cpuid);
527 			bp->cores[core_id].core_hotadded &= ~(1 << strand_id);
528 		}
529 		return (DDI_WALK_CONTINUE);
530 	}
531 
532 	return (DDI_WALK_PRUNECHILD);
533 }
534 
535 
536 static int
537 drmach_add_remove_cpu(int bnum, int core_id, int option)
538 {
539 	struct drmach_hotcpu arg;
540 	drmach_board_t *bp;
541 
542 	bp = drmach_get_board_by_bnum(bnum);
543 	ASSERT(bp);
544 
545 	arg.bp = bp;
546 	arg.bnum = bnum;
547 	arg.core_id = core_id;
548 	arg.rv = 0;
549 	arg.option = option;
550 	ddi_walk_devs(ddi_root_node(), drmach_cpu_cb, (void *)&arg);
551 	return (arg.rv);
552 }
553 
554 struct drmach_setup_core_arg {
555 	drmach_board_t *bp;
556 };
557 
558 static int
559 drmach_setup_core_cb(dev_info_t *dip, void *arg)
560 {
561 	struct drmach_setup_core_arg *p = (struct drmach_setup_core_arg *)arg;
562 	char name[OBP_MAXDRVNAME];
563 	int len = OBP_MAXDRVNAME;
564 	int bnum;
565 	int core_id, strand_id;
566 
567 	if (dip == ddi_root_node()) {
568 		return (DDI_WALK_CONTINUE);
569 	}
570 
571 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
572 	    DDI_PROP_DONTPASS, "name",
573 	    (caddr_t)name, &len) != DDI_PROP_SUCCESS) {
574 		return (DDI_WALK_PRUNECHILD);
575 	}
576 
577 	/* only cmp has board number */
578 	bnum = -1;
579 	len = sizeof (bnum);
580 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
581 	    DDI_PROP_DONTPASS, OBP_BOARDNUM,
582 	    (caddr_t)&bnum, &len) != DDI_PROP_SUCCESS) {
583 		bnum = -1;
584 	}
585 
586 	if (strcmp(name, "cmp") == 0) {
587 		if (bnum != p->bp->bnum)
588 			return (DDI_WALK_PRUNECHILD);
589 		return (DDI_WALK_CONTINUE);
590 	}
591 	/* we have already pruned all unwanted cores and cpus above */
592 	if (strcmp(name, "core") == 0) {
593 		return (DDI_WALK_CONTINUE);
594 	}
595 	if (strcmp(name, "cpu") == 0) {
596 		processorid_t cpuid;
597 		len = sizeof (cpuid);
598 		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
599 		    DDI_PROP_DONTPASS, "cpuid",
600 		    (caddr_t)&cpuid, &len) != DDI_PROP_SUCCESS) {
601 			return (DDI_WALK_TERMINATE);
602 		}
603 		bnum = LSB_ID(cpuid);
604 		ASSERT(bnum == p->bp->bnum);
605 		core_id = ON_BOARD_CORE_NUM(cpuid);
606 		strand_id = STRAND_ID(cpuid);
607 		p->bp->cores[core_id].core_present |= (1 << strand_id);
608 		return (DDI_WALK_CONTINUE);
609 	}
610 
611 	return (DDI_WALK_PRUNECHILD);
612 }
613 
614 
615 static void
616 drmach_setup_core_info(drmach_board_t *obj)
617 {
618 	struct drmach_setup_core_arg arg;
619 	int i;
620 
621 	for (i = 0; i < OPL_MAX_COREID_PER_BOARD; i++) {
622 		obj->cores[i].core_present = 0;
623 		obj->cores[i].core_hotadded = 0;
624 		obj->cores[i].core_started = 0;
625 	}
626 	arg.bp = obj;
627 	ddi_walk_devs(ddi_root_node(), drmach_setup_core_cb, (void *)&arg);
628 
629 	for (i = 0; i < OPL_MAX_COREID_PER_BOARD; i++) {
630 		if (obj->boot_board) {
631 			obj->cores[i].core_hotadded =
632 			    obj->cores[i].core_started =
633 			    obj->cores[i].core_present;
634 		}
635 	}
636 }
637 
638 /*
639  * drmach_node_* routines serve the purpose of separating the
640  * rest of the code from the device tree and OBP.  This is necessary
641  * because of In-Kernel-Probing.  Devices probed after stod are probed
642  * by the in-kernel-prober, not OBP.  These devices, therefore, do not
643  * have dnode ids.
644  */
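
/*
 * For example, a property read never touches the dip or OBP directly;
 * it goes through the ops vector set up in drmach_node_new() below
 * (a sketch, not code from this file; "portid" is just a sample
 * property):
 *
 *	int portid;
 *	if (np->n_getprop(np, "portid", &portid, sizeof (portid)) == 0)
 *		... portid now holds the property value ...
 */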
645 
646 typedef struct {
647 	drmach_node_walk_args_t	*nwargs;
648 	int 			(*cb)(drmach_node_walk_args_t *args);
649 	int			err;
650 } drmach_node_ddi_walk_args_t;
651 
652 static int
653 drmach_node_ddi_walk_cb(dev_info_t *dip, void *arg)
654 {
655 	drmach_node_ddi_walk_args_t	*nargs;
656 
657 	nargs = (drmach_node_ddi_walk_args_t *)arg;
658 
659 	/*
660 	 * dip doesn't have to be held here as we are called
661 	 * from ddi_walk_devs() which holds the dip.
662 	 */
663 	nargs->nwargs->node->here = (void *)dip;
664 
665 	nargs->err = nargs->cb(nargs->nwargs);
666 
667 
668 	/*
669 	 * Set "here" to NULL so that unheld dip is not accessible
670 	 * outside ddi_walk_devs()
671 	 */
672 	nargs->nwargs->node->here = NULL;
673 
674 	if (nargs->err)
675 		return (DDI_WALK_TERMINATE);
676 	else
677 		return (DDI_WALK_CONTINUE);
678 }
679 
680 static int
681 drmach_node_ddi_walk(drmach_node_t *np, void *data,
682 		int (*cb)(drmach_node_walk_args_t *args))
683 {
684 	drmach_node_walk_args_t		args;
685 	drmach_node_ddi_walk_args_t	nargs;
686 
687 
688 	/* initialize args structure for callback */
689 	args.node = np;
690 	args.data = data;
691 
692 	nargs.nwargs = &args;
693 	nargs.cb = cb;
694 	nargs.err = 0;
695 
696 	/*
697 	 * Root node doesn't have to be held in any way.
698 	 */
699 	ddi_walk_devs(ddi_root_node(), drmach_node_ddi_walk_cb, (void *)&nargs);
700 
701 	return (nargs.err);
702 }
703 
704 static int
705 drmach_node_ddi_get_parent(drmach_node_t *np, drmach_node_t *pp)
706 {
707 	dev_info_t	*ndip;
708 	static char	*fn = "drmach_node_ddi_get_parent";
709 
710 	ndip = np->n_getdip(np);
711 	if (ndip == NULL) {
712 		cmn_err(CE_WARN, "%s: NULL dip", fn);
713 		return (-1);
714 	}
715 
716 	bcopy(np, pp, sizeof (drmach_node_t));
717 
718 	pp->here = (void *)ddi_get_parent(ndip);
719 	if (pp->here == NULL) {
720 		cmn_err(CE_WARN, "%s: NULL parent dip", fn);
721 		return (-1);
722 	}
723 
724 	return (0);
725 }
726 
727 /*ARGSUSED*/
728 static pnode_t
729 drmach_node_ddi_get_dnode(drmach_node_t *np)
730 {
731 	return ((pnode_t)NULL);
732 }
733 
734 static drmach_node_t *
735 drmach_node_new(void)
736 {
737 	drmach_node_t *np;
738 
739 	np = kmem_zalloc(sizeof (drmach_node_t), KM_SLEEP);
740 
741 	np->get_dnode = drmach_node_ddi_get_dnode;
742 	np->walk = drmach_node_ddi_walk;
743 	np->n_getdip = drmach_node_ddi_get_dip;
744 	np->n_getproplen = drmach_node_ddi_get_proplen;
745 	np->n_getprop = drmach_node_ddi_get_prop;
746 	np->get_parent = drmach_node_ddi_get_parent;
747 
748 	return (np);
749 }
750 
751 static void
752 drmach_node_dispose(drmach_node_t *np)
753 {
754 	kmem_free(np, sizeof (*np));
755 }
756 
757 static dev_info_t *
758 drmach_node_ddi_get_dip(drmach_node_t *np)
759 {
760 	return ((dev_info_t *)np->here);
761 }
762 
763 static int
764 drmach_node_walk(drmach_node_t *np, void *param,
765 		int (*cb)(drmach_node_walk_args_t *args))
766 {
767 	return (np->walk(np, param, cb));
768 }
769 
770 static int
771 drmach_node_ddi_get_prop(drmach_node_t *np, char *name, void *buf, int len)
772 {
773 	int		rv = 0;
774 	dev_info_t	*ndip;
775 	static char	*fn = "drmach_node_ddi_get_prop";
776 
777 
778 	ndip = np->n_getdip(np);
779 	if (ndip == NULL) {
780 		cmn_err(CE_WARN, "%s: NULL dip", fn);
781 		rv = -1;
782 	} else if (ddi_getlongprop_buf(DDI_DEV_T_ANY, ndip,
783 	    DDI_PROP_DONTPASS, name,
784 	    (caddr_t)buf, &len) != DDI_PROP_SUCCESS) {
785 		rv = -1;
786 	}
787 
788 	return (rv);
789 }
790 
791 static int
792 drmach_node_ddi_get_proplen(drmach_node_t *np, char *name, int *len)
793 {
794 	int		rv = 0;
795 	dev_info_t	*ndip;
796 
797 	ndip = np->n_getdip(np);
798 	if (ndip == NULL) {
799 		rv = -1;
800 	} else if (ddi_getproplen(DDI_DEV_T_ANY, ndip, DDI_PROP_DONTPASS, name,
801 	    len) != DDI_PROP_SUCCESS) {
802 		rv = -1;
803 	}
804 
805 	return (rv);
806 }
807 
808 static drmachid_t
809 drmach_node_dup(drmach_node_t *np)
810 {
811 	drmach_node_t *dup;
812 
813 	dup = drmach_node_new();
814 	dup->here = np->here;
815 	dup->get_dnode = np->get_dnode;
816 	dup->walk = np->walk;
817 	dup->n_getdip = np->n_getdip;
818 	dup->n_getproplen = np->n_getproplen;
819 	dup->n_getprop = np->n_getprop;
820 	dup->get_parent = np->get_parent;
821 
822 	return (dup);
823 }
824 
825 /*
826  * drmach_array provides convenient array construction, access,
827  * bounds checking and array destruction logic.
828  */
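
/*
 * Typical usage (a sketch; drmach_init() below builds the real board
 * array this way):
 *
 *	drmach_array_t *arr = drmach_array_new(0, MAX_BOARDS - 1);
 *	(void) drmach_array_set(arr, bnum, (drmachid_t)bp);
 *	if (drmach_array_get(arr, bnum, &id) == 0 && id != NULL)
 *		... board bnum is present ...
 *	drmach_array_dispose(arr, drmach_board_dispose);
 */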
829 
830 static drmach_array_t *
831 drmach_array_new(int min_index, int max_index)
832 {
833 	drmach_array_t *arr;
834 
835 	arr = kmem_zalloc(sizeof (drmach_array_t), KM_SLEEP);
836 
837 	arr->arr_sz = (max_index - min_index + 1) * sizeof (void *);
838 	if (arr->arr_sz > 0) {
839 		arr->min_index = min_index;
840 		arr->max_index = max_index;
841 
842 		arr->arr = kmem_zalloc(arr->arr_sz, KM_SLEEP);
843 		return (arr);
844 	} else {
845 		kmem_free(arr, sizeof (*arr));
846 		return (NULL);
847 	}
848 }
849 
850 static int
851 drmach_array_set(drmach_array_t *arr, int idx, drmachid_t val)
852 {
853 	if (idx < arr->min_index || idx > arr->max_index)
854 		return (-1);
855 	else {
856 		arr->arr[idx - arr->min_index] = val;
857 		return (0);
858 	}
859 	/*NOTREACHED*/
860 }
861 
862 static int
863 drmach_array_get(drmach_array_t *arr, int idx, drmachid_t *val)
864 {
865 	if (idx < arr->min_index || idx > arr->max_index)
866 		return (-1);
867 	else {
868 		*val = arr->arr[idx - arr->min_index];
869 		return (0);
870 	}
871 	/*NOTREACHED*/
872 }
873 
874 static int
875 drmach_array_first(drmach_array_t *arr, int *idx, drmachid_t *val)
876 {
877 	int rv;
878 
879 	*idx = arr->min_index;
880 	while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL)
881 		*idx += 1;
882 
883 	return (rv);
884 }
885 
886 static int
887 drmach_array_next(drmach_array_t *arr, int *idx, drmachid_t *val)
888 {
889 	int rv;
890 
891 	*idx += 1;
892 	while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL)
893 		*idx += 1;
894 
895 	return (rv);
896 }
897 
898 static void
899 drmach_array_dispose(drmach_array_t *arr, void (*disposer)(drmachid_t))
900 {
901 	drmachid_t	val;
902 	int		idx;
903 	int		rv;
904 
905 	rv = drmach_array_first(arr, &idx, &val);
906 	while (rv == 0) {
907 		(*disposer)(val);
908 		rv = drmach_array_next(arr, &idx, &val);
909 	}
910 
911 	kmem_free(arr->arr, arr->arr_sz);
912 	kmem_free(arr, sizeof (*arr));
913 }
914 
915 static drmach_board_t *
916 drmach_get_board_by_bnum(int bnum)
917 {
918 	drmachid_t id;
919 
920 	if (drmach_array_get(drmach_boards, bnum, &id) == 0)
921 		return ((drmach_board_t *)id);
922 	else
923 		return (NULL);
924 }
925 
926 static pnode_t
927 drmach_node_get_dnode(drmach_node_t *np)
928 {
929 	return (np->get_dnode(np));
930 }
931 
932 /*ARGSUSED*/
933 sbd_error_t *
934 drmach_configure(drmachid_t id, int flags)
935 {
936 	drmach_device_t		*dp;
937 	sbd_error_t		*err = NULL;
938 	dev_info_t		*rdip;
939 	dev_info_t		*fdip = NULL;
940 
941 	if (DRMACH_IS_CPU_ID(id)) {
942 		return (NULL);
943 	}
944 	if (!DRMACH_IS_DEVICE_ID(id))
945 		return (drerr_new(0, EOPL_INAPPROP, NULL));
946 	dp = id;
947 	rdip = dp->node->n_getdip(dp->node);
948 
949 	ASSERT(rdip);
950 
951 	ASSERT(e_ddi_branch_held(rdip));
952 
953 	if (e_ddi_branch_configure(rdip, &fdip, 0) != 0) {
954 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
955 		dev_info_t *dip = (fdip != NULL) ? fdip : rdip;
956 
957 		(void) ddi_pathname(dip, path);
958 		err = drerr_new(1,  EOPL_DRVFAIL, path);
959 
960 		kmem_free(path, MAXPATHLEN);
961 
962 		/* If non-NULL, fdip is returned held and must be released */
963 		if (fdip != NULL)
964 			ddi_release_devi(fdip);
965 	}
966 
967 	return (err);
968 }
969 
970 
971 static sbd_error_t *
972 drmach_device_new(drmach_node_t *node,
973 	drmach_board_t *bp, int portid, drmachid_t *idp)
974 {
975 	int		 i;
976 	int		 rv;
977 	drmach_device_t	proto;
978 	sbd_error_t	*err;
979 	char		 name[OBP_MAXDRVNAME];
980 
981 	rv = node->n_getprop(node, "name", name, OBP_MAXDRVNAME);
982 	if (rv) {
983 		/* every node is expected to have a name */
984 		err = drerr_new(1, EOPL_GETPROP, "device node %s: property %s",
985 		    ddi_node_name(node->n_getdip(node)), "name");
986 		return (err);
987 	}
988 
989 	/*
990 	 * The node currently being examined is not listed in the name2type[]
991  * array.  In this case, the node is of no interest to drmach.  Both
992  * the returned id and err are initialized here to yield nothing (no
993  * device or error structure) for this case.
994 	 */
995 	i = drmach_name2type_idx(name);
996 
997 
998 	if (i < 0) {
999 		*idp = (drmachid_t)0;
1000 		return (NULL);
1001 	}
1002 
1003 	/* device specific new function will set unum */
1004 
1005 	bzero(&proto, sizeof (proto));
1006 	proto.type = drmach_name2type[i].type;
1007 	proto.bp = bp;
1008 	proto.node = node;
1009 	proto.portid = portid;
1010 
1011 	return (drmach_name2type[i].new(&proto, idp));
1012 }
1013 
1014 static void
1015 drmach_device_dispose(drmachid_t id)
1016 {
1017 	drmach_device_t *self = id;
1018 
1019 	self->cm.dispose(id);
1020 }
1021 
1022 
1023 static drmach_board_t *
1024 drmach_board_new(int bnum, int boot_board)
1025 {
1026 	static sbd_error_t *drmach_board_release(drmachid_t);
1027 	static sbd_error_t *drmach_board_status(drmachid_t, drmach_status_t *);
1028 
1029 	drmach_board_t	*bp;
1030 
1031 	bp = kmem_zalloc(sizeof (drmach_board_t), KM_SLEEP);
1032 
1033 	bp->cm.isa = (void *)drmach_board_new;
1034 	bp->cm.release = drmach_board_release;
1035 	bp->cm.status = drmach_board_status;
1036 
1037 	(void) drmach_board_name(bnum, bp->cm.name, sizeof (bp->cm.name));
1038 
1039 	bp->bnum = bnum;
1040 	bp->devices = NULL;
1041 	bp->connected = boot_board;
1042 	bp->tree = drmach_node_new();
1043 	bp->assigned = boot_board;
1044 	bp->powered = boot_board;
1045 	bp->boot_board = boot_board;
1046 
1047 	/*
1048 	 * If this is not bootup initialization, we have to wait till
1049 	 * IKP sets up the device nodes in drmach_board_connect().
1050 	 */
1051 	if (boot_board)
1052 		drmach_setup_core_info(bp);
1053 
1054 	drmach_array_set(drmach_boards, bnum, bp);
1055 	return (bp);
1056 }
1057 
1058 static void
1059 drmach_board_dispose(drmachid_t id)
1060 {
1061 	drmach_board_t *bp;
1062 
1063 	ASSERT(DRMACH_IS_BOARD_ID(id));
1064 	bp = id;
1065 
1066 	if (bp->tree)
1067 		drmach_node_dispose(bp->tree);
1068 
1069 	if (bp->devices)
1070 		drmach_array_dispose(bp->devices, drmach_device_dispose);
1071 
1072 	kmem_free(bp, sizeof (*bp));
1073 }
1074 
1075 static sbd_error_t *
1076 drmach_board_status(drmachid_t id, drmach_status_t *stat)
1077 {
1078 	sbd_error_t	*err = NULL;
1079 	drmach_board_t	*bp;
1080 
1081 	if (!DRMACH_IS_BOARD_ID(id))
1082 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1083 	bp = id;
1084 
1085 	stat->assigned = bp->assigned;
1086 	stat->powered = bp->powered;
1087 	stat->busy = 0;			/* assume not busy */
1088 	stat->configured = 0;		/* assume not configured */
1089 	stat->empty = 0;
1090 	stat->cond = bp->cond = SBD_COND_OK;
1091 	strncpy(stat->type, "System Brd", sizeof (stat->type));
1092 	stat->info[0] = '\0';
1093 
1094 	if (bp->devices) {
1095 		int		 rv;
1096 		int		 d_idx;
1097 		drmachid_t	 d_id;
1098 
1099 		rv = drmach_array_first(bp->devices, &d_idx, &d_id);
1100 		while (rv == 0) {
1101 			drmach_status_t	d_stat;
1102 
1103 			err = drmach_i_status(d_id, &d_stat);
1104 			if (err)
1105 				break;
1106 
1107 			stat->busy |= d_stat.busy;
1108 			stat->configured |= d_stat.configured;
1109 
1110 			rv = drmach_array_next(bp->devices, &d_idx, &d_id);
1111 		}
1112 	}
1113 
1114 	return (err);
1115 }
1116 
1117 int
1118 drmach_board_is_floating(drmachid_t id)
1119 {
1120 	drmach_board_t *bp;
1121 
1122 	if (!DRMACH_IS_BOARD_ID(id))
1123 		return (0);
1124 
1125 	bp = (drmach_board_t *)id;
1126 
1127 	return ((drmach_domain.floating & (1 << bp->bnum)) ? 1 : 0);
1128 }
1129 
1130 static int
1131 drmach_init(void)
1132 {
1133 	dev_info_t	*rdip;
1134 	int		i, rv, len;
1135 	int		*floating;
1136 
1137 	rw_init(&drmach_boards_rwlock, NULL, RW_DEFAULT, NULL);
1138 
1139 	drmach_boards = drmach_array_new(0, MAX_BOARDS - 1);
1140 
1141 	rdip = ddi_root_node();
1142 
1143 	if (ddi_getproplen(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
1144 	    "floating-boards", &len) != DDI_PROP_SUCCESS) {
1145 		cmn_err(CE_WARN, "Cannot get floating-boards proplen\n");
1146 	} else {
1147 		floating = (int *)kmem_alloc(len, KM_SLEEP);
1148 		rv = ddi_prop_op(DDI_DEV_T_ANY, rdip, PROP_LEN_AND_VAL_BUF,
1149 		    DDI_PROP_DONTPASS, "floating-boards", (caddr_t)floating,
1150 		    &len);
1151 		if (rv != DDI_PROP_SUCCESS) {
1152 			cmn_err(CE_WARN, "Cannot get floating-boards prop\n");
1153 		} else {
1154 			drmach_domain.floating = 0;
1155 			for (i = 0; i < len / sizeof (int); i++) {
1156 				drmach_domain.floating |= (1 << floating[i]);
1157 			}
1158 		}
1159 		kmem_free(floating, len);
1160 	}
1161 	drmach_domain.allow_dr = opl_check_dr_status();
1162 
1163 	rdip = ddi_get_child(ddi_root_node());
1164 	do {
1165 		int		 bnum;
1166 		drmachid_t	 id;
1167 
1168 		bnum = -1;
1169 		bnum = ddi_getprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
1170 		    OBP_BOARDNUM, -1);
1171 		if (bnum == -1)
1172 			continue;
1173 
1174 		if (drmach_array_get(drmach_boards, bnum, &id) == -1) {
1175 			cmn_err(CE_WARN, "Device node 0x%p has invalid "
1176 			    "property value, %s=%d", rdip, OBP_BOARDNUM, bnum);
1177 			goto error;
1178 		} else if (id == NULL) {
1179 			(void) drmach_board_new(bnum, 1);
1180 		}
1181 	} while ((rdip = ddi_get_next_sibling(rdip)) != NULL);
1182 
1183 	opl_hold_devtree();
1184 
1185 	/*
1186 	 * Initialize the IKP feature.
1187 	 *
1188 	 * This can be done only after DR has acquired a hold on all the
1189 	 * device nodes that are interesting to IKP.
1190 	 */
1191 	if (opl_init_cfg() != 0) {
1192 		cmn_err(CE_WARN, "DR - IKP initialization failed");
1193 
1194 		opl_release_devtree();
1195 
1196 		goto error;
1197 	}
1198 
1199 	return (0);
1200 error:
1201 	drmach_array_dispose(drmach_boards, drmach_board_dispose);
1202 	rw_destroy(&drmach_boards_rwlock);
1203 	return (ENXIO);
1204 }
1205 
1206 static void
1207 drmach_fini(void)
1208 {
1209 	rw_enter(&drmach_boards_rwlock, RW_WRITER);
1210 	drmach_array_dispose(drmach_boards, drmach_board_dispose);
1211 	drmach_boards = NULL;
1212 	rw_exit(&drmach_boards_rwlock);
1213 
1214 	/*
1215 	 * Walk immediate children of the root devinfo node
1216 	 * releasing holds acquired on branches in drmach_init()
1217 	 */
1218 
1219 	opl_release_devtree();
1220 
1221 	rw_destroy(&drmach_boards_rwlock);
1222 }
1223 
1224 /*
1225  *	Each system board contains 2 Oberon PCI bridges and
1226  *	1 CMUCH.
1227  *	Each Oberon has 2 channels.
1228  *	Each channel has 2 PCI Express leaves.
1229  *	Each CMUCH has 1 pci bus.
1230  *
1231  *
1232  *	Device Path:
1233  *	/pci@<portid>,reg
1234  *
1235  *	where
1236  *	portid[10] = 0
1237  *	portid[9:0] = LLEAF_ID[9:0] of the Oberon Channel
1238  *
1239  *	LLEAF_ID[9:8] = 0
1240  *	LLEAF_ID[8:4] = LSB_ID[4:0]
1241  *	LLEAF_ID[3:1] = IO Channel#[2:0] (0,1,2,3 for Oberon)
1242  *			channel 4 is pcicmu
1243  *	LLEAF_ID[0] = PCI Leaf Number (0 for leaf-A, 1 for leaf-B)
1244  *
1245  *	Properties:
1246  *	name = pci
1247  *	device_type = "pciex"
1248  *	board# = LSBID
1249  *	reg = int32 * 2, Oberon CSR space of the leaf and the UBC space
1250  *	portid = Jupiter Bus Device ID ((LSB_ID << 3)|pciport#)
1251  */
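
/*
 * Worked example of the decode performed in drmach_io_new() below
 * (portid value chosen arbitrarily for illustration):
 *
 *	portid == 0x23 (binary 10_0011)
 *	channel == (0x23 >> 1) & 0x7 == 1
 *	leaf == 0x23 & 0x1 == 1 (leaf-B)
 */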
1252 
1253 static sbd_error_t *
1254 drmach_io_new(drmach_device_t *proto, drmachid_t *idp)
1255 {
1256 	drmach_io_t	*ip;
1257 
1258 	int		 portid;
1259 
1260 	portid = proto->portid;
1261 	ASSERT(portid != -1);
1262 	proto->unum = portid & (MAX_IO_UNITS_PER_BOARD - 1);
1263 
1264 	ip = kmem_zalloc(sizeof (drmach_io_t), KM_SLEEP);
1265 	bcopy(proto, &ip->dev, sizeof (ip->dev));
1266 	ip->dev.node = drmach_node_dup(proto->node);
1267 	ip->dev.cm.isa = (void *)drmach_io_new;
1268 	ip->dev.cm.dispose = drmach_io_dispose;
1269 	ip->dev.cm.release = drmach_io_release;
1270 	ip->dev.cm.status = drmach_io_status;
1271 	ip->channel = (portid >> 1) & 0x7;
1272 	ip->leaf = (portid & 0x1);
1273 
1274 	snprintf(ip->dev.cm.name, sizeof (ip->dev.cm.name), "%s%d",
1275 	    ip->dev.type, ip->dev.unum);
1276 
1277 	*idp = (drmachid_t)ip;
1278 	return (NULL);
1279 }
1280 
1281 
1282 static void
1283 drmach_io_dispose(drmachid_t id)
1284 {
1285 	drmach_io_t *self;
1286 
1287 	ASSERT(DRMACH_IS_IO_ID(id));
1288 
1289 	self = id;
1290 	if (self->dev.node)
1291 		drmach_node_dispose(self->dev.node);
1292 
1293 	kmem_free(self, sizeof (*self));
1294 }
1295 
1296 /*ARGSUSED*/
1297 sbd_error_t *
1298 drmach_pre_op(int cmd, drmachid_t id, drmach_opts_t *opts)
1299 {
1300 	drmach_board_t	*bp = (drmach_board_t *)id;
1301 	sbd_error_t	*err = NULL;
1302 
1303 	/* allow status and ncm operations to always succeed */
1304 	if ((cmd == SBD_CMD_STATUS) || (cmd == SBD_CMD_GETNCM)) {
1305 		return (NULL);
1306 	}
1307 
1308 	/* check all other commands for the required option string */
1309 
1310 	if ((opts->size > 0) && (opts->copts != NULL)) {
1311 
1312 		DRMACH_PR("platform options: %s\n", opts->copts);
1313 
1314 		if (strstr(opts->copts, "opldr") == NULL) {
1315 			err = drerr_new(1, EOPL_SUPPORT, NULL);
1316 		}
1317 	} else {
1318 		err = drerr_new(1, EOPL_SUPPORT, NULL);
1319 	}
1320 
1321 	if (!err && id && DRMACH_IS_BOARD_ID(id)) {
1322 		switch (cmd) {
1323 			case SBD_CMD_TEST:
1324 			case SBD_CMD_STATUS:
1325 			case SBD_CMD_GETNCM:
1326 				break;
1327 			case SBD_CMD_CONNECT:
1328 				if (bp->connected)
1329 					err = drerr_new(0, ESBD_STATE, NULL);
1330 				else if (!drmach_domain.allow_dr)
1331 					err = drerr_new(1, EOPL_SUPPORT, NULL);
1332 				break;
1333 			case SBD_CMD_DISCONNECT:
1334 				if (!bp->connected)
1335 					err = drerr_new(0, ESBD_STATE, NULL);
1336 				else if (!drmach_domain.allow_dr)
1337 					err = drerr_new(1, EOPL_SUPPORT, NULL);
1338 				break;
1339 			default:
1340 				if (!drmach_domain.allow_dr)
1341 					err = drerr_new(1, EOPL_SUPPORT, NULL);
1342 				break;
1343 
1344 		}
1345 	}
1346 
1347 	return (err);
1348 }
1349 
1350 /*ARGSUSED*/
1351 sbd_error_t *
1352 drmach_post_op(int cmd, drmachid_t id, drmach_opts_t *opts)
1353 {
1354 	return (NULL);
1355 }
1356 
1357 sbd_error_t *
1358 drmach_board_assign(int bnum, drmachid_t *id)
1359 {
1360 	sbd_error_t	*err = NULL;
1361 
1362 	rw_enter(&drmach_boards_rwlock, RW_WRITER);
1363 
1364 	if (drmach_array_get(drmach_boards, bnum, id) == -1) {
1365 		err = drerr_new(1, EOPL_BNUM, "%d", bnum);
1366 	} else {
1367 		drmach_board_t	*bp;
1368 
1369 		if (*id)
1370 			rw_downgrade(&drmach_boards_rwlock);
1371 
1372 		bp = *id;
1373 		if (!(*id))
1374 			bp = *id  =
1375 			    (drmachid_t)drmach_board_new(bnum, 0);
1376 		bp->assigned = 1;
1377 	}
1378 
1379 	rw_exit(&drmach_boards_rwlock);
1380 
1381 	return (err);
1382 }
1383 
1384 /*ARGSUSED*/
1385 sbd_error_t *
1386 drmach_board_connect(drmachid_t id, drmach_opts_t *opts)
1387 {
1388 	extern int	cpu_alljupiter;
1389 	drmach_board_t	*obj = (drmach_board_t *)id;
1390 	unsigned	cpu_impl;
1391 
1392 	if (!DRMACH_IS_BOARD_ID(id))
1393 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1394 
1395 	if (opl_probe_sb(obj->bnum, &cpu_impl) != 0)
1396 		return (drerr_new(1, EOPL_PROBE, NULL));
1397 
1398 	if (cpu_alljupiter) {
1399 		if (cpu_impl & (1 << OLYMPUS_C_IMPL)) {
1400 			(void) opl_unprobe_sb(obj->bnum);
1401 			return (drerr_new(1, EOPL_MIXED_CPU, NULL));
1402 		}
1403 	}
1404 
1405 	(void) prom_attach_notice(obj->bnum);
1406 
1407 	drmach_setup_core_info(obj);
1408 
1409 	obj->connected = 1;
1410 
1411 	return (NULL);
1412 }
1413 
1414 static int drmach_cache_flush_flag[NCPU];
1415 
1416 /*ARGSUSED*/
1417 static void
1418 drmach_flush_cache(uint64_t id, uint64_t dummy)
1419 {
1420 	extern void cpu_flush_ecache(void);
1421 
1422 	cpu_flush_ecache();
1423 	drmach_cache_flush_flag[id] = 0;
1424 }
1425 
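/*
 * Flush the ecache on every CPU in cpu_ready_set.  Each target CPU
 * clears its drmach_cache_flush_flag[] slot from the cross-call
 * handler when done; we spin until all flags have cleared.
 */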
1426 static void
1427 drmach_flush_all()
1428 {
1429 	cpuset_t	xc_cpuset;
1430 	int		i;
1431 
1432 	xc_cpuset = cpu_ready_set;
1433 	for (i = 0; i < NCPU; i++) {
1434 		if (CPU_IN_SET(xc_cpuset, i)) {
1435 			drmach_cache_flush_flag[i] = 1;
1436 			xc_one(i, drmach_flush_cache, i, 0);
1437 			while (drmach_cache_flush_flag[i]) {
1438 				DELAY(1000);
1439 			}
1440 		}
1441 	}
1442 }
1443 
1444 static int
1445 drmach_disconnect_cpus(drmach_board_t *bp)
1446 {
1447 	int i, bnum;
1448 
1449 	bnum = bp->bnum;
1450 
1451 	for (i = 0; i < OPL_MAX_COREID_PER_BOARD; i++) {
1452 		if (bp->cores[i].core_present) {
1453 			if (bp->cores[i].core_started)
1454 				return (-1);
1455 			if (bp->cores[i].core_hotadded) {
1456 				if (drmach_add_remove_cpu(bnum, i,
1457 				    HOTREMOVE_CPU)) {
1458 					cmn_err(CE_WARN, "Failed to remove "
1459 					    "CMP %d on board %d\n", i, bnum);
1460 					return (-1);
1461 				}
1462 			}
1463 		}
1464 	}
1465 	return (0);
1466 }
1467 
1468 /*ARGSUSED*/
1469 sbd_error_t *
1470 drmach_board_disconnect(drmachid_t id, drmach_opts_t *opts)
1471 {
1472 	drmach_board_t *obj;
1473 	int rv = 0;
1474 	sbd_error_t		*err = NULL;
1475 
1476 	if (DRMACH_NULL_ID(id))
1477 		return (NULL);
1478 
1479 	if (!DRMACH_IS_BOARD_ID(id))
1480 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1481 
1482 	obj = (drmach_board_t *)id;
1483 
1484 	if (drmach_disconnect_cpus(obj)) {
1485 		err = drerr_new(1, EOPL_DEPROBE, obj->cm.name);
1486 		return (err);
1487 	}
1488 
1489 	rv = opl_unprobe_sb(obj->bnum);
1490 
1491 	if (rv == 0) {
1492 		prom_detach_notice(obj->bnum);
1493 		obj->connected = 0;
1494 
1495 	} else
1496 		err = drerr_new(1, EOPL_DEPROBE, obj->cm.name);
1497 
1498 	return (err);
1499 }
1500 
1501 static int
1502 drmach_get_portid(drmach_node_t *np)
1503 {
1504 	int		portid;
1505 	char		type[OBP_MAXPROPNAME];
1506 
1507 	if (np->n_getprop(np, "portid", &portid, sizeof (portid)) == 0)
1508 		return (portid);
1509 
1510 	/*
1511 	 * Get the device_type property to see if we should
1512 	 * continue processing this node.
1513 	 */
1514 	if (np->n_getprop(np, "device_type", &type, sizeof (type)) != 0)
1515 		return (-1);
1516 
1517 	if (strcmp(type, OPL_CPU_NODE) == 0) {
1518 		/*
1519 		 * We return cpuid because it has no portid
1520 		 */
1521 		if (np->n_getprop(np, "cpuid", &portid, sizeof (portid)) == 0)
1522 			return (portid);
1523 	}
1524 
1525 	return (-1);
1526 }
1527 
1528 /*
1529  * This is a helper function to determine if a given
1530  * node should be considered for a dr operation according
1531  * to predefined dr type nodes and the node's name.
1532  * Formal Parameter: The name of a device node.
1533  * Return Value: -1, name does not map to a valid dr type.
1534  *		 A value greater than or equal to 0, name is a valid dr type.
1535  */
1536 static int
1537 drmach_name2type_idx(char *name)
1538 {
1539 	int 	index, ntypes;
1540 
1541 	if (name == NULL)
1542 		return (-1);
1543 
1544 	/*
1545 	 * Determine how many possible types are currently supported
1546 	 * for dr.
1547 	 */
1548 	ntypes = sizeof (drmach_name2type) / sizeof (drmach_name2type[0]);
1549 
1550 	/* Determine if the node's name corresponds to a predefined type. */
1551 	for (index = 0; index < ntypes; index++) {
1552 		if (strcmp(drmach_name2type[index].name, name) == 0)
1553 			/* The node is an allowed type for dr. */
1554 			return (index);
1555 	}
1556 
1557 	/*
1558 	 * If the name of the node does not map to any of the
1559 	 * types in the array drmach_name2type then the node is not of
1560 	 * interest to dr.
1561 	 */
1562 	return (-1);
1563 }
1564 
1565 /*
1566  * there are some complications on OPL:
1567  * - pseudo-mc nodes do not have portid property
1568  * - portid[9:5] of cmp node is LSB #, portid[7:3] of pci is LSB#
1569  * - cmp has board#
1570  * - core and cpu nodes do not have portid and board# properties
1571  * starcat uses portid to derive the board# but that does not work
1572  * for us.  starfire reads board# property to filter the devices.
1573  * That does not work either.  So for these specific devices,
1574  * we use hard-coded methods to get the board# -
1575  * cpu: LSB# = CPUID[9:5]
1576  */
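
/*
 * For example (cpuid value chosen for illustration): a cpu node with
 * cpuid 0x6a has LSB# (0x6a >> 5) & 0x1f == 3, so it belongs to board
 * 3.  drmach_board_find_devices_cb() below applies this derivation.
 */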
1577 
1578 static int
1579 drmach_board_find_devices_cb(drmach_node_walk_args_t *args)
1580 {
1581 	drmach_node_t			*node = args->node;
1582 	drmach_board_cb_data_t		*data = args->data;
1583 	drmach_board_t			*obj = data->obj;
1584 
1585 	int		rv, portid;
1586 	int		bnum;
1587 	drmachid_t	id;
1588 	drmach_device_t	*device;
1589 	char name[OBP_MAXDRVNAME];
1590 
1591 	portid = drmach_get_portid(node);
1592 	/*
1593 	 * core, cpu and pseudo-mc do not have a portid;
1594 	 * we use cpuid as the portid of the cpu node.
1595 	 * For pseudo-mc, we do not use portid info.
1596 	 */
1597 
1598 	rv = node->n_getprop(node, "name", name, OBP_MAXDRVNAME);
1599 	if (rv)
1600 		return (0);
1601 
1602 
1603 	rv = node->n_getprop(node, OBP_BOARDNUM, &bnum, sizeof (bnum));
1604 
1605 	if (rv) {
1606 		/*
1607 		 * cpu does not have board# property.  We use
1608 		 * CPUID[9:5]
1609 		 */
1610 		if (strcmp("cpu", name) == 0) {
1611 			bnum = (portid >> 5) & 0x1f;
1612 		} else
1613 			return (0);
1614 	}
1615 
1616 
1617 	if (bnum != obj->bnum)
1618 		return (0);
1619 
1620 	if (drmach_name2type_idx(name) < 0) {
1621 		return (0);
1622 	}
1623 
1624 	/*
1625 	 * Create a device data structure from this node data.
1626 	 * The call may yield nothing if the node is not of interest
1627 	 * to drmach.
1628 	 */
1629 	data->err = drmach_device_new(node, obj, portid, &id);
1630 	if (data->err)
1631 		return (-1);
1632 	else if (!id) {
1633 		/*
1634 		 * drmach_device_new examined the node we passed in
1635 		 * and determined that it was one not of interest to
1636 		 * drmach.  So, it is skipped.
1637 		 */
1638 		return (0);
1639 	}
1640 
1641 	rv = drmach_array_set(obj->devices, data->ndevs++, id);
1642 	if (rv) {
1643 		data->err = DRMACH_INTERNAL_ERROR();
1644 		return (-1);
1645 	}
1646 	device = id;
1647 
1648 	data->err = (*data->found)(data->a, device->type, device->unum, id);
1649 	return (data->err == NULL ? 0 : -1);
1650 }
1651 
1652 sbd_error_t *
1653 drmach_board_find_devices(drmachid_t id, void *a,
1654 	sbd_error_t *(*found)(void *a, const char *, int, drmachid_t))
1655 {
1656 	drmach_board_t		*bp = (drmach_board_t *)id;
1657 	sbd_error_t		*err;
1658 	int			 max_devices;
1659 	int			 rv;
1660 	drmach_board_cb_data_t	data;
1661 
1662 
1663 	if (!DRMACH_IS_BOARD_ID(id))
1664 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1665 
1666 	max_devices  = MAX_CPU_UNITS_PER_BOARD;
1667 	max_devices += MAX_MEM_UNITS_PER_BOARD;
1668 	max_devices += MAX_IO_UNITS_PER_BOARD;
1669 
1670 	bp->devices = drmach_array_new(0, max_devices);
1671 
1672 	if (bp->tree == NULL)
1673 		bp->tree = drmach_node_new();
1674 
1675 	data.obj = bp;
1676 	data.ndevs = 0;
1677 	data.found = found;
1678 	data.a = a;
1679 	data.err = NULL;
1680 
1681 	rv = drmach_node_walk(bp->tree, &data, drmach_board_find_devices_cb);
1682 	if (rv == 0)
1683 		err = NULL;
1684 	else {
1685 		drmach_array_dispose(bp->devices, drmach_device_dispose);
1686 		bp->devices = NULL;
1687 
1688 		if (data.err)
1689 			err = data.err;
1690 		else
1691 			err = DRMACH_INTERNAL_ERROR();
1692 	}
1693 
1694 	return (err);
1695 }
1696 
1697 int
1698 drmach_board_lookup(int bnum, drmachid_t *id)
1699 {
1700 	int	rv = 0;
1701 
1702 	rw_enter(&drmach_boards_rwlock, RW_READER);
1703 	if (drmach_array_get(drmach_boards, bnum, id)) {
1704 		*id = 0;
1705 		rv = -1;
1706 	}
1707 	rw_exit(&drmach_boards_rwlock);
1708 	return (rv);
1709 }
1710 
1711 sbd_error_t *
1712 drmach_board_name(int bnum, char *buf, int buflen)
1713 {
1714 	snprintf(buf, buflen, "SB%d", bnum);
1715 	return (NULL);
1716 }
1717 
1718 sbd_error_t *
1719 drmach_board_poweroff(drmachid_t id)
1720 {
1721 	drmach_board_t	*bp;
1722 	sbd_error_t	*err;
1723 	drmach_status_t	 stat;
1724 
1725 	if (DRMACH_NULL_ID(id))
1726 		return (NULL);
1727 
1728 	if (!DRMACH_IS_BOARD_ID(id))
1729 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1730 	bp = id;
1731 
1732 	err = drmach_board_status(id, &stat);
1733 
1734 	if (!err) {
1735 		if (stat.configured || stat.busy)
1736 			err = drerr_new(0, EOPL_CONFIGBUSY, bp->cm.name);
1737 		else {
1738 			bp->powered = 0;
1739 		}
1740 	}
1741 	return (err);
1742 }
1743 
1744 sbd_error_t *
1745 drmach_board_poweron(drmachid_t id)
1746 {
1747 	drmach_board_t	*bp;
1748 
1749 	if (!DRMACH_IS_BOARD_ID(id))
1750 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1751 	bp = id;
1752 
1753 	bp->powered = 1;
1754 
1755 	return (NULL);
1756 }
1757 
1758 static sbd_error_t *
1759 drmach_board_release(drmachid_t id)
1760 {
1761 	if (!DRMACH_IS_BOARD_ID(id))
1762 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1763 	return (NULL);
1764 }
1765 
1766 /*ARGSUSED*/
1767 sbd_error_t *
1768 drmach_board_test(drmachid_t id, drmach_opts_t *opts, int force)
1769 {
1770 	return (NULL);
1771 }
1772 
1773 sbd_error_t *
1774 drmach_board_unassign(drmachid_t id)
1775 {
1776 	drmach_board_t	*bp;
1777 	sbd_error_t	*err;
1778 	drmach_status_t	 stat;
1779 
1780 	if (DRMACH_NULL_ID(id))
1781 		return (NULL);
1782 
1783 	if (!DRMACH_IS_BOARD_ID(id)) {
1784 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1785 	}
1786 	bp = id;
1787 
1788 	rw_enter(&drmach_boards_rwlock, RW_WRITER);
1789 
1790 	err = drmach_board_status(id, &stat);
1791 	if (err) {
1792 		rw_exit(&drmach_boards_rwlock);
1793 		return (err);
1794 	}
1795 	if (stat.configured || stat.busy) {
1796 		err = drerr_new(0, EOPL_CONFIGBUSY, bp->cm.name);
1797 	} else {
1798 		if (drmach_array_set(drmach_boards, bp->bnum, 0) != 0)
1799 			err = DRMACH_INTERNAL_ERROR();
1800 		else
1801 			drmach_board_dispose(bp);
1802 	}
1803 	rw_exit(&drmach_boards_rwlock);
1804 	return (err);
1805 }
1806 
1807 /*
1808  * We have to do more on OPL - e.g. set up sram tte, read cpuid, strand id,
1809  * implementation #, etc
1810  */
1811 
1812 static sbd_error_t *
1813 drmach_cpu_new(drmach_device_t *proto, drmachid_t *idp)
1814 {
1815 	static void drmach_cpu_dispose(drmachid_t);
1816 	static sbd_error_t *drmach_cpu_release(drmachid_t);
1817 	static sbd_error_t *drmach_cpu_status(drmachid_t, drmach_status_t *);
1818 
1819 	int		 portid;
1820 	drmach_cpu_t	*cp = NULL;
1821 
1822 	/* portid is CPUID of the node */
1823 	portid = proto->portid;
1824 	ASSERT(portid != -1);
1825 
1826 	/* unum = (CMP/CHIP ID) + (ON_BOARD_CORE_NUM * MAX_CMPID_PER_BOARD) */
1827 	proto->unum = ((portid/OPL_MAX_CPUID_PER_CMP) &
1828 	    (OPL_MAX_CMPID_PER_BOARD - 1)) +
1829 	    ((portid & (OPL_MAX_CPUID_PER_CMP - 1)) *
1830 	    (OPL_MAX_CMPID_PER_BOARD));
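
	/*
	 * For illustration, assuming OPL_MAX_CPUID_PER_CMP == 8 and
	 * OPL_MAX_CMPID_PER_BOARD == 4 (values taken from the OPL
	 * headers, not shown here): portid 0x2b yields
	 * unum == ((0x2b / 8) & 3) + ((0x2b & 7) * 4) == 1 + 12 == 13.
	 */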
1831 
1832 	cp = kmem_zalloc(sizeof (drmach_cpu_t), KM_SLEEP);
1833 	bcopy(proto, &cp->dev, sizeof (cp->dev));
1834 	cp->dev.node = drmach_node_dup(proto->node);
1835 	cp->dev.cm.isa = (void *)drmach_cpu_new;
1836 	cp->dev.cm.dispose = drmach_cpu_dispose;
1837 	cp->dev.cm.release = drmach_cpu_release;
1838 	cp->dev.cm.status = drmach_cpu_status;
1839 
1840 	snprintf(cp->dev.cm.name, sizeof (cp->dev.cm.name), "%s%d",
1841 	    cp->dev.type, cp->dev.unum);
1842 
1843 /*
1844  *	CPU ID representation
1845  *	CPUID[9:5] = SB#
1846  *	CPUID[4:3] = Chip#
1847  *	CPUID[2:1] = Core# (only 2 cores for OPL)
1848  *	CPUID[0:0] = Strand#
1849  */
1850 
1851 /*
1852  *	The reg property of the strand node contains the strand ID and
1853  *	the reg property of its parent node contains the core ID;
1854  *	ideally those should be used instead of decoding the cpuid.
1855  */
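
/*
 *	Worked example of the decode below (portid chosen arbitrarily):
 *	portid 0x2b (binary 10_1011) gives sb 1, chipid 1, coreid 1,
 *	strandid 1.
 */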
1856 	cp->cpuid = portid;
1857 	cp->sb = (portid >> 5) & 0x1f;
1858 	cp->chipid = (portid >> 3) & 0x3;
1859 	cp->coreid = (portid >> 1) & 0x3;
1860 	cp->strandid = portid & 0x1;
1861 
1862 	*idp = (drmachid_t)cp;
1863 	return (NULL);
1864 }
1865 
1866 
1867 static void
1868 drmach_cpu_dispose(drmachid_t id)
1869 {
1870 	drmach_cpu_t	*self;
1871 
1872 	ASSERT(DRMACH_IS_CPU_ID(id));
1873 
1874 	self = id;
1875 	if (self->dev.node)
1876 		drmach_node_dispose(self->dev.node);
1877 
1878 	kmem_free(self, sizeof (*self));
1879 }
1880 
1881 static int
1882 drmach_cpu_start(struct cpu *cp)
1883 {
1884 	int		cpuid = cp->cpu_id;
1885 	extern int	restart_other_cpu(int);
1886 
1887 	ASSERT(MUTEX_HELD(&cpu_lock));
1888 	ASSERT(cpunodes[cpuid].nodeid != (pnode_t)0);
1889 
1890 	cp->cpu_flags &= ~CPU_POWEROFF;
1891 
1892 	/*
1893 	 * NOTE: restart_other_cpu pauses cpus during the
1894 	 *	 slave cpu start.  This helps to quiesce the
1895 	 *	 bus traffic a bit which makes the tick sync
1896 	 *	 routine in the prom more robust.
1897 	 */
1898 	DRMACH_PR("COLD START for cpu (%d)\n", cpuid);
1899 
1900 	restart_other_cpu(cpuid);
1901 
1902 	return (0);
1903 }
1904 
1905 static sbd_error_t *
1906 drmach_cpu_release(drmachid_t id)
1907 {
1908 	if (!DRMACH_IS_CPU_ID(id))
1909 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1910 
1911 	return (NULL);
1912 }
1913 
1914 static sbd_error_t *
1915 drmach_cpu_status(drmachid_t id, drmach_status_t *stat)
1916 {
1917 	drmach_cpu_t *cp;
1918 	drmach_device_t *dp;
1919 
1920 	ASSERT(DRMACH_IS_CPU_ID(id));
1921 	cp = (drmach_cpu_t *)id;
1922 	dp = &cp->dev;
1923 
1924 	stat->assigned = dp->bp->assigned;
1925 	stat->powered = dp->bp->powered;
1926 	mutex_enter(&cpu_lock);
1927 	stat->configured = (cpu_get(cp->cpuid) != NULL);
1928 	mutex_exit(&cpu_lock);
1929 	stat->busy = dp->busy;
1930 	strncpy(stat->type, dp->type, sizeof (stat->type));
1931 	stat->info[0] = '\0';
1932 
1933 	return (NULL);
1934 }
1935 
1936 sbd_error_t *
1937 drmach_cpu_disconnect(drmachid_t id)
1938 {
1939 
1940 	if (!DRMACH_IS_CPU_ID(id))
1941 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1942 
1943 	return (NULL);
1944 }
1945 
1946 sbd_error_t *
1947 drmach_cpu_get_id(drmachid_t id, processorid_t *cpuid)
1948 {
1949 	drmach_cpu_t *cpu;
1950 
1951 	if (!DRMACH_IS_CPU_ID(id))
1952 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1953 	cpu = (drmach_cpu_t *)id;
1954 
1955 	/* get from cpu directly on OPL */
1956 	*cpuid = cpu->cpuid;
1957 	return (NULL);
1958 }
1959 
1960 sbd_error_t *
1961 drmach_cpu_get_impl(drmachid_t id, int *ip)
1962 {
1963 	drmach_device_t *cpu;
1964 	drmach_node_t	*np;
1965 	drmach_node_t	pp;
1966 	int		impl;
1967 	char		type[OBP_MAXPROPNAME];
1968 
1969 	if (!DRMACH_IS_CPU_ID(id))
1970 		return (drerr_new(0, EOPL_INAPPROP, NULL));
1971 
1972 	cpu = id;
1973 	np = cpu->node;
1974 
1975 	if (np->get_parent(np, &pp) != 0) {
1976 		return (DRMACH_INTERNAL_ERROR());
1977 	}
1978 
1979 	/* the parent should be core */
1980 
1981 	if (pp.n_getprop(&pp, "device_type", &type, sizeof (type)) != 0) {
1982 		return (drerr_new(0, EOPL_GETPROP, NULL));
1983 	}
1984 
1985 	if (strcmp(type, OPL_CORE_NODE) == 0) {
1986 		if (pp.n_getprop(&pp, "implementation#", &impl,
1987 		    sizeof (impl)) != 0) {
1988 			return (drerr_new(0, EOPL_GETPROP, NULL));
1989 		}
1990 	} else {
1991 		return (DRMACH_INTERNAL_ERROR());
1992 	}
1993 
1994 	*ip = impl;
1995 
1996 	return (NULL);
1997 }
1998 
1999 sbd_error_t *
2000 drmach_get_dip(drmachid_t id, dev_info_t **dip)
2001 {
2002 	drmach_device_t	*dp;
2003 
2004 	if (!DRMACH_IS_DEVICE_ID(id))
2005 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2006 	dp = id;
2007 
2008 	*dip = dp->node->n_getdip(dp->node);
2009 	return (NULL);
2010 }
2011 
2012 sbd_error_t *
2013 drmach_io_is_attached(drmachid_t id, int *yes)
2014 {
2015 	drmach_device_t *dp;
2016 	dev_info_t	*dip;
2017 	int		state;
2018 
2019 	if (!DRMACH_IS_IO_ID(id))
2020 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2021 	dp = id;
2022 
2023 	dip = dp->node->n_getdip(dp->node);
2024 	if (dip == NULL) {
2025 		*yes = 0;
2026 		return (NULL);
2027 	}
2028 
2029 	state = ddi_get_devstate(dip);
2030 	*yes = ((i_ddi_node_state(dip) >= DS_ATTACHED) ||
2031 	    (state == DDI_DEVSTATE_UP));
2032 
2033 	return (NULL);
2034 }
2035 
2036 struct drmach_io_cb {
2037 	char	*name;	/* name of the node */
2038 	int	(*func)(dev_info_t *);
2039 	int	rv;
2040 	dev_info_t *dip;
2041 };
2042 
2043 #define	DRMACH_IO_POST_ATTACH	0
2044 #define	DRMACH_IO_PRE_RELEASE	1
2045 
2046 static int
2047 drmach_io_cb_check(dev_info_t *dip, void *arg)
2048 {
2049 	struct drmach_io_cb *p = (struct drmach_io_cb *)arg;
2050 	char name[OBP_MAXDRVNAME];
2051 	int len = OBP_MAXDRVNAME;
2052 
2053 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "name",
2054 	    (caddr_t)name, &len) != DDI_PROP_SUCCESS) {
2055 		return (DDI_WALK_PRUNECHILD);
2056 	}
2057 
2058 	if (strcmp(name, p->name) == 0) {
2059 		ndi_hold_devi(dip);
2060 		p->dip = dip;
2061 		return (DDI_WALK_TERMINATE);
2062 	}
2063 
2064 	return (DDI_WALK_CONTINUE);
2065 }
2066 
2067 
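/*
 * If this IO object is the pcicmu channel (channel 4), walk its
 * subtree for the "serial" node and invoke the oplmsu driver's DR
 * callback (oplmsu_dr_attach or oplmsu_dr_detach, looked up with
 * modgetsymvalue()) on it.
 */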
2068 static int
2069 drmach_console_ops(drmachid_t *id, int state)
2070 {
2071 	drmach_io_t *obj = (drmach_io_t *)id;
2072 	struct drmach_io_cb arg;
2073 	int (*msudetp)(dev_info_t *);
2074 	int (*msuattp)(dev_info_t *);
2075 	dev_info_t *dip, *pdip;
2076 	int circ;
2077 
2078 	/* 4 is pcicmu channel */
2079 	if (obj->channel != 4)
2080 		return (0);
2081 
2082 	arg.name = "serial";
2083 	arg.func = NULL;
2084 	if (state == DRMACH_IO_PRE_RELEASE) {
2085 		msudetp = (int (*)(dev_info_t *))
2086 		    modgetsymvalue("oplmsu_dr_detach", 0);
2087 		if (msudetp != NULL)
2088 			arg.func = msudetp;
2089 	} else if (state == DRMACH_IO_POST_ATTACH) {
2090 		msuattp = (int (*)(dev_info_t *))
2091 		    modgetsymvalue("oplmsu_dr_attach", 0);
2092 		if (msuattp != NULL)
2093 			arg.func = msuattp;
2094 	} else {
2095 		return (0);
2096 	}
2097 
2098 	if (arg.func == NULL) {
2099 		return (0);
2100 	}
2101 
2102 	arg.rv = 0;
2103 	arg.dip = NULL;
2104 
2105 	dip = obj->dev.node->n_getdip(obj->dev.node);
2106 	if ((pdip = ddi_get_parent(dip)) != NULL) {
2107 		ndi_hold_devi(pdip);
2108 		ndi_devi_enter(pdip, &circ);
2109 	} else {
2110 		/* this cannot happen unless something bad happens */
2111 		return (-1);
2112 	}
2113 
2114 	ddi_walk_devs(dip, drmach_io_cb_check, (void *)&arg);
2115 
2116 	ndi_devi_exit(pdip, circ);
2117 	ndi_rele_devi(pdip);
2118 
2119 	if (arg.dip) {
2120 		arg.rv = (*arg.func)(arg.dip);
2121 		ndi_rele_devi(arg.dip);
2122 	} else {
2123 		arg.rv = -1;
2124 	}
2125 
2126 	return (arg.rv);
2127 }
2128 
2129 sbd_error_t *
2130 drmach_io_pre_release(drmachid_t id)
2131 {
2132 	int rv;
2133 
2134 	if (!DRMACH_IS_IO_ID(id))
2135 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2136 
2137 	rv = drmach_console_ops(id, DRMACH_IO_PRE_RELEASE);
2138 
2139 	if (rv != 0)
2140 		cmn_err(CE_WARN, "IO callback failed in pre-release\n");
2141 
2142 	return (NULL);
2143 }
2144 
2145 static sbd_error_t *
2146 drmach_io_release(drmachid_t id)
2147 {
2148 	if (!DRMACH_IS_IO_ID(id))
2149 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2150 	return (NULL);
2151 }
2152 
2153 sbd_error_t *
2154 drmach_io_unrelease(drmachid_t id)
2155 {
2156 	if (!DRMACH_IS_IO_ID(id))
2157 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2158 	return (NULL);
2159 }
2160 
2161 /*ARGSUSED*/
2162 sbd_error_t *
2163 drmach_io_post_release(drmachid_t id)
2164 {
2165 	return (NULL);
2166 }
2167 
2168 /*ARGSUSED*/
2169 sbd_error_t *
2170 drmach_io_post_attach(drmachid_t id)
2171 {
2172 	int rv;
2173 
2174 	if (!DRMACH_IS_IO_ID(id))
2175 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2176 
2177 	rv = drmach_console_ops(id, DRMACH_IO_POST_ATTACH);
2178 
2179 	if (rv != 0)
2180 		cmn_err(CE_WARN, "IO callback failed in post-attach\n");
2181 
2182 	return (NULL);
2183 }
2184 
2185 static sbd_error_t *
2186 drmach_io_status(drmachid_t id, drmach_status_t *stat)
2187 {
2188 	drmach_device_t *dp;
2189 	sbd_error_t	*err;
2190 	int		 configured;
2191 
2192 	ASSERT(DRMACH_IS_IO_ID(id));
2193 	dp = id;
2194 
2195 	err = drmach_io_is_attached(id, &configured);
2196 	if (err)
2197 		return (err);
2198 
2199 	stat->assigned = dp->bp->assigned;
2200 	stat->powered = dp->bp->powered;
2201 	stat->configured = (configured != 0);
2202 	stat->busy = dp->busy;
2203 	strncpy(stat->type, dp->type, sizeof (stat->type));
2204 	stat->info[0] = '\0';
2205 
2206 	return (NULL);
2207 }
2208 
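/*
 * Construct a drmach_mem_t for a memory device node.  Nodes without a
 * valid "mc-addr" property and memoryless nodes are skipped by setting
 * *idp to NULL without reporting an error.
 */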
2209 static sbd_error_t *
2210 drmach_mem_new(drmach_device_t *proto, drmachid_t *idp)
2211 {
2212 	static void drmach_mem_dispose(drmachid_t);
2213 	static sbd_error_t *drmach_mem_release(drmachid_t);
2214 	static sbd_error_t *drmach_mem_status(drmachid_t, drmach_status_t *);
2215 	dev_info_t *dip;
2216 	int rv;
2217 
2218 	drmach_mem_t	*mp;
2219 
2220 	rv = 0;
2221 
2222 	if ((proto->node->n_getproplen(proto->node, "mc-addr", &rv) < 0) ||
2223 	    (rv <= 0)) {
2224 		*idp = (drmachid_t)0;
2225 		return (NULL);
2226 	}
2227 
2228 	mp = kmem_zalloc(sizeof (drmach_mem_t), KM_SLEEP);
2229 	proto->unum = 0;
2230 
2231 	bcopy(proto, &mp->dev, sizeof (mp->dev));
2232 	mp->dev.node = drmach_node_dup(proto->node);
2233 	mp->dev.cm.isa = (void *)drmach_mem_new;
2234 	mp->dev.cm.dispose = drmach_mem_dispose;
2235 	mp->dev.cm.release = drmach_mem_release;
2236 	mp->dev.cm.status = drmach_mem_status;
2237 
2238 	snprintf(mp->dev.cm.name, sizeof (mp->dev.cm.name), "%s", mp->dev.type);
2239 
2240 	dip = mp->dev.node->n_getdip(mp->dev.node);
2241 	if (drmach_setup_mc_info(dip, mp) != 0) {
		drmach_node_dispose(mp->dev.node);
		kmem_free(mp, sizeof (drmach_mem_t));
2242 		return (drerr_new(1, EOPL_MC_SETUP, NULL));
2243 	}
2244 
2245 	/* make sure we do not create memoryless nodes */
2246 	if (mp->nbytes == 0) {
2247 		*idp = (drmachid_t)NULL;
2248 		kmem_free(mp, sizeof (drmach_mem_t));
2249 	} else
2250 		*idp = (drmachid_t)mp;
2251 
2252 	return (NULL);
2253 }
2254 
2255 static void
2256 drmach_mem_dispose(drmachid_t id)
2257 {
2258 	drmach_mem_t *mp;
2259 
2260 	ASSERT(DRMACH_IS_MEM_ID(id));
2261 
2262 
2263 	mp = id;
2264 
2265 	if (mp->dev.node)
2266 		drmach_node_dispose(mp->dev.node);
2267 
2268 	if (mp->memlist) {
2269 		memlist_delete(mp->memlist);
2270 		mp->memlist = NULL;
2271 	}
2272 
2273 	kmem_free(mp, sizeof (*mp));
2274 }
2275 
2276 sbd_error_t *
2277 drmach_mem_add_span(drmachid_t id, uint64_t basepa, uint64_t size)
2278 {
2279 	pfn_t		basepfn = (pfn_t)(basepa >> PAGESHIFT);
2280 	pgcnt_t		npages = (pgcnt_t)(size >> PAGESHIFT);
2281 	int		rv;
2282 
2283 	ASSERT(size != 0);
2284 
2285 	if (!DRMACH_IS_MEM_ID(id))
2286 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2287 
2288 	rv = kcage_range_add(basepfn, npages, KCAGE_DOWN);
2289 	if (rv == ENOMEM) {
2290 		cmn_err(CE_WARN, "%ld megabytes not available to kernel cage",
2291 		    size / MBYTE);
2292 	} else if (rv != 0) {
2293 		/* catch this in debug kernels */
2294 		ASSERT(0);
2295 
2296 		cmn_err(CE_WARN, "unexpected kcage_range_add return value %d",
2297 		    rv);
2298 	}
2299 
2300 	if (rv) {
2301 		return (DRMACH_INTERNAL_ERROR());
2302 	} else {
2303 		return (NULL);
2304 	}
2305 }
2306 
2307 sbd_error_t *
2308 drmach_mem_del_span(drmachid_t id, uint64_t basepa, uint64_t size)
2309 {
2310 	pfn_t		basepfn = (pfn_t)(basepa >> PAGESHIFT);
2311 	pgcnt_t		npages = (pgcnt_t)(size >> PAGESHIFT);
2312 	int		rv;
2313 
2314 	if (!DRMACH_IS_MEM_ID(id))
2315 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2316 
2317 	if (size > 0) {
2318 		rv = kcage_range_delete_post_mem_del(basepfn, npages);
2319 		if (rv != 0) {
2320 			cmn_err(CE_WARN,
2321 			    "unexpected kcage_range_delete_post_mem_del"
2322 			    " return value %d", rv);
2323 			return (DRMACH_INTERNAL_ERROR());
2324 		}
2325 	}
2326 
2327 	return (NULL);
2328 }
2329 
2330 sbd_error_t *
2331 drmach_mem_disable(drmachid_t id)
2332 {
2333 	if (!DRMACH_IS_MEM_ID(id))
2334 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2335 	else {
2336 		drmach_flush_all();
2337 		return (NULL);
2338 	}
2339 }
2340 
2341 sbd_error_t *
2342 drmach_mem_enable(drmachid_t id)
2343 {
2344 	if (!DRMACH_IS_MEM_ID(id))
2345 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2346 	else
2347 		return (NULL);
2348 }
2349 
2350 sbd_error_t *
2351 drmach_mem_get_info(drmachid_t id, drmach_mem_info_t *mem)
2352 {
2353 	drmach_mem_t *mp;
2354 
2355 	if (!DRMACH_IS_MEM_ID(id))
2356 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2357 
2358 	mp = (drmach_mem_t *)id;
2359 
2360 	/*
2361 	 * This is only used by dr to round up/down the memory
2362 	 * for copying. Our unit of memory isolation is 64 MB.
2363 	 */
2364 
2365 	mem->mi_alignment_mask = (64 * 1024 * 1024 - 1);
2366 	mem->mi_basepa = mp->base_pa;
2367 	mem->mi_size = mp->nbytes;
2368 	mem->mi_slice_size = mp->slice_size;
2369 
2370 	return (NULL);
2371 }
2372 
2373 sbd_error_t *
2374 drmach_mem_get_base_physaddr(drmachid_t id, uint64_t *pa)
2375 {
2376 	drmach_mem_t *mp;
2377 
2378 	if (!DRMACH_IS_MEM_ID(id))
2379 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2380 
2381 	mp = (drmach_mem_t *)id;
2382 
2383 	*pa = mp->base_pa;
2384 	return (NULL);
2385 }
2386 
2387 sbd_error_t *
2388 drmach_mem_get_memlist(drmachid_t id, struct memlist **ml)
2389 {
2390 	drmach_mem_t	*mem;
2391 #ifdef	DEBUG
2392 	int		rv;
2393 #endif
2394 	struct memlist	*mlist;
2395 
2396 	if (!DRMACH_IS_MEM_ID(id))
2397 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2398 
2399 	mem = (drmach_mem_t *)id;
2400 	mlist = memlist_dup(mem->memlist);
2401 
2402 #ifdef DEBUG
2403 	/*
2404 	 * Make sure the incoming memlist doesn't already
2405 	 * intersect with what's present in the system (phys_install).
2406 	 */
2407 	memlist_read_lock();
2408 	rv = memlist_intersect(phys_install, mlist);
2409 	memlist_read_unlock();
2410 	if (rv) {
2411 		DRMACH_PR("Derived memlist intersects with phys_install\n");
2412 		memlist_dump(mlist);
2413 
2414 		DRMACH_PR("phys_install memlist:\n");
2415 		memlist_dump(phys_install);
2416 
2417 		memlist_delete(mlist);
2418 		return (DRMACH_INTERNAL_ERROR());
2419 	}
2420 
2421 	DRMACH_PR("Derived memlist:");
2422 	memlist_dump(mlist);
2423 #endif
2424 	*ml = mlist;
2425 
2426 	return (NULL);
2427 }
2428 
2429 sbd_error_t *
2430 drmach_mem_get_slice_size(drmachid_t id, uint64_t *bytes)
2431 {
2432 	drmach_mem_t	*mem;
2433 
2434 	if (!DRMACH_IS_MEM_ID(id))
2435 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2436 
2437 	mem = (drmach_mem_t *)id;
2438 
2439 	*bytes = mem->slice_size;
2440 
2441 	return (NULL);
2442 }
2443 
2444 
2445 /* ARGSUSED */
2446 processorid_t
2447 drmach_mem_cpu_affinity(drmachid_t id)
2448 {
2449 	return (CPU_CURRENT);
2450 }
2451 
2452 static sbd_error_t *
2453 drmach_mem_release(drmachid_t id)
2454 {
2455 	if (!DRMACH_IS_MEM_ID(id))
2456 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2457 	return (NULL);
2458 }
2459 
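/*
 * Report board status for a memory unit.  The unit is considered
 * configured if any span of phys_install falls within its slice.
 */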
2460 static sbd_error_t *
2461 drmach_mem_status(drmachid_t id, drmach_status_t *stat)
2462 {
2463 	drmach_mem_t *dp;
2464 	uint64_t	 pa, slice_size;
2465 	struct memlist	*ml;
2466 
2467 	ASSERT(DRMACH_IS_MEM_ID(id));
2468 	dp = id;
2469 
2470 	/* get starting physical address of target memory */
2471 	pa = dp->base_pa;
2472 
2473 	/* round down to slice boundary */
2474 	slice_size = dp->slice_size;
2475 	pa &= ~(slice_size - 1);
2476 
2477 	/* stop at first span that is in slice */
2478 	memlist_read_lock();
2479 	for (ml = phys_install; ml; ml = ml->next)
2480 		if (ml->address >= pa && ml->address < pa + slice_size)
2481 			break;
2482 	memlist_read_unlock();
2483 
2484 	stat->assigned = dp->dev.bp->assigned;
2485 	stat->powered = dp->dev.bp->powered;
2486 	stat->configured = (ml != NULL);
2487 	stat->busy = dp->dev.busy;
2488 	strncpy(stat->type, dp->dev.type, sizeof (stat->type));
2489 	stat->info[0] = '\0';
2490 
2491 	return (NULL);
2492 }
2493 
2494 
2495 sbd_error_t *
2496 drmach_board_deprobe(drmachid_t id)
2497 {
2498 	drmach_board_t	*bp;
2499 
2500 	if (!DRMACH_IS_BOARD_ID(id))
2501 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2502 
2503 	bp = id;
2504 
2505 	cmn_err(CE_CONT, "DR: detach board %d\n", bp->bnum);
2506 
2507 	if (bp->tree) {
2508 		drmach_node_dispose(bp->tree);
2509 		bp->tree = NULL;
2510 	}
2511 	if (bp->devices) {
2512 		drmach_array_dispose(bp->devices, drmach_device_dispose);
2513 		bp->devices = NULL;
2514 	}
2515 
2516 	bp->boot_board = 0;
2517 
2518 	return (NULL);
2519 }
2520 
2521 /*ARGSUSED*/
2522 static sbd_error_t *
2523 drmach_pt_ikprobe(drmachid_t id, drmach_opts_t *opts)
2524 {
2525 	drmach_board_t		*bp = (drmach_board_t *)id;
2526 	sbd_error_t		*err = NULL;
2527 	int	rv;
2528 	unsigned cpu_impl;
2529 
2530 	if (!DRMACH_IS_BOARD_ID(id))
2531 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2532 
2533 	DRMACH_PR("calling opl_probe_sb for bnum=%d\n", bp->bnum);
2534 	rv = opl_probe_sb(bp->bnum, &cpu_impl);
2535 	if (rv != 0) {
2536 		err = drerr_new(1, EOPL_PROBE, bp->cm.name);
2537 		return (err);
2538 	}
2539 	return (err);
2540 }
2541 
2542 /*ARGSUSED*/
2543 static sbd_error_t *
2544 drmach_pt_ikdeprobe(drmachid_t id, drmach_opts_t *opts)
2545 {
2546 	drmach_board_t	*bp;
2547 	sbd_error_t	*err = NULL;
2548 	int	rv;
2549 
2550 	if (!DRMACH_IS_BOARD_ID(id))
2551 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2552 	bp = (drmach_board_t *)id;
2553 
2554 	cmn_err(CE_CONT, "DR: in-kernel unprobe board %d\n", bp->bnum);
2555 
2556 	rv = opl_unprobe_sb(bp->bnum);
2557 	if (rv != 0) {
2558 		err = drerr_new(1, EOPL_DEPROBE, bp->cm.name);
2559 	}
2560 
2561 	return (err);
2562 }
2563 
2564 
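/*
 * Passthru "readmem" command: read every span of phys_install, 32
 * bytes at a time, into a single local buffer, apparently to exercise
 * memory reads; the data itself is discarded.
 */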
2565 /*ARGSUSED*/
2566 sbd_error_t *
2567 drmach_pt_readmem(drmachid_t id, drmach_opts_t *opts)
2568 {
2569 	struct memlist	*ml;
2570 	uint64_t	src_pa;
2571 	uint64_t	dst_pa;
2572 	uint64_t	dst;
2573 
2574 	dst_pa = va_to_pa(&dst);
2575 
2576 	memlist_read_lock();
2577 	for (ml = phys_install; ml; ml = ml->next) {
2578 		uint64_t	nbytes;
2579 
2580 		src_pa = ml->address;
2581 		nbytes = ml->size;
2582 
2583 		while (nbytes != 0ull) {
2584 
2585 			/* copy 32 bytes at src_pa to dst_pa */
2586 			bcopy32_il(src_pa, dst_pa);
2587 
2588 			/* increment by 32 bytes */
2589 			src_pa += (4 * sizeof (uint64_t));
2590 
2591 			/* decrement by 32 bytes */
2592 			nbytes -= (4 * sizeof (uint64_t));
2593 		}
2594 	}
2595 	memlist_read_unlock();
2596 
2597 	return (NULL);
2598 }
2599 
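/* dispatch table for passthru commands; names are matched by prefix */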
2600 static struct {
2601 	const char	*name;
2602 	sbd_error_t	*(*handler)(drmachid_t id, drmach_opts_t *opts);
2603 } drmach_pt_arr[] = {
2604 	{ "readmem",		drmach_pt_readmem		},
2605 	{ "ikprobe",		drmach_pt_ikprobe		},
2606 	{ "ikdeprobe",		drmach_pt_ikdeprobe		},
2607 
2608 	/* the following line must always be last */
2609 	{ NULL,			NULL				}
2610 };
2611 
2612 /*ARGSUSED*/
2613 sbd_error_t *
2614 drmach_passthru(drmachid_t id, drmach_opts_t *opts)
2615 {
2616 	int		i;
2617 	sbd_error_t	*err;
2618 
2619 	i = 0;
2620 	while (drmach_pt_arr[i].name != NULL) {
2621 		int len = strlen(drmach_pt_arr[i].name);
2622 
2623 		if (strncmp(drmach_pt_arr[i].name, opts->copts, len) == 0)
2624 			break;
2625 
2626 		i += 1;
2627 	}
2628 
2629 	if (drmach_pt_arr[i].name == NULL)
2630 		err = drerr_new(0, EOPL_UNKPTCMD, opts->copts);
2631 	else
2632 		err = (*drmach_pt_arr[i].handler)(id, opts);
2633 
2634 	return (err);
2635 }
2636 
2637 sbd_error_t *
2638 drmach_release(drmachid_t id)
2639 {
2640 	drmach_common_t *cp;
2641 
2642 	if (!DRMACH_IS_DEVICE_ID(id))
2643 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2644 	cp = id;
2645 
2646 	return (cp->release(id));
2647 }
2648 
2649 sbd_error_t *
2650 drmach_status(drmachid_t id, drmach_status_t *stat)
2651 {
2652 	drmach_common_t *cp;
2653 	sbd_error_t	*err;
2654 
2655 	rw_enter(&drmach_boards_rwlock, RW_READER);
2656 
2657 	if (!DRMACH_IS_ID(id)) {
2658 		rw_exit(&drmach_boards_rwlock);
2659 		return (drerr_new(0, EOPL_NOTID, NULL));
2660 	}
2661 	cp = (drmach_common_t *)id;
2662 	err = cp->status(id, stat);
2663 
2664 	rw_exit(&drmach_boards_rwlock);
2665 
2666 	return (err);
2667 }
2668 
2669 static sbd_error_t *
2670 drmach_i_status(drmachid_t id, drmach_status_t *stat)
2671 {
2672 	drmach_common_t *cp;
2673 
2674 	if (!DRMACH_IS_ID(id))
2675 		return (drerr_new(0, EOPL_NOTID, NULL));
2676 	cp = id;
2677 
2678 	return (cp->status(id, stat));
2679 }
2680 
2681 /*ARGSUSED*/
2682 sbd_error_t *
2683 drmach_unconfigure(drmachid_t id, int flags)
2684 {
2685 	drmach_device_t *dp;
2686 	dev_info_t	*rdip, *fdip = NULL;
2687 	char name[OBP_MAXDRVNAME];
2688 	int rv;
2689 
2690 	if (DRMACH_IS_CPU_ID(id))
2691 		return (NULL);
2692 
2693 	if (!DRMACH_IS_DEVICE_ID(id))
2694 		return (drerr_new(0, EOPL_INAPPROP, NULL));
2695 
2696 	dp = id;
2697 
2698 	rdip = dp->node->n_getdip(dp->node);
2699 
2700 	ASSERT(rdip);
2701 
2702 	rv = dp->node->n_getprop(dp->node, "name", name, OBP_MAXDRVNAME);
2703 
2704 	if (rv)
2705 		return (NULL);
2706 
2707 	/*
2708 	 * Note: FORCE flag is no longer necessary under devfs
2709 	 */
2710 
2711 	ASSERT(e_ddi_branch_held(rdip));
2712 	if (e_ddi_branch_unconfigure(rdip, &fdip, 0)) {
2713 		sbd_error_t	*err;
2714 		char		*path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2715 
2716 		/*
2717 		 * If non-NULL, fdip is returned held and must be released.
2718 		 */
2719 		if (fdip != NULL) {
2720 			(void) ddi_pathname(fdip, path);
2721 			ndi_rele_devi(fdip);
2722 		} else {
2723 			(void) ddi_pathname(rdip, path);
2724 		}
2725 
2726 		err = drerr_new(1, EOPL_DRVFAIL, path);
2727 
2728 		kmem_free(path, MAXPATHLEN);
2729 
2730 		return (err);
2731 	}
2732 
2733 	return (NULL);
2734 }
2735 
2736 
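/*
 * Power on a CPU.  If this is the first strand of the core to come up,
 * the CMP is first hot-added via SCF; if the subsequent start fails and
 * no other strand of the core is running, the hot-add is undone.
 */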
2737 int
2738 drmach_cpu_poweron(struct cpu *cp)
2739 {
2740 	int bnum, cpuid, onb_core_num, strand_id;
2741 	drmach_board_t *bp;
2742 
2743 	DRMACH_PR("drmach_cpu_poweron: starting cpuid %d\n", cp->cpu_id);
2744 
2745 	cpuid = cp->cpu_id;
2746 	bnum = LSB_ID(cpuid);
2747 	onb_core_num = ON_BOARD_CORE_NUM(cpuid);
2748 	strand_id = STRAND_ID(cpuid);
2749 	bp = drmach_get_board_by_bnum(bnum);
2750 
2751 	ASSERT(bp);
2752 	if (bp->cores[onb_core_num].core_hotadded == 0) {
2753 		if (drmach_add_remove_cpu(bnum, onb_core_num,
2754 		    HOTADD_CPU) != 0) {
2755 			cmn_err(CE_WARN, "Failed to add CMP %d on board %d\n",
2756 			    onb_core_num, bnum);
2757 			return (EIO);
2758 		}
2759 	}
2760 
2761 	ASSERT(MUTEX_HELD(&cpu_lock));
2762 
2763 	if (drmach_cpu_start(cp) != 0) {
2764 		if (bp->cores[onb_core_num].core_started == 0) {
2765 			/*
2766 			 * we must undo the hotadd or no one will do that
2767 			 * If this fails, we will do this again in
2768 			 * drmach_board_disconnect.
2769 			 */
2770 			if (drmach_add_remove_cpu(bnum, onb_core_num,
2771 			    HOTREMOVE_CPU) != 0) {
2772 				cmn_err(CE_WARN, "Failed to remove CMP %d "
2773 				    "on board %d\n", onb_core_num, bnum);
2774 			}
2775 		}
2776 		return (EBUSY);
2777 	} else {
2778 		bp->cores[onb_core_num].core_started |= (1 << strand_id);
2779 		return (0);
2780 	}
2781 }
2782 
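/*
 * Power off a CPU: quiesce it, stop it through the PROM, and hot-remove
 * the CMP via SCF once the last strand of the core has stopped.
 */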
2783 int
2784 drmach_cpu_poweroff(struct cpu *cp)
2785 {
2786 	int 		rv = 0;
2787 	processorid_t	cpuid = cp->cpu_id;
2788 
2789 	DRMACH_PR("drmach_cpu_poweroff: stopping cpuid %d\n", cp->cpu_id);
2790 
2791 	ASSERT(MUTEX_HELD(&cpu_lock));
2792 
2793 	/*
2794 	 * Capture all CPUs (except for detaching proc) to prevent
2795 	 * crosscalls to the detaching proc until it has cleared its
2796 	 * bit in cpu_ready_set.
2797 	 *
2798 	 * The CPUs remain paused and the prom_mutex is known to be free.
2799 	 * This prevents the x-trap victim from blocking when doing prom
2800 	 * IEEE-1275 calls at a high PIL level.
2801 	 */
2802 
2803 	promsafe_pause_cpus();
2804 
2805 	/*
2806 	 * Quiesce interrupts on the target CPU. We do this by setting
2807 	 * the CPU 'not ready' (i.e., removing the CPU from cpu_ready_set) to
2808 	 * prevent it from receiving cross calls and cross traps.
2809 	 * This prevents the processor from receiving any new soft interrupts.
2810 	 */
2811 	mp_cpu_quiesce(cp);
2812 
2813 	rv = prom_stopcpu_bycpuid(cpuid);
2814 	if (rv == 0)
2815 		cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;
2816 
2817 	start_cpus();
2818 
2819 	if (rv == 0) {
2820 		int bnum, onb_core_num, strand_id;
2821 		drmach_board_t *bp;
2822 
2823 		CPU_SIGNATURE(OS_SIG, SIGST_DETACHED, SIGSUBST_NULL, cpuid);
2824 
2825 		bnum = LSB_ID(cpuid);
2826 		onb_core_num = ON_BOARD_CORE_NUM(cpuid);
2827 		strand_id = STRAND_ID(cpuid);
2828 		bp = drmach_get_board_by_bnum(bnum);
2829 		ASSERT(bp);
2830 
2831 		bp->cores[onb_core_num].core_started &= ~(1 << strand_id);
2832 		if (bp->cores[onb_core_num].core_started == 0) {
2833 			if (drmach_add_remove_cpu(bnum, onb_core_num,
2834 			    HOTREMOVE_CPU) != 0) {
2835 				cmn_err(CE_WARN, "Failed to remove CMP %d LSB "
2836 				    "%d\n", onb_core_num, bnum);
2837 				return (EIO);
2838 			}
2839 		}
2840 	}
2841 
2842 	return (rv);
2843 }
2844 
2845 /*ARGSUSED*/
2846 int
2847 drmach_verify_sr(dev_info_t *dip, int sflag)
2848 {
2849 	return (0);
2850 }
2851 
2852 void
2853 drmach_suspend_last(void)
2854 {
2855 }
2856 
2857 void
2858 drmach_resume_first(void)
2859 {
2860 }
2861 
2862 /*
2863  * Log a DR sysevent.
2864  * Return value: 0 success, non-zero failure.
2865  */
2866 int
2867 drmach_log_sysevent(int board, char *hint, int flag, int verbose)
2868 {
2869 	sysevent_t			*ev;
2870 	sysevent_id_t			eid;
2871 	int				rv, km_flag;
2872 	sysevent_value_t		evnt_val;
2873 	sysevent_attr_list_t		*evnt_attr_list = NULL;
2874 	char				attach_pnt[MAXNAMELEN];
2875 
2876 	km_flag = (flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
2877 	attach_pnt[0] = '\0';
2878 	if (drmach_board_name(board, attach_pnt, MAXNAMELEN)) {
2879 		rv = -1;
2880 		goto logexit;
2881 	}
2882 	if (verbose) {
2883 		DRMACH_PR("drmach_log_sysevent: %s %s, flag: %d, verbose: %d\n",
2884 		    attach_pnt, hint, flag, verbose);
2885 	}
2886 
2887 	if ((ev = sysevent_alloc(EC_DR, ESC_DR_AP_STATE_CHANGE,
2888 	    SUNW_KERN_PUB"dr", km_flag)) == NULL) {
2889 		rv = -2;
2890 		goto logexit;
2891 	}
2892 	evnt_val.value_type = SE_DATA_TYPE_STRING;
2893 	evnt_val.value.sv_string = attach_pnt;
2894 	if ((rv = sysevent_add_attr(&evnt_attr_list, DR_AP_ID, &evnt_val,
2895 	    km_flag)) != 0)
2896 		goto logexit;
2897 
2898 	evnt_val.value_type = SE_DATA_TYPE_STRING;
2899 	evnt_val.value.sv_string = hint;
2900 	if ((rv = sysevent_add_attr(&evnt_attr_list, DR_HINT, &evnt_val,
2901 	    km_flag)) != 0) {
2902 		sysevent_free_attr(evnt_attr_list);
2903 		goto logexit;
2904 	}
2905 
2906 	(void) sysevent_attach_attributes(ev, evnt_attr_list);
2907 
2908 	/*
2909 	 * Log the event but do not sleep waiting for its
2910 	 * delivery. This provides insulation from syseventd.
2911 	 */
2912 	rv = log_sysevent(ev, SE_NOSLEEP, &eid);
2913 
2914 logexit:
2915 	if (ev)
2916 		sysevent_free(ev);
2917 	if ((rv != 0) && verbose)
2918 		cmn_err(CE_WARN, "drmach_log_sysevent failed (rv %d) for %s "
2919 		    "%s\n", rv, attach_pnt, hint);
2920 
2921 	return (rv);
2922 }
2923 
2924 #define	OPL_DR_STATUS_PROP "dr-status"
2925 
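/*
 * Check for the "dr-status" property on the root node.  Returns 0 only
 * when the property exists and can be read; otherwise returns nonzero.
 */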
2926 static int
2927 opl_check_dr_status(void)
2928 {
2929 	pnode_t	node;
2930 	int	rtn, len;
2931 	char	*str;
2932 
2933 	node = prom_rootnode();
2934 	if (node == OBP_BADNODE) {
2935 		return (1);
2936 	}
2937 
2938 	len = prom_getproplen(node, OPL_DR_STATUS_PROP);
2939 	if (len == -1) {
2940 		/*
2941 		 * The dr-status property does not exist when DR is
2942 		 * activated, so no warning message is needed.
2943 		 */
2944 		return (1);
2945 	}
2946 
2947 	str = (char *)kmem_zalloc(len+1, KM_SLEEP);
2948 	rtn = prom_getprop(node, OPL_DR_STATUS_PROP, str);
2949 	kmem_free(str, len + 1);
2950 	if (rtn == -1) {
2951 		return (1);
2952 	} else {
2953 		return (0);
2954 	}
2955 }
2956 
2957 /* memlist entries are allocated from TLB-locked pages to avoid TLB misses */
2958 
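/*
 * Insert the span [base, base + len) into the sorted memlist,
 * coalescing it with any overlapping or adjacent spans.  Entries are
 * taken from the program's preallocated free list; NULL is returned
 * if that list is exhausted.
 */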
2959 static struct memlist *
2960 drmach_memlist_add_span(drmach_copy_rename_program_t *p,
2961 	struct memlist *mlist, uint64_t base, uint64_t len)
2962 {
2963 	struct memlist	*ml, *tl, *nl;
2964 
2965 	if (len == 0ull)
2966 		return (NULL);
2967 
2968 	if (mlist == NULL) {
2969 		mlist = p->free_mlist;
2970 		if (mlist == NULL)
2971 			return (NULL);
2972 		p->free_mlist = mlist->next;
2973 		mlist->address = base;
2974 		mlist->size = len;
2975 		mlist->next = mlist->prev = NULL;
2976 
2977 		return (mlist);
2978 	}
2979 
2980 	for (tl = ml = mlist; ml; tl = ml, ml = ml->next) {
2981 		if (base < ml->address) {
2982 			if ((base + len) < ml->address) {
2983 				nl = p->free_mlist;
2984 				if (nl == NULL)
2985 					return (NULL);
2986 				p->free_mlist = nl->next;
2987 				nl->address = base;
2988 				nl->size = len;
2989 				nl->next = ml;
2990 				if ((nl->prev = ml->prev) != NULL)
2991 					nl->prev->next = nl;
2992 				ml->prev = nl;
2993 				if (mlist == ml)
2994 					mlist = nl;
2995 			} else {
2996 				ml->size = MAX((base + len), (ml->address +
2997 				    ml->size)) - base;
2998 				ml->address = base;
2999 			}
3000 			break;
3001 
3002 		} else if (base <= (ml->address + ml->size)) {
3003 			ml->size = MAX((base + len), (ml->address + ml->size)) -
3004 			    MIN(ml->address, base);
3005 			ml->address = MIN(ml->address, base);
3006 			break;
3007 		}
3008 	}
3009 	if (ml == NULL) {
3010 		nl = p->free_mlist;
3011 		if (nl == NULL)
3012 			return (NULL);
3013 		p->free_mlist = nl->next;
3014 		nl->address = base;
3015 		nl->size = len;
3016 		nl->next = NULL;
3017 		nl->prev = tl;
3018 		tl->next = nl;
3019 	}
3020 
3021 	return (mlist);
3022 }
3023 
3024 /*
3025  * The routine performs the necessary memory COPY and MC adr SWITCH.
3026  * Both operations MUST be at the same "level" so that the stack is
3027  * maintained correctly between the copy and switch.  The switch
3028  * portion implements a caching mechanism to guarantee the code text
3029  * is cached prior to execution.  This is to guard against possible
3030  * memory access while the MC adr's are being modified.
3031  *
3032  * IMPORTANT: The _drmach_copy_rename_end() function must immediately
3033  * follow drmach_copy_rename_prog__relocatable() so that the correct
3034  * "length" of the drmach_copy_rename_prog__relocatable can be
3035  * calculated.  This routine MUST be a LEAF function, i.e. it can
3036  * make NO function calls, primarily for two reasons:
3037  *
3038  *	1. We must keep the stack consistent across the "switch".
3039  *	2. Function calls are compiled to relative offsets, and
3040  *	   when we execute this function we'll be executing it from
3041  *	   a copied version in a different area of memory, thus
3042  *	   the relative offsets will be bogus.
3043  *
3044  * Moreover, it must have the "__relocatable" suffix to inform DTrace
3045  * providers (and anything else, for that matter) that this
3046  * function's text is manually relocated elsewhere before it is
3047  * executed.  That is, it cannot be safely instrumented with any
3048  * methodology that is PC-relative.
3049  */
3050 
3051 /*
3052  * We multiply this by system_clock_freq so we
3053  * are setting a delay of fmem_timeout seconds for
3054  * the rename command.
3055  *
3056  * FMEM command itself should complete within 15 sec.
3057  * We add 2 more sec to be conservative.
3058  *
3059  * Note that there is also a SCF BUSY bit checking
3060  * in drmach_asm.s right before FMEM command is
3061  * issued.  XSCF sets the SCF BUSY bit when the
3062  * other domain on the same PSB reboots and it
3063  * will not be able to service the FMEM command
3064  * within 15 sec.   After setting the SCF BUSY
3065  * bit, XSCF will wait a while before servicing
3066  * other reboot command so there is no race
3067  * condition.
3068  */
3069 
3070 static int	fmem_timeout = 17;
3071 
3072 /*
3073  *	Empirical data on some OPL systems shows that
3074  *	we can copy 250 MB per second.  We set it to
3075  *	80 MB per second to be conservative.  In the
3076  *	normal case, this timeout does not affect anything.
3077  */
3078 
3079 static int	min_copy_size_per_sec = 80 * 1024 * 1024;
3080 
3081 /*
3082  *	This is the timeout value for the xcall synchronization
3083  *	to get all the CPUs ready to do the parallel copying.
3084  *	Even on a fully loaded system, 10 sec. should be long
3085  *	enough.
3086  */
3087 
3088 static int	cpu_xcall_delay = 10;
3089 int drmach_disable_mcopy = 0;
3090 
3091 /*
3092  * The following delay loop executes the sleep instruction to yield the
3093  * CPU to other strands.  If this is not done, some strands will tie
3094  * up the CPU in busy loops while other strands cannot do useful
3095  * work.  The copy procedure will take a much longer time without this.
3096  */
3097 #define	DR_DELAY_IL(ms, freq)					\
3098 	{							\
3099 		uint64_t start;					\
3100 		uint64_t nstick;				\
3101 		volatile uint64_t now;				\
3102 		nstick = ((uint64_t)ms * freq)/1000;		\
3103 		start = drmach_get_stick_il();			\
3104 		now = start;					\
3105 		while ((now - start) <= nstick) {		\
3106 			drmach_sleep_il();			\
3107 			now = drmach_get_stick_il();		\
3108 		}						\
3109 	}
3110 
3111 static int
3112 drmach_copy_rename_prog__relocatable(drmach_copy_rename_program_t *prog,
3113 	int cpuid)
3114 {
3115 	struct memlist		*ml;
3116 	register int		rtn;
3117 	int			i;
3118 	register uint64_t	curr, limit;
3119 	extern uint64_t		drmach_get_stick_il();
3120 	extern void		membar_sync_il();
3121 	extern void		flush_instr_mem_il(void*);
3122 	extern void		flush_windows_il(void);
3123 	uint64_t		copy_start;
3124 
3125 	/*
3126 	 * flush_windows is moved here to make sure all
3127 	 * registers used in the callers are flushed to
3128 	 * memory before the copy.
3129 	 *
3130 	 * If flush_windows() is called too early in the
3131 	 * calling function, the compiler might put some
3132 	 * data in the local registers after flush_windows().
3133 	 * After FMEM, if there is any fill trap, the registers
3134 	 * will contain stale data.
3135 	 */
3136 
3137 	flush_windows_il();
3138 
3139 	prog->critical->stat[cpuid] = FMEM_LOOP_COPY_READY;
3140 	membar_sync_il();
3141 
3142 	if (prog->data->cpuid == cpuid) {
3143 		limit = drmach_get_stick_il();
3144 		limit += cpu_xcall_delay * system_clock_freq;
3145 		for (i = 0; i < NCPU; i++) {
3146 			if (CPU_IN_SET(prog->data->cpu_slave_set, i)) {
3147 				/* wait for all CPUs to be ready */
3148 				for (;;) {
3149 					if (prog->critical->stat[i] ==
3150 					    FMEM_LOOP_COPY_READY) {
3151 						break;
3152 					}
3153 					DR_DELAY_IL(1, prog->data->stick_freq);
3154 				}
3155 				curr = drmach_get_stick_il();
3156 				if (curr > limit) {
3157 					prog->data->fmem_status.error =
3158 					    EOPL_FMEM_XC_TIMEOUT;
3159 					return (EOPL_FMEM_XC_TIMEOUT);
3160 				}
3161 			}
3162 		}
3163 		prog->data->fmem_status.stat = FMEM_LOOP_COPY_READY;
3164 		membar_sync_il();
3165 		copy_start = drmach_get_stick_il();
3166 	} else {
3167 		for (;;) {
3168 			if (prog->data->fmem_status.stat ==
3169 			    FMEM_LOOP_COPY_READY) {
3170 				break;
3171 			}
3172 			if (prog->data->fmem_status.error) {
3173 				prog->data->error[cpuid] = EOPL_FMEM_TERMINATE;
3174 				return (EOPL_FMEM_TERMINATE);
3175 			}
3176 			DR_DELAY_IL(1, prog->data->stick_freq);
3177 		}
3178 	}
3179 
3180 	/*
3181 	 * DO COPY.
3182 	 */
3183 	if (CPU_IN_SET(prog->data->cpu_copy_set, cpuid)) {
3184 		for (ml = prog->data->cpu_ml[cpuid]; ml; ml = ml->next) {
3185 			uint64_t	s_pa, t_pa;
3186 			uint64_t	nbytes;
3187 
3188 			s_pa = prog->data->s_copybasepa + ml->address;
3189 			t_pa = prog->data->t_copybasepa + ml->address;
3190 			nbytes = ml->size;
3191 
3192 			while (nbytes != 0ull) {
3193 				/*
3194 				 * If the master has detected error, we just
3195 				 * bail out
3196 				 */
3197 				if (prog->data->fmem_status.error !=
3198 				    ESBD_NOERROR) {
3199 					prog->data->error[cpuid] =
3200 					    EOPL_FMEM_TERMINATE;
3201 					return (EOPL_FMEM_TERMINATE);
3202 				}
3203 				/*
3204 				 * This copy does NOT use an ASI
3205 				 * that avoids the Ecache, therefore
3206 				 * the dst_pa addresses may remain
3207 				 * in our Ecache after the dst_pa
3208 				 * has been removed from the system.
3209 				 * A subsequent write-back to memory
3210 				 * will cause an ARB-stop because the
3211 				 * physical address no longer exists
3212 				 * in the system. Therefore we must
3213 				 * flush out local Ecache after we
3214 				 * finish the copy.
3215 				 */
3216 
3217 				/* copy 32 bytes at src_pa to dst_pa */
3218 				bcopy32_il(s_pa, t_pa);
3219 
3220 				/*
3221 				 * increment the counter to signal that we are
3222 				 * alive
3223 				 */
3224 				prog->stat->nbytes[cpuid] += 32;
3225 
3226 				/* increment by 32 bytes */
3227 				s_pa += (4 * sizeof (uint64_t));
3228 				t_pa += (4 * sizeof (uint64_t));
3229 
3230 				/* decrement by 32 bytes */
3231 				nbytes -= (4 * sizeof (uint64_t));
3232 			}
3233 		}
3234 		prog->critical->stat[cpuid] = FMEM_LOOP_COPY_DONE;
3235 		membar_sync_il();
3236 	}
3237 
3238 	/*
3239 	 * Since bcopy32_il() does NOT use an ASI to bypass
3240 	 * the Ecache, we need to flush our Ecache after
3241 	 * the copy is complete.
3242 	 */
3243 	flush_cache_il();
3244 
3245 	/*
3246 	 * drmach_fmem_exec_script()
3247 	 */
3248 	if (prog->data->cpuid == cpuid) {
3249 		uint64_t	last, now;
3250 
3251 		limit = copy_start + prog->data->copy_delay;
3252 		for (i = 0; i < NCPU; i++) {
3253 			if (!CPU_IN_SET(prog->data->cpu_slave_set, i))
3254 				continue;
3255 
3256 			for (;;) {
3257 				/*
3258 				 * we get FMEM_LOOP_FMEM_READY in
3259 				 * the normal case
3260 				 */
3261 				if (prog->critical->stat[i] ==
3262 				    FMEM_LOOP_FMEM_READY) {
3263 					break;
3264 				}
3265 				/* got error traps */
3266 				if (prog->data->error[i] ==
3267 				    EOPL_FMEM_COPY_ERROR) {
3268 					prog->data->fmem_status.error =
3269 					    EOPL_FMEM_COPY_ERROR;
3270 					return (EOPL_FMEM_COPY_ERROR);
3271 				}
3272 				/*
3273 				 * if we have not reached limit, wait
3274 				 * more
3275 				 */
3276 				curr = drmach_get_stick_il();
3277 				if (curr <= limit)
3278 					continue;
3279 
3280 				prog->data->slowest_cpuid = i;
3281 				prog->data->copy_wait_time = curr - copy_start;
3282 
3283 				/* now check if slave is alive */
3284 				last = prog->stat->nbytes[i];
3285 
3286 				DR_DELAY_IL(1, prog->data->stick_freq);
3287 
3288 				now = prog->stat->nbytes[i];
3289 				if (now <= last) {
3290 					/*
3291 					 * no progress, perhaps just
3292 					 * finished
3293 					 */
3294 					DR_DELAY_IL(1, prog->data->stick_freq);
3295 					if (prog->critical->stat[i] ==
3296 					    FMEM_LOOP_FMEM_READY)
3297 						break;
3298 					/* copy error */
3299 					if (prog->data->error[i] ==
3300 					    EOPL_FMEM_COPY_ERROR) {
3301 						prog->data->fmem_status.error =
3302 						    EOPL_FMEM_COPY_ERROR;
3303 						return (EOPL_FMEM_COPY_ERROR);
3304 					}
3305 					prog->data->fmem_status.error =
3306 					    EOPL_FMEM_COPY_TIMEOUT;
3307 					return (EOPL_FMEM_COPY_TIMEOUT);
3308 				}
3309 			}
3310 		}
3311 
3312 		prog->critical->stat[cpuid] = FMEM_LOOP_FMEM_READY;
3313 	prog->data->fmem_status.stat = FMEM_LOOP_FMEM_READY;
3314 
3315 		membar_sync_il();
3316 		flush_instr_mem_il((void*) (prog->critical));
3317 		/*
3318 		 * drmach_fmem_exec_script()
3319 		 */
3320 		rtn = prog->critical->fmem((void *)prog->critical, PAGESIZE);
3321 		return (rtn);
3322 	} else {
3323 		flush_instr_mem_il((void*) (prog->critical));
3324 		/*
3325 		 * drmach_fmem_loop_script()
3326 		 */
3327 		rtn = prog->critical->loop((void *)(prog->critical), PAGESIZE,
3328 		    (void *)&(prog->critical->stat[cpuid]));
3329 		prog->data->error[cpuid] = rtn;
3330 		/* the slave thread does not care about the return value */
3331 		return (0);
3332 	}
3333 }
3334 
3335 static void
3336 drmach_copy_rename_end(void)
3337 {
3338 	/*
3339 	 * IMPORTANT:	This function's location MUST be located immediately
3340 	 *		following drmach_copy_rename_prog__relocatable to
3341 	 *		accurately estimate its size.  Note that this assumes
3342 	 *		the compiler keeps these functions in the order in
3343 	 *		which they appear :-o
3344 	 */
3345 }
3346 
3347 
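/*
 * Carve the program's PAGESIZE memlist buffer into a linked free list
 * of memlist entries for use by drmach_memlist_add_span().
 */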
3348 static void
3349 drmach_setup_memlist(drmach_copy_rename_program_t *p)
3350 {
3351 	struct memlist *ml;
3352 	caddr_t buf;
3353 	int nbytes, s;
3354 
3355 	nbytes = PAGESIZE;
3356 	s = roundup(sizeof (struct memlist), sizeof (void *));
3357 	p->free_mlist = NULL;
3358 	buf = p->memlist_buffer;
3359 	while (nbytes >= sizeof (struct memlist)) {
3360 		ml = (struct memlist *)buf;
3361 		ml->next = p->free_mlist;
3362 		p->free_mlist = ml;
3363 		buf += s;
3364 		nbytes -= s;
3365 	}
3366 }
3367 
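/*
 * Map the FMEM program at its alternate VA range using locked 8K TTEs
 * in both the D-TLB and I-TLB so that no TLB miss can occur while the
 * copy-rename code runs.  Preemption remains disabled until
 * drmach_unlock_critical() is called.
 */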
3368 static void
3369 drmach_lock_critical(caddr_t va, caddr_t new_va)
3370 {
3371 	tte_t tte;
3372 	int i;
3373 
3374 	kpreempt_disable();
3375 
3376 	for (i = 0; i < DRMACH_FMEM_LOCKED_PAGES; i++) {
3377 		vtag_flushpage(new_va, (uint64_t)ksfmmup);
3378 		sfmmu_memtte(&tte, va_to_pfn(va), PROC_DATA|HAT_NOSYNC, TTE8K);
3379 		tte.tte_intlo |= TTE_LCK_INT;
3380 		sfmmu_dtlb_ld_kva(new_va, &tte);
3381 		sfmmu_itlb_ld_kva(new_va, &tte);
3382 		va += PAGESIZE;
3383 		new_va += PAGESIZE;
3384 	}
3385 }
3386 
3387 static void
3388 drmach_unlock_critical(caddr_t va)
3389 {
3390 	int i;
3391 
3392 	for (i = 0; i < DRMACH_FMEM_LOCKED_PAGES; i++) {
3393 		vtag_flushpage(va, (uint64_t)ksfmmup);
3394 		va += PAGESIZE;
3395 	}
3396 
3397 	kpreempt_enable();
3398 }
3399 
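/*
 * Build the copy-rename program: validate CPU state, resolve the SCF
 * and mac patrol hooks, lock the program pages into the TLB at an
 * alternate VA, copy in the relocatable copy/FMEM/loop scripts, and
 * split the copy memlist evenly among the participating CPUs.
 */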
3400 sbd_error_t *
3401 drmach_copy_rename_init(drmachid_t t_id, drmachid_t s_id,
3402 	struct memlist *c_ml, drmachid_t *pgm_id)
3403 {
3404 	drmach_mem_t	*s_mem;
3405 	drmach_mem_t	*t_mem;
3406 	struct memlist	*x_ml;
3407 	uint64_t	s_copybasepa, t_copybasepa;
3408 	uint_t		len;
3409 	caddr_t		bp, wp;
3410 	int			s_bd, t_bd, cpuid, active_cpus, i;
3411 	uint64_t		c_addr;
3412 	size_t			c_size, copy_sz, sz;
3413 	extern void		drmach_fmem_loop_script();
3414 	extern void		drmach_fmem_loop_script_rtn();
3415 	extern int		drmach_fmem_exec_script();
3416 	extern void		drmach_fmem_exec_script_end();
3417 	sbd_error_t	*err;
3418 	drmach_copy_rename_program_t *prog = NULL;
3419 	drmach_copy_rename_program_t *prog_kmem = NULL;
3420 	void		(*mc_suspend)(void);
3421 	void		(*mc_resume)(void);
3422 	int		(*scf_fmem_start)(int, int);
3423 	int		(*scf_fmem_end)(void);
3424 	int		(*scf_fmem_cancel)(void);
3425 	uint64_t	(*scf_get_base_addr)(void);
3426 
3427 	if (!DRMACH_IS_MEM_ID(s_id))
3428 		return (drerr_new(0, EOPL_INAPPROP, NULL));
3429 	if (!DRMACH_IS_MEM_ID(t_id))
3430 		return (drerr_new(0, EOPL_INAPPROP, NULL));
3431 
3432 	for (i = 0; i < NCPU; i++) {
3433 		int lsb_id, onb_core_num, strand_id;
3434 		drmach_board_t *bp;
3435 
3436 		/*
3437 		 * a CPU in cpu_ready_set will spin in the cache during FMEM
3438 		 */
3439 		if (CPU_IN_SET(cpu_ready_set, i))
3440 			continue;
3441 
3442 		/*
3443 		 * Now check for any inactive CPUs that
3444 		 * have been hotadded.  This can only occur in
3445 		 * an error condition in drmach_cpu_poweron().
3446 		 */
3447 		lsb_id = LSB_ID(i);
3448 		onb_core_num = ON_BOARD_CORE_NUM(i);
3449 		strand_id = STRAND_ID(i);
3450 		bp = drmach_get_board_by_bnum(lsb_id);
3451 		if (bp == NULL)
3452 			continue;
3453 		if (bp->cores[onb_core_num].core_hotadded &
3454 		    (1 << strand_id)) {
3455 			if (!(bp->cores[onb_core_num].core_started &
3456 			    (1 << strand_id))) {
3457 				return (drerr_new(1, EOPL_CPU_STATE, NULL));
3458 			}
3459 		}
3460 	}
3461 
3462 	mc_suspend = (void (*)(void))
3463 	    modgetsymvalue("opl_mc_suspend", 0);
3464 	mc_resume = (void (*)(void))
3465 	    modgetsymvalue("opl_mc_resume", 0);
3466 
3467 	if (mc_suspend == NULL || mc_resume == NULL) {
3468 		return (drerr_new(1, EOPL_MC_OPL, NULL));
3469 	}
3470 
3471 	scf_fmem_start = (int (*)(int, int))
3472 	    modgetsymvalue("scf_fmem_start", 0);
3473 	if (scf_fmem_start == NULL) {
3474 		return (drerr_new(1, EOPL_SCF_FMEM, NULL));
3475 	}
3476 	scf_fmem_end = (int (*)(void))
3477 	    modgetsymvalue("scf_fmem_end", 0);
3478 	if (scf_fmem_end == NULL) {
3479 		return (drerr_new(1, EOPL_SCF_FMEM, NULL));
3480 	}
3481 	scf_fmem_cancel = (int (*)(void))
3482 	    modgetsymvalue("scf_fmem_cancel", 0);
3483 	if (scf_fmem_cancel == NULL) {
3484 		return (drerr_new(1, EOPL_SCF_FMEM, NULL));
3485 	}
3486 	scf_get_base_addr = (uint64_t (*)(void))
3487 	    modgetsymvalue("scf_get_base_addr", 0);
3488 	if (scf_get_base_addr == NULL) {
3489 		return (drerr_new(1, EOPL_SCF_FMEM, NULL));
3490 	}
3491 	s_mem = s_id;
3492 	t_mem = t_id;
3493 
3494 	s_bd = s_mem->dev.bp->bnum;
3495 	t_bd = t_mem->dev.bp->bnum;
3496 
3497 	/* calculate source and target base pa */
3498 
3499 	s_copybasepa = s_mem->slice_base;
3500 	t_copybasepa = t_mem->slice_base;
3501 
3502 	/* adjust copy memlist addresses to be relative to copy base pa */
3503 	x_ml = c_ml;
3504 	while (x_ml != NULL) {
3505 		x_ml->address -= s_copybasepa;
3506 		x_ml = x_ml->next;
3507 	}
3508 
3509 	/*
3510 	 * bp will be page aligned, since we're calling
3511 	 * kmem_zalloc() with an exact multiple of PAGESIZE.
3512 	 */
3513 
3514 	prog_kmem = (drmach_copy_rename_program_t *)kmem_zalloc(
3515 	    DRMACH_FMEM_LOCKED_PAGES * PAGESIZE, KM_SLEEP);
3516 
3517 	prog_kmem->prog = prog_kmem;
3518 
3519 	/*
3520 	 * To avoid MTLB hit, we allocate a new VM space and remap
3521 	 * the kmem_alloc buffer to that address.  This solves
3522 	 * 2 problems we found:
3523 	 * - the kmem_alloc buffer can be just a chunk inside
3524 	 *   a much larger, e.g. 4MB buffer and MTLB will occur
3525 	 *   if there are both a 4MB and a 8K TLB mapping to
3526 	 *   the same VA range.
3527 	 * - the kmem mapping got dropped into the TLB by other
3528 	 *   strands, unintentionally.
3529 	 * Note that the pointers like data, critical, memlist_buffer,
3530 	 * and stat inside the copy rename structure are mapped to this
3531 	 * alternate VM space so we must make sure we lock the TLB mapping
3532 	 * whenever we access data pointed to by these pointers.
3533 	 */
3534 
3535 	prog = prog_kmem->locked_prog = vmem_alloc(heap_arena,
3536 	    DRMACH_FMEM_LOCKED_PAGES * PAGESIZE, VM_SLEEP);
3537 	wp = bp = (caddr_t)prog;
3538 
3539 	/* Now remap prog_kmem to prog */
3540 	drmach_lock_critical((caddr_t)prog_kmem, (caddr_t)prog);
3541 
3542 	/* All pointers in prog are based on the alternate mapping */
3543 	prog->data = (drmach_copy_rename_data_t *)roundup(((uint64_t)prog +
3544 	    sizeof (drmach_copy_rename_program_t)), sizeof (void *));
3545 
3546 	ASSERT(((uint64_t)prog->data + sizeof (drmach_copy_rename_data_t))
3547 	    <= ((uint64_t)prog + PAGESIZE));
3548 
3549 	prog->critical = (drmach_copy_rename_critical_t *)
3550 	    (wp + DRMACH_FMEM_CRITICAL_PAGE * PAGESIZE);
3551 
3552 	prog->memlist_buffer = (caddr_t)(wp + DRMACH_FMEM_MLIST_PAGE *
3553 	    PAGESIZE);
3554 
3555 	prog->stat = (drmach_cr_stat_t *)(wp + DRMACH_FMEM_STAT_PAGE *
3556 	    PAGESIZE);
3557 
3558 	/* LINTED */
3559 	ASSERT(sizeof (drmach_cr_stat_t) <= ((DRMACH_FMEM_LOCKED_PAGES -
3560 	    DRMACH_FMEM_STAT_PAGE) * PAGESIZE));
3561 
3562 	prog->critical->scf_reg_base = (uint64_t)-1;
3563 	prog->critical->scf_td[0] = (s_bd & 0xff);
3564 	prog->critical->scf_td[1] = (t_bd & 0xff);
3565 	for (i = 2; i < 15; i++) {
3566 		prog->critical->scf_td[i]   = 0;
3567 	}
3568 	prog->critical->scf_td[15] = ((0xaa + s_bd + t_bd) & 0xff);
3569 
3570 	bp = (caddr_t)prog->critical;
3571 	len = sizeof (drmach_copy_rename_critical_t);
3572 	wp = (caddr_t)roundup((uint64_t)bp + len, sizeof (void *));
3573 
3574 	len = (uint_t)((ulong_t)drmach_copy_rename_end -
3575 	    (ulong_t)drmach_copy_rename_prog__relocatable);
3576 
3577 	/*
3578 	 * We always leave 1K of nops to prevent the processor's
3579 	 * speculative execution from causing stray memory accesses
3580 	 */
3581 	wp = wp + len + 1024;
3582 
3583 	len = (uint_t)((ulong_t)drmach_fmem_exec_script_end -
3584 	    (ulong_t)drmach_fmem_exec_script);
3585 	/* this is the entry point of the loop script */
3586 	wp = wp + len + 1024;
3587 
3588 	len = (uint_t)((ulong_t)drmach_fmem_exec_script -
3589 	    (ulong_t)drmach_fmem_loop_script);
3590 	wp = wp + len + 1024;
3591 
3592 	/* now make sure the scripts and their pads fit in one page */
3593 
3594 	if ((wp - bp) > PAGESIZE) {
3595 		err = drerr_new(1, EOPL_FMEM_SETUP, NULL);
3596 		goto out;
3597 	}
3598 
3599 	bp = (caddr_t)prog->critical;
3600 	len = sizeof (drmach_copy_rename_critical_t);
3601 	wp = (caddr_t)roundup((uint64_t)bp + len, sizeof (void *));
3602 
3603 	prog->critical->run = (int (*)())(wp);
3604 	len = (uint_t)((ulong_t)drmach_copy_rename_end -
3605 	    (ulong_t)drmach_copy_rename_prog__relocatable);
3606 
3607 	bcopy((caddr_t)drmach_copy_rename_prog__relocatable, wp, len);
3608 
3609 	wp = (caddr_t)roundup((uint64_t)wp + len, 1024);
3610 
3611 	prog->critical->fmem = (int (*)())(wp);
3612 	len = (int)((ulong_t)drmach_fmem_exec_script_end -
3613 	    (ulong_t)drmach_fmem_exec_script);
3614 	bcopy((caddr_t)drmach_fmem_exec_script, wp, len);
3615 
3616 	len = (int)((ulong_t)drmach_fmem_exec_script_end -
3617 	    (ulong_t)drmach_fmem_exec_script);
3618 	wp = (caddr_t)roundup((uint64_t)wp + len, 1024);
3619 
3620 	prog->critical->loop = (int (*)())(wp);
3621 	len = (int)((ulong_t)drmach_fmem_exec_script -
3622 	    (ulong_t)drmach_fmem_loop_script);
3623 	bcopy((caddr_t)drmach_fmem_loop_script, (void *)wp, len);
3624 	len = (int)((ulong_t)drmach_fmem_loop_script_rtn -
3625 	    (ulong_t)drmach_fmem_loop_script);
3626 	prog->critical->loop_rtn = (void (*)()) (wp+len);
3627 
3628 	prog->data->fmem_status.error = ESBD_NOERROR;
3629 
3630 	/* now we are committed, call SCF, soft suspend mac patrol */
3631 	if ((*scf_fmem_start)(s_bd, t_bd)) {
3632 		err = drerr_new(1, EOPL_SCF_FMEM_START, NULL);
3633 		goto out;
3634 	}
3635 	prog->data->scf_fmem_end = scf_fmem_end;
3636 	prog->data->scf_fmem_cancel = scf_fmem_cancel;
3637 	prog->data->scf_get_base_addr = scf_get_base_addr;
3638 	prog->data->fmem_status.op |= OPL_FMEM_SCF_START;
3639 
3640 	/* soft suspend mac patrol */
3641 	(*mc_suspend)();
3642 	prog->data->fmem_status.op |= OPL_FMEM_MC_SUSPEND;
3643 	prog->data->mc_resume = mc_resume;
3644 
3645 	prog->critical->inst_loop_ret =
3646 	    *(uint64_t *)(prog->critical->loop_rtn);
3647 
3648 	/*
3649 	 * 0x30800000 is op code "ba,a	+0"
3650 	 */
3651 
3652 	*(uint_t *)(prog->critical->loop_rtn) = (uint_t)(0x30800000);
3653 
3654 	/*
3655 	 * set the value of SCF FMEM TIMEOUT
3656 	 */
3657 	prog->critical->delay = fmem_timeout * system_clock_freq;
3658 
3659 	prog->data->s_mem = (drmachid_t)s_mem;
3660 	prog->data->t_mem = (drmachid_t)t_mem;
3661 
3662 	cpuid = CPU->cpu_id;
3663 	prog->data->cpuid = cpuid;
3664 	prog->data->cpu_ready_set = cpu_ready_set;
3665 	prog->data->cpu_slave_set = cpu_ready_set;
3666 	prog->data->slowest_cpuid = (processorid_t)-1;
3667 	prog->data->copy_wait_time = 0;
3668 	CPUSET_DEL(prog->data->cpu_slave_set, cpuid);
3669 
3670 	for (i = 0; i < NCPU; i++) {
3671 		prog->data->cpu_ml[i] = NULL;
3672 	}
3673 
3674 	active_cpus = 0;
3675 	if (drmach_disable_mcopy) {
3676 		active_cpus = 1;
3677 		CPUSET_ADD(prog->data->cpu_copy_set, cpuid);
3678 	} else {
3679 		for (i = 0; i < NCPU; i++) {
3680 			if (CPU_IN_SET(cpu_ready_set, i) &&
3681 			    CPU_ACTIVE(cpu[i])) {
3682 				CPUSET_ADD(prog->data->cpu_copy_set, i);
3683 				active_cpus++;
3684 			}
3685 		}
3686 	}
3687 
3688 	drmach_setup_memlist(prog);
3689 
3690 	x_ml = c_ml;
3691 	sz = 0;
3692 	while (x_ml != NULL) {
3693 		sz += x_ml->size;
3694 		x_ml = x_ml->next;
3695 	}
3696 
3697 	copy_sz = sz / active_cpus;
3698 	copy_sz = roundup(copy_sz, MMU_PAGESIZE4M);
3699 
3700 	while (sz > copy_sz * active_cpus) {
3701 		copy_sz += MMU_PAGESIZE4M;
3702 	}
3703 
3704 	prog->data->stick_freq = system_clock_freq;
3705 	prog->data->copy_delay = ((copy_sz / min_copy_size_per_sec) + 2) *
3706 	    system_clock_freq;
3707 
3708 	x_ml = c_ml;
3709 	c_addr = x_ml->address;
3710 	c_size = x_ml->size;
3711 
3712 	for (i = 0; i < NCPU; i++) {
3713 		prog->stat->nbytes[i] = 0;
3714 		if (!CPU_IN_SET(prog->data->cpu_copy_set, i)) {
3715 			continue;
3716 		}
3717 		sz = copy_sz;
3718 
3719 		while (sz) {
3720 			if (c_size > sz) {
3721 				prog->data->cpu_ml[i] =
3722 				    drmach_memlist_add_span(prog,
3723 				    prog->data->cpu_ml[i], c_addr, sz);
3724 				c_addr += sz;
3725 				c_size -= sz;
3726 				break;
3727 			} else {
3728 				sz -= c_size;
3729 				prog->data->cpu_ml[i] =
3730 				    drmach_memlist_add_span(prog,
3731 				    prog->data->cpu_ml[i], c_addr, c_size);
3732 				x_ml = x_ml->next;
3733 				if (x_ml != NULL) {
3734 					c_addr = x_ml->address;
3735 					c_size = x_ml->size;
3736 				} else {
3737 					goto end;
3738 				}
3739 			}
3740 		}
3741 	}
3742 end:
3743 	prog->data->s_copybasepa = s_copybasepa;
3744 	prog->data->t_copybasepa = t_copybasepa;
3745 	prog->data->c_ml = c_ml;
3746 	*pgm_id = prog_kmem;
3747 
3748 	/* Unmap the alternate space.  It will have to be remapped again */
3749 	drmach_unlock_critical((caddr_t)prog);
3750 	return (NULL);
3751 out:
3752 	if (prog != NULL) {
3753 		drmach_unlock_critical((caddr_t)prog);
3754 		vmem_free(heap_arena, prog, DRMACH_FMEM_LOCKED_PAGES *
3755 		    PAGESIZE);
3756 	}
3757 	if (prog_kmem != NULL) {
3758 		kmem_free(prog_kmem, DRMACH_FMEM_LOCKED_PAGES * PAGESIZE);
3759 	}
3760 	return (err);
3761 }
3762 
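/*
 * Tear down after copy-rename: if the FMEM command was issued,
 * complete the rename via scf_fmem_end() and swap the source/target
 * base addresses; otherwise cancel it.  In both cases mac patrol is
 * resumed and the program buffers are freed.
 */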
3763 sbd_error_t *
3764 drmach_copy_rename_fini(drmachid_t id)
3765 {
3766 	drmach_copy_rename_program_t	*prog = id;
3767 	sbd_error_t			*err = NULL;
3768 	int				rv;
3769 	uint_t				fmem_error;
3770 
3771 	/*
3772 	 * Note that we have to delay calling SCF to find out the
3773 	 * status of the FMEM operation here because SCF cannot
3774 	 * respond while it is suspended.
3775 	 * This creates a small window during which we are not sure
3776 	 * about the base address of the system board.
3777 	 * If there is any call to mc-opl to get memory unum,
3778 	 * mc-opl will return UNKNOWN as the unum.
3779 	 */
3780 
3781 	/*
3782 	 * we have to remap again because all the pointers, like data
3783 	 * and critical in prog, are based on the alternate vmem space.
3784 	 */
3785 	(void) drmach_lock_critical((caddr_t)prog, (caddr_t)prog->locked_prog);
3786 
3787 	if (prog->data->c_ml != NULL)
3788 		memlist_delete(prog->data->c_ml);
3789 
3790 	if ((prog->data->fmem_status.op &
3791 	    (OPL_FMEM_SCF_START | OPL_FMEM_MC_SUSPEND)) !=
3792 	    (OPL_FMEM_SCF_START | OPL_FMEM_MC_SUSPEND)) {
3793 		cmn_err(CE_PANIC, "drmach_copy_rename_fini: invalid op "
3794 		    "code %x\n", prog->data->fmem_status.op);
3795 	}
3796 
3797 	fmem_error = prog->data->fmem_status.error;
3798 	if (fmem_error != ESBD_NOERROR) {
3799 		err = drerr_new(1, fmem_error, NULL);
3800 	}
3801 
3802 	/* possible ops are SCF_START, MC_SUSPEND */
3803 	if (prog->critical->fmem_issued) {
3804 		if (fmem_error != ESBD_NOERROR) {
3805 			cmn_err(CE_PANIC, "Irrecoverable FMEM error %d\n",
3806 			    fmem_error);
3807 		}
3808 		rv = (*prog->data->scf_fmem_end)();
3809 		if (rv) {
3810 			cmn_err(CE_PANIC, "scf_fmem_end() failed rv=%d", rv);
3811 		}
3812 		/*
3813 		 * If we get here, rename is successful.
3814 		 * Do all the copy rename post processing.
3815 		 */
3816 		drmach_swap_pa((drmach_mem_t *)prog->data->s_mem,
3817 		    (drmach_mem_t *)prog->data->t_mem);
3818 	} else {
3819 		rv = (*prog->data->scf_fmem_cancel)();
3820 		if (rv) {
3821 			cmn_err(CE_WARN, "scf_fmem_cancel() failed rv=0x%x",
3822 			    rv);
3823 			if (!err) {
3824 				err = drerr_new(1, EOPL_SCF_FMEM_CANCEL,
3825 				    "scf_fmem_cancel() failed. rv = 0x%x", rv);
3826 			}
3827 		}
3828 	}
3829 	/* soft resume mac patrol */
3830 	(*prog->data->mc_resume)();
3831 
3832 	drmach_unlock_critical((caddr_t)prog->locked_prog);
3833 
3834 	vmem_free(heap_arena, prog->locked_prog,
3835 	    DRMACH_FMEM_LOCKED_PAGES * PAGESIZE);
3836 	kmem_free(prog, DRMACH_FMEM_LOCKED_PAGES * PAGESIZE);
3837 	return (err);
3838 }
3839 
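/*
 * Slave-CPU entry point for copy-rename.  Each slave jumps into the
 * relocated program; a data error trap is caught via on_trap() and
 * recorded as EOPL_FMEM_COPY_ERROR before the CPU exits the loop.
 */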
3840 /*ARGSUSED*/
3841 static void
3842 drmach_copy_rename_slave(struct regs *rp, drmachid_t id)
3843 {
3844 	drmach_copy_rename_program_t	*prog =
3845 	    (drmach_copy_rename_program_t *)id;
3846 	register int			cpuid;
3847 	extern void			drmach_flush();
3848 	extern void			membar_sync_il();
3849 	extern void			drmach_flush_icache();
3850 	on_trap_data_t			otd;
3851 
3852 	cpuid = CPU->cpu_id;
3853 
3854 	if (on_trap(&otd, OT_DATA_EC)) {
3855 		no_trap();
3856 		prog->data->error[cpuid] = EOPL_FMEM_COPY_ERROR;
3857 		prog->critical->stat[cpuid] = FMEM_LOOP_EXIT;
3858 		drmach_flush_icache();
3859 		membar_sync_il();
3860 		return;
3861 	}
3862 
3863 
3864 	/*
3865 	 * jmp drmach_copy_rename_prog().
3866 	 */
3867 
3868 	drmach_flush(prog->critical, PAGESIZE);
3869 	(void) prog->critical->run(prog, cpuid);
3870 	drmach_flush_icache();
3871 
3872 	no_trap();
3873 
3874 	prog->critical->stat[cpuid] = FMEM_LOOP_EXIT;
3875 
3876 	membar_sync_il();
3877 }
3878 
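/*
 * After a successful rename, swap the slice base addresses of the
 * source and target memory units, rebase their memlists accordingly,
 * and have IKP update the device nodes to match.
 */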
3879 static void
3880 drmach_swap_pa(drmach_mem_t *s_mem, drmach_mem_t *t_mem)
3881 {
3882 	uint64_t s_base, t_base;
3883 	drmach_board_t *s_board, *t_board;
3884 	struct memlist *ml;
3885 
3886 	s_board = s_mem->dev.bp;
3887 	t_board = t_mem->dev.bp;
3888 	if (s_board == NULL || t_board == NULL) {
3889 		cmn_err(CE_PANIC, "Cannot locate source or target board\n");
3890 		return;
3891 	}
3892 	s_base = s_mem->slice_base;
3893 	t_base = t_mem->slice_base;
3894 
3895 	s_mem->slice_base = t_base;
3896 	s_mem->base_pa = (s_mem->base_pa - s_base) + t_base;
3897 
3898 	for (ml = s_mem->memlist; ml; ml = ml->next) {
3899 		ml->address = ml->address - s_base + t_base;
3900 	}
3901 
3902 	t_mem->slice_base = s_base;
3903 	t_mem->base_pa = (t_mem->base_pa - t_base) + s_base;
3904 
3905 	for (ml = t_mem->memlist; ml; ml = ml->next) {
3906 		ml->address = ml->address - t_base + s_base;
3907 	}
3908 
3909 	/*
3910 	 * IKP has to update the sb-mem-ranges for mac patrol driver
3911 	 * when it resumes, it will re-read the sb-mem-range property
3912 	 * to get the new base address
3913 	 */
3914 	if (oplcfg_pa_swap(s_board->bnum, t_board->bnum) != 0)
3915 		cmn_err(CE_PANIC, "Could not update device nodes\n");
3916 }
3917 
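/*
 * Execute the copy-rename program.  The master locks the program into
 * each participating CPU's TLB, cross-traps the slaves into
 * drmach_copy_rename_slave(), runs the program itself, then patches
 * the loop script to release the slaves and waits for them to exit.
 */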
3918 void
3919 drmach_copy_rename(drmachid_t id)
3920 {
3921 	drmach_copy_rename_program_t	*prog_kmem = id;
3922 	drmach_copy_rename_program_t	*prog;
3923 	cpuset_t	cpuset;
3924 	int		cpuid;
3925 	uint64_t	inst;
3926 	register int	rtn;
3927 	extern int	in_sync;
3928 	int		old_in_sync;
3929 	extern void	drmach_sys_trap();
3930 	extern void	drmach_flush();
3931 	extern void	drmach_flush_icache();
3932 	extern uint64_t	patch_inst(uint64_t *, uint64_t);
3933 	on_trap_data_t	otd;
3934 
3935 
3936 	prog = prog_kmem->locked_prog;
3937 
3938 
3939 	/*
3940 	 * We must immediately drop in the TLB because all pointers
3941 	 * are based on the alternate vmem space.
3942 	 */
3943 
3944 	(void) drmach_lock_critical((caddr_t)prog_kmem, (caddr_t)prog);
3945 
3946 	/*
3947 	 * we call scf to get the base address here because if scf
3948 	 * has not been suspended yet, the active path can be changing and
3949 	 * sometimes it is not even mapped.  We call the interface when
3950 	 * the OS has been quiesced.
3951 	 */
3952 	prog->critical->scf_reg_base = (*prog->data->scf_get_base_addr)();
3953 
3954 	if (prog->critical->scf_reg_base == (uint64_t)-1 ||
3955 	    prog->critical->scf_reg_base == 0) {
3956 		prog->data->fmem_status.error = EOPL_FMEM_SCF_ERR;
3957 		drmach_unlock_critical((caddr_t)prog);
3958 		return;
3959 	}
3960 
3961 	cpuset = prog->data->cpu_ready_set;
3962 
3963 	for (cpuid = 0; cpuid < NCPU; cpuid++) {
3964 		if (CPU_IN_SET(cpuset, cpuid)) {
3965 			prog->critical->stat[cpuid] = FMEM_LOOP_START;
3966 			prog->data->error[cpuid] = ESBD_NOERROR;
3967 		}
3968 	}
3969 
3970 	old_in_sync = in_sync;
3971 	in_sync = 1;
3972 	cpuid = CPU->cpu_id;
3973 
3974 	CPUSET_DEL(cpuset, cpuid);
3975 
3976 	for (cpuid = 0; cpuid < NCPU; cpuid++) {
3977 		if (CPU_IN_SET(cpuset, cpuid)) {
3978 			xc_one(cpuid, (xcfunc_t *)drmach_lock_critical,
3979 			    (uint64_t)prog_kmem, (uint64_t)prog);
3980 		}
3981 	}
3982 
3983 	cpuid = CPU->cpu_id;
3984 
3985 	xt_some(cpuset, (xcfunc_t *)drmach_sys_trap,
3986 	    (uint64_t)drmach_copy_rename_slave, (uint64_t)prog);
3987 	xt_sync(cpuset);
3988 
3989 	if (on_trap(&otd, OT_DATA_EC)) {
3990 		rtn = EOPL_FMEM_COPY_ERROR;
3991 		drmach_flush_icache();
3992 		goto done;
3993 	}
3994 
3995 	/*
3996 	 * jmp drmach_copy_rename_prog().
3997 	 */
3998 
3999 	drmach_flush(prog->critical, PAGESIZE);
4000 	rtn = prog->critical->run(prog, cpuid);
4001 
4002 	drmach_flush_icache();
4003 
4004 
4005 done:
4006 	no_trap();
4007 	if (rtn == EOPL_FMEM_HW_ERROR) {
4008 		kpreempt_enable();
4009 		prom_panic("URGENT_ERROR_TRAP is detected during FMEM.\n");
4010 	}
4011 
4012 	/*
4013 	 * In the normal case, all slave CPUs are still spinning in
4014 	 * the assembly code.  The master has to patch the instruction
4015 	 * to get them out.
4016 	 * In error cases, e.g. COPY_ERROR, some slave CPUs might
4017 	 * have aborted and already returned and set LOOP_EXIT status.
4018 	 * Some CPUs might still be copying.
4019 	 * In any case, some delay is necessary to give them
4020 	 * enough time to set the LOOP_EXIT status.
4021 	 */
4022 
4023 	for (;;) {
4024 		inst = patch_inst((uint64_t *)prog->critical->loop_rtn,
4025 		    prog->critical->inst_loop_ret);
4026 		if (prog->critical->inst_loop_ret == inst) {
4027 			break;
4028 		}
4029 	}
4030 
4031 	for (cpuid = 0; cpuid < NCPU; cpuid++) {
4032 		uint64_t	last, now;
4033 		if (!CPU_IN_SET(cpuset, cpuid)) {
4034 			continue;
4035 		}
4036 		last = prog->stat->nbytes[cpuid];
4037 		/*
4038 		 * Wait for all CPUs to exit.
4039 		 * However, we do not want an infinite loop,
4040 		 * so we detect a hangup situation here.
4041 		 * If the slave CPU is still copying data,
4042 		 * we will continue to wait.
4043 		 * In error cases, the master has already set
4044 		 * fmem_status.error to abort the copying.
4045 		 * A 1 ms delay for them to abort copying and
4046 		 * return to drmach_copy_rename_slave to set
4047 		 * FMEM_LOOP_EXIT status should be enough.
4048 		 */
4049 		for (;;) {
4050 			if (prog->critical->stat[cpuid] == FMEM_LOOP_EXIT)
4051 				break;
4052 			drmach_sleep_il();
4053 			drv_usecwait(1000);
4054 			now = prog->stat->nbytes[cpuid];
4055 			if (now <= last) {
4056 				drv_usecwait(1000);
4057 				if (prog->critical->stat[cpuid] ==
4058 				    FMEM_LOOP_EXIT)
4059 					break;
4060 				cmn_err(CE_PANIC, "CPU %d hang during Copy "
4061 				    "Rename", cpuid);
4062 			}
4063 			last = now;
4064 		}
4065 		if (prog->data->error[cpuid] == EOPL_FMEM_HW_ERROR) {
4066 			prom_panic("URGENT_ERROR_TRAP is detected during "
4067 			    "FMEM.\n");
4068 		}
4069 	}
4070 
4071 	/*
4072 	 * This must be done after all strands have exited.
4073 	 * Removing the TLB entry will affect both strands
4074 	 * in the same core.
4075 	 */
4076 
4077 	for (cpuid = 0; cpuid < NCPU; cpuid++) {
4078 		if (CPU_IN_SET(cpuset, cpuid)) {
4079 			xc_one(cpuid, (xcfunc_t *)drmach_unlock_critical,
4080 			    (uint64_t)prog, 0);
4081 		}
4082 	}
4083 
4084 	in_sync = old_in_sync;
4085 
4086 	/*
4087 	 * we should unlock before the following lock to keep the kpreempt
4088 	 * count correct.
4089 	 */
4090 	(void) drmach_unlock_critical((caddr_t)prog);
4091 
4092 	/*
4093 	 * we must remap again.  The TLB entry might have been removed above.
4094 	 */
4095 
4096 	(void) drmach_lock_critical((caddr_t)prog_kmem, (caddr_t)prog);
4097 
4098 	if (prog->data->fmem_status.error == ESBD_NOERROR)
4099 		prog->data->fmem_status.error = rtn;
4100 
4101 	if (prog->data->copy_wait_time > 0) {
4102 		DRMACH_PR("Unexpected long wait time %ld seconds "
4103 		    "during copy rename on CPU %d\n",
4104 		    prog->data->copy_wait_time/prog->data->stick_freq,
4105 		    prog->data->slowest_cpuid);
4106 	}
4107 	drmach_unlock_critical((caddr_t)prog);
4108 }
4109