xref: /titanic_41/usr/src/uts/sun4u/opl/os/opl.c (revision 7bce2ddc44146624206b5daadc6ba603fe8bf58c)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/cpuvar.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/promif.h>
#include <sys/platform_module.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/machsystm.h>
#include <sys/bootconf.h>
#include <sys/nvpair.h>
#include <sys/kobj.h>
#include <sys/mem_cage.h>
#include <sys/opl.h>
#include <sys/scfd/scfostoescf.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/utsname.h>
#include <sys/ddi.h>
#include <sys/sunndi.h>
#include <sys/lgrp.h>
#include <sys/memnode.h>
#include <vm/vm_dep.h>

int (*opl_get_mem_unum)(int, uint64_t, char *, int, int *);
int (*opl_get_mem_sid)(char *unum, char *buf, int buflen, int *lenp);
int (*opl_get_mem_offset)(uint64_t paddr, uint64_t *offp);
int (*opl_get_mem_addr)(char *unum, char *sid,
    uint64_t offset, uint64_t *paddr);

/* Memory for fcode claims: 16k times the maximum number of IO units */
#define	EFCODE_SIZE	(OPL_MAX_BOARDS * OPL_MAX_IO_UNITS_PER_BOARD * 0x4000)
int efcode_size = EFCODE_SIZE;

#define	OPL_MC_MEMBOARD_SHIFT 38	/* Boards are on 256GB boundaries */
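/*
 * With a 38-bit shift each memory board covers 1ull << 38 bytes
 * (256 GB), so a physical address maps to its board-sized memnode as
 * paddr >> OPL_MC_MEMBOARD_SHIFT (see plat_fill_mc() below).
 */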

/* Set the maximum number of boards for DR */
int opl_boards = OPL_MAX_BOARDS;

void sgn_update_all_cpus(ushort_t, uchar_t, uchar_t);

extern int tsb_lgrp_affinity;

int opl_tsb_spares = (OPL_MAX_BOARDS) * (OPL_MAX_PCICH_UNITS_PER_BOARD) *
    (OPL_MAX_TSBS_PER_PCICH);

pgcnt_t opl_startup_cage_size = 0;

static struct memlist *opl_memlist_per_board(struct memlist *ml);

static enum {
	MODEL_FF1 = 0,
	MODEL_FF2 = 1,
	MODEL_DC = 2
} plat_model = -1;

int
set_platform_max_ncpus(void)
{
	return (OPL_MAX_CPU_PER_BOARD * OPL_MAX_BOARDS);
}

int
set_platform_tsb_spares(void)
{
	return (MIN(opl_tsb_spares, MAX_UPA));
}

#pragma weak mmu_init_large_pages
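/*
 * mmu_init_large_pages() is a weak symbol: taking its address below
 * evaluates to NULL when the routine is not linked in, so the call in
 * set_platform_defaults() is safely skipped on such configurations.
 */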

void
set_platform_defaults(void)
{
	extern char *tod_module_name;
	extern void cpu_sgn_update(ushort_t, uchar_t, uchar_t, int);
	extern int ts_dispatch_extended;
	extern void mmu_init_large_pages(size_t);

	/* Set the CPU signature function pointer */
	cpu_sgn_func = cpu_sgn_update;

	/* Set the appropriate tod module for the OPL platform */
	ASSERT(tod_module_name == NULL);
	tod_module_name = "todopl";

	/*
	 * Use the alternate TS dispatch table, which is better tuned
	 * for large servers.
	 */
	if (ts_dispatch_extended == -1)
		ts_dispatch_extended = 1;

	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    (mmu_ism_pagesize != MMU_PAGESIZE32M)) {
		if (&mmu_init_large_pages)
			mmu_init_large_pages(mmu_ism_pagesize);
	}

	tsb_lgrp_affinity = 1;
}

/*
 * Convert a logical board number to a physical one.
 */

#define	LSBPROP		"board#"
#define	PSBPROP		"physical-board#"

int
opl_get_physical_board(int id)
{
	dev_info_t	*root_dip, *dip = NULL;
	char		*dname = NULL;
	int		circ;

	pnode_t		pnode;
	char		pname[MAXSYSNAME] = {0};

	int		lsb_id;	/* Logical System Board ID */
	int		psb_id;	/* Physical System Board ID */

	/*
	 * This function is called at an early stage of boot, before the
	 * kernel device tree has been initialized, and also later when
	 * the device tree is up.  Try the fast track (devinfo tree) first.
	 */
	root_dip = ddi_root_node();
	if (root_dip) {
		/* Get from devinfo node */
		ndi_devi_enter(root_dip, &circ);
		for (dip = ddi_get_child(root_dip); dip;
		    dip = ddi_get_next_sibling(dip)) {

			dname = ddi_node_name(dip);
			if (strncmp(dname, "pseudo-mc", 9) != 0)
				continue;

			if ((lsb_id = (int)ddi_getprop(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, LSBPROP, -1)) == -1)
				continue;

			if (id == lsb_id) {
				if ((psb_id = (int)ddi_getprop(DDI_DEV_T_ANY,
				    dip, DDI_PROP_DONTPASS, PSBPROP, -1))
				    == -1) {
					ndi_devi_exit(root_dip, circ);
					return (-1);
				} else {
					ndi_devi_exit(root_dip, circ);
					return (psb_id);
				}
			}
		}
		ndi_devi_exit(root_dip, circ);
	}

	/*
	 * Either we do not have a kernel device tree yet, or we did not
	 * find the node in it (say, because the device tree was
	 * modified); fall back to the OBP tree.
	 */
	pnode = prom_rootnode();
	for (pnode = prom_childnode(pnode); pnode;
	    pnode = prom_nextnode(pnode)) {

		if ((prom_getprop(pnode, "name", (caddr_t)pname) == -1) ||
		    (strncmp(pname, "pseudo-mc", 9) != 0))
			continue;

		if (prom_getprop(pnode, LSBPROP, (caddr_t)&lsb_id) == -1)
			continue;

		if (id == lsb_id) {
			if (prom_getprop(pnode, PSBPROP,
			    (caddr_t)&psb_id) == -1) {
				return (-1);
			} else {
				return (psb_id);
			}
		}
	}

	return (-1);
}

/*
 * On OPL, memory from two or more successive boards can be contiguous
 * and therefore represented as a single chunk.
 * This function splits such chunks at board boundaries.
 */
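/*
 * For example, with 256 GB boards the boundary between boards 0 and 1
 * is at 0x4000000000; a chunk covering [0x3f80000000, 0x4080000000)
 * straddles it and is split into [0x3f80000000, 0x4000000000) and
 * [0x4000000000, 0x4080000000).
 */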
static struct memlist *
opl_memlist_per_board(struct memlist *ml)
{
	uint64_t ssize, low, high, boundary;
	struct memlist *head, *tail, *new;

	ssize = (1ull << OPL_MC_MEMBOARD_SHIFT);

	head = tail = NULL;

	for (; ml; ml = ml->next) {
		low  = (uint64_t)ml->address;
		high = low + (uint64_t)(ml->size);
		while (low < high) {
			boundary = roundup(low + 1, ssize);
			boundary = MIN(high, boundary);
			new = kmem_zalloc(sizeof (struct memlist), KM_SLEEP);
			new->address = low;
			new->size = boundary - low;
			if (head == NULL)
				head = new;
			if (tail) {
				tail->next = new;
				new->prev = tail;
			}
			tail = new;
			low = boundary;
		}
	}
	return (head);
}


void
set_platform_cage_params(void)
{
	extern pgcnt_t total_pages;
	extern struct memlist *phys_avail;
	struct memlist *ml, *tml;
	int ret;

	if (kernel_cage_enable) {
		pgcnt_t preferred_cage_size;

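		/*
		 * Reserve at least 1/256th of physical memory for the
		 * cage unless opl_startup_cage_size requests more.
		 */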
		preferred_cage_size =
		    MAX(opl_startup_cage_size, total_pages / 256);

		ml = opl_memlist_per_board(phys_avail);

		kcage_range_lock();
		/*
		 * Note: we are assuming that POST has loaded the whole
		 * show into the high end of memory.  Given that, we copy
		 * the whole of phys_avail to the glist and arrange for
		 * the cage to grow downward (descending pfns).
		 */
		ret = kcage_range_init(ml, 1);

		/* free the memlist */
		do {
			tml = ml->next;
			kmem_free(ml, sizeof (struct memlist));
			ml = tml;
		} while (ml != NULL);

		if (ret == 0)
			kcage_init(preferred_cage_size);
		kcage_range_unlock();
	}

	if (kcage_on)
		cmn_err(CE_NOTE, "!DR Kernel Cage is ENABLED");
	else
		cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
}

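/*
 * CPU power on/off is delegated to the DR driver (drmach): its entry
 * points are resolved at run time with kobj_getsymvalue(), and ENOTSUP
 * is returned when the driver is not loaded.
 */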
/*ARGSUSED*/
int
plat_cpu_poweron(struct cpu *cp)
{
	int (*opl_cpu_poweron)(struct cpu *) = NULL;

	opl_cpu_poweron =
	    (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweron", 0);

	if (opl_cpu_poweron == NULL)
		return (ENOTSUP);
	else
		return ((opl_cpu_poweron)(cp));
}

/*ARGSUSED*/
int
plat_cpu_poweroff(struct cpu *cp)
{
	int (*opl_cpu_poweroff)(struct cpu *) = NULL;

	opl_cpu_poweroff =
	    (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweroff", 0);

	if (opl_cpu_poweroff == NULL)
		return (ENOTSUP);
	else
		return ((opl_cpu_poweroff)(cp));
}

int
plat_max_boards(void)
{
	return (OPL_MAX_BOARDS);
}

int
plat_max_cpu_units_per_board(void)
{
	return (OPL_MAX_CPU_PER_BOARD);
}

int
plat_max_mem_units_per_board(void)
{
	return (OPL_MAX_MEM_UNITS_PER_BOARD);
}

int
plat_max_io_units_per_board(void)
{
	return (OPL_MAX_IO_UNITS_PER_BOARD);
}

int
plat_max_cmp_units_per_board(void)
{
	return (OPL_MAX_CMP_UNITS_PER_BOARD);
}

int
plat_max_core_units_per_board(void)
{
	return (OPL_MAX_CORE_UNITS_PER_BOARD);
}

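/*
 * Map a pfn to its memory node (one memnode per board).  On sun4u the
 * base page size is 8 KB (MMU_PAGESHIFT == 13), so mem_node_pfn_shift,
 * set in plat_build_mem_nodes() below, is 38 - 13 = 25.
 */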
int
plat_pfn_to_mem_node(pfn_t pfn)
{
	return (pfn >> mem_node_pfn_shift);
}

/* ARGSUSED */
void
plat_build_mem_nodes(u_longlong_t *list, size_t nelems)
{
	size_t	elem;
	pfn_t	basepfn;
	pgcnt_t	npgs;
	uint64_t	boundary, ssize;
	uint64_t	low, high;

	/*
	 * OPL mem slices are always aligned on a 256GB boundary.
	 */
	mem_node_pfn_shift = OPL_MC_MEMBOARD_SHIFT - MMU_PAGESHIFT;
	mem_node_physalign = 0;

	/*
	 * Boot install lists are arranged <addr, len>, <addr, len>, ...
	 */
	ssize = (1ull << OPL_MC_MEMBOARD_SHIFT);
	for (elem = 0; elem < nelems; elem += 2) {
		low  = (uint64_t)list[elem];
		high = low + (uint64_t)(list[elem + 1]);
		while (low < high) {
			boundary = roundup(low + 1, ssize);
			boundary = MIN(high, boundary);
			basepfn = btop(low);
			npgs = btop(boundary - low);
			mem_node_add_slice(basepfn, basepfn + npgs - 1);
			low = boundary;
		}
	}
}

/*
 * At boot time, associate the memory slice described by an mc node
 * with its board's memnode and lgroup handle.
 */
void
plat_fill_mc(pnode_t nodeid)
{
	int board;
	int memnode;
	struct {
		uint64_t	addr;
		uint64_t	size;
	} mem_range;

	if (prom_getprop(nodeid, "board#", (caddr_t)&board) < 0) {
		panic("Cannot find board# property in mc node %x", nodeid);
	}
	if (prom_getprop(nodeid, "sb-mem-ranges", (caddr_t)&mem_range) < 0) {
		panic("Cannot find sb-mem-ranges property in mc node %x",
		    nodeid);
	}
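	/*
	 * The slice base address selects the memnode; e.g. a board whose
	 * memory starts at 0x4000000000 (256 GB) maps to memnode 1.
	 */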
	memnode = mem_range.addr >> OPL_MC_MEMBOARD_SHIFT;
	plat_assign_lgrphand_to_mem_node(board, memnode);
}

/*
 * Return the platform handle for the lgroup containing the given CPU
 *
 * For OPL, lgroup platform handle == board #.
 */

extern int mpo_disabled;
extern lgrp_handle_t lgrp_default_handle;

lgrp_handle_t
plat_lgrp_cpu_to_hand(processorid_t id)
{
	lgrp_handle_t plathand;

	/*
	 * Return the real platform handle for the CPU until
	 * such time as we know that MPO should be disabled.
	 * At that point, we set the "mpo_disabled" flag to true,
	 * and from that point on, return the default handle.
	 *
	 * By the time we know that MPO should be disabled, the
	 * first CPU will have already been added to a leaf
	 * lgroup, but that's ok. The common lgroup code will
	 * double check that the boot CPU is in the correct place,
	 * and in the case where mpo should be disabled, will move
	 * it to the root if necessary.
	 */
	if (mpo_disabled) {
		/* If MPO is disabled, return the default (UMA) handle */
		plathand = lgrp_default_handle;
	} else
		plathand = (lgrp_handle_t)LSB_ID(id);
	return (plathand);
}

/*
 * Platform specific lgroup initialization
 */
void
plat_lgrp_init(void)
{
	extern uint32_t lgrp_expand_proc_thresh;
	extern uint32_t lgrp_expand_proc_diff;

	/*
	 * Set tuneables for the OPL architecture
	 *
	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
	 * this process is currently running on before considering
	 * expanding threads to another lgroup.
	 *
	 * lgrp_expand_proc_diff determines how much less the remote lgroup
	 * must be loaded before expanding to it.
	 *
	 * Since remote latencies can be costly, attempt to keep 3 threads
	 * within the same lgroup before expanding to the next lgroup.
	 */
	lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX * 3;
	lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
}

/*
 * Platform notification of lgroup (re)configuration changes
 */
/*ARGSUSED*/
void
plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
{
	update_membounds_t *umb;
	lgrp_config_mem_rename_t lmr;
	int sbd, tbd;
	lgrp_handle_t hand, shand, thand;
	int mnode, snode, tnode;
	pfn_t start, end;

	if (mpo_disabled)
		return;

	switch (evt) {

	case LGRP_CONFIG_MEM_ADD:
		/*
		 * Establish the lgroup handle to memnode translation.
		 */
		umb = (update_membounds_t *)arg;

		hand = umb->u_board;
		mnode = plat_pfn_to_mem_node(umb->u_base >> MMU_PAGESHIFT);
		plat_assign_lgrphand_to_mem_node(hand, mnode);

		break;

	case LGRP_CONFIG_MEM_DEL:
		/*
		 * Special handling for possible memory holes.
		 */
		umb = (update_membounds_t *)arg;
		hand = umb->u_board;
		if ((mnode = plat_lgrphand_to_mem_node(hand)) != -1) {
			if (mem_node_config[mnode].exists) {
				start = mem_node_config[mnode].physbase;
				end = mem_node_config[mnode].physmax;
				mem_node_pre_del_slice(start, end);
				mem_node_post_del_slice(start, end, 0);
			}
		}

		break;

	case LGRP_CONFIG_MEM_RENAME:
		/*
		 * During a DR copy-rename operation, all of the memory
		 * on one board is moved to another board -- but the
		 * addresses/pfns and memnodes don't change. This means
		 * the memory has changed locations without changing identity.
		 *
		 * Source is where we are copying from and target is where we
		 * are copying to.  After source memnode is copied to target
		 * memnode, the physical addresses of the target memnode are
		 * renamed to match what the source memnode had.  Then target
		 * memnode can be removed and source memnode can take its
		 * place.
		 *
		 * To do this, swap the lgroup handle to memnode mappings for
		 * the boards, so target lgroup will have source memnode and
		 * source lgroup will have empty target memnode which is where
		 * its memory will go (if any is added to it later).
		 *
		 * Then source memnode needs to be removed from its lgroup
		 * and added to the target lgroup where the memory was living
		 * but under a different name/memnode.  The memory was in the
		 * target memnode and now lives in the source memnode with
		 * different physical addresses even though it is the same
		 * memory.
		 */
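		/*
		 * arg packs the source board number in bits 15:0 and
		 * the target board number in bits 31:16.
		 */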
		sbd = arg & 0xffff;
		tbd = (arg & 0xffff0000) >> 16;
		shand = sbd;
		thand = tbd;
		snode = plat_lgrphand_to_mem_node(shand);
		tnode = plat_lgrphand_to_mem_node(thand);

		/*
		 * Special handling for possible memory holes.
		 */
		if (tnode != -1 && mem_node_config[tnode].exists) {
			start = mem_node_config[tnode].physbase;
			end = mem_node_config[tnode].physmax;
			mem_node_pre_del_slice(start, end);
			mem_node_post_del_slice(start, end, 0);
		}

		plat_assign_lgrphand_to_mem_node(thand, snode);
		plat_assign_lgrphand_to_mem_node(shand, tnode);

		lmr.lmem_rename_from = shand;
		lmr.lmem_rename_to = thand;

		/*
		 * Remove the source memnode of the copy rename from its
		 * lgroup and add it to its new target lgroup.
		 */
		lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
		    (uintptr_t)&lmr);

		break;

	default:
		break;
	}
}

/*
 * Return the latency between "from" and "to" lgroups
 *
 * This latency number can only be used for relative comparison
 * between lgroups on the running system, cannot be used across platforms,
 * and may not reflect the actual latency.  It is platform and implementation
 * specific, so the platform gets to decide its value.  It would be nice if
 * the number was at least proportional to make comparisons more meaningful.
 *
 * NOTE: The numbers below are supposed to be load latencies for uncached
 * memory divided by 10.
 *
 * XXX latency values are for Columbus, not Columbus2.  This should be
 * fixed when we know the actual numbers for Columbus2.
 */
int
plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to)
{
	/*
	 * Return the minimum remote latency when there are more than two
	 * lgroups (i.e., more than just the root and one child) and we are
	 * measuring latency between two different lgroups, or the root
	 * is involved.
	 */
	if (lgrp_optimizations() && (from != to ||
	    from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE))
		return (27);
	else
		return (25);
}

/*
 * Return platform handle for root lgroup
 */
lgrp_handle_t
plat_lgrp_root_hand(void)
{
	if (mpo_disabled)
		return (lgrp_default_handle);

	return (LGRP_DEFAULT_HANDLE);
}

/*ARGSUSED*/
void
plat_freelist_process(int mnode)
{
}

void
load_platform_drivers(void)
{
	(void) i_ddi_attach_pseudo_node("dr");
}

/*
 * No platform drivers on this platform
 */
char *platform_module_list[] = {
	(char *)0
};

/*ARGSUSED*/
void
plat_tod_fault(enum tod_fault_type tod_bad)
{
}

/*ARGSUSED*/
void
cpu_sgn_update(ushort_t sgn, uchar_t state, uchar_t sub_state, int cpuid)
{
	static void (*scf_panic_callback)(int);
	static void (*scf_shutdown_callback)(int);

	/*
	 * This is for notifying the SCF of a system panic/shutdown.
	 * On shutdown and panic, the appropriate SCF callback
	 * function must be called:
	 *  <SCF callback functions>
	 *   scf_panic_callb()   : panicsys()->panic_quiesce_hw()
	 *   scf_shutdown_callb(): halt() or power_down() or reboot_machine()
	 * In these cases cpuid is -1 and state is SIGST_EXIT.
	 */
	if (state == SIGST_EXIT && cpuid == -1) {

		/*
		 * Find the symbols for the SCF panic and shutdown
		 * callback routines in the SCF driver.
		 */
		if (scf_panic_callback == NULL)
			scf_panic_callback = (void (*)(int))
			    modgetsymvalue("scf_panic_callb", 0);
		if (scf_shutdown_callback == NULL)
			scf_shutdown_callback = (void (*)(int))
			    modgetsymvalue("scf_shutdown_callb", 0);

		switch (sub_state) {
		case SIGSUBST_PANIC:
			if (scf_panic_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_panic_callb not found\n");
				return;
			}
			scf_panic_callback(SIGSUBST_PANIC);
			break;

		case SIGSUBST_HALT:
			if (scf_shutdown_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_shutdown_callb not found\n");
				return;
			}
			scf_shutdown_callback(SIGSUBST_HALT);
			break;

		case SIGSUBST_ENVIRON:
			if (scf_shutdown_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_shutdown_callb not found\n");
				return;
			}
			scf_shutdown_callback(SIGSUBST_ENVIRON);
			break;

		case SIGSUBST_REBOOT:
			if (scf_shutdown_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_shutdown_callb not found\n");
				return;
			}
			scf_shutdown_callback(SIGSUBST_REBOOT);
			break;
		}
	}
}

/*ARGSUSED*/
int
plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
    int flt_in_memory, ushort_t flt_status,
    char *buf, int buflen, int *lenp)
{
	/*
	 * Check whether this is a memory error.
	 */
	if (flt_in_memory) {
		if (opl_get_mem_unum != NULL) {
			return (opl_get_mem_unum(synd_code, flt_addr,
			    buf, buflen, lenp));
		} else {
			return (ENOTSUP);
		}
	} else {
		return (ENOTSUP);
	}
}

/*ARGSUSED*/
int
plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
{
	int	plen;
	int	ret = 0;
	char	model[20];
	int	sb;
	pnode_t	node;

	/* determine the platform model once */
	if (plat_model == -1) {
		plat_model = MODEL_DC; /* Default model */
		node = prom_rootnode();
		plen = prom_getproplen(node, "model");
		if (plen > 0 && plen < sizeof (model)) {
			(void) prom_getprop(node, "model", model);
			model[plen] = '\0';
			if (strcmp(model, "FF1") == 0)
				plat_model = MODEL_FF1;
			else if (strcmp(model, "FF2") == 0)
				plat_model = MODEL_FF2;
			else if (strncmp(model, "DC", 2) == 0)
				plat_model = MODEL_DC;
		}
	}

	sb = opl_get_physical_board(LSB_ID(cpuid));
	if (sb == -1) {
		return (ENXIO);
	}

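	/*
	 * Build the model-specific unum, e.g. "/MBU_A/CPUM0" on FF1 or
	 * "/CMU03/CPUM1" on a DC system.
	 */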
	switch (plat_model) {
	case MODEL_FF1:
		plen = snprintf(buf, buflen, "/%s/CPUM%d", "MBU_A",
		    CHIP_ID(cpuid) / 2);
		break;

	case MODEL_FF2:
		plen = snprintf(buf, buflen, "/%s/CPUM%d", "MBU_B",
		    CHIP_ID(cpuid) / 2);
		break;

	case MODEL_DC:
		plen = snprintf(buf, buflen, "/%s%02d/CPUM%d", "CMU", sb,
		    CHIP_ID(cpuid));
		break;

	default:
		/* This should never happen */
		return (ENODEV);
	}

	if (plen >= buflen) {
		ret = ENOSPC;
	} else {
		if (lenp)
			*lenp = strlen(buf);
	}
	return (ret);
}

#define	SCF_PUTINFO(f, s, p)	\
	f(KEY_ESCF, 0x01, 0, s, p)
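
/*
 * SCF_PUTINFO(f, s, p) expands to f(KEY_ESCF, 0x01, 0, s, p): send a
 * payload p of s bytes to the SCF through the resolved
 * scf_service_putinfo() routine.
 */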
void
plat_nodename_set(void)
{
	void *datap;
	static int (*scf_service_function)(uint32_t, uint8_t,
	    uint32_t, uint32_t, void *);
	int counter = 5;

	/*
	 * Find the symbol for the SCF put routine in the driver.
	 */
	if (scf_service_function == NULL)
		scf_service_function =
		    (int (*)(uint32_t, uint8_t, uint32_t, uint32_t, void *))
		    modgetsymvalue("scf_service_putinfo", 0);

	/*
	 * If the symbol was not found, log a note (but not to the
	 * console) and return.
	 */
	if (scf_service_function == NULL) {
		cmn_err(CE_NOTE,
		    "!plat_nodename_set: scf_service_putinfo not found\n");
		return;
	}

	datap =
	    (struct utsname *)kmem_zalloc(sizeof (struct utsname), KM_SLEEP);

	if (datap == NULL) {
		return;
	}

	bcopy((struct utsname *)&utsname,
	    (struct utsname *)datap, sizeof (struct utsname));

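	/*
	 * Hand the utsname data to the SCF, retrying while the service
	 * reports EBUSY and sleeping 10 seconds between attempts.
	 */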
	while ((SCF_PUTINFO(scf_service_function,
	    sizeof (struct utsname), datap) == EBUSY) && (counter-- > 0)) {
		delay(10 * drv_usectohz(1000000));
	}
	if (counter < 0)
		cmn_err(CE_NOTE,
		    "!plat_nodename_set: "
		    "scf_service_putinfo not responding\n");

	kmem_free(datap, sizeof (struct utsname));
}

caddr_t	efcode_vaddr = NULL;

/*
 * Preallocate enough memory for fcode claims.
 */

caddr_t
efcode_alloc(caddr_t alloc_base)
{
	caddr_t efcode_alloc_base = (caddr_t)roundup((uintptr_t)alloc_base,
	    MMU_PAGESIZE);
	caddr_t vaddr;

	/*
	 * allocate the physical memory for the Oberon fcode.
	 */
	if ((vaddr = (caddr_t)BOP_ALLOC(bootops, efcode_alloc_base,
	    efcode_size, MMU_PAGESIZE)) == NULL)
		cmn_err(CE_PANIC, "Cannot allocate Efcode Memory");

	efcode_vaddr = vaddr;

	return (efcode_alloc_base + efcode_size);
}

caddr_t
plat_startup_memlist(caddr_t alloc_base)
{
	caddr_t tmp_alloc_base;

	tmp_alloc_base = efcode_alloc(alloc_base);
	tmp_alloc_base =
	    (caddr_t)roundup((uintptr_t)tmp_alloc_base, ecache_alignsize);
	return (tmp_alloc_base);
}


void
startup_platform(void)
{
}

int
plat_get_mem_sid(char *unum, char *buf, int buflen, int *lenp)
{
	if (opl_get_mem_sid == NULL) {
		return (ENOTSUP);
	}
	return (opl_get_mem_sid(unum, buf, buflen, lenp));
}

int
plat_get_mem_offset(uint64_t paddr, uint64_t *offp)
{
	if (opl_get_mem_offset == NULL) {
		return (ENOTSUP);
	}
	return (opl_get_mem_offset(paddr, offp));
}

int
plat_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *addrp)
{
	if (opl_get_mem_addr == NULL) {
		return (ENOTSUP);
	}
	return (opl_get_mem_addr(unum, sid, offset, addrp));
}
}
935