xref: /titanic_50/usr/src/uts/sun4u/opl/os/opl.c (revision 60c807700988885656502665e0cf8afd4b4346f7)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/cpuvar.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/promif.h>
#include <sys/platform_module.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/machsystm.h>
#include <sys/bootconf.h>
#include <sys/nvpair.h>
#include <sys/kobj.h>
#include <sys/mem_cage.h>
#include <sys/opl.h>
#include <sys/scfd/scfostoescf.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/utsname.h>
#include <sys/ddi.h>
#include <sys/sunndi.h>
#include <sys/lgrp.h>
#include <sys/memnode.h>
#include <vm/vm_dep.h>

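/*
 * Expected to be filled in by the OPL memory-controller driver when it
 * attaches (an assumption based on its use in plat_get_mem_unum() below).
 */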
int (*opl_get_mem_unum)(int, uint64_t, char *, int, int *);

/* Memory for fcode claims: 16 KB (0x4000) per possible IO unit */
#define	EFCODE_SIZE	(OPL_MAX_BOARDS * OPL_MAX_IO_UNITS_PER_BOARD * 0x4000)
int efcode_size = EFCODE_SIZE;

#define	OPL_MC_MEMBOARD_SHIFT 38	/* Boards on 256GB boundary */

/* Set the maximum number of boards for DR */
int opl_boards = OPL_MAX_BOARDS;

void sgn_update_all_cpus(ushort_t, uchar_t, uchar_t);

extern int tsb_lgrp_affinity;

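/*
 * One spare TSB per PCI channel on every board; set_platform_tsb_spares()
 * below caps this at MAX_UPA.
 */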
int opl_tsb_spares = (OPL_MAX_BOARDS) * (OPL_MAX_PCICH_UNITS_PER_BOARD) *
	(OPL_MAX_TSBS_PER_PCICH);

pgcnt_t opl_startup_cage_size = 0;

static struct memlist *opl_memlist_per_board(struct memlist *ml);

int
set_platform_max_ncpus(void)
{
	return (OPL_MAX_CPU_PER_BOARD * OPL_MAX_BOARDS);
}

int
set_platform_tsb_spares(void)
{
	return (MIN(opl_tsb_spares, MAX_UPA));
}

#pragma weak mmu_init_large_pages
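/*
 * The weak binding lets set_platform_defaults() test the symbol's
 * address and call mmu_init_large_pages() only when the running mmu
 * module actually provides it.
 */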

void
set_platform_defaults(void)
{
	extern char *tod_module_name;
	extern void cpu_sgn_update(ushort_t, uchar_t, uchar_t, int);
	extern int ts_dispatch_extended;
	extern void mmu_init_large_pages(size_t);

	/* Set the CPU signature function pointer */
	cpu_sgn_func = cpu_sgn_update;

	/* Set appropriate tod module for OPL platform */
	ASSERT(tod_module_name == NULL);
	tod_module_name = "todopl";

	/*
	 * Use the alternate TS dispatch table, which is better tuned
	 * for large servers.
	 */
	if (ts_dispatch_extended == -1)
		ts_dispatch_extended = 1;

	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    (mmu_ism_pagesize != MMU_PAGESIZE32M)) {
		if (&mmu_init_large_pages)
			mmu_init_large_pages(mmu_ism_pagesize);
	}

	tsb_lgrp_affinity = 1;
}

/*
 * Convert a logical board number to a physical one.
 */

#define	LSBPROP		"board#"
#define	PSBPROP		"physical-board#"
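/*
 * Both properties live on the per-board "pseudo-mc" nodes, which is
 * where the lookups below search for them.
 */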

int
opl_get_physical_board(int id)
{
	dev_info_t	*root_dip, *dip = NULL;
	char		*dname = NULL;
	int		circ;

	pnode_t		pnode;
	char		pname[MAXSYSNAME] = {0};

	int		lsb_id;	/* Logical System Board ID */
	int		psb_id;	/* Physical System Board ID */

	/*
	 * This function is called at an early stage of boot, before the
	 * kernel device tree has been initialized, and also later once
	 * the device tree is up.  We want to try the fast track first.
	 */
	root_dip = ddi_root_node();
	if (root_dip) {
		/* Get from devinfo node */
		ndi_devi_enter(root_dip, &circ);
		for (dip = ddi_get_child(root_dip); dip;
		    dip = ddi_get_next_sibling(dip)) {

			dname = ddi_node_name(dip);
			if (strncmp(dname, "pseudo-mc", 9) != 0)
				continue;

			if ((lsb_id = (int)ddi_getprop(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, LSBPROP, -1)) == -1)
				continue;

			if (id == lsb_id) {
				if ((psb_id = (int)ddi_getprop(DDI_DEV_T_ANY,
				    dip, DDI_PROP_DONTPASS, PSBPROP, -1))
				    == -1) {
					ndi_devi_exit(root_dip, circ);
					return (-1);
				} else {
					ndi_devi_exit(root_dip, circ);
					return (psb_id);
				}
			}
		}
		ndi_devi_exit(root_dip, circ);
	}

	/*
	 * Either we do not have the kernel device tree, or we did not
	 * find the node for some reason (say, the kernel device tree
	 * was modified); try the OBP tree instead.
	 */
	pnode = prom_rootnode();
	for (pnode = prom_childnode(pnode); pnode;
	    pnode = prom_nextnode(pnode)) {

		if ((prom_getprop(pnode, "name", (caddr_t)pname) == -1) ||
		    (strncmp(pname, "pseudo-mc", 9) != 0))
			continue;

		if (prom_getprop(pnode, LSBPROP, (caddr_t)&lsb_id) == -1)
			continue;

		if (id == lsb_id) {
			if (prom_getprop(pnode, PSBPROP,
			    (caddr_t)&psb_id) == -1) {
				return (-1);
			} else {
				return (psb_id);
			}
		}
	}

	return (-1);
}

/*
 * For OPL it's possible that memory from two or more successive boards
 * will be contiguous across the boards, and therefore represented as a
 * single chunk.
 * This function splits such chunks along the board boundaries.
 */
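/*
 * For example (illustrative numbers): with 256 GB board slices, an
 * entry { address = 0x3FC0000000 (255 GB), size = 2 GB } crosses a
 * board boundary and comes back as { 0x3FC0000000, 1 GB } followed by
 * { 0x4000000000, 1 GB }.
 */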
static struct memlist *
opl_memlist_per_board(struct memlist *ml)
{
	uint64_t ssize, low, high, boundary;
	struct memlist *head, *tail, *new;

	ssize = (1ull << OPL_MC_MEMBOARD_SHIFT);

	head = tail = NULL;

	for (; ml; ml = ml->next) {
		low  = (uint64_t)ml->address;
		high = low + (uint64_t)(ml->size);
		while (low < high) {
			boundary = roundup(low + 1, ssize);
			boundary = MIN(high, boundary);
			new = kmem_zalloc(sizeof (struct memlist), KM_SLEEP);
			new->address = low;
			new->size = boundary - low;
			if (head == NULL)
				head = new;
			if (tail) {
				tail->next = new;
				new->prev = tail;
			}
			tail = new;
			low = boundary;
		}
	}
	return (head);
}

void
set_platform_cage_params(void)
{
	extern pgcnt_t total_pages;
	extern struct memlist *phys_avail;
	struct memlist *ml, *tml;
	int ret;

	if (kernel_cage_enable) {
		pgcnt_t preferred_cage_size;

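		/*
		 * Use at least 1/256 (about 0.4%) of all pages, unless a
		 * larger startup cage size was requested.
		 */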
		preferred_cage_size =
		    MAX(opl_startup_cage_size, total_pages / 256);

		ml = opl_memlist_per_board(phys_avail);

		kcage_range_lock();
		/*
		 * Note: we are assuming that POST has loaded the whole
		 * show into the high end of memory.  Having taken this
		 * leap, we copy the whole of phys_avail to the glist and
		 * arrange for the cage to grow downward (descending pfns).
		 */
		ret = kcage_range_init(ml, 1);

		/* free the memlist */
		do {
			tml = ml->next;
			kmem_free(ml, sizeof (struct memlist));
			ml = tml;
		} while (ml != NULL);

		if (ret == 0)
			kcage_init(preferred_cage_size);
		kcage_range_unlock();
	}

	if (kcage_on)
		cmn_err(CE_NOTE, "!DR Kernel Cage is ENABLED");
	else
		cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
}

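/*
 * CPU power on/off is delegated to the DR machine driver (drmach);
 * kobj_getsymvalue() resolves its entry points at run time so the
 * driver stays optional.
 */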
/*ARGSUSED*/
int
plat_cpu_poweron(struct cpu *cp)
{
	int (*opl_cpu_poweron)(struct cpu *) = NULL;

	opl_cpu_poweron =
	    (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweron", 0);

	if (opl_cpu_poweron == NULL)
		return (ENOTSUP);
	else
		return ((opl_cpu_poweron)(cp));
}

/*ARGSUSED*/
int
plat_cpu_poweroff(struct cpu *cp)
{
	int (*opl_cpu_poweroff)(struct cpu *) = NULL;

	opl_cpu_poweroff =
	    (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweroff", 0);

	if (opl_cpu_poweroff == NULL)
		return (ENOTSUP);
	else
		return ((opl_cpu_poweroff)(cp));
}

int
plat_max_boards(void)
{
	return (OPL_MAX_BOARDS);
}

int
plat_max_cpu_units_per_board(void)
{
	return (OPL_MAX_CPU_PER_BOARD);
}

int
plat_max_mem_units_per_board(void)
{
	return (OPL_MAX_MEM_UNITS_PER_BOARD);
}

int
plat_max_io_units_per_board(void)
{
	return (OPL_MAX_IO_UNITS_PER_BOARD);
}

int
plat_max_cmp_units_per_board(void)
{
	return (OPL_MAX_CMP_UNITS_PER_BOARD);
}

int
plat_max_core_units_per_board(void)
{
	return (OPL_MAX_CORE_UNITS_PER_BOARD);
}

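/*
 * With mem_node_pfn_shift set to OPL_MC_MEMBOARD_SHIFT - MMU_PAGESHIFT
 * (see plat_build_mem_nodes() below), this maps each 256 GB board
 * slice to its own memnode.
 */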
int
plat_pfn_to_mem_node(pfn_t pfn)
{
	return (pfn >> mem_node_pfn_shift);
}

/* ARGSUSED */
void
plat_build_mem_nodes(u_longlong_t *list, size_t nelems)
{
	size_t	elem;
	pfn_t	basepfn;
	pgcnt_t	npgs;
	uint64_t	boundary, ssize;
	uint64_t	low, high;

	/*
	 * OPL mem slices are always aligned on a 256GB boundary.
	 */
	mem_node_pfn_shift = OPL_MC_MEMBOARD_SHIFT - MMU_PAGESHIFT;
	mem_node_physalign = 0;

	/*
	 * Boot install lists are arranged <addr, len>, <addr, len>, ...
	 */
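	/*
	 * For example, nelems == 4 describes two ranges:
	 * { list[0], list[1] } and { list[2], list[3] }.
	 */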
	ssize = (1ull << OPL_MC_MEMBOARD_SHIFT);
	for (elem = 0; elem < nelems; elem += 2) {
		low  = (uint64_t)list[elem];
		high = low + (uint64_t)(list[elem + 1]);
		while (low < high) {
			boundary = roundup(low + 1, ssize);
			boundary = MIN(high, boundary);
			basepfn = btop(low);
			npgs = btop(boundary - low);
			mem_node_add_slice(basepfn, basepfn + npgs - 1);
			low = boundary;
		}
	}
}

/*
 * Associate the memory slice described by an mc (pseudo-mc) node with
 * the lgroup/memnode of its board at boot time.
 */
void
plat_fill_mc(pnode_t nodeid)
{
	int board;
	int memnode;
	struct {
		uint64_t	addr;
		uint64_t	size;
	} mem_range;

	if (prom_getprop(nodeid, "board#", (caddr_t)&board) < 0) {
		panic("Cannot find board# property in mc node %x", nodeid);
	}
	if (prom_getprop(nodeid, "sb-mem-ranges", (caddr_t)&mem_range) < 0) {
		panic("Cannot find sb-mem-ranges property in mc node %x",
		    nodeid);
	}
	memnode = mem_range.addr >> OPL_MC_MEMBOARD_SHIFT;
	plat_assign_lgrphand_to_mem_node(board, memnode);
}

/*
 * Return the platform handle for the lgroup containing the given CPU
 *
 * For OPL, lgroup platform handle == board #.
 */

extern int mpo_disabled;
extern lgrp_handle_t lgrp_default_handle;

lgrp_handle_t
plat_lgrp_cpu_to_hand(processorid_t id)
{
	lgrp_handle_t plathand;

	/*
	 * Return the real platform handle for the CPU until
	 * such time as we know that MPO should be disabled.
	 * At that point, we set the "mpo_disabled" flag to true,
	 * and from that point on, return the default handle.
	 *
	 * By the time we know that MPO should be disabled, the
	 * first CPU will have already been added to a leaf
	 * lgroup, but that's ok. The common lgroup code will
	 * double check that the boot CPU is in the correct place,
	 * and in the case where mpo should be disabled, will move
	 * it to the root if necessary.
	 */
	if (mpo_disabled) {
		/* If MPO is disabled, return the default (UMA) handle */
		plathand = lgrp_default_handle;
	} else
		plathand = (lgrp_handle_t)LSB_ID(id);
	return (plathand);
}

/*
 * Platform specific lgroup initialization
 */
void
plat_lgrp_init(void)
{
	extern uint32_t lgrp_expand_proc_thresh;
	extern uint32_t lgrp_expand_proc_diff;

	/*
	 * Set tuneables for the OPL architecture
	 *
	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
	 * this process is currently running on before considering
	 * expanding threads to another lgroup.
	 *
	 * lgrp_expand_proc_diff determines how much less the remote lgroup
	 * must be loaded before expanding to it.
	 *
	 * Since remote latencies can be costly, attempt to keep 3 threads
	 * within the same lgroup before expanding to the next lgroup.
	 */
	lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX * 3;
	lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
}

/*
 * Platform notification of lgroup (re)configuration changes
 */
/*ARGSUSED*/
void
plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
{
	update_membounds_t *umb;
	lgrp_config_mem_rename_t lmr;
	int sbd, tbd;
	lgrp_handle_t hand, shand, thand;
	int mnode, snode, tnode;
	pfn_t start, end;

	if (mpo_disabled)
		return;

	switch (evt) {

	case LGRP_CONFIG_MEM_ADD:
		/*
		 * Establish the lgroup handle to memnode translation.
		 */
		umb = (update_membounds_t *)arg;

		hand = umb->u_board;
		mnode = plat_pfn_to_mem_node(umb->u_base >> MMU_PAGESHIFT);
		plat_assign_lgrphand_to_mem_node(hand, mnode);

		break;

	case LGRP_CONFIG_MEM_DEL:
		/*
		 * Special handling for possible memory holes.
		 */
		umb = (update_membounds_t *)arg;
		hand = umb->u_board;
		if ((mnode = plat_lgrphand_to_mem_node(hand)) != -1) {
			if (mem_node_config[mnode].exists) {
				start = mem_node_config[mnode].physbase;
				end = mem_node_config[mnode].physmax;
				mem_node_pre_del_slice(start, end);
				mem_node_post_del_slice(start, end, 0);
			}
		}

		break;

	case LGRP_CONFIG_MEM_RENAME:
		/*
		 * During a DR copy-rename operation, all of the memory
		 * on one board is moved to another board -- but the
		 * addresses/pfns and memnodes don't change. This means
		 * the memory has changed locations without changing identity.
		 *
		 * Source is where we are copying from and target is where we
		 * are copying to.  After source memnode is copied to target
		 * memnode, the physical addresses of the target memnode are
		 * renamed to match what the source memnode had.  Then target
		 * memnode can be removed and source memnode can take its
		 * place.
		 *
		 * To do this, swap the lgroup handle to memnode mappings for
		 * the boards, so target lgroup will have source memnode and
		 * source lgroup will have empty target memnode which is where
		 * its memory will go (if any is added to it later).
		 *
		 * Then source memnode needs to be removed from its lgroup
		 * and added to the target lgroup where the memory was living
		 * but under a different name/memnode.  The memory was in the
		 * target memnode and now lives in the source memnode with
		 * different physical addresses even though it is the same
		 * memory.
		 */
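		/* arg packs the source board in bits 0-15, target in 16-31 */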
		sbd = arg & 0xffff;
		tbd = (arg & 0xffff0000) >> 16;
		shand = sbd;
		thand = tbd;
		snode = plat_lgrphand_to_mem_node(shand);
		tnode = plat_lgrphand_to_mem_node(thand);

		/*
		 * Special handling for possible memory holes.
		 */
		if (tnode != -1 && mem_node_config[tnode].exists) {
			start = mem_node_config[tnode].physbase;
			end = mem_node_config[tnode].physmax;
			mem_node_pre_del_slice(start, end);
			mem_node_post_del_slice(start, end, 0);
		}

		plat_assign_lgrphand_to_mem_node(thand, snode);
		plat_assign_lgrphand_to_mem_node(shand, tnode);

		lmr.lmem_rename_from = shand;
		lmr.lmem_rename_to = thand;

		/*
		 * Remove source memnode of copy rename from its lgroup
		 * and add it to its new target lgroup
		 */
		lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
		    (uintptr_t)&lmr);

		break;

	default:
		break;
	}
}

/*
 * Return latency between "from" and "to" lgroups
 *
 * This latency number can only be used for relative comparison
 * between lgroups on the running system; it cannot be used across
 * platforms and may not reflect the actual latency.  It is platform
 * and implementation specific, so the platform gets to decide its
 * value.  It would be nice if the number was at least proportional,
 * to make comparisons more meaningful.
 * NOTE: The numbers below are supposed to be load latencies for
 * uncached memory divided by 10.
 *
 * XXX latency values for Columbus, not Columbus2.  Should be fixed
 * later when we know the actual numbers for Columbus2.
 */
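/* Per the note above, 25 and 27 correspond to roughly 250 ns and 270 ns. */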
int
plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to)
{
	/*
	 * Return min remote latency when there are more than two lgroups
	 * (root and child) and getting latency between two different lgroups
	 * or root is involved
	 */
	if (lgrp_optimizations() && (from != to ||
	    from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE))
		return (27);
	else
		return (25);
}

/*
 * Return platform handle for root lgroup
 */
lgrp_handle_t
plat_lgrp_root_hand(void)
{
	if (mpo_disabled)
		return (lgrp_default_handle);

	return (LGRP_DEFAULT_HANDLE);
}

/*ARGSUSED*/
void
plat_freelist_process(int mnode)
{
}

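/*
 * Attach the "dr" pseudo-device node so the dynamic reconfiguration
 * driver is available at boot.
 */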
void
load_platform_drivers(void)
{
	(void) i_ddi_attach_pseudo_node("dr");
}

/*
 * No platform drivers on this platform
 */
char *platform_module_list[] = {
	(char *)0
};

/*ARGSUSED*/
void
plat_tod_fault(enum tod_fault_type tod_bad)
{
}

/*ARGSUSED*/
void
cpu_sgn_update(ushort_t sgn, uchar_t state, uchar_t sub_state, int cpuid)
{
	static void (*scf_panic_callback)(int);
	static void (*scf_shutdown_callback)(int);

	/*
	 * This is for notifying SCF of a system panic or shutdown.
	 * In the shutdown and panic cases the SCF callback function
	 * should be called:
	 *  <SCF callback functions>
	 *   scf_panic_callb()   : panicsys()->panic_quiesce_hw()
	 *   scf_shutdown_callb(): halt() or power_down() or reboot_machine()
	 * cpuid should be -1 and state should be SIGST_EXIT.
	 */
	if (state == SIGST_EXIT && cpuid == -1) {

		/*
		 * Find the symbols for the SCF callback routines in the
		 * SCF driver.
		 */
		if (scf_panic_callback == NULL)
			scf_panic_callback = (void (*)(int))
			    modgetsymvalue("scf_panic_callb", 0);
		if (scf_shutdown_callback == NULL)
			scf_shutdown_callback = (void (*)(int))
			    modgetsymvalue("scf_shutdown_callb", 0);

		switch (sub_state) {
		case SIGSUBST_PANIC:
			if (scf_panic_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_panic_callb not found\n");
				return;
			}
			scf_panic_callback(SIGSUBST_PANIC);
			break;

		case SIGSUBST_HALT:
			if (scf_shutdown_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_shutdown_callb not found\n");
				return;
			}
			scf_shutdown_callback(SIGSUBST_HALT);
			break;

		case SIGSUBST_ENVIRON:
			if (scf_shutdown_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_shutdown_callb not found\n");
				return;
			}
			scf_shutdown_callback(SIGSUBST_ENVIRON);
			break;

		case SIGSUBST_REBOOT:
			if (scf_shutdown_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_shutdown_callb not found\n");
				return;
			}
			scf_shutdown_callback(SIGSUBST_REBOOT);
			break;
		}
	}
}

/*ARGSUSED*/
int
plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
    int flt_in_memory, ushort_t flt_status,
    char *buf, int buflen, int *lenp)
{
	/*
	 * Check whether it's a memory error.
	 */
	if (flt_in_memory) {
		if (opl_get_mem_unum != NULL) {
			return (opl_get_mem_unum(synd_code, flt_addr,
			    buf, buflen, lenp));
		} else {
			return (ENOTSUP);
		}
	} else {
		return (ENOTSUP);
	}
}

/*ARGSUSED*/
int
plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
{
	int sb;

	sb = opl_get_physical_board(LSB_ID(cpuid));
	if (sb == -1) {
		return (ENXIO);
	}

	if (snprintf(buf, buflen, "CMU%d", sb) >= buflen) {
		return (ENOSPC);
	} else {
		if (lenp)
			*lenp = strlen(buf);
		return (0);
	}
}

#define	SCF_PUTINFO(f, s, p)	\
	f(KEY_ESCF, 0x01, 0, s, p)
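/*
 * Wrapper around the SCF driver's scf_service_putinfo() routine;
 * KEY_ESCF and the fixed 0x01/0 arguments select the service for this
 * request, while s and p supply the payload length and data.
 */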
void
plat_nodename_set(void)
{
	void *datap;
	static int (*scf_service_function)(uint32_t, uint8_t,
	    uint32_t, uint32_t, void *);
	int counter = 5;

	/*
	 * Find the symbol for the SCF put routine in the SCF driver.
	 */
	if (scf_service_function == NULL)
		scf_service_function =
		    (int (*)(uint32_t, uint8_t, uint32_t, uint32_t, void *))
		    modgetsymvalue("scf_service_putinfo", 0);

	/*
	 * If the symbol was found, call it.  Otherwise, log a note (but not to
	 * the console).
	 */

	if (scf_service_function == NULL) {
		cmn_err(CE_NOTE,
		    "!plat_nodename_set: scf_service_putinfo not found\n");
		return;
	}

	datap =
	    (struct utsname *)kmem_zalloc(sizeof (struct utsname), KM_SLEEP);

	if (datap == NULL) {
		return;
	}

	bcopy((struct utsname *)&utsname,
	    (struct utsname *)datap, sizeof (struct utsname));

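	/*
	 * Retry for up to six attempts, ten seconds apart, while the
	 * SCF interface reports EBUSY.
	 */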
	while ((SCF_PUTINFO(scf_service_function,
	    sizeof (struct utsname), datap) == EBUSY) && (counter-- > 0)) {
		delay(10 * drv_usectohz(1000000));
	}
	if (counter < 0)
		cmn_err(CE_NOTE,
		    "!plat_nodename_set: "
		    "scf_service_putinfo not responding\n");

	kmem_free(datap, sizeof (struct utsname));
}

caddr_t	efcode_vaddr = NULL;

/*
 * Preallocate enough memory for fcode claims.
 */
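/*
 * The allocation goes through the boot-time allocator (BOP_ALLOC)
 * because this runs during startup, before the kernel VM system is
 * available.
 */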

caddr_t
efcode_alloc(caddr_t alloc_base)
{
	caddr_t efcode_alloc_base = (caddr_t)roundup((uintptr_t)alloc_base,
	    MMU_PAGESIZE);
	caddr_t vaddr;

	/*
	 * allocate the physical memory for the Oberon fcode.
	 */
	if ((vaddr = (caddr_t)BOP_ALLOC(bootops, efcode_alloc_base,
	    efcode_size, MMU_PAGESIZE)) == NULL)
		cmn_err(CE_PANIC, "Cannot allocate Efcode Memory");

	efcode_vaddr = vaddr;

	return (efcode_alloc_base + efcode_size);
}

caddr_t
plat_startup_memlist(caddr_t alloc_base)
{
	caddr_t tmp_alloc_base;

	tmp_alloc_base = efcode_alloc(alloc_base);
	tmp_alloc_base =
	    (caddr_t)roundup((uintptr_t)tmp_alloc_base, ecache_alignsize);
	return (tmp_alloc_base);
}

void
startup_platform(void)
{
}