/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/cpuvar.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/promif.h>
#include <sys/platform_module.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/machsystm.h>
#include <sys/bootconf.h>
#include <sys/nvpair.h>
#include <sys/kobj.h>
#include <sys/mem_cage.h>
#include <sys/opl.h>
#include <sys/scfd/scfostoescf.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/utsname.h>
#include <sys/ddi.h>
#include <sys/sunndi.h>
#include <sys/lgrp.h>
#include <sys/memnode.h>
#include <vm/vm_dep.h>

int (*opl_get_mem_unum)(int, uint64_t, char *, int, int *);
int (*opl_get_mem_sid)(char *unum, char *buf, int buflen, int *lenp);
int (*opl_get_mem_offset)(uint64_t paddr, uint64_t *offp);
int (*opl_get_mem_addr)(char *unum, char *sid,
    uint64_t offset, uint64_t *paddr);

/* Memory for fcode claims: 16K times the maximum possible number of IO units */
#define	EFCODE_SIZE	(OPL_MAX_BOARDS * OPL_MAX_IO_UNITS_PER_BOARD * 0x4000)
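/* 0x4000 bytes == the 16K of claim space reserved per possible IO unit */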
int efcode_size = EFCODE_SIZE;

#define	OPL_MC_MEMBOARD_SHIFT 38	/* Boards on 256GB boundary */

/* Set the maximum number of boards for DR */
int opl_boards = OPL_MAX_BOARDS;

void sgn_update_all_cpus(ushort_t, uchar_t, uchar_t);

extern int tsb_lgrp_affinity;

int opl_tsb_spares = (OPL_MAX_BOARDS) * (OPL_MAX_PCICH_UNITS_PER_BOARD) *
	(OPL_MAX_TSBS_PER_PCICH);

pgcnt_t opl_startup_cage_size = 0;

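/*
 * Each entry below is { model name, max board count, model type,
 * TS dispatch table class }; see the opl_model_info_t definition
 * for the field meanings.
 */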
static opl_model_info_t opl_models[] = {
	{ "FF1", OPL_MAX_BOARDS_FF1, FF1, STD_DISPATCH_TABLE },
	{ "FF2", OPL_MAX_BOARDS_FF2, FF2, STD_DISPATCH_TABLE },
	{ "DC1", OPL_MAX_BOARDS_DC1, DC1, STD_DISPATCH_TABLE },
	{ "DC2", OPL_MAX_BOARDS_DC2, DC2, EXT_DISPATCH_TABLE },
	{ "DC3", OPL_MAX_BOARDS_DC3, DC3, EXT_DISPATCH_TABLE },
};
static	int	opl_num_models = sizeof (opl_models)/sizeof (opl_model_info_t);

/*
 * opl_cur_model: the model of the platform we are running on, set once
 * by set_model_info() from the OBP "model" property.
 */
static	opl_model_info_t *opl_cur_model = NULL;

static struct memlist *opl_memlist_per_board(struct memlist *ml);

int
set_platform_max_ncpus(void)
{
	return (OPL_MAX_CPU_PER_BOARD * OPL_MAX_BOARDS);
}

int
set_platform_tsb_spares(void)
{
	return (MIN(opl_tsb_spares, MAX_UPA));
}

static void
set_model_info()
{
	extern int ts_dispatch_extended;
	char	name[MAXSYSNAME];
	int	i;

	/*
	 * Get the model name from the root node.
	 *
	 * We use the PROM device tree since, at this point, the
	 * Solaris device tree is not yet set up.
	 */
	(void) prom_getprop(prom_rootnode(), "model", (caddr_t)name);

	for (i = 0; i < opl_num_models; i++) {
		if (strncmp(name, opl_models[i].model_name, MAXSYSNAME) == 0) {
			opl_cur_model = &opl_models[i];
			break;
		}
	}

	if (i == opl_num_models)
		halt("No valid OPL model found!");

	if ((opl_cur_model->model_cmds & EXT_DISPATCH_TABLE) &&
	    (ts_dispatch_extended == -1)) {
		/*
		 * Select the dispatch table based on the platform model.
		 * Only DC2 and DC3 systems use the alternate/extended
		 * TS dispatch table; FF1, FF2 and DC1 systems use the
		 * standard dispatch table.
		 */
		ts_dispatch_extended = 1;
	}
}

static void
set_max_mmu_ctxdoms()
{
	extern uint_t	max_mmu_ctxdoms;
	int		max_boards;

	/*
	 * From the model, get the maximum number of boards
	 * supported and set the value accordingly. If the model
	 * could not be determined or recognized, we assume the max value.
	 */
	if (opl_cur_model == NULL)
		max_boards = OPL_MAX_BOARDS;
	else
		max_boards = opl_cur_model->model_max_boards;

	/*
	 * On OPL, cores and MMUs are one-to-one.
	 */
	max_mmu_ctxdoms = OPL_MAX_CORE_UNITS_PER_BOARD * max_boards;
}

#pragma weak mmu_init_large_pages

void
set_platform_defaults(void)
{
	extern char *tod_module_name;
	extern void cpu_sgn_update(ushort_t, uchar_t, uchar_t, int);
	extern void mmu_init_large_pages(size_t);

	/* Set the CPU signature function pointer */
	cpu_sgn_func = cpu_sgn_update;

	/* Set appropriate tod module for OPL platform */
	ASSERT(tod_module_name == NULL);
	tod_module_name = "todopl";

	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) {
		if (&mmu_init_large_pages)
			mmu_init_large_pages(mmu_ism_pagesize);
	}

	tsb_lgrp_affinity = 1;

	set_max_mmu_ctxdoms();
}


/*
 * Convert a logical board number to a physical one.
 */

#define	LSBPROP		"board#"
#define	PSBPROP		"physical-board#"

int
opl_get_physical_board(int id)
{
	dev_info_t	*root_dip, *dip = NULL;
	char		*dname = NULL;
	int		circ;

	pnode_t		pnode;
	char		pname[MAXSYSNAME] = {0};

	int		lsb_id;	/* Logical System Board ID */
	int		psb_id;	/* Physical System Board ID */

	/*
	 * This function is called both at an early stage of boot, when
	 * the kernel device tree is not yet initialized, and again later
	 * once the device tree is up. We try the fast track (the kernel
	 * device tree) first.
	 */
	root_dip = ddi_root_node();
	if (root_dip) {
		/* Get from devinfo node */
		ndi_devi_enter(root_dip, &circ);
		for (dip = ddi_get_child(root_dip); dip;
		    dip = ddi_get_next_sibling(dip)) {

			dname = ddi_node_name(dip);
			if (strncmp(dname, "pseudo-mc", 9) != 0)
				continue;

			if ((lsb_id = (int)ddi_getprop(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, LSBPROP, -1)) == -1)
				continue;

			if (id == lsb_id) {
				if ((psb_id = (int)ddi_getprop(DDI_DEV_T_ANY,
				    dip, DDI_PROP_DONTPASS, PSBPROP, -1))
				    == -1) {
					ndi_devi_exit(root_dip, circ);
					return (-1);
				} else {
					ndi_devi_exit(root_dip, circ);
					return (psb_id);
				}
			}
		}
		ndi_devi_exit(root_dip, circ);
	}

	/*
	 * Either we do not have the kernel device tree yet, or we did
	 * not find the node for some reason (say the kernel device tree
	 * was modified), so fall back to the OBP tree.
	 */
	pnode = prom_rootnode();
	for (pnode = prom_childnode(pnode); pnode;
	    pnode = prom_nextnode(pnode)) {

		if ((prom_getprop(pnode, "name", (caddr_t)pname) == -1) ||
		    (strncmp(pname, "pseudo-mc", 9) != 0))
			continue;

		if (prom_getprop(pnode, LSBPROP, (caddr_t)&lsb_id) == -1)
			continue;

		if (id == lsb_id) {
			if (prom_getprop(pnode, PSBPROP,
			    (caddr_t)&psb_id) == -1) {
				return (-1);
			} else {
				return (psb_id);
			}
		}
	}

	return (-1);
}

/*
 * For OPL it's possible that memory from two or more successive boards
 * will be contiguous across the boards, and therefore represented as a
 * single chunk.
 * This function splits such chunks down the board boundaries.
 */
static struct memlist *
opl_memlist_per_board(struct memlist *ml)
{
	uint64_t ssize, low, high, boundary;
	struct memlist *head, *tail, *new;

	ssize = (1ull << OPL_MC_MEMBOARD_SHIFT);

	head = tail = NULL;

	for (; ml; ml = ml->next) {
		low  = (uint64_t)ml->address;
		high = low + (uint64_t)(ml->size);
		while (low < high) {
			boundary = roundup(low + 1, ssize);
			boundary = MIN(high, boundary);
			new = kmem_zalloc(sizeof (struct memlist), KM_SLEEP);
			new->address = low;
			new->size = boundary - low;
			if (head == NULL)
				head = new;
			if (tail) {
				tail->next = new;
				new->prev = tail;
			}
			tail = new;
			low = boundary;
		}
	}
	return (head);
}
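
/*
 * Example (illustrative): with 256GB boards, a single phys_avail chunk
 * covering [0x3f_0000_0000, 0x41_0000_0000) straddles the board boundary
 * at 0x40_0000_0000 and is split into two memlist entries,
 * [0x3f_0000_0000, 0x40_0000_0000) and [0x40_0000_0000, 0x41_0000_0000).
 */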

void
set_platform_cage_params(void)
{
	extern pgcnt_t total_pages;
	extern struct memlist *phys_avail;
	struct memlist *ml, *tml;
	int ret;

	if (kernel_cage_enable) {
		pgcnt_t preferred_cage_size;

		preferred_cage_size =
		    MAX(opl_startup_cage_size, total_pages / 256);

		ml = opl_memlist_per_board(phys_avail);

		kcage_range_lock();
		/*
		 * Note: we are assuming that POST has loaded the whole
		 * show into the high end of memory. Having taken this
		 * leap, we copy the whole of phys_avail to the glist and
		 * arrange for the cage to grow downward (descending pfns).
		 */
		ret = kcage_range_init(ml, 1);

		/* free the memlist */
		do {
			tml = ml->next;
			kmem_free(ml, sizeof (struct memlist));
			ml = tml;
		} while (ml != NULL);

		if (ret == 0)
			kcage_init(preferred_cage_size);
		kcage_range_unlock();
	}

	if (kcage_on)
		cmn_err(CE_NOTE, "!DR Kernel Cage is ENABLED");
	else
		cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
}

/*ARGSUSED*/
int
plat_cpu_poweron(struct cpu *cp)
{
	int (*opl_cpu_poweron)(struct cpu *) = NULL;

	opl_cpu_poweron =
	    (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweron", 0);

	if (opl_cpu_poweron == NULL)
		return (ENOTSUP);
	else
		return ((opl_cpu_poweron)(cp));
}

/*ARGSUSED*/
int
plat_cpu_poweroff(struct cpu *cp)
{
	int (*opl_cpu_poweroff)(struct cpu *) = NULL;

	opl_cpu_poweroff =
	    (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweroff", 0);

	if (opl_cpu_poweroff == NULL)
		return (ENOTSUP);
	else
		return ((opl_cpu_poweroff)(cp));
}

int
plat_max_boards(void)
{
	return (OPL_MAX_BOARDS);
}

int
plat_max_cpu_units_per_board(void)
{
	return (OPL_MAX_CPU_PER_BOARD);
}

int
plat_max_mem_units_per_board(void)
{
	return (OPL_MAX_MEM_UNITS_PER_BOARD);
}

int
plat_max_io_units_per_board(void)
{
	return (OPL_MAX_IO_UNITS_PER_BOARD);
}

int
plat_max_cmp_units_per_board(void)
{
	return (OPL_MAX_CMP_UNITS_PER_BOARD);
}

int
plat_max_core_units_per_board(void)
{
	return (OPL_MAX_CORE_UNITS_PER_BOARD);
}

int
plat_pfn_to_mem_node(pfn_t pfn)
{
	return (pfn >> mem_node_pfn_shift);
}

/* ARGSUSED */
void
plat_build_mem_nodes(u_longlong_t *list, size_t nelems)
{
	size_t	elem;
	pfn_t	basepfn;
	pgcnt_t	npgs;
	uint64_t	boundary, ssize;
	uint64_t	low, high;

	/*
	 * OPL mem slices are always aligned on a 256GB boundary.
	 */
	mem_node_pfn_shift = OPL_MC_MEMBOARD_SHIFT - MMU_PAGESHIFT;
	mem_node_physalign = 0;
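	/*
	 * With the usual 8K base page on sun4u (MMU_PAGESHIFT == 13),
	 * this works out to mem_node_pfn_shift == 25, i.e.
	 * memnode == pfn >> 25.
	 */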

	/*
	 * Boot install lists are arranged <addr, len>, <addr, len>, ...
	 */
	ssize = (1ull << OPL_MC_MEMBOARD_SHIFT);
	for (elem = 0; elem < nelems; elem += 2) {
		low  = (uint64_t)list[elem];
		high = low + (uint64_t)(list[elem + 1]);
		while (low < high) {
			boundary = roundup(low + 1, ssize);
			boundary = MIN(high, boundary);
			basepfn = btop(low);
			npgs = btop(boundary - low);
			mem_node_add_slice(basepfn, basepfn + npgs - 1);
			low = boundary;
		}
	}
}

/*
 * Associate a board's memory slice with its memnode/lgroup at boot time.
 */
void
plat_fill_mc(pnode_t nodeid)
{
	int board;
	int memnode;
	struct {
		uint64_t	addr;
		uint64_t	size;
	} mem_range;

	if (prom_getprop(nodeid, "board#", (caddr_t)&board) < 0) {
		panic("Cannot find board# property in mc node %x", nodeid);
	}
	if (prom_getprop(nodeid, "sb-mem-ranges", (caddr_t)&mem_range) < 0) {
		panic("Cannot find sb-mem-ranges property in mc node %x",
		    nodeid);
	}
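	/*
	 * E.g. (illustrative): a board whose memory starts at
	 * 0x80_0000_0000 maps to memnode 2 (0x80_0000_0000 >> 38 == 2).
	 */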
	memnode = mem_range.addr >> OPL_MC_MEMBOARD_SHIFT;
	plat_assign_lgrphand_to_mem_node(board, memnode);
}

/*
 * Return the platform handle for the lgroup containing the given CPU
 *
 * For OPL, lgroup platform handle == board #.
 */

extern int mpo_disabled;
extern lgrp_handle_t lgrp_default_handle;

lgrp_handle_t
plat_lgrp_cpu_to_hand(processorid_t id)
{
	lgrp_handle_t plathand;

	/*
	 * Return the real platform handle for the CPU until
	 * such time as we know that MPO should be disabled.
	 * At that point, we set the "mpo_disabled" flag to true,
	 * and from that point on, return the default handle.
	 *
	 * By the time we know that MPO should be disabled, the
	 * first CPU will have already been added to a leaf
	 * lgroup, but that's ok. The common lgroup code will
	 * double check that the boot CPU is in the correct place,
	 * and in the case where mpo should be disabled, will move
	 * it to the root if necessary.
	 */
	if (mpo_disabled) {
		/* If MPO is disabled, return the default (UMA) handle */
		plathand = lgrp_default_handle;
	} else {
		plathand = (lgrp_handle_t)LSB_ID(id);
	}
	return (plathand);
}

/*
 * Platform specific lgroup initialization
 */
void
plat_lgrp_init(void)
{
	extern uint32_t lgrp_expand_proc_thresh;
	extern uint32_t lgrp_expand_proc_diff;

	/*
	 * Set tuneables for the OPL architecture
	 *
	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
	 * this process is currently running on before considering
	 * expanding threads to another lgroup.
	 *
	 * lgrp_expand_proc_diff determines how much less the remote lgroup
	 * must be loaded before expanding to it.
	 *
	 * Since remote latencies can be costly, attempt to keep 3 threads
	 * within the same lgroup before expanding to the next lgroup.
	 */
	lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX * 3;
	lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
}

/*
 * Platform notification of lgroup (re)configuration changes
 */
/*ARGSUSED*/
void
plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
{
	update_membounds_t *umb;
	lgrp_config_mem_rename_t lmr;
	int sbd, tbd;
	lgrp_handle_t hand, shand, thand;
	int mnode, snode, tnode;
	pfn_t start, end;

	if (mpo_disabled)
		return;

	switch (evt) {

	case LGRP_CONFIG_MEM_ADD:
		/*
		 * Establish the lgroup handle to memnode translation.
		 */
		umb = (update_membounds_t *)arg;

		hand = umb->u_board;
		mnode = plat_pfn_to_mem_node(umb->u_base >> MMU_PAGESHIFT);
		plat_assign_lgrphand_to_mem_node(hand, mnode);

		break;

	case LGRP_CONFIG_MEM_DEL:
		/*
		 * Special handling for possible memory holes.
		 */
		umb = (update_membounds_t *)arg;
		hand = umb->u_board;
		if ((mnode = plat_lgrphand_to_mem_node(hand)) != -1) {
			if (mem_node_config[mnode].exists) {
				start = mem_node_config[mnode].physbase;
				end = mem_node_config[mnode].physmax;
				mem_node_pre_del_slice(start, end);
				mem_node_post_del_slice(start, end, 0);
			}
		}

		break;

	case LGRP_CONFIG_MEM_RENAME:
		/*
		 * During a DR copy-rename operation, all of the memory
		 * on one board is moved to another board -- but the
		 * addresses/pfns and memnodes don't change. This means
		 * the memory has changed locations without changing identity.
		 *
		 * Source is where we are copying from and target is where we
		 * are copying to.  After source memnode is copied to target
		 * memnode, the physical addresses of the target memnode are
		 * renamed to match what the source memnode had.  Then target
		 * memnode can be removed and source memnode can take its
		 * place.
		 *
		 * To do this, swap the lgroup handle to memnode mappings for
		 * the boards, so target lgroup will have source memnode and
		 * source lgroup will have empty target memnode which is where
		 * its memory will go (if any is added to it later).
		 *
		 * Then source memnode needs to be removed from its lgroup
		 * and added to the target lgroup where the memory was living
		 * but under a different name/memnode.  The memory was in the
		 * target memnode and now lives in the source memnode with
		 * different physical addresses even though it is the same
		 * memory.
		 */
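		/*
		 * arg packs the two board numbers: the low 16 bits hold
		 * the source board and the next 16 bits the target board,
		 * i.e. arg == (tbd << 16) | sbd.
		 */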
		sbd = arg & 0xffff;
		tbd = (arg & 0xffff0000) >> 16;
		shand = sbd;
		thand = tbd;
		snode = plat_lgrphand_to_mem_node(shand);
		tnode = plat_lgrphand_to_mem_node(thand);

		/*
		 * Special handling for possible memory holes.
		 */
		if (tnode != -1 && mem_node_config[tnode].exists) {
			start = mem_node_config[tnode].physbase;
			end = mem_node_config[tnode].physmax;
			mem_node_pre_del_slice(start, end);
			mem_node_post_del_slice(start, end, 0);
		}

		plat_assign_lgrphand_to_mem_node(thand, snode);
		plat_assign_lgrphand_to_mem_node(shand, tnode);

		lmr.lmem_rename_from = shand;
		lmr.lmem_rename_to = thand;

		/*
		 * Remove the source memnode of the copy rename from its
		 * lgroup and add it to its new target lgroup.
		 */
		lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
		    (uintptr_t)&lmr);

		break;

	default:
		break;
	}
}

/*
 * Return latency between "from" and "to" lgroups
 *
 * This latency number can only be used for relative comparison
 * between lgroups on the running system; it cannot be used across
 * platforms, and may not reflect the actual latency.  It is platform
 * and implementation specific, so the platform gets to decide its
 * value.  It would be nice if the number was at least proportional,
 * to make comparisons more meaningful.
 * NOTE: The numbers below are supposed to be load latencies for
 * uncached memory divided by 10, i.e. roughly 420 ns remote and
 * 350 ns local.
 */
int
plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to)
{
	/*
	 * Return the minimum remote latency when there are more than
	 * two lgroups (root and child) and we are getting the latency
	 * between two different lgroups, or the root is involved.
	 */
	if (lgrp_optimizations() && (from != to ||
	    from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE))
		return (42);
	else
		return (35);
}

/*
 * Return platform handle for root lgroup
 */
lgrp_handle_t
plat_lgrp_root_hand(void)
{
	if (mpo_disabled)
		return (lgrp_default_handle);

	return (LGRP_DEFAULT_HANDLE);
}

/*ARGSUSED*/
void
plat_freelist_process(int mnode)
{
}

void
load_platform_drivers(void)
{
	(void) i_ddi_attach_pseudo_node("dr");
}

/*
 * No platform drivers on this platform
 */
char *platform_module_list[] = {
	(char *)0
};

/*ARGSUSED*/
void
plat_tod_fault(enum tod_fault_type tod_bad)
{
}

/*ARGSUSED*/
void
cpu_sgn_update(ushort_t sgn, uchar_t state, uchar_t sub_state, int cpuid)
{
	static void (*scf_panic_callback)(int);
	static void (*scf_shutdown_callback)(int);

	/*
	 * This notifies the SCF of a system panic or shutdown.
	 * On panic and shutdown the appropriate SCF callback
	 * function must be called:
	 *  <SCF callback functions>
	 *   scf_panic_callb()   : panicsys()->panic_quiesce_hw()
	 *   scf_shutdown_callb(): halt() or power_down() or reboot_machine()
	 * cpuid should be -1 and state should be SIGST_EXIT.
	 */
	if (state == SIGST_EXIT && cpuid == -1) {

		/*
		 * Find the symbols for the SCF panic and shutdown
		 * callback routines in the scfd driver.
		 */
		if (scf_panic_callback == NULL)
			scf_panic_callback = (void (*)(int))
			    modgetsymvalue("scf_panic_callb", 0);
		if (scf_shutdown_callback == NULL)
			scf_shutdown_callback = (void (*)(int))
			    modgetsymvalue("scf_shutdown_callb", 0);

		switch (sub_state) {
		case SIGSUBST_PANIC:
			if (scf_panic_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_panic_callb not found\n");
				return;
			}
			scf_panic_callback(SIGSUBST_PANIC);
			break;

		case SIGSUBST_HALT:
			if (scf_shutdown_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_shutdown_callb not found\n");
				return;
			}
			scf_shutdown_callback(SIGSUBST_HALT);
			break;

		case SIGSUBST_ENVIRON:
			if (scf_shutdown_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_shutdown_callb not found\n");
				return;
			}
			scf_shutdown_callback(SIGSUBST_ENVIRON);
			break;

		case SIGSUBST_REBOOT:
			if (scf_shutdown_callback == NULL) {
				cmn_err(CE_NOTE, "!cpu_sgn_update: "
				    "scf_shutdown_callb not found\n");
				return;
			}
			scf_shutdown_callback(SIGSUBST_REBOOT);
			break;
		}
	}
}

/*ARGSUSED*/
int
plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
	int flt_in_memory, ushort_t flt_status,
	char *buf, int buflen, int *lenp)
{
	/*
	 * Check if it's a memory error.
	 */
	if (flt_in_memory) {
		if (opl_get_mem_unum != NULL) {
			return (opl_get_mem_unum(synd_code, flt_addr,
			    buf, buflen, lenp));
		} else {
			return (ENOTSUP);
		}
	} else {
		return (ENOTSUP);
	}
}

/*ARGSUSED*/
int
plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
{
	int	ret = 0;
	int	sb;
	int	plen;

	sb = opl_get_physical_board(LSB_ID(cpuid));
	if (sb == -1) {
		return (ENXIO);
	}

	/*
	 * Make sure opl_cur_model is initialized; it is normally set
	 * up by set_model_info() at boot.
	 */
	if (opl_cur_model == NULL) {
		set_model_info();
	}

	ASSERT((opl_cur_model - opl_models) == (opl_cur_model->model_type));

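	/*
	 * Build the unum from the model and the physical board, e.g.
	 * "/MBU_A/CPUM1" on an FF1 or "/CMU03/CPUM2" on a DC machine
	 * (illustrative values).
	 */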
	switch (opl_cur_model->model_type) {
	case FF1:
		plen = snprintf(buf, buflen, "/%s/CPUM%d", "MBU_A",
		    CHIP_ID(cpuid) / 2);
		break;

	case FF2:
		plen = snprintf(buf, buflen, "/%s/CPUM%d", "MBU_B",
		    (CHIP_ID(cpuid) / 2) + (sb * 2));
		break;

	case DC1:
	case DC2:
	case DC3:
		plen = snprintf(buf, buflen, "/%s%02d/CPUM%d", "CMU", sb,
		    CHIP_ID(cpuid));
		break;

	default:
		/* This should never happen */
		return (ENODEV);
	}

	if (plen >= buflen) {
		ret = ENOSPC;
	} else {
		if (lenp)
			*lenp = strlen(buf);
	}
	return (ret);
}

#define	SCF_PUTINFO(f, s, p)	\
	f(KEY_ESCF, 0x01, 0, s, p)
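/*
 * f is scf_service_putinfo(); s and p are the payload size and pointer
 * (the remaining constants are service-specific values defined by the
 * SCF driver interface).
 */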
void
plat_nodename_set(void)
{
	void *datap;
	static int (*scf_service_function)(uint32_t, uint8_t,
	    uint32_t, uint32_t, void *);
	int counter = 5;

	/*
	 * Find the symbol for the SCF put routine in the scfd driver.
	 */
	if (scf_service_function == NULL)
		scf_service_function =
		    (int (*)(uint32_t, uint8_t, uint32_t, uint32_t, void *))
		    modgetsymvalue("scf_service_putinfo", 0);

	/*
	 * If the symbol was found, call it.  Otherwise, log a note (but not to
	 * the console).
	 */

	if (scf_service_function == NULL) {
		cmn_err(CE_NOTE,
		    "!plat_nodename_set: scf_service_putinfo not found\n");
		return;
	}

	/* KM_SLEEP allocations cannot fail, so no NULL check is needed */
	datap = kmem_zalloc(sizeof (struct utsname), KM_SLEEP);

	bcopy((struct utsname *)&utsname,
	    (struct utsname *)datap, sizeof (struct utsname));

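	/*
	 * Hand the utsname to the SCF; if the SCF is busy, retry up to
	 * five times at ten-second intervals before giving up.
	 */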
	while ((SCF_PUTINFO(scf_service_function,
	    sizeof (struct utsname), datap) == EBUSY) && (counter-- > 0)) {
		delay(10 * drv_usectohz(1000000));
	}
	if (counter < 0)
		cmn_err(CE_NOTE,
		    "!plat_nodename_set: "
		    "scf_service_putinfo not responding\n");

	kmem_free(datap, sizeof (struct utsname));
}

caddr_t	efcode_vaddr = NULL;

/*
 * Preallocate enough memory for fcode claims.
 */

caddr_t
efcode_alloc(caddr_t alloc_base)
{
	caddr_t efcode_alloc_base = (caddr_t)roundup((uintptr_t)alloc_base,
	    MMU_PAGESIZE);
	caddr_t vaddr;

	/*
	 * Allocate the physical memory for the Oberon fcode.
	 */
	if ((vaddr = (caddr_t)BOP_ALLOC(bootops, efcode_alloc_base,
	    efcode_size, MMU_PAGESIZE)) == NULL)
		cmn_err(CE_PANIC, "Cannot allocate Efcode Memory");

	efcode_vaddr = vaddr;

	return (efcode_alloc_base + efcode_size);
}

caddr_t
plat_startup_memlist(caddr_t alloc_base)
{
	caddr_t tmp_alloc_base;

	tmp_alloc_base = efcode_alloc(alloc_base);
	tmp_alloc_base =
	    (caddr_t)roundup((uintptr_t)tmp_alloc_base, ecache_alignsize);
	return (tmp_alloc_base);
}

void
startup_platform(void)
{
}

void
plat_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *info)
{
	int	impl;

	impl = cpunodes[cpuid].implementation;
	if (IS_OLYMPUS_C(impl)) {
		info->mmu_idx = MMU_ID(cpuid);
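		/* Olympus-C (SPARC64 VI) provides 8192 (2^13) contexts */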
		info->mmu_nctxs = 8192;
	} else {
		cmn_err(CE_PANIC, "Unknown processor implementation %d", impl);
	}
}

int
plat_get_mem_sid(char *unum, char *buf, int buflen, int *lenp)
{
	if (opl_get_mem_sid == NULL) {
		return (ENOTSUP);
	}
	return (opl_get_mem_sid(unum, buf, buflen, lenp));
}

int
plat_get_mem_offset(uint64_t paddr, uint64_t *offp)
{
	if (opl_get_mem_offset == NULL) {
		return (ENOTSUP);
	}
	return (opl_get_mem_offset(paddr, offp));
}

int
plat_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *addrp)
{
	if (opl_get_mem_addr == NULL) {
		return (ENOTSUP);
	}
	return (opl_get_mem_addr(unum, sid, offset, addrp));
}