xref: /titanic_50/usr/src/uts/sun4u/starcat/os/starcat.c (revision 33f5ff17089e3a43e6e730bf80384c233123dbd9)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
28  */
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/sysmacros.h>
33 #include <sys/sunddi.h>
34 #include <sys/esunddi.h>
35 #include <sys/sunndi.h>
36 #include <sys/modctl.h>
37 #include <sys/promif.h>
38 #include <sys/machparam.h>
39 #include <sys/kobj.h>
40 #include <sys/cpuvar.h>
41 #include <sys/mem_cage.h>
42 #include <sys/promif.h>
43 #include <sys/promimpl.h>
44 #include <sys/platform_module.h>
45 #include <sys/errno.h>
46 #include <sys/cpu_sgnblk_defs.h>
47 #include <sys/iosramio.h>
48 #include <sys/domaind.h>
49 #include <sys/starcat.h>
50 #include <sys/machsystm.h>
51 #include <sys/bootconf.h>
52 #include <sys/memnode.h>
53 #include <vm/vm_dep.h>
54 #include <vm/page.h>
55 #include <sys/cheetahregs.h>
56 #include <sys/plat_ecc_unum.h>
57 #include <sys/plat_ecc_dimm.h>
58 #include <sys/lgrp.h>
59 #include <sys/dr.h>
60 #include <sys/post/scat_dcd.h>
61 #include <sys/kdi_impl.h>
62 #include <sys/iosramreg.h>
63 #include <sys/iosramvar.h>
64 #include <sys/mc-us3.h>
65 #include <sys/clock_impl.h>
66 
67 /* Preallocation of spare tsb's for DR */
68 int starcat_tsb_spares = STARCAT_SPARE_TSB_MAX;
69 
/* Set the maximum number of slot0 + slot1 boards... for DR */
71 int starcat_boards = STARCAT_BDSET_MAX * STARCAT_BDSET_SLOT_MAX;
72 
73 /* Maximum number of cpus per board... for DR */
74 int starcat_cpu_per_board = MAX(STARCAT_SLOT0_CPU_MAX, STARCAT_SLOT1_CPU_MAX);
75 
76 /* Maximum number of mem-units per board... for DR */
77 int starcat_mem_per_board = MAX(STARCAT_SLOT0_MEM_MAX, STARCAT_SLOT1_MEM_MAX);
78 
79 /* Maximum number of io-units (buses) per board... for DR */
80 int starcat_io_per_board = 2 * MAX(STARCAT_SLOT0_IO_MAX, STARCAT_SLOT1_IO_MAX);
81 
82 /* Preferred minimum cage size (expressed in pages)... for DR */
83 pgcnt_t starcat_startup_cage_size = 0;
84 
85 /* Platform specific function to get unum information */
86 int (*p2get_mem_unum)(int, uint64_t, char *, int, int *);
87 
88 /* Memory for fcode claims.  16k times # maximum possible schizos */
89 #define	EFCODE_SIZE	(STARCAT_BDSET_MAX * 4 * 0x4000)
90 int efcode_size = EFCODE_SIZE;
91 
92 void sgn_update_all_cpus(ushort_t, uchar_t, uchar_t);
93 
/*
 * IOSRAM access vectors.  The IOSRAM driver is loaded in
 * load_platform_drivers(); any cpu signature usage prior to that time
 * goes through the PROM interfaces below.  Once the driver is loaded,
 * these pointers are redirected to its iosram_rd()/iosram_wr() entry
 * points (see load_platform_drivers()).
 */
static int (*iosram_rdp)(uint32_t key, uint32_t off, uint32_t len,
	    caddr_t dptr) = prom_starcat_iosram_read;
static int (*iosram_wrp)(uint32_t key, uint32_t off, uint32_t len,
	    caddr_t dptr) = prom_starcat_iosram_write;

/*
 * Per-board-set DIMM serial-id state used by the plat_ecc_dimm code.
 * NOTE(review): populated outside this chunk; not referenced here.
 */
plat_dimm_sid_board_t	domain_dimm_sids[STARCAT_BDSET_MAX];
104 
105 /*
106  * set_platform_max_ncpus should return the maximum number of CPUs that the
107  * platform supports.  This function is called from check_cpus() to set the
108  * value of max_ncpus [see PSARC 1997/165 CPU Dynamic Reconfiguration].
109  * Data elements which are allocated based upon max_ncpus are all accessed
110  * via cpu_seqid and not physical IDs.  Previously, the value of max_ncpus
111  * was being set to the largest physical ID, which led to boot problems on
112  * systems with less than 1.25GB of memory.
113  */
114 
115 int
116 set_platform_max_ncpus(void)
117 {
118 	int n;
119 
120 	/*
121 	 * Convert number of slot0 + slot1 boards to number of expander brds
122 	 * and constrain the value to an architecturally plausible range
123 	 */
124 	n = MAX(starcat_boards, STARCAT_BDSET_MIN * STARCAT_BDSET_SLOT_MAX);
125 	n = MIN(n, STARCAT_BDSET_MAX * STARCAT_BDSET_SLOT_MAX);
126 	n = (n + STARCAT_BDSET_SLOT_MAX - 1) / STARCAT_BDSET_SLOT_MAX;
127 
128 	/* return maximum number of cpus possible on N expander boards */
129 	return (n * STARCAT_BDSET_CPU_MAX - STARCAT_SLOT1_CPU_MAX);
130 }
131 
132 int
133 set_platform_tsb_spares()
134 {
135 	return (MIN(starcat_tsb_spares, MAX_UPA));
136 }
137 
138 #pragma weak mmu_init_large_pages
139 
/*
 * Establish Starcat-specific kernel tunable defaults early in boot.
 */
void
set_platform_defaults(void)
{
	extern char *tod_module_name;
	extern int ts_dispatch_extended;
	extern void cpu_sgn_update(ushort_t, uchar_t, uchar_t, int);
	extern int tsb_lgrp_affinity;
	extern int segkmem_reloc;
	extern void mmu_init_large_pages(size_t);
	extern int ncpunode;	/* number of CPUs detected by OBP */

#ifdef DEBUG
	/* DEBUG kernels log correctable-error events verbosely */
	ce_verbose_memory = 2;
	ce_verbose_other = 2;
#endif

	/* Set the CPU signature function pointer */
	cpu_sgn_func = cpu_sgn_update;

	/* Set appropriate tod module for starcat */
	ASSERT(tod_module_name == NULL);
	tod_module_name = "todstarcat";

	/*
	 * Use the alternate TS dispatch table, which is better
	 * tuned for large servers.  Only change it if it is still at
	 * the unset sentinel (-1), i.e. not overridden elsewhere.
	 */
	if (ts_dispatch_extended == -1)
		ts_dispatch_extended = 1;

	/*
	 * Use lgroup-aware TSB allocations on this platform,
	 * since they are a considerable performance win.
	 */
	tsb_lgrp_affinity = 1;

	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) {
		/* mmu_init_large_pages is a weak symbol and may be absent */
		if (&mmu_init_large_pages)
			mmu_init_large_pages(mmu_ism_pagesize);
	}

	/*
	 * KPR (kernel page relocation) is supported on this platform.
	 * Enable it only when the cage is enabled and the configuration
	 * is large (>= 32 cpus detected by OBP).
	 */
	if (kernel_cage_enable && ncpunode >= 32) {
		segkmem_reloc = 1;
		cmn_err(CE_NOTE, "!Kernel Page Relocation is ENABLED");
	} else {
		cmn_err(CE_NOTE, "!Kernel Page Relocation is DISABLED");
	}
}
192 
#ifdef DEBUG
/* Debug override for the preferred cage size (in pages); 0 = unused */
pgcnt_t starcat_cage_size_limit;
#endif

/*
 * Configure the DR kernel cage: pick a preferred initial size and
 * arrange for the cage to grow downward from high physical memory.
 */
void
set_platform_cage_params(void)
{
	extern pgcnt_t total_pages;
	extern struct memlist *phys_avail;

	if (kernel_cage_enable) {
		pgcnt_t preferred_cage_size;

		/* At least the tunable minimum, or 1/256 of all pages */
		preferred_cage_size =
		    MAX(starcat_startup_cage_size, total_pages / 256);

#ifdef DEBUG
		if (starcat_cage_size_limit)
			preferred_cage_size = starcat_cage_size_limit;
#endif
		/*
		 * Note: we are assuming that POST has loaded the
		 * whole show into the high end of memory.  Having
		 * taken this leap, we pass the whole of phys_avail
		 * as the growth list and arrange for the cage to
		 * grow downward (descending pfns).
		 */
		kcage_range_init(phys_avail, KCAGE_DOWN, preferred_cage_size);
	}

	if (kcage_on)
		cmn_err(CE_NOTE, "!DR Kernel Cage is ENABLED");
	else
		cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
}
228 
229 void
230 load_platform_modules(void)
231 {
232 	if (modload("misc", "pcihp") < 0) {
233 		cmn_err(CE_NOTE, "pcihp driver failed to load");
234 	}
235 }
236 
237 /*
238  * Starcat does not support power control of CPUs from the OS.
239  */
240 /*ARGSUSED*/
241 int
242 plat_cpu_poweron(struct cpu *cp)
243 {
244 	int (*starcat_cpu_poweron)(struct cpu *) = NULL;
245 
246 	starcat_cpu_poweron =
247 	    (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweron", 0);
248 
249 	if (starcat_cpu_poweron == NULL)
250 		return (ENOTSUP);
251 	else
252 		return ((starcat_cpu_poweron)(cp));
253 }
254 
255 /*ARGSUSED*/
256 int
257 plat_cpu_poweroff(struct cpu *cp)
258 {
259 	int (*starcat_cpu_poweroff)(struct cpu *) = NULL;
260 
261 	starcat_cpu_poweroff =
262 	    (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweroff", 0);
263 
264 	if (starcat_cpu_poweroff == NULL)
265 		return (ENOTSUP);
266 	else
267 		return ((starcat_cpu_poweroff)(cp));
268 }
269 
270 /*
271  * The following are currently private to Starcat DR
272  */
int
plat_max_boards()
{
	/* DR tunable: maximum slot0 + slot1 boards on the platform */
	return (starcat_boards);
}
278 
int
plat_max_cpu_units_per_board()
{
	/* DR tunable: maximum cpu units on any single board */
	return (starcat_cpu_per_board);
}
284 
int
plat_max_mc_units_per_board()
{
	return (starcat_mem_per_board); /* each CPU has a memory controller */
}
290 
int
plat_max_mem_units_per_board()
{
	/* DR tunable: maximum memory units on any single board */
	return (starcat_mem_per_board);
}
296 
int
plat_max_io_units_per_board()
{
	/* DR tunable: maximum io units (buses) on any single board */
	return (starcat_io_per_board);
}
302 
int
plat_max_cpumem_boards(void)
{
	/* Maximum number of cpu/memory board sets (expanders) */
	return (STARCAT_BDSET_MAX);
}
308 
int
plat_pfn_to_mem_node(pfn_t pfn)
{
	/* Memnode == fixed-size slice; see plat_build_mem_nodes() */
	return (pfn >> mem_node_pfn_shift);
}
314 
315 #define	STARCAT_MC_MEMBOARD_SHIFT 37	/* Boards on 128BG boundary */
316 
317 /* ARGSUSED */
318 void
319 plat_build_mem_nodes(prom_memlist_t *list, size_t nelems)
320 {
321 	size_t	elem;
322 	pfn_t	basepfn;
323 	pgcnt_t	npgs;
324 
325 	/*
326 	 * Starcat mem slices are always aligned on a 128GB boundary,
327 	 * fixed, and limited to one slice per expander due to design
328 	 * of the centerplane ASICs.
329 	 */
330 	mem_node_pfn_shift = STARCAT_MC_MEMBOARD_SHIFT - MMU_PAGESHIFT;
331 	mem_node_physalign = 0;
332 
333 	/*
334 	 * Boot install lists are arranged <addr, len>, <addr, len>, ...
335 	 */
336 	for (elem = 0; elem < nelems; list++, elem++) {
337 		basepfn = btop(list->addr);
338 		npgs = btop(list->size);
339 		mem_node_add_slice(basepfn, basepfn + npgs - 1);
340 	}
341 }
342 
343 /*
344  * Find the CPU associated with a slice at boot-time.
345  */
/*
 * Read the given memory-controller node's bank decode registers and,
 * for each valid bank, map the owning expander (lgroup handle) to the
 * 128GB slice (memnode) containing the bank's base address.
 */
void
plat_fill_mc(pnode_t nodeid)
{
	int		len;
	uint64_t	mc_addr, mask;
	uint64_t	mc_decode[MAX_BANKS_PER_MC];
	uint32_t	regs[4];
	int		local_mc;	/* MC belongs to this CPU/CMP? */
	int		portid;
	int		expnum;
	int		i;

	/*
	 * Memory address decoding registers
	 * (see Chap 9 of SPARCV9 JSP-1 US-III implementation)
	 */
	const uint64_t	mc_decode_addr[MAX_BANKS_PER_MC] = {
		0x400028, 0x400010, 0x400018, 0x400020
	};

	/*
	 * Starcat memory controller portid == global CPU id
	 */
	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
	    (portid == -1))
		return;

	expnum = STARCAT_CPUID_TO_EXPANDER(portid);

	/*
	 * The "reg" property returns 4 32-bit values. The first two are
	 * combined to form a 64-bit address.  The second two are for a
	 * 64-bit size, but we don't actually need to look at that value.
	 */
	len = prom_getproplen(nodeid, "reg");
	if (len != (sizeof (uint32_t) * 4)) {
		prom_printf("Warning: malformed 'reg' property\n");
		return;
	}
	if (prom_getprop(nodeid, "reg", (caddr_t)regs) < 0)
		return;
	mc_addr = ((uint64_t)regs[0]) << 32;
	mc_addr |= (uint64_t)regs[1];

	/*
	 * Figure out whether the memory controller we are examining
	 * belongs to this CPU/CMP or a different one.
	 */
	if (portid == cpunodes[CPU->cpu_id].portid)
		local_mc = 1;
	else
		local_mc = 0;

	for (i = 0; i < MAX_BANKS_PER_MC; i++) {

		mask = mc_decode_addr[i];

		/*
		 * If the memory controller is local to this CPU, we use
		 * the special ASI to read the decode registers.
		 * Otherwise, we load the values from a magic address in
		 * I/O space.
		 */
		if (local_mc)
			mc_decode[i] = lddmcdecode(mask & MC_OFFSET_MASK);
		else
			mc_decode[i] = lddphysio((mc_addr | mask));

		/* Only banks with the valid bit set contribute a mapping */
		if (mc_decode[i] >> MC_VALID_SHIFT) {
			uint64_t base = MC_BASE(mc_decode[i]) << PHYS2UM_SHIFT;
			int sliceid = (base >> STARCAT_MC_MEMBOARD_SHIFT);

			if (sliceid < max_mem_nodes) {
				/*
				 * Establish start-of-day mappings of
				 * lgroup platform handles to memnodes.
				 * Handle == Expander Number
				 * Memnode == Fixed 128GB Slice
				 */
				plat_assign_lgrphand_to_mem_node(expnum,
				    sliceid);
			}
		}
	}
}
431 
432 /*
433  * Starcat support for lgroups.
434  *
435  * On Starcat, an lgroup platform handle == expander number.
436  * For split-slot configurations (e.g. slot 0 and slot 1 boards
437  * in different domains) an MCPU board has only remote memory.
438  *
439  * The centerplane logic provides fixed 128GB memory slices
440  * each of which map to a memnode.  The initial mapping of
441  * memnodes to lgroup handles is determined at boot time.
442  * A DR addition of memory adds a new mapping. A DR copy-rename
443  * swaps mappings.
444  */
445 
/*
 * Convert board number to expander number.  Each expander hosts two
 * board slots (slot 0 and slot 1), so expander == board / 2.  The
 * macro argument is parenthesized so the expansion is correct for any
 * expression argument (e.g. a conditional), not just an identifier.
 */
#define	BOARDNUM_2_EXPANDER(b)	((b) >> 1)
450 
451 /*
452  * Return the number of boards configured with NULL LPA.
453  */
/*
 * Return the number of slot 0 boards configured with a NULL LPA.
 * On failure to read or validate the GDCD, return a value strictly
 * greater than EXP_COUNT so callers treat it as "NULL LPA present".
 */
static int
check_for_null_lpa(void)
{
	gdcd_t	*gdcd;
	uint_t	exp, nlpa;

	/*
	 * Read GDCD from IOSRAM.
	 * If this fails indicate a NULL LPA condition.
	 */
	if ((gdcd = kmem_zalloc(sizeof (gdcd_t), KM_NOSLEEP)) == NULL)
		return (EXP_COUNT+1);

	if ((*iosram_rdp)(GDCD_MAGIC, 0, sizeof (gdcd_t), (caddr_t)gdcd) ||
	    (gdcd->h.dcd_magic != GDCD_MAGIC) ||
	    (gdcd->h.dcd_version != DCD_VERSION)) {
		kmem_free(gdcd, sizeof (gdcd_t));
		cmn_err(CE_WARN, "check_for_null_lpa: failed to access GDCD\n");
		return (EXP_COUNT+2);
	}

	/*
	 * Check for NULL LPAs on all slot 0 boards in domain
	 * (i.e. in all expanders marked good for this domain).
	 */
	nlpa = 0;
	for (exp = 0; exp < EXP_COUNT; exp++) {
		if (RSV_GOOD(gdcd->dcd_slot[exp][0].l1ss_rsv) &&
		    (gdcd->dcd_slot[exp][0].l1ss_flags &
		    L1SSFLG_THIS_L1_NULL_PROC_LPA))
			nlpa++;
	}

	kmem_free(gdcd, sizeof (gdcd_t));
	return (nlpa);
}
490 
491 /*
492  * Return the platform handle for the lgroup containing the given CPU
493  *
494  * For Starcat, lgroup platform handle == expander.
495  */
496 
497 extern int mpo_disabled;
498 extern lgrp_handle_t lgrp_default_handle;
499 int null_lpa_boards = -1;
500 
501 lgrp_handle_t
502 plat_lgrp_cpu_to_hand(processorid_t id)
503 {
504 	lgrp_handle_t		plathand;
505 
506 	plathand = STARCAT_CPUID_TO_EXPANDER(id);
507 
508 	/*
509 	 * Return the real platform handle for the CPU until
510 	 * such time as we know that MPO should be disabled.
511 	 * At that point, we set the "mpo_disabled" flag to true,
512 	 * and from that point on, return the default handle.
513 	 *
514 	 * By the time we know that MPO should be disabled, the
515 	 * first CPU will have already been added to a leaf
516 	 * lgroup, but that's ok. The common lgroup code will
517 	 * double check that the boot CPU is in the correct place,
518 	 * and in the case where mpo should be disabled, will move
519 	 * it to the root if necessary.
520 	 */
521 	if (mpo_disabled) {
522 		/* If MPO is disabled, return the default (UMA) handle */
523 		plathand = lgrp_default_handle;
524 	} else {
525 		if (null_lpa_boards > 0) {
526 			/* Determine if MPO should be disabled */
527 			mpo_disabled = 1;
528 			plathand = lgrp_default_handle;
529 		}
530 	}
531 	return (plathand);
532 }
533 
534 /*
535  * Platform specific lgroup initialization
536  */
void
plat_lgrp_init(void)
{
	extern uint32_t lgrp_expand_proc_thresh;
	extern uint32_t lgrp_expand_proc_diff;

	/*
	 * Set tuneables for Starcat architecture
	 *
	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
	 * this process is currently running on before considering
	 * expanding threads to another lgroup.
	 *
	 * lgrp_expand_proc_diff determines how much less the remote lgroup
	 * must be loaded before expanding to it.
	 *
	 * Since remote latencies can be costly, attempt to keep 3 threads
	 * within the same lgroup before expanding to the next lgroup.
	 * Both values are in LGRP_LOADAVG_THREAD_MAX (per-thread load)
	 * units.
	 */
	lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX * 3;
	lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
}
559 
560 /*
561  * Platform notification of lgroup (re)configuration changes
562  */
/*ARGSUSED*/
void
plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
{
	update_membounds_t	*umb;
	lgrp_config_mem_rename_t lmr;
	int			sbd, tbd;	/* source/target board */
	lgrp_handle_t		hand, shand, thand;
	int			mnode, snode, tnode;

	/* Nothing to track once MPO has been disabled */
	if (mpo_disabled)
		return;

	switch (evt) {

	case LGRP_CONFIG_MEM_ADD:
		/*
		 * Establish the lgroup handle to memnode translation.
		 */
		umb = (update_membounds_t *)arg;

		hand = BOARDNUM_2_EXPANDER(umb->u_board);
		mnode = plat_pfn_to_mem_node(umb->u_base >> MMU_PAGESHIFT);
		plat_assign_lgrphand_to_mem_node(hand, mnode);

		break;

	case LGRP_CONFIG_MEM_DEL:
		/* We don't have to do anything */

		break;

	case LGRP_CONFIG_MEM_RENAME:
		/*
		 * During a DR copy-rename operation, all of the memory
		 * on one board is moved to another board -- but the
		 * addresses/pfns and memnodes don't change. This means
		 * the memory has changed locations without changing identity.
		 *
		 * Source is where we are copying from and target is where we
		 * are copying to.  After source memnode is copied to target
		 * memnode, the physical addresses of the target memnode are
		 * renamed to match what the source memnode had.  Then target
		 * memnode can be removed and source memnode can take its
		 * place.
		 *
		 * To do this, swap the lgroup handle to memnode mappings for
		 * the boards, so target lgroup will have source memnode and
		 * source lgroup will have empty target memnode which is where
		 * its memory will go (if any is added to it later).
		 *
		 * Then source memnode needs to be removed from its lgroup
		 * and added to the target lgroup where the memory was living
		 * but under a different name/memnode.  The memory was in the
		 * target memnode and now lives in the source memnode with
		 * different physical addresses even though it is the same
		 * memory.
		 */

		/* arg packs source board in bits 0-15, target in 16-31 */
		sbd = arg & 0xffff;
		tbd = (arg & 0xffff0000) >> 16;
		shand = BOARDNUM_2_EXPANDER(sbd);
		thand = BOARDNUM_2_EXPANDER(tbd);
		snode = plat_lgrphand_to_mem_node(shand);
		tnode = plat_lgrphand_to_mem_node(thand);

		plat_assign_lgrphand_to_mem_node(thand, snode);
		plat_assign_lgrphand_to_mem_node(shand, tnode);

		lmr.lmem_rename_from = shand;
		lmr.lmem_rename_to = thand;

		/*
		 * Remove source memnode of copy rename from its lgroup
		 * and add it to its new target lgroup
		 */
		lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
		    (uintptr_t)&lmr);

		break;

	default:
		break;
	}
}
647 
648 /*
649  * Return latency between "from" and "to" lgroups
650  *
651  * This latency number can only be used for relative comparison
652  * between lgroups on the running system, cannot be used across platforms,
653  * and may not reflect the actual latency.  It is platform and implementation
654  * specific, so platform gets to decide its value.  It would be nice if the
655  * number was at least proportional to make comparisons more meaningful though.
656  * NOTE: The numbers below are supposed to be load latencies for uncached
657  * memory divided by 10.
658  */
659 int
660 plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to)
661 {
662 	/*
663 	 * Return min remote latency when there are more than two lgroups
664 	 * (root and child) and getting latency between two different lgroups
665 	 * or root is involved
666 	 */
667 	if (lgrp_optimizations() && (from != to ||
668 	    from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE))
669 		return (48);
670 	else
671 		return (28);
672 }
673 
674 /*
675  * Return platform handle for root lgroup
676  */
677 lgrp_handle_t
678 plat_lgrp_root_hand(void)
679 {
680 	if (mpo_disabled)
681 		return (lgrp_default_handle);
682 
683 	return (LGRP_DEFAULT_HANDLE);
684 }
685 
/* ARGSUSED */
void
plat_freelist_process(int mnode)
{
	/* No per-memnode freelist post-processing needed on Starcat. */
}
691 
/*
 * Load and attach the platform drivers needed early in boot: IOSRAM
 * (after which the iosram read/write hooks are repointed at the
 * driver), AXQ, the mailbox client, DR, mc-us3 and pcisch.  Finishes
 * by calling plat_ecc_init().  Failure to reach the IOSRAM is fatal
 * (panic); other driver load failures are only warned about.
 */
void
load_platform_drivers(void)
{
	uint_t		tunnel;
	pnode_t		nodeid;
	dev_info_t	*chosen_devi;
	char		chosen_iosram[MAXNAMELEN];

	/*
	 * Get /chosen node - that's where the tunnel property is
	 */
	nodeid = prom_chosennode();

	/*
	 * Get the iosram property from the chosen node.
	 */
	if (prom_getprop(nodeid, IOSRAM_CHOSEN_PROP, (caddr_t)&tunnel) <= 0) {
		prom_printf("Unable to get iosram property\n");
		cmn_err(CE_PANIC, "Unable to get iosram property\n");
	}

	if (prom_phandle_to_path((phandle_t)tunnel, chosen_iosram,
	    sizeof (chosen_iosram)) < 0) {
		(void) prom_printf("prom_phandle_to_path(0x%x) failed\n",
		    tunnel);
		cmn_err(CE_PANIC, "prom_phandle_to_path(0x%x) failed\n",
		    tunnel);
	}

	/*
	 * Attach all driver instances along the iosram's device path
	 */
	if (i_ddi_attach_hw_nodes("iosram") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "IOSRAM failed to load\n");
	}

	if ((chosen_devi = e_ddi_hold_devi_by_path(chosen_iosram, 0)) == NULL) {
		(void) prom_printf("e_ddi_hold_devi_by_path(%s) failed\n",
		    chosen_iosram);
		cmn_err(CE_PANIC, "e_ddi_hold_devi_by_path(%s) failed\n",
		    chosen_iosram);
	}
	ndi_rele_devi(chosen_devi);

	/*
	 * iosram driver is now loaded so we need to set our read and
	 * write pointers.
	 */
	iosram_rdp = (int (*)(uint32_t, uint32_t, uint32_t, caddr_t))
	    modgetsymvalue("iosram_rd", 0);
	iosram_wrp = (int (*)(uint32_t, uint32_t, uint32_t, caddr_t))
	    modgetsymvalue("iosram_wr", 0);

	/*
	 * Need to check for null proc LPA after IOSRAM driver is loaded
	 * and before multiple lgroups created (when start_other_cpus() called)
	 */
	null_lpa_boards = check_for_null_lpa();

	/* load and attach the axq driver */
	if (i_ddi_attach_hw_nodes("axq") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "AXQ failed to load\n");
	}

	/* load Starcat Solaris Mailbox Client driver */
	if (modload("misc", "scosmb") < 0) {
		cmn_err(CE_WARN, "SCOSMB failed to load\n");
	}

	/* load the DR driver */
	if (i_ddi_attach_hw_nodes("dr") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "dr failed to load");
	}

	/*
	 * Load the mc-us3 memory driver.
	 */
	if (i_ddi_attach_hw_nodes("mc-us3") != DDI_SUCCESS)
		cmn_err(CE_WARN, "mc-us3 failed to load");
	else
		(void) ddi_hold_driver(ddi_name_to_major("mc-us3"));

	/* Load the schizo pci bus nexus driver. */
	if (i_ddi_attach_hw_nodes("pcisch") != DDI_SUCCESS)
		cmn_err(CE_WARN, "pcisch failed to load");

	plat_ecc_init();
}
780 
781 
782 /*
783  * No platform drivers on this platform
784  */
char *platform_module_list[] = {
	(char *)0	/* NULL terminator; list is intentionally empty */
};
788 
789 
/*ARGSUSED*/
void
plat_tod_fault(enum tod_fault_type tod_bad)
{
	/* TOD faults are not acted upon by the Starcat platform code. */
}
795 
796 /*
797  * Update the signature(s) in the IOSRAM's domain data section.
798  */
/*
 * Write the signature for one cpu (or, with cpuid == -1, all cpus) to
 * the IOSRAM domain data section and, in most cases, also update the
 * domain-wide state signature.
 */
void
cpu_sgn_update(ushort_t sgn, uchar_t state, uchar_t sub_state, int cpuid)
{
	sig_state_t new_sgn;
	sig_state_t current_sgn;

	/*
	 * If the substate is REBOOT, then check for panic flow:
	 * a current domain state of SIGST_EXIT means we are rebooting
	 * out of a panic, so record PANIC_REBOOT instead.
	 */
	if (sub_state == SIGSUBST_REBOOT) {
		(*iosram_rdp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET,
		    sizeof (sig_state_t), (caddr_t)&current_sgn);
		if (current_sgn.state_t.state == SIGST_EXIT)
			sub_state = SIGSUBST_PANIC_REBOOT;
	}

	/*
	 * cpuid == -1 indicates that the operation applies to all cpus.
	 */
	if (cpuid < 0) {
		sgn_update_all_cpus(sgn, state, sub_state);
		return;
	}

	/* Update this cpu's slot in the per-cpu signature array */
	new_sgn.signature = CPU_SIG_BLD(sgn, state, sub_state);
	(*iosram_wrp)(DOMD_MAGIC,
	    DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
	    sizeof (sig_state_t), (caddr_t)&new_sgn);

	/*
	 * Under certain conditions we don't update the signature
	 * of the domain_state.
	 */
	if ((sgn == OS_SIG) &&
	    ((state == SIGST_OFFLINE) || (state == SIGST_DETACHED)))
		return;
	(*iosram_wrp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET, sizeof (sig_state_t),
	    (caddr_t)&new_sgn);
}
838 
839 /*
840  * Update the signature(s) in the IOSRAM's domain data section for all CPUs.
841  */
842 void
843 sgn_update_all_cpus(ushort_t sgn, uchar_t state, uchar_t sub_state)
844 {
845 	sig_state_t new_sgn;
846 	int i = 0;
847 
848 	new_sgn.signature = CPU_SIG_BLD(sgn, state, sub_state);
849 
850 	/*
851 	 * First update the domain_state signature
852 	 */
853 	(*iosram_wrp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET, sizeof (sig_state_t),
854 	    (caddr_t)&new_sgn);
855 
856 	for (i = 0; i < NCPU; i++) {
857 		if (cpu[i] != NULL && (cpu[i]->cpu_flags &
858 		    (CPU_EXISTS|CPU_QUIESCED))) {
859 			(*iosram_wrp)(DOMD_MAGIC,
860 			    DOMD_CPUSIGS_OFFSET + i * sizeof (sig_state_t),
861 			    sizeof (sig_state_t), (caddr_t)&new_sgn);
862 		}
863 	}
864 }
865 
866 ushort_t
867 get_cpu_sgn(int cpuid)
868 {
869 	sig_state_t cpu_sgn;
870 
871 	(*iosram_rdp)(DOMD_MAGIC,
872 	    DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
873 	    sizeof (sig_state_t), (caddr_t)&cpu_sgn);
874 
875 	return (cpu_sgn.state_t.sig);
876 }
877 
878 uchar_t
879 get_cpu_sgn_state(int cpuid)
880 {
881 	sig_state_t cpu_sgn;
882 
883 	(*iosram_rdp)(DOMD_MAGIC,
884 	    DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
885 	    sizeof (sig_state_t), (caddr_t)&cpu_sgn);
886 
887 	return (cpu_sgn.state_t.state);
888 }
889 
890 
891 /*
892  * Type of argument passed into plat_get_ecache_cpu via ddi_walk_devs
893  * for matching on specific CPU node in device tree
894  */
895 
typedef struct {
	char		*jnum;	/* output, kmem_alloc'd label if successful */
	int		cpuid;	/* input, to match cpuid/portid/upa-portid */
	uint_t		dimm;	/* input, index into ecache-dimm-label */
} plat_ecache_cpu_arg_t;
901 
902 
903 /*
904  * plat_get_ecache_cpu is called repeatedly by ddi_walk_devs with pointers
905  * to device tree nodes (dip) and to a plat_ecache_cpu_arg_t structure (arg).
906  * Returning DDI_WALK_CONTINUE tells ddi_walk_devs to keep going, returning
907  * DDI_WALK_TERMINATE ends the walk.  When the node for the specific CPU
908  * being searched for is found, the walk is done.  But before returning to
909  * ddi_walk_devs and plat_get_ecacheunum, we grab this CPU's ecache-dimm-label
910  * property and set the jnum member of the plat_ecache_cpu_arg_t structure to
911  * point to the label corresponding to this specific ecache DIMM.  It is up
912  * to plat_get_ecacheunum to kmem_free this string.
913  */
914 
static int
plat_get_ecache_cpu(dev_info_t *dip, void *arg)
{
	char			*devtype;
	plat_ecache_cpu_arg_t	*cpuarg;
	char			**dimm_labels;
	uint_t			numlabels;
	int			portid;

	/*
	 * Check device_type, must be "cpu"
	 */

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device_type", &devtype) != DDI_PROP_SUCCESS)
		return (DDI_WALK_CONTINUE);

	if (strcmp(devtype, "cpu")) {
		ddi_prop_free((void *)devtype);
		return (DDI_WALK_CONTINUE);
	}

	ddi_prop_free((void *)devtype);

	/*
	 * Check cpuid, portid, upa-portid (in that order), must
	 * match the cpuid being sought
	 */

	portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "cpuid", -1);

	if (portid == -1)
		portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "portid", -1);

	if (portid == -1)
		portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "upa-portid", -1);

	cpuarg = (plat_ecache_cpu_arg_t *)arg;

	if (portid != cpuarg->cpuid)
		return (DDI_WALK_CONTINUE);

	/*
	 * Found the right CPU, fetch ecache-dimm-label property
	 */

	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "ecache-dimm-label", &dimm_labels, &numlabels)
	    != DDI_PROP_SUCCESS) {
#ifdef	DEBUG
		cmn_err(CE_NOTE, "cpuid=%d missing ecache-dimm-label property",
		    portid);
#endif	/* DEBUG */
		return (DDI_WALK_TERMINATE);
	}

	if (cpuarg->dimm < numlabels) {
		/*
		 * Copy out the matching label; the caller
		 * (plat_get_ecacheunum) kmem_free()s cpuarg->jnum.
		 * NOTE(review): kmem_alloc(KM_SLEEP) does not return
		 * NULL, so the check below is purely defensive.
		 */
		cpuarg->jnum = kmem_alloc(strlen(dimm_labels[cpuarg->dimm]) + 1,
		    KM_SLEEP);
		if (cpuarg->jnum != (char *)NULL)
			(void) strcpy(cpuarg->jnum, dimm_labels[cpuarg->dimm]);
#ifdef	DEBUG
		else
			cmn_err(CE_WARN,
			    "cannot kmem_alloc for ecache dimm label");
#endif	/* DEBUG */
	}

	ddi_prop_free((void *)dimm_labels);
	return (DDI_WALK_TERMINATE);
}
989 
990 
991 /*
992  * Bit 4 of physical address indicates ecache 0 or 1
993  */
994 
995 #define	ECACHE_DIMM_MASK	0x10
996 
997 /*
998  * plat_get_ecacheunum is called to generate the unum for an ecache error.
999  * After some initialization, nearly all of the work is done by ddi_walk_devs
1000  * and plat_get_ecache_cpu.
1001  */
1002 
int
plat_get_ecacheunum(int cpuid, unsigned long long physaddr, char *buf,
		    int buflen, int *ustrlen)
{
	plat_ecache_cpu_arg_t	findcpu;
	uint_t	expander, slot, proc;

	findcpu.jnum = (char *)NULL;
	findcpu.cpuid = cpuid;

	/*
	 * Bit 4 of physaddr equal 0 maps to E0 and 1 maps to E1
	 * except for Panther and Jaguar where it indicates the reverse
	 */
	if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation) ||
	    IS_JAGUAR(cpunodes[CPU->cpu_id].implementation))
		findcpu.dimm =  (physaddr & ECACHE_DIMM_MASK) ? 0 : 1;
	else
		findcpu.dimm =  (physaddr & ECACHE_DIMM_MASK) ? 1 : 0;

	/*
	 * Walk the device tree, find this specific CPU, and get the label
	 * for this ecache, returned here in findcpu.jnum
	 */

	ddi_walk_devs(ddi_root_node(), plat_get_ecache_cpu, (void *)&findcpu);

	/* No matching CPU node or label: cannot build a unum */
	if (findcpu.jnum == (char *)NULL)
		return (-1);

	expander = STARCAT_CPUID_TO_EXPANDER(cpuid);
	slot = STARCAT_CPUID_TO_BOARDSLOT(cpuid);

	/*
	 * STARCAT_CPUID_TO_PORTID clears the CoreID bit so that
	 * STARCAT_CPUID_TO_AGENT will return a physical proc (0 - 3).
	 */
	proc = STARCAT_CPUID_TO_AGENT(STARCAT_CPUID_TO_PORTID(cpuid));

	/*
	 * NOTE: Any modifications to the snprintf() call below will require
	 * changing plat_log_fruid_error() as well!
	 */
	(void) snprintf(buf, buflen, "%s%u/P%u/E%u J%s", (slot ? "IO" : "SB"),
	    expander, proc, findcpu.dimm, findcpu.jnum);

	*ustrlen = strlen(buf);

	/* Release the label allocated by plat_get_ecache_cpu() */
	kmem_free(findcpu.jnum, strlen(findcpu.jnum) + 1);

	return (0);
}
1055 
1056 /*ARGSUSED*/
1057 int
1058 plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
1059     int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
1060 {
1061 	int ret;
1062 
1063 	/*
1064 	 * check if it's a Memory or an Ecache error.
1065 	 */
1066 	if (flt_in_memory) {
1067 		if (p2get_mem_unum != NULL) {
1068 			return (p2get_mem_unum(synd_code, P2ALIGN(flt_addr, 8),
1069 			    buf, buflen, lenp));
1070 		} else {
1071 			return (ENOTSUP);
1072 		}
1073 	} else if (flt_status & ECC_ECACHE) {
1074 		if ((ret = plat_get_ecacheunum(flt_bus_id,
1075 		    P2ALIGN(flt_addr, 8), buf, buflen, lenp)) != 0)
1076 			return (EIO);
1077 	} else {
1078 		return (ENOTSUP);
1079 	}
1080 
1081 	return (ret);
1082 }
1083 
1084 static int (*ecc_mailbox_msg_func)(plat_ecc_message_type_t, void *) = NULL;
1085 
1086 /*
1087  * To keep OS mailbox handling localized, all we do is forward the call to the
1088  * scosmb module (if it is available).
1089  */
1090 int
1091 plat_send_ecc_mailbox_msg(plat_ecc_message_type_t msg_type, void *datap)
1092 {
1093 	/*
1094 	 * find the symbol for the mailbox sender routine in the scosmb module
1095 	 */
1096 	if (ecc_mailbox_msg_func == NULL)
1097 		ecc_mailbox_msg_func = (int (*)(plat_ecc_message_type_t,
1098 		    void *))modgetsymvalue("scosmb_log_ecc_error", 0);
1099 
1100 	/*
1101 	 * If the symbol was found, call it.  Otherwise, there is not much
1102 	 * else we can do and console messages will have to suffice.
1103 	 */
1104 	if (ecc_mailbox_msg_func)
1105 		return ((*ecc_mailbox_msg_func)(msg_type, datap));
1106 	else
1107 		return (ENODEV);
1108 }
1109 
/*
 * Compose a cpuid from its FRU location components via MAKE_CPUID.
 * NOTE(review): parameter meanings (sb = system board/expander, m = module,
 * proc = agent) are assumed from the MAKE_CPUID macro name — confirm against
 * its definition in the starcat headers.
 */
int
plat_make_fru_cpuid(int sb, int m, int proc)
{
	return (MAKE_CPUID(sb, m, proc));
}
1115 
1116 /*
1117  * board number for a given proc
1118  */
1119 int
1120 plat_make_fru_boardnum(int proc)
1121 {
1122 	return (STARCAT_CPUID_TO_EXPANDER(proc));
1123 }
1124 
1125 /*
1126  * This platform hook gets called from mc_add_mem_unum_label() in the mc-us3
1127  * driver giving each platform the opportunity to add platform
1128  * specific label information to the unum for ECC error logging purposes.
1129  */
1130 void
1131 plat_add_mem_unum_label(char *unum, int mcid, int bank, int dimm)
1132 {
1133 	char	new_unum[UNUM_NAMLEN];
1134 	uint_t	expander = STARCAT_CPUID_TO_EXPANDER(mcid);
1135 	uint_t	slot = STARCAT_CPUID_TO_BOARDSLOT(mcid);
1136 
1137 	/*
1138 	 * STARCAT_CPUID_TO_PORTID clears the CoreID bit so that
1139 	 * STARCAT_CPUID_TO_AGENT will return a physical proc (0 - 3).
1140 	 */
1141 	uint_t	proc = STARCAT_CPUID_TO_AGENT(STARCAT_CPUID_TO_PORTID(mcid));
1142 
1143 	/*
1144 	 * NOTE: Any modifications to the two sprintf() calls below will
1145 	 * require changing plat_log_fruid_error() as well!
1146 	 */
1147 	if (dimm == -1)
1148 		(void) snprintf(new_unum, UNUM_NAMLEN, "%s%u/P%u/B%d %s",
1149 		    (slot ? "IO" : "SB"), expander, proc, (bank & 0x1), unum);
1150 	else
1151 		(void) snprintf(new_unum, UNUM_NAMLEN, "%s%u/P%u/B%d/D%d %s",
1152 		    (slot ? "IO" : "SB"), expander,
1153 		    proc, (bank & 0x1), (dimm & 0x3), unum);
1154 
1155 	(void) strcpy(unum, new_unum);
1156 }
1157 
1158 int
1159 plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
1160 {
1161 	int	expander = STARCAT_CPUID_TO_EXPANDER(cpuid);
1162 	int	slot = STARCAT_CPUID_TO_BOARDSLOT(cpuid);
1163 
1164 	if (snprintf(buf, buflen, "%s%d", (slot ? "IO" : "SB"), expander)
1165 	    >= buflen) {
1166 		return (ENOSPC);
1167 	} else {
1168 		*lenp = strlen(buf);
1169 		return (0);
1170 	}
1171 }
1172 
1173 /*
1174  * This routine is used by the data bearing mondo (DMV) initialization
1175  * routine to determine the number of hardware and software DMV interrupts
1176  * that a platform supports.
1177  */
1178 void
1179 plat_dmv_params(uint_t *hwint, uint_t *swint)
1180 {
1181 	*hwint = STARCAT_DMV_HWINT;
1182 	*swint = 0;
1183 }
1184 
1185 /*
1186  * If provided, this function will be called whenever the nodename is updated.
1187  * To keep OS mailbox handling localized, all we do is forward the call to the
1188  * scosmb module (if it is available).
1189  */
1190 void
1191 plat_nodename_set(void)
1192 {
1193 	void (*nodename_update_func)(uint64_t) = NULL;
1194 
1195 	/*
1196 	 * find the symbol for the nodename update routine in the scosmb module
1197 	 */
1198 	nodename_update_func = (void (*)(uint64_t))
1199 	    modgetsymvalue("scosmb_update_nodename", 0);
1200 
1201 	/*
1202 	 * If the symbol was found, call it.  Otherwise, log a note (but not to
1203 	 * the console).
1204 	 */
1205 	if (nodename_update_func != NULL) {
1206 		nodename_update_func(0);
1207 	} else {
1208 		cmn_err(CE_NOTE,
1209 		    "!plat_nodename_set: scosmb_update_nodename not found\n");
1210 	}
1211 }
1212 
1213 caddr_t	efcode_vaddr = NULL;
1214 caddr_t efcode_paddr = NULL;
1215 /*
1216  * Preallocate enough memory for fcode claims.
1217  */
1218 
1219 caddr_t
1220 efcode_alloc(caddr_t alloc_base)
1221 {
1222 	caddr_t efcode_alloc_base = (caddr_t)roundup((uintptr_t)alloc_base,
1223 	    MMU_PAGESIZE);
1224 	caddr_t vaddr;
1225 
1226 	/*
1227 	 * allocate the physical memory schizo fcode.
1228 	 */
1229 	if ((vaddr = (caddr_t)BOP_ALLOC(bootops, efcode_alloc_base,
1230 	    efcode_size, MMU_PAGESIZE)) == NULL)
1231 		cmn_err(CE_PANIC, "Cannot allocate Efcode Memory");
1232 
1233 	efcode_vaddr = vaddr;
1234 
1235 	return (efcode_alloc_base + efcode_size);
1236 }
1237 
1238 caddr_t
1239 plat_startup_memlist(caddr_t alloc_base)
1240 {
1241 	caddr_t tmp_alloc_base;
1242 
1243 	tmp_alloc_base = efcode_alloc(alloc_base);
1244 	tmp_alloc_base = (caddr_t)roundup((uintptr_t)tmp_alloc_base,
1245 	    ecache_alignsize);
1246 	return (tmp_alloc_base);
1247 }
1248 
1249 /*
1250  * This is a helper function to determine if a given
1251  * node should be considered for a dr operation according
1252  * to predefined dr names. This is accomplished using
1253  * a function defined in drmach module. The drmach module
1254  * owns the definition of dr allowable names.
1255  * Formal Parameter: The name of a device node.
1256  * Expected Return Value: -1, device node name does not map to a valid dr name.
1257  *               A value greater or equal to 0, name is valid.
1258  */
1259 int
1260 starcat_dr_name(char *name)
1261 {
1262 	int (*drmach_name2type)(char *) = NULL;
1263 
1264 	/* Get a pointer to helper function in the dramch module. */
1265 	drmach_name2type =
1266 	    (int (*)(char *))kobj_getsymvalue("drmach_name2type_idx", 0);
1267 
1268 	if (drmach_name2type == NULL)
1269 		return (-1);
1270 
1271 	return ((*drmach_name2type)(name));
1272 }
1273 
void
startup_platform(void)
{
	/* set per platform constants for mutex backoff */
	mutex_backoff_base = 2;
	mutex_cap_factor = 64;
}
1281 
1282 /*
1283  * KDI functions - used by the in-situ kernel debugger (kmdb) to perform
1284  * platform-specific operations.  These functions execute when the world is
1285  * stopped, and as such cannot make any blocking calls, hold locks, etc.
1286  * promif functions are a special case, and may be used.
1287  */
1288 
static void
starcat_system_claim(void)
{
	/* Tell the lbolt machinery the debugger has taken over. */
	lbolt_debug_entry();

	/*
	 * Set the signature block to OBP_SIG — presumably so the SC sees
	 * the domain as no longer running the OS while kmdb is in control.
	 * NOTE(review): the Forth words write both the global and per-CPU
	 * signatures; confirm against the sigblock definitions in
	 * cpu_sgnblk_defs.h.
	 */
	prom_interpret("sigb-sig! my-sigb-sig!", OBP_SIG, OBP_SIG, 0, 0, 0);
}
1296 
static void
starcat_system_release(void)
{
	/*
	 * Restore the OS signature (OS_SIG) in the signature block now
	 * that the debugger is returning control to the kernel.
	 */
	prom_interpret("sigb-sig! my-sigb-sig!", OS_SIG, OS_SIG, 0, 0, 0);

	/* Resume normal lbolt operation. */
	lbolt_debug_return();
}
1304 
/*
 * Register the platform-specific claim/release hooks with the kernel
 * debugger interface (kdi).
 */
void
plat_kdi_init(kdi_t *kdi)
{
	kdi->pkdi_system_claim = starcat_system_claim;
	kdi->pkdi_system_release = starcat_system_release;
}
1311 
1312 /*
1313  * This function returns 1 if large pages for kernel heap are supported
1314  * and 0 otherwise.
1315  *
1316  * Currently we disable lp kmem support if kpr is going to be enabled
1317  * because in the case of large pages hat_add_callback()/hat_delete_callback()
1318  * cause network performance degradation
1319  */
1320 int
1321 plat_lpkmem_is_supported(void)
1322 {
1323 	extern int segkmem_reloc;
1324 
1325 	if (kernel_cage_enable && (ncpunode >= 32 || segkmem_reloc == 1))
1326 		return (0);
1327 
1328 	return (1);
1329 }
1330