xref: /titanic_52/usr/src/uts/sun4u/starcat/os/starcat.c (revision a83cadce5d3331b64803bfc641036cec23602c74)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/sysmacros.h>
31 #include <sys/sunddi.h>
32 #include <sys/esunddi.h>
33 #include <sys/sunndi.h>
34 #include <sys/modctl.h>
35 #include <sys/promif.h>
36 #include <sys/machparam.h>
37 #include <sys/kobj.h>
38 #include <sys/cpuvar.h>
39 #include <sys/mem_cage.h>
40 #include <sys/promif.h>
41 #include <sys/promimpl.h>
42 #include <sys/platform_module.h>
43 #include <sys/errno.h>
44 #include <sys/cpu_sgnblk_defs.h>
45 #include <sys/iosramio.h>
46 #include <sys/domaind.h>
47 #include <sys/starcat.h>
48 #include <sys/machsystm.h>
49 #include <sys/bootconf.h>
50 #include <sys/memnode.h>
51 #include <vm/vm_dep.h>
52 #include <vm/page.h>
53 #include <sys/cheetahregs.h>
54 #include <sys/plat_ecc_unum.h>
55 #include <sys/plat_ecc_dimm.h>
56 #include <sys/lgrp.h>
57 #include <sys/dr.h>
58 #include <sys/post/scat_dcd.h>
59 #include <sys/kdi_impl.h>
60 #include <sys/iosramreg.h>
61 #include <sys/iosramvar.h>
62 #include <sys/mc-us3.h>
63 
64 /* Preallocation of spare tsb's for DR */
65 int starcat_tsb_spares = STARCAT_SPARE_TSB_MAX;
66 
67 /* Set the maximum number of slot0 + slot1 boards. .. for DR */
68 int starcat_boards = STARCAT_BDSET_MAX * STARCAT_BDSET_SLOT_MAX;
69 
70 /* Maximum number of cpus per board... for DR */
71 int starcat_cpu_per_board = MAX(STARCAT_SLOT0_CPU_MAX, STARCAT_SLOT1_CPU_MAX);
72 
73 /* Maximum number of mem-units per board... for DR */
74 int starcat_mem_per_board = MAX(STARCAT_SLOT0_MEM_MAX, STARCAT_SLOT1_MEM_MAX);
75 
76 /* Maximum number of io-units (buses) per board... for DR */
77 int starcat_io_per_board = 2 * MAX(STARCAT_SLOT0_IO_MAX, STARCAT_SLOT1_IO_MAX);
78 
79 /* Preferred minimum cage size (expressed in pages)... for DR */
80 pgcnt_t starcat_startup_cage_size = 0;
81 
82 /* Platform specific function to get unum information */
83 int (*p2get_mem_unum)(int, uint64_t, char *, int, int *);
84 
/* Memory for fcode claims: 16K times the maximum possible number of Schizos */
86 #define	EFCODE_SIZE	(STARCAT_BDSET_MAX * 4 * 0x4000)
87 int efcode_size = EFCODE_SIZE;
88 
89 void sgn_update_all_cpus(ushort_t, uchar_t, uchar_t);
90 
/*
 * The IOSRAM driver is loaded in load_platform_drivers(); any cpu signature
 * usage prior to that time will not have a function to call.
 */
95 static int (*iosram_rdp)(uint32_t key, uint32_t off, uint32_t len,
96 	    caddr_t dptr) = prom_starcat_iosram_read;
97 static int (*iosram_wrp)(uint32_t key, uint32_t off, uint32_t len,
98 	    caddr_t dptr) = prom_starcat_iosram_write;
99 
100 plat_dimm_sid_board_t	domain_dimm_sids[STARCAT_BDSET_MAX];
101 
102 /*
103  * set_platform_max_ncpus should return the maximum number of CPUs that the
104  * platform supports.  This function is called from check_cpus() to set the
105  * value of max_ncpus [see PSARC 1997/165 CPU Dynamic Reconfiguration].
106  * Data elements which are allocated based upon max_ncpus are all accessed
107  * via cpu_seqid and not physical IDs.  Previously, the value of max_ncpus
108  * was being set to the largest physical ID, which led to boot problems on
109  * systems with less than 1.25GB of memory.
110  */
111 
112 int
113 set_platform_max_ncpus(void)
114 {
115 	int n;
116 
117 	/*
118 	 * Convert number of slot0 + slot1 boards to number of expander brds
119 	 * and constrain the value to an architecturally plausible range
120 	 */
121 	n = MAX(starcat_boards, STARCAT_BDSET_MIN * STARCAT_BDSET_SLOT_MAX);
122 	n = MIN(n, STARCAT_BDSET_MAX * STARCAT_BDSET_SLOT_MAX);
123 	n = (n + STARCAT_BDSET_SLOT_MAX - 1) / STARCAT_BDSET_SLOT_MAX;
124 
125 	/* return maximum number of cpus possible on N expander boards */
126 	return (n * STARCAT_BDSET_CPU_MAX - STARCAT_SLOT1_CPU_MAX);
127 }
128 
129 int
130 set_platform_tsb_spares()
131 {
132 	return (MIN(starcat_tsb_spares, MAX_UPA));
133 }
134 
135 #pragma weak mmu_init_large_pages
136 
/*
 * Establish Starcat-specific boot-time defaults: CPU signature hook,
 * TOD module selection, dispatcher/lgroup tuning, ISM large pages,
 * and kernel page relocation.
 */
void
set_platform_defaults(void)
{
	extern char *tod_module_name;
	extern int ts_dispatch_extended;
	extern void cpu_sgn_update(ushort_t, uchar_t, uchar_t, int);
	extern int tsb_lgrp_affinity;
	extern int segkmem_reloc;
	extern void mmu_init_large_pages(size_t);
	extern int ncpunode;	/* number of CPUs detected by OBP */

#ifdef DEBUG
	/* Be more verbose about correctable errors in DEBUG kernels */
	ce_verbose_memory = 2;
	ce_verbose_other = 2;
#endif

	/* Set the CPU signature function pointer */
	cpu_sgn_func = cpu_sgn_update;

	/* Set appropriate tod module for starcat */
	ASSERT(tod_module_name == NULL);
	tod_module_name = "todstarcat";

	/*
	 * Use the alternate TS dispatch table, which is better
	 * tuned for large servers.  Only enable it while the value
	 * is still -1 (i.e. it has not already been configured).
	 */
	if (ts_dispatch_extended == -1)
		ts_dispatch_extended = 1;

	/*
	 * Use lgroup-aware TSB allocations on this platform,
	 * since they are a considerable performance win.
	 */
	tsb_lgrp_affinity = 1;

	/*
	 * Set up large ISM pages when all page sizes are usable and a
	 * non-default ISM page size is configured.  mmu_init_large_pages
	 * is a weak symbol (see #pragma weak above), so verify that it
	 * is present before calling it.
	 */
	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) {
		if (&mmu_init_large_pages)
			mmu_init_large_pages(mmu_ism_pagesize);
	}

	/*
	 * KPR (kernel page relocation) is supported on this platform.
	 * Enable it only on cage-enabled configurations of 32+ CPUs.
	 */
	if (hat_kpr_enabled && kernel_cage_enable && ncpunode >= 32) {
		segkmem_reloc = 1;
		cmn_err(CE_NOTE, "!Kernel Page Relocation is ENABLED");
	} else {
		cmn_err(CE_NOTE, "!Kernel Page Relocation is DISABLED");
	}
}
189 
190 #ifdef DEBUG
191 pgcnt_t starcat_cage_size_limit;
192 #endif
193 
/*
 * Configure the DR kernel cage.  The preferred cage size is the larger
 * of starcat_startup_cage_size and total_pages/256; the cage is seeded
 * from phys_avail and grows downward (descending pfns).
 */
void
set_platform_cage_params(void)
{
	extern pgcnt_t total_pages;
	extern struct memlist *phys_avail;

	if (kernel_cage_enable) {
		pgcnt_t preferred_cage_size;

		preferred_cage_size =
			MAX(starcat_startup_cage_size, total_pages / 256);

#ifdef DEBUG
		/* DEBUG kernels may force a specific cage size */
		if (starcat_cage_size_limit)
			preferred_cage_size = starcat_cage_size_limit;
#endif
		/*
		 * Note: we are assuming that POST has loaded the whole
		 * show into the high end of memory.  Having taken this
		 * leap, we hand the whole of phys_avail to the cage
		 * subsystem and arrange for the cage to grow downward
		 * (descending pfns).
		 */
		kcage_range_init(phys_avail, KCAGE_DOWN, preferred_cage_size);
	}

	if (kcage_on)
		cmn_err(CE_NOTE, "!DR Kernel Cage is ENABLED");
	else
		cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
}
225 
226 void
227 load_platform_modules(void)
228 {
229 	if (modload("misc", "pcihp") < 0) {
230 		cmn_err(CE_NOTE, "pcihp driver failed to load");
231 	}
232 }
233 
234 /*
235  * Starcat does not support power control of CPUs from the OS.
236  */
237 /*ARGSUSED*/
238 int
239 plat_cpu_poweron(struct cpu *cp)
240 {
241 	int (*starcat_cpu_poweron)(struct cpu *) = NULL;
242 
243 	starcat_cpu_poweron =
244 		(int (*)(struct cpu *))kobj_getsymvalue(
245 			"drmach_cpu_poweron", 0);
246 
247 	if (starcat_cpu_poweron == NULL)
248 		return (ENOTSUP);
249 	else
250 		return ((starcat_cpu_poweron)(cp));
251 }
252 
253 /*ARGSUSED*/
254 int
255 plat_cpu_poweroff(struct cpu *cp)
256 {
257 	int (*starcat_cpu_poweroff)(struct cpu *) = NULL;
258 
259 	starcat_cpu_poweroff =
260 		(int (*)(struct cpu *))kobj_getsymvalue(
261 			"drmach_cpu_poweroff", 0);
262 
263 	if (starcat_cpu_poweroff == NULL)
264 		return (ENOTSUP);
265 	else
266 		return ((starcat_cpu_poweroff)(cp));
267 }
268 
269 /*
270  * The following are currently private to Starcat DR
271  */
272 int
273 plat_max_boards()
274 {
275 	return (starcat_boards);
276 }
277 
278 int
279 plat_max_cpu_units_per_board()
280 {
281 	return (starcat_cpu_per_board);
282 }
283 
284 int
285 plat_max_mc_units_per_board()
286 {
287 	return (starcat_mem_per_board); /* each CPU has a memory controller */
288 }
289 
290 int
291 plat_max_mem_units_per_board()
292 {
293 	return (starcat_mem_per_board);
294 }
295 
296 int
297 plat_max_io_units_per_board()
298 {
299 	return (starcat_io_per_board);
300 }
301 
302 int
303 plat_max_cpumem_boards(void)
304 {
305 	return (STARCAT_BDSET_MAX);
306 }
307 
/*
 * Map a pfn to its memnode by shifting off the per-slice page bits
 * (mem_node_pfn_shift is set in plat_build_mem_nodes()).
 */
int
plat_pfn_to_mem_node(pfn_t pfn)
{
	return (pfn >> mem_node_pfn_shift);
}
313 
#define	STARCAT_MC_MEMBOARD_SHIFT 37	/* Boards on 128GB boundary */
315 
/* ARGSUSED */
/*
 * Build the memnode configuration from the boot memory list.
 * list is arranged as <addr, len> pairs (nelems entries total).
 */
void
plat_build_mem_nodes(u_longlong_t *list, size_t nelems)
{
	size_t	elem;
	pfn_t	basepfn;
	pgcnt_t	npgs;

	/*
	 * Starcat mem slices are always aligned on a 128GB boundary,
	 * fixed, and limited to one slice per expander due to design
	 * of the centerplane ASICs.
	 */
	mem_node_pfn_shift = STARCAT_MC_MEMBOARD_SHIFT - MMU_PAGESHIFT;
	mem_node_physalign = 0;

	/*
	 * Boot install lists are arranged <addr, len>, <addr, len>, ...
	 */
	for (elem = 0; elem < nelems; elem += 2) {
		basepfn = btop(list[elem]);
		npgs = btop(list[elem+1]);
		mem_node_add_slice(basepfn, basepfn + npgs - 1);
	}
}
341 
342 /*
343  * Find the CPU associated with a slice at boot-time.
344  */
/*
 * Examine the memory controller node 'nodeid' and establish the
 * start-of-day lgroup-handle-to-memnode mappings for any valid memory
 * banks it decodes.  Called at boot for each MC node in the device tree.
 */
void
plat_fill_mc(pnode_t nodeid)
{
	int		len;
	uint64_t	mc_addr, mask;
	uint64_t	mc_decode[MAX_BANKS_PER_MC];
	uint32_t	regs[4];
	int		local_mc;
	int		portid;
	int		expnum;
	int		i;

	/*
	 * Memory address decoding registers
	 * (see Chap 9 of SPARCV9 JSP-1 US-III implementation)
	 */
	const uint64_t	mc_decode_addr[MAX_BANKS_PER_MC] = {
		0x400028, 0x400010, 0x400018, 0x400020
	};

	/*
	 * Starcat memory controller portid == global CPU id.
	 * Bail out if the property is missing or invalid.
	 */
	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
		(portid == -1))
		return;

	expnum = STARCAT_CPUID_TO_EXPANDER(portid);

	/*
	 * The "reg" property returns 4 32-bit values. The first two are
	 * combined to form a 64-bit address.  The second two are for a
	 * 64-bit size, but we don't actually need to look at that value.
	 */
	len = prom_getproplen(nodeid, "reg");
	if (len != (sizeof (uint32_t) * 4)) {
		prom_printf("Warning: malformed 'reg' property\n");
		return;
	}
	if (prom_getprop(nodeid, "reg", (caddr_t)regs) < 0)
		return;
	mc_addr = ((uint64_t)regs[0]) << 32;
	mc_addr |= (uint64_t)regs[1];

	/*
	 * Figure out whether the memory controller we are examining
	 * belongs to this CPU/CMP or a different one.
	 */
	if (portid == cpunodes[CPU->cpu_id].portid)
		local_mc = 1;
	else
		local_mc = 0;

	for (i = 0; i < MAX_BANKS_PER_MC; i++) {

		mask = mc_decode_addr[i];

		/*
		 * If the memory controller is local to this CPU, we use
		 * the special ASI to read the decode registers.
		 * Otherwise, we load the values from a magic address in
		 * I/O space.
		 */
		if (local_mc)
			mc_decode[i] = lddmcdecode(mask & MC_OFFSET_MASK);
		else
			mc_decode[i] = lddphysio((mc_addr | mask));

		/* Only banks with the valid bit set contribute a mapping */
		if (mc_decode[i] >> MC_VALID_SHIFT) {
			uint64_t base = MC_BASE(mc_decode[i]) << PHYS2UM_SHIFT;
			int sliceid = (base >> STARCAT_MC_MEMBOARD_SHIFT);

			if (sliceid < max_mem_nodes) {
				/*
				 * Establish start-of-day mappings of
				 * lgroup platform handles to memnodes.
				 * Handle == Expander Number
				 * Memnode == Fixed 128GB Slice
				 */
				plat_assign_lgrphand_to_mem_node(expnum,
				    sliceid);
			}
		}
	}
}
430 
431 /*
432  * Starcat support for lgroups.
433  *
434  * On Starcat, an lgroup platform handle == expander number.
435  * For split-slot configurations (e.g. slot 0 and slot 1 boards
436  * in different domains) an MCPU board has only remote memory.
437  *
438  * The centerplane logic provides fixed 128GB memory slices
439  * each of which map to a memnode.  The initial mapping of
440  * memnodes to lgroup handles is determined at boot time.
441  * A DR addition of memory adds a new mapping. A DR copy-rename
442  * swaps mappings.
443  */
444 
445 /*
446  * Convert board number to expander number.
447  */
448 #define	BOARDNUM_2_EXPANDER(b)	(b >> 1)
449 
450 /*
451  * Return the number of boards configured with NULL LPA.
452  */
/*
 * Return the number of slot 0 boards configured with a NULL processor
 * LPA, by scanning the GDCD read from IOSRAM.  On failure to allocate
 * or to read a valid GDCD, returns a value greater than EXP_COUNT
 * (EXP_COUNT+1 or EXP_COUNT+2) so the caller treats it as a NULL-LPA
 * condition.
 */
static int
check_for_null_lpa(void)
{
	gdcd_t	*gdcd;
	uint_t	exp, nlpa;

	/*
	 * Read GDCD from IOSRAM.
	 * If this fails indicate a NULL LPA condition.
	 */
	if ((gdcd = kmem_zalloc(sizeof (gdcd_t), KM_NOSLEEP)) == NULL)
		return (EXP_COUNT+1);

	/* Validate magic and version before trusting the contents */
	if ((*iosram_rdp)(GDCD_MAGIC, 0, sizeof (gdcd_t), (caddr_t)gdcd) ||
	    (gdcd->h.dcd_magic != GDCD_MAGIC) ||
	    (gdcd->h.dcd_version != DCD_VERSION)) {
		kmem_free(gdcd, sizeof (gdcd_t));
		cmn_err(CE_WARN, "check_for_null_lpa: failed to access GDCD\n");
		return (EXP_COUNT+2);
	}

	/*
	 * Check for NULL LPAs on all slot 0 boards in domain
	 * (i.e. in all expanders marked good for this domain).
	 */
	nlpa = 0;
	for (exp = 0; exp < EXP_COUNT; exp++) {
		if (RSV_GOOD(gdcd->dcd_slot[exp][0].l1ss_rsv) &&
		    (gdcd->dcd_slot[exp][0].l1ss_flags &
		    L1SSFLG_THIS_L1_NULL_PROC_LPA))
			nlpa++;
	}

	kmem_free(gdcd, sizeof (gdcd_t));
	return (nlpa);
}
489 
490 /*
491  * Return the platform handle for the lgroup containing the given CPU
492  *
493  * For Starcat, lgroup platform handle == expander.
494  */
495 
496 extern int mpo_disabled;
497 extern lgrp_handle_t lgrp_default_handle;
498 int null_lpa_boards = -1;
499 
500 lgrp_handle_t
501 plat_lgrp_cpu_to_hand(processorid_t id)
502 {
503 	lgrp_handle_t		plathand;
504 
505 	plathand = STARCAT_CPUID_TO_EXPANDER(id);
506 
507 	/*
508 	 * Return the real platform handle for the CPU until
509 	 * such time as we know that MPO should be disabled.
510 	 * At that point, we set the "mpo_disabled" flag to true,
511 	 * and from that point on, return the default handle.
512 	 *
513 	 * By the time we know that MPO should be disabled, the
514 	 * first CPU will have already been added to a leaf
515 	 * lgroup, but that's ok. The common lgroup code will
516 	 * double check that the boot CPU is in the correct place,
517 	 * and in the case where mpo should be disabled, will move
518 	 * it to the root if necessary.
519 	 */
520 	if (mpo_disabled) {
521 		/* If MPO is disabled, return the default (UMA) handle */
522 		plathand = lgrp_default_handle;
523 	} else {
524 		if (null_lpa_boards > 0) {
525 			/* Determine if MPO should be disabled */
526 			mpo_disabled = 1;
527 			plathand = lgrp_default_handle;
528 		}
529 	}
530 	return (plathand);
531 }
532 
533 /*
534  * Platform specific lgroup initialization
535  */
/*
 * Platform specific lgroup initialization: set load-balancing tuneables.
 */
void
plat_lgrp_init(void)
{
	extern uint32_t lgrp_expand_proc_thresh;
	extern uint32_t lgrp_expand_proc_diff;

	/*
	 * Set tuneables for Starcat architecture
	 *
	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
	 * this process is currently running on before considering
	 * expanding threads to another lgroup.
	 *
	 * lgrp_expand_proc_diff determines how much less the remote lgroup
	 * must be loaded before expanding to it.
	 *
	 * Since remote latencies can be costly, attempt to keep 3 threads
	 * within the same lgroup before expanding to the next lgroup.
	 */
	lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX * 3;
	lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
}
558 
559 /*
560  * Platform notification of lgroup (re)configuration changes
561  */
562 /*ARGSUSED*/
/*
 * Handle platform notification of lgroup (re)configuration changes:
 * memory addition establishes a handle-to-memnode mapping, deletion is
 * a no-op, and DR copy-rename swaps the mappings of the two boards.
 * Does nothing when MPO is disabled.
 */
void
plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
{
	update_membounds_t	*umb;
	lgrp_config_mem_rename_t lmr;
	int			sbd, tbd;
	lgrp_handle_t		hand, shand, thand;
	int			mnode, snode, tnode;

	if (mpo_disabled)
		return;

	switch (evt) {

	case LGRP_CONFIG_MEM_ADD:
		/*
		 * Establish the lgroup handle to memnode translation.
		 */
		umb = (update_membounds_t *)arg;

		hand = BOARDNUM_2_EXPANDER(umb->u_board);
		mnode = plat_pfn_to_mem_node(umb->u_base >> MMU_PAGESHIFT);
		plat_assign_lgrphand_to_mem_node(hand, mnode);

		break;

	case LGRP_CONFIG_MEM_DEL:
		/* We don't have to do anything */

		break;

	case LGRP_CONFIG_MEM_RENAME:
		/*
		 * During a DR copy-rename operation, all of the memory
		 * on one board is moved to another board -- but the
		 * addresses/pfns and memnodes don't change. This means
		 * the memory has changed locations without changing identity.
		 *
		 * Source is where we are copying from and target is where we
		 * are copying to.  After source memnode is copied to target
		 * memnode, the physical addresses of the target memnode are
		 * renamed to match what the source memnode had.  Then target
		 * memnode can be removed and source memnode can take its
		 * place.
		 *
		 * To do this, swap the lgroup handle to memnode mappings for
		 * the boards, so target lgroup will have source memnode and
		 * source lgroup will have empty target memnode which is where
		 * its memory will go (if any is added to it later).
		 *
		 * Then source memnode needs to be removed from its lgroup
		 * and added to the target lgroup where the memory was living
		 * but under a different name/memnode.  The memory was in the
		 * target memnode and now lives in the source memnode with
		 * different physical addresses even though it is the same
		 * memory.
		 */

		/* arg encodes source board in the low 16 bits, target above */
		sbd = arg & 0xffff;
		tbd = (arg & 0xffff0000) >> 16;
		shand = BOARDNUM_2_EXPANDER(sbd);
		thand = BOARDNUM_2_EXPANDER(tbd);
		snode = plat_lgrphand_to_mem_node(shand);
		tnode = plat_lgrphand_to_mem_node(thand);

		/* Swap the two boards' handle-to-memnode mappings */
		plat_assign_lgrphand_to_mem_node(thand, snode);
		plat_assign_lgrphand_to_mem_node(shand, tnode);

		lmr.lmem_rename_from = shand;
		lmr.lmem_rename_to = thand;

		/*
		 * Remove source memnode of copy rename from its lgroup
		 * and add it to its new target lgroup
		 */
		lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
		    (uintptr_t)&lmr);

		break;

	default:
		break;
	}
}
646 
647 /*
648  * Return latency between "from" and "to" lgroups
649  *
650  * This latency number can only be used for relative comparison
651  * between lgroups on the running system, cannot be used across platforms,
652  * and may not reflect the actual latency.  It is platform and implementation
653  * specific, so platform gets to decide its value.  It would be nice if the
654  * number was at least proportional to make comparisons more meaningful though.
655  * NOTE: The numbers below are supposed to be load latencies for uncached
656  * memory divided by 10.
657  */
658 int
659 plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to)
660 {
661 	/*
662 	 * Return min remote latency when there are more than two lgroups
663 	 * (root and child) and getting latency between two different lgroups
664 	 * or root is involved
665 	 */
666 	if (lgrp_optimizations() && (from != to ||
667 	    from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE))
668 		return (48);
669 	else
670 		return (28);
671 }
672 
673 /*
674  * Return platform handle for root lgroup
675  */
676 lgrp_handle_t
677 plat_lgrp_root_hand(void)
678 {
679 	if (mpo_disabled)
680 		return (lgrp_default_handle);
681 
682 	return (LGRP_DEFAULT_HANDLE);
683 }
684 
685 /* ARGSUSED */
/* No platform-specific freelist processing is needed on Starcat */
void
plat_freelist_process(int mnode)
{
}
690 
/*
 * Load and attach the platform drivers Starcat depends on: IOSRAM
 * (then switch the iosram read/write hooks from the PROM routines to
 * the driver's), AXQ, SCOSMB, DR, mc-us3 and pcisch.  Failure to set
 * up IOSRAM is fatal (panics); other load failures only warn.
 */
void
load_platform_drivers(void)
{
	uint_t		tunnel;
	pnode_t		nodeid;
	dev_info_t	*chosen_devi;
	char		chosen_iosram[MAXNAMELEN];

	/*
	 * Get /chosen node - that's where the tunnel property is
	 */
	nodeid = prom_chosennode();

	/*
	 * Get the iosram property from the chosen node.
	 */
	if (prom_getprop(nodeid, IOSRAM_CHOSEN_PROP, (caddr_t)&tunnel) <= 0) {
		prom_printf("Unable to get iosram property\n");
		cmn_err(CE_PANIC, "Unable to get iosram property\n");
	}

	/* Translate the tunnel phandle into a device path */
	if (prom_phandle_to_path((phandle_t)tunnel, chosen_iosram,
			sizeof (chosen_iosram)) < 0) {
		(void) prom_printf("prom_phandle_to_path(0x%x) failed\n",
				tunnel);
		cmn_err(CE_PANIC, "prom_phandle_to_path(0x%x) failed\n",
				tunnel);
	}

	/*
	 * Attach all driver instances along the iosram's device path
	 */
	if (i_ddi_attach_hw_nodes("iosram") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "IOSRAM failed to load\n");
	}

	/* Verify the IOSRAM device is really there, then drop the hold */
	if ((chosen_devi = e_ddi_hold_devi_by_path(chosen_iosram, 0)) == NULL) {
		(void) prom_printf("e_ddi_hold_devi_by_path(%s) failed\n",
			chosen_iosram);
		cmn_err(CE_PANIC, "e_ddi_hold_devi_by_path(%s) failed\n",
			chosen_iosram);
	}
	ndi_rele_devi(chosen_devi);

	/*
	 * iosram driver is now loaded so we need to set our read and
	 * write pointers.
	 */
	iosram_rdp = (int (*)(uint32_t, uint32_t, uint32_t, caddr_t))
			modgetsymvalue("iosram_rd", 0);
	iosram_wrp = (int (*)(uint32_t, uint32_t, uint32_t, caddr_t))
			modgetsymvalue("iosram_wr", 0);

	/*
	 * Need to check for null proc LPA after IOSRAM driver is loaded
	 * and before multiple lgroups created (when start_other_cpus() called)
	 */
	null_lpa_boards = check_for_null_lpa();

	/* load and attach the axq driver */
	if (i_ddi_attach_hw_nodes("axq") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "AXQ failed to load\n");
	}

	/* load Starcat Solaris Mailbox Client driver */
	if (modload("misc", "scosmb") < 0) {
		cmn_err(CE_WARN, "SCOSMB failed to load\n");
	}

	/* load the DR driver */
	if (i_ddi_attach_hw_nodes("dr") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "dr failed to load");
	}

	/*
	 * Load the mc-us3 memory driver and hold it so it stays loaded.
	 */
	if (i_ddi_attach_hw_nodes("mc-us3") != DDI_SUCCESS)
		cmn_err(CE_WARN, "mc-us3 failed to load");
	else
		(void) ddi_hold_driver(ddi_name_to_major("mc-us3"));

	/* Load the schizo pci bus nexus driver. */
	if (i_ddi_attach_hw_nodes("pcisch") != DDI_SUCCESS)
		cmn_err(CE_WARN, "pcisch failed to load");

	plat_ecc_init();
}
779 
780 
781 /*
782  * No platform drivers on this platform
783  */
char *platform_module_list[] = {
	(char *)0	/* NULL terminator; list is empty on this platform */
};
787 
788 
789 /*ARGSUSED*/
/* TOD fault notification is a no-op on this platform */
void
plat_tod_fault(enum tod_fault_type tod_bad)
{
}
794 
795 /*
796  * Update the signature(s) in the IOSRAM's domain data section.
797  */
/*
 * Write the signature for 'cpuid' (or all cpus if cpuid < 0) into the
 * IOSRAM domain data section, and usually mirror it to the domain
 * state signature as well.
 */
void
cpu_sgn_update(ushort_t sgn, uchar_t state, uchar_t sub_state, int cpuid)
{
	sig_state_t new_sgn;
	sig_state_t current_sgn;

	/*
	 * If the substate is REBOOT, then check for panic flow:
	 * a current domain state of SIGST_EXIT means this reboot
	 * is really a panic reboot.
	 */
	if (sub_state == SIGSUBST_REBOOT) {
		(*iosram_rdp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET,
		    sizeof (sig_state_t), (caddr_t)&current_sgn);
		if (current_sgn.state_t.state == SIGST_EXIT)
			sub_state = SIGSUBST_PANIC_REBOOT;
	}

	/*
	 * cpuid == -1 indicates that the operation applies to all cpus.
	 */
	if (cpuid < 0) {
		sgn_update_all_cpus(sgn, state, sub_state);
		return;
	}

	/* Write this cpu's signature into its IOSRAM slot */
	new_sgn.signature = CPU_SIG_BLD(sgn, state, sub_state);
	(*iosram_wrp)(DOMD_MAGIC,
	    DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
	    sizeof (sig_state_t), (caddr_t)&new_sgn);

	/*
	 * Under certain conditions we don't update the signature
	 * of the domain_state.
	 */
	if ((sgn == OS_SIG) &&
	    ((state == SIGST_OFFLINE) || (state == SIGST_DETACHED)))
		return;
	(*iosram_wrp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET, sizeof (sig_state_t),
	    (caddr_t)&new_sgn);
}
837 
838 /*
839  * Update the signature(s) in the IOSRAM's domain data section for all CPUs.
840  */
/*
 * Update the signature(s) in the IOSRAM's domain data section for the
 * domain state and for every CPU slot whose cpu structure exists with
 * the CPU_EXISTS or CPU_QUIESCED flag set.
 */
void
sgn_update_all_cpus(ushort_t sgn, uchar_t state, uchar_t sub_state)
{
	sig_state_t new_sgn;
	int i = 0;

	new_sgn.signature = CPU_SIG_BLD(sgn, state, sub_state);

	/*
	 * First update the domain_state signature
	 */
	(*iosram_wrp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET, sizeof (sig_state_t),
	    (caddr_t)&new_sgn);

	/* Then each eligible CPU's per-cpu signature slot */
	for (i = 0; i < NCPU; i++) {
		if (cpu[i] != NULL && (cpu[i]->cpu_flags &
		    (CPU_EXISTS|CPU_QUIESCED))) {
			(*iosram_wrp)(DOMD_MAGIC,
			    DOMD_CPUSIGS_OFFSET + i * sizeof (sig_state_t),
			    sizeof (sig_state_t), (caddr_t)&new_sgn);
		}
	}
}
864 
/* Read the signature field of 'cpuid' from its IOSRAM slot */
ushort_t
get_cpu_sgn(int cpuid)
{
	sig_state_t cpu_sgn;

	(*iosram_rdp)(DOMD_MAGIC,
	    DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
	    sizeof (sig_state_t), (caddr_t)&cpu_sgn);

	return (cpu_sgn.state_t.sig);
}
876 
/* Read the state field of 'cpuid''s signature from its IOSRAM slot */
uchar_t
get_cpu_sgn_state(int cpuid)
{
	sig_state_t cpu_sgn;

	(*iosram_rdp)(DOMD_MAGIC,
	    DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
	    sizeof (sig_state_t), (caddr_t)&cpu_sgn);

	return (cpu_sgn.state_t.state);
}
888 
889 
890 /*
891  * Type of argument passed into plat_get_ecache_cpu via ddi_walk_devs
892  * for matching on specific CPU node in device tree
893  */
894 
typedef struct {
	char		*jnum;	/* output, kmem_alloc'd	if successful */
	int		cpuid;	/* input, to match cpuid/portid/upa-portid */
	uint_t		dimm;	/* input, index into ecache-dimm-label */
} plat_ecache_cpu_arg_t;
900 
901 
902 /*
903  * plat_get_ecache_cpu is called repeatedly by ddi_walk_devs with pointers
904  * to device tree nodes (dip) and to a plat_ecache_cpu_arg_t structure (arg).
905  * Returning DDI_WALK_CONTINUE tells ddi_walk_devs to keep going, returning
906  * DDI_WALK_TERMINATE ends the walk.  When the node for the specific CPU
907  * being searched for is found, the walk is done.  But before returning to
908  * ddi_walk_devs and plat_get_ecacheunum, we grab this CPU's ecache-dimm-label
909  * property and set the jnum member of the plat_ecache_cpu_arg_t structure to
910  * point to the label corresponding to this specific ecache DIMM.  It is up
911  * to plat_get_ecacheunum to kmem_free this string.
912  */
913 
914 static int
915 plat_get_ecache_cpu(dev_info_t *dip, void *arg)
916 {
917 	char			*devtype;
918 	plat_ecache_cpu_arg_t	*cpuarg;
919 	char			**dimm_labels;
920 	uint_t			numlabels;
921 	int			portid;
922 
923 	/*
924 	 * Check device_type, must be "cpu"
925 	 */
926 
927 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
928 						"device_type", &devtype)
929 				!= DDI_PROP_SUCCESS)
930 		return (DDI_WALK_CONTINUE);
931 
932 	if (strcmp(devtype, "cpu")) {
933 		ddi_prop_free((void *)devtype);
934 		return (DDI_WALK_CONTINUE);
935 	}
936 
937 	ddi_prop_free((void *)devtype);
938 
939 	/*
940 	 * Check cpuid, portid, upa-portid (in that order), must
941 	 * match the cpuid being sought
942 	 */
943 
944 	portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
945 				DDI_PROP_DONTPASS, "cpuid", -1);
946 
947 	if (portid == -1)
948 		portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
949 				DDI_PROP_DONTPASS, "portid", -1);
950 
951 	if (portid == -1)
952 		portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
953 				DDI_PROP_DONTPASS, "upa-portid", -1);
954 
955 	cpuarg = (plat_ecache_cpu_arg_t *)arg;
956 
957 	if (portid != cpuarg->cpuid)
958 		return (DDI_WALK_CONTINUE);
959 
960 	/*
961 	 * Found the right CPU, fetch ecache-dimm-label property
962 	 */
963 
964 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
965 				"ecache-dimm-label", &dimm_labels, &numlabels)
966 			!= DDI_PROP_SUCCESS) {
967 #ifdef	DEBUG
968 		cmn_err(CE_NOTE, "cpuid=%d missing ecache-dimm-label property",
969 			portid);
970 #endif	/* DEBUG */
971 		return (DDI_WALK_TERMINATE);
972 	}
973 
974 	if (cpuarg->dimm < numlabels) {
975 		cpuarg->jnum = kmem_alloc(
976 					strlen(dimm_labels[cpuarg->dimm]) + 1,
977 					KM_SLEEP);
978 		if (cpuarg->jnum != (char *)NULL)
979 			(void) strcpy(cpuarg->jnum, dimm_labels[cpuarg->dimm]);
980 #ifdef	DEBUG
981 		else
982 			cmn_err(CE_WARN,
983 				"cannot kmem_alloc for ecache dimm label");
984 #endif	/* DEBUG */
985 	}
986 
987 	ddi_prop_free((void *)dimm_labels);
988 	return (DDI_WALK_TERMINATE);
989 }
990 
991 
992 /*
993  * Bit 4 of physical address indicates ecache 0 or 1
994  */
995 
996 #define	ECACHE_DIMM_SHIFT	4
997 #define	ECACHE_DIMM_MASK	0x10
998 
999 /*
1000  * plat_get_ecacheunum is called to generate the unum for an ecache error.
1001  * After some initialization, nearly all of the work is done by ddi_walk_devs
1002  * and plat_get_ecache_cpu.
1003  */
1004 
/*
 * Generate the unum string for an ecache error on 'cpuid' at
 * 'physaddr', writing it into buf (capacity buflen) and its length
 * into *ustrlen.  Returns 0 on success, -1 if the CPU's DIMM label
 * could not be found.  Most of the work is done by ddi_walk_devs()
 * and plat_get_ecache_cpu().
 */
int
plat_get_ecacheunum(int cpuid, unsigned long long physaddr, char *buf,
		    int buflen, int *ustrlen)
{
	plat_ecache_cpu_arg_t	findcpu;
	uint_t	expander, slot, proc;

	findcpu.jnum = (char *)NULL;
	findcpu.cpuid = cpuid;
	/* Bit 4 of the physical address selects ecache DIMM 0 or 1 */
	findcpu.dimm = (physaddr & ECACHE_DIMM_MASK) >> ECACHE_DIMM_SHIFT;

	/*
	 * Walk the device tree, find this specific CPU, and get the label
	 * for this ecache, returned here in findcpu.jnum
	 */
	ddi_walk_devs(ddi_root_node(), plat_get_ecache_cpu, (void *)&findcpu);

	if (findcpu.jnum == (char *)NULL)
		return (-1);

	expander = STARCAT_CPUID_TO_EXPANDER(cpuid);
	slot = STARCAT_CPUID_TO_BOARDSLOT(cpuid);

	/*
	 * STARCAT_CPUID_TO_PORTID clears the CoreID bit so that
	 * STARCAT_CPUID_TO_AGENT will return a physical proc (0 - 3).
	 */
	proc = STARCAT_CPUID_TO_AGENT(STARCAT_CPUID_TO_PORTID(cpuid));

	/*
	 * NOTE: Any modifications to the snprintf() call below will require
	 * changing plat_log_fruid_error() as well!
	 */
	(void) snprintf(buf, buflen, "%s%u/P%u/E%u J%s", (slot ? "IO" : "SB"),
			expander, proc, findcpu.dimm, findcpu.jnum);

	*ustrlen = strlen(buf);

	/* Release the label string allocated by plat_get_ecache_cpu() */
	kmem_free(findcpu.jnum, strlen(findcpu.jnum) + 1);

	return (0);
}
1048 
1049 /*ARGSUSED*/
/*
 * Produce a unum for a fault: memory faults are delegated to the
 * p2get_mem_unum hook (set by the mc-us3 driver), ecache faults go to
 * plat_get_ecacheunum().  Returns 0 on success, ENOTSUP when no
 * handler applies, EIO if the ecache lookup fails.
 */
int
plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
    int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
{
	int ret;

	/*
	 * check if it's a Memory or an Ecache error.
	 */
	if (flt_in_memory) {
		if (p2get_mem_unum != NULL) {
			return (p2get_mem_unum(synd_code, P2ALIGN(flt_addr, 8),
				buf, buflen, lenp));
		} else {
			return (ENOTSUP);
		}
	} else if (flt_status & ECC_ECACHE) {
		if ((ret = plat_get_ecacheunum(flt_bus_id,
		    P2ALIGN(flt_addr, 8), buf, buflen, lenp)) != 0)
			return (EIO);
	} else {
		return (ENOTSUP);
	}

	/* Reached only on ecache success, so ret is 0 here */
	return (ret);
}
1076 
/* Cached address of scosmb_log_ecc_error(); resolved lazily on first use. */
static int (*ecc_mailbox_msg_func)(plat_ecc_message_type_t, void *) = NULL;
1078 
1079 /*
1080  * To keep OS mailbox handling localized, all we do is forward the call to the
1081  * scosmb module (if it is available).
1082  */
1083 int
1084 plat_send_ecc_mailbox_msg(plat_ecc_message_type_t msg_type, void *datap)
1085 {
1086 	/*
1087 	 * find the symbol for the mailbox sender routine in the scosmb module
1088 	 */
1089 	if (ecc_mailbox_msg_func == NULL)
1090 		ecc_mailbox_msg_func = (int (*)(plat_ecc_message_type_t,
1091 		    void *))modgetsymvalue("scosmb_log_ecc_error", 0);
1092 
1093 	/*
1094 	 * If the symbol was found, call it.  Otherwise, there is not much
1095 	 * else we can do and console messages will have to suffice.
1096 	 */
1097 	if (ecc_mailbox_msg_func)
1098 		return ((*ecc_mailbox_msg_func)(msg_type, datap));
1099 	else
1100 		return (ENODEV);
1101 }
1102 
/*
 * Compose a cpuid from its FRU coordinates using the platform's
 * MAKE_CPUID encoding.
 */
int
plat_make_fru_cpuid(int sb, int m, int proc)
{
	int cpuid = MAKE_CPUID(sb, m, proc);

	return (cpuid);
}
1108 
1109 /*
1110  * board number for a given proc
1111  */
1112 int
1113 plat_make_fru_boardnum(int proc)
1114 {
1115 	return (STARCAT_CPUID_TO_EXPANDER(proc));
1116 }
1117 
1118 /*
1119  * This platform hook gets called from mc_add_mem_unum_label() in the mc-us3
1120  * driver giving each platform the opportunity to add platform
1121  * specific label information to the unum for ECC error logging purposes.
1122  */
1123 void
1124 plat_add_mem_unum_label(char *unum, int mcid, int bank, int dimm)
1125 {
1126 	char	new_unum[UNUM_NAMLEN];
1127 	uint_t	expander = STARCAT_CPUID_TO_EXPANDER(mcid);
1128 	uint_t	slot = STARCAT_CPUID_TO_BOARDSLOT(mcid);
1129 
1130 	/*
1131 	 * STARCAT_CPUID_TO_PORTID clears the CoreID bit so that
1132 	 * STARCAT_CPUID_TO_AGENT will return a physical proc (0 - 3).
1133 	 */
1134 	uint_t	proc = STARCAT_CPUID_TO_AGENT(STARCAT_CPUID_TO_PORTID(mcid));
1135 
1136 	/*
1137 	 * NOTE: Any modifications to the two sprintf() calls below will
1138 	 * require changing plat_log_fruid_error() as well!
1139 	 */
1140 	if (dimm == -1)
1141 		(void) snprintf(new_unum, UNUM_NAMLEN, "%s%u/P%u/B%d %s",
1142 			(slot ? "IO" : "SB"), expander,
1143 			proc, (bank & 0x1), unum);
1144 	else
1145 		(void) snprintf(new_unum, UNUM_NAMLEN, "%s%u/P%u/B%d/D%d %s",
1146 			(slot ? "IO" : "SB"), expander,
1147 			proc, (bank & 0x1), (dimm & 0x3), unum);
1148 
1149 	(void) strcpy(unum, new_unum);
1150 }
1151 
/*
 * Produce the board unum ("SB<n>" or "IO<n>") hosting the given cpu.
 * Returns ENOSPC when the caller's buffer is too small, 0 on success
 * with *lenp set to the string length.
 */
int
plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
{
	int	expander = STARCAT_CPUID_TO_EXPANDER(cpuid);
	int	slot = STARCAT_CPUID_TO_BOARDSLOT(cpuid);
	int	len;

	len = snprintf(buf, buflen, "%s%d", (slot ? "IO" : "SB"), expander);
	if (len >= buflen)
		return (ENOSPC);

	*lenp = strlen(buf);
	return (0);
}
1166 
1167 /*
1168  * This routine is used by the data bearing mondo (DMV) initialization
1169  * routine to determine the number of hardware and software DMV interrupts
1170  * that a platform supports.
1171  */
1172 void
1173 plat_dmv_params(uint_t *hwint, uint_t *swint)
1174 {
1175 	*hwint = STARCAT_DMV_HWINT;
1176 	*swint = 0;
1177 }
1178 
1179 /*
1180  * If provided, this function will be called whenever the nodename is updated.
1181  * To keep OS mailbox handling localized, all we do is forward the call to the
1182  * scosmb module (if it is available).
1183  */
1184 void
1185 plat_nodename_set(void)
1186 {
1187 	void (*nodename_update_func)(uint64_t) = NULL;
1188 
1189 	/*
1190 	 * find the symbol for the nodename update routine in the scosmb module
1191 	 */
1192 	nodename_update_func = (void (*)(uint64_t))
1193 	    modgetsymvalue("scosmb_update_nodename", 0);
1194 
1195 	/*
1196 	 * If the symbol was found, call it.  Otherwise, log a note (but not to
1197 	 * the console).
1198 	 */
1199 	if (nodename_update_func != NULL) {
1200 		nodename_update_func(0);
1201 	} else {
1202 		cmn_err(CE_NOTE,
1203 		    "!plat_nodename_set: scosmb_update_nodename not found\n");
1204 	}
1205 }
1206 
/*
 * Addresses of the memory preallocated for fcode claims (see
 * efcode_alloc() below).  NOTE(review): efcode_paddr is not assigned
 * anywhere in this section of the file — confirm it is set elsewhere.
 */
caddr_t	efcode_vaddr = NULL;
caddr_t efcode_paddr = NULL;
1209 /*
1210  * Preallocate enough memory for fcode claims.
1211  */
1212 
1213 caddr_t
1214 efcode_alloc(caddr_t alloc_base)
1215 {
1216 	caddr_t efcode_alloc_base = (caddr_t)roundup((uintptr_t)alloc_base,
1217 	    MMU_PAGESIZE);
1218 	caddr_t vaddr;
1219 
1220 	/*
1221 	 * allocate the physical memory schizo fcode.
1222 	 */
1223 	if ((vaddr = (caddr_t)BOP_ALLOC(bootops, efcode_alloc_base,
1224 	    efcode_size, MMU_PAGESIZE)) == NULL)
1225 		cmn_err(CE_PANIC, "Cannot allocate Efcode Memory");
1226 
1227 	efcode_vaddr = vaddr;
1228 
1229 	return (efcode_alloc_base + efcode_size);
1230 }
1231 
1232 caddr_t
1233 plat_startup_memlist(caddr_t alloc_base)
1234 {
1235 	caddr_t tmp_alloc_base;
1236 
1237 	tmp_alloc_base = efcode_alloc(alloc_base);
1238 	tmp_alloc_base = (caddr_t)roundup((uintptr_t)tmp_alloc_base,
1239 					    ecache_alignsize);
1240 	return (tmp_alloc_base);
1241 }
1242 
1243 /*
1244  * This is a helper function to determine if a given
1245  * node should be considered for a dr operation according
1246  * to predefined dr names. This is accomplished using
1247  * a function defined in drmach module. The drmach module
1248  * owns the definition of dr allowable names.
1249  * Formal Parameter: The name of a device node.
1250  * Expected Return Value: -1, device node name does not map to a valid dr name.
1251  *               A value greater or equal to 0, name is valid.
1252  */
1253 int
1254 starcat_dr_name(char *name)
1255 {
1256 	int (*drmach_name2type)(char *) = NULL;
1257 
1258 	/* Get a pointer to helper function in the dramch module. */
1259 	drmach_name2type =
1260 	    (int (*)(char *))kobj_getsymvalue("drmach_name2type_idx", 0);
1261 
1262 	if (drmach_name2type == NULL)
1263 		return (-1);
1264 
1265 	return ((*drmach_name2type)(name));
1266 }
1267 
/* No platform-specific startup work is required here. */
void
startup_platform(void)
{
}
1272 
1273 /*
1274  * KDI functions - used by the in-situ kernel debugger (kmdb) to perform
1275  * platform-specific operations.  These functions execute when the world is
1276  * stopped, and as such cannot make any blocking calls, hold locks, etc.
1277  * promif functions are a special case, and may be used.
1278  */
1279 
/*
 * KDI claim hook: set the sigblock signatures to OBP_SIG via a short
 * Forth snippet.  Runs with the world stopped, so only promif calls
 * are safe here (see the block comment above).
 */
static void
starcat_system_claim(void)
{
	prom_interpret("sigb-sig! my-sigb-sig!", OBP_SIG, OBP_SIG, 0, 0, 0);
}
1285 
/*
 * KDI release hook: restore the sigblock signatures to OS_SIG, undoing
 * starcat_system_claim().  Same world-stopped constraints apply.
 */
static void
starcat_system_release(void)
{
	prom_interpret("sigb-sig! my-sigb-sig!", OS_SIG, OS_SIG, 0, 0, 0);
}
1291 
/*
 * Register the platform-specific claim/release callbacks with the
 * kernel/debugger interface.
 */
void
plat_kdi_init(kdi_t *kdi)
{
	kdi->pkdi_system_claim = starcat_system_claim;
	kdi->pkdi_system_release = starcat_system_release;
}
1298 
1299 /*
1300  * This function returns 1 if large pages for kernel heap are supported
1301  * and 0 otherwise.
1302  *
1303  * Currently we disable lp kmem support if kpr is going to be enabled
1304  * because in the case of large pages hat_add_callback()/hat_delete_callback()
1305  * cause network performance degradation
1306  */
1307 int
1308 plat_lpkmem_is_supported(void)
1309 {
1310 	extern int segkmem_reloc;
1311 
1312 	if (hat_kpr_enabled && kernel_cage_enable &&
1313 	    (ncpunode >= 32 || segkmem_reloc == 1))
1314 		return (0);
1315 
1316 	return (1);
1317 }
1318