xref: /titanic_41/usr/src/uts/sun4u/starcat/os/starcat.c (revision 0a44ef6d9afbfe052a7e975f55ea0d2954b62a82)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/sysmacros.h>
31 #include <sys/sunddi.h>
32 #include <sys/esunddi.h>
33 #include <sys/sunndi.h>
34 #include <sys/modctl.h>
35 #include <sys/promif.h>
36 #include <sys/machparam.h>
37 #include <sys/kobj.h>
38 #include <sys/cpuvar.h>
39 #include <sys/mem_cage.h>
40 #include <sys/promif.h>
41 #include <sys/promimpl.h>
42 #include <sys/platform_module.h>
43 #include <sys/errno.h>
44 #include <sys/cpu_sgnblk_defs.h>
45 #include <sys/iosramio.h>
46 #include <sys/domaind.h>
47 #include <sys/starcat.h>
48 #include <sys/machsystm.h>
49 #include <sys/bootconf.h>
50 #include <sys/memnode.h>
51 #include <vm/vm_dep.h>
52 #include <vm/page.h>
53 #include <sys/cheetahregs.h>
54 #include <sys/plat_ecc_unum.h>
55 #include <sys/plat_ecc_dimm.h>
56 #include <sys/lgrp.h>
57 #include <sys/dr.h>
58 #include <sys/post/scat_dcd.h>
59 #include <sys/kdi_impl.h>
60 #include <sys/iosramreg.h>
61 #include <sys/iosramvar.h>
62 #include <sys/mc-us3.h>
63 
/* Preallocation of spare tsb's for DR */
int starcat_tsb_spares = STARCAT_SPARE_TSB_MAX;

/* Maximum number of slot0 + slot1 boards... for DR */
int starcat_boards = STARCAT_BDSET_MAX * STARCAT_BDSET_SLOT_MAX;

/* Maximum number of cpus per board... for DR */
int starcat_cpu_per_board = MAX(STARCAT_SLOT0_CPU_MAX, STARCAT_SLOT1_CPU_MAX);

/* Maximum number of mem-units per board... for DR */
int starcat_mem_per_board = MAX(STARCAT_SLOT0_MEM_MAX, STARCAT_SLOT1_MEM_MAX);

/* Maximum number of io-units (buses) per board... for DR */
int starcat_io_per_board = 2 * MAX(STARCAT_SLOT0_IO_MAX, STARCAT_SLOT1_IO_MAX);

/* Preferred minimum cage size (expressed in pages)... for DR */
pgcnt_t starcat_startup_cage_size = 0;

/* Platform specific function to get unum information */
int (*p2get_mem_unum)(int, uint64_t, char *, int, int *);

/* Memory for fcode claims.  16k times # maximum possible schizos */
#define	EFCODE_SIZE	(STARCAT_BDSET_MAX * 4 * 0x4000)
int efcode_size = EFCODE_SIZE;

/* Forward declaration; defined later in this file. */
void sgn_update_all_cpus(ushort_t, uchar_t, uchar_t);

/*
 * The IOSRAM driver is loaded in load_platform_drivers(); any cpu signature
 * usage prior to that time will not have a driver-provided function to call,
 * so these pointers default to the PROM-based IOSRAM access routines and are
 * repointed at the driver's iosram_rd/iosram_wr once it is loaded.
 */
static int (*iosram_rdp)(uint32_t key, uint32_t off, uint32_t len,
	    caddr_t dptr) = prom_starcat_iosram_read;
static int (*iosram_wrp)(uint32_t key, uint32_t off, uint32_t len,
	    caddr_t dptr) = prom_starcat_iosram_write;

/* DIMM serial-id data, one entry per board set (type from plat_ecc_dimm.h) */
plat_dimm_sid_board_t	domain_dimm_sids[STARCAT_BDSET_MAX];
101 
102 /*
103  * set_platform_max_ncpus should return the maximum number of CPUs that the
104  * platform supports.  This function is called from check_cpus() to set the
105  * value of max_ncpus [see PSARC 1997/165 CPU Dynamic Reconfiguration].
106  * Data elements which are allocated based upon max_ncpus are all accessed
107  * via cpu_seqid and not physical IDs.  Previously, the value of max_ncpus
108  * was being set to the largest physical ID, which led to boot problems on
109  * systems with less than 1.25GB of memory.
110  */
111 
112 int
113 set_platform_max_ncpus(void)
114 {
115 	int n;
116 
117 	/*
118 	 * Convert number of slot0 + slot1 boards to number of expander brds
119 	 * and constrain the value to an architecturally plausible range
120 	 */
121 	n = MAX(starcat_boards, STARCAT_BDSET_MIN * STARCAT_BDSET_SLOT_MAX);
122 	n = MIN(n, STARCAT_BDSET_MAX * STARCAT_BDSET_SLOT_MAX);
123 	n = (n + STARCAT_BDSET_SLOT_MAX - 1) / STARCAT_BDSET_SLOT_MAX;
124 
125 	/* return maximum number of cpus possible on N expander boards */
126 	return (n * STARCAT_BDSET_CPU_MAX - STARCAT_SLOT1_CPU_MAX);
127 }
128 
129 int
130 set_platform_tsb_spares()
131 {
132 	return (MIN(starcat_tsb_spares, MAX_UPA));
133 }
134 
#pragma weak mmu_init_large_pages

void
set_platform_defaults(void)
{
	extern char *tod_module_name;
	extern int ts_dispatch_extended;
	extern void cpu_sgn_update(ushort_t, uchar_t, uchar_t, int);
	extern int tsb_lgrp_affinity;
	extern int segkmem_reloc;
	extern void mmu_init_large_pages(size_t);
	extern int ncpunode;	/* number of CPUs detected by OBP */

#ifdef DEBUG
	/* Be verbose about correctable errors in DEBUG kernels. */
	ce_verbose_memory = 2;
	ce_verbose_other = 2;
#endif

	/* Set the CPU signature function pointer */
	cpu_sgn_func = cpu_sgn_update;

	/* Set appropriate tod module for starcat */
	ASSERT(tod_module_name == NULL);
	tod_module_name = "todstarcat";

	/*
	 * Use the alternate TS dispatch table, which is better
	 * tuned for large servers.
	 */
	if (ts_dispatch_extended == -1)
		ts_dispatch_extended = 1;

	/*
	 * Use lgroup-aware TSB allocations on this platform,
	 * since they are a considerable performance win.
	 */
	tsb_lgrp_affinity = 1;

	/*
	 * Set up large-page MMU support unless the ISM page size has
	 * been left at its default.  mmu_init_large_pages is a weak
	 * symbol (see #pragma weak above), so verify it is present
	 * before calling it.
	 */
	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) {
		if (&mmu_init_large_pages)
			mmu_init_large_pages(mmu_ism_pagesize);
	}

	/*
	 * KPR (kernel page relocation) is supported on this platform.
	 * It is enabled only when the HAT supports it, the kernel cage
	 * is enabled, and OBP detected at least 32 CPUs.
	 */
	if (hat_kpr_enabled && kernel_cage_enable && ncpunode >= 32) {
		segkmem_reloc = 1;
		cmn_err(CE_NOTE, "!Kernel Page Relocation is ENABLED");
	} else {
		cmn_err(CE_NOTE, "!Kernel Page Relocation is DISABLED");
	}
}
189 
#ifdef DEBUG
/* Debug override for the preferred cage size (in pages). */
pgcnt_t starcat_cage_size_limit;
#endif

void
set_platform_cage_params(void)
{
	extern pgcnt_t total_pages;
	extern struct memlist *phys_avail;
	int ret;

	if (kernel_cage_enable) {
		pgcnt_t preferred_cage_size;

		/* Default to 1/256 of memory, but honor any tuned minimum. */
		preferred_cage_size =
			MAX(starcat_startup_cage_size, total_pages / 256);

#ifdef DEBUG
		if (starcat_cage_size_limit)
			preferred_cage_size = starcat_cage_size_limit;
#endif
		kcage_range_lock();
		/*
		 * Note: we are assuming that POST has loaded the
		 * whole show into the high end of memory.  Having
		 * taken this leap, we copy the whole of phys_avail
		 * to the glist and arrange for the cage to grow
		 * downward (descending pfns).
		 */
		ret = kcage_range_init(phys_avail, 1);
		if (ret == 0)
			kcage_init(preferred_cage_size);
		kcage_range_unlock();
	}

	if (kcage_on)
		cmn_err(CE_NOTE, "!DR Kernel Cage is ENABLED");
	else
		cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
}
230 
231 void
232 load_platform_modules(void)
233 {
234 	if (modload("misc", "pcihp") < 0) {
235 		cmn_err(CE_NOTE, "pcihp driver failed to load");
236 	}
237 }
238 
239 /*
240  * Starcat does not support power control of CPUs from the OS.
241  */
242 /*ARGSUSED*/
243 int
244 plat_cpu_poweron(struct cpu *cp)
245 {
246 	int (*starcat_cpu_poweron)(struct cpu *) = NULL;
247 
248 	starcat_cpu_poweron =
249 		(int (*)(struct cpu *))kobj_getsymvalue(
250 			"drmach_cpu_poweron", 0);
251 
252 	if (starcat_cpu_poweron == NULL)
253 		return (ENOTSUP);
254 	else
255 		return ((starcat_cpu_poweron)(cp));
256 }
257 
258 /*ARGSUSED*/
259 int
260 plat_cpu_poweroff(struct cpu *cp)
261 {
262 	int (*starcat_cpu_poweroff)(struct cpu *) = NULL;
263 
264 	starcat_cpu_poweroff =
265 		(int (*)(struct cpu *))kobj_getsymvalue(
266 			"drmach_cpu_poweroff", 0);
267 
268 	if (starcat_cpu_poweroff == NULL)
269 		return (ENOTSUP);
270 	else
271 		return ((starcat_cpu_poweroff)(cp));
272 }
273 
274 /*
275  * The following are currently private to Starcat DR
276  */
277 int
278 plat_max_boards()
279 {
280 	return (starcat_boards);
281 }
282 
283 int
284 plat_max_cpu_units_per_board()
285 {
286 	return (starcat_cpu_per_board);
287 }
288 
289 int
290 plat_max_mc_units_per_board()
291 {
292 	return (starcat_mem_per_board); /* each CPU has a memory controller */
293 }
294 
295 int
296 plat_max_mem_units_per_board()
297 {
298 	return (starcat_mem_per_board);
299 }
300 
301 int
302 plat_max_io_units_per_board()
303 {
304 	return (starcat_io_per_board);
305 }
306 
307 int
308 plat_max_cpumem_boards(void)
309 {
310 	return (STARCAT_BDSET_MAX);
311 }
312 
313 int
314 plat_pfn_to_mem_node(pfn_t pfn)
315 {
316 	return (pfn >> mem_node_pfn_shift);
317 }
318 
319 #define	STARCAT_MC_MEMBOARD_SHIFT 37	/* Boards on 128BG boundary */
320 
321 /* ARGSUSED */
322 void
323 plat_build_mem_nodes(u_longlong_t *list, size_t nelems)
324 {
325 	size_t	elem;
326 	pfn_t	basepfn;
327 	pgcnt_t	npgs;
328 
329 	/*
330 	 * Starcat mem slices are always aligned on a 128GB boundary,
331 	 * fixed, and limited to one slice per expander due to design
332 	 * of the centerplane ASICs.
333 	 */
334 	mem_node_pfn_shift = STARCAT_MC_MEMBOARD_SHIFT - MMU_PAGESHIFT;
335 	mem_node_physalign = 0;
336 
337 	/*
338 	 * Boot install lists are arranged <addr, len>, <addr, len>, ...
339 	 */
340 	for (elem = 0; elem < nelems; elem += 2) {
341 		basepfn = btop(list[elem]);
342 		npgs = btop(list[elem+1]);
343 		mem_node_add_slice(basepfn, basepfn + npgs - 1);
344 	}
345 }
346 
/*
 * Find the CPU associated with a slice at boot-time.
 *
 * Reads the memory controller decode registers of the "mc" device node
 * identified by nodeid and, for every bank marked valid, maps the 128GB
 * slice the bank decodes (the memnode) to the node's expander number
 * (the lgroup platform handle).
 */
void
plat_fill_mc(pnode_t nodeid)
{
	int		len;
	uint64_t	mc_addr, mask;
	uint64_t	mc_decode[MAX_BANKS_PER_MC];
	uint32_t	regs[4];
	int		local_mc;	/* MC belongs to the current CPU? */
	int		portid;
	int		expnum;
	int		i;

	/*
	 * Memory address decoding registers
	 * (see Chap 9 of SPARCV9 JSP-1 US-III implementation)
	 */
	const uint64_t	mc_decode_addr[MAX_BANKS_PER_MC] = {
		0x400028, 0x400010, 0x400018, 0x400020
	};

	/*
	 * Starcat memory controller portid == global CPU id
	 */
	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
		(portid == -1))
		return;

	expnum = STARCAT_CPUID_TO_EXPANDER(portid);

	/*
	 * The "reg" property returns 4 32-bit values. The first two are
	 * combined to form a 64-bit address.  The second two are for a
	 * 64-bit size, but we don't actually need to look at that value.
	 */
	len = prom_getproplen(nodeid, "reg");
	if (len != (sizeof (uint32_t) * 4)) {
		prom_printf("Warning: malformed 'reg' property\n");
		return;
	}
	if (prom_getprop(nodeid, "reg", (caddr_t)regs) < 0)
		return;
	mc_addr = ((uint64_t)regs[0]) << 32;
	mc_addr |= (uint64_t)regs[1];

	/*
	 * Figure out whether the memory controller we are examining
	 * belongs to this CPU/CMP or a different one.
	 */
	if (portid == cpunodes[CPU->cpu_id].portid)
		local_mc = 1;
	else
		local_mc = 0;

	for (i = 0; i < MAX_BANKS_PER_MC; i++) {

		mask = mc_decode_addr[i];

		/*
		 * If the memory controller is local to this CPU, we use
		 * the special ASI to read the decode registers.
		 * Otherwise, we load the values from a magic address in
		 * I/O space.
		 */
		if (local_mc)
			mc_decode[i] = lddmcdecode(mask & MC_OFFSET_MASK);
		else
			mc_decode[i] = lddphysio((mc_addr | mask));

		/* Only banks with the valid bit set contribute a slice. */
		if (mc_decode[i] >> MC_VALID_SHIFT) {
			uint64_t base = MC_BASE(mc_decode[i]) << PHYS2UM_SHIFT;
			int sliceid = (base >> STARCAT_MC_MEMBOARD_SHIFT);

			if (sliceid < max_mem_nodes) {
				/*
				 * Establish start-of-day mappings of
				 * lgroup platform handles to memnodes.
				 * Handle == Expander Number
				 * Memnode == Fixed 128GB Slice
				 */
				plat_assign_lgrphand_to_mem_node(expnum,
				    sliceid);
			}
		}
	}
}
435 
436 /*
437  * Starcat support for lgroups.
438  *
439  * On Starcat, an lgroup platform handle == expander number.
440  * For split-slot configurations (e.g. slot 0 and slot 1 boards
441  * in different domains) an MCPU board has only remote memory.
442  *
443  * The centerplane logic provides fixed 128GB memory slices
444  * each of which map to a memnode.  The initial mapping of
445  * memnodes to lgroup handles is determined at boot time.
446  * A DR addition of memory adds a new mapping. A DR copy-rename
447  * swaps mappings.
448  */
449 
/*
 * Convert board number to expander number.
 * The argument is fully parenthesized so the macro expands correctly
 * even when passed a compound expression (e.g. a ternary or addition),
 * since ">>" binds more tightly than several operators a caller might use.
 */
#define	BOARDNUM_2_EXPANDER(b)	((b) >> 1)
454 
/*
 * Return the number of boards configured with NULL LPA.
 *
 * Reads the GDCD out of IOSRAM and counts slot-0 boards flagged with
 * L1SSFLG_THIS_L1_NULL_PROC_LPA.  If the GDCD cannot be read or fails
 * validation, a sentinel greater than EXP_COUNT is returned so callers
 * still see a nonzero "NULL LPA present" condition.
 */
static int
check_for_null_lpa(void)
{
	gdcd_t	*gdcd;
	uint_t	exp, nlpa;

	/*
	 * Read GDCD from IOSRAM.
	 * If this fails indicate a NULL LPA condition.
	 */
	if ((gdcd = kmem_zalloc(sizeof (gdcd_t), KM_NOSLEEP)) == NULL)
		return (EXP_COUNT+1);

	/* Validate magic and version before trusting the contents. */
	if ((*iosram_rdp)(GDCD_MAGIC, 0, sizeof (gdcd_t), (caddr_t)gdcd) ||
	    (gdcd->h.dcd_magic != GDCD_MAGIC) ||
	    (gdcd->h.dcd_version != DCD_VERSION)) {
		kmem_free(gdcd, sizeof (gdcd_t));
		cmn_err(CE_WARN, "check_for_null_lpa: failed to access GDCD\n");
		return (EXP_COUNT+2);
	}

	/*
	 * Check for NULL LPAs on all slot 0 boards in domain
	 * (i.e. in all expanders marked good for this domain).
	 */
	nlpa = 0;
	for (exp = 0; exp < EXP_COUNT; exp++) {
		if (RSV_GOOD(gdcd->dcd_slot[exp][0].l1ss_rsv) &&
		    (gdcd->dcd_slot[exp][0].l1ss_flags &
		    L1SSFLG_THIS_L1_NULL_PROC_LPA))
			nlpa++;
	}

	kmem_free(gdcd, sizeof (gdcd_t));
	return (nlpa);
}
494 
/*
 * Return the platform handle for the lgroup containing the given CPU
 *
 * For Starcat, lgroup platform handle == expander.
 */

/* Set to 1 once MPO is found unusable; see plat_lgrp_cpu_to_hand(). */
extern int mpo_disabled;
extern lgrp_handle_t lgrp_default_handle;
/* Boards with NULL LPA; -1 until check_for_null_lpa() runs at boot. */
int null_lpa_boards = -1;
504 
505 lgrp_handle_t
506 plat_lgrp_cpu_to_hand(processorid_t id)
507 {
508 	lgrp_handle_t		plathand;
509 
510 	plathand = STARCAT_CPUID_TO_EXPANDER(id);
511 
512 	/*
513 	 * Return the real platform handle for the CPU until
514 	 * such time as we know that MPO should be disabled.
515 	 * At that point, we set the "mpo_disabled" flag to true,
516 	 * and from that point on, return the default handle.
517 	 *
518 	 * By the time we know that MPO should be disabled, the
519 	 * first CPU will have already been added to a leaf
520 	 * lgroup, but that's ok. The common lgroup code will
521 	 * double check that the boot CPU is in the correct place,
522 	 * and in the case where mpo should be disabled, will move
523 	 * it to the root if necessary.
524 	 */
525 	if (mpo_disabled) {
526 		/* If MPO is disabled, return the default (UMA) handle */
527 		plathand = lgrp_default_handle;
528 	} else {
529 		if (null_lpa_boards > 0) {
530 			/* Determine if MPO should be disabled */
531 			mpo_disabled = 1;
532 			plathand = lgrp_default_handle;
533 		}
534 	}
535 	return (plathand);
536 }
537 
538 /*
539  * Platform specific lgroup initialization
540  */
541 void
542 plat_lgrp_init(void)
543 {
544 	extern uint32_t lgrp_expand_proc_thresh;
545 	extern uint32_t lgrp_expand_proc_diff;
546 
547 	/*
548 	 * Set tuneables for Starcat architecture
549 	 *
550 	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
551 	 * this process is currently running on before considering
552 	 * expanding threads to another lgroup.
553 	 *
554 	 * lgrp_expand_proc_diff determines how much less the remote lgroup
555 	 * must be loaded before expanding to it.
556 	 *
557 	 * Since remote latencies can be costly, attempt to keep 3 threads
558 	 * within the same lgroup before expanding to the next lgroup.
559 	 */
560 	lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX * 3;
561 	lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
562 }
563 
/*
 * Platform notification of lgroup (re)configuration changes
 */
/*ARGSUSED*/
void
plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
{
	update_membounds_t	*umb;
	lgrp_config_mem_rename_t lmr;
	int			sbd, tbd;	/* source/target board nums */
	lgrp_handle_t		hand, shand, thand;
	int			mnode, snode, tnode;

	/* No lgroup bookkeeping to maintain once MPO is off. */
	if (mpo_disabled)
		return;

	switch (evt) {

	case LGRP_CONFIG_MEM_ADD:
		/*
		 * Establish the lgroup handle to memnode translation.
		 */
		umb = (update_membounds_t *)arg;

		hand = BOARDNUM_2_EXPANDER(umb->u_board);
		mnode = plat_pfn_to_mem_node(umb->u_base >> MMU_PAGESHIFT);
		plat_assign_lgrphand_to_mem_node(hand, mnode);

		break;

	case LGRP_CONFIG_MEM_DEL:
		/* We don't have to do anything */

		break;

	case LGRP_CONFIG_MEM_RENAME:
		/*
		 * During a DR copy-rename operation, all of the memory
		 * on one board is moved to another board -- but the
		 * addresses/pfns and memnodes don't change. This means
		 * the memory has changed locations without changing identity.
		 *
		 * Source is where we are copying from and target is where we
		 * are copying to.  After source memnode is copied to target
		 * memnode, the physical addresses of the target memnode are
		 * renamed to match what the source memnode had.  Then target
		 * memnode can be removed and source memnode can take its
		 * place.
		 *
		 * To do this, swap the lgroup handle to memnode mappings for
		 * the boards, so target lgroup will have source memnode and
		 * source lgroup will have empty target memnode which is where
		 * its memory will go (if any is added to it later).
		 *
		 * Then source memnode needs to be removed from its lgroup
		 * and added to the target lgroup where the memory was living
		 * but under a different name/memnode.  The memory was in the
		 * target memnode and now lives in the source memnode with
		 * different physical addresses even though it is the same
		 * memory.
		 */
		/* arg packs both boards: source low 16 bits, target high */
		sbd = arg & 0xffff;
		tbd = (arg & 0xffff0000) >> 16;
		shand = BOARDNUM_2_EXPANDER(sbd);
		thand = BOARDNUM_2_EXPANDER(tbd);
		snode = plat_lgrphand_to_mem_node(shand);
		tnode = plat_lgrphand_to_mem_node(thand);

		/* Swap the handle-to-memnode mappings of the two boards. */
		plat_assign_lgrphand_to_mem_node(thand, snode);
		plat_assign_lgrphand_to_mem_node(shand, tnode);

		lmr.lmem_rename_from = shand;
		lmr.lmem_rename_to = thand;

		/*
		 * Remove source memnode of copy rename from its lgroup
		 * and add it to its new target lgroup
		 */
		lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
		    (uintptr_t)&lmr);

		break;

	default:
		break;
	}
}
651 
652 /*
653  * Return latency between "from" and "to" lgroups
654  *
655  * This latency number can only be used for relative comparison
656  * between lgroups on the running system, cannot be used across platforms,
657  * and may not reflect the actual latency.  It is platform and implementation
658  * specific, so platform gets to decide its value.  It would be nice if the
659  * number was at least proportional to make comparisons more meaningful though.
660  * NOTE: The numbers below are supposed to be load latencies for uncached
661  * memory divided by 10.
662  */
663 int
664 plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to)
665 {
666 	/*
667 	 * Return min remote latency when there are more than two lgroups
668 	 * (root and child) and getting latency between two different lgroups
669 	 * or root is involved
670 	 */
671 	if (lgrp_optimizations() && (from != to ||
672 	    from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE))
673 		return (48);
674 	else
675 		return (28);
676 }
677 
678 /*
679  * Return platform handle for root lgroup
680  */
681 lgrp_handle_t
682 plat_lgrp_root_hand(void)
683 {
684 	if (mpo_disabled)
685 		return (lgrp_default_handle);
686 
687 	return (LGRP_DEFAULT_HANDLE);
688 }
689 
/* ARGSUSED */
void
plat_freelist_process(int mnode)
{
	/* No platform-specific freelist processing on Starcat. */
}
695 
/*
 * Load and attach the platform drivers Starcat requires at boot:
 * iosram (SC communication), axq, scosmb, dr, mc-us3 and pcisch.
 * Also switches the IOSRAM access pointers from the PROM routines to
 * the driver's, and performs the one-time NULL LPA check.  Failure to
 * reach the IOSRAM is fatal (panic); other load failures are warnings.
 */
void
load_platform_drivers(void)
{
	uint_t		tunnel;
	pnode_t		nodeid;
	dev_info_t	*chosen_devi;
	char		chosen_iosram[MAXNAMELEN];

	/*
	 * Get /chosen node - that's where the tunnel property is
	 */
	nodeid = prom_chosennode();

	/*
	 * Get the iosram property from the chosen node.
	 */
	if (prom_getprop(nodeid, IOSRAM_CHOSEN_PROP, (caddr_t)&tunnel) <= 0) {
		prom_printf("Unable to get iosram property\n");
		cmn_err(CE_PANIC, "Unable to get iosram property\n");
	}

	/* Translate the tunnel phandle to a device path. */
	if (prom_phandle_to_path((phandle_t)tunnel, chosen_iosram,
			sizeof (chosen_iosram)) < 0) {
		(void) prom_printf("prom_phandle_to_path(0x%x) failed\n",
				tunnel);
		cmn_err(CE_PANIC, "prom_phandle_to_path(0x%x) failed\n",
				tunnel);
	}

	/*
	 * Attach all driver instances along the iosram's device path
	 */
	if (i_ddi_attach_hw_nodes("iosram") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "IOSRAM failed to load\n");
	}

	/* Hold/release pair; the devinfo handle itself is not retained. */
	if ((chosen_devi = e_ddi_hold_devi_by_path(chosen_iosram, 0)) == NULL) {
		(void) prom_printf("e_ddi_hold_devi_by_path(%s) failed\n",
			chosen_iosram);
		cmn_err(CE_PANIC, "e_ddi_hold_devi_by_path(%s) failed\n",
			chosen_iosram);
	}
	ndi_rele_devi(chosen_devi);

	/*
	 * iosram driver is now loaded so we need to set our read and
	 * write pointers.
	 */
	iosram_rdp = (int (*)(uint32_t, uint32_t, uint32_t, caddr_t))
			modgetsymvalue("iosram_rd", 0);
	iosram_wrp = (int (*)(uint32_t, uint32_t, uint32_t, caddr_t))
			modgetsymvalue("iosram_wr", 0);

	/*
	 * Need to check for null proc LPA after IOSRAM driver is loaded
	 * and before multiple lgroups created (when start_other_cpus() called)
	 */
	null_lpa_boards = check_for_null_lpa();

	/* load and attach the axq driver */
	if (i_ddi_attach_hw_nodes("axq") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "AXQ failed to load\n");
	}

	/* load Starcat Solaris Mailbox Client driver */
	if (modload("misc", "scosmb") < 0) {
		cmn_err(CE_WARN, "SCOSMB failed to load\n");
	}

	/* load the DR driver */
	if (i_ddi_attach_hw_nodes("dr") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "dr failed to load");
	}

	/*
	 * Load the mc-us3 memory driver.
	 */
	if (i_ddi_attach_hw_nodes("mc-us3") != DDI_SUCCESS)
		cmn_err(CE_WARN, "mc-us3 failed to load");
	else
		(void) ddi_hold_driver(ddi_name_to_major("mc-us3"));

	/* Load the schizo pci bus nexus driver. */
	if (i_ddi_attach_hw_nodes("pcisch") != DDI_SUCCESS)
		cmn_err(CE_WARN, "pcisch failed to load");

	plat_ecc_init();
}
784 
785 
/*
 * No platform drivers on this platform
 */
/* Empty NULL-terminated list. */
char *platform_module_list[] = {
	(char *)0
};
792 
793 
/*ARGSUSED*/
void
plat_tod_fault(enum tod_fault_type tod_bad)
{
	/* No platform-specific TOD fault handling on Starcat. */
}
799 
/*
 * Update the signature(s) in the IOSRAM's domain data section.
 *
 * sgn/state/sub_state form the new signature.  cpuid selects a single
 * CPU's signature slot, or -1 (any negative value) to update all CPUs
 * plus the domain-wide signature.
 */
void
cpu_sgn_update(ushort_t sgn, uchar_t state, uchar_t sub_state, int cpuid)
{
	sig_state_t new_sgn;
	sig_state_t current_sgn;

	/*
	 * If the substate is REBOOT, then check for panic flow: a current
	 * domain state of SIGST_EXIT is recorded as a PANIC_REBOOT.
	 */
	if (sub_state == SIGSUBST_REBOOT) {
		(*iosram_rdp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET,
		    sizeof (sig_state_t), (caddr_t)&current_sgn);
		if (current_sgn.state_t.state == SIGST_EXIT)
			sub_state = SIGSUBST_PANIC_REBOOT;
	}

	/*
	 * cpuid == -1 indicates that the operation applies to all cpus.
	 */
	if (cpuid < 0) {
		sgn_update_all_cpus(sgn, state, sub_state);
		return;
	}

	/* Write this cpu's per-cpu signature slot. */
	new_sgn.signature = CPU_SIG_BLD(sgn, state, sub_state);
	(*iosram_wrp)(DOMD_MAGIC,
	    DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
	    sizeof (sig_state_t), (caddr_t)&new_sgn);

	/*
	 * Under certain conditions we don't update the signature
	 * of the domain_state: an OS signature going OFFLINE or
	 * DETACHED applies only to that single cpu.
	 */
	if ((sgn == OS_SIG) &&
	    ((state == SIGST_OFFLINE) || (state == SIGST_DETACHED)))
		return;
	(*iosram_wrp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET, sizeof (sig_state_t),
	    (caddr_t)&new_sgn);
}
842 
843 /*
844  * Update the signature(s) in the IOSRAM's domain data section for all CPUs.
845  */
846 void
847 sgn_update_all_cpus(ushort_t sgn, uchar_t state, uchar_t sub_state)
848 {
849 	sig_state_t new_sgn;
850 	int i = 0;
851 
852 	new_sgn.signature = CPU_SIG_BLD(sgn, state, sub_state);
853 
854 	/*
855 	 * First update the domain_state signature
856 	 */
857 	(*iosram_wrp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET, sizeof (sig_state_t),
858 	    (caddr_t)&new_sgn);
859 
860 	for (i = 0; i < NCPU; i++) {
861 		if (cpu[i] != NULL && (cpu[i]->cpu_flags &
862 		    (CPU_EXISTS|CPU_QUIESCED))) {
863 			(*iosram_wrp)(DOMD_MAGIC,
864 			    DOMD_CPUSIGS_OFFSET + i * sizeof (sig_state_t),
865 			    sizeof (sig_state_t), (caddr_t)&new_sgn);
866 		}
867 	}
868 }
869 
870 ushort_t
871 get_cpu_sgn(int cpuid)
872 {
873 	sig_state_t cpu_sgn;
874 
875 	(*iosram_rdp)(DOMD_MAGIC,
876 	    DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
877 	    sizeof (sig_state_t), (caddr_t)&cpu_sgn);
878 
879 	return (cpu_sgn.state_t.sig);
880 }
881 
882 uchar_t
883 get_cpu_sgn_state(int cpuid)
884 {
885 	sig_state_t cpu_sgn;
886 
887 	(*iosram_rdp)(DOMD_MAGIC,
888 	    DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
889 	    sizeof (sig_state_t), (caddr_t)&cpu_sgn);
890 
891 	return (cpu_sgn.state_t.state);
892 }
893 
894 
/*
 * Type of argument passed into plat_get_ecache_cpu via ddi_walk_devs
 * for matching on specific CPU node in device tree.  On success jnum
 * is kmem_alloc'd by the walker; the caller (plat_get_ecacheunum)
 * is responsible for freeing it.
 */

typedef struct {
	char		*jnum;	/* output, kmem_alloc'd	if successful */
	int		cpuid;	/* input, to match cpuid/portid/upa-portid */
	uint_t		dimm;	/* input, index into ecache-dimm-label */
} plat_ecache_cpu_arg_t;
905 
906 
/*
 * plat_get_ecache_cpu is called repeatedly by ddi_walk_devs with pointers
 * to device tree nodes (dip) and to a plat_ecache_cpu_arg_t structure (arg).
 * Returning DDI_WALK_CONTINUE tells ddi_walk_devs to keep going, returning
 * DDI_WALK_TERMINATE ends the walk.  When the node for the specific CPU
 * being searched for is found, the walk is done.  But before returning to
 * ddi_walk_devs and plat_get_ecacheunum, we grab this CPU's ecache-dimm-label
 * property and set the jnum member of the plat_ecache_cpu_arg_t structure to
 * point to the label corresponding to this specific ecache DIMM.  It is up
 * to plat_get_ecacheunum to kmem_free this string.
 */

static int
plat_get_ecache_cpu(dev_info_t *dip, void *arg)
{
	char			*devtype;
	plat_ecache_cpu_arg_t	*cpuarg;
	char			**dimm_labels;
	uint_t			numlabels;
	int			portid;

	/*
	 * Check device_type, must be "cpu"
	 */

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
						"device_type", &devtype)
				!= DDI_PROP_SUCCESS)
		return (DDI_WALK_CONTINUE);

	if (strcmp(devtype, "cpu")) {
		ddi_prop_free((void *)devtype);
		return (DDI_WALK_CONTINUE);
	}

	ddi_prop_free((void *)devtype);

	/*
	 * Check cpuid, portid, upa-portid (in that order), must
	 * match the cpuid being sought
	 */

	portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
				DDI_PROP_DONTPASS, "cpuid", -1);

	if (portid == -1)
		portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
				DDI_PROP_DONTPASS, "portid", -1);

	if (portid == -1)
		portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
				DDI_PROP_DONTPASS, "upa-portid", -1);

	cpuarg = (plat_ecache_cpu_arg_t *)arg;

	if (portid != cpuarg->cpuid)
		return (DDI_WALK_CONTINUE);

	/*
	 * Found the right CPU, fetch ecache-dimm-label property
	 */

	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
				"ecache-dimm-label", &dimm_labels, &numlabels)
			!= DDI_PROP_SUCCESS) {
#ifdef	DEBUG
		cmn_err(CE_NOTE, "cpuid=%d missing ecache-dimm-label property",
			portid);
#endif	/* DEBUG */
		return (DDI_WALK_TERMINATE);
	}

	/* Copy out the label for the requested DIMM, if the index is valid. */
	if (cpuarg->dimm < numlabels) {
		cpuarg->jnum = kmem_alloc(
					strlen(dimm_labels[cpuarg->dimm]) + 1,
					KM_SLEEP);
		/* KM_SLEEP allocations do not fail; check kept for safety. */
		if (cpuarg->jnum != (char *)NULL)
			(void) strcpy(cpuarg->jnum, dimm_labels[cpuarg->dimm]);
#ifdef	DEBUG
		else
			cmn_err(CE_WARN,
				"cannot kmem_alloc for ecache dimm label");
#endif	/* DEBUG */
	}

	ddi_prop_free((void *)dimm_labels);
	return (DDI_WALK_TERMINATE);
}
995 
996 
/*
 * Bit 4 of physical address indicates ecache 0 or 1
 */

#define	ECACHE_DIMM_SHIFT	4
#define	ECACHE_DIMM_MASK	0x10

/*
 * plat_get_ecacheunum is called to generate the unum for an ecache error.
 * After some initialization, nearly all of the work is done by ddi_walk_devs
 * and plat_get_ecache_cpu.
 *
 * Returns 0 and fills buf/ustrlen on success; returns -1 if the CPU node
 * or its ecache-dimm-label property could not be found.
 */

int
plat_get_ecacheunum(int cpuid, unsigned long long physaddr, char *buf,
		    int buflen, int *ustrlen)
{
	plat_ecache_cpu_arg_t	findcpu;
	uint_t	expander, slot, proc;

	findcpu.jnum = (char *)NULL;
	findcpu.cpuid = cpuid;
	/* Bit 4 of the fault address selects ecache DIMM 0 or 1. */
	findcpu.dimm = (physaddr & ECACHE_DIMM_MASK) >> ECACHE_DIMM_SHIFT;

	/*
	 * Walk the device tree, find this specific CPU, and get the label
	 * for this ecache, returned here in findcpu.jnum
	 */

	ddi_walk_devs(ddi_root_node(), plat_get_ecache_cpu, (void *)&findcpu);

	if (findcpu.jnum == (char *)NULL)
		return (-1);

	expander = STARCAT_CPUID_TO_EXPANDER(cpuid);
	slot = STARCAT_CPUID_TO_BOARDSLOT(cpuid);

	/*
	 * STARCAT_CPUID_TO_PORTID clears the CoreID bit so that
	 * STARCAT_CPUID_TO_AGENT will return a physical proc (0 - 3).
	 */
	proc = STARCAT_CPUID_TO_AGENT(STARCAT_CPUID_TO_PORTID(cpuid));

	/*
	 * NOTE: Any modifications to the snprintf() call below will require
	 * changing plat_log_fruid_error() as well!
	 */
	(void) snprintf(buf, buflen, "%s%u/P%u/E%u J%s", (slot ? "IO" : "SB"),
			expander, proc, findcpu.dimm, findcpu.jnum);

	*ustrlen = strlen(buf);

	/* jnum was kmem_alloc'd by plat_get_ecache_cpu; release it here. */
	kmem_free(findcpu.jnum, strlen(findcpu.jnum) + 1);

	return (0);
}
1053 
/*ARGSUSED*/
int
plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
    int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
{
	int ret;

	/*
	 * check if it's a Memory or an Ecache error.
	 */
	if (flt_in_memory) {
		/* Memory errors are delegated to the mc-us3 hook, if set. */
		if (p2get_mem_unum != NULL) {
			return (p2get_mem_unum(synd_code, P2ALIGN(flt_addr, 8),
				buf, buflen, lenp));
		} else {
			return (ENOTSUP);
		}
	} else if (flt_status & ECC_ECACHE) {
		if ((ret = plat_get_ecacheunum(flt_bus_id,
		    P2ALIGN(flt_addr, 8), buf, buflen, lenp)) != 0)
			return (EIO);
	} else {
		return (ENOTSUP);
	}

	/* Only reached on the successful (ret == 0) ecache path. */
	return (ret);
}
1081 
1082 static int (*ecc_mailbox_msg_func)(plat_ecc_message_type_t, void *) = NULL;
1083 
1084 /*
1085  * To keep OS mailbox handling localized, all we do is forward the call to the
1086  * scosmb module (if it is available).
1087  */
1088 int
1089 plat_send_ecc_mailbox_msg(plat_ecc_message_type_t msg_type, void *datap)
1090 {
1091 	/*
1092 	 * find the symbol for the mailbox sender routine in the scosmb module
1093 	 */
1094 	if (ecc_mailbox_msg_func == NULL)
1095 		ecc_mailbox_msg_func = (int (*)(plat_ecc_message_type_t,
1096 		    void *))modgetsymvalue("scosmb_log_ecc_error", 0);
1097 
1098 	/*
1099 	 * If the symbol was found, call it.  Otherwise, there is not much
1100 	 * else we can do and console messages will have to suffice.
1101 	 */
1102 	if (ecc_mailbox_msg_func)
1103 		return ((*ecc_mailbox_msg_func)(msg_type, datap));
1104 	else
1105 		return (ENODEV);
1106 }
1107 
/*
 * Compose a CPU id from its FRU coordinates (system board, module, proc).
 * The bit layout is owned by the MAKE_CPUID macro in the platform headers.
 */
int
plat_make_fru_cpuid(int sb, int m, int proc)
{
	return (MAKE_CPUID(sb, m, proc));
}
1113 
/*
 * Return the FRU board number (the expander) for a given proc (CPU id).
 */
int
plat_make_fru_boardnum(int proc)
{
	return (STARCAT_CPUID_TO_EXPANDER(proc));
}
1122 
1123 /*
1124  * This platform hook gets called from mc_add_mem_unum_label() in the mc-us3
1125  * driver giving each platform the opportunity to add platform
1126  * specific label information to the unum for ECC error logging purposes.
1127  */
1128 void
1129 plat_add_mem_unum_label(char *unum, int mcid, int bank, int dimm)
1130 {
1131 	char	new_unum[UNUM_NAMLEN];
1132 	uint_t	expander = STARCAT_CPUID_TO_EXPANDER(mcid);
1133 	uint_t	slot = STARCAT_CPUID_TO_BOARDSLOT(mcid);
1134 
1135 	/*
1136 	 * STARCAT_CPUID_TO_PORTID clears the CoreID bit so that
1137 	 * STARCAT_CPUID_TO_AGENT will return a physical proc (0 - 3).
1138 	 */
1139 	uint_t	proc = STARCAT_CPUID_TO_AGENT(STARCAT_CPUID_TO_PORTID(mcid));
1140 
1141 	/*
1142 	 * NOTE: Any modifications to the two sprintf() calls below will
1143 	 * require changing plat_log_fruid_error() as well!
1144 	 */
1145 	if (dimm == -1)
1146 		(void) snprintf(new_unum, UNUM_NAMLEN, "%s%u/P%u/B%d %s",
1147 			(slot ? "IO" : "SB"), expander,
1148 			proc, (bank & 0x1), unum);
1149 	else
1150 		(void) snprintf(new_unum, UNUM_NAMLEN, "%s%u/P%u/B%d/D%d %s",
1151 			(slot ? "IO" : "SB"), expander,
1152 			proc, (bank & 0x1), (dimm & 0x3), unum);
1153 
1154 	(void) strcpy(unum, new_unum);
1155 }
1156 
1157 int
1158 plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
1159 {
1160 	int	expander = STARCAT_CPUID_TO_EXPANDER(cpuid);
1161 	int	slot = STARCAT_CPUID_TO_BOARDSLOT(cpuid);
1162 
1163 	if (snprintf(buf, buflen, "%s%d", (slot ? "IO" : "SB"), expander)
1164 	    >= buflen) {
1165 		return (ENOSPC);
1166 	} else {
1167 		*lenp = strlen(buf);
1168 		return (0);
1169 	}
1170 }
1171 
/*
 * This routine is used by the data bearing mondo (DMV) initialization
 * routine to determine the number of hardware and software DMV interrupts
 * that a platform supports.
 */
void
plat_dmv_params(uint_t *hwint, uint_t *swint)
{
	/* Starcat advertises hardware DMV interrupts only; no software ones. */
	*hwint = STARCAT_DMV_HWINT;
	*swint = 0;
}
1183 
1184 /*
1185  * If provided, this function will be called whenever the nodename is updated.
1186  * To keep OS mailbox handling localized, all we do is forward the call to the
1187  * scosmb module (if it is available).
1188  */
1189 void
1190 plat_nodename_set(void)
1191 {
1192 	void (*nodename_update_func)(uint64_t) = NULL;
1193 
1194 	/*
1195 	 * find the symbol for the nodename update routine in the scosmb module
1196 	 */
1197 	nodename_update_func = (void (*)(uint64_t))
1198 	    modgetsymvalue("scosmb_update_nodename", 0);
1199 
1200 	/*
1201 	 * If the symbol was found, call it.  Otherwise, log a note (but not to
1202 	 * the console).
1203 	 */
1204 	if (nodename_update_func != NULL) {
1205 		nodename_update_func(0);
1206 	} else {
1207 		cmn_err(CE_NOTE,
1208 		    "!plat_nodename_set: scosmb_update_nodename not found\n");
1209 	}
1210 }
1211 
caddr_t	efcode_vaddr = NULL;	/* VA of the efcode region, set below */
caddr_t efcode_paddr = NULL;	/* not written here; presumably set elsewhere — TODO confirm */
/*
 * Preallocate enough memory for fcode claims.
 */

/*
 * Reserve efcode_size bytes of boot memory (page-aligned) at or above
 * alloc_base for fcode (e.g. schizo) claims.  Panics if the boot
 * allocator cannot satisfy the request.  Records the chosen VA in
 * efcode_vaddr and returns the new allocation floor past the region.
 */
caddr_t
efcode_alloc(caddr_t alloc_base)
{
	caddr_t efcode_alloc_base = (caddr_t)roundup((uintptr_t)alloc_base,
	    MMU_PAGESIZE);
	caddr_t vaddr;

	/*
	 * Ask the boot allocator for the efcode region; there is no
	 * recovery path this early, so failure is fatal.
	 */
	if ((vaddr = (caddr_t)BOP_ALLOC(bootops, efcode_alloc_base,
	    efcode_size, MMU_PAGESIZE)) == NULL)
		cmn_err(CE_PANIC, "Cannot allocate Efcode Memory");

	efcode_vaddr = vaddr;

	return (efcode_alloc_base + efcode_size);
}
1236 
1237 caddr_t
1238 plat_startup_memlist(caddr_t alloc_base)
1239 {
1240 	caddr_t tmp_alloc_base;
1241 
1242 	tmp_alloc_base = efcode_alloc(alloc_base);
1243 	tmp_alloc_base = (caddr_t)roundup((uintptr_t)tmp_alloc_base,
1244 					    ecache_alignsize);
1245 	return (tmp_alloc_base);
1246 }
1247 
/*
 * This is a helper function to determine if a given
 * node should be considered for a dr operation according
 * to predefined dr names. This is accomplished using
 * a function defined in the drmach module. The drmach module
 * owns the definition of dr allowable names.
 * Formal Parameter: The name of a device node.
 * Expected Return Value: -1, device node name does not map to a valid dr name.
 *               A value greater or equal to 0, name is valid.
 */
int
starcat_dr_name(char *name)
{
	int (*drmach_name2type)(char *) = NULL;

	/* Get a pointer to the helper function in the drmach module. */
	drmach_name2type =
	    (int (*)(char *))kobj_getsymvalue("drmach_name2type_idx", 0);

	/* drmach not loaded (or symbol missing): treat name as invalid. */
	if (drmach_name2type == NULL)
		return (-1);

	return ((*drmach_name2type)(name));
}
1272 
/*
 * Generic platform startup hook; Starcat has no extra work to do here,
 * so this is deliberately a no-op.
 */
void
startup_platform(void)
{
}
1277 
/*
 * KDI functions - used by the in-situ kernel debugger (kmdb) to perform
 * platform-specific operations.  These functions execute when the world is
 * stopped, and as such cannot make any blocking calls, hold locks, etc.
 * promif functions are a special case, and may be used.
 */

/*
 * Invoked when kmdb claims the system: run a small Forth fragment in OBP
 * that stamps the signature block with OBP_SIG — presumably so the SC
 * sees the domain as under firmware/debugger control; confirm SC-side
 * semantics before relying on this.
 */
static void
starcat_system_claim(void)
{
	prom_interpret("sigb-sig! my-sigb-sig!", OBP_SIG, OBP_SIG, 0, 0, 0);
}
1290 
/*
 * Invoked when kmdb releases the system: restore the signature block to
 * OS_SIG via the same OBP Forth fragment used by starcat_system_claim().
 */
static void
starcat_system_release(void)
{
	prom_interpret("sigb-sig! my-sigb-sig!", OS_SIG, OS_SIG, 0, 0, 0);
}
1296 
/*
 * Register the Starcat claim/release callbacks with the kernel-debugger
 * interface so kmdb can notify the platform on entry and exit.
 */
void
plat_kdi_init(kdi_t *kdi)
{
	kdi->pkdi_system_claim = starcat_system_claim;
	kdi->pkdi_system_release = starcat_system_release;
}
1303 
1304 /*
1305  * This function returns 1 if large pages for kernel heap are supported
1306  * and 0 otherwise.
1307  *
1308  * Currently we disable lp kmem support if kpr is going to be enabled
1309  * because in the case of large pages hat_add_callback()/hat_delete_callback()
1310  * cause network performance degradation
1311  */
1312 int
1313 plat_lpkmem_is_supported(void)
1314 {
1315 	extern int segkmem_reloc;
1316 
1317 	if (hat_kpr_enabled && kernel_cage_enable &&
1318 	    (ncpunode >= 32 || segkmem_reloc == 1))
1319 		return (0);
1320 
1321 	return (1);
1322 }
1323