xref: /titanic_44/usr/src/uts/sun4u/starcat/os/starcat.c (revision 03831d35f7499c87d51205817c93e9a8d42c4bae)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/sysmacros.h>
32 #include <sys/sunddi.h>
33 #include <sys/esunddi.h>
34 #include <sys/sunndi.h>
35 #include <sys/modctl.h>
36 #include <sys/promif.h>
37 #include <sys/machparam.h>
38 #include <sys/kobj.h>
39 #include <sys/cpuvar.h>
40 #include <sys/mem_cage.h>
41 #include <sys/promif.h>
42 #include <sys/promimpl.h>
43 #include <sys/platform_module.h>
44 #include <sys/errno.h>
45 #include <sys/cpu_sgnblk_defs.h>
46 #include <sys/iosramio.h>
47 #include <sys/domaind.h>
48 #include <sys/starcat.h>
49 #include <sys/machsystm.h>
50 #include <sys/bootconf.h>
51 #include <sys/memnode.h>
52 #include <vm/vm_dep.h>
53 #include <vm/page.h>
54 #include <sys/cheetahregs.h>
55 #include <sys/plat_ecc_unum.h>
56 #include <sys/plat_ecc_dimm.h>
57 #include <sys/lgrp.h>
58 #include <sys/dr.h>
59 #include <sys/post/scat_dcd.h>
60 #include <sys/kdi_impl.h>
61 #include <sys/iosramreg.h>
62 #include <sys/iosramvar.h>
63 #include <sys/mc-us3.h>
64 
65 /* Preallocation of spare tsb's for DR */
66 int starcat_tsb_spares = STARCAT_SPARE_TSB_MAX;
67 
/* Set the maximum number of slot0 + slot1 boards... for DR */
69 int starcat_boards = STARCAT_BDSET_MAX * STARCAT_BDSET_SLOT_MAX;
70 
71 /* Maximum number of cpus per board... for DR */
72 int starcat_cpu_per_board = MAX(STARCAT_SLOT0_CPU_MAX, STARCAT_SLOT1_CPU_MAX);
73 
74 /* Maximum number of mem-units per board... for DR */
75 int starcat_mem_per_board = MAX(STARCAT_SLOT0_MEM_MAX, STARCAT_SLOT1_MEM_MAX);
76 
77 /* Maximum number of io-units (buses) per board... for DR */
78 int starcat_io_per_board = 2 * MAX(STARCAT_SLOT0_IO_MAX, STARCAT_SLOT1_IO_MAX);
79 
80 /* Preferred minimum cage size (expressed in pages)... for DR */
81 pgcnt_t starcat_startup_cage_size = 0;
82 
83 /* Platform specific function to get unum information */
84 int (*p2get_mem_unum)(int, uint64_t, char *, int, int *);
85 
86 /* Memory for fcode claims.  16k times # maximum possible schizos */
87 #define	EFCODE_SIZE	(STARCAT_BDSET_MAX * 4 * 0x4000)
88 int efcode_size = EFCODE_SIZE;
89 
90 void sgn_update_all_cpus(ushort_t, uchar_t, uchar_t);
91 
92 /*
93  * The IOSRAM driver is loaded in load_platform_drivers() any cpu signature
94  * usage prior to that time will have not have a function to call.
95  */
96 static int (*iosram_rdp)(uint32_t key, uint32_t off, uint32_t len,
97 	    caddr_t dptr) = prom_starcat_iosram_read;
98 static int (*iosram_wrp)(uint32_t key, uint32_t off, uint32_t len,
99 	    caddr_t dptr) = prom_starcat_iosram_write;
100 
101 plat_dimm_sid_board_t	domain_dimm_sids[STARCAT_BDSET_MAX];
102 
103 /*
104  * set_platform_max_ncpus should return the maximum number of CPUs that the
105  * platform supports.  This function is called from check_cpus() to set the
106  * value of max_ncpus [see PSARC 1997/165 CPU Dynamic Reconfiguration].
107  * Data elements which are allocated based upon max_ncpus are all accessed
108  * via cpu_seqid and not physical IDs.  Previously, the value of max_ncpus
109  * was being set to the largest physical ID, which led to boot problems on
110  * systems with less than 1.25GB of memory.
111  */
112 
113 int
114 set_platform_max_ncpus(void)
115 {
116 	int n;
117 
118 	/*
119 	 * Convert number of slot0 + slot1 boards to number of expander brds
120 	 * and constrain the value to an architecturally plausible range
121 	 */
122 	n = MAX(starcat_boards, STARCAT_BDSET_MIN * STARCAT_BDSET_SLOT_MAX);
123 	n = MIN(n, STARCAT_BDSET_MAX * STARCAT_BDSET_SLOT_MAX);
124 	n = (n + STARCAT_BDSET_SLOT_MAX - 1) / STARCAT_BDSET_SLOT_MAX;
125 
126 	/* return maximum number of cpus possible on N expander boards */
127 	return (n * STARCAT_BDSET_CPU_MAX - STARCAT_SLOT1_CPU_MAX);
128 }
129 
130 int
131 set_platform_tsb_spares()
132 {
133 	return (MIN(starcat_tsb_spares, MAX_UPA));
134 }
135 
136 #pragma weak mmu_init_large_pages
137 
void
set_platform_defaults(void)
{
	extern char *tod_module_name;
	extern int ts_dispatch_extended;
	extern void cpu_sgn_update(ushort_t, uchar_t, uchar_t, int);
	extern int tsb_lgrp_affinity;
	extern int segkmem_reloc;
	extern void mmu_init_large_pages(size_t);
	extern int ncpunode;	/* number of CPUs detected by OBP */

#ifdef DEBUG
	/* On DEBUG kernels be verbose about correctable errors */
	ce_verbose_memory = 2;
	ce_verbose_other = 2;
#endif

	/* Set the CPU signature function pointer */
	cpu_sgn_func = cpu_sgn_update;

	/* Set appropriate tod module for starcat */
	ASSERT(tod_module_name == NULL);
	tod_module_name = "todstarcat";

	/*
	 * Use the alternate TS dispatch table, which is better
	 * tuned for large servers.
	 */
	if (ts_dispatch_extended == -1)
		ts_dispatch_extended = 1;

	/*
	 * Use lgroup-aware TSB allocations on this platform,
	 * since they are a considerable performance win.
	 */
	tsb_lgrp_affinity = 1;

	/*
	 * mmu_init_large_pages is a weak symbol (see the #pragma weak
	 * above); only call it if the MMU module actually provides it.
	 */
	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    (mmu_ism_pagesize != MMU_PAGESIZE32M)) {
		if (&mmu_init_large_pages)
			mmu_init_large_pages(mmu_ism_pagesize);
	}

	/*
	 * KPR (kernel page relocation) is supported on this platform.
	 * Enable it only when the kernel cage is enabled and the domain
	 * has 32 or more CPUs.
	 */
	if (hat_kpr_enabled && kernel_cage_enable && ncpunode >= 32) {
		segkmem_reloc = 1;
		cmn_err(CE_NOTE, "!Kernel Page Relocation is ENABLED");
	} else {
		cmn_err(CE_NOTE, "!Kernel Page Relocation is DISABLED");
	}
}
190 
191 #ifdef DEBUG
192 pgcnt_t starcat_cage_size_limit;
193 #endif
194 
void
set_platform_cage_params(void)
{
	extern pgcnt_t total_pages;
	extern struct memlist *phys_avail;
	int ret;

	if (kernel_cage_enable) {
		pgcnt_t preferred_cage_size;

		/*
		 * Prefer the tunable starcat_startup_cage_size, but use
		 * at least 1/256th of physical memory.
		 */
		preferred_cage_size =
			MAX(starcat_startup_cage_size, total_pages / 256);

#ifdef DEBUG
		if (starcat_cage_size_limit)
			preferred_cage_size = starcat_cage_size_limit;
#endif
		kcage_range_lock();
		/*
		 * Note: we are assuming that POST has loaded the
		 * whole show into the high end of memory. Having
		 * taken this leap, we copy the whole of phys_avail
		 * into the glist and arrange for the cage to grow
		 * downward (descending pfns).
		 */
		ret = kcage_range_init(phys_avail, 1);
		if (ret == 0)
			kcage_init(preferred_cage_size);
		kcage_range_unlock();
	}

	if (kcage_on)
		cmn_err(CE_NOTE, "!DR Kernel Cage is ENABLED");
	else
		cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
}
231 
232 void
233 load_platform_modules(void)
234 {
235 	if (modload("misc", "pcihp") < 0) {
236 		cmn_err(CE_NOTE, "pcihp driver failed to load");
237 	}
238 }
239 
240 /*
241  * Starcat does not support power control of CPUs from the OS.
242  */
243 /*ARGSUSED*/
244 int
245 plat_cpu_poweron(struct cpu *cp)
246 {
247 	int (*starcat_cpu_poweron)(struct cpu *) = NULL;
248 
249 	starcat_cpu_poweron =
250 		(int (*)(struct cpu *))kobj_getsymvalue(
251 			"drmach_cpu_poweron", 0);
252 
253 	if (starcat_cpu_poweron == NULL)
254 		return (ENOTSUP);
255 	else
256 		return ((starcat_cpu_poweron)(cp));
257 }
258 
259 /*ARGSUSED*/
260 int
261 plat_cpu_poweroff(struct cpu *cp)
262 {
263 	int (*starcat_cpu_poweroff)(struct cpu *) = NULL;
264 
265 	starcat_cpu_poweroff =
266 		(int (*)(struct cpu *))kobj_getsymvalue(
267 			"drmach_cpu_poweroff", 0);
268 
269 	if (starcat_cpu_poweroff == NULL)
270 		return (ENOTSUP);
271 	else
272 		return ((starcat_cpu_poweroff)(cp));
273 }
274 
275 /*
276  * The following are currently private to Starcat DR
277  */
278 int
279 plat_max_boards()
280 {
281 	return (starcat_boards);
282 }
283 
284 int
285 plat_max_cpu_units_per_board()
286 {
287 	return (starcat_cpu_per_board);
288 }
289 
290 int
291 plat_max_mc_units_per_board()
292 {
293 	return (starcat_mem_per_board); /* each CPU has a memory controller */
294 }
295 
296 int
297 plat_max_mem_units_per_board()
298 {
299 	return (starcat_mem_per_board);
300 }
301 
302 int
303 plat_max_io_units_per_board()
304 {
305 	return (starcat_io_per_board);
306 }
307 
int
plat_max_cpumem_boards(void)
{
	/* NOTE(review): presumably one CPU/memory board per board set */
	return (STARCAT_BDSET_MAX);
}
313 
314 int
315 plat_pfn_to_mem_node(pfn_t pfn)
316 {
317 	return (pfn >> mem_node_pfn_shift);
318 }
319 
320 #define	STARCAT_MC_MEMBOARD_SHIFT 37	/* Boards on 128BG boundary */
321 
322 /* ARGSUSED */
323 void
324 plat_build_mem_nodes(u_longlong_t *list, size_t nelems)
325 {
326 	size_t	elem;
327 	pfn_t	basepfn;
328 	pgcnt_t	npgs;
329 
330 	/*
331 	 * Starcat mem slices are always aligned on a 128GB boundary,
332 	 * fixed, and limited to one slice per expander due to design
333 	 * of the centerplane ASICs.
334 	 */
335 	mem_node_pfn_shift = STARCAT_MC_MEMBOARD_SHIFT - MMU_PAGESHIFT;
336 	mem_node_physalign = 0;
337 
338 	/*
339 	 * Boot install lists are arranged <addr, len>, <addr, len>, ...
340 	 */
341 	for (elem = 0; elem < nelems; elem += 2) {
342 		basepfn = btop(list[elem]);
343 		npgs = btop(list[elem+1]);
344 		mem_node_add_slice(basepfn, basepfn + npgs - 1);
345 	}
346 }
347 
348 /*
349  * Find the CPU associated with a slice at boot-time.
350  */
void
plat_fill_mc(pnode_t nodeid)
{
	int		len;
	uint64_t	mc_addr, mask;
	uint64_t	mc_decode[MAX_BANKS_PER_MC];
	uint32_t	regs[4];
	int		local_mc;
	int		portid;
	int		expnum;
	int		i;

	/*
	 * Memory address decoding registers
	 * (see Chap 9 of SPARCV9 JSP-1 US-III implementation)
	 */
	const uint64_t	mc_decode_addr[MAX_BANKS_PER_MC] = {
		0x400028, 0x400010, 0x400018, 0x400020
	};

	/*
	 * Starcat memory controller portid == global CPU id
	 */
	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
		(portid == -1))
		return;

	expnum = STARCAT_CPUID_TO_EXPANDER(portid);

	/*
	 * The "reg" property returns 4 32-bit values. The first two are
	 * combined to form a 64-bit address.  The second two are for a
	 * 64-bit size, but we don't actually need to look at that value.
	 */
	len = prom_getproplen(nodeid, "reg");
	if (len != (sizeof (uint32_t) * 4)) {
		prom_printf("Warning: malformed 'reg' property\n");
		return;
	}
	if (prom_getprop(nodeid, "reg", (caddr_t)regs) < 0)
		return;
	mc_addr = ((uint64_t)regs[0]) << 32;
	mc_addr |= (uint64_t)regs[1];

	/*
	 * Figure out whether the memory controller we are examining
	 * belongs to this CPU/CMP or a different one.
	 */
	if (portid == cpunodes[CPU->cpu_id].portid)
		local_mc = 1;
	else
		local_mc = 0;

	for (i = 0; i < MAX_BANKS_PER_MC; i++) {

		mask = mc_decode_addr[i];

		/*
		 * If the memory controller is local to this CPU, we use
		 * the special ASI to read the decode registers.
		 * Otherwise, we load the values from a magic address in
		 * I/O space.
		 */
		if (local_mc)
			mc_decode[i] = lddmcdecode(mask & MC_OFFSET_MASK);
		else
			mc_decode[i] = lddphysio((mc_addr | mask));

		/* Only process banks whose decode register has the valid bit set */
		if (mc_decode[i] >> MC_VALID_SHIFT) {
			uint64_t base = MC_BASE(mc_decode[i]) << PHYS2UM_SHIFT;
			int sliceid = (base >> STARCAT_MC_MEMBOARD_SHIFT);

			if (sliceid < max_mem_nodes) {
				/*
				 * Establish start-of-day mappings of
				 * lgroup platform handles to memnodes.
				 * Handle == Expander Number
				 * Memnode == Fixed 128GB Slice
				 */
				plat_assign_lgrphand_to_mem_node(expnum,
				    sliceid);
			}
		}
	}
}
436 
437 /*
438  * Starcat support for lgroups.
439  *
440  * On Starcat, an lgroup platform handle == expander number.
441  * For split-slot configurations (e.g. slot 0 and slot 1 boards
442  * in different domains) an MCPU board has only remote memory.
443  *
444  * The centerplane logic provides fixed 128GB memory slices
445  * each of which map to a memnode.  The initial mapping of
446  * memnodes to lgroup handles is determined at boot time.
447  * A DR addition of memory adds a new mapping. A DR copy-rename
448  * swaps mappings.
449  */
450 
451 /*
452  * Convert board number to expander number.
453  */
#define	BOARDNUM_2_EXPANDER(b)	((b) >> 1)
455 
456 /*
457  * Return the number of boards configured with NULL LPA.
458  */
459 static int
460 check_for_null_lpa(void)
461 {
462 	gdcd_t	*gdcd;
463 	uint_t	exp, nlpa;
464 
465 	/*
466 	 * Read GDCD from IOSRAM.
467 	 * If this fails indicate a NULL LPA condition.
468 	 */
469 	if ((gdcd = kmem_zalloc(sizeof (gdcd_t), KM_NOSLEEP)) == NULL)
470 		return (EXP_COUNT+1);
471 
472 	if ((*iosram_rdp)(GDCD_MAGIC, 0, sizeof (gdcd_t), (caddr_t)gdcd) ||
473 	    (gdcd->h.dcd_magic != GDCD_MAGIC) ||
474 	    (gdcd->h.dcd_version != DCD_VERSION)) {
475 		kmem_free(gdcd, sizeof (gdcd_t));
476 		cmn_err(CE_WARN, "check_for_null_lpa: failed to access GDCD\n");
477 		return (EXP_COUNT+2);
478 	}
479 
480 	/*
481 	 * Check for NULL LPAs on all slot 0 boards in domain
482 	 * (i.e. in all expanders marked good for this domain).
483 	 */
484 	nlpa = 0;
485 	for (exp = 0; exp < EXP_COUNT; exp++) {
486 		if (RSV_GOOD(gdcd->dcd_slot[exp][0].l1ss_rsv) &&
487 		    (gdcd->dcd_slot[exp][0].l1ss_flags &
488 		    L1SSFLG_THIS_L1_NULL_PROC_LPA))
489 			nlpa++;
490 	}
491 
492 	kmem_free(gdcd, sizeof (gdcd_t));
493 	return (nlpa);
494 }
495 
496 /*
497  * Return the platform handle for the lgroup containing the given CPU
498  *
499  * For Starcat, lgroup platform handle == expander.
500  */
501 
502 extern int mpo_disabled;
503 extern lgrp_handle_t lgrp_default_handle;
504 int null_lpa_boards = -1;
505 
506 lgrp_handle_t
507 plat_lgrp_cpu_to_hand(processorid_t id)
508 {
509 	lgrp_handle_t		plathand;
510 
511 	plathand = STARCAT_CPUID_TO_EXPANDER(id);
512 
513 	/*
514 	 * Return the real platform handle for the CPU until
515 	 * such time as we know that MPO should be disabled.
516 	 * At that point, we set the "mpo_disabled" flag to true,
517 	 * and from that point on, return the default handle.
518 	 *
519 	 * By the time we know that MPO should be disabled, the
520 	 * first CPU will have already been added to a leaf
521 	 * lgroup, but that's ok. The common lgroup code will
522 	 * double check that the boot CPU is in the correct place,
523 	 * and in the case where mpo should be disabled, will move
524 	 * it to the root if necessary.
525 	 */
526 	if (mpo_disabled) {
527 		/* If MPO is disabled, return the default (UMA) handle */
528 		plathand = lgrp_default_handle;
529 	} else {
530 		if (null_lpa_boards > 0) {
531 			/* Determine if MPO should be disabled */
532 			mpo_disabled = 1;
533 			plathand = lgrp_default_handle;
534 		}
535 	}
536 	return (plathand);
537 }
538 
539 /*
540  * Platform specific lgroup initialization
541  */
void
plat_lgrp_init(void)
{
	/* Tunables consumed by the common lgroup load-balancing code */
	extern uint32_t lgrp_expand_proc_thresh;
	extern uint32_t lgrp_expand_proc_diff;

	/*
	 * Set tuneables for Starcat architecture
	 *
	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
	 * this process is currently running on before considering
	 * expanding threads to another lgroup.
	 *
	 * lgrp_expand_proc_diff determines how much less the remote lgroup
	 * must be loaded before expanding to it.
	 *
	 * Since remote latencies can be costly, attempt to keep 3 threads
	 * within the same lgroup before expanding to the next lgroup.
	 */
	lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX * 3;
	lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
}
564 
565 /*
566  * Platform notification of lgroup (re)configuration changes
567  */
/*ARGSUSED*/
void
plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
{
	update_membounds_t	*umb;
	lgrp_config_mem_rename_t lmr;
	int			sbd, tbd;
	lgrp_handle_t		hand, shand, thand;
	int			mnode, snode, tnode;

	/* Nothing to maintain when MPO has been disabled */
	if (mpo_disabled)
		return;

	switch (evt) {

	case LGRP_CONFIG_MEM_ADD:
		/*
		 * Establish the lgroup handle to memnode translation.
		 */
		umb = (update_membounds_t *)arg;

		hand = BOARDNUM_2_EXPANDER(umb->u_board);
		mnode = plat_pfn_to_mem_node(umb->u_base >> MMU_PAGESHIFT);
		plat_assign_lgrphand_to_mem_node(hand, mnode);

		break;

	case LGRP_CONFIG_MEM_DEL:
		/* We don't have to do anything */

		break;

	case LGRP_CONFIG_MEM_RENAME:
		/*
		 * During a DR copy-rename operation, all of the memory
		 * on one board is moved to another board -- but the
		 * addresses/pfns and memnodes don't change. This means
		 * the memory has changed locations without changing identity.
		 *
		 * Source is where we are copying from and target is where we
		 * are copying to.  After source memnode is copied to target
		 * memnode, the physical addresses of the target memnode are
		 * renamed to match what the source memnode had.  Then target
		 * memnode can be removed and source memnode can take its
		 * place.
		 *
		 * To do this, swap the lgroup handle to memnode mappings for
		 * the boards, so target lgroup will have source memnode and
		 * source lgroup will have empty target memnode which is where
		 * its memory will go (if any is added to it later).
		 *
		 * Then source memnode needs to be removed from its lgroup
		 * and added to the target lgroup where the memory was living
		 * but under a different name/memnode.  The memory was in the
		 * target memnode and now lives in the source memnode with
		 * different physical addresses even though it is the same
		 * memory.
		 */
		/* arg packs source board in low 16 bits, target in high 16 */
		sbd = arg & 0xffff;
		tbd = (arg & 0xffff0000) >> 16;
		shand = BOARDNUM_2_EXPANDER(sbd);
		thand = BOARDNUM_2_EXPANDER(tbd);
		snode = plat_lgrphand_to_mem_node(shand);
		tnode = plat_lgrphand_to_mem_node(thand);

		/* Swap the handle-to-memnode mappings of the two boards */
		plat_assign_lgrphand_to_mem_node(thand, snode);
		plat_assign_lgrphand_to_mem_node(shand, tnode);

		lmr.lmem_rename_from = shand;
		lmr.lmem_rename_to = thand;

		/*
		 * Remove source memnode of copy rename from its lgroup
		 * and add it to its new target lgroup
		 */
		lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
		    (uintptr_t)&lmr);

		break;

	default:
		break;
	}
}
652 
653 /*
654  * Return latency between "from" and "to" lgroups
655  *
656  * This latency number can only be used for relative comparison
657  * between lgroups on the running system, cannot be used across platforms,
658  * and may not reflect the actual latency.  It is platform and implementation
659  * specific, so platform gets to decide its value.  It would be nice if the
660  * number was at least proportional to make comparisons more meaningful though.
661  * NOTE: The numbers below are supposed to be load latencies for uncached
662  * memory divided by 10.
663  */
664 int
665 plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to)
666 {
667 	/*
668 	 * Return min remote latency when there are more than two lgroups
669 	 * (root and child) and getting latency between two different lgroups
670 	 * or root is involved
671 	 */
672 	if (lgrp_optimizations() && (from != to ||
673 	    from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE))
674 		return (48);
675 	else
676 		return (28);
677 }
678 
679 /*
680  * Return platform handle for root lgroup
681  */
682 lgrp_handle_t
683 plat_lgrp_root_hand(void)
684 {
685 	if (mpo_disabled)
686 		return (lgrp_default_handle);
687 
688 	return (LGRP_DEFAULT_HANDLE);
689 }
690 
/* ARGSUSED */
void
plat_freelist_process(int mnode)
{
	/* No platform-specific freelist processing on this platform */
}
696 
void
load_platform_drivers(void)
{
	uint_t		tunnel;
	pnode_t		nodeid;
	dev_info_t	*chosen_devi;
	char		chosen_iosram[MAXNAMELEN];

	/*
	 * Get /chosen node - that's where the tunnel property is
	 */
	nodeid = prom_chosennode();

	/*
	 * Get the iosram property from the chosen node.
	 */
	if (prom_getprop(nodeid, IOSRAM_CHOSEN_PROP, (caddr_t)&tunnel) <= 0) {
		prom_printf("Unable to get iosram property\n");
		cmn_err(CE_PANIC, "Unable to get iosram property\n");
	}

	if (prom_phandle_to_path((phandle_t)tunnel, chosen_iosram,
			sizeof (chosen_iosram)) < 0) {
		(void) prom_printf("prom_phandle_to_path(0x%x) failed\n",
				tunnel);
		cmn_err(CE_PANIC, "prom_phandle_to_path(0x%x) failed\n",
				tunnel);
	}

	/*
	 * Attach all driver instances along the iosram's device path
	 */
	if (i_ddi_attach_hw_nodes("iosram") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "IOSRAM failed to load\n");
	}

	if ((chosen_devi = e_ddi_hold_devi_by_path(chosen_iosram, 0)) == NULL) {
		(void) prom_printf("e_ddi_hold_devi_by_path(%s) failed\n",
			chosen_iosram);
		cmn_err(CE_PANIC, "e_ddi_hold_devi_by_path(%s) failed\n",
			chosen_iosram);
	}
	/* Release the hold acquired by e_ddi_hold_devi_by_path() */
	ndi_rele_devi(chosen_devi);

	/*
	 * iosram driver is now loaded so we need to set our read and
	 * write pointers.
	 */
	iosram_rdp = (int (*)(uint32_t, uint32_t, uint32_t, caddr_t))
			modgetsymvalue("iosram_rd", 0);
	iosram_wrp = (int (*)(uint32_t, uint32_t, uint32_t, caddr_t))
			modgetsymvalue("iosram_wr", 0);

	/*
	 * Need to check for null proc LPA after IOSRAM driver is loaded
	 * and before multiple lgroups created (when start_other_cpus() called)
	 */
	null_lpa_boards = check_for_null_lpa();

	/* load and attach the axq driver */
	if (i_ddi_attach_hw_nodes("axq") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "AXQ failed to load\n");
	}

	/* load Starcat Solaris Mailbox Client driver */
	if (modload("misc", "scosmb") < 0) {
		cmn_err(CE_WARN, "SCOSMB failed to load\n");
	}

	/* load the DR driver */
	if (i_ddi_attach_hw_nodes("dr") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "dr failed to load");
	}

	/*
	 * Load the mc-us3 memory driver.
	 */
	if (i_ddi_attach_hw_nodes("mc-us3") != DDI_SUCCESS)
		cmn_err(CE_WARN, "mc-us3 failed to load");
	else
		(void) ddi_hold_driver(ddi_name_to_major("mc-us3"));

	/* Load the schizo pci bus nexus driver. */
	if (i_ddi_attach_hw_nodes("pcisch") != DDI_SUCCESS)
		cmn_err(CE_WARN, "pcisch failed to load");

	/* Initialize platform ECC error handling support */
	plat_ecc_init();
}
785 
786 
787 /*
788  * No platform drivers on this platform
789  */
char *platform_module_list[] = {
	NULL		/* NULL-terminated; no platform drivers */
};
793 
794 
/*ARGSUSED*/
void
plat_tod_fault(enum tod_fault_type tod_bad)
{
	/* No platform-specific TOD fault handling on this platform */
}
800 
801 /*
802  * Update the signature(s) in the IOSRAM's domain data section.
803  */
void
cpu_sgn_update(ushort_t sgn, uchar_t state, uchar_t sub_state, int cpuid)
{
	sig_state_t new_sgn;
	sig_state_t current_sgn;

	/*
	 * If the substate is REBOOT, then check for panic flow:
	 * a current domain state of SIGST_EXIT means we are rebooting
	 * out of a panic, so report SIGSUBST_PANIC_REBOOT instead.
	 */
	if (sub_state == SIGSUBST_REBOOT) {
		(*iosram_rdp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET,
		    sizeof (sig_state_t), (caddr_t)&current_sgn);
		if (current_sgn.state_t.state == SIGST_EXIT)
			sub_state = SIGSUBST_PANIC_REBOOT;
	}

	/*
	 * cpuid == -1 indicates that the operation applies to all cpus.
	 */
	if (cpuid < 0) {
		sgn_update_all_cpus(sgn, state, sub_state);
		return;
	}

	/* Write this CPU's signature into its per-CPU IOSRAM slot */
	new_sgn.signature = CPU_SIG_BLD(sgn, state, sub_state);
	(*iosram_wrp)(DOMD_MAGIC,
	    DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
	    sizeof (sig_state_t), (caddr_t)&new_sgn);

	/*
	 * Under certain conditions we don't update the signature
	 * of the domain_state (an OS_SIG OFFLINE/DETACHED appears to
	 * be a per-CPU-only event -- NOTE(review): presumed rationale).
	 */
	if ((sgn == OS_SIG) &&
	    ((state == SIGST_OFFLINE) || (state == SIGST_DETACHED)))
		return;
	(*iosram_wrp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET, sizeof (sig_state_t),
	    (caddr_t)&new_sgn);
}
843 
844 /*
845  * Update the signature(s) in the IOSRAM's domain data section for all CPUs.
846  */
void
sgn_update_all_cpus(ushort_t sgn, uchar_t state, uchar_t sub_state)
{
	sig_state_t new_sgn;
	int i = 0;

	new_sgn.signature = CPU_SIG_BLD(sgn, state, sub_state);

	/*
	 * First update the domain_state signature
	 */
	(*iosram_wrp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET, sizeof (sig_state_t),
	    (caddr_t)&new_sgn);

	/*
	 * Then write the same signature into the per-CPU slot of every
	 * CPU that exists or is quiesced.
	 */
	for (i = 0; i < NCPU; i++) {
		if (cpu[i] != NULL && (cpu[i]->cpu_flags &
		    (CPU_EXISTS|CPU_QUIESCED))) {
			(*iosram_wrp)(DOMD_MAGIC,
			    DOMD_CPUSIGS_OFFSET + i * sizeof (sig_state_t),
			    sizeof (sig_state_t), (caddr_t)&new_sgn);
		}
	}
}
870 
871 ushort_t
872 get_cpu_sgn(int cpuid)
873 {
874 	sig_state_t cpu_sgn;
875 
876 	(*iosram_rdp)(DOMD_MAGIC,
877 	    DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
878 	    sizeof (sig_state_t), (caddr_t)&cpu_sgn);
879 
880 	return (cpu_sgn.state_t.sig);
881 }
882 
883 uchar_t
884 get_cpu_sgn_state(int cpuid)
885 {
886 	sig_state_t cpu_sgn;
887 
888 	(*iosram_rdp)(DOMD_MAGIC,
889 	    DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
890 	    sizeof (sig_state_t), (caddr_t)&cpu_sgn);
891 
892 	return (cpu_sgn.state_t.state);
893 }
894 
895 
896 /*
897  * Type of argument passed into plat_get_ecache_cpu via ddi_walk_devs
898  * for matching on specific CPU node in device tree
899  */
900 
typedef struct {
	char		*jnum;	/* output, kmem_alloc'd if successful; */
				/* caller (plat_get_ecacheunum) frees it */
	int		cpuid;	/* input, to match cpuid/portid/upa-portid */
	uint_t		dimm;	/* input, index into ecache-dimm-label */
} plat_ecache_cpu_arg_t;
906 
907 
908 /*
909  * plat_get_ecache_cpu is called repeatedly by ddi_walk_devs with pointers
910  * to device tree nodes (dip) and to a plat_ecache_cpu_arg_t structure (arg).
911  * Returning DDI_WALK_CONTINUE tells ddi_walk_devs to keep going, returning
912  * DDI_WALK_TERMINATE ends the walk.  When the node for the specific CPU
913  * being searched for is found, the walk is done.  But before returning to
914  * ddi_walk_devs and plat_get_ecacheunum, we grab this CPU's ecache-dimm-label
915  * property and set the jnum member of the plat_ecache_cpu_arg_t structure to
916  * point to the label corresponding to this specific ecache DIMM.  It is up
917  * to plat_get_ecacheunum to kmem_free this string.
918  */
919 
static int
plat_get_ecache_cpu(dev_info_t *dip, void *arg)
{
	char			*devtype;
	plat_ecache_cpu_arg_t	*cpuarg;
	char			**dimm_labels;
	uint_t			numlabels;
	int			portid;

	/*
	 * Check device_type, must be "cpu"
	 */

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
						"device_type", &devtype)
				!= DDI_PROP_SUCCESS)
		return (DDI_WALK_CONTINUE);

	if (strcmp(devtype, "cpu")) {
		ddi_prop_free((void *)devtype);
		return (DDI_WALK_CONTINUE);
	}

	ddi_prop_free((void *)devtype);

	/*
	 * Check cpuid, portid, upa-portid (in that order), must
	 * match the cpuid being sought
	 */

	portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
				DDI_PROP_DONTPASS, "cpuid", -1);

	if (portid == -1)
		portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
				DDI_PROP_DONTPASS, "portid", -1);

	if (portid == -1)
		portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
				DDI_PROP_DONTPASS, "upa-portid", -1);

	cpuarg = (plat_ecache_cpu_arg_t *)arg;

	if (portid != cpuarg->cpuid)
		return (DDI_WALK_CONTINUE);

	/*
	 * Found the right CPU, fetch ecache-dimm-label property
	 */

	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
				"ecache-dimm-label", &dimm_labels, &numlabels)
			!= DDI_PROP_SUCCESS) {
#ifdef	DEBUG
		cmn_err(CE_NOTE, "cpuid=%d missing ecache-dimm-label property",
			portid);
#endif	/* DEBUG */
		return (DDI_WALK_TERMINATE);
	}

	if (cpuarg->dimm < numlabels) {
		/*
		 * Copy out the label for the requested DIMM; the caller
		 * owns (and must kmem_free) cpuarg->jnum.  Note that a
		 * KM_SLEEP allocation does not fail, so the NULL check
		 * below is purely defensive.
		 */
		cpuarg->jnum = kmem_alloc(
					strlen(dimm_labels[cpuarg->dimm]) + 1,
					KM_SLEEP);
		if (cpuarg->jnum != (char *)NULL)
			(void) strcpy(cpuarg->jnum, dimm_labels[cpuarg->dimm]);
#ifdef	DEBUG
		else
			cmn_err(CE_WARN,
				"cannot kmem_alloc for ecache dimm label");
#endif	/* DEBUG */
	}

	ddi_prop_free((void *)dimm_labels);
	return (DDI_WALK_TERMINATE);
}
996 
997 
998 /*
999  * Bit 4 of physical address indicates ecache 0 or 1
1000  */
1001 
1002 #define	ECACHE_DIMM_SHIFT	4
1003 #define	ECACHE_DIMM_MASK	0x10
1004 
1005 /*
1006  * plat_get_ecacheunum is called to generate the unum for an ecache error.
1007  * After some initialization, nearly all of the work is done by ddi_walk_devs
1008  * and plat_get_ecache_cpu.
1009  */
1010 
int
plat_get_ecacheunum(int cpuid, unsigned long long physaddr, char *buf,
		    int buflen, int *ustrlen)
{
	plat_ecache_cpu_arg_t	findcpu;
	uint_t	expander, slot, proc;

	findcpu.jnum = (char *)NULL;
	findcpu.cpuid = cpuid;
	/* Bit 4 of the physical address selects ecache DIMM 0 or 1 */
	findcpu.dimm = (physaddr & ECACHE_DIMM_MASK) >> ECACHE_DIMM_SHIFT;

	/*
	 * Walk the device tree, find this specific CPU, and get the label
	 * for this ecache, returned here in findcpu.jnum
	 */

	ddi_walk_devs(ddi_root_node(), plat_get_ecache_cpu, (void *)&findcpu);

	if (findcpu.jnum == (char *)NULL)
		return (-1);

	expander = STARCAT_CPUID_TO_EXPANDER(cpuid);
	slot = STARCAT_CPUID_TO_BOARDSLOT(cpuid);

	/*
	 * STARCAT_CPUID_TO_PORTID clears the CoreID bit so that
	 * STARCAT_CPUID_TO_AGENT will return a physical proc (0 - 3).
	 */
	proc = STARCAT_CPUID_TO_AGENT(STARCAT_CPUID_TO_PORTID(cpuid));

	/*
	 * NOTE: Any modifications to the snprintf() call below will require
	 * changing plat_log_fruid_error() as well!
	 */
	(void) snprintf(buf, buflen, "%s%u/P%u/E%u J%s", (slot ? "IO" : "SB"),
			expander, proc, findcpu.dimm, findcpu.jnum);

	*ustrlen = strlen(buf);

	/* Free the label allocated on our behalf by plat_get_ecache_cpu() */
	kmem_free(findcpu.jnum, strlen(findcpu.jnum) + 1);

	return (0);
}
1054 
1055 /*ARGSUSED*/
1056 int
1057 plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
1058     int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
1059 {
1060 	int ret;
1061 
1062 	/*
1063 	 * check if it's a Memory or an Ecache error.
1064 	 */
1065 	if (flt_in_memory) {
1066 		if (p2get_mem_unum != NULL) {
1067 			return (p2get_mem_unum(synd_code, P2ALIGN(flt_addr, 8),
1068 				buf, buflen, lenp));
1069 		} else {
1070 			return (ENOTSUP);
1071 		}
1072 	} else if (flt_status & ECC_ECACHE) {
1073 		if ((ret = plat_get_ecacheunum(flt_bus_id,
1074 		    P2ALIGN(flt_addr, 8), buf, buflen, lenp)) != 0)
1075 			return (EIO);
1076 	} else {
1077 		return (ENOTSUP);
1078 	}
1079 
1080 	return (ret);
1081 }
1082 
1083 static int (*ecc_mailbox_msg_func)(plat_ecc_message_type_t, void *) = NULL;
1084 
1085 /*
1086  * To keep OS mailbox handling localized, all we do is forward the call to the
1087  * scosmb module (if it is available).
1088  */
1089 int
1090 plat_send_ecc_mailbox_msg(plat_ecc_message_type_t msg_type, void *datap)
1091 {
1092 	/*
1093 	 * find the symbol for the mailbox sender routine in the scosmb module
1094 	 */
1095 	if (ecc_mailbox_msg_func == NULL)
1096 		ecc_mailbox_msg_func = (int (*)(plat_ecc_message_type_t,
1097 		    void *))modgetsymvalue("scosmb_log_ecc_error", 0);
1098 
1099 	/*
1100 	 * If the symbol was found, call it.  Otherwise, there is not much
1101 	 * else we can do and console messages will have to suffice.
1102 	 */
1103 	if (ecc_mailbox_msg_func)
1104 		return ((*ecc_mailbox_msg_func)(msg_type, datap));
1105 	else
1106 		return (ENODEV);
1107 }
1108 
/*
 * Compose a cpuid from FRU coordinates: board (sb), module (m) and
 * processor (proc).
 */
int
plat_make_fru_cpuid(int sb, int m, int proc)
{
	int cpuid = MAKE_CPUID(sb, m, proc);

	return (cpuid);
}
1114 
1115 /*
1116  * board number for a given proc
1117  */
1118 int
1119 plat_make_fru_boardnum(int proc)
1120 {
1121 	return (STARCAT_CPUID_TO_EXPANDER(proc));
1122 }
1123 
1124 /*
1125  * This platform hook gets called from mc_add_mem_unum_label() in the mc-us3
1126  * driver giving each platform the opportunity to add platform
1127  * specific label information to the unum for ECC error logging purposes.
1128  */
1129 void
1130 plat_add_mem_unum_label(char *unum, int mcid, int bank, int dimm)
1131 {
1132 	char	new_unum[UNUM_NAMLEN];
1133 	uint_t	expander = STARCAT_CPUID_TO_EXPANDER(mcid);
1134 	uint_t	slot = STARCAT_CPUID_TO_BOARDSLOT(mcid);
1135 
1136 	/*
1137 	 * STARCAT_CPUID_TO_PORTID clears the CoreID bit so that
1138 	 * STARCAT_CPUID_TO_AGENT will return a physical proc (0 - 3).
1139 	 */
1140 	uint_t	proc = STARCAT_CPUID_TO_AGENT(STARCAT_CPUID_TO_PORTID(mcid));
1141 
1142 	/*
1143 	 * NOTE: Any modifications to the two sprintf() calls below will
1144 	 * require changing plat_log_fruid_error() as well!
1145 	 */
1146 	if (dimm == -1)
1147 		(void) snprintf(new_unum, UNUM_NAMLEN, "%s%u/P%u/B%d %s",
1148 			(slot ? "IO" : "SB"), expander,
1149 			proc, (bank & 0x1), unum);
1150 	else
1151 		(void) snprintf(new_unum, UNUM_NAMLEN, "%s%u/P%u/B%d/D%d %s",
1152 			(slot ? "IO" : "SB"), expander,
1153 			proc, (bank & 0x1), (dimm & 0x3), unum);
1154 
1155 	(void) strcpy(unum, new_unum);
1156 }
1157 
1158 int
1159 plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
1160 {
1161 	int	expander = STARCAT_CPUID_TO_EXPANDER(cpuid);
1162 	int	slot = STARCAT_CPUID_TO_BOARDSLOT(cpuid);
1163 
1164 	if (snprintf(buf, buflen, "%s%d", (slot ? "IO" : "SB"), expander)
1165 	    >= buflen) {
1166 		return (ENOSPC);
1167 	} else {
1168 		*lenp = strlen(buf);
1169 		return (0);
1170 	}
1171 }
1172 
1173 /*
1174  * This routine is used by the data bearing mondo (DMV) initialization
1175  * routine to determine the number of hardware and software DMV interrupts
1176  * that a platform supports.
1177  */
1178 void
1179 plat_dmv_params(uint_t *hwint, uint_t *swint)
1180 {
1181 	*hwint = STARCAT_DMV_HWINT;
1182 	*swint = 0;
1183 }
1184 
1185 /*
1186  * If provided, this function will be called whenever the nodename is updated.
1187  * To keep OS mailbox handling localized, all we do is forward the call to the
1188  * scosmb module (if it is available).
1189  */
1190 void
1191 plat_nodename_set(void)
1192 {
1193 	void (*nodename_update_func)(uint64_t) = NULL;
1194 
1195 	/*
1196 	 * find the symbol for the nodename update routine in the scosmb module
1197 	 */
1198 	nodename_update_func = (void (*)(uint64_t))
1199 	    modgetsymvalue("scosmb_update_nodename", 0);
1200 
1201 	/*
1202 	 * If the symbol was found, call it.  Otherwise, log a note (but not to
1203 	 * the console).
1204 	 */
1205 	if (nodename_update_func != NULL) {
1206 		nodename_update_func(0);
1207 	} else {
1208 		cmn_err(CE_NOTE,
1209 		    "!plat_nodename_set: scosmb_update_nodename not found\n");
1210 	}
1211 }
1212 
1213 caddr_t	efcode_vaddr = NULL;
1214 caddr_t efcode_paddr = NULL;
1215 /*
1216  * Preallocate enough memory for fcode claims.
1217  */
1218 
1219 caddr_t
1220 efcode_alloc(caddr_t alloc_base)
1221 {
1222 	caddr_t efcode_alloc_base = (caddr_t)roundup((uintptr_t)alloc_base,
1223 	    MMU_PAGESIZE);
1224 	caddr_t vaddr;
1225 
1226 	/*
1227 	 * allocate the physical memory schizo fcode.
1228 	 */
1229 	if ((vaddr = (caddr_t)BOP_ALLOC(bootops, efcode_alloc_base,
1230 	    efcode_size, MMU_PAGESIZE)) == NULL)
1231 		cmn_err(CE_PANIC, "Cannot allocate Efcode Memory");
1232 
1233 	efcode_vaddr = vaddr;
1234 
1235 	return (efcode_alloc_base + efcode_size);
1236 }
1237 
1238 caddr_t
1239 starcat_startup_memlist(caddr_t alloc_base)
1240 {
1241 	caddr_t tmp_alloc_base;
1242 
1243 	tmp_alloc_base = efcode_alloc(alloc_base);
1244 	tmp_alloc_base = (caddr_t)roundup((uintptr_t)tmp_alloc_base,
1245 					    ecache_alignsize);
1246 	return (tmp_alloc_base);
1247 }
1248 
1249 /*
1250  * This is a helper function to determine if a given
1251  * node should be considered for a dr operation according
1252  * to predefined dr names. This is accomplished using
1253  * a function defined in drmach module. The drmach module
1254  * owns the definition of dr allowable names.
1255  * Formal Parameter: The name of a device node.
1256  * Expected Return Value: -1, device node name does not map to a valid dr name.
1257  *               A value greater or equal to 0, name is valid.
1258  */
1259 int
1260 starcat_dr_name(char *name)
1261 {
1262 	int (*drmach_name2type)(char *) = NULL;
1263 
1264 	/* Get a pointer to helper function in the dramch module. */
1265 	drmach_name2type =
1266 	    (int (*)(char *))kobj_getsymvalue("drmach_name2type_idx", 0);
1267 
1268 	if (drmach_name2type == NULL)
1269 		return (-1);
1270 
1271 	return ((*drmach_name2type)(name));
1272 }
1273 
/*
 * Generic platform-startup hook; Starcat requires no extra work here,
 * so this is an intentional no-op stub.
 */
void
startup_platform(void)
{
}
1278 
1279 /*
1280  * KDI functions - used by the in-situ kernel debugger (kmdb) to perform
1281  * platform-specific operations.  These functions execute when the world is
1282  * stopped, and as such cannot make any blocking calls, hold locks, etc.
1283  * promif functions are a special case, and may be used.
1284  */
1285 
/*
 * KDI "claim" callback: runs when the debugger takes control of the
 * machine (world stopped; only promif calls are safe here).
 * NOTE(review): the forth snippet appears to stamp OBP_SIG into the
 * signature block for this CPU and the domain — confirm against the
 * OBP sigb-sig! definition.
 */
static void
starcat_system_claim(void)
{
	prom_interpret("sigb-sig! my-sigb-sig!", OBP_SIG, OBP_SIG, 0, 0, 0);
}
1291 
/*
 * KDI "release" callback: runs when the debugger hands the machine back
 * to the OS.  Mirrors starcat_system_claim(), but writes OS_SIG —
 * presumably restoring the signature block to "OS running"; confirm
 * against the OBP sigb-sig! definition.
 */
static void
starcat_system_release(void)
{
	prom_interpret("sigb-sig! my-sigb-sig!", OS_SIG, OS_SIG, 0, 0, 0);
}
1297 
1298 void
1299 plat_kdi_init(kdi_t *kdi)
1300 {
1301 	kdi->pkdi_system_claim = starcat_system_claim;
1302 	kdi->pkdi_system_release = starcat_system_release;
1303 }
1304 
1305 /*
1306  * This function returns 1 if large pages for kernel heap are supported
1307  * and 0 otherwise.
1308  *
1309  * Currently we disable lp kmem support if kpr is going to be enabled
1310  * because in the case of large pages hat_add_callback()/hat_delete_callback()
1311  * cause network performance degradation
1312  */
1313 int
1314 plat_lpkmem_is_supported(void)
1315 {
1316 	extern int segkmem_reloc;
1317 
1318 	if (hat_kpr_enabled && kernel_cage_enable &&
1319 	    (ncpunode >= 32 || segkmem_reloc == 1))
1320 		return (0);
1321 
1322 	return (1);
1323 }
1324