xref: /titanic_51/usr/src/uts/sun4u/starcat/os/starcat.c (revision c7c6ab2a4af23be725dea6dacf112b0b9f3fe26f)
103831d35Sstevel /*
203831d35Sstevel  * CDDL HEADER START
303831d35Sstevel  *
403831d35Sstevel  * The contents of this file are subject to the terms of the
525cf1a30Sjl139090  * Common Development and Distribution License (the "License").
625cf1a30Sjl139090  * You may not use this file except in compliance with the License.
703831d35Sstevel  *
803831d35Sstevel  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
903831d35Sstevel  * or http://www.opensolaris.org/os/licensing.
1003831d35Sstevel  * See the License for the specific language governing permissions
1103831d35Sstevel  * and limitations under the License.
1203831d35Sstevel  *
1303831d35Sstevel  * When distributing Covered Code, include this CDDL HEADER in each
1403831d35Sstevel  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
1503831d35Sstevel  * If applicable, add the following below this CDDL HEADER, with the
1603831d35Sstevel  * fields enclosed by brackets "[]" replaced with your own identifying
1703831d35Sstevel  * information: Portions Copyright [yyyy] [name of copyright owner]
1803831d35Sstevel  *
1903831d35Sstevel  * CDDL HEADER END
2003831d35Sstevel  */
2103831d35Sstevel /*
22d3d50737SRafael Vanoni  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
2303831d35Sstevel  * Use is subject to license terms.
2403831d35Sstevel  */
2503831d35Sstevel 
26*c7c6ab2aSGarrett D'Amore /*
27*c7c6ab2aSGarrett D'Amore  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
28*c7c6ab2aSGarrett D'Amore  */
29*c7c6ab2aSGarrett D'Amore 
3003831d35Sstevel #include <sys/param.h>
3103831d35Sstevel #include <sys/systm.h>
3203831d35Sstevel #include <sys/sysmacros.h>
3303831d35Sstevel #include <sys/sunddi.h>
3403831d35Sstevel #include <sys/esunddi.h>
3503831d35Sstevel #include <sys/sunndi.h>
3603831d35Sstevel #include <sys/modctl.h>
3703831d35Sstevel #include <sys/promif.h>
3803831d35Sstevel #include <sys/machparam.h>
3903831d35Sstevel #include <sys/kobj.h>
4003831d35Sstevel #include <sys/cpuvar.h>
4103831d35Sstevel #include <sys/mem_cage.h>
4203831d35Sstevel #include <sys/promif.h>
4303831d35Sstevel #include <sys/promimpl.h>
4403831d35Sstevel #include <sys/platform_module.h>
4503831d35Sstevel #include <sys/errno.h>
4603831d35Sstevel #include <sys/cpu_sgnblk_defs.h>
4703831d35Sstevel #include <sys/iosramio.h>
4803831d35Sstevel #include <sys/domaind.h>
4903831d35Sstevel #include <sys/starcat.h>
5003831d35Sstevel #include <sys/machsystm.h>
5103831d35Sstevel #include <sys/bootconf.h>
5203831d35Sstevel #include <sys/memnode.h>
5303831d35Sstevel #include <vm/vm_dep.h>
5403831d35Sstevel #include <vm/page.h>
5503831d35Sstevel #include <sys/cheetahregs.h>
5603831d35Sstevel #include <sys/plat_ecc_unum.h>
5703831d35Sstevel #include <sys/plat_ecc_dimm.h>
5803831d35Sstevel #include <sys/lgrp.h>
5903831d35Sstevel #include <sys/dr.h>
6003831d35Sstevel #include <sys/post/scat_dcd.h>
6103831d35Sstevel #include <sys/kdi_impl.h>
6203831d35Sstevel #include <sys/iosramreg.h>
6303831d35Sstevel #include <sys/iosramvar.h>
6403831d35Sstevel #include <sys/mc-us3.h>
65d3d50737SRafael Vanoni #include <sys/clock_impl.h>
6603831d35Sstevel 
6703831d35Sstevel /* Preallocation of spare tsb's for DR */
6803831d35Sstevel int starcat_tsb_spares = STARCAT_SPARE_TSB_MAX;
6903831d35Sstevel 
7003831d35Sstevel /* Maximum number of slot0 + slot1 boards... for DR */
7103831d35Sstevel int starcat_boards = STARCAT_BDSET_MAX * STARCAT_BDSET_SLOT_MAX;
7203831d35Sstevel 
7303831d35Sstevel /* Maximum number of cpus per board... for DR */
7403831d35Sstevel int starcat_cpu_per_board = MAX(STARCAT_SLOT0_CPU_MAX, STARCAT_SLOT1_CPU_MAX);
7503831d35Sstevel 
7603831d35Sstevel /* Maximum number of mem-units per board... for DR */
7703831d35Sstevel int starcat_mem_per_board = MAX(STARCAT_SLOT0_MEM_MAX, STARCAT_SLOT1_MEM_MAX);
7803831d35Sstevel 
7903831d35Sstevel /* Maximum number of io-units (buses) per board... for DR */
8003831d35Sstevel int starcat_io_per_board = 2 * MAX(STARCAT_SLOT0_IO_MAX, STARCAT_SLOT1_IO_MAX);
8103831d35Sstevel 
8203831d35Sstevel /* Preferred minimum cage size (expressed in pages)... for DR */
8303831d35Sstevel pgcnt_t starcat_startup_cage_size = 0;
8403831d35Sstevel 
8503831d35Sstevel /* Platform specific function to get unum information */
8603831d35Sstevel int (*p2get_mem_unum)(int, uint64_t, char *, int, int *);
8703831d35Sstevel 
8803831d35Sstevel /* Memory for fcode claims:  16KB times the maximum possible number of schizos */
8903831d35Sstevel #define	EFCODE_SIZE	(STARCAT_BDSET_MAX * 4 * 0x4000)
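/*
 * Rough sizing sketch (assuming the usual Starcat limit of 18 board sets,
 * with up to four Schizo ASICs each per the factor of 4 above): the
 * reservation works out to 18 * 4 * 16KB = 1152KB of fcode claim space.
 */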
9003831d35Sstevel int efcode_size = EFCODE_SIZE;
9103831d35Sstevel 
9203831d35Sstevel void sgn_update_all_cpus(ushort_t, uchar_t, uchar_t);
9303831d35Sstevel 
9403831d35Sstevel /*
9503831d35Sstevel  * The IOSRAM driver is loaded in load_platform_drivers(); any cpu signature
9603831d35Sstevel  * usage prior to that time will not have a function to call.
9703831d35Sstevel  */
9803831d35Sstevel static int (*iosram_rdp)(uint32_t key, uint32_t off, uint32_t len,
9903831d35Sstevel 	    caddr_t dptr) = prom_starcat_iosram_read;
10003831d35Sstevel static int (*iosram_wrp)(uint32_t key, uint32_t off, uint32_t len,
10103831d35Sstevel 	    caddr_t dptr) = prom_starcat_iosram_write;
10203831d35Sstevel 
10303831d35Sstevel plat_dimm_sid_board_t	domain_dimm_sids[STARCAT_BDSET_MAX];
10403831d35Sstevel 
10503831d35Sstevel /*
10603831d35Sstevel  * set_platform_max_ncpus should return the maximum number of CPUs that the
10703831d35Sstevel  * platform supports.  This function is called from check_cpus() to set the
10803831d35Sstevel  * value of max_ncpus [see PSARC 1997/165 CPU Dynamic Reconfiguration].
10903831d35Sstevel  * Data elements which are allocated based upon max_ncpus are all accessed
11003831d35Sstevel  * via cpu_seqid and not physical IDs.  Previously, the value of max_ncpus
11103831d35Sstevel  * was being set to the largest physical ID, which led to boot problems on
11203831d35Sstevel  * systems with less than 1.25GB of memory.
11303831d35Sstevel  */
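/*
 * Sketch of the computation below: starcat_boards is clamped to the
 * architecturally valid range of slot0 + slot1 boards and then rounded up
 * to whole expander board sets (STARCAT_BDSET_SLOT_MAX slots per set).
 * The value returned is the CPU capacity of that many fully populated
 * expanders, less the slot-1 CPU capacity of a single expander.
 */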
11403831d35Sstevel 
11503831d35Sstevel int
11603831d35Sstevel set_platform_max_ncpus(void)
11703831d35Sstevel {
11803831d35Sstevel 	int n;
11903831d35Sstevel 
12003831d35Sstevel 	/*
12103831d35Sstevel 	 * Convert number of slot0 + slot1 boards to number of expander boards
12203831d35Sstevel 	 * and constrain the value to an architecturally plausible range
12303831d35Sstevel 	 */
12403831d35Sstevel 	n = MAX(starcat_boards, STARCAT_BDSET_MIN * STARCAT_BDSET_SLOT_MAX);
12503831d35Sstevel 	n = MIN(n, STARCAT_BDSET_MAX * STARCAT_BDSET_SLOT_MAX);
12603831d35Sstevel 	n = (n + STARCAT_BDSET_SLOT_MAX - 1) / STARCAT_BDSET_SLOT_MAX;
12703831d35Sstevel 
12803831d35Sstevel 	/* return maximum number of cpus possible on N expander boards */
12903831d35Sstevel 	return (n * STARCAT_BDSET_CPU_MAX - STARCAT_SLOT1_CPU_MAX);
13003831d35Sstevel }
13103831d35Sstevel 
13203831d35Sstevel int
13303831d35Sstevel set_platform_tsb_spares()
13403831d35Sstevel {
13503831d35Sstevel 	return (MIN(starcat_tsb_spares, MAX_UPA));
13603831d35Sstevel }
13703831d35Sstevel 
13803831d35Sstevel #pragma weak mmu_init_large_pages
13903831d35Sstevel 
14003831d35Sstevel void
14103831d35Sstevel set_platform_defaults(void)
14203831d35Sstevel {
14303831d35Sstevel 	extern char *tod_module_name;
14403831d35Sstevel 	extern int ts_dispatch_extended;
14503831d35Sstevel 	extern void cpu_sgn_update(ushort_t, uchar_t, uchar_t, int);
14603831d35Sstevel 	extern int tsb_lgrp_affinity;
14703831d35Sstevel 	extern int segkmem_reloc;
14803831d35Sstevel 	extern void mmu_init_large_pages(size_t);
14903831d35Sstevel 	extern int ncpunode;	/* number of CPUs detected by OBP */
15003831d35Sstevel 
15103831d35Sstevel #ifdef DEBUG
15203831d35Sstevel 	ce_verbose_memory = 2;
15303831d35Sstevel 	ce_verbose_other = 2;
15403831d35Sstevel #endif
15503831d35Sstevel 
15603831d35Sstevel 	/* Set the CPU signature function pointer */
15703831d35Sstevel 	cpu_sgn_func = cpu_sgn_update;
15803831d35Sstevel 
15903831d35Sstevel 	/* Set appropriate tod module for starcat */
16003831d35Sstevel 	ASSERT(tod_module_name == NULL);
16103831d35Sstevel 	tod_module_name = "todstarcat";
16203831d35Sstevel 
16303831d35Sstevel 	/*
16403831d35Sstevel 	 * Use the alternate TS dispatch table, which is better
16503831d35Sstevel 	 * tuned for large servers.
16603831d35Sstevel 	 */
16703831d35Sstevel 	if (ts_dispatch_extended == -1)
16803831d35Sstevel 		ts_dispatch_extended = 1;
16903831d35Sstevel 
17003831d35Sstevel 	/*
17103831d35Sstevel 	 * Use lgroup-aware TSB allocations on this platform,
17203831d35Sstevel 	 * since they are a considerable performance win.
17303831d35Sstevel 	 */
17403831d35Sstevel 	tsb_lgrp_affinity = 1;
17503831d35Sstevel 
17603831d35Sstevel 	if ((mmu_page_sizes == max_mmu_page_sizes) &&
177e12a8a13Ssusans 	    (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) {
17803831d35Sstevel 		if (&mmu_init_large_pages)
17903831d35Sstevel 			mmu_init_large_pages(mmu_ism_pagesize);
18003831d35Sstevel 	}
18103831d35Sstevel 
18203831d35Sstevel 	/*
18303831d35Sstevel 	 * KPR (kernel page relocation) is supported on this platform.
18403831d35Sstevel 	 */
185*c7c6ab2aSGarrett D'Amore 	if (kernel_cage_enable && ncpunode >= 32) {
18603831d35Sstevel 		segkmem_reloc = 1;
18703831d35Sstevel 		cmn_err(CE_NOTE, "!Kernel Page Relocation is ENABLED");
18803831d35Sstevel 	} else {
18903831d35Sstevel 		cmn_err(CE_NOTE, "!Kernel Page Relocation is DISABLED");
19003831d35Sstevel 	}
19103831d35Sstevel }
19203831d35Sstevel 
19303831d35Sstevel #ifdef DEBUG
19403831d35Sstevel pgcnt_t starcat_cage_size_limit;
19503831d35Sstevel #endif
19603831d35Sstevel 
19703831d35Sstevel void
19803831d35Sstevel set_platform_cage_params(void)
19903831d35Sstevel {
20003831d35Sstevel 	extern pgcnt_t total_pages;
20103831d35Sstevel 	extern struct memlist *phys_avail;
20203831d35Sstevel 
20303831d35Sstevel 	if (kernel_cage_enable) {
20403831d35Sstevel 		pgcnt_t preferred_cage_size;
20503831d35Sstevel 
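		/*
		 * Ask for at least 1/256th (about 0.4%) of physical memory
		 * for the cage, unless a larger startup cage size was
		 * requested via starcat_startup_cage_size.
		 */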
20603831d35Sstevel 		preferred_cage_size =
20703831d35Sstevel 		    MAX(starcat_startup_cage_size, total_pages / 256);
20803831d35Sstevel 
20903831d35Sstevel #ifdef DEBUG
21003831d35Sstevel 		if (starcat_cage_size_limit)
21103831d35Sstevel 			preferred_cage_size = starcat_cage_size_limit;
21203831d35Sstevel #endif
21303831d35Sstevel 		/*
21403831d35Sstevel 		 * Note: we are assuming that POST has loaded the
21503831d35Sstevel 		 * whole show into the high end of memory.  Having
21603831d35Sstevel 		 * taken this leap, we pass the whole of phys_avail
21703831d35Sstevel 		 * as the glist and arrange for the cage to grow
21803831d35Sstevel 		 * downward (descending pfns).
21903831d35Sstevel 		 */
22085f58038Sdp78419 		kcage_range_init(phys_avail, KCAGE_DOWN, preferred_cage_size);
22103831d35Sstevel 	}
22203831d35Sstevel 
22303831d35Sstevel 	if (kcage_on)
22403831d35Sstevel 		cmn_err(CE_NOTE, "!DR Kernel Cage is ENABLED");
22503831d35Sstevel 	else
22603831d35Sstevel 		cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
22703831d35Sstevel }
22803831d35Sstevel 
22903831d35Sstevel void
23003831d35Sstevel load_platform_modules(void)
23103831d35Sstevel {
23203831d35Sstevel 	if (modload("misc", "pcihp") < 0) {
23303831d35Sstevel 		cmn_err(CE_NOTE, "pcihp driver failed to load");
23403831d35Sstevel 	}
23503831d35Sstevel }
23603831d35Sstevel 
23703831d35Sstevel /*
23803831d35Sstevel  * Starcat does not support power control of CPUs from the OS.
23903831d35Sstevel  */
24003831d35Sstevel /*ARGSUSED*/
24103831d35Sstevel int
24203831d35Sstevel plat_cpu_poweron(struct cpu *cp)
24303831d35Sstevel {
24403831d35Sstevel 	int (*starcat_cpu_poweron)(struct cpu *) = NULL;
24503831d35Sstevel 
24603831d35Sstevel 	starcat_cpu_poweron =
247cbcdac8fSmb91622 	    (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweron", 0);
24803831d35Sstevel 
24903831d35Sstevel 	if (starcat_cpu_poweron == NULL)
25003831d35Sstevel 		return (ENOTSUP);
25103831d35Sstevel 	else
25203831d35Sstevel 		return ((starcat_cpu_poweron)(cp));
25303831d35Sstevel }
25403831d35Sstevel 
25503831d35Sstevel /*ARGSUSED*/
25603831d35Sstevel int
25703831d35Sstevel plat_cpu_poweroff(struct cpu *cp)
25803831d35Sstevel {
25903831d35Sstevel 	int (*starcat_cpu_poweroff)(struct cpu *) = NULL;
26003831d35Sstevel 
26103831d35Sstevel 	starcat_cpu_poweroff =
262cbcdac8fSmb91622 	    (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweroff", 0);
26303831d35Sstevel 
26403831d35Sstevel 	if (starcat_cpu_poweroff == NULL)
26503831d35Sstevel 		return (ENOTSUP);
26603831d35Sstevel 	else
26703831d35Sstevel 		return ((starcat_cpu_poweroff)(cp));
26803831d35Sstevel }
26903831d35Sstevel 
27003831d35Sstevel /*
27103831d35Sstevel  * The following are currently private to Starcat DR
27203831d35Sstevel  */
27303831d35Sstevel int
27403831d35Sstevel plat_max_boards()
27503831d35Sstevel {
27603831d35Sstevel 	return (starcat_boards);
27703831d35Sstevel }
27803831d35Sstevel 
27903831d35Sstevel int
28003831d35Sstevel plat_max_cpu_units_per_board()
28103831d35Sstevel {
28203831d35Sstevel 	return (starcat_cpu_per_board);
28303831d35Sstevel }
28403831d35Sstevel 
28503831d35Sstevel int
28603831d35Sstevel plat_max_mc_units_per_board()
28703831d35Sstevel {
28803831d35Sstevel 	return (starcat_mem_per_board); /* each CPU has a memory controller */
28903831d35Sstevel }
29003831d35Sstevel 
29103831d35Sstevel int
29203831d35Sstevel plat_max_mem_units_per_board()
29303831d35Sstevel {
29403831d35Sstevel 	return (starcat_mem_per_board);
29503831d35Sstevel }
29603831d35Sstevel 
29703831d35Sstevel int
29803831d35Sstevel plat_max_io_units_per_board()
29903831d35Sstevel {
30003831d35Sstevel 	return (starcat_io_per_board);
30103831d35Sstevel }
30203831d35Sstevel 
30303831d35Sstevel int
30403831d35Sstevel plat_max_cpumem_boards(void)
30503831d35Sstevel {
30603831d35Sstevel 	return (STARCAT_BDSET_MAX);
30703831d35Sstevel }
30803831d35Sstevel 
30903831d35Sstevel int
31003831d35Sstevel plat_pfn_to_mem_node(pfn_t pfn)
31103831d35Sstevel {
31203831d35Sstevel 	return (pfn >> mem_node_pfn_shift);
31303831d35Sstevel }
31403831d35Sstevel 
31503831d35Sstevel #define	STARCAT_MC_MEMBOARD_SHIFT 37	/* Boards on 128GB boundary */
31603831d35Sstevel 
31703831d35Sstevel /* ARGSUSED */
31803831d35Sstevel void
319986fd29aSsetje plat_build_mem_nodes(prom_memlist_t *list, size_t nelems)
32003831d35Sstevel {
32103831d35Sstevel 	size_t	elem;
32203831d35Sstevel 	pfn_t	basepfn;
32303831d35Sstevel 	pgcnt_t	npgs;
32403831d35Sstevel 
32503831d35Sstevel 	/*
32603831d35Sstevel 	 * Starcat mem slices are fixed, always aligned on a 128GB
32703831d35Sstevel 	 * boundary, and limited to one slice per expander due to the
32803831d35Sstevel 	 * design of the centerplane ASICs.
32903831d35Sstevel 	 */
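	/*
	 * Worked out: 2^37 bytes is 128GB, so with the usual 8KB base page
	 * (MMU_PAGESHIFT of 13) the shift below becomes 37 - 13 = 24, i.e.
	 * a pfn's memnode is simply pfn >> 24.
	 */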
33003831d35Sstevel 	mem_node_pfn_shift = STARCAT_MC_MEMBOARD_SHIFT - MMU_PAGESHIFT;
33103831d35Sstevel 	mem_node_physalign = 0;
33203831d35Sstevel 
33303831d35Sstevel 	/*
33403831d35Sstevel 	 * Boot install lists are arranged <addr, len>, <addr, len>, ...
33503831d35Sstevel 	 */
336986fd29aSsetje 	for (elem = 0; elem < nelems; list++, elem++) {
337986fd29aSsetje 		basepfn = btop(list->addr);
338986fd29aSsetje 		npgs = btop(list->size);
33903831d35Sstevel 		mem_node_add_slice(basepfn, basepfn + npgs - 1);
34003831d35Sstevel 	}
34103831d35Sstevel }
34203831d35Sstevel 
34303831d35Sstevel /*
34403831d35Sstevel  * At boot time, map the slice behind a CPU's memory controller to its lgroup.
34503831d35Sstevel  */
34603831d35Sstevel void
34703831d35Sstevel plat_fill_mc(pnode_t nodeid)
34803831d35Sstevel {
34903831d35Sstevel 	int		len;
35003831d35Sstevel 	uint64_t	mc_addr, mask;
35103831d35Sstevel 	uint64_t	mc_decode[MAX_BANKS_PER_MC];
35203831d35Sstevel 	uint32_t	regs[4];
35303831d35Sstevel 	int		local_mc;
35403831d35Sstevel 	int		portid;
35503831d35Sstevel 	int		expnum;
35603831d35Sstevel 	int		i;
35703831d35Sstevel 
35803831d35Sstevel 	/*
35903831d35Sstevel 	 * Memory address decoding registers
36003831d35Sstevel 	 * (see Chap 9 of SPARCV9 JSP-1 US-III implementation)
36103831d35Sstevel 	 */
36203831d35Sstevel 	const uint64_t	mc_decode_addr[MAX_BANKS_PER_MC] = {
36303831d35Sstevel 		0x400028, 0x400010, 0x400018, 0x400020
36403831d35Sstevel 	};
36503831d35Sstevel 
36603831d35Sstevel 	/*
36703831d35Sstevel 	 * Starcat memory controller portid == global CPU id
36803831d35Sstevel 	 */
36903831d35Sstevel 	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
37003831d35Sstevel 	    (portid == -1))
37103831d35Sstevel 		return;
37203831d35Sstevel 
37303831d35Sstevel 	expnum = STARCAT_CPUID_TO_EXPANDER(portid);
37403831d35Sstevel 
37503831d35Sstevel 	/*
37603831d35Sstevel 	 * The "reg" property returns 4 32-bit values. The first two are
37703831d35Sstevel 	 * combined to form a 64-bit address.  The second two are for a
37803831d35Sstevel 	 * 64-bit size, but we don't actually need to look at that value.
37903831d35Sstevel 	 */
38003831d35Sstevel 	len = prom_getproplen(nodeid, "reg");
38103831d35Sstevel 	if (len != (sizeof (uint32_t) * 4)) {
38203831d35Sstevel 		prom_printf("Warning: malformed 'reg' property\n");
38303831d35Sstevel 		return;
38403831d35Sstevel 	}
38503831d35Sstevel 	if (prom_getprop(nodeid, "reg", (caddr_t)regs) < 0)
38603831d35Sstevel 		return;
38703831d35Sstevel 	mc_addr = ((uint64_t)regs[0]) << 32;
38803831d35Sstevel 	mc_addr |= (uint64_t)regs[1];
38903831d35Sstevel 
39003831d35Sstevel 	/*
39103831d35Sstevel 	 * Figure out whether the memory controller we are examining
39203831d35Sstevel 	 * belongs to this CPU/CMP or a different one.
39303831d35Sstevel 	 */
39403831d35Sstevel 	if (portid == cpunodes[CPU->cpu_id].portid)
39503831d35Sstevel 		local_mc = 1;
39603831d35Sstevel 	else
39703831d35Sstevel 		local_mc = 0;
39803831d35Sstevel 
39903831d35Sstevel 	for (i = 0; i < MAX_BANKS_PER_MC; i++) {
40003831d35Sstevel 
40103831d35Sstevel 		mask = mc_decode_addr[i];
40203831d35Sstevel 
40303831d35Sstevel 		/*
40403831d35Sstevel 		 * If the memory controller is local to this CPU, we use
40503831d35Sstevel 		 * the special ASI to read the decode registers.
40603831d35Sstevel 		 * Otherwise, we load the values from a magic address in
40703831d35Sstevel 		 * I/O space.
40803831d35Sstevel 		 */
40903831d35Sstevel 		if (local_mc)
41003831d35Sstevel 			mc_decode[i] = lddmcdecode(mask & MC_OFFSET_MASK);
41103831d35Sstevel 		else
41203831d35Sstevel 			mc_decode[i] = lddphysio((mc_addr | mask));
41303831d35Sstevel 
41403831d35Sstevel 		if (mc_decode[i] >> MC_VALID_SHIFT) {
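			/*
			 * The decode register's base field gives the bank's
			 * physical base address; shifting that by the 128GB
			 * memboard shift yields the slice (and thus memnode)
			 * in which the bank lives.
			 */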
41503831d35Sstevel 			uint64_t base = MC_BASE(mc_decode[i]) << PHYS2UM_SHIFT;
41603831d35Sstevel 			int sliceid = (base >> STARCAT_MC_MEMBOARD_SHIFT);
41703831d35Sstevel 
41803831d35Sstevel 			if (sliceid < max_mem_nodes) {
41903831d35Sstevel 				/*
42003831d35Sstevel 				 * Establish start-of-day mappings of
42103831d35Sstevel 				 * lgroup platform handles to memnodes.
42203831d35Sstevel 				 * Handle == Expander Number
42303831d35Sstevel 				 * Memnode == Fixed 128GB Slice
42403831d35Sstevel 				 */
42503831d35Sstevel 				plat_assign_lgrphand_to_mem_node(expnum,
42603831d35Sstevel 				    sliceid);
42703831d35Sstevel 			}
42803831d35Sstevel 		}
42903831d35Sstevel 	}
43003831d35Sstevel }
43103831d35Sstevel 
43203831d35Sstevel /*
43303831d35Sstevel  * Starcat support for lgroups.
43403831d35Sstevel  *
43503831d35Sstevel  * On Starcat, an lgroup platform handle == expander number.
43603831d35Sstevel  * For split-slot configurations (e.g. slot 0 and slot 1 boards
43703831d35Sstevel  * in different domains) an MCPU board has only remote memory.
43803831d35Sstevel  *
43903831d35Sstevel  * The centerplane logic provides fixed 128GB memory slices
44003831d35Sstevel  * each of which map to a memnode.  The initial mapping of
44103831d35Sstevel  * memnodes to lgroup handles is determined at boot time.
44203831d35Sstevel  * A DR addition of memory adds a new mapping. A DR copy-rename
44303831d35Sstevel  * swaps mappings.
44403831d35Sstevel  */
44503831d35Sstevel 
44603831d35Sstevel /*
44703831d35Sstevel  * Convert board number to expander number.
44803831d35Sstevel  */
44903831d35Sstevel #define	BOARDNUM_2_EXPANDER(b)	(b >> 1)
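/* Two board slots per expander (slot 0 and slot 1), hence the shift by one. */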
45003831d35Sstevel 
45103831d35Sstevel /*
45203831d35Sstevel  * Return the number of boards configured with NULL LPA.
45303831d35Sstevel  */
45403831d35Sstevel static int
45503831d35Sstevel check_for_null_lpa(void)
45603831d35Sstevel {
45703831d35Sstevel 	gdcd_t	*gdcd;
45803831d35Sstevel 	uint_t	exp, nlpa;
45903831d35Sstevel 
46003831d35Sstevel 	/*
46103831d35Sstevel 	 * Read GDCD from IOSRAM.
46203831d35Sstevel 	 * If this fails, indicate a NULL LPA condition.
46303831d35Sstevel 	 */
46403831d35Sstevel 	if ((gdcd = kmem_zalloc(sizeof (gdcd_t), KM_NOSLEEP)) == NULL)
46503831d35Sstevel 		return (EXP_COUNT+1);
46603831d35Sstevel 
46703831d35Sstevel 	if ((*iosram_rdp)(GDCD_MAGIC, 0, sizeof (gdcd_t), (caddr_t)gdcd) ||
46803831d35Sstevel 	    (gdcd->h.dcd_magic != GDCD_MAGIC) ||
46903831d35Sstevel 	    (gdcd->h.dcd_version != DCD_VERSION)) {
47003831d35Sstevel 		kmem_free(gdcd, sizeof (gdcd_t));
47103831d35Sstevel 		cmn_err(CE_WARN, "check_for_null_lpa: failed to access GDCD\n");
47203831d35Sstevel 		return (EXP_COUNT+2);
47303831d35Sstevel 	}
47403831d35Sstevel 
47503831d35Sstevel 	/*
47603831d35Sstevel 	 * Check for NULL LPAs on all slot 0 boards in domain
47703831d35Sstevel 	 * (i.e. in all expanders marked good for this domain).
47803831d35Sstevel 	 */
47903831d35Sstevel 	nlpa = 0;
48003831d35Sstevel 	for (exp = 0; exp < EXP_COUNT; exp++) {
48103831d35Sstevel 		if (RSV_GOOD(gdcd->dcd_slot[exp][0].l1ss_rsv) &&
48203831d35Sstevel 		    (gdcd->dcd_slot[exp][0].l1ss_flags &
48303831d35Sstevel 		    L1SSFLG_THIS_L1_NULL_PROC_LPA))
48403831d35Sstevel 			nlpa++;
48503831d35Sstevel 	}
48603831d35Sstevel 
48703831d35Sstevel 	kmem_free(gdcd, sizeof (gdcd_t));
48803831d35Sstevel 	return (nlpa);
48903831d35Sstevel }
49003831d35Sstevel 
49103831d35Sstevel /*
49203831d35Sstevel  * Return the platform handle for the lgroup containing the given CPU
49303831d35Sstevel  *
49403831d35Sstevel  * For Starcat, lgroup platform handle == expander.
49503831d35Sstevel  */
49603831d35Sstevel 
49703831d35Sstevel extern int mpo_disabled;
49803831d35Sstevel extern lgrp_handle_t lgrp_default_handle;
49903831d35Sstevel int null_lpa_boards = -1;
50003831d35Sstevel 
50103831d35Sstevel lgrp_handle_t
50203831d35Sstevel plat_lgrp_cpu_to_hand(processorid_t id)
50303831d35Sstevel {
50403831d35Sstevel 	lgrp_handle_t		plathand;
50503831d35Sstevel 
50603831d35Sstevel 	plathand = STARCAT_CPUID_TO_EXPANDER(id);
50703831d35Sstevel 
50803831d35Sstevel 	/*
50903831d35Sstevel 	 * Return the real platform handle for the CPU until
51003831d35Sstevel 	 * such time as we know that MPO should be disabled.
51103831d35Sstevel 	 * At that point, we set the "mpo_disabled" flag to true,
51203831d35Sstevel 	 * and from that point on, return the default handle.
51303831d35Sstevel 	 *
51403831d35Sstevel 	 * By the time we know that MPO should be disabled, the
51503831d35Sstevel 	 * first CPU will have already been added to a leaf
51603831d35Sstevel 	 * lgroup, but that's ok. The common lgroup code will
51703831d35Sstevel 	 * double check that the boot CPU is in the correct place,
51803831d35Sstevel 	 * and in the case where mpo should be disabled, will move
51903831d35Sstevel 	 * it to the root if necessary.
52003831d35Sstevel 	 */
52103831d35Sstevel 	if (mpo_disabled) {
52203831d35Sstevel 		/* If MPO is disabled, return the default (UMA) handle */
52303831d35Sstevel 		plathand = lgrp_default_handle;
52403831d35Sstevel 	} else {
52503831d35Sstevel 		if (null_lpa_boards > 0) {
52603831d35Sstevel 			/* Determine if MPO should be disabled */
52703831d35Sstevel 			mpo_disabled = 1;
52803831d35Sstevel 			plathand = lgrp_default_handle;
52903831d35Sstevel 		}
53003831d35Sstevel 	}
53103831d35Sstevel 	return (plathand);
53203831d35Sstevel }
53303831d35Sstevel 
53403831d35Sstevel /*
53503831d35Sstevel  * Platform specific lgroup initialization
53603831d35Sstevel  */
53703831d35Sstevel void
53803831d35Sstevel plat_lgrp_init(void)
53903831d35Sstevel {
54003831d35Sstevel 	extern uint32_t lgrp_expand_proc_thresh;
54103831d35Sstevel 	extern uint32_t lgrp_expand_proc_diff;
54203831d35Sstevel 
54303831d35Sstevel 	/*
54403831d35Sstevel 	 * Set tuneables for Starcat architecture
54503831d35Sstevel 	 *
54603831d35Sstevel 	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
54703831d35Sstevel 	 * this process is currently running on before considering
54803831d35Sstevel 	 * expanding threads to another lgroup.
54903831d35Sstevel 	 *
55003831d35Sstevel 	 * lgrp_expand_proc_diff determines how much less the remote lgroup
55103831d35Sstevel 	 * must be loaded before expanding to it.
55203831d35Sstevel 	 *
55303831d35Sstevel 	 * Since remote latencies can be costly, attempt to keep 3 threads
55403831d35Sstevel 	 * within the same lgroup before expanding to the next lgroup.
55503831d35Sstevel 	 */
55603831d35Sstevel 	lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX * 3;
55703831d35Sstevel 	lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
55803831d35Sstevel }
55903831d35Sstevel 
56003831d35Sstevel /*
56103831d35Sstevel  * Platform notification of lgroup (re)configuration changes
56203831d35Sstevel  */
56303831d35Sstevel /*ARGSUSED*/
56403831d35Sstevel void
56503831d35Sstevel plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
56603831d35Sstevel {
56703831d35Sstevel 	update_membounds_t	*umb;
56803831d35Sstevel 	lgrp_config_mem_rename_t lmr;
56903831d35Sstevel 	int			sbd, tbd;
57003831d35Sstevel 	lgrp_handle_t		hand, shand, thand;
57103831d35Sstevel 	int			mnode, snode, tnode;
57203831d35Sstevel 
57303831d35Sstevel 	if (mpo_disabled)
57403831d35Sstevel 		return;
57503831d35Sstevel 
57603831d35Sstevel 	switch (evt) {
57703831d35Sstevel 
57803831d35Sstevel 	case LGRP_CONFIG_MEM_ADD:
57903831d35Sstevel 		/*
58003831d35Sstevel 		 * Establish the lgroup handle to memnode translation.
58103831d35Sstevel 		 */
58203831d35Sstevel 		umb = (update_membounds_t *)arg;
58303831d35Sstevel 
58403831d35Sstevel 		hand = BOARDNUM_2_EXPANDER(umb->u_board);
58503831d35Sstevel 		mnode = plat_pfn_to_mem_node(umb->u_base >> MMU_PAGESHIFT);
58603831d35Sstevel 		plat_assign_lgrphand_to_mem_node(hand, mnode);
58703831d35Sstevel 
58803831d35Sstevel 		break;
58903831d35Sstevel 
59003831d35Sstevel 	case LGRP_CONFIG_MEM_DEL:
59103831d35Sstevel 		/* We don't have to do anything */
59203831d35Sstevel 
59303831d35Sstevel 		break;
59403831d35Sstevel 
59503831d35Sstevel 	case LGRP_CONFIG_MEM_RENAME:
59603831d35Sstevel 		/*
59703831d35Sstevel 		 * During a DR copy-rename operation, all of the memory
59803831d35Sstevel 		 * on one board is moved to another board -- but the
59903831d35Sstevel 		 * addresses/pfns and memnodes don't change. This means
60003831d35Sstevel 		 * the memory has changed locations without changing identity.
60103831d35Sstevel 		 *
60203831d35Sstevel 		 * Source is where we are copying from and target is where we
60303831d35Sstevel 		 * are copying to.  After source memnode is copied to target
60403831d35Sstevel 		 * memnode, the physical addresses of the target memnode are
60503831d35Sstevel 		 * renamed to match what the source memnode had.  Then target
60603831d35Sstevel 		 * memnode can be removed and source memnode can take its
60703831d35Sstevel 		 * place.
60803831d35Sstevel 		 *
60903831d35Sstevel 		 * To do this, swap the lgroup handle to memnode mappings for
61003831d35Sstevel 		 * the boards, so target lgroup will have source memnode and
61103831d35Sstevel 		 * source lgroup will have empty target memnode which is where
61203831d35Sstevel 		 * its memory will go (if any is added to it later).
61303831d35Sstevel 		 *
61403831d35Sstevel 		 * Then source memnode needs to be removed from its lgroup
61503831d35Sstevel 		 * and added to the target lgroup where the memory was living
61603831d35Sstevel 		 * but under a different name/memnode.  The memory was in the
61703831d35Sstevel 		 * target memnode and now lives in the source memnode with
61803831d35Sstevel 		 * different physical addresses even though it is the same
61903831d35Sstevel 		 * memory.
62003831d35Sstevel 		 */
62103831d35Sstevel 		sbd = arg & 0xffff;
62203831d35Sstevel 		tbd = (arg & 0xffff0000) >> 16;
62303831d35Sstevel 		shand = BOARDNUM_2_EXPANDER(sbd);
62403831d35Sstevel 		thand = BOARDNUM_2_EXPANDER(tbd);
62503831d35Sstevel 		snode = plat_lgrphand_to_mem_node(shand);
62603831d35Sstevel 		tnode = plat_lgrphand_to_mem_node(thand);
62703831d35Sstevel 
62803831d35Sstevel 		plat_assign_lgrphand_to_mem_node(thand, snode);
62903831d35Sstevel 		plat_assign_lgrphand_to_mem_node(shand, tnode);
63003831d35Sstevel 
63103831d35Sstevel 		lmr.lmem_rename_from = shand;
63203831d35Sstevel 		lmr.lmem_rename_to = thand;
63303831d35Sstevel 
63403831d35Sstevel 		/*
63503831d35Sstevel 		 * Remove source memnode of copy rename from its lgroup
63603831d35Sstevel 		 * and add it to its new target lgroup
63703831d35Sstevel 		 */
63803831d35Sstevel 		lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
63903831d35Sstevel 		    (uintptr_t)&lmr);
64003831d35Sstevel 
64103831d35Sstevel 		break;
64203831d35Sstevel 
64303831d35Sstevel 	default:
64403831d35Sstevel 		break;
64503831d35Sstevel 	}
64603831d35Sstevel }
64703831d35Sstevel 
64803831d35Sstevel /*
64903831d35Sstevel  * Return latency between "from" and "to" lgroups
65003831d35Sstevel  *
65103831d35Sstevel  * This latency number can only be used for relative comparison
65203831d35Sstevel  * between lgroups on the running system, cannot be used across platforms,
65303831d35Sstevel  * and may not reflect the actual latency.  It is platform and implementation
65403831d35Sstevel  * specific, so platform gets to decide its value.  It would be nice if the
65503831d35Sstevel  * number was at least proportional to make comparisons more meaningful though.
65603831d35Sstevel  * NOTE: The numbers below are supposed to be load latencies for uncached
65703831d35Sstevel  * memory divided by 10.
65803831d35Sstevel  */
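/*
 * Assuming the raw figures are in nanoseconds, the divide-by-10 convention
 * above makes the values below correspond to roughly 480 ns for a remote
 * (or root-involved) access and 280 ns for a local one.
 */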
65903831d35Sstevel int
66003831d35Sstevel plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to)
66103831d35Sstevel {
66203831d35Sstevel 	/*
66303831d35Sstevel 	 * Return min remote latency when there are more than two lgroups
66403831d35Sstevel 	 * Return the min remote latency when there are more than two lgroups
66503831d35Sstevel 	 * (i.e. more than just root and one child) and the latency requested
66603831d35Sstevel 	 * is between two different lgroups or involves the root.
66703831d35Sstevel 	if (lgrp_optimizations() && (from != to ||
66803831d35Sstevel 	    from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE))
66903831d35Sstevel 		return (48);
67003831d35Sstevel 	else
67103831d35Sstevel 		return (28);
67203831d35Sstevel }
67303831d35Sstevel 
67403831d35Sstevel /*
67503831d35Sstevel  * Return platform handle for root lgroup
67603831d35Sstevel  */
67703831d35Sstevel lgrp_handle_t
67803831d35Sstevel plat_lgrp_root_hand(void)
67903831d35Sstevel {
68003831d35Sstevel 	if (mpo_disabled)
68103831d35Sstevel 		return (lgrp_default_handle);
68203831d35Sstevel 
68303831d35Sstevel 	return (LGRP_DEFAULT_HANDLE);
68403831d35Sstevel }
68503831d35Sstevel 
68603831d35Sstevel /* ARGSUSED */
68703831d35Sstevel void
68803831d35Sstevel plat_freelist_process(int mnode)
68903831d35Sstevel {
69003831d35Sstevel }
69103831d35Sstevel 
69203831d35Sstevel void
69303831d35Sstevel load_platform_drivers(void)
69403831d35Sstevel {
69503831d35Sstevel 	uint_t		tunnel;
69603831d35Sstevel 	pnode_t		nodeid;
69703831d35Sstevel 	dev_info_t	*chosen_devi;
69803831d35Sstevel 	char		chosen_iosram[MAXNAMELEN];
69903831d35Sstevel 
70003831d35Sstevel 	/*
70103831d35Sstevel 	 * Get /chosen node - that's where the tunnel property is
70203831d35Sstevel 	 */
70303831d35Sstevel 	nodeid = prom_chosennode();
70403831d35Sstevel 
70503831d35Sstevel 	/*
70603831d35Sstevel 	 * Get the iosram property from the chosen node.
70703831d35Sstevel 	 */
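	/*
	 * The property value is a phandle for the IOSRAM "tunnel" node; it
	 * is converted to a device path below so the corresponding driver
	 * instances can be attached and held.
	 */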
70803831d35Sstevel 	if (prom_getprop(nodeid, IOSRAM_CHOSEN_PROP, (caddr_t)&tunnel) <= 0) {
70903831d35Sstevel 		prom_printf("Unable to get iosram property\n");
71003831d35Sstevel 		cmn_err(CE_PANIC, "Unable to get iosram property\n");
71103831d35Sstevel 	}
71203831d35Sstevel 
71303831d35Sstevel 	if (prom_phandle_to_path((phandle_t)tunnel, chosen_iosram,
71403831d35Sstevel 	    sizeof (chosen_iosram)) < 0) {
71503831d35Sstevel 		(void) prom_printf("prom_phandle_to_path(0x%x) failed\n",
71603831d35Sstevel 		    tunnel);
71703831d35Sstevel 		cmn_err(CE_PANIC, "prom_phandle_to_path(0x%x) failed\n",
71803831d35Sstevel 		    tunnel);
71903831d35Sstevel 	}
72003831d35Sstevel 
72103831d35Sstevel 	/*
72203831d35Sstevel 	 * Attach all driver instances along the iosram's device path
72303831d35Sstevel 	 */
72403831d35Sstevel 	if (i_ddi_attach_hw_nodes("iosram") != DDI_SUCCESS) {
72503831d35Sstevel 		cmn_err(CE_WARN, "IOSRAM failed to load\n");
72603831d35Sstevel 	}
72703831d35Sstevel 
72803831d35Sstevel 	if ((chosen_devi = e_ddi_hold_devi_by_path(chosen_iosram, 0)) == NULL) {
72903831d35Sstevel 		(void) prom_printf("e_ddi_hold_devi_by_path(%s) failed\n",
73003831d35Sstevel 		    chosen_iosram);
73103831d35Sstevel 		cmn_err(CE_PANIC, "e_ddi_hold_devi_by_path(%s) failed\n",
73203831d35Sstevel 		    chosen_iosram);
73303831d35Sstevel 	}
73403831d35Sstevel 	ndi_rele_devi(chosen_devi);
73503831d35Sstevel 
73603831d35Sstevel 	/*
73703831d35Sstevel 	 * iosram driver is now loaded so we need to set our read and
73803831d35Sstevel 	 * write pointers.
73903831d35Sstevel 	 */
74003831d35Sstevel 	iosram_rdp = (int (*)(uint32_t, uint32_t, uint32_t, caddr_t))
74103831d35Sstevel 	    modgetsymvalue("iosram_rd", 0);
74203831d35Sstevel 	iosram_wrp = (int (*)(uint32_t, uint32_t, uint32_t, caddr_t))
74303831d35Sstevel 	    modgetsymvalue("iosram_wr", 0);
74403831d35Sstevel 
74503831d35Sstevel 	/*
74603831d35Sstevel 	 * Need to check for null proc LPA after the IOSRAM driver is loaded
74703831d35Sstevel 	 * and before multiple lgroups are created (when start_other_cpus() runs).
74803831d35Sstevel 	 */
74903831d35Sstevel 	null_lpa_boards = check_for_null_lpa();
75003831d35Sstevel 
75103831d35Sstevel 	/* load and attach the axq driver */
75203831d35Sstevel 	if (i_ddi_attach_hw_nodes("axq") != DDI_SUCCESS) {
75303831d35Sstevel 		cmn_err(CE_WARN, "AXQ failed to load\n");
75403831d35Sstevel 	}
75503831d35Sstevel 
75603831d35Sstevel 	/* load Starcat Solaris Mailbox Client driver */
75703831d35Sstevel 	if (modload("misc", "scosmb") < 0) {
75803831d35Sstevel 		cmn_err(CE_WARN, "SCOSMB failed to load\n");
75903831d35Sstevel 	}
76003831d35Sstevel 
76103831d35Sstevel 	/* load the DR driver */
76203831d35Sstevel 	if (i_ddi_attach_hw_nodes("dr") != DDI_SUCCESS) {
76303831d35Sstevel 		cmn_err(CE_WARN, "dr failed to load");
76403831d35Sstevel 	}
76503831d35Sstevel 
76603831d35Sstevel 	/*
76703831d35Sstevel 	 * Load the mc-us3 memory driver.
76803831d35Sstevel 	 */
76903831d35Sstevel 	if (i_ddi_attach_hw_nodes("mc-us3") != DDI_SUCCESS)
77003831d35Sstevel 		cmn_err(CE_WARN, "mc-us3 failed to load");
77103831d35Sstevel 	else
77203831d35Sstevel 		(void) ddi_hold_driver(ddi_name_to_major("mc-us3"));
77303831d35Sstevel 
77403831d35Sstevel 	/* Load the schizo pci bus nexus driver. */
77503831d35Sstevel 	if (i_ddi_attach_hw_nodes("pcisch") != DDI_SUCCESS)
77603831d35Sstevel 		cmn_err(CE_WARN, "pcisch failed to load");
77703831d35Sstevel 
77803831d35Sstevel 	plat_ecc_init();
77903831d35Sstevel }
78003831d35Sstevel 
78103831d35Sstevel 
78203831d35Sstevel /*
78303831d35Sstevel  * No platform drivers on this platform
78403831d35Sstevel  */
78503831d35Sstevel char *platform_module_list[] = {
78603831d35Sstevel 	(char *)0
78703831d35Sstevel };
78803831d35Sstevel 
78903831d35Sstevel 
79003831d35Sstevel /*ARGSUSED*/
79103831d35Sstevel void
79203831d35Sstevel plat_tod_fault(enum tod_fault_type tod_bad)
79303831d35Sstevel {
79403831d35Sstevel }
79503831d35Sstevel 
79603831d35Sstevel /*
79703831d35Sstevel  * Update the signature(s) in the IOSRAM's domain data section.
79803831d35Sstevel  */
79903831d35Sstevel void
80003831d35Sstevel cpu_sgn_update(ushort_t sgn, uchar_t state, uchar_t sub_state, int cpuid)
80103831d35Sstevel {
80203831d35Sstevel 	sig_state_t new_sgn;
80303831d35Sstevel 	sig_state_t current_sgn;
80403831d35Sstevel 
80503831d35Sstevel 	/*
80603831d35Sstevel 	 * If the substate is REBOOT, then check for panic flow
80703831d35Sstevel 	 */
80803831d35Sstevel 	if (sub_state == SIGSUBST_REBOOT) {
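		/*
		 * A current domain state of SIGST_EXIT at reboot time means
		 * we got here via panic, so record this as a panic reboot.
		 */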
80903831d35Sstevel 		(*iosram_rdp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET,
81003831d35Sstevel 		    sizeof (sig_state_t), (caddr_t)&current_sgn);
81103831d35Sstevel 		if (current_sgn.state_t.state == SIGST_EXIT)
81203831d35Sstevel 			sub_state = SIGSUBST_PANIC_REBOOT;
81303831d35Sstevel 	}
81403831d35Sstevel 
81503831d35Sstevel 	/*
81603831d35Sstevel 	 * cpuid == -1 indicates that the operation applies to all cpus.
81703831d35Sstevel 	 */
81803831d35Sstevel 	if (cpuid < 0) {
81903831d35Sstevel 		sgn_update_all_cpus(sgn, state, sub_state);
82003831d35Sstevel 		return;
82103831d35Sstevel 	}
82203831d35Sstevel 
82303831d35Sstevel 	new_sgn.signature = CPU_SIG_BLD(sgn, state, sub_state);
82403831d35Sstevel 	(*iosram_wrp)(DOMD_MAGIC,
82503831d35Sstevel 	    DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
82603831d35Sstevel 	    sizeof (sig_state_t), (caddr_t)&new_sgn);
82703831d35Sstevel 
82803831d35Sstevel 	/*
82903831d35Sstevel 	 * Under certain conditions we don't update the signature
83003831d35Sstevel 	 * of the domain_state.
83103831d35Sstevel 	 */
83203831d35Sstevel 	if ((sgn == OS_SIG) &&
83303831d35Sstevel 	    ((state == SIGST_OFFLINE) || (state == SIGST_DETACHED)))
83403831d35Sstevel 		return;
83503831d35Sstevel 	(*iosram_wrp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET, sizeof (sig_state_t),
83603831d35Sstevel 	    (caddr_t)&new_sgn);
83703831d35Sstevel }
83803831d35Sstevel 
83903831d35Sstevel /*
84003831d35Sstevel  * Update the signature(s) in the IOSRAM's domain data section for all CPUs.
84103831d35Sstevel  */
84203831d35Sstevel void
84303831d35Sstevel sgn_update_all_cpus(ushort_t sgn, uchar_t state, uchar_t sub_state)
84403831d35Sstevel {
84503831d35Sstevel 	sig_state_t new_sgn;
84603831d35Sstevel 	int i = 0;
84703831d35Sstevel 
84803831d35Sstevel 	new_sgn.signature = CPU_SIG_BLD(sgn, state, sub_state);
84903831d35Sstevel 
85003831d35Sstevel 	/*
85103831d35Sstevel 	 * First update the domain_state signature
85203831d35Sstevel 	 */
85303831d35Sstevel 	(*iosram_wrp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET, sizeof (sig_state_t),
85403831d35Sstevel 	    (caddr_t)&new_sgn);
85503831d35Sstevel 
85603831d35Sstevel 	for (i = 0; i < NCPU; i++) {
85703831d35Sstevel 		if (cpu[i] != NULL && (cpu[i]->cpu_flags &
85803831d35Sstevel 		    (CPU_EXISTS|CPU_QUIESCED))) {
85903831d35Sstevel 			(*iosram_wrp)(DOMD_MAGIC,
86003831d35Sstevel 			    DOMD_CPUSIGS_OFFSET + i * sizeof (sig_state_t),
86103831d35Sstevel 			    sizeof (sig_state_t), (caddr_t)&new_sgn);
86203831d35Sstevel 		}
86303831d35Sstevel 	}
86403831d35Sstevel }
86503831d35Sstevel 
86603831d35Sstevel ushort_t
86703831d35Sstevel get_cpu_sgn(int cpuid)
86803831d35Sstevel {
86903831d35Sstevel 	sig_state_t cpu_sgn;
87003831d35Sstevel 
87103831d35Sstevel 	(*iosram_rdp)(DOMD_MAGIC,
87203831d35Sstevel 	    DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
87303831d35Sstevel 	    sizeof (sig_state_t), (caddr_t)&cpu_sgn);
87403831d35Sstevel 
87503831d35Sstevel 	return (cpu_sgn.state_t.sig);
87603831d35Sstevel }
87703831d35Sstevel 
87803831d35Sstevel uchar_t
87903831d35Sstevel get_cpu_sgn_state(int cpuid)
88003831d35Sstevel {
88103831d35Sstevel 	sig_state_t cpu_sgn;
88203831d35Sstevel 
88303831d35Sstevel 	(*iosram_rdp)(DOMD_MAGIC,
88403831d35Sstevel 	    DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
88503831d35Sstevel 	    sizeof (sig_state_t), (caddr_t)&cpu_sgn);
88603831d35Sstevel 
88703831d35Sstevel 	return (cpu_sgn.state_t.state);
88803831d35Sstevel }
88903831d35Sstevel 
89003831d35Sstevel 
89103831d35Sstevel /*
89203831d35Sstevel  * Type of argument passed into plat_get_ecache_cpu via ddi_walk_devs
89303831d35Sstevel  * for matching on a specific CPU node in the device tree
89403831d35Sstevel  */
89503831d35Sstevel 
89603831d35Sstevel typedef struct {
89703831d35Sstevel 	char		*jnum;	/* output, kmem_alloc'd	if successful */
89803831d35Sstevel 	int		cpuid;	/* input, to match cpuid/portid/upa-portid */
89903831d35Sstevel 	uint_t		dimm;	/* input, index into ecache-dimm-label */
90003831d35Sstevel } plat_ecache_cpu_arg_t;
90103831d35Sstevel 
90203831d35Sstevel 
90303831d35Sstevel /*
90403831d35Sstevel  * plat_get_ecache_cpu is called repeatedly by ddi_walk_devs with pointers
90503831d35Sstevel  * to device tree nodes (dip) and to a plat_ecache_cpu_arg_t structure (arg).
90603831d35Sstevel  * Returning DDI_WALK_CONTINUE tells ddi_walk_devs to keep going, returning
90703831d35Sstevel  * DDI_WALK_TERMINATE ends the walk.  When the node for the specific CPU
90803831d35Sstevel  * being searched for is found, the walk is done.  But before returning to
90903831d35Sstevel  * ddi_walk_devs and plat_get_ecacheunum, we grab this CPU's ecache-dimm-label
91003831d35Sstevel  * property and set the jnum member of the plat_ecache_cpu_arg_t structure to
91103831d35Sstevel  * point to the label corresponding to this specific ecache DIMM.  It is up
91203831d35Sstevel  * to plat_get_ecacheunum to kmem_free this string.
91303831d35Sstevel  */
91403831d35Sstevel 
91503831d35Sstevel static int
91603831d35Sstevel plat_get_ecache_cpu(dev_info_t *dip, void *arg)
91703831d35Sstevel {
91803831d35Sstevel 	char			*devtype;
91903831d35Sstevel 	plat_ecache_cpu_arg_t	*cpuarg;
92003831d35Sstevel 	char			**dimm_labels;
92103831d35Sstevel 	uint_t			numlabels;
92203831d35Sstevel 	int			portid;
92303831d35Sstevel 
92403831d35Sstevel 	/*
92503831d35Sstevel 	 * Check device_type, must be "cpu"
92603831d35Sstevel 	 */
92703831d35Sstevel 
92803831d35Sstevel 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
929cbcdac8fSmb91622 	    "device_type", &devtype) != DDI_PROP_SUCCESS)
93003831d35Sstevel 		return (DDI_WALK_CONTINUE);
93103831d35Sstevel 
93203831d35Sstevel 	if (strcmp(devtype, "cpu")) {
93303831d35Sstevel 		ddi_prop_free((void *)devtype);
93403831d35Sstevel 		return (DDI_WALK_CONTINUE);
93503831d35Sstevel 	}
93603831d35Sstevel 
93703831d35Sstevel 	ddi_prop_free((void *)devtype);
93803831d35Sstevel 
93903831d35Sstevel 	/*
94003831d35Sstevel 	 * Check cpuid, portid, upa-portid (in that order), must
94103831d35Sstevel 	 * match the cpuid being sought
94203831d35Sstevel 	 */
94303831d35Sstevel 
94403831d35Sstevel 	portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
94503831d35Sstevel 	    DDI_PROP_DONTPASS, "cpuid", -1);
94603831d35Sstevel 
94703831d35Sstevel 	if (portid == -1)
94803831d35Sstevel 		portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
94903831d35Sstevel 		    DDI_PROP_DONTPASS, "portid", -1);
95003831d35Sstevel 
95103831d35Sstevel 	if (portid == -1)
95203831d35Sstevel 		portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
95303831d35Sstevel 		    DDI_PROP_DONTPASS, "upa-portid", -1);
95403831d35Sstevel 
95503831d35Sstevel 	cpuarg = (plat_ecache_cpu_arg_t *)arg;
95603831d35Sstevel 
95703831d35Sstevel 	if (portid != cpuarg->cpuid)
95803831d35Sstevel 		return (DDI_WALK_CONTINUE);
95903831d35Sstevel 
96003831d35Sstevel 	/*
96103831d35Sstevel 	 * Found the right CPU, fetch ecache-dimm-label property
96203831d35Sstevel 	 */
96303831d35Sstevel 
96403831d35Sstevel 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
96503831d35Sstevel 	    "ecache-dimm-label", &dimm_labels, &numlabels)
96603831d35Sstevel 	    != DDI_PROP_SUCCESS) {
96703831d35Sstevel #ifdef	DEBUG
96803831d35Sstevel 		cmn_err(CE_NOTE, "cpuid=%d missing ecache-dimm-label property",
96903831d35Sstevel 		    portid);
97003831d35Sstevel #endif	/* DEBUG */
97103831d35Sstevel 		return (DDI_WALK_TERMINATE);
97203831d35Sstevel 	}
97303831d35Sstevel 
97403831d35Sstevel 	if (cpuarg->dimm < numlabels) {
975cbcdac8fSmb91622 		cpuarg->jnum = kmem_alloc(strlen(dimm_labels[cpuarg->dimm]) + 1,
97603831d35Sstevel 		    KM_SLEEP);
97703831d35Sstevel 		if (cpuarg->jnum != (char *)NULL)
97803831d35Sstevel 			(void) strcpy(cpuarg->jnum, dimm_labels[cpuarg->dimm]);
97903831d35Sstevel #ifdef	DEBUG
98003831d35Sstevel 		else
98103831d35Sstevel 			cmn_err(CE_WARN,
98203831d35Sstevel 			    "cannot kmem_alloc for ecache dimm label");
98303831d35Sstevel #endif	/* DEBUG */
98403831d35Sstevel 	}
98503831d35Sstevel 
98603831d35Sstevel 	ddi_prop_free((void *)dimm_labels);
98703831d35Sstevel 	return (DDI_WALK_TERMINATE);
98803831d35Sstevel }
98903831d35Sstevel 
99003831d35Sstevel 
99103831d35Sstevel /*
99203831d35Sstevel  * Bit 4 of physical address indicates ecache 0 or 1
99303831d35Sstevel  */
99403831d35Sstevel 
99503831d35Sstevel #define	ECACHE_DIMM_MASK	0x10
99603831d35Sstevel 
99703831d35Sstevel /*
99803831d35Sstevel  * plat_get_ecacheunum is called to generate the unum for an ecache error.
99903831d35Sstevel  * After some initialization, nearly all of the work is done by ddi_walk_devs
100003831d35Sstevel  * and plat_get_ecache_cpu.
100103831d35Sstevel  */
100203831d35Sstevel 
100303831d35Sstevel int
100403831d35Sstevel plat_get_ecacheunum(int cpuid, unsigned long long physaddr, char *buf,
100503831d35Sstevel 		    int buflen, int *ustrlen)
100603831d35Sstevel {
100703831d35Sstevel 	plat_ecache_cpu_arg_t	findcpu;
100803831d35Sstevel 	uint_t	expander, slot, proc;
100903831d35Sstevel 
101003831d35Sstevel 	findcpu.jnum = (char *)NULL;
101103831d35Sstevel 	findcpu.cpuid = cpuid;
1012cbcdac8fSmb91622 
1013cbcdac8fSmb91622 	/*
1014cbcdac8fSmb91622 	 * Bit 4 of physaddr equal to 0 maps to E0 and 1 maps to E1,
1015cbcdac8fSmb91622 	 * except for Panther and Jaguar, where the mapping is reversed.
1016cbcdac8fSmb91622 	 */
1017cbcdac8fSmb91622 	if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation) ||
1018cbcdac8fSmb91622 	    IS_JAGUAR(cpunodes[CPU->cpu_id].implementation))
1019cbcdac8fSmb91622 		findcpu.dimm =  (physaddr & ECACHE_DIMM_MASK) ? 0 : 1;
1020cbcdac8fSmb91622 	else
1021cbcdac8fSmb91622 		findcpu.dimm =  (physaddr & ECACHE_DIMM_MASK) ? 1 : 0;
102203831d35Sstevel 
102303831d35Sstevel 	/*
102403831d35Sstevel 	 * Walk the device tree, find this specific CPU, and get the label
102503831d35Sstevel 	 * for this ecache, returned here in findcpu.jnum
102603831d35Sstevel 	 */
102703831d35Sstevel 
102803831d35Sstevel 	ddi_walk_devs(ddi_root_node(), plat_get_ecache_cpu, (void *)&findcpu);
102903831d35Sstevel 
103003831d35Sstevel 	if (findcpu.jnum == (char *)NULL)
103103831d35Sstevel 		return (-1);
103203831d35Sstevel 
103303831d35Sstevel 	expander = STARCAT_CPUID_TO_EXPANDER(cpuid);
103403831d35Sstevel 	slot = STARCAT_CPUID_TO_BOARDSLOT(cpuid);
103503831d35Sstevel 
103603831d35Sstevel 	/*
103703831d35Sstevel 	 * STARCAT_CPUID_TO_PORTID clears the CoreID bit so that
103803831d35Sstevel 	 * STARCAT_CPUID_TO_AGENT will return a physical proc (0 - 3).
103903831d35Sstevel 	 */
104003831d35Sstevel 	proc = STARCAT_CPUID_TO_AGENT(STARCAT_CPUID_TO_PORTID(cpuid));
104103831d35Sstevel 
104203831d35Sstevel 	/*
104303831d35Sstevel 	 * NOTE: Any modifications to the snprintf() call below will require
104403831d35Sstevel 	 * changing plat_log_fruid_error() as well!
104503831d35Sstevel 	 */
104603831d35Sstevel 	(void) snprintf(buf, buflen, "%s%u/P%u/E%u J%s", (slot ? "IO" : "SB"),
104703831d35Sstevel 	    expander, proc, findcpu.dimm, findcpu.jnum);
104803831d35Sstevel 
104903831d35Sstevel 	*ustrlen = strlen(buf);
105003831d35Sstevel 
105103831d35Sstevel 	kmem_free(findcpu.jnum, strlen(findcpu.jnum) + 1);
105203831d35Sstevel 
105303831d35Sstevel 	return (0);
105403831d35Sstevel }
105503831d35Sstevel 
105603831d35Sstevel /*ARGSUSED*/
105703831d35Sstevel int
105803831d35Sstevel plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
105903831d35Sstevel     int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
106003831d35Sstevel {
106103831d35Sstevel 	int ret;
106203831d35Sstevel 
106303831d35Sstevel 	/*
106403831d35Sstevel 	 * check if it's a Memory or an Ecache error.
106503831d35Sstevel 	 */
106603831d35Sstevel 	if (flt_in_memory) {
106703831d35Sstevel 		if (p2get_mem_unum != NULL) {
106803831d35Sstevel 			return (p2get_mem_unum(synd_code, P2ALIGN(flt_addr, 8),
106903831d35Sstevel 			    buf, buflen, lenp));
107003831d35Sstevel 		} else {
107103831d35Sstevel 			return (ENOTSUP);
107203831d35Sstevel 		}
107303831d35Sstevel 	} else if (flt_status & ECC_ECACHE) {
107403831d35Sstevel 		if ((ret = plat_get_ecacheunum(flt_bus_id,
107503831d35Sstevel 		    P2ALIGN(flt_addr, 8), buf, buflen, lenp)) != 0)
107603831d35Sstevel 			return (EIO);
107703831d35Sstevel 	} else {
107803831d35Sstevel 		return (ENOTSUP);
107903831d35Sstevel 	}
108003831d35Sstevel 
108103831d35Sstevel 	return (ret);
108203831d35Sstevel }
108303831d35Sstevel 
108403831d35Sstevel static int (*ecc_mailbox_msg_func)(plat_ecc_message_type_t, void *) = NULL;
108503831d35Sstevel 
108603831d35Sstevel /*
108703831d35Sstevel  * To keep OS mailbox handling localized, all we do is forward the call to the
108803831d35Sstevel  * scosmb module (if it is available).
108903831d35Sstevel  */
109003831d35Sstevel int
109103831d35Sstevel plat_send_ecc_mailbox_msg(plat_ecc_message_type_t msg_type, void *datap)
109203831d35Sstevel {
109303831d35Sstevel 	/*
109403831d35Sstevel 	 * find the symbol for the mailbox sender routine in the scosmb module
109503831d35Sstevel 	 */
109603831d35Sstevel 	if (ecc_mailbox_msg_func == NULL)
109703831d35Sstevel 		ecc_mailbox_msg_func = (int (*)(plat_ecc_message_type_t,
109803831d35Sstevel 		    void *))modgetsymvalue("scosmb_log_ecc_error", 0);
109903831d35Sstevel 
110003831d35Sstevel 	/*
110103831d35Sstevel 	 * If the symbol was found, call it.  Otherwise, there is not much
110203831d35Sstevel 	 * else we can do and console messages will have to suffice.
110303831d35Sstevel 	 */
110403831d35Sstevel 	if (ecc_mailbox_msg_func)
110503831d35Sstevel 		return ((*ecc_mailbox_msg_func)(msg_type, datap));
110603831d35Sstevel 	else
110703831d35Sstevel 		return (ENODEV);
110803831d35Sstevel }
110903831d35Sstevel 
111003831d35Sstevel int
111103831d35Sstevel plat_make_fru_cpuid(int sb, int m, int proc)
111203831d35Sstevel {
111303831d35Sstevel 	return (MAKE_CPUID(sb, m, proc));
111403831d35Sstevel }
111503831d35Sstevel 
111603831d35Sstevel /*
111703831d35Sstevel  * board number for a given proc
111803831d35Sstevel  */
111903831d35Sstevel int
112003831d35Sstevel plat_make_fru_boardnum(int proc)
112103831d35Sstevel {
112203831d35Sstevel 	return (STARCAT_CPUID_TO_EXPANDER(proc));
112303831d35Sstevel }
112403831d35Sstevel 
112503831d35Sstevel /*
112603831d35Sstevel  * This platform hook gets called from mc_add_mem_unum_label() in the mc-us3
112703831d35Sstevel  * driver, giving each platform the opportunity to add platform-specific
112803831d35Sstevel  * label information to the unum for ECC error logging purposes.
112903831d35Sstevel  */
113003831d35Sstevel void
113103831d35Sstevel plat_add_mem_unum_label(char *unum, int mcid, int bank, int dimm)
113203831d35Sstevel {
113303831d35Sstevel 	char	new_unum[UNUM_NAMLEN];
113403831d35Sstevel 	uint_t	expander = STARCAT_CPUID_TO_EXPANDER(mcid);
113503831d35Sstevel 	uint_t	slot = STARCAT_CPUID_TO_BOARDSLOT(mcid);
113603831d35Sstevel 
113703831d35Sstevel 	/*
113803831d35Sstevel 	 * STARCAT_CPUID_TO_PORTID clears the CoreID bit so that
113903831d35Sstevel 	 * STARCAT_CPUID_TO_AGENT will return a physical proc (0 - 3).
114003831d35Sstevel 	 */
114103831d35Sstevel 	uint_t	proc = STARCAT_CPUID_TO_AGENT(STARCAT_CPUID_TO_PORTID(mcid));
114203831d35Sstevel 
114303831d35Sstevel 	/*
114403831d35Sstevel 	 * NOTE: Any modifications to the two sprintf() calls below will
114503831d35Sstevel 	 * require changing plat_log_fruid_error() as well!
114603831d35Sstevel 	 */
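	/*
	 * The resulting unum gains a location prefix of the form
	 * SB<exp>/P<proc>/B<bank> (IO<exp>/... for a slot-1 board), with
	 * /D<dimm> appended when a specific DIMM is identified.
	 */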
114703831d35Sstevel 	if (dimm == -1)
114803831d35Sstevel 		(void) snprintf(new_unum, UNUM_NAMLEN, "%s%u/P%u/B%d %s",
1149cbcdac8fSmb91622 		    (slot ? "IO" : "SB"), expander, proc, (bank & 0x1), unum);
115003831d35Sstevel 	else
115103831d35Sstevel 		(void) snprintf(new_unum, UNUM_NAMLEN, "%s%u/P%u/B%d/D%d %s",
115203831d35Sstevel 		    (slot ? "IO" : "SB"), expander,
115303831d35Sstevel 		    proc, (bank & 0x1), (dimm & 0x3), unum);
115403831d35Sstevel 
115503831d35Sstevel 	(void) strcpy(unum, new_unum);
115603831d35Sstevel }
115703831d35Sstevel 
115803831d35Sstevel int
115903831d35Sstevel plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
116003831d35Sstevel {
116103831d35Sstevel 	int	expander = STARCAT_CPUID_TO_EXPANDER(cpuid);
116203831d35Sstevel 	int	slot = STARCAT_CPUID_TO_BOARDSLOT(cpuid);
116303831d35Sstevel 
116403831d35Sstevel 	if (snprintf(buf, buflen, "%s%d", (slot ? "IO" : "SB"), expander)
116503831d35Sstevel 	    >= buflen) {
116603831d35Sstevel 		return (ENOSPC);
116703831d35Sstevel 	} else {
116803831d35Sstevel 		*lenp = strlen(buf);
116903831d35Sstevel 		return (0);
117003831d35Sstevel 	}
117103831d35Sstevel }
117203831d35Sstevel 
117303831d35Sstevel /*
117403831d35Sstevel  * This routine is used by the data bearing mondo (DMV) initialization
117503831d35Sstevel  * routine to determine the number of hardware and software DMV interrupts
117603831d35Sstevel  * that a platform supports.
117703831d35Sstevel  */
117803831d35Sstevel void
117903831d35Sstevel plat_dmv_params(uint_t *hwint, uint_t *swint)
118003831d35Sstevel {
118103831d35Sstevel 	*hwint = STARCAT_DMV_HWINT;
118203831d35Sstevel 	*swint = 0;
118303831d35Sstevel }
118403831d35Sstevel 
118503831d35Sstevel /*
118603831d35Sstevel  * If provided, this function will be called whenever the nodename is updated.
118703831d35Sstevel  * To keep OS mailbox handling localized, all we do is forward the call to the
118803831d35Sstevel  * scosmb module (if it is available).
118903831d35Sstevel  */
119003831d35Sstevel void
119103831d35Sstevel plat_nodename_set(void)
119203831d35Sstevel {
119303831d35Sstevel 	void (*nodename_update_func)(uint64_t) = NULL;
119403831d35Sstevel 
119503831d35Sstevel 	/*
119603831d35Sstevel 	 * find the symbol for the nodename update routine in the scosmb module
119703831d35Sstevel 	 */
119803831d35Sstevel 	nodename_update_func = (void (*)(uint64_t))
119903831d35Sstevel 	    modgetsymvalue("scosmb_update_nodename", 0);
120003831d35Sstevel 
120103831d35Sstevel 	/*
120203831d35Sstevel 	 * If the symbol was found, call it.  Otherwise, log a note (but not to
120303831d35Sstevel 	 * the console).
120403831d35Sstevel 	 */
120503831d35Sstevel 	if (nodename_update_func != NULL) {
120603831d35Sstevel 		nodename_update_func(0);
120703831d35Sstevel 	} else {
120803831d35Sstevel 		cmn_err(CE_NOTE,
120903831d35Sstevel 		    "!plat_nodename_set: scosmb_update_nodename not found\n");
121003831d35Sstevel 	}
121103831d35Sstevel }
121203831d35Sstevel 
121303831d35Sstevel caddr_t	efcode_vaddr = NULL;
121403831d35Sstevel caddr_t efcode_paddr = NULL;
121503831d35Sstevel /*
121603831d35Sstevel  * Preallocate enough memory for fcode claims.
121703831d35Sstevel  */
121803831d35Sstevel 
121903831d35Sstevel caddr_t
122003831d35Sstevel efcode_alloc(caddr_t alloc_base)
122103831d35Sstevel {
122203831d35Sstevel 	caddr_t efcode_alloc_base = (caddr_t)roundup((uintptr_t)alloc_base,
122303831d35Sstevel 	    MMU_PAGESIZE);
122403831d35Sstevel 	caddr_t vaddr;
122503831d35Sstevel 
122603831d35Sstevel 	/*
122703831d35Sstevel 	 * Allocate the physical memory for schizo fcode.
122803831d35Sstevel 	 */
122903831d35Sstevel 	if ((vaddr = (caddr_t)BOP_ALLOC(bootops, efcode_alloc_base,
123003831d35Sstevel 	    efcode_size, MMU_PAGESIZE)) == NULL)
123103831d35Sstevel 		cmn_err(CE_PANIC, "Cannot allocate Efcode Memory");
123203831d35Sstevel 
123303831d35Sstevel 	efcode_vaddr = vaddr;
123403831d35Sstevel 
123503831d35Sstevel 	return (efcode_alloc_base + efcode_size);
123603831d35Sstevel }
123703831d35Sstevel 
123803831d35Sstevel caddr_t
123925cf1a30Sjl139090 plat_startup_memlist(caddr_t alloc_base)
124003831d35Sstevel {
124103831d35Sstevel 	caddr_t tmp_alloc_base;
124203831d35Sstevel 
124303831d35Sstevel 	tmp_alloc_base = efcode_alloc(alloc_base);
124403831d35Sstevel 	tmp_alloc_base = (caddr_t)roundup((uintptr_t)tmp_alloc_base,
124503831d35Sstevel 	    ecache_alignsize);
124603831d35Sstevel 	return (tmp_alloc_base);
124703831d35Sstevel }
124803831d35Sstevel 
124903831d35Sstevel /*
125003831d35Sstevel  * This is a helper function to determine if a given
125103831d35Sstevel  * node should be considered for a dr operation according
125203831d35Sstevel  * to predefined dr names. This is accomplished using
125303831d35Sstevel  * a function defined in drmach module. The drmach module
125403831d35Sstevel  * owns the definition of dr allowable names.
125503831d35Sstevel  * Formal Parameter: The name of a device node.
125603831d35Sstevel  * Expected Return Value: -1, device node name does not map to a valid dr name.
125703831d35Sstevel  *               A value greater or equal to 0, name is valid.
125803831d35Sstevel  */
125903831d35Sstevel int
126003831d35Sstevel starcat_dr_name(char *name)
126103831d35Sstevel {
126203831d35Sstevel 	int (*drmach_name2type)(char *) = NULL;
126303831d35Sstevel 
126403831d35Sstevel 	/* Get a pointer to the helper function in the drmach module. */
126503831d35Sstevel 	drmach_name2type =
126603831d35Sstevel 	    (int (*)(char *))kobj_getsymvalue("drmach_name2type_idx", 0);
126703831d35Sstevel 
126803831d35Sstevel 	if (drmach_name2type == NULL)
126903831d35Sstevel 		return (-1);
127003831d35Sstevel 
127103831d35Sstevel 	return ((*drmach_name2type)(name));
127203831d35Sstevel }
127303831d35Sstevel 
127403831d35Sstevel void
127503831d35Sstevel startup_platform(void)
127603831d35Sstevel {
1277575a7426Spt157919 	/* set per platform constants for mutex backoff */
1278575a7426Spt157919 	mutex_backoff_base = 2;
1279575a7426Spt157919 	mutex_cap_factor = 64;
128003831d35Sstevel }
128103831d35Sstevel 
128203831d35Sstevel /*
128303831d35Sstevel  * KDI functions - used by the in-situ kernel debugger (kmdb) to perform
128403831d35Sstevel  * platform-specific operations.  These functions execute when the world is
128503831d35Sstevel  * stopped, and as such cannot make any blocking calls, hold locks, etc.
128603831d35Sstevel  * promif functions are a special case, and may be used.
128703831d35Sstevel  */
128803831d35Sstevel 
128903831d35Sstevel static void
129003831d35Sstevel starcat_system_claim(void)
129103831d35Sstevel {
1292d3d50737SRafael Vanoni 	lbolt_debug_entry();
1293d3d50737SRafael Vanoni 
129403831d35Sstevel 	prom_interpret("sigb-sig! my-sigb-sig!", OBP_SIG, OBP_SIG, 0, 0, 0);
129503831d35Sstevel }
129603831d35Sstevel 
129703831d35Sstevel static void
129803831d35Sstevel starcat_system_release(void)
129903831d35Sstevel {
130003831d35Sstevel 	prom_interpret("sigb-sig! my-sigb-sig!", OS_SIG, OS_SIG, 0, 0, 0);
1301d3d50737SRafael Vanoni 
1302d3d50737SRafael Vanoni 	lbolt_debug_return();
130303831d35Sstevel }
130403831d35Sstevel 
130503831d35Sstevel void
130603831d35Sstevel plat_kdi_init(kdi_t *kdi)
130703831d35Sstevel {
130803831d35Sstevel 	kdi->pkdi_system_claim = starcat_system_claim;
130903831d35Sstevel 	kdi->pkdi_system_release = starcat_system_release;
131003831d35Sstevel }
131103831d35Sstevel 
131203831d35Sstevel /*
131303831d35Sstevel  * This function returns 1 if large pages for kernel heap are supported
131403831d35Sstevel  * and 0 otherwise.
131503831d35Sstevel  *
131603831d35Sstevel  * Currently we disable large-page kmem support if KPR is going to be enabled,
131703831d35Sstevel  * because with large pages hat_add_callback()/hat_delete_callback()
131803831d35Sstevel  * cause network performance degradation.
131903831d35Sstevel  */
132003831d35Sstevel int
132103831d35Sstevel plat_lpkmem_is_supported(void)
132203831d35Sstevel {
132303831d35Sstevel 	extern int segkmem_reloc;
132403831d35Sstevel 
1325*c7c6ab2aSGarrett D'Amore 	if (kernel_cage_enable && (ncpunode >= 32 || segkmem_reloc == 1))
132603831d35Sstevel 		return (0);
132703831d35Sstevel 
132803831d35Sstevel 	return (1);
132903831d35Sstevel }
1330