125cf1a30Sjl139090 /* 225cf1a30Sjl139090 * CDDL HEADER START 325cf1a30Sjl139090 * 425cf1a30Sjl139090 * The contents of this file are subject to the terms of the 525cf1a30Sjl139090 * Common Development and Distribution License (the "License"). 625cf1a30Sjl139090 * You may not use this file except in compliance with the License. 725cf1a30Sjl139090 * 825cf1a30Sjl139090 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 925cf1a30Sjl139090 * or http://www.opensolaris.org/os/licensing. 1025cf1a30Sjl139090 * See the License for the specific language governing permissions 1125cf1a30Sjl139090 * and limitations under the License. 1225cf1a30Sjl139090 * 1325cf1a30Sjl139090 * When distributing Covered Code, include this CDDL HEADER in each 1425cf1a30Sjl139090 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 1525cf1a30Sjl139090 * If applicable, add the following below this CDDL HEADER, with the 1625cf1a30Sjl139090 * fields enclosed by brackets "[]" replaced with your own identifying 1725cf1a30Sjl139090 * information: Portions Copyright [yyyy] [name of copyright owner] 1825cf1a30Sjl139090 * 1925cf1a30Sjl139090 * CDDL HEADER END 2025cf1a30Sjl139090 */ 2125cf1a30Sjl139090 /* 22*c9d93b53SJames Anderson * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 2325cf1a30Sjl139090 * Use is subject to license terms. 2425cf1a30Sjl139090 */ 2525cf1a30Sjl139090 26e98fafb9Sjl139090 /* 27e98fafb9Sjl139090 * Support for Olympus-C (SPARC64-VI) and Jupiter (SPARC64-VII). 
28e98fafb9Sjl139090 */ 29e98fafb9Sjl139090 3025cf1a30Sjl139090 #include <sys/types.h> 3125cf1a30Sjl139090 #include <sys/systm.h> 3225cf1a30Sjl139090 #include <sys/ddi.h> 3325cf1a30Sjl139090 #include <sys/sysmacros.h> 3425cf1a30Sjl139090 #include <sys/archsystm.h> 3525cf1a30Sjl139090 #include <sys/vmsystm.h> 3625cf1a30Sjl139090 #include <sys/machparam.h> 3725cf1a30Sjl139090 #include <sys/machsystm.h> 3825cf1a30Sjl139090 #include <sys/machthread.h> 3925cf1a30Sjl139090 #include <sys/cpu.h> 4025cf1a30Sjl139090 #include <sys/cmp.h> 4125cf1a30Sjl139090 #include <sys/elf_SPARC.h> 4225cf1a30Sjl139090 #include <vm/vm_dep.h> 4325cf1a30Sjl139090 #include <vm/hat_sfmmu.h> 4425cf1a30Sjl139090 #include <vm/seg_kpm.h> 453cbfd4cfSjimand #include <vm/seg_kmem.h> 4625cf1a30Sjl139090 #include <sys/cpuvar.h> 4725cf1a30Sjl139090 #include <sys/opl_olympus_regs.h> 4825cf1a30Sjl139090 #include <sys/opl_module.h> 4925cf1a30Sjl139090 #include <sys/async.h> 5025cf1a30Sjl139090 #include <sys/cmn_err.h> 5125cf1a30Sjl139090 #include <sys/debug.h> 5225cf1a30Sjl139090 #include <sys/dditypes.h> 5325cf1a30Sjl139090 #include <sys/cpu_module.h> 5425cf1a30Sjl139090 #include <sys/sysmacros.h> 5525cf1a30Sjl139090 #include <sys/intreg.h> 5625cf1a30Sjl139090 #include <sys/clock.h> 5725cf1a30Sjl139090 #include <sys/platform_module.h> 5825cf1a30Sjl139090 #include <sys/ontrap.h> 5925cf1a30Sjl139090 #include <sys/panic.h> 6025cf1a30Sjl139090 #include <sys/memlist.h> 6125cf1a30Sjl139090 #include <sys/ndifm.h> 6225cf1a30Sjl139090 #include <sys/ddifm.h> 6325cf1a30Sjl139090 #include <sys/fm/protocol.h> 6425cf1a30Sjl139090 #include <sys/fm/util.h> 6525cf1a30Sjl139090 #include <sys/fm/cpu/SPARC64-VI.h> 6625cf1a30Sjl139090 #include <sys/dtrace.h> 6725cf1a30Sjl139090 #include <sys/watchpoint.h> 6825cf1a30Sjl139090 #include <sys/promif.h> 6925cf1a30Sjl139090 7025cf1a30Sjl139090 /* 7125cf1a30Sjl139090 * Internal functions. 
7225cf1a30Sjl139090 */ 7325cf1a30Sjl139090 static int cpu_sync_log_err(void *flt); 7425cf1a30Sjl139090 static void cpu_payload_add_aflt(struct async_flt *, nvlist_t *, nvlist_t *); 7525cf1a30Sjl139090 static void opl_cpu_sync_error(struct regs *, ulong_t, ulong_t, uint_t, uint_t); 7625cf1a30Sjl139090 static int cpu_flt_in_memory(opl_async_flt_t *, uint64_t); 771ba18ff1Sjimand static int prom_SPARC64VII_support_enabled(void); 7850eff769Smb158278 static void opl_ta3(); 79febcc4a5Sjimand static int plat_prom_preserve_kctx_is_supported(void); 8025cf1a30Sjl139090 8125cf1a30Sjl139090 /* 8225cf1a30Sjl139090 * Error counters resetting interval. 8325cf1a30Sjl139090 */ 8425cf1a30Sjl139090 static int opl_async_check_interval = 60; /* 1 min */ 8525cf1a30Sjl139090 861e2e7a75Shuah uint_t cpu_impl_dual_pgsz = 1; 8725cf1a30Sjl139090 8825cf1a30Sjl139090 /* 8925cf1a30Sjl139090 * PA[22:0] represent Displacement in Jupiter 9025cf1a30Sjl139090 * configuration space. 9125cf1a30Sjl139090 */ 9225cf1a30Sjl139090 uint_t root_phys_addr_lo_mask = 0x7fffffu; 9325cf1a30Sjl139090 9425cf1a30Sjl139090 /* 9525cf1a30Sjl139090 * set in /etc/system to control logging of user BERR/TO's 9625cf1a30Sjl139090 */ 9725cf1a30Sjl139090 int cpu_berr_to_verbose = 0; 9825cf1a30Sjl139090 99e98fafb9Sjl139090 /* 100e98fafb9Sjl139090 * Set to 1 if booted with all Jupiter cpus (all-Jupiter features enabled). 101e98fafb9Sjl139090 */ 102e98fafb9Sjl139090 int cpu_alljupiter = 0; 103e98fafb9Sjl139090 1041426d65aSsm142603 /* 1051426d65aSsm142603 * The sfmmu_cext field to be used by processes in a shared context domain. 
1061426d65aSsm142603 */ 1071426d65aSsm142603 static uchar_t shctx_cext = TAGACCEXT_MKSZPAIR(DEFAULT_ISM_PAGESZC, TTE8K); 1081426d65aSsm142603 10925cf1a30Sjl139090 static int min_ecache_size; 11025cf1a30Sjl139090 static uint_t priv_hcl_1; 11125cf1a30Sjl139090 static uint_t priv_hcl_2; 11225cf1a30Sjl139090 static uint_t priv_hcl_4; 11325cf1a30Sjl139090 static uint_t priv_hcl_8; 11425cf1a30Sjl139090 11525cf1a30Sjl139090 /* 11625cf1a30Sjl139090 * Olympus error log 11725cf1a30Sjl139090 */ 11825cf1a30Sjl139090 static opl_errlog_t *opl_err_log; 119b9a675d4Smb158278 static int opl_cpu0_log_setup; 12025cf1a30Sjl139090 12125cf1a30Sjl139090 /* 12250eff769Smb158278 * OPL ta 3 save area. 12350eff769Smb158278 */ 12450eff769Smb158278 char *opl_ta3_save; 12550eff769Smb158278 12650eff769Smb158278 /* 12725cf1a30Sjl139090 * UE is classified into four classes (MEM, CHANNEL, CPU, PATH). 12825cf1a30Sjl139090 * No any other ecc_type_info insertion is allowed in between the following 12925cf1a30Sjl139090 * four UE classess. 
13025cf1a30Sjl139090 */ 13125cf1a30Sjl139090 ecc_type_to_info_t ecc_type_to_info[] = { 13225cf1a30Sjl139090 SFSR_UE, "UE ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_UE, 13325cf1a30Sjl139090 "Uncorrectable ECC", FM_EREPORT_PAYLOAD_SYNC, 13425cf1a30Sjl139090 FM_EREPORT_CPU_UE_MEM, 13525cf1a30Sjl139090 SFSR_UE, "UE ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_UE, 13625cf1a30Sjl139090 "Uncorrectable ECC", FM_EREPORT_PAYLOAD_SYNC, 13725cf1a30Sjl139090 FM_EREPORT_CPU_UE_CHANNEL, 13825cf1a30Sjl139090 SFSR_UE, "UE ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_UE, 13925cf1a30Sjl139090 "Uncorrectable ECC", FM_EREPORT_PAYLOAD_SYNC, 14025cf1a30Sjl139090 FM_EREPORT_CPU_UE_CPU, 14125cf1a30Sjl139090 SFSR_UE, "UE ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_UE, 14225cf1a30Sjl139090 "Uncorrectable ECC", FM_EREPORT_PAYLOAD_SYNC, 14325cf1a30Sjl139090 FM_EREPORT_CPU_UE_PATH, 14425cf1a30Sjl139090 SFSR_BERR, "BERR ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_OTHERS, 14525cf1a30Sjl139090 "Bus Error", FM_EREPORT_PAYLOAD_SYNC, 14625cf1a30Sjl139090 FM_EREPORT_CPU_BERR, 14725cf1a30Sjl139090 SFSR_TO, "TO ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_OTHERS, 14825cf1a30Sjl139090 "Bus Timeout", FM_EREPORT_PAYLOAD_SYNC, 14925cf1a30Sjl139090 FM_EREPORT_CPU_BTO, 15025cf1a30Sjl139090 SFSR_TLB_MUL, "TLB_MUL ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_OTHERS, 15125cf1a30Sjl139090 "TLB MultiHit", FM_EREPORT_PAYLOAD_SYNC, 15225cf1a30Sjl139090 FM_EREPORT_CPU_MTLB, 15325cf1a30Sjl139090 SFSR_TLB_PRT, "TLB_PRT ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_OTHERS, 15425cf1a30Sjl139090 "TLB Parity", FM_EREPORT_PAYLOAD_SYNC, 15525cf1a30Sjl139090 FM_EREPORT_CPU_TLBP, 15625cf1a30Sjl139090 15725cf1a30Sjl139090 UGESR_IAUG_CRE, "IAUG_CRE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT, 15825cf1a30Sjl139090 "IAUG CRE", FM_EREPORT_PAYLOAD_URGENT, 15925cf1a30Sjl139090 FM_EREPORT_CPU_CRE, 16025cf1a30Sjl139090 UGESR_IAUG_TSBCTXT, "IAUG_TSBCTXT", 16125cf1a30Sjl139090 OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT, 16225cf1a30Sjl139090 "IAUG TSBCTXT", FM_EREPORT_PAYLOAD_URGENT, 16325cf1a30Sjl139090 
FM_EREPORT_CPU_TSBCTX, 16425cf1a30Sjl139090 UGESR_IUG_TSBP, "IUG_TSBP", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT, 16525cf1a30Sjl139090 "IUG TSBP", FM_EREPORT_PAYLOAD_URGENT, 16625cf1a30Sjl139090 FM_EREPORT_CPU_TSBP, 16725cf1a30Sjl139090 UGESR_IUG_PSTATE, "IUG_PSTATE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT, 16825cf1a30Sjl139090 "IUG PSTATE", FM_EREPORT_PAYLOAD_URGENT, 16925cf1a30Sjl139090 FM_EREPORT_CPU_PSTATE, 17025cf1a30Sjl139090 UGESR_IUG_TSTATE, "IUG_TSTATE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT, 17125cf1a30Sjl139090 "IUG TSTATE", FM_EREPORT_PAYLOAD_URGENT, 17225cf1a30Sjl139090 FM_EREPORT_CPU_TSTATE, 17325cf1a30Sjl139090 UGESR_IUG_F, "IUG_F", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT, 17425cf1a30Sjl139090 "IUG FREG", FM_EREPORT_PAYLOAD_URGENT, 17525cf1a30Sjl139090 FM_EREPORT_CPU_IUG_F, 17625cf1a30Sjl139090 UGESR_IUG_R, "IUG_R", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT, 17725cf1a30Sjl139090 "IUG RREG", FM_EREPORT_PAYLOAD_URGENT, 17825cf1a30Sjl139090 FM_EREPORT_CPU_IUG_R, 17925cf1a30Sjl139090 UGESR_AUG_SDC, "AUG_SDC", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT, 18025cf1a30Sjl139090 "AUG SDC", FM_EREPORT_PAYLOAD_URGENT, 18125cf1a30Sjl139090 FM_EREPORT_CPU_SDC, 18225cf1a30Sjl139090 UGESR_IUG_WDT, "IUG_WDT", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT, 18325cf1a30Sjl139090 "IUG WDT", FM_EREPORT_PAYLOAD_URGENT, 18425cf1a30Sjl139090 FM_EREPORT_CPU_WDT, 18525cf1a30Sjl139090 UGESR_IUG_DTLB, "IUG_DTLB", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT, 18625cf1a30Sjl139090 "IUG DTLB", FM_EREPORT_PAYLOAD_URGENT, 18725cf1a30Sjl139090 FM_EREPORT_CPU_DTLB, 18825cf1a30Sjl139090 UGESR_IUG_ITLB, "IUG_ITLB", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT, 18925cf1a30Sjl139090 "IUG ITLB", FM_EREPORT_PAYLOAD_URGENT, 19025cf1a30Sjl139090 FM_EREPORT_CPU_ITLB, 19125cf1a30Sjl139090 UGESR_IUG_COREERR, "IUG_COREERR", 19225cf1a30Sjl139090 OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT, 19325cf1a30Sjl139090 "IUG COREERR", FM_EREPORT_PAYLOAD_URGENT, 19425cf1a30Sjl139090 FM_EREPORT_CPU_CORE, 19525cf1a30Sjl139090 UGESR_MULTI_DAE, "MULTI_DAE", 
OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT, 19625cf1a30Sjl139090 "MULTI DAE", FM_EREPORT_PAYLOAD_URGENT, 19725cf1a30Sjl139090 FM_EREPORT_CPU_DAE, 19825cf1a30Sjl139090 UGESR_MULTI_IAE, "MULTI_IAE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT, 19925cf1a30Sjl139090 "MULTI IAE", FM_EREPORT_PAYLOAD_URGENT, 20025cf1a30Sjl139090 FM_EREPORT_CPU_IAE, 20125cf1a30Sjl139090 UGESR_MULTI_UGE, "MULTI_UGE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT, 20225cf1a30Sjl139090 "MULTI UGE", FM_EREPORT_PAYLOAD_URGENT, 20325cf1a30Sjl139090 FM_EREPORT_CPU_UGE, 20425cf1a30Sjl139090 0, NULL, 0, 0, 20525cf1a30Sjl139090 NULL, 0, 0, 20625cf1a30Sjl139090 }; 20725cf1a30Sjl139090 20825cf1a30Sjl139090 int (*p2get_mem_info)(int synd_code, uint64_t paddr, 20925cf1a30Sjl139090 uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep, 21025cf1a30Sjl139090 int *segsp, int *banksp, int *mcidp); 21125cf1a30Sjl139090 21225cf1a30Sjl139090 21325cf1a30Sjl139090 /* 21450eff769Smb158278 * Setup trap handlers for 0xA, 0x32, 0x40 trap types 21550eff769Smb158278 * and "ta 3" and "ta 4". 
21625cf1a30Sjl139090 */ 21725cf1a30Sjl139090 void 21825cf1a30Sjl139090 cpu_init_trap(void) 21925cf1a30Sjl139090 { 22025cf1a30Sjl139090 OPL_SET_TRAP(tt0_iae, opl_serr_instr); 22125cf1a30Sjl139090 OPL_SET_TRAP(tt1_iae, opl_serr_instr); 22225cf1a30Sjl139090 OPL_SET_TRAP(tt0_dae, opl_serr_instr); 22325cf1a30Sjl139090 OPL_SET_TRAP(tt1_dae, opl_serr_instr); 22425cf1a30Sjl139090 OPL_SET_TRAP(tt0_asdat, opl_ugerr_instr); 22525cf1a30Sjl139090 OPL_SET_TRAP(tt1_asdat, opl_ugerr_instr); 22650eff769Smb158278 OPL_SET_TRAP(tt0_flushw, opl_ta3_instr); 22750eff769Smb158278 OPL_PATCH_28(opl_cleanw_patch, opl_ta4_instr); 22825cf1a30Sjl139090 } 22925cf1a30Sjl139090 23025cf1a30Sjl139090 static int 23125cf1a30Sjl139090 getintprop(pnode_t node, char *name, int deflt) 23225cf1a30Sjl139090 { 23325cf1a30Sjl139090 int value; 23425cf1a30Sjl139090 23525cf1a30Sjl139090 switch (prom_getproplen(node, name)) { 23625cf1a30Sjl139090 case sizeof (int): 23725cf1a30Sjl139090 (void) prom_getprop(node, name, (caddr_t)&value); 23825cf1a30Sjl139090 break; 23925cf1a30Sjl139090 24025cf1a30Sjl139090 default: 24125cf1a30Sjl139090 value = deflt; 24225cf1a30Sjl139090 break; 24325cf1a30Sjl139090 } 24425cf1a30Sjl139090 24525cf1a30Sjl139090 return (value); 24625cf1a30Sjl139090 } 24725cf1a30Sjl139090 24825cf1a30Sjl139090 /* 24925cf1a30Sjl139090 * Set the magic constants of the implementation. 
25025cf1a30Sjl139090 */ 25125cf1a30Sjl139090 /*ARGSUSED*/ 25225cf1a30Sjl139090 void 25325cf1a30Sjl139090 cpu_fiximp(pnode_t dnode) 25425cf1a30Sjl139090 { 25525cf1a30Sjl139090 int i, a; 25625cf1a30Sjl139090 extern int vac_size, vac_shift; 25725cf1a30Sjl139090 extern uint_t vac_mask; 25825cf1a30Sjl139090 25925cf1a30Sjl139090 static struct { 26025cf1a30Sjl139090 char *name; 26125cf1a30Sjl139090 int *var; 26225cf1a30Sjl139090 int defval; 26325cf1a30Sjl139090 } prop[] = { 26425cf1a30Sjl139090 "l1-dcache-size", &dcache_size, OPL_DCACHE_SIZE, 26525cf1a30Sjl139090 "l1-dcache-line-size", &dcache_linesize, OPL_DCACHE_LSIZE, 26625cf1a30Sjl139090 "l1-icache-size", &icache_size, OPL_ICACHE_SIZE, 26725cf1a30Sjl139090 "l1-icache-line-size", &icache_linesize, OPL_ICACHE_LSIZE, 26825cf1a30Sjl139090 "l2-cache-size", &ecache_size, OPL_ECACHE_SIZE, 26925cf1a30Sjl139090 "l2-cache-line-size", &ecache_alignsize, OPL_ECACHE_LSIZE, 27025cf1a30Sjl139090 "l2-cache-associativity", &ecache_associativity, OPL_ECACHE_NWAY 27125cf1a30Sjl139090 }; 27225cf1a30Sjl139090 27325cf1a30Sjl139090 for (i = 0; i < sizeof (prop) / sizeof (prop[0]); i++) 27425cf1a30Sjl139090 *prop[i].var = getintprop(dnode, prop[i].name, prop[i].defval); 27525cf1a30Sjl139090 27625cf1a30Sjl139090 ecache_setsize = ecache_size / ecache_associativity; 27725cf1a30Sjl139090 27825cf1a30Sjl139090 vac_size = OPL_VAC_SIZE; 27925cf1a30Sjl139090 vac_mask = MMU_PAGEMASK & (vac_size - 1); 28025cf1a30Sjl139090 i = 0; a = vac_size; 28125cf1a30Sjl139090 while (a >>= 1) 28225cf1a30Sjl139090 ++i; 28325cf1a30Sjl139090 vac_shift = i; 28425cf1a30Sjl139090 shm_alignment = vac_size; 28525cf1a30Sjl139090 vac = 1; 28625cf1a30Sjl139090 } 28725cf1a30Sjl139090 288e98fafb9Sjl139090 /* 289e98fafb9Sjl139090 * Enable features for Jupiter-only domains. 
290e98fafb9Sjl139090 */ 291e98fafb9Sjl139090 void 292e98fafb9Sjl139090 cpu_fix_alljupiter(void) 293e98fafb9Sjl139090 { 2941ba18ff1Sjimand if (!prom_SPARC64VII_support_enabled()) { 2951ba18ff1Sjimand /* 2961ba18ff1Sjimand * Do not enable all-Jupiter features and do not turn on 2971ba18ff1Sjimand * the cpu_alljupiter flag. 2981ba18ff1Sjimand */ 2991ba18ff1Sjimand return; 3001ba18ff1Sjimand } 3011ba18ff1Sjimand 302e98fafb9Sjl139090 cpu_alljupiter = 1; 303e98fafb9Sjl139090 304e98fafb9Sjl139090 /* 305e98fafb9Sjl139090 * Enable ima hwcap for Jupiter-only domains. DR will prevent 306e98fafb9Sjl139090 * addition of Olympus-C to all-Jupiter domains to preserve ima 307e98fafb9Sjl139090 * hwcap semantics. 308e98fafb9Sjl139090 */ 309e98fafb9Sjl139090 cpu_hwcap_flags |= AV_SPARC_IMA; 3101426d65aSsm142603 3111426d65aSsm142603 /* 31220064263SSean McEnroe * Enable shared context support. 3131426d65aSsm142603 */ 31420064263SSean McEnroe shctx_on = 1; 315e98fafb9Sjl139090 } 316e98fafb9Sjl139090 31751f7a915Shyw #ifdef OLYMPUS_C_REV_B_ERRATA_XCALL 31851f7a915Shyw /* 31951f7a915Shyw * Quick and dirty way to redefine locally in 32051f7a915Shyw * OPL the value of IDSR_BN_SETS to 31 instead 32151f7a915Shyw * of the standard 32 value. This is to workaround 32251f7a915Shyw * REV_B of Olympus_c processor's problem in handling 32351f7a915Shyw * more than 31 xcall broadcast. 
 */
#undef	IDSR_BN_SETS
#define	IDSR_BN_SETS 31
#endif	/* OLYMPUS_C_REV_B_ERRATA_XCALL */

/*
 * Dispatch a cross-call (mondo) interrupt to every CPU in "set".
 *
 * Targets are shipped in batches of up to IDSR_BN_SETS busy/nack slots,
 * then the IDSR is polled until all deliveries complete.  NACKed targets
 * are re-shipped after a one-tick pause; if delivery does not complete
 * within xc_tick_limit ticks the routine panics (unless a panic is
 * already quiescing the system, in which case it simply returns).
 *
 * Errata workarounds (see the #ifdef'ed regions below):
 *   OLYMPUS_C_REV_A_ERRATA_XCALL - Rev A parts use only one busy/nack set.
 *   OLYMPUS_C_REV_B_ERRATA_XCALL - Rev B parts must not ship more xcalls
 *	while any previous target is still BUSY (and IDSR_BN_SETS is
 *	locally capped at 31 above).
 */
void
send_mondo_set(cpuset_t set)
{
	int lo, busy, nack, shipped = 0;
	uint16_t i, cpuids[IDSR_BN_SETS];
	uint64_t idsr, nackmask = 0, busymask, curnack, curbusy;
	uint64_t starttick, endtick, tick, lasttick;
#if (NCPU > IDSR_BN_SETS)
	int index = 0;
	int ncpuids = 0;
#endif
#ifdef	OLYMPUS_C_REV_A_ERRATA_XCALL
	int bn_sets = IDSR_BN_SETS;
	uint64_t ver;

	ASSERT(NCPU > bn_sets);
#endif

	ASSERT(!CPUSET_ISNULL(set));
	starttick = lasttick = gettick();

#ifdef	OLYMPUS_C_REV_A_ERRATA_XCALL
	/* Rev A of Olympus-C: restrict ourselves to a single busy/nack set. */
	ver = ultra_getver();
	if (((ULTRA_VER_IMPL(ver)) == OLYMPUS_C_IMPL) &&
	    ((OLYMPUS_REV_MASK(ver)) == OLYMPUS_C_A))
		bn_sets = 1;
#endif

#if (NCPU <= IDSR_BN_SETS)
	/* Every possible target fits in one batch; ship them all now. */
	for (i = 0; i < NCPU; i++)
		if (CPU_IN_SET(set, i)) {
			shipit(i, shipped);
			nackmask |= IDSR_NACK_BIT(shipped);
			cpuids[shipped++] = i;
			CPUSET_DEL(set, i);
			if (CPUSET_ISNULL(set))
				break;
		}
	CPU_STATS_ADDQ(CPU, sys, xcalls, shipped);
#else
	for (i = 0; i < NCPU; i++)
		if (CPU_IN_SET(set, i)) {
			ncpuids++;

			/*
			 * Ship only to the first (IDSR_BN_SETS) CPUs. If we
			 * find we have shipped to more than (IDSR_BN_SETS)
			 * CPUs, set "index" to the highest numbered CPU in
			 * the set so we can ship to other CPUs a bit later on.
			 */
#ifdef	OLYMPUS_C_REV_A_ERRATA_XCALL
			if (shipped < bn_sets) {
#else
			if (shipped < IDSR_BN_SETS) {
#endif
				shipit(i, shipped);
				nackmask |= IDSR_NACK_BIT(shipped);
				cpuids[shipped++] = i;
				CPUSET_DEL(set, i);
				if (CPUSET_ISNULL(set))
					break;
			} else
				index = (int)i;
		}

	CPU_STATS_ADDQ(CPU, sys, xcalls, ncpuids);
#endif

	busymask = IDSR_NACK_TO_BUSY(nackmask);
	busy = nack = 0;
	endtick = starttick + xc_tick_limit;
	for (;;) {
		idsr = getidsr();
#if (NCPU <= IDSR_BN_SETS)
		if (idsr == 0)
			break;
#else
		if (idsr == 0 && shipped == ncpuids)
			break;
#endif
		tick = gettick();
		/*
		 * If there is a big jump between the current tick
		 * count and lasttick, we have probably hit a break
		 * point. Adjust endtick accordingly to avoid panic.
		 */
		if (tick > (lasttick + xc_tick_jump_limit))
			endtick += (tick - lasttick);
		lasttick = tick;
		if (tick > endtick) {
			if (panic_quiesce)
				return;
			cmn_err(CE_CONT, "send mondo timeout [%d NACK %d "
			    "BUSY]\nIDSR 0x%" PRIx64 " cpuids:",
			    nack, busy, idsr);
#ifdef	OLYMPUS_C_REV_A_ERRATA_XCALL
			for (i = 0; i < bn_sets; i++) {
#else
			for (i = 0; i < IDSR_BN_SETS; i++) {
#endif
				if (idsr & (IDSR_NACK_BIT(i) |
				    IDSR_BUSY_BIT(i))) {
					cmn_err(CE_CONT, " 0x%x", cpuids[i]);
				}
			}
			cmn_err(CE_CONT, "\n");
			cmn_err(CE_PANIC, "send_mondo_set: timeout");
		}
		curnack = idsr & nackmask;
		curbusy = idsr & busymask;

#ifdef OLYMPUS_C_REV_B_ERRATA_XCALL
		/*
		 * Only proceed to send more xcalls if all the
		 * cpus in the previous IDSR_BN_SETS were completed.
		 */
		if (curbusy) {
			busy++;
			continue;
		}
#endif /* OLYMPUS_C_REV_B_ERRATA_XCALL */

#if (NCPU > IDSR_BN_SETS)
		if (shipped < ncpuids) {
			uint64_t cpus_left;
			uint16_t next = (uint16_t)index;

			/* Slots neither busy nor nacked are free for reuse. */
			cpus_left = ~(IDSR_NACK_TO_BUSY(curnack) | curbusy) &
			    busymask;

			if (cpus_left) {
				do {
					/*
					 * Sequence through and ship to the
					 * remainder of the CPUs in the system
					 * (e.g. other than the first
					 * (IDSR_BN_SETS)) in reverse order.
					 */
					lo = lowbit(cpus_left) - 1;
					i = IDSR_BUSY_IDX(lo);
					shipit(next, i);
					shipped++;
					cpuids[i] = next;

					/*
					 * If we've processed all the CPUs,
					 * exit the loop now and save
					 * instructions.
					 */
					if (shipped == ncpuids)
						break;

					for ((index = ((int)next - 1));
					    index >= 0; index--)
						if (CPU_IN_SET(set, index)) {
							next = (uint16_t)index;
							break;
						}

					cpus_left &= ~(1ull << lo);
				} while (cpus_left);
				continue;
			}
		}
#endif
#ifndef	OLYMPUS_C_REV_B_ERRATA_XCALL
		if (curbusy) {
			busy++;
			continue;
		}
#endif	/* OLYMPUS_C_REV_B_ERRATA_XCALL */
#ifdef	SEND_MONDO_STATS
		{
			int n = gettick() - starttick;
			if (n < 8192)
				x_nack_stimes[n >> 7]++;
		}
#endif
		/* Pause roughly one microsecond before re-shipping NACKs. */
		while (gettick() < (tick + sys_clock_mhz))
			;
		do {
			lo = lowbit(curnack) - 1;
			i = IDSR_NACK_IDX(lo);
			shipit(cpuids[i], i);
			curnack &= ~(1ull << lo);
		} while (curnack);
		nack++;
		busy = 0;
	}
#ifdef SEND_MONDO_STATS
	{
		int n = gettick() - starttick;
		if (n < 8192)
			x_set_stimes[n >> 7]++;
		else
			x_set_ltimes[(n >> 13) & 0xf]++;
	}
	x_set_cpus[shipped]++;
#endif
}

/*
 * Cpu private initialization.
 */
void
cpu_init_private(struct cpu *cp)
{
	/*
	 * This module only supports SPARC64-VI (Olympus-C) and
	 * SPARC64-VII (Jupiter); refuse to run on anything else.
	 */
	if (!((IS_OLYMPUS_C(cpunodes[cp->cpu_id].implementation)) ||
	    (IS_JUPITER(cpunodes[cp->cpu_id].implementation)))) {
		cmn_err(CE_PANIC, "CPU%d Impl %d: Only SPARC64-VI(I) is "
		    "supported", cp->cpu_id,
		    cpunodes[cp->cpu_id].implementation);
	}

	/* Size hw-copy thresholds from this cpu's external cache size. */
	adjust_hw_copy_limits(cpunodes[cp->cpu_id].ecache_size);
}

/*
 * One-time, boot-cpu platform setup: error-log scratch register, trap
 * handlers, cache/coloring policy, ELF hwcap/isa lists, and the kpm
 * mapping window.  Runs before other cpus are started.
 */
void
cpu_setup(void)
{
	extern int at_flags;
	extern int cpc_has_overflow_intr;
	uint64_t cpu0_log;
	extern uint64_t opl_cpu0_err_log;

	/*
	 * Initialize Error log Scratch register for error handling.
	 */

	cpu0_log = va_to_pa(&opl_cpu0_err_log);
	opl_error_setup(cpu0_log);
	opl_cpu0_log_setup = 1;

	/*
	 * Enable MMU translating multiple page sizes for
	 * sITLB and sDTLB.
	 */
	cpu_early_feature_init();

	/*
	 * Setup chip-specific trap handlers.
	 */
	cpu_init_trap();

	cache |= (CACHE_VAC | CACHE_PTAG | CACHE_IOCOHERENT);

	at_flags = EF_SPARC_32PLUS | EF_SPARC_SUN_US1 | EF_SPARC_SUN_US3;

	/*
	 * Due to the number of entries in the fully-associative tlb
	 * this may have to be tuned lower than in spitfire.
	 */
	pp_slots = MIN(8, MAXPP_SLOTS);

	/*
	 * Block stores do not invalidate all pages of the d$, pagecopy
	 * et. al. need virtual translations with virtual coloring taken
	 * into consideration. prefetch/ldd will pollute the d$ on the
	 * load side.
	 */
	pp_consistent_coloring = PPAGE_STORE_VCOLORING | PPAGE_LOADS_POLLUTE;

	if (use_page_coloring) {
		do_pg_coloring = 1;
	}

	isa_list =
	    "sparcv9+vis2 sparcv9+vis sparcv9 "
	    "sparcv8plus+vis2 sparcv8plus+vis sparcv8plus "
	    "sparcv8 sparcv8-fsmuld sparcv7 sparc";

	cpu_hwcap_flags = AV_SPARC_VIS | AV_SPARC_VIS2 |
	    AV_SPARC_POPC | AV_SPARC_FMAF;

	/*
	 * On SPARC64-VI, there's no hole in the virtual address space
	 */
	hole_start = hole_end = 0;

	/*
	 * The kpm mapping window.
	 * kpm_size:
	 *	The size of a single kpm range.
	 *	The overall size will be: kpm_size * vac_colors.
	 * kpm_vbase:
	 *	The virtual start address of the kpm range within the kernel
	 *	virtual address space. kpm_vbase has to be kpm_size aligned.
	 */
	kpm_size = (size_t)(128ull * 1024 * 1024 * 1024 * 1024); /* 128TB */
	kpm_size_shift = 47;
	kpm_vbase = (caddr_t)0x8000000000000000ull; /* 8EB */
	kpm_smallpages = 1;

	/*
	 * The traptrace code uses either %tick or %stick for
	 * timestamping. We have %stick so we can use it.
	 */
	traptrace_use_stick = 1;

	/*
	 * SPARC64-VI has a performance counter overflow interrupt
	 */
	cpc_has_overflow_intr = 1;

	/*
	 * Declare that this architecture/cpu combination does not support
	 * fpRAS.
	 */
	fpras_implemented = 0;
}

/*
 * Called by setcpudelay
 */
void
cpu_init_tick_freq(void)
{
	/*
	 * For SPARC64-VI we want to use the system clock rate as
	 * the basis for low level timing, due to support of mixed
	 * speed CPUs and power management.
	 */
	if (system_clock_freq == 0)
		cmn_err(CE_PANIC, "setcpudelay: invalid system_clock_freq");

	sys_tick_freq = system_clock_freq;
}

/* Histogram buckets for mondo delivery timing, kept only when profiling. */
#ifdef SEND_MONDO_STATS
uint32_t x_one_stimes[64];
uint32_t x_one_ltimes[16];
uint32_t x_set_stimes[64];
uint32_t x_set_ltimes[16];
uint32_t x_set_cpus[NCPU];
uint32_t x_nack_stimes[64];
#endif

/*
 * Note: A version of this function is used by the debugger via the KDI,
 * and must be kept in sync with this version. Any changes made to this
 * function to support new chips or to accommodate errata must also be included
 * in the KDI-specific version. See us3_kdi.c.
67125cf1a30Sjl139090 */ 67225cf1a30Sjl139090 void 67325cf1a30Sjl139090 send_one_mondo(int cpuid) 67425cf1a30Sjl139090 { 67525cf1a30Sjl139090 int busy, nack; 67625cf1a30Sjl139090 uint64_t idsr, starttick, endtick, tick, lasttick; 67725cf1a30Sjl139090 uint64_t busymask; 67825cf1a30Sjl139090 67925cf1a30Sjl139090 CPU_STATS_ADDQ(CPU, sys, xcalls, 1); 68025cf1a30Sjl139090 starttick = lasttick = gettick(); 68125cf1a30Sjl139090 shipit(cpuid, 0); 68225cf1a30Sjl139090 endtick = starttick + xc_tick_limit; 68325cf1a30Sjl139090 busy = nack = 0; 68425cf1a30Sjl139090 busymask = IDSR_BUSY; 68525cf1a30Sjl139090 for (;;) { 68625cf1a30Sjl139090 idsr = getidsr(); 68725cf1a30Sjl139090 if (idsr == 0) 68825cf1a30Sjl139090 break; 68925cf1a30Sjl139090 69025cf1a30Sjl139090 tick = gettick(); 69125cf1a30Sjl139090 /* 69225cf1a30Sjl139090 * If there is a big jump between the current tick 69325cf1a30Sjl139090 * count and lasttick, we have probably hit a break 69425cf1a30Sjl139090 * point. Adjust endtick accordingly to avoid panic. 
69525cf1a30Sjl139090 */ 69625cf1a30Sjl139090 if (tick > (lasttick + xc_tick_jump_limit)) 69725cf1a30Sjl139090 endtick += (tick - lasttick); 69825cf1a30Sjl139090 lasttick = tick; 69925cf1a30Sjl139090 if (tick > endtick) { 70025cf1a30Sjl139090 if (panic_quiesce) 70125cf1a30Sjl139090 return; 702e98fafb9Sjl139090 cmn_err(CE_PANIC, "send mondo timeout (target 0x%x) " 703e98fafb9Sjl139090 "[%d NACK %d BUSY]", cpuid, nack, busy); 70425cf1a30Sjl139090 } 70525cf1a30Sjl139090 70625cf1a30Sjl139090 if (idsr & busymask) { 70725cf1a30Sjl139090 busy++; 70825cf1a30Sjl139090 continue; 70925cf1a30Sjl139090 } 71025cf1a30Sjl139090 drv_usecwait(1); 71125cf1a30Sjl139090 shipit(cpuid, 0); 71225cf1a30Sjl139090 nack++; 71325cf1a30Sjl139090 busy = 0; 71425cf1a30Sjl139090 } 71525cf1a30Sjl139090 #ifdef SEND_MONDO_STATS 71625cf1a30Sjl139090 { 71725cf1a30Sjl139090 int n = gettick() - starttick; 71825cf1a30Sjl139090 if (n < 8192) 71925cf1a30Sjl139090 x_one_stimes[n >> 7]++; 72025cf1a30Sjl139090 else 72125cf1a30Sjl139090 x_one_ltimes[(n >> 13) & 0xf]++; 72225cf1a30Sjl139090 } 72325cf1a30Sjl139090 #endif 72425cf1a30Sjl139090 } 72525cf1a30Sjl139090 72625cf1a30Sjl139090 /* 72725cf1a30Sjl139090 * init_mmu_page_sizes is set to one after the bootup time initialization 72825cf1a30Sjl139090 * via mmu_init_mmu_page_sizes, to indicate that mmu_page_sizes has a 72925cf1a30Sjl139090 * valid value. 73025cf1a30Sjl139090 * 73125cf1a30Sjl139090 * mmu_disable_ism_large_pages and mmu_disable_large_pages are the mmu-specific 73225cf1a30Sjl139090 * versions of disable_ism_large_pages and disable_large_pages, and feed back 73325cf1a30Sjl139090 * into those two hat variables at hat initialization time. 
 *
 */
int init_mmu_page_sizes = 0;

/*
 * Bit masks of TTE page-size codes whose use is disabled for each mapping
 * class.  By default only 8K/4M are allowed for ISM and automatic data
 * selection, and only 8K/4M for automatic text selection (64K/512K denied).
 */
static uint_t mmu_disable_large_pages = 0;
static uint_t mmu_disable_ism_large_pages = ((1 << TTE64K) |
    (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
static uint_t mmu_disable_auto_data_large_pages = ((1 << TTE64K) |
    (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
static uint_t mmu_disable_auto_text_large_pages = ((1 << TTE64K) |
    (1 << TTE512K));

/*
 * Re-initialize mmu_page_sizes and friends, for SPARC64-VI mmu support.
 * Called during very early bootup from check_cpus_set().
 * Can be called to verify that mmu_page_sizes are set up correctly.
 *
 * Set Olympus defaults. We do not use the function parameter.
 */
/*
 * Propagate the platform-wide shared-context page-size encoding into a
 * newly created shared context domain (SCD).
 */
/*ARGSUSED*/
void
mmu_init_scd(sf_scd_t *scdp)
{
	scdp->scd_sfmmup->sfmmu_cext = shctx_cext;
}

/*ARGSUSED*/
int
mmu_init_mmu_page_sizes(int32_t not_used)
{
	/* Only initialize once; returns 0 on first call, 1 thereafter. */
	if (!init_mmu_page_sizes) {
		mmu_page_sizes = MMU_PAGE_SIZES;
		mmu_hashcnt = MAX_HASHCNT;
		mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;
		mmu_exported_pagesize_mask = (1 << TTE8K) |
		    (1 << TTE64K) | (1 << TTE512K) | (1 << TTE4M) |
		    (1 << TTE32M) | (1 << TTE256M);
		init_mmu_page_sizes = 1;
		return (0);
	}
	return (1);
}

/* SPARC64-VI worst case DTLB parameters */
#ifndef	LOCKED_DTLB_ENTRIES
#define	LOCKED_DTLB_ENTRIES	5	/* 2 user TSBs, 2 nucleus, + OBP */
#endif
#define	TOTAL_DTLB_ENTRIES	32
#define	AVAIL_32M_ENTRIES	0
#define	AVAIL_256M_ENTRIES	0
#define	AVAIL_DTLB_ENTRIES	(TOTAL_DTLB_ENTRIES - LOCKED_DTLB_ENTRIES)
/* Per-page-size TTE count thresholds used by mmu_setup_page_sizes(). */
static uint64_t ttecnt_threshold[MMU_PAGE_SIZES] = {
	AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES,
	AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES,
	AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES};

/*
 * The function returns the mmu-specific values for the
 * hat's disable_large_pages, disable_ism_large_pages, and
 * disable_auto_data_large_pages and
794ec25b48fSsusans * disable_text_data_large_pages variables. 79525cf1a30Sjl139090 */ 796ec25b48fSsusans uint_t 79725cf1a30Sjl139090 mmu_large_pages_disabled(uint_t flag) 79825cf1a30Sjl139090 { 799ec25b48fSsusans uint_t pages_disable = 0; 800ec25b48fSsusans extern int use_text_pgsz64K; 801ec25b48fSsusans extern int use_text_pgsz512K; 80225cf1a30Sjl139090 80325cf1a30Sjl139090 if (flag == HAT_LOAD) { 80425cf1a30Sjl139090 pages_disable = mmu_disable_large_pages; 80525cf1a30Sjl139090 } else if (flag == HAT_LOAD_SHARE) { 80625cf1a30Sjl139090 pages_disable = mmu_disable_ism_large_pages; 807ec25b48fSsusans } else if (flag == HAT_AUTO_DATA) { 808ec25b48fSsusans pages_disable = mmu_disable_auto_data_large_pages; 809ec25b48fSsusans } else if (flag == HAT_AUTO_TEXT) { 810ec25b48fSsusans pages_disable = mmu_disable_auto_text_large_pages; 811ec25b48fSsusans if (use_text_pgsz512K) { 812ec25b48fSsusans pages_disable &= ~(1 << TTE512K); 813ec25b48fSsusans } 814ec25b48fSsusans if (use_text_pgsz64K) { 815ec25b48fSsusans pages_disable &= ~(1 << TTE64K); 816ec25b48fSsusans } 81725cf1a30Sjl139090 } 81825cf1a30Sjl139090 return (pages_disable); 81925cf1a30Sjl139090 } 82025cf1a30Sjl139090 82125cf1a30Sjl139090 /* 82225cf1a30Sjl139090 * mmu_init_large_pages is called with the desired ism_pagesize parameter. 8231426d65aSsm142603 * It may be called from set_platform_defaults, if some value other than 4M 82425cf1a30Sjl139090 * is desired. mmu_ism_pagesize is the tunable. If it has a bad value, 82525cf1a30Sjl139090 * then only warn, since it would be bad form to panic due to a user typo. 82625cf1a30Sjl139090 * 82725cf1a30Sjl139090 * The function re-initializes the mmu_disable_ism_large_pages variable. 
 */
void
mmu_init_large_pages(size_t ism_pagesize)
{

	switch (ism_pagesize) {
	case MMU_PAGESIZE4M:
		/* Default: only 8K and 4M permitted for ISM and auto-data. */
		mmu_disable_ism_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
		mmu_disable_auto_data_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
		shctx_cext = TAGACCEXT_MKSZPAIR(TTE4M, TTE8K);
		break;
	case MMU_PAGESIZE32M:
		/* Allow 32M for ISM; auto-data keeps 8K/32M only. */
		mmu_disable_ism_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE256M));
		mmu_disable_auto_data_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE4M) | (1 << TTE256M));
		adjust_data_maxlpsize(ism_pagesize);
		shctx_cext = TAGACCEXT_MKSZPAIR(TTE32M, TTE8K);
		break;
	case MMU_PAGESIZE256M:
		/* Allow 256M for ISM; auto-data keeps 8K/256M only. */
		mmu_disable_ism_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE32M));
		mmu_disable_auto_data_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE4M) | (1 << TTE32M));
		adjust_data_maxlpsize(ism_pagesize);
		shctx_cext = TAGACCEXT_MKSZPAIR(TTE256M, TTE8K);
		break;
	default:
		/* Bad tunable value: warn only, per the block comment above. */
		cmn_err(CE_WARN, "Unrecognized mmu_ism_pagesize value 0x%lx",
		    ism_pagesize);
		break;
	}
}

/*
 * Function to reprogram the TLBs when page sizes used
 * by a process change significantly.
 */
static void
mmu_setup_page_sizes(struct hat *hat, uint64_t *ttecnt, uint8_t *tmp_pgsz)
{
	uint8_t pgsz0, pgsz1;

	/*
	 * Don't program 2nd dtlb for kernel and ism hat
	 */
	ASSERT(hat->sfmmu_ismhat == NULL);
	ASSERT(hat != ksfmmup);

	/*
	 * hat->sfmmu_pgsz[] is an array whose elements
	 * contain a sorted order of page sizes.  Element
	 * 0 is the most commonly used page size, followed
	 * by element 1, and so on.
	 *
	 * ttecnt[] is an array of per-page-size page counts
	 * mapped into the process.
	 *
	 * If the HAT's choice for page sizes is unsuitable,
	 * we can override it here.  The new values written
	 * to the array will be handed back to us later to
	 * do the actual programming of the TLB hardware.
	 *
	 */
	/* Order the top-two candidates by page-size code, smallest first. */
	pgsz0 = (uint8_t)MIN(tmp_pgsz[0], tmp_pgsz[1]);
	pgsz1 = (uint8_t)MAX(tmp_pgsz[0], tmp_pgsz[1]);

	/*
	 * This implements PAGESIZE programming of the sTLB
	 * if large TTE counts don't exceed the thresholds.
	 */
	if (ttecnt[pgsz0] < ttecnt_threshold[pgsz0])
		pgsz0 = page_szc(MMU_PAGESIZE);
	if (ttecnt[pgsz1] < ttecnt_threshold[pgsz1])
		pgsz1 = page_szc(MMU_PAGESIZE);
	tmp_pgsz[0] = pgsz0;
	tmp_pgsz[1] = pgsz1;
	/* otherwise, accept what the HAT chose for us */
}

/*
 * The HAT calls this function when an MMU context is allocated so that we
 * can reprogram the large TLBs appropriately for the new process using
 * the context.
 *
 * The caller must hold the HAT lock.
 */
void
mmu_set_ctx_page_sizes(struct hat *hat)
{
	uint8_t pgsz0, pgsz1;
	uint8_t new_cext;

	ASSERT(sfmmu_hat_lock_held(hat));
	/*
	 * Don't program 2nd dtlb for kernel and ism hat
	 */
	if (hat->sfmmu_ismhat || hat == ksfmmup)
		return;

	/*
	 * If supported, reprogram the TLBs to a larger pagesize.
	 */
	if (hat->sfmmu_scdp != NULL) {
		/* Shared-context process: inherit the SCD's encoding. */
		new_cext = hat->sfmmu_scdp->scd_sfmmup->sfmmu_cext;
		ASSERT(new_cext == shctx_cext);
	} else {
		/* Encode the HAT's two preferred page sizes. */
		pgsz0 = hat->sfmmu_pgsz[0];
		pgsz1 = hat->sfmmu_pgsz[1];
		ASSERT(pgsz0 < mmu_page_sizes);
		ASSERT(pgsz1 < mmu_page_sizes);
		new_cext = TAGACCEXT_MKSZPAIR(pgsz1, pgsz0);
	}
	if (hat->sfmmu_cext != new_cext) {
#ifdef DEBUG
		int i;
		/*
		 * assert cnum should be invalid, this is because pagesize
		 * can only be changed after a proc's ctxs are invalidated.
		 */
		for (i = 0; i < max_mmu_ctxdoms; i++) {
			ASSERT(hat->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
		}
#endif /* DEBUG */
		hat->sfmmu_cext = new_cext;
	}
	/*
	 * sfmmu_setctx_sec() will take care of the
	 * rest of the dirty work for us.
	 */
}

/*
 * This function assumes that there are either four or six supported page
 * sizes and at most two programmable TLBs, so we need to decide which
 * page sizes are most important and then adjust the TLB page sizes
 * accordingly (if supported).
 *
 * If these assumptions change, this function will need to be
 * updated to support whatever the new limits are.
97022a594afSjimand */ 97122a594afSjimand void 97222a594afSjimand mmu_check_page_sizes(sfmmu_t *sfmmup, uint64_t *ttecnt) 97322a594afSjimand { 97422a594afSjimand uint64_t sortcnt[MMU_PAGE_SIZES]; 97522a594afSjimand uint8_t tmp_pgsz[MMU_PAGE_SIZES]; 97622a594afSjimand uint8_t i, j, max; 97722a594afSjimand uint16_t oldval, newval; 97822a594afSjimand 97922a594afSjimand /* 98022a594afSjimand * We only consider reprogramming the TLBs if one or more of 98122a594afSjimand * the two most used page sizes changes and we're using 98222a594afSjimand * large pages in this process. 98322a594afSjimand */ 98405d3dc4bSpaulsan if (SFMMU_LGPGS_INUSE(sfmmup)) { 98522a594afSjimand /* Sort page sizes. */ 98622a594afSjimand for (i = 0; i < mmu_page_sizes; i++) { 98722a594afSjimand sortcnt[i] = ttecnt[i]; 98822a594afSjimand } 98922a594afSjimand for (j = 0; j < mmu_page_sizes; j++) { 99022a594afSjimand for (i = mmu_page_sizes - 1, max = 0; i > 0; i--) { 99122a594afSjimand if (sortcnt[i] > sortcnt[max]) 99222a594afSjimand max = i; 99322a594afSjimand } 99422a594afSjimand tmp_pgsz[j] = max; 99522a594afSjimand sortcnt[max] = 0; 99622a594afSjimand } 99722a594afSjimand 99822a594afSjimand oldval = sfmmup->sfmmu_pgsz[0] << 8 | sfmmup->sfmmu_pgsz[1]; 99922a594afSjimand 100022a594afSjimand mmu_setup_page_sizes(sfmmup, ttecnt, tmp_pgsz); 100122a594afSjimand 100222a594afSjimand /* Check 2 largest values after the sort. */ 100322a594afSjimand newval = tmp_pgsz[0] << 8 | tmp_pgsz[1]; 100422a594afSjimand if (newval != oldval) { 100522a594afSjimand sfmmu_reprog_pgsz_arr(sfmmup, tmp_pgsz); 100622a594afSjimand } 100722a594afSjimand } 100822a594afSjimand } 100922a594afSjimand 101022a594afSjimand /* 101125cf1a30Sjl139090 * Return processor specific async error structure 101225cf1a30Sjl139090 * size used. 
 */
int
cpu_aflt_size(void)
{
	return (sizeof (opl_async_flt_t));
}

/*
 * The cpu_sync_log_err() function is called via the [uc]e_drain() function to
 * post-process CPU events that are dequeued.  As such, it can be invoked
 * from softint context, from AST processing in the trap() flow, or from the
 * panic flow.  We decode the CPU-specific data, and take appropriate actions.
 * Historically this entry point was used to log the actual cmn_err(9F) text;
 * now with FMA it is used to prepare 'flt' to be converted into an ereport.
 * With FMA this function now also returns a flag which indicates to the
 * caller whether the ereport should be posted (1) or suppressed (0).
 */
/*ARGSUSED*/
static int
cpu_sync_log_err(void *flt)
{
	opl_async_flt_t *opl_flt = (opl_async_flt_t *)flt;
	struct async_flt *aflt = (struct async_flt *)flt;

	/*
	 * No extra processing of urgent error events.
	 * Always generate ereports for these events.
	 */
	if (aflt->flt_status == OPL_ECC_URGENT_TRAP)
		return (1);

	/*
	 * Additional processing for synchronous errors.
	 */
	switch (opl_flt->flt_type) {
	case OPL_CPU_INV_SFSR:
		return (1);

	case OPL_CPU_SYNC_UE:
		/*
		 * The validity: SFSR_MK_UE bit has been checked
		 * in opl_cpu_sync_error()
		 * No more check is required.
		 *
		 * opl_flt->flt_eid_mod and flt_eid_sid have been set by H/W,
		 * and they have been retrieved in cpu_queue_events()
		 */
		if (opl_flt->flt_eid_mod == OPL_ERRID_MEM) {
			ASSERT(aflt->flt_in_memory);
			/*
			 * We want to skip logging only if ALL the following
			 * conditions are true:
			 *
			 *	1. We are not panicking already.
			 *	2. The error is a memory error.
			 *	3. There is only one error.
			 *	4. The error is on a retired page.
			 *	5. The error occurred under on_trap
			 *	protection AFLT_PROT_EC
			 */
			if (!panicstr && aflt->flt_prot == AFLT_PROT_EC &&
			    page_retire_check(aflt->flt_addr, NULL) == 0) {
				/*
				 * Do not log an error from
				 * the retired page
				 */
				softcall(ecc_page_zero, (void *)aflt->flt_addr);
				return (0);
			}
			if (!panicstr)
				cpu_page_retire(opl_flt);
		}
		return (1);

	case OPL_CPU_SYNC_OTHERS:
		/*
		 * For the following error cases, the processor HW does
		 * not set the flt_eid_mod/flt_eid_sid. Instead, SW will attempt
		 * to assign appropriate values here to reflect what we
		 * think is the most likely cause of the problem w.r.t to
		 * the particular error event.  For Buserr and timeout
		 * error event, we will assign OPL_ERRID_CHANNEL as the
		 * most likely reason.  For TLB parity or multiple hit
		 * error events, we will assign the reason as
		 * OPL_ERRID_CPU (cpu related problem) and set the
		 * flt_eid_sid to point to the cpuid.
		 */
		if (opl_flt->flt_bit & (SFSR_BERR|SFSR_TO)) {
			/*
			 * flt_eid_sid will not be used for this case.
			 */
			opl_flt->flt_eid_mod = OPL_ERRID_CHANNEL;
		}
		if (opl_flt->flt_bit & (SFSR_TLB_MUL|SFSR_TLB_PRT)) {
			opl_flt->flt_eid_mod = OPL_ERRID_CPU;
			opl_flt->flt_eid_sid = aflt->flt_inst;
		}

		/*
		 * In case of no effective error bit
		 */
		if ((opl_flt->flt_bit & SFSR_ERRS) == 0) {
			opl_flt->flt_eid_mod = OPL_ERRID_CPU;
			opl_flt->flt_eid_sid = aflt->flt_inst;
		}
		break;

	default:
		return (1);
	}
	return (1);
}

/*
 * Retire the bad page that may contain the flushed error.
 */
void
cpu_page_retire(opl_async_flt_t *opl_flt)
{
	struct async_flt *aflt = (struct async_flt *)opl_flt;
	(void) page_retire(aflt->flt_addr, PR_UE);
}

/*
 * Invoked by error_init() early in startup and therefore before
 * startup_errorq() is called to drain any error Q -
 *
 * startup()
 *   startup_end()
 *     error_init()
 *       cpu_error_init()
 * errorq_init()
 *   errorq_drain()
 * start_other_cpus()
 *
 * The purpose of this routine is to create error-related taskqs.
 * Taskqs
 * are used for this purpose because cpu_lock can't be grabbed from interrupt
 * context.
 *
 */
/*ARGSUSED*/
void
cpu_error_init(int items)
{
	/* The error log buffer must be page aligned; panic if it is not. */
	opl_err_log = (opl_errlog_t *)
	    kmem_alloc(ERRLOG_ALLOC_SZ, KM_SLEEP);
	if ((uint64_t)opl_err_log & MMU_PAGEOFFSET)
		cmn_err(CE_PANIC, "The base address of the error log "
		    "is not page aligned");
}

/*
 * We route all errors through a single switch statement.
 */
void
cpu_ue_log_err(struct async_flt *aflt)
{
	switch (aflt->flt_class) {
	case CPU_FAULT:
		/* Post an ereport only when cpu_sync_log_err() says to. */
		if (cpu_sync_log_err(aflt))
			cpu_ereport_post(aflt);
		break;

	case BUS_FAULT:
		bus_async_log_err(aflt);
		break;

	default:
		cmn_err(CE_WARN, "discarding async error %p with invalid "
		    "fault class (0x%x)", (void *)aflt, aflt->flt_class);
		return;
	}
}

/*
 * Routine for panic hook callback from panic_idle().
 *
 * Nothing to do here.
 */
void
cpu_async_panic_callb(void)
{
}

/*
 * Routine to return a string identifying the physical name
 * associated with a memory/cache error.
 */
/*ARGSUSED*/
int
cpu_get_mem_unum(int synd_status, ushort_t flt_synd, uint64_t flt_stat,
    uint64_t flt_addr, int flt_bus_id, int flt_in_memory,
    ushort_t flt_status, char *buf, int buflen, int *lenp)
{
	int synd_code;
	int ret;

	/*
	 * An AFSR of -1 defaults to a memory syndrome.
	 */
	synd_code = (int)flt_synd;

	/* Delegate to the platform module's unum lookup when it exists. */
	if (&plat_get_mem_unum) {
		if ((ret = plat_get_mem_unum(synd_code, flt_addr, flt_bus_id,
		    flt_in_memory, flt_status, buf, buflen, lenp)) != 0) {
			/* Lookup failed: hand back an empty name. */
			buf[0] = '\0';
			*lenp = 0;
		}
		return (ret);
	}
	buf[0] = '\0';
	*lenp = 0;
	return (ENOTSUP);
}

/*
 * Wrapper for cpu_get_mem_unum() routine that takes an
 * async_flt struct rather than explicit arguments.
 */
int
cpu_get_mem_unum_aflt(int synd_status, struct async_flt *aflt,
    char *buf, int buflen, int *lenp)
{
	/*
	 * We always pass -1 so that cpu_get_mem_unum will interpret this as a
	 * memory error.
	 */
	return (cpu_get_mem_unum(synd_status, aflt->flt_synd,
	    (uint64_t)-1,
	    aflt->flt_addr, aflt->flt_bus_id, aflt->flt_in_memory,
	    aflt->flt_status, buf, buflen, lenp));
}

/*
 * This routine is a more generic interface to cpu_get_mem_unum()
 * that may be used by other modules (e.g. mm).
 */
/*ARGSUSED*/
int
cpu_get_mem_name(uint64_t synd, uint64_t *afsr, uint64_t afar,
    char *buf, int buflen, int *lenp)
{
	int synd_status, flt_in_memory, ret;
	ushort_t flt_status = 0;
	char unum[UNUM_NAMLEN];

	/*
	 * Check for an invalid address.
	 */
	if (afar == (uint64_t)-1)
		return (ENXIO);

	if (synd == (uint64_t)-1)
		synd_status = AFLT_STAT_INVALID;
	else
		synd_status = AFLT_STAT_VALID;

	/* Memory error only if the AFSR says so and the PA is real memory. */
	flt_in_memory = (*afsr & SFSR_MEMORY) &&
	    pf_is_memory(afar >> MMU_PAGESHIFT);

	ret = cpu_get_mem_unum(synd_status, (ushort_t)synd, *afsr, afar,
	    CPU->cpu_id, flt_in_memory, flt_status, unum, UNUM_NAMLEN, lenp);
	if (ret != 0)
		return (ret);

	if (*lenp >= buflen)
		return (ENAMETOOLONG);

	(void) strncpy(buf, unum, buflen);

	return (0);
}

/*
 * Routine to return memory information associated
 * with a physical address and syndrome.
129125cf1a30Sjl139090 */ 129225cf1a30Sjl139090 /*ARGSUSED*/ 129325cf1a30Sjl139090 int 129425cf1a30Sjl139090 cpu_get_mem_info(uint64_t synd, uint64_t afar, 129525cf1a30Sjl139090 uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep, 129625cf1a30Sjl139090 int *segsp, int *banksp, int *mcidp) 129725cf1a30Sjl139090 { 129825cf1a30Sjl139090 int synd_code = (int)synd; 129925cf1a30Sjl139090 130025cf1a30Sjl139090 if (afar == (uint64_t)-1) 130125cf1a30Sjl139090 return (ENXIO); 130225cf1a30Sjl139090 130325cf1a30Sjl139090 if (p2get_mem_info != NULL) 1304e98fafb9Sjl139090 return ((p2get_mem_info)(synd_code, afar, mem_sizep, seg_sizep, 1305e98fafb9Sjl139090 bank_sizep, segsp, banksp, mcidp)); 130625cf1a30Sjl139090 else 130725cf1a30Sjl139090 return (ENOTSUP); 130825cf1a30Sjl139090 } 130925cf1a30Sjl139090 131025cf1a30Sjl139090 /* 131125cf1a30Sjl139090 * Routine to return a string identifying the physical 131225cf1a30Sjl139090 * name associated with a cpuid. 131325cf1a30Sjl139090 */ 131425cf1a30Sjl139090 int 131525cf1a30Sjl139090 cpu_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp) 131625cf1a30Sjl139090 { 131725cf1a30Sjl139090 int ret; 131825cf1a30Sjl139090 char unum[UNUM_NAMLEN]; 131925cf1a30Sjl139090 132025cf1a30Sjl139090 if (&plat_get_cpu_unum) { 1321e98fafb9Sjl139090 if ((ret = plat_get_cpu_unum(cpuid, unum, UNUM_NAMLEN, 1322e98fafb9Sjl139090 lenp)) != 0) 132325cf1a30Sjl139090 return (ret); 132425cf1a30Sjl139090 } else { 132525cf1a30Sjl139090 return (ENOTSUP); 132625cf1a30Sjl139090 } 132725cf1a30Sjl139090 132825cf1a30Sjl139090 if (*lenp >= buflen) 132925cf1a30Sjl139090 return (ENAMETOOLONG); 133025cf1a30Sjl139090 133125cf1a30Sjl139090 (void) strncpy(buf, unum, *lenp); 133225cf1a30Sjl139090 133325cf1a30Sjl139090 return (0); 133425cf1a30Sjl139090 } 133525cf1a30Sjl139090 133625cf1a30Sjl139090 /* 133725cf1a30Sjl139090 * This routine exports the name buffer size. 
 */
size_t
cpu_get_name_bufsize()
{
	return (UNUM_NAMLEN);
}

/*
 * Flush the entire ecache by ASI_L2_CNTL.U2_FLUSH
 */
void
cpu_flush_ecache(void)
{
	flush_ecache(ecache_flushaddr, cpunodes[CPU->cpu_id].ecache_size,
	    cpunodes[CPU->cpu_id].ecache_linesize);
}

/*
 * Map the trap-status bits of an async fault to the FM ereport
 * trap-type code.  Checked in priority order; unknown if none match.
 */
static uint8_t
flt_to_trap_type(struct async_flt *aflt)
{
	if (aflt->flt_status & OPL_ECC_ISYNC_TRAP)
		return (TRAP_TYPE_ECC_I);
	if (aflt->flt_status & OPL_ECC_DSYNC_TRAP)
		return (TRAP_TYPE_ECC_D);
	if (aflt->flt_status & OPL_ECC_URGENT_TRAP)
		return (TRAP_TYPE_URGENT);
	return (TRAP_TYPE_UNKNOWN);
}

/*
 * Encode the data saved in the opl_async_flt_t struct into
 * the FM ereport payload.
 */
/* ARGSUSED */
static void
cpu_payload_add_aflt(struct async_flt *aflt, nvlist_t *payload,
    nvlist_t *resource)
{
	opl_async_flt_t *opl_flt = (opl_async_flt_t *)aflt;
	char unum[UNUM_NAMLEN];
	char sbuf[21];	/* sizeof (UINT64_MAX) + '\0' */
	int len;

	/*
	 * Each payload member is added only when the corresponding flag
	 * is set in flt_payload for this ereport class.
	 */
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_SFSR) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SFSR,
		    DATA_TYPE_UINT64, aflt->flt_stat, NULL);
	}
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_SFAR) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SFAR,
		    DATA_TYPE_UINT64, aflt->flt_addr, NULL);
	}
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_UGESR) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UGESR,
		    DATA_TYPE_UINT64, aflt->flt_stat, NULL);
	}
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_PC) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PC,
		    DATA_TYPE_UINT64, (uint64_t)aflt->flt_pc, NULL);
	}
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_TL) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_TL,
		    DATA_TYPE_UINT8, (uint8_t)aflt->flt_tl, NULL);
	}
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_TT) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_TT,
		    DATA_TYPE_UINT8, flt_to_trap_type(aflt), NULL);
	}
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_PRIV) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PRIV,
		    DATA_TYPE_BOOLEAN_VALUE,
		    (aflt->flt_priv ? B_TRUE : B_FALSE), NULL);
	}
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_FLT_STATUS) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FLT_STATUS,
		    DATA_TYPE_UINT64, (uint64_t)aflt->flt_status, NULL);
	}

	/*
	 * Attach a resource FMRI where one can be derived from the
	 * error-ID module (CPU or memory); channel/path errors carry
	 * their identification in the SFSR payload instead.
	 */
	switch (opl_flt->flt_eid_mod) {
	case OPL_ERRID_CPU:
		(void) snprintf(sbuf, sizeof (sbuf), "%llX",
		    (u_longlong_t)cpunodes[opl_flt->flt_eid_sid].device_id);
		(void) fm_fmri_cpu_set(resource, FM_CPU_SCHEME_VERSION,
		    NULL, opl_flt->flt_eid_sid,
		    (uint8_t *)&cpunodes[opl_flt->flt_eid_sid].version, sbuf);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RESOURCE,
		    DATA_TYPE_NVLIST, resource, NULL);
		break;

	case OPL_ERRID_CHANNEL:
		/*
		 * No resource is created but the cpumem DE will find
		 * the defective path by retrieving EID from SFSR which is
		 * included in the payload.
		 */
		break;

	case OPL_ERRID_MEM:
		(void) cpu_get_mem_unum_aflt(0, aflt, unum, UNUM_NAMLEN, &len);
		(void) fm_fmri_mem_set(resource, FM_MEM_SCHEME_VERSION, NULL,
		    unum, NULL, (uint64_t)-1);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RESOURCE,
		    DATA_TYPE_NVLIST, resource, NULL);
		break;

	case OPL_ERRID_PATH:
		/*
		 * No resource is created but the cpumem DE will find
		 * the defective path by retrieving EID from SFSR which is
		 * included in the payload.
		 */
		break;
	}
}

/*
 * Returns whether fault address is valid for this error bit and
 * whether the address is "in memory" (i.e. pf_is_memory returns 1).
 */
/*ARGSUSED*/
static int
cpu_flt_in_memory(opl_async_flt_t *opl_flt, uint64_t t_afsr_bit)
{
	struct async_flt *aflt = (struct async_flt *)opl_flt;

	/* Only synchronous-trap errors can have a valid memory address. */
	if (aflt->flt_status & (OPL_ECC_SYNC_TRAP)) {
		return ((t_afsr_bit & SFSR_MEMORY) &&
		    pf_is_memory(aflt->flt_addr >> MMU_PAGESHIFT));
	}
	return (0);
}

/*
 * In OPL SCF does the stick synchronization.
147225cf1a30Sjl139090 */ 147325cf1a30Sjl139090 void 147425cf1a30Sjl139090 sticksync_slave(void) 147525cf1a30Sjl139090 { 147625cf1a30Sjl139090 } 147725cf1a30Sjl139090 147825cf1a30Sjl139090 /* 147925cf1a30Sjl139090 * In OPL SCF does the stick synchronization. 148025cf1a30Sjl139090 */ 148125cf1a30Sjl139090 void 148225cf1a30Sjl139090 sticksync_master(void) 148325cf1a30Sjl139090 { 148425cf1a30Sjl139090 } 148525cf1a30Sjl139090 148625cf1a30Sjl139090 /* 148725cf1a30Sjl139090 * Cpu private unitialization. OPL cpus do not use the private area. 148825cf1a30Sjl139090 */ 148925cf1a30Sjl139090 void 149025cf1a30Sjl139090 cpu_uninit_private(struct cpu *cp) 149125cf1a30Sjl139090 { 149225cf1a30Sjl139090 cmp_delete_cpu(cp->cpu_id); 149325cf1a30Sjl139090 } 149425cf1a30Sjl139090 149525cf1a30Sjl139090 /* 149625cf1a30Sjl139090 * Always flush an entire cache. 149725cf1a30Sjl139090 */ 149825cf1a30Sjl139090 void 149925cf1a30Sjl139090 cpu_error_ecache_flush(void) 150025cf1a30Sjl139090 { 150125cf1a30Sjl139090 cpu_flush_ecache(); 150225cf1a30Sjl139090 } 150325cf1a30Sjl139090 150425cf1a30Sjl139090 void 150525cf1a30Sjl139090 cpu_ereport_post(struct async_flt *aflt) 150625cf1a30Sjl139090 { 150725cf1a30Sjl139090 char *cpu_type, buf[FM_MAX_CLASS]; 150825cf1a30Sjl139090 nv_alloc_t *nva = NULL; 150925cf1a30Sjl139090 nvlist_t *ereport, *detector, *resource; 151025cf1a30Sjl139090 errorq_elem_t *eqep; 151125cf1a30Sjl139090 char sbuf[21]; /* sizeof (UINT64_MAX) + '\0' */ 151225cf1a30Sjl139090 151325cf1a30Sjl139090 if (aflt->flt_panic || panicstr) { 151425cf1a30Sjl139090 eqep = errorq_reserve(ereport_errorq); 151525cf1a30Sjl139090 if (eqep == NULL) 151625cf1a30Sjl139090 return; 151725cf1a30Sjl139090 ereport = errorq_elem_nvl(ereport_errorq, eqep); 151825cf1a30Sjl139090 nva = errorq_elem_nva(ereport_errorq, eqep); 151925cf1a30Sjl139090 } else { 152025cf1a30Sjl139090 ereport = fm_nvlist_create(nva); 152125cf1a30Sjl139090 } 152225cf1a30Sjl139090 152325cf1a30Sjl139090 /* 152425cf1a30Sjl139090 * Create the 
scheme "cpu" FMRI. 152525cf1a30Sjl139090 */ 152625cf1a30Sjl139090 detector = fm_nvlist_create(nva); 152725cf1a30Sjl139090 resource = fm_nvlist_create(nva); 152825cf1a30Sjl139090 switch (cpunodes[aflt->flt_inst].implementation) { 152925cf1a30Sjl139090 case OLYMPUS_C_IMPL: 153025cf1a30Sjl139090 cpu_type = FM_EREPORT_CPU_SPARC64_VI; 153125cf1a30Sjl139090 break; 1532e98fafb9Sjl139090 case JUPITER_IMPL: 1533e98fafb9Sjl139090 cpu_type = FM_EREPORT_CPU_SPARC64_VII; 1534e98fafb9Sjl139090 break; 153525cf1a30Sjl139090 default: 153625cf1a30Sjl139090 cpu_type = FM_EREPORT_CPU_UNSUPPORTED; 153725cf1a30Sjl139090 break; 153825cf1a30Sjl139090 } 153925cf1a30Sjl139090 (void) snprintf(sbuf, sizeof (sbuf), "%llX", 154025cf1a30Sjl139090 (u_longlong_t)cpunodes[aflt->flt_inst].device_id); 154125cf1a30Sjl139090 (void) fm_fmri_cpu_set(detector, FM_CPU_SCHEME_VERSION, NULL, 154225cf1a30Sjl139090 aflt->flt_inst, (uint8_t *)&cpunodes[aflt->flt_inst].version, 154325cf1a30Sjl139090 sbuf); 154425cf1a30Sjl139090 154525cf1a30Sjl139090 /* 154625cf1a30Sjl139090 * Encode all the common data into the ereport. 154725cf1a30Sjl139090 */ 154825cf1a30Sjl139090 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s.%s", 154925cf1a30Sjl139090 FM_ERROR_CPU, cpu_type, aflt->flt_erpt_class); 155025cf1a30Sjl139090 155125cf1a30Sjl139090 fm_ereport_set(ereport, FM_EREPORT_VERSION, buf, 155225cf1a30Sjl139090 fm_ena_generate(aflt->flt_id, FM_ENA_FMT1), detector, NULL); 155325cf1a30Sjl139090 155425cf1a30Sjl139090 /* 155525cf1a30Sjl139090 * Encode the error specific data that was saved in 155625cf1a30Sjl139090 * the async_flt structure into the ereport. 
155725cf1a30Sjl139090 */ 155825cf1a30Sjl139090 cpu_payload_add_aflt(aflt, ereport, resource); 155925cf1a30Sjl139090 156025cf1a30Sjl139090 if (aflt->flt_panic || panicstr) { 156125cf1a30Sjl139090 errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC); 156225cf1a30Sjl139090 } else { 156325cf1a30Sjl139090 (void) fm_ereport_post(ereport, EVCH_TRYHARD); 156425cf1a30Sjl139090 fm_nvlist_destroy(ereport, FM_NVA_FREE); 156525cf1a30Sjl139090 fm_nvlist_destroy(detector, FM_NVA_FREE); 156625cf1a30Sjl139090 fm_nvlist_destroy(resource, FM_NVA_FREE); 156725cf1a30Sjl139090 } 156825cf1a30Sjl139090 } 156925cf1a30Sjl139090 157025cf1a30Sjl139090 void 157125cf1a30Sjl139090 cpu_run_bus_error_handlers(struct async_flt *aflt, int expected) 157225cf1a30Sjl139090 { 157325cf1a30Sjl139090 int status; 157425cf1a30Sjl139090 ddi_fm_error_t de; 157525cf1a30Sjl139090 157625cf1a30Sjl139090 bzero(&de, sizeof (ddi_fm_error_t)); 157725cf1a30Sjl139090 157825cf1a30Sjl139090 de.fme_version = DDI_FME_VERSION; 157925cf1a30Sjl139090 de.fme_ena = fm_ena_generate(aflt->flt_id, FM_ENA_FMT1); 158025cf1a30Sjl139090 de.fme_flag = expected; 158125cf1a30Sjl139090 de.fme_bus_specific = (void *)aflt->flt_addr; 158225cf1a30Sjl139090 status = ndi_fm_handler_dispatch(ddi_root_node(), NULL, &de); 158325cf1a30Sjl139090 if ((aflt->flt_prot == AFLT_PROT_NONE) && (status == DDI_FM_FATAL)) 158425cf1a30Sjl139090 aflt->flt_panic = 1; 158525cf1a30Sjl139090 } 158625cf1a30Sjl139090 158725cf1a30Sjl139090 void 158825cf1a30Sjl139090 cpu_errorq_dispatch(char *error_class, void *payload, size_t payload_sz, 158925cf1a30Sjl139090 errorq_t *eqp, uint_t flag) 159025cf1a30Sjl139090 { 159125cf1a30Sjl139090 struct async_flt *aflt = (struct async_flt *)payload; 159225cf1a30Sjl139090 159325cf1a30Sjl139090 aflt->flt_erpt_class = error_class; 159425cf1a30Sjl139090 errorq_dispatch(eqp, payload, payload_sz, flag); 159525cf1a30Sjl139090 } 159625cf1a30Sjl139090 159725cf1a30Sjl139090 void 159825cf1a30Sjl139090 adjust_hw_copy_limits(int ecache_size) 
159925cf1a30Sjl139090 { 160025cf1a30Sjl139090 /* 160125cf1a30Sjl139090 * Set hw copy limits. 160225cf1a30Sjl139090 * 160325cf1a30Sjl139090 * /etc/system will be parsed later and can override one or more 160425cf1a30Sjl139090 * of these settings. 160525cf1a30Sjl139090 * 160625cf1a30Sjl139090 * At this time, ecache size seems only mildly relevant. 160725cf1a30Sjl139090 * We seem to run into issues with the d-cache and stalls 160825cf1a30Sjl139090 * we see on misses. 160925cf1a30Sjl139090 * 161025cf1a30Sjl139090 * Cycle measurement indicates that 2 byte aligned copies fare 161125cf1a30Sjl139090 * little better than doing things with VIS at around 512 bytes. 161225cf1a30Sjl139090 * 4 byte aligned shows promise until around 1024 bytes. 8 Byte 161325cf1a30Sjl139090 * aligned is faster whenever the source and destination data 161425cf1a30Sjl139090 * in cache and the total size is less than 2 Kbytes. The 2K 161525cf1a30Sjl139090 * limit seems to be driven by the 2K write cache. 161625cf1a30Sjl139090 * When more than 2K of copies are done in non-VIS mode, stores 161725cf1a30Sjl139090 * backup in the write cache. In VIS mode, the write cache is 161825cf1a30Sjl139090 * bypassed, allowing faster cache-line writes aligned on cache 161925cf1a30Sjl139090 * boundaries. 162025cf1a30Sjl139090 * 162125cf1a30Sjl139090 * In addition, in non-VIS mode, there is no prefetching, so 162225cf1a30Sjl139090 * for larger copies, the advantage of prefetching to avoid even 162325cf1a30Sjl139090 * occasional cache misses is enough to justify using the VIS code. 162425cf1a30Sjl139090 * 162525cf1a30Sjl139090 * During testing, it was discovered that netbench ran 3% slower 162625cf1a30Sjl139090 * when hw_copy_limit_8 was 2K or larger. Apparently for server 162725cf1a30Sjl139090 * applications, data is only used once (copied to the output 162825cf1a30Sjl139090 * buffer, then copied by the network device off the system). Using 162925cf1a30Sjl139090 * the VIS copy saves more L2 cache state. 
Network copies are 163025cf1a30Sjl139090 * around 1.3K to 1.5K in size for historical reasons. 163125cf1a30Sjl139090 * 163225cf1a30Sjl139090 * Therefore, a limit of 1K bytes will be used for the 8 byte 163325cf1a30Sjl139090 * aligned copy even for large caches and 8 MB ecache. The 163425cf1a30Sjl139090 * infrastructure to allow different limits for different sized 163525cf1a30Sjl139090 * caches is kept to allow further tuning in later releases. 163625cf1a30Sjl139090 */ 163725cf1a30Sjl139090 163825cf1a30Sjl139090 if (min_ecache_size == 0 && use_hw_bcopy) { 163925cf1a30Sjl139090 /* 164025cf1a30Sjl139090 * First time through - should be before /etc/system 164125cf1a30Sjl139090 * is read. 164225cf1a30Sjl139090 * Could skip the checks for zero but this lets us 164325cf1a30Sjl139090 * preserve any debugger rewrites. 164425cf1a30Sjl139090 */ 164525cf1a30Sjl139090 if (hw_copy_limit_1 == 0) { 164625cf1a30Sjl139090 hw_copy_limit_1 = VIS_COPY_THRESHOLD; 164725cf1a30Sjl139090 priv_hcl_1 = hw_copy_limit_1; 164825cf1a30Sjl139090 } 164925cf1a30Sjl139090 if (hw_copy_limit_2 == 0) { 165025cf1a30Sjl139090 hw_copy_limit_2 = 2 * VIS_COPY_THRESHOLD; 165125cf1a30Sjl139090 priv_hcl_2 = hw_copy_limit_2; 165225cf1a30Sjl139090 } 165325cf1a30Sjl139090 if (hw_copy_limit_4 == 0) { 165425cf1a30Sjl139090 hw_copy_limit_4 = 4 * VIS_COPY_THRESHOLD; 165525cf1a30Sjl139090 priv_hcl_4 = hw_copy_limit_4; 165625cf1a30Sjl139090 } 165725cf1a30Sjl139090 if (hw_copy_limit_8 == 0) { 165825cf1a30Sjl139090 hw_copy_limit_8 = 4 * VIS_COPY_THRESHOLD; 165925cf1a30Sjl139090 priv_hcl_8 = hw_copy_limit_8; 166025cf1a30Sjl139090 } 166125cf1a30Sjl139090 min_ecache_size = ecache_size; 166225cf1a30Sjl139090 } else { 166325cf1a30Sjl139090 /* 166425cf1a30Sjl139090 * MP initialization. Called *after* /etc/system has 166525cf1a30Sjl139090 * been parsed. One CPU has already been initialized. 166625cf1a30Sjl139090 * Need to cater for /etc/system having scragged one 166725cf1a30Sjl139090 * of our values. 
166825cf1a30Sjl139090 */ 166925cf1a30Sjl139090 if (ecache_size == min_ecache_size) { 167025cf1a30Sjl139090 /* 167125cf1a30Sjl139090 * Same size ecache. We do nothing unless we 167225cf1a30Sjl139090 * have a pessimistic ecache setting. In that 167325cf1a30Sjl139090 * case we become more optimistic (if the cache is 167425cf1a30Sjl139090 * large enough). 167525cf1a30Sjl139090 */ 167625cf1a30Sjl139090 if (hw_copy_limit_8 == 4 * VIS_COPY_THRESHOLD) { 167725cf1a30Sjl139090 /* 167825cf1a30Sjl139090 * Need to adjust hw_copy_limit* from our 167925cf1a30Sjl139090 * pessimistic uniprocessor value to a more 168025cf1a30Sjl139090 * optimistic UP value *iff* it hasn't been 168125cf1a30Sjl139090 * reset. 168225cf1a30Sjl139090 */ 168325cf1a30Sjl139090 if ((ecache_size > 1048576) && 168425cf1a30Sjl139090 (priv_hcl_8 == hw_copy_limit_8)) { 168525cf1a30Sjl139090 if (ecache_size <= 2097152) 168625cf1a30Sjl139090 hw_copy_limit_8 = 4 * 168725cf1a30Sjl139090 VIS_COPY_THRESHOLD; 168825cf1a30Sjl139090 else if (ecache_size <= 4194304) 168925cf1a30Sjl139090 hw_copy_limit_8 = 4 * 169025cf1a30Sjl139090 VIS_COPY_THRESHOLD; 169125cf1a30Sjl139090 else 169225cf1a30Sjl139090 hw_copy_limit_8 = 4 * 169325cf1a30Sjl139090 VIS_COPY_THRESHOLD; 169425cf1a30Sjl139090 priv_hcl_8 = hw_copy_limit_8; 169525cf1a30Sjl139090 } 169625cf1a30Sjl139090 } 169725cf1a30Sjl139090 } else if (ecache_size < min_ecache_size) { 169825cf1a30Sjl139090 /* 169925cf1a30Sjl139090 * A different ecache size. Can this even happen? 170025cf1a30Sjl139090 */ 170125cf1a30Sjl139090 if (priv_hcl_8 == hw_copy_limit_8) { 170225cf1a30Sjl139090 /* 170325cf1a30Sjl139090 * The previous value that we set 170425cf1a30Sjl139090 * is unchanged (i.e., it hasn't been 170525cf1a30Sjl139090 * scragged by /etc/system). Rewrite it. 
170625cf1a30Sjl139090 */ 170725cf1a30Sjl139090 if (ecache_size <= 1048576) 170825cf1a30Sjl139090 hw_copy_limit_8 = 8 * 170925cf1a30Sjl139090 VIS_COPY_THRESHOLD; 171025cf1a30Sjl139090 else if (ecache_size <= 2097152) 171125cf1a30Sjl139090 hw_copy_limit_8 = 8 * 171225cf1a30Sjl139090 VIS_COPY_THRESHOLD; 171325cf1a30Sjl139090 else if (ecache_size <= 4194304) 171425cf1a30Sjl139090 hw_copy_limit_8 = 8 * 171525cf1a30Sjl139090 VIS_COPY_THRESHOLD; 171625cf1a30Sjl139090 else 171725cf1a30Sjl139090 hw_copy_limit_8 = 10 * 171825cf1a30Sjl139090 VIS_COPY_THRESHOLD; 171925cf1a30Sjl139090 priv_hcl_8 = hw_copy_limit_8; 172025cf1a30Sjl139090 min_ecache_size = ecache_size; 172125cf1a30Sjl139090 } 172225cf1a30Sjl139090 } 172325cf1a30Sjl139090 } 172425cf1a30Sjl139090 } 172525cf1a30Sjl139090 172625cf1a30Sjl139090 #define VIS_BLOCKSIZE 64 172725cf1a30Sjl139090 172825cf1a30Sjl139090 int 172925cf1a30Sjl139090 dtrace_blksuword32_err(uintptr_t addr, uint32_t *data) 173025cf1a30Sjl139090 { 173125cf1a30Sjl139090 int ret, watched; 173225cf1a30Sjl139090 173325cf1a30Sjl139090 watched = watch_disable_addr((void *)addr, VIS_BLOCKSIZE, S_WRITE); 173425cf1a30Sjl139090 ret = dtrace_blksuword32(addr, data, 0); 173525cf1a30Sjl139090 if (watched) 173625cf1a30Sjl139090 watch_enable_addr((void *)addr, VIS_BLOCKSIZE, S_WRITE); 173725cf1a30Sjl139090 173825cf1a30Sjl139090 return (ret); 173925cf1a30Sjl139090 } 174025cf1a30Sjl139090 174125cf1a30Sjl139090 void 174225cf1a30Sjl139090 opl_cpu_reg_init() 174325cf1a30Sjl139090 { 174425cf1a30Sjl139090 uint64_t this_cpu_log; 174525cf1a30Sjl139090 1746b9a675d4Smb158278 if (cpu[getprocessorid()] == &cpu0 && opl_cpu0_log_setup == 1) { 174750eff769Smb158278 /* 174850eff769Smb158278 * Support for "ta 3" 174950eff769Smb158278 */ 175050eff769Smb158278 opl_ta3(); 1751b9a675d4Smb158278 1752b9a675d4Smb158278 /* 1753b9a675d4Smb158278 * If we are being called at boot time on cpu0 the error 1754b9a675d4Smb158278 * log is already set up in cpu_setup. 
Clear the 1755b9a675d4Smb158278 * opl_cpu0_log_setup flag so that a subsequent DR of cpu0 will 1756b9a675d4Smb158278 * do the proper initialization. 1757b9a675d4Smb158278 */ 1758b9a675d4Smb158278 opl_cpu0_log_setup = 0; 175925cf1a30Sjl139090 return; 176050eff769Smb158278 } 176125cf1a30Sjl139090 176225cf1a30Sjl139090 /* 176325cf1a30Sjl139090 * Initialize Error log Scratch register for error handling. 176425cf1a30Sjl139090 */ 176525cf1a30Sjl139090 176625cf1a30Sjl139090 this_cpu_log = va_to_pa((void*)(((uint64_t)opl_err_log) + 176725cf1a30Sjl139090 ERRLOG_BUFSZ * (getprocessorid()))); 176825cf1a30Sjl139090 opl_error_setup(this_cpu_log); 176925cf1a30Sjl139090 } 177025cf1a30Sjl139090 177125cf1a30Sjl139090 /* 177225cf1a30Sjl139090 * Queue one event in ue_queue based on ecc_type_to_info entry. 177325cf1a30Sjl139090 */ 177425cf1a30Sjl139090 static void 177525cf1a30Sjl139090 cpu_queue_one_event(opl_async_flt_t *opl_flt, char *reason, 177625cf1a30Sjl139090 ecc_type_to_info_t *eccp) 177725cf1a30Sjl139090 { 177825cf1a30Sjl139090 struct async_flt *aflt = (struct async_flt *)opl_flt; 177925cf1a30Sjl139090 178025cf1a30Sjl139090 if (reason && 178125cf1a30Sjl139090 strlen(reason) + strlen(eccp->ec_reason) < MAX_REASON_STRING) { 178225cf1a30Sjl139090 (void) strcat(reason, eccp->ec_reason); 178325cf1a30Sjl139090 } 178425cf1a30Sjl139090 178525cf1a30Sjl139090 opl_flt->flt_bit = eccp->ec_afsr_bit; 178625cf1a30Sjl139090 opl_flt->flt_type = eccp->ec_flt_type; 178725cf1a30Sjl139090 aflt->flt_in_memory = cpu_flt_in_memory(opl_flt, opl_flt->flt_bit); 178825cf1a30Sjl139090 aflt->flt_payload = eccp->ec_err_payload; 178925cf1a30Sjl139090 179025cf1a30Sjl139090 ASSERT(aflt->flt_status & (OPL_ECC_SYNC_TRAP|OPL_ECC_URGENT_TRAP)); 1791e98fafb9Sjl139090 cpu_errorq_dispatch(eccp->ec_err_class, (void *)opl_flt, 1792e98fafb9Sjl139090 sizeof (opl_async_flt_t), ue_queue, aflt->flt_panic); 179325cf1a30Sjl139090 } 179425cf1a30Sjl139090 179525cf1a30Sjl139090 /* 179625cf1a30Sjl139090 * Queue events on async 
event queue one event per error bit. 179725cf1a30Sjl139090 * Return number of events queued. 179825cf1a30Sjl139090 */ 179925cf1a30Sjl139090 int 180025cf1a30Sjl139090 cpu_queue_events(opl_async_flt_t *opl_flt, char *reason, uint64_t t_afsr_errs) 180125cf1a30Sjl139090 { 180225cf1a30Sjl139090 struct async_flt *aflt = (struct async_flt *)opl_flt; 180325cf1a30Sjl139090 ecc_type_to_info_t *eccp; 180425cf1a30Sjl139090 int nevents = 0; 180525cf1a30Sjl139090 180625cf1a30Sjl139090 /* 180725cf1a30Sjl139090 * Queue expected errors, error bit and fault type must must match 180825cf1a30Sjl139090 * in the ecc_type_to_info table. 180925cf1a30Sjl139090 */ 181025cf1a30Sjl139090 for (eccp = ecc_type_to_info; t_afsr_errs != 0 && eccp->ec_desc != NULL; 181125cf1a30Sjl139090 eccp++) { 181225cf1a30Sjl139090 if ((eccp->ec_afsr_bit & t_afsr_errs) != 0 && 181325cf1a30Sjl139090 (eccp->ec_flags & aflt->flt_status) != 0) { 181425cf1a30Sjl139090 /* 181525cf1a30Sjl139090 * UE error event can be further 181625cf1a30Sjl139090 * classified/breakdown into finer granularity 181725cf1a30Sjl139090 * based on the flt_eid_mod value set by HW. We do 181825cf1a30Sjl139090 * special handling here so that we can report UE 181925cf1a30Sjl139090 * error in finer granularity as ue_mem, 182025cf1a30Sjl139090 * ue_channel, ue_cpu or ue_path. 
182125cf1a30Sjl139090 */ 182225cf1a30Sjl139090 if (eccp->ec_flt_type == OPL_CPU_SYNC_UE) { 1823e98fafb9Sjl139090 opl_flt->flt_eid_mod = (aflt->flt_stat & 1824e98fafb9Sjl139090 SFSR_EID_MOD) >> SFSR_EID_MOD_SHIFT; 1825e98fafb9Sjl139090 opl_flt->flt_eid_sid = (aflt->flt_stat & 1826e98fafb9Sjl139090 SFSR_EID_SID) >> SFSR_EID_SID_SHIFT; 182725cf1a30Sjl139090 /* 182825cf1a30Sjl139090 * Need to advance eccp pointer by flt_eid_mod 182925cf1a30Sjl139090 * so that we get an appropriate ecc pointer 183025cf1a30Sjl139090 * 183125cf1a30Sjl139090 * EID # of advances 183225cf1a30Sjl139090 * ---------------------------------- 183325cf1a30Sjl139090 * OPL_ERRID_MEM 0 183425cf1a30Sjl139090 * OPL_ERRID_CHANNEL 1 183525cf1a30Sjl139090 * OPL_ERRID_CPU 2 183625cf1a30Sjl139090 * OPL_ERRID_PATH 3 183725cf1a30Sjl139090 */ 183825cf1a30Sjl139090 eccp += opl_flt->flt_eid_mod; 183925cf1a30Sjl139090 } 184025cf1a30Sjl139090 cpu_queue_one_event(opl_flt, reason, eccp); 184125cf1a30Sjl139090 t_afsr_errs &= ~eccp->ec_afsr_bit; 184225cf1a30Sjl139090 nevents++; 184325cf1a30Sjl139090 } 184425cf1a30Sjl139090 } 184525cf1a30Sjl139090 184625cf1a30Sjl139090 return (nevents); 184725cf1a30Sjl139090 } 184825cf1a30Sjl139090 184925cf1a30Sjl139090 /* 185025cf1a30Sjl139090 * Sync. error wrapper functions. 185125cf1a30Sjl139090 * We use these functions in order to transfer here from the 185225cf1a30Sjl139090 * nucleus trap handler information about trap type (data or 185325cf1a30Sjl139090 * instruction) and trap level (0 or above 0). This way we 185425cf1a30Sjl139090 * get rid of using SFSR's reserved bits. 
185525cf1a30Sjl139090 */ 185625cf1a30Sjl139090 185725cf1a30Sjl139090 #define OPL_SYNC_TL0 0 185825cf1a30Sjl139090 #define OPL_SYNC_TL1 1 185925cf1a30Sjl139090 #define OPL_ISYNC_ERR 0 186025cf1a30Sjl139090 #define OPL_DSYNC_ERR 1 186125cf1a30Sjl139090 186225cf1a30Sjl139090 void 186325cf1a30Sjl139090 opl_cpu_isync_tl0_error(struct regs *rp, ulong_t p_sfar, ulong_t p_sfsr) 186425cf1a30Sjl139090 { 186525cf1a30Sjl139090 uint64_t t_sfar = p_sfar; 186625cf1a30Sjl139090 uint64_t t_sfsr = p_sfsr; 186725cf1a30Sjl139090 186825cf1a30Sjl139090 opl_cpu_sync_error(rp, t_sfar, t_sfsr, 186925cf1a30Sjl139090 OPL_SYNC_TL0, OPL_ISYNC_ERR); 187025cf1a30Sjl139090 } 187125cf1a30Sjl139090 187225cf1a30Sjl139090 void 187325cf1a30Sjl139090 opl_cpu_isync_tl1_error(struct regs *rp, ulong_t p_sfar, ulong_t p_sfsr) 187425cf1a30Sjl139090 { 187525cf1a30Sjl139090 uint64_t t_sfar = p_sfar; 187625cf1a30Sjl139090 uint64_t t_sfsr = p_sfsr; 187725cf1a30Sjl139090 187825cf1a30Sjl139090 opl_cpu_sync_error(rp, t_sfar, t_sfsr, 187925cf1a30Sjl139090 OPL_SYNC_TL1, OPL_ISYNC_ERR); 188025cf1a30Sjl139090 } 188125cf1a30Sjl139090 188225cf1a30Sjl139090 void 188325cf1a30Sjl139090 opl_cpu_dsync_tl0_error(struct regs *rp, ulong_t p_sfar, ulong_t p_sfsr) 188425cf1a30Sjl139090 { 188525cf1a30Sjl139090 uint64_t t_sfar = p_sfar; 188625cf1a30Sjl139090 uint64_t t_sfsr = p_sfsr; 188725cf1a30Sjl139090 188825cf1a30Sjl139090 opl_cpu_sync_error(rp, t_sfar, t_sfsr, 188925cf1a30Sjl139090 OPL_SYNC_TL0, OPL_DSYNC_ERR); 189025cf1a30Sjl139090 } 189125cf1a30Sjl139090 189225cf1a30Sjl139090 void 189325cf1a30Sjl139090 opl_cpu_dsync_tl1_error(struct regs *rp, ulong_t p_sfar, ulong_t p_sfsr) 189425cf1a30Sjl139090 { 189525cf1a30Sjl139090 uint64_t t_sfar = p_sfar; 189625cf1a30Sjl139090 uint64_t t_sfsr = p_sfsr; 189725cf1a30Sjl139090 189825cf1a30Sjl139090 opl_cpu_sync_error(rp, t_sfar, t_sfsr, 189925cf1a30Sjl139090 OPL_SYNC_TL1, OPL_DSYNC_ERR); 190025cf1a30Sjl139090 } 190125cf1a30Sjl139090 190225cf1a30Sjl139090 /* 190325cf1a30Sjl139090 * The fj 
sync err handler transfers control here for UE, BERR, TO, TLB_MUL 190425cf1a30Sjl139090 * and TLB_PRT. 190525cf1a30Sjl139090 * This function is designed based on cpu_deferred_error(). 190625cf1a30Sjl139090 */ 190725cf1a30Sjl139090 190825cf1a30Sjl139090 static void 190925cf1a30Sjl139090 opl_cpu_sync_error(struct regs *rp, ulong_t t_sfar, ulong_t t_sfsr, 191025cf1a30Sjl139090 uint_t tl, uint_t derr) 191125cf1a30Sjl139090 { 191225cf1a30Sjl139090 opl_async_flt_t opl_flt; 191325cf1a30Sjl139090 struct async_flt *aflt; 191425cf1a30Sjl139090 int trampolined = 0; 191525cf1a30Sjl139090 char pr_reason[MAX_REASON_STRING]; 191625cf1a30Sjl139090 uint64_t log_sfsr; 191725cf1a30Sjl139090 int expected = DDI_FM_ERR_UNEXPECTED; 191825cf1a30Sjl139090 ddi_acc_hdl_t *hp; 191925cf1a30Sjl139090 192025cf1a30Sjl139090 /* 192125cf1a30Sjl139090 * We need to look at p_flag to determine if the thread detected an 192225cf1a30Sjl139090 * error while dumping core. We can't grab p_lock here, but it's ok 192325cf1a30Sjl139090 * because we just need a consistent snapshot and we know that everyone 192425cf1a30Sjl139090 * else will store a consistent set of bits while holding p_lock. We 192525cf1a30Sjl139090 * don't have to worry about a race because SDOCORE is set once prior 192625cf1a30Sjl139090 * to doing i/o from the process's address space and is never cleared. 
192725cf1a30Sjl139090 */ 192825cf1a30Sjl139090 uint_t pflag = ttoproc(curthread)->p_flag; 192925cf1a30Sjl139090 193025cf1a30Sjl139090 pr_reason[0] = '\0'; 193125cf1a30Sjl139090 193225cf1a30Sjl139090 /* 193325cf1a30Sjl139090 * handle the specific error 193425cf1a30Sjl139090 */ 193525cf1a30Sjl139090 bzero(&opl_flt, sizeof (opl_async_flt_t)); 193625cf1a30Sjl139090 aflt = (struct async_flt *)&opl_flt; 193725cf1a30Sjl139090 aflt->flt_id = gethrtime_waitfree(); 193825cf1a30Sjl139090 aflt->flt_bus_id = getprocessorid(); 193925cf1a30Sjl139090 aflt->flt_inst = CPU->cpu_id; 194025cf1a30Sjl139090 aflt->flt_stat = t_sfsr; 194125cf1a30Sjl139090 aflt->flt_addr = t_sfar; 194225cf1a30Sjl139090 aflt->flt_pc = (caddr_t)rp->r_pc; 194325cf1a30Sjl139090 aflt->flt_prot = (uchar_t)AFLT_PROT_NONE; 194425cf1a30Sjl139090 aflt->flt_class = (uchar_t)CPU_FAULT; 1945e98fafb9Sjl139090 aflt->flt_priv = (uchar_t)(tl == 1 ? 1 : ((rp->r_tstate & 1946e98fafb9Sjl139090 TSTATE_PRIV) ? 1 : 0)); 194725cf1a30Sjl139090 aflt->flt_tl = (uchar_t)tl; 194825cf1a30Sjl139090 aflt->flt_panic = (uchar_t)(tl != 0 || aft_testfatal != 0 || 194925cf1a30Sjl139090 (t_sfsr & (SFSR_TLB_MUL|SFSR_TLB_PRT)) != 0); 195025cf1a30Sjl139090 aflt->flt_core = (pflag & SDOCORE) ? 1 : 0; 195125cf1a30Sjl139090 aflt->flt_status = (derr) ? OPL_ECC_DSYNC_TRAP : OPL_ECC_ISYNC_TRAP; 195225cf1a30Sjl139090 195325cf1a30Sjl139090 /* 195425cf1a30Sjl139090 * If SFSR.FV is not set, both SFSR and SFAR/SFPAR values are uncertain. 195525cf1a30Sjl139090 * So, clear all error bits to avoid mis-handling and force the system 195625cf1a30Sjl139090 * panicked. 195725cf1a30Sjl139090 * We skip all the procedures below down to the panic message call. 
195825cf1a30Sjl139090 */ 195925cf1a30Sjl139090 if (!(t_sfsr & SFSR_FV)) { 196025cf1a30Sjl139090 opl_flt.flt_type = OPL_CPU_INV_SFSR; 196125cf1a30Sjl139090 aflt->flt_panic = 1; 196225cf1a30Sjl139090 aflt->flt_payload = FM_EREPORT_PAYLOAD_SYNC; 1963e98fafb9Sjl139090 cpu_errorq_dispatch(FM_EREPORT_CPU_INV_SFSR, (void *)&opl_flt, 1964e98fafb9Sjl139090 sizeof (opl_async_flt_t), ue_queue, aflt->flt_panic); 196525cf1a30Sjl139090 fm_panic("%sErrors(s)", "invalid SFSR"); 196625cf1a30Sjl139090 } 196725cf1a30Sjl139090 196825cf1a30Sjl139090 /* 196925cf1a30Sjl139090 * If either UE and MK bit is off, this is not valid UE error. 197025cf1a30Sjl139090 * If it is not valid UE error, clear UE & MK_UE bits to prevent 197125cf1a30Sjl139090 * mis-handling below. 197225cf1a30Sjl139090 * aflt->flt_stat keeps the original bits as a reference. 197325cf1a30Sjl139090 */ 197425cf1a30Sjl139090 if ((t_sfsr & (SFSR_MK_UE|SFSR_UE)) != 197525cf1a30Sjl139090 (SFSR_MK_UE|SFSR_UE)) { 197625cf1a30Sjl139090 t_sfsr &= ~(SFSR_MK_UE|SFSR_UE); 197725cf1a30Sjl139090 } 197825cf1a30Sjl139090 197925cf1a30Sjl139090 /* 198025cf1a30Sjl139090 * If the trap occurred in privileged mode at TL=0, we need to check to 198125cf1a30Sjl139090 * see if we were executing in the kernel under on_trap() or t_lofault 198225cf1a30Sjl139090 * protection. If so, modify the saved registers so that we return 198325cf1a30Sjl139090 * from the trap to the appropriate trampoline routine. 
198425cf1a30Sjl139090 */ 198525cf1a30Sjl139090 if (!aflt->flt_panic && aflt->flt_priv && tl == 0) { 198625cf1a30Sjl139090 if (curthread->t_ontrap != NULL) { 198725cf1a30Sjl139090 on_trap_data_t *otp = curthread->t_ontrap; 198825cf1a30Sjl139090 198925cf1a30Sjl139090 if (otp->ot_prot & OT_DATA_EC) { 199025cf1a30Sjl139090 aflt->flt_prot = (uchar_t)AFLT_PROT_EC; 199125cf1a30Sjl139090 otp->ot_trap |= (ushort_t)OT_DATA_EC; 199225cf1a30Sjl139090 rp->r_pc = otp->ot_trampoline; 199325cf1a30Sjl139090 rp->r_npc = rp->r_pc + 4; 199425cf1a30Sjl139090 trampolined = 1; 199525cf1a30Sjl139090 } 199625cf1a30Sjl139090 199725cf1a30Sjl139090 if ((t_sfsr & (SFSR_TO | SFSR_BERR)) && 199825cf1a30Sjl139090 (otp->ot_prot & OT_DATA_ACCESS)) { 199925cf1a30Sjl139090 aflt->flt_prot = (uchar_t)AFLT_PROT_ACCESS; 200025cf1a30Sjl139090 otp->ot_trap |= (ushort_t)OT_DATA_ACCESS; 200125cf1a30Sjl139090 rp->r_pc = otp->ot_trampoline; 200225cf1a30Sjl139090 rp->r_npc = rp->r_pc + 4; 200325cf1a30Sjl139090 trampolined = 1; 200425cf1a30Sjl139090 /* 200525cf1a30Sjl139090 * for peeks and caut_gets errors are expected 200625cf1a30Sjl139090 */ 200725cf1a30Sjl139090 hp = (ddi_acc_hdl_t *)otp->ot_handle; 200825cf1a30Sjl139090 if (!hp) 200925cf1a30Sjl139090 expected = DDI_FM_ERR_PEEK; 201025cf1a30Sjl139090 else if (hp->ah_acc.devacc_attr_access == 201125cf1a30Sjl139090 DDI_CAUTIOUS_ACC) 201225cf1a30Sjl139090 expected = DDI_FM_ERR_EXPECTED; 201325cf1a30Sjl139090 } 201425cf1a30Sjl139090 201525cf1a30Sjl139090 } else if (curthread->t_lofault) { 201625cf1a30Sjl139090 aflt->flt_prot = AFLT_PROT_COPY; 201725cf1a30Sjl139090 rp->r_g1 = EFAULT; 201825cf1a30Sjl139090 rp->r_pc = curthread->t_lofault; 201925cf1a30Sjl139090 rp->r_npc = rp->r_pc + 4; 202025cf1a30Sjl139090 trampolined = 1; 202125cf1a30Sjl139090 } 202225cf1a30Sjl139090 } 202325cf1a30Sjl139090 202425cf1a30Sjl139090 /* 202525cf1a30Sjl139090 * If we're in user mode or we're doing a protected copy, we either 202625cf1a30Sjl139090 * want the ASTON code below to send a 
signal to the user process 202725cf1a30Sjl139090 * or we want to panic if aft_panic is set. 202825cf1a30Sjl139090 * 202925cf1a30Sjl139090 * If we're in privileged mode and we're not doing a copy, then we 203025cf1a30Sjl139090 * need to check if we've trampolined. If we haven't trampolined, 203125cf1a30Sjl139090 * we should panic. 203225cf1a30Sjl139090 */ 203325cf1a30Sjl139090 if (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY) { 203425cf1a30Sjl139090 if (t_sfsr & (SFSR_ERRS & ~(SFSR_BERR | SFSR_TO))) 203525cf1a30Sjl139090 aflt->flt_panic |= aft_panic; 203625cf1a30Sjl139090 } else if (!trampolined) { 203725cf1a30Sjl139090 aflt->flt_panic = 1; 203825cf1a30Sjl139090 } 203925cf1a30Sjl139090 204025cf1a30Sjl139090 /* 204125cf1a30Sjl139090 * If we've trampolined due to a privileged TO or BERR, or if an 204225cf1a30Sjl139090 * unprivileged TO or BERR occurred, we don't want to enqueue an 204325cf1a30Sjl139090 * event for that TO or BERR. Queue all other events (if any) besides 204425cf1a30Sjl139090 * the TO/BERR. 204525cf1a30Sjl139090 */ 204625cf1a30Sjl139090 log_sfsr = t_sfsr; 204725cf1a30Sjl139090 if (trampolined) { 204825cf1a30Sjl139090 log_sfsr &= ~(SFSR_TO | SFSR_BERR); 204925cf1a30Sjl139090 } else if (!aflt->flt_priv) { 205025cf1a30Sjl139090 /* 205125cf1a30Sjl139090 * User mode, suppress messages if 205225cf1a30Sjl139090 * cpu_berr_to_verbose is not set. 
205325cf1a30Sjl139090 */ 205425cf1a30Sjl139090 if (!cpu_berr_to_verbose) 205525cf1a30Sjl139090 log_sfsr &= ~(SFSR_TO | SFSR_BERR); 205625cf1a30Sjl139090 } 205725cf1a30Sjl139090 2058e98fafb9Sjl139090 if (((log_sfsr & SFSR_ERRS) && (cpu_queue_events(&opl_flt, pr_reason, 2059e98fafb9Sjl139090 t_sfsr) == 0)) || ((t_sfsr & SFSR_ERRS) == 0)) { 206025cf1a30Sjl139090 opl_flt.flt_type = OPL_CPU_INV_SFSR; 206125cf1a30Sjl139090 aflt->flt_payload = FM_EREPORT_PAYLOAD_SYNC; 2062e98fafb9Sjl139090 cpu_errorq_dispatch(FM_EREPORT_CPU_INV_SFSR, (void *)&opl_flt, 2063e98fafb9Sjl139090 sizeof (opl_async_flt_t), ue_queue, aflt->flt_panic); 206425cf1a30Sjl139090 } 206525cf1a30Sjl139090 206625cf1a30Sjl139090 if (t_sfsr & (SFSR_UE|SFSR_TO|SFSR_BERR)) { 206725cf1a30Sjl139090 cpu_run_bus_error_handlers(aflt, expected); 206825cf1a30Sjl139090 } 206925cf1a30Sjl139090 207025cf1a30Sjl139090 /* 207125cf1a30Sjl139090 * Panic here if aflt->flt_panic has been set. Enqueued errors will 207225cf1a30Sjl139090 * be logged as part of the panic flow. 207325cf1a30Sjl139090 */ 207425cf1a30Sjl139090 if (aflt->flt_panic) { 207525cf1a30Sjl139090 if (pr_reason[0] == 0) 207625cf1a30Sjl139090 strcpy(pr_reason, "invalid SFSR "); 207725cf1a30Sjl139090 207825cf1a30Sjl139090 fm_panic("%sErrors(s)", pr_reason); 207925cf1a30Sjl139090 } 208025cf1a30Sjl139090 208125cf1a30Sjl139090 /* 208225cf1a30Sjl139090 * If we queued an error and we are going to return from the trap and 208325cf1a30Sjl139090 * the error was in user mode or inside of a copy routine, set AST flag 208425cf1a30Sjl139090 * so the queue will be drained before returning to user mode. The 208525cf1a30Sjl139090 * AST processing will also act on our failure policy. 
208625cf1a30Sjl139090 */ 208725cf1a30Sjl139090 if (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY) { 208825cf1a30Sjl139090 int pcb_flag = 0; 208925cf1a30Sjl139090 2090e98fafb9Sjl139090 if (t_sfsr & (SFSR_ERRS & ~(SFSR_BERR | SFSR_TO))) 209125cf1a30Sjl139090 pcb_flag |= ASYNC_HWERR; 209225cf1a30Sjl139090 209325cf1a30Sjl139090 if (t_sfsr & SFSR_BERR) 209425cf1a30Sjl139090 pcb_flag |= ASYNC_BERR; 209525cf1a30Sjl139090 209625cf1a30Sjl139090 if (t_sfsr & SFSR_TO) 209725cf1a30Sjl139090 pcb_flag |= ASYNC_BTO; 209825cf1a30Sjl139090 209925cf1a30Sjl139090 ttolwp(curthread)->lwp_pcb.pcb_flags |= pcb_flag; 210025cf1a30Sjl139090 aston(curthread); 210125cf1a30Sjl139090 } 210225cf1a30Sjl139090 } 210325cf1a30Sjl139090 210425cf1a30Sjl139090 /*ARGSUSED*/ 210525cf1a30Sjl139090 void 210625cf1a30Sjl139090 opl_cpu_urgent_error(struct regs *rp, ulong_t p_ugesr, ulong_t tl) 210725cf1a30Sjl139090 { 210825cf1a30Sjl139090 opl_async_flt_t opl_flt; 210925cf1a30Sjl139090 struct async_flt *aflt; 211025cf1a30Sjl139090 char pr_reason[MAX_REASON_STRING]; 211125cf1a30Sjl139090 211225cf1a30Sjl139090 /* normalize tl */ 211325cf1a30Sjl139090 tl = (tl >= 2 ? 1 : 0); 211425cf1a30Sjl139090 pr_reason[0] = '\0'; 211525cf1a30Sjl139090 211625cf1a30Sjl139090 bzero(&opl_flt, sizeof (opl_async_flt_t)); 211725cf1a30Sjl139090 aflt = (struct async_flt *)&opl_flt; 211825cf1a30Sjl139090 aflt->flt_id = gethrtime_waitfree(); 211925cf1a30Sjl139090 aflt->flt_bus_id = getprocessorid(); 212025cf1a30Sjl139090 aflt->flt_inst = CPU->cpu_id; 212125cf1a30Sjl139090 aflt->flt_stat = p_ugesr; 212225cf1a30Sjl139090 aflt->flt_pc = (caddr_t)rp->r_pc; 212325cf1a30Sjl139090 aflt->flt_class = (uchar_t)CPU_FAULT; 212425cf1a30Sjl139090 aflt->flt_tl = tl; 2125e98fafb9Sjl139090 aflt->flt_priv = (uchar_t)(tl == 1 ? 1 : ((rp->r_tstate & TSTATE_PRIV) ? 
2126e98fafb9Sjl139090 1 : 0)); 212725cf1a30Sjl139090 aflt->flt_status = OPL_ECC_URGENT_TRAP; 212825cf1a30Sjl139090 aflt->flt_panic = 1; 212925cf1a30Sjl139090 /* 213025cf1a30Sjl139090 * HW does not set mod/sid in case of urgent error. 213125cf1a30Sjl139090 * So we have to set it here. 213225cf1a30Sjl139090 */ 213325cf1a30Sjl139090 opl_flt.flt_eid_mod = OPL_ERRID_CPU; 213425cf1a30Sjl139090 opl_flt.flt_eid_sid = aflt->flt_inst; 213525cf1a30Sjl139090 213625cf1a30Sjl139090 if (cpu_queue_events(&opl_flt, pr_reason, p_ugesr) == 0) { 213725cf1a30Sjl139090 opl_flt.flt_type = OPL_CPU_INV_UGESR; 213825cf1a30Sjl139090 aflt->flt_payload = FM_EREPORT_PAYLOAD_URGENT; 2139e98fafb9Sjl139090 cpu_errorq_dispatch(FM_EREPORT_CPU_INV_URG, (void *)&opl_flt, 2140e98fafb9Sjl139090 sizeof (opl_async_flt_t), ue_queue, aflt->flt_panic); 214125cf1a30Sjl139090 } 214225cf1a30Sjl139090 214325cf1a30Sjl139090 fm_panic("Urgent Error"); 214425cf1a30Sjl139090 } 214525cf1a30Sjl139090 214625cf1a30Sjl139090 /* 214725cf1a30Sjl139090 * Initialization error counters resetting. 
214825cf1a30Sjl139090 */ 214925cf1a30Sjl139090 /* ARGSUSED */ 215025cf1a30Sjl139090 static void 215125cf1a30Sjl139090 opl_ras_online(void *arg, cpu_t *cp, cyc_handler_t *hdlr, cyc_time_t *when) 215225cf1a30Sjl139090 { 215325cf1a30Sjl139090 hdlr->cyh_func = (cyc_func_t)ras_cntr_reset; 215425cf1a30Sjl139090 hdlr->cyh_level = CY_LOW_LEVEL; 215525cf1a30Sjl139090 hdlr->cyh_arg = (void *)(uintptr_t)cp->cpu_id; 215625cf1a30Sjl139090 215725cf1a30Sjl139090 when->cyt_when = cp->cpu_id * (((hrtime_t)NANOSEC * 10)/ NCPU); 215825cf1a30Sjl139090 when->cyt_interval = (hrtime_t)NANOSEC * opl_async_check_interval; 215925cf1a30Sjl139090 } 216025cf1a30Sjl139090 216125cf1a30Sjl139090 void 216225cf1a30Sjl139090 cpu_mp_init(void) 216325cf1a30Sjl139090 { 216425cf1a30Sjl139090 cyc_omni_handler_t hdlr; 216525cf1a30Sjl139090 216625cf1a30Sjl139090 hdlr.cyo_online = opl_ras_online; 216725cf1a30Sjl139090 hdlr.cyo_offline = NULL; 216825cf1a30Sjl139090 hdlr.cyo_arg = NULL; 216925cf1a30Sjl139090 mutex_enter(&cpu_lock); 217025cf1a30Sjl139090 (void) cyclic_add_omni(&hdlr); 217125cf1a30Sjl139090 mutex_exit(&cpu_lock); 217225cf1a30Sjl139090 } 217325cf1a30Sjl139090 2174ca1293cbSjimand int heaplp_use_stlb = 0; 21753cbfd4cfSjimand 217625cf1a30Sjl139090 void 217725cf1a30Sjl139090 mmu_init_kernel_pgsz(struct hat *hat) 217825cf1a30Sjl139090 { 21793cbfd4cfSjimand uint_t tte = page_szc(segkmem_lpsize); 21803cbfd4cfSjimand uchar_t new_cext_primary, new_cext_nucleus; 21813cbfd4cfSjimand 21823cbfd4cfSjimand if (heaplp_use_stlb == 0) { 21833cbfd4cfSjimand /* do not reprogram stlb */ 21843cbfd4cfSjimand tte = TTE8K; 2185febcc4a5Sjimand } else if (!plat_prom_preserve_kctx_is_supported()) { 2186febcc4a5Sjimand /* OBP does not support non-zero primary context */ 2187febcc4a5Sjimand tte = TTE8K; 2188febcc4a5Sjimand heaplp_use_stlb = 0; 21893cbfd4cfSjimand } 21903cbfd4cfSjimand 21913cbfd4cfSjimand new_cext_nucleus = TAGACCEXT_MKSZPAIR(tte, TTE8K); 21923cbfd4cfSjimand new_cext_primary = TAGACCEXT_MKSZPAIR(TTE8K, tte); 
21933cbfd4cfSjimand 21943cbfd4cfSjimand hat->sfmmu_cext = new_cext_primary; 21953cbfd4cfSjimand kcontextreg = ((uint64_t)new_cext_nucleus << CTXREG_NEXT_SHIFT) | 21963cbfd4cfSjimand ((uint64_t)new_cext_primary << CTXREG_EXT_SHIFT); 219725cf1a30Sjl139090 } 219825cf1a30Sjl139090 219925cf1a30Sjl139090 size_t 220025cf1a30Sjl139090 mmu_get_kernel_lpsize(size_t lpsize) 220125cf1a30Sjl139090 { 220225cf1a30Sjl139090 uint_t tte; 220325cf1a30Sjl139090 220425cf1a30Sjl139090 if (lpsize == 0) { 220525cf1a30Sjl139090 /* no setting for segkmem_lpsize in /etc/system: use default */ 220625cf1a30Sjl139090 return (MMU_PAGESIZE4M); 220725cf1a30Sjl139090 } 220825cf1a30Sjl139090 220925cf1a30Sjl139090 for (tte = TTE8K; tte <= TTE4M; tte++) { 221025cf1a30Sjl139090 if (lpsize == TTEBYTES(tte)) 221125cf1a30Sjl139090 return (lpsize); 221225cf1a30Sjl139090 } 221325cf1a30Sjl139090 221425cf1a30Sjl139090 return (TTEBYTES(TTE8K)); 221525cf1a30Sjl139090 } 221625cf1a30Sjl139090 221725cf1a30Sjl139090 /* 221850eff769Smb158278 * Support for ta 3. 221950eff769Smb158278 * We allocate here a buffer for each cpu 222050eff769Smb158278 * for saving the current register window. 222150eff769Smb158278 */ 222250eff769Smb158278 typedef struct win_regs { 222350eff769Smb158278 uint64_t l[8]; 222450eff769Smb158278 uint64_t i[8]; 222550eff769Smb158278 } win_regs_t; 222650eff769Smb158278 static void 222750eff769Smb158278 opl_ta3(void) 222850eff769Smb158278 { 2229b9a675d4Smb158278 /* 2230b9a675d4Smb158278 * opl_ta3 should only be called once at boot time. 2231b9a675d4Smb158278 */ 2232b9a675d4Smb158278 if (opl_ta3_save == NULL) 2233b9a675d4Smb158278 opl_ta3_save = (char *)kmem_alloc(NCPU * sizeof (win_regs_t), 2234b9a675d4Smb158278 KM_SLEEP); 223550eff769Smb158278 } 223650eff769Smb158278 223750eff769Smb158278 /* 223825cf1a30Sjl139090 * The following are functions that are unused in 223925cf1a30Sjl139090 * OPL cpu module. They are defined here to resolve 224025cf1a30Sjl139090 * dependencies in the "unix" module. 
224125cf1a30Sjl139090 * Unused functions that should never be called in 224225cf1a30Sjl139090 * OPL are coded with ASSERT(0). 224325cf1a30Sjl139090 */ 224425cf1a30Sjl139090 224525cf1a30Sjl139090 void 224625cf1a30Sjl139090 cpu_disable_errors(void) 224725cf1a30Sjl139090 {} 224825cf1a30Sjl139090 224925cf1a30Sjl139090 void 225025cf1a30Sjl139090 cpu_enable_errors(void) 225125cf1a30Sjl139090 { ASSERT(0); } 225225cf1a30Sjl139090 225325cf1a30Sjl139090 /*ARGSUSED*/ 225425cf1a30Sjl139090 void 225525cf1a30Sjl139090 cpu_ce_scrub_mem_err(struct async_flt *ecc, boolean_t t) 225625cf1a30Sjl139090 { ASSERT(0); } 225725cf1a30Sjl139090 225825cf1a30Sjl139090 /*ARGSUSED*/ 225925cf1a30Sjl139090 void 226025cf1a30Sjl139090 cpu_faulted_enter(struct cpu *cp) 226125cf1a30Sjl139090 {} 226225cf1a30Sjl139090 226325cf1a30Sjl139090 /*ARGSUSED*/ 226425cf1a30Sjl139090 void 226525cf1a30Sjl139090 cpu_faulted_exit(struct cpu *cp) 226625cf1a30Sjl139090 {} 226725cf1a30Sjl139090 226825cf1a30Sjl139090 /*ARGSUSED*/ 226925cf1a30Sjl139090 void 227025cf1a30Sjl139090 cpu_check_allcpus(struct async_flt *aflt) 227125cf1a30Sjl139090 {} 227225cf1a30Sjl139090 227325cf1a30Sjl139090 /*ARGSUSED*/ 227425cf1a30Sjl139090 void 227525cf1a30Sjl139090 cpu_ce_log_err(struct async_flt *aflt, errorq_elem_t *t) 227625cf1a30Sjl139090 { ASSERT(0); } 227725cf1a30Sjl139090 227825cf1a30Sjl139090 /*ARGSUSED*/ 227925cf1a30Sjl139090 void 228025cf1a30Sjl139090 cpu_check_ce(int flag, uint64_t pa, caddr_t va, uint_t psz) 228125cf1a30Sjl139090 { ASSERT(0); } 228225cf1a30Sjl139090 228325cf1a30Sjl139090 /*ARGSUSED*/ 228425cf1a30Sjl139090 void 228525cf1a30Sjl139090 cpu_ce_count_unum(struct async_flt *ecc, int len, char *unum) 228625cf1a30Sjl139090 { ASSERT(0); } 228725cf1a30Sjl139090 228825cf1a30Sjl139090 /*ARGSUSED*/ 228925cf1a30Sjl139090 void 229025cf1a30Sjl139090 cpu_busy_ecache_scrub(struct cpu *cp) 229125cf1a30Sjl139090 {} 229225cf1a30Sjl139090 229325cf1a30Sjl139090 /*ARGSUSED*/ 229425cf1a30Sjl139090 void 229525cf1a30Sjl139090 
cpu_idle_ecache_scrub(struct cpu *cp) 229625cf1a30Sjl139090 {} 229725cf1a30Sjl139090 229825cf1a30Sjl139090 /* ARGSUSED */ 229925cf1a30Sjl139090 void 230025cf1a30Sjl139090 cpu_change_speed(uint64_t divisor, uint64_t arg2) 230125cf1a30Sjl139090 { ASSERT(0); } 230225cf1a30Sjl139090 230325cf1a30Sjl139090 void 230425cf1a30Sjl139090 cpu_init_cache_scrub(void) 230525cf1a30Sjl139090 {} 230625cf1a30Sjl139090 230725cf1a30Sjl139090 /* ARGSUSED */ 230825cf1a30Sjl139090 int 230925cf1a30Sjl139090 cpu_get_mem_sid(char *unum, char *buf, int buflen, int *lenp) 231025cf1a30Sjl139090 { 23110cc8ae86Sav145390 if (&plat_get_mem_sid) { 23120cc8ae86Sav145390 return (plat_get_mem_sid(unum, buf, buflen, lenp)); 23130cc8ae86Sav145390 } else { 231425cf1a30Sjl139090 return (ENOTSUP); 231525cf1a30Sjl139090 } 23160cc8ae86Sav145390 } 231725cf1a30Sjl139090 231825cf1a30Sjl139090 /* ARGSUSED */ 231925cf1a30Sjl139090 int 232025cf1a30Sjl139090 cpu_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *addrp) 232125cf1a30Sjl139090 { 23220cc8ae86Sav145390 if (&plat_get_mem_addr) { 23230cc8ae86Sav145390 return (plat_get_mem_addr(unum, sid, offset, addrp)); 23240cc8ae86Sav145390 } else { 232525cf1a30Sjl139090 return (ENOTSUP); 232625cf1a30Sjl139090 } 23270cc8ae86Sav145390 } 232825cf1a30Sjl139090 232925cf1a30Sjl139090 /* ARGSUSED */ 233025cf1a30Sjl139090 int 233125cf1a30Sjl139090 cpu_get_mem_offset(uint64_t flt_addr, uint64_t *offp) 233225cf1a30Sjl139090 { 23330cc8ae86Sav145390 if (&plat_get_mem_offset) { 23340cc8ae86Sav145390 return (plat_get_mem_offset(flt_addr, offp)); 23350cc8ae86Sav145390 } else { 233625cf1a30Sjl139090 return (ENOTSUP); 233725cf1a30Sjl139090 } 23380cc8ae86Sav145390 } 233925cf1a30Sjl139090 234025cf1a30Sjl139090 /*ARGSUSED*/ 234125cf1a30Sjl139090 void 234225cf1a30Sjl139090 itlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag) 234325cf1a30Sjl139090 { ASSERT(0); } 234425cf1a30Sjl139090 234525cf1a30Sjl139090 /*ARGSUSED*/ 234625cf1a30Sjl139090 void 234725cf1a30Sjl139090 
dtlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag) 234825cf1a30Sjl139090 { ASSERT(0); } 2349f4b0f0a6Shyw 2350f4b0f0a6Shyw /*ARGSUSED*/ 2351f4b0f0a6Shyw void 2352f4b0f0a6Shyw read_ecc_data(struct async_flt *aflt, short verbose, short ce_err) 2353f4b0f0a6Shyw { ASSERT(0); } 2354f4b0f0a6Shyw 2355f4b0f0a6Shyw /*ARGSUSED*/ 2356f4b0f0a6Shyw int 2357f4b0f0a6Shyw ce_scrub_xdiag_recirc(struct async_flt *aflt, errorq_t *eqp, 2358f4b0f0a6Shyw errorq_elem_t *eqep, size_t afltoffset) 2359f4b0f0a6Shyw { 2360f4b0f0a6Shyw ASSERT(0); 2361f4b0f0a6Shyw return (0); 2362f4b0f0a6Shyw } 2363f4b0f0a6Shyw 2364f4b0f0a6Shyw /*ARGSUSED*/ 2365f4b0f0a6Shyw char * 2366f4b0f0a6Shyw flt_to_error_type(struct async_flt *aflt) 2367f4b0f0a6Shyw { 2368f4b0f0a6Shyw ASSERT(0); 2369f4b0f0a6Shyw return (NULL); 2370f4b0f0a6Shyw } 23711ba18ff1Sjimand 23721ba18ff1Sjimand #define PROM_SPARC64VII_MODE_PROPNAME "SPARC64-VII-mode" 23731ba18ff1Sjimand 23741ba18ff1Sjimand /* 23751ba18ff1Sjimand * Check for existence of OPL OBP property that indicates 23761ba18ff1Sjimand * SPARC64-VII support. By default, only enable Jupiter 23771ba18ff1Sjimand * features if the property is present. It will be 23781ba18ff1Sjimand * present in all-Jupiter domains by OBP if the domain has 23791ba18ff1Sjimand * been selected by the user on the system controller to 23801ba18ff1Sjimand * run in Jupiter mode. Basically, this OBP property must 23811ba18ff1Sjimand * be present to turn on the cpu_alljupiter flag. 23821ba18ff1Sjimand */ 23831ba18ff1Sjimand static int 23841ba18ff1Sjimand prom_SPARC64VII_support_enabled(void) 23851ba18ff1Sjimand { 23861ba18ff1Sjimand int val; 23871ba18ff1Sjimand 23881ba18ff1Sjimand return ((prom_getprop(prom_rootnode(), PROM_SPARC64VII_MODE_PROPNAME, 23891ba18ff1Sjimand (caddr_t)&val) == 0) ? 
1 : 0); 23901ba18ff1Sjimand } 2391febcc4a5Sjimand 2392febcc4a5Sjimand #define PROM_KCTX_PRESERVED_PROPNAME "context0-page-size-preserved" 2393febcc4a5Sjimand 2394febcc4a5Sjimand /* 2395febcc4a5Sjimand * Check for existence of OPL OBP property that indicates support for 2396febcc4a5Sjimand * preserving Solaris kernel page sizes when entering OBP. We need to 2397febcc4a5Sjimand * check the prom tree since the ddi tree is not yet built when the 2398febcc4a5Sjimand * platform startup sequence is called. 2399febcc4a5Sjimand */ 2400febcc4a5Sjimand static int 2401febcc4a5Sjimand plat_prom_preserve_kctx_is_supported(void) 2402febcc4a5Sjimand { 2403febcc4a5Sjimand pnode_t pnode; 2404febcc4a5Sjimand int val; 2405febcc4a5Sjimand 2406febcc4a5Sjimand /* 2407febcc4a5Sjimand * Check for existence of context0-page-size-preserved property 2408febcc4a5Sjimand * in virtual-memory prom node. 2409febcc4a5Sjimand */ 2410febcc4a5Sjimand pnode = (pnode_t)prom_getphandle(prom_mmu_ihandle()); 2411febcc4a5Sjimand return ((prom_getprop(pnode, PROM_KCTX_PRESERVED_PROPNAME, 2412febcc4a5Sjimand (caddr_t)&val) == 0) ? 1 : 0); 2413febcc4a5Sjimand } 2414