/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/cpu.h>
#include <sys/elf_SPARC.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <sys/cpuvar.h>
#include <sys/spitregs.h>
#include <sys/async.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dditypes.h>
#include <sys/sunddi.h>
#include <sys/cpu_module.h>
#include <sys/prom_debug.h>
#include <sys/vmsystm.h>
#include <sys/prom_plat.h>
#include <sys/sysmacros.h>
#include <sys/intreg.h>
#include <sys/machtrap.h>
#include <sys/ontrap.h>
#include <sys/ivintr.h>
#include <sys/atomic.h>
#include <sys/panic.h>
#include <sys/ndifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/cpu/UltraSPARC-II.h>
#include <sys/ddi.h>
#include <sys/ecc_kstat.h>
#include <sys/watchpoint.h>
#include <sys/dtrace.h>
#include <sys/errclassify.h>

uchar_t	*ctx_pgsz_array = NULL;

/*
 * Structure for the 8 byte ecache data dump and the associated AFSR state.
 * There will be 8 of these structures used to dump an ecache line (64 bytes).
 */
typedef struct sf_ec_data_elm {
	uint64_t ec_d8;
	uint64_t ec_afsr;
} ec_data_t;

/*
 * Define spitfire (Ultra I/II) specific asynchronous error structure
 */
typedef struct spitfire_async_flt {
	struct async_flt cmn_asyncflt;	/* common - see sun4u/sys/async.h */
	ushort_t flt_type;		/* types of faults - cpu specific */
	ec_data_t flt_ec_data[8];	/* for E$ or mem dump/state */
	uint64_t flt_ec_tag;		/* E$ tag info */
	int flt_ec_lcnt;		/* number of bad E$ lines */
	ushort_t flt_sdbh;		/* UDBH reg */
	ushort_t flt_sdbl;		/* UDBL reg */
} spitf_async_flt;

/*
 * Prototypes for support routines in spitfire_asm.s:
 */
extern void flush_ecache(uint64_t physaddr, size_t size, size_t linesize);
extern uint64_t get_lsu(void);
extern void set_lsu(uint64_t ncc);
extern void get_ecache_dtag(uint32_t ecache_idx, uint64_t *data, uint64_t *tag,
		uint64_t *oafsr, uint64_t *acc_afsr);
extern uint64_t check_ecache_line(uint32_t id, uint64_t *acc_afsr);
extern uint64_t get_ecache_tag(uint32_t id, uint64_t *nafsr,
		uint64_t *acc_afsr);
extern uint64_t read_and_clear_afsr();
extern void write_ec_tag_parity(uint32_t id);
extern void write_hb_ec_tag_parity(uint32_t id);

/*
 * Spitfire module routines:
 */
static void cpu_async_log_err(void *flt);
/*PRINTFLIKE6*/
static void cpu_aflt_log(int ce_code, int tagnum, spitf_async_flt *spflt,
    uint_t logflags, const char *endstr, const char *fmt, ...);

static void cpu_read_paddr(struct async_flt *aflt, short verbose, short ce_err);
static void cpu_ce_log_status(spitf_async_flt *spf_flt, char *unum);
static void cpu_log_ecmem_info(spitf_async_flt *spf_flt);

static void log_ce_err(struct async_flt *aflt, char *unum);
static void log_ue_err(struct async_flt *aflt, char *unum);
static void check_misc_err(spitf_async_flt *spf_flt);
static ushort_t ecc_gen(uint_t high_bytes, uint_t low_bytes);
static int check_ecc(struct async_flt *aflt);
static uint_t get_cpu_status(uint64_t arg);
static uint64_t clear_errors(spitf_async_flt *spf_flt, uint64_t *acc_afsr);
static void scan_ecache(uint64_t *afar, ec_data_t *data, uint64_t *tag,
		int *m, uint64_t *afsr);
static void ecache_kstat_init(struct cpu *cp);
static void ecache_scrub_log(ec_data_t *ec_data, uint64_t ec_tag,
		uint64_t paddr, int mpb, uint64_t);
static uint64_t ecache_scrub_misc_err(int, uint64_t);
static void ecache_scrub_tag_err(uint64_t, uchar_t, uint32_t);
static void ecache_page_retire(void *);
static int ecc_kstat_update(kstat_t *ksp, int rw);
static int ce_count_unum(int status, int len, char *unum);
static void add_leaky_bucket_timeout(void);
static int synd_to_synd_code(int synd_status, ushort_t synd);

extern uint_t read_all_memscrub;
extern void memscrub_run(void);

static uchar_t	isus2i;			/* set if sabre */
static uchar_t	isus2e;			/* set if hummingbird */

/*
 * Default ecache mask and shift settings for Spitfire.  If we detect a
 * different CPU implementation, we will modify these values at boot time.
 */
static uint64_t cpu_ec_tag_mask		= S_ECTAG_MASK;
static uint64_t cpu_ec_state_mask	= S_ECSTATE_MASK;
static uint64_t cpu_ec_par_mask		= S_ECPAR_MASK;
static int cpu_ec_par_shift		= S_ECPAR_SHIFT;
static int cpu_ec_tag_shift		= S_ECTAG_SHIFT;
static int cpu_ec_state_shift		= S_ECSTATE_SHIFT;
static uchar_t cpu_ec_state_exl		= S_ECSTATE_EXL;
static uchar_t cpu_ec_state_mod		= S_ECSTATE_MOD;
static uchar_t cpu_ec_state_shr		= S_ECSTATE_SHR;
static uchar_t cpu_ec_state_own		= S_ECSTATE_OWN;
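
/*
 * Reading aid (informal, not a functional change): the S_* defaults above
 * describe the Spitfire/Blackbird ecache tag layout.  On Sabre and
 * Hummingbird parts they are replaced at boot with the SB_* and HB_*
 * equivalents; see cpu_aflt_size() below, which also latches isus2i/isus2e
 * once cpunodes[] is available.
 */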

/*
 * Default ecache state bits for Spitfire.  These individual bits indicate if
 * the given line is in any of the valid or modified states, respectively.
 * Again, we modify these at boot if we detect a different CPU.
 */
static uchar_t cpu_ec_state_valid	= S_ECSTATE_VALID;
static uchar_t cpu_ec_state_dirty	= S_ECSTATE_DIRTY;
static uchar_t cpu_ec_parity		= S_EC_PARITY;
static uchar_t cpu_ec_state_parity	= S_ECSTATE_PARITY;

/*
 * This table is used to determine which bit(s) is(are) bad when an ECC
 * error occurs.  The array is indexed by an 8-bit syndrome.  The entries
 * of this array have the following semantics:
 *
 *	00-63	The number of the bad bit, when only one bit is bad.
 *	64	ECC bit C0 is bad.
 *	65	ECC bit C1 is bad.
 *	66	ECC bit C2 is bad.
 *	67	ECC bit C3 is bad.
 *	68	ECC bit C4 is bad.
 *	69	ECC bit C5 is bad.
 *	70	ECC bit C6 is bad.
 *	71	ECC bit C7 is bad.
 *	72	Two bits are bad.
 *	73	Three bits are bad.
 *	74	Four bits are bad.
 *	75	More than Four bits are bad.
 *	76	NO bits are bad.
 * Based on "Galaxy Memory Subsystem SPECIFICATION" rev 0.6, pg. 28.
 */

#define	C0	64
#define	C1	65
#define	C2	66
#define	C3	67
#define	C4	68
#define	C5	69
#define	C6	70
#define	C7	71
#define	M2	72
#define	M3	73
#define	M4	74
#define	MX	75
#define	NA	76

#define	SYND_IS_SINGLE_BIT_DATA(synd_code)	((synd_code >= 0) && \
						    (synd_code < C0))
#define	SYND_IS_SINGLE_BIT_CHK(synd_code)	((synd_code >= C0) && \
						    (synd_code <= C7))
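
/*
 * Illustrative reading of the table below (an informal aid, not part of the
 * original commentary): the 8-bit ECC syndrome is used directly as an index,
 * so a syndrome of 0x01 yields C0 (check bit 0 in error), 0x13 yields 32
 * (data bit 32 in error, for which SYND_IS_SINGLE_BIT_DATA() is true), and
 * 0x03 yields M2 (two bits in error).  A syndrome of 0 maps to NA, i.e. no
 * bits in error.
 */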

static char ecc_syndrome_tab[] =
{
	NA, C0, C1, M2, C2, M2, M2, M3, C3, M2, M2, M3, M2, M3, M3, M4,
	C4, M2, M2, 32, M2, 57, MX, M2, M2, 37, 49, M2, 40, M2, M2, 44,
	C5, M2, M2, 33, M2, 61,  4, M2, M2, MX, 53, M2, 45, M2, M2, 41,
	M2,  0,  1, M2, 10, M2, M2, MX, 15, M2, M2, MX, M2, M3, M3, M2,
	C6, M2, M2, 42, M2, 59, 39, M2, M2, MX, 51, M2, 34, M2, M2, 46,
	M2, 25, 29, M2, 27, M4, M2, MX, 31, M2, M4, MX, M2, MX, MX, M2,
	M2, MX, 36, M2,  7, M2, M2, 54, MX, M2, M2, 62, M2, 48, 56, M2,
	M3, M2, M2, MX, M2, MX, 22, M2, M2, 18, MX, M2, M3, M2, M2, MX,
	C7, M2, M2, 47, M2, 63, MX, M2, M2,  6, 55, M2, 35, M2, M2, 43,
	M2,  5, MX, M2, MX, M2, M2, 50, 38, M2, M2, 58, M2, 52, 60, M2,
	M2, 17, 21, M2, 19, M4, M2, MX, 23, M2, M4, MX, M2, MX, MX, M2,
	M3, M2, M2, MX, M2, MX, 30, M2, M2, 26, MX, M2, M3, M2, M2, MX,
	M2,  8, 13, M2,  2, M2, M2, M3,  3, M2, M2, M3, M2, MX, MX, M2,
	M3, M2, M2, M3, M2, MX, 16, M2, M2, 20, MX, M2, MX, M2, M2, MX,
	M3, M2, M2, M3, M2, MX, 24, M2, M2, 28, MX, M2, MX, M2, M2, MX,
	M4, 12,  9, M2, 14, M2, M2, MX, 11, M2, M2, MX, M2, MX, MX, M4
};

#define	SYND_TBL_SIZE	256

/*
 * Hack for determining UDBH/UDBL, for later cpu-specific error reporting.
 * Cannot use bit 3 in afar, because it is a valid bit on a Sabre/Hummingbird.
 */
#define	UDBL_REG	0x8000
#define	UDBL(synd)	((synd & UDBL_REG) >> 15)
#define	SYND(synd)	(synd & 0x7FFF)
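
/*
 * Informal example of the encoding above: a flt_synd value of 0x8003 decodes
 * as UDBL(0x8003) == 1 (the error was reported by the lower UDB) and
 * SYND(0x8003) == 0x3 (the ECC syndrome itself).  cpu_ce_error() below builds
 * such values by or-ing UDBL_REG into the UDBL syndrome.
 */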

/*
 * These error types are specific to Spitfire and are used internally for the
 * spitfire fault structure flt_type field.
 */
#define	CPU_UE_ERR		0	/* uncorrectable errors - UEs */
#define	CPU_EDP_LDP_ERR		1	/* LDP or EDP parity error */
#define	CPU_WP_ERR		2	/* WP parity error */
#define	CPU_BTO_BERR_ERR	3	/* bus timeout errors */
#define	CPU_PANIC_CP_ERR	4	/* cp error from panic polling */
#define	CPU_TRAPPING_CP_ERR	5	/* for sabre/hbird only, cp error */
#define	CPU_BADLINE_CI_ERR	6	/* E$ clean_bad line when idle */
#define	CPU_BADLINE_CB_ERR	7	/* E$ clean_bad line when busy */
#define	CPU_BADLINE_DI_ERR	8	/* E$ dirty_bad line when idle */
#define	CPU_BADLINE_DB_ERR	9	/* E$ dirty_bad line when busy */
#define	CPU_ORPHAN_CP_ERR	10	/* Orphan CP error */
#define	CPU_ECACHE_ADDR_PAR_ERR	11	/* Ecache Address parity error */
#define	CPU_ECACHE_STATE_ERR	12	/* Ecache state error */
#define	CPU_ECACHE_ETP_ETS_ERR	13	/* ETP set but ETS is zero */
#define	CPU_ECACHE_TAG_ERR	14	/* Scrub the E$ tag, if state clean */
#define	CPU_ADDITIONAL_ERR	15	/* Additional errors occurred */

/*
 * Macro to access the "Spitfire cpu private" data structure.
 */
#define	CPU_PRIVATE_PTR(cp, x)	(&(((spitfire_private_t *)CPU_PRIVATE(cp))->x))

/*
 * set to 0 to disable automatic retiring of pages on
 * DIMMs that have excessive soft errors
 */
int automatic_page_removal = 1;

/*
 * Heuristic for figuring out which module to replace.
 * Relative likelihood that this P_SYND indicates that this module is bad.
 * We call it a "score", though, not a relative likelihood.
 *
 * Step 1.
 * Assign a score to each byte of P_SYND according to the following rules:
 *	If no bits on (0x00) or all bits on (0xFF), then give it a 5.
 *	If one bit on, give it a 95.
 *	If seven bits on, give it a 10.
 *	If two bits on:
 *		in different nybbles, a 90
 *		in same nybble, but unaligned, 85
 *		in same nybble and as an aligned pair, 80
 *	If six bits on, look at the bits that are off:
 *		in same nybble and as an aligned pair, 15
 *		in same nybble, but unaligned, 20
 *		in different nybbles, a 25
 *	If three bits on:
 *		in different nybbles, no aligned pairs, 75
 *		in different nybbles, one aligned pair, 70
 *		in the same nybble, 65
 *	If five bits on, look at the bits that are off:
 *		in the same nybble, 30
 *		in different nybbles, one aligned pair, 35
 *		in different nybbles, no aligned pairs, 40
 *	If four bits on:
 *		all in one nybble, 45
 *		as two aligned pairs, 50
 *		one aligned pair, 55
 *		no aligned pairs, 60
 *
 * Step 2:
 *	Take the higher of the two scores (one for each byte) as the score
 *	for the module.
 *
 * Print the score for each module, and field service should replace the
 * module with the highest score.
 */

/*
 * In the table below, the first row/column comment indicates the
 * number of bits on in that nybble; the second row/column comment is
 * the hex digit.
 */

static int
p_synd_score_table[256] = {
	/* 0   1   1   2   1   2   2   3   1   2   2   3   2   3   3   4 */
	/* 0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  A,  B,  C,  D,  E,  F */
/* 0 0 */ 5,  95, 95, 80, 95, 85, 85, 65, 95, 85, 85, 65, 80, 65, 65, 45,
/* 1 1 */ 95, 90, 90, 70, 90, 75, 75, 55, 90, 75, 75, 55, 70, 55, 55, 30,
/* 1 2 */ 95, 90, 90, 70, 90, 75, 75, 55, 90, 75, 75, 55, 70, 55, 55, 30,
/* 2 3 */ 80, 70, 70, 50, 70, 55, 55, 35, 70, 55, 55, 35, 50, 35, 35, 15,
/* 1 4 */ 95, 90, 90, 70, 90, 75, 75, 55, 90, 75, 75, 55, 70, 55, 55, 30,
/* 2 5 */ 85, 75, 75, 55, 75, 60, 60, 40, 75, 60, 60, 40, 55, 40, 40, 20,
/* 2 6 */ 85, 75, 75, 55, 75, 60, 60, 40, 75, 60, 60, 40, 55, 40, 40, 20,
/* 3 7 */ 65, 55, 55, 35, 55, 40, 40, 25, 55, 40, 40, 25, 35, 25, 25, 10,
/* 1 8 */ 95, 90, 90, 70, 90, 75, 75, 55, 90, 75, 75, 55, 70, 55, 55, 30,
/* 2 9 */ 85, 75, 75, 55, 75, 60, 60, 40, 75, 60, 60, 40, 55, 40, 40, 20,
/* 2 A */ 85, 75, 75, 55, 75, 60, 60, 40, 75, 60, 60, 40, 55, 40, 40, 20,
/* 3 B */ 65, 55, 55, 35, 55, 40, 40, 25, 55, 40, 40, 25, 35, 25, 25, 10,
/* 2 C */ 80, 70, 70, 50, 70, 55, 55, 35, 70, 55, 55, 35, 50, 35, 35, 15,
/* 3 D */ 65, 55, 55, 35, 55, 40, 40, 25, 55, 40, 40, 25, 35, 25, 25, 10,
/* 3 E */ 65, 55, 55, 35, 55, 40, 40, 25, 55, 40, 40, 25, 35, 25, 25, 10,
/* 4 F */ 45, 30, 30, 15, 30, 20, 20, 10, 30, 20, 20, 10, 15, 10, 10, 5,
};

int
ecc_psynd_score(ushort_t p_synd)
{
	int i, j, a, b;

	i = p_synd & 0xFF;
	j = (p_synd >> 8) & 0xFF;

	a = p_synd_score_table[i];
	b = p_synd_score_table[j];

	return (a > b ? a : b);
}
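
/*
 * Worked example (informal): for p_synd == 0x0311 the low byte is 0x11, two
 * bits on in different nybbles, which scores 90; the high byte is 0x03, an
 * aligned pair within one nybble, which scores 80.  ecc_psynd_score()
 * therefore reports 90 for that module, per Step 2 above.
 */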

/*
 * Async Fault Logging
 *
 * To ease identifying, reading, and filtering async fault log messages, the
 * label [AFT#] is now prepended to each async fault message.  These messages
 * and the logging rules are implemented by cpu_aflt_log(), below.
 *
 * [AFT0] - Tag for log messages that are associated with corrected ECC errors.
 *          This includes both corrected ECC memory and ecache faults.
 *
 * [AFT1] - Tag for log messages that are not ECC corrected (i.e. everything
 *          else except CE errors) with a priority of 1 (highest).  This tag
 *          is also used for panic messages that result from an async fault.
 *
 * [AFT2] - These are lower priority diagnostic messages for uncorrected ECC
 * [AFT3]   or parity errors.  For example, AFT2 is used for the actual dump
 *          of the E-$ data and tags.
 *
 * In a non-DEBUG kernel, AFT > 1 logs will be sent to the system log but not
 * printed on the console.  To send all AFT logs to both the log and the
 * console, set aft_verbose = 1.
 */

#define	CPU_FLTCPU		0x0001	/* print flt_inst as a CPU id */
#define	CPU_SPACE		0x0002	/* print flt_status (data or instr) */
#define	CPU_ERRID		0x0004	/* print flt_id */
#define	CPU_TL			0x0008	/* print flt_tl */
#define	CPU_ERRID_FIRST		0x0010	/* print flt_id first in message */
#define	CPU_AFSR		0x0020	/* print flt_stat as decoded %afsr */
#define	CPU_AFAR		0x0040	/* print flt_addr as %afar */
#define	CPU_AF_PSYND		0x0080	/* print flt_stat %afsr.PSYND */
#define	CPU_AF_ETS		0x0100	/* print flt_stat %afsr.ETS */
#define	CPU_UDBH		0x0200	/* print flt_sdbh and syndrome */
#define	CPU_UDBL		0x0400	/* print flt_sdbl and syndrome */
#define	CPU_FAULTPC		0x0800	/* print flt_pc */
#define	CPU_SYND		0x1000	/* print flt_synd and unum */

#define	CMN_LFLAGS	(CPU_FLTCPU | CPU_SPACE | CPU_ERRID | CPU_TL |	\
				CPU_AFSR | CPU_AFAR | CPU_AF_PSYND |	\
				CPU_AF_ETS | CPU_UDBH | CPU_UDBL |	\
				CPU_FAULTPC)
#define	UE_LFLAGS	(CMN_LFLAGS | CPU_SYND)
#define	CE_LFLAGS	(UE_LFLAGS & ~CPU_UDBH & ~CPU_UDBL & ~CPU_TL &	\
				~CPU_SPACE)
#define	PARERR_LFLAGS	(CMN_LFLAGS)
#define	WP_LFLAGS	(CMN_LFLAGS & ~CPU_SPACE & ~CPU_TL)
#define	CP_LFLAGS	(CMN_LFLAGS & ~CPU_SPACE & ~CPU_TL &	\
				~CPU_FLTCPU & ~CPU_FAULTPC)
#define	BERRTO_LFLAGS	(CMN_LFLAGS)
#define	NO_LFLAGS	(0)

#define	AFSR_FMTSTR0	"\020\1ME"
#define	AFSR_FMTSTR1	"\020\040PRIV\037ISAP\036ETP\035IVUE\034TO"	\
			"\033BERR\032LDP\031CP\030WP\027EDP\026UE\025CE"
#define	UDB_FMTSTR	"\020\012UE\011CE"
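
/*
 * A note on the strings above (an informal reading aid): they appear to
 * follow the kernel's "%b" bit-name convention, where the first character
 * selects the base used to print the raw value and each subsequent pair is a
 * one-origin bit position followed by the name printed when that bit is set.
 * Read that way, UDB_FMTSTR labels bit 8 as "CE" and bit 9 as "UE", which
 * matches the "(sdbh >> 8) & 1" CE test in cpu_ce_error() below.
 */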

/*
 * Maximum number of contexts for Spitfire.
 */
#define	MAX_NCTXS	(1 << 13)

/*
 * Save the cache bootup state for use when internal
 * caches are to be re-enabled after an error occurs.
 */
uint64_t cache_boot_state = 0;

/*
 * PA[31:0] represent Displacement in UPA configuration space.
 */
uint_t	root_phys_addr_lo_mask = 0xffffffff;

/*
 * Spitfire legacy globals
 */
int	itlb_entries;
int	dtlb_entries;

void
cpu_setup(void)
{
	extern int page_retire_messages;
	extern int page_retire_first_ue;
	extern int at_flags;
#if defined(SF_ERRATA_57)
	extern caddr_t errata57_limit;
#endif
	extern int disable_text_largepages;
	extern int disable_initdata_largepages;

	cache |= (CACHE_VAC | CACHE_PTAG | CACHE_IOCOHERENT);

	at_flags = EF_SPARC_32PLUS | EF_SPARC_SUN_US1;

	/*
	 * Spitfire isn't currently FMA-aware, so we have to enable the
	 * page retirement messages. We also change the default policy
	 * for UE retirement to allow clearing of transient errors.
	 */
	page_retire_messages = 1;
	page_retire_first_ue = 0;

	/*
	 * save the cache bootup state.
	 */
	cache_boot_state = get_lsu() & (LSU_IC | LSU_DC);

	/*
	 * Use the maximum number of contexts available for Spitfire unless
	 * it has been tuned for debugging.
	 * We are checking against 0 here since this value can be patched
	 * while booting.  It cannot be patched via /etc/system since it
	 * will be patched too late and thus cause the system to panic.
	 */
	if (nctxs == 0)
		nctxs = MAX_NCTXS;

	if (use_page_coloring) {
		do_pg_coloring = 1;
		if (use_virtual_coloring)
			do_virtual_coloring = 1;
	}

	/*
	 * Tune pp_slots to use up to 1/8th of the tlb entries.
	 */
	pp_slots = MIN(8, MAXPP_SLOTS);

	/*
	 * Block stores invalidate all pages of the d$ so pagecopy
	 * et al. do not need virtual translations with virtual
	 * coloring taken into consideration.
	 */
	pp_consistent_coloring = 0;

	isa_list =
	    "sparcv9+vis sparcv9 "
	    "sparcv8plus+vis sparcv8plus "
	    "sparcv8 sparcv8-fsmuld sparcv7 sparc";

	cpu_hwcap_flags = AV_SPARC_VIS;

	/*
	 * On Spitfire, there's a hole in the address space
	 * that we must never map (the hardware only supports 44 bits of
	 * virtual address).  Later CPUs are expected to have wider
	 * supported address ranges.
	 *
	 * See address map on p23 of the UltraSPARC 1 user's manual.
	 */
	hole_start = (caddr_t)0x80000000000ull;
	hole_end = (caddr_t)0xfffff80000000000ull;

	/*
	 * A spitfire call bug requires us to be a further 4Gbytes of
	 * firewall from the spec.
	 *
	 * See Spitfire Errata #21
	 */
	hole_start = (caddr_t)((uintptr_t)hole_start - (1ul << 32));
	hole_end = (caddr_t)((uintptr_t)hole_end + (1ul << 32));
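
	/*
	 * For reference (informal arithmetic derived from the constants
	 * above): the Errata #21 adjustment widens the unmapped window by
	 * 4Gbytes on each side, so the hole effectively runs from
	 * 0x7ff00000000 up to 0xfffff80100000000, just outside the 44-bit
	 * sign-extended virtual address range described above.
	 */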

	/*
	 * The kpm mapping window.
	 * kpm_size:
	 *	The size of a single kpm range.
	 *	The overall size will be: kpm_size * vac_colors.
	 * kpm_vbase:
	 *	The virtual start address of the kpm range within the kernel
	 *	virtual address space. kpm_vbase has to be kpm_size aligned.
	 */
	kpm_size = (size_t)(2ull * 1024 * 1024 * 1024 * 1024); /* 2TB */
	kpm_size_shift = 41;
	kpm_vbase = (caddr_t)0xfffffa0000000000ull; /* 16EB - 6TB */

#if defined(SF_ERRATA_57)
	errata57_limit = (caddr_t)0x80000000ul;
#endif

	/*
	 * Allow only 8K, 64K and 4M pages for text by default.
	 * Allow only 8K and 64K pages for initialized data segments by
	 * default.
	 */
	disable_text_largepages = (1 << TTE512K) | (1 << TTE32M) |
	    (1 << TTE256M);
	disable_initdata_largepages = (1 << TTE512K) | (1 << TTE4M) |
	    (1 << TTE32M) | (1 << TTE256M);
}

static int
getintprop(pnode_t node, char *name, int deflt)
{
	int	value;

	switch (prom_getproplen(node, name)) {
	case 0:
		value = 1;	/* boolean properties */
		break;

	case sizeof (int):
		(void) prom_getprop(node, name, (caddr_t)&value);
		break;

	default:
		value = deflt;
		break;
	}

	return (value);
}

/*
 * Set the magic constants of the implementation.
 */
void
cpu_fiximp(pnode_t dnode)
{
	extern int vac_size, vac_shift;
	extern uint_t vac_mask;
	extern int dcache_line_mask;
	int i, a;
	static struct {
		char	*name;
		int	*var;
	} prop[] = {
		"dcache-size",		&dcache_size,
		"dcache-line-size",	&dcache_linesize,
		"icache-size",		&icache_size,
		"icache-line-size",	&icache_linesize,
		"ecache-size",		&ecache_size,
		"ecache-line-size",	&ecache_alignsize,
		"ecache-associativity",	&ecache_associativity,
		"#itlb-entries",	&itlb_entries,
		"#dtlb-entries",	&dtlb_entries,
	};

	for (i = 0; i < sizeof (prop) / sizeof (prop[0]); i++) {
		if ((a = getintprop(dnode, prop[i].name, -1)) != -1) {
			*prop[i].var = a;
		}
	}

	ecache_setsize = ecache_size / ecache_associativity;

	vac_size = S_VAC_SIZE;
	vac_mask = MMU_PAGEMASK & (vac_size - 1);
	i = 0; a = vac_size;
	while (a >>= 1)
		++i;
	vac_shift = i;
	shm_alignment = vac_size;
	vac = 1;

	dcache_line_mask = (dcache_size - 1) & ~(dcache_linesize - 1);
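
	/*
	 * Informal note on the computation above: the while loop leaves
	 * vac_shift equal to log2(vac_size).  Assuming, for illustration
	 * only, that S_VAC_SIZE were 16K (0x4000), the loop would yield
	 * vac_shift == 14 and vac_mask == (MMU_PAGEMASK & 0x3fff); the real
	 * value is whatever S_VAC_SIZE is defined to be.
	 */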

	/*
	 * UltraSPARC I & II have ecache sizes running
	 * as follows: .25 MB, .5 MB, 1 MB, 2 MB, 4 MB
	 * and 8 MB. Adjust the copyin/copyout limits
	 * according to the cache size. The magic number
	 * of VIS_COPY_THRESHOLD comes from the copyin/copyout code
	 * and its floor of VIS_COPY_THRESHOLD bytes before it will use
	 * VIS instructions.
	 *
	 * We assume that all CPUs on the system have the same size
	 * ecache. We're also called very early in the game.
	 * /etc/system will be parsed *after* we're called so
	 * these values can be overwritten.
	 */

	hw_copy_limit_1 = VIS_COPY_THRESHOLD;
	if (ecache_size <= 524288) {
		hw_copy_limit_2 = VIS_COPY_THRESHOLD;
		hw_copy_limit_4 = VIS_COPY_THRESHOLD;
		hw_copy_limit_8 = VIS_COPY_THRESHOLD;
	} else if (ecache_size == 1048576) {
		hw_copy_limit_2 = 1024;
		hw_copy_limit_4 = 1280;
		hw_copy_limit_8 = 1536;
	} else if (ecache_size == 2097152) {
		hw_copy_limit_2 = 1536;
		hw_copy_limit_4 = 2048;
		hw_copy_limit_8 = 2560;
	} else if (ecache_size == 4194304) {
		hw_copy_limit_2 = 2048;
		hw_copy_limit_4 = 2560;
		hw_copy_limit_8 = 3072;
	} else {
		hw_copy_limit_2 = 2560;
		hw_copy_limit_4 = 3072;
		hw_copy_limit_8 = 3584;
	}
}

/*
 * Called by setcpudelay
 */
void
cpu_init_tick_freq(void)
{
	/*
	 * Determine the cpu frequency by calling
	 * tod_get_cpufrequency. Use an approximate frequency
	 * value computed by the prom if the tod module
	 * is not initialized and loaded yet.
	 */
	if (tod_ops.tod_get_cpufrequency != NULL) {
		mutex_enter(&tod_lock);
		sys_tick_freq = tod_ops.tod_get_cpufrequency();
		mutex_exit(&tod_lock);
	} else {
#if defined(HUMMINGBIRD)
		/*
		 * the hummingbird version of %stick is used as the basis for
		 * low level timing; this provides an independent constant-rate
		 * clock for general system use, and frees power mgmt to set
		 * various cpu clock speeds.
		 */
		if (system_clock_freq == 0)
			cmn_err(CE_PANIC, "invalid system_clock_freq 0x%lx",
			    system_clock_freq);
		sys_tick_freq = system_clock_freq;
#else /* SPITFIRE */
		sys_tick_freq = cpunodes[CPU->cpu_id].clock_freq;
#endif
	}
}


void shipit(int upaid);
extern uint64_t xc_tick_limit;
extern uint64_t xc_tick_jump_limit;

#ifdef	SEND_MONDO_STATS
uint64_t x_early[NCPU][64];
#endif

/*
 * Note: A version of this function is used by the debugger via the KDI,
 * and must be kept in sync with this version.  Any changes made to this
 * function to support new chips or to accommodate errata must also be included
 * in the KDI-specific version.  See spitfire_kdi.c.
 */
void
send_one_mondo(int cpuid)
{
	uint64_t idsr, starttick, endtick;
	int upaid, busy, nack;
	uint64_t tick, tick_prev;
	ulong_t ticks;

	CPU_STATS_ADDQ(CPU, sys, xcalls, 1);
	upaid = CPUID_TO_UPAID(cpuid);
	tick = starttick = gettick();
	shipit(upaid);
	endtick = starttick + xc_tick_limit;
	busy = nack = 0;
	for (;;) {
		idsr = getidsr();
		if (idsr == 0)
			break;
		/*
		 * When we detect an irregular tick jump, we adjust
		 * the timer window to the current tick value.
		 */
		tick_prev = tick;
		tick = gettick();
		ticks = tick - tick_prev;
		if (ticks > xc_tick_jump_limit) {
			endtick = tick + xc_tick_limit;
		} else if (tick > endtick) {
			if (panic_quiesce)
				return;
			cmn_err(CE_PANIC,
			"send mondo timeout (target 0x%x) [%d NACK %d BUSY]",
			    upaid, nack, busy);
		}
		if (idsr & IDSR_BUSY) {
			busy++;
			continue;
		}
		drv_usecwait(1);
		shipit(upaid);
		nack++;
		busy = 0;
	}
#ifdef	SEND_MONDO_STATS
	x_early[getprocessorid()][highbit(gettick() - starttick) - 1]++;
#endif
}

void
send_mondo_set(cpuset_t set)
{
	int i;

	for (i = 0; i < NCPU; i++)
		if (CPU_IN_SET(set, i)) {
			send_one_mondo(i);
			CPUSET_DEL(set, i);
			if (CPUSET_ISNULL(set))
				break;
		}
}

void
syncfpu(void)
{
}

/*
 * Determine the size of the CPU module's error structure in bytes.  This is
 * called once during boot to initialize the error queues.
 */
int
cpu_aflt_size(void)
{
	/*
	 * We need to determine whether this is a sabre, Hummingbird or a
	 * Spitfire/Blackbird impl and set the appropriate state variables for
	 * ecache tag manipulation.  We can't do this in cpu_setup() as it is
	 * too early in the boot flow and the cpunodes are not initialized.
	 * This routine will be called once after cpunodes[] is ready, so do
	 * it here.
	 */
	if (cpunodes[CPU->cpu_id].implementation == SABRE_IMPL) {
		isus2i = 1;
		cpu_ec_tag_mask = SB_ECTAG_MASK;
		cpu_ec_state_mask = SB_ECSTATE_MASK;
		cpu_ec_par_mask = SB_ECPAR_MASK;
		cpu_ec_par_shift = SB_ECPAR_SHIFT;
		cpu_ec_tag_shift = SB_ECTAG_SHIFT;
		cpu_ec_state_shift = SB_ECSTATE_SHIFT;
		cpu_ec_state_exl = SB_ECSTATE_EXL;
		cpu_ec_state_mod = SB_ECSTATE_MOD;

		/* These states do not exist in sabre - set to 0xFF */
		cpu_ec_state_shr = 0xFF;
		cpu_ec_state_own = 0xFF;

		cpu_ec_state_valid = SB_ECSTATE_VALID;
		cpu_ec_state_dirty = SB_ECSTATE_DIRTY;
		cpu_ec_state_parity = SB_ECSTATE_PARITY;
		cpu_ec_parity = SB_EC_PARITY;
	} else if (cpunodes[CPU->cpu_id].implementation == HUMMBRD_IMPL) {
		isus2e = 1;
		cpu_ec_tag_mask = HB_ECTAG_MASK;
		cpu_ec_state_mask = HB_ECSTATE_MASK;
		cpu_ec_par_mask = HB_ECPAR_MASK;
		cpu_ec_par_shift = HB_ECPAR_SHIFT;
		cpu_ec_tag_shift = HB_ECTAG_SHIFT;
		cpu_ec_state_shift = HB_ECSTATE_SHIFT;
		cpu_ec_state_exl = HB_ECSTATE_EXL;
		cpu_ec_state_mod = HB_ECSTATE_MOD;

		/* These states do not exist in hummingbird - set to 0xFF */
		cpu_ec_state_shr = 0xFF;
		cpu_ec_state_own = 0xFF;

		cpu_ec_state_valid = HB_ECSTATE_VALID;
		cpu_ec_state_dirty = HB_ECSTATE_DIRTY;
		cpu_ec_state_parity = HB_ECSTATE_PARITY;
		cpu_ec_parity = HB_EC_PARITY;
	}

	return (sizeof (spitf_async_flt));
}
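
/*
 * Reading aid for cpu_ce_error() below (informal, inferred from the shifts
 * used there and from UDB_FMTSTR above): ce_err() folds the two 10-bit UDB
 * error register values into the saved AFSR image, UDBH at bits 42:33 and
 * UDBL at bits 52:43.  Within each 10-bit value, bits 7:0 hold the ECC
 * syndrome (P_DER_E_SYND), bit 8 is the CE flag and bit 9 the UE flag,
 * which is why the handler tests "(sdbh >> 8) & 1" before queueing an event.
 */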

/*
 * Correctable ecc error trap handler
 */
/*ARGSUSED*/
void
cpu_ce_error(struct regs *rp, ulong_t p_afar, ulong_t p_afsr,
	uint_t p_afsr_high, uint_t p_afar_high)
{
	ushort_t sdbh, sdbl;
	ushort_t e_syndh, e_syndl;
	spitf_async_flt spf_flt;
	struct async_flt *ecc;
	int queue = 1;

	uint64_t t_afar = p_afar;
	uint64_t t_afsr = p_afsr;

	/*
	 * Note: the Spitfire data buffer error registers
	 * (upper and lower halves) are or'ed into the upper
	 * word of the afsr by ce_err().
	 */
	sdbh = (ushort_t)((t_afsr >> 33) & 0x3FF);
	sdbl = (ushort_t)((t_afsr >> 43) & 0x3FF);

	e_syndh = (uchar_t)(sdbh & (uint_t)P_DER_E_SYND);
	e_syndl = (uchar_t)(sdbl & (uint_t)P_DER_E_SYND);

	t_afsr &= S_AFSR_MASK;
	t_afar &= SABRE_AFAR_PA;	/* must use Sabre AFAR mask */

	/* Setup the async fault structure */
	bzero(&spf_flt, sizeof (spitf_async_flt));
	ecc = (struct async_flt *)&spf_flt;
	ecc->flt_id = gethrtime_waitfree();
	ecc->flt_stat = t_afsr;
	ecc->flt_addr = t_afar;
	ecc->flt_status = ECC_C_TRAP;
	ecc->flt_bus_id = getprocessorid();
	ecc->flt_inst = CPU->cpu_id;
	ecc->flt_pc = (caddr_t)rp->r_pc;
	ecc->flt_func = log_ce_err;
	ecc->flt_in_memory =
	    (pf_is_memory(ecc->flt_addr >> MMU_PAGESHIFT)) ? 1 : 0;
	spf_flt.flt_sdbh = sdbh;
	spf_flt.flt_sdbl = sdbl;

	/*
	 * Check for fatal conditions.
	 */
	check_misc_err(&spf_flt);

	/*
	 * Paranoid checks for valid AFSR and UDBs
	 */
	if ((t_afsr & P_AFSR_CE) == 0) {
		cpu_aflt_log(CE_PANIC, 1, &spf_flt, CMN_LFLAGS,
		    "** Panic due to CE bit not set in the AFSR",
		    "  Corrected Memory Error on");
	}

	/*
	 * We want to skip logging only if ALL the following
	 * conditions are true:
	 *
	 *	1. There is only one error
	 *	2. That error is a correctable memory error
	 *	3. The error is caused by the memory scrubber (in which case
	 *	    the error will have occurred under on_trap protection)
	 *	4. The error is on a retired page
	 *
	 * Note: OT_DATA_EC is used places other than the memory scrubber.
	 * However, none of those errors should occur on a retired page.
	 */
	if ((ecc->flt_stat & (S_AFSR_ALL_ERRS & ~P_AFSR_ME)) == P_AFSR_CE &&
	    curthread->t_ontrap != NULL) {

		if (curthread->t_ontrap->ot_prot & OT_DATA_EC) {
			if (page_retire_check(ecc->flt_addr, NULL) == 0) {
				queue = 0;
			}
		}
	}

	if (((sdbh & P_DER_CE) == 0) && ((sdbl & P_DER_CE) == 0)) {
		cpu_aflt_log(CE_PANIC, 1, &spf_flt, CMN_LFLAGS,
		    "** Panic due to CE bits not set in the UDBs",
		    " Corrected Memory Error on");
	}

	if ((sdbh >> 8) & 1) {
		ecc->flt_synd = e_syndh;
		ce_scrub(ecc);
		if (queue) {
			cpu_errorq_dispatch(FM_EREPORT_CPU_USII_CE, ecc,
			    sizeof (*ecc), ce_queue, ERRORQ_ASYNC);
		}
	}

	if ((sdbl >> 8) & 1) {
		ecc->flt_addr = t_afar | 0x8;	/* Sabres do not have a UDBL */
		ecc->flt_synd = e_syndl | UDBL_REG;
		ce_scrub(ecc);
		if (queue) {
			cpu_errorq_dispatch(FM_EREPORT_CPU_USII_CE, ecc,
			    sizeof (*ecc), ce_queue, ERRORQ_ASYNC);
		}
	}

	/*
	 * Re-enable all error trapping (CEEN currently cleared).
	 */
	clr_datapath();
	set_asyncflt(P_AFSR_CE);
	set_error_enable(EER_ENABLE);
}

/*
 * Cpu specific CE logging routine
 */
static void
log_ce_err(struct async_flt *aflt, char *unum)
{
	spitf_async_flt spf_flt;

	if ((aflt->flt_stat & P_AFSR_CE) && (ce_verbose_memory == 0)) {
		return;
	}

	spf_flt.cmn_asyncflt = *aflt;
	cpu_aflt_log(CE_CONT, 0, &spf_flt, CE_LFLAGS, unum,
	    " Corrected Memory Error detected by");
}

/*
 * Spitfire does not perform any further CE classification refinement
 */
/*ARGSUSED*/
int
ce_scrub_xdiag_recirc(struct async_flt *ecc, errorq_t *eqp, errorq_elem_t *eqep,
    size_t afltoffset)
{
	return (0);
}

char *
flt_to_error_type(struct async_flt *aflt)
{
	if (aflt->flt_status & ECC_INTERMITTENT)
		return (ERR_TYPE_DESC_INTERMITTENT);
	if (aflt->flt_status & ECC_PERSISTENT)
		return (ERR_TYPE_DESC_PERSISTENT);
	if (aflt->flt_status & ECC_STICKY)
		return (ERR_TYPE_DESC_STICKY);
	return (ERR_TYPE_DESC_UNKNOWN);
}

/*
 * Called by correctable ecc error logging code to print out
 * the sticky/persistent/intermittent status of the error.
 */
9837c478bd9Sstevel@tonic-gate */ 9847c478bd9Sstevel@tonic-gate static void 9857c478bd9Sstevel@tonic-gate cpu_ce_log_status(spitf_async_flt *spf_flt, char *unum) 9867c478bd9Sstevel@tonic-gate { 9877c478bd9Sstevel@tonic-gate ushort_t status; 9887c478bd9Sstevel@tonic-gate char *status1_str = "Memory"; 9897c478bd9Sstevel@tonic-gate char *status2_str = "Intermittent"; 9907c478bd9Sstevel@tonic-gate struct async_flt *aflt = (struct async_flt *)spf_flt; 9917c478bd9Sstevel@tonic-gate 9927c478bd9Sstevel@tonic-gate status = aflt->flt_status; 9937c478bd9Sstevel@tonic-gate 9947c478bd9Sstevel@tonic-gate if (status & ECC_ECACHE) 9957c478bd9Sstevel@tonic-gate status1_str = "Ecache"; 9967c478bd9Sstevel@tonic-gate 9977c478bd9Sstevel@tonic-gate if (status & ECC_STICKY) 9987c478bd9Sstevel@tonic-gate status2_str = "Sticky"; 9997c478bd9Sstevel@tonic-gate else if (status & ECC_PERSISTENT) 10007c478bd9Sstevel@tonic-gate status2_str = "Persistent"; 10017c478bd9Sstevel@tonic-gate 10027c478bd9Sstevel@tonic-gate cpu_aflt_log(CE_CONT, 0, spf_flt, CPU_ERRID_FIRST, 10037c478bd9Sstevel@tonic-gate NULL, " Corrected %s Error on %s is %s", 10047c478bd9Sstevel@tonic-gate status1_str, unum, status2_str); 10057c478bd9Sstevel@tonic-gate } 10067c478bd9Sstevel@tonic-gate 10077c478bd9Sstevel@tonic-gate /* 10087c478bd9Sstevel@tonic-gate * check for a valid ce syndrome, then call the 10097c478bd9Sstevel@tonic-gate * displacement flush scrubbing code, and then check the afsr to see if 10107c478bd9Sstevel@tonic-gate * the error was persistent or intermittent. Reread the afar/afsr to see 10117c478bd9Sstevel@tonic-gate * if the error was not scrubbed successfully, and is therefore sticky. 10127c478bd9Sstevel@tonic-gate */ 10137c478bd9Sstevel@tonic-gate /*ARGSUSED1*/ 10147c478bd9Sstevel@tonic-gate void 10157c478bd9Sstevel@tonic-gate cpu_ce_scrub_mem_err(struct async_flt *ecc, boolean_t triedcpulogout) 10167c478bd9Sstevel@tonic-gate { 10177c478bd9Sstevel@tonic-gate uint64_t eer, afsr; 10187c478bd9Sstevel@tonic-gate ushort_t status; 10197c478bd9Sstevel@tonic-gate 10207c478bd9Sstevel@tonic-gate ASSERT(getpil() > LOCK_LEVEL); 10217c478bd9Sstevel@tonic-gate 10227c478bd9Sstevel@tonic-gate /* 10237c478bd9Sstevel@tonic-gate * It is possible that the flt_addr is not a valid 10247c478bd9Sstevel@tonic-gate * physical address. To deal with this, we disable 10257c478bd9Sstevel@tonic-gate * NCEEN while we scrub that address. If this causes 10267c478bd9Sstevel@tonic-gate * a TIMEOUT/BERR, we know this is an invalid 10277c478bd9Sstevel@tonic-gate * memory location. 10287c478bd9Sstevel@tonic-gate */ 10297c478bd9Sstevel@tonic-gate kpreempt_disable(); 10307c478bd9Sstevel@tonic-gate eer = get_error_enable(); 10317c478bd9Sstevel@tonic-gate if (eer & (EER_CEEN | EER_NCEEN)) 10327c478bd9Sstevel@tonic-gate set_error_enable(eer & ~(EER_CEEN | EER_NCEEN)); 10337c478bd9Sstevel@tonic-gate 10347c478bd9Sstevel@tonic-gate /* 10357c478bd9Sstevel@tonic-gate * To check if the error detected by IO is persistent, sticky or 10367c478bd9Sstevel@tonic-gate * intermittent. 
10377c478bd9Sstevel@tonic-gate */ 10387c478bd9Sstevel@tonic-gate if (ecc->flt_status & ECC_IOBUS) { 10397c478bd9Sstevel@tonic-gate ecc->flt_stat = P_AFSR_CE; 10407c478bd9Sstevel@tonic-gate } 10417c478bd9Sstevel@tonic-gate 10427c478bd9Sstevel@tonic-gate scrubphys(P2ALIGN(ecc->flt_addr, 64), 10437c478bd9Sstevel@tonic-gate cpunodes[CPU->cpu_id].ecache_size); 10447c478bd9Sstevel@tonic-gate 10457c478bd9Sstevel@tonic-gate get_asyncflt(&afsr); 10467c478bd9Sstevel@tonic-gate if (afsr & (P_AFSR_TO | P_AFSR_BERR)) { 10477c478bd9Sstevel@tonic-gate /* 10487c478bd9Sstevel@tonic-gate * Must ensure that we don't get the TIMEOUT/BERR 10497c478bd9Sstevel@tonic-gate * when we reenable NCEEN, so we clear the AFSR. 10507c478bd9Sstevel@tonic-gate */ 10517c478bd9Sstevel@tonic-gate set_asyncflt(afsr & (P_AFSR_TO | P_AFSR_BERR)); 10527c478bd9Sstevel@tonic-gate if (eer & (EER_CEEN | EER_NCEEN)) 10537c478bd9Sstevel@tonic-gate set_error_enable(eer); 10547c478bd9Sstevel@tonic-gate kpreempt_enable(); 10557c478bd9Sstevel@tonic-gate return; 10567c478bd9Sstevel@tonic-gate } 10577c478bd9Sstevel@tonic-gate 10587c478bd9Sstevel@tonic-gate if (eer & EER_NCEEN) 10597c478bd9Sstevel@tonic-gate set_error_enable(eer & ~EER_CEEN); 10607c478bd9Sstevel@tonic-gate 10617c478bd9Sstevel@tonic-gate /* 10627c478bd9Sstevel@tonic-gate * Check and clear any ECC errors from the scrub. If the scrub did 10637c478bd9Sstevel@tonic-gate * not trip over the error, mark it intermittent. If the scrub did 10647c478bd9Sstevel@tonic-gate * trip the error again and it did not scrub away, mark it sticky. 10657c478bd9Sstevel@tonic-gate * Otherwise mark it persistent. 10667c478bd9Sstevel@tonic-gate */ 10677c478bd9Sstevel@tonic-gate if (check_ecc(ecc) != 0) { 10687c478bd9Sstevel@tonic-gate cpu_read_paddr(ecc, 0, 1); 10697c478bd9Sstevel@tonic-gate 10707c478bd9Sstevel@tonic-gate if (check_ecc(ecc) != 0) 10717c478bd9Sstevel@tonic-gate status = ECC_STICKY; 10727c478bd9Sstevel@tonic-gate else 10737c478bd9Sstevel@tonic-gate status = ECC_PERSISTENT; 10747c478bd9Sstevel@tonic-gate } else 10757c478bd9Sstevel@tonic-gate status = ECC_INTERMITTENT; 10767c478bd9Sstevel@tonic-gate 10777c478bd9Sstevel@tonic-gate if (eer & (EER_CEEN | EER_NCEEN)) 10787c478bd9Sstevel@tonic-gate set_error_enable(eer); 10797c478bd9Sstevel@tonic-gate kpreempt_enable(); 10807c478bd9Sstevel@tonic-gate 10817c478bd9Sstevel@tonic-gate ecc->flt_status &= ~(ECC_INTERMITTENT | ECC_PERSISTENT | ECC_STICKY); 10827c478bd9Sstevel@tonic-gate ecc->flt_status |= status; 10837c478bd9Sstevel@tonic-gate } 10847c478bd9Sstevel@tonic-gate 10857c478bd9Sstevel@tonic-gate /* 10867c478bd9Sstevel@tonic-gate * get the syndrome and unum, and then call the routines 10877c478bd9Sstevel@tonic-gate * to check the other cpus and iobuses, and then do the error logging. 
10887c478bd9Sstevel@tonic-gate */ 10897c478bd9Sstevel@tonic-gate /*ARGSUSED1*/ 10907c478bd9Sstevel@tonic-gate void 10917c478bd9Sstevel@tonic-gate cpu_ce_log_err(struct async_flt *ecc, errorq_elem_t *eqep) 10927c478bd9Sstevel@tonic-gate { 10937c478bd9Sstevel@tonic-gate char unum[UNUM_NAMLEN]; 10947c478bd9Sstevel@tonic-gate int len = 0; 10957c478bd9Sstevel@tonic-gate int ce_verbose = 0; 1096db874c57Selowe int err; 10977c478bd9Sstevel@tonic-gate 10987c478bd9Sstevel@tonic-gate ASSERT(ecc->flt_func != NULL); 10997c478bd9Sstevel@tonic-gate 11007c478bd9Sstevel@tonic-gate /* Get the unum string for logging purposes */ 11017c478bd9Sstevel@tonic-gate (void) cpu_get_mem_unum_aflt(AFLT_STAT_VALID, ecc, unum, 11027c478bd9Sstevel@tonic-gate UNUM_NAMLEN, &len); 11037c478bd9Sstevel@tonic-gate 11047c478bd9Sstevel@tonic-gate /* Call specific error logging routine */ 11057c478bd9Sstevel@tonic-gate (void) (*ecc->flt_func)(ecc, unum); 11067c478bd9Sstevel@tonic-gate 11077c478bd9Sstevel@tonic-gate /* 11087c478bd9Sstevel@tonic-gate * Count errors per unum. 11097c478bd9Sstevel@tonic-gate * Non-memory errors are all counted via a special unum string. 11107c478bd9Sstevel@tonic-gate */ 1111db874c57Selowe if ((err = ce_count_unum(ecc->flt_status, len, unum)) != PR_OK && 11127c478bd9Sstevel@tonic-gate automatic_page_removal) { 1113db874c57Selowe (void) page_retire(ecc->flt_addr, err); 11147c478bd9Sstevel@tonic-gate } 11157c478bd9Sstevel@tonic-gate 11167c478bd9Sstevel@tonic-gate if (ecc->flt_panic) { 11177c478bd9Sstevel@tonic-gate ce_verbose = 1; 11187c478bd9Sstevel@tonic-gate } else if ((ecc->flt_class == BUS_FAULT) || 11197c478bd9Sstevel@tonic-gate (ecc->flt_stat & P_AFSR_CE)) { 11207c478bd9Sstevel@tonic-gate ce_verbose = (ce_verbose_memory > 0); 11217c478bd9Sstevel@tonic-gate } else { 11227c478bd9Sstevel@tonic-gate ce_verbose = 1; 11237c478bd9Sstevel@tonic-gate } 11247c478bd9Sstevel@tonic-gate 11257c478bd9Sstevel@tonic-gate if (ce_verbose) { 11267c478bd9Sstevel@tonic-gate spitf_async_flt sflt; 11277c478bd9Sstevel@tonic-gate int synd_code; 11287c478bd9Sstevel@tonic-gate 11297c478bd9Sstevel@tonic-gate sflt.cmn_asyncflt = *ecc; /* for cpu_aflt_log() */ 11307c478bd9Sstevel@tonic-gate 11317c478bd9Sstevel@tonic-gate cpu_ce_log_status(&sflt, unum); 11327c478bd9Sstevel@tonic-gate 11337c478bd9Sstevel@tonic-gate synd_code = synd_to_synd_code(AFLT_STAT_VALID, 11347c478bd9Sstevel@tonic-gate SYND(ecc->flt_synd)); 11357c478bd9Sstevel@tonic-gate 11367c478bd9Sstevel@tonic-gate if (SYND_IS_SINGLE_BIT_DATA(synd_code)) { 11377c478bd9Sstevel@tonic-gate cpu_aflt_log(CE_CONT, 0, &sflt, CPU_ERRID_FIRST, 11387c478bd9Sstevel@tonic-gate NULL, " ECC Data Bit %2d was in error " 11397c478bd9Sstevel@tonic-gate "and corrected", synd_code); 11407c478bd9Sstevel@tonic-gate } else if (SYND_IS_SINGLE_BIT_CHK(synd_code)) { 11417c478bd9Sstevel@tonic-gate cpu_aflt_log(CE_CONT, 0, &sflt, CPU_ERRID_FIRST, 11427c478bd9Sstevel@tonic-gate NULL, " ECC Check Bit %2d was in error " 11437c478bd9Sstevel@tonic-gate "and corrected", synd_code - C0); 11447c478bd9Sstevel@tonic-gate } else { 11457c478bd9Sstevel@tonic-gate /* 11467c478bd9Sstevel@tonic-gate * These are UE errors - we shouldn't be getting CE 11477c478bd9Sstevel@tonic-gate * traps for these; handle them in case of bad h/w. 
11487c478bd9Sstevel@tonic-gate */ 11497c478bd9Sstevel@tonic-gate switch (synd_code) { 11507c478bd9Sstevel@tonic-gate case M2: 11517c478bd9Sstevel@tonic-gate cpu_aflt_log(CE_CONT, 0, &sflt, 11527c478bd9Sstevel@tonic-gate CPU_ERRID_FIRST, NULL, 11537c478bd9Sstevel@tonic-gate " Two ECC Bits were in error"); 11547c478bd9Sstevel@tonic-gate break; 11557c478bd9Sstevel@tonic-gate case M3: 11567c478bd9Sstevel@tonic-gate cpu_aflt_log(CE_CONT, 0, &sflt, 11577c478bd9Sstevel@tonic-gate CPU_ERRID_FIRST, NULL, 11587c478bd9Sstevel@tonic-gate " Three ECC Bits were in error"); 11597c478bd9Sstevel@tonic-gate break; 11607c478bd9Sstevel@tonic-gate case M4: 11617c478bd9Sstevel@tonic-gate cpu_aflt_log(CE_CONT, 0, &sflt, 11627c478bd9Sstevel@tonic-gate CPU_ERRID_FIRST, NULL, 11637c478bd9Sstevel@tonic-gate " Four ECC Bits were in error"); 11647c478bd9Sstevel@tonic-gate break; 11657c478bd9Sstevel@tonic-gate case MX: 11667c478bd9Sstevel@tonic-gate cpu_aflt_log(CE_CONT, 0, &sflt, 11677c478bd9Sstevel@tonic-gate CPU_ERRID_FIRST, NULL, 11687c478bd9Sstevel@tonic-gate " More than Four ECC bits were " 11697c478bd9Sstevel@tonic-gate "in error"); 11707c478bd9Sstevel@tonic-gate break; 11717c478bd9Sstevel@tonic-gate default: 11727c478bd9Sstevel@tonic-gate cpu_aflt_log(CE_CONT, 0, &sflt, 11737c478bd9Sstevel@tonic-gate CPU_ERRID_FIRST, NULL, 11747c478bd9Sstevel@tonic-gate " Unknown fault syndrome %d", 11757c478bd9Sstevel@tonic-gate synd_code); 11767c478bd9Sstevel@tonic-gate break; 11777c478bd9Sstevel@tonic-gate } 11787c478bd9Sstevel@tonic-gate } 11797c478bd9Sstevel@tonic-gate } 11807c478bd9Sstevel@tonic-gate 11817c478bd9Sstevel@tonic-gate /* Display entire cache line, if valid address */ 11827c478bd9Sstevel@tonic-gate if (ce_show_data && ecc->flt_addr != AFLT_INV_ADDR) 11837c478bd9Sstevel@tonic-gate read_ecc_data(ecc, 1, 1); 11847c478bd9Sstevel@tonic-gate } 11857c478bd9Sstevel@tonic-gate 11867c478bd9Sstevel@tonic-gate /* 11877c478bd9Sstevel@tonic-gate * We route all errors through a single switch statement. 11887c478bd9Sstevel@tonic-gate */ 11897c478bd9Sstevel@tonic-gate void 11907c478bd9Sstevel@tonic-gate cpu_ue_log_err(struct async_flt *aflt) 11917c478bd9Sstevel@tonic-gate { 11927c478bd9Sstevel@tonic-gate 11937c478bd9Sstevel@tonic-gate switch (aflt->flt_class) { 11947c478bd9Sstevel@tonic-gate case CPU_FAULT: 11957c478bd9Sstevel@tonic-gate cpu_async_log_err(aflt); 11967c478bd9Sstevel@tonic-gate break; 11977c478bd9Sstevel@tonic-gate 11987c478bd9Sstevel@tonic-gate case BUS_FAULT: 11997c478bd9Sstevel@tonic-gate bus_async_log_err(aflt); 12007c478bd9Sstevel@tonic-gate break; 12017c478bd9Sstevel@tonic-gate 12027c478bd9Sstevel@tonic-gate default: 12037c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "discarding async error 0x%p with invalid " 12047c478bd9Sstevel@tonic-gate "fault class (0x%x)", (void *)aflt, aflt->flt_class); 12057c478bd9Sstevel@tonic-gate break; 12067c478bd9Sstevel@tonic-gate } 12077c478bd9Sstevel@tonic-gate } 12087c478bd9Sstevel@tonic-gate 12097c478bd9Sstevel@tonic-gate /* Values for action variable in cpu_async_error() */ 12107c478bd9Sstevel@tonic-gate #define ACTION_NONE 0 12117c478bd9Sstevel@tonic-gate #define ACTION_TRAMPOLINE 1 12127c478bd9Sstevel@tonic-gate #define ACTION_AST_FLAGS 2 12137c478bd9Sstevel@tonic-gate 12147c478bd9Sstevel@tonic-gate /* 12157c478bd9Sstevel@tonic-gate * Access error trap handler for asynchronous cpu errors. This routine is 12167c478bd9Sstevel@tonic-gate * called to handle a data or instruction access error. 
All fatal errors are 12177c478bd9Sstevel@tonic-gate * completely handled by this routine (by panicking). Non fatal error logging 12187c478bd9Sstevel@tonic-gate * is queued for later processing either via AST or softint at a lower PIL. 12197c478bd9Sstevel@tonic-gate * In case of panic, the error log queue will also be processed as part of the 12207c478bd9Sstevel@tonic-gate * panic flow to ensure all errors are logged. This routine is called with all 12217c478bd9Sstevel@tonic-gate * errors disabled at PIL15. The AFSR bits are cleared and the UDBL and UDBH 12227c478bd9Sstevel@tonic-gate * error bits are also cleared. The hardware has also disabled the I and 12237c478bd9Sstevel@tonic-gate * D-caches for us, so we must re-enable them before returning. 12247c478bd9Sstevel@tonic-gate * 12257c478bd9Sstevel@tonic-gate * A summary of the handling of tl=0 UE/LDP/EDP/TO/BERR/WP/CP: 12267c478bd9Sstevel@tonic-gate * 12277c478bd9Sstevel@tonic-gate * _______________________________________________________________ 12287c478bd9Sstevel@tonic-gate * | Privileged tl0 | Unprivileged | 12297c478bd9Sstevel@tonic-gate * | Protected | Unprotected | Protected | Unprotected | 12307c478bd9Sstevel@tonic-gate * |on_trap|lofault| | | | 12317c478bd9Sstevel@tonic-gate * -------------|-------|-------+---------------+---------------+-------------| 12327c478bd9Sstevel@tonic-gate * | | | | | | 12337c478bd9Sstevel@tonic-gate * UE/LDP/EDP | L,T,p | L,R,p | L,P | n/a | L,R,p | 12347c478bd9Sstevel@tonic-gate * | | | | | | 12357c478bd9Sstevel@tonic-gate * TO/BERR | T | S | L,P | n/a | S | 12367c478bd9Sstevel@tonic-gate * | | | | | | 12377c478bd9Sstevel@tonic-gate * WP | L,M,p | L,M,p | L,M,p | n/a | L,M,p | 12387c478bd9Sstevel@tonic-gate * | | | | | | 12397c478bd9Sstevel@tonic-gate * CP (IIi/IIe) | L,P | L,P | L,P | n/a | L,P | 12407c478bd9Sstevel@tonic-gate * ____________________________________________________________________________ 12417c478bd9Sstevel@tonic-gate * 12427c478bd9Sstevel@tonic-gate * 12437c478bd9Sstevel@tonic-gate * Action codes: 12447c478bd9Sstevel@tonic-gate * 12457c478bd9Sstevel@tonic-gate * L - log 12467c478bd9Sstevel@tonic-gate * M - kick off memscrubber if flt_in_memory 12477c478bd9Sstevel@tonic-gate * P - panic 12487c478bd9Sstevel@tonic-gate * p - panic if US-IIi or US-IIe (Sabre); overrides R and M 12497c478bd9Sstevel@tonic-gate * R - i) if aft_panic is set, panic 12507c478bd9Sstevel@tonic-gate * ii) otherwise, send hwerr event to contract and SIGKILL to process 12517c478bd9Sstevel@tonic-gate * S - send SIGBUS to process 12527c478bd9Sstevel@tonic-gate * T - trampoline 12537c478bd9Sstevel@tonic-gate * 12547c478bd9Sstevel@tonic-gate * Special cases: 12557c478bd9Sstevel@tonic-gate * 12567c478bd9Sstevel@tonic-gate * 1) if aft_testfatal is set, all faults result in a panic regardless 12577c478bd9Sstevel@tonic-gate * of type (even WP), protection (even on_trap), or privilege. 
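 *
 * Note on the incoming p_afsr argument: as the extraction code below
 * shows, async_err() hands us a saved AFSR with extra state packed into
 * its upper bits -- the UDBH contents in bits <42:33>, the UDBL contents
 * in bits <52:43>, the trap type in bits <61:53>, and the trap level in
 * the top two bits.  Those fields are pulled out first, and the value is
 * then masked with S_AFSR_MASK back down to a plain AFSR image before it
 * is stored in aflt->flt_stat.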
12587c478bd9Sstevel@tonic-gate */ 12597c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 12607c478bd9Sstevel@tonic-gate void 12617c478bd9Sstevel@tonic-gate cpu_async_error(struct regs *rp, ulong_t p_afar, ulong_t p_afsr, 12627c478bd9Sstevel@tonic-gate uint_t p_afsr_high, uint_t p_afar_high) 12637c478bd9Sstevel@tonic-gate { 12647c478bd9Sstevel@tonic-gate ushort_t sdbh, sdbl, ttype, tl; 12657c478bd9Sstevel@tonic-gate spitf_async_flt spf_flt; 12667c478bd9Sstevel@tonic-gate struct async_flt *aflt; 12677c478bd9Sstevel@tonic-gate char pr_reason[28]; 12687c478bd9Sstevel@tonic-gate uint64_t oafsr; 12697c478bd9Sstevel@tonic-gate uint64_t acc_afsr = 0; /* accumulated afsr */ 12707c478bd9Sstevel@tonic-gate int action = ACTION_NONE; 12717c478bd9Sstevel@tonic-gate uint64_t t_afar = p_afar; 12727c478bd9Sstevel@tonic-gate uint64_t t_afsr = p_afsr; 12737c478bd9Sstevel@tonic-gate int expected = DDI_FM_ERR_UNEXPECTED; 12747c478bd9Sstevel@tonic-gate ddi_acc_hdl_t *hp; 12757c478bd9Sstevel@tonic-gate 12767c478bd9Sstevel@tonic-gate /* 12777c478bd9Sstevel@tonic-gate * We need to look at p_flag to determine if the thread detected an 12787c478bd9Sstevel@tonic-gate * error while dumping core. We can't grab p_lock here, but it's ok 12797c478bd9Sstevel@tonic-gate * because we just need a consistent snapshot and we know that everyone 12807c478bd9Sstevel@tonic-gate * else will store a consistent set of bits while holding p_lock. We 12817c478bd9Sstevel@tonic-gate * don't have to worry about a race because SDOCORE is set once prior 12827c478bd9Sstevel@tonic-gate * to doing i/o from the process's address space and is never cleared. 12837c478bd9Sstevel@tonic-gate */ 12847c478bd9Sstevel@tonic-gate uint_t pflag = ttoproc(curthread)->p_flag; 12857c478bd9Sstevel@tonic-gate 12867c478bd9Sstevel@tonic-gate pr_reason[0] = '\0'; 12877c478bd9Sstevel@tonic-gate 12887c478bd9Sstevel@tonic-gate /* 12897c478bd9Sstevel@tonic-gate * Note: the Spitfire data buffer error registers 12907c478bd9Sstevel@tonic-gate * (upper and lower halves) are or'ed into the upper 12917c478bd9Sstevel@tonic-gate * word of the afsr by async_err() if P_AFSR_UE is set. 12927c478bd9Sstevel@tonic-gate */ 12937c478bd9Sstevel@tonic-gate sdbh = (ushort_t)((t_afsr >> 33) & 0x3FF); 12947c478bd9Sstevel@tonic-gate sdbl = (ushort_t)((t_afsr >> 43) & 0x3FF); 12957c478bd9Sstevel@tonic-gate 12967c478bd9Sstevel@tonic-gate /* 12977c478bd9Sstevel@tonic-gate * Grab the ttype encoded in <63:53> of the saved 12987c478bd9Sstevel@tonic-gate * afsr passed from async_err() 12997c478bd9Sstevel@tonic-gate */ 13007c478bd9Sstevel@tonic-gate ttype = (ushort_t)((t_afsr >> 53) & 0x1FF); 13017c478bd9Sstevel@tonic-gate tl = (ushort_t)(t_afsr >> 62); 13027c478bd9Sstevel@tonic-gate 13037c478bd9Sstevel@tonic-gate t_afsr &= S_AFSR_MASK; 13047c478bd9Sstevel@tonic-gate t_afar &= SABRE_AFAR_PA; /* must use Sabre AFAR mask */ 13057c478bd9Sstevel@tonic-gate 13067c478bd9Sstevel@tonic-gate /* 13077c478bd9Sstevel@tonic-gate * Initialize most of the common and CPU-specific structure. We derive 13087c478bd9Sstevel@tonic-gate * aflt->flt_priv from %tstate, instead of from the AFSR.PRIV bit. The 13097c478bd9Sstevel@tonic-gate * initial setting of aflt->flt_panic is based on TL: we must panic if 13107c478bd9Sstevel@tonic-gate * the error occurred at TL > 0. We also set flt_panic if the test/demo 13117c478bd9Sstevel@tonic-gate * tuneable aft_testfatal is set (not the default). 
13127c478bd9Sstevel@tonic-gate */ 13137c478bd9Sstevel@tonic-gate bzero(&spf_flt, sizeof (spitf_async_flt)); 13147c478bd9Sstevel@tonic-gate aflt = (struct async_flt *)&spf_flt; 13157c478bd9Sstevel@tonic-gate aflt->flt_id = gethrtime_waitfree(); 13167c478bd9Sstevel@tonic-gate aflt->flt_stat = t_afsr; 13177c478bd9Sstevel@tonic-gate aflt->flt_addr = t_afar; 13187c478bd9Sstevel@tonic-gate aflt->flt_bus_id = getprocessorid(); 13197c478bd9Sstevel@tonic-gate aflt->flt_inst = CPU->cpu_id; 13207c478bd9Sstevel@tonic-gate aflt->flt_pc = (caddr_t)rp->r_pc; 13217c478bd9Sstevel@tonic-gate aflt->flt_prot = AFLT_PROT_NONE; 13227c478bd9Sstevel@tonic-gate aflt->flt_class = CPU_FAULT; 13237c478bd9Sstevel@tonic-gate aflt->flt_priv = (rp->r_tstate & TSTATE_PRIV) ? 1 : 0; 13247c478bd9Sstevel@tonic-gate aflt->flt_tl = (uchar_t)tl; 13257c478bd9Sstevel@tonic-gate aflt->flt_panic = (tl != 0 || aft_testfatal != 0); 13267c478bd9Sstevel@tonic-gate aflt->flt_core = (pflag & SDOCORE) ? 1 : 0; 13277c478bd9Sstevel@tonic-gate 13287c478bd9Sstevel@tonic-gate /* 13297c478bd9Sstevel@tonic-gate * Set flt_status based on the trap type. If we end up here as the 13307c478bd9Sstevel@tonic-gate * result of a UE detected by the CE handling code, leave status 0. 13317c478bd9Sstevel@tonic-gate */ 13327c478bd9Sstevel@tonic-gate switch (ttype) { 13337c478bd9Sstevel@tonic-gate case T_DATA_ERROR: 13347c478bd9Sstevel@tonic-gate aflt->flt_status = ECC_D_TRAP; 13357c478bd9Sstevel@tonic-gate break; 13367c478bd9Sstevel@tonic-gate case T_INSTR_ERROR: 13377c478bd9Sstevel@tonic-gate aflt->flt_status = ECC_I_TRAP; 13387c478bd9Sstevel@tonic-gate break; 13397c478bd9Sstevel@tonic-gate } 13407c478bd9Sstevel@tonic-gate 13417c478bd9Sstevel@tonic-gate spf_flt.flt_sdbh = sdbh; 13427c478bd9Sstevel@tonic-gate spf_flt.flt_sdbl = sdbl; 13437c478bd9Sstevel@tonic-gate 13447c478bd9Sstevel@tonic-gate /* 13457c478bd9Sstevel@tonic-gate * Check for fatal async errors. 13467c478bd9Sstevel@tonic-gate */ 13477c478bd9Sstevel@tonic-gate check_misc_err(&spf_flt); 13487c478bd9Sstevel@tonic-gate 13497c478bd9Sstevel@tonic-gate /* 13507c478bd9Sstevel@tonic-gate * If the trap occurred in privileged mode at TL=0, we need to check to 13517c478bd9Sstevel@tonic-gate * see if we were executing in the kernel under on_trap() or t_lofault 13527c478bd9Sstevel@tonic-gate * protection. If so, modify the saved registers so that we return 13537c478bd9Sstevel@tonic-gate * from the trap to the appropriate trampoline routine. 
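 *
 * For context, a kernel consumer of this protection looks roughly like
 * the sketch below (illustrative only; see <sys/ontrap.h> for the real
 * interface, and do_protected_access is just a placeholder):
 *
 *	on_trap_data_t otd;
 *
 *	if (!on_trap(&otd, OT_DATA_EC)) {
 *		do_protected_access();
 *	}
 *	no_trap();
 *
 * on_trap() returns zero when it is first armed; if a protected error is
 * taken, the code below points the saved PC at the trampoline recorded
 * in the on_trap_data, so the on_trap() call appears to return a second
 * time with a non-zero value and the caller can recover.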
13547c478bd9Sstevel@tonic-gate */ 13557c478bd9Sstevel@tonic-gate if (aflt->flt_priv && tl == 0) { 13567c478bd9Sstevel@tonic-gate if (curthread->t_ontrap != NULL) { 13577c478bd9Sstevel@tonic-gate on_trap_data_t *otp = curthread->t_ontrap; 13587c478bd9Sstevel@tonic-gate 13597c478bd9Sstevel@tonic-gate if (otp->ot_prot & OT_DATA_EC) { 13607c478bd9Sstevel@tonic-gate aflt->flt_prot = AFLT_PROT_EC; 13617c478bd9Sstevel@tonic-gate otp->ot_trap |= OT_DATA_EC; 13627c478bd9Sstevel@tonic-gate rp->r_pc = otp->ot_trampoline; 13637c478bd9Sstevel@tonic-gate rp->r_npc = rp->r_pc + 4; 13647c478bd9Sstevel@tonic-gate action = ACTION_TRAMPOLINE; 13657c478bd9Sstevel@tonic-gate } 13667c478bd9Sstevel@tonic-gate 13677c478bd9Sstevel@tonic-gate if ((t_afsr & (P_AFSR_TO | P_AFSR_BERR)) && 13687c478bd9Sstevel@tonic-gate (otp->ot_prot & OT_DATA_ACCESS)) { 13697c478bd9Sstevel@tonic-gate aflt->flt_prot = AFLT_PROT_ACCESS; 13707c478bd9Sstevel@tonic-gate otp->ot_trap |= OT_DATA_ACCESS; 13717c478bd9Sstevel@tonic-gate rp->r_pc = otp->ot_trampoline; 13727c478bd9Sstevel@tonic-gate rp->r_npc = rp->r_pc + 4; 13737c478bd9Sstevel@tonic-gate action = ACTION_TRAMPOLINE; 13747c478bd9Sstevel@tonic-gate /* 13757c478bd9Sstevel@tonic-gate * for peeks and caut_gets errors are expected 13767c478bd9Sstevel@tonic-gate */ 13777c478bd9Sstevel@tonic-gate hp = (ddi_acc_hdl_t *)otp->ot_handle; 13787c478bd9Sstevel@tonic-gate if (!hp) 13797c478bd9Sstevel@tonic-gate expected = DDI_FM_ERR_PEEK; 13807c478bd9Sstevel@tonic-gate else if (hp->ah_acc.devacc_attr_access == 13817c478bd9Sstevel@tonic-gate DDI_CAUTIOUS_ACC) 13827c478bd9Sstevel@tonic-gate expected = DDI_FM_ERR_EXPECTED; 13837c478bd9Sstevel@tonic-gate } 13847c478bd9Sstevel@tonic-gate 13857c478bd9Sstevel@tonic-gate } else if (curthread->t_lofault) { 13867c478bd9Sstevel@tonic-gate aflt->flt_prot = AFLT_PROT_COPY; 13877c478bd9Sstevel@tonic-gate rp->r_g1 = EFAULT; 13887c478bd9Sstevel@tonic-gate rp->r_pc = curthread->t_lofault; 13897c478bd9Sstevel@tonic-gate rp->r_npc = rp->r_pc + 4; 13907c478bd9Sstevel@tonic-gate action = ACTION_TRAMPOLINE; 13917c478bd9Sstevel@tonic-gate } 13927c478bd9Sstevel@tonic-gate } 13937c478bd9Sstevel@tonic-gate 13947c478bd9Sstevel@tonic-gate /* 13957c478bd9Sstevel@tonic-gate * Determine if this error needs to be treated as fatal. Note that 13967c478bd9Sstevel@tonic-gate * multiple errors detected upon entry to this trap handler do not 13977c478bd9Sstevel@tonic-gate * necessarily warrant a panic. We only want to panic if the trap 13987c478bd9Sstevel@tonic-gate * happened in privileged mode and not under t_ontrap or t_lofault 13997c478bd9Sstevel@tonic-gate * protection. The exception is WP: if we *only* get WP, it is not 14007c478bd9Sstevel@tonic-gate * fatal even if the trap occurred in privileged mode, except on Sabre. 14017c478bd9Sstevel@tonic-gate * 14027c478bd9Sstevel@tonic-gate * aft_panic, if set, effectively makes us treat usermode 14037c478bd9Sstevel@tonic-gate * UE/EDP/LDP faults as if they were privileged - so we will 14047c478bd9Sstevel@tonic-gate * panic instead of sending a contract event. A lofault-protected 14057c478bd9Sstevel@tonic-gate * fault will normally follow the contract event; if aft_panic is 14067c478bd9Sstevel@tonic-gate * set this will be changed to a panic. 14077c478bd9Sstevel@tonic-gate * 14087c478bd9Sstevel@tonic-gate * For usermode BERR/BTO errors, eg from processes performing device 14097c478bd9Sstevel@tonic-gate * control through mapped device memory, we need only deliver 14107c478bd9Sstevel@tonic-gate * a SIGBUS to the offending process.
14117c478bd9Sstevel@tonic-gate * 14127c478bd9Sstevel@tonic-gate * Some additional flt_panic reasons (eg, WP on Sabre) will be 14137c478bd9Sstevel@tonic-gate * checked later; for now we implement the common reasons. 14147c478bd9Sstevel@tonic-gate */ 14157c478bd9Sstevel@tonic-gate if (aflt->flt_prot == AFLT_PROT_NONE) { 14167c478bd9Sstevel@tonic-gate /* 14177c478bd9Sstevel@tonic-gate * Beware - multiple bits may be set in AFSR 14187c478bd9Sstevel@tonic-gate */ 14197c478bd9Sstevel@tonic-gate if (t_afsr & (P_AFSR_UE | P_AFSR_LDP | P_AFSR_EDP)) { 14207c478bd9Sstevel@tonic-gate if (aflt->flt_priv || aft_panic) 14217c478bd9Sstevel@tonic-gate aflt->flt_panic = 1; 14227c478bd9Sstevel@tonic-gate } 14237c478bd9Sstevel@tonic-gate 14247c478bd9Sstevel@tonic-gate if (t_afsr & (P_AFSR_TO | P_AFSR_BERR)) { 14257c478bd9Sstevel@tonic-gate if (aflt->flt_priv) 14267c478bd9Sstevel@tonic-gate aflt->flt_panic = 1; 14277c478bd9Sstevel@tonic-gate } 14287c478bd9Sstevel@tonic-gate } else if (aflt->flt_prot == AFLT_PROT_COPY && aft_panic) { 14297c478bd9Sstevel@tonic-gate aflt->flt_panic = 1; 14307c478bd9Sstevel@tonic-gate } 14317c478bd9Sstevel@tonic-gate 14327c478bd9Sstevel@tonic-gate /* 14337c478bd9Sstevel@tonic-gate * UE/BERR/TO: Call our bus nexus friends to check for 14347c478bd9Sstevel@tonic-gate * IO errors that may have resulted in this trap. 14357c478bd9Sstevel@tonic-gate */ 14367c478bd9Sstevel@tonic-gate if (t_afsr & (P_AFSR_TO | P_AFSR_BERR | P_AFSR_UE)) { 14377c478bd9Sstevel@tonic-gate cpu_run_bus_error_handlers(aflt, expected); 14387c478bd9Sstevel@tonic-gate } 14397c478bd9Sstevel@tonic-gate 14407c478bd9Sstevel@tonic-gate /* 14417c478bd9Sstevel@tonic-gate * Handle UE: If the UE is in memory, we need to flush the bad line from 14427c478bd9Sstevel@tonic-gate * the E-cache. We also need to query the bus nexus for fatal errors. 14437c478bd9Sstevel@tonic-gate * For sabre, we will panic on UEs. Attempts to do diagnostic reads on 14447c478bd9Sstevel@tonic-gate * the caches may introduce more parity errors (especially when the module 14457c478bd9Sstevel@tonic-gate * is bad) and in sabre there is no guarantee that such errors 14467c478bd9Sstevel@tonic-gate * (if introduced) are written back as poisoned data. 14477c478bd9Sstevel@tonic-gate */ 14487c478bd9Sstevel@tonic-gate if (t_afsr & P_AFSR_UE) { 14497c478bd9Sstevel@tonic-gate int i; 14507c478bd9Sstevel@tonic-gate 14517c478bd9Sstevel@tonic-gate (void) strcat(pr_reason, "UE "); 14527c478bd9Sstevel@tonic-gate 14537c478bd9Sstevel@tonic-gate spf_flt.flt_type = CPU_UE_ERR; 14547c478bd9Sstevel@tonic-gate aflt->flt_in_memory = (pf_is_memory(aflt->flt_addr >> 14557c478bd9Sstevel@tonic-gate MMU_PAGESHIFT)) ? 1: 0; 14567c478bd9Sstevel@tonic-gate 14577c478bd9Sstevel@tonic-gate /* 14587c478bd9Sstevel@tonic-gate * With UE, we have the PA of the fault. 14597c478bd9Sstevel@tonic-gate * Let's do a diagnostic read to get the ecache 14607c478bd9Sstevel@tonic-gate * data and tag info of the bad line for logging.
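 *
 * The way scan below derives a diagnostic index for each way from the
 * fault PA.  As a worked example with purely hypothetical sizes: for a
 * 1MB E$ with 2-way associativity, ec_set_size is 0x80000; a fault PA
 * of 0x12345000 gives a set offset of 0x12345000 % 0x80000 = 0x45000,
 * so way 0 is probed at index 0x45000 and way 1 at 0x80000 + 0x45000 =
 * 0xc5000, each aligned down to its 64-byte line for get_ecache_dtag().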
14617c478bd9Sstevel@tonic-gate */ 14627c478bd9Sstevel@tonic-gate if (aflt->flt_in_memory) { 14637c478bd9Sstevel@tonic-gate uint32_t ec_set_size; 14647c478bd9Sstevel@tonic-gate uchar_t state; 14657c478bd9Sstevel@tonic-gate uint32_t ecache_idx; 14667c478bd9Sstevel@tonic-gate uint64_t faultpa = P2ALIGN(aflt->flt_addr, 64); 14677c478bd9Sstevel@tonic-gate 14687c478bd9Sstevel@tonic-gate /* touch the line to put it in ecache */ 14697c478bd9Sstevel@tonic-gate acc_afsr |= read_and_clear_afsr(); 14707c478bd9Sstevel@tonic-gate (void) lddphys(faultpa); 14717c478bd9Sstevel@tonic-gate acc_afsr |= (read_and_clear_afsr() & 14727c478bd9Sstevel@tonic-gate ~(P_AFSR_EDP | P_AFSR_UE)); 14737c478bd9Sstevel@tonic-gate 14747c478bd9Sstevel@tonic-gate ec_set_size = cpunodes[CPU->cpu_id].ecache_size / 14757c478bd9Sstevel@tonic-gate ecache_associativity; 14767c478bd9Sstevel@tonic-gate 14777c478bd9Sstevel@tonic-gate for (i = 0; i < ecache_associativity; i++) { 14787c478bd9Sstevel@tonic-gate ecache_idx = i * ec_set_size + 14797c478bd9Sstevel@tonic-gate (aflt->flt_addr % ec_set_size); 14807c478bd9Sstevel@tonic-gate get_ecache_dtag(P2ALIGN(ecache_idx, 64), 14817c478bd9Sstevel@tonic-gate (uint64_t *)&spf_flt.flt_ec_data[0], 14827c478bd9Sstevel@tonic-gate &spf_flt.flt_ec_tag, &oafsr, &acc_afsr); 14837c478bd9Sstevel@tonic-gate acc_afsr |= oafsr; 14847c478bd9Sstevel@tonic-gate 14857c478bd9Sstevel@tonic-gate state = (uchar_t)((spf_flt.flt_ec_tag & 14867c478bd9Sstevel@tonic-gate cpu_ec_state_mask) >> cpu_ec_state_shift); 14877c478bd9Sstevel@tonic-gate 14887c478bd9Sstevel@tonic-gate if ((state & cpu_ec_state_valid) && 14897c478bd9Sstevel@tonic-gate ((spf_flt.flt_ec_tag & cpu_ec_tag_mask) == 14907c478bd9Sstevel@tonic-gate ((uint64_t)aflt->flt_addr >> 14917c478bd9Sstevel@tonic-gate cpu_ec_tag_shift))) 14927c478bd9Sstevel@tonic-gate break; 14937c478bd9Sstevel@tonic-gate } 14947c478bd9Sstevel@tonic-gate 14957c478bd9Sstevel@tonic-gate /* 14967c478bd9Sstevel@tonic-gate * Check to see if the ecache tag is valid for the 14977c478bd9Sstevel@tonic-gate * fault PA. In the very unlikely event where the 14987c478bd9Sstevel@tonic-gate * line could be victimized, no ecache info will be 14997c478bd9Sstevel@tonic-gate * available. If this is the case, capture the line 15007c478bd9Sstevel@tonic-gate * from memory instead. 15017c478bd9Sstevel@tonic-gate */ 15027c478bd9Sstevel@tonic-gate if ((state & cpu_ec_state_valid) == 0 || 15037c478bd9Sstevel@tonic-gate (spf_flt.flt_ec_tag & cpu_ec_tag_mask) != 15047c478bd9Sstevel@tonic-gate ((uint64_t)aflt->flt_addr >> cpu_ec_tag_shift)) { 15057c478bd9Sstevel@tonic-gate for (i = 0; i < 8; i++, faultpa += 8) { 15067c478bd9Sstevel@tonic-gate ec_data_t *ecdptr; 15077c478bd9Sstevel@tonic-gate 15087c478bd9Sstevel@tonic-gate ecdptr = &spf_flt.flt_ec_data[i]; 15097c478bd9Sstevel@tonic-gate acc_afsr |= read_and_clear_afsr(); 15107c478bd9Sstevel@tonic-gate ecdptr->ec_d8 = lddphys(faultpa); 15117c478bd9Sstevel@tonic-gate acc_afsr |= (read_and_clear_afsr() & 15127c478bd9Sstevel@tonic-gate ~(P_AFSR_EDP | P_AFSR_UE)); 15137c478bd9Sstevel@tonic-gate ecdptr->ec_afsr = 0; 15147c478bd9Sstevel@tonic-gate /* null afsr value */ 15157c478bd9Sstevel@tonic-gate } 15167c478bd9Sstevel@tonic-gate 15177c478bd9Sstevel@tonic-gate /* 15187c478bd9Sstevel@tonic-gate * Mark tag invalid to indicate mem dump 15197c478bd9Sstevel@tonic-gate * when we print out the info. 
15207c478bd9Sstevel@tonic-gate */ 15217c478bd9Sstevel@tonic-gate spf_flt.flt_ec_tag = AFLT_INV_ADDR; 15227c478bd9Sstevel@tonic-gate } 15237c478bd9Sstevel@tonic-gate spf_flt.flt_ec_lcnt = 1; 15247c478bd9Sstevel@tonic-gate 15257c478bd9Sstevel@tonic-gate /* 15267c478bd9Sstevel@tonic-gate * Flush out the bad line 15277c478bd9Sstevel@tonic-gate */ 15287c478bd9Sstevel@tonic-gate flushecacheline(P2ALIGN(aflt->flt_addr, 64), 15297c478bd9Sstevel@tonic-gate cpunodes[CPU->cpu_id].ecache_size); 15307c478bd9Sstevel@tonic-gate 15317c478bd9Sstevel@tonic-gate acc_afsr |= clear_errors(NULL, NULL); 15327c478bd9Sstevel@tonic-gate } 15337c478bd9Sstevel@tonic-gate 15347c478bd9Sstevel@tonic-gate /* 15357c478bd9Sstevel@tonic-gate * Ask our bus nexus friends if they have any fatal errors. If 15367c478bd9Sstevel@tonic-gate * so, they will log appropriate error messages and panic as a 15377c478bd9Sstevel@tonic-gate * result. We then queue an event for each UDB that reports a 15387c478bd9Sstevel@tonic-gate * UE. Each UE reported in a UDB will have its own log message. 15397c478bd9Sstevel@tonic-gate * 15407c478bd9Sstevel@tonic-gate * Note from kbn: In the case where there are multiple UEs 15417c478bd9Sstevel@tonic-gate * (ME bit is set) - the AFAR address is only accurate to 15427c478bd9Sstevel@tonic-gate * the 16-byte granularity. One cannot tell whether the AFAR 15437c478bd9Sstevel@tonic-gate * belongs to the UDBH or UDBL syndromes. In this case, we 15447c478bd9Sstevel@tonic-gate * always report the AFAR address to be 16-byte aligned. 15457c478bd9Sstevel@tonic-gate * 15467c478bd9Sstevel@tonic-gate * If we're on a Sabre, there is no SDBL, but it will always 15477c478bd9Sstevel@tonic-gate * read as zero, so the sdbl test below will safely fail. 15487c478bd9Sstevel@tonic-gate */ 15497c478bd9Sstevel@tonic-gate if (bus_func_invoke(BF_TYPE_UE) == BF_FATAL || isus2i || isus2e) 15507c478bd9Sstevel@tonic-gate aflt->flt_panic = 1; 15517c478bd9Sstevel@tonic-gate 15527c478bd9Sstevel@tonic-gate if (sdbh & P_DER_UE) { 15537c478bd9Sstevel@tonic-gate aflt->flt_synd = sdbh & P_DER_E_SYND; 15547c478bd9Sstevel@tonic-gate cpu_errorq_dispatch(FM_EREPORT_CPU_USII_UE, 15557c478bd9Sstevel@tonic-gate (void *)&spf_flt, sizeof (spf_flt), ue_queue, 15567c478bd9Sstevel@tonic-gate aflt->flt_panic); 15577c478bd9Sstevel@tonic-gate } 15587c478bd9Sstevel@tonic-gate if (sdbl & P_DER_UE) { 15597c478bd9Sstevel@tonic-gate aflt->flt_synd = sdbl & P_DER_E_SYND; 15607c478bd9Sstevel@tonic-gate aflt->flt_synd |= UDBL_REG; /* indicates UDBL */ 15617c478bd9Sstevel@tonic-gate if (!(aflt->flt_stat & P_AFSR_ME)) 15627c478bd9Sstevel@tonic-gate aflt->flt_addr |= 0x8; 15637c478bd9Sstevel@tonic-gate cpu_errorq_dispatch(FM_EREPORT_CPU_USII_UE, 15647c478bd9Sstevel@tonic-gate (void *)&spf_flt, sizeof (spf_flt), ue_queue, 15657c478bd9Sstevel@tonic-gate aflt->flt_panic); 15667c478bd9Sstevel@tonic-gate } 15677c478bd9Sstevel@tonic-gate 15687c478bd9Sstevel@tonic-gate /* 15697c478bd9Sstevel@tonic-gate * We got a UE and are panicking, save the fault PA in a known 15707c478bd9Sstevel@tonic-gate * location so that the platform specific panic code can check 15717c478bd9Sstevel@tonic-gate * for copyback errors. 
15727c478bd9Sstevel@tonic-gate */ 15737c478bd9Sstevel@tonic-gate if (aflt->flt_panic && aflt->flt_in_memory) { 15747c478bd9Sstevel@tonic-gate panic_aflt = *aflt; 15757c478bd9Sstevel@tonic-gate } 15767c478bd9Sstevel@tonic-gate } 15777c478bd9Sstevel@tonic-gate 15787c478bd9Sstevel@tonic-gate /* 15797c478bd9Sstevel@tonic-gate * Handle EDP and LDP: Locate the line with bad parity and enqueue an 15807c478bd9Sstevel@tonic-gate * async error for logging. For Sabre, we panic on EDP or LDP. 15817c478bd9Sstevel@tonic-gate */ 15827c478bd9Sstevel@tonic-gate if (t_afsr & (P_AFSR_EDP | P_AFSR_LDP)) { 15837c478bd9Sstevel@tonic-gate spf_flt.flt_type = CPU_EDP_LDP_ERR; 15847c478bd9Sstevel@tonic-gate 15857c478bd9Sstevel@tonic-gate if (t_afsr & P_AFSR_EDP) 15867c478bd9Sstevel@tonic-gate (void) strcat(pr_reason, "EDP "); 15877c478bd9Sstevel@tonic-gate 15887c478bd9Sstevel@tonic-gate if (t_afsr & P_AFSR_LDP) 15897c478bd9Sstevel@tonic-gate (void) strcat(pr_reason, "LDP "); 15907c478bd9Sstevel@tonic-gate 15917c478bd9Sstevel@tonic-gate /* 15927c478bd9Sstevel@tonic-gate * Here we have no PA to work with. 15937c478bd9Sstevel@tonic-gate * Scan each line in the ecache to look for 15947c478bd9Sstevel@tonic-gate * the one with bad parity. 15957c478bd9Sstevel@tonic-gate */ 15967c478bd9Sstevel@tonic-gate aflt->flt_addr = AFLT_INV_ADDR; 15977c478bd9Sstevel@tonic-gate scan_ecache(&aflt->flt_addr, &spf_flt.flt_ec_data[0], 15987c478bd9Sstevel@tonic-gate &spf_flt.flt_ec_tag, &spf_flt.flt_ec_lcnt, &oafsr); 15997c478bd9Sstevel@tonic-gate acc_afsr |= (oafsr & ~P_AFSR_WP); 16007c478bd9Sstevel@tonic-gate 16017c478bd9Sstevel@tonic-gate /* 16027c478bd9Sstevel@tonic-gate * If we found a bad PA, update the state to indicate if it is 16037c478bd9Sstevel@tonic-gate * memory or I/O space. This code will be important if we ever 16047c478bd9Sstevel@tonic-gate * support cacheable frame buffers. 16057c478bd9Sstevel@tonic-gate */ 16067c478bd9Sstevel@tonic-gate if (aflt->flt_addr != AFLT_INV_ADDR) { 16077c478bd9Sstevel@tonic-gate aflt->flt_in_memory = (pf_is_memory(aflt->flt_addr >> 16087c478bd9Sstevel@tonic-gate MMU_PAGESHIFT)) ? 1 : 0; 16097c478bd9Sstevel@tonic-gate } 16107c478bd9Sstevel@tonic-gate 16117c478bd9Sstevel@tonic-gate if (isus2i || isus2e) 16127c478bd9Sstevel@tonic-gate aflt->flt_panic = 1; 16137c478bd9Sstevel@tonic-gate 16147c478bd9Sstevel@tonic-gate cpu_errorq_dispatch((t_afsr & P_AFSR_EDP) ? 16157c478bd9Sstevel@tonic-gate FM_EREPORT_CPU_USII_EDP : FM_EREPORT_CPU_USII_LDP, 16167c478bd9Sstevel@tonic-gate (void *)&spf_flt, sizeof (spf_flt), ue_queue, 16177c478bd9Sstevel@tonic-gate aflt->flt_panic); 16187c478bd9Sstevel@tonic-gate } 16197c478bd9Sstevel@tonic-gate 16207c478bd9Sstevel@tonic-gate /* 16217c478bd9Sstevel@tonic-gate * Timeout and bus error handling. There are two cases to consider: 16227c478bd9Sstevel@tonic-gate * 16237c478bd9Sstevel@tonic-gate * (1) If we are in the kernel protected by ddi_peek or ddi_poke, we 16247c478bd9Sstevel@tonic-gate * have already modified the saved registers so that we will return 16257c478bd9Sstevel@tonic-gate * from the trap to the appropriate trampoline routine; otherwise panic. 16267c478bd9Sstevel@tonic-gate * 16277c478bd9Sstevel@tonic-gate * (2) In user mode, we can simply use our AST mechanism to deliver 16287c478bd9Sstevel@tonic-gate * a SIGBUS. We do not log the occurrence - processes performing 16297c478bd9Sstevel@tonic-gate * device control would generate lots of uninteresting messages.
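 *
 * As a sketch of case (1), a driver probing a device register through
 * the DDI peek interface relies on exactly this trampoline (dip and
 * regp below are illustrative):
 *
 *	int8_t val;
 *
 *	if (ddi_peek8(dip, (int8_t *)regp, &val) != DDI_SUCCESS) {
 *		... handle the timeout or bus error ...
 *	}
 *
 * The peek is done under on_trap(OT_DATA_ACCESS) protection, so a
 * TO/BERR taken here comes back to the driver as DDI_FAILURE instead
 * of panicking the system.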
16307c478bd9Sstevel@tonic-gate */ 16317c478bd9Sstevel@tonic-gate if (t_afsr & (P_AFSR_TO | P_AFSR_BERR)) { 16327c478bd9Sstevel@tonic-gate if (t_afsr & P_AFSR_TO) 16337c478bd9Sstevel@tonic-gate (void) strcat(pr_reason, "BTO "); 16347c478bd9Sstevel@tonic-gate 16357c478bd9Sstevel@tonic-gate if (t_afsr & P_AFSR_BERR) 16367c478bd9Sstevel@tonic-gate (void) strcat(pr_reason, "BERR "); 16377c478bd9Sstevel@tonic-gate 16387c478bd9Sstevel@tonic-gate spf_flt.flt_type = CPU_BTO_BERR_ERR; 16397c478bd9Sstevel@tonic-gate if (aflt->flt_priv && aflt->flt_prot == AFLT_PROT_NONE) { 16407c478bd9Sstevel@tonic-gate cpu_errorq_dispatch((t_afsr & P_AFSR_TO) ? 16417c478bd9Sstevel@tonic-gate FM_EREPORT_CPU_USII_TO : FM_EREPORT_CPU_USII_BERR, 16427c478bd9Sstevel@tonic-gate (void *)&spf_flt, sizeof (spf_flt), ue_queue, 16437c478bd9Sstevel@tonic-gate aflt->flt_panic); 16447c478bd9Sstevel@tonic-gate } 16457c478bd9Sstevel@tonic-gate } 16467c478bd9Sstevel@tonic-gate 16477c478bd9Sstevel@tonic-gate /* 16487c478bd9Sstevel@tonic-gate * Handle WP: WP happens when the ecache is victimized and a parity 16497c478bd9Sstevel@tonic-gate * error was detected on a writeback. The data in question will be 16507c478bd9Sstevel@tonic-gate * poisoned as a UE will be written back. The PA is not logged and 16517c478bd9Sstevel@tonic-gate * it is possible that it doesn't belong to the trapped thread. The 16527c478bd9Sstevel@tonic-gate * WP trap is not fatal, but it could be fatal to someone that 16537c478bd9Sstevel@tonic-gate * subsequently accesses the toxic page. We set read_all_memscrub 16547c478bd9Sstevel@tonic-gate * to force the memscrubber to read all of memory when it awakens. 16557c478bd9Sstevel@tonic-gate * For Sabre/Hummingbird, WP is fatal because the HW doesn't write a 16567c478bd9Sstevel@tonic-gate * UE back to poison the data. 16577c478bd9Sstevel@tonic-gate */ 16587c478bd9Sstevel@tonic-gate if (t_afsr & P_AFSR_WP) { 16597c478bd9Sstevel@tonic-gate (void) strcat(pr_reason, "WP "); 16607c478bd9Sstevel@tonic-gate if (isus2i || isus2e) { 16617c478bd9Sstevel@tonic-gate aflt->flt_panic = 1; 16627c478bd9Sstevel@tonic-gate } else { 16637c478bd9Sstevel@tonic-gate read_all_memscrub = 1; 16647c478bd9Sstevel@tonic-gate } 16657c478bd9Sstevel@tonic-gate spf_flt.flt_type = CPU_WP_ERR; 16667c478bd9Sstevel@tonic-gate cpu_errorq_dispatch(FM_EREPORT_CPU_USII_WP, 16677c478bd9Sstevel@tonic-gate (void *)&spf_flt, sizeof (spf_flt), ue_queue, 16687c478bd9Sstevel@tonic-gate aflt->flt_panic); 16697c478bd9Sstevel@tonic-gate } 16707c478bd9Sstevel@tonic-gate 16717c478bd9Sstevel@tonic-gate /* 16727c478bd9Sstevel@tonic-gate * Handle trapping CP error: In Sabre/Hummingbird, parity error in 16737c478bd9Sstevel@tonic-gate * the ecache on a copyout due to a PCI DMA read is signaled as a CP. 16747c478bd9Sstevel@tonic-gate * This is fatal. 
16757c478bd9Sstevel@tonic-gate */ 16767c478bd9Sstevel@tonic-gate 16777c478bd9Sstevel@tonic-gate if (t_afsr & P_AFSR_CP) { 16787c478bd9Sstevel@tonic-gate if (isus2i || isus2e) { 16797c478bd9Sstevel@tonic-gate (void) strcat(pr_reason, "CP "); 16807c478bd9Sstevel@tonic-gate aflt->flt_panic = 1; 16817c478bd9Sstevel@tonic-gate spf_flt.flt_type = CPU_TRAPPING_CP_ERR; 16827c478bd9Sstevel@tonic-gate cpu_errorq_dispatch(FM_EREPORT_CPU_USII_CP, 16837c478bd9Sstevel@tonic-gate (void *)&spf_flt, sizeof (spf_flt), ue_queue, 16847c478bd9Sstevel@tonic-gate aflt->flt_panic); 16857c478bd9Sstevel@tonic-gate } else { 16867c478bd9Sstevel@tonic-gate /* 16877c478bd9Sstevel@tonic-gate * Orphan CP: Happens due to signal integrity problem 16887c478bd9Sstevel@tonic-gate * on a CPU, where a CP is reported, without reporting 16897c478bd9Sstevel@tonic-gate * its associated UE. This is handled by locating the 16907c478bd9Sstevel@tonic-gate * bad parity line and would kick off the memscrubber 16917c478bd9Sstevel@tonic-gate * to find the UE if in memory or in another's cache. 16927c478bd9Sstevel@tonic-gate */ 16937c478bd9Sstevel@tonic-gate spf_flt.flt_type = CPU_ORPHAN_CP_ERR; 16947c478bd9Sstevel@tonic-gate (void) strcat(pr_reason, "ORPHAN_CP "); 16957c478bd9Sstevel@tonic-gate 16967c478bd9Sstevel@tonic-gate /* 16977c478bd9Sstevel@tonic-gate * Here we have no PA to work with. 16987c478bd9Sstevel@tonic-gate * Scan each line in the ecache to look for 16997c478bd9Sstevel@tonic-gate * the one with bad parity. 17007c478bd9Sstevel@tonic-gate */ 17017c478bd9Sstevel@tonic-gate aflt->flt_addr = AFLT_INV_ADDR; 17027c478bd9Sstevel@tonic-gate scan_ecache(&aflt->flt_addr, &spf_flt.flt_ec_data[0], 17037c478bd9Sstevel@tonic-gate &spf_flt.flt_ec_tag, &spf_flt.flt_ec_lcnt, 17047c478bd9Sstevel@tonic-gate &oafsr); 17057c478bd9Sstevel@tonic-gate acc_afsr |= oafsr; 17067c478bd9Sstevel@tonic-gate 17077c478bd9Sstevel@tonic-gate /* 17087c478bd9Sstevel@tonic-gate * If we found a bad PA, update the state to indicate 17097c478bd9Sstevel@tonic-gate * if it is memory or I/O space. 17107c478bd9Sstevel@tonic-gate */ 17117c478bd9Sstevel@tonic-gate if (aflt->flt_addr != AFLT_INV_ADDR) { 17127c478bd9Sstevel@tonic-gate aflt->flt_in_memory = 17137c478bd9Sstevel@tonic-gate (pf_is_memory(aflt->flt_addr >> 17147c478bd9Sstevel@tonic-gate MMU_PAGESHIFT)) ? 1 : 0; 17157c478bd9Sstevel@tonic-gate } 17167c478bd9Sstevel@tonic-gate read_all_memscrub = 1; 17177c478bd9Sstevel@tonic-gate cpu_errorq_dispatch(FM_EREPORT_CPU_USII_CP, 17187c478bd9Sstevel@tonic-gate (void *)&spf_flt, sizeof (spf_flt), ue_queue, 17197c478bd9Sstevel@tonic-gate aflt->flt_panic); 17207c478bd9Sstevel@tonic-gate 17217c478bd9Sstevel@tonic-gate } 17227c478bd9Sstevel@tonic-gate } 17237c478bd9Sstevel@tonic-gate 17247c478bd9Sstevel@tonic-gate /* 17257c478bd9Sstevel@tonic-gate * If we queued an error other than WP or CP and we are going to return 17267c478bd9Sstevel@tonic-gate * from the trap and the error was in user mode or inside of a 17277c478bd9Sstevel@tonic-gate * copy routine, set AST flag so the queue will be drained before 17287c478bd9Sstevel@tonic-gate * returning to user mode. 17297c478bd9Sstevel@tonic-gate * 17307c478bd9Sstevel@tonic-gate * For UE/LDP/EDP, the AST processing will SIGKILL the process 17317c478bd9Sstevel@tonic-gate * and send an event to its process contract. 17327c478bd9Sstevel@tonic-gate * 17337c478bd9Sstevel@tonic-gate * For BERR/BTO, the AST processing will SIGBUS the process. There 17347c478bd9Sstevel@tonic-gate * will have been no error queued in this case. 
17357c478bd9Sstevel@tonic-gate */ 17367c478bd9Sstevel@tonic-gate if ((t_afsr & 17377c478bd9Sstevel@tonic-gate (P_AFSR_UE | P_AFSR_LDP | P_AFSR_EDP | P_AFSR_BERR | P_AFSR_TO)) && 17387c478bd9Sstevel@tonic-gate (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY)) { 17397c478bd9Sstevel@tonic-gate int pcb_flag = 0; 17407c478bd9Sstevel@tonic-gate 17417c478bd9Sstevel@tonic-gate if (t_afsr & (P_AFSR_UE | P_AFSR_LDP | P_AFSR_EDP)) 17427c478bd9Sstevel@tonic-gate pcb_flag |= ASYNC_HWERR; 17437c478bd9Sstevel@tonic-gate 17447c478bd9Sstevel@tonic-gate if (t_afsr & P_AFSR_BERR) 17457c478bd9Sstevel@tonic-gate pcb_flag |= ASYNC_BERR; 17467c478bd9Sstevel@tonic-gate 17477c478bd9Sstevel@tonic-gate if (t_afsr & P_AFSR_TO) 17487c478bd9Sstevel@tonic-gate pcb_flag |= ASYNC_BTO; 17497c478bd9Sstevel@tonic-gate 17507c478bd9Sstevel@tonic-gate ttolwp(curthread)->lwp_pcb.pcb_flags |= pcb_flag; 17517c478bd9Sstevel@tonic-gate aston(curthread); 17527c478bd9Sstevel@tonic-gate action = ACTION_AST_FLAGS; 17537c478bd9Sstevel@tonic-gate } 17547c478bd9Sstevel@tonic-gate 17557c478bd9Sstevel@tonic-gate /* 17567c478bd9Sstevel@tonic-gate * In response to a deferred error, we must do one of three things: 17577c478bd9Sstevel@tonic-gate * (1) set the AST flags, (2) trampoline, or (3) panic. action is 17587c478bd9Sstevel@tonic-gate * set in cases (1) and (2) - check that either action is set or 17597c478bd9Sstevel@tonic-gate * (3) is true. 17607c478bd9Sstevel@tonic-gate * 17617c478bd9Sstevel@tonic-gate * On II, the WP writes poisoned data back to memory, which will 17627c478bd9Sstevel@tonic-gate * cause a UE and a panic or reboot when read. In this case, we 17637c478bd9Sstevel@tonic-gate * don't need to panic at this time. On IIi and IIe, 17647c478bd9Sstevel@tonic-gate * aflt->flt_panic is already set above. 17657c478bd9Sstevel@tonic-gate */ 17667c478bd9Sstevel@tonic-gate ASSERT((aflt->flt_panic != 0) || (action != ACTION_NONE) || 17677c478bd9Sstevel@tonic-gate (t_afsr & P_AFSR_WP)); 17687c478bd9Sstevel@tonic-gate 17697c478bd9Sstevel@tonic-gate /* 17707c478bd9Sstevel@tonic-gate * Make a final sanity check to make sure we did not get any more async 17717c478bd9Sstevel@tonic-gate * errors and accumulate the afsr. 17727c478bd9Sstevel@tonic-gate */ 17737c478bd9Sstevel@tonic-gate flush_ecache(ecache_flushaddr, cpunodes[CPU->cpu_id].ecache_size * 2, 17747c478bd9Sstevel@tonic-gate cpunodes[CPU->cpu_id].ecache_linesize); 17757c478bd9Sstevel@tonic-gate (void) clear_errors(&spf_flt, NULL); 17767c478bd9Sstevel@tonic-gate 17777c478bd9Sstevel@tonic-gate /* 17787c478bd9Sstevel@tonic-gate * Take care of a special case: If there is a UE in the ecache flush 17797c478bd9Sstevel@tonic-gate * area, we'll see it in flush_ecache(). This will trigger the 17807c478bd9Sstevel@tonic-gate * CPU_ADDITIONAL_ERRORS case below. 17817c478bd9Sstevel@tonic-gate * 17827c478bd9Sstevel@tonic-gate * This could occur if the original error was a UE in the flush area, 17837c478bd9Sstevel@tonic-gate * or if the original error was an E$ error that was flushed out of 17847c478bd9Sstevel@tonic-gate * the E$ in scan_ecache(). 17857c478bd9Sstevel@tonic-gate * 17867c478bd9Sstevel@tonic-gate * If it's at the same address that we're already logging, then it's 17877c478bd9Sstevel@tonic-gate * probably one of these cases. Clear the bit so we don't trip over 17887c478bd9Sstevel@tonic-gate * it on the additional errors case, which could cause an unnecessary 17897c478bd9Sstevel@tonic-gate * panic. 
17907c478bd9Sstevel@tonic-gate */ 17917c478bd9Sstevel@tonic-gate if ((aflt->flt_stat & P_AFSR_UE) && aflt->flt_addr == t_afar) 17927c478bd9Sstevel@tonic-gate acc_afsr |= aflt->flt_stat & ~P_AFSR_UE; 17937c478bd9Sstevel@tonic-gate else 17947c478bd9Sstevel@tonic-gate acc_afsr |= aflt->flt_stat; 17957c478bd9Sstevel@tonic-gate 17967c478bd9Sstevel@tonic-gate /* 17977c478bd9Sstevel@tonic-gate * Check the accumulated afsr for the important bits. 17987c478bd9Sstevel@tonic-gate * Make sure the spf_flt.flt_type value is set, and 17997c478bd9Sstevel@tonic-gate * enqueue an error. 18007c478bd9Sstevel@tonic-gate */ 18017c478bd9Sstevel@tonic-gate if (acc_afsr & 18027c478bd9Sstevel@tonic-gate (P_AFSR_LEVEL1 | P_AFSR_IVUE | P_AFSR_ETP | P_AFSR_ISAP)) { 18037c478bd9Sstevel@tonic-gate if (acc_afsr & (P_AFSR_UE | P_AFSR_EDP | P_AFSR_LDP | 18047c478bd9Sstevel@tonic-gate P_AFSR_BERR | P_AFSR_TO | P_AFSR_IVUE | P_AFSR_ETP | 18057c478bd9Sstevel@tonic-gate P_AFSR_ISAP)) 18067c478bd9Sstevel@tonic-gate aflt->flt_panic = 1; 18077c478bd9Sstevel@tonic-gate 18087c478bd9Sstevel@tonic-gate spf_flt.flt_type = CPU_ADDITIONAL_ERR; 18097c478bd9Sstevel@tonic-gate aflt->flt_stat = acc_afsr; 18107c478bd9Sstevel@tonic-gate cpu_errorq_dispatch(FM_EREPORT_CPU_USII_UNKNOWN, 18117c478bd9Sstevel@tonic-gate (void *)&spf_flt, sizeof (spf_flt), ue_queue, 18127c478bd9Sstevel@tonic-gate aflt->flt_panic); 18137c478bd9Sstevel@tonic-gate } 18147c478bd9Sstevel@tonic-gate 18157c478bd9Sstevel@tonic-gate /* 18167c478bd9Sstevel@tonic-gate * If aflt->flt_panic is set at this point, we need to panic as the 18177c478bd9Sstevel@tonic-gate * result of a trap at TL > 0, or an error we determined to be fatal. 18187c478bd9Sstevel@tonic-gate * We've already enqueued the error in one of the if-clauses above, 18197c478bd9Sstevel@tonic-gate * and it will be dequeued and logged as part of the panic flow. 18207c478bd9Sstevel@tonic-gate */ 18217c478bd9Sstevel@tonic-gate if (aflt->flt_panic) { 18227c478bd9Sstevel@tonic-gate cpu_aflt_log(CE_PANIC, 1, &spf_flt, CPU_ERRID_FIRST, 18237c478bd9Sstevel@tonic-gate "See previous message(s) for details", " %sError(s)", 18247c478bd9Sstevel@tonic-gate pr_reason); 18257c478bd9Sstevel@tonic-gate } 18267c478bd9Sstevel@tonic-gate 18277c478bd9Sstevel@tonic-gate /* 18287c478bd9Sstevel@tonic-gate * Before returning, we must re-enable errors, and 18297c478bd9Sstevel@tonic-gate * reset the caches to their boot-up state. 18307c478bd9Sstevel@tonic-gate */ 18317c478bd9Sstevel@tonic-gate set_lsu(get_lsu() | cache_boot_state); 18327c478bd9Sstevel@tonic-gate set_error_enable(EER_ENABLE); 18337c478bd9Sstevel@tonic-gate } 18347c478bd9Sstevel@tonic-gate 18357c478bd9Sstevel@tonic-gate /* 18367c478bd9Sstevel@tonic-gate * Check for miscellaneous fatal errors and call CE_PANIC if any are seen. 18377c478bd9Sstevel@tonic-gate * This routine is shared by the CE and UE handling code. 18387c478bd9Sstevel@tonic-gate */ 18397c478bd9Sstevel@tonic-gate static void 18407c478bd9Sstevel@tonic-gate check_misc_err(spitf_async_flt *spf_flt) 18417c478bd9Sstevel@tonic-gate { 18427c478bd9Sstevel@tonic-gate struct async_flt *aflt = (struct async_flt *)spf_flt; 18437c478bd9Sstevel@tonic-gate char *fatal_str = NULL; 18447c478bd9Sstevel@tonic-gate 18457c478bd9Sstevel@tonic-gate /* 18467c478bd9Sstevel@tonic-gate * The ISAP and ETP errors are supposed to cause a POR 18477c478bd9Sstevel@tonic-gate * from the system, so in theory we never, ever see these messages. 18487c478bd9Sstevel@tonic-gate * ISAP, ETP and IVUE are considered to be fatal.
18497c478bd9Sstevel@tonic-gate */ 18507c478bd9Sstevel@tonic-gate if (aflt->flt_stat & P_AFSR_ISAP) 18517c478bd9Sstevel@tonic-gate fatal_str = " System Address Parity Error on"; 18527c478bd9Sstevel@tonic-gate else if (aflt->flt_stat & P_AFSR_ETP) 18537c478bd9Sstevel@tonic-gate fatal_str = " Ecache Tag Parity Error on"; 18547c478bd9Sstevel@tonic-gate else if (aflt->flt_stat & P_AFSR_IVUE) 18557c478bd9Sstevel@tonic-gate fatal_str = " Interrupt Vector Uncorrectable Error on"; 18567c478bd9Sstevel@tonic-gate if (fatal_str != NULL) { 18577c478bd9Sstevel@tonic-gate cpu_aflt_log(CE_PANIC, 1, spf_flt, CMN_LFLAGS, 18587c478bd9Sstevel@tonic-gate NULL, fatal_str); 18597c478bd9Sstevel@tonic-gate } 18607c478bd9Sstevel@tonic-gate } 18617c478bd9Sstevel@tonic-gate 18627c478bd9Sstevel@tonic-gate /* 18637c478bd9Sstevel@tonic-gate * Routine to convert a syndrome into a syndrome code. 18647c478bd9Sstevel@tonic-gate */ 18657c478bd9Sstevel@tonic-gate static int 18667c478bd9Sstevel@tonic-gate synd_to_synd_code(int synd_status, ushort_t synd) 18677c478bd9Sstevel@tonic-gate { 18687c478bd9Sstevel@tonic-gate if (synd_status != AFLT_STAT_VALID) 18697c478bd9Sstevel@tonic-gate return (-1); 18707c478bd9Sstevel@tonic-gate 18717c478bd9Sstevel@tonic-gate /* 18727c478bd9Sstevel@tonic-gate * Use the 8-bit syndrome to index the ecc_syndrome_tab 18737c478bd9Sstevel@tonic-gate * to get the code indicating which bit(s) is(are) bad. 18747c478bd9Sstevel@tonic-gate */ 18757c478bd9Sstevel@tonic-gate if ((synd == 0) || (synd >= SYND_TBL_SIZE)) 18767c478bd9Sstevel@tonic-gate return (-1); 18777c478bd9Sstevel@tonic-gate else 18787c478bd9Sstevel@tonic-gate return (ecc_syndrome_tab[synd]); 18797c478bd9Sstevel@tonic-gate } 18807c478bd9Sstevel@tonic-gate 1881*d00f0155Sayznaga /* ARGSUSED */ 1882*d00f0155Sayznaga int 1883*d00f0155Sayznaga cpu_get_mem_sid(char *unum, char *buf, int buflen, int *lenp) 1884*d00f0155Sayznaga { 1885*d00f0155Sayznaga return (ENOTSUP); 1886*d00f0155Sayznaga } 1887*d00f0155Sayznaga 1888*d00f0155Sayznaga /* ARGSUSED */ 1889*d00f0155Sayznaga int 1890*d00f0155Sayznaga cpu_get_mem_offset(uint64_t flt_addr, uint64_t *offp) 1891*d00f0155Sayznaga { 1892*d00f0155Sayznaga return (ENOTSUP); 1893*d00f0155Sayznaga } 1894*d00f0155Sayznaga 1895*d00f0155Sayznaga /* ARGSUSED */ 1896*d00f0155Sayznaga int 1897*d00f0155Sayznaga cpu_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *addrp) 1898*d00f0155Sayznaga { 1899*d00f0155Sayznaga return (ENOTSUP); 1900*d00f0155Sayznaga } 1901*d00f0155Sayznaga 19027c478bd9Sstevel@tonic-gate /* 19037c478bd9Sstevel@tonic-gate * Routine to return a string identifying the physical name 19047c478bd9Sstevel@tonic-gate * associated with a memory/cache error. 
19057c478bd9Sstevel@tonic-gate */ 19067c478bd9Sstevel@tonic-gate /* ARGSUSED */ 19077c478bd9Sstevel@tonic-gate int 19087c478bd9Sstevel@tonic-gate cpu_get_mem_unum(int synd_status, ushort_t synd, uint64_t afsr, 19097c478bd9Sstevel@tonic-gate uint64_t afar, int cpuid, int flt_in_memory, ushort_t flt_status, 19107c478bd9Sstevel@tonic-gate char *buf, int buflen, int *lenp) 19117c478bd9Sstevel@tonic-gate { 19127c478bd9Sstevel@tonic-gate short synd_code; 19137c478bd9Sstevel@tonic-gate int ret; 19147c478bd9Sstevel@tonic-gate 19157c478bd9Sstevel@tonic-gate if (flt_in_memory) { 19167c478bd9Sstevel@tonic-gate synd_code = synd_to_synd_code(synd_status, synd); 19177c478bd9Sstevel@tonic-gate if (synd_code == -1) { 19187c478bd9Sstevel@tonic-gate ret = EINVAL; 19197c478bd9Sstevel@tonic-gate } else if (prom_get_unum(synd_code, P2ALIGN(afar, 8), 19207c478bd9Sstevel@tonic-gate buf, buflen, lenp) != 0) { 19217c478bd9Sstevel@tonic-gate ret = EIO; 19227c478bd9Sstevel@tonic-gate } else if (*lenp <= 1) { 19237c478bd9Sstevel@tonic-gate ret = EINVAL; 19247c478bd9Sstevel@tonic-gate } else { 19257c478bd9Sstevel@tonic-gate ret = 0; 19267c478bd9Sstevel@tonic-gate } 19277c478bd9Sstevel@tonic-gate } else { 19287c478bd9Sstevel@tonic-gate ret = ENOTSUP; 19297c478bd9Sstevel@tonic-gate } 19307c478bd9Sstevel@tonic-gate 19317c478bd9Sstevel@tonic-gate if (ret != 0) { 19327c478bd9Sstevel@tonic-gate buf[0] = '\0'; 19337c478bd9Sstevel@tonic-gate *lenp = 0; 19347c478bd9Sstevel@tonic-gate } 19357c478bd9Sstevel@tonic-gate 19367c478bd9Sstevel@tonic-gate return (ret); 19377c478bd9Sstevel@tonic-gate } 19387c478bd9Sstevel@tonic-gate 19397c478bd9Sstevel@tonic-gate /* 19407c478bd9Sstevel@tonic-gate * Wrapper for cpu_get_mem_unum() routine that takes an 19417c478bd9Sstevel@tonic-gate * async_flt struct rather than explicit arguments. 19427c478bd9Sstevel@tonic-gate */ 19437c478bd9Sstevel@tonic-gate int 19447c478bd9Sstevel@tonic-gate cpu_get_mem_unum_aflt(int synd_status, struct async_flt *aflt, 19457c478bd9Sstevel@tonic-gate char *buf, int buflen, int *lenp) 19467c478bd9Sstevel@tonic-gate { 19477c478bd9Sstevel@tonic-gate return (cpu_get_mem_unum(synd_status, SYND(aflt->flt_synd), 19487c478bd9Sstevel@tonic-gate aflt->flt_stat, aflt->flt_addr, aflt->flt_bus_id, 19497c478bd9Sstevel@tonic-gate aflt->flt_in_memory, aflt->flt_status, buf, buflen, lenp)); 19507c478bd9Sstevel@tonic-gate } 19517c478bd9Sstevel@tonic-gate 19527c478bd9Sstevel@tonic-gate /* 19537c478bd9Sstevel@tonic-gate * This routine is a more generic interface to cpu_get_mem_unum(), 19547c478bd9Sstevel@tonic-gate * that may be used by other modules (e.g. mm). 19557c478bd9Sstevel@tonic-gate */ 19567c478bd9Sstevel@tonic-gate int 19577c478bd9Sstevel@tonic-gate cpu_get_mem_name(uint64_t synd, uint64_t *afsr, uint64_t afar, 19587c478bd9Sstevel@tonic-gate char *buf, int buflen, int *lenp) 19597c478bd9Sstevel@tonic-gate { 19607c478bd9Sstevel@tonic-gate int synd_status, flt_in_memory, ret; 19617c478bd9Sstevel@tonic-gate char unum[UNUM_NAMLEN]; 19627c478bd9Sstevel@tonic-gate 19637c478bd9Sstevel@tonic-gate /* 19647c478bd9Sstevel@tonic-gate * Check for an invalid address. 
	 */
	if (afar == (uint64_t)-1)
		return (ENXIO);

	if (synd == (uint64_t)-1)
		synd_status = AFLT_STAT_INVALID;
	else
		synd_status = AFLT_STAT_VALID;

	flt_in_memory = (pf_is_memory(afar >> MMU_PAGESHIFT)) ? 1 : 0;

	if ((ret = cpu_get_mem_unum(synd_status, (ushort_t)synd, *afsr, afar,
	    CPU->cpu_id, flt_in_memory, 0, unum, UNUM_NAMLEN, lenp))
	    != 0)
		return (ret);

	if (*lenp >= buflen)
		return (ENAMETOOLONG);

	(void) strncpy(buf, unum, buflen);

	return (0);
}

/*
 * Routine to return memory information associated
 * with a physical address and syndrome.
 */
/* ARGSUSED */
int
cpu_get_mem_info(uint64_t synd, uint64_t afar,
    uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep,
    int *segsp, int *banksp, int *mcidp)
{
	return (ENOTSUP);
}

/*
 * Routine to return a string identifying the physical
 * name associated with a cpuid.
 */
/* ARGSUSED */
int
cpu_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
{
	return (ENOTSUP);
}

/*
 * This routine returns the size of the kernel's FRU name buffer.
 */
size_t
cpu_get_name_bufsize()
{
	return (UNUM_NAMLEN);
}

/*
 * CPU-specific log function for UEs.
 */
static void
log_ue_err(struct async_flt *aflt, char *unum)
{
	spitf_async_flt *spf_flt = (spitf_async_flt *)aflt;
	int len = 0;

#ifdef DEBUG
	int afsr_priv = (aflt->flt_stat & P_AFSR_PRIV) ? 1 : 0;

	/*
	 * Paranoid check for priv mismatch.
	 * Only applicable for UEs.
	 */
	if (afsr_priv != aflt->flt_priv) {
		/*
		 * The priv bits in %tstate and %afsr did not match; we expect
		 * this to be very rare, so flag it with a message.
		 */
		cpu_aflt_log(CE_WARN, 2, spf_flt, CPU_ERRID_FIRST, NULL,
		    ": PRIV bit in TSTATE and AFSR mismatched; "
		    "TSTATE.PRIV=%d used", (aflt->flt_priv) ? 1 : 0);

		/* update saved afsr to reflect the correct priv */
		aflt->flt_stat &= ~P_AFSR_PRIV;
		if (aflt->flt_priv)
			aflt->flt_stat |= P_AFSR_PRIV;
	}
#endif /* DEBUG */

	(void) cpu_get_mem_unum_aflt(AFLT_STAT_VALID, aflt, unum,
	    UNUM_NAMLEN, &len);

	cpu_aflt_log(CE_WARN, 1, spf_flt, UE_LFLAGS, unum,
	    " Uncorrectable Memory Error on");

	if (SYND(aflt->flt_synd) == 0x3) {
		cpu_aflt_log(CE_WARN, 1, spf_flt, CPU_ERRID_FIRST, NULL,
		    " Syndrome 0x3 indicates that this may not be a "
		    "memory module problem");
	}

	if (aflt->flt_in_memory)
		cpu_log_ecmem_info(spf_flt);
}


/*
 * The cpu_async_log_err() function is called via the ue_drain() function to
 * handle logging for CPU events that are dequeued.  As such, it can be invoked
 * from softint context, from AST processing in the trap() flow, or from the
 * panic flow.  We decode the CPU-specific data, and log appropriate messages.
 */
static void
cpu_async_log_err(void *flt)
{
	spitf_async_flt *spf_flt = (spitf_async_flt *)flt;
	struct async_flt *aflt = (struct async_flt *)flt;
	char unum[UNUM_NAMLEN];
	char *space;
	char *ecache_scrub_logstr = NULL;

	switch (spf_flt->flt_type) {
	case CPU_UE_ERR:
		/*
		 * We want to skip logging only if ALL the following
		 * conditions are true:
		 *
		 * 1. We are not panicking
		 * 2. There is only one error
		 * 3. That error is a memory error
		 * 4. The error is caused by the memory scrubber (in
		 *    which case the error will have occurred under
		 *    on_trap protection)
		 * 5. The error is on a retired page
		 *
		 * Note 1: AFLT_PROT_EC is used in places other than the
		 *	memory scrubber.  However, none of those errors should
		 *	occur on a retired page.
		 *
		 * Note 2: In the CE case, these errors are discarded before
		 *	the errorq.  In the UE case, we must wait until now --
		 *	softcall() grabs a mutex, which we can't do at a high PIL.
		 */
		if (!panicstr &&
		    (aflt->flt_stat & S_AFSR_ALL_ERRS) == P_AFSR_UE &&
		    aflt->flt_prot == AFLT_PROT_EC) {
			if (page_retire_check(aflt->flt_addr, NULL) == 0) {
				/* Zero the address to clear the error */
				softcall(ecc_page_zero, (void *)aflt->flt_addr);
				return;
			}
		}

		/*
		 * Log the UE and check for causes of this UE error that
		 * don't cause a trap (Copyback error).  cpu_async_error()
		 * has already checked the i/o buses for us.
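		 * For memory UEs we also cross-call every other CPU (see
		 * cpu_check_allcpus() below) to look for a pending copyback
		 * (CP) error that could explain this UE.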
		 */
		log_ue_err(aflt, unum);
		if (aflt->flt_in_memory)
			cpu_check_allcpus(aflt);
		break;

	case CPU_EDP_LDP_ERR:
		if (aflt->flt_stat & P_AFSR_EDP)
			cpu_aflt_log(CE_WARN, 1, spf_flt, PARERR_LFLAGS,
			    NULL, " EDP event on");

		if (aflt->flt_stat & P_AFSR_LDP)
			cpu_aflt_log(CE_WARN, 1, spf_flt, PARERR_LFLAGS,
			    NULL, " LDP event on");

		/* Log ecache info if it exists */
		if (spf_flt->flt_ec_lcnt > 0) {
			cpu_log_ecmem_info(spf_flt);

			cpu_aflt_log(CE_CONT, 2, spf_flt, CPU_ERRID_FIRST,
			    NULL, " AFAR was derived from E$Tag");
		} else {
			cpu_aflt_log(CE_CONT, 2, spf_flt, CPU_ERRID_FIRST,
			    NULL, " No error found in ecache (No fault "
			    "PA available)");
		}
		break;

	case CPU_WP_ERR:
		/*
		 * If the memscrub thread hasn't yet read
		 * all of memory, as we requested in the
		 * trap handler, then give it a kick to
		 * make sure it does.
		 */
		if (!isus2i && !isus2e && read_all_memscrub)
			memscrub_run();

		cpu_aflt_log(CE_WARN, 1, spf_flt, WP_LFLAGS, NULL,
		    " WP event on");
		return;

	case CPU_BTO_BERR_ERR:
		/*
		 * A bus timeout or error occurred that was in user mode or not
		 * in a protected kernel code region.
		 */
		if (aflt->flt_stat & P_AFSR_BERR) {
			cpu_aflt_log(CE_WARN, aflt->flt_panic ? 1 : 2,
			    spf_flt, BERRTO_LFLAGS, NULL,
			    " Bus Error on System Bus in %s mode from",
			    aflt->flt_priv ? "privileged" : "user");
		}

		if (aflt->flt_stat & P_AFSR_TO) {
			cpu_aflt_log(CE_WARN, aflt->flt_panic ? 1 : 2,
			    spf_flt, BERRTO_LFLAGS, NULL,
			    " Timeout on System Bus in %s mode from",
			    aflt->flt_priv ? "privileged" : "user");
		}

		return;

	case CPU_PANIC_CP_ERR:
		/*
		 * Process the Copyback (CP) error info (if any) obtained from
		 * polling all the cpus in the panic flow.  This case is only
		 * entered if we are panicking.
		 */
		ASSERT(panicstr != NULL);
		ASSERT(aflt->flt_id == panic_aflt.flt_id);

		/* See which space - this info may not exist */
		if (panic_aflt.flt_status & ECC_D_TRAP)
			space = "Data ";
		else if (panic_aflt.flt_status & ECC_I_TRAP)
			space = "Instruction ";
		else
			space = "";

		cpu_aflt_log(CE_WARN, 1, spf_flt, CP_LFLAGS, NULL,
		    " AFAR was derived from UE report,"
		    " CP event on CPU%d (caused %saccess error on %s%d)",
		    aflt->flt_inst, space, (panic_aflt.flt_status & ECC_IOBUS) ?
		    "IOBUS" : "CPU", panic_aflt.flt_bus_id);

		if (spf_flt->flt_ec_lcnt > 0)
			cpu_log_ecmem_info(spf_flt);
		else
			cpu_aflt_log(CE_WARN, 2, spf_flt, CPU_ERRID_FIRST,
			    NULL, " No cache dump available");

		return;

	case CPU_TRAPPING_CP_ERR:
		/*
		 * For Sabre only.  This is a copyback ecache parity error due
		 * to a PCI DMA read.  We should be panicking if we get here.
		 */
		ASSERT(panicstr != NULL);
		cpu_aflt_log(CE_WARN, 1, spf_flt, CP_LFLAGS, NULL,
		    " AFAR was derived from UE report,"
		    " CP event on CPU%d (caused Data access error "
		    "on PCIBus)", aflt->flt_inst);
		return;

	/*
	 * We log the ecache lines of the following states,
	 * clean_bad_idle, clean_bad_busy, dirty_bad_idle and
	 * dirty_bad_busy if ecache_scrub_verbose is set and panic
	 * in addition to logging if ecache_scrub_panic is set.
	 */
	case CPU_BADLINE_CI_ERR:
		ecache_scrub_logstr = "CBI";
		/* FALLTHRU */

	case CPU_BADLINE_CB_ERR:
		if (ecache_scrub_logstr == NULL)
			ecache_scrub_logstr = "CBB";
		/* FALLTHRU */

	case CPU_BADLINE_DI_ERR:
		if (ecache_scrub_logstr == NULL)
			ecache_scrub_logstr = "DBI";
		/* FALLTHRU */

	case CPU_BADLINE_DB_ERR:
		if (ecache_scrub_logstr == NULL)
			ecache_scrub_logstr = "DBB";

		cpu_aflt_log(CE_NOTE, 2, spf_flt,
		    (CPU_ERRID_FIRST | CPU_FLTCPU), NULL,
		    " %s event on", ecache_scrub_logstr);
		cpu_log_ecmem_info(spf_flt);

		return;

	case CPU_ORPHAN_CP_ERR:
		/*
		 * Orphan CPs, where the CP bit is set but no CPU
		 * reports a UE.
		 */
		if (read_all_memscrub)
			memscrub_run();

		cpu_aflt_log(CE_NOTE, 2, spf_flt, (CP_LFLAGS | CPU_FLTCPU),
		    NULL, " Orphan CP event on");

		/* Log ecache info if it exists */
		if (spf_flt->flt_ec_lcnt > 0)
			cpu_log_ecmem_info(spf_flt);
		else
			cpu_aflt_log(CE_NOTE, 2, spf_flt,
			    (CP_LFLAGS | CPU_FLTCPU), NULL,
			    " No error found in ecache (No fault "
			    "PA available)");
		return;

	case CPU_ECACHE_ADDR_PAR_ERR:
		cpu_aflt_log(CE_WARN, 1, spf_flt, PARERR_LFLAGS, NULL,
		    " E$ Tag Address Parity error on");
		cpu_log_ecmem_info(spf_flt);
		return;

	case CPU_ECACHE_STATE_ERR:
		cpu_aflt_log(CE_WARN, 1, spf_flt, PARERR_LFLAGS, NULL,
		    " E$ Tag State Parity error on");
		cpu_log_ecmem_info(spf_flt);
		return;

	case CPU_ECACHE_TAG_ERR:
		cpu_aflt_log(CE_WARN, 1, spf_flt, PARERR_LFLAGS, NULL,
		    " E$ Tag scrub event on");
		cpu_log_ecmem_info(spf_flt);
		return;

	case CPU_ECACHE_ETP_ETS_ERR:
		cpu_aflt_log(CE_WARN, 1, spf_flt, PARERR_LFLAGS, NULL,
		    " AFSR.ETP is set and AFSR.ETS is zero on");
		cpu_log_ecmem_info(spf_flt);
		return;


	case CPU_ADDITIONAL_ERR:
		cpu_aflt_log(CE_WARN, 1, spf_flt, CMN_LFLAGS & ~CPU_SPACE, NULL,
		    " Additional errors detected during error processing on");
		return;

	default:
		cmn_err(CE_WARN, "cpu_async_log_err: fault %p has unknown "
		    "fault type %x", (void *)spf_flt, spf_flt->flt_type);
		return;
	}

	/* ... fall through from the UE, EDP, or LDP cases */

	if (aflt->flt_addr != AFLT_INV_ADDR && aflt->flt_in_memory) {
		if (!panicstr) {
			(void) page_retire(aflt->flt_addr, PR_UE);
		} else {
			/*
			 * Clear UEs on panic so that we don't
			 * get haunted by them during panic or
			 * after reboot
			 */
			clearphys(P2ALIGN(aflt->flt_addr, 64),
			    cpunodes[CPU->cpu_id].ecache_size,
			    cpunodes[CPU->cpu_id].ecache_linesize);

			(void) clear_errors(NULL, NULL);
		}
	}

	/*
	 * Log the final recovery message
	 */
	if (!panicstr) {
		if (!aflt->flt_priv) {
			cpu_aflt_log(CE_CONT, 3, spf_flt, CPU_ERRID_FIRST,
			    NULL, " Above Error is in User Mode"
			    "\n and is fatal: "
			    "will SIGKILL process and notify contract");
		} else if (aflt->flt_prot == AFLT_PROT_COPY && aflt->flt_core) {
			cpu_aflt_log(CE_CONT, 3, spf_flt, CPU_ERRID_FIRST,
			    NULL, " Above Error detected while dumping core;"
			    "\n core file will be truncated");
		} else if (aflt->flt_prot == AFLT_PROT_COPY) {
			cpu_aflt_log(CE_CONT, 3, spf_flt, CPU_ERRID_FIRST,
			    NULL, " Above Error is due to Kernel access"
			    "\n to User space and is fatal: "
			    "will SIGKILL process and notify contract");
		} else if (aflt->flt_prot == AFLT_PROT_EC) {
			cpu_aflt_log(CE_CONT, 3, spf_flt, CPU_ERRID_FIRST, NULL,
			    " Above Error detected by protected Kernel code"
			    "\n that will try to clear error from system");
		}
	}
}
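
/*
 * Note on the switch above: most cases end in "return" and so skip the
 * common tail of cpu_async_log_err(); only the UE, EDP and LDP cases
 * "break" so that they fall through to the page-retire and final
 * recovery-message handling at the end of the function.
 */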


/*
 * Check all cpus for non-trapping UE-causing errors.
 * In Ultra I/II, we look for copyback errors (CPs).
 */
void
cpu_check_allcpus(struct async_flt *aflt)
{
	spitf_async_flt cp;
	spitf_async_flt *spf_cpflt = &cp;
	struct async_flt *cpflt = (struct async_flt *)&cp;
	int pix;

	cpflt->flt_id = aflt->flt_id;
	cpflt->flt_addr = aflt->flt_addr;

	for (pix = 0; pix < NCPU; pix++) {
		if (CPU_XCALL_READY(pix)) {
			xc_one(pix, (xcfunc_t *)get_cpu_status,
			    (uint64_t)cpflt, 0);

			if (cpflt->flt_stat & P_AFSR_CP) {
				char *space;

				/* See which space - this info may not exist */
				if (aflt->flt_status & ECC_D_TRAP)
					space = "Data ";
				else if (aflt->flt_status & ECC_I_TRAP)
					space = "Instruction ";
				else
					space = "";

				cpu_aflt_log(CE_WARN, 1, spf_cpflt, CP_LFLAGS,
				    NULL, " AFAR was derived from UE report,"
				    " CP event on CPU%d (caused %saccess "
				    "error on %s%d)", pix, space,
				    (aflt->flt_status & ECC_IOBUS) ?
				    "IOBUS" : "CPU", aflt->flt_bus_id);

				if (spf_cpflt->flt_ec_lcnt > 0)
					cpu_log_ecmem_info(spf_cpflt);
				else
					cpu_aflt_log(CE_WARN, 2, spf_cpflt,
					    CPU_ERRID_FIRST, NULL,
					    " No cache dump available");
			}
		}
	}
}

#ifdef DEBUG
int test_mp_cp = 0;
#endif

/*
 * Cross-call callback routine to tell a CPU to read its own %afsr to check
 * for copyback errors and capture relevant information.
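 * It is invoked via xc_one() from cpu_check_allcpus() above and called
 * directly from cpu_async_panic_callb(), so it is kept simple: read the
 * %afsr and, if a copyback (CP) error is latched, capture the UDB
 * contents and the matching ecache line.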
 */
static uint_t
get_cpu_status(uint64_t arg)
{
	struct async_flt *aflt = (struct async_flt *)arg;
	spitf_async_flt *spf_flt = (spitf_async_flt *)arg;
	uint64_t afsr;
	uint32_t ec_idx;
	uint64_t sdbh, sdbl;
	int i;
	uint32_t ec_set_size;
	uchar_t valid;
	ec_data_t ec_data[8];
	uint64_t ec_tag, flt_addr_tag, oafsr;
	uint64_t *acc_afsr = NULL;

	get_asyncflt(&afsr);
	if (CPU_PRIVATE(CPU) != NULL) {
		acc_afsr = CPU_PRIVATE_PTR(CPU, sfpr_scrub_afsr);
		afsr |= *acc_afsr;
		*acc_afsr = 0;
	}

#ifdef DEBUG
	if (test_mp_cp)
		afsr |= P_AFSR_CP;
#endif
	aflt->flt_stat = afsr;

	if (afsr & P_AFSR_CP) {
		/*
		 * Capture the UDBs
		 */
		get_udb_errors(&sdbh, &sdbl);
		spf_flt->flt_sdbh = (ushort_t)(sdbh & 0x3FF);
		spf_flt->flt_sdbl = (ushort_t)(sdbl & 0x3FF);

		/*
		 * Clear CP bit before capturing ecache data
		 * and AFSR info.
		 */
		set_asyncflt(P_AFSR_CP);

		/*
		 * See if we can capture the ecache line for the
		 * fault PA.
		 *
		 * Return a valid matching ecache line, if any.
		 * Otherwise, return the first matching ecache
		 * line marked invalid.
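		 *
		 * The walk below looks at one line per way: the starting
		 * index is (fault PA % set size) and each step advances by
		 * the set size, so every way of the matching set is
		 * examined exactly once.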
		 */
		flt_addr_tag = aflt->flt_addr >> cpu_ec_tag_shift;
		ec_set_size = cpunodes[CPU->cpu_id].ecache_size /
		    ecache_associativity;
		spf_flt->flt_ec_lcnt = 0;

		for (i = 0, ec_idx = (aflt->flt_addr % ec_set_size);
		    i < ecache_associativity; i++, ec_idx += ec_set_size) {
			get_ecache_dtag(P2ALIGN(ec_idx, 64),
			    (uint64_t *)&ec_data[0], &ec_tag, &oafsr,
			    acc_afsr);

			if ((ec_tag & cpu_ec_tag_mask) != flt_addr_tag)
				continue;

			valid = cpu_ec_state_valid &
			    (uchar_t)((ec_tag & cpu_ec_state_mask) >>
			    cpu_ec_state_shift);

			if (valid || spf_flt->flt_ec_lcnt == 0) {
				spf_flt->flt_ec_tag = ec_tag;
				bcopy(&ec_data, &spf_flt->flt_ec_data,
				    sizeof (ec_data));
				spf_flt->flt_ec_lcnt = 1;

				if (valid)
					break;
			}
		}
	}
	return (0);
}

/*
 * CPU-module callback for the non-panicking CPUs.  This routine is invoked
 * from panic_idle() as part of the other CPUs stopping themselves when a
 * panic occurs.  We need to be VERY careful what we do here, since panicstr
 * is NOT set yet and we cannot blow through locks.  If panic_aflt is set
 * (panic_aflt.flt_id is non-zero), we need to read our %afsr to look for
 * CP error information.
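 *
 * Any CP error found here is packaged as a CPU_PANIC_CP_ERR fault and
 * dispatched to ue_queue; it is logged later in the panic flow by
 * cpu_async_log_err().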
 */
void
cpu_async_panic_callb(void)
{
	spitf_async_flt cp;
	struct async_flt *aflt = (struct async_flt *)&cp;
	uint64_t *scrub_afsr;

	if (panic_aflt.flt_id != 0) {
		aflt->flt_addr = panic_aflt.flt_addr;
		(void) get_cpu_status((uint64_t)aflt);

		if (CPU_PRIVATE(CPU) != NULL) {
			scrub_afsr = CPU_PRIVATE_PTR(CPU, sfpr_scrub_afsr);
			if (*scrub_afsr & P_AFSR_CP) {
				aflt->flt_stat |= *scrub_afsr;
				*scrub_afsr = 0;
			}
		}
		if (aflt->flt_stat & P_AFSR_CP) {
			aflt->flt_id = panic_aflt.flt_id;
			aflt->flt_panic = 1;
			aflt->flt_inst = CPU->cpu_id;
			aflt->flt_class = CPU_FAULT;
			cp.flt_type = CPU_PANIC_CP_ERR;
			cpu_errorq_dispatch(FM_EREPORT_CPU_USII_CP,
			    (void *)&cp, sizeof (cp), ue_queue,
			    aflt->flt_panic);
		}
	}
}

/*
 * Turn off all cpu error detection, normally only used for panics.
 */
void
cpu_disable_errors(void)
{
	xt_all(set_error_enable_tl1, EER_DISABLE, EER_SET_ABSOLUTE);
}

/*
 * Enable errors.
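 *
 * Like cpu_disable_errors(), this broadcasts to every CPU via xt_all(),
 * writing the error enable register through set_error_enable_tl1().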
 */
void
cpu_enable_errors(void)
{
	xt_all(set_error_enable_tl1, EER_ENABLE, EER_SET_ABSOLUTE);
}

static void
cpu_read_paddr(struct async_flt *ecc, short verbose, short ce_err)
{
	uint64_t aligned_addr = P2ALIGN(ecc->flt_addr, 8);
	int i, loop = 1;
	ushort_t ecc_0;
	uint64_t paddr;
	uint64_t data;

	if (verbose)
		loop = 8;
	for (i = 0; i < loop; i++) {
		paddr = aligned_addr + (i * 8);
		data = lddphys(paddr);
		if (verbose) {
			if (ce_err) {
				ecc_0 = ecc_gen((uint32_t)(data>>32),
				    (uint32_t)data);
				cpu_aflt_log(CE_CONT, 0, NULL, NO_LFLAGS,
				    NULL, " Paddr 0x%" PRIx64 ", "
				    "Data 0x%08x.%08x, ECC 0x%x", paddr,
				    (uint32_t)(data>>32), (uint32_t)data, ecc_0);
			} else {
				cpu_aflt_log(CE_CONT, 0, NULL, NO_LFLAGS,
				    NULL, " Paddr 0x%" PRIx64 ", "
				    "Data 0x%08x.%08x", paddr,
				    (uint32_t)(data>>32), (uint32_t)data);
			}
		}
	}
}

static struct {	/* sec-ded-s4ed ecc code */
	uint_t hi, lo;
} ecc_code[8] = {
	{ 0xee55de23U, 0x16161161U },
	{ 0x55eede93U, 0x61612212U },
	{ 0xbb557b8cU, 0x49494494U },
	{ 0x55bb7b6cU, 0x94948848U },
	{ 0x16161161U, 0xee55de23U },
	{ 0x61612212U, 0x55eede93U },
	{ 0x49494494U, 0xbb557b8cU },
	{ 0x94948848U, 0x55bb7b6cU }
};

static ushort_t
ecc_gen(uint_t high_bytes, uint_t low_bytes)
{
	int i, j;
	uchar_t checker, bit_mask;
	struct {
		uint_t hi, lo;
	} hex_data, masked_data[8];

	hex_data.hi = high_bytes;
	hex_data.lo = low_bytes;

	/* mask out bits according to sec-ded-s4ed ecc code */
	for (i = 0; i < 8; i++) {
		masked_data[i].hi = hex_data.hi & ecc_code[i].hi;
		masked_data[i].lo = hex_data.lo & ecc_code[i].lo;
	}

	/*
	 * xor all bits in masked_data[i] to get bit_i of checker,
	 * where i = 0 to 7
	 */
	checker = 0;
	for (i = 0; i < 8; i++) {
		bit_mask = 1 << i;
		for (j = 0; j < 32; j++) {
			if (masked_data[i].lo & 1) checker ^= bit_mask;
			if (masked_data[i].hi & 1) checker ^= bit_mask;
			masked_data[i].hi >>= 1;
			masked_data[i].lo >>= 1;
		}
	}
	return (checker);
}

/*
 * Flush the entire ecache using displacement flush by reading through a
 * physical address range as large as the ecache.
 */
void
cpu_flush_ecache(void)
{
	flush_ecache(ecache_flushaddr, cpunodes[CPU->cpu_id].ecache_size * 2,
	    cpunodes[CPU->cpu_id].ecache_linesize);
}

/*
 * Read and display the data in the cache line where the
 * original CE error occurred.
 * This routine is mainly used for debugging new hardware.
 */
void
read_ecc_data(struct async_flt *ecc, short verbose, short ce_err)
{
	kpreempt_disable();
	/* disable ECC error traps */
	set_error_enable(EER_ECC_DISABLE);

	/*
	 * flush the ecache
	 * read the data
	 * check to see if an ECC error occurred
	 */
	flush_ecache(ecache_flushaddr, cpunodes[CPU->cpu_id].ecache_size * 2,
	    cpunodes[CPU->cpu_id].ecache_linesize);
	set_lsu(get_lsu() | cache_boot_state);
	cpu_read_paddr(ecc, verbose, ce_err);
	(void) check_ecc(ecc);

	/* enable ECC error traps */
	set_error_enable(EER_ENABLE);
	kpreempt_enable();
}

/*
 * Check the AFSR bits for UE/CE persistence.
 * If UE or CE errors are detected, the routine will
 * clear all the AFSR sticky bits (except CP for
 * spitfire/blackbird) and the UDBs.
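 * It returns nonzero only when the same class of error (UE or CE) is
 * seen again at the same AFAR (and, for a CE, with the same syndrome).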
 * If ce_debug or ue_debug is set, log any UE/CE errors detected.
 */
static int
check_ecc(struct async_flt *ecc)
{
	uint64_t t_afsr;
	uint64_t t_afar;
	uint64_t udbh;
	uint64_t udbl;
	ushort_t udb;
	int persistent = 0;

	/*
	 * Capture the AFSR, AFAR and UDBs info
	 */
	get_asyncflt(&t_afsr);
	get_asyncaddr(&t_afar);
	t_afar &= SABRE_AFAR_PA;
	get_udb_errors(&udbh, &udbl);

	if ((t_afsr & P_AFSR_UE) || (t_afsr & P_AFSR_CE)) {
		/*
		 * Clear the errors
		 */
		clr_datapath();

		if (isus2i || isus2e)
			set_asyncflt(t_afsr);
		else
			set_asyncflt(t_afsr & ~P_AFSR_CP);

		/*
		 * determine whether to check UDBH or UDBL for persistence
		 */
		if (ecc->flt_synd & UDBL_REG) {
			udb = (ushort_t)udbl;
			t_afar |= 0x8;
		} else {
			udb = (ushort_t)udbh;
		}

		if (ce_debug || ue_debug) {
			spitf_async_flt spf_flt;	/* for logging */
			struct async_flt *aflt =
			    (struct async_flt *)&spf_flt;

			/* Package the info nicely in the spf_flt struct */
			bzero(&spf_flt, sizeof (spitf_async_flt));
			aflt->flt_stat = t_afsr;
			aflt->flt_addr = t_afar;
			spf_flt.flt_sdbh = (ushort_t)(udbh & 0x3FF);
			spf_flt.flt_sdbl = (ushort_t)(udbl & 0x3FF);

			cpu_aflt_log(CE_CONT, 0, &spf_flt, (CPU_AFSR |
			    CPU_AFAR | CPU_UDBH | CPU_UDBL), NULL,
			    " check_ecc: Dumping captured error states ...");
		}

		/*
		 * if the fault addresses don't match, not persistent
		 */
		if (t_afar != ecc->flt_addr) {
			return (persistent);
		}

		/*
		 * check for UE persistence
		 * since all DIMMs in the bank are identified for a UE,
		 * there's no reason to check the syndrome
		 */
		if ((ecc->flt_stat & P_AFSR_UE) && (t_afsr & P_AFSR_UE)) {
			persistent = 1;
		}

		/*
		 * check for CE persistence
		 */
		if ((ecc->flt_stat & P_AFSR_CE) && (t_afsr & P_AFSR_CE)) {
			if ((udb & P_DER_E_SYND) ==
			    (ecc->flt_synd & P_DER_E_SYND)) {
				persistent = 1;
			}
		}
	}
	return (persistent);
}

#ifdef HUMMINGBIRD
#define	HB_FULL_DIV		1
#define	HB_HALF_DIV		2
#define	HB_LOWEST_DIV		8
#define	HB_ECLK_INVALID		0xdeadbad
static uint64_t hb_eclk[HB_LOWEST_DIV + 1] = {
	HB_ECLK_INVALID, HB_ECLK_1, HB_ECLK_2, HB_ECLK_INVALID,
	HB_ECLK_4, HB_ECLK_INVALID, HB_ECLK_6, HB_ECLK_INVALID,
	HB_ECLK_8 };

#define	HB_SLOW_DOWN		0
#define	HB_SPEED_UP		1

#define	SET_ESTAR_MODE(mode)					\
	stdphysio(HB_ESTAR_MODE, (mode));			\
	/*							\
	 * PLL logic requires minimum of 16 clock		\
	 * cycles to lock to the new clock speed.		\
	 * Wait 1 usec to satisfy this requirement.		\
	 */							\
	drv_usecwait(1);

#define	CHANGE_REFRESH_COUNT(direction, cur_div, new_div)	\
{								\
	volatile uint64_t data;					\
	uint64_t count, new_count;				\
	clock_t delay;						\
	data = lddphysio(HB_MEM_CNTRL0);			\
	count = (data & HB_REFRESH_COUNT_MASK) >>		\
	    HB_REFRESH_COUNT_SHIFT;				\
	new_count = (HB_REFRESH_INTERVAL *			\
	    cpunodes[CPU->cpu_id].clock_freq) /			\
	    (HB_REFRESH_CLOCKS_PER_COUNT * (new_div) * NANOSEC);\
	data = (data & ~HB_REFRESH_COUNT_MASK) |		\
	    (new_count << HB_REFRESH_COUNT_SHIFT);		\
	stdphysio(HB_MEM_CNTRL0, data);				\
	data = lddphysio(HB_MEM_CNTRL0);			\
	/*							\
	 * If we are slowing down the cpu and Memory		\
	 * Self Refresh is not enabled, it is required		\
	 * to wait for the old refresh count to count down and	\
	 * the new refresh count to go into effect (let the	\
	 * new value count down once).				\
	 */							\
	if ((direction) == HB_SLOW_DOWN &&			\
	    (data & HB_SELF_REFRESH_MASK) == 0) {		\
		/*						\
		 * Each count takes 64 cpu clock cycles		\
		 * to decrement.  Wait for current refresh	\
		 * count plus new refresh count at current	\
		 * cpu speed to count down to zero.  Round	\
		 * up the delay time.				\
		 */						\
		delay = ((HB_REFRESH_CLOCKS_PER_COUNT *		\
		    (count + new_count) * MICROSEC * (cur_div)) /\
		    cpunodes[CPU->cpu_id].clock_freq) + 1;	\
		drv_usecwait(delay);				\
	}							\
}

#define	SET_SELF_REFRESH(bit)					\
{								\
	volatile uint64_t data;					\
	data = lddphysio(HB_MEM_CNTRL0);			\
	data = (data & ~HB_SELF_REFRESH_MASK) |			\
	    ((bit) << HB_SELF_REFRESH_SHIFT);			\
	stdphysio(HB_MEM_CNTRL0, data);				\
	data = lddphysio(HB_MEM_CNTRL0);			\
}
#endif	/* HUMMINGBIRD */

/* ARGSUSED */
void
cpu_change_speed(uint64_t new_divisor, uint64_t arg2)
{
#ifdef HUMMINGBIRD
	uint64_t cur_mask, cur_divisor = 0;
	volatile uint64_t reg;
	int index;

	if ((new_divisor < HB_FULL_DIV || new_divisor > HB_LOWEST_DIV) ||
	    (hb_eclk[new_divisor] == HB_ECLK_INVALID)) {
		cmn_err(CE_WARN, "cpu_change_speed: bad divisor 0x%lx",
		    new_divisor);
		return;
	}

	reg = lddphysio(HB_ESTAR_MODE);
	cur_mask = reg & HB_ECLK_MASK;
	for (index = HB_FULL_DIV; index <= HB_LOWEST_DIV; index++) {
		if (hb_eclk[index] == cur_mask) {
			cur_divisor = index;
			break;
		}
	}

	if (cur_divisor == 0)
		cmn_err(CE_PANIC, "cpu_change_speed: current divisor "
		    "can't be determined!");

	/*
	 * If we are already at the requested divisor speed, just
	 * return.
	 */
	if (cur_divisor == new_divisor)
		return;

	if (cur_divisor == HB_FULL_DIV && new_divisor == HB_HALF_DIV) {
		CHANGE_REFRESH_COUNT(HB_SLOW_DOWN, cur_divisor, new_divisor);
		SET_ESTAR_MODE(hb_eclk[new_divisor]);
		SET_SELF_REFRESH(HB_SELF_REFRESH_ENABLE);

	} else if (cur_divisor == HB_HALF_DIV && new_divisor == HB_FULL_DIV) {
		SET_SELF_REFRESH(HB_SELF_REFRESH_DISABLE);
		SET_ESTAR_MODE(hb_eclk[new_divisor]);
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		CHANGE_REFRESH_COUNT(HB_SPEED_UP, cur_divisor, new_divisor);

	} else if (cur_divisor == HB_FULL_DIV && new_divisor > HB_HALF_DIV) {
		/*
		 * Transition to 1/2 speed first, then to
		 * lower speed.
		 */
		CHANGE_REFRESH_COUNT(HB_SLOW_DOWN, cur_divisor, HB_HALF_DIV);
		SET_ESTAR_MODE(hb_eclk[HB_HALF_DIV]);
		SET_SELF_REFRESH(HB_SELF_REFRESH_ENABLE);

		CHANGE_REFRESH_COUNT(HB_SLOW_DOWN, HB_HALF_DIV, new_divisor);
		SET_ESTAR_MODE(hb_eclk[new_divisor]);

	} else if (cur_divisor > HB_HALF_DIV && new_divisor == HB_FULL_DIV) {
		/*
		 * Transition to 1/2 speed first, then to
		 * full speed.
		 */
		SET_ESTAR_MODE(hb_eclk[HB_HALF_DIV]);
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		CHANGE_REFRESH_COUNT(HB_SPEED_UP, cur_divisor, HB_HALF_DIV);

		SET_SELF_REFRESH(HB_SELF_REFRESH_DISABLE);
		SET_ESTAR_MODE(hb_eclk[new_divisor]);
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		CHANGE_REFRESH_COUNT(HB_SPEED_UP, HB_HALF_DIV, new_divisor);

	} else if (cur_divisor < new_divisor) {
		CHANGE_REFRESH_COUNT(HB_SLOW_DOWN, cur_divisor, new_divisor);
		SET_ESTAR_MODE(hb_eclk[new_divisor]);

	} else if (cur_divisor > new_divisor) {
		SET_ESTAR_MODE(hb_eclk[new_divisor]);
		/* LINTED: E_FALSE_LOGICAL_EXPR */
		CHANGE_REFRESH_COUNT(HB_SPEED_UP, cur_divisor, new_divisor);
	}
	CPU->cpu_m.divisor = (uchar_t)new_divisor;
#endif
}

/*
 * Clear the AFSR sticky bits and the UDBs.  For Sabre/Spitfire/Blackbird,
 * we clear all the sticky bits.  If a non-null pointer to an async fault
 * structure argument is passed in, the captured error state (AFSR, AFAR, UDBs)
 * info will be returned in the structure.  If a non-null pointer to a
 * uint64_t is passed in, this will be updated if the CP bit is set in the
 * AFSR.  The afsr will be returned.
 */
static uint64_t
clear_errors(spitf_async_flt *spf_flt, uint64_t *acc_afsr)
{
	struct async_flt *aflt = (struct async_flt *)spf_flt;
	uint64_t afsr;
	uint64_t udbh, udbl;

	get_asyncflt(&afsr);

	if ((acc_afsr != NULL) && (afsr & P_AFSR_CP))
		*acc_afsr |= afsr;

	if (spf_flt != NULL) {
		aflt->flt_stat = afsr;
		get_asyncaddr(&aflt->flt_addr);
		aflt->flt_addr &= SABRE_AFAR_PA;

		get_udb_errors(&udbh, &udbl);
		spf_flt->flt_sdbh = (ushort_t)(udbh & 0x3FF);
		spf_flt->flt_sdbl = (ushort_t)(udbl & 0x3FF);
	}

	set_asyncflt(afsr);		/* clear afsr */
	clr_datapath();			/* clear udbs */
	return (afsr);
}

/*
 * Scan the ecache to look for bad lines.  If found, the afsr, afar, and
 * e$ data/tag of the first bad line will be returned.  We also return the
 * old-afsr (before clearing the sticky bits).  The linecnt data will be
 * updated to indicate the number of bad lines detected.
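 *
 * The scan reads the ecache one 64-byte line at a time and relies on the
 * per-chunk AFSR.EDP/PSYND state captured by get_ecache_dtag() to decide
 * which 8-byte word of a line is bad; each bad line found is flushed and
 * the sticky error state is cleared before moving on.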
 */
static void
scan_ecache(uint64_t *t_afar, ec_data_t *ecache_data,
    uint64_t *ecache_tag, int *linecnt, uint64_t *t_afsr)
{
	ec_data_t t_ecdata[8];
	uint64_t t_etag, oafsr;
	uint64_t pa = AFLT_INV_ADDR;
	uint32_t i, j, ecache_sz;
	uint64_t acc_afsr = 0;
	uint64_t *cpu_afsr = NULL;

	if (CPU_PRIVATE(CPU) != NULL)
		cpu_afsr = CPU_PRIVATE_PTR(CPU, sfpr_scrub_afsr);

	*linecnt = 0;
	ecache_sz = cpunodes[CPU->cpu_id].ecache_size;

	for (i = 0; i < ecache_sz; i += 64) {
		get_ecache_dtag(i, (uint64_t *)&t_ecdata[0], &t_etag, &oafsr,
		    cpu_afsr);
		acc_afsr |= oafsr;

		/*
		 * Scan through the whole 64-byte line in 8 8-byte chunks
		 * looking for the first occurrence of an EDP error.  The AFSR
		 * info is captured for each 8-byte chunk.  Note that for
		 * Spitfire/Blackbird, the AFSR.PSYND is captured by h/w in
		 * 16-byte chunk granularity (i.e. the AFSR will be the same
		 * for the high and low 8-byte words within the 16-byte chunk).
		 * For Sabre/Hummingbird, the AFSR.PSYND is captured in 8-byte
		 * granularity and only PSYND bits [7:0] are used.
		 */
		for (j = 0; j < 8; j++) {
			ec_data_t *ecdptr = &t_ecdata[j];

			if (ecdptr->ec_afsr & P_AFSR_EDP) {
				uint64_t errpa;
				ushort_t psynd;
				uint32_t ec_set_size = ecache_sz /
				    ecache_associativity;

				/*
				 * For Spitfire/Blackbird, we need to look at
				 * the PSYND to make sure that this 8-byte chunk
				 * is the right one.  PSYND bits [15:8] belong
				 * to the upper 8-byte (even) chunk.  Bits
				 * [7:0] belong to the lower 8-byte chunk (odd).
30147c478bd9Sstevel@tonic-gate */ 30157c478bd9Sstevel@tonic-gate psynd = ecdptr->ec_afsr & P_AFSR_P_SYND; 30167c478bd9Sstevel@tonic-gate if (!isus2i && !isus2e) { 30177c478bd9Sstevel@tonic-gate if (j & 0x1) 30187c478bd9Sstevel@tonic-gate psynd = psynd & 0xFF; 30197c478bd9Sstevel@tonic-gate else 30207c478bd9Sstevel@tonic-gate psynd = psynd >> 8; 30217c478bd9Sstevel@tonic-gate 30227c478bd9Sstevel@tonic-gate if (!psynd) 30237c478bd9Sstevel@tonic-gate continue; /* wrong chunk */ 30247c478bd9Sstevel@tonic-gate } 30257c478bd9Sstevel@tonic-gate 30267c478bd9Sstevel@tonic-gate /* Construct the PA */ 30277c478bd9Sstevel@tonic-gate errpa = ((t_etag & cpu_ec_tag_mask) << 30287c478bd9Sstevel@tonic-gate cpu_ec_tag_shift) | ((i | (j << 3)) % 30297c478bd9Sstevel@tonic-gate ec_set_size); 30307c478bd9Sstevel@tonic-gate 30317c478bd9Sstevel@tonic-gate /* clean up the cache line */ 30327c478bd9Sstevel@tonic-gate flushecacheline(P2ALIGN(errpa, 64), 30337c478bd9Sstevel@tonic-gate cpunodes[CPU->cpu_id].ecache_size); 30347c478bd9Sstevel@tonic-gate 30357c478bd9Sstevel@tonic-gate oafsr = clear_errors(NULL, cpu_afsr); 30367c478bd9Sstevel@tonic-gate acc_afsr |= oafsr; 30377c478bd9Sstevel@tonic-gate 30387c478bd9Sstevel@tonic-gate (*linecnt)++; 30397c478bd9Sstevel@tonic-gate 30407c478bd9Sstevel@tonic-gate /* 30417c478bd9Sstevel@tonic-gate * Capture the PA for the first bad line found. 30427c478bd9Sstevel@tonic-gate * Return the ecache dump and tag info. 30437c478bd9Sstevel@tonic-gate */ 30447c478bd9Sstevel@tonic-gate if (pa == AFLT_INV_ADDR) { 30457c478bd9Sstevel@tonic-gate int k; 30467c478bd9Sstevel@tonic-gate 30477c478bd9Sstevel@tonic-gate pa = errpa; 30487c478bd9Sstevel@tonic-gate for (k = 0; k < 8; k++) 30497c478bd9Sstevel@tonic-gate ecache_data[k] = t_ecdata[k]; 30507c478bd9Sstevel@tonic-gate *ecache_tag = t_etag; 30517c478bd9Sstevel@tonic-gate } 30527c478bd9Sstevel@tonic-gate break; 30537c478bd9Sstevel@tonic-gate } 30547c478bd9Sstevel@tonic-gate } 30557c478bd9Sstevel@tonic-gate } 30567c478bd9Sstevel@tonic-gate *t_afar = pa; 30577c478bd9Sstevel@tonic-gate *t_afsr = acc_afsr; 30587c478bd9Sstevel@tonic-gate } 30597c478bd9Sstevel@tonic-gate 30607c478bd9Sstevel@tonic-gate static void 30617c478bd9Sstevel@tonic-gate cpu_log_ecmem_info(spitf_async_flt *spf_flt) 30627c478bd9Sstevel@tonic-gate { 30637c478bd9Sstevel@tonic-gate struct async_flt *aflt = (struct async_flt *)spf_flt; 30647c478bd9Sstevel@tonic-gate uint64_t ecache_tag = spf_flt->flt_ec_tag; 30657c478bd9Sstevel@tonic-gate char linestr[30]; 30667c478bd9Sstevel@tonic-gate char *state_str; 30677c478bd9Sstevel@tonic-gate int i; 30687c478bd9Sstevel@tonic-gate 30697c478bd9Sstevel@tonic-gate /* 30707c478bd9Sstevel@tonic-gate * Check the ecache tag to make sure it 30717c478bd9Sstevel@tonic-gate * is valid. If invalid, a memory dump was 30727c478bd9Sstevel@tonic-gate * captured instead of a ecache dump. 
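 *
 * (The invalid case is recognized by flt_ec_tag being AFLT_INV_ADDR; the
 * else branch below then logs the 64-byte aligned AFAR that the memory
 * dump was taken from, e.g. "dumping memory from PA 0x00000000.12345600
 * instead", where the address shown is purely illustrative.)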
30737c478bd9Sstevel@tonic-gate */ 30747c478bd9Sstevel@tonic-gate if (spf_flt->flt_ec_tag != AFLT_INV_ADDR) { 30757c478bd9Sstevel@tonic-gate uchar_t eparity = (uchar_t) 30767c478bd9Sstevel@tonic-gate ((ecache_tag & cpu_ec_par_mask) >> cpu_ec_par_shift); 30777c478bd9Sstevel@tonic-gate 30787c478bd9Sstevel@tonic-gate uchar_t estate = (uchar_t) 30797c478bd9Sstevel@tonic-gate ((ecache_tag & cpu_ec_state_mask) >> cpu_ec_state_shift); 30807c478bd9Sstevel@tonic-gate 30817c478bd9Sstevel@tonic-gate if (estate == cpu_ec_state_shr) 30827c478bd9Sstevel@tonic-gate state_str = "Shared"; 30837c478bd9Sstevel@tonic-gate else if (estate == cpu_ec_state_exl) 30847c478bd9Sstevel@tonic-gate state_str = "Exclusive"; 30857c478bd9Sstevel@tonic-gate else if (estate == cpu_ec_state_own) 30867c478bd9Sstevel@tonic-gate state_str = "Owner"; 30877c478bd9Sstevel@tonic-gate else if (estate == cpu_ec_state_mod) 30887c478bd9Sstevel@tonic-gate state_str = "Modified"; 30897c478bd9Sstevel@tonic-gate else 30907c478bd9Sstevel@tonic-gate state_str = "Invalid"; 30917c478bd9Sstevel@tonic-gate 30927c478bd9Sstevel@tonic-gate if (spf_flt->flt_ec_lcnt > 1) { 30937c478bd9Sstevel@tonic-gate (void) snprintf(linestr, sizeof (linestr), 30947c478bd9Sstevel@tonic-gate "Badlines found=%d", spf_flt->flt_ec_lcnt); 30957c478bd9Sstevel@tonic-gate } else { 30967c478bd9Sstevel@tonic-gate linestr[0] = '\0'; 30977c478bd9Sstevel@tonic-gate } 30987c478bd9Sstevel@tonic-gate 30997c478bd9Sstevel@tonic-gate cpu_aflt_log(CE_CONT, 2, spf_flt, CPU_ERRID_FIRST, NULL, 31007c478bd9Sstevel@tonic-gate " PA=0x%08x.%08x\n E$tag 0x%08x.%08x E$State: %s " 31017c478bd9Sstevel@tonic-gate "E$parity 0x%02x %s", (uint32_t)(aflt->flt_addr >> 32), 31027c478bd9Sstevel@tonic-gate (uint32_t)aflt->flt_addr, (uint32_t)(ecache_tag >> 32), 31037c478bd9Sstevel@tonic-gate (uint32_t)ecache_tag, state_str, 31047c478bd9Sstevel@tonic-gate (uint32_t)eparity, linestr); 31057c478bd9Sstevel@tonic-gate } else { 31067c478bd9Sstevel@tonic-gate cpu_aflt_log(CE_CONT, 2, spf_flt, CPU_ERRID_FIRST, NULL, 31077c478bd9Sstevel@tonic-gate " E$tag != PA from AFAR; E$line was victimized" 31087c478bd9Sstevel@tonic-gate "\n dumping memory from PA 0x%08x.%08x instead", 31097c478bd9Sstevel@tonic-gate (uint32_t)(P2ALIGN(aflt->flt_addr, 64) >> 32), 31107c478bd9Sstevel@tonic-gate (uint32_t)P2ALIGN(aflt->flt_addr, 64)); 31117c478bd9Sstevel@tonic-gate } 31127c478bd9Sstevel@tonic-gate 31137c478bd9Sstevel@tonic-gate /* 31147c478bd9Sstevel@tonic-gate * Dump out all 8 8-byte ecache data captured 31157c478bd9Sstevel@tonic-gate * For each 8-byte data captured, we check the 31167c478bd9Sstevel@tonic-gate * captured afsr's parity syndrome to find out 31177c478bd9Sstevel@tonic-gate * which 8-byte chunk is bad. For memory dump, the 31187c478bd9Sstevel@tonic-gate * AFSR values were initialized to 0. 
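 *
 * A sample of the lines this loop emits (data values are made up for
 * illustration only):
 *
 *	E$Data (0x00): 0x01234567.89abcdef
 *	E$Data (0x08): 0xdeadbeef.feedface *Bad* PSYND=0x0010
 *
 * Only the chunk whose PSYND bits are non-zero and whose captured AFSR
 * has EDP set is flagged "*Bad*".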
31197c478bd9Sstevel@tonic-gate */ 31207c478bd9Sstevel@tonic-gate for (i = 0; i < 8; i++) { 31217c478bd9Sstevel@tonic-gate ec_data_t *ecdptr; 31227c478bd9Sstevel@tonic-gate uint_t offset; 31237c478bd9Sstevel@tonic-gate ushort_t psynd; 31247c478bd9Sstevel@tonic-gate ushort_t bad; 31257c478bd9Sstevel@tonic-gate uint64_t edp; 31267c478bd9Sstevel@tonic-gate 31277c478bd9Sstevel@tonic-gate offset = i << 3; /* multiply by 8 */ 31287c478bd9Sstevel@tonic-gate ecdptr = &spf_flt->flt_ec_data[i]; 31297c478bd9Sstevel@tonic-gate psynd = ecdptr->ec_afsr & P_AFSR_P_SYND; 31307c478bd9Sstevel@tonic-gate edp = ecdptr->ec_afsr & P_AFSR_EDP; 31317c478bd9Sstevel@tonic-gate 31327c478bd9Sstevel@tonic-gate /* 31337c478bd9Sstevel@tonic-gate * For Sabre/Hummingbird, parity synd is captured only 31347c478bd9Sstevel@tonic-gate * in [7:0] of AFSR.PSYND for each 8-byte chunk. 31357c478bd9Sstevel@tonic-gate * For spitfire/blackbird, AFSR.PSYND is captured 31367c478bd9Sstevel@tonic-gate * in 16-byte granularity. [15:8] represent 31377c478bd9Sstevel@tonic-gate * the upper 8 byte and [7:0] the lower 8 byte. 31387c478bd9Sstevel@tonic-gate */ 31397c478bd9Sstevel@tonic-gate if (isus2i || isus2e || (i & 0x1)) 31407c478bd9Sstevel@tonic-gate bad = (psynd & 0xFF); /* check bits [7:0] */ 31417c478bd9Sstevel@tonic-gate else 31427c478bd9Sstevel@tonic-gate bad = (psynd & 0xFF00); /* check bits [15:8] */ 31437c478bd9Sstevel@tonic-gate 31447c478bd9Sstevel@tonic-gate if (bad && edp) { 31457c478bd9Sstevel@tonic-gate cpu_aflt_log(CE_CONT, 2, spf_flt, NO_LFLAGS, NULL, 31467c478bd9Sstevel@tonic-gate " E$Data (0x%02x): 0x%08x.%08x " 31477c478bd9Sstevel@tonic-gate "*Bad* PSYND=0x%04x", offset, 31487c478bd9Sstevel@tonic-gate (uint32_t)(ecdptr->ec_d8 >> 32), 31497c478bd9Sstevel@tonic-gate (uint32_t)ecdptr->ec_d8, psynd); 31507c478bd9Sstevel@tonic-gate } else { 31517c478bd9Sstevel@tonic-gate cpu_aflt_log(CE_CONT, 2, spf_flt, NO_LFLAGS, NULL, 31527c478bd9Sstevel@tonic-gate " E$Data (0x%02x): 0x%08x.%08x", offset, 31537c478bd9Sstevel@tonic-gate (uint32_t)(ecdptr->ec_d8 >> 32), 31547c478bd9Sstevel@tonic-gate (uint32_t)ecdptr->ec_d8); 31557c478bd9Sstevel@tonic-gate } 31567c478bd9Sstevel@tonic-gate } 31577c478bd9Sstevel@tonic-gate } 31587c478bd9Sstevel@tonic-gate 31597c478bd9Sstevel@tonic-gate /* 31607c478bd9Sstevel@tonic-gate * Common logging function for all cpu async errors. This function allows the 31617c478bd9Sstevel@tonic-gate * caller to generate a single cmn_err() call that logs the appropriate items 31627c478bd9Sstevel@tonic-gate * from the fault structure, and implements our rules for AFT logging levels. 31637c478bd9Sstevel@tonic-gate * 31647c478bd9Sstevel@tonic-gate * ce_code: cmn_err() code (e.g. CE_PANIC, CE_WARN, CE_CONT) 31657c478bd9Sstevel@tonic-gate * tagnum: 0, 1, 2, .. generate the [AFT#] tag 31667c478bd9Sstevel@tonic-gate * spflt: pointer to spitfire async fault structure 31677c478bd9Sstevel@tonic-gate * logflags: bitflags indicating what to output 31687c478bd9Sstevel@tonic-gate * endstr: a end string to appear at the end of this log 31697c478bd9Sstevel@tonic-gate * fmt: a format string to appear at the beginning of the log 31707c478bd9Sstevel@tonic-gate * 31717c478bd9Sstevel@tonic-gate * The logflags allows the construction of predetermined output from the spflt 31727c478bd9Sstevel@tonic-gate * structure. The individual data items always appear in a consistent order. 
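 *
 * As an illustration only (the flag combination is composed ad hoc for
 * this example, and "unum" stands for a memory-module name string
 * obtained elsewhere), a caller could log the CPU, AFSR and AFAR for a
 * fault like this:
 *
 *	cpu_aflt_log(CE_WARN, 1, spf_flt,
 *	    CPU_ERRID_FIRST | CPU_FLTCPU | CPU_AFSR | CPU_AFAR,
 *	    unum, " EDP event on");
 *
 * which produces an "[AFT1]" tagged message built from those fields.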
31737c478bd9Sstevel@tonic-gate * Note that either or both of the spflt structure pointer and logflags may be 31747c478bd9Sstevel@tonic-gate * NULL or zero respectively, indicating that the predetermined output 31757c478bd9Sstevel@tonic-gate * substrings are not requested in this log. The output looks like this: 31767c478bd9Sstevel@tonic-gate * 31777c478bd9Sstevel@tonic-gate * [AFT#] <CPU_ERRID_FIRST><fmt string><CPU_FLTCPU> 31787c478bd9Sstevel@tonic-gate * <CPU_SPACE><CPU_ERRID> 31797c478bd9Sstevel@tonic-gate * newline+4spaces<CPU_AFSR><CPU_AFAR> 31807c478bd9Sstevel@tonic-gate * newline+4spaces<CPU_AF_PSYND><CPU_AF_ETS><CPU_FAULTPC> 31817c478bd9Sstevel@tonic-gate * newline+4spaces<CPU_UDBH><CPU_UDBL> 31827c478bd9Sstevel@tonic-gate * newline+4spaces<CPU_SYND> 31837c478bd9Sstevel@tonic-gate * newline+4spaces<endstr> 31847c478bd9Sstevel@tonic-gate * 31857c478bd9Sstevel@tonic-gate * Note that <endstr> may not start on a newline if we are logging <CPU_PSYND>; 31867c478bd9Sstevel@tonic-gate * it is assumed that <endstr> will be the unum string in this case. The size 31877c478bd9Sstevel@tonic-gate * of our intermediate formatting buf[] is based on the worst case of all flags 31887c478bd9Sstevel@tonic-gate * being enabled. We pass the caller's varargs directly to vcmn_err() for 31897c478bd9Sstevel@tonic-gate * formatting so we don't need additional stack space to format them here. 31907c478bd9Sstevel@tonic-gate */ 31917c478bd9Sstevel@tonic-gate /*PRINTFLIKE6*/ 31927c478bd9Sstevel@tonic-gate static void 31937c478bd9Sstevel@tonic-gate cpu_aflt_log(int ce_code, int tagnum, spitf_async_flt *spflt, uint_t logflags, 31947c478bd9Sstevel@tonic-gate const char *endstr, const char *fmt, ...) 31957c478bd9Sstevel@tonic-gate { 31967c478bd9Sstevel@tonic-gate struct async_flt *aflt = (struct async_flt *)spflt; 31977c478bd9Sstevel@tonic-gate char buf[400], *p, *q; /* see comments about buf[] size above */ 31987c478bd9Sstevel@tonic-gate va_list ap; 31997c478bd9Sstevel@tonic-gate int console_log_flag; 32007c478bd9Sstevel@tonic-gate 32017c478bd9Sstevel@tonic-gate if ((aflt == NULL) || ((aflt->flt_class == CPU_FAULT) && 32027c478bd9Sstevel@tonic-gate (aflt->flt_stat & P_AFSR_LEVEL1)) || 32037c478bd9Sstevel@tonic-gate (aflt->flt_panic)) { 32047c478bd9Sstevel@tonic-gate console_log_flag = (tagnum < 2) || aft_verbose; 32057c478bd9Sstevel@tonic-gate } else { 32067c478bd9Sstevel@tonic-gate int verbose = ((aflt->flt_class == BUS_FAULT) || 32077c478bd9Sstevel@tonic-gate (aflt->flt_stat & P_AFSR_CE)) ? 
32087c478bd9Sstevel@tonic-gate ce_verbose_memory : ce_verbose_other; 32097c478bd9Sstevel@tonic-gate 32107c478bd9Sstevel@tonic-gate if (!verbose) 32117c478bd9Sstevel@tonic-gate return; 32127c478bd9Sstevel@tonic-gate 32137c478bd9Sstevel@tonic-gate console_log_flag = (verbose > 1); 32147c478bd9Sstevel@tonic-gate } 32157c478bd9Sstevel@tonic-gate 32167c478bd9Sstevel@tonic-gate if (console_log_flag) 32177c478bd9Sstevel@tonic-gate (void) sprintf(buf, "[AFT%d]", tagnum); 32187c478bd9Sstevel@tonic-gate else 32197c478bd9Sstevel@tonic-gate (void) sprintf(buf, "![AFT%d]", tagnum); 32207c478bd9Sstevel@tonic-gate 32217c478bd9Sstevel@tonic-gate p = buf + strlen(buf); /* current buffer position */ 32227c478bd9Sstevel@tonic-gate q = buf + sizeof (buf); /* pointer past end of buffer */ 32237c478bd9Sstevel@tonic-gate 32247c478bd9Sstevel@tonic-gate if (spflt != NULL && (logflags & CPU_ERRID_FIRST)) { 32257c478bd9Sstevel@tonic-gate (void) snprintf(p, (size_t)(q - p), " errID 0x%08x.%08x", 32267c478bd9Sstevel@tonic-gate (uint32_t)(aflt->flt_id >> 32), (uint32_t)aflt->flt_id); 32277c478bd9Sstevel@tonic-gate p += strlen(p); 32287c478bd9Sstevel@tonic-gate } 32297c478bd9Sstevel@tonic-gate 32307c478bd9Sstevel@tonic-gate /* 32317c478bd9Sstevel@tonic-gate * Copy the caller's format string verbatim into buf[]. It will be 32327c478bd9Sstevel@tonic-gate * formatted by the call to vcmn_err() at the end of this function. 32337c478bd9Sstevel@tonic-gate */ 32347c478bd9Sstevel@tonic-gate if (fmt != NULL && p < q) { 32357c478bd9Sstevel@tonic-gate (void) strncpy(p, fmt, (size_t)(q - p - 1)); 32367c478bd9Sstevel@tonic-gate buf[sizeof (buf) - 1] = '\0'; 32377c478bd9Sstevel@tonic-gate p += strlen(p); 32387c478bd9Sstevel@tonic-gate } 32397c478bd9Sstevel@tonic-gate 32407c478bd9Sstevel@tonic-gate if (spflt != NULL) { 32417c478bd9Sstevel@tonic-gate if (logflags & CPU_FLTCPU) { 32427c478bd9Sstevel@tonic-gate (void) snprintf(p, (size_t)(q - p), " CPU%d", 32437c478bd9Sstevel@tonic-gate aflt->flt_inst); 32447c478bd9Sstevel@tonic-gate p += strlen(p); 32457c478bd9Sstevel@tonic-gate } 32467c478bd9Sstevel@tonic-gate 32477c478bd9Sstevel@tonic-gate if (logflags & CPU_SPACE) { 32487c478bd9Sstevel@tonic-gate if (aflt->flt_status & ECC_D_TRAP) 32497c478bd9Sstevel@tonic-gate (void) snprintf(p, (size_t)(q - p), 32507c478bd9Sstevel@tonic-gate " Data access"); 32517c478bd9Sstevel@tonic-gate else if (aflt->flt_status & ECC_I_TRAP) 32527c478bd9Sstevel@tonic-gate (void) snprintf(p, (size_t)(q - p), 32537c478bd9Sstevel@tonic-gate " Instruction access"); 32547c478bd9Sstevel@tonic-gate p += strlen(p); 32557c478bd9Sstevel@tonic-gate } 32567c478bd9Sstevel@tonic-gate 32577c478bd9Sstevel@tonic-gate if (logflags & CPU_TL) { 32587c478bd9Sstevel@tonic-gate (void) snprintf(p, (size_t)(q - p), " at TL%s", 32597c478bd9Sstevel@tonic-gate aflt->flt_tl ? 
">0" : "=0"); 32607c478bd9Sstevel@tonic-gate p += strlen(p); 32617c478bd9Sstevel@tonic-gate } 32627c478bd9Sstevel@tonic-gate 32637c478bd9Sstevel@tonic-gate if (logflags & CPU_ERRID) { 32647c478bd9Sstevel@tonic-gate (void) snprintf(p, (size_t)(q - p), 32657c478bd9Sstevel@tonic-gate ", errID 0x%08x.%08x", 32667c478bd9Sstevel@tonic-gate (uint32_t)(aflt->flt_id >> 32), 32677c478bd9Sstevel@tonic-gate (uint32_t)aflt->flt_id); 32687c478bd9Sstevel@tonic-gate p += strlen(p); 32697c478bd9Sstevel@tonic-gate } 32707c478bd9Sstevel@tonic-gate 32717c478bd9Sstevel@tonic-gate if (logflags & CPU_AFSR) { 32727c478bd9Sstevel@tonic-gate (void) snprintf(p, (size_t)(q - p), 32737c478bd9Sstevel@tonic-gate "\n AFSR 0x%08b.%08b", 32747c478bd9Sstevel@tonic-gate (uint32_t)(aflt->flt_stat >> 32), AFSR_FMTSTR0, 32757c478bd9Sstevel@tonic-gate (uint32_t)aflt->flt_stat, AFSR_FMTSTR1); 32767c478bd9Sstevel@tonic-gate p += strlen(p); 32777c478bd9Sstevel@tonic-gate } 32787c478bd9Sstevel@tonic-gate 32797c478bd9Sstevel@tonic-gate if (logflags & CPU_AFAR) { 32807c478bd9Sstevel@tonic-gate (void) snprintf(p, (size_t)(q - p), " AFAR 0x%08x.%08x", 32817c478bd9Sstevel@tonic-gate (uint32_t)(aflt->flt_addr >> 32), 32827c478bd9Sstevel@tonic-gate (uint32_t)aflt->flt_addr); 32837c478bd9Sstevel@tonic-gate p += strlen(p); 32847c478bd9Sstevel@tonic-gate } 32857c478bd9Sstevel@tonic-gate 32867c478bd9Sstevel@tonic-gate if (logflags & CPU_AF_PSYND) { 32877c478bd9Sstevel@tonic-gate ushort_t psynd = (ushort_t) 32887c478bd9Sstevel@tonic-gate (aflt->flt_stat & P_AFSR_P_SYND); 32897c478bd9Sstevel@tonic-gate 32907c478bd9Sstevel@tonic-gate (void) snprintf(p, (size_t)(q - p), 32917c478bd9Sstevel@tonic-gate "\n AFSR.PSYND 0x%04x(Score %02d)", 32927c478bd9Sstevel@tonic-gate psynd, ecc_psynd_score(psynd)); 32937c478bd9Sstevel@tonic-gate p += strlen(p); 32947c478bd9Sstevel@tonic-gate } 32957c478bd9Sstevel@tonic-gate 32967c478bd9Sstevel@tonic-gate if (logflags & CPU_AF_ETS) { 32977c478bd9Sstevel@tonic-gate (void) snprintf(p, (size_t)(q - p), " AFSR.ETS 0x%02x", 32987c478bd9Sstevel@tonic-gate (uchar_t)((aflt->flt_stat & P_AFSR_ETS) >> 16)); 32997c478bd9Sstevel@tonic-gate p += strlen(p); 33007c478bd9Sstevel@tonic-gate } 33017c478bd9Sstevel@tonic-gate 33027c478bd9Sstevel@tonic-gate if (logflags & CPU_FAULTPC) { 33037c478bd9Sstevel@tonic-gate (void) snprintf(p, (size_t)(q - p), " Fault_PC 0x%p", 33047c478bd9Sstevel@tonic-gate (void *)aflt->flt_pc); 33057c478bd9Sstevel@tonic-gate p += strlen(p); 33067c478bd9Sstevel@tonic-gate } 33077c478bd9Sstevel@tonic-gate 33087c478bd9Sstevel@tonic-gate if (logflags & CPU_UDBH) { 33097c478bd9Sstevel@tonic-gate (void) snprintf(p, (size_t)(q - p), 33107c478bd9Sstevel@tonic-gate "\n UDBH 0x%04b UDBH.ESYND 0x%02x", 33117c478bd9Sstevel@tonic-gate spflt->flt_sdbh, UDB_FMTSTR, 33127c478bd9Sstevel@tonic-gate spflt->flt_sdbh & 0xFF); 33137c478bd9Sstevel@tonic-gate p += strlen(p); 33147c478bd9Sstevel@tonic-gate } 33157c478bd9Sstevel@tonic-gate 33167c478bd9Sstevel@tonic-gate if (logflags & CPU_UDBL) { 33177c478bd9Sstevel@tonic-gate (void) snprintf(p, (size_t)(q - p), 33187c478bd9Sstevel@tonic-gate " UDBL 0x%04b UDBL.ESYND 0x%02x", 33197c478bd9Sstevel@tonic-gate spflt->flt_sdbl, UDB_FMTSTR, 33207c478bd9Sstevel@tonic-gate spflt->flt_sdbl & 0xFF); 33217c478bd9Sstevel@tonic-gate p += strlen(p); 33227c478bd9Sstevel@tonic-gate } 33237c478bd9Sstevel@tonic-gate 33247c478bd9Sstevel@tonic-gate if (logflags & CPU_SYND) { 33257c478bd9Sstevel@tonic-gate ushort_t synd = SYND(aflt->flt_synd); 33267c478bd9Sstevel@tonic-gate 33277c478bd9Sstevel@tonic-gate 
(void) snprintf(p, (size_t)(q - p), 33287c478bd9Sstevel@tonic-gate "\n %s Syndrome 0x%x Memory Module ", 33297c478bd9Sstevel@tonic-gate UDBL(aflt->flt_synd) ? "UDBL" : "UDBH", synd); 33307c478bd9Sstevel@tonic-gate p += strlen(p); 33317c478bd9Sstevel@tonic-gate } 33327c478bd9Sstevel@tonic-gate } 33337c478bd9Sstevel@tonic-gate 33347c478bd9Sstevel@tonic-gate if (endstr != NULL) { 33357c478bd9Sstevel@tonic-gate if (!(logflags & CPU_SYND)) 33367c478bd9Sstevel@tonic-gate (void) snprintf(p, (size_t)(q - p), "\n %s", endstr); 33377c478bd9Sstevel@tonic-gate else 33387c478bd9Sstevel@tonic-gate (void) snprintf(p, (size_t)(q - p), "%s", endstr); 33397c478bd9Sstevel@tonic-gate p += strlen(p); 33407c478bd9Sstevel@tonic-gate } 33417c478bd9Sstevel@tonic-gate 33427c478bd9Sstevel@tonic-gate if (ce_code == CE_CONT && (p < q - 1)) 33437c478bd9Sstevel@tonic-gate (void) strcpy(p, "\n"); /* add final \n if needed */ 33447c478bd9Sstevel@tonic-gate 33457c478bd9Sstevel@tonic-gate va_start(ap, fmt); 33467c478bd9Sstevel@tonic-gate vcmn_err(ce_code, buf, ap); 33477c478bd9Sstevel@tonic-gate va_end(ap); 33487c478bd9Sstevel@tonic-gate } 33497c478bd9Sstevel@tonic-gate 33507c478bd9Sstevel@tonic-gate /* 33517c478bd9Sstevel@tonic-gate * Ecache Scrubbing 33527c478bd9Sstevel@tonic-gate * 33537c478bd9Sstevel@tonic-gate * The basic idea is to prevent lines from sitting in the ecache long enough 33547c478bd9Sstevel@tonic-gate * to build up soft errors which can lead to ecache parity errors. 33557c478bd9Sstevel@tonic-gate * 33567c478bd9Sstevel@tonic-gate * The following rules are observed when flushing the ecache: 33577c478bd9Sstevel@tonic-gate * 33587c478bd9Sstevel@tonic-gate * 1. When the system is busy, flush bad clean lines 33597c478bd9Sstevel@tonic-gate * 2. When the system is idle, flush all clean lines 33607c478bd9Sstevel@tonic-gate * 3. When the system is idle, flush good dirty lines 33617c478bd9Sstevel@tonic-gate * 4. Never flush bad dirty lines. 33627c478bd9Sstevel@tonic-gate * 33637c478bd9Sstevel@tonic-gate * modify parity busy idle 33647c478bd9Sstevel@tonic-gate * ---------------------------- 33657c478bd9Sstevel@tonic-gate * clean good X 33667c478bd9Sstevel@tonic-gate * clean bad X X 33677c478bd9Sstevel@tonic-gate * dirty good X 33687c478bd9Sstevel@tonic-gate * dirty bad 33697c478bd9Sstevel@tonic-gate * 33707c478bd9Sstevel@tonic-gate * Bad or good refers to whether a line has an E$ parity error or not. 33717c478bd9Sstevel@tonic-gate * Clean or dirty refers to the state of the modified bit. We currently 33727c478bd9Sstevel@tonic-gate * default the scan rate to 100 (scan 10% of the cache per second). 33737c478bd9Sstevel@tonic-gate * 33747c478bd9Sstevel@tonic-gate * The following are E$ states and actions. 33757c478bd9Sstevel@tonic-gate * 33767c478bd9Sstevel@tonic-gate * We encode our state as a 3-bit number, consisting of: 33777c478bd9Sstevel@tonic-gate * ECACHE_STATE_MODIFIED (0=clean, 1=dirty) 33787c478bd9Sstevel@tonic-gate * ECACHE_STATE_PARITY (0=good, 1=bad) 33797c478bd9Sstevel@tonic-gate * ECACHE_STATE_BUSY (0=idle, 1=busy) 33807c478bd9Sstevel@tonic-gate * 33817c478bd9Sstevel@tonic-gate * We associate a flushing and a logging action with each state. 33827c478bd9Sstevel@tonic-gate * 33837c478bd9Sstevel@tonic-gate * E$ actions are different for Spitfire and Sabre/Hummingbird modules. 33847c478bd9Sstevel@tonic-gate * MIRROR_FLUSH indicates that an E$ line will be flushed for the mirrored 33857c478bd9Sstevel@tonic-gate * E$ only, in addition to value being set by ec_flush. 
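 *
 * As a worked example of the encoding: a dirty line with bad parity found
 * while the system is busy encodes as ECACHE_STATE_MODIFIED |
 * ECACHE_STATE_PARITY | ECACHE_STATE_BUSY = 0x4 | 0x2 | 0x1 = 7, which
 * selects the dirty_bad_busy entry of ec_action[] below (NEVER_FLUSH,
 * logged with type CPU_BADLINE_DB_ERR).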
 */

#define	ALWAYS_FLUSH	0x1	/* flush E$ line on all E$ types */
#define	NEVER_FLUSH	0x0	/* never flush the E$ line */
#define	MIRROR_FLUSH	0xF	/* flush E$ line on mirrored E$ only */

struct {
	char	ec_flush;	/* whether to flush or not */
	char	ec_log;		/* ecache logging */
	char	ec_log_type;	/* log type info */
} ec_action[] = {	/* states of the E$ line in M P B */
	{ ALWAYS_FLUSH, 0, 0 },			 /* 0 0 0 clean_good_idle */
	{ MIRROR_FLUSH, 0, 0 },			 /* 0 0 1 clean_good_busy */
	{ ALWAYS_FLUSH, 1, CPU_BADLINE_CI_ERR }, /* 0 1 0 clean_bad_idle */
	{ ALWAYS_FLUSH, 1, CPU_BADLINE_CB_ERR }, /* 0 1 1 clean_bad_busy */
	{ ALWAYS_FLUSH, 0, 0 },			 /* 1 0 0 dirty_good_idle */
	{ MIRROR_FLUSH, 0, 0 },			 /* 1 0 1 dirty_good_busy */
	{ NEVER_FLUSH, 1, CPU_BADLINE_DI_ERR },	 /* 1 1 0 dirty_bad_idle */
	{ NEVER_FLUSH, 1, CPU_BADLINE_DB_ERR }	 /* 1 1 1 dirty_bad_busy */
};

/*
 * Offsets into ec_action[] that identify the clean_good_busy and
 * dirty_good_busy lines.
 */
#define	ECACHE_CGB_LINE		1	/* E$ clean_good_busy line */
#define	ECACHE_DGB_LINE		5	/* E$ dirty_good_busy line */

/*
 * We flush lines that are Clean_Good_Busy as well as lines that are
 * Dirty_Good_Busy, and we only do so for a non-mirrored E$.
 */
#define	CGB(x, m)	(((x) == ECACHE_CGB_LINE) && (m != ECACHE_CPU_MIRROR))
#define	DGB(x, m)	(((x) == ECACHE_DGB_LINE) && (m != ECACHE_CPU_MIRROR))

#define	ECACHE_STATE_MODIFIED	0x4
#define	ECACHE_STATE_PARITY	0x2
#define	ECACHE_STATE_BUSY	0x1

/*
 * If the ecache is mirrored, ecache_calls_a_sec and ecache_scan_rate are
 * reduced.
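 *
 * With the defaults below (ecache_calls_a_sec_mirrored = 1 and
 * ecache_lines_per_call_mirrored = 1), a mirrored E$ is scrubbed at only
 * one line per second, instead of the ecache_scan_rate driven rate used
 * for non-mirrored parts.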
34277c478bd9Sstevel@tonic-gate */ 34287c478bd9Sstevel@tonic-gate int ecache_calls_a_sec_mirrored = 1; 34297c478bd9Sstevel@tonic-gate int ecache_lines_per_call_mirrored = 1; 34307c478bd9Sstevel@tonic-gate 34317c478bd9Sstevel@tonic-gate int ecache_scrub_enable = 1; /* ecache scrubbing is on by default */ 34327c478bd9Sstevel@tonic-gate int ecache_scrub_verbose = 1; /* prints clean and dirty lines */ 34337c478bd9Sstevel@tonic-gate int ecache_scrub_panic = 0; /* panics on a clean and dirty line */ 34347c478bd9Sstevel@tonic-gate int ecache_calls_a_sec = 100; /* scrubber calls per sec */ 34357c478bd9Sstevel@tonic-gate int ecache_scan_rate = 100; /* scan rate (in tenths of a percent) */ 34367c478bd9Sstevel@tonic-gate int ecache_idle_factor = 1; /* increase the scan rate when idle */ 34377c478bd9Sstevel@tonic-gate int ecache_flush_clean_good_busy = 50; /* flush rate (in percent) */ 34387c478bd9Sstevel@tonic-gate int ecache_flush_dirty_good_busy = 100; /* flush rate (in percent) */ 34397c478bd9Sstevel@tonic-gate 34407c478bd9Sstevel@tonic-gate volatile int ec_timeout_calls = 1; /* timeout calls */ 34417c478bd9Sstevel@tonic-gate 34427c478bd9Sstevel@tonic-gate /* 34437c478bd9Sstevel@tonic-gate * Interrupt number and pil for ecache scrubber cross-trap calls. 34447c478bd9Sstevel@tonic-gate */ 34457c478bd9Sstevel@tonic-gate static uint_t ecache_scrub_inum; 34467c478bd9Sstevel@tonic-gate uint_t ecache_scrub_pil = PIL_9; 34477c478bd9Sstevel@tonic-gate 34487c478bd9Sstevel@tonic-gate /* 34497c478bd9Sstevel@tonic-gate * Kstats for the E$ scrubber. 34507c478bd9Sstevel@tonic-gate */ 34517c478bd9Sstevel@tonic-gate typedef struct ecache_kstat { 34527c478bd9Sstevel@tonic-gate kstat_named_t clean_good_idle; /* # of lines scrubbed */ 34537c478bd9Sstevel@tonic-gate kstat_named_t clean_good_busy; /* # of lines skipped */ 34547c478bd9Sstevel@tonic-gate kstat_named_t clean_bad_idle; /* # of lines scrubbed */ 34557c478bd9Sstevel@tonic-gate kstat_named_t clean_bad_busy; /* # of lines scrubbed */ 34567c478bd9Sstevel@tonic-gate kstat_named_t dirty_good_idle; /* # of lines scrubbed */ 34577c478bd9Sstevel@tonic-gate kstat_named_t dirty_good_busy; /* # of lines skipped */ 34587c478bd9Sstevel@tonic-gate kstat_named_t dirty_bad_idle; /* # of lines skipped */ 34597c478bd9Sstevel@tonic-gate kstat_named_t dirty_bad_busy; /* # of lines skipped */ 34607c478bd9Sstevel@tonic-gate kstat_named_t invalid_lines; /* # of invalid lines */ 34617c478bd9Sstevel@tonic-gate kstat_named_t clean_good_busy_flush; /* # of lines scrubbed */ 34627c478bd9Sstevel@tonic-gate kstat_named_t dirty_good_busy_flush; /* # of lines scrubbed */ 34637c478bd9Sstevel@tonic-gate kstat_named_t tags_cleared; /* # of E$ tags cleared */ 34647c478bd9Sstevel@tonic-gate } ecache_kstat_t; 34657c478bd9Sstevel@tonic-gate 34667c478bd9Sstevel@tonic-gate static ecache_kstat_t ec_kstat_template = { 34677c478bd9Sstevel@tonic-gate { "clean_good_idle", KSTAT_DATA_ULONG }, 34687c478bd9Sstevel@tonic-gate { "clean_good_busy", KSTAT_DATA_ULONG }, 34697c478bd9Sstevel@tonic-gate { "clean_bad_idle", KSTAT_DATA_ULONG }, 34707c478bd9Sstevel@tonic-gate { "clean_bad_busy", KSTAT_DATA_ULONG }, 34717c478bd9Sstevel@tonic-gate { "dirty_good_idle", KSTAT_DATA_ULONG }, 34727c478bd9Sstevel@tonic-gate { "dirty_good_busy", KSTAT_DATA_ULONG }, 34737c478bd9Sstevel@tonic-gate { "dirty_bad_idle", KSTAT_DATA_ULONG }, 34747c478bd9Sstevel@tonic-gate { "dirty_bad_busy", KSTAT_DATA_ULONG }, 34757c478bd9Sstevel@tonic-gate { "invalid_lines", KSTAT_DATA_ULONG }, 34767c478bd9Sstevel@tonic-gate { 
"clean_good_busy_flush", KSTAT_DATA_ULONG }, 34777c478bd9Sstevel@tonic-gate { "dirty_good_busy_flush", KSTAT_DATA_ULONG }, 34787c478bd9Sstevel@tonic-gate { "ecache_tags_cleared", KSTAT_DATA_ULONG } 34797c478bd9Sstevel@tonic-gate }; 34807c478bd9Sstevel@tonic-gate 34817c478bd9Sstevel@tonic-gate struct kmem_cache *sf_private_cache; 34827c478bd9Sstevel@tonic-gate 34837c478bd9Sstevel@tonic-gate /* 34847c478bd9Sstevel@tonic-gate * Called periodically on each CPU to scan the ecache once a sec. 34857c478bd9Sstevel@tonic-gate * adjusting the ecache line index appropriately 34867c478bd9Sstevel@tonic-gate */ 34877c478bd9Sstevel@tonic-gate void 34887c478bd9Sstevel@tonic-gate scrub_ecache_line() 34897c478bd9Sstevel@tonic-gate { 34907c478bd9Sstevel@tonic-gate spitfire_scrub_misc_t *ssmp = CPU_PRIVATE_PTR(CPU, sfpr_scrub_misc); 34917c478bd9Sstevel@tonic-gate int cpuid = CPU->cpu_id; 34927c478bd9Sstevel@tonic-gate uint32_t index = ssmp->ecache_flush_index; 34937c478bd9Sstevel@tonic-gate uint64_t ec_size = cpunodes[cpuid].ecache_size; 34947c478bd9Sstevel@tonic-gate size_t ec_linesize = cpunodes[cpuid].ecache_linesize; 34957c478bd9Sstevel@tonic-gate int nlines = ssmp->ecache_nlines; 34967c478bd9Sstevel@tonic-gate uint32_t ec_set_size = ec_size / ecache_associativity; 34977c478bd9Sstevel@tonic-gate int ec_mirror = ssmp->ecache_mirror; 34987c478bd9Sstevel@tonic-gate ecache_kstat_t *ec_ksp = (ecache_kstat_t *)ssmp->ecache_ksp->ks_data; 34997c478bd9Sstevel@tonic-gate 35007c478bd9Sstevel@tonic-gate int line, scan_lines, flush_clean_busy = 0, flush_dirty_busy = 0; 35017c478bd9Sstevel@tonic-gate int mpb; /* encode Modified, Parity, Busy for action */ 35027c478bd9Sstevel@tonic-gate uchar_t state; 35037c478bd9Sstevel@tonic-gate uint64_t ec_tag, paddr, oafsr, tafsr, nafsr; 35047c478bd9Sstevel@tonic-gate uint64_t *acc_afsr = CPU_PRIVATE_PTR(CPU, sfpr_scrub_afsr); 35057c478bd9Sstevel@tonic-gate ec_data_t ec_data[8]; 35067c478bd9Sstevel@tonic-gate kstat_named_t *ec_knp; 35077c478bd9Sstevel@tonic-gate 35087c478bd9Sstevel@tonic-gate switch (ec_mirror) { 35097c478bd9Sstevel@tonic-gate default: 35107c478bd9Sstevel@tonic-gate case ECACHE_CPU_NON_MIRROR: 35117c478bd9Sstevel@tonic-gate /* 35127c478bd9Sstevel@tonic-gate * The E$ scan rate is expressed in units of tenths of 35137c478bd9Sstevel@tonic-gate * a percent. ecache_scan_rate = 1000 (100%) means the 35147c478bd9Sstevel@tonic-gate * whole cache is scanned every second. 35157c478bd9Sstevel@tonic-gate */ 35167c478bd9Sstevel@tonic-gate scan_lines = (nlines * ecache_scan_rate) / 35177c478bd9Sstevel@tonic-gate (1000 * ecache_calls_a_sec); 35187c478bd9Sstevel@tonic-gate if (!(ssmp->ecache_busy)) { 35197c478bd9Sstevel@tonic-gate if (ecache_idle_factor > 0) { 35207c478bd9Sstevel@tonic-gate scan_lines *= ecache_idle_factor; 35217c478bd9Sstevel@tonic-gate } 35227c478bd9Sstevel@tonic-gate } else { 35237c478bd9Sstevel@tonic-gate flush_clean_busy = (scan_lines * 35247c478bd9Sstevel@tonic-gate ecache_flush_clean_good_busy) / 100; 35257c478bd9Sstevel@tonic-gate flush_dirty_busy = (scan_lines * 35267c478bd9Sstevel@tonic-gate ecache_flush_dirty_good_busy) / 100; 35277c478bd9Sstevel@tonic-gate } 35287c478bd9Sstevel@tonic-gate 35297c478bd9Sstevel@tonic-gate ec_timeout_calls = (ecache_calls_a_sec ? 
		    ecache_calls_a_sec : 1);
		break;

	case ECACHE_CPU_MIRROR:
		scan_lines = ecache_lines_per_call_mirrored;
		ec_timeout_calls = (ecache_calls_a_sec_mirrored ?
		    ecache_calls_a_sec_mirrored : 1);
		break;
	}

	/*
	 * The ecache scrubber algorithm operates by reading and
	 * decoding the E$ tag to determine whether the corresponding E$ line
	 * can be scrubbed.  There is an implicit assumption in the scrubber
	 * logic that the E$ tag is valid.  Unfortunately, this assumption is
	 * flawed since the E$ tag may itself be corrupted and have parity
	 * errors.  The scrubber logic therefore checks the validity of the
	 * E$ tag before scrubbing.  When a parity error is detected in the
	 * E$ tag, it is possible to recover and scrub the tag under certain
	 * conditions so that an ETP error condition can be avoided.
	 */

	for (mpb = line = 0; line < scan_lines; line++, mpb = 0) {
		/*
		 * We get the old-AFSR before clearing the AFSR sticky bits
		 * in {get_ecache_tag, check_ecache_line, get_ecache_dtag}.
		 * If the CP bit is set in the old-AFSR, we log an Orphan CP
		 * event.
		 */
		ec_tag = get_ecache_tag(index, &nafsr, acc_afsr);
		state = (uchar_t)((ec_tag & cpu_ec_state_mask) >>
		    cpu_ec_state_shift);

		/*
		 * If ETP is set, try to scrub the ecache tag.
		 */
		if (nafsr & P_AFSR_ETP) {
			ecache_scrub_tag_err(nafsr, state, index);
		} else if (state & cpu_ec_state_valid) {
			/*
			 * ETP is not set and the E$ tag is valid.
			 * Proceed with the E$ scrubbing.
35717c478bd9Sstevel@tonic-gate */ 35727c478bd9Sstevel@tonic-gate if (state & cpu_ec_state_dirty) 35737c478bd9Sstevel@tonic-gate mpb |= ECACHE_STATE_MODIFIED; 35747c478bd9Sstevel@tonic-gate 35757c478bd9Sstevel@tonic-gate tafsr = check_ecache_line(index, acc_afsr); 35767c478bd9Sstevel@tonic-gate 35777c478bd9Sstevel@tonic-gate if (tafsr & P_AFSR_EDP) { 35787c478bd9Sstevel@tonic-gate mpb |= ECACHE_STATE_PARITY; 35797c478bd9Sstevel@tonic-gate 35807c478bd9Sstevel@tonic-gate if (ecache_scrub_verbose || 35817c478bd9Sstevel@tonic-gate ecache_scrub_panic) { 35827c478bd9Sstevel@tonic-gate get_ecache_dtag(P2ALIGN(index, 64), 35837c478bd9Sstevel@tonic-gate (uint64_t *)&ec_data[0], 35847c478bd9Sstevel@tonic-gate &ec_tag, &oafsr, acc_afsr); 35857c478bd9Sstevel@tonic-gate } 35867c478bd9Sstevel@tonic-gate } 35877c478bd9Sstevel@tonic-gate 35887c478bd9Sstevel@tonic-gate if (ssmp->ecache_busy) 35897c478bd9Sstevel@tonic-gate mpb |= ECACHE_STATE_BUSY; 35907c478bd9Sstevel@tonic-gate 35917c478bd9Sstevel@tonic-gate ec_knp = (kstat_named_t *)ec_ksp + mpb; 35927c478bd9Sstevel@tonic-gate ec_knp->value.ul++; 35937c478bd9Sstevel@tonic-gate 35947c478bd9Sstevel@tonic-gate paddr = ((ec_tag & cpu_ec_tag_mask) << 35957c478bd9Sstevel@tonic-gate cpu_ec_tag_shift) | (index % ec_set_size); 35967c478bd9Sstevel@tonic-gate 35977c478bd9Sstevel@tonic-gate /* 35987c478bd9Sstevel@tonic-gate * We flush the E$ lines depending on the ec_flush, 35997c478bd9Sstevel@tonic-gate * we additionally flush clean_good_busy and 36007c478bd9Sstevel@tonic-gate * dirty_good_busy lines for mirrored E$. 36017c478bd9Sstevel@tonic-gate */ 36027c478bd9Sstevel@tonic-gate if (ec_action[mpb].ec_flush == ALWAYS_FLUSH) { 36037c478bd9Sstevel@tonic-gate flushecacheline(paddr, ec_size); 36047c478bd9Sstevel@tonic-gate } else if ((ec_mirror == ECACHE_CPU_MIRROR) && 36057c478bd9Sstevel@tonic-gate (ec_action[mpb].ec_flush == MIRROR_FLUSH)) { 36067c478bd9Sstevel@tonic-gate flushecacheline(paddr, ec_size); 36077c478bd9Sstevel@tonic-gate } else if (ec_action[mpb].ec_flush == NEVER_FLUSH) { 36087c478bd9Sstevel@tonic-gate softcall(ecache_page_retire, (void *)paddr); 36097c478bd9Sstevel@tonic-gate } 36107c478bd9Sstevel@tonic-gate 36117c478bd9Sstevel@tonic-gate /* 36127c478bd9Sstevel@tonic-gate * Conditionally flush both the clean_good and 36137c478bd9Sstevel@tonic-gate * dirty_good lines when busy. 
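 *
 * For example, if this pass has scan_lines = 200 (an illustrative value)
 * and the defaults ecache_flush_clean_good_busy = 50 and
 * ecache_flush_dirty_good_busy = 100 are in effect, the budgets computed
 * earlier allow at most 200 * 50 / 100 = 100 clean_good_busy flushes and
 * 200 * 100 / 100 = 200 dirty_good_busy flushes on this pass.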
36147c478bd9Sstevel@tonic-gate */ 36157c478bd9Sstevel@tonic-gate if (CGB(mpb, ec_mirror) && (flush_clean_busy > 0)) { 36167c478bd9Sstevel@tonic-gate flush_clean_busy--; 36177c478bd9Sstevel@tonic-gate flushecacheline(paddr, ec_size); 36187c478bd9Sstevel@tonic-gate ec_ksp->clean_good_busy_flush.value.ul++; 36197c478bd9Sstevel@tonic-gate } else if (DGB(mpb, ec_mirror) && 36207c478bd9Sstevel@tonic-gate (flush_dirty_busy > 0)) { 36217c478bd9Sstevel@tonic-gate flush_dirty_busy--; 36227c478bd9Sstevel@tonic-gate flushecacheline(paddr, ec_size); 36237c478bd9Sstevel@tonic-gate ec_ksp->dirty_good_busy_flush.value.ul++; 36247c478bd9Sstevel@tonic-gate } 36257c478bd9Sstevel@tonic-gate 36267c478bd9Sstevel@tonic-gate if (ec_action[mpb].ec_log && (ecache_scrub_verbose || 36277c478bd9Sstevel@tonic-gate ecache_scrub_panic)) { 36287c478bd9Sstevel@tonic-gate ecache_scrub_log(ec_data, ec_tag, paddr, mpb, 36297c478bd9Sstevel@tonic-gate tafsr); 36307c478bd9Sstevel@tonic-gate } 36317c478bd9Sstevel@tonic-gate 36327c478bd9Sstevel@tonic-gate } else { 36337c478bd9Sstevel@tonic-gate ec_ksp->invalid_lines.value.ul++; 36347c478bd9Sstevel@tonic-gate } 36357c478bd9Sstevel@tonic-gate 36367c478bd9Sstevel@tonic-gate if ((index += ec_linesize) >= ec_size) 36377c478bd9Sstevel@tonic-gate index = 0; 36387c478bd9Sstevel@tonic-gate 36397c478bd9Sstevel@tonic-gate } 36407c478bd9Sstevel@tonic-gate 36417c478bd9Sstevel@tonic-gate /* 36427c478bd9Sstevel@tonic-gate * set the ecache scrub index for the next time around 36437c478bd9Sstevel@tonic-gate */ 36447c478bd9Sstevel@tonic-gate ssmp->ecache_flush_index = index; 36457c478bd9Sstevel@tonic-gate 36467c478bd9Sstevel@tonic-gate if (*acc_afsr & P_AFSR_CP) { 36477c478bd9Sstevel@tonic-gate uint64_t ret_afsr; 36487c478bd9Sstevel@tonic-gate 36497c478bd9Sstevel@tonic-gate ret_afsr = ecache_scrub_misc_err(CPU_ORPHAN_CP_ERR, *acc_afsr); 36507c478bd9Sstevel@tonic-gate if ((ret_afsr & P_AFSR_CP) == 0) 36517c478bd9Sstevel@tonic-gate *acc_afsr = 0; 36527c478bd9Sstevel@tonic-gate } 36537c478bd9Sstevel@tonic-gate } 36547c478bd9Sstevel@tonic-gate 36557c478bd9Sstevel@tonic-gate /* 36567c478bd9Sstevel@tonic-gate * Handler for ecache_scrub_inum softint. Call scrub_ecache_line until 36577c478bd9Sstevel@tonic-gate * we decrement the outstanding request count to zero. 
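 *
 * A sketch of the loop's behaviour: if, say, 3 requests are outstanding,
 * scrub_ecache_line() is called 3 times and then 3 is atomically
 * subtracted from ec_scrub_outstanding; if further requests were posted
 * while we were scrubbing, the result is non-zero and the loop repeats,
 * so no posted request is lost.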
36587c478bd9Sstevel@tonic-gate */ 36597c478bd9Sstevel@tonic-gate 36607c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 36617c478bd9Sstevel@tonic-gate uint_t 36627c478bd9Sstevel@tonic-gate scrub_ecache_line_intr(caddr_t arg1, caddr_t arg2) 36637c478bd9Sstevel@tonic-gate { 36647c478bd9Sstevel@tonic-gate int i; 36657c478bd9Sstevel@tonic-gate int outstanding; 36667c478bd9Sstevel@tonic-gate spitfire_scrub_misc_t *ssmp = CPU_PRIVATE_PTR(CPU, sfpr_scrub_misc); 36677c478bd9Sstevel@tonic-gate uint32_t *countp = &ssmp->ec_scrub_outstanding; 36687c478bd9Sstevel@tonic-gate 36697c478bd9Sstevel@tonic-gate do { 36707c478bd9Sstevel@tonic-gate outstanding = *countp; 36717c478bd9Sstevel@tonic-gate ASSERT(outstanding > 0); 36727c478bd9Sstevel@tonic-gate for (i = 0; i < outstanding; i++) 36737c478bd9Sstevel@tonic-gate scrub_ecache_line(); 36747c478bd9Sstevel@tonic-gate } while (atomic_add_32_nv(countp, -outstanding)); 36757c478bd9Sstevel@tonic-gate 36767c478bd9Sstevel@tonic-gate return (DDI_INTR_CLAIMED); 36777c478bd9Sstevel@tonic-gate } 36787c478bd9Sstevel@tonic-gate 36797c478bd9Sstevel@tonic-gate /* 36807c478bd9Sstevel@tonic-gate * force each cpu to perform an ecache scrub, called from a timeout 36817c478bd9Sstevel@tonic-gate */ 36827c478bd9Sstevel@tonic-gate extern xcfunc_t ecache_scrubreq_tl1; 36837c478bd9Sstevel@tonic-gate 36847c478bd9Sstevel@tonic-gate void 36857c478bd9Sstevel@tonic-gate do_scrub_ecache_line(void) 36867c478bd9Sstevel@tonic-gate { 36877c478bd9Sstevel@tonic-gate long delta; 36887c478bd9Sstevel@tonic-gate 36897c478bd9Sstevel@tonic-gate if (ecache_calls_a_sec > hz) 36907c478bd9Sstevel@tonic-gate ecache_calls_a_sec = hz; 36917c478bd9Sstevel@tonic-gate else if (ecache_calls_a_sec <= 0) 36927c478bd9Sstevel@tonic-gate ecache_calls_a_sec = 1; 36937c478bd9Sstevel@tonic-gate 36947c478bd9Sstevel@tonic-gate if (ecache_calls_a_sec_mirrored > hz) 36957c478bd9Sstevel@tonic-gate ecache_calls_a_sec_mirrored = hz; 36967c478bd9Sstevel@tonic-gate else if (ecache_calls_a_sec_mirrored <= 0) 36977c478bd9Sstevel@tonic-gate ecache_calls_a_sec_mirrored = 1; 36987c478bd9Sstevel@tonic-gate 36997c478bd9Sstevel@tonic-gate if (ecache_scrub_enable) { 37007c478bd9Sstevel@tonic-gate xt_all(ecache_scrubreq_tl1, ecache_scrub_inum, 0); 37017c478bd9Sstevel@tonic-gate delta = hz / ec_timeout_calls; 37027c478bd9Sstevel@tonic-gate } else { 37037c478bd9Sstevel@tonic-gate delta = hz; 37047c478bd9Sstevel@tonic-gate } 37057c478bd9Sstevel@tonic-gate 37067c478bd9Sstevel@tonic-gate (void) realtime_timeout((void(*)(void *))do_scrub_ecache_line, 0, 37077c478bd9Sstevel@tonic-gate delta); 37087c478bd9Sstevel@tonic-gate } 37097c478bd9Sstevel@tonic-gate 37107c478bd9Sstevel@tonic-gate /* 37117c478bd9Sstevel@tonic-gate * initialization for ecache scrubbing 37127c478bd9Sstevel@tonic-gate * This routine is called AFTER all cpus have had cpu_init_private called 37137c478bd9Sstevel@tonic-gate * to initialize their private data areas. 37147c478bd9Sstevel@tonic-gate */ 37157c478bd9Sstevel@tonic-gate void 37167c478bd9Sstevel@tonic-gate cpu_init_cache_scrub(void) 37177c478bd9Sstevel@tonic-gate { 37187c478bd9Sstevel@tonic-gate if (ecache_calls_a_sec > hz) { 37197c478bd9Sstevel@tonic-gate cmn_err(CE_NOTE, "ecache_calls_a_sec set too high (%d); " 37207c478bd9Sstevel@tonic-gate "resetting to hz (%d)", ecache_calls_a_sec, hz); 37217c478bd9Sstevel@tonic-gate ecache_calls_a_sec = hz; 37227c478bd9Sstevel@tonic-gate } 37237c478bd9Sstevel@tonic-gate 37247c478bd9Sstevel@tonic-gate /* 37257c478bd9Sstevel@tonic-gate * Register softint for ecache scrubbing. 
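 *
 * (With hz = 100 and the default ecache_calls_a_sec = 100, the
 * realtime_timeout() below fires on every clock tick; the hz value is
 * platform dependent and is used here only as an illustration.)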
37267c478bd9Sstevel@tonic-gate */ 37277c478bd9Sstevel@tonic-gate ecache_scrub_inum = add_softintr(ecache_scrub_pil, 37287c478bd9Sstevel@tonic-gate scrub_ecache_line_intr, NULL); 37297c478bd9Sstevel@tonic-gate 37307c478bd9Sstevel@tonic-gate /* 37317c478bd9Sstevel@tonic-gate * kick off the scrubbing using realtime timeout 37327c478bd9Sstevel@tonic-gate */ 37337c478bd9Sstevel@tonic-gate (void) realtime_timeout((void(*)(void *))do_scrub_ecache_line, 0, 37347c478bd9Sstevel@tonic-gate hz / ecache_calls_a_sec); 37357c478bd9Sstevel@tonic-gate } 37367c478bd9Sstevel@tonic-gate 37377c478bd9Sstevel@tonic-gate /* 37387c478bd9Sstevel@tonic-gate * Unset the busy flag for this cpu. 37397c478bd9Sstevel@tonic-gate */ 37407c478bd9Sstevel@tonic-gate void 37417c478bd9Sstevel@tonic-gate cpu_idle_ecache_scrub(struct cpu *cp) 37427c478bd9Sstevel@tonic-gate { 37437c478bd9Sstevel@tonic-gate if (CPU_PRIVATE(cp) != NULL) { 37447c478bd9Sstevel@tonic-gate spitfire_scrub_misc_t *ssmp = CPU_PRIVATE_PTR(cp, 37457c478bd9Sstevel@tonic-gate sfpr_scrub_misc); 37467c478bd9Sstevel@tonic-gate ssmp->ecache_busy = ECACHE_CPU_IDLE; 37477c478bd9Sstevel@tonic-gate } 37487c478bd9Sstevel@tonic-gate } 37497c478bd9Sstevel@tonic-gate 37507c478bd9Sstevel@tonic-gate /* 37517c478bd9Sstevel@tonic-gate * Set the busy flag for this cpu. 37527c478bd9Sstevel@tonic-gate */ 37537c478bd9Sstevel@tonic-gate void 37547c478bd9Sstevel@tonic-gate cpu_busy_ecache_scrub(struct cpu *cp) 37557c478bd9Sstevel@tonic-gate { 37567c478bd9Sstevel@tonic-gate if (CPU_PRIVATE(cp) != NULL) { 37577c478bd9Sstevel@tonic-gate spitfire_scrub_misc_t *ssmp = CPU_PRIVATE_PTR(cp, 37587c478bd9Sstevel@tonic-gate sfpr_scrub_misc); 37597c478bd9Sstevel@tonic-gate ssmp->ecache_busy = ECACHE_CPU_BUSY; 37607c478bd9Sstevel@tonic-gate } 37617c478bd9Sstevel@tonic-gate } 37627c478bd9Sstevel@tonic-gate 37637c478bd9Sstevel@tonic-gate /* 37647c478bd9Sstevel@tonic-gate * initialize the ecache scrubber data structures 37657c478bd9Sstevel@tonic-gate * The global entry point cpu_init_private replaces this entry point. 
37667c478bd9Sstevel@tonic-gate * 37677c478bd9Sstevel@tonic-gate */ 37687c478bd9Sstevel@tonic-gate static void 37697c478bd9Sstevel@tonic-gate cpu_init_ecache_scrub_dr(struct cpu *cp) 37707c478bd9Sstevel@tonic-gate { 37717c478bd9Sstevel@tonic-gate spitfire_scrub_misc_t *ssmp = CPU_PRIVATE_PTR(cp, sfpr_scrub_misc); 37727c478bd9Sstevel@tonic-gate int cpuid = cp->cpu_id; 37737c478bd9Sstevel@tonic-gate 37747c478bd9Sstevel@tonic-gate /* 37757c478bd9Sstevel@tonic-gate * intialize bookkeeping for cache scrubbing 37767c478bd9Sstevel@tonic-gate */ 37777c478bd9Sstevel@tonic-gate bzero(ssmp, sizeof (spitfire_scrub_misc_t)); 37787c478bd9Sstevel@tonic-gate 37797c478bd9Sstevel@tonic-gate ssmp->ecache_flush_index = 0; 37807c478bd9Sstevel@tonic-gate 37817c478bd9Sstevel@tonic-gate ssmp->ecache_nlines = 37827c478bd9Sstevel@tonic-gate cpunodes[cpuid].ecache_size / cpunodes[cpuid].ecache_linesize; 37837c478bd9Sstevel@tonic-gate 37847c478bd9Sstevel@tonic-gate /* 37857c478bd9Sstevel@tonic-gate * Determine whether we are running on mirrored SRAM 37867c478bd9Sstevel@tonic-gate */ 37877c478bd9Sstevel@tonic-gate 37887c478bd9Sstevel@tonic-gate if (cpunodes[cpuid].msram == ECACHE_CPU_MIRROR) 37897c478bd9Sstevel@tonic-gate ssmp->ecache_mirror = ECACHE_CPU_MIRROR; 37907c478bd9Sstevel@tonic-gate else 37917c478bd9Sstevel@tonic-gate ssmp->ecache_mirror = ECACHE_CPU_NON_MIRROR; 37927c478bd9Sstevel@tonic-gate 37937c478bd9Sstevel@tonic-gate cpu_busy_ecache_scrub(cp); 37947c478bd9Sstevel@tonic-gate 37957c478bd9Sstevel@tonic-gate /* 37967c478bd9Sstevel@tonic-gate * initialize the kstats 37977c478bd9Sstevel@tonic-gate */ 37987c478bd9Sstevel@tonic-gate ecache_kstat_init(cp); 37997c478bd9Sstevel@tonic-gate } 38007c478bd9Sstevel@tonic-gate 38017c478bd9Sstevel@tonic-gate /* 38027c478bd9Sstevel@tonic-gate * uninitialize the ecache scrubber data structures 38037c478bd9Sstevel@tonic-gate * The global entry point cpu_uninit_private replaces this entry point. 38047c478bd9Sstevel@tonic-gate */ 38057c478bd9Sstevel@tonic-gate static void 38067c478bd9Sstevel@tonic-gate cpu_uninit_ecache_scrub_dr(struct cpu *cp) 38077c478bd9Sstevel@tonic-gate { 38087c478bd9Sstevel@tonic-gate spitfire_scrub_misc_t *ssmp = CPU_PRIVATE_PTR(cp, sfpr_scrub_misc); 38097c478bd9Sstevel@tonic-gate 38107c478bd9Sstevel@tonic-gate if (ssmp->ecache_ksp != NULL) { 38117c478bd9Sstevel@tonic-gate kstat_delete(ssmp->ecache_ksp); 38127c478bd9Sstevel@tonic-gate ssmp->ecache_ksp = NULL; 38137c478bd9Sstevel@tonic-gate } 38147c478bd9Sstevel@tonic-gate 38157c478bd9Sstevel@tonic-gate /* 38167c478bd9Sstevel@tonic-gate * un-initialize bookkeeping for cache scrubbing 38177c478bd9Sstevel@tonic-gate */ 38187c478bd9Sstevel@tonic-gate bzero(ssmp, sizeof (spitfire_scrub_misc_t)); 38197c478bd9Sstevel@tonic-gate 38207c478bd9Sstevel@tonic-gate cpu_idle_ecache_scrub(cp); 38217c478bd9Sstevel@tonic-gate } 38227c478bd9Sstevel@tonic-gate 38237c478bd9Sstevel@tonic-gate struct kmem_cache *sf_private_cache; 38247c478bd9Sstevel@tonic-gate 38257c478bd9Sstevel@tonic-gate /* 38267c478bd9Sstevel@tonic-gate * Cpu private initialization. This includes allocating the cpu_private 38277c478bd9Sstevel@tonic-gate * data structure, initializing it, and initializing the scrubber for this 38287c478bd9Sstevel@tonic-gate * cpu. This is called once for EVERY cpu, including CPU 0. This function 38297c478bd9Sstevel@tonic-gate * calls cpu_init_ecache_scrub_dr to init the scrubber. 
 * We use kmem_cache_create for the spitfire private data structure because it
 * needs to be allocated on an S_ECACHE_MAX_LSIZE (64) byte boundary.
 */
void
cpu_init_private(struct cpu *cp)
{
	spitfire_private_t *sfprp;

	ASSERT(CPU_PRIVATE(cp) == NULL);

	/*
	 * If the sf_private_cache has not been created, create it.
	 */
	if (sf_private_cache == NULL) {
		sf_private_cache = kmem_cache_create("sf_private_cache",
		    sizeof (spitfire_private_t), S_ECACHE_MAX_LSIZE, NULL,
		    NULL, NULL, NULL, NULL, 0);
		ASSERT(sf_private_cache);
	}

	sfprp = CPU_PRIVATE(cp) = kmem_cache_alloc(sf_private_cache, KM_SLEEP);

	bzero(sfprp, sizeof (spitfire_private_t));

	cpu_init_ecache_scrub_dr(cp);
}

/*
 * Cpu private uninitialization.  Uninitialize the Ecache scrubber and
 * deallocate the scrubber data structures and cpu_private data structure.
 * For now, this function just calls cpu_uninit_ecache_scrub_dr to uninit
 * the scrubber for the specified cpu.
38627c478bd9Sstevel@tonic-gate */ 38637c478bd9Sstevel@tonic-gate void 38647c478bd9Sstevel@tonic-gate cpu_uninit_private(struct cpu *cp) 38657c478bd9Sstevel@tonic-gate { 38667c478bd9Sstevel@tonic-gate ASSERT(CPU_PRIVATE(cp)); 38677c478bd9Sstevel@tonic-gate 38687c478bd9Sstevel@tonic-gate cpu_uninit_ecache_scrub_dr(cp); 38697c478bd9Sstevel@tonic-gate kmem_cache_free(sf_private_cache, CPU_PRIVATE(cp)); 38707c478bd9Sstevel@tonic-gate CPU_PRIVATE(cp) = NULL; 38717c478bd9Sstevel@tonic-gate } 38727c478bd9Sstevel@tonic-gate 38737c478bd9Sstevel@tonic-gate /* 38747c478bd9Sstevel@tonic-gate * initialize the ecache kstats for each cpu 38757c478bd9Sstevel@tonic-gate */ 38767c478bd9Sstevel@tonic-gate static void 38777c478bd9Sstevel@tonic-gate ecache_kstat_init(struct cpu *cp) 38787c478bd9Sstevel@tonic-gate { 38797c478bd9Sstevel@tonic-gate struct kstat *ksp; 38807c478bd9Sstevel@tonic-gate spitfire_scrub_misc_t *ssmp = CPU_PRIVATE_PTR(cp, sfpr_scrub_misc); 38817c478bd9Sstevel@tonic-gate 38827c478bd9Sstevel@tonic-gate ASSERT(ssmp != NULL); 38837c478bd9Sstevel@tonic-gate 38847c478bd9Sstevel@tonic-gate if ((ksp = kstat_create("unix", cp->cpu_id, "ecache_kstat", "misc", 38857c478bd9Sstevel@tonic-gate KSTAT_TYPE_NAMED, 38867c478bd9Sstevel@tonic-gate sizeof (ecache_kstat_t) / sizeof (kstat_named_t), 38877c478bd9Sstevel@tonic-gate KSTAT_FLAG_WRITABLE)) == NULL) { 38887c478bd9Sstevel@tonic-gate ssmp->ecache_ksp = NULL; 38897c478bd9Sstevel@tonic-gate cmn_err(CE_NOTE, "!ecache_kstat_init(%d) failed\n", cp->cpu_id); 38907c478bd9Sstevel@tonic-gate return; 38917c478bd9Sstevel@tonic-gate } 38927c478bd9Sstevel@tonic-gate 38937c478bd9Sstevel@tonic-gate ssmp->ecache_ksp = ksp; 38947c478bd9Sstevel@tonic-gate bcopy(&ec_kstat_template, ksp->ks_data, sizeof (ecache_kstat_t)); 38957c478bd9Sstevel@tonic-gate kstat_install(ksp); 38967c478bd9Sstevel@tonic-gate } 38977c478bd9Sstevel@tonic-gate 38987c478bd9Sstevel@tonic-gate /* 38997c478bd9Sstevel@tonic-gate * log the bad ecache information 39007c478bd9Sstevel@tonic-gate */ 39017c478bd9Sstevel@tonic-gate static void 39027c478bd9Sstevel@tonic-gate ecache_scrub_log(ec_data_t *ec_data, uint64_t ec_tag, uint64_t paddr, int mpb, 39037c478bd9Sstevel@tonic-gate uint64_t afsr) 39047c478bd9Sstevel@tonic-gate { 39057c478bd9Sstevel@tonic-gate spitf_async_flt spf_flt; 39067c478bd9Sstevel@tonic-gate struct async_flt *aflt; 39077c478bd9Sstevel@tonic-gate int i; 39087c478bd9Sstevel@tonic-gate char *class; 39097c478bd9Sstevel@tonic-gate 39107c478bd9Sstevel@tonic-gate bzero(&spf_flt, sizeof (spitf_async_flt)); 39117c478bd9Sstevel@tonic-gate aflt = &spf_flt.cmn_asyncflt; 39127c478bd9Sstevel@tonic-gate 39137c478bd9Sstevel@tonic-gate for (i = 0; i < 8; i++) { 39147c478bd9Sstevel@tonic-gate spf_flt.flt_ec_data[i] = ec_data[i]; 39157c478bd9Sstevel@tonic-gate } 39167c478bd9Sstevel@tonic-gate 39177c478bd9Sstevel@tonic-gate spf_flt.flt_ec_tag = ec_tag; 39187c478bd9Sstevel@tonic-gate 39197c478bd9Sstevel@tonic-gate if (mpb < (sizeof (ec_action) / sizeof (ec_action[0]))) { 39207c478bd9Sstevel@tonic-gate spf_flt.flt_type = ec_action[mpb].ec_log_type; 39217c478bd9Sstevel@tonic-gate } else spf_flt.flt_type = (ushort_t)mpb; 39227c478bd9Sstevel@tonic-gate 39237c478bd9Sstevel@tonic-gate aflt->flt_inst = CPU->cpu_id; 39247c478bd9Sstevel@tonic-gate aflt->flt_class = CPU_FAULT; 39257c478bd9Sstevel@tonic-gate aflt->flt_id = gethrtime_waitfree(); 39267c478bd9Sstevel@tonic-gate aflt->flt_addr = paddr; 39277c478bd9Sstevel@tonic-gate aflt->flt_stat = afsr; 39287c478bd9Sstevel@tonic-gate aflt->flt_panic = 
(uchar_t)ecache_scrub_panic; 39297c478bd9Sstevel@tonic-gate 39307c478bd9Sstevel@tonic-gate switch (mpb) { 39317c478bd9Sstevel@tonic-gate case CPU_ECACHE_TAG_ERR: 39327c478bd9Sstevel@tonic-gate case CPU_ECACHE_ADDR_PAR_ERR: 39337c478bd9Sstevel@tonic-gate case CPU_ECACHE_ETP_ETS_ERR: 39347c478bd9Sstevel@tonic-gate case CPU_ECACHE_STATE_ERR: 39357c478bd9Sstevel@tonic-gate class = FM_EREPORT_CPU_USII_ESCRUB_TAG; 39367c478bd9Sstevel@tonic-gate break; 39377c478bd9Sstevel@tonic-gate default: 39387c478bd9Sstevel@tonic-gate class = FM_EREPORT_CPU_USII_ESCRUB_DATA; 39397c478bd9Sstevel@tonic-gate break; 39407c478bd9Sstevel@tonic-gate } 39417c478bd9Sstevel@tonic-gate 39427c478bd9Sstevel@tonic-gate cpu_errorq_dispatch(class, (void *)&spf_flt, sizeof (spf_flt), 39437c478bd9Sstevel@tonic-gate ue_queue, aflt->flt_panic); 39447c478bd9Sstevel@tonic-gate 39457c478bd9Sstevel@tonic-gate if (aflt->flt_panic) 39467c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, "ecache_scrub_panic set and bad E$" 39477c478bd9Sstevel@tonic-gate "line detected"); 39487c478bd9Sstevel@tonic-gate } 39497c478bd9Sstevel@tonic-gate 39507c478bd9Sstevel@tonic-gate /* 39517c478bd9Sstevel@tonic-gate * Process an ecache error that occured during the E$ scrubbing. 39527c478bd9Sstevel@tonic-gate * We do the ecache scan to find the bad line, flush the bad line 39537c478bd9Sstevel@tonic-gate * and start the memscrubber to find any UE (in memory or in another cache) 39547c478bd9Sstevel@tonic-gate */ 39557c478bd9Sstevel@tonic-gate static uint64_t 39567c478bd9Sstevel@tonic-gate ecache_scrub_misc_err(int type, uint64_t afsr) 39577c478bd9Sstevel@tonic-gate { 39587c478bd9Sstevel@tonic-gate spitf_async_flt spf_flt; 39597c478bd9Sstevel@tonic-gate struct async_flt *aflt; 39607c478bd9Sstevel@tonic-gate uint64_t oafsr; 39617c478bd9Sstevel@tonic-gate 39627c478bd9Sstevel@tonic-gate bzero(&spf_flt, sizeof (spitf_async_flt)); 39637c478bd9Sstevel@tonic-gate aflt = &spf_flt.cmn_asyncflt; 39647c478bd9Sstevel@tonic-gate 39657c478bd9Sstevel@tonic-gate /* 39667c478bd9Sstevel@tonic-gate * Scan each line in the cache to look for the one 39677c478bd9Sstevel@tonic-gate * with bad parity 39687c478bd9Sstevel@tonic-gate */ 39697c478bd9Sstevel@tonic-gate aflt->flt_addr = AFLT_INV_ADDR; 39707c478bd9Sstevel@tonic-gate scan_ecache(&aflt->flt_addr, &spf_flt.flt_ec_data[0], 39717c478bd9Sstevel@tonic-gate &spf_flt.flt_ec_tag, &spf_flt.flt_ec_lcnt, &oafsr); 39727c478bd9Sstevel@tonic-gate 39737c478bd9Sstevel@tonic-gate if (oafsr & P_AFSR_CP) { 39747c478bd9Sstevel@tonic-gate uint64_t *cp_afsr = CPU_PRIVATE_PTR(CPU, sfpr_scrub_afsr); 39757c478bd9Sstevel@tonic-gate *cp_afsr |= oafsr; 39767c478bd9Sstevel@tonic-gate } 39777c478bd9Sstevel@tonic-gate 39787c478bd9Sstevel@tonic-gate /* 39797c478bd9Sstevel@tonic-gate * If we found a bad PA, update the state to indicate if it is 39807c478bd9Sstevel@tonic-gate * memory or I/O space. 39817c478bd9Sstevel@tonic-gate */ 39827c478bd9Sstevel@tonic-gate if (aflt->flt_addr != AFLT_INV_ADDR) { 39837c478bd9Sstevel@tonic-gate aflt->flt_in_memory = (pf_is_memory(aflt->flt_addr >> 39847c478bd9Sstevel@tonic-gate MMU_PAGESHIFT)) ? 
1 : 0; 39857c478bd9Sstevel@tonic-gate } 39867c478bd9Sstevel@tonic-gate 39877c478bd9Sstevel@tonic-gate spf_flt.flt_type = (ushort_t)type; 39887c478bd9Sstevel@tonic-gate 39897c478bd9Sstevel@tonic-gate aflt->flt_inst = CPU->cpu_id; 39907c478bd9Sstevel@tonic-gate aflt->flt_class = CPU_FAULT; 39917c478bd9Sstevel@tonic-gate aflt->flt_id = gethrtime_waitfree(); 39927c478bd9Sstevel@tonic-gate aflt->flt_status = afsr; 39937c478bd9Sstevel@tonic-gate aflt->flt_panic = (uchar_t)ecache_scrub_panic; 39947c478bd9Sstevel@tonic-gate 39957c478bd9Sstevel@tonic-gate /* 39967c478bd9Sstevel@tonic-gate * We have the bad line, flush that line and start 39977c478bd9Sstevel@tonic-gate * the memscrubber. 39987c478bd9Sstevel@tonic-gate */ 39997c478bd9Sstevel@tonic-gate if (spf_flt.flt_ec_lcnt > 0) { 40007c478bd9Sstevel@tonic-gate flushecacheline(P2ALIGN(aflt->flt_addr, 64), 40017c478bd9Sstevel@tonic-gate cpunodes[CPU->cpu_id].ecache_size); 40027c478bd9Sstevel@tonic-gate read_all_memscrub = 1; 40037c478bd9Sstevel@tonic-gate memscrub_run(); 40047c478bd9Sstevel@tonic-gate } 40057c478bd9Sstevel@tonic-gate 40067c478bd9Sstevel@tonic-gate cpu_errorq_dispatch((type == CPU_ORPHAN_CP_ERR) ? 40077c478bd9Sstevel@tonic-gate FM_EREPORT_CPU_USII_CP : FM_EREPORT_CPU_USII_UNKNOWN, 40087c478bd9Sstevel@tonic-gate (void *)&spf_flt, sizeof (spf_flt), ue_queue, aflt->flt_panic); 40097c478bd9Sstevel@tonic-gate 40107c478bd9Sstevel@tonic-gate return (oafsr); 40117c478bd9Sstevel@tonic-gate } 40127c478bd9Sstevel@tonic-gate 40137c478bd9Sstevel@tonic-gate static void 40147c478bd9Sstevel@tonic-gate ecache_scrub_tag_err(uint64_t afsr, uchar_t state, uint32_t index) 40157c478bd9Sstevel@tonic-gate { 40167c478bd9Sstevel@tonic-gate ushort_t afsr_ets = (afsr & P_AFSR_ETS) >> P_AFSR_ETS_SHIFT; 40177c478bd9Sstevel@tonic-gate spitfire_scrub_misc_t *ssmp = CPU_PRIVATE_PTR(CPU, sfpr_scrub_misc); 40187c478bd9Sstevel@tonic-gate ecache_kstat_t *ec_ksp = (ecache_kstat_t *)ssmp->ecache_ksp->ks_data; 40197c478bd9Sstevel@tonic-gate uint64_t ec_tag, paddr, oafsr; 40207c478bd9Sstevel@tonic-gate ec_data_t ec_data[8]; 40217c478bd9Sstevel@tonic-gate int cpuid = CPU->cpu_id; 40227c478bd9Sstevel@tonic-gate uint32_t ec_set_size = cpunodes[cpuid].ecache_size / 40237c478bd9Sstevel@tonic-gate ecache_associativity; 40247c478bd9Sstevel@tonic-gate uint64_t *cpu_afsr = CPU_PRIVATE_PTR(CPU, sfpr_scrub_afsr); 40257c478bd9Sstevel@tonic-gate 40267c478bd9Sstevel@tonic-gate get_ecache_dtag(P2ALIGN(index, 64), (uint64_t *)&ec_data[0], &ec_tag, 40277c478bd9Sstevel@tonic-gate &oafsr, cpu_afsr); 40287c478bd9Sstevel@tonic-gate paddr = ((ec_tag & cpu_ec_tag_mask) << cpu_ec_tag_shift) | 40297c478bd9Sstevel@tonic-gate (index % ec_set_size); 40307c478bd9Sstevel@tonic-gate 40317c478bd9Sstevel@tonic-gate /* 40327c478bd9Sstevel@tonic-gate * E$ tag state has good parity 40337c478bd9Sstevel@tonic-gate */ 40347c478bd9Sstevel@tonic-gate if ((afsr_ets & cpu_ec_state_parity) == 0) { 40357c478bd9Sstevel@tonic-gate if (afsr_ets & cpu_ec_parity) { 40367c478bd9Sstevel@tonic-gate /* 40377c478bd9Sstevel@tonic-gate * E$ tag state bits indicate the line is clean, 40387c478bd9Sstevel@tonic-gate * invalidate the E$ tag and continue. 40397c478bd9Sstevel@tonic-gate */ 40407c478bd9Sstevel@tonic-gate if (!(state & cpu_ec_state_dirty)) { 40417c478bd9Sstevel@tonic-gate /* 40427c478bd9Sstevel@tonic-gate * Zero the tag and mark the state invalid 40437c478bd9Sstevel@tonic-gate * with good parity for the tag. 
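 *
 * For reference, the tag-scrub policy implemented by the branches of this
 * function can be summarized as:
 *
 *	state parity	tag parity	line	action
 *	------------	----------	-----	----------------------
 *	good		bad		clean	rewrite tag, return
 *	good		bad		dirty	log + panic
 *	good		none (but ETP)	-	log + panic
 *	bad		-		-	log + panic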
40447c478bd9Sstevel@tonic-gate */ 40457c478bd9Sstevel@tonic-gate if (isus2i || isus2e) 40467c478bd9Sstevel@tonic-gate write_hb_ec_tag_parity(index); 40477c478bd9Sstevel@tonic-gate else 40487c478bd9Sstevel@tonic-gate write_ec_tag_parity(index); 40497c478bd9Sstevel@tonic-gate 40507c478bd9Sstevel@tonic-gate /* Sync with the dual tag */ 40517c478bd9Sstevel@tonic-gate flushecacheline(0, 40527c478bd9Sstevel@tonic-gate cpunodes[CPU->cpu_id].ecache_size); 40537c478bd9Sstevel@tonic-gate ec_ksp->tags_cleared.value.ul++; 40547c478bd9Sstevel@tonic-gate ecache_scrub_log(ec_data, ec_tag, paddr, 40557c478bd9Sstevel@tonic-gate CPU_ECACHE_TAG_ERR, afsr); 40567c478bd9Sstevel@tonic-gate return; 40577c478bd9Sstevel@tonic-gate } else { 40587c478bd9Sstevel@tonic-gate ecache_scrub_log(ec_data, ec_tag, paddr, 40597c478bd9Sstevel@tonic-gate CPU_ECACHE_ADDR_PAR_ERR, afsr); 40607c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, " E$ tag address has bad" 40617c478bd9Sstevel@tonic-gate " parity"); 40627c478bd9Sstevel@tonic-gate } 40637c478bd9Sstevel@tonic-gate } else if ((afsr_ets & cpu_ec_parity) == 0) { 40647c478bd9Sstevel@tonic-gate /* 40657c478bd9Sstevel@tonic-gate * ETS is zero but ETP is set 40667c478bd9Sstevel@tonic-gate */ 40677c478bd9Sstevel@tonic-gate ecache_scrub_log(ec_data, ec_tag, paddr, 40687c478bd9Sstevel@tonic-gate CPU_ECACHE_ETP_ETS_ERR, afsr); 40697c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, "AFSR.ETP is set and" 40707c478bd9Sstevel@tonic-gate " AFSR.ETS is zero"); 40717c478bd9Sstevel@tonic-gate } 40727c478bd9Sstevel@tonic-gate } else { 40737c478bd9Sstevel@tonic-gate /* 40747c478bd9Sstevel@tonic-gate * E$ tag state bit has a bad parity 40757c478bd9Sstevel@tonic-gate */ 40767c478bd9Sstevel@tonic-gate ecache_scrub_log(ec_data, ec_tag, paddr, 40777c478bd9Sstevel@tonic-gate CPU_ECACHE_STATE_ERR, afsr); 40787c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, "E$ tag state has bad parity"); 40797c478bd9Sstevel@tonic-gate } 40807c478bd9Sstevel@tonic-gate } 40817c478bd9Sstevel@tonic-gate 40827c478bd9Sstevel@tonic-gate static void 40837c478bd9Sstevel@tonic-gate ecache_page_retire(void *arg) 40847c478bd9Sstevel@tonic-gate { 40857c478bd9Sstevel@tonic-gate uint64_t paddr = (uint64_t)arg; 4086db874c57Selowe (void) page_retire(paddr, PR_UE); 40877c478bd9Sstevel@tonic-gate } 40887c478bd9Sstevel@tonic-gate 40897c478bd9Sstevel@tonic-gate void 40907c478bd9Sstevel@tonic-gate sticksync_slave(void) 40917c478bd9Sstevel@tonic-gate {} 40927c478bd9Sstevel@tonic-gate 40937c478bd9Sstevel@tonic-gate void 40947c478bd9Sstevel@tonic-gate sticksync_master(void) 40957c478bd9Sstevel@tonic-gate {} 40967c478bd9Sstevel@tonic-gate 40977c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 40987c478bd9Sstevel@tonic-gate void 40997c478bd9Sstevel@tonic-gate cpu_check_ce(int flag, uint64_t pa, caddr_t va, uint_t bpp) 41007c478bd9Sstevel@tonic-gate {} 41017c478bd9Sstevel@tonic-gate 41027c478bd9Sstevel@tonic-gate void 41037c478bd9Sstevel@tonic-gate cpu_run_bus_error_handlers(struct async_flt *aflt, int expected) 41047c478bd9Sstevel@tonic-gate { 41057c478bd9Sstevel@tonic-gate int status; 41067c478bd9Sstevel@tonic-gate ddi_fm_error_t de; 41077c478bd9Sstevel@tonic-gate 41087c478bd9Sstevel@tonic-gate bzero(&de, sizeof (ddi_fm_error_t)); 41097c478bd9Sstevel@tonic-gate 41107c478bd9Sstevel@tonic-gate de.fme_ena = fm_ena_generate_cpu(aflt->flt_id, aflt->flt_inst, 41117c478bd9Sstevel@tonic-gate FM_ENA_FMT1); 41127c478bd9Sstevel@tonic-gate de.fme_flag = expected; 41137c478bd9Sstevel@tonic-gate de.fme_bus_specific = (void *)aflt->flt_addr; 41147c478bd9Sstevel@tonic-gate status = 
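/*
 * For illustration only (hypothetical values, not part of the driver):
 * ecache_scrub_tag_err() above rebuilds the physical address of the bad
 * line from the E$ tag and the byte index into the cache.  Assuming a
 * 4MB direct-mapped E$ (ecache_associativity == 1), so that ec_set_size
 * is 0x400000 and cpu_ec_tag_shift is 22:
 *
 *	index                     = 0x12340
 *	ec_tag & cpu_ec_tag_mask  = 0x7f
 *	paddr = (0x7f << 22) | (0x12340 % 0x400000)
 *	      = 0x1fc00000 | 0x12340
 *	      = 0x1fc12340
 *
 * The actual mask and shift values are set up during CPU initialization
 * elsewhere in this file.
 */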
static void
ecache_page_retire(void *arg)
{
	uint64_t paddr = (uint64_t)arg;
	(void) page_retire(paddr, PR_UE);
}

void
sticksync_slave(void)
{}

void
sticksync_master(void)
{}

/*ARGSUSED*/
void
cpu_check_ce(int flag, uint64_t pa, caddr_t va, uint_t bpp)
{}

void
cpu_run_bus_error_handlers(struct async_flt *aflt, int expected)
{
	int status;
	ddi_fm_error_t de;

	bzero(&de, sizeof (ddi_fm_error_t));

	de.fme_ena = fm_ena_generate_cpu(aflt->flt_id, aflt->flt_inst,
	    FM_ENA_FMT1);
	de.fme_flag = expected;
	de.fme_bus_specific = (void *)aflt->flt_addr;
	status = ndi_fm_handler_dispatch(ddi_root_node(), NULL, &de);

	if ((aflt->flt_prot == AFLT_PROT_NONE) && (status == DDI_FM_FATAL))
		aflt->flt_panic = 1;
}

/*ARGSUSED*/
void
cpu_errorq_dispatch(char *error_class, void *payload, size_t payload_sz,
    errorq_t *eqp, uint_t flag)
{
	struct async_flt *aflt = (struct async_flt *)payload;

	aflt->flt_erpt_class = error_class;
	errorq_dispatch(eqp, payload, payload_sz, flag);
}

#define	MAX_SIMM	8

struct ce_info {
	char name[UNUM_NAMLEN];
	uint64_t intermittent_total;
	uint64_t persistent_total;
	uint64_t sticky_total;
	unsigned short leaky_bucket_cnt;
};

/*
 * Separately-defined structure for use in reporting the ce_info
 * to SunVTS without exposing the internal layout and implementation
 * of struct ce_info.
 */
static struct ecc_error_info ecc_error_info_data = {
	{ "version", KSTAT_DATA_UINT32 },
	{ "maxcount", KSTAT_DATA_UINT32 },
	{ "count", KSTAT_DATA_UINT32 }
};
static const size_t ecc_error_info_ndata = sizeof (ecc_error_info_data) /
    sizeof (struct kstat_named);

#if KSTAT_CE_UNUM_NAMLEN < UNUM_NAMLEN
#error "Need to rev ecc_error_info version and update KSTAT_CE_UNUM_NAMLEN"
#endif

struct ce_info *mem_ce_simm = NULL;
size_t mem_ce_simm_size = 0;

/*
 * Default values for the number of CEs allowed per interval.
 * The interval is defined in minutes; SOFTERR_MIN_TIMEOUT is
 * defined in microseconds.
 */
#define	SOFTERR_LIMIT_DEFAULT		2
#define	SOFTERR_INTERVAL_DEFAULT	1440		/* This is 24 hours */
#define	SOFTERR_MIN_TIMEOUT		(60 * MICROSEC)	/* This is 1 minute */
#define	TIMEOUT_NONE			((timeout_id_t)0)
#define	TIMEOUT_SET			((timeout_id_t)1)

/*
 * Timeout identifier for the leaky bucket.
 */
static timeout_id_t leaky_bucket_timeout_id = TIMEOUT_NONE;

/*
 * Tunables for the maximum number of allowed CEs in a given time.
 */
int ecc_softerr_limit = SOFTERR_LIMIT_DEFAULT;
int ecc_softerr_interval = SOFTERR_INTERVAL_DEFAULT;
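/*
 * These tunables can be overridden in the usual way; for example
 * (illustrative values only), an /etc/system entry of
 *
 *	set ecc_softerr_limit = 4
 *	set ecc_softerr_interval = 720
 *
 * would allow up to 4 correctable errors per memory module in any
 * 12 hour window before the leaky bucket threshold below is exceeded.
 */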
void
cpu_mp_init(void)
{
	size_t size = cpu_aflt_size();
	size_t i;
	kstat_t *ksp;

	/*
	 * Initialize the CE error handling buffers.
	 */
	mem_ce_simm_size = MAX_SIMM * max_ncpus;
	size = sizeof (struct ce_info) * mem_ce_simm_size;
	mem_ce_simm = kmem_zalloc(size, KM_SLEEP);

	ksp = kstat_create("unix", 0, "ecc-info", "misc",
	    KSTAT_TYPE_NAMED, ecc_error_info_ndata, KSTAT_FLAG_VIRTUAL);
	if (ksp != NULL) {
		ksp->ks_data = (struct kstat_named *)&ecc_error_info_data;
		ecc_error_info_data.version.value.ui32 = KSTAT_CE_INFO_VER;
		ecc_error_info_data.maxcount.value.ui32 = mem_ce_simm_size;
		ecc_error_info_data.count.value.ui32 = 0;
		kstat_install(ksp);
	}

	for (i = 0; i < mem_ce_simm_size; i++) {
		struct kstat_ecc_mm_info *kceip;

		kceip = kmem_zalloc(sizeof (struct kstat_ecc_mm_info),
		    KM_SLEEP);
		ksp = kstat_create("mm", i, "ecc-info", "misc",
		    KSTAT_TYPE_NAMED,
		    sizeof (struct kstat_ecc_mm_info) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL);
		if (ksp != NULL) {
			/*
			 * Re-declare ks_data_size to include room for the
			 * UNUM name since we don't have KSTAT_FLAG_VAR_SIZE
			 * set.
			 */
			ksp->ks_data_size = sizeof (struct kstat_ecc_mm_info) +
			    KSTAT_CE_UNUM_NAMLEN;
			ksp->ks_data = kceip;
			kstat_named_init(&kceip->name,
			    "name", KSTAT_DATA_STRING);
			kstat_named_init(&kceip->intermittent_total,
			    "intermittent_total", KSTAT_DATA_UINT64);
			kstat_named_init(&kceip->persistent_total,
			    "persistent_total", KSTAT_DATA_UINT64);
			kstat_named_init(&kceip->sticky_total,
			    "sticky_total", KSTAT_DATA_UINT64);
			/*
			 * Use the default snapshot routine as it knows how to
			 * deal with named kstats with long strings.
			 */
			ksp->ks_update = ecc_kstat_update;
			kstat_install(ksp);
		} else {
			kmem_free(kceip, sizeof (struct kstat_ecc_mm_info));
		}
	}
}
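/*
 * The kstats created above are visible from userland; for example
 * (illustrative invocations only):
 *
 *	# kstat -m unix -n ecc-info	summary: version, maxcount, count
 *	# kstat -m mm -n ecc-info	per memory-module CE totals by unum
 *
 * The per-module instances are populated on read by ecc_kstat_update()
 * below, once ce_count_unum() has recorded at least one error for the
 * corresponding slot.
 */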
/*ARGSUSED*/
static void
leaky_bucket_timeout(void *arg)
{
	int i;
	struct ce_info *psimm = mem_ce_simm;

	for (i = 0; i < mem_ce_simm_size; i++) {
		if (psimm[i].leaky_bucket_cnt > 0)
			atomic_add_16(&psimm[i].leaky_bucket_cnt, -1);
	}
	add_leaky_bucket_timeout();
}

static void
add_leaky_bucket_timeout(void)
{
	long timeout_in_microsecs;

	/*
	 * Create the timeout for the next leak.
	 *
	 * The timeout interval is calculated as follows:
	 *
	 *	(ecc_softerr_interval * 60 * MICROSEC) / ecc_softerr_limit
	 *
	 * ecc_softerr_interval is in minutes, so multiply this by 60 (seconds
	 * in a minute), then multiply this by MICROSEC to get the interval
	 * in microseconds.  Divide this total by ecc_softerr_limit so that
	 * the timeout interval is accurate to within a few microseconds.
	 */

	if (ecc_softerr_limit <= 0)
		ecc_softerr_limit = SOFTERR_LIMIT_DEFAULT;
	if (ecc_softerr_interval <= 0)
		ecc_softerr_interval = SOFTERR_INTERVAL_DEFAULT;

	timeout_in_microsecs = ((int64_t)ecc_softerr_interval * 60 * MICROSEC) /
	    ecc_softerr_limit;

	if (timeout_in_microsecs < SOFTERR_MIN_TIMEOUT)
		timeout_in_microsecs = SOFTERR_MIN_TIMEOUT;

	leaky_bucket_timeout_id = timeout(leaky_bucket_timeout,
	    (void *)NULL, drv_usectohz((clock_t)timeout_in_microsecs));
}
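/*
 * Worked example with the defaults defined above (illustrative only):
 *
 *	ecc_softerr_interval = 1440 minutes, ecc_softerr_limit = 2
 *
 *	timeout_in_microsecs = (1440 * 60 * 1000000) / 2
 *	                     = 43,200,000,000 microseconds
 *	                     = 12 hours
 *
 * so each per-unum bucket leaks one error every 12 hours, and a memory
 * module must, roughly speaking, see more than 2 persistent CEs within a
 * 24 hour window for ce_count_unum() below to cross the threshold.
 */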
/*
 * Legacy Correctable ECC Error Hash
 *
 * All of the code below this comment is used to implement a legacy array
 * which counted intermittent, persistent, and sticky CE errors by unum,
 * and then was later extended to publish the data as a kstat for SunVTS.
 * All of this code is replaced by FMA, and remains here until such time
 * that the UltraSPARC-I/II CPU code is converted to FMA, or is EOLed.
 *
 * Errors are saved in three buckets per-unum:
 * (1) sticky - scrub was unsuccessful, cannot be scrubbed
 *     This could represent a problem, and is immediately printed out.
 * (2) persistent - was successfully scrubbed
 *     These errors use the leaky bucket algorithm to determine
 *     if there is a serious problem.
 * (3) intermittent - may have originated from the cpu or upa/safari bus,
 *     and does not necessarily indicate any problem with the dimm itself,
 *     but is critical information for debugging new hardware.
 *     Because we do not know if it came from the dimm, it would be
 *     inappropriate to include these in the leaky bucket counts.
 *
 * If the E$ line was modified before the scrub operation began, then the
 * displacement flush at the beginning of scrubphys() will cause the modified
 * line to be written out, which will clean up the CE.  Then, any subsequent
 * read will not cause an error, which will cause persistent errors to be
 * identified as intermittent.
 *
 * If a DIMM is going bad, it will produce true persistents as well as
 * false intermittents, so these intermittents can be safely ignored.
 *
 * If the error count is excessive for a DIMM, this function will return
 * PR_MCE, and the CPU module may then decide to remove that page from use.
 */
static int
ce_count_unum(int status, int len, char *unum)
{
	int i;
	struct ce_info *psimm = mem_ce_simm;
	int page_status = PR_OK;

	ASSERT(psimm != NULL);

	if (len <= 0 ||
	    (status & (ECC_STICKY | ECC_PERSISTENT | ECC_INTERMITTENT)) == 0)
		return (page_status);

	/*
	 * Initialize the leaky_bucket timeout
	 */
	if (casptr(&leaky_bucket_timeout_id,
	    TIMEOUT_NONE, TIMEOUT_SET) == TIMEOUT_NONE)
		add_leaky_bucket_timeout();

	for (i = 0; i < mem_ce_simm_size; i++) {
		if (psimm[i].name[0] == '\0') {
			/*
			 * Hit the end of the valid entries, add
			 * a new one.
			 */
			(void) strncpy(psimm[i].name, unum, len);
			if (status & ECC_STICKY) {
				/*
				 * Sticky - the leaky bucket is used to track
				 * soft errors.  Since a sticky error is a
				 * hard error and likely to be retired soon,
				 * we do not count it in the leaky bucket.
				 */
				psimm[i].leaky_bucket_cnt = 0;
				psimm[i].intermittent_total = 0;
				psimm[i].persistent_total = 0;
				psimm[i].sticky_total = 1;
				cmn_err(CE_WARN,
				    "[AFT0] Sticky Softerror encountered "
				    "on Memory Module %s\n", unum);
				page_status = PR_MCE;
			} else if (status & ECC_PERSISTENT) {
				psimm[i].leaky_bucket_cnt = 1;
				psimm[i].intermittent_total = 0;
				psimm[i].persistent_total = 1;
				psimm[i].sticky_total = 0;
			} else {
				/*
				 * Intermittent - Because the scrub operation
				 * cannot find the error in the DIMM, we will
				 * not count these in the leaky bucket.
				 */
				psimm[i].leaky_bucket_cnt = 0;
				psimm[i].intermittent_total = 1;
				psimm[i].persistent_total = 0;
				psimm[i].sticky_total = 0;
			}
			ecc_error_info_data.count.value.ui32++;
			break;
		} else if (strncmp(unum, psimm[i].name, len) == 0) {
			/*
			 * Found an existing entry for the current
			 * memory module, adjust the counts.
			 */
			if (status & ECC_STICKY) {
				psimm[i].sticky_total++;
				cmn_err(CE_WARN,
				    "[AFT0] Sticky Softerror encountered "
				    "on Memory Module %s\n", unum);
				page_status = PR_MCE;
			} else if (status & ECC_PERSISTENT) {
				int new_value;

				new_value = atomic_add_16_nv(
				    &psimm[i].leaky_bucket_cnt, 1);
				psimm[i].persistent_total++;
				if (new_value > ecc_softerr_limit) {
					cmn_err(CE_WARN, "[AFT0] Most recent %d"
					    " soft errors from Memory Module"
					    " %s exceed threshold (N=%d,"
					    " T=%dh:%02dm) triggering page"
					    " retire", new_value, unum,
					    ecc_softerr_limit,
					    ecc_softerr_interval / 60,
					    ecc_softerr_interval % 60);
					atomic_add_16(
					    &psimm[i].leaky_bucket_cnt, -1);
					page_status = PR_MCE;
				}
			} else { /* Intermittent */
				psimm[i].intermittent_total++;
			}
			break;
		}
	}

	if (i >= mem_ce_simm_size)
		cmn_err(CE_CONT, "[AFT0] Softerror: mem_ce_simm[] out of "
		    "space.\n");

	return (page_status);
}
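/*
 * Illustrative sequence (hypothetical unum, default tunables of 2 CEs per
 * 24 hours): if three persistent CEs are reported for "U0701" faster than
 * the bucket can leak, the third atomic_add_16_nv() returns 3, which
 * exceeds ecc_softerr_limit, so ce_count_unum() above warns
 *
 *	[AFT0] Most recent 3 soft errors from Memory Module U0701 exceed
 *	threshold (N=2, T=24h:00m) triggering page retire
 *
 * and returns PR_MCE.  A sticky CE returns PR_MCE on its first occurrence,
 * while intermittent CEs only increment intermittent_total.
 */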
/*
 * Function to support counting of IO detected CEs.
 */
void
cpu_ce_count_unum(struct async_flt *ecc, int len, char *unum)
{
	int err;

	err = ce_count_unum(ecc->flt_status, len, unum);
	if (err != PR_OK && automatic_page_removal) {
		(void) page_retire(ecc->flt_addr, err);
	}
}

static int
ecc_kstat_update(kstat_t *ksp, int rw)
{
	struct kstat_ecc_mm_info *kceip = ksp->ks_data;
	struct ce_info *ceip = mem_ce_simm;
	int i = ksp->ks_instance;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ASSERT(ksp->ks_data != NULL);
	ASSERT(i < mem_ce_simm_size && i >= 0);

	/*
	 * Since we're not using locks, make sure that we don't get partial
	 * data.  The name is always copied before the counters are
	 * incremented, so only do this update routine if at least one of
	 * the counters is non-zero, which ensures that ce_count_unum() is
	 * done and the string is fully copied.
	 */
	if (ceip[i].intermittent_total == 0 &&
	    ceip[i].persistent_total == 0 &&
	    ceip[i].sticky_total == 0) {
		/*
		 * Uninitialized or partially initialized.  Ignore.
		 * The ks_data buffer was allocated via kmem_zalloc,
		 * so there is no need to bzero it.
		 */
		return (0);
	}

	kstat_named_setstr(&kceip->name, ceip[i].name);
	kceip->intermittent_total.value.ui64 = ceip[i].intermittent_total;
	kceip->persistent_total.value.ui64 = ceip[i].persistent_total;
	kceip->sticky_total.value.ui64 = ceip[i].sticky_total;

	return (0);
}

#define	VIS_BLOCKSIZE		64

int
dtrace_blksuword32_err(uintptr_t addr, uint32_t *data)
{
	int ret, watched;

	watched = watch_disable_addr((void *)addr, VIS_BLOCKSIZE, S_WRITE);
	ret = dtrace_blksuword32(addr, data, 0);
	if (watched)
		watch_enable_addr((void *)addr, VIS_BLOCKSIZE, S_WRITE);

	return (ret);
}

/*ARGSUSED*/
void
cpu_faulted_enter(struct cpu *cp)
{
}

/*ARGSUSED*/
void
cpu_faulted_exit(struct cpu *cp)
{
}

static int mmu_disable_ism_large_pages = ((1 << TTE512K) |
	(1 << TTE32M) | (1 << TTE256M));
static int mmu_disable_large_pages = ((1 << TTE32M) | (1 << TTE256M));

/*
 * This function returns the US-II mmu-specific values for the
 * hat's disable_large_pages and disable_ism_large_pages variables.
 */
int
mmu_large_pages_disabled(uint_t flag)
{
	int pages_disable = 0;

	if (flag == HAT_LOAD) {
		pages_disable = mmu_disable_large_pages;
	} else if (flag == HAT_LOAD_SHARE) {
		pages_disable = mmu_disable_ism_large_pages;
	}
	return (pages_disable);
}

/*ARGSUSED*/
void
mmu_init_kernel_pgsz(struct hat *hat)
{
}

size_t
mmu_get_kernel_lpsize(size_t lpsize)
{
	uint_t tte;

	if (lpsize == 0) {
		/* no setting for segkmem_lpsize in /etc/system: use default */
		return (MMU_PAGESIZE4M);
	}

	for (tte = TTE8K; tte <= TTE4M; tte++) {
		if (lpsize == TTEBYTES(tte))
			return (lpsize);
	}

	return (TTEBYTES(TTE8K));
}
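/*
 * Examples of mmu_get_kernel_lpsize() behavior (illustrative values only):
 *
 *	segkmem_lpsize not set (0)	-> MMU_PAGESIZE4M (the default)
 *	set segkmem_lpsize = 0x80000	-> 512K (a supported TTE size)
 *	set segkmem_lpsize = 0x2000000	-> 8K (32M is outside the
 *					   TTE8K..TTE4M range, so fall
 *					   back to 8K)
 */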