/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

/*
 * Return true when the MIDR of the CPU executing this code falls inside
 * the [midr_range_min, midr_range_max] revision window for the entry's
 * CPU model.  Because this reads the *local* CPU id register, it is only
 * valid with SCOPE_LOCAL_CPU and with preemption disabled; the WARN_ON
 * flags any caller violating that contract.
 */
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
				       entry->midr_range_min,
				       entry->midr_range_max);
}

/*
 * Return true when this CPU's cache-type register (CTR_EL0) differs, in
 * the strictly-checked fields, from the system-wide sanitised value
 * recorded in arm64_ftr_reg_ctrel0.  Like is_affected_midr_range(), this
 * reads a local CPU register, hence the same SCOPE_LOCAL_CPU /
 * non-preemptible requirement.
 */
static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
}

/*
 * Enable-hook for ARM64_MISMATCHED_CACHE_LINE_SIZE: clearing
 * SCTLR_EL1.UCT traps EL0 accesses to CTR_EL0 into the kernel, so
 * userspace gets the sanitised system-wide value instead of the
 * mismatching hardware one.  Runs on each affected CPU.
 */
static int cpu_enable_trap_ctr_access(void *__unused)
{
	/* Clear SCTLR_EL1.UCT */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
	return 0;
}

/*
 * Helper to populate the common fields of an MIDR-matched errata entry:
 * per-CPU scope, the generic MIDR range matcher, and the model/revision
 * window it should match.
 */
#define MIDR_RANGE(model, min, max) \
	.def_scope = SCOPE_LOCAL_CPU, \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = min, \
	.midr_range_max = max

/*
 * Table of known CPU errata workarounds.  Each Kconfig-gated entry names
 * the erratum, the capability bit set when a matching CPU is found, the
 * affected MIDR range, and (optionally) an enable hook applied on
 * affected CPUs.  Terminated by an empty entry (NULL .matches).
 */
const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
		.enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
		.enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		/* max = variant 1, revision 2 (i.e. r1p2) */
		MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 1),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
	},
#endif
	{
		/* Not tied to an MIDR range: matched per-CPU via CTR_EL0. */
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_line_size,
		.def_scope = SCOPE_LOCAL_CPU,
		.enable = cpu_enable_trap_ctr_access,
	},
	{
	}
};

/*
 * The CPU Errata work arounds are detected and applied at boot time
 * and the related information is freed soon after. If the new CPU requires
 * an errata not detected at boot, fail this CPU.
 */
void verify_local_cpu_errata_workarounds(void)
{
	const struct arm64_cpu_capabilities *caps = arm64_errata;

	/* Walk the table until the empty terminator (NULL .matches). */
	for (; caps->matches; caps++)
		if (!cpus_have_cap(caps->capability) &&
			caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: Requires work around for %s, not detected"
					" at boot time\n",
				smp_processor_id(),
				caps->desc ? : "an erratum");
			cpu_die_early();
		}
}

/* Detect this CPU's errata and set the corresponding capability bits. */
void update_cpu_errata_workarounds(void)
{
	update_cpu_capabilities(arm64_errata, "enabling workaround for");
}

/* Run each detected erratum's .enable hook; boot-time only (__init). */
void __init enable_errata_workarounds(void)
{
	enable_cpu_capabilities(arm64_errata);
}