1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Contains CPU feature definitions 4 * 5 * Copyright (C) 2015 ARM Ltd. 6 */ 7 8 #define pr_fmt(fmt) "CPU features: " fmt 9 10 #include <linux/bsearch.h> 11 #include <linux/cpumask.h> 12 #include <linux/crash_dump.h> 13 #include <linux/sort.h> 14 #include <linux/stop_machine.h> 15 #include <linux/types.h> 16 #include <linux/mm.h> 17 #include <linux/cpu.h> 18 #include <asm/cpu.h> 19 #include <asm/cpufeature.h> 20 #include <asm/cpu_ops.h> 21 #include <asm/fpsimd.h> 22 #include <asm/mmu_context.h> 23 #include <asm/processor.h> 24 #include <asm/sysreg.h> 25 #include <asm/traps.h> 26 #include <asm/virt.h> 27 28 /* Kernel representation of AT_HWCAP and AT_HWCAP2 */ 29 static unsigned long elf_hwcap __read_mostly; 30 31 #ifdef CONFIG_COMPAT 32 #define COMPAT_ELF_HWCAP_DEFAULT \ 33 (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\ 34 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ 35 COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\ 36 COMPAT_HWCAP_LPAE) 37 unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT; 38 unsigned int compat_elf_hwcap2 __read_mostly; 39 #endif 40 41 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); 42 EXPORT_SYMBOL(cpu_hwcaps); 43 static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS]; 44 45 /* Need also bit for ARM64_CB_PATCH */ 46 DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE); 47 48 bool arm64_use_ng_mappings = false; 49 EXPORT_SYMBOL(arm64_use_ng_mappings); 50 51 /* 52 * Flag to indicate if we have computed the system wide 53 * capabilities based on the boot time active CPUs. This 54 * will be used to determine if a new booting CPU should 55 * go through the verification process to make sure that it 56 * supports the system capabilities, without using a hotplug 57 * notifier. This is also used to decide if we could use 58 * the fast path for checking constant CPU caps. 
59 */ 60 DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready); 61 EXPORT_SYMBOL(arm64_const_caps_ready); 62 static inline void finalize_system_capabilities(void) 63 { 64 static_branch_enable(&arm64_const_caps_ready); 65 } 66 67 static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p) 68 { 69 /* file-wide pr_fmt adds "CPU features: " prefix */ 70 pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps); 71 return 0; 72 } 73 74 static struct notifier_block cpu_hwcaps_notifier = { 75 .notifier_call = dump_cpu_hwcaps 76 }; 77 78 static int __init register_cpu_hwcaps_dumper(void) 79 { 80 atomic_notifier_chain_register(&panic_notifier_list, 81 &cpu_hwcaps_notifier); 82 return 0; 83 } 84 __initcall(register_cpu_hwcaps_dumper); 85 86 DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS); 87 EXPORT_SYMBOL(cpu_hwcap_keys); 88 89 #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ 90 { \ 91 .sign = SIGNED, \ 92 .visible = VISIBLE, \ 93 .strict = STRICT, \ 94 .type = TYPE, \ 95 .shift = SHIFT, \ 96 .width = WIDTH, \ 97 .safe_val = SAFE_VAL, \ 98 } 99 100 /* Define a feature with unsigned values */ 101 #define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ 102 __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) 103 104 /* Define a feature with a signed value */ 105 #define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ 106 __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) 107 108 #define ARM64_FTR_END \ 109 { \ 110 .width = 0, \ 111 } 112 113 /* meta feature for alternatives */ 114 static bool __maybe_unused 115 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused); 116 117 static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap); 118 119 /* 120 * NOTE: Any changes to the visibility of features should be kept in 121 * sync with the documentation of the CPU feature register ABI. 
122 */ 123 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = { 124 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0), 125 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0), 126 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0), 127 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0), 128 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0), 129 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0), 130 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0), 131 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0), 132 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0), 133 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0), 134 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0), 135 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0), 136 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0), 137 ARM64_FTR_END, 138 }; 139 140 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { 141 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_I8MM_SHIFT, 4, 0), 142 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DGH_SHIFT, 4, 0), 143 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_BF16_SHIFT, 4, 0), 144 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SPECRES_SHIFT, 4, 0), 145 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0), 146 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0), 147 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), 148 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0), 149 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), 150 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPA_SHIFT, 4, 0), 151 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0), 152 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0), 153 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0), 154 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), 155 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0), 156 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), 157 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0), 158 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0), 159 ARM64_FTR_END, 160 }; 161 162 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { 163 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0), 164 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0), 165 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0), 166 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), 167 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0), 168 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0), 169 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0), 170 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 
ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), 171 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), 172 /* Linux doesn't care about the EL3 */ 173 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0), 174 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0), 175 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY), 176 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY), 177 ARM64_FTR_END, 178 }; 179 180 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = { 181 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI), 182 ARM64_FTR_END, 183 }; 184 185 static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = { 186 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), 187 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F64MM_SHIFT, 4, 0), 188 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), 189 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F32MM_SHIFT, 4, 0), 190 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), 191 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_I8MM_SHIFT, 4, 0), 192 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), 193 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0), 194 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), 195 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0), 196 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), 197 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BF16_SHIFT, 4, 0), 198 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), 199 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0), 200 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), 201 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0), 202 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), 203 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0), 204 ARM64_FTR_END, 205 }; 206 207 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { 208 /* 209 * We already refuse to boot CPUs that don't support our configured 210 * page size, so we can only detect mismatches for a page size other 211 * than the one we're currently using. Unfortunately, SoCs like this 212 * exist in the wild so, even though we don't like it, we'll have to go 213 * along with it and treat them as non-strict. 
214 */ 215 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), 216 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), 217 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI), 218 219 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0), 220 /* Linux shouldn't care about secure memory */ 221 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0), 222 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0), 223 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0), 224 /* 225 * Differing PARange is fine as long as all peripherals and memory are mapped 226 * within the minimum PARange of all CPUs 227 */ 228 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0), 229 ARM64_FTR_END, 230 }; 231 232 static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { 233 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0), 234 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0), 235 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0), 236 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0), 237 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0), 238 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0), 239 ARM64_FTR_END, 240 }; 241 242 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { 243 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0), 244 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0), 245 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0), 246 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0), 247 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0), 248 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0), 249 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0), 250 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0), 251 ARM64_FTR_END, 252 }; 253 254 static const struct arm64_ftr_bits ftr_ctr[] = { 255 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */ 256 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1), 257 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1), 258 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0), 259 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0), 260 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1), 261 /* 262 * Linux can handle differing I-cache policies. Userspace JITs will 263 * make use of *minLine. 264 * If we have differing I-cache policies, report it as the weakest - VIPT. 
265 */ 266 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */ 267 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0), 268 ARM64_FTR_END, 269 }; 270 271 struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = { 272 .name = "SYS_CTR_EL0", 273 .ftr_bits = ftr_ctr 274 }; 275 276 static const struct arm64_ftr_bits ftr_id_mmfr0[] = { 277 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf), /* InnerShr */ 278 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0), /* FCSE */ 279 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0), /* AuxReg */ 280 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), /* TCM */ 281 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* ShareLvl */ 282 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf), /* OuterShr */ 283 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* PMSA */ 284 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* VMSA */ 285 ARM64_FTR_END, 286 }; 287 288 static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = { 289 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0), 290 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0), 291 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0), 292 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0), 293 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0), 294 /* 295 * We can instantiate multiple PMU instances with different levels 296 * of support. 297 */ 298 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0), 299 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0), 300 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6), 301 ARM64_FTR_END, 302 }; 303 304 static const struct arm64_ftr_bits ftr_mvfr2[] = { 305 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* FPMisc */ 306 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* SIMDMisc */ 307 ARM64_FTR_END, 308 }; 309 310 static const struct arm64_ftr_bits ftr_dczid[] = { 311 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1), /* DZP */ 312 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* BS */ 313 ARM64_FTR_END, 314 }; 315 316 317 static const struct arm64_ftr_bits ftr_id_isar5[] = { 318 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0), 319 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0), 320 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0), 321 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0), 322 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0), 323 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0), 324 ARM64_FTR_END, 325 }; 326 327 static const struct arm64_ftr_bits ftr_id_mmfr4[] = { 328 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* ac2 */ 329 ARM64_FTR_END, 330 }; 331 332 static const struct arm64_ftr_bits ftr_id_isar6[] = { 333 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_I8MM_SHIFT, 4, 0), 334 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_BF16_SHIFT, 4, 0), 335 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 
ID_ISAR6_SPECRES_SHIFT, 4, 0), 336 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SB_SHIFT, 4, 0), 337 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_FHM_SHIFT, 4, 0), 338 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_DP_SHIFT, 4, 0), 339 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_JSCVT_SHIFT, 4, 0), 340 ARM64_FTR_END, 341 }; 342 343 static const struct arm64_ftr_bits ftr_id_pfr0[] = { 344 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* State3 */ 345 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), /* State2 */ 346 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* State1 */ 347 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* State0 */ 348 ARM64_FTR_END, 349 }; 350 351 static const struct arm64_ftr_bits ftr_id_dfr0[] = { 352 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0), 353 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */ 354 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), 355 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), 356 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), 357 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), 358 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), 359 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), 360 ARM64_FTR_END, 361 }; 362 363 static const struct arm64_ftr_bits ftr_zcr[] = { 364 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 365 ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0), /* LEN */ 366 ARM64_FTR_END, 367 }; 368 369 /* 370 * Common ftr bits for a 32bit register with all hidden, strict 371 * attributes, with 4bit feature fields and a default safe value of 372 * 0. 
Covers the following 32bit registers: 373 * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1] 374 */ 375 static const struct arm64_ftr_bits ftr_generic_32bits[] = { 376 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0), 377 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0), 378 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), 379 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), 380 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), 381 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), 382 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), 383 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), 384 ARM64_FTR_END, 385 }; 386 387 /* Table for a single 32bit feature value */ 388 static const struct arm64_ftr_bits ftr_single32[] = { 389 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0), 390 ARM64_FTR_END, 391 }; 392 393 static const struct arm64_ftr_bits ftr_raz[] = { 394 ARM64_FTR_END, 395 }; 396 397 #define ARM64_FTR_REG(id, table) { \ 398 .sys_id = id, \ 399 .reg = &(struct arm64_ftr_reg){ \ 400 .name = #id, \ 401 .ftr_bits = &((table)[0]), \ 402 }} 403 404 static const struct __ftr_reg_entry { 405 u32 sys_id; 406 struct arm64_ftr_reg *reg; 407 } arm64_ftr_regs[] = { 408 409 /* Op1 = 0, CRn = 0, CRm = 1 */ 410 ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0), 411 ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits), 412 ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0), 413 ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0), 414 ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits), 415 ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits), 416 ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits), 417 418 /* Op1 = 0, CRn = 0, CRm = 2 */ 419 ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits), 420 ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits), 421 ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits), 422 ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits), 423 ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits), 424 ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5), 425 ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4), 426 ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6), 427 428 /* Op1 = 0, CRn = 0, CRm = 3 */ 429 ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits), 430 ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits), 431 ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2), 432 433 /* Op1 = 0, CRn = 0, CRm = 4 */ 434 ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0), 435 ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1), 436 ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0), 437 438 /* Op1 = 0, CRn = 0, CRm = 5 */ 439 ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0), 440 ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz), 441 442 /* Op1 = 0, CRn = 0, CRm = 6 */ 443 ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0), 444 ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1), 445 446 /* Op1 = 0, CRn = 0, CRm = 7 */ 447 ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0), 448 ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1), 449 ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2), 450 451 /* Op1 = 0, CRn = 1, CRm = 2 */ 452 ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr), 453 454 /* Op1 = 3, CRn = 0, CRm = 0 */ 455 { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 }, 456 ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid), 457 458 /* Op1 = 3, CRn = 14, CRm = 0 */ 459 ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32), 460 }; 461 462 static int search_cmp_ftr_reg(const void *id, const void *regp) 463 { 464 return (int)(unsigned long)id - 
(int)((const struct __ftr_reg_entry *)regp)->sys_id; 465 } 466 467 /* 468 * get_arm64_ftr_reg - Lookup a feature register entry using its 469 * sys_reg() encoding. With the array arm64_ftr_regs sorted in the 470 * ascending order of sys_id , we use binary search to find a matching 471 * entry. 472 * 473 * returns - Upon success, matching ftr_reg entry for id. 474 * - NULL on failure. It is upto the caller to decide 475 * the impact of a failure. 476 */ 477 static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id) 478 { 479 const struct __ftr_reg_entry *ret; 480 481 ret = bsearch((const void *)(unsigned long)sys_id, 482 arm64_ftr_regs, 483 ARRAY_SIZE(arm64_ftr_regs), 484 sizeof(arm64_ftr_regs[0]), 485 search_cmp_ftr_reg); 486 if (ret) 487 return ret->reg; 488 return NULL; 489 } 490 491 static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg, 492 s64 ftr_val) 493 { 494 u64 mask = arm64_ftr_mask(ftrp); 495 496 reg &= ~mask; 497 reg |= (ftr_val << ftrp->shift) & mask; 498 return reg; 499 } 500 501 static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, 502 s64 cur) 503 { 504 s64 ret = 0; 505 506 switch (ftrp->type) { 507 case FTR_EXACT: 508 ret = ftrp->safe_val; 509 break; 510 case FTR_LOWER_SAFE: 511 ret = new < cur ? new : cur; 512 break; 513 case FTR_HIGHER_OR_ZERO_SAFE: 514 if (!cur || !new) 515 break; 516 /* Fallthrough */ 517 case FTR_HIGHER_SAFE: 518 ret = new > cur ? new : cur; 519 break; 520 default: 521 BUG(); 522 } 523 524 return ret; 525 } 526 527 static void __init sort_ftr_regs(void) 528 { 529 int i; 530 531 /* Check that the array is sorted so that we can do the binary search */ 532 for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++) 533 BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id); 534 } 535 536 /* 537 * Initialise the CPU feature register from Boot CPU values. 538 * Also initiliases the strict_mask for the register. 539 * Any bits that are not covered by an arm64_ftr_bits entry are considered 540 * RES0 for the system-wide value, and must strictly match. 
541 */ 542 static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new) 543 { 544 u64 val = 0; 545 u64 strict_mask = ~0x0ULL; 546 u64 user_mask = 0; 547 u64 valid_mask = 0; 548 549 const struct arm64_ftr_bits *ftrp; 550 struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg); 551 552 BUG_ON(!reg); 553 554 for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { 555 u64 ftr_mask = arm64_ftr_mask(ftrp); 556 s64 ftr_new = arm64_ftr_value(ftrp, new); 557 558 val = arm64_ftr_set_value(ftrp, val, ftr_new); 559 560 valid_mask |= ftr_mask; 561 if (!ftrp->strict) 562 strict_mask &= ~ftr_mask; 563 if (ftrp->visible) 564 user_mask |= ftr_mask; 565 else 566 reg->user_val = arm64_ftr_set_value(ftrp, 567 reg->user_val, 568 ftrp->safe_val); 569 } 570 571 val &= valid_mask; 572 573 reg->sys_val = val; 574 reg->strict_mask = strict_mask; 575 reg->user_mask = user_mask; 576 } 577 578 extern const struct arm64_cpu_capabilities arm64_errata[]; 579 static const struct arm64_cpu_capabilities arm64_features[]; 580 581 static void __init 582 init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps) 583 { 584 for (; caps->matches; caps++) { 585 if (WARN(caps->capability >= ARM64_NCAPS, 586 "Invalid capability %d\n", caps->capability)) 587 continue; 588 if (WARN(cpu_hwcaps_ptrs[caps->capability], 589 "Duplicate entry for capability %d\n", 590 caps->capability)) 591 continue; 592 cpu_hwcaps_ptrs[caps->capability] = caps; 593 } 594 } 595 596 static void __init init_cpu_hwcaps_indirect_list(void) 597 { 598 init_cpu_hwcaps_indirect_list_from_array(arm64_features); 599 init_cpu_hwcaps_indirect_list_from_array(arm64_errata); 600 } 601 602 static void __init setup_boot_cpu_capabilities(void); 603 604 void __init init_cpu_features(struct cpuinfo_arm64 *info) 605 { 606 /* Before we start using the tables, make sure it is sorted */ 607 sort_ftr_regs(); 608 609 init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr); 610 init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid); 611 init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq); 612 init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0); 613 init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1); 614 init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0); 615 init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1); 616 init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0); 617 init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1); 618 init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2); 619 init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0); 620 init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1); 621 init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0); 622 623 if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) { 624 init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0); 625 init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0); 626 init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1); 627 init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2); 628 init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3); 629 init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4); 630 init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5); 631 init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6); 632 init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0); 633 init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1); 634 init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2); 635 init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3); 636 init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0); 637 
init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1); 638 init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0); 639 init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1); 640 init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2); 641 } 642 643 if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) { 644 init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr); 645 sve_init_vq_map(); 646 } 647 648 /* 649 * Initialize the indirect array of CPU hwcaps capabilities pointers 650 * before we handle the boot CPU below. 651 */ 652 init_cpu_hwcaps_indirect_list(); 653 654 /* 655 * Detect and enable early CPU capabilities based on the boot CPU, 656 * after we have initialised the CPU feature infrastructure. 657 */ 658 setup_boot_cpu_capabilities(); 659 } 660 661 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new) 662 { 663 const struct arm64_ftr_bits *ftrp; 664 665 for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { 666 s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val); 667 s64 ftr_new = arm64_ftr_value(ftrp, new); 668 669 if (ftr_cur == ftr_new) 670 continue; 671 /* Find a safe value */ 672 ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur); 673 reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new); 674 } 675 676 } 677 678 static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot) 679 { 680 struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id); 681 682 BUG_ON(!regp); 683 update_cpu_ftr_reg(regp, val); 684 if ((boot & regp->strict_mask) == (val & regp->strict_mask)) 685 return 0; 686 pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n", 687 regp->name, boot, cpu, val); 688 return 1; 689 } 690 691 /* 692 * Update system wide CPU feature registers with the values from a 693 * non-boot CPU. Also performs SANITY checks to make sure that there 694 * aren't any insane variations from that of the boot CPU. 695 */ 696 void update_cpu_features(int cpu, 697 struct cpuinfo_arm64 *info, 698 struct cpuinfo_arm64 *boot) 699 { 700 int taint = 0; 701 702 /* 703 * The kernel can handle differing I-cache policies, but otherwise 704 * caches should look identical. Userspace JITs will make use of 705 * *minLine. 706 */ 707 taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu, 708 info->reg_ctr, boot->reg_ctr); 709 710 /* 711 * Userspace may perform DC ZVA instructions. Mismatched block sizes 712 * could result in too much or too little memory being zeroed if a 713 * process is preempted and migrated between CPUs. 714 */ 715 taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu, 716 info->reg_dczid, boot->reg_dczid); 717 718 /* If different, timekeeping will be broken (especially with KVM) */ 719 taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu, 720 info->reg_cntfrq, boot->reg_cntfrq); 721 722 /* 723 * The kernel uses self-hosted debug features and expects CPUs to 724 * support identical debug features. We presently need CTX_CMPs, WRPs, 725 * and BRPs to be identical. 726 * ID_AA64DFR1 is currently RES0. 727 */ 728 taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu, 729 info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0); 730 taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu, 731 info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1); 732 /* 733 * Even in big.LITTLE, processors should be identical instruction-set 734 * wise. 
735 */ 736 taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu, 737 info->reg_id_aa64isar0, boot->reg_id_aa64isar0); 738 taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu, 739 info->reg_id_aa64isar1, boot->reg_id_aa64isar1); 740 741 /* 742 * Differing PARange support is fine as long as all peripherals and 743 * memory are mapped within the minimum PARange of all CPUs. 744 * Linux should not care about secure memory. 745 */ 746 taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu, 747 info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0); 748 taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu, 749 info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1); 750 taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu, 751 info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2); 752 753 /* 754 * EL3 is not our concern. 755 */ 756 taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu, 757 info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0); 758 taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu, 759 info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1); 760 761 taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu, 762 info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0); 763 764 /* 765 * If we have AArch32, we care about 32-bit features for compat. 766 * If the system doesn't support AArch32, don't update them. 767 */ 768 if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) && 769 id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) { 770 771 taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu, 772 info->reg_id_dfr0, boot->reg_id_dfr0); 773 taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu, 774 info->reg_id_isar0, boot->reg_id_isar0); 775 taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu, 776 info->reg_id_isar1, boot->reg_id_isar1); 777 taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu, 778 info->reg_id_isar2, boot->reg_id_isar2); 779 taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu, 780 info->reg_id_isar3, boot->reg_id_isar3); 781 taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu, 782 info->reg_id_isar4, boot->reg_id_isar4); 783 taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu, 784 info->reg_id_isar5, boot->reg_id_isar5); 785 taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu, 786 info->reg_id_isar6, boot->reg_id_isar6); 787 788 /* 789 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and 790 * ACTLR formats could differ across CPUs and therefore would have to 791 * be trapped for virtualization anyway. 
792 */ 793 taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu, 794 info->reg_id_mmfr0, boot->reg_id_mmfr0); 795 taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu, 796 info->reg_id_mmfr1, boot->reg_id_mmfr1); 797 taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu, 798 info->reg_id_mmfr2, boot->reg_id_mmfr2); 799 taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu, 800 info->reg_id_mmfr3, boot->reg_id_mmfr3); 801 taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu, 802 info->reg_id_pfr0, boot->reg_id_pfr0); 803 taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu, 804 info->reg_id_pfr1, boot->reg_id_pfr1); 805 taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu, 806 info->reg_mvfr0, boot->reg_mvfr0); 807 taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu, 808 info->reg_mvfr1, boot->reg_mvfr1); 809 taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu, 810 info->reg_mvfr2, boot->reg_mvfr2); 811 } 812 813 if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) { 814 taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu, 815 info->reg_zcr, boot->reg_zcr); 816 817 /* Probe vector lengths, unless we already gave up on SVE */ 818 if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) && 819 !system_capabilities_finalized()) 820 sve_update_vq_map(); 821 } 822 823 /* 824 * Mismatched CPU features are a recipe for disaster. Don't even 825 * pretend to support them. 826 */ 827 if (taint) { 828 pr_warn_once("Unsupported CPU feature variation detected.\n"); 829 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); 830 } 831 } 832 833 u64 read_sanitised_ftr_reg(u32 id) 834 { 835 struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id); 836 837 /* We shouldn't get a request for an unsupported register */ 838 BUG_ON(!regp); 839 return regp->sys_val; 840 } 841 842 #define read_sysreg_case(r) \ 843 case r: return read_sysreg_s(r) 844 845 /* 846 * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated. 
847 * Read the system register on the current CPU 848 */ 849 static u64 __read_sysreg_by_encoding(u32 sys_id) 850 { 851 switch (sys_id) { 852 read_sysreg_case(SYS_ID_PFR0_EL1); 853 read_sysreg_case(SYS_ID_PFR1_EL1); 854 read_sysreg_case(SYS_ID_DFR0_EL1); 855 read_sysreg_case(SYS_ID_MMFR0_EL1); 856 read_sysreg_case(SYS_ID_MMFR1_EL1); 857 read_sysreg_case(SYS_ID_MMFR2_EL1); 858 read_sysreg_case(SYS_ID_MMFR3_EL1); 859 read_sysreg_case(SYS_ID_ISAR0_EL1); 860 read_sysreg_case(SYS_ID_ISAR1_EL1); 861 read_sysreg_case(SYS_ID_ISAR2_EL1); 862 read_sysreg_case(SYS_ID_ISAR3_EL1); 863 read_sysreg_case(SYS_ID_ISAR4_EL1); 864 read_sysreg_case(SYS_ID_ISAR5_EL1); 865 read_sysreg_case(SYS_ID_ISAR6_EL1); 866 read_sysreg_case(SYS_MVFR0_EL1); 867 read_sysreg_case(SYS_MVFR1_EL1); 868 read_sysreg_case(SYS_MVFR2_EL1); 869 870 read_sysreg_case(SYS_ID_AA64PFR0_EL1); 871 read_sysreg_case(SYS_ID_AA64PFR1_EL1); 872 read_sysreg_case(SYS_ID_AA64ZFR0_EL1); 873 read_sysreg_case(SYS_ID_AA64DFR0_EL1); 874 read_sysreg_case(SYS_ID_AA64DFR1_EL1); 875 read_sysreg_case(SYS_ID_AA64MMFR0_EL1); 876 read_sysreg_case(SYS_ID_AA64MMFR1_EL1); 877 read_sysreg_case(SYS_ID_AA64MMFR2_EL1); 878 read_sysreg_case(SYS_ID_AA64ISAR0_EL1); 879 read_sysreg_case(SYS_ID_AA64ISAR1_EL1); 880 881 read_sysreg_case(SYS_CNTFRQ_EL0); 882 read_sysreg_case(SYS_CTR_EL0); 883 read_sysreg_case(SYS_DCZID_EL0); 884 885 default: 886 BUG(); 887 return 0; 888 } 889 } 890 891 #include <linux/irqchip/arm-gic-v3.h> 892 893 static bool 894 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry) 895 { 896 int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign); 897 898 return val >= entry->min_field_value; 899 } 900 901 static bool 902 has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope) 903 { 904 u64 val; 905 906 WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible()); 907 if (scope == SCOPE_SYSTEM) 908 val = read_sanitised_ftr_reg(entry->sys_reg); 909 else 910 val = __read_sysreg_by_encoding(entry->sys_reg); 911 912 return feature_matches(val, entry); 913 } 914 915 static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope) 916 { 917 bool has_sre; 918 919 if (!has_cpuid_feature(entry, scope)) 920 return false; 921 922 has_sre = gic_enable_sre(); 923 if (!has_sre) 924 pr_warn_once("%s present but disabled by higher exception level\n", 925 entry->desc); 926 927 return has_sre; 928 } 929 930 static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused) 931 { 932 u32 midr = read_cpuid_id(); 933 934 /* Cavium ThunderX pass 1.x and 2.x */ 935 return midr_is_cpu_model_range(midr, MIDR_THUNDERX, 936 MIDR_CPU_VAR_REV(0, 0), 937 MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK)); 938 } 939 940 static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused) 941 { 942 u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); 943 944 return cpuid_feature_extract_signed_field(pfr0, 945 ID_AA64PFR0_FP_SHIFT) < 0; 946 } 947 948 static bool has_cache_idc(const struct arm64_cpu_capabilities *entry, 949 int scope) 950 { 951 u64 ctr; 952 953 if (scope == SCOPE_SYSTEM) 954 ctr = arm64_ftr_reg_ctrel0.sys_val; 955 else 956 ctr = read_cpuid_effective_cachetype(); 957 958 return ctr & BIT(CTR_IDC_SHIFT); 959 } 960 961 static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused) 962 { 963 /* 964 * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively 965 * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses 966 * to the CTR_EL0 on this CPU and 
emulate it with the real/safe 967 * value. 968 */ 969 if (!(read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT))) 970 sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); 971 } 972 973 static bool has_cache_dic(const struct arm64_cpu_capabilities *entry, 974 int scope) 975 { 976 u64 ctr; 977 978 if (scope == SCOPE_SYSTEM) 979 ctr = arm64_ftr_reg_ctrel0.sys_val; 980 else 981 ctr = read_cpuid_cachetype(); 982 983 return ctr & BIT(CTR_DIC_SHIFT); 984 } 985 986 static bool __maybe_unused 987 has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope) 988 { 989 /* 990 * Kdump isn't guaranteed to power-off all secondary CPUs, CNP 991 * may share TLB entries with a CPU stuck in the crashed 992 * kernel. 993 */ 994 if (is_kdump_kernel()) 995 return false; 996 997 return has_cpuid_feature(entry, scope); 998 } 999 1000 /* 1001 * This check is triggered during the early boot before the cpufeature 1002 * is initialised. Checking the status on the local CPU allows the boot 1003 * CPU to detect the need for non-global mappings and thus avoiding a 1004 * pagetable re-write after all the CPUs are booted. This check will be 1005 * anyway run on individual CPUs, allowing us to get the consistent 1006 * state once the SMP CPUs are up and thus make the switch to non-global 1007 * mappings if required. 1008 */ 1009 bool kaslr_requires_kpti(void) 1010 { 1011 if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) 1012 return false; 1013 1014 /* 1015 * E0PD does a similar job to KPTI so can be used instead 1016 * where available. 1017 */ 1018 if (IS_ENABLED(CONFIG_ARM64_E0PD)) { 1019 u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1); 1020 if (cpuid_feature_extract_unsigned_field(mmfr2, 1021 ID_AA64MMFR2_E0PD_SHIFT)) 1022 return false; 1023 } 1024 1025 /* 1026 * Systems affected by Cavium erratum 24756 are incompatible 1027 * with KPTI. 1028 */ 1029 if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) { 1030 extern const struct midr_range cavium_erratum_27456_cpus[]; 1031 1032 if (is_midr_in_range_list(read_cpuid_id(), 1033 cavium_erratum_27456_cpus)) 1034 return false; 1035 } 1036 1037 return kaslr_offset() > 0; 1038 } 1039 1040 static bool __meltdown_safe = true; 1041 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ 1042 1043 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, 1044 int scope) 1045 { 1046 /* List of CPUs that are not vulnerable and don't need KPTI */ 1047 static const struct midr_range kpti_safe_list[] = { 1048 MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), 1049 MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), 1050 MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), 1051 MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), 1052 MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), 1053 MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), 1054 MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), 1055 MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), 1056 MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), 1057 MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), 1058 MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL), 1059 { /* sentinel */ } 1060 }; 1061 char const *str = "kpti command line option"; 1062 bool meltdown_safe; 1063 1064 meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list); 1065 1066 /* Defer to CPU feature registers */ 1067 if (has_cpuid_feature(entry, scope)) 1068 meltdown_safe = true; 1069 1070 if (!meltdown_safe) 1071 __meltdown_safe = false; 1072 1073 /* 1074 * For reasons that aren't entirely clear, enabling KPTI on Cavium 1075 * ThunderX leads to apparent I-cache corruption of kernel text, which 1076 * ends as well as you might imagine. Don't even try. 
1077 */ 1078 if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) { 1079 str = "ARM64_WORKAROUND_CAVIUM_27456"; 1080 __kpti_forced = -1; 1081 } 1082 1083 /* Useful for KASLR robustness */ 1084 if (kaslr_requires_kpti()) { 1085 if (!__kpti_forced) { 1086 str = "KASLR"; 1087 __kpti_forced = 1; 1088 } 1089 } 1090 1091 if (cpu_mitigations_off() && !__kpti_forced) { 1092 str = "mitigations=off"; 1093 __kpti_forced = -1; 1094 } 1095 1096 if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) { 1097 pr_info_once("kernel page table isolation disabled by kernel configuration\n"); 1098 return false; 1099 } 1100 1101 /* Forced? */ 1102 if (__kpti_forced) { 1103 pr_info_once("kernel page table isolation forced %s by %s\n", 1104 __kpti_forced > 0 ? "ON" : "OFF", str); 1105 return __kpti_forced > 0; 1106 } 1107 1108 return !meltdown_safe; 1109 } 1110 1111 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 1112 static void 1113 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) 1114 { 1115 typedef void (kpti_remap_fn)(int, int, phys_addr_t); 1116 extern kpti_remap_fn idmap_kpti_install_ng_mappings; 1117 kpti_remap_fn *remap_fn; 1118 1119 int cpu = smp_processor_id(); 1120 1121 /* 1122 * We don't need to rewrite the page-tables if either we've done 1123 * it already or we have KASLR enabled and therefore have not 1124 * created any global mappings at all. 1125 */ 1126 if (arm64_use_ng_mappings) 1127 return; 1128 1129 remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); 1130 1131 cpu_install_idmap(); 1132 remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir)); 1133 cpu_uninstall_idmap(); 1134 1135 if (!cpu) 1136 arm64_use_ng_mappings = true; 1137 1138 return; 1139 } 1140 #else 1141 static void 1142 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) 1143 { 1144 } 1145 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ 1146 1147 static int __init parse_kpti(char *str) 1148 { 1149 bool enabled; 1150 int ret = strtobool(str, &enabled); 1151 1152 if (ret) 1153 return ret; 1154 1155 __kpti_forced = enabled ? 1 : -1; 1156 return 0; 1157 } 1158 early_param("kpti", parse_kpti); 1159 1160 #ifdef CONFIG_ARM64_HW_AFDBM 1161 static inline void __cpu_enable_hw_dbm(void) 1162 { 1163 u64 tcr = read_sysreg(tcr_el1) | TCR_HD; 1164 1165 write_sysreg(tcr, tcr_el1); 1166 isb(); 1167 } 1168 1169 static bool cpu_has_broken_dbm(void) 1170 { 1171 /* List of CPUs which have broken DBM support. */ 1172 static const struct midr_range cpus[] = { 1173 #ifdef CONFIG_ARM64_ERRATUM_1024718 1174 MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 -r1p0 1175 #endif 1176 {}, 1177 }; 1178 1179 return is_midr_in_range_list(read_cpuid_id(), cpus); 1180 } 1181 1182 static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap) 1183 { 1184 return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) && 1185 !cpu_has_broken_dbm(); 1186 } 1187 1188 static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap) 1189 { 1190 if (cpu_can_use_dbm(cap)) 1191 __cpu_enable_hw_dbm(); 1192 } 1193 1194 static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap, 1195 int __unused) 1196 { 1197 static bool detected = false; 1198 /* 1199 * DBM is a non-conflicting feature. i.e, the kernel can safely 1200 * run a mix of CPUs with and without the feature. So, we 1201 * unconditionally enable the capability to allow any late CPU 1202 * to use the feature. We only enable the control bits on the 1203 * CPU, if it actually supports. 
1204 * 1205 * We have to make sure we print the "feature" detection only 1206 * when at least one CPU actually uses it. So check if this CPU 1207 * can actually use it and print the message exactly once. 1208 * 1209 * This is safe as all CPUs (including secondary CPUs - due to the 1210 * LOCAL_CPU scope - and the hotplugged CPUs - via verification) 1211 * goes through the "matches" check exactly once. Also if a CPU 1212 * matches the criteria, it is guaranteed that the CPU will turn 1213 * the DBM on, as the capability is unconditionally enabled. 1214 */ 1215 if (!detected && cpu_can_use_dbm(cap)) { 1216 detected = true; 1217 pr_info("detected: Hardware dirty bit management\n"); 1218 } 1219 1220 return true; 1221 } 1222 1223 #endif 1224 1225 #ifdef CONFIG_ARM64_VHE 1226 static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused) 1227 { 1228 return is_kernel_in_hyp_mode(); 1229 } 1230 1231 static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused) 1232 { 1233 /* 1234 * Copy register values that aren't redirected by hardware. 1235 * 1236 * Before code patching, we only set tpidr_el1, all CPUs need to copy 1237 * this value to tpidr_el2 before we patch the code. Once we've done 1238 * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to 1239 * do anything here. 1240 */ 1241 if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN)) 1242 write_sysreg(read_sysreg(tpidr_el1), tpidr_el2); 1243 } 1244 #endif 1245 1246 static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused) 1247 { 1248 u64 val = read_sysreg_s(SYS_CLIDR_EL1); 1249 1250 /* Check that CLIDR_EL1.LOU{U,IS} are both 0 */ 1251 WARN_ON(val & (7 << 27 | 7 << 21)); 1252 } 1253 1254 #ifdef CONFIG_ARM64_SSBD 1255 static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr) 1256 { 1257 if (user_mode(regs)) 1258 return 1; 1259 1260 if (instr & BIT(PSTATE_Imm_shift)) 1261 regs->pstate |= PSR_SSBS_BIT; 1262 else 1263 regs->pstate &= ~PSR_SSBS_BIT; 1264 1265 arm64_skip_faulting_instruction(regs, 4); 1266 return 0; 1267 } 1268 1269 static struct undef_hook ssbs_emulation_hook = { 1270 .instr_mask = ~(1U << PSTATE_Imm_shift), 1271 .instr_val = 0xd500401f | PSTATE_SSBS, 1272 .fn = ssbs_emulation_handler, 1273 }; 1274 1275 static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused) 1276 { 1277 static bool undef_hook_registered = false; 1278 static DEFINE_RAW_SPINLOCK(hook_lock); 1279 1280 raw_spin_lock(&hook_lock); 1281 if (!undef_hook_registered) { 1282 register_undef_hook(&ssbs_emulation_hook); 1283 undef_hook_registered = true; 1284 } 1285 raw_spin_unlock(&hook_lock); 1286 1287 if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) { 1288 sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS); 1289 arm64_set_ssbd_mitigation(false); 1290 } else { 1291 arm64_set_ssbd_mitigation(true); 1292 } 1293 } 1294 #endif /* CONFIG_ARM64_SSBD */ 1295 1296 #ifdef CONFIG_ARM64_PAN 1297 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused) 1298 { 1299 /* 1300 * We modify PSTATE. This won't work from irq context as the PSTATE 1301 * is discarded once we return from the exception. 1302 */ 1303 WARN_ON_ONCE(in_interrupt()); 1304 1305 sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0); 1306 asm(SET_PSTATE_PAN(1)); 1307 } 1308 #endif /* CONFIG_ARM64_PAN */ 1309 1310 #ifdef CONFIG_ARM64_RAS_EXTN 1311 static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused) 1312 { 1313 /* Firmware may have left a deferred SError in this register. 
*/ 1314 write_sysreg_s(0, SYS_DISR_EL1); 1315 } 1316 #endif /* CONFIG_ARM64_RAS_EXTN */ 1317 1318 #ifdef CONFIG_ARM64_PTR_AUTH 1319 static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap) 1320 { 1321 sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | 1322 SCTLR_ELx_ENDA | SCTLR_ELx_ENDB); 1323 } 1324 #endif /* CONFIG_ARM64_PTR_AUTH */ 1325 1326 #ifdef CONFIG_ARM64_E0PD 1327 static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap) 1328 { 1329 if (this_cpu_has_cap(ARM64_HAS_E0PD)) 1330 sysreg_clear_set(tcr_el1, 0, TCR_E0PD1); 1331 } 1332 #endif /* CONFIG_ARM64_E0PD */ 1333 1334 #ifdef CONFIG_ARM64_PSEUDO_NMI 1335 static bool enable_pseudo_nmi; 1336 1337 static int __init early_enable_pseudo_nmi(char *p) 1338 { 1339 return strtobool(p, &enable_pseudo_nmi); 1340 } 1341 early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi); 1342 1343 static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry, 1344 int scope) 1345 { 1346 return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope); 1347 } 1348 #endif 1349 1350 static const struct arm64_cpu_capabilities arm64_features[] = { 1351 { 1352 .desc = "GIC system register CPU interface", 1353 .capability = ARM64_HAS_SYSREG_GIC_CPUIF, 1354 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, 1355 .matches = has_useable_gicv3_cpuif, 1356 .sys_reg = SYS_ID_AA64PFR0_EL1, 1357 .field_pos = ID_AA64PFR0_GIC_SHIFT, 1358 .sign = FTR_UNSIGNED, 1359 .min_field_value = 1, 1360 }, 1361 #ifdef CONFIG_ARM64_PAN 1362 { 1363 .desc = "Privileged Access Never", 1364 .capability = ARM64_HAS_PAN, 1365 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1366 .matches = has_cpuid_feature, 1367 .sys_reg = SYS_ID_AA64MMFR1_EL1, 1368 .field_pos = ID_AA64MMFR1_PAN_SHIFT, 1369 .sign = FTR_UNSIGNED, 1370 .min_field_value = 1, 1371 .cpu_enable = cpu_enable_pan, 1372 }, 1373 #endif /* CONFIG_ARM64_PAN */ 1374 #ifdef CONFIG_ARM64_LSE_ATOMICS 1375 { 1376 .desc = "LSE atomic instructions", 1377 .capability = ARM64_HAS_LSE_ATOMICS, 1378 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1379 .matches = has_cpuid_feature, 1380 .sys_reg = SYS_ID_AA64ISAR0_EL1, 1381 .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT, 1382 .sign = FTR_UNSIGNED, 1383 .min_field_value = 2, 1384 }, 1385 #endif /* CONFIG_ARM64_LSE_ATOMICS */ 1386 { 1387 .desc = "Software prefetching using PRFM", 1388 .capability = ARM64_HAS_NO_HW_PREFETCH, 1389 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, 1390 .matches = has_no_hw_prefetch, 1391 }, 1392 #ifdef CONFIG_ARM64_UAO 1393 { 1394 .desc = "User Access Override", 1395 .capability = ARM64_HAS_UAO, 1396 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1397 .matches = has_cpuid_feature, 1398 .sys_reg = SYS_ID_AA64MMFR2_EL1, 1399 .field_pos = ID_AA64MMFR2_UAO_SHIFT, 1400 .min_field_value = 1, 1401 /* 1402 * We rely on stop_machine() calling uao_thread_switch() to set 1403 * UAO immediately after patching. 
1404 */ 1405 }, 1406 #endif /* CONFIG_ARM64_UAO */ 1407 #ifdef CONFIG_ARM64_PAN 1408 { 1409 .capability = ARM64_ALT_PAN_NOT_UAO, 1410 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1411 .matches = cpufeature_pan_not_uao, 1412 }, 1413 #endif /* CONFIG_ARM64_PAN */ 1414 #ifdef CONFIG_ARM64_VHE 1415 { 1416 .desc = "Virtualization Host Extensions", 1417 .capability = ARM64_HAS_VIRT_HOST_EXTN, 1418 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, 1419 .matches = runs_at_el2, 1420 .cpu_enable = cpu_copy_el2regs, 1421 }, 1422 #endif /* CONFIG_ARM64_VHE */ 1423 { 1424 .desc = "32-bit EL0 Support", 1425 .capability = ARM64_HAS_32BIT_EL0, 1426 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1427 .matches = has_cpuid_feature, 1428 .sys_reg = SYS_ID_AA64PFR0_EL1, 1429 .sign = FTR_UNSIGNED, 1430 .field_pos = ID_AA64PFR0_EL0_SHIFT, 1431 .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT, 1432 }, 1433 { 1434 .desc = "Kernel page table isolation (KPTI)", 1435 .capability = ARM64_UNMAP_KERNEL_AT_EL0, 1436 .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE, 1437 /* 1438 * The ID feature fields below are used to indicate that 1439 * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for 1440 * more details. 1441 */ 1442 .sys_reg = SYS_ID_AA64PFR0_EL1, 1443 .field_pos = ID_AA64PFR0_CSV3_SHIFT, 1444 .min_field_value = 1, 1445 .matches = unmap_kernel_at_el0, 1446 .cpu_enable = kpti_install_ng_mappings, 1447 }, 1448 { 1449 /* FP/SIMD is not implemented */ 1450 .capability = ARM64_HAS_NO_FPSIMD, 1451 .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE, 1452 .min_field_value = 0, 1453 .matches = has_no_fpsimd, 1454 }, 1455 #ifdef CONFIG_ARM64_PMEM 1456 { 1457 .desc = "Data cache clean to Point of Persistence", 1458 .capability = ARM64_HAS_DCPOP, 1459 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1460 .matches = has_cpuid_feature, 1461 .sys_reg = SYS_ID_AA64ISAR1_EL1, 1462 .field_pos = ID_AA64ISAR1_DPB_SHIFT, 1463 .min_field_value = 1, 1464 }, 1465 { 1466 .desc = "Data cache clean to Point of Deep Persistence", 1467 .capability = ARM64_HAS_DCPODP, 1468 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1469 .matches = has_cpuid_feature, 1470 .sys_reg = SYS_ID_AA64ISAR1_EL1, 1471 .sign = FTR_UNSIGNED, 1472 .field_pos = ID_AA64ISAR1_DPB_SHIFT, 1473 .min_field_value = 2, 1474 }, 1475 #endif 1476 #ifdef CONFIG_ARM64_SVE 1477 { 1478 .desc = "Scalable Vector Extension", 1479 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1480 .capability = ARM64_SVE, 1481 .sys_reg = SYS_ID_AA64PFR0_EL1, 1482 .sign = FTR_UNSIGNED, 1483 .field_pos = ID_AA64PFR0_SVE_SHIFT, 1484 .min_field_value = ID_AA64PFR0_SVE, 1485 .matches = has_cpuid_feature, 1486 .cpu_enable = sve_kernel_enable, 1487 }, 1488 #endif /* CONFIG_ARM64_SVE */ 1489 #ifdef CONFIG_ARM64_RAS_EXTN 1490 { 1491 .desc = "RAS Extension Support", 1492 .capability = ARM64_HAS_RAS_EXTN, 1493 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1494 .matches = has_cpuid_feature, 1495 .sys_reg = SYS_ID_AA64PFR0_EL1, 1496 .sign = FTR_UNSIGNED, 1497 .field_pos = ID_AA64PFR0_RAS_SHIFT, 1498 .min_field_value = ID_AA64PFR0_RAS_V1, 1499 .cpu_enable = cpu_clear_disr, 1500 }, 1501 #endif /* CONFIG_ARM64_RAS_EXTN */ 1502 { 1503 .desc = "Data cache clean to the PoU not required for I/D coherence", 1504 .capability = ARM64_HAS_CACHE_IDC, 1505 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1506 .matches = has_cache_idc, 1507 .cpu_enable = cpu_emulate_effective_ctr, 1508 }, 1509 { 1510 .desc = "Instruction cache invalidation not required for I/D coherence", 1511 .capability = ARM64_HAS_CACHE_DIC, 1512 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1513 .matches = has_cache_dic, 1514 }, 
1515 { 1516 .desc = "Stage-2 Force Write-Back", 1517 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1518 .capability = ARM64_HAS_STAGE2_FWB, 1519 .sys_reg = SYS_ID_AA64MMFR2_EL1, 1520 .sign = FTR_UNSIGNED, 1521 .field_pos = ID_AA64MMFR2_FWB_SHIFT, 1522 .min_field_value = 1, 1523 .matches = has_cpuid_feature, 1524 .cpu_enable = cpu_has_fwb, 1525 }, 1526 #ifdef CONFIG_ARM64_HW_AFDBM 1527 { 1528 /* 1529 * Since we turn this on always, we don't want the user to 1530 * think that the feature is available when it may not be. 1531 * So hide the description. 1532 * 1533 * .desc = "Hardware pagetable Dirty Bit Management", 1534 * 1535 */ 1536 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, 1537 .capability = ARM64_HW_DBM, 1538 .sys_reg = SYS_ID_AA64MMFR1_EL1, 1539 .sign = FTR_UNSIGNED, 1540 .field_pos = ID_AA64MMFR1_HADBS_SHIFT, 1541 .min_field_value = 2, 1542 .matches = has_hw_dbm, 1543 .cpu_enable = cpu_enable_hw_dbm, 1544 }, 1545 #endif 1546 { 1547 .desc = "CRC32 instructions", 1548 .capability = ARM64_HAS_CRC32, 1549 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1550 .matches = has_cpuid_feature, 1551 .sys_reg = SYS_ID_AA64ISAR0_EL1, 1552 .field_pos = ID_AA64ISAR0_CRC32_SHIFT, 1553 .min_field_value = 1, 1554 }, 1555 #ifdef CONFIG_ARM64_SSBD 1556 { 1557 .desc = "Speculative Store Bypassing Safe (SSBS)", 1558 .capability = ARM64_SSBS, 1559 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, 1560 .matches = has_cpuid_feature, 1561 .sys_reg = SYS_ID_AA64PFR1_EL1, 1562 .field_pos = ID_AA64PFR1_SSBS_SHIFT, 1563 .sign = FTR_UNSIGNED, 1564 .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY, 1565 .cpu_enable = cpu_enable_ssbs, 1566 }, 1567 #endif 1568 #ifdef CONFIG_ARM64_CNP 1569 { 1570 .desc = "Common not Private translations", 1571 .capability = ARM64_HAS_CNP, 1572 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1573 .matches = has_useable_cnp, 1574 .sys_reg = SYS_ID_AA64MMFR2_EL1, 1575 .sign = FTR_UNSIGNED, 1576 .field_pos = ID_AA64MMFR2_CNP_SHIFT, 1577 .min_field_value = 1, 1578 .cpu_enable = cpu_enable_cnp, 1579 }, 1580 #endif 1581 { 1582 .desc = "Speculation barrier (SB)", 1583 .capability = ARM64_HAS_SB, 1584 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1585 .matches = has_cpuid_feature, 1586 .sys_reg = SYS_ID_AA64ISAR1_EL1, 1587 .field_pos = ID_AA64ISAR1_SB_SHIFT, 1588 .sign = FTR_UNSIGNED, 1589 .min_field_value = 1, 1590 }, 1591 #ifdef CONFIG_ARM64_PTR_AUTH 1592 { 1593 .desc = "Address authentication (architected algorithm)", 1594 .capability = ARM64_HAS_ADDRESS_AUTH_ARCH, 1595 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1596 .sys_reg = SYS_ID_AA64ISAR1_EL1, 1597 .sign = FTR_UNSIGNED, 1598 .field_pos = ID_AA64ISAR1_APA_SHIFT, 1599 .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED, 1600 .matches = has_cpuid_feature, 1601 .cpu_enable = cpu_enable_address_auth, 1602 }, 1603 { 1604 .desc = "Address authentication (IMP DEF algorithm)", 1605 .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF, 1606 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1607 .sys_reg = SYS_ID_AA64ISAR1_EL1, 1608 .sign = FTR_UNSIGNED, 1609 .field_pos = ID_AA64ISAR1_API_SHIFT, 1610 .min_field_value = ID_AA64ISAR1_API_IMP_DEF, 1611 .matches = has_cpuid_feature, 1612 .cpu_enable = cpu_enable_address_auth, 1613 }, 1614 { 1615 .desc = "Generic authentication (architected algorithm)", 1616 .capability = ARM64_HAS_GENERIC_AUTH_ARCH, 1617 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1618 .sys_reg = SYS_ID_AA64ISAR1_EL1, 1619 .sign = FTR_UNSIGNED, 1620 .field_pos = ID_AA64ISAR1_GPA_SHIFT, 1621 .min_field_value = ID_AA64ISAR1_GPA_ARCHITECTED, 1622 .matches = has_cpuid_feature, 1623 }, 1624 { 1625 .desc = "Generic 
authentication (IMP DEF algorithm)", 1626 .capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF, 1627 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1628 .sys_reg = SYS_ID_AA64ISAR1_EL1, 1629 .sign = FTR_UNSIGNED, 1630 .field_pos = ID_AA64ISAR1_GPI_SHIFT, 1631 .min_field_value = ID_AA64ISAR1_GPI_IMP_DEF, 1632 .matches = has_cpuid_feature, 1633 }, 1634 #endif /* CONFIG_ARM64_PTR_AUTH */ 1635 #ifdef CONFIG_ARM64_PSEUDO_NMI 1636 { 1637 /* 1638 * Depends on having GICv3 1639 */ 1640 .desc = "IRQ priority masking", 1641 .capability = ARM64_HAS_IRQ_PRIO_MASKING, 1642 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, 1643 .matches = can_use_gic_priorities, 1644 .sys_reg = SYS_ID_AA64PFR0_EL1, 1645 .field_pos = ID_AA64PFR0_GIC_SHIFT, 1646 .sign = FTR_UNSIGNED, 1647 .min_field_value = 1, 1648 }, 1649 #endif 1650 #ifdef CONFIG_ARM64_E0PD 1651 { 1652 .desc = "E0PD", 1653 .capability = ARM64_HAS_E0PD, 1654 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1655 .sys_reg = SYS_ID_AA64MMFR2_EL1, 1656 .sign = FTR_UNSIGNED, 1657 .field_pos = ID_AA64MMFR2_E0PD_SHIFT, 1658 .matches = has_cpuid_feature, 1659 .min_field_value = 1, 1660 .cpu_enable = cpu_enable_e0pd, 1661 }, 1662 #endif 1663 #ifdef CONFIG_ARCH_RANDOM 1664 { 1665 .desc = "Random Number Generator", 1666 .capability = ARM64_HAS_RNG, 1667 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1668 .matches = has_cpuid_feature, 1669 .sys_reg = SYS_ID_AA64ISAR0_EL1, 1670 .field_pos = ID_AA64ISAR0_RNDR_SHIFT, 1671 .sign = FTR_UNSIGNED, 1672 .min_field_value = 1, 1673 }, 1674 #endif 1675 {}, 1676 }; 1677 1678 #define HWCAP_CPUID_MATCH(reg, field, s, min_value) \ 1679 .matches = has_cpuid_feature, \ 1680 .sys_reg = reg, \ 1681 .field_pos = field, \ 1682 .sign = s, \ 1683 .min_field_value = min_value, 1684 1685 #define __HWCAP_CAP(name, cap_type, cap) \ 1686 .desc = name, \ 1687 .type = ARM64_CPUCAP_SYSTEM_FEATURE, \ 1688 .hwcap_type = cap_type, \ 1689 .hwcap = cap, \ 1690 1691 #define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \ 1692 { \ 1693 __HWCAP_CAP(#cap, cap_type, cap) \ 1694 HWCAP_CPUID_MATCH(reg, field, s, min_value) \ 1695 } 1696 1697 #define HWCAP_MULTI_CAP(list, cap_type, cap) \ 1698 { \ 1699 __HWCAP_CAP(#cap, cap_type, cap) \ 1700 .matches = cpucap_multi_entry_cap_matches, \ 1701 .match_list = list, \ 1702 } 1703 1704 #define HWCAP_CAP_MATCH(match, cap_type, cap) \ 1705 { \ 1706 __HWCAP_CAP(#cap, cap_type, cap) \ 1707 .matches = match, \ 1708 } 1709 1710 #ifdef CONFIG_ARM64_PTR_AUTH 1711 static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = { 1712 { 1713 HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_APA_SHIFT, 1714 FTR_UNSIGNED, ID_AA64ISAR1_APA_ARCHITECTED) 1715 }, 1716 { 1717 HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_SHIFT, 1718 FTR_UNSIGNED, ID_AA64ISAR1_API_IMP_DEF) 1719 }, 1720 {}, 1721 }; 1722 1723 static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = { 1724 { 1725 HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPA_SHIFT, 1726 FTR_UNSIGNED, ID_AA64ISAR1_GPA_ARCHITECTED) 1727 }, 1728 { 1729 HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPI_SHIFT, 1730 FTR_UNSIGNED, ID_AA64ISAR1_GPI_IMP_DEF) 1731 }, 1732 {}, 1733 }; 1734 #endif 1735 1736 static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { 1737 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL), 1738 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES), 1739 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, 
KERNEL_HWCAP_SHA1), 1740 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2), 1741 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512), 1742 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32), 1743 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS), 1744 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM), 1745 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3), 1746 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3), 1747 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4), 1748 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP), 1749 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM), 1750 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM), 1751 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2), 1752 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RNDR_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG), 1753 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP), 1754 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP), 1755 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD), 1756 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), 1757 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT), 1758 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP), 1759 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP), 1760 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT), 1761 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FCMA), 1762 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_LRCPC), 1763 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC), 1764 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FRINTTS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT), 1765 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB), 1766 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_BF16_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_BF16), 1767 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DGH_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH), 1768 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_I8MM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM), 1769 HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT), 1770 #ifdef CONFIG_ARM64_SVE 1771 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE), 1772 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SVEVER_SHIFT, FTR_UNSIGNED, 
ID_AA64ZFR0_SVEVER_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2), 1773 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES), 1774 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL), 1775 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM), 1776 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BF16_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BF16, CAP_HWCAP, KERNEL_HWCAP_SVEBF16), 1777 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3), 1778 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4), 1779 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_I8MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_I8MM, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM), 1780 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F32MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F32MM, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM), 1781 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F64MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F64MM, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM), 1782 #endif 1783 HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS), 1784 #ifdef CONFIG_ARM64_PTR_AUTH 1785 HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA), 1786 HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG), 1787 #endif 1788 {}, 1789 }; 1790 1791 #ifdef CONFIG_COMPAT 1792 static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope) 1793 { 1794 /* 1795 * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available, 1796 * in line with that of arm32 as in vfp_init(). We make sure that the 1797 * check is future proof, by making sure value is non-zero. 1798 */ 1799 u32 mvfr1; 1800 1801 WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible()); 1802 if (scope == SCOPE_SYSTEM) 1803 mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1); 1804 else 1805 mvfr1 = read_sysreg_s(SYS_MVFR1_EL1); 1806 1807 return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) && 1808 cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) && 1809 cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT); 1810 } 1811 #endif 1812 1813 static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { 1814 #ifdef CONFIG_COMPAT 1815 HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON), 1816 HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4), 1817 /* Arm v8 mandates MVFR0.FPDP == {0, 2}. 
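(For reference: FPDP == 0 means double-precision FP is not implemented at all, while FPDP == 2 indicates VFPv3-style double-precision support, which is the only value an AArch32 implementation with FP may report.)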
So, piggy back on this for the presence of VFP support */ 1818 HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP), 1819 HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3), 1820 HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL), 1821 HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES), 1822 HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1), 1823 HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2), 1824 HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32), 1825 #endif 1826 {}, 1827 }; 1828 1829 static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap) 1830 { 1831 switch (cap->hwcap_type) { 1832 case CAP_HWCAP: 1833 cpu_set_feature(cap->hwcap); 1834 break; 1835 #ifdef CONFIG_COMPAT 1836 case CAP_COMPAT_HWCAP: 1837 compat_elf_hwcap |= (u32)cap->hwcap; 1838 break; 1839 case CAP_COMPAT_HWCAP2: 1840 compat_elf_hwcap2 |= (u32)cap->hwcap; 1841 break; 1842 #endif 1843 default: 1844 WARN_ON(1); 1845 break; 1846 } 1847 } 1848 1849 /* Check if we have a particular HWCAP enabled */ 1850 static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap) 1851 { 1852 bool rc; 1853 1854 switch (cap->hwcap_type) { 1855 case CAP_HWCAP: 1856 rc = cpu_have_feature(cap->hwcap); 1857 break; 1858 #ifdef CONFIG_COMPAT 1859 case CAP_COMPAT_HWCAP: 1860 rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0; 1861 break; 1862 case CAP_COMPAT_HWCAP2: 1863 rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0; 1864 break; 1865 #endif 1866 default: 1867 WARN_ON(1); 1868 rc = false; 1869 } 1870 1871 return rc; 1872 } 1873 1874 static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps) 1875 { 1876 /* We support emulation of accesses to CPU ID feature registers */ 1877 cpu_set_named_feature(CPUID); 1878 for (; hwcaps->matches; hwcaps++) 1879 if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps))) 1880 cap_set_elf_hwcap(hwcaps); 1881 } 1882 1883 static void update_cpu_capabilities(u16 scope_mask) 1884 { 1885 int i; 1886 const struct arm64_cpu_capabilities *caps; 1887 1888 scope_mask &= ARM64_CPUCAP_SCOPE_MASK; 1889 for (i = 0; i < ARM64_NCAPS; i++) { 1890 caps = cpu_hwcaps_ptrs[i]; 1891 if (!caps || !(caps->type & scope_mask) || 1892 cpus_have_cap(caps->capability) || 1893 !caps->matches(caps, cpucap_default_scope(caps))) 1894 continue; 1895 1896 if (caps->desc) 1897 pr_info("detected: %s\n", caps->desc); 1898 cpus_set_cap(caps->capability); 1899 1900 if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU)) 1901 set_bit(caps->capability, boot_capabilities); 1902 } 1903 } 1904 1905 /* 1906 * Enable all the available capabilities on this CPU. The capabilities 1907 * with BOOT_CPU scope are handled separately and hence skipped here. 
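 * This is run on each online CPU via stop_machine() from
 * enable_cpu_capabilities() below, so that any PSTATE changes made by the
 * ->cpu_enable() callbacks persist.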
1908 */ 1909 static int cpu_enable_non_boot_scope_capabilities(void *__unused) 1910 { 1911 int i; 1912 u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU; 1913 1914 for_each_available_cap(i) { 1915 const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[i]; 1916 1917 if (WARN_ON(!cap)) 1918 continue; 1919 1920 if (!(cap->type & non_boot_scope)) 1921 continue; 1922 1923 if (cap->cpu_enable) 1924 cap->cpu_enable(cap); 1925 } 1926 return 0; 1927 } 1928 1929 /* 1930 * Run through the enabled capabilities and enable them on all active 1931 * CPUs 1932 */ 1933 static void __init enable_cpu_capabilities(u16 scope_mask) 1934 { 1935 int i; 1936 const struct arm64_cpu_capabilities *caps; 1937 bool boot_scope; 1938 1939 scope_mask &= ARM64_CPUCAP_SCOPE_MASK; 1940 boot_scope = !!(scope_mask & SCOPE_BOOT_CPU); 1941 1942 for (i = 0; i < ARM64_NCAPS; i++) { 1943 unsigned int num; 1944 1945 caps = cpu_hwcaps_ptrs[i]; 1946 if (!caps || !(caps->type & scope_mask)) 1947 continue; 1948 num = caps->capability; 1949 if (!cpus_have_cap(num)) 1950 continue; 1951 1952 /* Ensure cpus_have_const_cap(num) works */ 1953 static_branch_enable(&cpu_hwcap_keys[num]); 1954 1955 if (boot_scope && caps->cpu_enable) 1956 /* 1957 * Capabilities with SCOPE_BOOT_CPU scope are finalised 1958 * before any secondary CPU boots. Thus, each secondary 1959 * will enable the capability as appropriate via 1960 * check_local_cpu_capabilities(). The only exception is 1961 * the boot CPU, for which the capability must be 1962 * enabled here. This approach avoids costly 1963 * stop_machine() calls for this case. 1964 */ 1965 caps->cpu_enable(caps); 1966 } 1967 1968 /* 1969 * For all non-boot scope capabilities, use stop_machine() 1970 * as it schedules the work allowing us to modify PSTATE, 1971 * instead of on_each_cpu() which uses an IPI, giving us a 1972 * PSTATE that disappears when we return. 1973 */ 1974 if (!boot_scope) 1975 stop_machine(cpu_enable_non_boot_scope_capabilities, 1976 NULL, cpu_online_mask); 1977 } 1978 1979 /* 1980 * Run through the list of capabilities to check for conflicts. 1981 * If the system has already detected a capability, take necessary 1982 * action on this CPU. 1983 * 1984 * Returns "false" on conflicts. 1985 */ 1986 static bool verify_local_cpu_caps(u16 scope_mask) 1987 { 1988 int i; 1989 bool cpu_has_cap, system_has_cap; 1990 const struct arm64_cpu_capabilities *caps; 1991 1992 scope_mask &= ARM64_CPUCAP_SCOPE_MASK; 1993 1994 for (i = 0; i < ARM64_NCAPS; i++) { 1995 caps = cpu_hwcaps_ptrs[i]; 1996 if (!caps || !(caps->type & scope_mask)) 1997 continue; 1998 1999 cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU); 2000 system_has_cap = cpus_have_cap(caps->capability); 2001 2002 if (system_has_cap) { 2003 /* 2004 * Check if the new CPU misses an advertised feature, 2005 * which is not safe to miss. 2006 */ 2007 if (!cpu_has_cap && !cpucap_late_cpu_optional(caps)) 2008 break; 2009 /* 2010 * We have to issue cpu_enable() irrespective of 2011 * whether the CPU has it or not, as it is enabled 2012 * system wide. It is up to the callback to take 2013 * appropriate action on this CPU. 2014 */ 2015 if (caps->cpu_enable) 2016 caps->cpu_enable(caps); 2017 } else { 2018 /* 2019 * Check if the CPU has this capability if it isn't 2020 * safe to have when the system doesn't.
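 * In other words, a late CPU that brings in a capability the system chose
 * not to enable is only tolerated if that capability is explicitly marked
 * as permitted for late CPUs.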
2021 */ 2022 if (cpu_has_cap && !cpucap_late_cpu_permitted(caps)) 2023 break; 2024 } 2025 } 2026 2027 if (i < ARM64_NCAPS) { 2028 pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n", 2029 smp_processor_id(), caps->capability, 2030 caps->desc, system_has_cap, cpu_has_cap); 2031 return false; 2032 } 2033 2034 return true; 2035 } 2036 2037 /* 2038 * Check for CPU features that are used in early boot 2039 * based on the Boot CPU value. 2040 */ 2041 static void check_early_cpu_features(void) 2042 { 2043 verify_cpu_asid_bits(); 2044 /* 2045 * Early features are used by the kernel already. If there 2046 * is a conflict, we cannot proceed further. 2047 */ 2048 if (!verify_local_cpu_caps(SCOPE_BOOT_CPU)) 2049 cpu_panic_kernel(); 2050 } 2051 2052 static void 2053 verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps) 2054 { 2055 2056 for (; caps->matches; caps++) 2057 if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) { 2058 pr_crit("CPU%d: missing HWCAP: %s\n", 2059 smp_processor_id(), caps->desc); 2060 cpu_die_early(); 2061 } 2062 } 2063 2064 static void verify_sve_features(void) 2065 { 2066 u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1); 2067 u64 zcr = read_zcr_features(); 2068 2069 unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK; 2070 unsigned int len = zcr & ZCR_ELx_LEN_MASK; 2071 2072 if (len < safe_len || sve_verify_vq_map()) { 2073 pr_crit("CPU%d: SVE: vector length support mismatch\n", 2074 smp_processor_id()); 2075 cpu_die_early(); 2076 } 2077 2078 /* Add checks on other ZCR bits here if necessary */ 2079 } 2080 2081 2082 /* 2083 * Run through the enabled system capabilities and enable them on this CPU. 2084 * The capabilities were decided based on the available CPUs at boot time. 2085 * Any new CPU should match the system wide status of the capability. If the 2086 * new CPU doesn't have a capability which the system now has enabled, we 2087 * cannot do anything to fix it up and this could cause unexpected failures. 2088 * So we park the CPU. 2089 */ 2090 static void verify_local_cpu_capabilities(void) 2091 { 2092 /* 2093 * The capabilities with SCOPE_BOOT_CPU are checked from 2094 * check_early_cpu_features(), as they need to be verified 2095 * on all secondary CPUs. 2096 */ 2097 if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU)) 2098 cpu_die_early(); 2099 2100 verify_local_elf_hwcaps(arm64_elf_hwcaps); 2101 2102 if (system_supports_32bit_el0()) 2103 verify_local_elf_hwcaps(compat_elf_hwcaps); 2104 2105 if (system_supports_sve()) 2106 verify_sve_features(); 2107 } 2108 2109 void check_local_cpu_capabilities(void) 2110 { 2111 /* 2112 * All secondary CPUs should conform to the early CPU features 2113 * in use by the kernel, based on the boot CPU. 2114 */ 2115 check_early_cpu_features(); 2116 2117 /* 2118 * If we haven't finalised the system capabilities, this CPU gets 2119 * a chance to update the errata workarounds and local features. 2120 * Otherwise, this CPU should verify that it has all the system 2121 * advertised capabilities.
2122 */ 2123 if (!system_capabilities_finalized()) 2124 update_cpu_capabilities(SCOPE_LOCAL_CPU); 2125 else 2126 verify_local_cpu_capabilities(); 2127 } 2128 2129 static void __init setup_boot_cpu_capabilities(void) 2130 { 2131 /* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */ 2132 update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU); 2133 /* Enable the SCOPE_BOOT_CPU capabilities alone right away */ 2134 enable_cpu_capabilities(SCOPE_BOOT_CPU); 2135 } 2136 2137 bool this_cpu_has_cap(unsigned int n) 2138 { 2139 if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) { 2140 const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n]; 2141 2142 if (cap) 2143 return cap->matches(cap, SCOPE_LOCAL_CPU); 2144 } 2145 2146 return false; 2147 } 2148 2149 void cpu_set_feature(unsigned int num) 2150 { 2151 WARN_ON(num >= MAX_CPU_FEATURES); 2152 elf_hwcap |= BIT(num); 2153 } 2154 EXPORT_SYMBOL_GPL(cpu_set_feature); 2155 2156 bool cpu_have_feature(unsigned int num) 2157 { 2158 WARN_ON(num >= MAX_CPU_FEATURES); 2159 return elf_hwcap & BIT(num); 2160 } 2161 EXPORT_SYMBOL_GPL(cpu_have_feature); 2162 2163 unsigned long cpu_get_elf_hwcap(void) 2164 { 2165 /* 2166 * We currently only populate the first 32 bits of AT_HWCAP. Please 2167 * note that for userspace compatibility we guarantee that bits 62 2168 * and 63 will always be returned as 0. 2169 */ 2170 return lower_32_bits(elf_hwcap); 2171 } 2172 2173 unsigned long cpu_get_elf_hwcap2(void) 2174 { 2175 return upper_32_bits(elf_hwcap); 2176 } 2177 2178 static void __init setup_system_capabilities(void) 2179 { 2180 /* 2181 * Now that we have finalised the system-wide safe feature 2182 * registers, finalise the capabilities that depend on 2183 * them. Also enable all the available capabilities that 2184 * are not already enabled. 2185 */ 2186 update_cpu_capabilities(SCOPE_SYSTEM); 2187 enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU); 2188 } 2189 2190 void __init setup_cpu_features(void) 2191 { 2192 u32 cwg; 2193 2194 setup_system_capabilities(); 2195 setup_elf_hwcaps(arm64_elf_hwcaps); 2196 2197 if (system_supports_32bit_el0()) 2198 setup_elf_hwcaps(compat_elf_hwcaps); 2199 2200 if (system_uses_ttbr0_pan()) 2201 pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n"); 2202 2203 sve_setup(); 2204 minsigstksz_setup(); 2205 2206 /* Advertise that we have computed the system capabilities */ 2207 finalize_system_capabilities(); 2208 2209 /* 2210 * Check for sane CTR_EL0.CWG value. 2211 */ 2212 cwg = cache_type_cwg(); 2213 if (!cwg) 2214 pr_warn("No Cache Writeback Granule information, assuming %d\n", 2215 ARCH_DMA_MINALIGN); 2216 } 2217 2218 static bool __maybe_unused 2219 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused) 2220 { 2221 return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO)); 2222 } 2223 2224 static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap) 2225 { 2226 cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); 2227 } 2228 2229 /* 2230 * We emulate only the following system register space. 2231 * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7] 2232 * See Table C5-6 System instruction encodings for System register accesses, 2233 * ARMv8 ARM (ARM DDI 0487A.f) for more details.
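 * For example, ID_AA64ISAR0_EL1 is encoded as Op0 = 3, Op1 = 0, CRn = 0,
 * CRm = 6, Op2 = 0 and therefore falls within the emulated range.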
2234 */ 2235 static inline bool __attribute_const__ is_emulated(u32 id) 2236 { 2237 return (sys_reg_Op0(id) == 0x3 && 2238 sys_reg_CRn(id) == 0x0 && 2239 sys_reg_Op1(id) == 0x0 && 2240 (sys_reg_CRm(id) == 0 || 2241 ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7)))); 2242 } 2243 2244 /* 2245 * With CRm == 0, reg should be one of: 2246 * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1. 2247 */ 2248 static inline int emulate_id_reg(u32 id, u64 *valp) 2249 { 2250 switch (id) { 2251 case SYS_MIDR_EL1: 2252 *valp = read_cpuid_id(); 2253 break; 2254 case SYS_MPIDR_EL1: 2255 *valp = SYS_MPIDR_SAFE_VAL; 2256 break; 2257 case SYS_REVIDR_EL1: 2258 /* IMPLEMENTATION DEFINED values are emulated with 0 */ 2259 *valp = 0; 2260 break; 2261 default: 2262 return -EINVAL; 2263 } 2264 2265 return 0; 2266 } 2267 2268 static int emulate_sys_reg(u32 id, u64 *valp) 2269 { 2270 struct arm64_ftr_reg *regp; 2271 2272 if (!is_emulated(id)) 2273 return -EINVAL; 2274 2275 if (sys_reg_CRm(id) == 0) 2276 return emulate_id_reg(id, valp); 2277 2278 regp = get_arm64_ftr_reg(id); 2279 if (regp) 2280 *valp = arm64_ftr_reg_user_value(regp); 2281 else 2282 /* 2283 * The untracked registers are either IMPLEMENTATION DEFINED 2284 * (e.g., ID_AFR0_EL1) or reserved RAZ. 2285 */ 2286 *valp = 0; 2287 return 0; 2288 } 2289 2290 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt) 2291 { 2292 int rc; 2293 u64 val; 2294 2295 rc = emulate_sys_reg(sys_reg, &val); 2296 if (!rc) { 2297 pt_regs_write_reg(regs, rt, val); 2298 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); 2299 } 2300 return rc; 2301 } 2302 2303 static int emulate_mrs(struct pt_regs *regs, u32 insn) 2304 { 2305 u32 sys_reg, rt; 2306 2307 /* 2308 * sys_reg values are defined as used in mrs/msr instructions. 2309 * Shift the imm value to get the encoding. 2310 */ 2311 sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5; 2312 rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn); 2313 return do_emulate_mrs(regs, sys_reg, rt); 2314 } 2315 2316 static struct undef_hook mrs_hook = { 2317 .instr_mask = 0xfff00000, 2318 .instr_val = 0xd5300000, 2319 .pstate_mask = PSR_AA32_MODE_MASK, 2320 .pstate_val = PSR_MODE_EL0t, 2321 .fn = emulate_mrs, 2322 }; 2323 2324 static int __init enable_mrs_emulation(void) 2325 { 2326 register_undef_hook(&mrs_hook); 2327 return 0; 2328 } 2329 2330 core_initcall(enable_mrs_emulation); 2331 2332 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, 2333 char *buf) 2334 { 2335 if (__meltdown_safe) 2336 return sprintf(buf, "Not affected\n"); 2337 2338 if (arm64_kernel_unmapped_at_el0()) 2339 return sprintf(buf, "Mitigation: PTI\n"); 2340 2341 return sprintf(buf, "Vulnerable\n"); 2342 } 2343
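/*
 * Editor's note, an illustrative sketch that is not part of the original
 * file: the MRS emulation above is what allows userspace to read the
 * sanitised ID registers directly. A hypothetical userspace consumer would
 * first check the HWCAP_CPUID bit and then issue the MRS, which traps into
 * the kernel and is handled via do_emulate_mrs():
 *
 *	#include <sys/auxv.h>
 *	#include <asm/hwcap.h>
 *
 *	unsigned long isar0 = 0;
 *
 *	if (getauxval(AT_HWCAP) & HWCAP_CPUID)
 *		asm volatile("mrs %0, ID_AA64ISAR0_EL1" : "=r" (isar0));
 *
 *	// ID_AA64ISAR0_EL1.AES, bits [7:4]: >= 1 means AES instructions
 *
 * The value returned to userspace is the system-wide safe value from
 * arm64_ftr_reg_user_value(), not the raw contents of the local CPU's
 * register.
 */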