// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>

/*
 * Match @midr against the entry's MIDR range, then reject any revision
 * listed in entry->fixed_revs whose REVIDR bits indicate the erratum has
 * been fixed in that particular (variant, revision) of the part.
 */
static bool __maybe_unused
__is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
			 u32 midr, u32 revidr)
{
	const struct arm64_midr_revidr *fix;
	if (!is_midr_in_range(&entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;
	return true;
}

/* Local-CPU matcher: check this CPU's MIDR/REVIDR against the entry. */
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return __is_affected_midr_range(entry, read_cpuid_id(),
					read_cpuid(REVIDR_EL1));
}

/* Local-CPU matcher: check this CPU against a NULL-terminated MIDR list. */
static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	/*
	 * Compare only the implementor, architecture and the top nibble of
	 * the part number, so that all Kryo part variants match the single
	 * model encoded in the entry.
	 */
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behaves
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 :
	 *
	 * 1) It is safe if the system doesn't support IDC, as CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	bool enable_uct_trap = false;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		enable_uct_trap = true;

	/* ... or if the system is affected by an erratum */
	if (cap->capability == ARM64_WORKAROUND_1542419)
		enable_uct_trap = true;

	if (enable_uct_trap)
		/* Clearing SCTLR_EL1.UCT traps EL0 CTR_EL0 reads to EL1 */
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
/*
 * The erratum only needs handling by the kernel when it runs in hyp mode
 * (VHE); affected MIDRs alone are not sufficient.
 */
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif

/* Trap EL0 cache maintenance instructions by clearing SCTLR_EL1.UCI */
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask)				\
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given a model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with same work around */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

/*
 * The TVM workaround is needed only when hyp mode is available and any
 * CPU in the system has a non-zero Aff0 (i.e. SMT siblings exist).
 */
static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}

/* Neoverse-N1 (any revision) advertising CTR_EL0.DIC is affected */
static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(&range) && has_dic;
}

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
	{
		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441007
	{
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
	{
		/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
	},
#endif
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_23154
static const struct midr_range cavium_erratum_23154_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_THUNDERX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_98XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_96XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXN),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXMM),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXO),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
/* Non-static: also referenced outside this file */
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	/* Kryo2XX Silver rAp4 */
	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		/* r0p4 parts with REVIDR bit 8 set have the erratum fixed */
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2139208
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */

#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
static struct midr_range trbe_write_out_of_range_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2253138
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2224489
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */

#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};
#endif	/* CONFIG_ARM64_ERRATUM_1742098 */

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
static const struct midr_range erratum_spec_unpriv_load_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_3117295
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2966298
	/* Cortex-A520 r0p0 to r0p1 */
	MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_3194386
static const struct midr_range erratum_spec_ssbs_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
	{}
};
#endif

#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
static const struct midr_range erratum_ac03_cpu_38_list[] = {
	MIDR_ALL_VERSIONS(MIDR_AMPERE1),
	MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
	{},
};
#endif

const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		.desc = "Cavium errata 23154 and 38545",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_23154_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Spectre-v2",
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v2,
		.cpu_enable = spectre_v2_enable_mitigation,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
	/* Must come after the Spectre-v2 entry */
		.desc = "Spectre-v3a",
		.capability = ARM64_SPECTRE_V3A,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v3a,
		.cpu_enable = spectre_v3a_enable_mitigation,
	},
#endif
	{
		.desc = "Spectre-v4",
		.capability = ARM64_SPECTRE_V4,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v4,
		.cpu_enable = spectre_v4_enable_mitigation,
	},
	{
		.desc = "Spectre-BHB",
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/*
		 * We need to allow affected CPUs to come in late, but
		 * also need the non-affected CPUs to be able to come
		 * in at any point in time. Wonderful.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923",
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1508412 (kernel portion)",
		.capability = ARM64_WORKAROUND_1508412,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
				  0, 0,
				  1, 0),
	},
#endif
#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
	{
		/* NVIDIA Carmel */
		.desc = "NVIDIA Carmel CNP erratum",
		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
	{
		/*
		 * The erratum work around is handled within the TRBE
		 * driver and can be applied per-cpu. So, we can allow
		 * a late CPU to come online with this erratum.
		 */
		.desc = "ARM erratum 2119858 or 2139208",
		.capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
	{
		.desc = "ARM erratum 2067961 or 2054223",
		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
	{
		.desc = "ARM erratum 2253138 or 2224489",
		.capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2645198
	{
		.desc = "ARM erratum 2645198",
		.capability = ARM64_WORKAROUND_2645198,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
	{
		.desc = "ARM erratum 2077057",
		.capability = ARM64_WORKAROUND_2077057,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
	{
		.desc = "ARM erratum 2064142",
		.capability = ARM64_WORKAROUND_2064142,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2457168
	{
		.desc = "ARM erratum 2457168",
		.capability = ARM64_WORKAROUND_2457168,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,

		/* Cortex-A510 r0p0-r1p1 */
		CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
	{
		.desc = "ARM erratum 2038923",
		.capability = ARM64_WORKAROUND_2038923,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1902691
	{
		.desc = "ARM erratum 1902691",
		.capability = ARM64_WORKAROUND_1902691,

		/* Cortex-A510 r0p0 - r0p1 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
	{
		.desc = "ARM erratum 1742098",
		.capability = ARM64_WORKAROUND_1742098,
		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2658417
	{
		.desc = "ARM erratum 2658417",
		.capability = ARM64_WORKAROUND_2658417,
		/* Cortex-A510 r0p0 - r1p1 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
		MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_3194386
	{
		.desc = "SSBS not fully self-synchronizing",
		.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
		ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
	{
		.desc = "ARM errata 2966298, 3117295",
		.capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
		/* Cortex-A520 r0p0 - r0p1 */
		ERRATA_MIDR_RANGE_LIST(erratum_spec_unpriv_load_list),
	},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
	{
		.desc = "AmpereOne erratum AC03_CPU_38",
		.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
		ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
	},
#endif
	{
		.desc = "Broken CNTVOFF_EL2",
		.capability = ARM64_WORKAROUND_QCOM_ORYON_CNTVOFF,
		ERRATA_MIDR_RANGE_LIST(((const struct midr_range[]) {
					MIDR_ALL_VERSIONS(MIDR_QCOM_ORYON_X1),
					{}
				})),
	},
	{
	}
};