// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behave
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as the CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}
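/*
 * With SCTLR_EL1.UCT clear, EL0 reads of CTR_EL0 trap to EL1, allowing
 * the kernel's trap handler to emulate the access with the sanitised
 * system-wide value (arm64_ftr_reg_ctrel0.sys_val) on CPUs whose raw
 * copy differs.
 */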
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];

static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	/*
	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
	 * we're a guest. Skip the hyp-vectors work.
	 */
	if (!hyp_vecs_start) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */

#include <uapi/linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
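/*
 * Each "bl . + 4" branch-and-links to the immediately following
 * instruction, so the sequence below stuffs the return-address (link
 * stack) predictor with benign entries, displacing anything an attacker
 * may have trained into it.
 */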
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

/*
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return -1;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_hvc_arch_workaround_1;
			/* This is a guest, no need to patch KVM vectors */
			smccc_start = NULL;
			smccc_end = NULL;
			break;
		default:
			return -1;
		}
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_smc_arch_workaround_1;
			smccc_start = __smccc_workaround_1_smc_start;
			smccc_end = __smccc_workaround_1_smc_end;
			break;
		default:
			return -1;
		}
		break;

	default:
		return -1;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
		install_bp_hardening_cb(cb, smccc_start, smccc_end);

	return 1;
}

DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);
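/*
 * Alternative-instruction callback for the ARCH_WORKAROUND_2 call sites
 * in the entry code: the patched instruction becomes an HVC or an SMC,
 * matching the conduit PSCI was probed with. If there is no conduit,
 * the original instruction (a NOP) is left in place and the firmware
 * call is skipped.
 */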
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

void arm64_set_ssbd_mitigation(bool state)
{
	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
		pr_info_once("SSBD disabled by kernel configuration\n");
		return;
	}

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}

static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;
	bool this_cpu_safe = false;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (cpu_mitigations_off())
		ssbd_state = ARM64_SSBD_FORCE_DISABLE;

	/* delay setting __ssb_safe until we get a firmware response */
	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
		this_cpu_safe = true;

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (!this_cpu_safe)
			__ssb_safe = false;
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;

	/* machines with mixed mitigation requirements must not return this */
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		__ssb_safe = false;
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}

/* known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{},
};
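/*
 * Cortex-A76 erratum 1463225 only needs handling when the kernel itself
 * runs at EL2 (VHE), hence the is_kernel_in_hyp_mode() check below. The
 * per-CPU flag is raised around the syscall-entry workaround elsewhere
 * in the kernel, so the resulting debug exception can be told apart from
 * a genuine one.
 */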
#ifdef CONFIG_ARM64_ERRATUM_1463225
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	u32 midr = read_cpuid_id();
	/* Cortex-A76 r0p0 - r3p1 */
	struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)			\
	.matches = is_affected_midr_range,		\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)			\
	.matches = is_affected_midr_range_list,		\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
	CAP_MIDR_RANGE_LIST(midr_list)
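/*
 * For illustration, ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4) as
 * used in the erratum 843419 entry below expands, via ERRATA_MIDR_RANGE()
 * and CAP_MIDR_RANGE(), to:
 *
 *	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 *	.matches = is_affected_midr_range,
 *	.midr_range = MIDR_RANGE(MIDR_CORTEX_A53, 0, 0, 0, 4),
 */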
/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{ /* sentinel */ }
};

/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	__spectrev2_safe = false;

	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
		__hardenbp_enab = false;
		return false;
	}

	/* forced off */
	if (__nospectre_v2 || cpu_mitigations_off()) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		__hardenbp_enab = false;
		return false;
	}

	if (need_wa < 0) {
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}

	return (need_wa > 0);
}

#ifdef CONFIG_HARDEN_EL2_VECTORS

static const struct midr_range arm64_harden_el2_vectors[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI

static const struct midr_range arm64_repeat_tlbi_cpus[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 0, 0, 0, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
#endif
	{},
};

#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01]: ARM erratum 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	{},
};
#endif
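/*
 * The errata table proper. The cpufeature code matches each entry against
 * every CPU as it comes up; when an entry's ->matches() fires, its
 * optional ->cpu_enable() hook is run on the affected CPUs to put the
 * workaround in place.
 */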
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, ARM erratum 1286807",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		ERRATA_MIDR_RANGE_LIST(arm64_repeat_tlbi_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = check_branch_predictor,
	},
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
	},
#endif
	{
		.desc = "Speculative Store Bypass Disable",
		.capability = ARM64_SSBD,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
		.midr_range_list = arm64_ssb_cpus,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1165522
	{
		/* Cortex-A76 r0p0 to r2p0 */
		.desc = "ARM erratum 1165522",
		.capability = ARM64_WORKAROUND_1165522,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
	},
#endif
	{
	}
};
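/*
 * sysfs vulnerability reporting: these override the weak defaults in
 * drivers/base/cpu.c and back the files under
 * /sys/devices/system/cpu/vulnerabilities/.
 */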
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	if (__spectrev2_safe)
		return sprintf(buf, "Not affected\n");

	if (__hardenbp_enab)
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	if (__ssb_safe)
		return sprintf(buf, "Not affected\n");

	switch (ssbd_state) {
	case ARM64_SSBD_KERNEL:
	case ARM64_SSBD_FORCE_ENABLE:
		if (IS_ENABLED(CONFIG_ARM64_SSBD))
			return sprintf(buf,
			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
	}

	return sprintf(buf, "Vulnerable\n");
}