/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Interface for managing mitigations for Spectre vulnerabilities.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ASM_SPECTRE_H
#define __ASM_SPECTRE_H

#define BP_HARDEN_EL2_SLOTS 4
#define __BP_HARDEN_HYP_VECS_SZ ((BP_HARDEN_EL2_SLOTS - 1) * SZ_2K)

#ifndef __ASSEMBLY__
#include <linux/smp.h>
#include <asm/percpu.h>

#include <asm/cpufeature.h>
#include <asm/virt.h>

/*
 * Watch out, ordering is important here: code that combines per-CPU
 * results keeps the numerically highest state.
 */
enum mitigation_state {
	SPECTRE_UNAFFECTED,
	SPECTRE_MITIGATED,
	SPECTRE_VULNERABLE,
};

struct pt_regs;
struct task_struct;

/*
 * Note: the order of this enum corresponds to __bp_harden_hyp_vecs and
 * we rely on having the direct vectors first.
 */
enum arm64_hyp_spectre_vector {
	/*
	 * Take exceptions directly to __kvm_hyp_vector. This must be
	 * 0 so that it is used by default when mitigations are not needed.
	 */
	HYP_VECTOR_DIRECT,

	/*
	 * Bounce via a slot in the hypervisor text mapping of
	 * __bp_harden_hyp_vecs, which contains an SMC call.
	 */
	HYP_VECTOR_SPECTRE_DIRECT,

	/*
	 * Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
	 * next to the idmap page.
	 */
	HYP_VECTOR_INDIRECT,

	/*
	 * Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
	 * next to the idmap page, which contains an SMC call.
	 */
	HYP_VECTOR_SPECTRE_INDIRECT,
};

typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
	enum arm64_hyp_spectre_vector	slot;
	bp_hardening_cb_t		fn;
};

DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

/* Called during entry so must be __always_inline */
static __always_inline void arm64_apply_bp_hardening(void)
{
	struct bp_hardening_data *d;

	if (!alternative_has_cap_unlikely(ARM64_SPECTRE_V2))
		return;

	d = this_cpu_ptr(&bp_hardening_data);
	if (d->fn)
		d->fn();
}

enum mitigation_state arm64_get_spectre_v2_state(void);
bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

bool has_spectre_v3a(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

enum mitigation_state arm64_get_spectre_v4_state(void);
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
void spectre_v4_enable_task_mitigation(struct task_struct *tsk);

enum mitigation_state arm64_get_meltdown_state(void);

enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);

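/*
 * The hooks below follow the arm64 alternative_cb_t patching-callback
 * signature (see <asm/alternative-macros.h>); the alternatives framework
 * invokes them at patch time to rewrite the mitigation sequences in place.
 */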
void spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt, __le32 *origptr,
					   __le32 *updptr, int nr_inst);
void smccc_patch_fw_mitigation_conduit(struct alt_instr *alt, __le32 *origptr,
				       __le32 *updptr, int nr_inst);
void spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt, __le32 *origptr,
					      __le32 *updptr, int nr_inst);
void spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt, __le32 *origptr,
					     __le32 *updptr, int nr_inst);
void spectre_bhb_patch_loop_iter(struct alt_instr *alt,
				 __le32 *origptr, __le32 *updptr, int nr_inst);
void spectre_bhb_patch_wa3(struct alt_instr *alt,
			   __le32 *origptr, __le32 *updptr, int nr_inst);
void spectre_bhb_patch_clearbhb(struct alt_instr *alt,
				__le32 *origptr, __le32 *updptr, int nr_inst);

#endif	/* __ASSEMBLY__ */
#endif	/* __ASM_SPECTRE_H */