/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
	mov	$(nr/2), reg;			\
771:						\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	772f;				\
773:	/* speculation trap */			\
	UNWIND_HINT_EMPTY;			\
	pause;					\
	lfence;					\
	jmp	773b;				\
772:						\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	774f;				\
775:	/* speculation trap */			\
	UNWIND_HINT_EMPTY;			\
	pause;					\
	lfence;					\
	jmp	775b;				\
774:						\
	add	$(BITS_PER_LONG/8) * 2, sp;	\
	dec	reg;				\
	jnz	771b;

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
	.Lannotate_\@:
	.pushsection .discard.retpoline_safe
	_ASM_PTR .Lannotate_\@
	.popsection
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
		      __stringify(jmp __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_AMD
#else
	jmp	*%\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
		      __stringify(call __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_AMD
#else
	call	*%\reg
#endif
.endm

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
	__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
.Lskip_rsb_\@:
#endif
.endm
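/*
 * Illustrative only: a typical invocation from context-switch or entry
 * assembly looks roughly like the line below (the scratch register and
 * the feature bit are the caller's choice, not mandated by this header):
 *
 *	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 *
 * When the feature bit is set this overwrites all RSB_CLEAR_LOOPS
 * entries; otherwise the whole sequence is skipped with a single jump.
 */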
#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	_ASM_PTR " 999b\n\t"					\
	".popsection\n\t"

#ifdef CONFIG_RETPOLINE
#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier which is only in newer GCC
 * which is ensured when CONFIG_RETPOLINE is defined.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_retpoline_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_AMD)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"	lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	lea    4(%%esp), %%esp;\n"			\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_AMD)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC	"call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
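/*
 * Illustrative only: how a C caller would use CALL_NOSPEC/THUNK_TARGET to
 * call through a function pointer. "fn" and "ret" are hypothetical names:
 *
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (ret)
 *		     : THUNK_TARGET(fn)
 *		     : "memory");
 *
 * Alternatives patching turns this into a plain indirect call, a call to
 * the retpoline thunk, or an lfence-preceded indirect call at boot.
 */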
/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE_GENERIC,
	SPECTRE_V2_RETPOLINE_AMD,
	SPECTRE_V2_IBRS_ENHANCED,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
	u64 val = PRED_CMD_IBPB;

	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	u64 val = x86_spec_ctrl_base;					\
									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)
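/*
 * Illustrative only: the expected bracketing around a firmware call,
 * with "efi_some_call" standing in for a real firmware entry point:
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = efi_some_call(args);
 *	firmware_restrict_branch_speculation_end();
 *
 * The start macro disables preemption before setting IBRS and the end
 * macro restores the MSR before re-enabling preemption, so the two must
 * always be used as a matched pair.
 */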
DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_user_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_user_clear))
		mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}

#endif /* __ASSEMBLY__ */

/*
 * Below is used in the eBPF JIT compiler and emits the byte sequence
 * for the following assembly:
 *
 * With retpolines configured:
 *
 *    callq do_rop
 *  spec_trap:
 *    pause
 *    lfence
 *    jmp spec_trap
 *  do_rop:
 *    mov %rcx,(%rsp) for x86_64
 *    mov %edx,(%esp) for x86_32
 *    retq
 *
 * Without retpolines configured:
 *
 *    jmp *%rcx for x86_64
 *    jmp *%edx for x86_32
 */
#ifdef CONFIG_RETPOLINE
# ifdef CONFIG_X86_64
#  define RETPOLINE_RCX_BPF_JIT_SIZE	17
#  define RETPOLINE_RCX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* callq do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT4(0x48, 0x89, 0x0C, 0x24); /* mov %rcx,(%rsp) */	\
	EMIT1(0xC3);             /* retq */			\
} while (0)
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* call do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT3(0x89, 0x14, 0x24); /* mov %edx,(%esp) */		\
	EMIT1(0xC3);             /* ret */			\
} while (0)
# endif
#else /* !CONFIG_RETPOLINE */
# ifdef CONFIG_X86_64
#  define RETPOLINE_RCX_BPF_JIT_SIZE	2
#  define RETPOLINE_RCX_BPF_JIT()				\
	EMIT2(0xFF, 0xE1);       /* jmp *%rcx */
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT()				\
	EMIT2(0xFF, 0xE2)	/* jmp *%edx */
# endif
#endif

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */