/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
#include <asm/current.h>

/*
 * Call depth tracking for Intel SKL CPUs to address the RSB underflow
 * issue in software.
 *
 * The tracking does not use a counter. It uses arithmetic shift
 * right on call entry and logical shift left on return.
 *
 * The depth tracking variable is initialized to 0x8000.... when the call
 * depth is zero. The arithmetic shift right sign extends the MSB and
 * saturates after the 12th call. The shift count is 5 for both directions
 * so the tracking covers 12 nested calls.
 *
 *  Call
 *  0: 0x8000000000000000	0x0000000000000000
 *  1: 0xfc00000000000000	0xf000000000000000
 * ...
 * 11: 0xfffffffffffffff8	0xfffffffffffffc00
 * 12: 0xffffffffffffffff	0xffffffffffffffe0
 *
 * After a return buffer fill the depth is credited 12 calls before the
 * next stuffing has to take place.
 *
 * There is an inaccuracy for situations like this:
 *
 *  10 calls
 *   5 returns
 *   3 calls
 *   4 returns
 *   3 calls
 *   ....
 *
 * The shift count might cause this to be off by one in either direction,
 * but there is still a cushion vs. the RSB depth. The algorithm does not
 * claim to be perfect and it can be speculated around by the CPU, but it
 * is considered that it obfuscates the problem enough to make exploitation
 * extremely difficult.
 */
#define RET_DEPTH_SHIFT			5
#define RSB_RET_STUFF_LOOPS		16
#define RET_DEPTH_INIT			0x8000000000000000ULL
#define RET_DEPTH_INIT_FROM_CALL	0xfc00000000000000ULL
#define RET_DEPTH_CREDIT		0xffffffffffffffffULL

#ifdef CONFIG_CALL_THUNKS_DEBUG
# define CALL_THUNKS_DEBUG_INC_CALLS				\
	incq	%gs:__x86_call_count;
# define CALL_THUNKS_DEBUG_INC_RETS				\
	incq	%gs:__x86_ret_count;
# define CALL_THUNKS_DEBUG_INC_STUFFS				\
	incq	%gs:__x86_stuffs_count;
# define CALL_THUNKS_DEBUG_INC_CTXSW				\
	incq	%gs:__x86_ctxsw_count;
#else
# define CALL_THUNKS_DEBUG_INC_CALLS
# define CALL_THUNKS_DEBUG_INC_RETS
# define CALL_THUNKS_DEBUG_INC_STUFFS
# define CALL_THUNKS_DEBUG_INC_CTXSW
#endif

#if defined(CONFIG_CALL_DEPTH_TRACKING) && !defined(COMPILE_OFFSETS)

#include <asm/asm-offsets.h>

#define CREDIT_CALL_DEPTH					\
	movq	$-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define ASM_CREDIT_CALL_DEPTH					\
	movq	$-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define RESET_CALL_DEPTH					\
	xor	%eax, %eax;					\
	bts	$63, %rax;					\
	movq	%rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define RESET_CALL_DEPTH_FROM_CALL				\
	movb	$0xfc, %al;					\
	shl	$56, %rax;					\
	movq	%rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);	\
	CALL_THUNKS_DEBUG_INC_CALLS

#define INCREMENT_CALL_DEPTH					\
	sarq	$5, %gs:pcpu_hot + X86_call_depth;		\
	CALL_THUNKS_DEBUG_INC_CALLS

#define ASM_INCREMENT_CALL_DEPTH				\
	sarq	$5, PER_CPU_VAR(pcpu_hot + X86_call_depth);	\
	CALL_THUNKS_DEBUG_INC_CALLS

#else
#define CREDIT_CALL_DEPTH
#define ASM_CREDIT_CALL_DEPTH
#define RESET_CALL_DEPTH
#define INCREMENT_CALL_DEPTH
#define ASM_INCREMENT_CALL_DEPTH
#define RESET_CALL_DEPTH_FROM_CALL
#endif
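/*
 * Illustration only, not part of the kernel build: a plain C model of the
 * accounting above. The zero test on return mirrors what the return thunk
 * is assumed to do; the model_*() names are hypothetical.
 *
 *	u64 depth = RET_DEPTH_INIT;				// call depth 0
 *
 *	void model_call(void)					// INCREMENT_CALL_DEPTH
 *	{
 *		depth = (u64)((s64)depth >> RET_DEPTH_SHIFT);	// sarq $5
 *	}
 *
 *	void model_return(void)					// return thunk
 *	{
 *		depth <<= RET_DEPTH_SHIFT;			// shlq $5
 *		if (!depth) {					// tracked depth exhausted
 *			model_stuff_rsb();			// RSB_RET_STUFF_LOOPS slots
 *			depth = RET_DEPTH_CREDIT;		// CREDIT_CALL_DEPTH
 *		}
 *	}
 *
 * Twelve model_call()s saturate depth at 0xffffffffffffffff, matching the
 * table above, and the credit after a fill makes the variable look as if
 * 12 calls were outstanding before the next stuffing is needed.
 */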
/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', points at a
 * speculation trap (an int3 instruction) which captures the speculative
 * execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RETPOLINE_THUNK_SIZE	32
#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */

/*
 * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
 */
#define __FILL_RETURN_SLOT			\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	772f;				\
	int3;					\
772:

/*
 * Stuff the entire RSB.
 *
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#ifdef CONFIG_X86_64
#define __FILL_RETURN_BUFFER(reg, nr)			\
	mov	$(nr/2), reg;				\
771:							\
	__FILL_RETURN_SLOT				\
	__FILL_RETURN_SLOT				\
	add	$(BITS_PER_LONG/8) * 2, %_ASM_SP;	\
	dec	reg;					\
	jnz	771b;					\
	/* barrier for jnz misprediction */		\
	lfence;						\
	ASM_CREDIT_CALL_DEPTH				\
	CALL_THUNKS_DEBUG_INC_CTXSW
#else
/*
 * i386 doesn't unconditionally have LFENCE, as such it can't
 * do a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr)			\
	.rept nr;					\
	__FILL_RETURN_SLOT;				\
	.endr;						\
	add	$(BITS_PER_LONG/8) * nr, %_ASM_SP;
#endif

/*
 * Stuff a single RSB slot.
 *
 * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
 * forced to retire before letting a RET instruction execute.
 *
 * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
 * before this point.
 */
#define __FILL_ONE_RETURN				\
	__FILL_RETURN_SLOT				\
	add	$(BITS_PER_LONG/8), %_ASM_SP;		\
	lfence;
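/*
 * For illustration only: with reg=%rbx and nr=RSB_CLEAR_LOOPS (32), the
 * 64-bit __FILL_RETURN_BUFFER above expands to roughly the following
 * (local label numbers elided, call depth credit and debug counters left
 * out). Each dummy call pushes a return address pointing at an int3
 * speculation trap, populating one RSB entry; the explicit RSP adjustment
 * then drops the two on-stack return addresses so the stack ends up where
 * it started:
 *
 *	mov	$16, %rbx
 * loop:
 *	call	1f;	int3;	1:		// RSB entry
 *	call	2f;	int3;	2:		// RSB entry
 *	add	$16, %rsp			// drop both return addresses
 *	dec	%rbx
 *	jnz	loop
 *	lfence					// fence the jnz misprediction
 */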
#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
.Lhere_\@:
	.pushsection .discard.retpoline_safe
	.long .Lhere_\@ - .
	.popsection
.endm

/*
 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
 * vs RETBleed validation.
 */
#define ANNOTATE_UNRET_SAFE	ANNOTATE_RETPOLINE_SAFE

/*
 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
 * eventually turn into its own annotation.
 */
.macro VALIDATE_UNRET_END
#if defined(CONFIG_NOINSTR_VALIDATION) && \
	(defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
	ANNOTATE_RETPOLINE_SAFE
	nop
#endif
.endm

/*
 * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
 * to the retpoline thunk with a CS prefix when the register requires
 * a REX prefix byte to encode. Also see apply_retpolines().
 */
.macro __CS_PREFIX reg:req
	.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
	.ifc \reg,\rs
	.byte 0x2e
	.endif
	.endr
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 *
 * NOTE: these do not take kCFI into account and are thus not comparable to C
 * indirect calls, take care when using. The target of these should be an ENDBR
 * instruction irrespective of kCFI.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	__CS_PREFIX \reg
	jmp	__x86_indirect_thunk_\reg
#else
	jmp	*%\reg
	int3
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	__CS_PREFIX \reg
	call	__x86_indirect_thunk_\reg
#else
	call	*%\reg
#endif
.endm
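/*
 * For illustration only -- what the macros above emit under
 * CONFIG_RETPOLINE:
 *
 *	CALL_NOSPEC r11		->	.byte 0x2e			// CS prefix
 *					call __x86_indirect_thunk_r11	// 6 bytes total
 *
 *	CALL_NOSPEC rax		->	call __x86_indirect_thunk_rax	// plain 5 bytes
 *
 * The extra prefix on r8-r15 leaves room for the sequence that
 * apply_retpolines() may later patch in (e.g. lfence plus an indirect
 * call, which needs a REX byte for those registers). Without
 * CONFIG_RETPOLINE the macros emit a plain indirect call/jmp (JMP_NOSPEC
 * adds a trailing int3).
 */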
/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
		__stringify(nop;nop;__FILL_ONE_RETURN), \ftr2

.Lskip_rsb_\@:
.endm

#ifdef CONFIG_CPU_UNRET_ENTRY
#define CALL_ZEN_UNTRAIN_RET	"call zen_untrain_ret"
#else
#define CALL_ZEN_UNTRAIN_RET	""
#endif

/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
 * return thunk isn't mapped into the userspace tables (then again, AMD
 * typically has NO_MELTDOWN).
 *
 * While zen_untrain_ret() doesn't clobber any registers, it does require a
 * valid stack; entry_ibpb() will clobber AX, CX, DX.
 *
 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
 */
.macro UNTRAIN_RET
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
	defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
	VALIDATE_UNRET_END
	ALTERNATIVE_3 "",						\
		      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB,	\
		      __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
#endif

#ifdef CONFIG_CPU_SRSO
	ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
			  "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
#endif
.endm

.macro UNTRAIN_RET_FROM_CALL
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
	defined(CONFIG_CALL_DEPTH_TRACKING)
	VALIDATE_UNRET_END
	ALTERNATIVE_3 "",						\
		      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB,	\
		      __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH
#endif

#ifdef CONFIG_CPU_SRSO
	ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
			  "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
#endif
.endm

.macro CALL_DEPTH_ACCOUNT
#ifdef CONFIG_CALL_DEPTH_TRACKING
	ALTERNATIVE "",							\
		    __stringify(ASM_INCREMENT_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
#endif
.endm

#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	".long 999b - .\n\t"					\
	".popsection\n\t"

typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
extern retpoline_thunk_t __x86_indirect_thunk_array[];
extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];

extern void __x86_return_thunk(void);
extern void zen_untrain_ret(void);
extern void srso_untrain_ret(void);
extern void srso_untrain_ret_alias(void);
extern void entry_ibpb(void);

#ifdef CONFIG_CALL_THUNKS
extern void (*x86_return_thunk)(void);
#else
#define x86_return_thunk	(&__x86_return_thunk)
#endif

#ifdef CONFIG_CALL_DEPTH_TRACKING
extern void __x86_return_skl(void);

static inline void x86_set_skl_return_thunk(void)
{
	x86_return_thunk = &__x86_return_skl;
}

#define CALL_DEPTH_ACCOUNT					\
	ALTERNATIVE("",						\
		    __stringify(INCREMENT_CALL_DEPTH),		\
		    X86_FEATURE_CALL_DEPTH)

#ifdef CONFIG_CALL_THUNKS_DEBUG
DECLARE_PER_CPU(u64, __x86_call_count);
DECLARE_PER_CPU(u64, __x86_ret_count);
DECLARE_PER_CPU(u64, __x86_stuffs_count);
DECLARE_PER_CPU(u64, __x86_ctxsw_count);
#endif
#else
static inline void x86_set_skl_return_thunk(void) {}

#define CALL_DEPTH_ACCOUNT ""

#endif

#ifdef CONFIG_RETPOLINE

#define GEN(reg)						\
	extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg)						\
	extern retpoline_thunk_t __x86_indirect_call_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg)						\
	extern retpoline_thunk_t __x86_indirect_jump_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN
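/*
 * For illustration: <asm/GEN-for-each-reg.h> is assumed to invoke GEN()
 * once per general purpose register (the stack pointer excluded), so the
 * first block above expands to declarations along the lines of
 *
 *	extern retpoline_thunk_t __x86_indirect_thunk_rax;
 *	extern retpoline_thunk_t __x86_indirect_thunk_rcx;
 *	...
 *	extern retpoline_thunk_t __x86_indirect_thunk_r15;
 *
 * one per thunk in the arrays declared further up.
 */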
#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier, which is only available in newer GCC
 * versions; a sufficiently new compiler is guaranteed when
 * CONFIG_RETPOLINE is enabled.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"	lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	lea    4(%%esp), %%esp;\n"			\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC	"call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
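/*
 * Sketch of a C call site, for illustration only: "func", "arg" and "ret"
 * are hypothetical, and a real user must extend the clobber list to cover
 * all caller-saved registers touched by the callee.
 *
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (ret)
 *		     : THUNK_TARGET(func), "D" (arg)
 *		     : "memory");
 *
 * CALL_NOSPEC already contains the ANNOTATE_RETPOLINE_SAFE annotation, so
 * objtool accepts the indirect call in retpoline builds.
 */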
/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE,
	SPECTRE_V2_LFENCE,
	SPECTRE_V2_EIBRS,
	SPECTRE_V2_EIBRS_RETPOLINE,
	SPECTRE_V2_EIBRS_LFENCE,
	SPECTRE_V2_IBRS,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

extern u64 x86_pred_cmd;

static inline void indirect_branch_prediction_barrier(void)
{
	alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
extern void update_spec_ctrl_cond(u64 val);
extern u64 spec_ctrl_current(void);

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
			      X86_FEATURE_USE_IBRS_FW);			\
	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,		\
			      X86_FEATURE_USE_IBPB_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current(),			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)

DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_user_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_user_clear))
		mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */