/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>

/*
 * Call depth tracking for Intel SKL CPUs to address the RSB underflow
 * issue in software.
 *
 * The tracking does not use a counter. It uses arithmetic shift
 * right on call entry and logical shift left on return.
 *
 * The depth tracking variable is initialized to 0x8000.... when the call
 * depth is zero. The arithmetic shift right sign extends the MSB and
 * saturates after the 12th call. The shift count is 5 for both directions
 * so the tracking covers 12 nested calls.
 *
 *  Call
 *  0: 0x8000000000000000	0x0000000000000000
 *  1: 0xfc00000000000000	0xf000000000000000
 * ...
 * 11: 0xfffffffffffffff8	0xfffffffffffffc00
 * 12: 0xffffffffffffffff	0xffffffffffffffe0
 *
 * After a return buffer fill the depth is credited 12 calls before the
 * next stuffing has to take place.
 *
 * There is an inaccuracy for situations like this:
 *
 *  10 calls
 *   5 returns
 *   3 calls
 *   4 returns
 *   3 calls
 *   ....
 *
 * The shift count might cause this to be off by one in either direction,
 * but there is still a cushion vs. the RSB depth. The algorithm does not
 * claim to be perfect and it can be speculated around by the CPU, but it
 * obfuscates the problem well enough to make exploitation extremely
 * difficult.
 */
#define RET_DEPTH_SHIFT			5
#define RSB_RET_STUFF_LOOPS		16
#define RET_DEPTH_INIT			0x8000000000000000ULL
#define RET_DEPTH_INIT_FROM_CALL	0xfc00000000000000ULL
#define RET_DEPTH_CREDIT		0xffffffffffffffffULL
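
/*
 * Sketch of the accounting in C-like pseudocode (illustrative only; the
 * real work is done by the asm macros below and by the return thunk, and
 * the zero test on the return side is an assumption made for clarity):
 *
 *	depth = (s64)depth >> RET_DEPTH_SHIFT;		on call entry
 *	depth <<= RET_DEPTH_SHIFT;			on return
 *	if (depth == 0)					RSB may run dry:
 *		stuff RSB_RET_STUFF_LOOPS entries, depth = RET_DEPTH_CREDIT;
 *
 * RET_DEPTH_INIT is the "depth zero" value, RET_DEPTH_INIT_FROM_CALL the
 * value after one call and RET_DEPTH_CREDIT the fully credited state.
 */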

#ifdef CONFIG_CALL_THUNKS_DEBUG
# define CALL_THUNKS_DEBUG_INC_CALLS \
	incq	PER_CPU_VAR(__x86_call_count);
# define CALL_THUNKS_DEBUG_INC_RETS \
	incq	PER_CPU_VAR(__x86_ret_count);
# define CALL_THUNKS_DEBUG_INC_STUFFS \
	incq	PER_CPU_VAR(__x86_stuffs_count);
# define CALL_THUNKS_DEBUG_INC_CTXSW \
	incq	PER_CPU_VAR(__x86_ctxsw_count);
#else
# define CALL_THUNKS_DEBUG_INC_CALLS
# define CALL_THUNKS_DEBUG_INC_RETS
# define CALL_THUNKS_DEBUG_INC_STUFFS
# define CALL_THUNKS_DEBUG_INC_CTXSW
#endif

#if defined(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) && !defined(COMPILE_OFFSETS)

#include <asm/asm-offsets.h>

#define CREDIT_CALL_DEPTH \
	movq	$-1, PER_CPU_VAR(__x86_call_depth);

#define RESET_CALL_DEPTH \
	xor	%eax, %eax; \
	bts	$63, %rax; \
	movq	%rax, PER_CPU_VAR(__x86_call_depth);

#define RESET_CALL_DEPTH_FROM_CALL \
	movb	$0xfc, %al; \
	shl	$56, %rax; \
	movq	%rax, PER_CPU_VAR(__x86_call_depth); \
	CALL_THUNKS_DEBUG_INC_CALLS

#define INCREMENT_CALL_DEPTH \
	sarq	$5, PER_CPU_VAR(__x86_call_depth); \
	CALL_THUNKS_DEBUG_INC_CALLS

#else
#define CREDIT_CALL_DEPTH
#define RESET_CALL_DEPTH
#define RESET_CALL_DEPTH_FROM_CALL
#define INCREMENT_CALL_DEPTH
#endif
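
/*
 * For reference: RESET_CALL_DEPTH materializes RET_DEPTH_INIT (only bit 63
 * set) via xor + bts, RESET_CALL_DEPTH_FROM_CALL materializes
 * RET_DEPTH_INIT_FROM_CALL (0xfc shifted into the top byte), and
 * CREDIT_CALL_DEPTH stores RET_DEPTH_CREDIT (-1). The open-coded sequences
 * are shorter than a 10-byte movabs of the full 64-bit constants.
 */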
103
104 /*
105 * Fill the CPU return stack buffer.
106 *
107 * Each entry in the RSB, if used for a speculative 'ret', contains an
108 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
109 *
110 * This is required in various cases for retpoline and IBRS-based
111 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
112 * eliminate potentially bogus entries from the RSB, and sometimes
113 * purely to ensure that it doesn't get empty, which on some CPUs would
114 * allow predictions from other (unwanted!) sources to be used.
115 *
116 * We define a CPP macro such that it can be used from both .S files and
117 * inline assembly. It's possible to do a .macro and then include that
118 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
119 */
120
121 #define RETPOLINE_THUNK_SIZE 32
122 #define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
123
124 /*
125 * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
126 */
127 #define __FILL_RETURN_SLOT \
128 ANNOTATE_INTRA_FUNCTION_CALL; \
129 call 772f; \
130 int3; \
131 772:
132
133 /*
134 * Stuff the entire RSB.
135 *
136 * Google experimented with loop-unrolling and this turned out to be
137 * the optimal version - two calls, each with their own speculation
138 * trap should their return address end up getting used, in a loop.
139 */
140 #ifdef CONFIG_X86_64
141 #define __FILL_RETURN_BUFFER(reg, nr) \
142 mov $(nr/2), reg; \
143 771: \
144 __FILL_RETURN_SLOT \
145 __FILL_RETURN_SLOT \
146 add $(BITS_PER_LONG/8) * 2, %_ASM_SP; \
147 dec reg; \
148 jnz 771b; \
149 /* barrier for jnz misprediction */ \
150 lfence; \
151 CREDIT_CALL_DEPTH \
152 CALL_THUNKS_DEBUG_INC_CTXSW
153 #else
154 /*
155 * i386 doesn't unconditionally have LFENCE, as such it can't
156 * do a loop.
157 */
158 #define __FILL_RETURN_BUFFER(reg, nr) \
159 .rept nr; \
160 __FILL_RETURN_SLOT; \
161 .endr; \
162 add $(BITS_PER_LONG/8) * nr, %_ASM_SP;
163 #endif
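
/*
 * Illustration of the 64-bit variant (not literal assembler output): each
 * loop iteration expands to roughly
 *
 *	call 1f; int3; 1: call 2f; int3; 2:
 *	add  $16, %rsp
 *
 * i.e. two RSB entries are created while the stack pointer is rewound, so
 * nr/2 iterations (16 for RSB_CLEAR_LOOPS) overwrite all 32 entries before
 * the trailing LFENCE fences off a mispredicted loop exit.
 */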

/*
 * Stuff a single RSB slot.
 *
 * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
 * forced to retire before letting a RET instruction execute.
 *
 * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
 * before this point.
 */
#define __FILL_ONE_RETURN \
	__FILL_RETURN_SLOT \
	add	$(BITS_PER_LONG/8), %_ASM_SP; \
	lfence;

#ifdef __ASSEMBLER__

/*
 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
 * vs RETBleed validation.
 */
#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE

/*
 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
 * eventually turn into its own annotation.
 */
.macro VALIDATE_UNRET_END
#if defined(CONFIG_NOINSTR_VALIDATION) && \
	(defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO))
	ANNOTATE_RETPOLINE_SAFE
	nop
#endif
.endm

/*
 * Emits a conditional CS prefix that is compatible with
 * -mindirect-branch-cs-prefix.
 */
.macro __CS_PREFIX reg:req
	.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
	.ifc \reg,\rs
	.byte 0x2e
	.endif
	.endr
.endm
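
/*
 * For example, "__CS_PREFIX r11" emits the 0x2e CS-override byte in front
 * of the following thunk call, while "__CS_PREFIX rax" emits nothing: only
 * the REX-prefixed registers r8-r15 get the extra byte.
 */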

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 *
 * NOTE: these do not take kCFI into account and are thus not comparable to C
 * indirect calls, take care when using. The target of these should be an ENDBR
 * instruction irrespective of kCFI.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_MITIGATION_RETPOLINE
	__CS_PREFIX \reg
	jmp	__x86_indirect_thunk_\reg
#else
	jmp	*%\reg
	int3
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_MITIGATION_RETPOLINE
	__CS_PREFIX \reg
	call	__x86_indirect_thunk_\reg
#else
	call	*%\reg
#endif
.endm
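
/*
 * Illustrative .S usage (hypothetical "func_ptr" symbol):
 *
 *	movq	func_ptr(%rip), %r11
 *	CALL_NOSPEC r11
 *
 * With CONFIG_MITIGATION_RETPOLINE this assembles to
 * "cs call __x86_indirect_thunk_r11", otherwise to a plain "call *%r11".
 */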

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
		__stringify(nop;nop;__FILL_ONE_RETURN), \ftr2

.Lskip_rsb_\@:
.endm
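
/*
 * Example invocation (roughly what the context switch path does):
 *
 *	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 *
 * Depending on the feature bits this stuffs the whole RSB (clobbering the
 * scratch register), stuffs a single slot, or is patched into the skip
 * jump and does nothing.
 */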

/*
 * The CALL to srso_alias_untrain_ret() must be patched in directly at
 * the spot where untraining must be done, i.e., srso_alias_untrain_ret()
 * must be the target of a CALL instruction instead of indirectly
 * jumping to a wrapper which then calls it. Therefore, this macro is
 * called outside of __UNTRAIN_RET below, for the time being, before the
 * kernel can support nested alternatives with arbitrary nesting.
 */
.macro CALL_UNTRAIN_RET
#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)
	ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \
		"call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
#endif
.endm

/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
 * return thunk isn't mapped into the userspace tables (then again, AMD
 * typically has NO_MELTDOWN).
 *
 * retbleed_untrain_ret() clobbers nothing but requires a stack, while
 * entry_ibpb() clobbers AX, CX and DX.
 *
 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
 */
.macro __UNTRAIN_RET ibpb_feature, call_depth_insns
#if defined(CONFIG_MITIGATION_RETHUNK) || defined(CONFIG_MITIGATION_IBPB_ENTRY)
	VALIDATE_UNRET_END
	CALL_UNTRAIN_RET
	ALTERNATIVE_2 "", \
		"call entry_ibpb", \ibpb_feature, \
		__stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
#endif
.endm

#define UNTRAIN_RET \
	__UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH)

#define UNTRAIN_RET_VM \
	__UNTRAIN_RET X86_FEATURE_IBPB_ON_VMEXIT, __stringify(RESET_CALL_DEPTH)

#define UNTRAIN_RET_FROM_CALL \
	__UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH_FROM_CALL)

.macro CALL_DEPTH_ACCOUNT
#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
	ALTERNATIVE "", \
		__stringify(INCREMENT_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
#endif
.endm

/*
 * Macro to execute the VERW instruction, which mitigates transient data
 * sampling attacks such as MDS. On affected systems a microcode update
 * overloads the VERW instruction to also clear the CPU buffers. VERW
 * clobbers CFLAGS.ZF.
 *
 * Note: Only the memory operand variant of VERW clears the CPU buffers.
 */
.macro CLEAR_CPU_BUFFERS
#ifdef CONFIG_X86_64
	ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
#else
	/*
	 * In 32bit mode, the memory operand must be a %cs reference. The data
	 * segments may not be usable (vm86 mode), and the stack segment may not
	 * be flat (ESPFIX32).
	 */
	ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
#endif
.endm
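
/*
 * Illustrative placement on an exit-to-userspace path (not a new call
 * site):
 *
 *	CLEAR_CPU_BUFFERS
 *	iretq
 *
 * Since VERW clobbers ZF, the macro must not be placed between a
 * flag-setting instruction and the conditional branch consuming it.
 */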

#ifdef CONFIG_X86_64
.macro CLEAR_BRANCH_HISTORY
	ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
.endm

.macro CLEAR_BRANCH_HISTORY_VMEXIT
	ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT
.endm
#else
#define CLEAR_BRANCH_HISTORY
#define CLEAR_BRANCH_HISTORY_VMEXIT
#endif

#else /* __ASSEMBLER__ */

typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
extern retpoline_thunk_t __x86_indirect_thunk_array[];
extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];

#ifdef CONFIG_MITIGATION_RETHUNK
extern void __x86_return_thunk(void);
#else
static inline void __x86_return_thunk(void) {}
#endif

#ifdef CONFIG_MITIGATION_UNRET_ENTRY
extern void retbleed_return_thunk(void);
#else
static inline void retbleed_return_thunk(void) {}
#endif

extern void srso_alias_untrain_ret(void);

#ifdef CONFIG_MITIGATION_SRSO
extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);
#else
static inline void srso_return_thunk(void) {}
static inline void srso_alias_return_thunk(void) {}
#endif

extern void entry_untrain_ret(void);
extern void entry_ibpb(void);

#ifdef CONFIG_X86_64
extern void clear_bhb_loop(void);
#endif

extern void (*x86_return_thunk)(void);

extern void __warn_thunk(void);

#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
extern void call_depth_return_thunk(void);

#define CALL_DEPTH_ACCOUNT \
	ALTERNATIVE("", \
		    __stringify(INCREMENT_CALL_DEPTH), \
		    X86_FEATURE_CALL_DEPTH)

DECLARE_PER_CPU_CACHE_HOT(u64, __x86_call_depth);

#ifdef CONFIG_CALL_THUNKS_DEBUG
DECLARE_PER_CPU(u64, __x86_call_count);
DECLARE_PER_CPU(u64, __x86_ret_count);
DECLARE_PER_CPU(u64, __x86_stuffs_count);
DECLARE_PER_CPU(u64, __x86_ctxsw_count);
#endif
#else /* !CONFIG_MITIGATION_CALL_DEPTH_TRACKING */

static inline void call_depth_return_thunk(void) {}
#define CALL_DEPTH_ACCOUNT ""

#endif /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */

#ifdef CONFIG_MITIGATION_RETPOLINE

#define GEN(reg) \
	extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg) \
	extern retpoline_thunk_t __x86_indirect_call_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg) \
	extern retpoline_thunk_t __x86_indirect_jump_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#ifdef CONFIG_X86_64

/*
 * Emits a conditional CS prefix that is compatible with
 * -mindirect-branch-cs-prefix.
 */
#define __CS_PREFIX(reg) \
	".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n" \
	".ifc \\rs," reg "\n" \
	".byte 0x2e\n" \
	".endif\n" \
	".endr\n"

/*
 * Inline asm uses the %V modifier, which is only available in newer GCC
 * versions; that is ensured when CONFIG_MITIGATION_RETPOLINE is defined.
 */
#define CALL_NOSPEC	__CS_PREFIX("%V[thunk_target]") \
			"call __x86_indirect_thunk_%V[thunk_target]\n"

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC \
	ALTERNATIVE_2( \
	ANNOTATE_RETPOLINE_SAFE \
	"call *%[thunk_target]\n", \
	" jmp 904f;\n" \
	" .align 16\n" \
	"901: call 903f;\n" \
	"902: pause;\n" \
	" lfence;\n" \
	" jmp 902b;\n" \
	" .align 16\n" \
	"903: lea 4(%%esp), %%esp;\n" \
	" pushl %[thunk_target];\n" \
	" ret;\n" \
	" .align 16\n" \
	"904: call 901b;\n", \
	X86_FEATURE_RETPOLINE, \
	"lfence;\n" \
	ANNOTATE_RETPOLINE_SAFE \
	"call *%[thunk_target]\n", \
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
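
/*
 * Illustrative C usage (hypothetical function pointer "fn"; a real call
 * site must also list every caller-saved register the target may clobber):
 *
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (ret)
 *		     : THUNK_TARGET(fn)
 *		     : "memory");
 *
 * THUNK_TARGET() supplies the [thunk_target] operand with the constraint
 * matching the selected CALL_NOSPEC flavour.
 */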

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE,
	SPECTRE_V2_LFENCE,
	SPECTRE_V2_EIBRS,
	SPECTRE_V2_EIBRS_RETPOLINE,
	SPECTRE_V2_EIBRS_LFENCE,
	SPECTRE_V2_IBRS,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

extern u64 x86_pred_cmd;

static inline void indirect_branch_prediction_barrier(void)
{
	alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
extern void update_spec_ctrl_cond(u64 val);
extern u64 spec_ctrl_current(void);

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start() \
do { \
	preempt_disable(); \
	alternative_msr_write(MSR_IA32_SPEC_CTRL, \
			      spec_ctrl_current() | SPEC_CTRL_IBRS, \
			      X86_FEATURE_USE_IBRS_FW); \
	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, \
			      X86_FEATURE_USE_IBPB_FW); \
} while (0)

#define firmware_restrict_branch_speculation_end() \
do { \
	alternative_msr_write(MSR_IA32_SPEC_CTRL, \
			      spec_ctrl_current(), \
			      X86_FEATURE_USE_IBRS_FW); \
	preempt_enable(); \
} while (0)
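
/*
 * Usage pattern (illustrative, e.g. around a firmware runtime call):
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = some_firmware_call(...);
 *	firmware_restrict_branch_speculation_end();
 *
 * The start/end pair also disables and re-enables preemption so the IBRS
 * state cannot leak to another task in between.
 */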

DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(switch_vcpu_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

extern u16 mds_verw_sel;

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}
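
/*
 * Illustrative caller (mwait-style idle entry, assumed shape):
 *
 *	mds_idle_clear_cpu_buffers();
 *	__monitor(addr, 0, 0);
 *	__mwait(eax, ecx);
 *
 * so that stale buffer contents are not exposed to the sibling thread
 * while this CPU is idle.
 */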

#endif /* __ASSEMBLER__ */

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */