/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
#include <asm/current.h>

/*
 * Call depth tracking for Intel SKL CPUs to address the RSB underflow
 * issue in software.
 *
 * The tracking does not use a counter. It uses arithmetic shift
 * right on call entry and logical shift left on return.
 *
 * The depth tracking variable is initialized to 0x8000.... when the call
 * depth is zero. The arithmetic shift right sign extends the MSB and
 * saturates after the 12th call. The shift count is 5 for both directions
 * so the tracking covers 12 nested calls.
 *
 *  Call
 *  0: 0x8000000000000000	0x0000000000000000
 *  1: 0xfc00000000000000	0xf000000000000000
 * ...
 * 11: 0xfffffffffffffff8	0xfffffffffffffc00
 * 12: 0xffffffffffffffff	0xffffffffffffffe0
 *
 * After a return buffer fill the depth is credited 12 calls before the
 * next stuffing has to take place.
 *
 * There is an inaccuracy for situations like this:
 *
 *  10 calls
 *   5 returns
 *   3 calls
 *   4 returns
 *   3 calls
 *   ....
 *
 * The shift count might cause this to be off by one in either direction,
 * but there is still a cushion vs. the RSB depth. The algorithm does not
 * claim to be perfect and it can be speculated around by the CPU, but it
 * is considered to obfuscate the problem enough to make exploitation
 * extremely difficult.
 */
#define RET_DEPTH_SHIFT			5
#define RSB_RET_STUFF_LOOPS		16
#define RET_DEPTH_INIT			0x8000000000000000ULL
#define RET_DEPTH_INIT_FROM_CALL	0xfc00000000000000ULL
#define RET_DEPTH_CREDIT		0xffffffffffffffffULL
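
/*
 * Illustrative only (not part of the kernel build): a minimal stand-alone
 * user-space C model of the shift-based tracking described above. It
 * assumes the arithmetic right-shift behaviour of gcc/clang on x86 and
 * demonstrates how sar-by-5 saturates the tracking value to all ones
 * (RET_DEPTH_CREDIT) after about a dozen nested calls, while shl-by-5
 * credits returns.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint64_t depth = 0x8000000000000000ULL;	/* RET_DEPTH_INIT */

static void track_call(void)
{
	/* arithmetic shift right: sign-extends from the MSB, saturates */
	depth = (uint64_t)((int64_t)depth >> 5);
}

static void track_ret(void)
{
	/* logical shift left: credits one return */
	depth <<= 5;
}

int main(void)
{
	int i;

	for (i = 1; i <= 13; i++) {
		track_call();
		printf("call %2d: 0x%016llx\n", i, (unsigned long long)depth);
	}
	for (i = 1; i <= 3; i++) {
		track_ret();
		printf("ret  %2d: 0x%016llx\n", i, (unsigned long long)depth);
	}
	return 0;
}
#endif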

#ifdef CONFIG_CALL_THUNKS_DEBUG
# define CALL_THUNKS_DEBUG_INC_CALLS				\
	incq	%gs:__x86_call_count;
# define CALL_THUNKS_DEBUG_INC_RETS				\
	incq	%gs:__x86_ret_count;
# define CALL_THUNKS_DEBUG_INC_STUFFS				\
	incq	%gs:__x86_stuffs_count;
# define CALL_THUNKS_DEBUG_INC_CTXSW				\
	incq	%gs:__x86_ctxsw_count;
#else
# define CALL_THUNKS_DEBUG_INC_CALLS
# define CALL_THUNKS_DEBUG_INC_RETS
# define CALL_THUNKS_DEBUG_INC_STUFFS
# define CALL_THUNKS_DEBUG_INC_CTXSW
#endif

#if defined(CONFIG_CALL_DEPTH_TRACKING) && !defined(COMPILE_OFFSETS)

#include <asm/asm-offsets.h>

#define CREDIT_CALL_DEPTH					\
	movq	$-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define ASM_CREDIT_CALL_DEPTH					\
	movq	$-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define RESET_CALL_DEPTH					\
	xor	%eax, %eax;					\
	bts	$63, %rax;					\
	movq	%rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define RESET_CALL_DEPTH_FROM_CALL				\
	movb	$0xfc, %al;					\
	shl	$56, %rax;					\
	movq	%rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);	\
	CALL_THUNKS_DEBUG_INC_CALLS

#define INCREMENT_CALL_DEPTH					\
	sarq	$5, %gs:pcpu_hot + X86_call_depth;		\
	CALL_THUNKS_DEBUG_INC_CALLS

#define ASM_INCREMENT_CALL_DEPTH				\
	sarq	$5, PER_CPU_VAR(pcpu_hot + X86_call_depth);	\
	CALL_THUNKS_DEBUG_INC_CALLS

#else
#define CREDIT_CALL_DEPTH
#define ASM_CREDIT_CALL_DEPTH
#define RESET_CALL_DEPTH
#define INCREMENT_CALL_DEPTH
#define ASM_INCREMENT_CALL_DEPTH
#define RESET_CALL_DEPTH_FROM_CALL
#endif

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RETPOLINE_THUNK_SIZE	32
#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */

/*
 * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
 */
#define __FILL_RETURN_SLOT			\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	772f;				\
	int3;					\
772:

/*
 * Stuff the entire RSB.
 *
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#ifdef CONFIG_X86_64
#define __FILL_RETURN_BUFFER(reg, nr)			\
	mov	$(nr/2), reg;				\
771:							\
	__FILL_RETURN_SLOT				\
	__FILL_RETURN_SLOT				\
	add	$(BITS_PER_LONG/8) * 2, %_ASM_SP;	\
	dec	reg;					\
	jnz	771b;					\
	/* barrier for jnz misprediction */		\
	lfence;						\
	ASM_CREDIT_CALL_DEPTH				\
	CALL_THUNKS_DEBUG_INC_CTXSW
#else
/*
 * i386 doesn't unconditionally have LFENCE, as such it can't
 * do a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr)			\
	.rept nr;					\
	__FILL_RETURN_SLOT;				\
	.endr;						\
	add	$(BITS_PER_LONG/8) * nr, %_ASM_SP;
#endif

/*
 * Stuff a single RSB slot.
 *
 * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
 * forced to retire before letting a RET instruction execute.
 *
 * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
 * before this point.
 */
#define __FILL_ONE_RETURN				\
	__FILL_RETURN_SLOT				\
	add	$(BITS_PER_LONG/8), %_ASM_SP;		\
	lfence;

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
.Lhere_\@:
	.pushsection .discard.retpoline_safe
	.long .Lhere_\@
	.popsection
.endm

/*
 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
 * vs RETBleed validation.
 */
#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE

/*
 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
 * eventually turn into its own annotation.
 */
.macro VALIDATE_UNRET_END
#if defined(CONFIG_NOINSTR_VALIDATION) && \
	(defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
	ANNOTATE_RETPOLINE_SAFE
	nop
#endif
.endm

/*
 * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
 * to the retpoline thunk with a CS prefix when the register requires
 * a REX prefix byte to encode. Also see apply_retpolines().
 */
.macro __CS_PREFIX reg:req
	.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
	.ifc \reg,\rs
	.byte 0x2e
	.endif
	.endr
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 *
 * NOTE: these do not take kCFI into account and are thus not comparable to C
 * indirect calls, take care when using. The target of these should be an ENDBR
 * instruction irrespective of kCFI.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	__CS_PREFIX \reg
	jmp	__x86_indirect_thunk_\reg
#else
	jmp	*%\reg
	int3
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	__CS_PREFIX \reg
	call	__x86_indirect_thunk_\reg
#else
	call	*%\reg
#endif
.endm

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
		__stringify(nop;nop;__FILL_ONE_RETURN), \ftr2

.Lskip_rsb_\@:
.endm

#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
#define CALL_UNTRAIN_RET	"call entry_untrain_ret"
#else
#define CALL_UNTRAIN_RET	""
#endif

/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
 * return thunk isn't mapped into the userspace tables (then again, AMD
 * typically has NO_MELTDOWN).
 *
 * While retbleed_untrain_ret() doesn't clobber any registers, it does
 * require a stack; entry_ibpb() will clobber AX, CX and DX.
 *
 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
 */
.macro __UNTRAIN_RET ibpb_feature, call_depth_insns
#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
	VALIDATE_UNRET_END
	ALTERNATIVE_3 "",						\
		      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,		\
		      "call entry_ibpb", \ibpb_feature,			\
		     __stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
#endif
.endm

#define UNTRAIN_RET \
	__UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH)

#define UNTRAIN_RET_VM \
	__UNTRAIN_RET X86_FEATURE_IBPB_ON_VMEXIT, __stringify(RESET_CALL_DEPTH)

#define UNTRAIN_RET_FROM_CALL \
	__UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH_FROM_CALL)


.macro CALL_DEPTH_ACCOUNT
#ifdef CONFIG_CALL_DEPTH_TRACKING
	ALTERNATIVE "",							\
		    __stringify(ASM_INCREMENT_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
#endif
.endm

/*
 * Macro to execute a VERW instruction that mitigates transient data
 * sampling attacks such as MDS. On affected systems a microcode update
 * overloads the VERW instruction to also clear the CPU buffers. VERW
 * clobbers CFLAGS.ZF.
 *
 * Note: Only the memory operand variant of VERW clears the CPU buffers.
 */
.macro CLEAR_CPU_BUFFERS
	ALTERNATIVE "", __stringify(verw _ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF
.endm

#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	".long 999b\n\t"					\
	".popsection\n\t"

typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
extern retpoline_thunk_t __x86_indirect_thunk_array[];
extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];

#ifdef CONFIG_RETHUNK
extern void __x86_return_thunk(void);
#else
static inline void __x86_return_thunk(void) {}
#endif

#ifdef CONFIG_CPU_UNRET_ENTRY
extern void retbleed_return_thunk(void);
#else
static inline void retbleed_return_thunk(void) {}
#endif

#ifdef CONFIG_CPU_SRSO
extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);
#else
static inline void srso_return_thunk(void) {}
static inline void srso_alias_return_thunk(void) {}
#endif

extern void entry_untrain_ret(void);
extern void entry_ibpb(void);

extern void (*x86_return_thunk)(void);

#ifdef CONFIG_CALL_DEPTH_TRACKING
extern void call_depth_return_thunk(void);

#define CALL_DEPTH_ACCOUNT					\
	ALTERNATIVE("",						\
		    __stringify(INCREMENT_CALL_DEPTH),		\
		    X86_FEATURE_CALL_DEPTH)

#ifdef CONFIG_CALL_THUNKS_DEBUG
DECLARE_PER_CPU(u64, __x86_call_count);
DECLARE_PER_CPU(u64, __x86_ret_count);
DECLARE_PER_CPU(u64, __x86_stuffs_count);
DECLARE_PER_CPU(u64, __x86_ctxsw_count);
#endif
#else /* !CONFIG_CALL_DEPTH_TRACKING */

static inline void call_depth_return_thunk(void) {}
#define CALL_DEPTH_ACCOUNT ""

#endif /* CONFIG_CALL_DEPTH_TRACKING */
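
/*
 * Illustrative only (not part of the kernel build): CALL_DEPTH_ACCOUNT
 * expands to an ALTERNATIVE() string, so C code that emits its own call
 * sequence in inline asm could account for it as sketched below; the
 * demo_* name is hypothetical.
 */
#if 0
static __always_inline void demo_account_one_call(void)
{
	/*
	 * Patched to the sarq accounting sequence when
	 * X86_FEATURE_CALL_DEPTH is enabled, NOPs otherwise.
	 */
	asm volatile(CALL_DEPTH_ACCOUNT);
}
#endif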
#ifdef CONFIG_RETPOLINE

#define GEN(reg) \
	extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg)						\
	extern retpoline_thunk_t __x86_indirect_call_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg)						\
	extern retpoline_thunk_t __x86_indirect_jump_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier, which is only available in newer
 * GCC versions; a new enough compiler is guaranteed when
 * CONFIG_RETPOLINE is defined.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"	lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	lea    4(%%esp), %%esp;\n"			\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
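
/*
 * Illustrative only (not part of the kernel build): a minimal sketch of
 * calling a function pointer through CALL_NOSPEC/THUNK_TARGET. The demo_*
 * name is hypothetical and the clobber list is simplified; a real caller
 * must respect the C calling convention of the target.
 */
#if 0
static inline unsigned long demo_call_nospec(unsigned long (*fn)(void))
{
	unsigned long ret;

	asm volatile(CALL_NOSPEC
		     : "=a" (ret)
		     : THUNK_TARGET(fn)
		     : "memory", "cc");
	return ret;
}
#endif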

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE,
	SPECTRE_V2_LFENCE,
	SPECTRE_V2_EIBRS,
	SPECTRE_V2_EIBRS_RETPOLINE,
	SPECTRE_V2_EIBRS_LFENCE,
	SPECTRE_V2_IBRS,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

extern u64 x86_pred_cmd;

static inline void indirect_branch_prediction_barrier(void)
{
	alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
extern void update_spec_ctrl_cond(u64 val);
extern u64 spec_ctrl_current(void);

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
			      X86_FEATURE_USE_IBRS_FW);			\
	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,		\
			      X86_FEATURE_USE_IBPB_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current(),			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)
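
/*
 * Illustrative only (not part of the kernel build): the intended
 * bracketing pattern around a firmware call, as used by e.g. the EFI
 * wrappers. demo_firmware_call() is hypothetical.
 */
#if 0
static inline unsigned long demo_protected_firmware_call(void)
{
	unsigned long status;

	firmware_restrict_branch_speculation_start();
	status = demo_firmware_call();	/* hypothetical firmware entry */
	firmware_restrict_branch_speculation_end();

	return status;
}
#endif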

DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

extern u16 mds_verw_sel;

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}
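
/*
 * Illustrative only (not part of the kernel build): a sketch of how an
 * idle-entry path would invoke the conditional flush right before going
 * to sleep. demo_mwait_idle() is hypothetical.
 */
#if 0
static inline void demo_idle_enter(void)
{
	/* Flush CPU buffers only when the mds_idle_clear key is enabled */
	mds_idle_clear_cpu_buffers();
	demo_mwait_idle();	/* hypothetical low power wait */
}
#endif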

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */