/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
#include <asm/current.h>

/*
 * Call depth tracking for Intel SKL CPUs to address the RSB underflow
 * issue in software.
 *
 * The tracking does not use a counter. It uses arithmetic shift
 * right on call entry and logical shift left on return.
 *
 * The depth tracking variable is initialized to 0x8000.... when the call
 * depth is zero. The arithmetic shift right sign extends the MSB and
 * saturates after the 12th call. The shift count is 5 for both directions
 * so the tracking covers 12 nested calls.
 *
 *  Call
 *  0: 0x8000000000000000	0x0000000000000000
 *  1: 0xfc00000000000000	0xf000000000000000
 * ...
 * 11: 0xfffffffffffffff8	0xfffffffffffffc00
 * 12: 0xffffffffffffffff	0xffffffffffffffe0
 *
 * After a return buffer fill the depth is credited 12 calls before the
 * next stuffing has to take place.
 *
 * There is an inaccuracy for situations like this:
 *
 *  10 calls
 *   5 returns
 *   3 calls
 *   4 returns
 *   3 calls
 *   ....
 *
 * The shift count might cause this to be off by one in either direction,
 * but there is still a cushion vs. the RSB depth. The algorithm does not
 * claim to be perfect and it can be speculated around by the CPU, but it
 * is considered that it obfuscates the problem enough to make exploitation
 * extremely difficult.
 */
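
/*
 * Purely illustrative, not part of the mechanism itself: with a shift count
 * of 5, one call followed by one matching return restores the starting
 * value:
 *
 *	depth = 0x8000000000000000	(RET_DEPTH_INIT, depth == 0)
 *	call  -> sar $5 -> 0xfc00000000000000
 *	ret   -> shl $5 -> 0x8000000000000000
 *
 * Once the value has saturated to all ones (RET_DEPTH_CREDIT), further
 * arithmetic shifts right leave it unchanged.
 */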
#define RET_DEPTH_SHIFT			5
#define RSB_RET_STUFF_LOOPS		16
#define RET_DEPTH_INIT			0x8000000000000000ULL
#define RET_DEPTH_INIT_FROM_CALL	0xfc00000000000000ULL
#define RET_DEPTH_CREDIT		0xffffffffffffffffULL

#ifdef CONFIG_CALL_THUNKS_DEBUG
# define CALL_THUNKS_DEBUG_INC_CALLS				\
	incq	PER_CPU_VAR(__x86_call_count);
# define CALL_THUNKS_DEBUG_INC_RETS				\
	incq	PER_CPU_VAR(__x86_ret_count);
# define CALL_THUNKS_DEBUG_INC_STUFFS				\
	incq	PER_CPU_VAR(__x86_stuffs_count);
# define CALL_THUNKS_DEBUG_INC_CTXSW				\
	incq	PER_CPU_VAR(__x86_ctxsw_count);
#else
# define CALL_THUNKS_DEBUG_INC_CALLS
# define CALL_THUNKS_DEBUG_INC_RETS
# define CALL_THUNKS_DEBUG_INC_STUFFS
# define CALL_THUNKS_DEBUG_INC_CTXSW
#endif

#if defined(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) && !defined(COMPILE_OFFSETS)

#include <asm/asm-offsets.h>

#define CREDIT_CALL_DEPTH					\
	movq	$-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define RESET_CALL_DEPTH					\
	xor	%eax, %eax;					\
	bts	$63, %rax;					\
	movq	%rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define RESET_CALL_DEPTH_FROM_CALL				\
	movb	$0xfc, %al;					\
	shl	$56, %rax;					\
	movq	%rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);	\
	CALL_THUNKS_DEBUG_INC_CALLS

#define INCREMENT_CALL_DEPTH					\
	sarq	$5, PER_CPU_VAR(pcpu_hot + X86_call_depth);	\
	CALL_THUNKS_DEBUG_INC_CALLS

#else
#define CREDIT_CALL_DEPTH
#define RESET_CALL_DEPTH
#define RESET_CALL_DEPTH_FROM_CALL
#define INCREMENT_CALL_DEPTH
#endif
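
/*
 * Note: the accounting macros above are not used directly from C code; they
 * are meant for the call and return thunks that implement the tracking
 * scheme described above (see e.g. call_depth_return_thunk() declared
 * further down), which shift the per-CPU depth value on call/return and
 * stuff the RSB once the tracked depth indicates a potential underflow.
 */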

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RETPOLINE_THUNK_SIZE	32
#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */

/*
 * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
 */
#define __FILL_RETURN_SLOT			\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	772f;				\
	int3;					\
772:

/*
 * Stuff the entire RSB.
 *
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#ifdef CONFIG_X86_64
#define __FILL_RETURN_BUFFER(reg, nr)			\
	mov	$(nr/2), reg;				\
771:							\
	__FILL_RETURN_SLOT				\
	__FILL_RETURN_SLOT				\
	add	$(BITS_PER_LONG/8) * 2, %_ASM_SP;	\
	dec	reg;					\
	jnz	771b;					\
	/* barrier for jnz misprediction */		\
	lfence;						\
	CREDIT_CALL_DEPTH				\
	CALL_THUNKS_DEBUG_INC_CTXSW
#else
/*
 * i386 doesn't unconditionally have LFENCE, as such it can't
 * do a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr)			\
	.rept nr;					\
	__FILL_RETURN_SLOT;				\
	.endr;						\
	add	$(BITS_PER_LONG/8) * nr, %_ASM_SP;
#endif
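
/*
 * For illustration: with nr == RSB_CLEAR_LOOPS (32) the 64-bit variant above
 * runs its loop 16 times.  Each iteration plants two return addresses via
 * __FILL_RETURN_SLOT and then pops them back off with the 'add' to %_ASM_SP,
 * so the stack pointer ends up balanced while all RSB entries have been
 * overwritten with harmless trap addresses (the int3 following each call).
 */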

/*
 * Stuff a single RSB slot.
 *
 * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
 * forced to retire before letting a RET instruction execute.
 *
 * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
 * before this point.
 */
#define __FILL_ONE_RETURN				\
	__FILL_RETURN_SLOT				\
	add	$(BITS_PER_LONG/8), %_ASM_SP;		\
	lfence;

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
.Lhere_\@:
	.pushsection .discard.retpoline_safe
	.long .Lhere_\@
	.popsection
.endm

/*
 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
 * vs RETBleed validation.
 */
#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE

/*
 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
 * eventually turn into its own annotation.
 */
.macro VALIDATE_UNRET_END
#if defined(CONFIG_NOINSTR_VALIDATION) && \
	(defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO))
	ANNOTATE_RETPOLINE_SAFE
	nop
#endif
.endm

/*
 * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
 * to the retpoline thunk with a CS prefix when the register requires
 * a REX prefix byte to encode. Also see apply_retpolines().
 */
.macro __CS_PREFIX reg:req
	.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
	.ifc \reg,\rs
	.byte 0x2e
	.endif
	.endr
.endm
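
/*
 * Example (illustrative): for \reg == r11 the .irp loop above matches and
 * emits the 0x2e CS segment-override prefix, since r8-r15 need a REX prefix
 * in the call/jmp encoding; for the legacy registers (rax...rdi) nothing is
 * emitted and the 5 byte jmp/call to the thunk is left unprefixed.
 */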

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 *
 * NOTE: these do not take kCFI into account and are thus not comparable to C
 * indirect calls, take care when using. The target of these should be an ENDBR
 * instruction irrespective of kCFI.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_MITIGATION_RETPOLINE
	__CS_PREFIX \reg
	jmp	__x86_indirect_thunk_\reg
#else
	jmp	*%\reg
	int3
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_MITIGATION_RETPOLINE
	__CS_PREFIX \reg
	call	__x86_indirect_thunk_\reg
#else
	call	*%\reg
#endif
.endm
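
/*
 * Illustrative use from assembly (hypothetical call site):
 *
 *	CALL_NOSPEC r11
 *
 * which, on CONFIG_MITIGATION_RETPOLINE builds, emits the CS prefix via
 * __CS_PREFIX followed by "call __x86_indirect_thunk_r11", and otherwise
 * degenerates to a plain "call *%r11".
 */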

 /*
  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
  * monstrosity above manually.
  */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
		__stringify(nop;nop;__FILL_ONE_RETURN), \ftr2

.Lskip_rsb_\@:
.endm
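
/*
 * Illustrative invocation (register choice and feature bits are only an
 * example):
 *
 *	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 *
 * \reg is used (and clobbered) as the loop counter by __FILL_RETURN_BUFFER.
 * If 'ftr' is not enabled but 'ftr2' is, only a single slot is stuffed via
 * __FILL_ONE_RETURN; if neither is enabled the whole sequence is skipped.
 */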

#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)
#define CALL_UNTRAIN_RET	"call entry_untrain_ret"
#else
#define CALL_UNTRAIN_RET	""
#endif

/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
 * return thunk isn't mapped into the userspace tables (then again, AMD
 * typically has NO_MELTDOWN).
 *
 * While retbleed_untrain_ret() doesn't clobber anything but requires a stack,
 * entry_ibpb() will clobber AX, CX, DX.
 *
 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
 */
.macro __UNTRAIN_RET ibpb_feature, call_depth_insns
#if defined(CONFIG_MITIGATION_RETHUNK) || defined(CONFIG_MITIGATION_IBPB_ENTRY)
	VALIDATE_UNRET_END
	ALTERNATIVE_3 "",						\
		      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,		\
		      "call entry_ibpb", \ibpb_feature,			\
		     __stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
#endif
.endm

#define UNTRAIN_RET \
	__UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH)

#define UNTRAIN_RET_VM \
	__UNTRAIN_RET X86_FEATURE_IBPB_ON_VMEXIT, __stringify(RESET_CALL_DEPTH)

#define UNTRAIN_RET_FROM_CALL \
	__UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH_FROM_CALL)


.macro CALL_DEPTH_ACCOUNT
#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
	ALTERNATIVE "",							\
		    __stringify(INCREMENT_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
#endif
.endm

/*
 * Macro to execute the VERW instruction to mitigate transient data sampling
 * attacks such as MDS. On affected systems a microcode update overloaded the
 * VERW instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
 *
 * Note: Only the memory operand variant of VERW clears the CPU buffers.
 */
.macro CLEAR_CPU_BUFFERS
	ALTERNATIVE "", __stringify(verw _ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF
.endm
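
/*
 * Typical (illustrative) placement: on an exit-to-userspace or guest-entry
 * path shortly before the final return, so that nothing refills the CPU
 * buffers afterwards.  Note that the VERW above clobbers ZF, so the macro
 * must not sit between a flag-setting instruction and the conditional
 * branch that consumes it.
 */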

#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	".long 999b\n\t"					\
	".popsection\n\t"

typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
extern retpoline_thunk_t __x86_indirect_thunk_array[];
extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];

#ifdef CONFIG_MITIGATION_RETHUNK
extern void __x86_return_thunk(void);
#else
static inline void __x86_return_thunk(void) {}
#endif

#ifdef CONFIG_MITIGATION_UNRET_ENTRY
extern void retbleed_return_thunk(void);
#else
static inline void retbleed_return_thunk(void) {}
#endif

#ifdef CONFIG_MITIGATION_SRSO
extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);
#else
static inline void srso_return_thunk(void) {}
static inline void srso_alias_return_thunk(void) {}
#endif

extern void entry_untrain_ret(void);
extern void entry_ibpb(void);

extern void (*x86_return_thunk)(void);

extern void __warn_thunk(void);

#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
extern void call_depth_return_thunk(void);

#define CALL_DEPTH_ACCOUNT					\
	ALTERNATIVE("",						\
		    __stringify(INCREMENT_CALL_DEPTH),		\
		    X86_FEATURE_CALL_DEPTH)

#ifdef CONFIG_CALL_THUNKS_DEBUG
DECLARE_PER_CPU(u64, __x86_call_count);
DECLARE_PER_CPU(u64, __x86_ret_count);
DECLARE_PER_CPU(u64, __x86_stuffs_count);
DECLARE_PER_CPU(u64, __x86_ctxsw_count);
#endif
#else /* !CONFIG_MITIGATION_CALL_DEPTH_TRACKING */

static inline void call_depth_return_thunk(void) {}
#define CALL_DEPTH_ACCOUNT ""

#endif /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */

#ifdef CONFIG_MITIGATION_RETPOLINE

#define GEN(reg) \
	extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg)						\
	extern retpoline_thunk_t __x86_indirect_call_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg)						\
	extern retpoline_thunk_t __x86_indirect_jump_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier, which is only available in newer GCC
 * versions; that is ensured when CONFIG_MITIGATION_RETPOLINE is defined.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"    	lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	lea    4(%%esp), %%esp;\n"			\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
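
/*
 * Illustrative use from C ('fn' being a hypothetical function pointer):
 *
 *	asm volatile(CALL_NOSPEC : : THUNK_TARGET(fn) : "memory");
 *
 * Depending on configuration and the selected mitigation this becomes a call
 * through the retpoline thunk, an LFENCE followed by a plain indirect call,
 * or just the plain indirect call.
 */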

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE,
	SPECTRE_V2_LFENCE,
	SPECTRE_V2_EIBRS,
	SPECTRE_V2_EIBRS_RETPOLINE,
	SPECTRE_V2_EIBRS_LFENCE,
	SPECTRE_V2_IBRS,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

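/*
 * Write an MSR, but only when 'feature' is enabled: the WRMSR is patched in
 * via alternatives when the feature bit is set, otherwise the asm body stays
 * empty.  The constraints pre-load ECX/EAX/EDX the way WRMSR expects them
 * (MSR index, low and high half of the value).
 */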
static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

extern u64 x86_pred_cmd;

static inline void indirect_branch_prediction_barrier(void)
{
	alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
extern void update_spec_ctrl_cond(u64 val);
extern u64 spec_ctrl_current(void);

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
			      X86_FEATURE_USE_IBRS_FW);			\
	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,		\
			      X86_FEATURE_USE_IBPB_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current(),			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)
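
/*
 * Illustrative call site (the firmware call itself is hypothetical):
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = some_efi_service(...);
 *	firmware_restrict_branch_speculation_end();
 *
 * i.e. IBRS is set (when X86_FEATURE_USE_IBRS_FW is enabled) and an IBPB
 * issued (when X86_FEATURE_USE_IBPB_FW is enabled) before the firmware call,
 * with preemption disabled for the whole window; the end() variant restores
 * SPEC_CTRL and re-enables preemption.
 */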

DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

extern u16 mds_verw_sel;

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}
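
/*
 * Illustrative (hypothetical) idle-entry usage: call
 * mds_idle_clear_cpu_buffers() right before entering a low power state, so
 * that the buffers are cleared on systems where the mitigation code has
 * enabled the mds_idle_clear static key; on everything else this is a
 * static-branch NOP.
 */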

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
