Lines Matching +full:alternative +full:- +full:a
1 /* SPDX-License-Identifier: GPL-2.0 */
10 #include <asm/alternative.h>
12 #include <asm/msr-index.h>
21 * The tracking does not use a counter. It uses arithmetic shift
36 * After a return buffer fill the depth is credited 12 calls before the
39 * There is an inaccuracy for situations like this:
49 * but there is still a cushion vs. the RSB depth. The algorithm does not
78 #include <asm/asm-offsets.h>
81 movq $-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);
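A minimal sketch of the shift-based accounting the comment above describes, assuming the PER_CPU_VAR(pcpu_hot + X86_call_depth) layout visible in the matched line; macro names and shift width follow the usual in-tree convention but may differ by kernel version:

    /*
     * Sketch, not the verbatim in-tree definitions. The depth word starts
     * with only the sign bit set; each CALL arithmetic-shifts right so sign
     * bits accumulate, each RET shifts left again. Writing -1 (all ones, as
     * in the matched line above) credits the full depth after an RSB fill:
     * 64 bits / 5 bits per call ~= 12 calls before stuffing is needed again.
     */
    #define RESET_CALL_DEPTH                                        \
        xor     %eax, %eax;                                         \
        bts     $63, %rax;                                          \
        movq    %rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);

    #define INCREMENT_CALL_DEPTH                                    \
        sarq    $5, PER_CPU_VAR(pcpu_hot + X86_call_depth);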
108 * Each entry in the RSB, if used for a speculative 'ret', contains an
111 * This is required in various cases for retpoline and IBRS-based
117 * We define a CPP macro such that it can be used from both .S files and
118 * inline assembly. It's possible to do a .macro and then include that
119 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
137 * Google experimented with loop-unrolling and this turned out to be
138 * the optimal version - two calls, each with their own speculation
139 * trap should their return address end up getting used, in a loop.
157 * do a loop.
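A sketch of the two-calls-per-iteration fill sequence described above; register and count are macro parameters, and the numeric labels are illustrative:

    .macro __FILL_RETURN_BUFFER reg:req nr:req
        mov     $(\nr/2), \reg
    771:
        call    772f                    /* pushes one RSB entry */
    773:                                /* speculation trap */
        pause
        lfence
        jmp     773b
    772:
        call    774f                    /* pushes a second RSB entry */
    775:                                /* speculation trap */
        pause
        lfence
        jmp     775b
    774:
        dec     \reg
        jnz     771b
        /* drop the \nr return addresses accumulated on the stack */
        add     $(BITS_PER_LONG/8) * \nr, %_ASM_SP
    .endm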
167 * Stuff a single RSB slot.
169 * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
170 * forced to retire before letting a RET instruction execute.
172 * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
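A sketch of the single-slot stuff plus forced retirement implied above: one never-taken return address is pushed, then an LFENCE guarantees the CALL retires before any subsequent RET can execute.

        ANNOTATE_INTRA_FUNCTION_CALL
        call    1f                      /* occupy one RSB entry */
        int3                            /* speculation trap */
    1:
        add     $(BITS_PER_LONG/8), %_ASM_SP    /* drop the return address */
        lfence                          /* CALL must retire before a RET runs */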
201 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
213 * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
214 * to the retpoline thunk with a CS prefix when the register requires
215 * a REX prefix byte to encode. Also see apply_retpolines().
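A sketch of how such a conditional prefix can be emitted, mirroring the __CS_PREFIX helper pattern: only r8-r15, which need a REX prefix, get the extra 0x2e byte, so a later in-place rewrite (e.g. to "lfence; jmp *%reg") still fits over the padded call site.

    .macro __CS_PREFIX reg:req
        .irp rs,r8,r9,r10,r11,r12,r13,r14,r15
        .ifc \reg,\rs
        .byte 0x2e
        .endif
        .endr
    .endm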
226 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
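Usage sketch for the JMP_NOSPEC form, assuming retpoline config gating as in current trees (the exact config symbol has changed names across versions):

    .macro JMP_NOSPEC reg:req
    #ifdef CONFIG_MITIGATION_RETPOLINE
        __CS_PREFIX \reg
        jmp     __x86_indirect_thunk_\reg
    #else
        jmp     *%\reg
    #endif
    .endm

        /* call site: instead of "jmp *%r11" */
        JMP_NOSPEC r11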
254 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
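The wrapper presumably pairs the CPP fill sequence with an ALTERNATIVE feature gate; a sketch of the simpler (older) form:

    .macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
        ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
        __FILL_RETURN_BUFFER(\reg,\nr)
    .Lskip_rsb_\@:
    .endm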
268 * must be the target of a CALL instruction instead of indirectly
269 * jumping to a wrapper which then calls it. Therefore, this macro is
288 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
289 * where we have a stack but before any RET instruction.
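A placement sketch for an entry path under the constraint stated above; the surrounding macro names and register choice are illustrative:

        SWITCH_TO_KERNEL_CR3 scratch_reg=%r14   /* page-table switch */
        /* ... establish pt_regs, so a stack exists ... */
        UNTRAIN_RET                             /* before the first RET */
        call    do_syscall_64                   /* RETs are safe only now */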
313 ALTERNATIVE "", \
320 * attacks such as MDS. On affected systems a microcode update overloaded VERW
327 ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
330 * In 32bit mode, the memory operand must be a %cs reference. The data
334 ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
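Putting the two matched alternatives together, the enclosing macro plausibly looks like this, following the 64-bit/32-bit split visible above:

    .macro CLEAR_CPU_BUFFERS
    #ifdef CONFIG_X86_64
        ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
    #else
        /* 32-bit: %cs override, as %ds/%ss may be unusable (vm86, ESPFIX32) */
        ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
    #endif
    .endm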
340 ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
344 ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT
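The two matched lines are plausibly the bodies of paired macros, one for exit-to-userspace and one for VMEXIT; a sketch:

    .macro CLEAR_BRANCH_HISTORY
        ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
    .endm

    .macro CLEAR_BRANCH_HISTORY_VMEXIT
        ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT
    .endm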
405 ALTERNATIVE("", \
426 #include <asm/GEN-for-each-reg.h>
431 #include <asm/GEN-for-each-reg.h>
436 #include <asm/GEN-for-each-reg.h>
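The triple include is the usual X-macro pattern: <asm/GEN-for-each-reg.h> expands a caller-supplied GEN() once per general-purpose register. A sketch of one such use, declaring the per-register thunks (the typedef is assumed from context):

    typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];

    #define GEN(reg) \
        extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
    #include <asm/GEN-for-each-reg.h>
    #undef GEN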
460 * For i386 we use the original ret-equivalent retpoline, because
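The "ret-equivalent" construction replaces the indirect jump with a pushed target plus RET; a sketch of the classic sequence, with illustrative labels and %eax standing in for the thunk target:

        call    .Ldo_rop
    .Lspec_trap:
        pause                   /* speculation lands here and spins harmlessly */
        lfence
        jmp     .Lspec_trap
    .Ldo_rop:
        mov     %eax, (%esp)    /* overwrite saved return address with target */
        ret                     /* architecturally jumps to the target */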
524 asm volatile(ALTERNATIVE("", "wrmsr", %c[feature]) in alternative_msr_write()
526 "a" ((u32)val), in alternative_msr_write()
584 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
587 * combination with microcode which triggers a CPU buffer flush when the
595 * Has to be the memory-operand variant because only that in mds_clear_cpu_buffers()
597 * documentation. The register-operand variant does not. in mds_clear_cpu_buffers()
598 * Works with any segment selector, but a valid writable in mds_clear_cpu_buffers()
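The comment fragments above belong to the VERW helper; a reconstructed sketch:

    static __always_inline void mds_clear_cpu_buffers(void)
    {
        static const u16 ds = __KERNEL_DS;

        /*
         * Memory-operand VERW is the documented buffer-clearing form;
         * any valid writable segment selector works.
         */
        asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
    }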
607 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
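And the idle-path variant is presumably a static-key-guarded call to the helper above; a sketch assuming the mds_idle_clear key declared elsewhere in this header:

    static __always_inline void mds_idle_clear_cpu_buffers(void)
    {
        if (static_branch_likely(&mds_idle_clear))
            mds_clear_cpu_buffers();
    }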