xref: /linux/arch/x86/kvm/svm/vmenter.S (revision 331282fdb15edaf1beb1d27a64d3f65a34d7394d)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX	(SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	(SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI	(SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI	(SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8		(SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9		(SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10	(SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11	(SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12	(SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13	(SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14	(SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif
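
/*
 * Each VCPU_<reg> macro above is a byte offset into struct vcpu_svm:
 * SVM_vcpu_arch_regs (from kvm-asm-offsets.h) is the offset of
 * vcpu.arch.regs[], and the __VCPU_REGS_* constants index that array, so
 * e.g. VCPU_RCX(%_ASM_DI) addresses svm->vcpu.arch.regs[VCPU_REGS_RCX]
 * when %_ASM_DI holds the @svm argument.
 */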

#define SVM_vmcb01_pa	(SVM_vmcb01 + KVM_VMCB_pa)

.section .noinstr.text, "ax"

.macro RESTORE_GUEST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
801:
.endm
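
/*
 * The ALTERNATIVE_2 in RESTORE_GUEST_SPEC_CTRL is a nop by default, is
 * patched to "jmp 800f" when the CPU has X86_FEATURE_MSR_SPEC_CTRL, and is
 * patched back to a nop when X86_FEATURE_V_SPEC_CTRL lets hardware context
 * switch SPEC_CTRL across VMRUN/#VMEXIT.  The out-of-line 800: body,
 * emitted near the end of each function by RESTORE_GUEST_SPEC_CTRL_BODY,
 * jumps back to the 801: label at the expansion site.
 * RESTORE_HOST_SPEC_CTRL uses the same scheme with the 900/901 labels.
 */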
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR.  This is kept out-of-line so that the common
	 * case does not have to jump.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
	movl SVM_spec_ctrl(%_ASM_DI), %eax
	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
	je 801b
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx
	wrmsr
	jmp 801b
.endm

.macro RESTORE_HOST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
.macro RESTORE_HOST_SPEC_CTRL_BODY
900:
	/* Same for after vmexit.  */
	mov $MSR_IA32_SPEC_CTRL, %ecx

	/*
	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
	 * if it was not intercepted during guest execution.
	 */
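	/* (%_ASM_SP) is the @spec_ctrl_intercepted bool pushed in the prologue. */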
	cmpb $0, (%_ASM_SP)
	jnz 998f
	rdmsr
	movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:

	/* Now restore the host value of the MSR if different from the guest's.  */
	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
	cmp SVM_spec_ctrl(%_ASM_DI), %eax
	je 901b
	xor %edx, %edx
	wrmsr
	jmp 901b
.endm


/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_vcpu_run)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL.  */
	push %_ASM_ARG2

	/* Needed to restore access to percpu variables.  */
	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

	/* Finally save @svm. */
	push %_ASM_ARG1
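	/*
	 * The stack now holds, from the top: @svm, the host save area PA and
	 * @spec_ctrl_intercepted, on top of the saved callee-save registers.
	 */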

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_GUEST_SPEC_CTRL

	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:	vmload %_ASM_AX
2:

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI),  %r8
	mov VCPU_R9 (%_ASM_DI),  %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI

	/* Enter guest mode */
	sti

3:	vmrun %_ASM_AX
4:
	cli

	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers.  */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on.  */
	mov %_ASM_AX, %_ASM_DI

	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:

	/* Restores GSBASE among other things, allowing access to percpu data.  */
	pop %_ASM_AX
7:	vmload %_ASM_AX
8:

#ifdef CONFIG_MITIGATION_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* "Pop" @spec_ctrl_intercepted.  */
	pop %_ASM_BX

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY

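	/*
	 * Fixups for faults on the VMLOAD/VMRUN/VMSAVE/VMLOAD instructions
	 * above: the exception table entries below route a faulting
	 * instruction here.  If KVM is being torn down (kvm_rebooting is set)
	 * the fault is tolerated and execution resumes after the instruction,
	 * otherwise ud2 triggers a BUG.
	 */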
10:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 2b
	ud2
30:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 4b
	ud2
50:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 6b
	ud2
70:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 8b
	ud2

	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)
	_ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)

#ifdef CONFIG_KVM_AMD_SEV
/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
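	/*
	 * For SEV-ES the guest's general purpose registers live in the
	 * encrypted VMSA and are context switched by hardware across VMRUN,
	 * so no guest GPR load/save sequence is needed here; only the host's
	 * callee-saved registers and the arguments are preserved.
	 */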
	push %rbp
	push %r15
	push %r14
	push %r13
	push %r12
	push %rbx

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL.  */
	push %rsi

	/* Save @svm. */
	push %rdi

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_GUEST_SPEC_CTRL

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%rdi), %rax
	mov KVM_VMCB_pa(%rax), %rax

	/* Enter guest mode */
	sti

1:	vmrun %rax

2:	cli

	/* Pop @svm to RDI, guest registers have been saved already. */
	pop %rdi

#ifdef CONFIG_MITIGATION_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	/* "Pop" @spec_ctrl_intercepted.  */
	pop %rbx

	pop %rbx

	pop %r12
	pop %r13
	pop %r14
	pop %r15
	pop %rbp
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY

3:	cmpb $0, kvm_rebooting(%rip)
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
#endif /* CONFIG_KVM_AMD_SEV */