/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX	(SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	(SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI	(SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI	(SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8		(SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9		(SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10	(SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11	(SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12	(SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13	(SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14	(SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

#define SVM_vmcb01_pa	(SVM_vmcb01 + KVM_VMCB_pa)

.section .noinstr.text, "ax"

.macro RESTORE_GUEST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
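	/*
	 * Note: the later V_SPEC_CTRL alternative takes precedence, so when
	 * hardware virtualizes SPEC_CTRL through the VMCB the jump above
	 * stays patched out.
	 */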
801:
.endm
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR.  This is kept out-of-line so that the common
	 * case does not have to jump.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
	movl SVM_spec_ctrl(%_ASM_DI), %eax
	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
	je 801b
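	/*
	 * WRMSR takes the MSR index in ECX and the value in EDX:EAX;
	 * SPEC_CTRL only defines bits in the low 32 bits, so clear EDX.
	 */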
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx
	wrmsr
	jmp 801b
.endm

.macro RESTORE_HOST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
.macro RESTORE_HOST_SPEC_CTRL_BODY
900:
	/* Same as above, but for restoring the host's SPEC_CTRL after vmexit.  */
	mov $MSR_IA32_SPEC_CTRL, %ecx

	/*
	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
	 * if it was not intercepted during guest execution.
	 */
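	/*
	 * (%_ASM_SP) is the @spec_ctrl_intercepted bool pushed in the
	 * function prologue; the other stack-saved variables have already
	 * been popped, leaving it at the top of the stack here.
	 */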
	cmpb $0, (%_ASM_SP)
	jnz 998f
	rdmsr
	movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:

	/* Now restore the host value of the MSR if different from the guest's.  */
	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
	cmp SVM_spec_ctrl(%_ASM_DI), %eax
	je 901b
	xor %edx, %edx
	wrmsr
	jmp 901b
.endm


/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL.  */
	push %_ASM_ARG2

	/* Needed to restore access to percpu variables.  */
	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

	/* Finally save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_GUEST_SPEC_CTRL

	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
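	/*
	 * VMLOAD loads the additional guest state (FS/GS, TR, LDTR and the
	 * SYSCALL/SYSENTER MSRs) that VMRUN itself does not switch, from the
	 * save area at the physical address in RAX.
	 */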
1:	vmload %_ASM_AX
2:

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI),  %r8
	mov VCPU_R9 (%_ASM_DI),  %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI

	/* Enter guest mode */
	sti

3:	vmrun %_ASM_AX
4:
	cli

	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers.  */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on.  */
	mov %_ASM_AX, %_ASM_DI

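	/*
	 * The CPU still holds the guest's FS/GS, TR, LDTR and SYSCALL/SYSENTER
	 * state; VMSAVE stashes it into vmcb01 before the host values are
	 * reloaded by the VMLOAD of the host save area below.
	 */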
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:

	/* Restores GSBASE among other things, allowing access to percpu data.  */
	pop %_ASM_AX
7:	vmload %_ASM_AX
8:

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* "Pop" @spec_ctrl_intercepted.  */
	pop %_ASM_BX

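	/* Restore the callee-saved registers pushed in the prologue.  */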
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY

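	/*
	 * Exception fixup targets for the VMLOAD/VMRUN/VMSAVE/VMLOAD above:
	 * faults are tolerated only while KVM is shutting down (kvm_rebooting),
	 * in which case execution resumes after the faulting instruction;
	 * otherwise it is a fatal condition, hence the ud2.
	 */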
10:	cmpb $0, kvm_rebooting
	jne 2b
	ud2
30:	cmpb $0, kvm_rebooting
	jne 4b
	ud2
50:	cmpb $0, kvm_rebooting
	jne 6b
	ud2
70:	cmpb $0, kvm_rebooting
	jne 8b
	ud2

	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)
	_ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL.  */
	push %_ASM_ARG2

	/* Save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_GUEST_SPEC_CTRL

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX

2:	cli

	/* Pop @svm to RDI, guest registers have been saved already. */
	pop %_ASM_DI

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	/* "Pop" @spec_ctrl_intercepted.  */
	pop %_ASM_BX

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY

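	/* VMRUN fault fixup: tolerated only while kvm_rebooting is set.  */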
3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)