xref: /linux/arch/x86/kvm/svm/vmenter.S (revision 4367a75887ec8d68932cd84ea9cffe24d7a55fa0)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/frame.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

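/*
 * Byte offsets of the guest GPRs within struct vcpu_svm: the offset of the
 * vcpu.arch.regs array (SVM_vcpu_arch_regs, generated into kvm-asm-offsets.h)
 * plus each register's __VCPU_REGS_* index scaled by the word size.  This
 * lets guest registers be addressed off a single base register holding @svm.
 */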
/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX	(SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	(SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI	(SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI	(SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8		(SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9		(SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10	(SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11	(SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12	(SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13	(SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14	(SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

#define SVM_vmcb01_pa	(SVM_vmcb01 + KVM_VMCB_pa)

.section .noinstr.text, "ax"

.macro RESTORE_GUEST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
801:
.endm
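/*
 * The ALTERNATIVE_2 above is resolved at patch time: if the CPU has no
 * SPEC_CTRL MSR there is nothing to do; if it has X86_FEATURE_V_SPEC_CTRL,
 * hardware context switches SPEC_CTRL via the VMCB, so again nothing is
 * emitted; only with plain X86_FEATURE_MSR_SPEC_CTRL is the jump to the
 * out-of-line 800: body installed, which jumps back to the 801: label here.
 */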
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR.  This is kept out-of-line so that the common
	 * case does not have to jump.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
	movl SVM_spec_ctrl(%_ASM_DI), %eax
	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
	je 801b
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx
	wrmsr
	jmp 801b
.endm

.macro RESTORE_HOST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
900:
	/* Same for after vmexit.  */
	mov $MSR_IA32_SPEC_CTRL, %ecx

	/*
	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
	 * if it was not intercepted during guest execution.
	 */
	cmpb $0, \spec_ctrl_intercepted
	jnz 998f
	rdmsr
	movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:

	/* Now restore the host value of the MSR if different from the guest's.  */
	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
	cmp SVM_spec_ctrl(%_ASM_DI), %eax
	je 901b
	xor %edx, %edx
	wrmsr
	jmp 901b
.endm
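/*
 * The *_BODY macros are instantiated out-of-line, after each function's RET,
 * and are only reached via the "jmp 800f" / "jmp 900f" alternatives above.
 * RESTORE_HOST_SPEC_CTRL_BODY's \spec_ctrl_intercepted argument is the
 * location of the bool as a byte operand: a stack slot in __svm_vcpu_run,
 * %sil in __svm_sev_es_vcpu_run.  If the MSR was not intercepted, the guest
 * may have written it directly, hence the RDMSR to refresh svm->spec_ctrl
 * before the host value is restored.
 */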


/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_vcpu_run)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL.  */
	push %_ASM_ARG2

	/* Needed to restore access to percpu variables.  */
	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

	/* Finally save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_GUEST_SPEC_CTRL

	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:	vmload %_ASM_AX
2:

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI),  %r8
	mov VCPU_R9 (%_ASM_DI),  %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
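	/* Load guest RDI last, as RDI holds @svm until this point. */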
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI

	/* Enter guest mode */
	sti

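	/*
	 * VMRUN loads guest RAX and RSP from the VMCB whose physical address
	 * is in RAX; on #VMEXIT, hardware stores them back and reloads the
	 * host's RAX/RSP from the host save area, which is why those two
	 * registers are never loaded/saved by hand here.
	 */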
3:	vmrun %_ASM_AX
4:
	cli

	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers.  */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on.  */
	mov %_ASM_AX, %_ASM_DI

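	/*
	 * Save the guest's segment and MSR state that VMRUN/#VMEXIT doesn't
	 * handle (FS/GS/TR/LDTR, KernelGSbase, the SYSCALL/SYSENTER MSRs,
	 * etc.) back into vmcb01, mirroring the vmload above.
	 */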
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:

	/* Restores GSBASE among other things, allowing access to percpu data.  */
	pop %_ASM_AX
7:	vmload %_ASM_AX
8:

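	/*
	 * Guest execution may have left attacker-controlled entries in the
	 * Return Stack Buffer.  When retpolines are in use, overwrite the
	 * RSB before the first RET so that host-side returns cannot
	 * speculate to guest-chosen targets.
	 */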
#ifdef CONFIG_MITIGATION_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* "Pop" @spec_ctrl_intercepted.  */
	pop %_ASM_BX

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)

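	/*
	 * Fixup targets for the exception table entries below: a fault on
	 * vmload (1), vmrun (3), vmsave (5) or the host vmload (7) is
	 * tolerated only if kvm_rebooting is set, in which case execution
	 * simply resumes after the faulting instruction; otherwise it is a
	 * fatal bug (ud2).
	 */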
10:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 2b
	ud2
30:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 4b
	ud2
50:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 6b
	ud2
70:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 8b
	ud2

	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)
	_ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)

#ifdef CONFIG_KVM_AMD_SEV


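/*
 * Offsets into the SEV-ES host save area (struct sev_es_save_area) passed to
 * __svm_sev_es_vcpu_run.  The GPR block starts at 0x300 and follows the same
 * register order as the __VCPU_REGS_* indices, so the per-register offsets
 * are computed the same way as the VCPU_* offsets above.
 */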
#ifdef CONFIG_X86_64
#define SEV_ES_GPRS_BASE 0x300
#define SEV_ES_RBX	(SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
#define SEV_ES_RBP	(SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
#define SEV_ES_RSI	(SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
#define SEV_ES_RDI	(SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
#define SEV_ES_R12	(SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
#define SEV_ES_R13	(SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
#define SEV_ES_R14	(SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
#define SEV_ES_R15	(SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
#endif

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 * @hostsa:	struct sev_es_save_area *
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
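	/*
	 * FRAME_BEGIN/FRAME_END set up and tear down a standard stack frame
	 * when CONFIG_FRAME_POINTER=y so the unwinder can step through this
	 * function; they expand to nothing otherwise.
	 */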
	FRAME_BEGIN

	/*
	 * Save non-volatile (callee-saved) registers to the host save area.
	 * Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
	 * saved on VMRUN.
	 */
	mov %rbp, SEV_ES_RBP (%rdx)
	mov %r15, SEV_ES_R15 (%rdx)
	mov %r14, SEV_ES_R14 (%rdx)
	mov %r13, SEV_ES_R13 (%rdx)
	mov %r12, SEV_ES_R12 (%rdx)
	mov %rbx, SEV_ES_RBX (%rdx)

	/*
	 * Save volatile registers that hold arguments that are needed after
	 * #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
	 */
	mov %rdi, SEV_ES_RDI (%rdx)
	mov %rsi, SEV_ES_RSI (%rdx)

	/* Clobbers RAX, RCX, RDX (@hostsa). */
	RESTORE_GUEST_SPEC_CTRL

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%rdi), %rax
	mov KVM_VMCB_pa(%rax), %rax

	/* Enter guest mode */
	sti

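	/*
	 * Unlike __svm_vcpu_run, no guest GPRs are loaded or saved around
	 * VMRUN: a SEV-ES guest's register state lives in its encrypted VMSA
	 * and is context switched entirely by hardware, while the host GPRs
	 * saved above are restored from the host save area on #VMEXIT.
	 */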
1:	vmrun %rax

2:	cli

#ifdef CONFIG_MITIGATION_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	FRAME_END
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY %sil

3:	cmpb $0, kvm_rebooting(%rip)
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
#endif /* CONFIG_KVM_AMD_SEV */