/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)
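
/*
 * Worked example, assuming a 64-bit build: WORD_SIZE is 8 and
 * __VCPU_REGS_RCX (from asm/kvm_vcpu_regs.h) is 1, so VCPU_RCX below
 * expands to SVM_vcpu_arch_regs + 8, i.e. the byte offset of the
 * guest's RCX slot in vcpu->arch.regs[] within struct vcpu_svm, with
 * SVM_vcpu_arch_regs generated into kvm-asm-offsets.h at build time.
 */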

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX	(SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	(SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI	(SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI	(SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8		(SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9		(SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10	(SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11	(SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12	(SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13	(SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14	(SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

#define SVM_vmcb01_pa	(SVM_vmcb01 + KVM_VMCB_pa)
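
/*
 * SVM_vmcb01_pa(%reg) thus resolves to svm->vmcb01.pa: SVM_vmcb01 is
 * the generated offset of the vmcb01 member inside struct vcpu_svm,
 * and KVM_VMCB_pa the offset of the physical-address field within it.
 */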

.section .noinstr.text, "ax"

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 */
SYM_FUNC_START(__svm_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Save @svm. */
	push %_ASM_ARG1
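	/*
	 * @svm is stacked rather than kept in a callee-saved register so
	 * it can be popped into RAX right after VMRUN, when every other
	 * GPR still holds a guest value.
	 */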

.ifnc _ASM_ARG1, _ASM_DI
	/* Move @svm to RDI. */
	mov %_ASM_ARG1, %_ASM_DI
.endif
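	/*
	 * .ifnc compares the macro text, so on 64-bit builds, where
	 * _ASM_ARG1 already expands to rdi, no mov is emitted; on 32-bit,
	 * where it expands to eax, the copy above is assembled.
	 */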

	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE; that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:	vmload %_ASM_AX
2:
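	/*
	 * 1: and 2: anchor the exception table entry at the end of the
	 * function: a fault in VMLOAD is redirected to 10:, which resumes
	 * at 2: if KVM is rebooting.
	 */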

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
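	/*
	 * Unlike the VMLOAD above, VMRUN targets whichever vmcb is
	 * current: vmcb01, or vmcb02 while a nested guest is running.
	 */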

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI),  %r8
	mov VCPU_R9 (%_ASM_DI),  %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI
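	/*
	 * RDI is loaded last as it is the base register for all of the
	 * moves above; from here @svm survives only on the stack.
	 */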

	/* Enter guest mode */
	sti

3:	vmrun %_ASM_AX
4:
	cli

	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers.  */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on.  */
	mov %_ASM_AX, %_ASM_DI

	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:
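
	/*
	 * VMSAVE stores the guest's VMLOAD/VMSAVE-managed state (FS, GS,
	 * TR, LDTR, KernelGSbase and the syscall/sysenter MSRs) back into
	 * vmcb01, mirroring the VMLOAD on entry.
	 */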

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

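	/*
	 * Fault fixup targets, reached via the exception table entries
	 * below.  If kvm_rebooting is set, the fault is expected and the
	 * faulting instruction is skipped; otherwise ud2 turns the
	 * unexpected fault into an oops.
	 */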
10:	cmpb $0, kvm_rebooting
	jne 2b
	ud2
30:	cmpb $0, kvm_rebooting
	jne 4b
	ud2
50:	cmpb $0, kvm_rebooting
	jne 6b
	ud2

	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)

SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_ARG1), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
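
	/*
	 * No guest GPR save/restore here: a SEV-ES guest's register state
	 * lives in its encrypted VMSA and is context switched by hardware
	 * on VMRUN/#VMEXIT, so only the VMCB physical address is needed.
	 */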

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX

2:	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

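	/*
	 * As in __svm_vcpu_run: skip past the faulting VMRUN when
	 * kvm_rebooting is set, otherwise oops via ud2.
	 */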
3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)