xref: /linux/arch/x86/kvm/svm/vmenter.S (revision e287bd005ad9d85dd6271dd795d3ecfb6bca46ad)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"
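/*
 * kvm-asm-offsets.h is generated at build time (see kvm-asm-offsets.c) and
 * provides the SVM_*, KVM_VMCB_* and SD_* struct offsets used below to reach
 * into C structures from assembly.
 */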

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX	(SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	(SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI	(SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI	(SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8		(SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9		(SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10	(SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11	(SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12	(SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13	(SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14	(SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif
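/*
 * Each VCPU_<reg> above is the byte offset of vcpu.arch.regs[__VCPU_REGS_<reg>]
 * within struct vcpu_svm, so guest GPRs can be addressed as
 * VCPU_<reg>(%reg_holding_svm).
 */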

#define SVM_vmcb01_pa	(SVM_vmcb01 + KVM_VMCB_pa)
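/*
 * Offset of svm->vmcb01.pa, i.e. the physical address of vmcb01, which the
 * VMLOAD/VMSAVE instructions below operate on.
 */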

.section .noinstr.text, "ax"

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 */
SYM_FUNC_START(__svm_vcpu_run)
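	/*
	 * Save the callee-saved GPRs of the respective ABI: every GPR is
	 * loaded with guest state below, so anything the caller expects to
	 * be preserved must be spilled to the stack first.
	 */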
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in reverse
	 * order relative to when they will be needed.
	 */

	/* Needed to restore access to percpu variables.  */
	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)
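	/*
	 * The value pushed above is the physical address of this CPU's host
	 * save area (from per-CPU svm_data); it is consumed by the VMLOAD at
	 * label 7 below to restore host state after #VMEXIT.
	 */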

	/* Save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/* Move @svm to RDI. */
	mov %_ASM_ARG1, %_ASM_DI
.endif
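	/*
	 * The conditional move above matters for 32-bit builds, where the
	 * kernel's regparm(3) calling convention passes the first argument
	 * in %eax rather than %edi.
	 */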

	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
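	/*
	 * VMLOAD pulls in the guest state that VMRUN does not touch:
	 * FS/GS/TR/LDTR (including hidden state), KERNEL_GS_BASE, the
	 * STAR/LSTAR/CSTAR/SFMASK syscall MSRs and the SYSENTER MSRs.
	 */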
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:	vmload %_ASM_AX
2:

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI),  %r8
	mov VCPU_R9 (%_ASM_DI),  %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI

	/* Enter guest mode */
	sti

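	/*
	 * VMRUN consumes the VMCB at the physical address in RAX, loads the
	 * remaining guest state from it and runs the guest until #VMEXIT,
	 * which resumes execution at the instruction after VMRUN with the
	 * hardware-switched host state back in place.
	 */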
3:	vmrun %_ASM_AX
4:
	cli

	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers.  */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on.  */
	mov %_ASM_AX, %_ASM_DI

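	/*
	 * VMSAVE the guest's extra state (segments, KERNEL_GS_BASE and the
	 * syscall/sysenter MSRs) back into vmcb01, mirroring the VMLOAD at
	 * function entry.
	 */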
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:

	/* Restores GSBASE among other things, allowing access to percpu data.  */
	pop %_ASM_AX
7:	vmload %_ASM_AX
8:

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

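	/*
	 * Fault fixups for VMLOAD (1:), VMRUN (3:), VMSAVE (5:) and the
	 * host-state VMLOAD (7:), wired up via the _ASM_EXTABLE entries
	 * below: if kvm_rebooting is set the fault is expected (e.g. during
	 * an emergency reboot) and execution simply resumes after the
	 * faulting instruction, otherwise it is a fatal bug (ud2).
	 */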
10:	cmpb $0, kvm_rebooting
	jne 2b
	ud2
30:	cmpb $0, kvm_rebooting
	jne 4b
	ud2
50:	cmpb $0, kvm_rebooting
	jne 6b
	ud2
70:	cmpb $0, kvm_rebooting
	jne 8b
	ud2

	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)
	_ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

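	/*
	 * Unlike __svm_vcpu_run, guest GPRs are neither loaded before VMRUN
	 * nor saved/cleared afterwards: for SEV-ES guests they live in the
	 * encrypted VMSA and are context switched by hardware, so the host
	 * never sees them.
	 */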
	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_ARG1), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX

2:	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

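	/*
	 * As in __svm_vcpu_run: a faulting VMRUN is tolerated only while
	 * kvm_rebooting is set.
	 */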
3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)