xref: /linux/arch/x86/kvm/svm/vmenter.S (revision f7ef280132f9bf6f82acf5aa5c3c837206eef501)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"
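/*
 * The generated kvm-asm-offsets.h provides SVM_vcpu_arch_regs, the offset
 * of vcpu.arch.regs within struct vcpu_svm, used by the defines below.
 */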

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX	(SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	(SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI	(SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI	(SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8		(SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9		(SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10	(SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11	(SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12	(SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13	(SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14	(SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

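/* VM-Enter/VM-Exit code must not be instrumented (tracing, KASAN, etc.). */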
.section .noinstr.text, "ax"

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long, physical address of the guest's VMCB
 * @svm:	struct vcpu_svm *, the vCPU being run
 */
SYM_FUNC_START(__svm_vcpu_run)
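	/*
	 * Save the host's callee-saved registers; they are clobbered below
	 * when the guest's GPRs are loaded.
	 */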
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Save @svm. */
	push %_ASM_ARG2

	/* Save @vmcb. */
	push %_ASM_ARG1

	/* Move @svm to RDI. */
	mov %_ASM_ARG2, %_ASM_DI

	/* "POP" @vmcb to RAX. */
	pop %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI),  %r8
	mov VCPU_R9 (%_ASM_DI),  %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
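	/* RDI is loaded last, as it holds the @svm pointer until this point. */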
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX

2:	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* "POP" @svm to RAX. */
	pop %_ASM_AX

	/* Save all guest registers; RAX and RSP are saved to the VMCB by hardware. */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

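	/*
	 * A fault on VMRUN (label 1) is redirected here by the exception
	 * table entry below.  The fault is tolerated if SVM is being torn
	 * down for a reboot (kvm_rebooting != 0); otherwise, BUG via ud2.
	 */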
3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long, physical address of the guest's VMCB
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
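	/*
	 * For SEV-ES guests, hardware saves and restores the guest GPRs via
	 * the encrypted VMSA, so there is no need to load, save, or clear
	 * them here.
	 */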
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Move @vmcb to RAX. */
	mov %_ASM_ARG1, %_ASM_AX

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX

2:	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

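	/*
	 * As in __svm_vcpu_run above, a fault on VMRUN is tolerated only
	 * while a reboot is in progress; otherwise, BUG via ud2.
	 */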
3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)