xref: /linux/arch/x86/kvm/svm/vmenter.S (revision a149180fbcf336e97ce4eb2cdc13672727feb94d)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>

#define WORD_SIZE (BITS_PER_LONG / 8)

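/* Byte offset of each GPR in @regs: its __VCPU_REGS_* index times the word size. */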
/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif

.section .noinstr.text, "ax"
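/*
 * Everything below runs on the VMRUN/#VMEXIT boundary, where normal
 * kernel instrumentation (tracing, kprobes, etc.) must not run, hence
 * the placement in .noinstr.text.
 */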

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long (physical address of the VMCB to VMRUN)
 * @regs:	unsigned long * (pointer to the guest GPR array)
 */
SYM_FUNC_START(__svm_vcpu_run)
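	/*
	 * Save the registers the C calling convention requires this
	 * function to preserve; they are loaded with guest values below.
	 */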
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Save @regs. */
	push %_ASM_ARG2

	/* Save @vmcb. */
	push %_ASM_ARG1

	/* Move @regs to RAX. */
	mov %_ASM_ARG2, %_ASM_AX
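	/*
	 * RAX was chosen as the base register above because it can't hold
	 * a guest GPR anyway: guest RAX is context switched by hardware
	 * via the VMCB, and VMRUN itself takes the VMCB address in RAX.
	 */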

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX),  %r8
	mov VCPU_R9 (%_ASM_AX),  %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif

	/* "POP" @vmcb to RAX. */
	pop %_ASM_AX

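	/*
	 * The caller is expected to have executed CLGI, so GIF=0 and the
	 * STI below does not open an interrupt window on the host.  With
	 * V_INTR_MASKING=1, the host's EFLAGS.IF masks physical interrupts
	 * while the guest runs, so IF must be set for an interrupt to
	 * force a #VMEXIT.
	 */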
	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX
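	/*
	 * #VMEXIT resumes execution at the instruction after VMRUN.  If
	 * VMRUN itself faults, the _ASM_EXTABLE entry below redirects
	 * execution to the fixup at 3:.
	 */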

2:	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* "POP" @regs to RAX. */
	pop %_ASM_AX

	/* Save all guest registers. */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET
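	/*
	 * UNTRAIN_RET is patched in via alternatives (see
	 * asm/nospec-branch.h) and expands to nothing unless a
	 * RET-untraining mitigation is enabled.
	 */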

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

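	/* Restore the callee-saved host registers. */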
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

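	/*
	 * VMRUN faulted.  If KVM is shutting down (kvm_rebooting set), the
	 * fault is expected, e.g. because SVM has already been disabled;
	 * swallow it and resume at 2:.  Otherwise it's a fatal bug: die
	 * via UD2.
	 */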
3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long (physical address of the VMCB to VMRUN)
 *
 * Unlike __svm_vcpu_run(), no GPRs are loaded or saved here: for SEV-ES
 * guests, hardware saves and restores guest register state via the
 * encrypted VMSA.
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Move @vmcb to RAX. */
	mov %_ASM_ARG1, %_ASM_AX

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX
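	/* #VMEXIT resumes here; a faulting VMRUN is fixed up at 3: below. */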

2:	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

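	/* VMRUN fault fixup; see the comment at 3: in __svm_vcpu_run above. */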
3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)