xref: /linux/arch/x86/kvm/svm/vmenter.S (revision 16fdc1de169ee0a4e59a8c02244414ec7acd55c3)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)
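/*
 * WORD_SIZE is the native GPR width in bytes: 8 on 64-bit kernels, 4 on
 * 32-bit.  For example, with BITS_PER_LONG == 64, VCPU_RCX below expands to
 * (SVM_vcpu_arch_regs + __VCPU_REGS_RCX * 8), i.e. the byte offset of the
 * guest's RCX slot in vcpu.arch.regs[] within struct vcpu_svm (offsets
 * generated into kvm-asm-offsets.h), so VCPU_RCX(%_ASM_AX) addresses that
 * slot when RAX holds @svm.
 */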

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX	(SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	(SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI	(SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI	(SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8		(SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9		(SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10	(SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11	(SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12	(SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13	(SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14	(SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

.section .noinstr.text, "ax"
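/*
 * Everything below lives in .noinstr.text: code on the VM-entry/exit path
 * must not be instrumented (no tracing, KASAN, etc.), as it runs in the
 * narrow window around guest entry/exit where instrumentation is not safe.
 */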

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long physical address of the guest VMCB
 * @svm:	struct vcpu_svm * for the vCPU being run
 */
SYM_FUNC_START(__svm_vcpu_run)
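	/*
	 * The C-side prototype is presumably
	 *   void __svm_vcpu_run(unsigned long vmcb_pa, struct vcpu_svm *svm);
	 * so @vmcb_pa arrives in _ASM_ARG1 and @svm in _ASM_ARG2 below.
	 */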
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Save @svm. */
	push %_ASM_ARG2

	/* Save @vmcb. */
	push %_ASM_ARG1
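	/*
	 * Note the push order: @svm is saved first and @vmcb last, so that
	 * @vmcb can be popped straight into RAX for VMRUN below, and @svm
	 * popped back after VM-Exit to save the guest registers.
	 */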

	/* Move @svm to RAX. */
	mov %_ASM_ARG2, %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX),  %r8
	mov VCPU_R9 (%_ASM_AX),  %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif

	/* "POP" @vmcb to RAX. */
	pop %_ASM_AX
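	/*
	 * VMRUN takes the physical address of the VMCB as an implicit operand
	 * in RAX.  Guest RAX and RSP are loaded from the VMCB by hardware,
	 * which is why they were not loaded above.
	 */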

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX

2:	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif
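	/*
	 * The RSB is not flushed by VM-Exit, so without the stuffing above the
	 * first RETs executed here could consume return predictions planted by
	 * the guest while it ran.
	 */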

	/* "POP" @svm to RAX. */
	pop %_ASM_AX

	/* Save all guest registers. */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

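	/*
	 * A fault on VMRUN at label 1 lands here via the exception table
	 * entry below.  Tolerate the failure only if KVM is shutting down
	 * (kvm_rebooting != 0); otherwise execute ud2 to crash on an
	 * unexpected VMRUN failure.
	 */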
3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long physical address of the guest VMCB
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
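	/*
	 * Presumably declared on the C side as
	 *   void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
	 * with @vmcb_pa arriving in _ASM_ARG1.
	 */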
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

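	/*
	 * Unlike __svm_vcpu_run(), no guest GPRs are loaded or saved here:
	 * for a SEV-ES guest, hardware switches the guest register state via
	 * the encrypted VMSA, so only the VMCB physical address is needed.
	 */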
	/* Move @vmcb to RAX. */
	mov %_ASM_ARG1, %_ASM_AX

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX

2:	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

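	/* Same VMRUN fault handling as in __svm_vcpu_run() above. */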
3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
