xref: /linux/arch/x86/kvm/svm/vmenter.S (revision e61ab42de874c5af8c5d98b327c77a374d9e7da1)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX	(SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	(SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI	(SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI	(SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8		(SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9		(SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10	(SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11	(SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12	(SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13	(SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14	(SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

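/* Combined offset of vmcb01's physical address within struct vcpu_svm. */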
#define SVM_vmcb01_pa	(SVM_vmcb01 + KVM_VMCB_pa)

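/* The VM-Enter/VM-Exit path must not be instrumented, hence .noinstr.text. */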
.section .noinstr.text, "ax"

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 */
SYM_FUNC_START(__svm_vcpu_run)
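	/* Save callee-saved registers per the C calling convention. */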
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Save @svm. */
	push %_ASM_ARG1

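	/* The move below assembles away if @svm (_ASM_ARG1) is already in RDI. */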
.ifnc _ASM_ARG1, _ASM_DI
	/* Move @svm to RDI. */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/*
	 * Use a single vmcb (vmcb01, because it's always valid) to context
	 * switch guest state via VMLOAD/VMSAVE; that way the state doesn't
	 * need to be copied between vmcb01 and vmcb02 when switching vmcbs
	 * for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:	vmload %_ASM_AX
2:

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI),  %r8
	mov VCPU_R9 (%_ASM_DI),  %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI

	/* Enter guest mode */
	sti

3:	vmrun %_ASM_AX
4:
	cli

	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers. */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on. */
	mov %_ASM_AX, %_ASM_DI

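	/* Save the guest's VMLOAD/VMSAVE-managed state back into vmcb01. */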
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

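	/*
	 * Fixup handlers for faults on VMLOAD (1), VMRUN (3) and VMSAVE (5):
	 * tolerate the fault and resume if KVM is rebooting, otherwise UD2.
	 */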
10:	cmpb $0, kvm_rebooting
	jne 2b
	ud2
30:	cmpb $0, kvm_rebooting
	jne 4b
	ud2
50:	cmpb $0, kvm_rebooting
	jne 6b
	ud2

	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)

SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX
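
	/*
	 * Guest GPRs are not loaded or saved here: for SEV-ES guests, hardware
	 * context switches them via the encrypted VMSA on VMRUN and #VMEXIT.
	 */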

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_ARG1), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX

2:	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

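	/* VMRUN fixup: tolerate the fault if KVM is rebooting, otherwise UD2. */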
3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
