xref: /linux/arch/x86/kvm/svm/vmenter.S (revision 7774c8f32e99b1f314c0df7c204a897792b4f378)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

/* Size in bytes of one GPR slot in vcpu->arch.regs[]. */
#define WORD_SIZE (BITS_PER_LONG / 8)

/*
 * Byte offsets, relative to struct vcpu_svm, of each saved guest GPR.
 * Intentionally omit RAX as it's context switched by hardware.
 */
#define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX	(SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	(SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI	(SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI	(SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8		(SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9		(SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10	(SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11	(SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12	(SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13	(SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14	(SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

/* Offset of svm->vmcb01.pa within struct vcpu_svm. */
#define SVM_vmcb01_pa	(SVM_vmcb01 + KVM_VMCB_pa)

.section .noinstr.text, "ax"
/*
 * Inline stub for loading the guest's SPEC_CTRL value.  Patched via
 * alternatives: jumps to the out-of-line 800: body (emitted once per
 * function by RESTORE_GUEST_SPEC_CTRL_BODY), which jumps back to 801:.
 */
.macro RESTORE_GUEST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
801:
.endm
/*
 * Out-of-line body for RESTORE_GUEST_SPEC_CTRL.  Expects @svm in
 * %_ASM_DI; clobbers RAX, RCX, RDX (wrmsr operands).
 */
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR.  This is kept out-of-line so that the common
	 * case does not have to jump.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
	movl SVM_spec_ctrl(%_ASM_DI), %eax
	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
	je 801b
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx			/* high 32 bits of the MSR are zero */
	wrmsr
	jmp 801b
.endm
/*
 * Inline stub for restoring the host's SPEC_CTRL value after vmexit.
 * Patched via alternatives: jumps to the out-of-line 900: body (emitted
 * once per function by RESTORE_HOST_SPEC_CTRL_BODY), which jumps back
 * to 901:.
 */
.macro RESTORE_HOST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
/*
 * Out-of-line body for RESTORE_HOST_SPEC_CTRL.  Expects @svm in
 * %_ASM_DI and @spec_ctrl_intercepted on the top of the stack (both
 * callers arrange this); clobbers RAX, RCX, RDX.
 */
.macro RESTORE_HOST_SPEC_CTRL_BODY
900:
	/* Same for after vmexit.  */
	mov $MSR_IA32_SPEC_CTRL, %ecx

	/*
	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
	 * if it was not intercepted during guest execution.
	 */
	cmpb $0, (%_ASM_SP)
	jnz 998f
	rdmsr
	movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:

	/* Now restore the host value of the MSR if different from the guest's.  */
	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
	cmp SVM_spec_ctrl(%_ASM_DI), %eax
	je 901b
	xor %edx, %edx
	wrmsr
	jmp 901b
.endm
/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_vcpu_run)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL.  */
	push %_ASM_ARG2

	/* Needed to restore access to percpu variables.  */
	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

	/* Finally save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_GUEST_SPEC_CTRL

	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:	vmload %_ASM_AX
2:

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI),  %r8
	mov VCPU_R9 (%_ASM_DI),  %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
	/* RDI is loaded last as it held @svm until this point. */
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI

	/* Enter guest mode */
	sti

3:	vmrun %_ASM_AX
4:
	cli

	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers.  */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on.  */
	mov %_ASM_AX, %_ASM_DI

	/* Save guest state held in vmcb01 back to memory. */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:

	/* Restores GSBASE among other things, allowing access to percpu data.  */
	pop %_ASM_AX
7:	vmload %_ASM_AX
8:

#ifdef CONFIG_MITIGATION_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* "Pop" @spec_ctrl_intercepted.  */
	pop %_ASM_BX

	/* Restore callee-saved RBX pushed in the prologue. */
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY

	/*
	 * Fault fixups for VMLOAD (1:), VMRUN (3:), VMSAVE (5:) and the
	 * host-state VMLOAD (7:): if KVM is being shut down (kvm_rebooting
	 * set), eat the fault and resume at the label following the faulting
	 * instruction; otherwise die via ud2.
	 */
10:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 2b
	ud2
30:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 4b
	ud2
50:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 6b
	ud2
70:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 8b
	ud2

	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)
	_ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)
#ifdef CONFIG_KVM_AMD_SEV
/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 *
 * Unlike __svm_vcpu_run, no GPR save/restore or VMLOAD/VMSAVE is done
 * here: for SEV-ES guests, hardware context switches that state.
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL.  */
	push %_ASM_ARG2

	/* Save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_GUEST_SPEC_CTRL

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX

2:	cli

	/* Pop @svm to RDI, guest registers have been saved already. */
	pop %_ASM_DI

#ifdef CONFIG_MITIGATION_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	/* "Pop" @spec_ctrl_intercepted.  */
	pop %_ASM_BX

	/* Restore callee-saved RBX pushed in the prologue. */
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY

	/*
	 * VMRUN fault fixup: if KVM is being shut down (kvm_rebooting set),
	 * eat the fault and resume at 2:; otherwise die via ud2.
	 */
3:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
#endif /* CONFIG_KVM_AMD_SEV */