xref: /linux/arch/x86/kvm/svm/vmenter.S (revision 05df6ab8eba625a1d97eb67ee06d786b8e460685)
1/* SPDX-License-Identifier: GPL-2.0 */
2#include <linux/linkage.h>
3#include <asm/asm.h>
4#include <asm/bitsperlong.h>
5#include <asm/kvm_vcpu_regs.h>
6#include <asm/nospec-branch.h>
7#include "kvm-asm-offsets.h"
8
/* Size of one GPR slot in vcpu->arch.regs: 8 bytes on 64-bit, 4 on 32-bit. */
#define WORD_SIZE (BITS_PER_LONG / 8)

/* Byte offsets of each guest GPR within struct vcpu_svm. */
/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX	(SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	(SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI	(SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI	(SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8		(SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9		(SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10	(SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11	(SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12	(SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13	(SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14	(SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

/* Offset of vmcb01's physical address (svm->vmcb01.pa) within struct vcpu_svm. */
#define SVM_vmcb01_pa	(SVM_vmcb01 + KVM_VMCB_pa)

/*
 * VM-Entry/VM-Exit code must not be instrumented (tracing, KASAN, etc.),
 * as it runs with host state only partially restored.
 */
.section .noinstr.text, "ax"
34
/*
 * Inline trampoline for loading the guest's SPEC_CTRL before VMRUN.  The
 * actual MSR write lives out-of-line in RESTORE_GUEST_SPEC_CTRL_BODY
 * (label 800), which jumps back here (label 801) when done, so the common
 * no-write case costs nothing.  Patched at boot via alternatives.
 */
.macro RESTORE_GUEST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
801:
.endm
/*
 * Out-of-line body paired with RESTORE_GUEST_SPEC_CTRL.  Expects @svm in
 * RDI; clobbers RAX, RCX, RDX and flags.  Must be emitted inside the same
 * function so the 800/801 numeric labels resolve.
 */
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR.  This is kept out-of-line so that the common
	 * case does not have to jump.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
	movl SVM_spec_ctrl(%_ASM_DI), %eax
	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
	je 801b
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx				/* high 32 bits of the MSR are zero */
	wrmsr
	jmp 801b
.endm
61
/*
 * Inline trampoline for restoring the host's SPEC_CTRL after VM-Exit.
 * Mirrors RESTORE_GUEST_SPEC_CTRL: the MSR work is out-of-line at label
 * 900 (RESTORE_HOST_SPEC_CTRL_BODY) and returns to label 901 here.
 */
.macro RESTORE_HOST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
/*
 * Out-of-line body paired with RESTORE_HOST_SPEC_CTRL.  Expects @svm in
 * RDI and @spec_ctrl_intercepted as the bool at the top of the stack
 * (pushed by the callers below); clobbers RAX, RCX, RDX and flags.
 */
.macro RESTORE_HOST_SPEC_CTRL_BODY
900:
	/* Same for after vmexit.  */
	mov $MSR_IA32_SPEC_CTRL, %ecx

	/*
	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
	 * if it was not intercepted during guest execution.
	 */
	cmpb $0, (%_ASM_SP)
	jnz 998f
	rdmsr					/* not intercepted: read guest's live value */
	movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:

	/* Now restore the host value of the MSR if different from the guest's.  */
	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
	cmp SVM_spec_ctrl(%_ASM_DI), %eax
	je 901b
	xor %edx, %edx				/* high 32 bits of the MSR are zero */
	wrmsr
	jmp 901b
.endm
92
93
/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *  (in _ASM_ARG1)
 * @spec_ctrl_intercepted: bool  (in _ASM_ARG2)
 *
 * Saves host state, loads guest GPRs, executes VMRUN, then saves guest
 * GPRs and restores host state.  Faults on VMLOAD/VMRUN/VMSAVE are routed
 * through the exception table to the kvm_rebooting checks at the bottom.
 */
SYM_FUNC_START(__svm_vcpu_run)
	/* Save callee-saved host registers. */
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL.  */
	push %_ASM_ARG2

	/* Needed to restore access to percpu variables.  */
	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

	/* Finally save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_GUEST_SPEC_CTRL

	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:	vmload %_ASM_AX
2:

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI),  %r8
	mov VCPU_R9 (%_ASM_DI),  %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
	/* RDI last, as it held @svm until now. */
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI

	/* Enter guest mode */
	sti

3:	vmrun %_ASM_AX
4:
	cli

	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers.  */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on.  */
	mov %_ASM_AX, %_ASM_DI

	/* Save guest state held in vmcb01 back to memory. */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:

	/* Restores GSBASE among other things, allowing access to percpu data.  */
	pop %_ASM_AX
7:	vmload %_ASM_AX
8:

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* "Pop" @spec_ctrl_intercepted.  */
	pop %_ASM_BX

	/* Restore callee-saved host registers pushed in the prologue. */
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY

	/*
	 * Exception fixups: a #GP on vmload/vmrun/vmsave is tolerated only
	 * while KVM is shutting down (kvm_rebooting); otherwise it's fatal.
	 * Each fixup resumes right after the faulting instruction.
	 */
10:	cmpb $0, kvm_rebooting
	jne 2b
	ud2
30:	cmpb $0, kvm_rebooting
	jne 4b
	ud2
50:	cmpb $0, kvm_rebooting
	jne 6b
	ud2
70:	cmpb $0, kvm_rebooting
	jne 8b
	ud2

	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)
	_ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)
291
/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *  (in _ASM_ARG1)
 * @spec_ctrl_intercepted: bool  (in _ASM_ARG2)
 *
 * Unlike __svm_vcpu_run, no manual GPR save/restore or VMLOAD/VMSAVE is
 * done here — for SEV-ES guests that state is handled by hardware via the
 * VMSA (see the "guest registers have been saved already" comment below).
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	/* Save callee-saved host registers. */
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL.  */
	push %_ASM_ARG2

	/* Save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_GUEST_SPEC_CTRL

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX

2:	cli

	/* Pop @svm to RDI, guest registers have been saved already. */
	pop %_ASM_DI

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	/* "Pop" @spec_ctrl_intercepted.  */
	pop %_ASM_BX

	/* Restore callee-saved host registers pushed in the prologue. */
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY

	/*
	 * Exception fixup for VMRUN: tolerated only while KVM is shutting
	 * down (kvm_rebooting); otherwise fatal.
	 */
3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
390