xref: /linux/arch/x86/kvm/svm/vmenter.S (revision 36a1818f5a1e50b805317ba13f827067d50f6970)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX	(SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	(SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI	(SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI	(SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8		(SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9		(SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10	(SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11	(SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12	(SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13	(SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14	(SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

#define SVM_vmcb01_pa	(SVM_vmcb01 + KVM_VMCB_pa)

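/*
 * The SVM_*, KVM_VMCB_* and __VCPU_REGS_* symbols come from the generated
 * asm-offsets headers included above.  As a rough illustration (assuming
 * the usual asm-offsets layout rather than quoting the generated header),
 * VCPU_RCX evaluates to the byte offset of svm->vcpu.arch.regs[VCPU_REGS_RCX]
 * within struct vcpu_svm, so "mov VCPU_RCX(%_ASM_DI), %_ASM_CX" below loads
 * the guest's RCX when %_ASM_DI holds @svm.
 */
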
.section .noinstr.text, "ax"

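/*
 * A note on the ALTERNATIVE_2 patching used by the two macros below (a
 * sketch of the intent; the later feature test takes precedence):
 *
 *	no X86_FEATURE_MSR_SPEC_CTRL:	nothing to do, fall through
 *	X86_FEATURE_MSR_SPEC_CTRL only:	"jmp" to the out-of-line body
 *	X86_FEATURE_V_SPEC_CTRL:	nothing to do, hardware context
 *					switches SPEC_CTRL via the VMCB
 */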
.macro RESTORE_GUEST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
801:
.endm
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR.  This is kept out-of-line so that the common
	 * case does not have to jump.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
	movl SVM_spec_ctrl(%_ASM_DI), %eax
	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
	je 801b
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx
	wrmsr
	jmp 801b
.endm

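/*
 * In C, the body above amounts to roughly the following (an illustrative
 * sketch, not generated code; wrmsr() stands in for the raw WRMSR with
 * the MSR index in ECX and the value in EDX:EAX):
 *
 *	if (svm->spec_ctrl != this_cpu_read(x86_spec_ctrl_current))
 *		wrmsr(MSR_IA32_SPEC_CTRL, svm->spec_ctrl, 0);
 */
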
.macro RESTORE_HOST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
.macro RESTORE_HOST_SPEC_CTRL_BODY
900:
	/* Same for after vmexit.  */
	mov $MSR_IA32_SPEC_CTRL, %ecx

	/*
	 * Read the value that the guest wrote to MSR_IA32_SPEC_CTRL and stash
	 * it in svm->spec_ctrl, but only if the MSR was not intercepted during
	 * guest execution (i.e. if @spec_ctrl_intercepted, on the top of the
	 * stack, is false).
	 */
	cmpb $0, (%_ASM_SP)
	jnz 998f
	rdmsr
	movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:

	/* Now restore the host value of the MSR if different from the guest's.  */
	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
	cmp SVM_spec_ctrl(%_ASM_DI), %eax
	je 901b
	xor %edx, %edx
	wrmsr
	jmp 901b
.endm

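/*
 * Roughly, in C, the host-side body amounts to (an illustrative sketch;
 * @spec_ctrl_intercepted is read off the top of the stack and rdmsr()/
 * wrmsr() stand in for the raw instructions):
 *
 *	if (!spec_ctrl_intercepted)
 *		rdmsr(MSR_IA32_SPEC_CTRL, svm->spec_ctrl, unused_hi);
 *	if (this_cpu_read(x86_spec_ctrl_current) != svm->spec_ctrl)
 *		wrmsr(MSR_IA32_SPEC_CTRL,
 *		      this_cpu_read(x86_spec_ctrl_current), 0);
 */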

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
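/*
 * Outline of the sequence below (descriptive only, mirroring the code
 * rather than adding behavior):
 *
 *	1. save host callee-saved GPRs and stack variables
 *	2. RESTORE_GUEST_SPEC_CTRL
 *	3. VMLOAD vmcb01 to load guest non-GPR state
 *	4. load guest GPRs, VMRUN, save guest GPRs
 *	5. VMSAVE vmcb01, then VMLOAD the host save area (GSBASE etc.)
 *	6. stuff the RSB, RESTORE_HOST_SPEC_CTRL, UNTRAIN_RET_VM
 *	7. clear GPRs, restore host callee-saved GPRs, RET
 */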
SYM_FUNC_START(__svm_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL.  */
	push %_ASM_ARG2

	/* Needed to restore access to percpu variables.  */
	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

	/* Finally save @svm. */
	push %_ASM_ARG1

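	/*
	 * Stack layout at this point, top down (a summary of the pushes
	 * above, not extra state):
	 *
	 *	@svm			popped into RAX right after VMRUN
	 *	host save area PA	popped for the second VMLOAD
	 *	@spec_ctrl_intercepted	read in place, "popped" last
	 *	saved RBX, R12-R15 (or EDI/ESI), RBP
	 */
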
.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_GUEST_SPEC_CTRL

	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:	vmload %_ASM_AX
2:

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI),  %r8
	mov VCPU_R9 (%_ASM_DI),  %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI

	/* Enter guest mode */
	sti

3:	vmrun %_ASM_AX
4:
	cli

	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers.  */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on.  */
	mov %_ASM_AX, %_ASM_DI

	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:

	/* Restores GSBASE among other things, allowing access to percpu data.  */
	pop %_ASM_AX
7:	vmload %_ASM_AX
8:

#ifdef CONFIG_MITIGATION_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* "Pop" @spec_ctrl_intercepted.  */
	pop %_ASM_BX

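	/* Restore the host's callee-saved RBX pushed in the prologue.  */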
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY

10:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 2b
	ud2
30:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 4b
	ud2
50:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 6b
	ud2
70:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 8b
	ud2

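	/*
	 * Exception fixups: if VMLOAD/VMRUN/VMSAVE faults, e.g. because SVM
	 * was disabled for an emergency reboot, the entries below redirect
	 * to the matching kvm_rebooting check above, which resumes after
	 * the faulting instruction when a reboot is in progress and BUGs
	 * via ud2 otherwise.
	 */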
	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)
	_ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL.  */
	push %_ASM_ARG2

	/* Save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_GUEST_SPEC_CTRL

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

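	/*
	 * Unlike __svm_vcpu_run, no VMLOAD/VMSAVE and no GPR load/save:
	 * for SEV-ES guests, hardware context switches guest state via
	 * the encrypted VMSA as part of VMRUN/#VMEXIT, so only RAX (the
	 * VMCB PA) is meaningful around VMRUN below.
	 */
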
	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX

2:	cli

	/* Pop @svm to RDI, guest registers have been saved already. */
	pop %_ASM_DI

#ifdef CONFIG_MITIGATION_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	/* "Pop" @spec_ctrl_intercepted.  */
	pop %_ASM_BX

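	/* Restore the host's callee-saved RBX pushed in the prologue.  */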
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY

3:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
