xref: /linux/arch/x86/kvm/svm/vmenter.S (revision 24bce201d79807b668bf9d9e0aca801c5c0d5f78)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>

/* Size of one GPR in bytes; scales the __VCPU_REGS_* indices into byte offsets. */
#define WORD_SIZE (BITS_PER_LONG / 8)

/*
 * Byte offsets of each guest GPR within the vcpu->arch.regs array passed in
 * via @regs.  Intentionally omit RAX as it's context switched by hardware.
 */
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif

/* World-switch code must not be instrumented (tracing/kprobes). */
.section .noinstr.text, "ax"
31
/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long (physical address of the VMCB to VMRUN)
 * @regs:	unsigned long * (to guest registers)
 */
SYM_FUNC_START(__svm_vcpu_run)
	/* Save callee-saved registers that are clobbered by guest state. */
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Save @regs. */
	push %_ASM_ARG2

	/* Save @vmcb. */
	push %_ASM_ARG1

	/* Move @regs to RAX. */
	mov %_ASM_ARG2, %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX),  %r8
	mov VCPU_R9 (%_ASM_AX),  %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif

	/* "POP" @vmcb to RAX. */
	pop %_ASM_AX

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX

2:	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* "POP" @regs to RAX. */
	pop %_ASM_AX

	/* Save all guest registers.  */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* Restore the host's callee-saved registers and return. */
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	/*
	 * VMRUN faulted (see the extable entry below).  If KVM is rebooting,
	 * swallow the fault and resume at the post-VMRUN path, else OOPS.
	 */
3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_vcpu_run)
160
/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long (physical address of the VMCB to VMRUN)
 *
 * Unlike __svm_vcpu_run(), no GPRs are loaded/saved by software; for SEV-ES
 * guests the hardware/firmware handles guest register state via the VMSA.
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	/* Save callee-saved registers (paranoia; guest state isn't loaded). */
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Move @vmcb to RAX. */
	mov %_ASM_ARG1, %_ASM_AX

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX

2:	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Restore the host's callee-saved registers and return. */
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	/*
	 * VMRUN faulted (see the extable entry below).  If KVM is rebooting,
	 * swallow the fault and resume at the post-VMRUN path, else OOPS.
	 */
3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
214