xref: /linux/arch/x86/kvm/svm/vmenter.S (revision 16809ecdc1e8ab7278f1d60021ac809edd17d060)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif
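
/*
 * Each VCPU_<reg> value above is the byte offset of that GPR within the
 * register array passed to __svm_vcpu_run(), i.e. the __VCPU_REGS_* index
 * from <asm/kvm_vcpu_regs.h> scaled by the word size.  For example, on a
 * 64-bit build WORD_SIZE is 8, so with __VCPU_REGS_RCX == 1 (per that
 * header) VCPU_RCX works out to byte offset 8.
 */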

.section .noinstr.text, "ax"

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long (physical address of the VMCB to run)
 * @regs:	unsigned long * (pointer to the guest GPR array)
 */
SYM_FUNC_START(__svm_vcpu_run)
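	/*
	 * A sketch of the expected C-side declaration and call (the actual
	 * caller lives in arch/x86/kvm/svm/svm.c and may differ in detail):
	 *
	 *	void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
	 *	...
	 *	__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
	 *
	 * The arguments arrive in %_ASM_ARG1/%_ASM_ARG2 as defined by
	 * <asm/asm.h> for the kernel's C calling convention.
	 */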
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Save @regs. */
	push %_ASM_ARG2

	/* Save @vmcb_pa. */
	push %_ASM_ARG1

	/* Move @regs to RAX. */
	mov %_ASM_ARG2, %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX),  %r8
	mov VCPU_R9 (%_ASM_AX),  %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif

	/* "POP" @vmcb_pa to RAX. */
	pop %_ASM_AX

	/* Enter guest mode */
	sti
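	/*
	 * VMLOAD, VMRUN and VMSAVE below can fault, e.g. if SVM has been
	 * disabled on this CPU because the machine is rebooting.  Each
	 * instruction is covered by an _ASM_EXTABLE entry that redirects such
	 * a fault to the following kvm_rebooting check: if KVM is indeed
	 * rebooting, the fault is swallowed and execution continues past the
	 * instruction; otherwise UD2 turns it into a fatal BUG.
	 */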
1:	vmload %_ASM_AX
	jmp 3f
2:	cmpb $0, kvm_rebooting
	jne 3f
	ud2
	_ASM_EXTABLE(1b, 2b)

3:	vmrun %_ASM_AX
	jmp 5f
4:	cmpb $0, kvm_rebooting
	jne 5f
	ud2
	_ASM_EXTABLE(3b, 4b)

5:	vmsave %_ASM_AX
	jmp 7f
6:	cmpb $0, kvm_rebooting
	jne 7f
	ud2
	_ASM_EXTABLE(5b, 6b)
7:
	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* "POP" @regs to RAX. */
	pop %_ASM_AX

	/* Save all guest registers. */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

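	/* Restore the host's callee-saved registers pushed in the prologue. */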
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	ret
SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long (physical address of the VMCB to run)
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
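	/*
	 * A sketch of the expected C-side declaration (the actual caller
	 * lives in arch/x86/kvm/svm/svm.c):
	 *
	 *	void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
	 *
	 * Unlike __svm_vcpu_run() above, no guest GPRs are loaded or saved
	 * here: for SEV-ES guests the register state lives in the encrypted
	 * VMSA and is switched by hardware as part of VMRUN/#VMEXIT, so only
	 * the host's callee-saved registers need to be preserved.  The VMRUN
	 * below uses the same kvm_rebooting exception fixup as above.
	 */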
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Enter guest mode */
	mov %_ASM_ARG1, %_ASM_AX
	sti

1:	vmrun %_ASM_AX
	jmp 3f
2:	cmpb $0, kvm_rebooting
	jne 3f
	ud2
	_ASM_EXTABLE(1b, 2b)

3:	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	ret
SYM_FUNC_END(__svm_sev_es_vcpu_run)