/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>

#define WORD_SIZE (BITS_PER_LONG / 8)

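/*
 * Byte offsets of each GPR within the @regs array passed to __svm_vcpu_run();
 * every slot is one word (WORD_SIZE bytes) wide.
 */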
/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif

	.text

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long (physical address of the guest's VMCB)
 * @regs:	unsigned long * (pointer to the guest GPR array)
 */
SYM_FUNC_START(__svm_vcpu_run)
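	/*
	 * Set up a frame pointer and save the callee-saved GPRs the C calling
	 * convention requires this function to preserve (RBX, RBP, R12-R15 on
	 * 64-bit; EBX, EBP, ESI, EDI on 32-bit).
	 */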
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

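	/*
	 * Stash @regs and @vmcb on the stack: every GPR, including the
	 * callee-saved ones preserved above, is about to be loaded with guest
	 * values, so the stack is the only storage that survives across VMRUN.
	 */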
	/* Save @regs. */
	push %_ASM_ARG2

	/* Save @vmcb. */
	push %_ASM_ARG1

	/* Move @regs to RAX. */
	mov %_ASM_ARG2, %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX),  %r8
	mov VCPU_R9 (%_ASM_AX),  %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif

	/* "POP" @vmcb to RAX. */
	pop %_ASM_AX

	/* Enter guest mode */
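	/*
	 * RAX holds the VMCB physical address, the implicit operand of VMLOAD,
	 * VMRUN and VMSAVE.  Each instruction below has an exception table
	 * entry: if it faults, execution resumes at the paired fixup label,
	 * which lets the fault slide when KVM is rebooting (the VM is being
	 * torn down anyway) and otherwise BUGs via ud2.
	 */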
1:	vmload %_ASM_AX
	jmp 3f
2:	cmpb $0, kvm_rebooting
	jne 3f
	ud2
	_ASM_EXTABLE(1b, 2b)

3:	vmrun %_ASM_AX
	jmp 5f
4:	cmpb $0, kvm_rebooting
	jne 5f
	ud2
	_ASM_EXTABLE(3b, 4b)

5:	vmsave %_ASM_AX
	jmp 7f
6:	cmpb $0, kvm_rebooting
	jne 7f
	ud2
	_ASM_EXTABLE(5b, 6b)
7:
	/* "POP" @regs to RAX. */
	pop %_ASM_AX
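	/*
	 * Every GPR except RAX and RSP still holds the guest's value here;
	 * hardware already restored RAX and RSP to host values on #VMEXIT.
	 */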

	/* Save all guest registers.  */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	ret
SYM_FUNC_END(__svm_vcpu_run)
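
/*
 * A minimal sketch of how the C side is expected to declare and call this
 * helper.  The field names (svm->vmcb_pa, svm->vcpu.arch.regs) are
 * illustrative; the actual call site lives in svm.c and may differ.
 *
 *	void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
 *
 *	__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)svm->vcpu.arch.regs);
 */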