/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <machine/asmacros.h>

#include "vmx_assym.s"

#ifdef SMP
#define	LK	lock ;
#else
#define	LK
#endif
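/*
 * The 'pm_active' cpuset is updated concurrently by other cpus, so the bit
 * set/clear operations below must use a locked instruction on SMP.
 */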

/*
 * Disable interrupts before updating %rsp in VMX_CHECK_AST or
 * VMX_GUEST_RESTORE.
 *
 * The location that %rsp points to is a 'vmxctx', not a real stack,
 * so we don't want an interrupt handler to trash it.
 */
#define	VMX_DISABLE_INTERRUPTS		cli
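/*
 * Note that interrupts remain disabled across the guest entry itself: on
 * vm entry the guest's RFLAGS is loaded from the vmcs, and on vm exit the
 * host's RFLAGS is cleared except for bit 1 (per the Intel SDM), so
 * vmx_longjmp() also starts executing with interrupts disabled.
 */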

/*
 * If the thread hosting the vcpu has an AST pending then take care of it
 * by returning from vmx_setjmp() with a return value of VMX_RETURN_AST.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx' and that interrupts
 * are disabled.
 */
#define	VMX_CHECK_AST							\
	movq	PCPU(CURTHREAD),%rax;					\
	testl	$TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax);	\
	je	9f;							\
	movq	$VMX_RETURN_AST,%rsi;					\
	movq	%rdi,%rsp;						\
	addq	$VMXCTX_TMPSTKTOP,%rsp;					\
	callq	vmx_return;						\
9:
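/*
 * Note that the AST path points %rsp at the top of the temporary stack
 * embedded in the 'vmxctx' before the call so that vmx_return() has a
 * usable stack.  vmx_return() does not come back here; it unwinds to the
 * host context saved by vmx_setjmp().
 */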

/*
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state. The two
 * exceptions are %rip and %rsp. These registers are atomically switched
 * by hardware from the guest area of the vmcs.
 *
 * We modify %rsp to point to the 'vmxctx' so we can use it to restore
 * host context in case of an error with 'vmlaunch' or 'vmresume'.
 */
#define	VMX_GUEST_RESTORE						\
	movq	%rdi,%rsp;						\
	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;				\
	movq	%rsi,%cr2;						\
	movq	VMXCTX_GUEST_RSI(%rdi),%rsi;				\
	movq	VMXCTX_GUEST_RDX(%rdi),%rdx;				\
	movq	VMXCTX_GUEST_RCX(%rdi),%rcx;				\
	movq	VMXCTX_GUEST_R8(%rdi),%r8;				\
	movq	VMXCTX_GUEST_R9(%rdi),%r9;				\
	movq	VMXCTX_GUEST_RAX(%rdi),%rax;				\
	movq	VMXCTX_GUEST_RBX(%rdi),%rbx;				\
	movq	VMXCTX_GUEST_RBP(%rdi),%rbp;				\
	movq	VMXCTX_GUEST_R10(%rdi),%r10;				\
	movq	VMXCTX_GUEST_R11(%rdi),%r11;				\
	movq	VMXCTX_GUEST_R12(%rdi),%r12;				\
	movq	VMXCTX_GUEST_R13(%rdi),%r13;				\
	movq	VMXCTX_GUEST_R14(%rdi),%r14;				\
	movq	VMXCTX_GUEST_R15(%rdi),%r15;				\
	movq	VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore %rdi last */

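/*
 * %cr2 is switched by hand above (and saved again in vmx_longjmp) because
 * CR2 is not part of the guest-state area of the vmcs and is left untouched
 * by vm entries and exits.
 */
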
/*
 * Check for an error after executing a VMX instruction.
 * 'errreg' will be zero on success and non-zero otherwise.
 * 'ctxreg' points to the 'struct vmxctx' associated with the vcpu.
 */
#define	VM_INSTRUCTION_ERROR(errreg, ctxreg)				\
	jnc 	1f;							\
	movl 	$VM_FAIL_INVALID,errreg;		/* CF is set */	\
	jmp 	3f;							\
1:	jnz 	2f;							\
	movl 	$VM_FAIL_VALID,errreg;		/* ZF is set */		\
	jmp 	3f;							\
2:	movl 	$VM_SUCCESS,errreg;					\
3:	movl	errreg,VMXCTX_LAUNCH_ERROR(ctxreg)
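/*
 * The flag tests above follow the VMX instruction status conventions from
 * the Intel SDM: VMfailInvalid sets CF, VMfailValid sets ZF and deposits an
 * error number in the VM-instruction error field of the current vmcs, and
 * success leaves both flags clear.  The result is stored in the vmxctx
 * (VMXCTX_LAUNCH_ERROR) so it can be examined after vmx_setjmp() returns.
 */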

/*
 * Set or clear the appropriate bit in 'pm_active'.
 * %rdi = vmxctx
 * %rax, %r11 = scratch registers
 */
#define	VMX_SET_PM_ACTIVE						\
	movq	VMXCTX_PMAP(%rdi), %r11;				\
	movl	PCPU(CPUID), %eax;					\
	LK btsl	%eax, PM_ACTIVE(%r11)

#define	VMX_CLEAR_PM_ACTIVE						\
	movq	VMXCTX_PMAP(%rdi), %r11;				\
	movl	PCPU(CPUID), %eax;					\
	LK btrl	%eax, PM_ACTIVE(%r11)
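/*
 * 'pm_active' tracks the set of host cpus that may be caching translations
 * for this pmap.  The intent is that the bit for the current cpu stays set
 * for the duration of guest execution (it is cleared in vmx_return) so that
 * EPT invalidations done elsewhere, which bump 'pm_eptgen', can notify the
 * cpus that are currently running the guest.
 */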

/*
 * If 'vmxctx->eptgen[curcpu]' is not identical to 'pmap->pm_eptgen'
 * then we must invalidate all mappings associated with this eptp.
 *
 * %rdi = vmxctx
 * %rax, %rbx, %r11 = scratch registers
 */
#define	VMX_CHECK_EPTGEN						\
	movl	PCPU(CPUID), %ebx;					\
	movq	VMXCTX_PMAP(%rdi), %r11;				\
	movq	PM_EPTGEN(%r11), %rax;					\
	cmpq	%rax, VMXCTX_EPTGEN(%rdi, %rbx, 8);			\
	je	9f;							\
									\
	/* Refresh 'vmxctx->eptgen[curcpu]' */				\
	movq	%rax, VMXCTX_EPTGEN(%rdi, %rbx, 8);			\
									\
	/* Set up the invept descriptor at the top of tmpstk */	\
	mov	%rdi, %r11;						\
	addq	$VMXCTX_TMPSTKTOP, %r11;				\
	movq	VMXCTX_EPTP(%rdi), %rax;				\
	movq	%rax, -16(%r11);					\
	movq	$0x0, -8(%r11);						\
	mov	$0x1, %eax;	/* single-context invalidation */	\
	invept	-16(%r11), %rax;					\
									\
	/* Check for invept error */					\
	VM_INSTRUCTION_ERROR(%eax, %rdi);				\
	testl	%eax, %eax;						\
	jz	9f;							\
									\
	/* Return via vmx_setjmp with retval of VMX_RETURN_INVEPT */	\
	movq	$VMX_RETURN_INVEPT, %rsi;				\
	movq	%rdi,%rsp;						\
	addq	$VMXCTX_TMPSTKTOP, %rsp;				\
	callq	vmx_return;						\
9:	;
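/*
 * The invept descriptor built by VMX_CHECK_EPTGEN uses the 128-bit format
 * from the Intel SDM: the EPTP in the low 64 bits and a reserved field that
 * must be zero in the high 64 bits.  The invept type of 1 requests a
 * single-context invalidation, i.e. all guest-physical mappings tagged with
 * that EPTP are flushed.
 */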

	.text
/*
 * int vmx_setjmp(ctxp)
 * %rdi = ctxp
 *
 * Return value is '0' (VMX_RETURN_DIRECT) when it returns directly from here.
 * Return value is '1' (VMX_RETURN_LONGJMP) when it returns after a vm exit
 * through vmx_longjmp().
 */
ENTRY(vmx_setjmp)
	movq	(%rsp),%rax			/* return address */
	movq    %r15,VMXCTX_HOST_R15(%rdi)
	movq    %r14,VMXCTX_HOST_R14(%rdi)
	movq    %r13,VMXCTX_HOST_R13(%rdi)
	movq    %r12,VMXCTX_HOST_R12(%rdi)
	movq    %rbp,VMXCTX_HOST_RBP(%rdi)
	movq    %rsp,VMXCTX_HOST_RSP(%rdi)
	movq    %rbx,VMXCTX_HOST_RBX(%rdi)
	movq    %rax,VMXCTX_HOST_RIP(%rdi)

	/*
	 * XXX save host debug registers
	 */
	movl	$VMX_RETURN_DIRECT,%eax
	ret
END(vmx_setjmp)
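
/*
 * A rough sketch of how the routines in this file fit together from the
 * caller's point of view (illustrative only; the real logic lives on the C
 * side of the VMX code, e.g. vmx_run() in vmx.c):
 *
 *	rc = vmx_setjmp(vmxctx);
 *	switch (rc) {
 *	case VMX_RETURN_DIRECT:
 *		vmx_launch(vmxctx);	// or vmx_resume(vmxctx); "returns"
 *					// only through vmx_setjmp()
 *		break;
 *	case VMX_RETURN_LONGJMP:
 *		// guest exited; guest registers were saved by vmx_longjmp()
 *		break;
 *	case VMX_RETURN_AST:
 *	case VMX_RETURN_INVEPT:
 *	case VMX_RETURN_VMLAUNCH:
 *	case VMX_RETURN_VMRESUME:
 *		// handle the pending AST, invept failure or instruction error
 *		break;
 *	}
 */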

/*
 * void vmx_return(struct vmxctx *ctxp, int retval)
 * %rdi = ctxp
 * %rsi = retval
 * Return to vmm context through vmx_setjmp() with a value of 'retval'.
 */
ENTRY(vmx_return)
	/* The pmap is no longer active on the host cpu */
	VMX_CLEAR_PM_ACTIVE

	/* Restore host context. */
	movq	VMXCTX_HOST_R15(%rdi),%r15
	movq	VMXCTX_HOST_R14(%rdi),%r14
	movq	VMXCTX_HOST_R13(%rdi),%r13
	movq	VMXCTX_HOST_R12(%rdi),%r12
	movq	VMXCTX_HOST_RBP(%rdi),%rbp
	movq	VMXCTX_HOST_RSP(%rdi),%rsp
	movq	VMXCTX_HOST_RBX(%rdi),%rbx
	movq	VMXCTX_HOST_RIP(%rdi),%rax
	movq	%rax,(%rsp)			/* return address */

	/*
	 * XXX restore host debug registers
	 */
	movl	%esi,%eax
	ret
END(vmx_return)

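/*
 * vmx_longjmp() below is the address at which the processor resumes
 * execution in host mode after a vm exit.  The host-state area of the vmcs
 * is expected to be set up by the C side of the VMX code so that the host
 * %rip points here and the host %rsp points at the 'struct vmxctx'.
 */
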
/*
 * void vmx_longjmp(void)
 * %rsp points to the struct vmxctx
 */
ENTRY(vmx_longjmp)
	/*
	 * Save guest state that is not automatically saved in the vmcs.
	 */
	movq	%rdi,VMXCTX_GUEST_RDI(%rsp)
	movq	%rsi,VMXCTX_GUEST_RSI(%rsp)
	movq	%rdx,VMXCTX_GUEST_RDX(%rsp)
	movq	%rcx,VMXCTX_GUEST_RCX(%rsp)
	movq	%r8,VMXCTX_GUEST_R8(%rsp)
	movq	%r9,VMXCTX_GUEST_R9(%rsp)
	movq	%rax,VMXCTX_GUEST_RAX(%rsp)
	movq	%rbx,VMXCTX_GUEST_RBX(%rsp)
	movq	%rbp,VMXCTX_GUEST_RBP(%rsp)
	movq	%r10,VMXCTX_GUEST_R10(%rsp)
	movq	%r11,VMXCTX_GUEST_R11(%rsp)
	movq	%r12,VMXCTX_GUEST_R12(%rsp)
	movq	%r13,VMXCTX_GUEST_R13(%rsp)
	movq	%r14,VMXCTX_GUEST_R14(%rsp)
	movq	%r15,VMXCTX_GUEST_R15(%rsp)

	movq	%cr2,%rdi
	movq	%rdi,VMXCTX_GUEST_CR2(%rsp)

	movq	%rsp,%rdi
	movq	$VMX_RETURN_LONGJMP,%rsi

	addq	$VMXCTX_TMPSTKTOP,%rsp
	callq	vmx_return
END(vmx_longjmp)

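/*
 * The two guest entry points below differ only in the VMX instruction used:
 * per the Intel SDM, 'vmlaunch' must be used for the first entry after a
 * vmcs has been cleared (launch state "clear") and 'vmresume' for every
 * entry after that (launch state "launched").
 */
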
/*
 * void vmx_resume(struct vmxctx *ctxp)
 * %rdi = ctxp
 *
 * Although the return type is 'void' this function may return indirectly
 * through vmx_setjmp() with a return value of 2 (VMX_RETURN_VMRESUME).  It
 * may also return with VMX_RETURN_AST or VMX_RETURN_INVEPT from the checks
 * done before guest entry.
 */
ENTRY(vmx_resume)
	VMX_DISABLE_INTERRUPTS

	VMX_CHECK_AST

	VMX_SET_PM_ACTIVE	/* This vcpu is now active on the host cpu */

	VMX_CHECK_EPTGEN	/* Check if we have to invalidate TLB */

	/*
	 * Restore guest state that is not automatically loaded from the vmcs.
	 */
	VMX_GUEST_RESTORE

	vmresume

	/*
	 * Capture the reason why vmresume failed.
	 */
	VM_INSTRUCTION_ERROR(%eax, %rsp)

	/* Return via vmx_setjmp with return value of VMX_RETURN_VMRESUME */
	movq	%rsp,%rdi
	movq	$VMX_RETURN_VMRESUME,%rsi

	addq	$VMXCTX_TMPSTKTOP,%rsp
	callq	vmx_return
END(vmx_resume)

/*
 * void vmx_launch(struct vmxctx *ctxp)
 * %rdi = ctxp
 *
 * Although the return type is 'void' this function may return indirectly
 * through vmx_setjmp() with a return value of 3 (VMX_RETURN_VMLAUNCH).  It
 * may also return with VMX_RETURN_AST or VMX_RETURN_INVEPT from the checks
 * done before guest entry.
 */
ENTRY(vmx_launch)
	VMX_DISABLE_INTERRUPTS

	VMX_CHECK_AST

	VMX_SET_PM_ACTIVE	/* This vcpu is now active on the host cpu */

	VMX_CHECK_EPTGEN	/* Check if we have to invalidate TLB */

	/*
	 * Restore guest state that is not automatically loaded from the vmcs.
	 */
	VMX_GUEST_RESTORE

	vmlaunch

	/*
	 * Capture the reason why vmlaunch failed.
	 */
	VM_INSTRUCTION_ERROR(%eax, %rsp)

	/* Return via vmx_setjmp with return value of VMX_RETURN_VMLAUNCH */
	movq	%rsp,%rdi
	movq	$VMX_RETURN_VMLAUNCH,%rsi

	addq	$VMXCTX_TMPSTKTOP,%rsp
	callq	vmx_return
END(vmx_launch)