/*-
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (c) 2013 Neel Natu <neel@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <machine/asmacros.h>

#include "vmx_assym.h"
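
/*
 * The VMXCTX_*, VMX_* and PM_* names used below are assembler constants
 * for structure offsets and fields. They come from the generated
 * "vmx_assym.h" (produced at build time from a genassym-style source such
 * as vmx_genassym.c) rather than being maintained by hand, so they track
 * the C structure layouts.
 */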

#ifdef SMP
#define	LK	lock ;
#else
#define	LK
#endif

/* Be friendly to DTrace FBT's prologue/epilogue pattern matching */
#define VENTER  push %rbp ; mov %rsp,%rbp
#define VLEAVE  pop %rbp

/*
 * Save the guest context.
 */
#define	VMX_GUEST_SAVE							\
	movq	%rdi,VMXCTX_GUEST_RDI(%rsp);				\
	movq	%rsi,VMXCTX_GUEST_RSI(%rsp);				\
	movq	%rdx,VMXCTX_GUEST_RDX(%rsp);				\
	movq	%rcx,VMXCTX_GUEST_RCX(%rsp);				\
	movq	%r8,VMXCTX_GUEST_R8(%rsp);				\
	movq	%r9,VMXCTX_GUEST_R9(%rsp);				\
	movq	%rax,VMXCTX_GUEST_RAX(%rsp);				\
	movq	%rbx,VMXCTX_GUEST_RBX(%rsp);				\
	movq	%rbp,VMXCTX_GUEST_RBP(%rsp);				\
	movq	%r10,VMXCTX_GUEST_R10(%rsp);				\
	movq	%r11,VMXCTX_GUEST_R11(%rsp);				\
	movq	%r12,VMXCTX_GUEST_R12(%rsp);				\
	movq	%r13,VMXCTX_GUEST_R13(%rsp);				\
	movq	%r14,VMXCTX_GUEST_R14(%rsp);				\
	movq	%r15,VMXCTX_GUEST_R15(%rsp);				\
	movq	%cr2,%rdi;						\
	movq	%rdi,VMXCTX_GUEST_CR2(%rsp);				\
	movq	%rsp,%rdi;
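
/*
 * VMX_GUEST_SAVE runs immediately after a VM-exit. At that point %rsp
 * holds the host-RSP value loaded from the VMCS host-state area, which
 * the C code points at the 'vmxctx' (see the exit labels below), so the
 * guest registers are stored relative to %rsp. %cr2 is saved by hand
 * because VMX transitions do not save or restore it.
 */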

/*
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state. The two
 * exceptions are %rip and %rsp. These registers are atomically switched
 * by hardware from the guest area of the vmcs.
 *
 * We modify %rsp to point to the 'vmxctx' so we can use it to restore
 * host context in case of an error with 'vmlaunch' or 'vmresume'.
 */
#define	VMX_GUEST_RESTORE						\
	movq	%rdi,%rsp;						\
	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;				\
	movq	%rsi,%cr2;						\
	movq	VMXCTX_GUEST_RSI(%rdi),%rsi;				\
	movq	VMXCTX_GUEST_RDX(%rdi),%rdx;				\
	movq	VMXCTX_GUEST_RCX(%rdi),%rcx;				\
	movq	VMXCTX_GUEST_R8(%rdi),%r8;				\
	movq	VMXCTX_GUEST_R9(%rdi),%r9;				\
	movq	VMXCTX_GUEST_RAX(%rdi),%rax;				\
	movq	VMXCTX_GUEST_RBX(%rdi),%rbx;				\
	movq	VMXCTX_GUEST_RBP(%rdi),%rbp;				\
	movq	VMXCTX_GUEST_R10(%rdi),%r10;				\
	movq	VMXCTX_GUEST_R11(%rdi),%r11;				\
	movq	VMXCTX_GUEST_R12(%rdi),%r12;				\
	movq	VMXCTX_GUEST_R13(%rdi),%r13;				\
	movq	VMXCTX_GUEST_R14(%rdi),%r14;				\
	movq	VMXCTX_GUEST_R15(%rdi),%r15;				\
	movq	VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore %rdi last */

/*
 * Zero the remaining registers that still hold guest contents so they
 * can't be misused.
 */
#define	VMX_GUEST_CLOBBER						\
	xor	%rax, %rax;						\
	xor	%rcx, %rcx;						\
	xor	%rdx, %rdx;						\
	xor	%rsi, %rsi;						\
	xor	%r8, %r8;						\
	xor	%r9, %r9;						\
	xor	%r10, %r10;						\
	xor	%r11, %r11;
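
/*
 * Only the caller-saved registers that may still hold guest values are
 * zeroed here. %rdi is left alone because it holds the host's 'vmxctx'
 * pointer, and the callee-saved registers (%rbx, %rbp, %rsp, %r12-%r15)
 * have already been reloaded with host values by VMX_HOST_RESTORE on the
 * exit paths that use this macro.
 */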

/*
 * Save and restore the host context.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 */
#define	VMX_HOST_SAVE							\
	movq    %r15, VMXCTX_HOST_R15(%rdi);				\
	movq    %r14, VMXCTX_HOST_R14(%rdi);				\
	movq    %r13, VMXCTX_HOST_R13(%rdi);				\
	movq    %r12, VMXCTX_HOST_R12(%rdi);				\
	movq    %rbp, VMXCTX_HOST_RBP(%rdi);				\
	movq    %rsp, VMXCTX_HOST_RSP(%rdi);				\
	movq    %rbx, VMXCTX_HOST_RBX(%rdi);				\

#define	VMX_HOST_RESTORE						\
	movq	VMXCTX_HOST_R15(%rdi), %r15;				\
	movq	VMXCTX_HOST_R14(%rdi), %r14;				\
	movq	VMXCTX_HOST_R13(%rdi), %r13;				\
	movq	VMXCTX_HOST_R12(%rdi), %r12;				\
	movq	VMXCTX_HOST_RBP(%rdi), %rbp;				\
	movq	VMXCTX_HOST_RSP(%rdi), %rsp;				\
	movq	VMXCTX_HOST_RBX(%rdi), %rbx;				\

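/*
 * Only %rsp and the registers the SysV AMD64 ABI treats as callee-saved
 * (%rbx, %rbp, %r12-%r15) need to be preserved here; the caller of
 * vmx_enter_guest() does not expect the caller-saved registers to
 * survive the call, so they are simply clobbered or reloaded on the way
 * back.
 */
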
/*
 * vmx_enter_guest(struct vmxctx *vmxctx, struct vmx *vmx, int launched)
 * %rdi: pointer to the 'vmxctx'
 * %rsi: pointer to the 'vmx'
 * %edx: launch state of the VMCS
 * Interrupts must be disabled on entry.
 */
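/*
 * Rough C-side view of this entry point (the authoritative declaration
 * lives in the VMX headers, not here):
 *
 *	int vmx_enter_guest(struct vmxctx *ctx, struct vmx *vmx, int launched);
 *
 * %eax carries the return value: VMX_GUEST_VMEXIT on a normal VM-exit,
 * or VMX_VMLAUNCH_ERROR/VMX_VMRESUME_ERROR/VMX_INVEPT_ERROR on the error
 * paths below.
 */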
ENTRY(vmx_enter_guest)
	VENTER
	/*
	 * Save host state before doing anything else.
	 */
	VMX_HOST_SAVE

	/*
	 * Activate guest pmap on this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %eax
	LK btsl	%eax, PM_ACTIVE(%r11)
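
	/*
	 * With this cpu's bit set in 'pm_active' the host pmap code knows
	 * the guest EPT is in use here, so EPT invalidations bump
	 * 'pm_eptgen' and notify this cpu; the generation check below then
	 * picks up any pending invalidation. The bit is cleared again on
	 * every exit path.
	 */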

	/*
	 * If 'vmx->eptgen[curcpu]' is not identical to 'pmap->pm_eptgen'
	 * then we must invalidate all mappings associated with this EPTP.
	 */
	movq	PM_EPTGEN(%r11), %r10
	cmpq	%r10, VMX_EPTGEN(%rsi, %rax, 8)
	je	guest_restore

	/* Refresh 'vmx->eptgen[curcpu]' */
	movq	%r10, VMX_EPTGEN(%rsi, %rax, 8)

	/* Setup the invept descriptor on the host stack */
	mov	%rsp, %r11
	movq	VMX_EPTP(%rsi), %rax
	movq	%rax, -16(%r11)
	movq	$0x0, -8(%r11)
	mov	$0x1, %eax		/* Single context invalidate */
	invept	-16(%r11), %rax
	jbe	invept_error		/* Check invept instruction error */
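
	/*
	 * The descriptor built at -16(%r11) is the 128-bit INVEPT operand:
	 * the low quadword is the EPTP and the high quadword is reserved
	 * and must be zero. Type 1 in %eax requests a single-context
	 * invalidation, and 'jbe' catches both VMfailInvalid (CF set) and
	 * VMfailValid (ZF set).
	 */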

guest_restore:
	cmpl	$0, %edx
	je	do_launch

	VMX_GUEST_RESTORE
	vmresume
	/*
	 * In the common case 'vmresume' returns back to the host through
	 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
	 *
	 * If there is an error we return VMX_VMRESUME_ERROR to the caller.
	 */
	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
	movl	$VMX_VMRESUME_ERROR, %eax
	jmp	decode_inst_error

do_launch:
	VMX_GUEST_RESTORE
	vmlaunch
	/*
	 * In the common case 'vmlaunch' returns back to the host through
	 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
	 *
	 * If there is an error we return VMX_VMLAUNCH_ERROR to the caller.
	 */
	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
	movl	$VMX_VMLAUNCH_ERROR, %eax
	jmp	decode_inst_error

invept_error:
	movl	$VMX_INVEPT_ERROR, %eax
	jmp	decode_inst_error

decode_inst_error:
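	/*
	 * The flags set by the failed vmlaunch/vmresume/invept are still
	 * live here ('movl' and 'jmp' do not modify them): ZF set means
	 * VMfailValid (an error number is available in the VM-instruction
	 * error field), otherwise the failure was VMfailInvalid.
	 */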
	movl	$VM_FAIL_VALID, %r11d
	jz	inst_error
	movl	$VM_FAIL_INVALID, %r11d
inst_error:
	movl	%r11d, VMXCTX_INST_FAIL_STATUS(%rdi)

	/*
	 * The return value is already populated in %eax so we cannot use
	 * it as a scratch register beyond this point.
	 */

	/*
	 * Deactivate guest pmap from this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %r10d
	LK btrl	%r10d, PM_ACTIVE(%r11)

	VMX_HOST_RESTORE
	VLEAVE
	ret

/*
 * Non-error VM-exit from the guest. Make this a label so it can
 * be used by C code when setting up the VMCS.
 * The VMCS-restored %rsp points to the struct vmxctx.
 */
	ALIGN_TEXT
	.globl	vmx_exit_guest_flush_rsb
vmx_exit_guest_flush_rsb:
	/*
	 * Save guest state that is not automatically saved in the vmcs.
	 */
	VMX_GUEST_SAVE

	/*
	 * Deactivate guest pmap from this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %r10d
	LK btrl	%r10d, PM_ACTIVE(%r11)

	VMX_HOST_RESTORE

	VMX_GUEST_CLOBBER

	/*
	 * To prevent malicious branch target predictions from
	 * affecting the host, overwrite all entries in the RSB upon
	 * exiting a guest.
	 */
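	/*
	 * The loop below stuffs 32 entries (16 iterations, two calls each)
	 * into the return stack buffer. Each "capture" loop is a benign
	 * pause/call-self pair, so a speculative return that consumes one
	 * of the stuffed entries spins harmlessly. The calls push return
	 * addresses, so %rsp is saved in %rax and restored afterwards.
	 */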
	mov	$16, %ecx	/* 16 iterations, two calls per loop */
	mov	%rsp, %rax
0:	call	2f		/* create an RSB entry. */
1:	pause
	call	1b		/* capture rogue speculation. */
2:	call	2f		/* create an RSB entry. */
1:	pause
	call	1b		/* capture rogue speculation. */
2:	sub	$1, %ecx
	jnz	0b
	mov	%rax, %rsp

	/*
	 * This will return to the caller of 'vmx_enter_guest()' with a return
	 * value of VMX_GUEST_VMEXIT.
	 */
	movl	$VMX_GUEST_VMEXIT, %eax
	VLEAVE
	ret

	.globl	vmx_exit_guest
vmx_exit_guest:
	/*
	 * Save guest state that is not automatically saved in the vmcs.
	 */
	VMX_GUEST_SAVE

	/*
	 * Deactivate guest pmap from this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %r10d
	LK btrl	%r10d, PM_ACTIVE(%r11)

	VMX_HOST_RESTORE

	VMX_GUEST_CLOBBER

	/*
	 * This will return to the caller of 'vmx_enter_guest()' with a return
	 * value of VMX_GUEST_VMEXIT.
	 */
	movl	$VMX_GUEST_VMEXIT, %eax
	VLEAVE
	ret
END(vmx_enter_guest)

/*
 * %rdi = interrupt handler entry point
 *
 * Calling sequence described in the "Instruction Set Reference" for the "INT"
 * instruction in Intel SDM, Vol 2.
 */
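/*
 * The pushes below build the same stack frame the CPU would create when
 * delivering an interrupt from kernel mode: %ss, the original %rsp,
 * %rflags and %cs, with 'callq' supplying the return %rip. When the
 * handler finishes with 'iretq' it unwinds that frame and returns here
 * with the saved (unaligned) %rsp and the original %rflags, which also
 * re-enables interrupts if IF was set when pushfq ran.
 */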
ENTRY(vmx_call_isr)
	VENTER
	mov	%rsp, %r11			/* save %rsp */
	and	$~0xf, %rsp			/* align on 16-byte boundary */
	pushq	$KERNEL_SS			/* %ss */
	pushq	%r11				/* %rsp */
	pushfq					/* %rflags */
	pushq	$KERNEL_CS			/* %cs */
	cli					/* disable interrupts */
	callq	*%rdi				/* push %rip and call isr */
	VLEAVE
	ret
END(vmx_call_isr)