/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (c) 2013 Neel Natu <neel@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <machine/asmacros.h>

#include "vmx_assym.h"

#ifdef SMP
#define	LK	lock ;
#else
#define	LK
#endif

/* Be friendly to DTrace FBT's prologue/epilogue pattern matching */
#define VENTER  push %rbp ; mov %rsp,%rbp
#define VLEAVE  pop %rbp

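/*
 * For orientation: the VMXCTX_* offsets used below are generated into
 * vmx_assym.h from the C-side 'struct vmxctx'. A rough sketch of that
 * structure follows; the authoritative definition lives in vmx.h and
 * the field list here is illustrative only:
 *
 *	struct vmxctx {
 *		register_t	guest_rdi, guest_rsi, guest_rdx, guest_rcx;
 *		register_t	guest_r8, guest_r9, guest_rax, guest_rbx;
 *		register_t	guest_rbp, guest_r10, guest_r11, guest_r12;
 *		register_t	guest_r13, guest_r14, guest_r15, guest_cr2;
 *		register_t	host_r15, host_r14, host_r13, host_r12;
 *		register_t	host_rbp, host_rsp, host_rbx;
 *		int		inst_fail_status;
 *		struct pmap	*pmap;
 *	};
 */
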
/*
 * Save the guest context. Assumes that %rsp points to the 'vmxctx'
 * and, as a side effect, leaves a pointer to the 'vmxctx' in %rdi.
 */
#define	VMX_GUEST_SAVE							\
	movq	%rdi,VMXCTX_GUEST_RDI(%rsp);				\
	movq	%rsi,VMXCTX_GUEST_RSI(%rsp);				\
	movq	%rdx,VMXCTX_GUEST_RDX(%rsp);				\
	movq	%rcx,VMXCTX_GUEST_RCX(%rsp);				\
	movq	%r8,VMXCTX_GUEST_R8(%rsp);				\
	movq	%r9,VMXCTX_GUEST_R9(%rsp);				\
	movq	%rax,VMXCTX_GUEST_RAX(%rsp);				\
	movq	%rbx,VMXCTX_GUEST_RBX(%rsp);				\
	movq	%rbp,VMXCTX_GUEST_RBP(%rsp);				\
	movq	%r10,VMXCTX_GUEST_R10(%rsp);				\
	movq	%r11,VMXCTX_GUEST_R11(%rsp);				\
	movq	%r12,VMXCTX_GUEST_R12(%rsp);				\
	movq	%r13,VMXCTX_GUEST_R13(%rsp);				\
	movq	%r14,VMXCTX_GUEST_R14(%rsp);				\
	movq	%r15,VMXCTX_GUEST_R15(%rsp);				\
	movq	%cr2,%rdi;						\
	movq	%rdi,VMXCTX_GUEST_CR2(%rsp);				\
	movq	%rsp,%rdi;

/*
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state. The two
 * exceptions are %rip and %rsp, which are switched atomically by hardware
 * from the guest area of the VMCS.
 *
 * We modify %rsp to point to the 'vmxctx' so that it can be used to
 * restore host context if 'vmlaunch' or 'vmresume' fails.
 */
#define	VMX_GUEST_RESTORE						\
	movq	%rdi,%rsp;						\
	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;				\
	movq	%rsi,%cr2;						\
	movq	VMXCTX_GUEST_RSI(%rdi),%rsi;				\
	movq	VMXCTX_GUEST_RDX(%rdi),%rdx;				\
	movq	VMXCTX_GUEST_RCX(%rdi),%rcx;				\
	movq	VMXCTX_GUEST_R8(%rdi),%r8;				\
	movq	VMXCTX_GUEST_R9(%rdi),%r9;				\
	movq	VMXCTX_GUEST_RAX(%rdi),%rax;				\
	movq	VMXCTX_GUEST_RBX(%rdi),%rbx;				\
	movq	VMXCTX_GUEST_RBP(%rdi),%rbp;				\
	movq	VMXCTX_GUEST_R10(%rdi),%r10;				\
	movq	VMXCTX_GUEST_R11(%rdi),%r11;				\
	movq	VMXCTX_GUEST_R12(%rdi),%r12;				\
	movq	VMXCTX_GUEST_R13(%rdi),%r13;				\
	movq	VMXCTX_GUEST_R14(%rdi),%r14;				\
	movq	VMXCTX_GUEST_R15(%rdi),%r15;				\
	movq	VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore %rdi last */

/*
 * Zero the remaining caller-saved registers, which still hold guest
 * contents, so that guest-controlled values cannot be misused by the
 * host (e.g., to influence speculative execution).
 */
#define	VMX_GUEST_CLOBBER						\
	xor	%rax, %rax;						\
	xor	%rcx, %rcx;						\
	xor	%rdx, %rdx;						\
	xor	%rsi, %rsi;						\
	xor	%r8, %r8;						\
	xor	%r9, %r9;						\
	xor	%r10, %r10;						\
	xor	%r11, %r11;

/*
 * Save and restore the host context.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 */
#define	VMX_HOST_SAVE							\
	movq    %r15, VMXCTX_HOST_R15(%rdi);				\
	movq    %r14, VMXCTX_HOST_R14(%rdi);				\
	movq    %r13, VMXCTX_HOST_R13(%rdi);				\
	movq    %r12, VMXCTX_HOST_R12(%rdi);				\
	movq    %rbp, VMXCTX_HOST_RBP(%rdi);				\
	movq    %rsp, VMXCTX_HOST_RSP(%rdi);				\
	movq    %rbx, VMXCTX_HOST_RBX(%rdi);

#define	VMX_HOST_RESTORE						\
	movq	VMXCTX_HOST_R15(%rdi), %r15;				\
	movq	VMXCTX_HOST_R14(%rdi), %r14;				\
	movq	VMXCTX_HOST_R13(%rdi), %r13;				\
	movq	VMXCTX_HOST_R12(%rdi), %r12;				\
	movq	VMXCTX_HOST_RBP(%rdi), %rbp;				\
	movq	VMXCTX_HOST_RSP(%rdi), %rsp;				\
	movq	VMXCTX_HOST_RBX(%rdi), %rbx;

/*
 * vmx_enter_guest(struct vmxctx *vmxctx, struct vmx *vmx, int launched)
 * %rdi: pointer to the 'vmxctx'
 * %rsi: pointer to the 'vmx'
 * %edx: launch state of the VMCS
 * Interrupts must be disabled on entry.
 */
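/*
 * A rough sketch of the C-side call (the real call site is vmx_run() in
 * vmx.c; the surrounding logic shown here is illustrative only):
 *
 *	rc = vmx_enter_guest(vmxctx, vmx, launched);
 *	switch (rc) {
 *	case VMX_GUEST_VMEXIT:	  // normal VM-exit; guest state in vmxctx
 *	case VMX_VMLAUNCH_ERROR:  // vmlaunch failed; see inst_fail_status
 *	case VMX_VMRESUME_ERROR:  // vmresume failed; see inst_fail_status
 *	case VMX_INVEPT_ERROR:	  // invept failed
 *		...
 *	}
 */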
ENTRY(vmx_enter_guest)
	VENTER
	/*
	 * Save host state before doing anything else.
	 */
	VMX_HOST_SAVE

	/*
	 * Activate guest pmap on this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %eax
	LK btsl	%eax, PM_ACTIVE(%r11)
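	/*
	 * The three instructions above are, roughly, the assembly form of
	 * (sketch only; 'pmap' stands for vmxctx->pmap):
	 *
	 *	CPU_SET_ATOMIC(curcpu, &pmap->pm_active);
	 */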

	/*
	 * If 'vmx->eptgen[curcpu]' is not identical to 'pmap->pm_eptgen'
	 * then we must invalidate all mappings associated with this EPTP.
	 */
	movq	PM_EPTGEN(%r11), %r10
	cmpq	%r10, VMX_EPTGEN(%rsi, %rax, 8)
	je	guest_restore

	/* Refresh 'vmx->eptgen[curcpu]' */
	movq	%r10, VMX_EPTGEN(%rsi, %rax, 8)

	/* Set up the invept descriptor on the host stack */
	mov	%rsp, %r11
	movq	VMX_EPTP(%rsi), %rax
	movq	%rax, -16(%r11)
	movq	$0x0, -8(%r11)
	mov	$0x1, %eax		/* Single context invalidate */
	invept	-16(%r11), %rax
	jbe	invept_error		/* Check invept instruction error */
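	/*
	 * The invept descriptor is 128 bits wide: the EPTP in bits 63:0
	 * (stored at -16(%r11)) and a reserved field that must be zero in
	 * bits 127:64 (stored at -8(%r11)). Type 1 in %rax selects
	 * single-context invalidation. The 'jbe' above catches both failure
	 * conventions: CF is set on VMfailInvalid, ZF on VMfailValid.
	 */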

guest_restore:
	cmpl	$0, %edx
	je	do_launch

	VMX_GUEST_RESTORE
	vmresume
	/*
	 * In the common case 'vmresume' returns to the host through
	 * 'vmx_exit_guest' with %rsp pointing to the 'vmxctx'.
	 *
	 * If there is an error we return VMX_VMRESUME_ERROR to the caller.
	 */
	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
	movl	$VMX_VMRESUME_ERROR, %eax
	jmp	decode_inst_error

do_launch:
	VMX_GUEST_RESTORE
	vmlaunch
	/*
	 * In the common case 'vmlaunch' returns to the host through
	 * 'vmx_exit_guest' with %rsp pointing to the 'vmxctx'.
	 *
	 * If there is an error we return VMX_VMLAUNCH_ERROR to the caller.
	 */
	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
	movl	$VMX_VMLAUNCH_ERROR, %eax
	jmp	decode_inst_error

invept_error:
	movl	$VMX_INVEPT_ERROR, %eax
	jmp	decode_inst_error

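	/*
	 * 'vmlaunch', 'vmresume' and 'invept' report failure through RFLAGS:
	 * ZF is set on VMfailValid (with an error number in the VM-instruction
	 * error field of the VMCS) and CF on VMfailInvalid. Neither 'movl' nor
	 * 'jmp' modifies flags, so the 'jz' below still reflects which of the
	 * two failure modes occurred.
	 */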
decode_inst_error:
	movl	$VM_FAIL_VALID, %r11d
	jz	inst_error
	movl	$VM_FAIL_INVALID, %r11d
inst_error:
	movl	%r11d, VMXCTX_INST_FAIL_STATUS(%rdi)

	/*
	 * The return value is already populated in %eax so we cannot use
	 * it as a scratch register beyond this point.
	 */

	/*
	 * Deactivate guest pmap from this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %r10d
	LK btrl	%r10d, PM_ACTIVE(%r11)

	VMX_HOST_RESTORE
	VLEAVE
	ret

/*
 * Non-error VM-exit from the guest, flushing the RSB on the way out.
 * This is a label rather than a function so that C code can install it
 * as the host %rip when setting up the VMCS.
 * The VMCS-restored %rsp points to the 'struct vmxctx'.
 */
	ALIGN_TEXT
	.globl	vmx_exit_guest_flush_rsb
vmx_exit_guest_flush_rsb:
	/*
	 * Save guest state that is not automatically saved in the vmcs.
	 */
	VMX_GUEST_SAVE

	/*
	 * Deactivate guest pmap from this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %r10d
	LK btrl	%r10d, PM_ACTIVE(%r11)

	VMX_HOST_RESTORE

	VMX_GUEST_CLOBBER

	/*
	 * To prevent malicious branch target predictions from
	 * affecting the host, overwrite all entries in the RSB upon
	 * exiting a guest. The RSB on the affected CPUs holds 32
	 * entries, hence 16 loop iterations with two calls each.
	 */
	mov	$16, %ecx	/* 16 iterations, two calls per loop */
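	/*
	 * Each 'call' below pushes a return address, creating an RSB entry,
	 * and jumps over its paired pause/call-self trap; the traps are only
	 * ever executed speculatively, if the RSB underflows. The pushed
	 * return addresses are discarded by restoring %rsp afterwards.
	 */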
	mov	%rsp, %rax
0:	call	2f		/* create an RSB entry. */
1:	pause
	call	1b		/* capture rogue speculation. */
2:	call	2f		/* create an RSB entry. */
1:	pause
	call	1b		/* capture rogue speculation. */
2:	sub	$1, %ecx
	jnz	0b
	mov	%rax, %rsp

	/*
	 * This will return to the caller of 'vmx_enter_guest()' with a return
	 * value of VMX_GUEST_VMEXIT.
	 */
	movl	$VMX_GUEST_VMEXIT, %eax
	VLEAVE
	ret

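/*
 * Non-error VM-exit from the guest, without the RSB flush; installed as
 * the host %rip instead of 'vmx_exit_guest_flush_rsb' when the host does
 * not require the RSB overwrite.
 */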
	.globl	vmx_exit_guest
vmx_exit_guest:
	/*
	 * Save guest state that is not automatically saved in the vmcs.
	 */
	VMX_GUEST_SAVE

	/*
	 * Deactivate guest pmap from this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %r10d
	LK btrl	%r10d, PM_ACTIVE(%r11)

	VMX_HOST_RESTORE

	VMX_GUEST_CLOBBER

	/*
	 * This will return to the caller of 'vmx_enter_guest()' with a return
	 * value of VMX_GUEST_VMEXIT.
	 */
	movl	$VMX_GUEST_VMEXIT, %eax
	VLEAVE
	ret
END(vmx_enter_guest)

/*
 * %rdi = interrupt handler entry point
 *
 * Calling sequence described in the "Instruction Set Reference" for the "INT"
 * instruction in Intel SDM, Vol 2.
 */
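/*
 * The pushes below hand-build the stack frame that the CPU would create
 * when delivering an interrupt (%ss, %rsp, %rflags, %cs), and the callq
 * supplies %rip, so the handler's iretq unwinds directly back here with
 * the pre-call %rsp and %rflags (re-enabling interrupts if IF was set).
 */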
ENTRY(vmx_call_isr)
	VENTER
	mov	%rsp, %r11			/* save %rsp */
	and	$~0xf, %rsp			/* align on 16-byte boundary */
	pushq	$KERNEL_SS			/* %ss */
	pushq	%r11				/* %rsp */
	pushfq					/* %rflags */
	pushq	$KERNEL_CS			/* %cs */
	cli					/* disable interrupts */
	callq	*%rdi				/* push %rip and call isr */
	VLEAVE
	ret
END(vmx_call_isr)