/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <machine/asmacros.h>

#include "vmx_assym.s"

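/*
 * The VMXCTX_* symbols used below are assembler-visible offsets into
 * struct vmxctx.  They are not hand-maintained: "vmx_assym.s" is generated
 * at build time by the kernel's genassym mechanism, roughly as sketched
 * here (the field names are illustrative, not a definitive copy of
 * vmx_genassym.c):
 *
 *	#include <sys/assym.h>
 *	ASSYM(VMXCTX_GUEST_RDI, offsetof(struct vmxctx, guest_rdi));
 *	ASSYM(VMXCTX_HOST_RSP, offsetof(struct vmxctx, host_rsp));
 *
 * Generating the offsets keeps this assembly from silently drifting out
 * of sync with the C definition of struct vmxctx.
 */
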
/*
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 */
#define	VMX_GUEST_RESTORE						\
	/*								\
	 * Make sure that interrupts are disabled before restoring CR2.	\
	 * Otherwise a page fault taken during interrupt handler	\
	 * execution could end up trashing CR2.				\
	 */								\
	cli;								\
	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;				\
	movq	%rsi,%cr2;						\
	movq	VMXCTX_GUEST_RSI(%rdi),%rsi;				\
	movq	VMXCTX_GUEST_RDX(%rdi),%rdx;				\
	movq	VMXCTX_GUEST_RCX(%rdi),%rcx;				\
	movq	VMXCTX_GUEST_R8(%rdi),%r8;				\
	movq	VMXCTX_GUEST_R9(%rdi),%r9;				\
	movq	VMXCTX_GUEST_RAX(%rdi),%rax;				\
	movq	VMXCTX_GUEST_RBX(%rdi),%rbx;				\
	movq	VMXCTX_GUEST_RBP(%rdi),%rbp;				\
	movq	VMXCTX_GUEST_R10(%rdi),%r10;				\
	movq	VMXCTX_GUEST_R11(%rdi),%r11;				\
	movq	VMXCTX_GUEST_R12(%rdi),%r12;				\
	movq	VMXCTX_GUEST_R13(%rdi),%r13;				\
	movq	VMXCTX_GUEST_R14(%rdi),%r14;				\
	movq	VMXCTX_GUEST_R15(%rdi),%r15;				\
	movq	VMXCTX_GUEST_RDI(%rdi),%rdi;	/* restore %rdi last */

#define	VM_INSTRUCTION_ERROR(reg)					\
	jnc	1f;							\
	movl	$VM_FAIL_INVALID,reg;		/* CF is set */		\
	jmp	3f;							\
1:	jnz	2f;							\
	movl	$VM_FAIL_VALID,reg;		/* ZF is set */		\
	jmp	3f;							\
2:	movl	$VM_SUCCESS,reg;					\
3:	movl	reg,VMXCTX_LAUNCH_ERROR(%rsp)
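
/*
 * The flag tests above follow the VMX status conventions in the Intel SDM:
 * a failed VMLAUNCH/VMRESUME sets CF for "VMfailInvalid" (no current VMCS)
 * or ZF for "VMfailValid", in which case a diagnostic error number is
 * available in the VM-instruction error field of the VMCS.  Note that the
 * final store assumes %rsp points at the 'vmxctx' when the instruction
 * falls through.
 *
 * A caller could refine a VM_FAIL_VALID result roughly as follows; the
 * vmcs_read() helper and the VMCS_INSTRUCTION_ERROR name are assumptions
 * for illustration, not a definitive API:
 *
 *	if (vmxctx->launch_error == VM_FAIL_VALID)
 *		error_number = vmcs_read(VMCS_INSTRUCTION_ERROR);
 */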

	.text
/*
 * int vmx_setjmp(struct vmxctx *ctxp)
 * %rdi = ctxp
 *
 * Returns VMX_RETURN_DIRECT (0) when returning directly from here.
 * Returns a non-zero VMX_RETURN_* code when control comes back through
 * vmx_return(): VMX_RETURN_LONGJMP after a vm exit, or VMX_RETURN_VMRESUME/
 * VMX_RETURN_VMLAUNCH if the corresponding instruction failed.
 */
ENTRY(vmx_setjmp)
	movq	(%rsp),%rax			/* return address */
	movq	%r15,VMXCTX_HOST_R15(%rdi)
	movq	%r14,VMXCTX_HOST_R14(%rdi)
	movq	%r13,VMXCTX_HOST_R13(%rdi)
	movq	%r12,VMXCTX_HOST_R12(%rdi)
	movq	%rbp,VMXCTX_HOST_RBP(%rdi)
	movq	%rsp,VMXCTX_HOST_RSP(%rdi)
	movq	%rbx,VMXCTX_HOST_RBX(%rdi)
	movq	%rax,VMXCTX_HOST_RIP(%rdi)

	/*
	 * XXX save host debug registers
	 */
	movl	$VMX_RETURN_DIRECT,%eax
	ret
END(vmx_setjmp)
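
/*
 * A minimal sketch of the setjmp/longjmp protocol as a caller (vmx_run()
 * in vmx.c, for instance) might drive it.  The 'launched' bookkeeping and
 * handle_exit() helper are illustrative assumptions, not the actual
 * vmx.c code:
 *
 *	for (;;) {
 *		switch (vmx_setjmp(vmxctx)) {
 *		case VMX_RETURN_DIRECT:
 *			if (launched)
 *				vmx_resume(vmxctx);	// no return on success
 *			else
 *				vmx_launch(vmxctx);	// no return on success
 *			break;				// not reached
 *		case VMX_RETURN_LONGJMP:		// vm exit
 *			launched = 1;
 *			handle_exit(vmxctx);
 *			break;
 *		case VMX_RETURN_VMRESUME:		// vmresume failed
 *		case VMX_RETURN_VMLAUNCH:		// vmlaunch failed
 *			return (vmxctx->launch_error);
 *		}
 *	}
 */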

/*
 * void vmx_return(struct vmxctx *ctxp, int retval)
 * %rdi = ctxp
 * %rsi = retval
 * Return to vmm context through vmx_setjmp() with a value of 'retval'.
 */
ENTRY(vmx_return)
	/* Restore host context. */
	movq	VMXCTX_HOST_R15(%rdi),%r15
	movq	VMXCTX_HOST_R14(%rdi),%r14
	movq	VMXCTX_HOST_R13(%rdi),%r13
	movq	VMXCTX_HOST_R12(%rdi),%r12
	movq	VMXCTX_HOST_RBP(%rdi),%rbp
	movq	VMXCTX_HOST_RSP(%rdi),%rsp
	movq	VMXCTX_HOST_RBX(%rdi),%rbx
	movq	VMXCTX_HOST_RIP(%rdi),%rax
	movq	%rax,(%rsp)			/* return address */

	/*
	 * XXX restore host debug registers
	 */
	movl	%esi,%eax
	ret
END(vmx_return)

/*
 * void vmx_longjmp(void)
 * %rsp points to the struct vmxctx
 */
ENTRY(vmx_longjmp)
	/*
	 * Save guest state that is not automatically saved in the vmcs.
	 */
	movq	%rdi,VMXCTX_GUEST_RDI(%rsp)
	movq	%rsi,VMXCTX_GUEST_RSI(%rsp)
	movq	%rdx,VMXCTX_GUEST_RDX(%rsp)
	movq	%rcx,VMXCTX_GUEST_RCX(%rsp)
	movq	%r8,VMXCTX_GUEST_R8(%rsp)
	movq	%r9,VMXCTX_GUEST_R9(%rsp)
	movq	%rax,VMXCTX_GUEST_RAX(%rsp)
	movq	%rbx,VMXCTX_GUEST_RBX(%rsp)
	movq	%rbp,VMXCTX_GUEST_RBP(%rsp)
	movq	%r10,VMXCTX_GUEST_R10(%rsp)
	movq	%r11,VMXCTX_GUEST_R11(%rsp)
	movq	%r12,VMXCTX_GUEST_R12(%rsp)
	movq	%r13,VMXCTX_GUEST_R13(%rsp)
	movq	%r14,VMXCTX_GUEST_R14(%rsp)
	movq	%r15,VMXCTX_GUEST_R15(%rsp)

	movq	%cr2,%rdi
	movq	%rdi,VMXCTX_GUEST_CR2(%rsp)

	movq	%rsp,%rdi
	movq	$VMX_RETURN_LONGJMP,%rsi
	callq	vmx_return
END(vmx_longjmp)
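
/*
 * vmx_longjmp() is the vm exit entry point: for %rsp to point at the
 * 'vmxctx' on entry, the VMCS host-state area must name this routine and
 * that stack.  A sketch of the setup, assuming a vmcs_write()-style helper
 * and the VMCS_HOST_RIP/VMCS_HOST_RSP field names (illustrative, not a
 * definitive copy of the vmcs setup code):
 *
 *	vmcs_write(VMCS_HOST_RIP, (uint64_t)vmx_longjmp);
 *	vmcs_write(VMCS_HOST_RSP, (uint64_t)vmxctx);
 */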

/*
 * void vmx_resume(struct vmxctx *ctxp)
 * %rdi = ctxp
 *
 * Although the return type is 'void' this function may return indirectly
 * through vmx_setjmp() with a return value of VMX_RETURN_VMRESUME (2).
 */
ENTRY(vmx_resume)
	/*
	 * Restore guest state that is not automatically loaded from the vmcs.
	 */
	VMX_GUEST_RESTORE

	vmresume

	/*
	 * Capture the reason why vmresume failed.
	 */
	VM_INSTRUCTION_ERROR(%eax)

	/* Return via vmx_setjmp with return value of VMX_RETURN_VMRESUME */
	movq	%rsp,%rdi
	movq	$VMX_RETURN_VMRESUME,%rsi
	callq	vmx_return
END(vmx_resume)

/*
 * void vmx_launch(struct vmxctx *ctxp)
 * %rdi = ctxp
 *
 * Although the return type is 'void' this function may return indirectly
 * through vmx_setjmp() with a return value of VMX_RETURN_VMLAUNCH (3).
 */
ENTRY(vmx_launch)
	/*
	 * Restore guest state that is not automatically loaded from the vmcs.
	 */
	VMX_GUEST_RESTORE

	vmlaunch

	/*
	 * Capture the reason why vmlaunch failed.
	 */
	VM_INSTRUCTION_ERROR(%eax)

	/* Return via vmx_setjmp with return value of VMX_RETURN_VMLAUNCH */
	movq	%rsp,%rdi
	movq	$VMX_RETURN_VMLAUNCH,%rsi
	callq	vmx_return
END(vmx_launch)
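
/*
 * Note that vmx_launch() and vmx_resume() are not interchangeable: the
 * Intel SDM requires VMLAUNCH for a VMCS whose launch state is "clear"
 * (first entry) and VMRESUME once the launch state is "launched".  The
 * caller is expected to track which one applies, e.g. with a per-vcpu
 * 'launched' flag as in the sketch after vmx_setjmp() above.
 */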