xref: /freebsd/sys/amd64/vmm/intel/vmx.h (revision 13de33a5dc2304b13d595d75d48c51793958474f)
1 /*-
2  * Copyright (c) 2011 NetApp, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28 
29 #ifndef _VMX_H_
30 #define	_VMX_H_
31 
32 #include "vmcs.h"
33 
34 struct pmap;
35 
36 #define	GUEST_MSR_MAX_ENTRIES	64		/* arbitrary */
37 
38 struct vmxctx {	/* per-vcpu CPU context saved/restored across VM entry/exit; NOTE(review): layout is presumably mirrored by offsets in the vmx_setjmp/vmx_launch/vmx_resume assembly -- do not reorder fields without checking it */
39 	register_t	tmpstk[32];		/* vmx_return() stack */
40 	register_t	tmpstktop;		/* address of the top of tmpstk[] -- presumably loaded as %rsp on return; confirm vs assembly */
41 
42 	register_t	guest_rdi;		/* Guest state: GPRs not held in the VMCS, plus %cr2 */
43 	register_t	guest_rsi;
44 	register_t	guest_rdx;
45 	register_t	guest_rcx;
46 	register_t	guest_r8;
47 	register_t	guest_r9;
48 	register_t	guest_rax;
49 	register_t	guest_rbx;
50 	register_t	guest_rbp;
51 	register_t	guest_r10;
52 	register_t	guest_r11;
53 	register_t	guest_r12;
54 	register_t	guest_r13;
55 	register_t	guest_r14;
56 	register_t	guest_r15;
57 	register_t	guest_cr2;
58 
59 	register_t	host_r15;		/* Host state: callee-saved registers plus %rsp/%rip for the longjmp-style return path */
60 	register_t	host_r14;
61 	register_t	host_r13;
62 	register_t	host_r12;
63 	register_t	host_rbp;
64 	register_t	host_rsp;
65 	register_t	host_rbx;
66 	register_t	host_rip;
67 	/*
68 	 * XXX todo debug registers and fpu state
69 	 */
70 
71 	int		launched;		/* vmcs launch state (distinguishes VMLAUNCH vs VMRESUME -- TODO confirm in vmx.c) */
72 	int		launch_error;		/* error reported when vmx_launch/vmx_resume fail; see VMX_RETURN_* comment below */
73 
74 	long		eptgen[MAXCPU];		/* cached pmap->pm_eptgen */
75 
76 	/*
77 	 * The 'eptp' and the 'pmap' do not change during the lifetime of
78 	 * the VM so it is safe to keep a copy in each vcpu's vmxctx.
79 	 */
80 	vm_paddr_t	eptp;
81 	struct pmap	*pmap;
82 };
83 
84 struct vmxcap {	/* per-vcpu cache of optional VMX capabilities */
85 	int	set;		/* bitmask of capabilities explicitly enabled -- presumably VM_CAP_* bits; verify against vmx.c */
86 	uint32_t proc_ctls;	/* cached primary processor-based VM-execution controls (per Intel SDM terminology) */
87 	uint32_t proc_ctls2;	/* cached secondary processor-based VM-execution controls */
88 };
89 
90 struct vmxstate {	/* per-vcpu scheduling/identification state */
91 	int	lastcpu;	/* host cpu that this 'vcpu' last ran on */
92 	uint16_t vpid;		/* virtual-processor identifier for TLB tagging -- presumably; see Intel SDM VPID */
93 };
94 
95 /* virtual machine softc */
96 struct vmx {	/* per-VM softc: all per-vcpu arrays are indexed by vcpu id */
97 	struct vmcs	vmcs[VM_MAXCPU];	/* one vmcs per virtual cpu */
98 	char		msr_bitmap[PAGE_SIZE];	/* single MSR bitmap page (CTASSERT below requires page alignment) */
99 	struct msr_entry guest_msrs[VM_MAXCPU][GUEST_MSR_MAX_ENTRIES];	/* per-vcpu guest MSR save area */
100 	struct vmxctx	ctx[VM_MAXCPU];		/* per-vcpu register context (see struct vmxctx) */
101 	struct vmxcap	cap[VM_MAXCPU];		/* per-vcpu optional capabilities */
102 	struct vmxstate	state[VM_MAXCPU];	/* per-vcpu run state */
103 	uint64_t	eptp;			/* EPT pointer, shared by all vcpus of this VM */
104 	struct vm	*vm;			/* back-pointer to the generic vm structure */
105 };
106 CTASSERT((offsetof(struct vmx, vmcs) & PAGE_MASK) == 0);	/* vmcs array must start page-aligned */
107 CTASSERT((offsetof(struct vmx, msr_bitmap) & PAGE_MASK) == 0);	/* msr_bitmap must be page-aligned */
108 CTASSERT((offsetof(struct vmx, guest_msrs) & 15) == 0);		/* guest_msrs must be 16-byte aligned */
109 
110 #define	VMX_RETURN_DIRECT	0	/* direct return from vmx_setjmp() */
111 #define	VMX_RETURN_LONGJMP	1	/* returned via vmx_longjmp() */
112 #define	VMX_RETURN_VMRESUME	2	/* vmx_resume() failed (error path only) */
113 #define	VMX_RETURN_VMLAUNCH	3	/* vmx_launch() failed (error path only) */
114 #define	VMX_RETURN_AST		4	/* launch/resume aborted: AST pending */
115 #define	VMX_RETURN_INVEPT	5	/* launch/resume aborted: invept error */
116 /*
117  * vmx_setjmp() returns:
118  * - 0 when it returns directly
119  * - 1 when it returns from vmx_longjmp
120  * - 2 when it returns from vmx_resume (which would only be in the error case)
121  * - 3 when it returns from vmx_launch (which would only be in the error case)
122  * - 4 when it returns from vmx_resume or vmx_launch because of AST pending
123  * - 5 when it returns from vmx_launch/vmx_resume because of invept error
124  */
125 int	vmx_setjmp(struct vmxctx *ctx);
126 void	vmx_longjmp(void);			/* returns via vmx_setjmp */
127 void	vmx_launch(struct vmxctx *ctx) __dead2;	/* may return via vmx_setjmp */
128 void	vmx_resume(struct vmxctx *ctx) __dead2;	/* may return via vmx_setjmp */
129 
130 u_long	vmx_fix_cr0(u_long cr0);	/* adjust cr0 to satisfy VMX fixed-bit requirements -- presumably MSR_VMX_CR0_FIXED0/1; see vmx.c */
131 u_long	vmx_fix_cr4(u_long cr4);	/* adjust cr4 likewise -- presumably MSR_VMX_CR4_FIXED0/1 */
132 
133 #endif
134