/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _VMM_H_
#define	_VMM_H_

#ifdef _KERNEL

#define	VM_MAX_NAMELEN	32

struct vm;
struct vm_memory_segment;
struct seg_desc;
struct vm_exit;
struct vm_run;
struct vlapic;

enum x2apic_state;

typedef int	(*vmm_init_func_t)(void);
typedef int	(*vmm_cleanup_func_t)(void);
typedef void *	(*vmi_init_func_t)(struct vm *vm); /* instance specific apis */
typedef int	(*vmi_run_func_t)(void *vmi, int vcpu, register_t rip);
typedef void	(*vmi_cleanup_func_t)(void *vmi);
typedef int	(*vmi_mmap_set_func_t)(void *vmi, vm_paddr_t gpa,
				       vm_paddr_t hpa, size_t length,
				       vm_memattr_t attr, int prot,
				       boolean_t superpages_ok);
typedef vm_paddr_t (*vmi_mmap_get_func_t)(void *vmi, vm_paddr_t gpa);
typedef int	(*vmi_get_register_t)(void *vmi, int vcpu, int num,
				      uint64_t *retval);
typedef int	(*vmi_set_register_t)(void *vmi, int vcpu, int num,
				      uint64_t val);
typedef int	(*vmi_get_desc_t)(void *vmi, int vcpu, int num,
				  struct seg_desc *desc);
typedef int	(*vmi_set_desc_t)(void *vmi, int vcpu, int num,
				  struct seg_desc *desc);
typedef int	(*vmi_inject_event_t)(void *vmi, int vcpu,
				      int type, int vector,
				      uint32_t code, int code_valid);
typedef int	(*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int	(*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);

struct vmm_ops {
	vmm_init_func_t		init;		/* module wide initialization */
	vmm_cleanup_func_t	cleanup;

	vmi_init_func_t		vminit;		/* vm-specific initialization */
	vmi_run_func_t		vmrun;
	vmi_cleanup_func_t	vmcleanup;
	vmi_mmap_set_func_t	vmmmap_set;
	vmi_mmap_get_func_t	vmmmap_get;
	vmi_get_register_t	vmgetreg;
	vmi_set_register_t	vmsetreg;
	vmi_get_desc_t		vmgetdesc;
	vmi_set_desc_t		vmsetdesc;
	vmi_inject_event_t	vminject;
	vmi_get_cap_t		vmgetcap;
	vmi_set_cap_t		vmsetcap;
};

extern struct vmm_ops vmm_ops_intel;
extern struct vmm_ops vmm_ops_amd;

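/*
 * Illustrative sketch, not part of this header: each hardware backend
 * (e.g. the VT-x implementation behind vmm_ops_intel) exports one of the
 * tables above filled in with its own handlers, roughly as
 *
 *	static struct vmm_ops vmm_ops_example = {
 *		.init      = example_init,
 *		.cleanup   = example_cleanup,
 *		.vminit    = example_vminit,
 *		.vmrun     = example_vmrun,
 *		.vmcleanup = example_vmcleanup,
 *		.vmgetreg  = example_getreg,
 *		.vmsetreg  = example_setreg,
 *	};
 *
 * with the remaining members assigned the same way (the 'example_*' names
 * are hypothetical).  The generic vmm layer dispatches per-VM operations
 * through whichever table matches the host CPU.
 */
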
struct vm *vm_create(const char *name);
void vm_destroy(struct vm *vm);
const char *vm_name(struct vm *vm);
int vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
vm_paddr_t vm_gpa2hpa(struct vm *vm, vm_paddr_t gpa, size_t size);
int vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
	      struct vm_memory_segment *seg);
int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
		    struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
		    struct seg_desc *desc);
int vm_get_pinning(struct vm *vm, int vcpu, int *cpuid);
int vm_set_pinning(struct vm *vm, int vcpu, int cpuid);
int vm_run(struct vm *vm, struct vm_run *vmrun);
int vm_inject_event(struct vm *vm, int vcpu, int type,
		    int vector, uint32_t error_code, int error_code_valid);
int vm_inject_nmi(struct vm *vm, int vcpu);
int vm_nmi_pending(struct vm *vm, int vcpuid);
void vm_nmi_clear(struct vm *vm, int vcpuid);
uint64_t *vm_guest_msrs(struct vm *vm, int cpu);
struct vlapic *vm_lapic(struct vm *vm, int cpu);
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);
int vm_get_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state);
void vm_activate_cpu(struct vm *vm, int vcpu);
cpuset_t vm_active_cpus(struct vm *vm);
struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);

/*
 * Return 1 if the device identified by bus/slot/func is supposed to be a
 * PCI passthrough device, 0 otherwise.
 */
int vmm_is_pptdev(int bus, int slot, int func);

void *vm_iommu_domain(struct vm *vm);

enum vcpu_state {
	VCPU_IDLE,
	VCPU_RUNNING,
	VCPU_CANNOT_RUN,
};

int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state);
enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu);

static __inline int
vcpu_is_running(struct vm *vm, int vcpu)
{
	return (vcpu_get_state(vm, vcpu) == VCPU_RUNNING);
}

void *vcpu_stats(struct vm *vm, int vcpu);
void vm_interrupt_hostcpu(struct vm *vm, int vcpu);

#endif	/* _KERNEL */

#include <machine/vmm_instruction_emul.h>

#define	VM_MAXCPU	8			/* maximum virtual cpus */

/*
 * Identifiers for events that can be injected into the VM.
 */
enum vm_event_type {
	VM_EVENT_NONE,
	VM_HW_INTR,
	VM_NMI,
	VM_HW_EXCEPTION,
	VM_SW_INTR,
	VM_PRIV_SW_EXCEPTION,
	VM_SW_EXCEPTION,
	VM_EVENT_MAX
};
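
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * together with vm_inject_event() above, these identifiers select how an
 * injected vector is delivered.  For example, a caller holding a
 * 'struct vm *' could post a general protection fault on vcpu 0 with
 *
 *	error = vm_inject_event(vm, 0, VM_HW_EXCEPTION, IDT_GP, errcode, 1);
 *
 * where IDT_GP is the x86 vector number (13, from <machine/segments.h>)
 * and the final argument marks 'errcode' as valid.
 */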

/*
 * Identifiers for architecturally defined registers.
 */
enum vm_reg_name {
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15,
	VM_REG_GUEST_CR0,
	VM_REG_GUEST_CR3,
	VM_REG_GUEST_CR4,
	VM_REG_GUEST_DR7,
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_RIP,
	VM_REG_GUEST_RFLAGS,
	VM_REG_GUEST_ES,
	VM_REG_GUEST_CS,
	VM_REG_GUEST_SS,
	VM_REG_GUEST_DS,
	VM_REG_GUEST_FS,
	VM_REG_GUEST_GS,
	VM_REG_GUEST_LDTR,
	VM_REG_GUEST_TR,
	VM_REG_GUEST_IDTR,
	VM_REG_GUEST_GDTR,
	VM_REG_GUEST_EFER,
	VM_REG_LAST
};

/*
 * Identifiers for optional vmm capabilities.
 */
enum vm_cap_type {
	VM_CAP_HALT_EXIT,
	VM_CAP_MTRAP_EXIT,
	VM_CAP_PAUSE_EXIT,
	VM_CAP_UNRESTRICTED_GUEST,
	VM_CAP_MAX
};

enum x2apic_state {
	X2APIC_ENABLED,
	X2APIC_AVAILABLE,
	X2APIC_DISABLED,
	X2APIC_STATE_LAST
};

/*
 * The 'access' field has the format specified in Table 21-2 of the Intel
 * Architecture Manual vol 3b.
 *
 * XXX The contents of the 'access' field are architecturally defined except
 * bit 16 - Segment Unusable.
 */
struct seg_desc {
	uint64_t	base;
	uint32_t	limit;
	uint32_t	access;
};
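
/*
 * Example encoding (illustrative, not used by the code here): a present,
 * long mode, DPL 0 code segment is described by access = 0x209b, i.e.
 * type 0xb (execute/read, accessed), S = 1, P = 1 and L = 1; base and
 * limit are ignored by the processor for such a segment in 64-bit mode.
 */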

enum vm_exitcode {
	VM_EXITCODE_INOUT,
	VM_EXITCODE_VMX,
	VM_EXITCODE_BOGUS,
	VM_EXITCODE_RDMSR,
	VM_EXITCODE_WRMSR,
	VM_EXITCODE_HLT,
	VM_EXITCODE_MTRAP,
	VM_EXITCODE_PAUSE,
	VM_EXITCODE_PAGING,
	VM_EXITCODE_SPINUP_AP,
	VM_EXITCODE_MAX
};

struct vm_exit {
	enum vm_exitcode	exitcode;
	int			inst_length;	/* 0 means unknown */
	uint64_t		rip;
	union {
		struct {
			uint16_t	bytes:3;	/* 1 or 2 or 4 */
			uint16_t	in:1;		/* out is 0, in is 1 */
			uint16_t	string:1;
			uint16_t	rep:1;
			uint16_t	port;
			uint32_t	eax;		/* valid for out */
		} inout;
		struct {
			uint64_t	gpa;
			struct vie	vie;
		} paging;
		/*
		 * VMX specific payload. Used when there is no "better"
		 * exitcode to represent the VM-exit.
		 */
		struct {
			int		error;		/* vmx inst error */
			uint32_t	exit_reason;
			uint64_t	exit_qualification;
		} vmx;
		struct {
			uint32_t	code;		/* ecx value */
			uint64_t	wval;
		} msr;
		struct {
			int		vcpu;
			uint64_t	rip;
		} spinup_ap;
	} u;
};
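
/*
 * Illustrative sketch of consuming a 'struct vm_exit' (hypothetical
 * handler, not part of this header): when a run returns with
 * exitcode == VM_EXITCODE_INOUT, the 'inout' member describes the
 * faulting instruction, so a caller might handle a one-byte OUT as
 *
 *	if (vme->exitcode == VM_EXITCODE_INOUT &&
 *	    !vme->u.inout.in && vme->u.inout.bytes == 1)
 *		emulate_outb(vme->u.inout.port, vme->u.inout.eax & 0xff);
 *
 * where 'emulate_outb' is a hypothetical device-emulation helper.  String
 * and REP-prefixed variants are flagged by the 'string' and 'rep' bits.
 */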

#endif	/* _VMM_H_ */