xref: /freebsd/sys/amd64/include/vmm.h (revision cd942e0f257c9d5156cd0a51d73f8b5dcf9b011c)
1 /*-
2  * Copyright (c) 2011 NetApp, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: vmm.h 482 2011-05-09 21:22:43Z grehan $
27  */
28 
29 #ifndef _VMM_H_
30 #define	_VMM_H_
31 
#ifdef _KERNEL

#define	VM_MAX_NAMELEN	32	/* maximum length of a vm name */

/*
 * Opaque types; the definitions live in the vmm implementation and in
 * the hypervisor backends.  Consumers only ever hold pointers to them.
 */
struct vm;
struct vm_memory_segment;
struct seg_desc;
struct vm_exit;
struct vm_run;
struct vlapic;
42 
/*
 * Function pointer types filled in by each hypervisor backend
 * (Intel VT-x or AMD SVM) — see 'struct vmm_ops' below.
 * 'vmi' is the backend-private instance cookie returned by vmi_init_func_t.
 */

/* Module-wide (not per-vm) initialization and teardown. */
typedef int	(*vmm_init_func_t)(void);
typedef int	(*vmm_cleanup_func_t)(void);
typedef void *	(*vmi_init_func_t)(struct vm *vm); /* instance specific apis */
/* Enter the guest on 'vcpu' at 'rip'; exit details returned in 'vmexit'. */
typedef int	(*vmi_run_func_t)(void *vmi, int vcpu, register_t rip,
				  struct vm_exit *vmexit);
typedef void	(*vmi_cleanup_func_t)(void *vmi);
/* Map guest physical 'gpa' to host physical 'hpa' for 'length' bytes. */
typedef int	(*vmi_mmap_func_t)(void *vmi, vm_paddr_t gpa, vm_paddr_t hpa,
				   size_t length, vm_memattr_t attr,
				   int prot, boolean_t superpages_ok);
/* Read/write a guest register identified by 'num' (enum vm_reg_name). */
typedef int	(*vmi_get_register_t)(void *vmi, int vcpu, int num,
				      uint64_t *retval);
typedef int	(*vmi_set_register_t)(void *vmi, int vcpu, int num,
				      uint64_t val);
/* Read/write a segment descriptor identified by 'num'. */
typedef int	(*vmi_get_desc_t)(void *vmi, int vcpu, int num,
				  struct seg_desc *desc);
typedef int	(*vmi_set_desc_t)(void *vmi, int vcpu, int num,
				  struct seg_desc *desc);
/* Inject an event ('type' from enum vm_event_type) into 'vcpu'. */
typedef int	(*vmi_inject_event_t)(void *vmi, int vcpu,
				      int type, int vector,
				      uint32_t code, int code_valid);
typedef	int	(*vmi_inject_nmi_t)(void *vmi, int vcpu);
/* Query/set an optional capability ('num' from enum vm_cap_type). */
typedef int	(*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int	(*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
66 
/*
 * Dispatch table implemented by each hypervisor backend.  The generic vmm
 * layer calls through these pointers; one instance exists per backend.
 */
struct vmm_ops {
	vmm_init_func_t		init;		/* module wide initialization */
	vmm_cleanup_func_t	cleanup;

	vmi_init_func_t		vminit;		/* vm-specific initialization */
	vmi_run_func_t		vmrun;
	vmi_cleanup_func_t	vmcleanup;
	vmi_mmap_func_t		vmmmap;
	vmi_get_register_t	vmgetreg;
	vmi_set_register_t	vmsetreg;
	vmi_get_desc_t		vmgetdesc;
	vmi_set_desc_t		vmsetdesc;
	vmi_inject_event_t	vminject;
	vmi_inject_nmi_t	vmnmi;
	vmi_get_cap_t		vmgetcap;
	vmi_set_cap_t		vmsetcap;
};

/* Backend implementations (Intel VT-x and AMD SVM). */
extern struct vmm_ops vmm_ops_intel;
extern struct vmm_ops vmm_ops_amd;
87 
/* Virtual machine lifecycle. */
struct vm *vm_create(const char *name);
void vm_destroy(struct vm *vm);
const char *vm_name(struct vm *vm);

/* Guest physical memory: allocation, mmio mappings and translation. */
int vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t *ret_hpa);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
vm_paddr_t vm_gpa2hpa(struct vm *vm, vm_paddr_t gpa, size_t size);
int vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
	      struct vm_memory_segment *seg);

/* Guest register and segment-descriptor access ('reg': enum vm_reg_name). */
int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
		    struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
		    struct seg_desc *desc);

/* Pin 'vcpu' to host cpu 'cpuid', or query the current pinning. */
int vm_get_pinning(struct vm *vm, int vcpu, int *cpuid);
int vm_set_pinning(struct vm *vm, int vcpu, int cpuid);

int vm_run(struct vm *vm, struct vm_run *vmrun);

/* Event/NMI injection ('type': enum vm_event_type). */
int vm_inject_event(struct vm *vm, int vcpu, int type,
		    int vector, uint32_t error_code, int error_code_valid);
int vm_inject_nmi(struct vm *vm, int vcpu);

uint64_t *vm_guest_msrs(struct vm *vm, int cpu);
struct vlapic *vm_lapic(struct vm *vm, int cpu);

/* Optional capabilities ('type': enum vm_cap_type). */
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);

void vm_activate_cpu(struct vm *vm, int vcpu);
cpuset_t vm_active_cpus(struct vm *vm);
115 
/*
 * Return 1 if device indicated by bus/slot/func is supposed to be a
 * pci passthrough device.
 *
 * Return 0 otherwise.
 */
int vmm_is_pptdev(int bus, int slot, int func);

/* Opaque iommu domain associated with this vm's passthrough devices. */
void *vm_iommu_domain(struct vm *vm);
125 
/* vcpu run states as reported by vm_get_run_state(). */
#define	VCPU_STOPPED	0
#define	VCPU_RUNNING	1
void vm_set_run_state(struct vm *vm, int vcpu, int running);
/* NOTE(review): 'hostcpu' presumably receives the host cpu a running
 * vcpu is executing on — confirm against the vmm implementation. */
int vm_get_run_state(struct vm *vm, int vcpu, int *hostcpu);

/* Opaque per-vcpu statistics buffer. */
void *vcpu_stats(struct vm *vm, int vcpu);
132 
133 static int __inline
134 vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
135 {
136 	return (vm_get_run_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
137 }
138 
#endif	/* _KERNEL */
140 
#define	VM_MAXCPU	8			/* maximum virtual cpus */

/*
 * Identifiers for events that can be injected into the VM
 */
enum vm_event_type {
	VM_EVENT_NONE,
	VM_HW_INTR,		/* hardware interrupt */
	VM_NMI,			/* non-maskable interrupt */
	VM_HW_EXCEPTION,	/* hardware exception */
	VM_SW_INTR,		/* software interrupt */
	VM_PRIV_SW_EXCEPTION,
	VM_SW_EXCEPTION,
	VM_EVENT_MAX		/* number of event types; not a valid event */
};
156 
/*
 * Identifiers for architecturally defined registers.
 */
enum vm_reg_name {
	/* general purpose registers */
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15,
	/* control and debug registers */
	VM_REG_GUEST_CR0,
	VM_REG_GUEST_CR3,
	VM_REG_GUEST_CR4,
	VM_REG_GUEST_DR7,
	/* stack pointer, instruction pointer, flags */
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_RIP,
	VM_REG_GUEST_RFLAGS,
	/* segment registers and descriptor tables */
	VM_REG_GUEST_ES,
	VM_REG_GUEST_CS,
	VM_REG_GUEST_SS,
	VM_REG_GUEST_DS,
	VM_REG_GUEST_FS,
	VM_REG_GUEST_GS,
	VM_REG_GUEST_LDTR,
	VM_REG_GUEST_TR,
	VM_REG_GUEST_IDTR,
	VM_REG_GUEST_GDTR,
	VM_REG_GUEST_EFER,
	VM_REG_LAST		/* number of register identifiers */
};
196 
/*
 * Identifiers for optional vmm capabilities
 */
enum vm_cap_type {
	VM_CAP_HALT_EXIT,		/* exit to host on guest HLT */
	VM_CAP_MTRAP_EXIT,		/* monitor trap exiting */
	VM_CAP_PAUSE_EXIT,		/* exit to host on guest PAUSE */
	VM_CAP_UNRESTRICTED_GUEST,
	VM_CAP_MAX			/* number of capabilities */
};
207 
/*
 * The 'access' field has the format specified in Table 21-2 of the Intel
 * Architecture Manual vol 3b.
 *
 * XXX The contents of the 'access' field are architecturally defined except
 * bit 16 - Segment Unusable.
 */
struct seg_desc {
	uint64_t	base;		/* segment base linear address */
	uint32_t	limit;		/* segment limit */
	uint32_t	access;		/* access rights (see above) */
};
220 
/*
 * Reasons a vm_run() returned to the host; details for each are in the
 * corresponding arm of the union in 'struct vm_exit'.
 */
enum vm_exitcode {
	VM_EXITCODE_INOUT,	/* in/out port access (u.inout) */
	VM_EXITCODE_VMX,	/* unclassified VMX exit (u.vmx) */
	VM_EXITCODE_BOGUS,
	VM_EXITCODE_RDMSR,	/* rdmsr instruction (u.msr) */
	VM_EXITCODE_WRMSR,	/* wrmsr instruction (u.msr) */
	VM_EXITCODE_HLT,
	VM_EXITCODE_MTRAP,
	VM_EXITCODE_PAUSE,
	VM_EXITCODE_PAGING,	/* guest paging event (u.paging) */
	VM_EXITCODE_MAX		/* number of exit codes */
};
233 
/*
 * Describes why a vm_run() returned to the host.  'exitcode' selects which
 * arm of the union 'u' is valid.
 */
struct vm_exit {
	enum vm_exitcode	exitcode;
	int			inst_length;	/* 0 means unknown */
	uint64_t		rip;		/* guest rip at exit */
	union {
		struct {
			uint16_t	bytes:3;	/* 1 or 2 or 4 */
			uint16_t	in:1;		/* out is 0, in is 1 */
			uint16_t	string:1;
			uint16_t	rep:1;
			uint16_t	port;		/* i/o port number */
			uint32_t	eax;		/* valid for out */
		} inout;
		struct {
			uint64_t	cr3;	/* guest cr3 at the fault */
		} paging;
		/*
		 * VMX specific payload. Used when there is no "better"
		 * exitcode to represent the VM-exit.
		 */
		struct {
			int		error;		/* vmx inst error */
			uint32_t	exit_reason;
			uint64_t	exit_qualification;
		} vmx;
		struct {
			uint32_t	code;		/* ecx value */
			uint64_t	wval;		/* value being written */
		} msr;
	} u;
};
265 
266 #endif	/* _VMM_H_ */
267