xref: /freebsd/sys/amd64/include/vmm.h (revision d5b0e70f7e04d971691517ce1304d86a1e367e2e)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _VMM_H_
#define	_VMM_H_

#include <sys/cpuset.h>
#include <sys/sdt.h>
#include <x86/segments.h>

struct vm_snapshot_meta;

#ifdef _KERNEL
SDT_PROVIDER_DECLARE(vmm);
#endif

enum vm_suspend_how {
	VM_SUSPEND_NONE,
	VM_SUSPEND_RESET,
	VM_SUSPEND_POWEROFF,
	VM_SUSPEND_HALT,
	VM_SUSPEND_TRIPLEFAULT,
	VM_SUSPEND_LAST
};

/*
 * Identifiers for architecturally defined registers.
 */
enum vm_reg_name {
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15,
	VM_REG_GUEST_CR0,
	VM_REG_GUEST_CR3,
	VM_REG_GUEST_CR4,
	VM_REG_GUEST_DR7,
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_RIP,
	VM_REG_GUEST_RFLAGS,
	VM_REG_GUEST_ES,
	VM_REG_GUEST_CS,
	VM_REG_GUEST_SS,
	VM_REG_GUEST_DS,
	VM_REG_GUEST_FS,
	VM_REG_GUEST_GS,
	VM_REG_GUEST_LDTR,
	VM_REG_GUEST_TR,
	VM_REG_GUEST_IDTR,
	VM_REG_GUEST_GDTR,
	VM_REG_GUEST_EFER,
	VM_REG_GUEST_CR2,
	VM_REG_GUEST_PDPTE0,
	VM_REG_GUEST_PDPTE1,
	VM_REG_GUEST_PDPTE2,
	VM_REG_GUEST_PDPTE3,
	VM_REG_GUEST_INTR_SHADOW,
	VM_REG_GUEST_DR0,
	VM_REG_GUEST_DR1,
	VM_REG_GUEST_DR2,
	VM_REG_GUEST_DR3,
	VM_REG_GUEST_DR6,
	VM_REG_GUEST_ENTRY_INST_LENGTH,
	VM_REG_LAST
};

enum x2apic_state {
	X2APIC_DISABLED,
	X2APIC_ENABLED,
	X2APIC_STATE_LAST
};

#define	VM_INTINFO_VECTOR(info)	((info) & 0xff)
#define	VM_INTINFO_DEL_ERRCODE	0x800
#define	VM_INTINFO_RSVD		0x7ffff000
#define	VM_INTINFO_VALID	0x80000000
#define	VM_INTINFO_TYPE		0x700
#define	VM_INTINFO_HWINTR	(0 << 8)
#define	VM_INTINFO_NMI		(2 << 8)
#define	VM_INTINFO_HWEXCEPTION	(3 << 8)
#define	VM_INTINFO_SWINTR	(4 << 8)
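/*
 * Illustrative example (not part of the API): a pending NMI encoded in the
 * 'intinfo' format above carries vector 2, the NMI type and the valid bit,
 * and can be decomposed again with the accessor macro:
 *
 *	uint64_t intinfo = VM_INTINFO_VALID | VM_INTINFO_NMI | 2;
 *
 *	VM_INTINFO_VECTOR(intinfo) == 2
 *
 * A hardware exception that delivers an error code would instead use
 * VM_INTINFO_HWEXCEPTION as the type and also set VM_INTINFO_DEL_ERRCODE.
 */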

/*
 * The VM name has to fit into the pathname length constraints of devfs,
 * governed primarily by SPECNAMELEN.  The length is the total number of
 * characters in the full path, relative to the mount point and not
 * including any leading '/' characters.
 * A prefix and a suffix are added to the name specified by the user.
 * The prefix is usually "vmm/" or "vmm.io/", but can be a few characters
 * longer for future use.
 * The suffix is a string that identifies a bootrom image or some similar
 * image that is attached to the VM. A separator character gets added to
 * the suffix automatically when generating the full path, so it must be
 * accounted for, reducing the effective length by 1.
 * The effective length of a VM name is 229 bytes for FreeBSD 13 and 37
 * bytes for FreeBSD 12.  A minimum length is set for safety and supports
 * a SPECNAMELEN as small as 32 on old systems.
 */
#define VM_MAX_PREFIXLEN 10
#define VM_MAX_SUFFIXLEN 15
#define VM_MIN_NAMELEN   6
#define VM_MAX_NAMELEN \
    (SPECNAMELEN - VM_MAX_PREFIXLEN - VM_MAX_SUFFIXLEN - 1)
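/*
 * Worked example of the limit above: with SPECNAMELEN of 255 (FreeBSD 13)
 * the effective name length is 255 - 10 - 15 - 1 = 229, and with the older
 * SPECNAMELEN of 63 (FreeBSD 12) it is 63 - 10 - 15 - 1 = 37, matching the
 * figures quoted in the comment.
 */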

#ifdef _KERNEL
CTASSERT(VM_MAX_NAMELEN >= VM_MIN_NAMELEN);

struct vm;
struct vm_exception;
struct seg_desc;
struct vm_exit;
struct vm_run;
struct vhpet;
struct vioapic;
struct vlapic;
struct vmspace;
struct vm_object;
struct vm_guest_paging;
struct pmap;
enum snapshot_req;

struct vm_eventinfo {
	void	*rptr;		/* rendezvous cookie */
	int	*sptr;		/* suspend cookie */
	int	*iptr;		/* reqidle cookie */
};

typedef int	(*vmm_init_func_t)(int ipinum);
typedef int	(*vmm_cleanup_func_t)(void);
typedef void	(*vmm_resume_func_t)(void);
typedef void *	(*vmi_init_func_t)(struct vm *vm, struct pmap *pmap);
typedef int	(*vmi_run_func_t)(void *vmi, int vcpu, register_t rip,
		    struct pmap *pmap, struct vm_eventinfo *info);
typedef void	(*vmi_cleanup_func_t)(void *vmi);
typedef int	(*vmi_get_register_t)(void *vmi, int vcpu, int num,
				      uint64_t *retval);
typedef int	(*vmi_set_register_t)(void *vmi, int vcpu, int num,
				      uint64_t val);
typedef int	(*vmi_get_desc_t)(void *vmi, int vcpu, int num,
				  struct seg_desc *desc);
typedef int	(*vmi_set_desc_t)(void *vmi, int vcpu, int num,
				  struct seg_desc *desc);
typedef int	(*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int	(*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
typedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
typedef void	(*vmi_vmspace_free)(struct vmspace *vmspace);
typedef struct vlapic * (*vmi_vlapic_init)(void *vmi, int vcpu);
typedef void	(*vmi_vlapic_cleanup)(void *vmi, struct vlapic *vlapic);
typedef int	(*vmi_snapshot_t)(void *vmi, struct vm_snapshot_meta *meta);
typedef int	(*vmi_snapshot_vmcx_t)(void *vmi, struct vm_snapshot_meta *meta,
				       int vcpu);
typedef int	(*vmi_restore_tsc_t)(void *vmi, int vcpuid, uint64_t now);

struct vmm_ops {
	vmm_init_func_t		modinit;	/* module wide initialization */
	vmm_cleanup_func_t	modcleanup;
	vmm_resume_func_t	modresume;

	vmi_init_func_t		init;		/* vm-specific initialization */
	vmi_run_func_t		run;
	vmi_cleanup_func_t	cleanup;
	vmi_get_register_t	getreg;
	vmi_set_register_t	setreg;
	vmi_get_desc_t		getdesc;
	vmi_set_desc_t		setdesc;
	vmi_get_cap_t		getcap;
	vmi_set_cap_t		setcap;
	vmi_vmspace_alloc	vmspace_alloc;
	vmi_vmspace_free	vmspace_free;
	vmi_vlapic_init		vlapic_init;
	vmi_vlapic_cleanup	vlapic_cleanup;

	/* checkpoint operations */
	vmi_snapshot_t		snapshot;
	vmi_snapshot_vmcx_t	vmcx_snapshot;
	vmi_restore_tsc_t	restore_tsc;
};

extern const struct vmm_ops vmm_ops_intel;
extern const struct vmm_ops vmm_ops_amd;
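/*
 * Illustrative sketch (an assumption about usage, not the actual vmm.c
 * code): the generic layer picks one of the two ops vectors at module load
 * time and funnels every per-VM operation through it:
 *
 *	const struct vmm_ops *ops;	// selected by CPU vendor
 *
 *	error = ops->modinit(ipinum);
 *	cookie = ops->init(vm, pmap);	// per-VM backend state
 *	error = ops->run(cookie, vcpuid, rip, pmap, &evinfo);
 */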

int vm_create(const char *name, struct vm **retvm);
void vm_destroy(struct vm *vm);
int vm_reinit(struct vm *vm);
const char *vm_name(struct vm *vm);
uint16_t vm_get_maxcpus(struct vm *vm);
void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
    uint16_t *threads, uint16_t *maxcpus);
int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
    uint16_t threads, uint16_t maxcpus);

/*
 * APIs that modify the guest memory map require all vcpus to be frozen.
 */
int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot, int flags);
int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem);
void vm_free_memseg(struct vm *vm, int ident);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_assign_pptdev(struct vm *vm, int bus, int slot, int func);
int vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func);

/*
 * APIs that inspect the guest memory map require only a *single* vcpu to
 * be frozen. This acts like a read lock on the guest memory map since any
 * modification requires *all* vcpus to be frozen.
 */
int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);
int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    struct vm_object **objptr);
vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
void *vm_gpa_hold(struct vm *, int vcpuid, vm_paddr_t gpa, size_t len,
    int prot, void **cookie);
void vm_gpa_release(void *cookie);
bool vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa);

int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
		    struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
		    struct seg_desc *desc);
int vm_run(struct vm *vm, struct vm_run *vmrun);
int vm_suspend(struct vm *vm, enum vm_suspend_how how);
int vm_inject_nmi(struct vm *vm, int vcpu);
int vm_nmi_pending(struct vm *vm, int vcpuid);
void vm_nmi_clear(struct vm *vm, int vcpuid);
int vm_inject_extint(struct vm *vm, int vcpu);
int vm_extint_pending(struct vm *vm, int vcpuid);
void vm_extint_clear(struct vm *vm, int vcpuid);
struct vlapic *vm_lapic(struct vm *vm, int cpu);
struct vioapic *vm_ioapic(struct vm *vm);
struct vhpet *vm_hpet(struct vm *vm);
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);
int vm_get_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state);
int vm_apicid2vcpuid(struct vm *vm, int apicid);
int vm_activate_cpu(struct vm *vm, int vcpu);
int vm_suspend_cpu(struct vm *vm, int vcpu);
int vm_resume_cpu(struct vm *vm, int vcpu);
struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_debug(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip);
int vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta);
int vm_restore_time(struct vm *vm);

#ifdef _SYS__CPUSET_H_
/*
 * Rendezvous all vcpus specified in 'dest' and execute 'func(arg)'.
 * The rendezvous 'func(arg)' is not allowed to do anything that will
 * cause the thread to be put to sleep.
 *
 * If the rendezvous is being initiated from a vcpu context then the
 * 'vcpuid' must refer to that vcpu, otherwise it should be set to -1.
 *
 * The caller cannot hold any locks when initiating the rendezvous.
 *
 * The implementation of this API may cause vcpus other than those specified
 * by 'dest' to be stalled. The caller should not rely on any vcpus making
 * forward progress when the rendezvous is in progress.
 */
typedef void (*vm_rendezvous_func_t)(struct vm *vm, int vcpuid, void *arg);
int vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg);
cpuset_t vm_active_cpus(struct vm *vm);
cpuset_t vm_debug_cpus(struct vm *vm);
cpuset_t vm_suspended_cpus(struct vm *vm);
#endif	/* _SYS__CPUSET_H_ */
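/*
 * Illustrative sketch (hypothetical callback, not part of this header):
 * visit every active vcpu from a non-vcpu context.  The callback must not
 * sleep and the caller must not hold any locks.
 *
 *	static void
 *	visit_vcpu(struct vm *vm, int vcpuid, void *arg)
 *	{
 *		atomic_add_int(arg, 1);
 *	}
 *
 *	u_int visited = 0;
 *	error = vm_smp_rendezvous(vm, -1, vm_active_cpus(vm),
 *	    visit_vcpu, &visited);
 */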

static __inline int
vcpu_rendezvous_pending(struct vm_eventinfo *info)
{

	return (*((uintptr_t *)(info->rptr)) != 0);
}

static __inline int
vcpu_suspended(struct vm_eventinfo *info)
{

	return (*info->sptr);
}

static __inline int
vcpu_reqidle(struct vm_eventinfo *info)
{

	return (*info->iptr);
}

int vcpu_debugged(struct vm *vm, int vcpuid);

/*
 * Return true if device indicated by bus/slot/func is supposed to be a
 * pci passthrough device.
 *
 * Return false otherwise.
 */
bool vmm_is_pptdev(int bus, int slot, int func);

void *vm_iommu_domain(struct vm *vm);

enum vcpu_state {
	VCPU_IDLE,
	VCPU_FROZEN,
	VCPU_RUNNING,
	VCPU_SLEEPING,
};

int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
    bool from_idle);
enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu, int *hostcpu);

static int __inline
vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
{
	return (vcpu_get_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
}

#ifdef _SYS_PROC_H_
static int __inline
vcpu_should_yield(struct vm *vm, int vcpu)
{
	struct thread *td;

	td = curthread;
	return (td->td_ast != 0 || td->td_owepreempt != 0);
}
#endif

void *vcpu_stats(struct vm *vm, int vcpu);
void vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr);
struct vmspace *vm_get_vmspace(struct vm *vm);
struct vatpic *vm_atpic(struct vm *vm);
struct vatpit *vm_atpit(struct vm *vm);
struct vpmtmr *vm_pmtmr(struct vm *vm);
struct vrtc *vm_rtc(struct vm *vm);

/*
 * Inject exception 'vector' into the guest vcpu. This function returns 0 on
 * success and non-zero on failure.
 *
 * Wrapper functions like 'vm_inject_gp()' should be preferred to calling
 * this function directly because they enforce the trap-like or fault-like
 * behavior of an exception.
 *
 * This function should only be called in the context of the thread that is
 * executing this vcpu.
 */
int vm_inject_exception(struct vm *vm, int vcpuid, int vector, int err_valid,
    uint32_t errcode, int restart_instruction);
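/*
 * Example (illustrative): the wrapper below injects a #GP with a zero error
 * code and is preferred over open-coding the call:
 *
 *	vm_inject_gp(vm, vcpuid);
 *
 * which, for a fault-class exception, is expected to be equivalent to
 * something along the lines of (restart_instruction presumed 1 for faults):
 *
 *	error = vm_inject_exception(vm, vcpuid, IDT_GP, 1, 0, 1);
 */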

/*
 * This function is called after a VM-exit that occurred during exception or
 * interrupt delivery through the IDT. The format of 'intinfo' is described
 * in Figure 15-1, "EXITINTINFO for All Intercepts", APM, Vol 2.
 *
 * If a VM-exit handler completes the event delivery successfully then it
 * should call vm_exit_intinfo() to extinguish the pending event. For example,
 * if the task switch emulation is triggered via a task gate then it should
 * call this function with 'intinfo=0' to indicate that the external event
 * is not pending anymore.
 *
 * Return value is 0 on success and non-zero on failure.
 */
int vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t intinfo);

/*
 * This function is called before every VM-entry to retrieve a pending
 * event that should be injected into the guest. This function combines
 * nested events into a double or triple fault.
 *
 * Returns 0 if there are no events that need to be injected into the guest
 * and non-zero otherwise.
 */
int vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *info);

int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2);
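/*
 * Illustrative flow (a sketch of a hypothetical exit handler, not code from
 * vmm.c): once the event that was being delivered has been fully emulated,
 * the handler extinguishes it, and the VM-entry path later asks whether
 * anything still has to be injected:
 *
 *	error = vm_exit_intinfo(vm, vcpuid, 0);		// event consumed
 *
 *	if (vm_entry_intinfo(vm, vcpuid, &info) != 0)
 *		;	// inject 'info' before resuming the guest
 */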

/*
 * Function used to keep track of the guest's TSC offset. The
 * offset is used by the virtualization extensions to provide a consistent
 * value for the Time Stamp Counter to the guest.
 *
 * Return value is 0 on success and non-zero on failure.
 */
int vm_set_tsc_offset(struct vm *vm, int vcpu_id, uint64_t offset);
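/*
 * In terms of the architectural TSC-offsetting mechanism, the guest then
 * observes approximately
 *
 *	guest_tsc = host_tsc + offset
 *
 * (a sketch of the intended effect; the exact behaviour is provided by the
 * VT-x/SVM code).
 */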

enum vm_reg_name vm_segment_name(int seg_encoding);

struct vm_copyinfo {
	uint64_t	gpa;
	size_t		len;
	void		*hva;
	void		*cookie;
};

/*
 * Set up 'copyinfo[]' to copy to/from guest linear address space starting
 * at 'gla' and extending for 'len' bytes. The 'prot' should be set to
 * PROT_READ for a copyin or PROT_WRITE for a copyout.
 *
 * retval	is_fault	Interpretation
 *   0		   0		Success
 *   0		   1		An exception was injected into the guest
 * EFAULT	  N/A		Unrecoverable error
 *
 * The 'copyinfo[]' can be passed to 'vm_copyin()' or 'vm_copyout()' only if
 * the return value is 0. The 'copyinfo[]' resources should be freed by calling
 * 'vm_copy_teardown()' after the copy is done.
 */
int vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo, int *is_fault);
void vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    int num_copyinfo);
void vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    void *kaddr, size_t len);
void vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len);
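/*
 * Illustrative sketch (hypothetical caller): copy 'len' bytes from guest
 * linear address 'gla' into a kernel buffer 'buf'.  Here 'buf', 'gla', 'len'
 * and 'paging' are assumed locals, and the two-entry array is an arbitrary
 * sizing choice, not a requirement of the API.
 *
 *	struct vm_copyinfo copyinfo[2];
 *	int error, fault;
 *
 *	error = vm_copy_setup(vm, vcpuid, &paging, gla, len, PROT_READ,
 *	    copyinfo, nitems(copyinfo), &fault);
 *	if (error == 0 && !fault) {
 *		vm_copyin(vm, vcpuid, copyinfo, buf, len);
 *		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
 *	}
 */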

int vcpu_trace_exceptions(struct vm *vm, int vcpuid);
int vcpu_trap_wbinvd(struct vm *vm, int vcpuid);
#endif	/* _KERNEL */

#ifdef _KERNEL
#define	VM_MAXCPU	16			/* maximum virtual cpus */
#endif

/*
 * Identifiers for optional vmm capabilities
 */
enum vm_cap_type {
	VM_CAP_HALT_EXIT,
	VM_CAP_MTRAP_EXIT,
	VM_CAP_PAUSE_EXIT,
	VM_CAP_UNRESTRICTED_GUEST,
	VM_CAP_ENABLE_INVPCID,
	VM_CAP_BPT_EXIT,
	VM_CAP_RDPID,
	VM_CAP_RDTSCP,
	VM_CAP_IPI_EXIT,
	VM_CAP_MAX
};

enum vm_intr_trigger {
	EDGE_TRIGGER,
	LEVEL_TRIGGER
};

/*
 * The 'access' field has the format specified in Table 21-2 of the Intel
 * Architecture Manual vol 3b.
 *
 * XXX The contents of the 'access' field are architecturally defined except
 * bit 16 - Segment Unusable.
 */
struct seg_desc {
	uint64_t	base;
	uint32_t	limit;
	uint32_t	access;
};
#define	SEG_DESC_TYPE(access)		((access) & 0x001f)
#define	SEG_DESC_DPL(access)		(((access) >> 5) & 0x3)
#define	SEG_DESC_PRESENT(access)	(((access) & 0x0080) ? 1 : 0)
#define	SEG_DESC_DEF32(access)		(((access) & 0x4000) ? 1 : 0)
#define	SEG_DESC_GRANULARITY(access)	(((access) & 0x8000) ? 1 : 0)
#define	SEG_DESC_UNUSABLE(access)	(((access) & 0x10000) ? 1 : 0)
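/*
 * Worked example: an 'access' value of 0x0093 (a present, DPL-0,
 * read/write data segment) decodes as
 *
 *	SEG_DESC_TYPE(0x0093)		== 0x13
 *	SEG_DESC_DPL(0x0093)		== 0
 *	SEG_DESC_PRESENT(0x0093)	== 1
 *	SEG_DESC_UNUSABLE(0x0093)	== 0
 */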

enum vm_cpu_mode {
	CPU_MODE_REAL,
	CPU_MODE_PROTECTED,
	CPU_MODE_COMPATIBILITY,		/* IA-32E mode (CS.L = 0) */
	CPU_MODE_64BIT,			/* IA-32E mode (CS.L = 1) */
};

enum vm_paging_mode {
	PAGING_MODE_FLAT,
	PAGING_MODE_32,
	PAGING_MODE_PAE,
	PAGING_MODE_64,
	PAGING_MODE_64_LA57,
};

struct vm_guest_paging {
	uint64_t	cr3;
	int		cpl;
	enum vm_cpu_mode cpu_mode;
	enum vm_paging_mode paging_mode;
};

/*
 * The data structures 'vie' and 'vie_op' are meant to be opaque to the
 * consumers of instruction decoding. The only reason why their contents
 * need to be exposed is because they are part of the 'vm_exit' structure.
 */
struct vie_op {
	uint8_t		op_byte;	/* actual opcode byte */
	uint8_t		op_type;	/* type of operation (e.g. MOV) */
	uint16_t	op_flags;
};
_Static_assert(sizeof(struct vie_op) == 4, "ABI");
_Static_assert(_Alignof(struct vie_op) == 2, "ABI");

#define	VIE_INST_SIZE	15
struct vie {
	uint8_t		inst[VIE_INST_SIZE];	/* instruction bytes */
	uint8_t		num_valid;		/* size of the instruction */

/* The following fields are all zeroed upon restart. */
#define	vie_startzero	num_processed
	uint8_t		num_processed;

	uint8_t		addrsize:4, opsize:4;	/* address and operand sizes */
	uint8_t		rex_w:1,		/* REX prefix */
			rex_r:1,
			rex_x:1,
			rex_b:1,
			rex_present:1,
			repz_present:1,		/* REP/REPE/REPZ prefix */
			repnz_present:1,	/* REPNE/REPNZ prefix */
			opsize_override:1,	/* Operand size override */
			addrsize_override:1,	/* Address size override */
			segment_override:1;	/* Segment override */

	uint8_t		mod:2,			/* ModRM byte */
			reg:4,
			rm:4;

	uint8_t		ss:2,			/* SIB byte */
			vex_present:1,		/* VEX prefixed */
			vex_l:1,		/* L bit */
			index:4,		/* SIB byte */
			base:4;			/* SIB byte */

	uint8_t		disp_bytes;
	uint8_t		imm_bytes;

	uint8_t		scale;

	uint8_t		vex_reg:4,		/* vvvv: first source register specifier */
			vex_pp:2,		/* pp */
			_sparebits:2;

	uint8_t		_sparebytes[2];

	int		base_register;		/* VM_REG_GUEST_xyz */
	int		index_register;		/* VM_REG_GUEST_xyz */
	int		segment_register;	/* VM_REG_GUEST_xyz */

	int64_t		displacement;		/* optional addr displacement */
	int64_t		immediate;		/* optional immediate operand */

	uint8_t		decoded;	/* set to 1 if successfully decoded */

	uint8_t		_sparebyte;

	struct vie_op	op;			/* opcode description */
};
_Static_assert(sizeof(struct vie) == 64, "ABI");
_Static_assert(__offsetof(struct vie, disp_bytes) == 22, "ABI");
_Static_assert(__offsetof(struct vie, scale) == 24, "ABI");
_Static_assert(__offsetof(struct vie, base_register) == 28, "ABI");

enum vm_exitcode {
	VM_EXITCODE_INOUT,
	VM_EXITCODE_VMX,
	VM_EXITCODE_BOGUS,
	VM_EXITCODE_RDMSR,
	VM_EXITCODE_WRMSR,
	VM_EXITCODE_HLT,
	VM_EXITCODE_MTRAP,
	VM_EXITCODE_PAUSE,
	VM_EXITCODE_PAGING,
	VM_EXITCODE_INST_EMUL,
	VM_EXITCODE_SPINUP_AP,
	VM_EXITCODE_DEPRECATED1,	/* used to be SPINDOWN_CPU */
	VM_EXITCODE_RENDEZVOUS,
	VM_EXITCODE_IOAPIC_EOI,
	VM_EXITCODE_SUSPENDED,
	VM_EXITCODE_INOUT_STR,
	VM_EXITCODE_TASK_SWITCH,
	VM_EXITCODE_MONITOR,
	VM_EXITCODE_MWAIT,
	VM_EXITCODE_SVM,
	VM_EXITCODE_REQIDLE,
	VM_EXITCODE_DEBUG,
	VM_EXITCODE_VMINSN,
	VM_EXITCODE_BPT,
	VM_EXITCODE_IPI,
	VM_EXITCODE_MAX
};

struct vm_inout {
	uint16_t	bytes:3;	/* 1 or 2 or 4 */
	uint16_t	in:1;
	uint16_t	string:1;
	uint16_t	rep:1;
	uint16_t	port;
	uint32_t	eax;		/* valid for out */
};

struct vm_inout_str {
	struct vm_inout	inout;		/* must be the first element */
	struct vm_guest_paging paging;
	uint64_t	rflags;
	uint64_t	cr0;
	uint64_t	index;
	uint64_t	count;		/* rep=1 (%rcx), rep=0 (1) */
	int		addrsize;
	enum vm_reg_name seg_name;
	struct seg_desc seg_desc;
};

enum task_switch_reason {
	TSR_CALL,
	TSR_IRET,
	TSR_JMP,
	TSR_IDT_GATE,	/* task gate in IDT */
};

struct vm_task_switch {
	uint16_t	tsssel;		/* new TSS selector */
	int		ext;		/* task switch due to external event */
	uint32_t	errcode;
	int		errcode_valid;	/* push 'errcode' on the new stack */
	enum task_switch_reason reason;
	struct vm_guest_paging paging;
};

struct vm_exit {
	enum vm_exitcode	exitcode;
	int			inst_length;	/* 0 means unknown */
	uint64_t		rip;
	union {
		struct vm_inout	inout;
		struct vm_inout_str inout_str;
		struct {
			uint64_t	gpa;
			int		fault_type;
		} paging;
		struct {
			uint64_t	gpa;
			uint64_t	gla;
			uint64_t	cs_base;
			int		cs_d;		/* CS.D */
			struct vm_guest_paging paging;
			struct vie	vie;
		} inst_emul;
		/*
		 * VMX specific payload. Used when there is no "better"
		 * exitcode to represent the VM-exit.
		 */
		struct {
			int		status;		/* vmx inst status */
			/*
			 * 'exit_reason' and 'exit_qualification' are valid
			 * only if 'status' is zero.
			 */
			uint32_t	exit_reason;
			uint64_t	exit_qualification;
			/*
			 * 'inst_error' and 'inst_type' are valid
			 * only if 'status' is non-zero.
			 */
			int		inst_type;
			int		inst_error;
		} vmx;
		/*
		 * SVM specific payload.
		 */
		struct {
			uint64_t	exitcode;
			uint64_t	exitinfo1;
			uint64_t	exitinfo2;
		} svm;
		struct {
			int		inst_length;
		} bpt;
		struct {
			uint32_t	code;		/* ecx value */
			uint64_t	wval;
		} msr;
		struct {
			int		vcpu;
			uint64_t	rip;
		} spinup_ap;
		struct {
			uint64_t	rflags;
			uint64_t	intr_status;
		} hlt;
		struct {
			int		vector;
		} ioapic_eoi;
		struct {
			enum vm_suspend_how how;
		} suspended;
		struct {
			uint32_t mode;
			uint8_t vector;
			cpuset_t dmask;
		} ipi;
		struct vm_task_switch task_switch;
	} u;
};

/* APIs to inject faults into the guest */
void vm_inject_fault(void *vm, int vcpuid, int vector, int errcode_valid,
    int errcode);

static __inline void
vm_inject_ud(void *vm, int vcpuid)
{
	vm_inject_fault(vm, vcpuid, IDT_UD, 0, 0);
}

static __inline void
vm_inject_gp(void *vm, int vcpuid)
{
	vm_inject_fault(vm, vcpuid, IDT_GP, 1, 0);
}

static __inline void
vm_inject_ac(void *vm, int vcpuid, int errcode)
{
	vm_inject_fault(vm, vcpuid, IDT_AC, 1, errcode);
}

static __inline void
vm_inject_ss(void *vm, int vcpuid, int errcode)
{
	vm_inject_fault(vm, vcpuid, IDT_SS, 1, errcode);
}

void vm_inject_pf(void *vm, int vcpuid, int error_code, uint64_t cr2);

int vm_restart_instruction(void *vm, int vcpuid);

#endif	/* _VMM_H_ */