/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _VMM_H_
#define	_VMM_H_

#include <sys/sdt.h>
#include <x86/segments.h>

#ifdef _KERNEL
SDT_PROVIDER_DECLARE(vmm);
#endif

enum vm_suspend_how {
	VM_SUSPEND_NONE,
	VM_SUSPEND_RESET,
	VM_SUSPEND_POWEROFF,
	VM_SUSPEND_HALT,
	VM_SUSPEND_TRIPLEFAULT,
	VM_SUSPEND_LAST
};

/*
 * Identifiers for architecturally defined registers.
 */
enum vm_reg_name {
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15,
	VM_REG_GUEST_CR0,
	VM_REG_GUEST_CR3,
	VM_REG_GUEST_CR4,
	VM_REG_GUEST_DR7,
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_RIP,
	VM_REG_GUEST_RFLAGS,
	VM_REG_GUEST_ES,
	VM_REG_GUEST_CS,
	VM_REG_GUEST_SS,
	VM_REG_GUEST_DS,
	VM_REG_GUEST_FS,
	VM_REG_GUEST_GS,
	VM_REG_GUEST_LDTR,
	VM_REG_GUEST_TR,
	VM_REG_GUEST_IDTR,
	VM_REG_GUEST_GDTR,
	VM_REG_GUEST_EFER,
	VM_REG_GUEST_CR2,
	VM_REG_GUEST_PDPTE0,
	VM_REG_GUEST_PDPTE1,
	VM_REG_GUEST_PDPTE2,
	VM_REG_GUEST_PDPTE3,
	VM_REG_GUEST_INTR_SHADOW,
	VM_REG_GUEST_DR0,
	VM_REG_GUEST_DR1,
	VM_REG_GUEST_DR2,
	VM_REG_GUEST_DR3,
	VM_REG_GUEST_DR6,
	VM_REG_GUEST_ENTRY_INST_LENGTH,
	VM_REG_LAST
};

enum x2apic_state {
	X2APIC_DISABLED,
	X2APIC_ENABLED,
	X2APIC_STATE_LAST
};

#define	VM_INTINFO_VECTOR(info)	((info) & 0xff)
#define	VM_INTINFO_DEL_ERRCODE	0x800
#define	VM_INTINFO_RSVD		0x7ffff000
#define	VM_INTINFO_VALID	0x80000000
#define	VM_INTINFO_TYPE		0x700
#define	VM_INTINFO_HWINTR	(0 << 8)
#define	VM_INTINFO_NMI		(2 << 8)
#define	VM_INTINFO_HWEXCEPTION	(3 << 8)
#define	VM_INTINFO_SWINTR	(4 << 8)
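
/*
 * Illustrative sketch (not part of the original header): one way the
 * encoding above can be composed and taken apart.  A hardware exception
 * (type 3) with vector 13 (#GP) that delivers an error code would be
 * built roughly as:
 *
 *	uint64_t info;
 *
 *	info = 13 | VM_INTINFO_HWEXCEPTION | VM_INTINFO_DEL_ERRCODE |
 *	    VM_INTINFO_VALID;
 *
 * The vector is recovered with VM_INTINFO_VECTOR(info) and the event type
 * with (info & VM_INTINFO_TYPE).  The error code itself is assumed here to
 * travel in the upper 32 bits of the 64-bit value; that placement is not
 * spelled out by these macros.
 */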

/*
 * The VM name has to fit into the pathname length constraints of devfs,
 * governed primarily by SPECNAMELEN.  The length is the total number of
 * characters in the full path, relative to the mount point and not
 * including any leading '/' characters.
 * A prefix and a suffix are added to the name specified by the user.
 * The prefix is usually "vmm/" or "vmm.io/", but can be a few characters
 * longer for future use.
 * The suffix is a string that identifies a bootrom image or some similar
 * image that is attached to the VM. A separator character gets added to
 * the suffix automatically when generating the full path, so it must be
 * accounted for, reducing the effective length by 1.
 * The effective length of a VM name is 229 bytes for FreeBSD 13 and 37
 * bytes for FreeBSD 12.  A minimum length is set for safety and supports
 * a SPECNAMELEN as small as 32 on old systems.
 */
#define	VM_MAX_PREFIXLEN 10
#define	VM_MAX_SUFFIXLEN 15
#define	VM_MIN_NAMELEN	 6
#define	VM_MAX_NAMELEN \
	(SPECNAMELEN - VM_MAX_PREFIXLEN - VM_MAX_SUFFIXLEN - 1)
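
/*
 * Worked example (illustrative): with the FreeBSD 13 value of SPECNAMELEN
 * (255) the definition above yields 255 - 10 - 15 - 1 = 229, and with the
 * FreeBSD 12 value (63) it yields 63 - 10 - 15 - 1 = 37, matching the
 * figures quoted in the comment above.
 */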

#ifdef _KERNEL
CTASSERT(VM_MAX_NAMELEN >= VM_MIN_NAMELEN);

struct vm;
struct vm_exception;
struct seg_desc;
struct vm_exit;
struct vm_run;
struct vhpet;
struct vioapic;
struct vlapic;
struct vmspace;
struct vm_object;
struct vm_guest_paging;
struct pmap;

struct vm_eventinfo {
	void	*rptr;		/* rendezvous cookie */
	int	*sptr;		/* suspend cookie */
	int	*iptr;		/* reqidle cookie */
};

typedef int	(*vmm_init_func_t)(int ipinum);
typedef int	(*vmm_cleanup_func_t)(void);
typedef void	(*vmm_resume_func_t)(void);
typedef void *	(*vmi_init_func_t)(struct vm *vm, struct pmap *pmap);
typedef int	(*vmi_run_func_t)(void *vmi, int vcpu, register_t rip,
		    struct pmap *pmap, struct vm_eventinfo *info);
typedef void	(*vmi_cleanup_func_t)(void *vmi);
typedef int	(*vmi_get_register_t)(void *vmi, int vcpu, int num,
		    uint64_t *retval);
typedef int	(*vmi_set_register_t)(void *vmi, int vcpu, int num,
		    uint64_t val);
typedef int	(*vmi_get_desc_t)(void *vmi, int vcpu, int num,
		    struct seg_desc *desc);
typedef int	(*vmi_set_desc_t)(void *vmi, int vcpu, int num,
		    struct seg_desc *desc);
typedef int	(*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int	(*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
typedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
typedef void	(*vmi_vmspace_free)(struct vmspace *vmspace);
typedef struct vlapic * (*vmi_vlapic_init)(void *vmi, int vcpu);
typedef void	(*vmi_vlapic_cleanup)(void *vmi, struct vlapic *vlapic);

struct vmm_ops {
	vmm_init_func_t		init;		/* module wide initialization */
	vmm_cleanup_func_t	cleanup;
	vmm_resume_func_t	resume;

	vmi_init_func_t		vminit;		/* vm-specific initialization */
	vmi_run_func_t		vmrun;
	vmi_cleanup_func_t	vmcleanup;
	vmi_get_register_t	vmgetreg;
	vmi_set_register_t	vmsetreg;
	vmi_get_desc_t		vmgetdesc;
	vmi_set_desc_t		vmsetdesc;
	vmi_get_cap_t		vmgetcap;
	vmi_set_cap_t		vmsetcap;
	vmi_vmspace_alloc	vmspace_alloc;
	vmi_vmspace_free	vmspace_free;
	vmi_vlapic_init		vlapic_init;
	vmi_vlapic_cleanup	vlapic_cleanup;
};

extern struct vmm_ops vmm_ops_intel;
extern struct vmm_ops vmm_ops_amd;

int vm_create(const char *name, struct vm **retvm);
void vm_destroy(struct vm *vm);
int vm_reinit(struct vm *vm);
const char *vm_name(struct vm *vm);
uint16_t vm_get_maxcpus(struct vm *vm);
void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
    uint16_t *threads, uint16_t *maxcpus);
int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
    uint16_t threads, uint16_t maxcpus);

/*
 * APIs that modify the guest memory map require all vcpus to be frozen.
 */
int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot, int flags);
int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem);
void vm_free_memseg(struct vm *vm, int ident);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_assign_pptdev(struct vm *vm, int bus, int slot, int func);
int vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func);

/*
 * APIs that inspect the guest memory map require only a *single* vcpu to
 * be frozen. This acts like a read lock on the guest memory map since any
 * modification requires *all* vcpus to be frozen.
 */
int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);
int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    struct vm_object **objptr);
vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
void *vm_gpa_hold(struct vm *, int vcpuid, vm_paddr_t gpa, size_t len,
    int prot, void **cookie);
void vm_gpa_release(void *cookie);
bool vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa);
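
/*
 * Illustrative sketch (not part of the original header): vm_gpa_hold() and
 * vm_gpa_release() form a cookie-based hold on guest memory.  Under the
 * single-vcpu-frozen rule described above, a caller that wants to read a
 * small range at a guest physical address (assumed here to lie within one
 * page) might do:
 *
 *	void *cookie, *hva;
 *
 *	hva = vm_gpa_hold(vm, vcpuid, gpa, len, PROT_READ, &cookie);
 *	if (hva != NULL) {
 *		memcpy(buf, hva, len);
 *		vm_gpa_release(cookie);
 *	}
 *
 * 'buf', 'gpa' and 'len' are placeholders supplied by the caller.
 */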

int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc);
int vm_run(struct vm *vm, struct vm_run *vmrun);
int vm_suspend(struct vm *vm, enum vm_suspend_how how);
int vm_inject_nmi(struct vm *vm, int vcpu);
int vm_nmi_pending(struct vm *vm, int vcpuid);
void vm_nmi_clear(struct vm *vm, int vcpuid);
int vm_inject_extint(struct vm *vm, int vcpu);
int vm_extint_pending(struct vm *vm, int vcpuid);
void vm_extint_clear(struct vm *vm, int vcpuid);
struct vlapic *vm_lapic(struct vm *vm, int cpu);
struct vioapic *vm_ioapic(struct vm *vm);
struct vhpet *vm_hpet(struct vm *vm);
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);
int vm_get_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state);
int vm_apicid2vcpuid(struct vm *vm, int apicid);
int vm_activate_cpu(struct vm *vm, int vcpu);
int vm_suspend_cpu(struct vm *vm, int vcpu);
int vm_resume_cpu(struct vm *vm, int vcpu);
struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_debug(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip);

#ifdef _SYS__CPUSET_H_
/*
 * Rendezvous all vcpus specified in 'dest' and execute 'func(arg)'.
 * The rendezvous 'func(arg)' is not allowed to do anything that will
 * cause the thread to be put to sleep.
 *
 * If the rendezvous is being initiated from a vcpu context then the
 * 'vcpuid' must refer to that vcpu, otherwise it should be set to -1.
 *
 * The caller cannot hold any locks when initiating the rendezvous.
 *
 * The implementation of this API may cause vcpus other than those specified
 * by 'dest' to be stalled. The caller should not rely on any vcpus making
 * forward progress when the rendezvous is in progress.
 */
typedef void (*vm_rendezvous_func_t)(struct vm *vm, int vcpuid, void *arg);
int vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg);
cpuset_t vm_active_cpus(struct vm *vm);
cpuset_t vm_debug_cpus(struct vm *vm);
cpuset_t vm_suspended_cpus(struct vm *vm);
#endif	/* _SYS__CPUSET_H_ */
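
/*
 * Illustrative sketch (not part of the original header): a caller outside
 * any vcpu context that wants to run a non-sleeping callback on all active
 * vcpus could, within the constraints documented above, do something like:
 *
 *	static void
 *	example_cb(struct vm *vm, int vcpuid, void *arg)
 *	{
 *		...			(must not sleep)
 *	}
 *
 *	error = vm_smp_rendezvous(vm, -1, vm_active_cpus(vm), example_cb,
 *	    NULL);
 *
 * 'example_cb' and 'error' are placeholder names; -1 is passed for 'vcpuid'
 * because the rendezvous is not initiated from a vcpu context.
 */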

static __inline int
vcpu_rendezvous_pending(struct vm_eventinfo *info)
{

	return (*((uintptr_t *)(info->rptr)) != 0);
}

static __inline int
vcpu_suspended(struct vm_eventinfo *info)
{

	return (*info->sptr);
}

static __inline int
vcpu_reqidle(struct vm_eventinfo *info)
{

	return (*info->iptr);
}

int vcpu_debugged(struct vm *vm, int vcpuid);

/*
 * Return true if device indicated by bus/slot/func is supposed to be a
 * pci passthrough device.
 *
 * Return false otherwise.
 */
bool vmm_is_pptdev(int bus, int slot, int func);

void *vm_iommu_domain(struct vm *vm);

enum vcpu_state {
	VCPU_IDLE,
	VCPU_FROZEN,
	VCPU_RUNNING,
	VCPU_SLEEPING,
};

int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
    bool from_idle);
enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu, int *hostcpu);

static int __inline
vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
{
	return (vcpu_get_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
}

#ifdef _SYS_PROC_H_
static int __inline
vcpu_should_yield(struct vm *vm, int vcpu)
{

	if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED))
		return (1);
	else if (curthread->td_owepreempt)
		return (1);
	else
		return (0);
}
#endif

void *vcpu_stats(struct vm *vm, int vcpu);
void vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr);
struct vmspace *vm_get_vmspace(struct vm *vm);
struct vatpic *vm_atpic(struct vm *vm);
struct vatpit *vm_atpit(struct vm *vm);
struct vpmtmr *vm_pmtmr(struct vm *vm);
struct vrtc *vm_rtc(struct vm *vm);

/*
 * Inject exception 'vector' into the guest vcpu. This function returns 0 on
 * success and non-zero on failure.
 *
 * Wrapper functions like 'vm_inject_gp()' should be preferred to calling
 * this function directly because they enforce the trap-like or fault-like
 * behavior of an exception.
 *
 * This function should only be called in the context of the thread that is
 * executing this vcpu.
 */
int vm_inject_exception(struct vm *vm, int vcpuid, int vector, int err_valid,
    uint32_t errcode, int restart_instruction);

/*
 * This function is called after a VM-exit that occurred during exception or
 * interrupt delivery through the IDT. The format of 'intinfo' is described
 * in Figure 15-1, "EXITINTINFO for All Intercepts", APM, Vol 2.
 *
 * If a VM-exit handler completes the event delivery successfully then it
 * should call vm_exit_intinfo() to extinguish the pending event. For example,
 * if the task switch emulation is triggered via a task gate then it should
 * call this function with 'intinfo=0' to indicate that the external event
 * is not pending anymore.
 *
 * Return value is 0 on success and non-zero on failure.
 */
int vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t intinfo);

/*
 * This function is called before every VM-entry to retrieve a pending
 * event that should be injected into the guest. This function combines
 * nested events into a double or triple fault.
 *
 * Returns 0 if there are no events that need to be injected into the guest
 * and non-zero otherwise.
 */
int vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *info);

int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2);

enum vm_reg_name vm_segment_name(int seg_encoding);

struct vm_copyinfo {
	uint64_t	gpa;
	size_t		len;
	void		*hva;
	void		*cookie;
};

/*
 * Set up 'copyinfo[]' to copy to/from guest linear address space starting
 * at 'gla' and 'len' bytes long. The 'prot' should be set to PROT_READ for
 * a copyin or PROT_WRITE for a copyout.
 *
 * retval	is_fault	Interpretation
 *   0		   0		Success
 *   0		   1		An exception was injected into the guest
 * EFAULT	  N/A		Unrecoverable error
 *
 * The 'copyinfo[]' can be passed to 'vm_copyin()' or 'vm_copyout()' only if
 * the return value is 0. The 'copyinfo[]' resources should be freed by calling
 * 'vm_copy_teardown()' after the copy is done.
 */
int vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo, int *is_fault);
void vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    int num_copyinfo);
void vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    void *kaddr, size_t len);
void vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len);
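
/*
 * Illustrative sketch (not part of the original header): a typical copyin
 * of 'len' bytes from guest linear address 'gla' into a kernel buffer
 * 'buf' might be structured as follows (names other than the vm_copy_*
 * functions are placeholders):
 *
 *	struct vm_copyinfo copyinfo[2];
 *	int error, fault;
 *
 *	error = vm_copy_setup(vm, vcpuid, &paging, gla, len, PROT_READ,
 *	    copyinfo, nitems(copyinfo), &fault);
 *	if (error == 0 && !fault) {
 *		vm_copyin(vm, vcpuid, copyinfo, buf, len);
 *		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
 *	}
 *
 * 'fault' set to 1 with 'error' 0 means an exception was already injected
 * into the guest and the caller should simply resume it.
 */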

int vcpu_trace_exceptions(struct vm *vm, int vcpuid);
#endif	/* _KERNEL */

#define	VM_MAXCPU	16			/* maximum virtual cpus */

/*
 * Identifiers for optional vmm capabilities
 */
enum vm_cap_type {
	VM_CAP_HALT_EXIT,
	VM_CAP_MTRAP_EXIT,
	VM_CAP_PAUSE_EXIT,
	VM_CAP_UNRESTRICTED_GUEST,
	VM_CAP_ENABLE_INVPCID,
	VM_CAP_BPT_EXIT,
	VM_CAP_MAX
};

enum vm_intr_trigger {
	EDGE_TRIGGER,
	LEVEL_TRIGGER
};

/*
 * The 'access' field has the format specified in Table 21-2 of the Intel
 * Architecture Manual vol 3b.
 *
 * XXX The contents of the 'access' field are architecturally defined except
 * bit 16 - Segment Unusable.
 */
struct seg_desc {
	uint64_t	base;
	uint32_t	limit;
	uint32_t	access;
};
#define	SEG_DESC_TYPE(access)		((access) & 0x001f)
#define	SEG_DESC_DPL(access)		(((access) >> 5) & 0x3)
#define	SEG_DESC_PRESENT(access)	(((access) & 0x0080) ? 1 : 0)
#define	SEG_DESC_DEF32(access)		(((access) & 0x4000) ? 1 : 0)
#define	SEG_DESC_GRANULARITY(access)	(((access) & 0x8000) ? 1 : 0)
#define	SEG_DESC_UNUSABLE(access)	(((access) & 0x10000) ? 1 : 0)
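
/*
 * Illustrative sketch (not part of the original header): decoding an
 * 'access' word with the macros above.  For a 'struct seg_desc' obtained
 * via vm_get_seg_desc(), a caller could check:
 *
 *	if (SEG_DESC_PRESENT(desc.access) &&
 *	    !SEG_DESC_UNUSABLE(desc.access)) {
 *		dpl = SEG_DESC_DPL(desc.access);
 *		...
 *	}
 *
 * 'desc' and 'dpl' are placeholders for this sketch.
 */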

enum vm_cpu_mode {
	CPU_MODE_REAL,
	CPU_MODE_PROTECTED,
	CPU_MODE_COMPATIBILITY,		/* IA-32E mode (CS.L = 0) */
	CPU_MODE_64BIT,			/* IA-32E mode (CS.L = 1) */
};

enum vm_paging_mode {
	PAGING_MODE_FLAT,
	PAGING_MODE_32,
	PAGING_MODE_PAE,
	PAGING_MODE_64,
};

struct vm_guest_paging {
	uint64_t	cr3;
	int		cpl;
	enum vm_cpu_mode cpu_mode;
	enum vm_paging_mode paging_mode;
};

/*
 * The data structures 'vie' and 'vie_op' are meant to be opaque to the
 * consumers of instruction decoding. The only reason why their contents
 * need to be exposed is because they are part of the 'vm_exit' structure.
 */
struct vie_op {
	uint8_t		op_byte;	/* actual opcode byte */
	uint8_t		op_type;	/* type of operation (e.g. MOV) */
	uint16_t	op_flags;
};
_Static_assert(sizeof(struct vie_op) == 4, "ABI");
_Static_assert(_Alignof(struct vie_op) == 2, "ABI");

#define	VIE_INST_SIZE	15
struct vie {
	uint8_t		inst[VIE_INST_SIZE];	/* instruction bytes */
	uint8_t		num_valid;		/* size of the instruction */
	uint8_t		num_processed;

	uint8_t		addrsize:4, opsize:4;	/* address and operand sizes */
	uint8_t		rex_w:1,		/* REX prefix */
			rex_r:1,
			rex_x:1,
			rex_b:1,
			rex_present:1,
			repz_present:1,		/* REP/REPE/REPZ prefix */
			repnz_present:1,	/* REPNE/REPNZ prefix */
			opsize_override:1,	/* Operand size override */
			addrsize_override:1,	/* Address size override */
			segment_override:1;	/* Segment override */

	uint8_t		mod:2,			/* ModRM byte */
			reg:4,
			rm:4;

	uint8_t		ss:2,			/* SIB byte */
			vex_present:1,		/* VEX prefixed */
			vex_l:1,		/* L bit */
			index:4,		/* SIB byte */
			base:4;			/* SIB byte */

	uint8_t		disp_bytes;
	uint8_t		imm_bytes;

	uint8_t		scale;

	uint8_t		vex_reg:4,	/* vvvv: first source register specifier */
			vex_pp:2,	/* pp */
			_sparebits:2;

	uint8_t		_sparebytes[2];

	int		base_register;		/* VM_REG_GUEST_xyz */
	int		index_register;		/* VM_REG_GUEST_xyz */
	int		segment_register;	/* VM_REG_GUEST_xyz */

	int64_t		displacement;		/* optional addr displacement */
	int64_t		immediate;		/* optional immediate operand */

	uint8_t		decoded;	/* set to 1 if successfully decoded */

	uint8_t		_sparebyte;

	struct vie_op	op;			/* opcode description */
};
_Static_assert(sizeof(struct vie) == 64, "ABI");
_Static_assert(__offsetof(struct vie, disp_bytes) == 22, "ABI");
_Static_assert(__offsetof(struct vie, scale) == 24, "ABI");
_Static_assert(__offsetof(struct vie, base_register) == 28, "ABI");

enum vm_exitcode {
	VM_EXITCODE_INOUT,
	VM_EXITCODE_VMX,
	VM_EXITCODE_BOGUS,
	VM_EXITCODE_RDMSR,
	VM_EXITCODE_WRMSR,
	VM_EXITCODE_HLT,
	VM_EXITCODE_MTRAP,
	VM_EXITCODE_PAUSE,
	VM_EXITCODE_PAGING,
	VM_EXITCODE_INST_EMUL,
	VM_EXITCODE_SPINUP_AP,
	VM_EXITCODE_DEPRECATED1,	/* used to be SPINDOWN_CPU */
	VM_EXITCODE_RENDEZVOUS,
	VM_EXITCODE_IOAPIC_EOI,
	VM_EXITCODE_SUSPENDED,
	VM_EXITCODE_INOUT_STR,
	VM_EXITCODE_TASK_SWITCH,
	VM_EXITCODE_MONITOR,
	VM_EXITCODE_MWAIT,
	VM_EXITCODE_SVM,
	VM_EXITCODE_REQIDLE,
	VM_EXITCODE_DEBUG,
	VM_EXITCODE_VMINSN,
	VM_EXITCODE_BPT,
	VM_EXITCODE_MAX
};

struct vm_inout {
	uint16_t	bytes:3;	/* 1 or 2 or 4 */
	uint16_t	in:1;
	uint16_t	string:1;
	uint16_t	rep:1;
	uint16_t	port;
	uint32_t	eax;		/* valid for out */
};

struct vm_inout_str {
	struct vm_inout	inout;		/* must be the first element */
	struct vm_guest_paging paging;
	uint64_t	rflags;
	uint64_t	cr0;
	uint64_t	index;
	uint64_t	count;		/* rep=1 (%rcx), rep=0 (1) */
	int		addrsize;
	enum vm_reg_name seg_name;
	struct seg_desc seg_desc;
};

enum task_switch_reason {
	TSR_CALL,
	TSR_IRET,
	TSR_JMP,
	TSR_IDT_GATE,	/* task gate in IDT */
};

struct vm_task_switch {
	uint16_t	tsssel;		/* new TSS selector */
	int		ext;		/* task switch due to external event */
	uint32_t	errcode;
	int		errcode_valid;	/* push 'errcode' on the new stack */
	enum task_switch_reason reason;
	struct vm_guest_paging paging;
};

struct vm_exit {
	enum vm_exitcode	exitcode;
	int			inst_length;	/* 0 means unknown */
	uint64_t		rip;
	union {
		struct vm_inout	inout;
		struct vm_inout_str inout_str;
		struct {
			uint64_t	gpa;
			int		fault_type;
		} paging;
		struct {
			uint64_t	gpa;
			uint64_t	gla;
			uint64_t	cs_base;
			int		cs_d;		/* CS.D */
			struct vm_guest_paging paging;
			struct vie	vie;
		} inst_emul;
		/*
		 * VMX specific payload. Used when there is no "better"
		 * exitcode to represent the VM-exit.
		 */
		struct {
			int		status;		/* vmx inst status */
			/*
			 * 'exit_reason' and 'exit_qualification' are valid
			 * only if 'status' is zero.
			 */
			uint32_t	exit_reason;
			uint64_t	exit_qualification;
			/*
			 * 'inst_error' and 'inst_type' are valid
			 * only if 'status' is non-zero.
			 */
			int		inst_type;
			int		inst_error;
		} vmx;
		/*
		 * SVM specific payload.
		 */
		struct {
			uint64_t	exitcode;
			uint64_t	exitinfo1;
			uint64_t	exitinfo2;
		} svm;
		struct {
			int		inst_length;
		} bpt;
		struct {
			uint32_t	code;		/* ecx value */
			uint64_t	wval;
		} msr;
		struct {
			int		vcpu;
			uint64_t	rip;
		} spinup_ap;
		struct {
			uint64_t	rflags;
			uint64_t	intr_status;
		} hlt;
		struct {
			int		vector;
		} ioapic_eoi;
		struct {
			enum vm_suspend_how how;
		} suspended;
		struct vm_task_switch task_switch;
	} u;
};

/* APIs to inject faults into the guest */
void vm_inject_fault(void *vm, int vcpuid, int vector, int errcode_valid,
    int errcode);

static __inline void
vm_inject_ud(void *vm, int vcpuid)
{
	vm_inject_fault(vm, vcpuid, IDT_UD, 0, 0);
}

static __inline void
vm_inject_gp(void *vm, int vcpuid)
{
	vm_inject_fault(vm, vcpuid, IDT_GP, 1, 0);
}

static __inline void
vm_inject_ac(void *vm, int vcpuid, int errcode)
{
	vm_inject_fault(vm, vcpuid, IDT_AC, 1, errcode);
}

static __inline void
vm_inject_ss(void *vm, int vcpuid, int errcode)
{
	vm_inject_fault(vm, vcpuid, IDT_SS, 1, errcode);
}

void vm_inject_pf(void *vm, int vcpuid, int error_code, uint64_t cr2);

int vm_restart_instruction(void *vm, int vcpuid);

#endif	/* _VMM_H_ */