/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bhyve_snapshot.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/md_var.h>
#include <x86/psl.h>
#include <x86/apicreg.h>
#include <x86/ifunc.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>
#include <machine/vmm_snapshot.h>

#include "vmm_ioport.h"
#include "vmm_ktr.h"
#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include "vatpic.h"
#include "vatpit.h"
#include "vhpet.h"
#include "vioapic.h"
#include "vlapic.h"
#include "vpmtmr.h"
#include "vrtc.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

/*
 * Initialization:
 * (a) allocated when vcpu is created
 * (i) initialized when vcpu is created and when it is reinitialized
 * (o) initialized the first time the vcpu is created
 * (x) initialized before use
 */
struct vcpu {
	struct mtx 	mtx;		/* (o) protects 'state' and 'hostcpu' */
	enum vcpu_state	state;		/* (o) vcpu state */
	int		vcpuid;		/* (o) */
	int		hostcpu;	/* (o) vcpu's host cpu */
	int		reqidle;	/* (i) request vcpu to idle */
	struct vm	*vm;		/* (o) */
	void		*cookie;	/* (i) cpu-specific data */
	struct vlapic	*vlapic;	/* (i) APIC device model */
	enum x2apic_state x2apic_state;	/* (i) APIC mode */
	uint64_t	exitintinfo;	/* (i) events pending at VM exit */
	int		nmi_pending;	/* (i) NMI pending */
	int		extint_pending;	/* (i) INTR pending */
	int		exception_pending; /* (i) exception pending */
	int	exc_vector;		/* (x) exception collateral */
	int	exc_errcode_valid;
	uint32_t exc_errcode;
	struct savefpu	*guestfpu;	/* (a,i) guest fpu state */
	uint64_t	guest_xcr0;	/* (i) guest %xcr0 register */
	void		*stats;		/* (a,i) statistics */
	struct vm_exit	exitinfo;	/* (x) exit reason and collateral */
	uint64_t	nextrip;	/* (x) next instruction to execute */
	uint64_t	tsc_offset;	/* (o) TSC offsetting */
};

#define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define	vcpu_lock_destroy(v)	mtx_destroy(&((v)->mtx))
#define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
#define	vcpu_assert_locked(v)	mtx_assert(&((v)->mtx), MA_OWNED)
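
/*
 * Usage sketch for the spin-lock wrappers above: they bracket short updates
 * of 'state' and 'hostcpu' only, mirroring vcpu_set_state() later in this
 * file:
 *
 *	vcpu_lock(vcpu);
 *	error = vcpu_set_state_locked(vcpu, newstate, from_idle);
 *	vcpu_unlock(vcpu);
 */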

struct mem_seg {
	size_t	len;
	bool	sysmem;
	struct vm_object *object;
};
#define	VM_MAX_MEMSEGS	4

struct mem_map {
	vm_paddr_t	gpa;
	size_t		len;
	vm_ooffset_t	segoff;
	int		segid;
	int		prot;
	int		flags;
};
#define	VM_MAX_MEMMAPS	8

/*
 * Initialization:
 * (o) initialized the first time the VM is created
 * (i) initialized when VM is created and when it is reinitialized
 * (x) initialized before use
 *
 * Locking:
 * [m] mem_segs_lock
 * [r] rendezvous_mtx
 * [v] reads require one frozen vcpu, writes require freezing all vcpus
 */
struct vm {
	void		*cookie;		/* (i) cpu-specific data */
	void		*iommu;			/* (x) iommu-specific data */
	struct vhpet	*vhpet;			/* (i) virtual HPET */
	struct vioapic	*vioapic;		/* (i) virtual ioapic */
	struct vatpic	*vatpic;		/* (i) virtual atpic */
	struct vatpit	*vatpit;		/* (i) virtual atpit */
	struct vpmtmr	*vpmtmr;		/* (i) virtual ACPI PM timer */
	struct vrtc	*vrtc;			/* (o) virtual RTC */
	volatile cpuset_t active_cpus;		/* (i) active vcpus */
	volatile cpuset_t debug_cpus;		/* (i) vcpus stopped for debug */
	cpuset_t	startup_cpus;		/* (i) [r] waiting for startup */
	int		suspend;		/* (i) stop VM execution */
	bool		dying;			/* (o) is dying */
	volatile cpuset_t suspended_cpus;	/* (i) suspended vcpus */
	volatile cpuset_t halted_cpus;		/* (x) cpus in a hard halt */
	cpuset_t	rendezvous_req_cpus;	/* (x) [r] rendezvous requested */
	cpuset_t	rendezvous_done_cpus;	/* (x) [r] rendezvous finished */
	void		*rendezvous_arg;	/* (x) [r] rendezvous func/arg */
	vm_rendezvous_func_t rendezvous_func;
	struct mtx	rendezvous_mtx;		/* (o) rendezvous lock */
	struct mem_map	mem_maps[VM_MAX_MEMMAPS]; /* (i) [m+v] guest address space */
	struct mem_seg	mem_segs[VM_MAX_MEMSEGS]; /* (o) [m+v] guest memory regions */
	struct vmspace	*vmspace;		/* (o) guest's address space */
	char		name[VM_MAX_NAMELEN+1];	/* (o) virtual machine name */
	struct vcpu	**vcpu;			/* (o) guest vcpus */
	/* The following describe the vm cpu topology */
	uint16_t	sockets;		/* (o) num of sockets */
	uint16_t	cores;			/* (o) num of cores/socket */
	uint16_t	threads;		/* (o) num of threads/core */
	uint16_t	maxcpus;		/* (o) max pluggable cpus */
	struct sx	mem_segs_lock;		/* (o) */
	struct sx	vcpus_init_lock;	/* (o) */
};
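
/*
 * Tracing convenience wrappers: each VMM_CTR* macro below forwards to the
 * corresponding VCPU_CTR* KTR macro, filling in the owning VM and vcpuid
 * from the vcpu so callers only pass the vcpu and the format arguments.
 */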
#define	VMM_CTR0(vcpu, format)						\
	VCPU_CTR0((vcpu)->vm, (vcpu)->vcpuid, format)

#define	VMM_CTR1(vcpu, format, p1)					\
	VCPU_CTR1((vcpu)->vm, (vcpu)->vcpuid, format, p1)

#define	VMM_CTR2(vcpu, format, p1, p2)					\
	VCPU_CTR2((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2)

#define	VMM_CTR3(vcpu, format, p1, p2, p3)				\
	VCPU_CTR3((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3)

#define	VMM_CTR4(vcpu, format, p1, p2, p3, p4)				\
	VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4)

static int vmm_initialized;

static void	vmmops_panic(void);

static void
vmmops_panic(void)
{
	panic("vmm_ops func called when !vmm_is_intel() && !vmm_is_svm()");
}

#define	DEFINE_VMMOPS_IFUNC(ret_type, opname, args)			\
	DEFINE_IFUNC(static, ret_type, vmmops_##opname, args)		\
	{								\
		if (vmm_is_intel())					\
			return (vmm_ops_intel.opname);			\
		else if (vmm_is_svm())					\
			return (vmm_ops_amd.opname);			\
		else							\
			return ((ret_type (*)args)vmmops_panic);	\
	}

DEFINE_VMMOPS_IFUNC(int, modinit, (int ipinum))
DEFINE_VMMOPS_IFUNC(int, modcleanup, (void))
DEFINE_VMMOPS_IFUNC(void, modresume, (void))
DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
DEFINE_VMMOPS_IFUNC(int, run, (void *vcpui, register_t rip, struct pmap *pmap,
    struct vm_eventinfo *info))
DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi))
DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
    int vcpu_id))
DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vcpui))
DEFINE_VMMOPS_IFUNC(int, getreg, (void *vcpui, int num, uint64_t *retval))
DEFINE_VMMOPS_IFUNC(int, setreg, (void *vcpui, int num, uint64_t val))
DEFINE_VMMOPS_IFUNC(int, getdesc, (void *vcpui, int num, struct seg_desc *desc))
DEFINE_VMMOPS_IFUNC(int, setdesc, (void *vcpui, int num, struct seg_desc *desc))
DEFINE_VMMOPS_IFUNC(int, getcap, (void *vcpui, int num, int *retval))
DEFINE_VMMOPS_IFUNC(int, setcap, (void *vcpui, int num, int val))
DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
    vm_offset_t max))
DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace))
DEFINE_VMMOPS_IFUNC(struct vlapic *, vlapic_init, (void *vcpui))
DEFINE_VMMOPS_IFUNC(void, vlapic_cleanup, (struct vlapic *vlapic))
#ifdef BHYVE_SNAPSHOT
DEFINE_VMMOPS_IFUNC(int, vcpu_snapshot, (void *vcpui,
    struct vm_snapshot_meta *meta))
DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vcpui, uint64_t now))
#endif
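
/*
 * For illustration: each DEFINE_VMMOPS_IFUNC use above generates an ifunc
 * resolver, so a call such as
 *
 *	error = vmmops_getreg(vcpu->cookie, reg, retval);
 *
 * is resolved once to vmm_ops_intel.getreg on VT-x hosts, to
 * vmm_ops_amd.getreg on SVM hosts, or to the vmmops_panic() stub when
 * neither is supported, rather than being dispatched on every call.
 */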

#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	fpu_stop_emulating()	clts()

SDT_PROVIDER_DEFINE(vmm);

static MALLOC_DEFINE(M_VM, "vm", "vm");

/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

/*
 * Halt the guest if all vcpus are executing a HLT instruction with
 * interrupts disabled.
 */
static int halt_detection_enabled = 1;
SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
    &halt_detection_enabled, 0,
    "Halt VM if all vcpus execute HLT with interrupts disabled");

static int vmm_ipinum;
SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
    "IPI vector used for vcpu notifications");

static int trace_guest_exceptions;
SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
    &trace_guest_exceptions, 0,
    "Trap into hypervisor on all guest exceptions and reflect them back");

static int trap_wbinvd;
SYSCTL_INT(_hw_vmm, OID_AUTO, trap_wbinvd, CTLFLAG_RDTUN, &trap_wbinvd, 0,
    "WBINVD triggers a VM-exit");

u_int vm_maxcpu;
SYSCTL_UINT(_hw_vmm, OID_AUTO, maxcpu, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &vm_maxcpu, 0, "Maximum number of vCPUs");

static void vm_free_memmap(struct vm *vm, int ident);
static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);

/*
 * Upper limit on vm_maxcpu.  Limited by use of uint16_t types for CPU
 * counts as well as range of vpid values for VT-x and by the capacity
 * of cpuset_t masks.  The call to new_unrhdr() in vpid_init() in
 * vmx.c requires 'vm_maxcpu + 1 <= 0xffff', hence the '- 1' below.
 */
#define	VM_MAXCPU	MIN(0xffff - 1, CPU_SETSIZE)

#ifdef KTR
static const char *
vcpu_state2str(enum vcpu_state state)
{

	switch (state) {
	case VCPU_IDLE:
		return ("idle");
	case VCPU_FROZEN:
		return ("frozen");
	case VCPU_RUNNING:
		return ("running");
	case VCPU_SLEEPING:
		return ("sleeping");
	default:
		return ("unknown");
	}
}
#endif

static void
vcpu_cleanup(struct vcpu *vcpu, bool destroy)
{
	vmmops_vlapic_cleanup(vcpu->vlapic);
	vmmops_vcpu_cleanup(vcpu->cookie);
	vcpu->cookie = NULL;
	if (destroy) {
		vmm_stat_free(vcpu->stats);
		fpu_save_area_free(vcpu->guestfpu);
		vcpu_lock_destroy(vcpu);
		free(vcpu, M_VM);
	}
}

static struct vcpu *
vcpu_alloc(struct vm *vm, int vcpu_id)
{
	struct vcpu *vcpu;

	KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus,
	    ("vcpu_init: invalid vcpu %d", vcpu_id));

	vcpu = malloc(sizeof(*vcpu), M_VM, M_WAITOK | M_ZERO);
	vcpu_lock_init(vcpu);
	vcpu->state = VCPU_IDLE;
	vcpu->hostcpu = NOCPU;
	vcpu->vcpuid = vcpu_id;
	vcpu->vm = vm;
	vcpu->guestfpu = fpu_save_area_alloc();
	vcpu->stats = vmm_stat_alloc();
	vcpu->tsc_offset = 0;
	return (vcpu);
}

static void
vcpu_init(struct vcpu *vcpu)
{
	vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid);
	vcpu->vlapic = vmmops_vlapic_init(vcpu->cookie);
	vm_set_x2apic_state(vcpu, X2APIC_DISABLED);
	vcpu->reqidle = 0;
	vcpu->exitintinfo = 0;
	vcpu->nmi_pending = 0;
	vcpu->extint_pending = 0;
	vcpu->exception_pending = 0;
	vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
	fpu_save_area_reset(vcpu->guestfpu);
	vmm_stat_init(vcpu->stats);
}

int
vcpu_trace_exceptions(struct vcpu *vcpu)
{

	return (trace_guest_exceptions);
}

int
vcpu_trap_wbinvd(struct vcpu *vcpu)
{
	return (trap_wbinvd);
}

struct vm_exit *
vm_exitinfo(struct vcpu *vcpu)
{
	return (&vcpu->exitinfo);
}
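
/*
 * One-time module initialization: size the vCPU limit from the host CPU
 * count (clamped to VM_MAXCPU and overridable via the hw.vmm.maxcpu
 * tunable), capture host state, allocate the IPI vector used to kick vcpus
 * out of guest mode, initialize the memory subsystem and finally hand off
 * to the hardware-specific backend via vmmops_modinit().
 */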
static int
vmm_init(void)
{
	int error;

	if (!vmm_is_hw_supported())
		return (ENXIO);

	vm_maxcpu = mp_ncpus;
	TUNABLE_INT_FETCH("hw.vmm.maxcpu", &vm_maxcpu);

	if (vm_maxcpu > VM_MAXCPU) {
		printf("vmm: vm_maxcpu clamped to %u\n", VM_MAXCPU);
		vm_maxcpu = VM_MAXCPU;
	}
	if (vm_maxcpu == 0)
		vm_maxcpu = 1;

	vmm_host_state_init();

	vmm_ipinum = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
	    &IDTVEC(justreturn));
	if (vmm_ipinum < 0)
		vmm_ipinum = IPI_AST;

	error = vmm_mem_init();
	if (error)
		return (error);

	vmm_resume_p = vmmops_modresume;

	return (vmmops_modinit(vmm_ipinum));
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		if (vmm_is_hw_supported()) {
			vmmdev_init();
			error = vmm_init();
			if (error == 0)
				vmm_initialized = 1;
		} else {
			error = ENXIO;
		}
		break;
	case MOD_UNLOAD:
		if (vmm_is_hw_supported()) {
			error = vmmdev_cleanup();
			if (error == 0) {
				vmm_resume_p = NULL;
				iommu_cleanup();
				if (vmm_ipinum != IPI_AST)
					lapic_ipi_free(vmm_ipinum);
				error = vmmops_modcleanup();
				/*
				 * Something bad happened - prevent new
				 * VMs from being created
				 */
				if (error)
					vmm_initialized = 0;
			}
		} else {
			error = 0;
		}
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * vmm initialization has the following dependencies:
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

static void
vm_init(struct vm *vm, bool create)
{
	vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace));
	vm->iommu = NULL;
	vm->vioapic = vioapic_init(vm);
	vm->vhpet = vhpet_init(vm);
	vm->vatpic = vatpic_init(vm);
	vm->vatpit = vatpit_init(vm);
	vm->vpmtmr = vpmtmr_init(vm);
	if (create)
		vm->vrtc = vrtc_init(vm);

	CPU_ZERO(&vm->active_cpus);
	CPU_ZERO(&vm->debug_cpus);
	CPU_ZERO(&vm->startup_cpus);

	vm->suspend = 0;
	CPU_ZERO(&vm->suspended_cpus);

	if (!create) {
		for (int i = 0; i < vm->maxcpus; i++) {
			if (vm->vcpu[i] != NULL)
				vcpu_init(vm->vcpu[i]);
		}
	}
}

void
vm_disable_vcpu_creation(struct vm *vm)
{
	sx_xlock(&vm->vcpus_init_lock);
	vm->dying = true;
	sx_xunlock(&vm->vcpus_init_lock);
}

struct vcpu *
vm_alloc_vcpu(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm))
		return (NULL);

	vcpu = atomic_load_ptr(&vm->vcpu[vcpuid]);
	if (__predict_true(vcpu != NULL))
		return (vcpu);

	sx_xlock(&vm->vcpus_init_lock);
	vcpu = vm->vcpu[vcpuid];
	if (vcpu == NULL && !vm->dying) {
		vcpu = vcpu_alloc(vm, vcpuid);
		vcpu_init(vcpu);

		/*
		 * Ensure vCPU is fully created before updating pointer
		 * to permit unlocked reads above.
551 */ 552 atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid], 553 (uintptr_t)vcpu); 554 } 555 sx_xunlock(&vm->vcpus_init_lock); 556 return (vcpu); 557 } 558 559 void 560 vm_slock_vcpus(struct vm *vm) 561 { 562 sx_slock(&vm->vcpus_init_lock); 563 } 564 565 void 566 vm_unlock_vcpus(struct vm *vm) 567 { 568 sx_unlock(&vm->vcpus_init_lock); 569 } 570 571 /* 572 * The default CPU topology is a single thread per package. 573 */ 574 u_int cores_per_package = 1; 575 u_int threads_per_core = 1; 576 577 int 578 vm_create(const char *name, struct vm **retvm) 579 { 580 struct vm *vm; 581 struct vmspace *vmspace; 582 583 /* 584 * If vmm.ko could not be successfully initialized then don't attempt 585 * to create the virtual machine. 586 */ 587 if (!vmm_initialized) 588 return (ENXIO); 589 590 if (name == NULL || strnlen(name, VM_MAX_NAMELEN + 1) == 591 VM_MAX_NAMELEN + 1) 592 return (EINVAL); 593 594 vmspace = vmmops_vmspace_alloc(0, VM_MAXUSER_ADDRESS_LA48); 595 if (vmspace == NULL) 596 return (ENOMEM); 597 598 vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO); 599 strcpy(vm->name, name); 600 vm->vmspace = vmspace; 601 mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF); 602 sx_init(&vm->mem_segs_lock, "vm mem_segs"); 603 sx_init(&vm->vcpus_init_lock, "vm vcpus"); 604 vm->vcpu = malloc(sizeof(*vm->vcpu) * vm_maxcpu, M_VM, M_WAITOK | 605 M_ZERO); 606 607 vm->sockets = 1; 608 vm->cores = cores_per_package; /* XXX backwards compatibility */ 609 vm->threads = threads_per_core; /* XXX backwards compatibility */ 610 vm->maxcpus = vm_maxcpu; 611 612 vm_init(vm, true); 613 614 *retvm = vm; 615 return (0); 616 } 617 618 void 619 vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores, 620 uint16_t *threads, uint16_t *maxcpus) 621 { 622 *sockets = vm->sockets; 623 *cores = vm->cores; 624 *threads = vm->threads; 625 *maxcpus = vm->maxcpus; 626 } 627 628 uint16_t 629 vm_get_maxcpus(struct vm *vm) 630 { 631 return (vm->maxcpus); 632 } 633 634 int 635 vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores, 636 uint16_t threads, uint16_t maxcpus __unused) 637 { 638 /* Ignore maxcpus. */ 639 if ((sockets * cores * threads) > vm->maxcpus) 640 return (EINVAL); 641 vm->sockets = sockets; 642 vm->cores = cores; 643 vm->threads = threads; 644 return(0); 645 } 646 647 static void 648 vm_cleanup(struct vm *vm, bool destroy) 649 { 650 struct mem_map *mm; 651 int i; 652 653 if (destroy) 654 vm_xlock_memsegs(vm); 655 656 ppt_unassign_all(vm); 657 658 if (vm->iommu != NULL) 659 iommu_destroy_domain(vm->iommu); 660 661 if (destroy) 662 vrtc_cleanup(vm->vrtc); 663 else 664 vrtc_reset(vm->vrtc); 665 vpmtmr_cleanup(vm->vpmtmr); 666 vatpit_cleanup(vm->vatpit); 667 vhpet_cleanup(vm->vhpet); 668 vatpic_cleanup(vm->vatpic); 669 vioapic_cleanup(vm->vioapic); 670 671 for (i = 0; i < vm->maxcpus; i++) { 672 if (vm->vcpu[i] != NULL) 673 vcpu_cleanup(vm->vcpu[i], destroy); 674 } 675 676 vmmops_cleanup(vm->cookie); 677 678 /* 679 * System memory is removed from the guest address space only when 680 * the VM is destroyed. This is because the mapping remains the same 681 * across VM reset. 682 * 683 * Device memory can be relocated by the guest (e.g. using PCI BARs) 684 * so those mappings are removed on a VM reset. 
685 */ 686 for (i = 0; i < VM_MAX_MEMMAPS; i++) { 687 mm = &vm->mem_maps[i]; 688 if (destroy || !sysmem_mapping(vm, mm)) 689 vm_free_memmap(vm, i); 690 } 691 692 if (destroy) { 693 for (i = 0; i < VM_MAX_MEMSEGS; i++) 694 vm_free_memseg(vm, i); 695 vm_unlock_memsegs(vm); 696 697 vmmops_vmspace_free(vm->vmspace); 698 vm->vmspace = NULL; 699 700 free(vm->vcpu, M_VM); 701 sx_destroy(&vm->vcpus_init_lock); 702 sx_destroy(&vm->mem_segs_lock); 703 mtx_destroy(&vm->rendezvous_mtx); 704 } 705 } 706 707 void 708 vm_destroy(struct vm *vm) 709 { 710 vm_cleanup(vm, true); 711 free(vm, M_VM); 712 } 713 714 int 715 vm_reinit(struct vm *vm) 716 { 717 int error; 718 719 /* 720 * A virtual machine can be reset only if all vcpus are suspended. 721 */ 722 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { 723 vm_cleanup(vm, false); 724 vm_init(vm, false); 725 error = 0; 726 } else { 727 error = EBUSY; 728 } 729 730 return (error); 731 } 732 733 const char * 734 vm_name(struct vm *vm) 735 { 736 return (vm->name); 737 } 738 739 void 740 vm_slock_memsegs(struct vm *vm) 741 { 742 sx_slock(&vm->mem_segs_lock); 743 } 744 745 void 746 vm_xlock_memsegs(struct vm *vm) 747 { 748 sx_xlock(&vm->mem_segs_lock); 749 } 750 751 void 752 vm_unlock_memsegs(struct vm *vm) 753 { 754 sx_unlock(&vm->mem_segs_lock); 755 } 756 757 int 758 vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa) 759 { 760 vm_object_t obj; 761 762 if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL) 763 return (ENOMEM); 764 else 765 return (0); 766 } 767 768 int 769 vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len) 770 { 771 772 vmm_mmio_free(vm->vmspace, gpa, len); 773 return (0); 774 } 775 776 /* 777 * Return 'true' if 'gpa' is allocated in the guest address space. 778 * 779 * This function is called in the context of a running vcpu which acts as 780 * an implicit lock on 'vm->mem_maps[]'. 
781 */ 782 bool 783 vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa) 784 { 785 struct vm *vm = vcpu->vm; 786 struct mem_map *mm; 787 int i; 788 789 #ifdef INVARIANTS 790 int hostcpu, state; 791 state = vcpu_get_state(vcpu, &hostcpu); 792 KASSERT(state == VCPU_RUNNING && hostcpu == curcpu, 793 ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu)); 794 #endif 795 796 for (i = 0; i < VM_MAX_MEMMAPS; i++) { 797 mm = &vm->mem_maps[i]; 798 if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len) 799 return (true); /* 'gpa' is sysmem or devmem */ 800 } 801 802 if (ppt_is_mmio(vm, gpa)) 803 return (true); /* 'gpa' is pci passthru mmio */ 804 805 return (false); 806 } 807 808 int 809 vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem) 810 { 811 struct mem_seg *seg; 812 vm_object_t obj; 813 814 sx_assert(&vm->mem_segs_lock, SX_XLOCKED); 815 816 if (ident < 0 || ident >= VM_MAX_MEMSEGS) 817 return (EINVAL); 818 819 if (len == 0 || (len & PAGE_MASK)) 820 return (EINVAL); 821 822 seg = &vm->mem_segs[ident]; 823 if (seg->object != NULL) { 824 if (seg->len == len && seg->sysmem == sysmem) 825 return (EEXIST); 826 else 827 return (EINVAL); 828 } 829 830 obj = vm_object_allocate(OBJT_SWAP, len >> PAGE_SHIFT); 831 if (obj == NULL) 832 return (ENOMEM); 833 834 seg->len = len; 835 seg->object = obj; 836 seg->sysmem = sysmem; 837 return (0); 838 } 839 840 int 841 vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem, 842 vm_object_t *objptr) 843 { 844 struct mem_seg *seg; 845 846 sx_assert(&vm->mem_segs_lock, SX_LOCKED); 847 848 if (ident < 0 || ident >= VM_MAX_MEMSEGS) 849 return (EINVAL); 850 851 seg = &vm->mem_segs[ident]; 852 if (len) 853 *len = seg->len; 854 if (sysmem) 855 *sysmem = seg->sysmem; 856 if (objptr) 857 *objptr = seg->object; 858 return (0); 859 } 860 861 void 862 vm_free_memseg(struct vm *vm, int ident) 863 { 864 struct mem_seg *seg; 865 866 KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS, 867 ("%s: invalid memseg ident %d", __func__, ident)); 868 869 seg = &vm->mem_segs[ident]; 870 if (seg->object != NULL) { 871 vm_object_deallocate(seg->object); 872 bzero(seg, sizeof(struct mem_seg)); 873 } 874 } 875 876 int 877 vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first, 878 size_t len, int prot, int flags) 879 { 880 struct mem_seg *seg; 881 struct mem_map *m, *map; 882 vm_ooffset_t last; 883 int i, error; 884 885 if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0) 886 return (EINVAL); 887 888 if (flags & ~VM_MEMMAP_F_WIRED) 889 return (EINVAL); 890 891 if (segid < 0 || segid >= VM_MAX_MEMSEGS) 892 return (EINVAL); 893 894 seg = &vm->mem_segs[segid]; 895 if (seg->object == NULL) 896 return (EINVAL); 897 898 last = first + len; 899 if (first < 0 || first >= last || last > seg->len) 900 return (EINVAL); 901 902 if ((gpa | first | last) & PAGE_MASK) 903 return (EINVAL); 904 905 map = NULL; 906 for (i = 0; i < VM_MAX_MEMMAPS; i++) { 907 m = &vm->mem_maps[i]; 908 if (m->len == 0) { 909 map = m; 910 break; 911 } 912 } 913 914 if (map == NULL) 915 return (ENOSPC); 916 917 error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa, 918 len, 0, VMFS_NO_SPACE, prot, prot, 0); 919 if (error != KERN_SUCCESS) 920 return (EFAULT); 921 922 vm_object_reference(seg->object); 923 924 if (flags & VM_MEMMAP_F_WIRED) { 925 error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len, 926 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES); 927 if (error != KERN_SUCCESS) { 928 vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len); 929 return (error == 
KERN_RESOURCE_SHORTAGE ? ENOMEM : 930 EFAULT); 931 } 932 } 933 934 map->gpa = gpa; 935 map->len = len; 936 map->segoff = first; 937 map->segid = segid; 938 map->prot = prot; 939 map->flags = flags; 940 return (0); 941 } 942 943 int 944 vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len) 945 { 946 struct mem_map *m; 947 int i; 948 949 for (i = 0; i < VM_MAX_MEMMAPS; i++) { 950 m = &vm->mem_maps[i]; 951 if (m->gpa == gpa && m->len == len && 952 (m->flags & VM_MEMMAP_F_IOMMU) == 0) { 953 vm_free_memmap(vm, i); 954 return (0); 955 } 956 } 957 958 return (EINVAL); 959 } 960 961 int 962 vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid, 963 vm_ooffset_t *segoff, size_t *len, int *prot, int *flags) 964 { 965 struct mem_map *mm, *mmnext; 966 int i; 967 968 mmnext = NULL; 969 for (i = 0; i < VM_MAX_MEMMAPS; i++) { 970 mm = &vm->mem_maps[i]; 971 if (mm->len == 0 || mm->gpa < *gpa) 972 continue; 973 if (mmnext == NULL || mm->gpa < mmnext->gpa) 974 mmnext = mm; 975 } 976 977 if (mmnext != NULL) { 978 *gpa = mmnext->gpa; 979 if (segid) 980 *segid = mmnext->segid; 981 if (segoff) 982 *segoff = mmnext->segoff; 983 if (len) 984 *len = mmnext->len; 985 if (prot) 986 *prot = mmnext->prot; 987 if (flags) 988 *flags = mmnext->flags; 989 return (0); 990 } else { 991 return (ENOENT); 992 } 993 } 994 995 static void 996 vm_free_memmap(struct vm *vm, int ident) 997 { 998 struct mem_map *mm; 999 int error __diagused; 1000 1001 mm = &vm->mem_maps[ident]; 1002 if (mm->len) { 1003 error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa, 1004 mm->gpa + mm->len); 1005 KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d", 1006 __func__, error)); 1007 bzero(mm, sizeof(struct mem_map)); 1008 } 1009 } 1010 1011 static __inline bool 1012 sysmem_mapping(struct vm *vm, struct mem_map *mm) 1013 { 1014 1015 if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem) 1016 return (true); 1017 else 1018 return (false); 1019 } 1020 1021 vm_paddr_t 1022 vmm_sysmem_maxaddr(struct vm *vm) 1023 { 1024 struct mem_map *mm; 1025 vm_paddr_t maxaddr; 1026 int i; 1027 1028 maxaddr = 0; 1029 for (i = 0; i < VM_MAX_MEMMAPS; i++) { 1030 mm = &vm->mem_maps[i]; 1031 if (sysmem_mapping(vm, mm)) { 1032 if (maxaddr < mm->gpa + mm->len) 1033 maxaddr = mm->gpa + mm->len; 1034 } 1035 } 1036 return (maxaddr); 1037 } 1038 1039 static void 1040 vm_iommu_modify(struct vm *vm, bool map) 1041 { 1042 int i, sz; 1043 vm_paddr_t gpa, hpa; 1044 struct mem_map *mm; 1045 void *vp, *cookie, *host_domain; 1046 1047 sz = PAGE_SIZE; 1048 host_domain = iommu_host_domain(); 1049 1050 for (i = 0; i < VM_MAX_MEMMAPS; i++) { 1051 mm = &vm->mem_maps[i]; 1052 if (!sysmem_mapping(vm, mm)) 1053 continue; 1054 1055 if (map) { 1056 KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0, 1057 ("iommu map found invalid memmap %#lx/%#lx/%#x", 1058 mm->gpa, mm->len, mm->flags)); 1059 if ((mm->flags & VM_MEMMAP_F_WIRED) == 0) 1060 continue; 1061 mm->flags |= VM_MEMMAP_F_IOMMU; 1062 } else { 1063 if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0) 1064 continue; 1065 mm->flags &= ~VM_MEMMAP_F_IOMMU; 1066 KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0, 1067 ("iommu unmap found invalid memmap %#lx/%#lx/%#x", 1068 mm->gpa, mm->len, mm->flags)); 1069 } 1070 1071 gpa = mm->gpa; 1072 while (gpa < mm->gpa + mm->len) { 1073 vp = vm_gpa_hold_global(vm, gpa, PAGE_SIZE, 1074 VM_PROT_WRITE, &cookie); 1075 KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx", 1076 vm_name(vm), gpa)); 1077 1078 vm_gpa_release(cookie); 1079 1080 hpa = DMAP_TO_PHYS((uintptr_t)vp); 1081 if (map) { 1082 
iommu_create_mapping(vm->iommu, gpa, hpa, sz); 1083 } else { 1084 iommu_remove_mapping(vm->iommu, gpa, sz); 1085 } 1086 1087 gpa += PAGE_SIZE; 1088 } 1089 } 1090 1091 /* 1092 * Invalidate the cached translations associated with the domain 1093 * from which pages were removed. 1094 */ 1095 if (map) 1096 iommu_invalidate_tlb(host_domain); 1097 else 1098 iommu_invalidate_tlb(vm->iommu); 1099 } 1100 1101 #define vm_iommu_unmap(vm) vm_iommu_modify((vm), false) 1102 #define vm_iommu_map(vm) vm_iommu_modify((vm), true) 1103 1104 int 1105 vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func) 1106 { 1107 int error; 1108 1109 error = ppt_unassign_device(vm, bus, slot, func); 1110 if (error) 1111 return (error); 1112 1113 if (ppt_assigned_devices(vm) == 0) 1114 vm_iommu_unmap(vm); 1115 1116 return (0); 1117 } 1118 1119 int 1120 vm_assign_pptdev(struct vm *vm, int bus, int slot, int func) 1121 { 1122 int error; 1123 vm_paddr_t maxaddr; 1124 1125 /* Set up the IOMMU to do the 'gpa' to 'hpa' translation */ 1126 if (ppt_assigned_devices(vm) == 0) { 1127 KASSERT(vm->iommu == NULL, 1128 ("vm_assign_pptdev: iommu must be NULL")); 1129 maxaddr = vmm_sysmem_maxaddr(vm); 1130 vm->iommu = iommu_create_domain(maxaddr); 1131 if (vm->iommu == NULL) 1132 return (ENXIO); 1133 vm_iommu_map(vm); 1134 } 1135 1136 error = ppt_assign_device(vm, bus, slot, func); 1137 return (error); 1138 } 1139 1140 static void * 1141 _vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot, 1142 void **cookie) 1143 { 1144 int i, count, pageoff; 1145 struct mem_map *mm; 1146 vm_page_t m; 1147 1148 pageoff = gpa & PAGE_MASK; 1149 if (len > PAGE_SIZE - pageoff) 1150 panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len); 1151 1152 count = 0; 1153 for (i = 0; i < VM_MAX_MEMMAPS; i++) { 1154 mm = &vm->mem_maps[i]; 1155 if (gpa >= mm->gpa && gpa < mm->gpa + mm->len) { 1156 count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map, 1157 trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1); 1158 break; 1159 } 1160 } 1161 1162 if (count == 1) { 1163 *cookie = m; 1164 return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff)); 1165 } else { 1166 *cookie = NULL; 1167 return (NULL); 1168 } 1169 } 1170 1171 void * 1172 vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot, 1173 void **cookie) 1174 { 1175 #ifdef INVARIANTS 1176 /* 1177 * The current vcpu should be frozen to ensure 'vm_memmap[]' 1178 * stability. 
1179 */ 1180 int state = vcpu_get_state(vcpu, NULL); 1181 KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d", 1182 __func__, state)); 1183 #endif 1184 return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie)); 1185 } 1186 1187 void * 1188 vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot, 1189 void **cookie) 1190 { 1191 sx_assert(&vm->mem_segs_lock, SX_LOCKED); 1192 return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie)); 1193 } 1194 1195 void 1196 vm_gpa_release(void *cookie) 1197 { 1198 vm_page_t m = cookie; 1199 1200 vm_page_unwire(m, PQ_ACTIVE); 1201 } 1202 1203 int 1204 vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval) 1205 { 1206 1207 if (reg >= VM_REG_LAST) 1208 return (EINVAL); 1209 1210 return (vmmops_getreg(vcpu->cookie, reg, retval)); 1211 } 1212 1213 int 1214 vm_set_register(struct vcpu *vcpu, int reg, uint64_t val) 1215 { 1216 int error; 1217 1218 if (reg >= VM_REG_LAST) 1219 return (EINVAL); 1220 1221 error = vmmops_setreg(vcpu->cookie, reg, val); 1222 if (error || reg != VM_REG_GUEST_RIP) 1223 return (error); 1224 1225 /* Set 'nextrip' to match the value of %rip */ 1226 VMM_CTR1(vcpu, "Setting nextrip to %#lx", val); 1227 vcpu->nextrip = val; 1228 return (0); 1229 } 1230 1231 static bool 1232 is_descriptor_table(int reg) 1233 { 1234 1235 switch (reg) { 1236 case VM_REG_GUEST_IDTR: 1237 case VM_REG_GUEST_GDTR: 1238 return (true); 1239 default: 1240 return (false); 1241 } 1242 } 1243 1244 static bool 1245 is_segment_register(int reg) 1246 { 1247 1248 switch (reg) { 1249 case VM_REG_GUEST_ES: 1250 case VM_REG_GUEST_CS: 1251 case VM_REG_GUEST_SS: 1252 case VM_REG_GUEST_DS: 1253 case VM_REG_GUEST_FS: 1254 case VM_REG_GUEST_GS: 1255 case VM_REG_GUEST_TR: 1256 case VM_REG_GUEST_LDTR: 1257 return (true); 1258 default: 1259 return (false); 1260 } 1261 } 1262 1263 int 1264 vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc) 1265 { 1266 1267 if (!is_segment_register(reg) && !is_descriptor_table(reg)) 1268 return (EINVAL); 1269 1270 return (vmmops_getdesc(vcpu->cookie, reg, desc)); 1271 } 1272 1273 int 1274 vm_set_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc) 1275 { 1276 1277 if (!is_segment_register(reg) && !is_descriptor_table(reg)) 1278 return (EINVAL); 1279 1280 return (vmmops_setdesc(vcpu->cookie, reg, desc)); 1281 } 1282 1283 static void 1284 restore_guest_fpustate(struct vcpu *vcpu) 1285 { 1286 1287 /* flush host state to the pcb */ 1288 fpuexit(curthread); 1289 1290 /* restore guest FPU state */ 1291 fpu_stop_emulating(); 1292 fpurestore(vcpu->guestfpu); 1293 1294 /* restore guest XCR0 if XSAVE is enabled in the host */ 1295 if (rcr4() & CR4_XSAVE) 1296 load_xcr(0, vcpu->guest_xcr0); 1297 1298 /* 1299 * The FPU is now "dirty" with the guest's state so turn on emulation 1300 * to trap any access to the FPU by the host. 
1301 */ 1302 fpu_start_emulating(); 1303 } 1304 1305 static void 1306 save_guest_fpustate(struct vcpu *vcpu) 1307 { 1308 1309 if ((rcr0() & CR0_TS) == 0) 1310 panic("fpu emulation not enabled in host!"); 1311 1312 /* save guest XCR0 and restore host XCR0 */ 1313 if (rcr4() & CR4_XSAVE) { 1314 vcpu->guest_xcr0 = rxcr(0); 1315 load_xcr(0, vmm_get_host_xcr0()); 1316 } 1317 1318 /* save guest FPU state */ 1319 fpu_stop_emulating(); 1320 fpusave(vcpu->guestfpu); 1321 fpu_start_emulating(); 1322 } 1323 1324 static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle"); 1325 1326 static int 1327 vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate, 1328 bool from_idle) 1329 { 1330 int error; 1331 1332 vcpu_assert_locked(vcpu); 1333 1334 /* 1335 * State transitions from the vmmdev_ioctl() must always begin from 1336 * the VCPU_IDLE state. This guarantees that there is only a single 1337 * ioctl() operating on a vcpu at any point. 1338 */ 1339 if (from_idle) { 1340 while (vcpu->state != VCPU_IDLE) { 1341 vcpu->reqidle = 1; 1342 vcpu_notify_event_locked(vcpu, false); 1343 VMM_CTR1(vcpu, "vcpu state change from %s to " 1344 "idle requested", vcpu_state2str(vcpu->state)); 1345 msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz); 1346 } 1347 } else { 1348 KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from " 1349 "vcpu idle state")); 1350 } 1351 1352 if (vcpu->state == VCPU_RUNNING) { 1353 KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d " 1354 "mismatch for running vcpu", curcpu, vcpu->hostcpu)); 1355 } else { 1356 KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a " 1357 "vcpu that is not running", vcpu->hostcpu)); 1358 } 1359 1360 /* 1361 * The following state transitions are allowed: 1362 * IDLE -> FROZEN -> IDLE 1363 * FROZEN -> RUNNING -> FROZEN 1364 * FROZEN -> SLEEPING -> FROZEN 1365 */ 1366 switch (vcpu->state) { 1367 case VCPU_IDLE: 1368 case VCPU_RUNNING: 1369 case VCPU_SLEEPING: 1370 error = (newstate != VCPU_FROZEN); 1371 break; 1372 case VCPU_FROZEN: 1373 error = (newstate == VCPU_FROZEN); 1374 break; 1375 default: 1376 error = 1; 1377 break; 1378 } 1379 1380 if (error) 1381 return (EBUSY); 1382 1383 VMM_CTR2(vcpu, "vcpu state changed from %s to %s", 1384 vcpu_state2str(vcpu->state), vcpu_state2str(newstate)); 1385 1386 vcpu->state = newstate; 1387 if (newstate == VCPU_RUNNING) 1388 vcpu->hostcpu = curcpu; 1389 else 1390 vcpu->hostcpu = NOCPU; 1391 1392 if (newstate == VCPU_IDLE) 1393 wakeup(&vcpu->state); 1394 1395 return (0); 1396 } 1397 1398 static void 1399 vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate) 1400 { 1401 int error; 1402 1403 if ((error = vcpu_set_state(vcpu, newstate, false)) != 0) 1404 panic("Error %d setting state to %d\n", error, newstate); 1405 } 1406 1407 static void 1408 vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate) 1409 { 1410 int error; 1411 1412 if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0) 1413 panic("Error %d setting state to %d", error, newstate); 1414 } 1415 1416 static int 1417 vm_handle_rendezvous(struct vcpu *vcpu) 1418 { 1419 struct vm *vm = vcpu->vm; 1420 struct thread *td; 1421 int error, vcpuid; 1422 1423 error = 0; 1424 vcpuid = vcpu->vcpuid; 1425 td = curthread; 1426 mtx_lock(&vm->rendezvous_mtx); 1427 while (vm->rendezvous_func != NULL) { 1428 /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */ 1429 CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus, &vm->active_cpus); 1430 1431 if (CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) && 
1432 !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) { 1433 VMM_CTR0(vcpu, "Calling rendezvous func"); 1434 (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg); 1435 CPU_SET(vcpuid, &vm->rendezvous_done_cpus); 1436 } 1437 if (CPU_CMP(&vm->rendezvous_req_cpus, 1438 &vm->rendezvous_done_cpus) == 0) { 1439 VMM_CTR0(vcpu, "Rendezvous completed"); 1440 CPU_ZERO(&vm->rendezvous_req_cpus); 1441 vm->rendezvous_func = NULL; 1442 wakeup(&vm->rendezvous_func); 1443 break; 1444 } 1445 VMM_CTR0(vcpu, "Wait for rendezvous completion"); 1446 mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0, 1447 "vmrndv", hz); 1448 if (td_ast_pending(td, TDA_SUSPEND)) { 1449 mtx_unlock(&vm->rendezvous_mtx); 1450 error = thread_check_susp(td, true); 1451 if (error != 0) 1452 return (error); 1453 mtx_lock(&vm->rendezvous_mtx); 1454 } 1455 } 1456 mtx_unlock(&vm->rendezvous_mtx); 1457 return (0); 1458 } 1459 1460 /* 1461 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run. 1462 */ 1463 static int 1464 vm_handle_hlt(struct vcpu *vcpu, bool intr_disabled, bool *retu) 1465 { 1466 struct vm *vm = vcpu->vm; 1467 const char *wmesg; 1468 struct thread *td; 1469 int error, t, vcpuid, vcpu_halted, vm_halted; 1470 1471 vcpuid = vcpu->vcpuid; 1472 vcpu_halted = 0; 1473 vm_halted = 0; 1474 error = 0; 1475 td = curthread; 1476 1477 KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted")); 1478 1479 vcpu_lock(vcpu); 1480 while (1) { 1481 /* 1482 * Do a final check for pending NMI or interrupts before 1483 * really putting this thread to sleep. Also check for 1484 * software events that would cause this vcpu to wakeup. 1485 * 1486 * These interrupts/events could have happened after the 1487 * vcpu returned from vmmops_run() and before it acquired the 1488 * vcpu lock above. 1489 */ 1490 if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle) 1491 break; 1492 if (vm_nmi_pending(vcpu)) 1493 break; 1494 if (!intr_disabled) { 1495 if (vm_extint_pending(vcpu) || 1496 vlapic_pending_intr(vcpu->vlapic, NULL)) { 1497 break; 1498 } 1499 } 1500 1501 /* Don't go to sleep if the vcpu thread needs to yield */ 1502 if (vcpu_should_yield(vcpu)) 1503 break; 1504 1505 if (vcpu_debugged(vcpu)) 1506 break; 1507 1508 /* 1509 * Some Linux guests implement "halt" by having all vcpus 1510 * execute HLT with interrupts disabled. 'halted_cpus' keeps 1511 * track of the vcpus that have entered this state. When all 1512 * vcpus enter the halted state the virtual machine is halted. 1513 */ 1514 if (intr_disabled) { 1515 wmesg = "vmhalt"; 1516 VMM_CTR0(vcpu, "Halted"); 1517 if (!vcpu_halted && halt_detection_enabled) { 1518 vcpu_halted = 1; 1519 CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus); 1520 } 1521 if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) { 1522 vm_halted = 1; 1523 break; 1524 } 1525 } else { 1526 wmesg = "vmidle"; 1527 } 1528 1529 t = ticks; 1530 vcpu_require_state_locked(vcpu, VCPU_SLEEPING); 1531 /* 1532 * XXX msleep_spin() cannot be interrupted by signals so 1533 * wake up periodically to check pending signals. 
1534 */ 1535 msleep_spin(vcpu, &vcpu->mtx, wmesg, hz); 1536 vcpu_require_state_locked(vcpu, VCPU_FROZEN); 1537 vmm_stat_incr(vcpu, VCPU_IDLE_TICKS, ticks - t); 1538 if (td_ast_pending(td, TDA_SUSPEND)) { 1539 vcpu_unlock(vcpu); 1540 error = thread_check_susp(td, false); 1541 if (error != 0) { 1542 if (vcpu_halted) { 1543 CPU_CLR_ATOMIC(vcpuid, 1544 &vm->halted_cpus); 1545 } 1546 return (error); 1547 } 1548 vcpu_lock(vcpu); 1549 } 1550 } 1551 1552 if (vcpu_halted) 1553 CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus); 1554 1555 vcpu_unlock(vcpu); 1556 1557 if (vm_halted) 1558 vm_suspend(vm, VM_SUSPEND_HALT); 1559 1560 return (0); 1561 } 1562 1563 static int 1564 vm_handle_paging(struct vcpu *vcpu, bool *retu) 1565 { 1566 struct vm *vm = vcpu->vm; 1567 int rv, ftype; 1568 struct vm_map *map; 1569 struct vm_exit *vme; 1570 1571 vme = &vcpu->exitinfo; 1572 1573 KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d", 1574 __func__, vme->inst_length)); 1575 1576 ftype = vme->u.paging.fault_type; 1577 KASSERT(ftype == VM_PROT_READ || 1578 ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE, 1579 ("vm_handle_paging: invalid fault_type %d", ftype)); 1580 1581 if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) { 1582 rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace), 1583 vme->u.paging.gpa, ftype); 1584 if (rv == 0) { 1585 VMM_CTR2(vcpu, "%s bit emulation for gpa %#lx", 1586 ftype == VM_PROT_READ ? "accessed" : "dirty", 1587 vme->u.paging.gpa); 1588 goto done; 1589 } 1590 } 1591 1592 map = &vm->vmspace->vm_map; 1593 rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL); 1594 1595 VMM_CTR3(vcpu, "vm_handle_paging rv = %d, gpa = %#lx, " 1596 "ftype = %d", rv, vme->u.paging.gpa, ftype); 1597 1598 if (rv != KERN_SUCCESS) 1599 return (EFAULT); 1600 done: 1601 return (0); 1602 } 1603 1604 static int 1605 vm_handle_inst_emul(struct vcpu *vcpu, bool *retu) 1606 { 1607 struct vie *vie; 1608 struct vm_exit *vme; 1609 uint64_t gla, gpa, cs_base; 1610 struct vm_guest_paging *paging; 1611 mem_region_read_t mread; 1612 mem_region_write_t mwrite; 1613 enum vm_cpu_mode cpu_mode; 1614 int cs_d, error, fault; 1615 1616 vme = &vcpu->exitinfo; 1617 1618 KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d", 1619 __func__, vme->inst_length)); 1620 1621 gla = vme->u.inst_emul.gla; 1622 gpa = vme->u.inst_emul.gpa; 1623 cs_base = vme->u.inst_emul.cs_base; 1624 cs_d = vme->u.inst_emul.cs_d; 1625 vie = &vme->u.inst_emul.vie; 1626 paging = &vme->u.inst_emul.paging; 1627 cpu_mode = paging->cpu_mode; 1628 1629 VMM_CTR1(vcpu, "inst_emul fault accessing gpa %#lx", gpa); 1630 1631 /* Fetch, decode and emulate the faulting instruction */ 1632 if (vie->num_valid == 0) { 1633 error = vmm_fetch_instruction(vcpu, paging, vme->rip + cs_base, 1634 VIE_INST_SIZE, vie, &fault); 1635 } else { 1636 /* 1637 * The instruction bytes have already been copied into 'vie' 1638 */ 1639 error = fault = 0; 1640 } 1641 if (error || fault) 1642 return (error); 1643 1644 if (vmm_decode_instruction(vcpu, gla, cpu_mode, cs_d, vie) != 0) { 1645 VMM_CTR1(vcpu, "Error decoding instruction at %#lx", 1646 vme->rip + cs_base); 1647 *retu = true; /* dump instruction bytes in userspace */ 1648 return (0); 1649 } 1650 1651 /* 1652 * Update 'nextrip' based on the length of the emulated instruction. 
1653 */ 1654 vme->inst_length = vie->num_processed; 1655 vcpu->nextrip += vie->num_processed; 1656 VMM_CTR1(vcpu, "nextrip updated to %#lx after instruction decoding", 1657 vcpu->nextrip); 1658 1659 /* return to userland unless this is an in-kernel emulated device */ 1660 if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) { 1661 mread = lapic_mmio_read; 1662 mwrite = lapic_mmio_write; 1663 } else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) { 1664 mread = vioapic_mmio_read; 1665 mwrite = vioapic_mmio_write; 1666 } else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) { 1667 mread = vhpet_mmio_read; 1668 mwrite = vhpet_mmio_write; 1669 } else { 1670 *retu = true; 1671 return (0); 1672 } 1673 1674 error = vmm_emulate_instruction(vcpu, gpa, vie, paging, mread, mwrite, 1675 retu); 1676 1677 return (error); 1678 } 1679 1680 static int 1681 vm_handle_suspend(struct vcpu *vcpu, bool *retu) 1682 { 1683 struct vm *vm = vcpu->vm; 1684 int error, i; 1685 struct thread *td; 1686 1687 error = 0; 1688 td = curthread; 1689 1690 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus); 1691 1692 /* 1693 * Wait until all 'active_cpus' have suspended themselves. 1694 * 1695 * Since a VM may be suspended at any time including when one or 1696 * more vcpus are doing a rendezvous we need to call the rendezvous 1697 * handler while we are waiting to prevent a deadlock. 1698 */ 1699 vcpu_lock(vcpu); 1700 while (error == 0) { 1701 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { 1702 VMM_CTR0(vcpu, "All vcpus suspended"); 1703 break; 1704 } 1705 1706 if (vm->rendezvous_func == NULL) { 1707 VMM_CTR0(vcpu, "Sleeping during suspend"); 1708 vcpu_require_state_locked(vcpu, VCPU_SLEEPING); 1709 msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz); 1710 vcpu_require_state_locked(vcpu, VCPU_FROZEN); 1711 if (td_ast_pending(td, TDA_SUSPEND)) { 1712 vcpu_unlock(vcpu); 1713 error = thread_check_susp(td, false); 1714 vcpu_lock(vcpu); 1715 } 1716 } else { 1717 VMM_CTR0(vcpu, "Rendezvous during suspend"); 1718 vcpu_unlock(vcpu); 1719 error = vm_handle_rendezvous(vcpu); 1720 vcpu_lock(vcpu); 1721 } 1722 } 1723 vcpu_unlock(vcpu); 1724 1725 /* 1726 * Wakeup the other sleeping vcpus and return to userspace. 1727 */ 1728 for (i = 0; i < vm->maxcpus; i++) { 1729 if (CPU_ISSET(i, &vm->suspended_cpus)) { 1730 vcpu_notify_event(vm_vcpu(vm, i), false); 1731 } 1732 } 1733 1734 *retu = true; 1735 return (error); 1736 } 1737 1738 static int 1739 vm_handle_reqidle(struct vcpu *vcpu, bool *retu) 1740 { 1741 vcpu_lock(vcpu); 1742 KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle)); 1743 vcpu->reqidle = 0; 1744 vcpu_unlock(vcpu); 1745 *retu = true; 1746 return (0); 1747 } 1748 1749 int 1750 vm_suspend(struct vm *vm, enum vm_suspend_how how) 1751 { 1752 int i; 1753 1754 if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST) 1755 return (EINVAL); 1756 1757 if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) { 1758 VM_CTR2(vm, "virtual machine already suspended %d/%d", 1759 vm->suspend, how); 1760 return (EALREADY); 1761 } 1762 1763 VM_CTR1(vm, "virtual machine successfully suspended %d", how); 1764 1765 /* 1766 * Notify all active vcpus that they are now suspended. 
1767 */ 1768 for (i = 0; i < vm->maxcpus; i++) { 1769 if (CPU_ISSET(i, &vm->active_cpus)) 1770 vcpu_notify_event(vm_vcpu(vm, i), false); 1771 } 1772 1773 return (0); 1774 } 1775 1776 void 1777 vm_exit_suspended(struct vcpu *vcpu, uint64_t rip) 1778 { 1779 struct vm *vm = vcpu->vm; 1780 struct vm_exit *vmexit; 1781 1782 KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST, 1783 ("vm_exit_suspended: invalid suspend type %d", vm->suspend)); 1784 1785 vmexit = vm_exitinfo(vcpu); 1786 vmexit->rip = rip; 1787 vmexit->inst_length = 0; 1788 vmexit->exitcode = VM_EXITCODE_SUSPENDED; 1789 vmexit->u.suspended.how = vm->suspend; 1790 } 1791 1792 void 1793 vm_exit_debug(struct vcpu *vcpu, uint64_t rip) 1794 { 1795 struct vm_exit *vmexit; 1796 1797 vmexit = vm_exitinfo(vcpu); 1798 vmexit->rip = rip; 1799 vmexit->inst_length = 0; 1800 vmexit->exitcode = VM_EXITCODE_DEBUG; 1801 } 1802 1803 void 1804 vm_exit_rendezvous(struct vcpu *vcpu, uint64_t rip) 1805 { 1806 struct vm_exit *vmexit; 1807 1808 vmexit = vm_exitinfo(vcpu); 1809 vmexit->rip = rip; 1810 vmexit->inst_length = 0; 1811 vmexit->exitcode = VM_EXITCODE_RENDEZVOUS; 1812 vmm_stat_incr(vcpu, VMEXIT_RENDEZVOUS, 1); 1813 } 1814 1815 void 1816 vm_exit_reqidle(struct vcpu *vcpu, uint64_t rip) 1817 { 1818 struct vm_exit *vmexit; 1819 1820 vmexit = vm_exitinfo(vcpu); 1821 vmexit->rip = rip; 1822 vmexit->inst_length = 0; 1823 vmexit->exitcode = VM_EXITCODE_REQIDLE; 1824 vmm_stat_incr(vcpu, VMEXIT_REQIDLE, 1); 1825 } 1826 1827 void 1828 vm_exit_astpending(struct vcpu *vcpu, uint64_t rip) 1829 { 1830 struct vm_exit *vmexit; 1831 1832 vmexit = vm_exitinfo(vcpu); 1833 vmexit->rip = rip; 1834 vmexit->inst_length = 0; 1835 vmexit->exitcode = VM_EXITCODE_BOGUS; 1836 vmm_stat_incr(vcpu, VMEXIT_ASTPENDING, 1); 1837 } 1838 1839 int 1840 vm_run(struct vcpu *vcpu, struct vm_exit *vme_user) 1841 { 1842 struct vm *vm = vcpu->vm; 1843 struct vm_eventinfo evinfo; 1844 int error, vcpuid; 1845 struct pcb *pcb; 1846 uint64_t tscval; 1847 struct vm_exit *vme; 1848 bool retu, intr_disabled; 1849 pmap_t pmap; 1850 1851 vcpuid = vcpu->vcpuid; 1852 1853 if (!CPU_ISSET(vcpuid, &vm->active_cpus)) 1854 return (EINVAL); 1855 1856 if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) 1857 return (EINVAL); 1858 1859 pmap = vmspace_pmap(vm->vmspace); 1860 vme = &vcpu->exitinfo; 1861 evinfo.rptr = &vm->rendezvous_req_cpus; 1862 evinfo.sptr = &vm->suspend; 1863 evinfo.iptr = &vcpu->reqidle; 1864 restart: 1865 critical_enter(); 1866 1867 KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active), 1868 ("vm_run: absurd pm_active")); 1869 1870 tscval = rdtsc(); 1871 1872 pcb = PCPU_GET(curpcb); 1873 set_pcb_flags(pcb, PCB_FULL_IRET); 1874 1875 restore_guest_fpustate(vcpu); 1876 1877 vcpu_require_state(vcpu, VCPU_RUNNING); 1878 error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo); 1879 vcpu_require_state(vcpu, VCPU_FROZEN); 1880 1881 save_guest_fpustate(vcpu); 1882 1883 vmm_stat_incr(vcpu, VCPU_TOTAL_RUNTIME, rdtsc() - tscval); 1884 1885 critical_exit(); 1886 1887 if (error == 0) { 1888 retu = false; 1889 vcpu->nextrip = vme->rip + vme->inst_length; 1890 switch (vme->exitcode) { 1891 case VM_EXITCODE_REQIDLE: 1892 error = vm_handle_reqidle(vcpu, &retu); 1893 break; 1894 case VM_EXITCODE_SUSPENDED: 1895 error = vm_handle_suspend(vcpu, &retu); 1896 break; 1897 case VM_EXITCODE_IOAPIC_EOI: 1898 vioapic_process_eoi(vm, vme->u.ioapic_eoi.vector); 1899 break; 1900 case VM_EXITCODE_RENDEZVOUS: 1901 error = vm_handle_rendezvous(vcpu); 1902 break; 1903 case VM_EXITCODE_HLT: 1904 intr_disabled = 
((vme->u.hlt.rflags & PSL_I) == 0); 1905 error = vm_handle_hlt(vcpu, intr_disabled, &retu); 1906 break; 1907 case VM_EXITCODE_PAGING: 1908 error = vm_handle_paging(vcpu, &retu); 1909 break; 1910 case VM_EXITCODE_INST_EMUL: 1911 error = vm_handle_inst_emul(vcpu, &retu); 1912 break; 1913 case VM_EXITCODE_INOUT: 1914 case VM_EXITCODE_INOUT_STR: 1915 error = vm_handle_inout(vcpu, vme, &retu); 1916 break; 1917 case VM_EXITCODE_MONITOR: 1918 case VM_EXITCODE_MWAIT: 1919 case VM_EXITCODE_VMINSN: 1920 vm_inject_ud(vcpu); 1921 break; 1922 default: 1923 retu = true; /* handled in userland */ 1924 break; 1925 } 1926 } 1927 1928 /* 1929 * VM_EXITCODE_INST_EMUL could access the apic which could transform the 1930 * exit code into VM_EXITCODE_IPI. 1931 */ 1932 if (error == 0 && vme->exitcode == VM_EXITCODE_IPI) 1933 error = vm_handle_ipi(vcpu, vme, &retu); 1934 1935 if (error == 0 && retu == false) 1936 goto restart; 1937 1938 vmm_stat_incr(vcpu, VMEXIT_USERSPACE, 1); 1939 VMM_CTR2(vcpu, "retu %d/%d", error, vme->exitcode); 1940 1941 /* copy the exit information */ 1942 *vme_user = *vme; 1943 return (error); 1944 } 1945 1946 int 1947 vm_restart_instruction(struct vcpu *vcpu) 1948 { 1949 enum vcpu_state state; 1950 uint64_t rip; 1951 int error __diagused; 1952 1953 state = vcpu_get_state(vcpu, NULL); 1954 if (state == VCPU_RUNNING) { 1955 /* 1956 * When a vcpu is "running" the next instruction is determined 1957 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'. 1958 * Thus setting 'inst_length' to zero will cause the current 1959 * instruction to be restarted. 1960 */ 1961 vcpu->exitinfo.inst_length = 0; 1962 VMM_CTR1(vcpu, "restarting instruction at %#lx by " 1963 "setting inst_length to zero", vcpu->exitinfo.rip); 1964 } else if (state == VCPU_FROZEN) { 1965 /* 1966 * When a vcpu is "frozen" it is outside the critical section 1967 * around vmmops_run() and 'nextrip' points to the next 1968 * instruction. Thus instruction restart is achieved by setting 1969 * 'nextrip' to the vcpu's %rip. 
1970 */ 1971 error = vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip); 1972 KASSERT(!error, ("%s: error %d getting rip", __func__, error)); 1973 VMM_CTR2(vcpu, "restarting instruction by updating " 1974 "nextrip from %#lx to %#lx", vcpu->nextrip, rip); 1975 vcpu->nextrip = rip; 1976 } else { 1977 panic("%s: invalid state %d", __func__, state); 1978 } 1979 return (0); 1980 } 1981 1982 int 1983 vm_exit_intinfo(struct vcpu *vcpu, uint64_t info) 1984 { 1985 int type, vector; 1986 1987 if (info & VM_INTINFO_VALID) { 1988 type = info & VM_INTINFO_TYPE; 1989 vector = info & 0xff; 1990 if (type == VM_INTINFO_NMI && vector != IDT_NMI) 1991 return (EINVAL); 1992 if (type == VM_INTINFO_HWEXCEPTION && vector >= 32) 1993 return (EINVAL); 1994 if (info & VM_INTINFO_RSVD) 1995 return (EINVAL); 1996 } else { 1997 info = 0; 1998 } 1999 VMM_CTR2(vcpu, "%s: info1(%#lx)", __func__, info); 2000 vcpu->exitintinfo = info; 2001 return (0); 2002 } 2003 2004 enum exc_class { 2005 EXC_BENIGN, 2006 EXC_CONTRIBUTORY, 2007 EXC_PAGEFAULT 2008 }; 2009 2010 #define IDT_VE 20 /* Virtualization Exception (Intel specific) */ 2011 2012 static enum exc_class 2013 exception_class(uint64_t info) 2014 { 2015 int type, vector; 2016 2017 KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info)); 2018 type = info & VM_INTINFO_TYPE; 2019 vector = info & 0xff; 2020 2021 /* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */ 2022 switch (type) { 2023 case VM_INTINFO_HWINTR: 2024 case VM_INTINFO_SWINTR: 2025 case VM_INTINFO_NMI: 2026 return (EXC_BENIGN); 2027 default: 2028 /* 2029 * Hardware exception. 2030 * 2031 * SVM and VT-x use identical type values to represent NMI, 2032 * hardware interrupt and software interrupt. 2033 * 2034 * SVM uses type '3' for all exceptions. VT-x uses type '3' 2035 * for exceptions except #BP and #OF. #BP and #OF use a type 2036 * value of '5' or '6'. Therefore we don't check for explicit 2037 * values of 'type' to classify 'intinfo' into a hardware 2038 * exception. 2039 */ 2040 break; 2041 } 2042 2043 switch (vector) { 2044 case IDT_PF: 2045 case IDT_VE: 2046 return (EXC_PAGEFAULT); 2047 case IDT_DE: 2048 case IDT_TS: 2049 case IDT_NP: 2050 case IDT_SS: 2051 case IDT_GP: 2052 return (EXC_CONTRIBUTORY); 2053 default: 2054 return (EXC_BENIGN); 2055 } 2056 } 2057 2058 static int 2059 nested_fault(struct vcpu *vcpu, uint64_t info1, uint64_t info2, 2060 uint64_t *retinfo) 2061 { 2062 enum exc_class exc1, exc2; 2063 int type1, vector1; 2064 2065 KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1)); 2066 KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2)); 2067 2068 /* 2069 * If an exception occurs while attempting to call the double-fault 2070 * handler the processor enters shutdown mode (aka triple fault). 2071 */ 2072 type1 = info1 & VM_INTINFO_TYPE; 2073 vector1 = info1 & 0xff; 2074 if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) { 2075 VMM_CTR2(vcpu, "triple fault: info1(%#lx), info2(%#lx)", 2076 info1, info2); 2077 vm_suspend(vcpu->vm, VM_SUSPEND_TRIPLEFAULT); 2078 *retinfo = 0; 2079 return (0); 2080 } 2081 2082 /* 2083 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3 2084 */ 2085 exc1 = exception_class(info1); 2086 exc2 = exception_class(info2); 2087 if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) || 2088 (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) { 2089 /* Convert nested fault into a double fault. 
*/ 2090 *retinfo = IDT_DF; 2091 *retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION; 2092 *retinfo |= VM_INTINFO_DEL_ERRCODE; 2093 } else { 2094 /* Handle exceptions serially */ 2095 *retinfo = info2; 2096 } 2097 return (1); 2098 } 2099 2100 static uint64_t 2101 vcpu_exception_intinfo(struct vcpu *vcpu) 2102 { 2103 uint64_t info = 0; 2104 2105 if (vcpu->exception_pending) { 2106 info = vcpu->exc_vector & 0xff; 2107 info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION; 2108 if (vcpu->exc_errcode_valid) { 2109 info |= VM_INTINFO_DEL_ERRCODE; 2110 info |= (uint64_t)vcpu->exc_errcode << 32; 2111 } 2112 } 2113 return (info); 2114 } 2115 2116 int 2117 vm_entry_intinfo(struct vcpu *vcpu, uint64_t *retinfo) 2118 { 2119 uint64_t info1, info2; 2120 int valid; 2121 2122 info1 = vcpu->exitintinfo; 2123 vcpu->exitintinfo = 0; 2124 2125 info2 = 0; 2126 if (vcpu->exception_pending) { 2127 info2 = vcpu_exception_intinfo(vcpu); 2128 vcpu->exception_pending = 0; 2129 VMM_CTR2(vcpu, "Exception %d delivered: %#lx", 2130 vcpu->exc_vector, info2); 2131 } 2132 2133 if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) { 2134 valid = nested_fault(vcpu, info1, info2, retinfo); 2135 } else if (info1 & VM_INTINFO_VALID) { 2136 *retinfo = info1; 2137 valid = 1; 2138 } else if (info2 & VM_INTINFO_VALID) { 2139 *retinfo = info2; 2140 valid = 1; 2141 } else { 2142 valid = 0; 2143 } 2144 2145 if (valid) { 2146 VMM_CTR4(vcpu, "%s: info1(%#lx), info2(%#lx), " 2147 "retinfo(%#lx)", __func__, info1, info2, *retinfo); 2148 } 2149 2150 return (valid); 2151 } 2152 2153 int 2154 vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2) 2155 { 2156 *info1 = vcpu->exitintinfo; 2157 *info2 = vcpu_exception_intinfo(vcpu); 2158 return (0); 2159 } 2160 2161 int 2162 vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid, 2163 uint32_t errcode, int restart_instruction) 2164 { 2165 uint64_t regval; 2166 int error __diagused; 2167 2168 if (vector < 0 || vector >= 32) 2169 return (EINVAL); 2170 2171 /* 2172 * A double fault exception should never be injected directly into 2173 * the guest. It is a derived exception that results from specific 2174 * combinations of nested faults. 2175 */ 2176 if (vector == IDT_DF) 2177 return (EINVAL); 2178 2179 if (vcpu->exception_pending) { 2180 VMM_CTR2(vcpu, "Unable to inject exception %d due to " 2181 "pending exception %d", vector, vcpu->exc_vector); 2182 return (EBUSY); 2183 } 2184 2185 if (errcode_valid) { 2186 /* 2187 * Exceptions don't deliver an error code in real mode. 2188 */ 2189 error = vm_get_register(vcpu, VM_REG_GUEST_CR0, ®val); 2190 KASSERT(!error, ("%s: error %d getting CR0", __func__, error)); 2191 if (!(regval & CR0_PE)) 2192 errcode_valid = 0; 2193 } 2194 2195 /* 2196 * From section 26.6.1 "Interruptibility State" in Intel SDM: 2197 * 2198 * Event blocking by "STI" or "MOV SS" is cleared after guest executes 2199 * one instruction or incurs an exception. 
2200 */ 2201 error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW, 0); 2202 KASSERT(error == 0, ("%s: error %d clearing interrupt shadow", 2203 __func__, error)); 2204 2205 if (restart_instruction) 2206 vm_restart_instruction(vcpu); 2207 2208 vcpu->exception_pending = 1; 2209 vcpu->exc_vector = vector; 2210 vcpu->exc_errcode = errcode; 2211 vcpu->exc_errcode_valid = errcode_valid; 2212 VMM_CTR1(vcpu, "Exception %d pending", vector); 2213 return (0); 2214 } 2215 2216 void 2217 vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid, int errcode) 2218 { 2219 int error __diagused, restart_instruction; 2220 2221 restart_instruction = 1; 2222 2223 error = vm_inject_exception(vcpu, vector, errcode_valid, 2224 errcode, restart_instruction); 2225 KASSERT(error == 0, ("vm_inject_exception error %d", error)); 2226 } 2227 2228 void 2229 vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2) 2230 { 2231 int error __diagused; 2232 2233 VMM_CTR2(vcpu, "Injecting page fault: error_code %#x, cr2 %#lx", 2234 error_code, cr2); 2235 2236 error = vm_set_register(vcpu, VM_REG_GUEST_CR2, cr2); 2237 KASSERT(error == 0, ("vm_set_register(cr2) error %d", error)); 2238 2239 vm_inject_fault(vcpu, IDT_PF, 1, error_code); 2240 } 2241 2242 static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu"); 2243 2244 int 2245 vm_inject_nmi(struct vcpu *vcpu) 2246 { 2247 2248 vcpu->nmi_pending = 1; 2249 vcpu_notify_event(vcpu, false); 2250 return (0); 2251 } 2252 2253 int 2254 vm_nmi_pending(struct vcpu *vcpu) 2255 { 2256 return (vcpu->nmi_pending); 2257 } 2258 2259 void 2260 vm_nmi_clear(struct vcpu *vcpu) 2261 { 2262 if (vcpu->nmi_pending == 0) 2263 panic("vm_nmi_clear: inconsistent nmi_pending state"); 2264 2265 vcpu->nmi_pending = 0; 2266 vmm_stat_incr(vcpu, VCPU_NMI_COUNT, 1); 2267 } 2268 2269 static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu"); 2270 2271 int 2272 vm_inject_extint(struct vcpu *vcpu) 2273 { 2274 2275 vcpu->extint_pending = 1; 2276 vcpu_notify_event(vcpu, false); 2277 return (0); 2278 } 2279 2280 int 2281 vm_extint_pending(struct vcpu *vcpu) 2282 { 2283 return (vcpu->extint_pending); 2284 } 2285 2286 void 2287 vm_extint_clear(struct vcpu *vcpu) 2288 { 2289 if (vcpu->extint_pending == 0) 2290 panic("vm_extint_clear: inconsistent extint_pending state"); 2291 2292 vcpu->extint_pending = 0; 2293 vmm_stat_incr(vcpu, VCPU_EXTINT_COUNT, 1); 2294 } 2295 2296 int 2297 vm_get_capability(struct vcpu *vcpu, int type, int *retval) 2298 { 2299 if (type < 0 || type >= VM_CAP_MAX) 2300 return (EINVAL); 2301 2302 return (vmmops_getcap(vcpu->cookie, type, retval)); 2303 } 2304 2305 int 2306 vm_set_capability(struct vcpu *vcpu, int type, int val) 2307 { 2308 if (type < 0 || type >= VM_CAP_MAX) 2309 return (EINVAL); 2310 2311 return (vmmops_setcap(vcpu->cookie, type, val)); 2312 } 2313 2314 struct vm * 2315 vcpu_vm(struct vcpu *vcpu) 2316 { 2317 return (vcpu->vm); 2318 } 2319 2320 int 2321 vcpu_vcpuid(struct vcpu *vcpu) 2322 { 2323 return (vcpu->vcpuid); 2324 } 2325 2326 struct vcpu * 2327 vm_vcpu(struct vm *vm, int vcpuid) 2328 { 2329 return (vm->vcpu[vcpuid]); 2330 } 2331 2332 struct vlapic * 2333 vm_lapic(struct vcpu *vcpu) 2334 { 2335 return (vcpu->vlapic); 2336 } 2337 2338 struct vioapic * 2339 vm_ioapic(struct vm *vm) 2340 { 2341 2342 return (vm->vioapic); 2343 } 2344 2345 struct vhpet * 2346 vm_hpet(struct vm *vm) 2347 { 2348 2349 return (vm->vhpet); 2350 } 2351 2352 bool 2353 vmm_is_pptdev(int bus, int slot, int func) 2354 { 2355 int b, f, i, n, s; 2356 char *val, *cp, 
*cp2; 2357 bool found; 2358 2359 /* 2360 * XXX 2361 * The length of an environment variable is limited to 128 bytes which 2362 * puts an upper limit on the number of passthru devices that may be 2363 * specified using a single environment variable. 2364 * 2365 * Work around this by scanning multiple environment variable 2366 * names instead of a single one - yuck! 2367 */ 2368 const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL }; 2369 2370 /* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */ 2371 found = false; 2372 for (i = 0; names[i] != NULL && !found; i++) { 2373 cp = val = kern_getenv(names[i]); 2374 while (cp != NULL && *cp != '\0') { 2375 if ((cp2 = strchr(cp, ' ')) != NULL) 2376 *cp2 = '\0'; 2377 2378 n = sscanf(cp, "%d/%d/%d", &b, &s, &f); 2379 if (n == 3 && bus == b && slot == s && func == f) { 2380 found = true; 2381 break; 2382 } 2383 2384 if (cp2 != NULL) 2385 *cp2++ = ' '; 2386 2387 cp = cp2; 2388 } 2389 freeenv(val); 2390 } 2391 return (found); 2392 } 2393 2394 void * 2395 vm_iommu_domain(struct vm *vm) 2396 { 2397 2398 return (vm->iommu); 2399 } 2400 2401 int 2402 vcpu_set_state(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle) 2403 { 2404 int error; 2405 2406 vcpu_lock(vcpu); 2407 error = vcpu_set_state_locked(vcpu, newstate, from_idle); 2408 vcpu_unlock(vcpu); 2409 2410 return (error); 2411 } 2412 2413 enum vcpu_state 2414 vcpu_get_state(struct vcpu *vcpu, int *hostcpu) 2415 { 2416 enum vcpu_state state; 2417 2418 vcpu_lock(vcpu); 2419 state = vcpu->state; 2420 if (hostcpu != NULL) 2421 *hostcpu = vcpu->hostcpu; 2422 vcpu_unlock(vcpu); 2423 2424 return (state); 2425 } 2426 2427 int 2428 vm_activate_cpu(struct vcpu *vcpu) 2429 { 2430 struct vm *vm = vcpu->vm; 2431 2432 if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) 2433 return (EBUSY); 2434 2435 VMM_CTR0(vcpu, "activated"); 2436 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus); 2437 return (0); 2438 } 2439 2440 int 2441 vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu) 2442 { 2443 if (vcpu == NULL) { 2444 vm->debug_cpus = vm->active_cpus; 2445 for (int i = 0; i < vm->maxcpus; i++) { 2446 if (CPU_ISSET(i, &vm->active_cpus)) 2447 vcpu_notify_event(vm_vcpu(vm, i), false); 2448 } 2449 } else { 2450 if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) 2451 return (EINVAL); 2452 2453 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); 2454 vcpu_notify_event(vcpu, false); 2455 } 2456 return (0); 2457 } 2458 2459 int 2460 vm_resume_cpu(struct vm *vm, struct vcpu *vcpu) 2461 { 2462 2463 if (vcpu == NULL) { 2464 CPU_ZERO(&vm->debug_cpus); 2465 } else { 2466 if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus)) 2467 return (EINVAL); 2468 2469 CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); 2470 } 2471 return (0); 2472 } 2473 2474 int 2475 vcpu_debugged(struct vcpu *vcpu) 2476 { 2477 2478 return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus)); 2479 } 2480 2481 cpuset_t 2482 vm_active_cpus(struct vm *vm) 2483 { 2484 2485 return (vm->active_cpus); 2486 } 2487 2488 cpuset_t 2489 vm_debug_cpus(struct vm *vm) 2490 { 2491 2492 return (vm->debug_cpus); 2493 } 2494 2495 cpuset_t 2496 vm_suspended_cpus(struct vm *vm) 2497 { 2498 2499 return (vm->suspended_cpus); 2500 } 2501 2502 /* 2503 * Returns the subset of vCPUs in tostart that are awaiting startup. 2504 * These vCPUs are also marked as no longer awaiting startup. 
2505 */ 2506 cpuset_t 2507 vm_start_cpus(struct vm *vm, const cpuset_t *tostart) 2508 { 2509 cpuset_t set; 2510 2511 mtx_lock(&vm->rendezvous_mtx); 2512 CPU_AND(&set, &vm->startup_cpus, tostart); 2513 CPU_ANDNOT(&vm->startup_cpus, &vm->startup_cpus, &set); 2514 mtx_unlock(&vm->rendezvous_mtx); 2515 return (set); 2516 } 2517 2518 void 2519 vm_await_start(struct vm *vm, const cpuset_t *waiting) 2520 { 2521 mtx_lock(&vm->rendezvous_mtx); 2522 CPU_OR(&vm->startup_cpus, &vm->startup_cpus, waiting); 2523 mtx_unlock(&vm->rendezvous_mtx); 2524 } 2525 2526 void * 2527 vcpu_stats(struct vcpu *vcpu) 2528 { 2529 2530 return (vcpu->stats); 2531 } 2532 2533 int 2534 vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state) 2535 { 2536 *state = vcpu->x2apic_state; 2537 2538 return (0); 2539 } 2540 2541 int 2542 vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state) 2543 { 2544 if (state >= X2APIC_STATE_LAST) 2545 return (EINVAL); 2546 2547 vcpu->x2apic_state = state; 2548 2549 vlapic_set_x2apic_state(vcpu, state); 2550 2551 return (0); 2552 } 2553 2554 /* 2555 * This function is called to ensure that a vcpu "sees" a pending event 2556 * as soon as possible: 2557 * - If the vcpu thread is sleeping then it is woken up. 2558 * - If the vcpu is running on a different host_cpu then an IPI will be directed 2559 * to the host_cpu to cause the vcpu to trap into the hypervisor. 2560 */ 2561 static void 2562 vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr) 2563 { 2564 int hostcpu; 2565 2566 hostcpu = vcpu->hostcpu; 2567 if (vcpu->state == VCPU_RUNNING) { 2568 KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu")); 2569 if (hostcpu != curcpu) { 2570 if (lapic_intr) { 2571 vlapic_post_intr(vcpu->vlapic, hostcpu, 2572 vmm_ipinum); 2573 } else { 2574 ipi_cpu(hostcpu, vmm_ipinum); 2575 } 2576 } else { 2577 /* 2578 * If the 'vcpu' is running on 'curcpu' then it must 2579 * be sending a notification to itself (e.g. SELF_IPI). 2580 * The pending event will be picked up when the vcpu 2581 * transitions back to guest context. 2582 */ 2583 } 2584 } else { 2585 KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent " 2586 "with hostcpu %d", vcpu->state, hostcpu)); 2587 if (vcpu->state == VCPU_SLEEPING) 2588 wakeup_one(vcpu); 2589 } 2590 } 2591 2592 void 2593 vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr) 2594 { 2595 vcpu_lock(vcpu); 2596 vcpu_notify_event_locked(vcpu, lapic_intr); 2597 vcpu_unlock(vcpu); 2598 } 2599 2600 struct vmspace * 2601 vm_get_vmspace(struct vm *vm) 2602 { 2603 2604 return (vm->vmspace); 2605 } 2606 2607 int 2608 vm_apicid2vcpuid(struct vm *vm, int apicid) 2609 { 2610 /* 2611 * XXX apic id is assumed to be numerically identical to vcpu id 2612 */ 2613 return (apicid); 2614 } 2615 2616 int 2617 vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest, 2618 vm_rendezvous_func_t func, void *arg) 2619 { 2620 struct vm *vm = vcpu->vm; 2621 int error, i; 2622 2623 /* 2624 * Enforce that this function is called without any locks 2625 */ 2626 WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous"); 2627 2628 restart: 2629 mtx_lock(&vm->rendezvous_mtx); 2630 if (vm->rendezvous_func != NULL) { 2631 /* 2632 * If a rendezvous is already in progress then we need to 2633 * call the rendezvous handler in case this 'vcpu' is one 2634 * of the targets of the rendezvous. 
2635 */ 2636 VMM_CTR0(vcpu, "Rendezvous already in progress"); 2637 mtx_unlock(&vm->rendezvous_mtx); 2638 error = vm_handle_rendezvous(vcpu); 2639 if (error != 0) 2640 return (error); 2641 goto restart; 2642 } 2643 KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous " 2644 "rendezvous is still in progress")); 2645 2646 VMM_CTR0(vcpu, "Initiating rendezvous"); 2647 vm->rendezvous_req_cpus = dest; 2648 CPU_ZERO(&vm->rendezvous_done_cpus); 2649 vm->rendezvous_arg = arg; 2650 vm->rendezvous_func = func; 2651 mtx_unlock(&vm->rendezvous_mtx); 2652 2653 /* 2654 * Wake up any sleeping vcpus and trigger a VM-exit in any running 2655 * vcpus so they handle the rendezvous as soon as possible. 2656 */ 2657 for (i = 0; i < vm->maxcpus; i++) { 2658 if (CPU_ISSET(i, &dest)) 2659 vcpu_notify_event(vm_vcpu(vm, i), false); 2660 } 2661 2662 return (vm_handle_rendezvous(vcpu)); 2663 } 2664 2665 struct vatpic * 2666 vm_atpic(struct vm *vm) 2667 { 2668 return (vm->vatpic); 2669 } 2670 2671 struct vatpit * 2672 vm_atpit(struct vm *vm) 2673 { 2674 return (vm->vatpit); 2675 } 2676 2677 struct vpmtmr * 2678 vm_pmtmr(struct vm *vm) 2679 { 2680 2681 return (vm->vpmtmr); 2682 } 2683 2684 struct vrtc * 2685 vm_rtc(struct vm *vm) 2686 { 2687 2688 return (vm->vrtc); 2689 } 2690 2691 enum vm_reg_name 2692 vm_segment_name(int seg) 2693 { 2694 static enum vm_reg_name seg_names[] = { 2695 VM_REG_GUEST_ES, 2696 VM_REG_GUEST_CS, 2697 VM_REG_GUEST_SS, 2698 VM_REG_GUEST_DS, 2699 VM_REG_GUEST_FS, 2700 VM_REG_GUEST_GS 2701 }; 2702 2703 KASSERT(seg >= 0 && seg < nitems(seg_names), 2704 ("%s: invalid segment encoding %d", __func__, seg)); 2705 return (seg_names[seg]); 2706 } 2707 2708 void 2709 vm_copy_teardown(struct vm_copyinfo *copyinfo, int num_copyinfo) 2710 { 2711 int idx; 2712 2713 for (idx = 0; idx < num_copyinfo; idx++) { 2714 if (copyinfo[idx].cookie != NULL) 2715 vm_gpa_release(copyinfo[idx].cookie); 2716 } 2717 bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo)); 2718 } 2719 2720 int 2721 vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging, 2722 uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo, 2723 int num_copyinfo, int *fault) 2724 { 2725 int error, idx, nused; 2726 size_t n, off, remaining; 2727 void *hva, *cookie; 2728 uint64_t gpa; 2729 2730 bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo); 2731 2732 nused = 0; 2733 remaining = len; 2734 while (remaining > 0) { 2735 KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo")); 2736 error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault); 2737 if (error || *fault) 2738 return (error); 2739 off = gpa & PAGE_MASK; 2740 n = min(remaining, PAGE_SIZE - off); 2741 copyinfo[nused].gpa = gpa; 2742 copyinfo[nused].len = n; 2743 remaining -= n; 2744 gla += n; 2745 nused++; 2746 } 2747 2748 for (idx = 0; idx < nused; idx++) { 2749 hva = vm_gpa_hold(vcpu, copyinfo[idx].gpa, 2750 copyinfo[idx].len, prot, &cookie); 2751 if (hva == NULL) 2752 break; 2753 copyinfo[idx].hva = hva; 2754 copyinfo[idx].cookie = cookie; 2755 } 2756 2757 if (idx != nused) { 2758 vm_copy_teardown(copyinfo, num_copyinfo); 2759 return (EFAULT); 2760 } else { 2761 *fault = 0; 2762 return (0); 2763 } 2764 } 2765 2766 void 2767 vm_copyin(struct vm_copyinfo *copyinfo, void *kaddr, size_t len) 2768 { 2769 char *dst; 2770 int idx; 2771 2772 dst = kaddr; 2773 idx = 0; 2774 while (len > 0) { 2775 bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len); 2776 len -= copyinfo[idx].len; 2777 dst += copyinfo[idx].len; 2778 idx++; 2779 } 2780 } 2781 2782 void 2783 
vm_copyout(const void *kaddr, struct vm_copyinfo *copyinfo, size_t len) 2784 { 2785 const char *src; 2786 int idx; 2787 2788 src = kaddr; 2789 idx = 0; 2790 while (len > 0) { 2791 bcopy(src, copyinfo[idx].hva, copyinfo[idx].len); 2792 len -= copyinfo[idx].len; 2793 src += copyinfo[idx].len; 2794 idx++; 2795 } 2796 } 2797 2798 /* 2799 * Return the amount of in-use and wired memory for the VM. Since 2800 * these are global stats, only return the values for vCPU 0 2801 */ 2802 VMM_STAT_DECLARE(VMM_MEM_RESIDENT); 2803 VMM_STAT_DECLARE(VMM_MEM_WIRED); 2804 2805 static void 2806 vm_get_rescnt(struct vcpu *vcpu, struct vmm_stat_type *stat) 2807 { 2808 2809 if (vcpu->vcpuid == 0) { 2810 vmm_stat_set(vcpu, VMM_MEM_RESIDENT, PAGE_SIZE * 2811 vmspace_resident_count(vcpu->vm->vmspace)); 2812 } 2813 } 2814 2815 static void 2816 vm_get_wiredcnt(struct vcpu *vcpu, struct vmm_stat_type *stat) 2817 { 2818 2819 if (vcpu->vcpuid == 0) { 2820 vmm_stat_set(vcpu, VMM_MEM_WIRED, PAGE_SIZE * 2821 pmap_wired_count(vmspace_pmap(vcpu->vm->vmspace))); 2822 } 2823 } 2824 2825 VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt); 2826 VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt); 2827 2828 #ifdef BHYVE_SNAPSHOT 2829 static int 2830 vm_snapshot_vcpus(struct vm *vm, struct vm_snapshot_meta *meta) 2831 { 2832 uint64_t tsc, now; 2833 int ret; 2834 struct vcpu *vcpu; 2835 uint16_t i, maxcpus; 2836 2837 now = rdtsc(); 2838 maxcpus = vm_get_maxcpus(vm); 2839 for (i = 0; i < maxcpus; i++) { 2840 vcpu = vm->vcpu[i]; 2841 if (vcpu == NULL) 2842 continue; 2843 2844 SNAPSHOT_VAR_OR_LEAVE(vcpu->x2apic_state, meta, ret, done); 2845 SNAPSHOT_VAR_OR_LEAVE(vcpu->exitintinfo, meta, ret, done); 2846 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_vector, meta, ret, done); 2847 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode_valid, meta, ret, done); 2848 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode, meta, ret, done); 2849 SNAPSHOT_VAR_OR_LEAVE(vcpu->guest_xcr0, meta, ret, done); 2850 SNAPSHOT_VAR_OR_LEAVE(vcpu->exitinfo, meta, ret, done); 2851 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, ret, done); 2852 2853 /* 2854 * Save the absolute TSC value by adding now to tsc_offset. 2855 * 2856 * It will be turned back into an actual offset when the 2857 * TSC restore function is called. 2858 */ 2859 tsc = now + vcpu->tsc_offset; 2860 SNAPSHOT_VAR_OR_LEAVE(tsc, meta, ret, done); 2861 if (meta->op == VM_SNAPSHOT_RESTORE) 2862 vcpu->tsc_offset = tsc; 2863 } 2864 2865 done: 2866 return (ret); 2867 } 2868 2869 static int 2870 vm_snapshot_vm(struct vm *vm, struct vm_snapshot_meta *meta) 2871 { 2872 int ret; 2873 2874 ret = vm_snapshot_vcpus(vm, meta); 2875 if (ret != 0) 2876 goto done; 2877 2878 SNAPSHOT_VAR_OR_LEAVE(vm->startup_cpus, meta, ret, done); 2879 done: 2880 return (ret); 2881 } 2882 2883 static int 2884 vm_snapshot_vcpu(struct vm *vm, struct vm_snapshot_meta *meta) 2885 { 2886 int error; 2887 struct vcpu *vcpu; 2888 uint16_t i, maxcpus; 2889 2890 error = 0; 2891 2892 maxcpus = vm_get_maxcpus(vm); 2893 for (i = 0; i < maxcpus; i++) { 2894 vcpu = vm->vcpu[i]; 2895 if (vcpu == NULL) 2896 continue; 2897 2898 error = vmmops_vcpu_snapshot(vcpu->cookie, meta); 2899 if (error != 0) { 2900 printf("%s: failed to snapshot vmcs/vmcb data for " 2901 "vCPU: %d; error: %d\n", __func__, i, error); 2902 goto done; 2903 } 2904 } 2905 2906 done: 2907 return (error); 2908 } 2909 2910 /* 2911 * Save kernel-side structures to user-space for snapshotting.
2912 */ 2913 int 2914 vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta) 2915 { 2916 int ret = 0; 2917 2918 switch (meta->dev_req) { 2919 case STRUCT_VMCX: 2920 ret = vm_snapshot_vcpu(vm, meta); 2921 break; 2922 case STRUCT_VM: 2923 ret = vm_snapshot_vm(vm, meta); 2924 break; 2925 case STRUCT_VIOAPIC: 2926 ret = vioapic_snapshot(vm_ioapic(vm), meta); 2927 break; 2928 case STRUCT_VLAPIC: 2929 ret = vlapic_snapshot(vm, meta); 2930 break; 2931 case STRUCT_VHPET: 2932 ret = vhpet_snapshot(vm_hpet(vm), meta); 2933 break; 2934 case STRUCT_VATPIC: 2935 ret = vatpic_snapshot(vm_atpic(vm), meta); 2936 break; 2937 case STRUCT_VATPIT: 2938 ret = vatpit_snapshot(vm_atpit(vm), meta); 2939 break; 2940 case STRUCT_VPMTMR: 2941 ret = vpmtmr_snapshot(vm_pmtmr(vm), meta); 2942 break; 2943 case STRUCT_VRTC: 2944 ret = vrtc_snapshot(vm_rtc(vm), meta); 2945 break; 2946 default: 2947 printf("%s: failed to find the requested type %#x\n", 2948 __func__, meta->dev_req); 2949 ret = (EINVAL); 2950 } 2951 return (ret); 2952 } 2953 2954 void 2955 vm_set_tsc_offset(struct vcpu *vcpu, uint64_t offset) 2956 { 2957 vcpu->tsc_offset = offset; 2958 } 2959 2960 int 2961 vm_restore_time(struct vm *vm) 2962 { 2963 int error; 2964 uint64_t now; 2965 struct vcpu *vcpu; 2966 uint16_t i, maxcpus; 2967 2968 now = rdtsc(); 2969 2970 error = vhpet_restore_time(vm_hpet(vm)); 2971 if (error) 2972 return (error); 2973 2974 maxcpus = vm_get_maxcpus(vm); 2975 for (i = 0; i < maxcpus; i++) { 2976 vcpu = vm->vcpu[i]; 2977 if (vcpu == NULL) 2978 continue; 2979 2980 error = vmmops_restore_tsc(vcpu->cookie, 2981 vcpu->tsc_offset - now); 2982 if (error) 2983 return (error); 2984 } 2985 2986 return (0); 2987 } 2988 #endif 2989
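/*
 * Illustrative sketch only (not compiled): how an event-injection word in
 * the encoding consumed by vm_exit_intinfo() and vm_entry_intinfo() above
 * could be assembled, and how two contributory faults collapse into a
 * double fault via nested_fault().  The helper names are hypothetical and
 * exist only for this example.
 */
#if 0
static uint64_t
example_hwexception_intinfo(int vector, bool errcode_valid, uint32_t errcode)
{
	uint64_t info;

	/* Vector in bits 7:0; validity, type and error-code flags as above. */
	info = VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION | (vector & 0xff);
	if (errcode_valid) {
		info |= VM_INTINFO_DEL_ERRCODE;
		info |= (uint64_t)errcode << 32;
	}
	return (info);
}

static void
example_double_fault(struct vcpu *vcpu)
{
	uint64_t info1, info2, retinfo;

	/*
	 * A #GP raised while delivering a #NP: both are contributory, so
	 * the Table 6-5 rules applied by nested_fault() yield a #DF.
	 */
	info1 = example_hwexception_intinfo(IDT_NP, true, 0);
	info2 = example_hwexception_intinfo(IDT_GP, true, 0);
	if (nested_fault(vcpu, info1, info2, &retinfo))
		KASSERT((retinfo & 0xff) == IDT_DF,
		    ("expected #DF, got vector %d", (int)(retinfo & 0xff)));
}
#endif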
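/*
 * Example of the passthrough tunable format scanned by vmm_is_pptdev():
 * space-separated "bus/slot/function" triples, split across the pptdevs,
 * pptdevs2 and pptdevs3 kenv variables when a single value is not enough.
 * The loader.conf lines below are assumed, typical values:
 *
 *	pptdevs="2/0/0 5/0/0"
 *	pptdevs2="6/1/0"
 *
 * With that configuration vmm_is_pptdev(2, 0, 0) and vmm_is_pptdev(6, 1, 0)
 * return true, while vmm_is_pptdev(3, 0, 0) returns false.
 */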
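/*
 * Illustrative sketch only (not compiled) of the vm_copy_setup() /
 * vm_copyin() / vm_copy_teardown() pattern for reading guest memory through
 * a guest-linear address that may straddle a page boundary.  The function
 * below, its 'paging' argument and the VM_PROT_READ choice are assumptions
 * for this example; real callers take the paging state from the VM-exit
 * information.
 */
#if 0
static int
example_read_guest_linear(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, void *buf, size_t len)
{
	/* Two entries suffice while len <= PAGE_SIZE (at most one boundary). */
	struct vm_copyinfo copyinfo[2];
	int error, fault;

	error = vm_copy_setup(vcpu, paging, gla, len, VM_PROT_READ, copyinfo,
	    nitems(copyinfo), &fault);
	if (error || fault)
		return (error);	/* translation failed or faulted */

	vm_copyin(copyinfo, buf, len);
	vm_copy_teardown(copyinfo, nitems(copyinfo));
	return (0);
}
#endif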
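/*
 * Worked example (made-up numbers) of the TSC bookkeeping performed by
 * vm_snapshot_vcpus() and vm_restore_time(): the snapshot stores the
 * absolute guest TSC, and the restore path converts it back into an offset
 * relative to the restoring host's TSC.  Sketch only, not compiled.
 */
#if 0
static void
example_tsc_offset_math(void)
{
	uint64_t save_now = 1000000;		/* host rdtsc() at snapshot time */
	uint64_t old_offset = 500;		/* vcpu->tsc_offset at that time */
	uint64_t saved = save_now + old_offset;	/* stored by vm_snapshot_vcpus() */
	uint64_t restore_now = 9000000;		/* host rdtsc() on restore */
	uint64_t new_offset = saved - restore_now; /* handed to vmmops_restore_tsc() */

	/*
	 * The guest-visible TSC (host TSC plus offset, modulo 2^64) resumes
	 * at the saved value, so guest time appears continuous.
	 */
	KASSERT(save_now + old_offset == restore_now + new_offset,
	    ("guest TSC not continuous across snapshot/restore"));
}
#endif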