/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <x86/psl.h>
#include <x86/apicreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_ioport.h"
#include "vmm_ktr.h"
#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include "vatpic.h"
#include "vatpit.h"
#include "vhpet.h"
#include "vioapic.h"
#include "vlapic.h"
#include "vpmtmr.h"
#include "vrtc.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

/*
 * Initialization:
 * (a) allocated when vcpu is created
 * (i) initialized when vcpu is created and when it is reinitialized
 * (o) initialized the first time the vcpu is created
 * (x) initialized before use
 */
struct vcpu {
	struct mtx 	mtx;		/* (o) protects 'state' and 'hostcpu' */
	enum vcpu_state	state;		/* (o) vcpu state */
	int		hostcpu;	/* (o) vcpu's host cpu */
	struct vlapic	*vlapic;	/* (i) APIC device model */
	enum x2apic_state x2apic_state;	/* (i) APIC mode */
	uint64_t	exitintinfo;	/* (i) events pending at VM exit */
	int		nmi_pending;	/* (i) NMI pending */
	int		extint_pending;	/* (i) INTR pending */
	struct vm_exception exception;	/* (x) exception collateral */
	int		exception_pending; /* (i) exception pending */
	struct savefpu	*guestfpu;	/* (a,i) guest fpu state */
	uint64_t	guest_xcr0;	/* (i) guest %xcr0 register */
	void		*stats;		/* (a,i) statistics */
	struct vm_exit	exitinfo;	/* (x) exit reason and collateral */
};
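
/*
 * Helpers for the per-vcpu spin lock that protects the 'state' and
 * 'hostcpu' fields.
 */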
#define	vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
#define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
#define	vcpu_assert_locked(v)	mtx_assert(&((v)->mtx), MA_OWNED)

struct mem_seg {
	vm_paddr_t	gpa;
	size_t		len;
	boolean_t	wired;
	vm_object_t	object;
};
#define	VM_MAX_MEMORY_SEGMENTS	2

/*
 * Initialization:
 * (o) initialized the first time the VM is created
 * (i) initialized when VM is created and when it is reinitialized
 * (x) initialized before use
 */
struct vm {
	void		*cookie;		/* (i) cpu-specific data */
	void		*iommu;			/* (x) iommu-specific data */
	struct vhpet	*vhpet;			/* (i) virtual HPET */
	struct vioapic	*vioapic;		/* (i) virtual ioapic */
	struct vatpic	*vatpic;		/* (i) virtual atpic */
	struct vatpit	*vatpit;		/* (i) virtual atpit */
	struct vpmtmr	*vpmtmr;		/* (i) virtual ACPI PM timer */
	struct vrtc	*vrtc;			/* (o) virtual RTC */
	volatile cpuset_t active_cpus;		/* (i) active vcpus */
	int		suspend;		/* (i) stop VM execution */
	volatile cpuset_t suspended_cpus;	/* (i) suspended vcpus */
	volatile cpuset_t halted_cpus;		/* (x) cpus in a hard halt */
	cpuset_t	rendezvous_req_cpus;	/* (x) rendezvous requested */
	cpuset_t	rendezvous_done_cpus;	/* (x) rendezvous finished */
	void		*rendezvous_arg;	/* (x) rendezvous func/arg */
	vm_rendezvous_func_t rendezvous_func;
	struct mtx	rendezvous_mtx;		/* (o) rendezvous lock */
	int		num_mem_segs;		/* (o) guest memory segments */
	struct mem_seg	mem_segs[VM_MAX_MEMORY_SEGMENTS];
	struct vmspace	*vmspace;		/* (o) guest's address space */
	char		name[VM_MAX_NAMELEN];	/* (o) virtual machine name */
	struct vcpu	vcpu[VM_MAXCPU];	/* (i) guest vcpus */
};

static int vmm_initialized;

static struct vmm_ops *ops;
#define	VMM_INIT(num)	(ops != NULL ? (*ops->init)(num) : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)
#define	VMM_RESUME()	(ops != NULL ? (*ops->resume)() : 0)

#define	VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap): NULL)
#define	VMRUN(vmi, vcpu, rip, pmap, rptr, sptr) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, rptr, sptr) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMSPACE_ALLOC(min, max) \
	(ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define	VMSPACE_FREE(vmspace) \
	(ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval) \
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val) \
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc) \
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc) \
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval) \
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val) \
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
#define	VLAPIC_INIT(vmi, vcpu) \
	(ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
#define	VLAPIC_CLEANUP(vmi, vlapic) \
	(ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)

#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	fpu_stop_emulating()	clts()

static MALLOC_DEFINE(M_VM, "vm", "vm");

/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

/*
 * Halt the guest if all vcpus are executing a HLT instruction with
 * interrupts disabled.
 */
static int halt_detection_enabled = 1;
SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
    &halt_detection_enabled, 0,
    "Halt VM if all vcpus execute HLT with interrupts disabled");

static int vmm_ipinum;
SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
    "IPI vector used for vcpu notifications");

static int trace_guest_exceptions;
SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
    &trace_guest_exceptions, 0,
    "Trap into hypervisor on all guest exceptions and reflect them back");
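
/*
 * Undo vcpu_init(). The vlapic is torn down on every cleanup but the FPU
 * save area and the statistics buffer are freed only when the vcpu is
 * being destroyed rather than reinitialized.
 */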
static void
vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
	struct vcpu *vcpu = &vm->vcpu[i];

	VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
	if (destroy) {
		vmm_stat_free(vcpu->stats);
		fpu_save_area_free(vcpu->guestfpu);
	}
}

static void
vcpu_init(struct vm *vm, int vcpu_id, bool create)
{
	struct vcpu *vcpu;

	KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
	    ("vcpu_init: invalid vcpu %d", vcpu_id));

	vcpu = &vm->vcpu[vcpu_id];

	if (create) {
		KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
		    "initialized", vcpu_id));
		vcpu_lock_init(vcpu);
		vcpu->state = VCPU_IDLE;
		vcpu->hostcpu = NOCPU;
		vcpu->guestfpu = fpu_save_area_alloc();
		vcpu->stats = vmm_stat_alloc();
	}

	vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
	vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
	vcpu->exitintinfo = 0;
	vcpu->nmi_pending = 0;
	vcpu->extint_pending = 0;
	vcpu->exception_pending = 0;
	vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
	fpu_save_area_reset(vcpu->guestfpu);
	vmm_stat_init(vcpu->stats);
}

int
vcpu_trace_exceptions(struct vm *vm, int vcpuid)
{

	return (trace_guest_exceptions);
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= VM_MAXCPU)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}

static void
vmm_resume(void)
{
	VMM_RESUME();
}

static int
vmm_init(void)
{
	int error;

	vmm_host_state_init();

	vmm_ipinum = vmm_ipi_alloc();
	if (vmm_ipinum == 0)
		vmm_ipinum = IPI_AST;

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_resume_p = vmm_resume;

	return (VMM_INIT(vmm_ipinum));
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		if (ppt_avail_devices() > 0)
			iommu_init();
		error = vmm_init();
		if (error == 0)
			vmm_initialized = 1;
		break;
	case MOD_UNLOAD:
		error = vmmdev_cleanup();
		if (error == 0) {
			vmm_resume_p = NULL;
			iommu_cleanup();
			if (vmm_ipinum != IPI_AST)
				vmm_ipi_free(vmm_ipinum);
			error = VMM_CLEANUP();
			/*
			 * Something bad happened - prevent new
			 * VMs from being created
			 */
			if (error)
				vmm_initialized = 0;
		}
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * vmm initialization has the following dependencies:
 *
 * - iommu initialization must happen after the pci passthru driver has had
 *   a chance to attach to any passthru devices (after SI_SUB_CONFIGURE).
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);
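
/*
 * Initialize the per-VM state. 'create' is true only the first time the
 * VM is created; vm_reinit() calls this again with 'create' set to false
 * to reset everything except the state that survives a reinit (e.g. the
 * virtual RTC).
 */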
static void
vm_init(struct vm *vm, bool create)
{
	int i;

	vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
	vm->iommu = NULL;
	vm->vioapic = vioapic_init(vm);
	vm->vhpet = vhpet_init(vm);
	vm->vatpic = vatpic_init(vm);
	vm->vatpit = vatpit_init(vm);
	vm->vpmtmr = vpmtmr_init(vm);
	if (create)
		vm->vrtc = vrtc_init(vm);

	CPU_ZERO(&vm->active_cpus);

	vm->suspend = 0;
	CPU_ZERO(&vm->suspended_cpus);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_init(vm, i, create);
}

int
vm_create(const char *name, struct vm **retvm)
{
	struct vm *vm;
	struct vmspace *vmspace;

	/*
	 * If vmm.ko could not be successfully initialized then don't attempt
	 * to create the virtual machine.
	 */
	if (!vmm_initialized)
		return (ENXIO);

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (EINVAL);

	vmspace = VMSPACE_ALLOC(0, VM_MAXUSER_ADDRESS);
	if (vmspace == NULL)
		return (ENOMEM);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->num_mem_segs = 0;
	vm->vmspace = vmspace;
	mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);

	vm_init(vm, true);

	*retvm = vm;
	return (0);
}

static void
vm_free_mem_seg(struct vm *vm, struct mem_seg *seg)
{

	if (seg->object != NULL)
		vmm_mem_free(vm->vmspace, seg->gpa, seg->len);

	bzero(seg, sizeof(*seg));
}

static void
vm_cleanup(struct vm *vm, bool destroy)
{
	int i;

	ppt_unassign_all(vm);

	if (vm->iommu != NULL)
		iommu_destroy_domain(vm->iommu);

	if (destroy)
		vrtc_cleanup(vm->vrtc);
	else
		vrtc_reset(vm->vrtc);
	vpmtmr_cleanup(vm->vpmtmr);
	vatpit_cleanup(vm->vatpit);
	vhpet_cleanup(vm->vhpet);
	vatpic_cleanup(vm->vatpic);
	vioapic_cleanup(vm->vioapic);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(vm, i, destroy);

	VMCLEANUP(vm->cookie);

	if (destroy) {
		for (i = 0; i < vm->num_mem_segs; i++)
			vm_free_mem_seg(vm, &vm->mem_segs[i]);

		vm->num_mem_segs = 0;

		VMSPACE_FREE(vm->vmspace);
		vm->vmspace = NULL;
	}
}

void
vm_destroy(struct vm *vm)
{
	vm_cleanup(vm, true);
	free(vm, M_VM);
}

int
vm_reinit(struct vm *vm)
{
	int error;

	/*
	 * A virtual machine can be reset only if all vcpus are suspended.
	 */
	if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
		vm_cleanup(vm, false);
		vm_init(vm, false);
		error = 0;
	} else {
		error = EBUSY;
	}

	return (error);
}

const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	vm_object_t obj;

	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
		return (ENOMEM);
	else
		return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{

	vmm_mmio_free(vm->vmspace, gpa, len);
	return (0);
}

boolean_t
vm_mem_allocated(struct vm *vm, vm_paddr_t gpa)
{
	int i;
	vm_paddr_t gpabase, gpalimit;

	for (i = 0; i < vm->num_mem_segs; i++) {
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa < gpalimit)
			return (TRUE);		/* 'gpa' is regular memory */
	}

	if (ppt_is_mmio(vm, gpa))
		return (TRUE);			/* 'gpa' is pci passthru mmio */

	return (FALSE);
}
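
/*
 * Allocate 'len' bytes of guest memory starting at guest physical address
 * 'gpa'. Both 'gpa' and 'len' must be page-aligned and the range must be
 * either entirely unallocated or entirely allocated; the latter case is
 * a no-op.
 */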
int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int available, allocated;
	struct mem_seg *seg;
	vm_object_t object;
	vm_paddr_t g;

	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
		return (EINVAL);

	available = allocated = 0;
	g = gpa;
	while (g < gpa + len) {
		if (vm_mem_allocated(vm, g))
			allocated++;
		else
			available++;

		g += PAGE_SIZE;
	}

	/*
	 * If there are some allocated and some available pages in the address
	 * range then it is an error.
	 */
	if (allocated && available)
		return (EINVAL);

	/*
	 * If the entire address range being requested has already been
	 * allocated then there isn't anything more to do.
	 */
	if (allocated && available == 0)
		return (0);

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	seg = &vm->mem_segs[vm->num_mem_segs];

	if ((object = vmm_mem_alloc(vm->vmspace, gpa, len)) == NULL)
		return (ENOMEM);

	seg->gpa = gpa;
	seg->len = len;
	seg->object = object;
	seg->wired = FALSE;

	vm->num_mem_segs++;

	return (0);
}

static vm_paddr_t
vm_maxmem(struct vm *vm)
{
	int i;
	vm_paddr_t gpa, maxmem;

	maxmem = 0;
	for (i = 0; i < vm->num_mem_segs; i++) {
		gpa = vm->mem_segs[i].gpa + vm->mem_segs[i].len;
		if (gpa > maxmem)
			maxmem = gpa;
	}
	return (maxmem);
}

static void
vm_gpa_unwire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (!seg->wired)
			continue;

		rv = vm_map_unwire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		KASSERT(rv == KERN_SUCCESS, ("vm(%s) memory segment "
		    "%#lx/%ld could not be unwired: %d",
		    vm_name(vm), seg->gpa, seg->len, rv));

		seg->wired = FALSE;
	}
}

static int
vm_gpa_wire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (seg->wired)
			continue;

		/* XXX rlimits? */
		rv = vm_map_wire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		if (rv != KERN_SUCCESS)
			break;

		seg->wired = TRUE;
	}

	if (i < vm->num_mem_segs) {
		/*
		 * Undo the wiring before returning an error.
		 */
		vm_gpa_unwire(vm);
		return (EAGAIN);
	}

	return (0);
}
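
/*
 * Move the gpa-to-hpa mappings of the VM's wired memory segments into the
 * VM's iommu domain (map == TRUE) or back into the host domain
 * (map == FALSE) and invalidate the cached translations that were made
 * stale by the change.
 */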
static void
vm_iommu_modify(struct vm *vm, boolean_t map)
{
	int i, sz;
	vm_paddr_t gpa, hpa;
	struct mem_seg *seg;
	void *vp, *cookie, *host_domain;

	sz = PAGE_SIZE;
	host_domain = iommu_host_domain();

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		KASSERT(seg->wired, ("vm(%s) memory segment %#lx/%ld not wired",
		    vm_name(vm), seg->gpa, seg->len));

		gpa = seg->gpa;
		while (gpa < seg->gpa + seg->len) {
			vp = vm_gpa_hold(vm, gpa, PAGE_SIZE, VM_PROT_WRITE,
			    &cookie);
			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
			    vm_name(vm), gpa));

			vm_gpa_release(cookie);

			hpa = DMAP_TO_PHYS((uintptr_t)vp);
			if (map) {
				iommu_create_mapping(vm->iommu, gpa, hpa, sz);
				iommu_remove_mapping(host_domain, hpa, sz);
			} else {
				iommu_remove_mapping(vm->iommu, gpa, sz);
				iommu_create_mapping(host_domain, hpa, hpa, sz);
			}

			gpa += PAGE_SIZE;
		}
	}

	/*
	 * Invalidate the cached translations associated with the domain
	 * from which pages were removed.
	 */
	if (map)
		iommu_invalidate_tlb(host_domain);
	else
		iommu_invalidate_tlb(vm->iommu);
}

#define	vm_iommu_unmap(vm)	vm_iommu_modify((vm), FALSE)
#define	vm_iommu_map(vm)	vm_iommu_modify((vm), TRUE)

int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;

	error = ppt_unassign_device(vm, bus, slot, func);
	if (error)
		return (error);

	if (ppt_assigned_devices(vm) == 0) {
		vm_iommu_unmap(vm);
		vm_gpa_unwire(vm);
	}
	return (0);
}

int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;
	vm_paddr_t maxaddr;

	/*
	 * Virtual machines with pci passthru devices get special treatment:
	 * - the guest physical memory is wired
	 * - the iommu is programmed to do the 'gpa' to 'hpa' translation
	 *
	 * We need to do this before the first pci passthru device is attached.
	 */
	if (ppt_assigned_devices(vm) == 0) {
		KASSERT(vm->iommu == NULL,
		    ("vm_assign_pptdev: iommu must be NULL"));
		maxaddr = vm_maxmem(vm);
		vm->iommu = iommu_create_domain(maxaddr);

		error = vm_gpa_wire(vm);
		if (error)
			return (error);

		vm_iommu_map(vm);
	}

	error = ppt_assign_device(vm, bus, slot, func);
	return (error);
}
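
/*
 * Hold the page backing guest physical address 'gpa' and return a host
 * virtual address for it. The requested range must not cross a page
 * boundary. The page remains held until the caller passes the returned
 * '*cookie' to vm_gpa_release().
 */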
void *
vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
	int count, pageoff;
	vm_page_t m;

	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
	    trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);

	if (count == 1) {
		*cookie = m;
		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
	} else {
		*cookie = NULL;
		return (NULL);
	}
}

void
vm_gpa_release(void *cookie)
{
	vm_page_t m = cookie;

	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}

int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
    struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			seg->gpa = vm->mem_segs[i].gpa;
			seg->len = vm->mem_segs[i].len;
			seg->wired = vm->mem_segs[i].wired;
			return (0);
		}
	}
	return (-1);
}

int
vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
    vm_offset_t *offset, struct vm_object **object)
{
	int i;
	size_t seg_len;
	vm_paddr_t seg_gpa;
	vm_object_t seg_obj;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if ((seg_obj = vm->mem_segs[i].object) == NULL)
			continue;

		seg_gpa = vm->mem_segs[i].gpa;
		seg_len = vm->mem_segs[i].len;

		if (gpa >= seg_gpa && gpa < seg_gpa + seg_len) {
			*offset = gpa - seg_gpa;
			*object = seg_obj;
			vm_object_reference(seg_obj);
			return (0);
		}
	}

	return (EINVAL);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMSETREG(vm->cookie, vcpu, reg, val));
}

static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

static void
restore_guest_fpustate(struct vcpu *vcpu)
{

	/* flush host state to the pcb */
	fpuexit(curthread);

	/* restore guest FPU state */
	fpu_stop_emulating();
	fpurestore(vcpu->guestfpu);

	/* restore guest XCR0 if XSAVE is enabled in the host */
	if (rcr4() & CR4_XSAVE)
		load_xcr(0, vcpu->guest_xcr0);

	/*
	 * The FPU is now "dirty" with the guest's state so turn on emulation
	 * to trap any access to the FPU by the host.
	 */
	fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest XCR0 and restore host XCR0 */
	if (rcr4() & CR4_XSAVE) {
		vcpu->guest_xcr0 = rxcr(0);
		load_xcr(0, vmm_get_host_xcr0());
	}

	/* save guest FPU state */
	fpu_stop_emulating();
	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}

static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");

static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
    bool from_idle)
{
	int error;

	vcpu_assert_locked(vcpu);

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state. This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE)
			msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
		    "vcpu idle state"));
	}

	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
		    "mismatch for running vcpu", curcpu, vcpu->hostcpu));
	} else {
		KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
		    "vcpu that is not running", vcpu->hostcpu));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	default:
		error = 1;
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;
	if (newstate == VCPU_RUNNING)
		vcpu->hostcpu = curcpu;
	else
		vcpu->hostcpu = NOCPU;

	if (newstate == VCPU_IDLE)
		wakeup(&vcpu->state);

	return (0);
}

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}

static void
vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
{

	KASSERT(mtx_owned(&vm->rendezvous_mtx), ("rendezvous_mtx not locked"));

	/*
	 * Update 'rendezvous_func' and execute a write memory barrier to
	 * ensure that it is visible across all host cpus. This is not needed
	 * for correctness but it does ensure that all the vcpus will notice
	 * that the rendezvous is requested immediately.
	 */
	vm->rendezvous_func = func;
	wmb();
}

#define	RENDEZVOUS_CTR0(vm, vcpuid, fmt)				\
	do {								\
		if (vcpuid >= 0)					\
			VCPU_CTR0(vm, vcpuid, fmt);			\
		else							\
			VM_CTR0(vm, fmt);				\
	} while (0)
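
/*
 * Run the rendezvous handler for this vcpu if it is one of the targets
 * and then wait until all targeted vcpus have done the same. The last
 * vcpu to finish clears 'rendezvous_func' and wakes up the waiters.
 */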
static void
vm_handle_rendezvous(struct vm *vm, int vcpuid)
{

	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));

	mtx_lock(&vm->rendezvous_mtx);
	while (vm->rendezvous_func != NULL) {
		/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
		CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);

		if (vcpuid != -1 &&
		    CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
		    !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
			VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
			(*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
			CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
		}
		if (CPU_CMP(&vm->rendezvous_req_cpus,
		    &vm->rendezvous_done_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
			vm_set_rendezvous_func(vm, NULL);
			wakeup(&vm->rendezvous_func);
			break;
		}
		RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
		mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
		    "vmrndv", 0);
	}
	mtx_unlock(&vm->rendezvous_mtx);
}

/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
{
	struct vcpu *vcpu;
	const char *wmesg;
	int t, vcpu_halted, vm_halted;

	KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));

	vcpu = &vm->vcpu[vcpuid];
	vcpu_halted = 0;
	vm_halted = 0;

	vcpu_lock(vcpu);
	while (1) {
		/*
		 * Do a final check for pending NMI or interrupts before
		 * really putting this thread to sleep. Also check for
		 * software events that would cause this vcpu to wakeup.
		 *
		 * These interrupts/events could have happened after the
		 * vcpu returned from VMRUN() and before it acquired the
		 * vcpu lock above.
		 */
		if (vm->rendezvous_func != NULL || vm->suspend)
			break;
		if (vm_nmi_pending(vm, vcpuid))
			break;
		if (!intr_disabled) {
			if (vm_extint_pending(vm, vcpuid) ||
			    vlapic_pending_intr(vcpu->vlapic, NULL)) {
				break;
			}
		}

		/* Don't go to sleep if the vcpu thread needs to yield */
		if (vcpu_should_yield(vm, vcpuid))
			break;

		/*
		 * Some Linux guests implement "halt" by having all vcpus
		 * execute HLT with interrupts disabled. 'halted_cpus' keeps
		 * track of the vcpus that have entered this state. When all
		 * vcpus enter the halted state the virtual machine is halted.
		 */
		if (intr_disabled) {
			wmesg = "vmhalt";
			VCPU_CTR0(vm, vcpuid, "Halted");
			if (!vcpu_halted && halt_detection_enabled) {
				vcpu_halted = 1;
				CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
			}
			if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
				vm_halted = 1;
				break;
			}
		} else {
			wmesg = "vmidle";
		}

		t = ticks;
		vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
		/*
		 * XXX msleep_spin() cannot be interrupted by signals so
		 * wake up periodically to check pending signals.
		 */
		msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
		vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
	}

	if (vcpu_halted)
		CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);

	vcpu_unlock(vcpu);

	if (vm_halted)
		vm_suspend(vm, VM_SUSPEND_HALT);

	return (0);
}
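
/*
 * Handle an exit caused by a nested page fault. Accessed/dirty bit faults
 * are emulated directly in the pmap; anything else is resolved with
 * vm_fault() on the guest's address space.
 */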
static int
vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
{
	int rv, ftype;
	struct vm_map *map;
	struct vcpu *vcpu;
	struct vm_exit *vme;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	ftype = vme->u.paging.fault_type;
	KASSERT(ftype == VM_PROT_READ ||
	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
	    ("vm_handle_paging: invalid fault_type %d", ftype));

	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
		    vme->u.paging.gpa, ftype);
		if (rv == 0) {
			VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
			    ftype == VM_PROT_READ ? "accessed" : "dirty",
			    vme->u.paging.gpa);
			goto done;
		}
	}

	map = &vm->vmspace->vm_map;
	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);

	VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
	    "ftype = %d", rv, vme->u.paging.gpa, ftype);

	if (rv != KERN_SUCCESS)
		return (EFAULT);
done:
	/* restart execution at the faulting instruction */
	vme->inst_length = 0;

	return (0);
}
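
/*
 * Handle an exit that requires instruction emulation: fetch and decode
 * the instruction at the guest %rip and emulate it if it accesses one of
 * the in-kernel device models (vlapic, vioapic, vhpet). Accesses to any
 * other gpa are bounced to userspace.
 */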
static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	uint64_t gla, gpa;
	struct vm_guest_paging *paging;
	mem_region_read_t mread;
	mem_region_write_t mwrite;
	enum vm_cpu_mode cpu_mode;
	int cs_d, error, length;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	gla = vme->u.inst_emul.gla;
	gpa = vme->u.inst_emul.gpa;
	cs_d = vme->u.inst_emul.cs_d;
	vie = &vme->u.inst_emul.vie;
	paging = &vme->u.inst_emul.paging;
	cpu_mode = paging->cpu_mode;

	VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);

	/* Fetch, decode and emulate the faulting instruction */
	if (vie->num_valid == 0) {
		/*
		 * If the instruction length is not known then assume a
		 * maximum size instruction.
		 */
		length = vme->inst_length ? vme->inst_length : VIE_INST_SIZE;
		error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip,
		    length, vie);
	} else {
		/*
		 * The instruction bytes have already been copied into 'vie'
		 */
		error = 0;
	}
	if (error == 1)
		return (0);		/* Resume guest to handle page fault */
	else if (error == -1)
		return (EFAULT);
	else if (error != 0)
		panic("%s: vmm_fetch_instruction error %d", __func__, error);

	if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0)
		return (EFAULT);

	/*
	 * If the instruction length was not specified then update it now.
	 */
	if (vme->inst_length == 0)
		vme->inst_length = vie->num_processed;

	/* return to userland unless this is an in-kernel emulated device */
	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		mread = lapic_mmio_read;
		mwrite = lapic_mmio_write;
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		mread = vioapic_mmio_read;
		mwrite = vioapic_mmio_write;
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		mread = vhpet_mmio_read;
		mwrite = vhpet_mmio_write;
	} else {
		*retu = true;
		return (0);
	}

	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
	    mread, mwrite, retu);

	return (error);
}

static int
vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
{
	int i, done;
	struct vcpu *vcpu;

	done = 0;
	vcpu = &vm->vcpu[vcpuid];

	CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);

	/*
	 * Wait until all 'active_cpus' have suspended themselves.
	 *
	 * Since a VM may be suspended at any time including when one or
	 * more vcpus are doing a rendezvous we need to call the rendezvous
	 * handler while we are waiting to prevent a deadlock.
	 */
	vcpu_lock(vcpu);
	while (1) {
		if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
			break;
		}

		if (vm->rendezvous_func == NULL) {
			VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
			vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
			msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
			vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		} else {
			VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
			vcpu_unlock(vcpu);
			vm_handle_rendezvous(vm, vcpuid);
			vcpu_lock(vcpu);
		}
	}
	vcpu_unlock(vcpu);

	/*
	 * Wakeup the other sleeping vcpus and return to userspace.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &vm->suspended_cpus)) {
			vcpu_notify_event(vm, i, false);
		}
	}

	*retu = true;
	return (0);
}

int
vm_suspend(struct vm *vm, enum vm_suspend_how how)
{
	int i;

	if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
		return (EINVAL);

	if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
		VM_CTR2(vm, "virtual machine already suspended %d/%d",
		    vm->suspend, how);
		return (EALREADY);
	}

	VM_CTR1(vm, "virtual machine successfully suspended %d", how);

	/*
	 * Notify all active vcpus that they are now suspended.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &vm->active_cpus))
			vcpu_notify_event(vm, i, false);
	}

	return (0);
}
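
/*
 * The vm_exit_*() helpers below fill in the exit information for a vcpu
 * that must leave the guest. They all set 'inst_length' to 0 so that the
 * guest %rip remains at the interrupted instruction when it resumes.
 */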
void
vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
	    ("vm_exit_suspended: invalid suspend type %d", vm->suspend));

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_SUSPENDED;
	vmexit->u.suspended.how = vm->suspend;
}

void
vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
	vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
}

void
vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
}

int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	int error, vcpuid;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval, rip;
	struct vm_exit *vme;
	bool retu, intr_disabled;
	pmap_t pmap;
	void *rptr, *sptr;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (!CPU_ISSET(vcpuid, &vm->active_cpus))
		return (EINVAL);

	if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
		return (EINVAL);

	rptr = &vm->rendezvous_func;
	sptr = &vm->suspend;
	pmap = vmspace_pmap(vm->vmspace);
	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
	rip = vmrun->rip;
restart:
	critical_enter();

	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
	    ("vm_run: absurd pm_active"));

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	set_pcb_flags(pcb, PCB_FULL_IRET);

	restore_guest_fpustate(vcpu);

	vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
	error = VMRUN(vm->cookie, vcpuid, rip, pmap, rptr, sptr);
	vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

	save_guest_fpustate(vcpu);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	critical_exit();

	if (error == 0) {
		retu = false;
		switch (vme->exitcode) {
		case VM_EXITCODE_SUSPENDED:
			error = vm_handle_suspend(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_IOAPIC_EOI:
			vioapic_process_eoi(vm, vcpuid,
			    vme->u.ioapic_eoi.vector);
			break;
		case VM_EXITCODE_RENDEZVOUS:
			vm_handle_rendezvous(vm, vcpuid);
			error = 0;
			break;
		case VM_EXITCODE_HLT:
			intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
			error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
			break;
		case VM_EXITCODE_PAGING:
			error = vm_handle_paging(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INST_EMUL:
			error = vm_handle_inst_emul(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INOUT:
		case VM_EXITCODE_INOUT_STR:
			error = vm_handle_inout(vm, vcpuid, vme, &retu);
			break;
		case VM_EXITCODE_MONITOR:
		case VM_EXITCODE_MWAIT:
			vm_inject_ud(vm, vcpuid);
			break;
		default:
			retu = true;	/* handled in userland */
			break;
		}
	}

	if (error == 0 && retu == false) {
		rip = vme->rip + vme->inst_length;
		goto restart;
	}

	/* copy the exit information */
	bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
	return (error);
}
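
/*
 * Record the event that was being delivered when the vcpu exited from
 * guest context. It is consumed by vm_entry_intinfo() on the next entry.
 */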
int
vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
{
	struct vcpu *vcpu;
	int type, vector;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	if (info & VM_INTINFO_VALID) {
		type = info & VM_INTINFO_TYPE;
		vector = info & 0xff;
		if (type == VM_INTINFO_NMI && vector != IDT_NMI)
			return (EINVAL);
		if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
			return (EINVAL);
		if (info & VM_INTINFO_RSVD)
			return (EINVAL);
	} else {
		info = 0;
	}
	VCPU_CTR2(vm, vcpuid, "%s: info1(%#lx)", __func__, info);
	vcpu->exitintinfo = info;
	return (0);
}

enum exc_class {
	EXC_BENIGN,
	EXC_CONTRIBUTORY,
	EXC_PAGEFAULT
};

#define	IDT_VE	20	/* Virtualization Exception (Intel specific) */

static enum exc_class
exception_class(uint64_t info)
{
	int type, vector;

	KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info));
	type = info & VM_INTINFO_TYPE;
	vector = info & 0xff;

	/* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
	switch (type) {
	case VM_INTINFO_HWINTR:
	case VM_INTINFO_SWINTR:
	case VM_INTINFO_NMI:
		return (EXC_BENIGN);
	default:
		/*
		 * Hardware exception.
		 *
		 * SVM and VT-x use identical type values to represent NMI,
		 * hardware interrupt and software interrupt.
		 *
		 * SVM uses type '3' for all exceptions. VT-x uses type '3'
		 * for exceptions except #BP and #OF. #BP and #OF use a type
		 * value of '5' or '6'. Therefore we don't check for explicit
		 * values of 'type' to classify 'intinfo' into a hardware
		 * exception.
		 */
		break;
	}

	switch (vector) {
	case IDT_PF:
	case IDT_VE:
		return (EXC_PAGEFAULT);
	case IDT_DE:
	case IDT_TS:
	case IDT_NP:
	case IDT_SS:
	case IDT_GP:
		return (EXC_CONTRIBUTORY);
	default:
		return (EXC_BENIGN);
	}
}

static int
nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
    uint64_t *retinfo)
{
	enum exc_class exc1, exc2;
	int type1, vector1;

	KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1));
	KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2));

	/*
	 * If an exception occurs while attempting to call the double-fault
	 * handler the processor enters shutdown mode (aka triple fault).
	 */
	type1 = info1 & VM_INTINFO_TYPE;
	vector1 = info1 & 0xff;
	if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
		VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)",
		    info1, info2);
		vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
		*retinfo = 0;
		return (0);
	}

	/*
	 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
	 */
	exc1 = exception_class(info1);
	exc2 = exception_class(info2);
	if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
	    (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
		/* Convert nested fault into a double fault. */
		*retinfo = IDT_DF;
		*retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
		*retinfo |= VM_INTINFO_DEL_ERRCODE;
	} else {
		/* Handle exceptions serially */
		*retinfo = info2;
	}
	return (1);
}
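
/*
 * Convert the exception pending on 'vcpu' into the VM_INTINFO_* format
 * used by 'exitintinfo'.
 */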
static uint64_t
vcpu_exception_intinfo(struct vcpu *vcpu)
{
	uint64_t info = 0;

	if (vcpu->exception_pending) {
		info = vcpu->exception.vector & 0xff;
		info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
		if (vcpu->exception.error_code_valid) {
			info |= VM_INTINFO_DEL_ERRCODE;
			info |= (uint64_t)vcpu->exception.error_code << 32;
		}
	}
	return (info);
}

int
vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
{
	struct vcpu *vcpu;
	uint64_t info1, info2;
	int valid;

	KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid));

	vcpu = &vm->vcpu[vcpuid];

	info1 = vcpu->exitintinfo;
	vcpu->exitintinfo = 0;

	info2 = 0;
	if (vcpu->exception_pending) {
		info2 = vcpu_exception_intinfo(vcpu);
		vcpu->exception_pending = 0;
		VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx",
		    vcpu->exception.vector, info2);
	}

	if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
		valid = nested_fault(vm, vcpuid, info1, info2, retinfo);
	} else if (info1 & VM_INTINFO_VALID) {
		*retinfo = info1;
		valid = 1;
	} else if (info2 & VM_INTINFO_VALID) {
		*retinfo = info2;
		valid = 1;
	} else {
		valid = 0;
	}

	if (valid) {
		VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), "
		    "retinfo(%#lx)", __func__, info1, info2, *retinfo);
	}

	return (valid);
}

int
vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];
	*info1 = vcpu->exitintinfo;
	*info2 = vcpu_exception_intinfo(vcpu);
	return (0);
}

int
vm_inject_exception(struct vm *vm, int vcpuid, struct vm_exception *exception)
{
	struct vcpu *vcpu;
	int error;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (exception->vector < 0 || exception->vector >= 32)
		return (EINVAL);

	/*
	 * A double fault exception should never be injected directly into
	 * the guest. It is a derived exception that results from specific
	 * combinations of nested faults.
	 */
	if (exception->vector == IDT_DF)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->exception_pending) {
		VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
		    "pending exception %d", exception->vector,
		    vcpu->exception.vector);
		return (EBUSY);
	}

	/*
	 * From section 26.6.1 "Interruptibility State" in Intel SDM:
	 *
	 * Event blocking by "STI" or "MOV SS" is cleared after guest executes
	 * one instruction or incurs an exception.
	 */
	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
	KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
	    __func__, error));

	vcpu->exception_pending = 1;
	vcpu->exception = *exception;
	VCPU_CTR1(vm, vcpuid, "Exception %d pending", exception->vector);
	return (0);
}

void
vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid,
    int errcode)
{
	struct vm_exception exception;
	struct vm_exit *vmexit;
	struct vm *vm;
	int error;

	vm = vmarg;

	exception.vector = vector;
	exception.error_code = errcode;
	exception.error_code_valid = errcode_valid;
	error = vm_inject_exception(vm, vcpuid, &exception);
	KASSERT(error == 0, ("vm_inject_exception error %d", error));

	/*
	 * A fault-like exception allows the instruction to be restarted
	 * after the exception handler returns.
	 *
	 * By setting the inst_length to 0 we ensure that the instruction
	 * pointer remains at the faulting instruction.
	 */
	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->inst_length = 0;
}

void
vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2)
{
	struct vm *vm;
	int error;

	vm = vmarg;
	VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
	    error_code, cr2);

	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
	KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));

	vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
}

static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");

int
vm_inject_nmi(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->nmi_pending = 1;
	vcpu_notify_event(vm, vcpuid, false);
	return (0);
}

int
vm_nmi_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	return (vcpu->nmi_pending);
}

void
vm_nmi_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_clear: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->nmi_pending == 0)
		panic("vm_nmi_clear: inconsistent nmi_pending state");

	vcpu->nmi_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
}

static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");

int
vm_inject_extint(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->extint_pending = 1;
	vcpu_notify_event(vm, vcpuid, false);
	return (0);
}

int
vm_extint_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_extint_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	return (vcpu->extint_pending);
}

void
vm_extint_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_extint_clear: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->extint_pending == 0)
		panic("vm_extint_clear: inconsistent extint_pending state");

	vcpu->extint_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
}

int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMSETCAP(vm->cookie, vcpu, type, val));
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].vlapic);
}

struct vioapic *
vm_ioapic(struct vm *vm)
{

	return (vm->vioapic);
}

struct vhpet *
vm_hpet(struct vm *vm)
{

	return (vm->vhpet);
}
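
/*
 * Return TRUE if the pci device identified by bus/slot/func is listed in
 * one of the 'pptdevs' tunables.
 */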
boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
	int found, i, n;
	int b, s, f;
	char *val, *cp, *cp2;

	/*
	 * XXX
	 * The length of an environment variable is limited to 128 bytes which
	 * puts an upper limit on the number of passthru devices that may be
	 * specified using a single environment variable.
	 *
	 * Work around this by scanning multiple environment variable
	 * names instead of a single one - yuck!
	 */
	const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };

	/* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
	found = 0;
	for (i = 0; names[i] != NULL && !found; i++) {
		cp = val = kern_getenv(names[i]);
		while (cp != NULL && *cp != '\0') {
			if ((cp2 = strchr(cp, ' ')) != NULL)
				*cp2 = '\0';

			n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
			if (n == 3 && bus == b && slot == s && func == f) {
				found = 1;
				break;
			}

			if (cp2 != NULL)
				*cp2++ = ' ';

			cp = cp2;
		}
		freeenv(val);
	}
	return (found);
}

void *
vm_iommu_domain(struct vm *vm)
{

	return (vm->iommu);
}

int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
	int error;
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_set_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	error = vcpu_set_state_locked(vcpu, newstate, from_idle);
	vcpu_unlock(vcpu);

	return (error);
}

enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
{
	struct vcpu *vcpu;
	enum vcpu_state state;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_get_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	state = vcpu->state;
	if (hostcpu != NULL)
		*hostcpu = vcpu->hostcpu;
	vcpu_unlock(vcpu);

	return (state);
}

int
vm_activate_cpu(struct vm *vm, int vcpuid)
{

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (CPU_ISSET(vcpuid, &vm->active_cpus))
		return (EBUSY);

	VCPU_CTR0(vm, vcpuid, "activated");
	CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
	return (0);
}

cpuset_t
vm_active_cpus(struct vm *vm)
{

	return (vm->active_cpus);
}

cpuset_t
vm_suspended_cpus(struct vm *vm)
{

	return (vm->suspended_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

	return (vm->vcpu[vcpuid].stats);
}

int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*state = vm->vcpu[vcpuid].x2apic_state;

	return (0);
}

int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (state >= X2APIC_STATE_LAST)
		return (EINVAL);

	vm->vcpu[vcpuid].x2apic_state = state;

	vlapic_set_x2apic_state(vm, vcpuid, state);

	return (0);
}

/*
 * This function is called to ensure that a vcpu "sees" a pending event
 * as soon as possible:
 * - If the vcpu thread is sleeping then it is woken up.
 * - If the vcpu is running on a different host_cpu then an IPI will be directed
 *   to the host_cpu to cause the vcpu to trap into the hypervisor.
 */
void
vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
{
	int hostcpu;
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	hostcpu = vcpu->hostcpu;
	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
		if (hostcpu != curcpu) {
			if (lapic_intr) {
				vlapic_post_intr(vcpu->vlapic, hostcpu,
				    vmm_ipinum);
			} else {
				ipi_cpu(hostcpu, vmm_ipinum);
			}
		} else {
			/*
			 * If the 'vcpu' is running on 'curcpu' then it must
			 * be sending a notification to itself (e.g. SELF_IPI).
			 * The pending event will be picked up when the vcpu
			 * transitions back to guest context.
			 */
		}
	} else {
		KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
		    "with hostcpu %d", vcpu->state, hostcpu));
		if (vcpu->state == VCPU_SLEEPING)
			wakeup_one(vcpu);
	}
	vcpu_unlock(vcpu);
}

struct vmspace *
vm_get_vmspace(struct vm *vm)
{

	return (vm->vmspace);
}

int
vm_apicid2vcpuid(struct vm *vm, int apicid)
{
	/*
	 * XXX apic id is assumed to be numerically identical to vcpu id
	 */
	return (apicid);
}
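
/*
 * Initiate a rendezvous of the vcpus specified in 'dest' and then
 * participate in it as 'vcpuid'. If a rendezvous is already in progress
 * then this vcpu helps complete it before initiating a new one.
 */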
void
vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg)
{
	int i;

	/*
	 * Enforce that this function is called without any locks
	 */
	WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));

restart:
	mtx_lock(&vm->rendezvous_mtx);
	if (vm->rendezvous_func != NULL) {
		/*
		 * If a rendezvous is already in progress then we need to
		 * call the rendezvous handler in case this 'vcpuid' is one
		 * of the targets of the rendezvous.
		 */
		RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress");
		mtx_unlock(&vm->rendezvous_mtx);
		vm_handle_rendezvous(vm, vcpuid);
		goto restart;
	}
	KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
	    "rendezvous is still in progress"));

	RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous");
	vm->rendezvous_req_cpus = dest;
	CPU_ZERO(&vm->rendezvous_done_cpus);
	vm->rendezvous_arg = arg;
	vm_set_rendezvous_func(vm, func);
	mtx_unlock(&vm->rendezvous_mtx);

	/*
	 * Wake up any sleeping vcpus and trigger a VM-exit in any running
	 * vcpus so they handle the rendezvous as soon as possible.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &dest))
			vcpu_notify_event(vm, i, false);
	}

	vm_handle_rendezvous(vm, vcpuid);
}

struct vatpic *
vm_atpic(struct vm *vm)
{
	return (vm->vatpic);
}

struct vatpit *
vm_atpit(struct vm *vm)
{
	return (vm->vatpit);
}

struct vpmtmr *
vm_pmtmr(struct vm *vm)
{

	return (vm->vpmtmr);
}

struct vrtc *
vm_rtc(struct vm *vm)
{

	return (vm->vrtc);
}

enum vm_reg_name
vm_segment_name(int seg)
{
	static enum vm_reg_name seg_names[] = {
		VM_REG_GUEST_ES,
		VM_REG_GUEST_CS,
		VM_REG_GUEST_SS,
		VM_REG_GUEST_DS,
		VM_REG_GUEST_FS,
		VM_REG_GUEST_GS
	};

	KASSERT(seg >= 0 && seg < nitems(seg_names),
	    ("%s: invalid segment encoding %d", __func__, seg));
	return (seg_names[seg]);
}

void
vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    int num_copyinfo)
{
	int idx;

	for (idx = 0; idx < num_copyinfo; idx++) {
		if (copyinfo[idx].cookie != NULL)
			vm_gpa_release(copyinfo[idx].cookie);
	}
	bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
}
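
/*
 * Translate the guest linear address range [gla, gla + len) and hold the
 * pages backing it. On success each chunk (at most a page) is described
 * by an entry in 'copyinfo' for use with vm_copyin()/vm_copyout(); the
 * pages must eventually be released with vm_copy_teardown().
 */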
int
vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo)
{
	int error, idx, nused;
	size_t n, off, remaining;
	void *hva, *cookie;
	uint64_t gpa;

	bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);

	nused = 0;
	remaining = len;
	while (remaining > 0) {
		KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
		error = vmm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa);
		if (error)
			return (error);
		off = gpa & PAGE_MASK;
		n = min(remaining, PAGE_SIZE - off);
		copyinfo[nused].gpa = gpa;
		copyinfo[nused].len = n;
		remaining -= n;
		gla += n;
		nused++;
	}

	for (idx = 0; idx < nused; idx++) {
		hva = vm_gpa_hold(vm, copyinfo[idx].gpa, copyinfo[idx].len,
		    prot, &cookie);
		if (hva == NULL)
			break;
		copyinfo[idx].hva = hva;
		copyinfo[idx].cookie = cookie;
	}

	if (idx != nused) {
		vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
		return (-1);
	} else {
		return (0);
	}
}

void
vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
    size_t len)
{
	char *dst;
	int idx;

	dst = kaddr;
	idx = 0;
	while (len > 0) {
		bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		dst += copyinfo[idx].len;
		idx++;
	}
}

void
vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len)
{
	const char *src;
	int idx;

	src = kaddr;
	idx = 0;
	while (len > 0) {
		bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		src += copyinfo[idx].len;
		idx++;
	}
}

/*
 * Return the amount of in-use and wired memory for the VM. Since these
 * are global stats, only return the values for vcpu 0.
 */
VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
VMM_STAT_DECLARE(VMM_MEM_WIRED);

static void
vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

	if (vcpu == 0) {
		vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
		    PAGE_SIZE * vmspace_resident_count(vm->vmspace));
	}
}

static void
vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

	if (vcpu == 0) {
		vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
		    PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
	}
}

VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);