/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <x86/psl.h>
#include <x86/apicreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmm_ktr.h"
#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include "vatpic.h"
#include "vhpet.h"
#include "vioapic.h"
#include "vlapic.h"
#include "vmm_msr.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

struct vcpu {
	int		flags;
	enum vcpu_state	state;
	struct mtx	mtx;
	int		hostcpu;	/* host cpuid this vcpu last ran on */
	uint64_t	guest_msrs[VMM_MSR_NUM];
	struct vlapic	*vlapic;
	int		vcpuid;
	struct savefpu	*guestfpu;	/* guest fpu state */
	uint64_t	guest_xcr0;
	void		*stats;
	struct vm_exit	exitinfo;
	enum x2apic_state x2apic_state;
	int		nmi_pending;
	struct vm_exception exception;
	int		exception_pending;
};

#define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
#define	vcpu_assert_locked(v)	mtx_assert(&((v)->mtx), MA_OWNED)

struct mem_seg {
	vm_paddr_t	gpa;
	size_t		len;
	boolean_t	wired;
	vm_object_t	object;
};
#define	VM_MAX_MEMORY_SEGMENTS	2
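/*
 * Software state associated with a virtual machine; sized statically for
 * VM_MAXCPU vcpus and VM_MAX_MEMORY_SEGMENTS guest memory segments.
 */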
struct vm {
	void		*cookie;	/* processor-specific data */
	void		*iommu;		/* iommu-specific data */
	struct vhpet	*vhpet;		/* virtual HPET */
	struct vioapic	*vioapic;	/* virtual ioapic */
	struct vatpic	*vatpic;	/* virtual atpic */
	struct vmspace	*vmspace;	/* guest's address space */
	struct vcpu	vcpu[VM_MAXCPU];
	int		num_mem_segs;
	struct mem_seg	mem_segs[VM_MAX_MEMORY_SEGMENTS];
	char		name[VM_MAX_NAMELEN];

	/*
	 * Set of active vcpus.
	 * An active vcpu is one that has been started implicitly (BSP) or
	 * explicitly (AP) by sending it a startup ipi.
	 */
	cpuset_t	active_cpus;

	struct mtx	rendezvous_mtx;
	cpuset_t	rendezvous_req_cpus;
	cpuset_t	rendezvous_done_cpus;
	void		*rendezvous_arg;
	vm_rendezvous_func_t rendezvous_func;
};

static int vmm_initialized;

static struct vmm_ops *ops;
#define	VMM_INIT(num)	(ops != NULL ? (*ops->init)(num) : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)
#define	VMM_RESUME()	(ops != NULL ? (*ops->resume)() : 0)

#define	VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap): NULL)
#define	VMRUN(vmi, vcpu, rip, pmap, rptr) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, rptr) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMSPACE_ALLOC(min, max) \
	(ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define	VMSPACE_FREE(vmspace) \
	(ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval) \
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val) \
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc) \
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc) \
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval) \
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val) \
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
#define	VLAPIC_INIT(vmi, vcpu) \
	(ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
#define	VLAPIC_CLEANUP(vmi, vlapic) \
	(ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)
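/*
 * The wrapper macros above dispatch into the hardware-specific backend
 * ('vmm_ops_intel' on Intel or 'vmm_ops_amd' on AMD hosts) selected at
 * module load time in vmm_init(). If no backend is installed each wrapper
 * falls back to a harmless default (0, NULL or ENXIO).
 */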
#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	fpu_stop_emulating()	clts()

static MALLOC_DEFINE(M_VM, "vm", "vm");
CTASSERT(VMM_MSR_NUM <= 64);	/* msr_mask can keep track of up to 64 msrs */

/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

static int vmm_ipinum;
SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
    "IPI vector used for vcpu notifications");

static void vm_deactivate_cpu(struct vm *vm, int vcpuid);

static void
vcpu_cleanup(struct vm *vm, int i)
{
	struct vcpu *vcpu = &vm->vcpu[i];

	VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
	vmm_stat_free(vcpu->stats);
	fpu_save_area_free(vcpu->guestfpu);
}

static void
vcpu_init(struct vm *vm, uint32_t vcpu_id)
{
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpu_id];

	vcpu_lock_init(vcpu);
	vcpu->hostcpu = NOCPU;
	vcpu->vcpuid = vcpu_id;
	vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
	vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
	vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
	vcpu->guestfpu = fpu_save_area_alloc();
	fpu_save_area_reset(vcpu->guestfpu);
	vcpu->stats = vmm_stat_alloc();
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= VM_MAXCPU)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}

static void
vmm_resume(void)
{
	VMM_RESUME();
}

static int
vmm_init(void)
{
	int error;

	vmm_host_state_init();

	vmm_ipinum = vmm_ipi_alloc();
	if (vmm_ipinum == 0)
		vmm_ipinum = IPI_AST;

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_msr_init();
	vmm_resume_p = vmm_resume;

	return (VMM_INIT(vmm_ipinum));
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		if (ppt_avail_devices() > 0)
			iommu_init();
		error = vmm_init();
		if (error == 0)
			vmm_initialized = 1;
		break;
	case MOD_UNLOAD:
		error = vmmdev_cleanup();
		if (error == 0) {
			vmm_resume_p = NULL;
			iommu_cleanup();
			if (vmm_ipinum != IPI_AST)
				vmm_ipi_free(vmm_ipinum);
			error = VMM_CLEANUP();
			/*
			 * Something bad happened - prevent new
			 * VMs from being created
			 */
			if (error)
				vmm_initialized = 0;
		}
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * vmm initialization has the following dependencies:
 *
 * - iommu initialization must happen after the pci passthru driver has had
 *   a chance to attach to any passthru devices (after SI_SUB_CONFIGURE).
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
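/*
 * SI_SUB_SMP + 1 (used below) satisfies both constraints because SI_SUB_SMP
 * itself orders after SI_SUB_CONFIGURE in the sysinit sequence.
 */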
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

int
vm_create(const char *name, struct vm **retvm)
{
	int i;
	struct vm *vm;
	struct vmspace *vmspace;

	const int BSP = 0;

	/*
	 * If vmm.ko could not be successfully initialized then don't attempt
	 * to create the virtual machine.
	 */
	if (!vmm_initialized)
		return (ENXIO);

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (EINVAL);

	vmspace = VMSPACE_ALLOC(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	if (vmspace == NULL)
		return (ENOMEM);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->vmspace = vmspace;
	mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);
	vm->cookie = VMINIT(vm, vmspace_pmap(vmspace));
	vm->vioapic = vioapic_init(vm);
	vm->vhpet = vhpet_init(vm);
	vm->vatpic = vatpic_init(vm);

	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu_init(vm, i);
		guest_msrs_init(vm, i);
	}

	vm_activate_cpu(vm, BSP);

	*retvm = vm;
	return (0);
}

static void
vm_free_mem_seg(struct vm *vm, struct mem_seg *seg)
{

	if (seg->object != NULL)
		vmm_mem_free(vm->vmspace, seg->gpa, seg->len);

	bzero(seg, sizeof(*seg));
}

void
vm_destroy(struct vm *vm)
{
	int i;

	ppt_unassign_all(vm);

	if (vm->iommu != NULL)
		iommu_destroy_domain(vm->iommu);

	vhpet_cleanup(vm->vhpet);
	vatpic_cleanup(vm->vatpic);
	vioapic_cleanup(vm->vioapic);

	for (i = 0; i < vm->num_mem_segs; i++)
		vm_free_mem_seg(vm, &vm->mem_segs[i]);

	vm->num_mem_segs = 0;

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(vm, i);

	VMSPACE_FREE(vm->vmspace);

	VMCLEANUP(vm->cookie);

	free(vm, M_VM);
}

const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	vm_object_t obj;

	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
		return (ENOMEM);
	else
		return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{

	vmm_mmio_free(vm->vmspace, gpa, len);
	return (0);
}

boolean_t
vm_mem_allocated(struct vm *vm, vm_paddr_t gpa)
{
	int i;
	vm_paddr_t gpabase, gpalimit;

	for (i = 0; i < vm->num_mem_segs; i++) {
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa < gpalimit)
			return (TRUE);		/* 'gpa' is regular memory */
	}

	if (ppt_is_mmio(vm, gpa))
		return (TRUE);			/* 'gpa' is pci passthru mmio */

	return (FALSE);
}

int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int available, allocated;
	struct mem_seg *seg;
	vm_object_t object;
	vm_paddr_t g;

	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
		return (EINVAL);

	available = allocated = 0;
	g = gpa;
	while (g < gpa + len) {
		if (vm_mem_allocated(vm, g))
			allocated++;
		else
			available++;

		g += PAGE_SIZE;
	}

	/*
	 * If there are some allocated and some available pages in the address
	 * range then it is an error.
	 */
	if (allocated && available)
		return (EINVAL);

	/*
	 * If the entire address range being requested has already been
	 * allocated then there isn't anything more to do.
	 */
	if (allocated && available == 0)
		return (0);

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	seg = &vm->mem_segs[vm->num_mem_segs];

	if ((object = vmm_mem_alloc(vm->vmspace, gpa, len)) == NULL)
		return (ENOMEM);

	seg->gpa = gpa;
	seg->len = len;
	seg->object = object;
	seg->wired = FALSE;

	vm->num_mem_segs++;

	return (0);
}

static void
vm_gpa_unwire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (!seg->wired)
			continue;

		rv = vm_map_unwire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		KASSERT(rv == KERN_SUCCESS, ("vm(%s) memory segment "
		    "%#lx/%ld could not be unwired: %d",
		    vm_name(vm), seg->gpa, seg->len, rv));

		seg->wired = FALSE;
	}
}

static int
vm_gpa_wire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (seg->wired)
			continue;

		/* XXX rlimits? */
		rv = vm_map_wire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		if (rv != KERN_SUCCESS)
			break;

		seg->wired = TRUE;
	}

	if (i < vm->num_mem_segs) {
		/*
		 * Undo the wiring before returning an error.
		 */
		vm_gpa_unwire(vm);
		return (EAGAIN);
	}

	return (0);
}

static void
vm_iommu_modify(struct vm *vm, boolean_t map)
{
	int i, sz;
	vm_paddr_t gpa, hpa;
	struct mem_seg *seg;
	void *vp, *cookie, *host_domain;

	sz = PAGE_SIZE;
	host_domain = iommu_host_domain();

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		KASSERT(seg->wired, ("vm(%s) memory segment %#lx/%ld not wired",
		    vm_name(vm), seg->gpa, seg->len));

		gpa = seg->gpa;
		while (gpa < seg->gpa + seg->len) {
			vp = vm_gpa_hold(vm, gpa, PAGE_SIZE, VM_PROT_WRITE,
			    &cookie);
			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
			    vm_name(vm), gpa));

			vm_gpa_release(cookie);

			hpa = DMAP_TO_PHYS((uintptr_t)vp);
			if (map) {
				iommu_create_mapping(vm->iommu, gpa, hpa, sz);
				iommu_remove_mapping(host_domain, hpa, sz);
			} else {
				iommu_remove_mapping(vm->iommu, gpa, sz);
				iommu_create_mapping(host_domain, hpa, hpa, sz);
			}

			gpa += PAGE_SIZE;
		}
	}

	/*
	 * Invalidate the cached translations associated with the domain
	 * from which pages were removed.
	 */
	if (map)
		iommu_invalidate_tlb(host_domain);
	else
		iommu_invalidate_tlb(vm->iommu);
}
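/*
 * With 'map' set to TRUE the guest pages are moved from the host iommu
 * domain into the VM's domain (gpa -> hpa); with FALSE the mappings are
 * torn down and the pages revert to identity (hpa -> hpa) translations in
 * the host domain.
 */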
#define	vm_iommu_unmap(vm)	vm_iommu_modify((vm), FALSE)
#define	vm_iommu_map(vm)	vm_iommu_modify((vm), TRUE)

int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;

	error = ppt_unassign_device(vm, bus, slot, func);
	if (error)
		return (error);

	if (ppt_assigned_devices(vm) == 0) {
		vm_iommu_unmap(vm);
		vm_gpa_unwire(vm);
	}
	return (0);
}

int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;
	vm_paddr_t maxaddr;

	/*
	 * Virtual machines with pci passthru devices get special treatment:
	 * - the guest physical memory is wired
	 * - the iommu is programmed to do the 'gpa' to 'hpa' translation
	 *
	 * We need to do this before the first pci passthru device is attached.
	 */
	if (ppt_assigned_devices(vm) == 0) {
		KASSERT(vm->iommu == NULL,
		    ("vm_assign_pptdev: iommu must be NULL"));
		maxaddr = vmm_mem_maxaddr();
		vm->iommu = iommu_create_domain(maxaddr);

		error = vm_gpa_wire(vm);
		if (error)
			return (error);

		vm_iommu_map(vm);
	}

	error = ppt_assign_device(vm, bus, slot, func);
	return (error);
}

void *
vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
	int count, pageoff;
	vm_page_t m;

	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
	    trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);

	if (count == 1) {
		*cookie = m;
		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
	} else {
		*cookie = NULL;
		return (NULL);
	}
}

void
vm_gpa_release(void *cookie)
{
	vm_page_t m = cookie;

	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}

int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
    struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			seg->gpa = vm->mem_segs[i].gpa;
			seg->len = vm->mem_segs[i].len;
			seg->wired = vm->mem_segs[i].wired;
			return (0);
		}
	}
	return (-1);
}

int
vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
    vm_offset_t *offset, struct vm_object **object)
{
	int i;
	size_t seg_len;
	vm_paddr_t seg_gpa;
	vm_object_t seg_obj;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if ((seg_obj = vm->mem_segs[i].object) == NULL)
			continue;

		seg_gpa = vm->mem_segs[i].gpa;
		seg_len = vm->mem_segs[i].len;

		if (gpa >= seg_gpa && gpa < seg_gpa + seg_len) {
			*offset = gpa - seg_gpa;
			*object = seg_obj;
			vm_object_reference(seg_obj);
			return (0);
		}
	}

	return (EINVAL);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMSETREG(vm->cookie, vcpu, reg, val));
}
static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

static void
restore_guest_fpustate(struct vcpu *vcpu)
{

	/* flush host state to the pcb */
	fpuexit(curthread);

	/* restore guest FPU state */
	fpu_stop_emulating();
	fpurestore(vcpu->guestfpu);

	/* restore guest XCR0 if XSAVE is enabled in the host */
	if (rcr4() & CR4_XSAVE)
		load_xcr(0, vcpu->guest_xcr0);

	/*
	 * The FPU is now "dirty" with the guest's state so turn on emulation
	 * to trap any access to the FPU by the host.
	 */
	fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest XCR0 and restore host XCR0 */
	if (rcr4() & CR4_XSAVE) {
		vcpu->guest_xcr0 = rxcr(0);
		load_xcr(0, vmm_get_host_xcr0());
	}

	/* save guest FPU state */
	fpu_stop_emulating();
	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}

static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");

static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
    bool from_idle)
{
	int error;

	vcpu_assert_locked(vcpu);

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state. This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE)
			msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
		    "vcpu idle state"));
	}

	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
		    "mismatch for running vcpu", curcpu, vcpu->hostcpu));
	} else {
		KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
		    "vcpu that is not running", vcpu->hostcpu));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	default:
		error = 1;
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;
	if (newstate == VCPU_RUNNING)
		vcpu->hostcpu = curcpu;
	else
		vcpu->hostcpu = NOCPU;

	if (newstate == VCPU_IDLE)
		wakeup(&vcpu->state);

	return (0);
}

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}

static void
vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
{

	KASSERT(mtx_owned(&vm->rendezvous_mtx), ("rendezvous_mtx not locked"));

	/*
	 * Update 'rendezvous_func' and execute a write memory barrier to
	 * ensure that it is visible across all host cpus. This is not needed
	 * for correctness but it does ensure that all the vcpus will notice
	 * that the rendezvous is requested immediately.
	 */
	vm->rendezvous_func = func;
	wmb();
}

#define	RENDEZVOUS_CTR0(vm, vcpuid, fmt)				\
	do {								\
		if (vcpuid >= 0)					\
			VCPU_CTR0(vm, vcpuid, fmt);			\
		else							\
			VM_CTR0(vm, fmt);				\
	} while (0)

static void
vm_handle_rendezvous(struct vm *vm, int vcpuid)
{

	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));

	mtx_lock(&vm->rendezvous_mtx);
	while (vm->rendezvous_func != NULL) {
		if (vcpuid != -1 &&
		    CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus)) {
			VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
			(*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
			CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
		}
		if (CPU_CMP(&vm->rendezvous_req_cpus,
		    &vm->rendezvous_done_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
			vm_set_rendezvous_func(vm, NULL);
			wakeup(&vm->rendezvous_func);
			break;
		}
		RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
		mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
		    "vmrndv", 0);
	}
	mtx_unlock(&vm->rendezvous_mtx);
}

/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
{
	struct vm_exit *vmexit;
	struct vcpu *vcpu;
	int t, timo;

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);

	/*
	 * Do a final check for pending NMI or interrupts before
	 * really putting this thread to sleep.
	 *
	 * These interrupts could have happened any time after we
	 * returned from VMRUN() and before we grabbed the vcpu lock.
	 */
	if (!vm_nmi_pending(vm, vcpuid) &&
	    (intr_disabled || !vlapic_pending_intr(vcpu->vlapic, NULL))) {
		t = ticks;
		vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
		if (vlapic_enabled(vcpu->vlapic)) {
			/*
			 * XXX msleep_spin() is not interruptible so use the
			 * 'timo' to put an upper bound on the sleep time.
			 */
			timo = hz;
			msleep_spin(vcpu, &vcpu->mtx, "vmidle", timo);
		} else {
			/*
			 * Spindown the vcpu if the apic is disabled and it
			 * had entered the halted state.
			 */
			*retu = true;
			vmexit = vm_exitinfo(vm, vcpuid);
			vmexit->exitcode = VM_EXITCODE_SPINDOWN_CPU;
			vm_deactivate_cpu(vm, vcpuid);
			VCPU_CTR0(vm, vcpuid, "spinning down cpu");
		}
		vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
	}
	vcpu_unlock(vcpu);

	return (0);
}

static int
vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
{
	int rv, ftype;
	struct vm_map *map;
	struct vcpu *vcpu;
	struct vm_exit *vme;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	ftype = vme->u.paging.fault_type;
	KASSERT(ftype == VM_PROT_READ ||
	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
	    ("vm_handle_paging: invalid fault_type %d", ftype));

	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
		    vme->u.paging.gpa, ftype);
		if (rv == 0)
			goto done;
	}

	map = &vm->vmspace->vm_map;
	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);

	VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
	    "ftype = %d", rv, vme->u.paging.gpa, ftype);

	if (rv != KERN_SUCCESS)
		return (EFAULT);
done:
	/* restart execution at the faulting instruction */
	vme->inst_length = 0;

	return (0);
}

static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	int error, inst_length;
	uint64_t rip, gla, gpa, cr3;
	enum vie_cpu_mode cpu_mode;
	enum vie_paging_mode paging_mode;
	mem_region_read_t mread;
	mem_region_write_t mwrite;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	rip = vme->rip;
	inst_length = vme->inst_length;

	gla = vme->u.inst_emul.gla;
	gpa = vme->u.inst_emul.gpa;
	cr3 = vme->u.inst_emul.cr3;
	cpu_mode = vme->u.inst_emul.cpu_mode;
	paging_mode = vme->u.inst_emul.paging_mode;
	vie = &vme->u.inst_emul.vie;

	vie_init(vie);

	/* Fetch, decode and emulate the faulting instruction */
	if (vmm_fetch_instruction(vm, vcpuid, rip, inst_length, cr3,
	    paging_mode, vie) != 0)
		return (EFAULT);

	if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, vie) != 0)
		return (EFAULT);

	/* return to userland unless this is an in-kernel emulated device */
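	/*
	 * The in-kernel device models below cover the local APIC page, the
	 * virtual IOAPIC and the virtual HPET register ranges.
	 */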
	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		mread = lapic_mmio_read;
		mwrite = lapic_mmio_write;
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		mread = vioapic_mmio_read;
		mwrite = vioapic_mmio_write;
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		mread = vhpet_mmio_read;
		mwrite = vhpet_mmio_write;
	} else {
		*retu = true;
		return (0);
	}

	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, mread, mwrite,
	    retu);

	return (error);
}

int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	int error, vcpuid;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval, rip;
	struct vm_exit *vme;
	bool retu, intr_disabled;
	pmap_t pmap;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	pmap = vmspace_pmap(vm->vmspace);
	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
	rip = vmrun->rip;
restart:
	critical_enter();

	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
	    ("vm_run: absurd pm_active"));

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	set_pcb_flags(pcb, PCB_FULL_IRET);

	restore_guest_msrs(vm, vcpuid);
	restore_guest_fpustate(vcpu);

	vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
	error = VMRUN(vm->cookie, vcpuid, rip, pmap, &vm->rendezvous_func);
	vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

	save_guest_fpustate(vcpu);
	restore_host_msrs(vm, vcpuid);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	critical_exit();

	if (error == 0) {
		retu = false;
		switch (vme->exitcode) {
		case VM_EXITCODE_IOAPIC_EOI:
			vioapic_process_eoi(vm, vcpuid,
			    vme->u.ioapic_eoi.vector);
			break;
		case VM_EXITCODE_RENDEZVOUS:
			vm_handle_rendezvous(vm, vcpuid);
			error = 0;
			break;
		case VM_EXITCODE_HLT:
			intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
			error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
			break;
		case VM_EXITCODE_PAGING:
			error = vm_handle_paging(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INST_EMUL:
			error = vm_handle_inst_emul(vm, vcpuid, &retu);
			break;
		default:
			retu = true;	/* handled in userland */
			break;
		}
	}

	if (error == 0 && retu == false) {
		rip = vme->rip + vme->inst_length;
		goto restart;
	}

	/* copy the exit information */
	bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
	return (error);
}

int
vm_inject_exception(struct vm *vm, int vcpuid, struct vm_exception *exception)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (exception->vector < 0 || exception->vector >= 32)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->exception_pending) {
		VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
		    "pending exception %d", exception->vector,
		    vcpu->exception.vector);
		return (EBUSY);
	}

	vcpu->exception_pending = 1;
	vcpu->exception = *exception;
	VCPU_CTR1(vm, vcpuid, "Exception %d pending", exception->vector);
	return (0);
}
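/*
 * Collect and clear any exception pending on 'vcpuid'. Returns non-zero and
 * copies the exception into 'exception' if one was pending.
 */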
int
vm_exception_pending(struct vm *vm, int vcpuid, struct vm_exception *exception)
{
	struct vcpu *vcpu;
	int pending;

	KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid));

	vcpu = &vm->vcpu[vcpuid];
	pending = vcpu->exception_pending;
	if (pending) {
		vcpu->exception_pending = 0;
		*exception = vcpu->exception;
		VCPU_CTR1(vm, vcpuid, "Exception %d delivered",
		    exception->vector);
	}
	return (pending);
}

static void
vm_inject_fault(struct vm *vm, int vcpuid, struct vm_exception *exception)
{
	struct vm_exit *vmexit;
	int error;

	error = vm_inject_exception(vm, vcpuid, exception);
	KASSERT(error == 0, ("vm_inject_exception error %d", error));

	/*
	 * A fault-like exception allows the instruction to be restarted
	 * after the exception handler returns.
	 *
	 * By setting the inst_length to 0 we ensure that the instruction
	 * pointer remains at the faulting instruction.
	 */
	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->inst_length = 0;
}

void
vm_inject_gp(struct vm *vm, int vcpuid)
{
	struct vm_exception gpf = {
		.vector = IDT_GP,
		.error_code_valid = 1,
		.error_code = 0
	};

	vm_inject_fault(vm, vcpuid, &gpf);
}

void
vm_inject_ud(struct vm *vm, int vcpuid)
{
	struct vm_exception udf = {
		.vector = IDT_UD,
		.error_code_valid = 0
	};

	vm_inject_fault(vm, vcpuid, &udf);
}

static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");

int
vm_inject_nmi(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->nmi_pending = 1;
	vcpu_notify_event(vm, vcpuid, false);
	return (0);
}

int
vm_nmi_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	return (vcpu->nmi_pending);
}

void
vm_nmi_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->nmi_pending == 0)
		panic("vm_nmi_clear: inconsistent nmi_pending state");

	vcpu->nmi_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
}

int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMSETCAP(vm->cookie, vcpu, type, val));
}

uint64_t *
vm_guest_msrs(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].guest_msrs);
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].vlapic);
}

struct vioapic *
vm_ioapic(struct vm *vm)
{

	return (vm->vioapic);
}

struct vhpet *
vm_hpet(struct vm *vm)
{

	return (vm->vhpet);
}
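/*
 * Return TRUE if the device identified by 'bus/slot/func' is listed in one
 * of the 'pptdevs' kernel environment variables scanned below.
 */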
boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
	int found, i, n;
	int b, s, f;
	char *val, *cp, *cp2;

	/*
	 * XXX
	 * The length of an environment variable is limited to 128 bytes which
	 * puts an upper limit on the number of passthru devices that may be
	 * specified using a single environment variable.
	 *
	 * Work around this by scanning multiple environment variable
	 * names instead of a single one - yuck!
	 */
	const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };

	/* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
	found = 0;
	for (i = 0; names[i] != NULL && !found; i++) {
		cp = val = getenv(names[i]);
		while (cp != NULL && *cp != '\0') {
			if ((cp2 = strchr(cp, ' ')) != NULL)
				*cp2 = '\0';

			n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
			if (n == 3 && bus == b && slot == s && func == f) {
				found = 1;
				break;
			}

			if (cp2 != NULL)
				*cp2++ = ' ';

			cp = cp2;
		}
		freeenv(val);
	}
	return (found);
}

void *
vm_iommu_domain(struct vm *vm)
{

	return (vm->iommu);
}

int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
	int error;
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_set_run_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	error = vcpu_set_state_locked(vcpu, newstate, from_idle);
	vcpu_unlock(vcpu);

	return (error);
}

enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
{
	struct vcpu *vcpu;
	enum vcpu_state state;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_get_run_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	state = vcpu->state;
	if (hostcpu != NULL)
		*hostcpu = vcpu->hostcpu;
	vcpu_unlock(vcpu);

	return (state);
}

void
vm_activate_cpu(struct vm *vm, int vcpuid)
{

	if (vcpuid >= 0 && vcpuid < VM_MAXCPU)
		CPU_SET(vcpuid, &vm->active_cpus);
}

static void
vm_deactivate_cpu(struct vm *vm, int vcpuid)
{

	if (vcpuid >= 0 && vcpuid < VM_MAXCPU)
		CPU_CLR(vcpuid, &vm->active_cpus);
}

cpuset_t
vm_active_cpus(struct vm *vm)
{

	return (vm->active_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

	return (vm->vcpu[vcpuid].stats);
}

int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*state = vm->vcpu[vcpuid].x2apic_state;

	return (0);
}

int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (state >= X2APIC_STATE_LAST)
		return (EINVAL);

	vm->vcpu[vcpuid].x2apic_state = state;

	vlapic_set_x2apic_state(vm, vcpuid, state);

	return (0);
}

/*
 * This function is called to ensure that a vcpu "sees" a pending event
 * as soon as possible:
 * - If the vcpu thread is sleeping then it is woken up.
 * - If the vcpu is running on a different host_cpu then an IPI will be directed
 *   to the host_cpu to cause the vcpu to trap into the hypervisor.
 */
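/*
 * With 'lapic_intr' set the notification is posted through the vlapic
 * (vlapic_post_intr()); otherwise a plain IPI on 'vmm_ipinum' is used to
 * kick the vcpu out of guest context.
 */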
void
vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
{
	int hostcpu;
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	hostcpu = vcpu->hostcpu;
	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
		if (hostcpu != curcpu) {
			if (lapic_intr) {
				vlapic_post_intr(vcpu->vlapic, hostcpu,
				    vmm_ipinum);
			} else {
				ipi_cpu(hostcpu, vmm_ipinum);
			}
		} else {
			/*
			 * If the 'vcpu' is running on 'curcpu' then it must
			 * be sending a notification to itself (e.g. SELF_IPI).
			 * The pending event will be picked up when the vcpu
			 * transitions back to guest context.
			 */
		}
	} else {
		KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
		    "with hostcpu %d", vcpu->state, hostcpu));
		if (vcpu->state == VCPU_SLEEPING)
			wakeup_one(vcpu);
	}
	vcpu_unlock(vcpu);
}

struct vmspace *
vm_get_vmspace(struct vm *vm)
{

	return (vm->vmspace);
}

int
vm_apicid2vcpuid(struct vm *vm, int apicid)
{
	/*
	 * XXX apic id is assumed to be numerically identical to vcpu id
	 */
	return (apicid);
}

void
vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg)
{
	/*
	 * Enforce that this function is called without any locks
	 */
	WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));

restart:
	mtx_lock(&vm->rendezvous_mtx);
	if (vm->rendezvous_func != NULL) {
		/*
		 * If a rendezvous is already in progress then we need to
		 * call the rendezvous handler in case this 'vcpuid' is one
		 * of the targets of the rendezvous.
		 */
		RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress");
		mtx_unlock(&vm->rendezvous_mtx);
		vm_handle_rendezvous(vm, vcpuid);
		goto restart;
	}
	KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
	    "rendezvous is still in progress"));

	RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous");
	vm->rendezvous_req_cpus = dest;
	CPU_ZERO(&vm->rendezvous_done_cpus);
	vm->rendezvous_arg = arg;
	vm_set_rendezvous_func(vm, func);
	mtx_unlock(&vm->rendezvous_mtx);

	vm_handle_rendezvous(vm, vcpuid);
}

struct vatpic *
vm_atpic(struct vm *vm)
{
	return (vm->vatpic);
}