/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <x86/psl.h>
#include <x86/apicreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmm_ktr.h"
#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include "vhpet.h"
#include "vioapic.h"
#include "vlapic.h"
#include "vmm_msr.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

struct vcpu {
	int		flags;
	enum vcpu_state	state;
	struct mtx	mtx;
	int		hostcpu;	/* host cpuid this vcpu last ran on */
	uint64_t	guest_msrs[VMM_MSR_NUM];
	struct vlapic	*vlapic;
	int		vcpuid;
	struct savefpu	*guestfpu;	/* guest fpu state */
	void		*stats;
	struct vm_exit	exitinfo;
	enum x2apic_state x2apic_state;
	int		nmi_pending;
};

#define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
#define	vcpu_assert_locked(v)	mtx_assert(&((v)->mtx), MA_OWNED)

struct mem_seg {
	vm_paddr_t	gpa;
	size_t		len;
	boolean_t	wired;
	vm_object_t	object;
};
#define	VM_MAX_MEMORY_SEGMENTS	2

struct vm {
	void		*cookie;	/* processor-specific data */
	void		*iommu;		/* iommu-specific data */
	struct vhpet	*vhpet;		/* virtual HPET */
	struct vioapic	*vioapic;	/* virtual ioapic */
	struct vmspace	*vmspace;	/* guest's address space */
	struct vcpu	vcpu[VM_MAXCPU];
	int		num_mem_segs;
	struct mem_seg	mem_segs[VM_MAX_MEMORY_SEGMENTS];
	char		name[VM_MAX_NAMELEN];

	/*
	 * Set of active vcpus.
	 * An active vcpu is one that has been started implicitly (BSP) or
	 * explicitly (AP) by sending it a startup ipi.
	 */
	cpuset_t	active_cpus;
};

static int vmm_initialized;

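/*
 * Backend operations vector, filled in at module load time with either the
 * Intel or AMD implementation.  The wrapper macros below fall back to a
 * harmless default (0, NULL or ENXIO) when no backend is registered.
 */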
static struct vmm_ops *ops;

#define	VMM_INIT(num)	(ops != NULL ? (*ops->init)(num) : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)
#define	VMM_RESUME()	(ops != NULL ? (*ops->resume)() : 0)

#define	VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap) : NULL)
#define	VMRUN(vmi, vcpu, rip, pmap) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMSPACE_ALLOC(min, max) \
	(ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define	VMSPACE_FREE(vmspace) \
	(ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval) \
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val) \
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc) \
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc) \
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMINJECT(vmi, vcpu, type, vec, ec, ecv) \
	(ops != NULL ? (*ops->vminject)(vmi, vcpu, type, vec, ec, ecv) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval) \
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val) \
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
#define	VLAPIC_INIT(vmi, vcpu) \
	(ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
#define	VLAPIC_CLEANUP(vmi, vlapic) \
	(ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)

#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	fpu_stop_emulating()	clts()

static MALLOC_DEFINE(M_VM, "vm", "vm");
CTASSERT(VMM_MSR_NUM <= 64);	/* msr_mask can keep track of up to 64 msrs */

/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

static int vmm_ipinum;
SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
    "IPI vector used for vcpu notifications");

static void
vcpu_cleanup(struct vm *vm, int i)
{
	struct vcpu *vcpu = &vm->vcpu[i];

	VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
	vmm_stat_free(vcpu->stats);
	fpu_save_area_free(vcpu->guestfpu);
}

static void
vcpu_init(struct vm *vm, uint32_t vcpu_id)
{
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpu_id];

	vcpu_lock_init(vcpu);
	vcpu->hostcpu = NOCPU;
	vcpu->vcpuid = vcpu_id;
	vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
	vm_set_x2apic_state(vm, vcpu_id, X2APIC_ENABLED);
	vcpu->guestfpu = fpu_save_area_alloc();
	fpu_save_area_reset(vcpu->guestfpu);
	vcpu->stats = vmm_stat_alloc();
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= VM_MAXCPU)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}

static void
vmm_resume(void)
{
	VMM_RESUME();
}

static int
vmm_init(void)
{
	int error;

	vmm_host_state_init();

	vmm_ipinum = vmm_ipi_alloc();
	if (vmm_ipinum == 0)
		vmm_ipinum = IPI_AST;

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_msr_init();
	vmm_resume_p = vmm_resume;

	return (VMM_INIT(vmm_ipinum));
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		iommu_init();
		error = vmm_init();
		if (error == 0)
			vmm_initialized = 1;
		break;
	case MOD_UNLOAD:
		error = vmmdev_cleanup();
		if (error == 0) {
			vmm_resume_p = NULL;
			iommu_cleanup();
			if (vmm_ipinum != IPI_AST)
				vmm_ipi_free(vmm_ipinum);
			error = VMM_CLEANUP();
			/*
			 * Something bad happened - prevent new
			 * VMs from being created
			 */
			if (error)
				vmm_initialized = 0;
		}
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * vmm initialization has the following dependencies:
 *
 * - iommu initialization must happen after the pci passthru driver has had
 *   a chance to attach to any passthru devices (after SI_SUB_CONFIGURE).
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

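/*
 * Create a new virtual machine: allocate the guest address space, initialize
 * the backend-specific state, the vioapic, vhpet and per-vcpu state, and
 * mark the BSP as active.
 */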
int
vm_create(const char *name, struct vm **retvm)
{
	int i;
	struct vm *vm;
	struct vmspace *vmspace;

	const int BSP = 0;

	/*
	 * If vmm.ko could not be successfully initialized then don't attempt
	 * to create the virtual machine.
	 */
	if (!vmm_initialized)
		return (ENXIO);

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (EINVAL);

	vmspace = VMSPACE_ALLOC(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	if (vmspace == NULL)
		return (ENOMEM);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->vmspace = vmspace;
	vm->cookie = VMINIT(vm, vmspace_pmap(vmspace));
	vm->vioapic = vioapic_init(vm);
	vm->vhpet = vhpet_init(vm);

	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu_init(vm, i);
		guest_msrs_init(vm, i);
	}

	vm_activate_cpu(vm, BSP);

	*retvm = vm;
	return (0);
}

static void
vm_free_mem_seg(struct vm *vm, struct mem_seg *seg)
{

	if (seg->object != NULL)
		vmm_mem_free(vm->vmspace, seg->gpa, seg->len);

	bzero(seg, sizeof(*seg));
}

void
vm_destroy(struct vm *vm)
{
	int i;

	ppt_unassign_all(vm);

	if (vm->iommu != NULL)
		iommu_destroy_domain(vm->iommu);

	vhpet_cleanup(vm->vhpet);
	vioapic_cleanup(vm->vioapic);

	for (i = 0; i < vm->num_mem_segs; i++)
		vm_free_mem_seg(vm, &vm->mem_segs[i]);

	vm->num_mem_segs = 0;

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(vm, i);

	VMSPACE_FREE(vm->vmspace);

	VMCLEANUP(vm->cookie);

	free(vm, M_VM);
}

const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	vm_object_t obj;

	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
		return (ENOMEM);
	else
		return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{

	vmm_mmio_free(vm->vmspace, gpa, len);
	return (0);
}

boolean_t
vm_mem_allocated(struct vm *vm, vm_paddr_t gpa)
{
	int i;
	vm_paddr_t gpabase, gpalimit;

	for (i = 0; i < vm->num_mem_segs; i++) {
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa < gpalimit)
			return (TRUE);		/* 'gpa' is regular memory */
	}

	if (ppt_is_mmio(vm, gpa))
		return (TRUE);			/* 'gpa' is pci passthru mmio */

	return (FALSE);
}

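/*
 * Allocate 'len' bytes of guest physical memory starting at 'gpa'.  The range
 * must be page-aligned and either entirely unallocated, in which case a new
 * memory segment is created, or entirely allocated already, in which case
 * this is a no-op.
 */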
int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int available, allocated;
	struct mem_seg *seg;
	vm_object_t object;
	vm_paddr_t g;

	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
		return (EINVAL);

	available = allocated = 0;
	g = gpa;
	while (g < gpa + len) {
		if (vm_mem_allocated(vm, g))
			allocated++;
		else
			available++;

		g += PAGE_SIZE;
	}

	/*
	 * If there are some allocated and some available pages in the address
	 * range then it is an error.
	 */
	if (allocated && available)
		return (EINVAL);

	/*
	 * If the entire address range being requested has already been
	 * allocated then there isn't anything more to do.
	 */
	if (allocated && available == 0)
		return (0);

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	seg = &vm->mem_segs[vm->num_mem_segs];

	if ((object = vmm_mem_alloc(vm->vmspace, gpa, len)) == NULL)
		return (ENOMEM);

	seg->gpa = gpa;
	seg->len = len;
	seg->object = object;
	seg->wired = FALSE;

	vm->num_mem_segs++;

	return (0);
}

static void
vm_gpa_unwire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (!seg->wired)
			continue;

		rv = vm_map_unwire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		KASSERT(rv == KERN_SUCCESS, ("vm(%s) memory segment "
		    "%#lx/%ld could not be unwired: %d",
		    vm_name(vm), seg->gpa, seg->len, rv));

		seg->wired = FALSE;
	}
}

static int
vm_gpa_wire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (seg->wired)
			continue;

		/* XXX rlimits? */
		rv = vm_map_wire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		if (rv != KERN_SUCCESS)
			break;

		seg->wired = TRUE;
	}

	if (i < vm->num_mem_segs) {
		/*
		 * Undo the wiring before returning an error.
		 */
		vm_gpa_unwire(vm);
		return (EAGAIN);
	}

	return (0);
}

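/*
 * Move the wired pages backing the guest's memory segments between the host
 * iommu domain and the VM's iommu domain, so that passthru device DMA to
 * guest physical addresses is translated correctly.
 */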
static void
vm_iommu_modify(struct vm *vm, boolean_t map)
{
	int i, sz;
	vm_paddr_t gpa, hpa;
	struct mem_seg *seg;
	void *vp, *cookie, *host_domain;

	sz = PAGE_SIZE;
	host_domain = iommu_host_domain();

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		KASSERT(seg->wired, ("vm(%s) memory segment %#lx/%ld not wired",
		    vm_name(vm), seg->gpa, seg->len));

		gpa = seg->gpa;
		while (gpa < seg->gpa + seg->len) {
			vp = vm_gpa_hold(vm, gpa, PAGE_SIZE, VM_PROT_WRITE,
			    &cookie);
			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
			    vm_name(vm), gpa));

			vm_gpa_release(cookie);

			hpa = DMAP_TO_PHYS((uintptr_t)vp);
			if (map) {
				iommu_create_mapping(vm->iommu, gpa, hpa, sz);
				iommu_remove_mapping(host_domain, hpa, sz);
			} else {
				iommu_remove_mapping(vm->iommu, gpa, sz);
				iommu_create_mapping(host_domain, hpa, hpa, sz);
			}

			gpa += PAGE_SIZE;
		}
	}

	/*
	 * Invalidate the cached translations associated with the domain
	 * from which pages were removed.
	 */
	if (map)
		iommu_invalidate_tlb(host_domain);
	else
		iommu_invalidate_tlb(vm->iommu);
}

#define	vm_iommu_unmap(vm)	vm_iommu_modify((vm), FALSE)
#define	vm_iommu_map(vm)	vm_iommu_modify((vm), TRUE)

int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;

	error = ppt_unassign_device(vm, bus, slot, func);
	if (error)
		return (error);

	if (ppt_num_devices(vm) == 0) {
		vm_iommu_unmap(vm);
		vm_gpa_unwire(vm);
	}
	return (0);
}

int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;
	vm_paddr_t maxaddr;

	/*
	 * Virtual machines with pci passthru devices get special treatment:
	 * - the guest physical memory is wired
	 * - the iommu is programmed to do the 'gpa' to 'hpa' translation
	 *
	 * We need to do this before the first pci passthru device is attached.
	 */
	if (ppt_num_devices(vm) == 0) {
		KASSERT(vm->iommu == NULL,
		    ("vm_assign_pptdev: iommu must be NULL"));
		maxaddr = vmm_mem_maxaddr();
		vm->iommu = iommu_create_domain(maxaddr);

		error = vm_gpa_wire(vm);
		if (error)
			return (error);

		vm_iommu_map(vm);
	}

	error = ppt_assign_device(vm, bus, slot, func);
	return (error);
}

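/*
 * Hold the page backing guest physical address 'gpa' and return a pointer to
 * it in the host's direct map.  The page remains held until the caller passes
 * the returned cookie to vm_gpa_release().
 */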
void *
vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
	int count, pageoff;
	vm_page_t m;

	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
	    trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);

	if (count == 1) {
		*cookie = m;
		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
	} else {
		*cookie = NULL;
		return (NULL);
	}
}

void
vm_gpa_release(void *cookie)
{
	vm_page_t m = cookie;

	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}

int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
    struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			seg->gpa = vm->mem_segs[i].gpa;
			seg->len = vm->mem_segs[i].len;
			seg->wired = vm->mem_segs[i].wired;
			return (0);
		}
	}
	return (-1);
}

int
vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
    vm_offset_t *offset, struct vm_object **object)
{
	int i;
	size_t seg_len;
	vm_paddr_t seg_gpa;
	vm_object_t seg_obj;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if ((seg_obj = vm->mem_segs[i].object) == NULL)
			continue;

		seg_gpa = vm->mem_segs[i].gpa;
		seg_len = vm->mem_segs[i].len;

		if (gpa >= seg_gpa && gpa < seg_gpa + seg_len) {
			*offset = gpa - seg_gpa;
			*object = seg_obj;
			vm_object_reference(seg_obj);
			return (0);
		}
	}

	return (EINVAL);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMSETREG(vm->cookie, vcpu, reg, val));
}

static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

static void
restore_guest_fpustate(struct vcpu *vcpu)
{

	/* flush host state to the pcb */
	fpuexit(curthread);

	/* restore guest FPU state */
	fpu_stop_emulating();
	fpurestore(vcpu->guestfpu);

	/*
	 * The FPU is now "dirty" with the guest's state so turn on emulation
	 * to trap any access to the FPU by the host.
	 */
	fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest FPU state */
	fpu_stop_emulating();
	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}

static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");

static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
    bool from_idle)
{
	int error;

	vcpu_assert_locked(vcpu);

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state. This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE)
			msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
		    "vcpu idle state"));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	default:
		error = 1;
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;
	if (newstate == VCPU_IDLE)
		wakeup(&vcpu->state);

	return (0);
}

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}

/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
{
	struct vm_exit *vmexit;
	struct vcpu *vcpu;
	int t, timo;

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);

	/*
	 * Do a final check for pending NMI or interrupts before
	 * really putting this thread to sleep.
	 *
	 * These interrupts could have happened any time after we
	 * returned from VMRUN() and before we grabbed the vcpu lock.
	 */
	if (!vm_nmi_pending(vm, vcpuid) &&
	    (intr_disabled || !vlapic_pending_intr(vcpu->vlapic, NULL))) {
		t = ticks;
		vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
		if (vlapic_enabled(vcpu->vlapic)) {
			/*
			 * XXX msleep_spin() is not interruptible so use the
			 * 'timo' to put an upper bound on the sleep time.
			 */
			timo = hz;
			msleep_spin(vcpu, &vcpu->mtx, "vmidle", timo);
		} else {
			/*
			 * Spindown the vcpu if the apic is disabled and it
			 * had entered the halted state.
			 */
			*retu = true;
			vmexit = vm_exitinfo(vm, vcpuid);
			vmexit->exitcode = VM_EXITCODE_SPINDOWN_CPU;
			VCPU_CTR0(vm, vcpuid, "spinning down cpu");
		}
		vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
	}
	vcpu_unlock(vcpu);

	return (0);
}

static int
vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
{
	int rv, ftype;
	struct vm_map *map;
	struct vcpu *vcpu;
	struct vm_exit *vme;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	ftype = vme->u.paging.fault_type;
	KASSERT(ftype == VM_PROT_READ ||
	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
	    ("vm_handle_paging: invalid fault_type %d", ftype));

	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
		    vme->u.paging.gpa, ftype);
		if (rv == 0)
			goto done;
	}

	map = &vm->vmspace->vm_map;
	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);

	VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
	    "ftype = %d", rv, vme->u.paging.gpa, ftype);

	if (rv != KERN_SUCCESS)
		return (EFAULT);
done:
	/* restart execution at the faulting instruction */
	vme->inst_length = 0;

	return (0);
}

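/*
 * Handle an instruction emulation exit: fetch and decode the faulting
 * instruction and emulate it against the in-kernel local APIC, ioapic or
 * HPET if the access falls within their MMIO ranges; otherwise return to
 * userland for emulation.
 */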
static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	int error, inst_length;
	uint64_t rip, gla, gpa, cr3;
	mem_region_read_t mread;
	mem_region_write_t mwrite;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	rip = vme->rip;
	inst_length = vme->inst_length;

	gla = vme->u.inst_emul.gla;
	gpa = vme->u.inst_emul.gpa;
	cr3 = vme->u.inst_emul.cr3;
	vie = &vme->u.inst_emul.vie;

	vie_init(vie);

	/* Fetch, decode and emulate the faulting instruction */
	if (vmm_fetch_instruction(vm, vcpuid, rip, inst_length, cr3, vie) != 0)
		return (EFAULT);

	if (vmm_decode_instruction(vm, vcpuid, gla, vie) != 0)
		return (EFAULT);

	/* return to userland unless this is an in-kernel emulated device */
	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		mread = lapic_mmio_read;
		mwrite = lapic_mmio_write;
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		mread = vioapic_mmio_read;
		mwrite = vioapic_mmio_write;
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		mread = vhpet_mmio_read;
		mwrite = vhpet_mmio_write;
	} else {
		*retu = true;
		return (0);
	}

	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, mread, mwrite,
	    retu);

	return (error);
}

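/*
 * Run a vcpu until it exits to userland.  Each pass restores the guest MSR
 * and FPU state, enters the guest via the backend VMRUN and then handles
 * HLT, PAGING and INST_EMUL exits in the kernel when possible; all other
 * exit reasons are copied out to the caller.
 */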
int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	int error, vcpuid;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval, rip;
	struct vm_exit *vme;
	bool retu, intr_disabled;
	pmap_t pmap;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	pmap = vmspace_pmap(vm->vmspace);
	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
	rip = vmrun->rip;
restart:
	critical_enter();

	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
	    ("vm_run: absurd pm_active"));

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	set_pcb_flags(pcb, PCB_FULL_IRET);

	restore_guest_msrs(vm, vcpuid);
	restore_guest_fpustate(vcpu);

	vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
	vcpu->hostcpu = curcpu;
	error = VMRUN(vm->cookie, vcpuid, rip, pmap);
	vcpu->hostcpu = NOCPU;
	vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

	save_guest_fpustate(vcpu);
	restore_host_msrs(vm, vcpuid);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	critical_exit();

	if (error == 0) {
		retu = false;
		switch (vme->exitcode) {
		case VM_EXITCODE_HLT:
			intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
			error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
			break;
		case VM_EXITCODE_PAGING:
			error = vm_handle_paging(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INST_EMUL:
			error = vm_handle_inst_emul(vm, vcpuid, &retu);
			break;
		default:
			retu = true;	/* handled in userland */
			break;
		}
	}

	if (error == 0 && retu == false) {
		rip = vme->rip + vme->inst_length;
		goto restart;
	}

	/* copy the exit information */
	bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
	return (error);
}

int
vm_inject_event(struct vm *vm, int vcpuid, int type,
    int vector, uint32_t code, int code_valid)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if ((type > VM_EVENT_NONE && type < VM_EVENT_MAX) == 0)
		return (EINVAL);

	if (vector < 0 || vector > 255)
		return (EINVAL);

	return (VMINJECT(vm->cookie, vcpuid, type, vector, code, code_valid));
}

static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");

int
vm_inject_nmi(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->nmi_pending = 1;
	vcpu_notify_event(vm, vcpuid, false);
	return (0);
}

int
vm_nmi_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	return (vcpu->nmi_pending);
}

void
vm_nmi_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_clear: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->nmi_pending == 0)
		panic("vm_nmi_clear: inconsistent nmi_pending state");

	vcpu->nmi_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
}

int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMSETCAP(vm->cookie, vcpu, type, val));
}

uint64_t *
vm_guest_msrs(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].guest_msrs);
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].vlapic);
}

struct vioapic *
vm_ioapic(struct vm *vm)
{

	return (vm->vioapic);
}

struct vhpet *
vm_hpet(struct vm *vm)
{

	return (vm->vhpet);
}

boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
	int found, i, n;
	int b, s, f;
	char *val, *cp, *cp2;

	/*
	 * XXX
	 * The length of an environment variable is limited to 128 bytes which
	 * puts an upper limit on the number of passthru devices that may be
	 * specified using a single environment variable.
	 *
	 * Work around this by scanning multiple environment variable
	 * names instead of a single one - yuck!
	 */
	const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };

	/* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
	found = 0;
	for (i = 0; names[i] != NULL && !found; i++) {
		cp = val = getenv(names[i]);
		while (cp != NULL && *cp != '\0') {
			if ((cp2 = strchr(cp, ' ')) != NULL)
				*cp2 = '\0';

			n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
			if (n == 3 && bus == b && slot == s && func == f) {
				found = 1;
				break;
			}

			if (cp2 != NULL)
				*cp2++ = ' ';

			cp = cp2;
		}
		freeenv(val);
	}
	return (found);
}

void *
vm_iommu_domain(struct vm *vm)
{

	return (vm->iommu);
}

int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
	int error;
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_set_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	error = vcpu_set_state_locked(vcpu, newstate, from_idle);
	vcpu_unlock(vcpu);

	return (error);
}

enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
{
	struct vcpu *vcpu;
	enum vcpu_state state;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_get_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	state = vcpu->state;
	if (hostcpu != NULL)
		*hostcpu = vcpu->hostcpu;
	vcpu_unlock(vcpu);

	return (state);
}

void
vm_activate_cpu(struct vm *vm, int vcpuid)
{

	if (vcpuid >= 0 && vcpuid < VM_MAXCPU)
		CPU_SET(vcpuid, &vm->active_cpus);
}

cpuset_t
vm_active_cpus(struct vm *vm)
{

	return (vm->active_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

	return (vm->vcpu[vcpuid].stats);
}

int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*state = vm->vcpu[vcpuid].x2apic_state;

	return (0);
}

int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (state >= X2APIC_STATE_LAST)
		return (EINVAL);

	vm->vcpu[vcpuid].x2apic_state = state;

	vlapic_set_x2apic_state(vm, vcpuid, state);

	return (0);
}

/*
 * This function is called to ensure that a vcpu "sees" a pending event
 * as soon as possible:
 * - If the vcpu thread is sleeping then it is woken up.
 * - If the vcpu is running on a different host_cpu then an IPI will be
 *   directed to the host_cpu to cause the vcpu to trap into the hypervisor.
 */
void
vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
{
	int hostcpu;
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	hostcpu = vcpu->hostcpu;
	if (hostcpu == NOCPU) {
		if (vcpu->state == VCPU_SLEEPING)
			wakeup_one(vcpu);
	} else {
		if (vcpu->state != VCPU_RUNNING)
			panic("invalid vcpu state %d", vcpu->state);
		if (hostcpu != curcpu) {
			if (lapic_intr)
				vlapic_post_intr(vcpu->vlapic, hostcpu,
				    vmm_ipinum);
			else
				ipi_cpu(hostcpu, vmm_ipinum);
		}
	}
	vcpu_unlock(vcpu);
}

struct vmspace *
vm_get_vmspace(struct vm *vm)
{

	return (vm->vmspace);
}

int
vm_apicid2vcpuid(struct vm *vm, int apicid)
{
	/*
	 * XXX apic id is assumed to be numerically identical to vcpu id
	 */
	return (apicid);
}