/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>

#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <x86/apicreg.h>

#include <machine/vmm.h>
#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include <machine/vmm_dev.h>
#include "vlapic.h"
#include "vmm_msr.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

struct vcpu {
	int		flags;
	enum vcpu_state	state;
	struct mtx	mtx;
	int		hostcpu;	/* host cpuid this vcpu last ran on */
	uint64_t	guest_msrs[VMM_MSR_NUM];
	struct vlapic	*vlapic;
	int		vcpuid;
	struct savefpu	*guestfpu;	/* guest fpu state */
	void		*stats;
	struct vm_exit	exitinfo;
	enum x2apic_state x2apic_state;
	int		nmi_pending;
};

#define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))

#define	VM_MAX_MEMORY_SEGMENTS	2

struct vm {
	void		*cookie;	/* processor-specific data */
	void		*iommu;		/* iommu-specific data */
	struct vcpu	vcpu[VM_MAXCPU];
	int		num_mem_segs;
	struct vm_memory_segment mem_segs[VM_MAX_MEMORY_SEGMENTS];
	char		name[VM_MAX_NAMELEN];

	/*
	 * Set of active vcpus.
	 * An active vcpu is one that has been started implicitly (BSP) or
	 * explicitly (AP) by sending it a startup ipi.
	 */
	cpuset_t	active_cpus;
};

static int vmm_initialized;

static struct vmm_ops *ops;
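/*
 * The macros below dispatch into the hardware-specific backend
 * (vmm_ops_intel for VT-x or vmm_ops_amd for SVM, selected in vmm_init()).
 * If no backend has been registered the operations fail with ENXIO
 * (or return NULL/0 where noted).
 */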
#define	VMM_INIT()	(ops != NULL ? (*ops->init)() : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)

#define	VMINIT(vm)	(ops != NULL ? (*ops->vminit)(vm) : NULL)
#define	VMRUN(vmi, vcpu, rip) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMMMAP_SET(vmi, gpa, hpa, len, attr, prot, spm)			\
	(ops != NULL ?							\
	    (*ops->vmmmap_set)(vmi, gpa, hpa, len, attr, prot, spm) :	\
	    ENXIO)
#define	VMMMAP_GET(vmi, gpa) \
	(ops != NULL ? (*ops->vmmmap_get)(vmi, gpa) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval) \
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val) \
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc) \
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc) \
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMINJECT(vmi, vcpu, type, vec, ec, ecv) \
	(ops != NULL ? (*ops->vminject)(vmi, vcpu, type, vec, ec, ecv) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval) \
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val) \
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)

/*
 * Lazy FPU switching: setting CR0.TS makes the next FPU/SSE instruction
 * trap with #NM, while 'clts' clears CR0.TS and allows FPU access again.
 */
#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	fpu_stop_emulating()	clts()

static MALLOC_DEFINE(M_VM, "vm", "vm");
CTASSERT(VMM_MSR_NUM <= 64);	/* msr_mask can keep track of up to 64 msrs */

/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

static void
vcpu_cleanup(struct vcpu *vcpu)
{
	vlapic_cleanup(vcpu->vlapic);
	vmm_stat_free(vcpu->stats);
	fpu_save_area_free(vcpu->guestfpu);
}

static void
vcpu_init(struct vm *vm, uint32_t vcpu_id)
{
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpu_id];

	vcpu_lock_init(vcpu);
	vcpu->hostcpu = NOCPU;
	vcpu->vcpuid = vcpu_id;
	vcpu->vlapic = vlapic_init(vm, vcpu_id);
	vm_set_x2apic_state(vm, vcpu_id, X2APIC_ENABLED);
	vcpu->guestfpu = fpu_save_area_alloc();
	fpu_save_area_reset(vcpu->guestfpu);
	vcpu->stats = vmm_stat_alloc();
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= VM_MAXCPU)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}

static int
vmm_init(void)
{
	int error;

	vmm_host_state_init();
	vmm_ipi_init();

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_msr_init();

	return (VMM_INIT());
}
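/*
 * Module event handler: on MOD_LOAD set up the /dev/vmm interface and the
 * IOMMU before initializing the hardware backend; on MOD_UNLOAD tear them
 * down again, skipping the IOMMU and hardware cleanup if the device
 * interface could not be removed.
 */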
static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		iommu_init();
		error = vmm_init();
		if (error == 0)
			vmm_initialized = 1;
		break;
	case MOD_UNLOAD:
		error = vmmdev_cleanup();
		if (error == 0) {
			iommu_cleanup();
			vmm_ipi_cleanup();
			error = VMM_CLEANUP();
		}
		vmm_initialized = 0;
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * vmm initialization has the following dependencies:
 *
 * - iommu initialization must happen after the pci passthru driver has had
 *   a chance to attach to any passthru devices (after SI_SUB_CONFIGURE).
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);
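/*
 * Create a new virtual machine: allocate the vm structure, initialize the
 * per-vcpu state (local APIC, FPU save area, MSRs, stats) for all VM_MAXCPU
 * vcpus, create a per-VM IOMMU domain and mark the BSP active.
 */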
int
vm_create(const char *name, struct vm **retvm)
{
	int i;
	struct vm *vm;
	vm_paddr_t maxaddr;

	const int BSP = 0;

	/*
	 * If vmm.ko could not be successfully initialized then don't attempt
	 * to create the virtual machine.
	 */
	if (!vmm_initialized)
		return (ENXIO);

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (EINVAL);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->cookie = VMINIT(vm);

	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu_init(vm, i);
		guest_msrs_init(vm, i);
	}

	maxaddr = vmm_mem_maxaddr();
	vm->iommu = iommu_create_domain(maxaddr);
	vm_activate_cpu(vm, BSP);

	*retvm = vm;
	return (0);
}

static void
vm_free_mem_seg(struct vm *vm, struct vm_memory_segment *seg)
{
	size_t len;
	vm_paddr_t hpa;
	void *host_domain;

	host_domain = iommu_host_domain();

	len = 0;
	while (len < seg->len) {
		hpa = vm_gpa2hpa(vm, seg->gpa + len, PAGE_SIZE);
		if (hpa == (vm_paddr_t)-1) {
			panic("vm_free_mem_seg: cannot free hpa "
			      "associated with gpa 0x%016lx", seg->gpa + len);
		}

		/*
		 * Remove the 'gpa' to 'hpa' mapping in the VM's domain.
		 * Then resurrect the 1:1 mapping for 'hpa' in 'host_domain'.
		 */
		iommu_remove_mapping(vm->iommu, seg->gpa + len, PAGE_SIZE);
		iommu_create_mapping(host_domain, hpa, hpa, PAGE_SIZE);

		vmm_mem_free(hpa, PAGE_SIZE);

		len += PAGE_SIZE;
	}

	/*
	 * Invalidate cached translations associated with 'vm->iommu' since
	 * we have now moved some pages from it.
	 */
	iommu_invalidate_tlb(vm->iommu);

	bzero(seg, sizeof(struct vm_memory_segment));
}

void
vm_destroy(struct vm *vm)
{
	int i;

	ppt_unassign_all(vm);

	for (i = 0; i < vm->num_mem_segs; i++)
		vm_free_mem_seg(vm, &vm->mem_segs[i]);

	vm->num_mem_segs = 0;

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(&vm->vcpu[i]);

	iommu_destroy_domain(vm->iommu);

	VMCLEANUP(vm->cookie);

	free(vm, M_VM);
}

const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	return (VMMMAP_SET(vm->cookie, gpa, hpa, len, VM_MEMATTR_UNCACHEABLE,
	    VM_PROT_RW, spok));
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	return (VMMMAP_SET(vm->cookie, gpa, 0, len, 0,
	    VM_PROT_NONE, spok));
}

/*
 * Returns TRUE if 'gpa' is available for allocation and FALSE otherwise
 */
static boolean_t
vm_gpa_available(struct vm *vm, vm_paddr_t gpa)
{
	int i;
	vm_paddr_t gpabase, gpalimit;

	if (gpa & PAGE_MASK)
		panic("vm_gpa_available: gpa (0x%016lx) not page aligned", gpa);

	for (i = 0; i < vm->num_mem_segs; i++) {
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa < gpalimit)
			return (FALSE);
	}

	return (TRUE);
}
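/*
 * Back the guest physical range [gpa, gpa + len) with wired host memory.
 * Pages are allocated one at a time, mapped write-back into the guest
 * address space by the backend and moved from the host IOMMU domain into
 * the VM's domain so that passthru DMA targets the guest's copy.  The range
 * must be page aligned and either entirely unallocated or entirely
 * allocated already (in which case this is a no-op).
 */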
int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int error, available, allocated;
	struct vm_memory_segment *seg;
	vm_paddr_t g, hpa;
	void *host_domain;

	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
		return (EINVAL);

	available = allocated = 0;
	g = gpa;
	while (g < gpa + len) {
		if (vm_gpa_available(vm, g))
			available++;
		else
			allocated++;

		g += PAGE_SIZE;
	}

	/*
	 * If there are some allocated and some available pages in the address
	 * range then it is an error.
	 */
	if (allocated && available)
		return (EINVAL);

	/*
	 * If the entire address range being requested has already been
	 * allocated then there isn't anything more to do.
	 */
	if (allocated && available == 0)
		return (0);

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	host_domain = iommu_host_domain();

	seg = &vm->mem_segs[vm->num_mem_segs];

	error = 0;
	seg->gpa = gpa;
	seg->len = 0;
	while (seg->len < len) {
		hpa = vmm_mem_alloc(PAGE_SIZE);
		if (hpa == 0) {
			error = ENOMEM;
			break;
		}

		error = VMMMAP_SET(vm->cookie, gpa + seg->len, hpa, PAGE_SIZE,
		    VM_MEMATTR_WRITE_BACK, VM_PROT_ALL, spok);
		if (error)
			break;

		/*
		 * Remove the 1:1 mapping for 'hpa' from the 'host_domain'.
		 * Add a mapping for 'gpa + seg->len' to 'hpa' in the VM's
		 * domain.
		 */
		iommu_remove_mapping(host_domain, hpa, PAGE_SIZE);
		iommu_create_mapping(vm->iommu, gpa + seg->len, hpa, PAGE_SIZE);

		seg->len += PAGE_SIZE;
	}

	if (error) {
		vm_free_mem_seg(vm, seg);
		return (error);
	}

	/*
	 * Invalidate cached translations associated with 'host_domain' since
	 * we have now moved some pages from it.
	 */
	iommu_invalidate_tlb(host_domain);

	vm->num_mem_segs++;

	return (0);
}

vm_paddr_t
vm_gpa2hpa(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	vm_paddr_t nextpage;

	nextpage = rounddown(gpa + PAGE_SIZE, PAGE_SIZE);
	if (len > nextpage - gpa)
		panic("vm_gpa2hpa: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	return (VMMMAP_GET(vm->cookie, gpa));
}

int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
    struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			*seg = vm->mem_segs[i];
			return (0);
		}
	}
	return (-1);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMSETREG(vm->cookie, vcpu, reg, val));
}

static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

static void
restore_guest_fpustate(struct vcpu *vcpu)
{

	/* flush host state to the pcb */
	fpuexit(curthread);

	/* restore guest FPU state */
	fpu_stop_emulating();
	fpurestore(vcpu->guestfpu);

	/*
	 * The FPU is now "dirty" with the guest's state so turn on emulation
	 * to trap any access to the FPU by the host.
	 */
	fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest FPU state */
	fpu_stop_emulating();
	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}

static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
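/*
 * Run a vcpu in the guest until it exits.  Guest MSR and FPU state are
 * loaded around the call into the backend and the total guest runtime is
 * accounted to the vcpu.  A 'hlt' exit is handled here by sleeping until an
 * interrupt or NMI is pending for the vcpu (or until the next local APIC
 * timer tick) and then resuming the guest at the next instruction.
 */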
int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	int error, vcpuid, sleepticks, t;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval, rip;
	struct vm_exit *vme;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];
	vme = &vmrun->vm_exit;
	rip = vmrun->rip;
restart:
	critical_enter();

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	set_pcb_flags(pcb, PCB_FULL_IRET);

	restore_guest_msrs(vm, vcpuid);
	restore_guest_fpustate(vcpu);

	vcpu->hostcpu = curcpu;
	error = VMRUN(vm->cookie, vcpuid, rip);
	vcpu->hostcpu = NOCPU;

	save_guest_fpustate(vcpu);
	restore_host_msrs(vm, vcpuid);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	/* copy the exit information */
	bcopy(&vcpu->exitinfo, vme, sizeof(struct vm_exit));

	critical_exit();

	/*
	 * Oblige the guest's desire to 'hlt' by sleeping until the vcpu
	 * is ready to run.
	 */
	if (error == 0 && vme->exitcode == VM_EXITCODE_HLT) {
		vcpu_lock(vcpu);

		/*
		 * Figure out the number of host ticks until the next apic
		 * timer interrupt in the guest.
		 */
		sleepticks = lapic_timer_tick(vm, vcpuid);

		/*
		 * If the guest local apic timer is disabled then sleep for
		 * a long time but not forever.
		 */
		if (sleepticks < 0)
			sleepticks = hz;

		/*
		 * Do a final check for pending NMI or interrupts before
		 * really putting this thread to sleep.
		 *
		 * These interrupts could have happened any time after we
		 * returned from VMRUN() and before we grabbed the vcpu lock.
		 */
		if (!vm_nmi_pending(vm, vcpuid) &&
		    lapic_pending_intr(vm, vcpuid) < 0) {
			if (sleepticks <= 0)
				panic("invalid sleepticks %d", sleepticks);
			t = ticks;
			msleep_spin(vcpu, &vcpu->mtx, "vmidle", sleepticks);
			vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
		}

		vcpu_unlock(vcpu);

		rip = vme->rip + vme->inst_length;
		goto restart;
	}

	return (error);
}

int
vm_inject_event(struct vm *vm, int vcpuid, int type,
    int vector, uint32_t code, int code_valid)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if ((type > VM_EVENT_NONE && type < VM_EVENT_MAX) == 0)
		return (EINVAL);

	if (vector < 0 || vector > 255)
		return (EINVAL);

	return (VMINJECT(vm->cookie, vcpuid, type, vector, code, code_valid));
}
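/*
 * NMI injection: vm_inject_nmi() marks an NMI pending and kicks the host
 * cpu the vcpu is running on (or wakes it if it is sleeping in 'hlt');
 * vm_nmi_clear() resets the flag and bumps the per-vcpu NMI count.
 */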
static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");

int
vm_inject_nmi(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->nmi_pending = 1;
	vm_interrupt_hostcpu(vm, vcpuid);
	return (0);
}

int
vm_nmi_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	return (vcpu->nmi_pending);
}

void
vm_nmi_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_clear: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->nmi_pending == 0)
		panic("vm_nmi_clear: inconsistent nmi_pending state");

	vcpu->nmi_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
}

int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMSETCAP(vm->cookie, vcpu, type, val));
}

uint64_t *
vm_guest_msrs(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].guest_msrs);
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].vlapic);
}

boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
	int found, i, n;
	int b, s, f;
	char *val, *cp, *cp2;

	/*
	 * XXX
	 * The length of an environment variable is limited to 128 bytes which
	 * puts an upper limit on the number of passthru devices that may be
	 * specified using a single environment variable.
	 *
	 * Work around this by scanning multiple environment variable
	 * names instead of a single one - yuck!
	 */
	const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };

	/* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
	found = 0;
	for (i = 0; names[i] != NULL && !found; i++) {
		cp = val = getenv(names[i]);
		while (cp != NULL && *cp != '\0') {
			if ((cp2 = strchr(cp, ' ')) != NULL)
				*cp2 = '\0';

			n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
			if (n == 3 && bus == b && slot == s && func == f) {
				found = 1;
				break;
			}

			if (cp2 != NULL)
				*cp2++ = ' ';

			cp = cp2;
		}
		freeenv(val);
	}
	return (found);
}

void *
vm_iommu_domain(struct vm *vm)
{

	return (vm->iommu);
}
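/*
 * A vcpu moves out of VCPU_IDLE into a single non-idle state and must go
 * back to VCPU_IDLE before a different transition is accepted; any other
 * request is rejected with EBUSY.
 */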
int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state state)
{
	int error;
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_set_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);

	/*
	 * The following state transitions are allowed:
	 * IDLE -> RUNNING -> IDLE
	 * IDLE -> CANNOT_RUN -> IDLE
	 */
	if ((vcpu->state == VCPU_IDLE && state != VCPU_IDLE) ||
	    (vcpu->state != VCPU_IDLE && state == VCPU_IDLE)) {
		error = 0;
		vcpu->state = state;
	} else {
		error = EBUSY;
	}

	vcpu_unlock(vcpu);

	return (error);
}

enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
{
	struct vcpu *vcpu;
	enum vcpu_state state;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_get_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	state = vcpu->state;
	if (hostcpu != NULL)
		*hostcpu = vcpu->hostcpu;
	vcpu_unlock(vcpu);

	return (state);
}

void
vm_activate_cpu(struct vm *vm, int vcpuid)
{

	if (vcpuid >= 0 && vcpuid < VM_MAXCPU)
		CPU_SET(vcpuid, &vm->active_cpus);
}

cpuset_t
vm_active_cpus(struct vm *vm)
{

	return (vm->active_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

	return (vm->vcpu[vcpuid].stats);
}

int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*state = vm->vcpu[vcpuid].x2apic_state;

	return (0);
}

int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (state >= X2APIC_STATE_LAST)
		return (EINVAL);

	vm->vcpu[vcpuid].x2apic_state = state;

	vlapic_set_x2apic_state(vm, vcpuid, state);

	return (0);
}

void
vm_interrupt_hostcpu(struct vm *vm, int vcpuid)
{
	int hostcpu;
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	hostcpu = vcpu->hostcpu;
	if (hostcpu == NOCPU) {
		/*
		 * If the vcpu is 'RUNNING' but without a valid 'hostcpu' then
		 * the host thread must be sleeping waiting for an event to
		 * kick the vcpu out of 'hlt'.
		 *
		 * XXX this is racy because the condition exists right before
		 * and after calling VMRUN() in vm_run().  The wakeup() is
		 * benign in this case.
		 */
		if (vcpu->state == VCPU_RUNNING)
			wakeup_one(vcpu);
	} else {
		if (vcpu->state != VCPU_RUNNING)
			panic("invalid vcpu state %d", vcpu->state);
		if (hostcpu != curcpu)
			ipi_cpu(hostcpu, vmm_ipinum);
	}
	vcpu_unlock(vcpu);
}