/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2023 Oxide Computer Company
 * Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/kmem.h>
#include <sys/pcpu.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/systm.h>
#include <sys/sunddi.h>
#include <sys/hma.h>
#include <sys/archsystm.h>

#include <machine/md_var.h>
#include <x86/psl.h>
#include <x86/apicreg.h>

#include <machine/specialreg.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmparam.h>
#include <sys/vmm_instruction_emul.h>
#include <sys/vmm_vm.h>
#include <sys/vmm_gpt.h>
#include <sys/vmm_data.h>

#include "vmm_ioport.h"
#include "vmm_host.h"
#include "vmm_util.h"
#include "vatpic.h"
#include "vatpit.h"
#include "vhpet.h"
#include "vioapic.h"
#include "vlapic.h"
#include "vpmtmr.h"
#include "vrtc.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

/* Flags for vtc_status */
#define	VTCS_FPU_RESTORED	1 /* guest FPU restored, host FPU saved */
#define	VTCS_FPU_CTX_CRITICAL	2 /* in ctx where FPU restore cannot be lazy */

typedef struct vm_thread_ctx {
	struct vm	*vtc_vm;
	int		vtc_vcpuid;
	uint_t		vtc_status;
	enum vcpu_ustate vtc_ustate;
} vm_thread_ctx_t;

#define	VMM_MTRR_VAR_MAX 10
#define	VMM_MTRR_DEF_MASK \
	(MTRR_DEF_ENABLE | MTRR_DEF_FIXED_ENABLE | MTRR_DEF_TYPE)
#define	VMM_MTRR_PHYSBASE_MASK (MTRR_PHYSBASE_PHYSBASE | MTRR_PHYSBASE_TYPE)
#define	VMM_MTRR_PHYSMASK_MASK (MTRR_PHYSMASK_PHYSMASK | MTRR_PHYSMASK_VALID)
struct vm_mtrr {
	uint64_t def_type;
	uint64_t fixed4k[8];
	uint64_t fixed16k[2];
	uint64_t fixed64k;
	struct {
		uint64_t base;
		uint64_t mask;
	} var[VMM_MTRR_VAR_MAX];
};
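
/*
 * Layout note (added commentary): the variable-range MTRRs are exposed to the
 * guest as interleaved base/mask MSR pairs starting at MSR_MTRRVarBase, so
 * var[n].base corresponds to MSR_MTRRVarBase + 2n and var[n].mask to
 * MSR_MTRRVarBase + 2n + 1.  See the index arithmetic in vm_rdmtrr() and
 * vm_wrmtrr() below.
 */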

/*
 * Initialization:
 * (a) allocated when vcpu is created
 * (i) initialized when vcpu is created and when it is reinitialized
 * (o) initialized the first time the vcpu is created
 * (x) initialized before use
 */
struct vcpu {
	/* (o) protects state, run_state, hostcpu, sipi_vector */
	kmutex_t	lock;

	enum vcpu_state	state;		/* (o) vcpu state */
	enum vcpu_run_state run_state;	/* (i) vcpu init/sipi/run state */
	kcondvar_t	vcpu_cv;	/* (o) cpu waiter cv */
	kcondvar_t	state_cv;	/* (o) IDLE-transition cv */
	int		hostcpu;	/* (o) vcpu's current host cpu */
	int		lastloccpu;	/* (o) last host cpu localized to */
	int		reqidle;	/* (i) request vcpu to idle */
	bool		reqconsist;	/* (i) req. vcpu exit when consistent */
	struct vlapic	*vlapic;	/* (i) APIC device model */
	enum x2apic_state x2apic_state;	/* (i) APIC mode */
	uint64_t	exit_intinfo;	/* (i) events pending at VM exit */
	uint64_t	exc_pending;	/* (i) exception pending */
	bool		nmi_pending;	/* (i) NMI pending */
	bool		extint_pending;	/* (i) INTR pending */

	uint8_t		sipi_vector;	/* (i) SIPI vector */
	hma_fpu_t	*guestfpu;	/* (a,i) guest fpu state */
	uint64_t	guest_xcr0;	/* (i) guest %xcr0 register */
	void		*stats;		/* (a,i) statistics */
	struct vm_exit	exitinfo;	/* (x) exit reason and collateral */
	uint64_t	nextrip;	/* (x) next instruction to execute */
	struct vie	*vie_ctx;	/* (x) instruction emulation context */
	vm_client_t	*vmclient;	/* (a) VM-system client */
	uint64_t	tsc_offset;	/* (x) vCPU TSC offset */
	struct vm_mtrr	mtrr;		/* (i) vcpu's MTRR */
	vcpu_cpuid_config_t cpuid_cfg;	/* (x) cpuid configuration */

	enum vcpu_ustate ustate;	/* (i) microstate for the vcpu */
	hrtime_t	ustate_when;	/* (i) time of last ustate change */
	uint64_t ustate_total[VU_MAX];	/* (o) total time spent in ustates */
	vm_thread_ctx_t	vtc;		/* (o) thread state for ctxops */
	struct ctxop	*ctxop;		/* (o) ctxop storage for vcpu */
};

#define	vcpu_lock(v)		mutex_enter(&((v)->lock))
#define	vcpu_unlock(v)		mutex_exit(&((v)->lock))
#define	vcpu_assert_locked(v)	ASSERT(MUTEX_HELD(&((v)->lock)))

struct mem_seg {
	size_t	len;
	bool	sysmem;
	vm_object_t *object;
};
#define	VM_MAX_MEMSEGS	5

struct mem_map {
	vm_paddr_t	gpa;
	size_t		len;
	vm_ooffset_t	segoff;
	int		segid;
	int		prot;
	int		flags;
};
#define	VM_MAX_MEMMAPS	8

/*
 * Initialization:
 * (o) initialized the first time the VM is created
 * (i) initialized when VM is created and when it is reinitialized
 * (x) initialized before use
 */
struct vm {
	void		*cookie;	/* (i) cpu-specific data */
	void		*iommu;		/* (x) iommu-specific data */
	struct vhpet	*vhpet;		/* (i) virtual HPET */
	struct vioapic	*vioapic;	/* (i) virtual ioapic */
	struct vatpic	*vatpic;	/* (i) virtual atpic */
	struct vatpit	*vatpit;	/* (i) virtual atpit */
	struct vpmtmr	*vpmtmr;	/* (i) virtual ACPI PM timer */
	struct vrtc	*vrtc;		/* (o) virtual RTC */
	volatile cpuset_t active_cpus;	/* (i) active vcpus */
	volatile cpuset_t debug_cpus;	/* (i) vcpus stopped for dbg */
	int		suspend;	/* (i) stop VM execution */
	volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
	volatile cpuset_t halted_cpus;	/* (x) cpus in a hard halt */
	struct mem_map	mem_maps[VM_MAX_MEMMAPS]; /* (i) guest address space */
	struct mem_seg	mem_segs[VM_MAX_MEMSEGS]; /* (o) guest memory regions */
	struct vmspace	*vmspace;	/* (o) guest's address space */
	struct vcpu	vcpu[VM_MAXCPU];	/* (i) guest vcpus */
	/* The following describe the vm cpu topology */
	uint16_t	sockets;	/* (o) num of sockets */
	uint16_t	cores;		/* (o) num of cores/socket */
	uint16_t	threads;	/* (o) num of threads/core */
	uint16_t	maxcpus;	/* (o) max pluggable cpus */

	hrtime_t	boot_hrtime;	/* (i) hrtime at VM boot */

	/* TSC and TSC scaling related values */
	uint64_t	tsc_offset;	/* (i) VM-wide TSC offset */
	uint64_t	guest_freq;	/* (i) guest TSC frequency */
	uint64_t	freq_multiplier; /* (i) guest/host TSC ratio */

	struct ioport_config ioports;	/* (o) ioport handling */

	bool		mem_transient;	/* (o) alloc transient memory */
	bool		is_paused;	/* (i) instance is paused */
};

static int vmm_initialized;
static uint64_t vmm_host_freq;


static void
nullop_panic(void)
{
	panic("null vmm operation call");
}

/* Do not allow use of an un-set `ops` to do anything but panic */
static struct vmm_ops vmm_ops_null = {
	.init		= (vmm_init_func_t)nullop_panic,
	.cleanup	= (vmm_cleanup_func_t)nullop_panic,
	.resume		= (vmm_resume_func_t)nullop_panic,
	.vminit		= (vmi_init_func_t)nullop_panic,
	.vmrun		= (vmi_run_func_t)nullop_panic,
	.vmcleanup	= (vmi_cleanup_func_t)nullop_panic,
	.vmgetreg	= (vmi_get_register_t)nullop_panic,
	.vmsetreg	= (vmi_set_register_t)nullop_panic,
	.vmgetdesc	= (vmi_get_desc_t)nullop_panic,
	.vmsetdesc	= (vmi_set_desc_t)nullop_panic,
	.vmgetcap	= (vmi_get_cap_t)nullop_panic,
	.vmsetcap	= (vmi_set_cap_t)nullop_panic,
	.vlapic_init	= (vmi_vlapic_init)nullop_panic,
	.vlapic_cleanup	= (vmi_vlapic_cleanup)nullop_panic,
	.vmpause	= (vmi_pause_t)nullop_panic,
	.vmsavectx	= (vmi_savectx)nullop_panic,
	.vmrestorectx	= (vmi_restorectx)nullop_panic,
	.vmgetmsr	= (vmi_get_msr_t)nullop_panic,
	.vmsetmsr	= (vmi_set_msr_t)nullop_panic,
	.vmfreqratio	= (vmi_freqratio_t)nullop_panic,
	.fr_fracsize	= 0,
	.fr_intsize	= 0,
};

static struct vmm_ops *ops = &vmm_ops_null;
static vmm_pte_ops_t *pte_ops = NULL;

#define	VMM_INIT()			((*ops->init)())
#define	VMM_CLEANUP()			((*ops->cleanup)())
#define	VMM_RESUME()			((*ops->resume)())

#define	VMINIT(vm)			((*ops->vminit)(vm))
#define	VMRUN(vmi, vcpu, rip)		((*ops->vmrun)(vmi, vcpu, rip))
#define	VMCLEANUP(vmi)			((*ops->vmcleanup)(vmi))

#define	VMGETREG(vmi, vcpu, num, rv)	((*ops->vmgetreg)(vmi, vcpu, num, rv))
#define	VMSETREG(vmi, vcpu, num, val)	((*ops->vmsetreg)(vmi, vcpu, num, val))
#define	VMGETDESC(vmi, vcpu, num, dsc)	((*ops->vmgetdesc)(vmi, vcpu, num, dsc))
#define	VMSETDESC(vmi, vcpu, num, dsc)	((*ops->vmsetdesc)(vmi, vcpu, num, dsc))
#define	VMGETCAP(vmi, vcpu, num, rv)	((*ops->vmgetcap)(vmi, vcpu, num, rv))
#define	VMSETCAP(vmi, vcpu, num, val)	((*ops->vmsetcap)(vmi, vcpu, num, val))
#define	VLAPIC_INIT(vmi, vcpu)		((*ops->vlapic_init)(vmi, vcpu))
#define	VLAPIC_CLEANUP(vmi, vlapic)	((*ops->vlapic_cleanup)(vmi, vlapic))

#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	fpu_stop_emulating()	clts()
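
/*
 * Note (added commentary): setting CR0.TS makes the next FPU/SIMD access from
 * host code raise #NM, which keeps the host from silently clobbering the
 * guest FPU state loaded by restore_guest_fpustate().  clts() clears the flag
 * again once the guest state has been saved back out.
 */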

SDT_PROVIDER_DEFINE(vmm);

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

/*
 * Halt the guest if all vcpus are executing a HLT instruction with
 * interrupts disabled.
 */
int halt_detection_enabled = 1;

/* Trap into hypervisor on all guest exceptions and reflect them back */
int trace_guest_exceptions;

/* Trap WBINVD and ignore it */
int trap_wbinvd = 1;

static void vm_free_memmap(struct vm *vm, int ident);
static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
static void vcpu_notify_event_locked(struct vcpu *vcpu, vcpu_notify_t);
static bool vcpu_sleep_bailout_checks(struct vm *vm, int vcpuid);
static int vcpu_vector_sipi(struct vm *vm, int vcpuid, uint8_t vector);

static void vmm_savectx(void *);
static void vmm_restorectx(void *);
static const struct ctxop_template vmm_ctxop_tpl = {
	.ct_rev		= CTXOP_TPL_REV,
	.ct_save	= vmm_savectx,
	.ct_restore	= vmm_restorectx,
};

static uint64_t calc_tsc_offset(uint64_t base_host_tsc, uint64_t base_guest_tsc,
    uint64_t mult);
static uint64_t calc_guest_tsc(uint64_t host_tsc, uint64_t mult,
    uint64_t offset);

/* functions implemented in vmm_time_support.S */
uint64_t calc_freq_multiplier(uint64_t guest_hz, uint64_t host_hz,
    uint32_t frac_size);
uint64_t scale_tsc(uint64_t tsc, uint64_t multiplier, uint32_t frac_size);

#ifdef KTR
static const char *
vcpu_state2str(enum vcpu_state state)
{
	switch (state) {
	case VCPU_IDLE:
		return ("idle");
	case VCPU_FROZEN:
		return ("frozen");
	case VCPU_RUNNING:
		return ("running");
	case VCPU_SLEEPING:
		return ("sleeping");
	default:
		return ("unknown");
	}
}
#endif

static void
vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
	struct vcpu *vcpu = &vm->vcpu[i];

	VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
	if (destroy) {
		vmm_stat_free(vcpu->stats);

		vcpu_cpuid_cleanup(&vcpu->cpuid_cfg);

		hma_fpu_free(vcpu->guestfpu);
		vcpu->guestfpu = NULL;

		vie_free(vcpu->vie_ctx);
		vcpu->vie_ctx = NULL;

		vmc_destroy(vcpu->vmclient);
		vcpu->vmclient = NULL;

		ctxop_free(vcpu->ctxop);
		mutex_destroy(&vcpu->lock);
	}
}

static void
vcpu_init(struct vm *vm, int vcpu_id, bool create)
{
	struct vcpu *vcpu;

	KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus,
	    ("vcpu_init: invalid vcpu %d", vcpu_id));

	vcpu = &vm->vcpu[vcpu_id];

	if (create) {
		mutex_init(&vcpu->lock, NULL, MUTEX_ADAPTIVE, NULL);

		vcpu->state = VCPU_IDLE;
		vcpu->hostcpu = NOCPU;
		vcpu->lastloccpu = NOCPU;
		vcpu->guestfpu = hma_fpu_alloc(KM_SLEEP);
		vcpu->stats = vmm_stat_alloc();
		vcpu->vie_ctx = vie_alloc();
		vcpu_cpuid_init(&vcpu->cpuid_cfg);

		vcpu->ustate = VU_INIT;
		vcpu->ustate_when = gethrtime();

		vcpu->vtc.vtc_vm = vm;
		vcpu->vtc.vtc_vcpuid = vcpu_id;
		vcpu->ctxop = ctxop_allocate(&vmm_ctxop_tpl, &vcpu->vtc);
	} else {
		vie_reset(vcpu->vie_ctx);
		bzero(&vcpu->exitinfo, sizeof (vcpu->exitinfo));
		if (vcpu->ustate != VU_INIT) {
			vcpu_ustate_change(vm, vcpu_id, VU_INIT);
		}
		bzero(&vcpu->mtrr, sizeof (vcpu->mtrr));
	}

	vcpu->run_state = VRS_HALT;
	vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
	(void) vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
	vcpu->reqidle = 0;
	vcpu->exit_intinfo = 0;
	vcpu->nmi_pending = false;
	vcpu->extint_pending = false;
	vcpu->exc_pending = 0;
	vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
	(void) hma_fpu_init(vcpu->guestfpu);
	vmm_stat_init(vcpu->stats);
	vcpu->tsc_offset = 0;
}

int
vcpu_trace_exceptions(struct vm *vm, int vcpuid)
{
	return (trace_guest_exceptions);
}

int
vcpu_trap_wbinvd(struct vm *vm, int vcpuid)
{
	return (trap_wbinvd);
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= vm->maxcpus)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}

struct vie *
vm_vie_ctx(struct vm *vm, int cpuid)
{
	if (cpuid < 0 || cpuid >= vm->maxcpus)
		panic("vm_vie_ctx: invalid cpuid %d", cpuid);

	return (vm->vcpu[cpuid].vie_ctx);
}

static int
vmm_init(void)
{
	vmm_host_state_init();
	vmm_host_freq = unscalehrtime(NANOSEC);

	if (vmm_is_intel()) {
		ops = &vmm_ops_intel;
		pte_ops = &ept_pte_ops;
	} else if (vmm_is_svm()) {
		ops = &vmm_ops_amd;
		pte_ops = &rvi_pte_ops;
	} else {
		return (ENXIO);
	}

	return (VMM_INIT());
}

int
vmm_mod_load()
{
	int error;

	VERIFY(vmm_initialized == 0);

	error = vmm_init();
	if (error == 0)
		vmm_initialized = 1;

	return (error);
}

int
vmm_mod_unload()
{
	int error;

	VERIFY(vmm_initialized == 1);

	error = VMM_CLEANUP();
	if (error)
		return (error);
	vmm_initialized = 0;

	return (0);
}

/*
 * Create a test IOMMU domain to see if the host system has the necessary
 * hardware and drivers to do so.
 */
bool
vmm_check_iommu(void)
{
	void *domain;
	const size_t arb_test_sz = (1UL << 32);

	domain = iommu_create_domain(arb_test_sz);
	if (domain == NULL) {
		return (false);
	}
	iommu_destroy_domain(domain);
	return (true);
}

static void
vm_init(struct vm *vm, bool create)
{
	int i;

	vm->cookie = VMINIT(vm);
	vm->iommu = NULL;
	vm->vioapic = vioapic_init(vm);
	vm->vhpet = vhpet_init(vm);
	vm->vatpic = vatpic_init(vm);
	vm->vatpit = vatpit_init(vm);
	vm->vpmtmr = vpmtmr_init(vm);
	if (create)
		vm->vrtc = vrtc_init(vm);

	vm_inout_init(vm, &vm->ioports);

	CPU_ZERO(&vm->active_cpus);
	CPU_ZERO(&vm->debug_cpus);

	vm->suspend = 0;
	CPU_ZERO(&vm->suspended_cpus);

	for (i = 0; i < vm->maxcpus; i++)
		vcpu_init(vm, i, create);

	/*
	 * Configure VM time-related data, including:
	 * - VM-wide TSC offset
	 * - boot_hrtime
	 * - guest_freq (same as host at boot time)
	 * - freq_multiplier (used for scaling)
	 *
	 * This data is configured such that the call to vm_init() represents
	 * the boot time (when the TSC(s) read 0).  Each vCPU will have its own
	 * offset from this, which is altered if/when the guest writes to
	 * MSR_TSC.
	 *
	 * Further changes to this data may occur if userspace writes to the
	 * time data.
	 */
	const uint64_t boot_tsc = rdtsc_offset();

	/* Convert the boot TSC reading to hrtime */
	vm->boot_hrtime = (hrtime_t)boot_tsc;
	scalehrtime(&vm->boot_hrtime);

	/* Guest frequency is the same as the host at boot time */
	vm->guest_freq = vmm_host_freq;

	/* no scaling needed if guest_freq == host_freq */
	vm->freq_multiplier = VM_TSCM_NOSCALE;

	/* configure VM-wide offset: initial guest TSC is 0 at boot */
	vm->tsc_offset = calc_tsc_offset(boot_tsc, 0, vm->freq_multiplier);
}
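
/*
 * Worked example (added, hedged): assuming calc_tsc_offset() computes
 * `base_guest_tsc - scale_tsc(base_host_tsc, mult, frac)` and calc_guest_tsc()
 * is its inverse, the boot-time setup above reduces to:
 *
 *	offset    = 0 - boot_tsc	(no scaling at boot)
 *	guest_tsc = host_tsc + offset
 *	          = host_tsc - boot_tsc
 *
 * so a guest reading the TSC immediately after vm_init() observes a value
 * near 0, advancing at the host frequency until the guest writes MSR_TSC or
 * userspace adjusts the time data.
 */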

/*
 * The default CPU topology is a single thread per package.
 */
uint_t cores_per_package = 1;
uint_t threads_per_core = 1;

int
vm_create(uint64_t flags, struct vm **retvm)
{
	struct vm *vm;
	struct vmspace *vmspace;

	/*
	 * If vmm.ko could not be successfully initialized then don't attempt
	 * to create the virtual machine.
	 */
	if (!vmm_initialized)
		return (ENXIO);

	bool track_dirty = (flags & VCF_TRACK_DIRTY) != 0;
	if (track_dirty && !pte_ops->vpeo_hw_ad_supported())
		return (ENOTSUP);

	vmspace = vmspace_alloc(VM_MAXUSER_ADDRESS, pte_ops, track_dirty);
	if (vmspace == NULL)
		return (ENOMEM);

	vm = kmem_zalloc(sizeof (struct vm), KM_SLEEP);

	vm->vmspace = vmspace;
	vm->mem_transient = (flags & VCF_RESERVOIR_MEM) == 0;
	for (uint_t i = 0; i < VM_MAXCPU; i++) {
		vm->vcpu[i].vmclient = vmspace_client_alloc(vmspace);
	}

	vm->sockets = 1;
	vm->cores = cores_per_package;	/* XXX backwards compatibility */
	vm->threads = threads_per_core;	/* XXX backwards compatibility */
	vm->maxcpus = VM_MAXCPU;	/* XXX temp to keep code working */

	vm_init(vm, true);

	*retvm = vm;
	return (0);
}

void
vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
    uint16_t *threads, uint16_t *maxcpus)
{
	*sockets = vm->sockets;
	*cores = vm->cores;
	*threads = vm->threads;
	*maxcpus = vm->maxcpus;
}

uint16_t
vm_get_maxcpus(struct vm *vm)
{
	return (vm->maxcpus);
}

int
vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
    uint16_t threads, uint16_t maxcpus)
{
	if (maxcpus != 0)
		return (EINVAL);	/* XXX remove when supported */
	if ((sockets * cores * threads) > vm->maxcpus)
		return (EINVAL);
	/* XXX need to check sockets * cores * threads == vCPU, how? */
	vm->sockets = sockets;
	vm->cores = cores;
	vm->threads = threads;
	vm->maxcpus = VM_MAXCPU;	/* XXX temp to keep code working */
	return (0);
}

static void
vm_cleanup(struct vm *vm, bool destroy)
{
	struct mem_map *mm;
	int i;

	ppt_unassign_all(vm);

	if (vm->iommu != NULL)
		iommu_destroy_domain(vm->iommu);

	/*
	 * Devices which attach their own ioport hooks should be cleaned up
	 * first so they can tear down those registrations.
	 */
	vpmtmr_cleanup(vm->vpmtmr);

	vm_inout_cleanup(vm, &vm->ioports);

	if (destroy)
		vrtc_cleanup(vm->vrtc);
	else
		vrtc_reset(vm->vrtc);

	vatpit_cleanup(vm->vatpit);
	vhpet_cleanup(vm->vhpet);
	vatpic_cleanup(vm->vatpic);
	vioapic_cleanup(vm->vioapic);

	for (i = 0; i < vm->maxcpus; i++)
		vcpu_cleanup(vm, i, destroy);

	VMCLEANUP(vm->cookie);

	/*
	 * System memory is removed from the guest address space only when
	 * the VM is destroyed.  This is because the mapping remains the same
	 * across VM reset.
	 *
	 * Device memory can be relocated by the guest (e.g. using PCI BARs)
	 * so those mappings are removed on a VM reset.
	 */
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm->mem_maps[i];
		if (destroy || !sysmem_mapping(vm, mm)) {
			vm_free_memmap(vm, i);
		} else {
			/*
			 * We need to reset the IOMMU flag so this mapping can
			 * be reused when a VM is rebooted.  Since the IOMMU
			 * domain has already been destroyed we can just reset
			 * the flag here.
			 */
			mm->flags &= ~VM_MEMMAP_F_IOMMU;
		}
	}

	if (destroy) {
		for (i = 0; i < VM_MAX_MEMSEGS; i++)
			vm_free_memseg(vm, i);

		vmspace_destroy(vm->vmspace);
		vm->vmspace = NULL;
	}
}

void
vm_destroy(struct vm *vm)
{
	vm_cleanup(vm, true);
	kmem_free(vm, sizeof (*vm));
}

int
vm_reinit(struct vm *vm, uint64_t flags)
{
	/* A virtual machine can be reset only if all vcpus are suspended. */
	if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) != 0) {
		if ((flags & VM_REINIT_F_FORCE_SUSPEND) == 0) {
			return (EBUSY);
		}

		/*
		 * Force the VM (and all its vCPUs) into a suspended state.
		 * This should be quick and easy, since the vm_reinit() call is
		 * made while holding the VM write lock, which requires holding
		 * all of the vCPUs in the VCPU_FROZEN state.
		 */
		(void) atomic_cmpset_int((uint_t *)&vm->suspend, 0,
		    VM_SUSPEND_RESET);
		for (uint_t i = 0; i < vm->maxcpus; i++) {
			struct vcpu *vcpu = &vm->vcpu[i];

			if (CPU_ISSET(i, &vm->suspended_cpus) ||
			    !CPU_ISSET(i, &vm->active_cpus)) {
				continue;
			}

			vcpu_lock(vcpu);
			VERIFY3U(vcpu->state, ==, VCPU_FROZEN);
			CPU_SET_ATOMIC(i, &vm->suspended_cpus);
			vcpu_unlock(vcpu);
		}

		VERIFY0(CPU_CMP(&vm->suspended_cpus, &vm->active_cpus));
	}

	vm_cleanup(vm, false);
	vm_init(vm, false);
	return (0);
}

bool
vm_is_paused(struct vm *vm)
{
	return (vm->is_paused);
}

int
vm_pause_instance(struct vm *vm)
{
	if (vm->is_paused) {
		return (EALREADY);
	}
	vm->is_paused = true;

	for (uint_t i = 0; i < vm->maxcpus; i++) {
		struct vcpu *vcpu = &vm->vcpu[i];

		if (!CPU_ISSET(i, &vm->active_cpus)) {
			continue;
		}
		vlapic_pause(vcpu->vlapic);

		/*
		 * vCPU-specific pause logic includes stashing any
		 * to-be-injected events in exit_intinfo where it can be
		 * accessed in a manner generic to the backend.
		 */
		ops->vmpause(vm->cookie, i);
	}
	vhpet_pause(vm->vhpet);
	vatpit_pause(vm->vatpit);
	vrtc_pause(vm->vrtc);

	return (0);
}

int
vm_resume_instance(struct vm *vm)
{
	if (!vm->is_paused) {
		return (EALREADY);
	}
	vm->is_paused = false;

	vrtc_resume(vm->vrtc);
	vatpit_resume(vm->vatpit);
	vhpet_resume(vm->vhpet);
	for (uint_t i = 0; i < vm->maxcpus; i++) {
		struct vcpu *vcpu = &vm->vcpu[i];

		if (!CPU_ISSET(i, &vm->active_cpus)) {
			continue;
		}
		vlapic_resume(vcpu->vlapic);
	}

	return (0);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	vm_object_t *obj;

	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
		return (ENOMEM);
	else
		return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	return (vmspace_unmap(vm->vmspace, gpa, gpa + len));
}

/*
 * Return 'true' if 'gpa' is allocated in the guest address space.
 *
 * This function is called in the context of a running vcpu which acts as
 * an implicit lock on 'vm->mem_maps[]'.
 */
bool
vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa)
{
	struct mem_map *mm;
	int i;

#ifdef INVARIANTS
	int hostcpu, state;
	state = vcpu_get_state(vm, vcpuid, &hostcpu);
	KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
	    ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
#endif

	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm->mem_maps[i];
		if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
			return (true);		/* 'gpa' is sysmem or devmem */
	}

	if (ppt_is_mmio(vm, gpa))
		return (true);		/* 'gpa' is pci passthru mmio */

	return (false);
}

int
vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem)
{
	struct mem_seg *seg;
	vm_object_t *obj;

	if (ident < 0 || ident >= VM_MAX_MEMSEGS)
		return (EINVAL);

	if (len == 0 || (len & PAGE_MASK))
		return (EINVAL);

	seg = &vm->mem_segs[ident];
	if (seg->object != NULL) {
		if (seg->len == len && seg->sysmem == sysmem)
			return (EEXIST);
		else
			return (EINVAL);
	}

	obj = vm_object_mem_allocate(len, vm->mem_transient);
	if (obj == NULL)
		return (ENOMEM);

	seg->len = len;
	seg->object = obj;
	seg->sysmem = sysmem;
	return (0);
}

int
vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    vm_object_t **objptr)
{
	struct mem_seg *seg;

	if (ident < 0 || ident >= VM_MAX_MEMSEGS)
		return (EINVAL);

	seg = &vm->mem_segs[ident];
	if (len)
		*len = seg->len;
	if (sysmem)
		*sysmem = seg->sysmem;
	if (objptr)
		*objptr = seg->object;
	return (0);
}

void
vm_free_memseg(struct vm *vm, int ident)
{
	struct mem_seg *seg;

	KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS,
	    ("%s: invalid memseg ident %d", __func__, ident));

	seg = &vm->mem_segs[ident];
	if (seg->object != NULL) {
		vm_object_release(seg->object);
		bzero(seg, sizeof (struct mem_seg));
	}
}

int
vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
    size_t len, int prot, int flags)
{
	struct mem_seg *seg;
	struct mem_map *m, *map;
	vm_ooffset_t last;
	int i, error;

	if (prot == 0 || (prot & ~(PROT_ALL)) != 0)
		return (EINVAL);

	if (flags & ~VM_MEMMAP_F_WIRED)
		return (EINVAL);

	if (segid < 0 || segid >= VM_MAX_MEMSEGS)
		return (EINVAL);

	seg = &vm->mem_segs[segid];
	if (seg->object == NULL)
		return (EINVAL);

	last = first + len;
	if (first < 0 || first >= last || last > seg->len)
		return (EINVAL);

	if ((gpa | first | last) & PAGE_MASK)
		return (EINVAL);

	map = NULL;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		m = &vm->mem_maps[i];
		if (m->len == 0) {
			map = m;
			break;
		}
	}

	if (map == NULL)
		return (ENOSPC);

	error = vmspace_map(vm->vmspace, seg->object, first, gpa, len, prot);
	if (error != 0)
		return (EFAULT);

	vm_object_reference(seg->object);

	if ((flags & VM_MEMMAP_F_WIRED) != 0) {
		error = vmspace_populate(vm->vmspace, gpa, gpa + len);
		if (error != 0) {
			VERIFY0(vmspace_unmap(vm->vmspace, gpa, gpa + len));
			return (EFAULT);
		}
	}

	map->gpa = gpa;
	map->len = len;
	map->segoff = first;
	map->segid = segid;
	map->prot = prot;
	map->flags = flags;
	return (0);
}
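
/*
 * Usage sketch (added, illustrative): backing a guest with 1 GiB of system
 * memory mapped at GPA 0 follows the memseg-then-memmap sequence, e.g.:
 *
 *	error = vm_alloc_memseg(vm, 0, 1UL << 30, true);
 *	if (error == 0) {
 *		error = vm_mmap_memseg(vm, 0, 0, 0, 1UL << 30,
 *		    PROT_ALL, VM_MEMMAP_F_WIRED);
 *	}
 *
 * In practice these are driven from userspace through the vmm ioctl
 * interface rather than called directly.
 */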

int
vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	struct mem_map *m;
	int i;

	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		m = &vm->mem_maps[i];
		if (m->gpa == gpa && m->len == len &&
		    (m->flags & VM_MEMMAP_F_IOMMU) == 0) {
			vm_free_memmap(vm, i);
			return (0);
		}
	}

	return (EINVAL);
}

int
vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
{
	struct mem_map *mm, *mmnext;
	int i;

	mmnext = NULL;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm->mem_maps[i];
		if (mm->len == 0 || mm->gpa < *gpa)
			continue;
		if (mmnext == NULL || mm->gpa < mmnext->gpa)
			mmnext = mm;
	}

	if (mmnext != NULL) {
		*gpa = mmnext->gpa;
		if (segid)
			*segid = mmnext->segid;
		if (segoff)
			*segoff = mmnext->segoff;
		if (len)
			*len = mmnext->len;
		if (prot)
			*prot = mmnext->prot;
		if (flags)
			*flags = mmnext->flags;
		return (0);
	} else {
		return (ENOENT);
	}
}

static void
vm_free_memmap(struct vm *vm, int ident)
{
	struct mem_map *mm;
	int error;

	mm = &vm->mem_maps[ident];
	if (mm->len) {
		error = vmspace_unmap(vm->vmspace, mm->gpa,
		    mm->gpa + mm->len);
		KASSERT(error == 0, ("%s: vmspace_unmap error %d",
		    __func__, error));
		bzero(mm, sizeof (struct mem_map));
	}
}

static __inline bool
sysmem_mapping(struct vm *vm, struct mem_map *mm)
{
	if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem)
		return (true);
	else
		return (false);
}

vm_paddr_t
vmm_sysmem_maxaddr(struct vm *vm)
{
	struct mem_map *mm;
	vm_paddr_t maxaddr;
	int i;

	maxaddr = 0;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm->mem_maps[i];
		if (sysmem_mapping(vm, mm)) {
			if (maxaddr < mm->gpa + mm->len)
				maxaddr = mm->gpa + mm->len;
		}
	}
	return (maxaddr);
}

static void
vm_iommu_modify(struct vm *vm, bool map)
{
	int i, sz;
	vm_paddr_t gpa, hpa;
	struct mem_map *mm;
	vm_client_t *vmc;

	sz = PAGE_SIZE;
	vmc = vmspace_client_alloc(vm->vmspace);

	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm->mem_maps[i];
		if (!sysmem_mapping(vm, mm))
			continue;

		if (map) {
			KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0,
			    ("iommu map found invalid memmap %lx/%lx/%x",
			    mm->gpa, mm->len, mm->flags));
			if ((mm->flags & VM_MEMMAP_F_WIRED) == 0)
				continue;
			mm->flags |= VM_MEMMAP_F_IOMMU;
		} else {
			if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0)
				continue;
			mm->flags &= ~VM_MEMMAP_F_IOMMU;
			KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0,
			    ("iommu unmap found invalid memmap %lx/%lx/%x",
			    mm->gpa, mm->len, mm->flags));
		}

		gpa = mm->gpa;
		while (gpa < mm->gpa + mm->len) {
			vm_page_t *vmp;

			vmp = vmc_hold(vmc, gpa, PROT_WRITE);
			ASSERT(vmp != NULL);
			hpa = ((uintptr_t)vmp_get_pfn(vmp) << PAGESHIFT);
			(void) vmp_release(vmp);

			/*
			 * When originally ported from FreeBSD, the logic for
			 * adding memory to the guest domain would
			 * simultaneously remove it from the host domain.  The
			 * justification for that is not clear, and FreeBSD has
			 * subsequently changed the behavior to not remove the
			 * memory from the host domain.
			 *
			 * Leaving the guest memory in the host domain for the
			 * life of the VM is necessary to make it available for
			 * DMA, such as through viona in the TX path.
			 */
			if (map) {
				iommu_create_mapping(vm->iommu, gpa, hpa, sz);
			} else {
				iommu_remove_mapping(vm->iommu, gpa, sz);
			}

			gpa += PAGE_SIZE;
		}
	}
	vmc_destroy(vmc);

	/*
	 * Invalidate the cached translations associated with the domain
	 * from which pages were removed.
	 */
	iommu_invalidate_tlb(vm->iommu);
}

int
vm_unassign_pptdev(struct vm *vm, int pptfd)
{
	int error;

	error = ppt_unassign_device(vm, pptfd);
	if (error)
		return (error);

	if (ppt_assigned_devices(vm) == 0)
		vm_iommu_modify(vm, false);

	return (0);
}

int
vm_assign_pptdev(struct vm *vm, int pptfd)
{
	int error;
	vm_paddr_t maxaddr;

	/* Set up the IOMMU to do the 'gpa' to 'hpa' translation */
	if (ppt_assigned_devices(vm) == 0) {
		KASSERT(vm->iommu == NULL,
		    ("vm_assign_pptdev: iommu must be NULL"));
		maxaddr = vmm_sysmem_maxaddr(vm);
		vm->iommu = iommu_create_domain(maxaddr);
		if (vm->iommu == NULL)
			return (ENXIO);
		vm_iommu_modify(vm, true);
	}

	error = ppt_assign_device(vm, pptfd);
	return (error);
}

int
vm_get_register(struct vm *vm, int vcpuid, int reg, uint64_t *retval)
{
	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	switch (reg) {
	case VM_REG_GUEST_XCR0:
		*retval = vcpu->guest_xcr0;
		return (0);
	default:
		return (VMGETREG(vm->cookie, vcpuid, reg, retval));
	}
}

int
vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
{
	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	int error;
	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	switch (reg) {
	case VM_REG_GUEST_RIP:
		error = VMSETREG(vm->cookie, vcpuid, reg, val);
		if (error == 0) {
			vcpu->nextrip = val;
		}
		return (error);
	case VM_REG_GUEST_XCR0:
		if (!validate_guest_xcr0(val, vmm_get_host_xcr0())) {
			return (EINVAL);
		}
		vcpu->guest_xcr0 = val;
		return (0);
	default:
		return (VMSETREG(vm->cookie, vcpuid, reg, val));
	}
}

static bool
is_descriptor_table(int reg)
{
	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (true);
	default:
		return (false);
	}
}

static bool
is_segment_register(int reg)
{
	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (true);
	default:
		return (false);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg, struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= vm->maxcpus)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg, const struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= vm->maxcpus)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

static int
translate_hma_xsave_result(hma_fpu_xsave_result_t res)
{
	switch (res) {
	case HFXR_OK:
		return (0);
	case HFXR_NO_SPACE:
		return (ENOSPC);
	case HFXR_BAD_ALIGN:
	case HFXR_UNSUP_FMT:
	case HFXR_UNSUP_FEAT:
	case HFXR_INVALID_DATA:
		return (EINVAL);
	default:
		panic("unexpected xsave result");
	}
}

int
vm_get_fpu(struct vm *vm, int vcpuid, void *buf, size_t len)
{
	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	hma_fpu_xsave_result_t res;

	res = hma_fpu_get_xsave_state(vcpu->guestfpu, buf, len);
	return (translate_hma_xsave_result(res));
}

int
vm_set_fpu(struct vm *vm, int vcpuid, void *buf, size_t len)
{
	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	hma_fpu_xsave_result_t res;

	res = hma_fpu_set_xsave_state(vcpu->guestfpu, buf, len);
	return (translate_hma_xsave_result(res));
}

int
vm_get_run_state(struct vm *vm, int vcpuid, uint32_t *state, uint8_t *sipi_vec)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus) {
		return (EINVAL);
	}

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	*state = vcpu->run_state;
	*sipi_vec = vcpu->sipi_vector;
	vcpu_unlock(vcpu);

	return (0);
}

int
vm_set_run_state(struct vm *vm, int vcpuid, uint32_t state, uint8_t sipi_vec)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus) {
		return (EINVAL);
	}
	if (!VRS_IS_VALID(state)) {
		return (EINVAL);
	}

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	vcpu->run_state = state;
	vcpu->sipi_vector = sipi_vec;
	vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT);
	vcpu_unlock(vcpu);

	return (0);
}

int
vm_track_dirty_pages(struct vm *vm, uint64_t gpa, size_t len, uint8_t *bitmap)
{
	vmspace_t *vms = vm_get_vmspace(vm);
	return (vmspace_track_dirty(vms, gpa, len, bitmap));
}

static void
restore_guest_fpustate(struct vcpu *vcpu)
{
	/* Save host FPU and restore guest FPU */
	fpu_stop_emulating();
	hma_fpu_start_guest(vcpu->guestfpu);

	/* restore guest XCR0 if XSAVE is enabled in the host */
	if (rcr4() & CR4_XSAVE)
		load_xcr(0, vcpu->guest_xcr0);

	/*
	 * The FPU is now "dirty" with the guest's state so turn on emulation
	 * to trap any access to the FPU by the host.
	 */
	fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{
	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest XCR0 and restore host XCR0 */
	if (rcr4() & CR4_XSAVE) {
		vcpu->guest_xcr0 = rxcr(0);
		load_xcr(0, vmm_get_host_xcr0());
	}

	/* save guest FPU and restore host FPU */
	fpu_stop_emulating();
	hma_fpu_stop_guest(vcpu->guestfpu);
	/*
	 * When the host state has been restored, we should not re-enable
	 * CR0.TS on illumos for eager FPU.
	 */
}

static int
vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
	struct vcpu *vcpu;
	int error;

	vcpu = &vm->vcpu[vcpuid];
	vcpu_assert_locked(vcpu);

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state.  This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE) {
			vcpu->reqidle = 1;
			vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT);
			cv_wait(&vcpu->state_cv, &vcpu->lock);
		}
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
		    "vcpu idle state"));
	}

	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
		    "mismatch for running vcpu", curcpu, vcpu->hostcpu));
	} else {
		KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
		    "vcpu that is not running", vcpu->hostcpu));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	default:
		error = 1;
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;
	if (newstate == VCPU_RUNNING)
		vcpu->hostcpu = curcpu;
	else
		vcpu->hostcpu = NOCPU;

	if (newstate == VCPU_IDLE) {
		cv_broadcast(&vcpu->state_cv);
	}

	return (0);
}
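
/*
 * Concrete example (added): the transition table above makes VCPU_FROZEN the
 * hub state.  A vCPU in VCPU_IDLE may only move to VCPU_FROZEN, so a request
 * such as IDLE -> RUNNING (skipping FROZEN) fails with EBUSY; every excursion
 * into RUNNING or SLEEPING must be bracketed by FROZEN.
 */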

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state_locked(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}

/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled)
{
	struct vcpu *vcpu;
	int vcpu_halted, vm_halted;
	bool userspace_exit = false;

	KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));

	vcpu = &vm->vcpu[vcpuid];
	vcpu_halted = 0;
	vm_halted = 0;

	vcpu_lock(vcpu);
	while (1) {
		/*
		 * Do a final check for pending interrupts (including NMI and
		 * INIT) before putting this thread to sleep.
		 */
		if (vm_nmi_pending(vm, vcpuid))
			break;
		if (vcpu_run_state_pending(vm, vcpuid))
			break;
		if (!intr_disabled) {
			if (vm_extint_pending(vm, vcpuid) ||
			    vlapic_pending_intr(vcpu->vlapic, NULL)) {
				break;
			}
		}

		/*
		 * Also check for software events which would cause a wake-up.
		 * This will set the appropriate exitcode directly, rather than
		 * requiring a trip through VM_RUN().
		 */
		if (vcpu_sleep_bailout_checks(vm, vcpuid)) {
			userspace_exit = true;
			break;
		}

		/*
		 * Some Linux guests implement "halt" by having all vcpus
		 * execute HLT with interrupts disabled.  'halted_cpus' keeps
		 * track of the vcpus that have entered this state.  When all
		 * vcpus enter the halted state the virtual machine is halted.
		 */
		if (intr_disabled) {
			if (!vcpu_halted && halt_detection_enabled) {
				vcpu_halted = 1;
				CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
			}
			if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
				vm_halted = 1;
				break;
			}
		}

		vcpu_ustate_change(vm, vcpuid, VU_IDLE);
		vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
		(void) cv_wait_sig(&vcpu->vcpu_cv, &vcpu->lock);
		vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
		vcpu_ustate_change(vm, vcpuid, VU_EMU_KERN);
	}

	if (vcpu_halted)
		CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);

	vcpu_unlock(vcpu);

	if (vm_halted) {
		(void) vm_suspend(vm, VM_SUSPEND_HALT);
	}

	return (userspace_exit ? -1 : 0);
}

static int
vm_handle_paging(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	vm_client_t *vmc = vcpu->vmclient;
	struct vm_exit *vme = &vcpu->exitinfo;
	const int ftype = vme->u.paging.fault_type;

	ASSERT0(vme->inst_length);
	ASSERT(ftype == PROT_READ || ftype == PROT_WRITE || ftype == PROT_EXEC);

	if (vmc_fault(vmc, vme->u.paging.gpa, ftype) != 0) {
		/*
		 * If the fault cannot be serviced, kick it out to userspace
		 * for handling (or more likely, halting the instance).
		 */
		return (-1);
	}

	return (0);
}

int
vm_service_mmio_read(struct vm *vm, int cpuid, uint64_t gpa, uint64_t *rval,
    int rsize)
{
	int err = ESRCH;

	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		struct vlapic *vlapic = vm_lapic(vm, cpuid);

		err = vlapic_mmio_read(vlapic, gpa, rval, rsize);
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		err = vioapic_mmio_read(vm, cpuid, gpa, rval, rsize);
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		err = vhpet_mmio_read(vm, cpuid, gpa, rval, rsize);
	}

	return (err);
}

int
vm_service_mmio_write(struct vm *vm, int cpuid, uint64_t gpa, uint64_t wval,
    int wsize)
{
	int err = ESRCH;

	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		struct vlapic *vlapic = vm_lapic(vm, cpuid);

		err = vlapic_mmio_write(vlapic, gpa, wval, wsize);
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		err = vioapic_mmio_write(vm, cpuid, gpa, wval, wsize);
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		err = vhpet_mmio_write(vm, cpuid, gpa, wval, wsize);
	}

	return (err);
}

static int
vm_handle_mmio_emul(struct vm *vm, int vcpuid)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	uint64_t inst_addr;
	int error, fault, cs_d;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
	vie = vcpu->vie_ctx;

	KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
	    __func__, vme->inst_length));

	inst_addr = vme->rip + vme->u.mmio_emul.cs_base;
	cs_d = vme->u.mmio_emul.cs_d;

	/* Fetch the faulting instruction */
	if (vie_needs_fetch(vie)) {
		error = vie_fetch_instruction(vie, vm, vcpuid, inst_addr,
		    &fault);
		if (error != 0) {
			return (error);
		} else if (fault) {
			/*
			 * If a fault during instruction fetch was encountered,
			 * it will have asserted that the appropriate exception
			 * be injected at next entry.  No further work is
			 * required.
			 */
			return (0);
		}
	}

	if (vie_decode_instruction(vie, vm, vcpuid, cs_d) != 0) {
		/* Dump (unrecognized) instruction bytes in userspace */
		vie_fallback_exitinfo(vie, vme);
		return (-1);
	}
	if (vme->u.mmio_emul.gla != VIE_INVALID_GLA &&
	    vie_verify_gla(vie, vm, vcpuid, vme->u.mmio_emul.gla) != 0) {
		/* Decoded GLA does not match GLA from VM exit state */
		vie_fallback_exitinfo(vie, vme);
		return (-1);
	}

repeat:
	error = vie_emulate_mmio(vie, vm, vcpuid);
	if (error < 0) {
		/*
		 * MMIO not handled by any of the in-kernel-emulated devices,
		 * so make a trip out to userspace for it.
		 */
		vie_exitinfo(vie, vme);
	} else if (error == EAGAIN) {
		/*
		 * Continue emulating the rep-prefixed instruction, which has
		 * not completed its iterations.
		 *
		 * In case this can be emulated in-kernel and has a high
		 * repetition count (causing a tight spin), it should be
		 * deferential to yield conditions.
		 */
		if (!vcpu_should_yield(vm, vcpuid)) {
			goto repeat;
		} else {
			/*
			 * Defer to the contending load by making a trip to
			 * userspace with a no-op (BOGUS) exit reason.
			 */
			vie_reset(vie);
			vme->exitcode = VM_EXITCODE_BOGUS;
			return (-1);
		}
	} else if (error == 0) {
		/* Update %rip now that instruction has been emulated */
		vie_advance_pc(vie, &vcpu->nextrip);
	}
	return (error);
}

static int
vm_handle_inout(struct vm *vm, int vcpuid, struct vm_exit *vme)
{
	struct vcpu *vcpu;
	struct vie *vie;
	int err;

	vcpu = &vm->vcpu[vcpuid];
	vie = vcpu->vie_ctx;

repeat:
	err = vie_emulate_inout(vie, vm, vcpuid);

	if (err < 0) {
		/*
		 * In/out not handled by any of the in-kernel-emulated devices,
		 * so make a trip out to userspace for it.
		 */
		vie_exitinfo(vie, vme);
		return (err);
	} else if (err == EAGAIN) {
		/*
		 * Continue emulating the rep-prefixed ins/outs, which has not
		 * completed its iterations.
		 *
		 * In case this can be emulated in-kernel and has a high
		 * repetition count (causing a tight spin), it should be
		 * deferential to yield conditions.
		 */
		if (!vcpu_should_yield(vm, vcpuid)) {
			goto repeat;
		} else {
			/*
			 * Defer to the contending load by making a trip to
			 * userspace with a no-op (BOGUS) exit reason.
			 */
			vie_reset(vie);
			vme->exitcode = VM_EXITCODE_BOGUS;
			return (-1);
		}
	} else if (err != 0) {
		/* Emulation failure.  Bail all the way out to userspace. */
		vme->exitcode = VM_EXITCODE_INST_EMUL;
		bzero(&vme->u.inst_emul, sizeof (vme->u.inst_emul));
		return (-1);
	}

	vie_advance_pc(vie, &vcpu->nextrip);
	return (0);
}

static int
vm_handle_inst_emul(struct vm *vm, int vcpuid)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	uint64_t cs_base;
	int error, fault, cs_d;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
	vie = vcpu->vie_ctx;

	vie_cs_info(vie, vm, vcpuid, &cs_base, &cs_d);

	/* Fetch the faulting instruction */
	ASSERT(vie_needs_fetch(vie));
	error = vie_fetch_instruction(vie, vm, vcpuid, vme->rip + cs_base,
	    &fault);
	if (error != 0) {
		return (error);
	} else if (fault) {
		/*
		 * If a fault during instruction fetch was encountered, it will
		 * have asserted that the appropriate exception be injected at
		 * next entry.  No further work is required.
		 */
		return (0);
	}

	if (vie_decode_instruction(vie, vm, vcpuid, cs_d) != 0) {
		/* Dump (unrecognized) instruction bytes in userspace */
		vie_fallback_exitinfo(vie, vme);
		return (-1);
	}

	error = vie_emulate_other(vie, vm, vcpuid);
	if (error != 0) {
		/*
		 * Instruction emulation was unable to complete successfully,
		 * so kick it out to userspace for handling.
		 */
		vie_fallback_exitinfo(vie, vme);
	} else {
		/* Update %rip now that instruction has been emulated */
		vie_advance_pc(vie, &vcpu->nextrip);
	}
	return (error);
}

static int
vm_handle_suspend(struct vm *vm, int vcpuid)
{
	int i;
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpuid];

	CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);

	/*
	 * Wait until all 'active_cpus' have suspended themselves.
	 */
	vcpu_lock(vcpu);
	vcpu_ustate_change(vm, vcpuid, VU_INIT);
	while (1) {
		int rc;

		if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
			break;
		}

		vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
		rc = cv_reltimedwait_sig(&vcpu->vcpu_cv, &vcpu->lock, hz,
		    TR_CLOCK_TICK);
		vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);

		/*
		 * If the userspace process driving the instance is killed, any
		 * vCPUs yet to be marked suspended (because they are not
		 * VM_RUN-ing in the kernel presently) will never reach that
		 * state.
		 *
		 * To avoid vm_handle_suspend() getting stuck in the kernel
		 * waiting for those vCPUs, offer a bail-out even though it
		 * means returning without all vCPUs in a suspended state.
		 */
		if (rc <= 0) {
			if ((curproc->p_flag & SEXITING) != 0) {
				break;
			}
		}
	}
	vcpu_unlock(vcpu);

	/*
	 * Wakeup the other sleeping vcpus and return to userspace.
	 */
	for (i = 0; i < vm->maxcpus; i++) {
		if (CPU_ISSET(i, &vm->suspended_cpus)) {
			vcpu_notify_event(vm, i);
		}
	}

	return (-1);
}

static int
vm_handle_reqidle(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
	vcpu->reqidle = 0;
	vcpu_unlock(vcpu);
	return (-1);
}

static int
vm_handle_run_state(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	bool handled = false;

	vcpu_lock(vcpu);
	while (1) {
		if ((vcpu->run_state & VRS_PEND_INIT) != 0) {
			vcpu_unlock(vcpu);
			VERIFY0(vcpu_arch_reset(vm, vcpuid, true));
			vcpu_lock(vcpu);

			vcpu->run_state &= ~(VRS_RUN | VRS_PEND_INIT);
			vcpu->run_state |= VRS_INIT;
		}

		if ((vcpu->run_state & (VRS_INIT | VRS_RUN | VRS_PEND_SIPI)) ==
		    (VRS_INIT | VRS_PEND_SIPI)) {
			const uint8_t vector = vcpu->sipi_vector;

			vcpu_unlock(vcpu);
			VERIFY0(vcpu_vector_sipi(vm, vcpuid, vector));
			vcpu_lock(vcpu);

			vcpu->run_state &= ~VRS_PEND_SIPI;
			vcpu->run_state |= VRS_RUN;
		}

		/*
		 * If the vCPU is now in the running state, there is no need to
		 * wait for anything prior to re-entry.
		 */
		if ((vcpu->run_state & VRS_RUN) != 0) {
			handled = true;
			break;
		}

		/*
		 * Also check for software events which would cause a wake-up.
		 * This will set the appropriate exitcode directly, rather than
		 * requiring a trip through VM_RUN().
		 */
		if (vcpu_sleep_bailout_checks(vm, vcpuid)) {
			break;
		}

		vcpu_ustate_change(vm, vcpuid, VU_IDLE);
		vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
		(void) cv_wait_sig(&vcpu->vcpu_cv, &vcpu->lock);
		vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
		vcpu_ustate_change(vm, vcpuid, VU_EMU_KERN);
	}
	vcpu_unlock(vcpu);

	return (handled ? 0 : -1);
}

static int
vm_rdmtrr(const struct vm_mtrr *mtrr, uint32_t num, uint64_t *val)
{
	switch (num) {
	case MSR_MTRRcap:
		*val = MTRR_CAP_WC | MTRR_CAP_FIXED | VMM_MTRR_VAR_MAX;
		break;
	case MSR_MTRRdefType:
		*val = mtrr->def_type;
		break;
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
		*val = mtrr->fixed4k[num - MSR_MTRR4kBase];
		break;
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
		*val = mtrr->fixed16k[num - MSR_MTRR16kBase];
		break;
	case MSR_MTRR64kBase:
		*val = mtrr->fixed64k;
		break;
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: {
		uint_t offset = num - MSR_MTRRVarBase;
		if (offset % 2 == 0) {
			*val = mtrr->var[offset / 2].base;
		} else {
			*val = mtrr->var[offset / 2].mask;
		}
		break;
	}
	default:
		return (EINVAL);
	}

	return (0);
}
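
/*
 * Worked example (added): with the interleaved base/mask layout, an access to
 * MSR number MSR_MTRRVarBase + 5 yields offset = 5, which is odd, so it
 * selects var[5 / 2].mask == var[2].mask, i.e. the PHYSMASK register of the
 * third variable range.  The same indexing is used by vm_wrmtrr() below.
 */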

static int
vm_wrmtrr(struct vm_mtrr *mtrr, uint32_t num, uint64_t val)
{
	switch (num) {
	case MSR_MTRRcap:
		/* MTRRCAP is read only */
		return (EPERM);
	case MSR_MTRRdefType:
		if (val & ~VMM_MTRR_DEF_MASK) {
			/* generate #GP on writes to reserved fields */
			return (EINVAL);
		}
		mtrr->def_type = val;
		break;
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
		mtrr->fixed4k[num - MSR_MTRR4kBase] = val;
		break;
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
		mtrr->fixed16k[num - MSR_MTRR16kBase] = val;
		break;
	case MSR_MTRR64kBase:
		mtrr->fixed64k = val;
		break;
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: {
		uint_t offset = num - MSR_MTRRVarBase;
		if (offset % 2 == 0) {
			if (val & ~VMM_MTRR_PHYSBASE_MASK) {
				/* generate #GP on writes to reserved fields */
				return (EINVAL);
			}
			mtrr->var[offset / 2].base = val;
		} else {
			if (val & ~VMM_MTRR_PHYSMASK_MASK) {
				/* generate #GP on writes to reserved fields */
				return (EINVAL);
			}
			mtrr->var[offset / 2].mask = val;
		}
		break;
	}
	default:
		return (EINVAL);
	}

	return (0);
}

static bool
is_mtrr_msr(uint32_t msr)
{
	switch (msr) {
	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
		return (true);
	default:
		return (false);
	}
}

static int
vm_handle_rdmsr(struct vm *vm, int vcpuid, struct vm_exit *vme)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	const uint32_t code = vme->u.msr.code;
	uint64_t val = 0;

	switch (code) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		val = 0;
		break;

	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
		if (vm_rdmtrr(&vcpu->mtrr, code, &val) != 0)
			vm_inject_gp(vm, vcpuid);
		break;

	case MSR_TSC:
		/*
		 * Get the guest TSC, applying necessary vCPU offsets.
		 *
		 * In all likelihood, this should always be handled in guest
		 * context by VMX/SVM rather than taking an exit.  (Both VMX
		 * and SVM pass through read-only access to MSR_TSC to the
		 * guest.)
		 *
		 * The VM-wide TSC offset and per-vCPU offset are included in
		 * the calculations of vcpu_tsc_offset(), so this is sufficient
		 * to use as the offset in our calculations.
		 *
		 * No physical offset is requested of vcpu_tsc_offset() since
		 * rdtsc_offset() takes care of that instead.
		 */
		val = calc_guest_tsc(rdtsc_offset(), vm->freq_multiplier,
		    vcpu_tsc_offset(vm, vcpuid, false));
		break;

	default:
		/*
		 * Anything not handled at this point will be kicked out to
		 * userspace for attempted processing there.
		 */
		return (-1);
	}

	VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RAX,
	    val & 0xffffffff));
	VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RDX,
	    val >> 32));
	return (0);
}
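
/*
 * Example (added): per the RDMSR convention, the 64-bit result is split
 * across EDX:EAX.  A value of 0x1122334455667788 is returned to the guest as
 * %rax = 0x55667788 and %rdx = 0x11223344, which is what the register writes
 * above implement.
 */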
static int
vm_handle_rdmsr(struct vm *vm, int vcpuid, struct vm_exit *vme)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	const uint32_t code = vme->u.msr.code;
	uint64_t val = 0;

	switch (code) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		val = 0;
		break;

	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
		if (vm_rdmtrr(&vcpu->mtrr, code, &val) != 0)
			vm_inject_gp(vm, vcpuid);
		break;

	case MSR_TSC:
		/*
		 * Get the guest TSC, applying necessary vCPU offsets.
		 *
		 * In all likelihood, this should always be handled in guest
		 * context by VMX/SVM rather than taking an exit.  (Both VMX
		 * and SVM pass through read-only access to MSR_TSC to the
		 * guest.)
		 *
		 * The VM-wide TSC offset and per-vCPU offset are included in
		 * the calculations of vcpu_tsc_offset(), so this is sufficient
		 * to use as the offset in our calculations.
		 *
		 * No physical offset is requested of vcpu_tsc_offset() since
		 * rdtsc_offset() takes care of that instead.
		 */
		val = calc_guest_tsc(rdtsc_offset(), vm->freq_multiplier,
		    vcpu_tsc_offset(vm, vcpuid, false));
		break;

	default:
		/*
		 * Anything not handled at this point will be kicked out to
		 * userspace for attempted processing there.
		 */
		return (-1);
	}

	VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RAX,
	    val & 0xffffffff));
	VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RDX,
	    val >> 32));
	return (0);
}

static int
vm_handle_wrmsr(struct vm *vm, int vcpuid, struct vm_exit *vme)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	const uint32_t code = vme->u.msr.code;
	const uint64_t val = vme->u.msr.wval;

	switch (code) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		/* Ignore writes */
		break;

	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
		if (vm_wrmtrr(&vcpu->mtrr, code, val) != 0)
			vm_inject_gp(vm, vcpuid);
		break;

	case MSR_TSC:
		/*
		 * The effect of writing the TSC MSR is that a subsequent read
		 * of the TSC would report that value written (plus any time
		 * elapsed between the write and the read).
		 *
		 * To calculate that per-vCPU offset, we can work backwards
		 * from the guest TSC at the time of write:
		 *
		 *	value = current guest TSC + vCPU offset
		 *
		 * so therefore:
		 *
		 *	value - current guest TSC = vCPU offset
		 */
		vcpu->tsc_offset = val - calc_guest_tsc(rdtsc_offset(),
		    vm->freq_multiplier, vm->tsc_offset);
		break;

	default:
		/*
		 * Anything not handled at this point will be kicked out to
		 * userspace for attempted processing there.
		 */
		return (-1);
	}

	return (0);
}
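/*
 * Illustrative sketch (not compiled, hypothetical values): the offset
 * derivation performed for a WRMSR of MSR_TSC above.  If the current guest
 * TSC reads as 1000 and the guest writes 250, the per-vCPU offset becomes
 * 250 - 1000 = -750 (wrapping as unsigned 64-bit arithmetic), so that a
 * subsequent read reports 250 plus whatever time has since elapsed.
 */
#if 0
static uint64_t
tsc_offset_for_write(uint64_t wval, uint64_t cur_guest_tsc)
{
	/* value = guest_tsc + offset  =>  offset = value - guest_tsc */
	return (wval - cur_guest_tsc);
}
#endif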
int
vm_suspend(struct vm *vm, enum vm_suspend_how how)
{
	if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
		return (EINVAL);

	if (atomic_cmpset_int((uint_t *)&vm->suspend, 0, how) == 0) {
		return (EALREADY);
	}

	/*
	 * Notify all active vcpus that they are now suspended.
	 */
	for (uint_t i = 0; i < vm->maxcpus; i++) {
		struct vcpu *vcpu = &vm->vcpu[i];

		vcpu_lock(vcpu);
		if (vcpu->state == VCPU_IDLE || vcpu->state == VCPU_FROZEN) {
			/*
			 * Any vCPUs not actively running or in HLT can be
			 * marked as suspended immediately.
			 */
			if (CPU_ISSET(i, &vm->active_cpus)) {
				CPU_SET_ATOMIC(i, &vm->suspended_cpus);
			}
		} else {
			/*
			 * Those which are running or in HLT will pick up the
			 * suspended state after notification.
			 */
			vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT);
		}
		vcpu_unlock(vcpu);
	}
	return (0);
}

void
vm_exit_run_state(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_RUN_STATE;
	vmm_stat_incr(vm, vcpuid, VMEXIT_RUN_STATE, 1);
}

/*
 * Some vmm resources, such as the lapic, may have CPU-specific resources
 * allocated to them which would benefit from migration onto the host CPU
 * which is processing the vcpu state.
 */
static void
vm_localize_resources(struct vm *vm, struct vcpu *vcpu)
{
	/*
	 * Localizing cyclic resources requires acquisition of cpu_lock, and
	 * doing so with kpreempt disabled is a recipe for deadlock disaster.
	 */
	VERIFY(curthread->t_preempt == 0);

	/*
	 * Do not bother with localization if this vCPU is about to return to
	 * the host CPU it was last localized to.
	 */
	if (vcpu->lastloccpu == curcpu)
		return;

	/*
	 * Localize system-wide resources to the primary boot vCPU.  While any
	 * of the other vCPUs may access them, it keeps the potential interrupt
	 * footprint constrained to CPUs involved with this instance.
	 */
	if (vcpu == &vm->vcpu[0]) {
		vhpet_localize_resources(vm->vhpet);
		vrtc_localize_resources(vm->vrtc);
		vatpit_localize_resources(vm->vatpit);
	}

	vlapic_localize_resources(vcpu->vlapic);

	vcpu->lastloccpu = curcpu;
}

static void
vmm_savectx(void *arg)
{
	vm_thread_ctx_t *vtc = arg;
	struct vm *vm = vtc->vtc_vm;
	const int vcpuid = vtc->vtc_vcpuid;

	if (ops->vmsavectx != NULL) {
		ops->vmsavectx(vm->cookie, vcpuid);
	}

	/*
	 * Account for going off-cpu, unless the vCPU is idled, where being
	 * off-cpu is the explicit point.
	 */
	if (vm->vcpu[vcpuid].ustate != VU_IDLE) {
		vtc->vtc_ustate = vm->vcpu[vcpuid].ustate;
		vcpu_ustate_change(vm, vcpuid, VU_SCHED);
	}

	/*
	 * If the CPU holds the restored guest FPU state, save it and restore
	 * the host FPU state before this thread goes off-cpu.
	 */
	if ((vtc->vtc_status & VTCS_FPU_RESTORED) != 0) {
		struct vcpu *vcpu = &vm->vcpu[vcpuid];

		save_guest_fpustate(vcpu);
		vtc->vtc_status &= ~VTCS_FPU_RESTORED;
	}
}

static void
vmm_restorectx(void *arg)
{
	vm_thread_ctx_t *vtc = arg;
	struct vm *vm = vtc->vtc_vm;
	const int vcpuid = vtc->vtc_vcpuid;

	/* Complete microstate accounting for vCPU being off-cpu */
	if (vm->vcpu[vcpuid].ustate != VU_IDLE) {
		vcpu_ustate_change(vm, vcpuid, vtc->vtc_ustate);
	}

	/*
	 * When coming back on-cpu, only restore the guest FPU status if the
	 * thread is in a context marked as requiring it.  This should be rare,
	 * occurring only when a future logic error results in a voluntary
	 * sleep during the VMRUN critical section.
	 *
	 * The common case will result in elision of the guest FPU state
	 * restoration, deferring that action until it is clearly necessary
	 * during vm_run.
	 */
	VERIFY((vtc->vtc_status & VTCS_FPU_RESTORED) == 0);
	if ((vtc->vtc_status & VTCS_FPU_CTX_CRITICAL) != 0) {
		struct vcpu *vcpu = &vm->vcpu[vcpuid];

		restore_guest_fpustate(vcpu);
		vtc->vtc_status |= VTCS_FPU_RESTORED;
	}

	if (ops->vmrestorectx != NULL) {
		ops->vmrestorectx(vm->cookie, vcpuid);
	}
}
/* Convenience defines for parsing vm_entry`cmd values */
#define	VEC_MASK_FLAGS	(VEC_FLAG_EXIT_CONSISTENT)
#define	VEC_MASK_CMD	(~VEC_MASK_FLAGS)

static int
vm_entry_actions(struct vm *vm, int vcpuid, const struct vm_entry *entry,
    struct vm_exit *vme)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	struct vie *vie = vcpu->vie_ctx;
	int err = 0;

	const uint_t cmd = entry->cmd & VEC_MASK_CMD;
	const uint_t flags = entry->cmd & VEC_MASK_FLAGS;

	switch (cmd) {
	case VEC_DEFAULT:
		break;
	case VEC_DISCARD_INSTR:
		vie_reset(vie);
		break;
	case VEC_FULFILL_MMIO:
		err = vie_fulfill_mmio(vie, &entry->u.mmio);
		if (err == 0) {
			err = vie_emulate_mmio(vie, vm, vcpuid);
			if (err == 0) {
				vie_advance_pc(vie, &vcpu->nextrip);
			} else if (err < 0) {
				vie_exitinfo(vie, vme);
			} else if (err == EAGAIN) {
				/*
				 * Clear the instruction emulation state in
				 * order to re-enter VM context and continue
				 * this 'rep <instruction>'
				 */
				vie_reset(vie);
				err = 0;
			}
		}
		break;
	case VEC_FULFILL_INOUT:
		err = vie_fulfill_inout(vie, &entry->u.inout);
		if (err == 0) {
			err = vie_emulate_inout(vie, vm, vcpuid);
			if (err == 0) {
				vie_advance_pc(vie, &vcpu->nextrip);
			} else if (err < 0) {
				vie_exitinfo(vie, vme);
			} else if (err == EAGAIN) {
				/*
				 * Clear the instruction emulation state in
				 * order to re-enter VM context and continue
				 * this 'rep ins/outs'
				 */
				vie_reset(vie);
				err = 0;
			}
		}
		break;
	default:
		return (EINVAL);
	}

	/*
	 * Pay heed to requests to exit once the vCPU is in a consistent
	 * state, at least when we are not immediately bound for another exit
	 * due to multi-part instruction emulation or related causes.
	 */
	if ((flags & VEC_FLAG_EXIT_CONSISTENT) != 0 && err == 0) {
		vcpu->reqconsist = true;
	}

	return (err);
}

static int
vm_loop_checks(struct vm *vm, int vcpuid, struct vm_exit *vme)
{
	struct vie *vie;

	vie = vm->vcpu[vcpuid].vie_ctx;

	if (vie_pending(vie)) {
		/*
		 * Userspace has not fulfilled the pending needs of the
		 * instruction emulation, so bail back out.
		 */
		vie_exitinfo(vie, vme);
		return (-1);
	}

	return (0);
}
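/*
 * Illustrative sketch (not compiled): vm_entry`cmd packs a command value in
 * the low bits with flags or-ed above it, as split apart by the VEC_MASK_*
 * defines used in vm_entry_actions() above.  For instance, a userspace value
 * of (VEC_FULFILL_MMIO | VEC_FLAG_EXIT_CONSISTENT) both fulfills a pending
 * MMIO access and requests an exit once the vCPU reaches a consistent state.
 */
#if 0
static void
vec_split(uint_t raw, uint_t *cmd, uint_t *flags)
{
	*cmd = raw & VEC_MASK_CMD;
	*flags = raw & VEC_MASK_FLAGS;
}
#endif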
int
vm_run(struct vm *vm, int vcpuid, const struct vm_entry *entry)
{
	int error;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	bool intr_disabled;
	int affinity_type = CPU_CURRENT;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);
	if (!CPU_ISSET(vcpuid, &vm->active_cpus))
		return (EINVAL);
	if (vm->is_paused) {
		return (EBUSY);
	}

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	vcpu_ustate_change(vm, vcpuid, VU_EMU_KERN);

	vcpu->vtc.vtc_status = 0;
	ctxop_attach(curthread, vcpu->ctxop);

	error = vm_entry_actions(vm, vcpuid, entry, vme);
	if (error != 0) {
		goto exit;
	}

restart:
	error = vm_loop_checks(vm, vcpuid, vme);
	if (error != 0) {
		goto exit;
	}

	thread_affinity_set(curthread, affinity_type);
	/*
	 * Resource localization should happen after the CPU affinity for the
	 * thread has been set to ensure that access from restricted contexts,
	 * such as VMX-accelerated APIC operations, can occur without inducing
	 * cyclic cross-calls.
	 *
	 * This must be done prior to disabling kpreempt via critical_enter().
	 */
	vm_localize_resources(vm, vcpu);
	affinity_type = CPU_CURRENT;
	critical_enter();

	/* Force a trip through update_sregs to reload %fs/%gs and friends */
	PCB_SET_UPDATE_SEGS(&ttolwp(curthread)->lwp_pcb);

	if ((vcpu->vtc.vtc_status & VTCS_FPU_RESTORED) == 0) {
		restore_guest_fpustate(vcpu);
		vcpu->vtc.vtc_status |= VTCS_FPU_RESTORED;
	}
	vcpu->vtc.vtc_status |= VTCS_FPU_CTX_CRITICAL;

	vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
	error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip);
	vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

	/*
	 * Once clear of the delicate contexts comprising the VM_RUN handler,
	 * thread CPU affinity can be loosened while other processing occurs.
	 */
	vcpu->vtc.vtc_status &= ~VTCS_FPU_CTX_CRITICAL;
	thread_affinity_clear(curthread);
	critical_exit();

	if (error != 0) {
		/* Communicate out any error from VMRUN() above */
		goto exit;
	}

	vcpu->nextrip = vme->rip + vme->inst_length;
	switch (vme->exitcode) {
	case VM_EXITCODE_REQIDLE:
		error = vm_handle_reqidle(vm, vcpuid);
		break;
	case VM_EXITCODE_RUN_STATE:
		error = vm_handle_run_state(vm, vcpuid);
		break;
	case VM_EXITCODE_SUSPENDED:
		error = vm_handle_suspend(vm, vcpuid);
		break;
	case VM_EXITCODE_IOAPIC_EOI:
		vioapic_process_eoi(vm, vcpuid,
		    vme->u.ioapic_eoi.vector);
		break;
	case VM_EXITCODE_HLT:
		intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
		error = vm_handle_hlt(vm, vcpuid, intr_disabled);
		break;
	case VM_EXITCODE_PAGING:
		error = vm_handle_paging(vm, vcpuid);
		break;
	case VM_EXITCODE_MMIO_EMUL:
		error = vm_handle_mmio_emul(vm, vcpuid);
		break;
	case VM_EXITCODE_INOUT:
		error = vm_handle_inout(vm, vcpuid, vme);
		break;
	case VM_EXITCODE_INST_EMUL:
		error = vm_handle_inst_emul(vm, vcpuid);
		break;
	case VM_EXITCODE_MONITOR:
	case VM_EXITCODE_MWAIT:
	case VM_EXITCODE_VMINSN:
		vm_inject_ud(vm, vcpuid);
		break;
	case VM_EXITCODE_RDMSR:
		error = vm_handle_rdmsr(vm, vcpuid, vme);
		break;
	case VM_EXITCODE_WRMSR:
		error = vm_handle_wrmsr(vm, vcpuid, vme);
		break;
	case VM_EXITCODE_HT:
		affinity_type = CPU_BEST;
		break;
	case VM_EXITCODE_MTRAP:
		VERIFY0(vm_suspend_cpu(vm, vcpuid));
		error = -1;
		break;
	default:
		/* handled in userland */
		error = -1;
		break;
	}

	if (error == 0) {
		/* VM exit conditions handled in-kernel, continue running */
		goto restart;
	}

exit:
	kpreempt_disable();
	ctxop_detach(curthread, vcpu->ctxop);
	/* Make sure all of the needed vCPU context state is saved */
	vmm_savectx(&vcpu->vtc);
	kpreempt_enable();

	vcpu_ustate_change(vm, vcpuid, VU_EMU_USER);
	return (error);
}

int
vm_restart_instruction(void *arg, int vcpuid)
{
	struct vm *vm;
	struct vcpu *vcpu;
	enum vcpu_state state;
	uint64_t rip;
	int error;

	vm = arg;
	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];
	state = vcpu_get_state(vm, vcpuid, NULL);
	if (state == VCPU_RUNNING) {
		/*
		 * When a vcpu is "running" the next instruction is determined
		 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
		 * Thus setting 'inst_length' to zero will cause the current
		 * instruction to be restarted.
		 */
		vcpu->exitinfo.inst_length = 0;
	} else if (state == VCPU_FROZEN) {
		/*
		 * When a vcpu is "frozen" it is outside the critical section
		 * around VMRUN() and 'nextrip' points to the next instruction.
		 * Thus instruction restart is achieved by setting 'nextrip'
		 * to the vcpu's %rip.
		 */
		error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
		KASSERT(!error, ("%s: error %d getting rip", __func__, error));
		vcpu->nextrip = rip;
	} else {
		panic("%s: invalid state %d", __func__, state);
	}
	return (0);
}
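/*
 * Illustrative sketch (not compiled, hypothetical values): the two restart
 * paths in vm_restart_instruction() above are equivalent in effect.  With
 * rip = 0x1000 and inst_length = 3, a RUNNING vCPU would normally resume at
 * 0x1003; zeroing inst_length resumes at 0x1000, just as pointing nextrip
 * back at %rip does for a FROZEN vCPU.
 */
#if 0
static uint64_t
resume_rip(uint64_t rip, uint8_t inst_length, bool restart)
{
	return (rip + (restart ? 0 : inst_length));
}
#endif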
int
vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	if (VM_INTINFO_PENDING(info)) {
		const uint32_t type = VM_INTINFO_TYPE(info);
		const uint8_t vector = VM_INTINFO_VECTOR(info);

		if (type == VM_INTINFO_NMI && vector != IDT_NMI)
			return (EINVAL);
		if (type == VM_INTINFO_HWEXCP && vector >= 32)
			return (EINVAL);
		if (info & VM_INTINFO_MASK_RSVD)
			return (EINVAL);
	} else {
		info = 0;
	}
	vcpu->exit_intinfo = info;
	return (0);
}

enum exc_class {
	EXC_BENIGN,
	EXC_CONTRIBUTORY,
	EXC_PAGEFAULT
};

#define	IDT_VE	20	/* Virtualization Exception (Intel specific) */

static enum exc_class
exception_class(uint64_t info)
{
	ASSERT(VM_INTINFO_PENDING(info));

	/* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
	switch (VM_INTINFO_TYPE(info)) {
	case VM_INTINFO_HWINTR:
	case VM_INTINFO_SWINTR:
	case VM_INTINFO_NMI:
		return (EXC_BENIGN);
	default:
		/*
		 * Hardware exception.
		 *
		 * SVM and VT-x use identical type values to represent NMI,
		 * hardware interrupt and software interrupt.
		 *
		 * SVM uses type '3' for all exceptions.  VT-x uses type '3'
		 * for exceptions except #BP and #OF.  #BP and #OF use a type
		 * value of '5' or '6'.  Therefore we don't check for explicit
		 * values of 'type' to classify 'intinfo' into a hardware
		 * exception.
		 */
		break;
	}

	switch (VM_INTINFO_VECTOR(info)) {
	case IDT_PF:
	case IDT_VE:
		return (EXC_PAGEFAULT);
	case IDT_DE:
	case IDT_TS:
	case IDT_NP:
	case IDT_SS:
	case IDT_GP:
		return (EXC_CONTRIBUTORY);
	default:
		return (EXC_BENIGN);
	}
}

/*
 * Fetch event pending injection into the guest, if one exists.
 *
 * Returns true if an event is to be injected (which is placed in `retinfo`).
 */
bool
vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	const uint64_t info1 = vcpu->exit_intinfo;
	vcpu->exit_intinfo = 0;
	const uint64_t info2 = vcpu->exc_pending;
	vcpu->exc_pending = 0;

	if (VM_INTINFO_PENDING(info1) && VM_INTINFO_PENDING(info2)) {
		/*
		 * If an exception occurs while attempting to call the
		 * double-fault handler the processor enters shutdown mode
		 * (aka triple fault).
		 */
		if (VM_INTINFO_TYPE(info1) == VM_INTINFO_HWEXCP &&
		    VM_INTINFO_VECTOR(info1) == IDT_DF) {
			(void) vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
			*retinfo = 0;
			return (false);
		}
		/*
		 * "Conditions for Generating a Double Fault"
		 * Intel SDM, Vol3, Table 6-5
		 */
		const enum exc_class exc1 = exception_class(info1);
		const enum exc_class exc2 = exception_class(info2);
		if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
		    (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
			/* Convert nested fault into a double fault. */
			*retinfo =
			    VM_INTINFO_VALID |
			    VM_INTINFO_DEL_ERRCODE |
			    VM_INTINFO_HWEXCP |
			    IDT_DF;
		} else {
			/* Handle exceptions serially */
			vcpu->exit_intinfo = info1;
			*retinfo = info2;
		}
		return (true);
	} else if (VM_INTINFO_PENDING(info1)) {
		*retinfo = info1;
		return (true);
	} else if (VM_INTINFO_PENDING(info2)) {
		*retinfo = info2;
		return (true);
	}

	return (false);
}
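/*
 * Illustrative sketch (not compiled): the class combinations from Intel SDM
 * Vol 3 Table 6-5 which vm_entry_intinfo() above escalates to #DF.  Any
 * other pairing is delivered serially: the first event is re-queued and the
 * second is injected on its own.
 */
#if 0
static bool
becomes_double_fault(enum exc_class exc1, enum exc_class exc2)
{
	/* e.g. #GP followed by #GP, or #PF followed by #PF or #GP */
	return ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
	    (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN));
}
#endif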
int
vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];
	*info1 = vcpu->exit_intinfo;
	*info2 = vcpu->exc_pending;
	return (0);
}

int
vm_inject_exception(struct vm *vm, int vcpuid, uint8_t vector,
    bool errcode_valid, uint32_t errcode, bool restart_instruction)
{
	struct vcpu *vcpu;
	uint64_t regval;
	int error;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	if (vector >= 32)
		return (EINVAL);

	/*
	 * NMIs are to be injected via their own specialized path using
	 * vm_inject_nmi().
	 */
	if (vector == IDT_NMI) {
		return (EINVAL);
	}

	/*
	 * A double fault exception should never be injected directly into
	 * the guest.  It is a derived exception that results from specific
	 * combinations of nested faults.
	 */
	if (vector == IDT_DF) {
		return (EINVAL);
	}

	vcpu = &vm->vcpu[vcpuid];

	if (VM_INTINFO_PENDING(vcpu->exc_pending)) {
		/* Unable to inject exception due to one already pending */
		return (EBUSY);
	}

	if (errcode_valid) {
		/*
		 * Exceptions don't deliver an error code in real mode.
		 */
		error = vm_get_register(vm, vcpuid, VM_REG_GUEST_CR0, &regval);
		VERIFY0(error);
		if ((regval & CR0_PE) == 0) {
			errcode_valid = false;
		}
	}

	/*
	 * From section 26.6.1 "Interruptibility State" in Intel SDM:
	 *
	 * Event blocking by "STI" or "MOV SS" is cleared after guest executes
	 * one instruction or incurs an exception.
	 */
	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
	VERIFY0(error);

	if (restart_instruction) {
		VERIFY0(vm_restart_instruction(vm, vcpuid));
	}

	uint64_t val = VM_INTINFO_VALID | VM_INTINFO_HWEXCP | vector;
	if (errcode_valid) {
		val |= VM_INTINFO_DEL_ERRCODE;
		val |= (uint64_t)errcode << VM_INTINFO_SHIFT_ERRCODE;
	}
	vcpu->exc_pending = val;
	return (0);
}
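/*
 * Illustrative sketch (not compiled, hypothetical values): the intinfo
 * encoding staged at the end of vm_inject_exception() above.  A #GP
 * (vector 13) carrying error code 0x10 would be recorded as a valid
 * hardware-exception event for vector 13 with the error code shifted into
 * its field.
 */
#if 0
static uint64_t
hwexcp_intinfo(uint8_t vector, bool has_errcode, uint32_t errcode)
{
	uint64_t val = VM_INTINFO_VALID | VM_INTINFO_HWEXCP | vector;

	if (has_errcode) {
		val |= VM_INTINFO_DEL_ERRCODE;
		val |= (uint64_t)errcode << VM_INTINFO_SHIFT_ERRCODE;
	}
	return (val);
}
#endif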
void
vm_inject_ud(struct vm *vm, int vcpuid)
{
	VERIFY0(vm_inject_exception(vm, vcpuid, IDT_UD, false, 0, true));
}

void
vm_inject_gp(struct vm *vm, int vcpuid)
{
	VERIFY0(vm_inject_exception(vm, vcpuid, IDT_GP, true, 0, true));
}

void
vm_inject_ac(struct vm *vm, int vcpuid, uint32_t errcode)
{
	VERIFY0(vm_inject_exception(vm, vcpuid, IDT_AC, true, errcode, true));
}

void
vm_inject_ss(struct vm *vm, int vcpuid, uint32_t errcode)
{
	VERIFY0(vm_inject_exception(vm, vcpuid, IDT_SS, true, errcode, true));
}

void
vm_inject_pf(struct vm *vm, int vcpuid, uint32_t errcode, uint64_t cr2)
{
	VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2));
	VERIFY0(vm_inject_exception(vm, vcpuid, IDT_PF, true, errcode, true));
}

static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");

int
vm_inject_nmi(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->nmi_pending = true;
	vcpu_notify_event(vm, vcpuid);
	return (0);
}

bool
vm_nmi_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];

	return (vcpu->nmi_pending);
}

void
vm_nmi_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];

	ASSERT(vcpu->nmi_pending);

	vcpu->nmi_pending = false;
	vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
}

static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");

int
vm_inject_extint(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->extint_pending = true;
	vcpu_notify_event(vm, vcpuid);
	return (0);
}

bool
vm_extint_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];

	return (vcpu->extint_pending);
}

void
vm_extint_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];

	ASSERT(vcpu->extint_pending);

	vcpu->extint_pending = false;
	vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
}

int
vm_inject_init(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];
	vcpu_lock(vcpu);
	vcpu->run_state |= VRS_PEND_INIT;
	/*
	 * As part of queuing the INIT request, clear any pending SIPI.  It
	 * would not otherwise survive across the reset of the vCPU when it
	 * undergoes the requested INIT.  We would not want it to linger when
	 * it could be mistaken as a subsequent (after the INIT) SIPI request.
	 */
	vcpu->run_state &= ~VRS_PEND_SIPI;
	vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT);

	vcpu_unlock(vcpu);
	return (0);
}
int
vm_inject_sipi(struct vm *vm, int vcpuid, uint8_t vector)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];
	vcpu_lock(vcpu);
	vcpu->run_state |= VRS_PEND_SIPI;
	vcpu->sipi_vector = vector;
	/* SIPI is only actionable if the CPU is waiting in INIT state */
	if ((vcpu->run_state & (VRS_INIT | VRS_RUN)) == VRS_INIT) {
		vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT);
	}
	vcpu_unlock(vcpu);
	return (0);
}

bool
vcpu_run_state_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	ASSERT(vcpuid >= 0 && vcpuid < vm->maxcpus);
	vcpu = &vm->vcpu[vcpuid];

	/* Of interest: vCPU not in running state or with pending INIT */
	return ((vcpu->run_state & (VRS_RUN | VRS_PEND_INIT)) != VRS_RUN);
}

int
vcpu_arch_reset(struct vm *vm, int vcpuid, bool init_only)
{
	struct seg_desc desc;
	const enum vm_reg_name clear_regs[] = {
		VM_REG_GUEST_CR2,
		VM_REG_GUEST_CR3,
		VM_REG_GUEST_CR4,
		VM_REG_GUEST_RAX,
		VM_REG_GUEST_RBX,
		VM_REG_GUEST_RCX,
		VM_REG_GUEST_RSI,
		VM_REG_GUEST_RDI,
		VM_REG_GUEST_RBP,
		VM_REG_GUEST_RSP,
		VM_REG_GUEST_R8,
		VM_REG_GUEST_R9,
		VM_REG_GUEST_R10,
		VM_REG_GUEST_R11,
		VM_REG_GUEST_R12,
		VM_REG_GUEST_R13,
		VM_REG_GUEST_R14,
		VM_REG_GUEST_R15,
		VM_REG_GUEST_DR0,
		VM_REG_GUEST_DR1,
		VM_REG_GUEST_DR2,
		VM_REG_GUEST_DR3,
		VM_REG_GUEST_EFER,
	};
	const enum vm_reg_name data_segs[] = {
		VM_REG_GUEST_SS,
		VM_REG_GUEST_DS,
		VM_REG_GUEST_ES,
		VM_REG_GUEST_FS,
		VM_REG_GUEST_GS,
	};
	struct vcpu *vcpu = &vm->vcpu[vcpuid];

	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	for (uint_t i = 0; i < nitems(clear_regs); i++) {
		VERIFY0(vm_set_register(vm, vcpuid, clear_regs[i], 0));
	}

	VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, 2));
	VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RIP, 0xfff0));
	VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_CR0, 0x60000010));

	/*
	 * The prescribed contents of %rdx differ slightly between the Intel
	 * and AMD architectural definitions.  The former expects the Extended
	 * Model in bits 16-19 where the latter expects all of the Family,
	 * Model, and Stepping to be there.  Common boot ROMs appear to
	 * disregard this anyway, so we stick with a compromise value similar
	 * to what is spelled out in the Intel SDM.
	 */
	VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RDX, 0x600));

	VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_DR6, 0xffff0ff0));
	VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_DR7, 0x400));

	/* CS: Present, R/W, Accessed */
	desc.access = 0x0093;
	desc.base = 0xffff0000;
	desc.limit = 0xffff;
	VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_CS, &desc));
	VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_CS, 0xf000));

	/* SS, DS, ES, FS, GS: Present, R/W, Accessed */
	desc.access = 0x0093;
	desc.base = 0;
	desc.limit = 0xffff;
	for (uint_t i = 0; i < nitems(data_segs); i++) {
		VERIFY0(vm_set_seg_desc(vm, vcpuid, data_segs[i], &desc));
		VERIFY0(vm_set_register(vm, vcpuid, data_segs[i], 0));
	}

	/* GDTR, IDTR */
	desc.base = 0;
	desc.limit = 0xffff;
	VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_GDTR, &desc));
	VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_IDTR, &desc));

	/* LDTR: Present, LDT */
	desc.access = 0x0082;
	desc.base = 0;
	desc.limit = 0xffff;
	VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_LDTR, &desc));
	VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_LDTR, 0));

	/* TR: Present, 32-bit TSS */
	desc.access = 0x008b;
	desc.base = 0;
	desc.limit = 0xffff;
	VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_TR, &desc));
	VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_TR, 0));

	vlapic_reset(vm_lapic(vm, vcpuid));

	VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0));

	vcpu->exit_intinfo = 0;
	vcpu->exc_pending = 0;
	vcpu->nmi_pending = false;
	vcpu->extint_pending = false;

	/*
	 * A CPU reset caused by power-on or system reset clears more state
	 * than one which is triggered from an INIT IPI.
	 */
	if (!init_only) {
		vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
		(void) hma_fpu_init(vcpu->guestfpu);

		/* XXX: clear MSRs and other pieces */
		bzero(&vcpu->mtrr, sizeof (vcpu->mtrr));
	}

	return (0);
}

static int
vcpu_vector_sipi(struct vm *vm, int vcpuid, uint8_t vector)
{
	struct seg_desc desc;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	/* CS: Present, R/W, Accessed */
	desc.access = 0x0093;
	desc.base = (uint64_t)vector << 12;
	desc.limit = 0xffff;
	VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_CS, &desc));
	VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_CS,
	    (uint64_t)vector << 8));

	VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RIP, 0));

	return (0);
}
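/*
 * Illustrative sketch (not compiled): the real-mode addressing behind the
 * values programmed above.  At reset, %cs.base 0xffff0000 plus %rip 0xfff0
 * places the first fetch at the architectural reset vector 0xfffffff0.  For
 * a SIPI with a (hypothetical) vector of 0x9a, the selector becomes 0x9a00
 * and the base 0x9a000 (selector * 16), so execution starts at physical
 * address 0x9a000 with %rip 0.
 */
#if 0
static uint64_t
start_fetch_pa(uint64_t cs_base, uint64_t rip)
{
	return (cs_base + rip);
}
#endif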
int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	if (vcpu < 0 || vcpu >= vm->maxcpus)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	if (vcpu < 0 || vcpu >= vm->maxcpus)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMSETCAP(vm->cookie, vcpu, type, val));
}

vcpu_cpuid_config_t *
vm_cpuid_config(struct vm *vm, int vcpuid)
{
	ASSERT3S(vcpuid, >=, 0);
	ASSERT3S(vcpuid, <, VM_MAXCPU);

	return (&vm->vcpu[vcpuid].cpuid_cfg);
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	ASSERT3S(cpu, >=, 0);
	ASSERT3S(cpu, <, VM_MAXCPU);

	return (vm->vcpu[cpu].vlapic);
}

struct vioapic *
vm_ioapic(struct vm *vm)
{
	return (vm->vioapic);
}

struct vhpet *
vm_hpet(struct vm *vm)
{
	return (vm->vhpet);
}

void *
vm_iommu_domain(struct vm *vm)
{
	return (vm->iommu);
}

int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
	int error;
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		panic("vcpu_set_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	error = vcpu_set_state_locked(vm, vcpuid, newstate, from_idle);
	vcpu_unlock(vcpu);

	return (error);
}

enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
{
	struct vcpu *vcpu;
	enum vcpu_state state;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		panic("vcpu_get_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	state = vcpu->state;
	if (hostcpu != NULL)
		*hostcpu = vcpu->hostcpu;
	vcpu_unlock(vcpu);

	return (state);
}

/*
 * Calculate the TSC offset for a vCPU, applying physical-CPU adjustments if
 * requested.  The offset calculations include the VM-wide TSC offset.
 */
uint64_t
vcpu_tsc_offset(struct vm *vm, int vcpuid, bool phys_adj)
{
	ASSERT(vcpuid >= 0 && vcpuid < vm->maxcpus);

	uint64_t vcpu_off = vm->tsc_offset + vm->vcpu[vcpuid].tsc_offset;

	if (phys_adj) {
		/* Include any offset for the current physical CPU too */
		vcpu_off += vmm_host_tsc_delta();
	}

	return (vcpu_off);
}
uint64_t
vm_get_freq_multiplier(struct vm *vm)
{
	return (vm->freq_multiplier);
}

/* Normalize hrtime against the boot time for a VM */
hrtime_t
vm_normalize_hrtime(struct vm *vm, hrtime_t hrt)
{
	/* To avoid underflow/overflow UB, perform math as unsigned */
	return ((hrtime_t)((uint64_t)hrt - (uint64_t)vm->boot_hrtime));
}

/* Denormalize hrtime against the boot time for a VM */
hrtime_t
vm_denormalize_hrtime(struct vm *vm, hrtime_t hrt)
{
	/* To avoid underflow/overflow UB, perform math as unsigned */
	return ((hrtime_t)((uint64_t)hrt + (uint64_t)vm->boot_hrtime));
}
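/*
 * Illustrative sketch (not compiled): performing the normalize/denormalize
 * math through uint64_t makes any wrap-around well defined, so the pair of
 * operations round-trips exactly, even for an hrtime_t which precedes
 * boot_hrtime and would underflow a signed subtraction.
 */
#if 0
static hrtime_t
hrtime_roundtrip(struct vm *vm, hrtime_t hrt)
{
	/* Expected to equal hrt for any boot_hrtime */
	return (vm_denormalize_hrtime(vm, vm_normalize_hrtime(vm, hrt)));
}
#endif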
int
vm_activate_cpu(struct vm *vm, int vcpuid)
{
	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	if (CPU_ISSET(vcpuid, &vm->active_cpus))
		return (EBUSY);

	if (vm->suspend != 0) {
		return (EBUSY);
	}

	CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);

	/*
	 * It is possible that this vCPU was undergoing activation at the same
	 * time that the VM was being suspended.  If that happens to be the
	 * case, it should reflect the suspended state immediately.
	 */
	if (atomic_load_acq_int((uint_t *)&vm->suspend) != 0) {
		CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);
	}

	return (0);
}

int
vm_suspend_cpu(struct vm *vm, int vcpuid)
{
	int i;

	if (vcpuid < -1 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	if (vcpuid == -1) {
		vm->debug_cpus = vm->active_cpus;
		for (i = 0; i < vm->maxcpus; i++) {
			if (CPU_ISSET(i, &vm->active_cpus))
				vcpu_notify_event(vm, i);
		}
	} else {
		if (!CPU_ISSET(vcpuid, &vm->active_cpus))
			return (EINVAL);

		CPU_SET_ATOMIC(vcpuid, &vm->debug_cpus);
		vcpu_notify_event(vm, vcpuid);
	}
	return (0);
}

int
vm_resume_cpu(struct vm *vm, int vcpuid)
{
	if (vcpuid < -1 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	if (vcpuid == -1) {
		CPU_ZERO(&vm->debug_cpus);
	} else {
		if (!CPU_ISSET(vcpuid, &vm->debug_cpus))
			return (EINVAL);

		CPU_CLR_ATOMIC(vcpuid, &vm->debug_cpus);
	}
	return (0);
}

static bool
vcpu_bailout_checks(struct vm *vm, int vcpuid, bool on_entry,
    uint64_t entry_rip)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	struct vm_exit *vme = &vcpu->exitinfo;
	bool bail = false;

	ASSERT(vcpuid >= 0 && vcpuid < vm->maxcpus);

	if (vm->suspend) {
		if (on_entry) {
			VERIFY(vm->suspend > VM_SUSPEND_NONE &&
			    vm->suspend < VM_SUSPEND_LAST);

			vme->exitcode = VM_EXITCODE_SUSPENDED;
			vme->u.suspended.how = vm->suspend;
		} else {
			/*
			 * Handling VM suspend is complicated, so if that
			 * condition is detected outside of VM-entry itself,
			 * just emit a BOGUS exitcode so we take a lap to pick
			 * up the event during an entry and are directed into
			 * the vm_handle_suspend() logic.
			 */
			vme->exitcode = VM_EXITCODE_BOGUS;
		}
		bail = true;
	}
	if (vcpu->reqidle) {
		vme->exitcode = VM_EXITCODE_REQIDLE;
		vmm_stat_incr(vm, vcpuid, VMEXIT_REQIDLE, 1);

		if (!on_entry) {
			/*
			 * A reqidle request detected outside of VM-entry can
			 * be handled directly by clearing the request (and
			 * taking a lap to userspace).
			 */
			vcpu_assert_locked(vcpu);
			vcpu->reqidle = 0;
		}
		bail = true;
	}
	if (vcpu->reqconsist) {
		/*
		 * We only expect exit-when-consistent requests to be asserted
		 * during entry, not as an otherwise spontaneous condition.  As
		 * such, we do not count it among the exit statistics, and emit
		 * the expected BOGUS exitcode, while clearing the request.
		 */
		vme->exitcode = VM_EXITCODE_BOGUS;
		vcpu->reqconsist = false;
		bail = true;
	}
	if (vcpu_should_yield(vm, vcpuid)) {
		vme->exitcode = VM_EXITCODE_BOGUS;
		vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
		bail = true;
	}
	if (CPU_ISSET(vcpuid, &vm->debug_cpus)) {
		vme->exitcode = VM_EXITCODE_DEBUG;
		bail = true;
	}

	if (bail) {
		if (on_entry) {
			/*
			 * If bailing out during VM-entry, the current %rip
			 * must be recorded in the exitinfo.
			 */
			vme->rip = entry_rip;
		}
		vme->inst_length = 0;
	}
	return (bail);
}

static bool
vcpu_sleep_bailout_checks(struct vm *vm, int vcpuid)
{
	/*
	 * Bail-out checks done prior to sleeping (in vCPU contexts like HLT
	 * or wait-for-SIPI) expect that %rip is already populated in the
	 * vm_exit structure, so only the exitcode needs to be modified.
	 */
	return (vcpu_bailout_checks(vm, vcpuid, false, 0));
}

bool
vcpu_entry_bailout_checks(struct vm *vm, int vcpuid, uint64_t rip)
{
	/*
	 * Bail-out checks done as part of VM entry require an updated %rip to
	 * populate the vm_exit struct if any of the conditions of interest
	 * are matched in the check.
	 */
	return (vcpu_bailout_checks(vm, vcpuid, true, rip));
}

cpuset_t
vm_active_cpus(struct vm *vm)
{
	return (vm->active_cpus);
}

cpuset_t
vm_debug_cpus(struct vm *vm)
{
	return (vm->debug_cpus);
}

cpuset_t
vm_suspended_cpus(struct vm *vm)
{
	return (vm->suspended_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{
	return (vm->vcpu[vcpuid].stats);
}

int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	*state = vm->vcpu[vcpuid].x2apic_state;

	return (0);
}

int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	if (state >= X2APIC_STATE_LAST)
		return (EINVAL);

	vm->vcpu[vcpuid].x2apic_state = state;

	vlapic_set_x2apic_state(vm, vcpuid, state);

	return (0);
}
/*
 * This function is called to ensure that a vcpu "sees" a pending event
 * as soon as possible:
 * - If the vcpu thread is sleeping then it is woken up.
 * - If the vcpu is running on a different host_cpu then an IPI will be
 *   directed to the host_cpu to cause the vcpu to trap into the hypervisor.
 */
static void
vcpu_notify_event_locked(struct vcpu *vcpu, vcpu_notify_t ntype)
{
	int hostcpu;

	ASSERT(ntype == VCPU_NOTIFY_APIC || ntype == VCPU_NOTIFY_EXIT);

	hostcpu = vcpu->hostcpu;
	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
		if (hostcpu != curcpu) {
			if (ntype == VCPU_NOTIFY_APIC) {
				vlapic_post_intr(vcpu->vlapic, hostcpu);
			} else {
				poke_cpu(hostcpu);
			}
		} else {
			/*
			 * If the 'vcpu' is running on 'curcpu' then it must
			 * be sending a notification to itself (e.g. SELF_IPI).
			 * The pending event will be picked up when the vcpu
			 * transitions back to guest context.
			 */
		}
	} else {
		KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
		    "with hostcpu %d", vcpu->state, hostcpu));
		if (vcpu->state == VCPU_SLEEPING) {
			cv_signal(&vcpu->vcpu_cv);
		}
	}
}

void
vcpu_notify_event(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT);
	vcpu_unlock(vcpu);
}

void
vcpu_notify_event_type(struct vm *vm, int vcpuid, vcpu_notify_t ntype)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];

	if (ntype == VCPU_NOTIFY_NONE) {
		return;
	}

	vcpu_lock(vcpu);
	vcpu_notify_event_locked(vcpu, ntype);
	vcpu_unlock(vcpu);
}

void
vcpu_ustate_change(struct vm *vm, int vcpuid, enum vcpu_ustate ustate)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	hrtime_t now = gethrtime();

	ASSERT3U(ustate, !=, vcpu->ustate);
	ASSERT3S(ustate, <, VU_MAX);
	ASSERT3S(ustate, >=, VU_INIT);

	hrtime_t delta = now - vcpu->ustate_when;
	vcpu->ustate_total[vcpu->ustate] += delta;

	membar_producer();

	vcpu->ustate_when = now;
	vcpu->ustate = ustate;
}

struct vmspace *
vm_get_vmspace(struct vm *vm)
{
	return (vm->vmspace);
}

struct vm_client *
vm_get_vmclient(struct vm *vm, int vcpuid)
{
	return (vm->vcpu[vcpuid].vmclient);
}

int
vm_apicid2vcpuid(struct vm *vm, int apicid)
{
	/*
	 * XXX apic id is assumed to be numerically identical to vcpu id
	 */
	return (apicid);
}

struct vatpic *
vm_atpic(struct vm *vm)
{
	return (vm->vatpic);
}

struct vatpit *
vm_atpit(struct vm *vm)
{
	return (vm->vatpit);
}

struct vpmtmr *
vm_pmtmr(struct vm *vm)
{
	return (vm->vpmtmr);
}

struct vrtc *
vm_rtc(struct vm *vm)
{
	return (vm->vrtc);
}

enum vm_reg_name
vm_segment_name(int seg)
{
	static enum vm_reg_name seg_names[] = {
		VM_REG_GUEST_ES,
		VM_REG_GUEST_CS,
		VM_REG_GUEST_SS,
		VM_REG_GUEST_DS,
		VM_REG_GUEST_FS,
		VM_REG_GUEST_GS
	};

	KASSERT(seg >= 0 && seg < nitems(seg_names),
	    ("%s: invalid segment encoding %d", __func__, seg));
	return (seg_names[seg]);
}

void
vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    uint_t num_copyinfo)
{
	for (uint_t idx = 0; idx < num_copyinfo; idx++) {
		if (copyinfo[idx].cookie != NULL) {
			(void) vmp_release((vm_page_t *)copyinfo[idx].cookie);
		}
	}
	bzero(copyinfo, num_copyinfo * sizeof (struct vm_copyinfo));
}
int
vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    uint_t num_copyinfo, int *fault)
{
	uint_t idx, nused;
	size_t n, off, remaining;
	vm_client_t *vmc = vm_get_vmclient(vm, vcpuid);

	bzero(copyinfo, sizeof (struct vm_copyinfo) * num_copyinfo);

	nused = 0;
	remaining = len;
	while (remaining > 0) {
		uint64_t gpa;
		int error;

		KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
		error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa, fault);
		if (error || *fault)
			return (error);
		off = gpa & PAGEOFFSET;
		n = min(remaining, PAGESIZE - off);
		copyinfo[nused].gpa = gpa;
		copyinfo[nused].len = n;
		remaining -= n;
		gla += n;
		nused++;
	}

	for (idx = 0; idx < nused; idx++) {
		vm_page_t *vmp;
		caddr_t hva;

		vmp = vmc_hold(vmc, copyinfo[idx].gpa & PAGEMASK, prot);
		if (vmp == NULL) {
			break;
		}
		if ((prot & PROT_WRITE) != 0) {
			hva = (caddr_t)vmp_get_writable(vmp);
		} else {
			hva = (caddr_t)vmp_get_readable(vmp);
		}
		copyinfo[idx].hva = hva + (copyinfo[idx].gpa & PAGEOFFSET);
		copyinfo[idx].cookie = vmp;
		copyinfo[idx].prot = prot;
	}

	if (idx != nused) {
		vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
		return (EFAULT);
	} else {
		*fault = 0;
		return (0);
	}
}

void
vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
    size_t len)
{
	char *dst;
	int idx;

	dst = kaddr;
	idx = 0;
	while (len > 0) {
		ASSERT(copyinfo[idx].prot & PROT_READ);

		bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		dst += copyinfo[idx].len;
		idx++;
	}
}

void
vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len)
{
	const char *src;
	int idx;

	src = kaddr;
	idx = 0;
	while (len > 0) {
		ASSERT(copyinfo[idx].prot & PROT_WRITE);

		bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		src += copyinfo[idx].len;
		idx++;
	}
}
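/*
 * Illustrative sketch (not compiled, hypothetical values): the page-split
 * arithmetic in vm_copy_setup() above.  A 100-byte copy whose GLA translates
 * to gpa 0x1fc0 is described by two vm_copyinfo entries: off = 0xfc0, so the
 * first chunk is min(100, PAGESIZE - 0xfc0) = 64 bytes at 0x1fc0, and the
 * remaining 36 bytes land at the start of the next translated page.
 */
#if 0
static size_t
first_chunk_len(uint64_t gpa, size_t remaining)
{
	const size_t off = gpa & PAGEOFFSET;

	return (min(remaining, PAGESIZE - off));
}
#endif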
/*
 * Return the amount of in-use and wired memory for the VM.  Since these are
 * global stats, only return the values for vCPU 0.
 */
VMM_STAT_DECLARE(VMM_MEM_RESIDENT);

static void
vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{
	if (vcpu == 0) {
		vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
		    PAGE_SIZE * vmspace_resident_count(vm->vmspace));
	}
}

VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);

int
vm_ioport_access(struct vm *vm, int vcpuid, bool in, uint16_t port,
    uint8_t bytes, uint32_t *val)
{
	return (vm_inout_access(&vm->ioports, in, port, bytes, val));
}

/*
 * bhyve-internal interfaces to attach or detach IO port handlers.
 * Must be called with VM write lock held for safety.
 */
int
vm_ioport_attach(struct vm *vm, uint16_t port, ioport_handler_t func,
    void *arg, void **cookie)
{
	int err;

	err = vm_inout_attach(&vm->ioports, port, IOPF_DEFAULT, func, arg);
	if (err == 0) {
		*cookie = (void *)IOP_GEN_COOKIE(func, arg, port);
	}
	return (err);
}

int
vm_ioport_detach(struct vm *vm, void **cookie, ioport_handler_t *old_func,
    void **old_arg)
{
	uint16_t port = IOP_PORT_FROM_COOKIE((uintptr_t)*cookie);
	int err;

	err = vm_inout_detach(&vm->ioports, port, false, old_func, old_arg);
	if (err == 0) {
		*cookie = NULL;
	}
	return (err);
}

/*
 * External driver interfaces to attach or detach IO port handlers.
 * Must be called with VM write lock held for safety.
 */
int
vm_ioport_hook(struct vm *vm, uint16_t port, ioport_handler_t func,
    void *arg, void **cookie)
{
	int err;

	if (port == 0) {
		return (EINVAL);
	}

	err = vm_inout_attach(&vm->ioports, port, IOPF_DRV_HOOK, func, arg);
	if (err == 0) {
		*cookie = (void *)IOP_GEN_COOKIE(func, arg, port);
	}
	return (err);
}

void
vm_ioport_unhook(struct vm *vm, void **cookie)
{
	uint16_t port = IOP_PORT_FROM_COOKIE((uintptr_t)*cookie);
	ioport_handler_t old_func;
	void *old_arg;
	int err;

	err = vm_inout_detach(&vm->ioports, port, true, &old_func, &old_arg);

	/* ioport-hook-using drivers are expected to be well-behaved */
	VERIFY0(err);
	VERIFY(IOP_GEN_COOKIE(old_func, old_arg, port) == (uintptr_t)*cookie);

	*cookie = NULL;
}

int
vmm_kstat_update_vcpu(struct kstat *ksp, int rw)
{
	struct vm *vm = ksp->ks_private;
	vmm_vcpu_kstats_t *vvk = ksp->ks_data;
	const int vcpuid = vvk->vvk_vcpu.value.ui32;
	struct vcpu *vcpu = &vm->vcpu[vcpuid];

	ASSERT3U(vcpuid, <, VM_MAXCPU);

	vvk->vvk_time_init.value.ui64 = vcpu->ustate_total[VU_INIT];
	vvk->vvk_time_run.value.ui64 = vcpu->ustate_total[VU_RUN];
	vvk->vvk_time_idle.value.ui64 = vcpu->ustate_total[VU_IDLE];
	vvk->vvk_time_emu_kern.value.ui64 = vcpu->ustate_total[VU_EMU_KERN];
	vvk->vvk_time_emu_user.value.ui64 = vcpu->ustate_total[VU_EMU_USER];
	vvk->vvk_time_sched.value.ui64 = vcpu->ustate_total[VU_SCHED];

	return (0);
}

SET_DECLARE(vmm_data_version_entries, const vmm_data_version_entry_t);

static int
vmm_data_find(const vmm_data_req_t *req, int vcpuid,
    const vmm_data_version_entry_t **resp)
{
	const vmm_data_version_entry_t **vdpp, *vdp;

	ASSERT(resp != NULL);
	ASSERT(req->vdr_result_len != NULL);

	SET_FOREACH(vdpp, vmm_data_version_entries) {
		vdp = *vdpp;
		if (vdp->vdve_class != req->vdr_class ||
		    vdp->vdve_version != req->vdr_version) {
			continue;
		}

		/*
		 * Enforce any data length expectation expressed by the
		 * provider for this data.
		 */
		if (vdp->vdve_len_expect != 0 &&
		    vdp->vdve_len_expect > req->vdr_len) {
			*req->vdr_result_len = vdp->vdve_len_expect;
			return (ENOSPC);
		}

		/*
		 * Make sure that the provided vcpuid is acceptable for the
		 * backend handler.
		 */
		if (vdp->vdve_readf != NULL || vdp->vdve_writef != NULL) {
			/*
			 * While it is tempting to demand the -1 sentinel value
			 * in vcpuid here, that expectation was not established
			 * for early consumers, so it is ignored.
			 */
		} else if (vdp->vdve_vcpu_readf != NULL ||
		    vdp->vdve_vcpu_writef != NULL) {
			/*
			 * Per-vCPU handlers which permit "wildcard" access
			 * will accept a vcpuid of -1 (for VM-wide data), while
			 * all others expect vcpuid [0, VM_MAXCPU).
			 */
			const int llimit = vdp->vdve_vcpu_wildcard ? -1 : 0;
			if (vcpuid < llimit || vcpuid >= VM_MAXCPU) {
				return (EINVAL);
			}
		} else {
			/*
			 * A provider with neither VM-wide nor per-vCPU
			 * handlers is completely unexpected.  Such a situation
			 * should be made into a compile-time error.  Bail out
			 * for now, rather than punishing the user with a
			 * panic.
			 */
			return (EINVAL);
		}

		*resp = vdp;
		return (0);
	}
	return (EINVAL);
}
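/*
 * Illustrative sketch (not compiled): the size-negotiation contract which
 * vdve_len_expect enforces in vmm_data_find() above.  A caller that supplies
 * too small a buffer receives ENOSPC with *vdr_result_len set to the needed
 * size, and is expected to retry the request with a buffer at least that
 * large.
 */
#if 0
static int
check_len_expect(const vmm_data_version_entry_t *vdp,
    const vmm_data_req_t *req)
{
	if (vdp->vdve_len_expect != 0 &&
	    vdp->vdve_len_expect > req->vdr_len) {
		*req->vdr_result_len = vdp->vdve_len_expect;
		return (ENOSPC);	/* caller retries with larger buffer */
	}
	return (0);
}
#endif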
static void *
vmm_data_from_class(const vmm_data_req_t *req, struct vm *vm)
{
	switch (req->vdr_class) {
	case VDC_REGISTER:
	case VDC_MSR:
	case VDC_FPU:
	case VDC_LAPIC:
	case VDC_VMM_ARCH:
		/*
		 * These have per-vCPU handling which is dispatched outside of
		 * the vmm_data_version_entries listing.
		 */
		panic("Unexpected per-vcpu class %u", req->vdr_class);
		break;

	case VDC_IOAPIC:
		return (vm->vioapic);
	case VDC_ATPIT:
		return (vm->vatpit);
	case VDC_ATPIC:
		return (vm->vatpic);
	case VDC_HPET:
		return (vm->vhpet);
	case VDC_PM_TIMER:
		return (vm->vpmtmr);
	case VDC_RTC:
		return (vm->vrtc);
	case VDC_VMM_TIME:
		return (vm);
	case VDC_VERSION:
		/*
		 * Play along with all of the other classes which need backup
		 * data, even though version info does not require it.
		 */
		return (vm);

	default:
		/* The data class will have been validated by now */
		panic("Unexpected class %u", req->vdr_class);
	}
}

const uint32_t default_msr_iter[] = {
	/*
	 * Although EFER is also available via the get/set-register interface,
	 * we include it in the default list of emitted MSRs.
	 */
	MSR_EFER,

	/*
	 * While gsbase and fsbase are accessible via the MSR accessors, they
	 * are not included in MSR iteration since they are covered by the
	 * segment descriptor interface too.
	 */
	MSR_KGSBASE,

	MSR_STAR,
	MSR_LSTAR,
	MSR_CSTAR,
	MSR_SF_MASK,

	MSR_SYSENTER_CS_MSR,
	MSR_SYSENTER_ESP_MSR,
	MSR_SYSENTER_EIP_MSR,

	MSR_PAT,

	MSR_TSC,

	MSR_MTRRcap,
	MSR_MTRRdefType,
	MSR_MTRR4kBase, MSR_MTRR4kBase + 1, MSR_MTRR4kBase + 2,
	MSR_MTRR4kBase + 3, MSR_MTRR4kBase + 4, MSR_MTRR4kBase + 5,
	MSR_MTRR4kBase + 6, MSR_MTRR4kBase + 7,
	MSR_MTRR16kBase, MSR_MTRR16kBase + 1,
	MSR_MTRR64kBase,
};

static int
vmm_data_read_msr(struct vm *vm, int vcpuid, uint32_t msr, uint64_t *value)
{
	int err = 0;

	switch (msr) {
	case MSR_TSC:
		/*
		 * The vmm-data interface for MSRs provides access to the
		 * per-vCPU offset of the TSC, when reading/writing MSR_TSC.
		 *
		 * The VM-wide offset (and scaling) of the guest TSC is
		 * accessed via the VMM_TIME data class.
		 */
		*value = vm->vcpu[vcpuid].tsc_offset;
		return (0);

	default:
		if (is_mtrr_msr(msr)) {
			err = vm_rdmtrr(&vm->vcpu[vcpuid].mtrr, msr, value);
		} else {
			err = ops->vmgetmsr(vm->cookie, vcpuid, msr, value);
		}
		break;
	}

	return (err);
}
static int
vmm_data_write_msr(struct vm *vm, int vcpuid, uint32_t msr, uint64_t value)
{
	int err = 0;

	switch (msr) {
	case MSR_TSC:
		/* See vmm_data_read_msr() for more detail */
		vm->vcpu[vcpuid].tsc_offset = value;
		return (0);
	case MSR_MTRRcap: {
		/*
		 * MTRRcap is read-only.  If the desired value matches the
		 * existing one, consider it a success.
		 */
		uint64_t comp;
		err = vm_rdmtrr(&vm->vcpu[vcpuid].mtrr, msr, &comp);
		if (err == 0 && comp != value) {
			return (EINVAL);
		}
		break;
	}
	default:
		if (is_mtrr_msr(msr)) {
			/* MTRRcap is already handled above */
			ASSERT3U(msr, !=, MSR_MTRRcap);

			err = vm_wrmtrr(&vm->vcpu[vcpuid].mtrr, msr, value);
		} else {
			err = ops->vmsetmsr(vm->cookie, vcpuid, msr, value);
		}
		break;
	}

	return (err);
}

static int
vmm_data_read_msrs(struct vm *vm, int vcpuid, const vmm_data_req_t *req)
{
	VERIFY3U(req->vdr_class, ==, VDC_MSR);
	VERIFY3U(req->vdr_version, ==, 1);

	struct vdi_field_entry_v1 *entryp = req->vdr_data;

	/* Specific MSRs requested */
	if ((req->vdr_flags & VDX_FLAG_READ_COPYIN) != 0) {
		const uint_t count =
		    req->vdr_len / sizeof (struct vdi_field_entry_v1);

		for (uint_t i = 0; i < count; i++, entryp++) {
			int err = vmm_data_read_msr(vm, vcpuid,
			    entryp->vfe_ident, &entryp->vfe_value);

			if (err != 0) {
				return (err);
			}
		}

		*req->vdr_result_len =
		    count * sizeof (struct vdi_field_entry_v1);
		return (0);
	}

	/*
	 * If specific MSRs are not requested, try to provide all those which
	 * we know about instead.
	 */
	const uint_t num_msrs = nitems(default_msr_iter) +
	    (VMM_MTRR_VAR_MAX * 2);
	const uint32_t output_len =
	    num_msrs * sizeof (struct vdi_field_entry_v1);

	*req->vdr_result_len = output_len;
	if (req->vdr_len < output_len) {
		return (ENOSPC);
	}

	/* Output the MSRs in the default list */
	for (uint_t i = 0; i < nitems(default_msr_iter); i++, entryp++) {
		entryp->vfe_ident = default_msr_iter[i];

		/* All of these MSRs are expected to work */
		VERIFY0(vmm_data_read_msr(vm, vcpuid, entryp->vfe_ident,
		    &entryp->vfe_value));
	}

	/* Output the variable MTRRs */
	for (uint_t i = 0; i < (VMM_MTRR_VAR_MAX * 2); i++, entryp++) {
		entryp->vfe_ident = MSR_MTRRVarBase + i;

		/* All of these MSRs are expected to work */
		VERIFY0(vmm_data_read_msr(vm, vcpuid, entryp->vfe_ident,
		    &entryp->vfe_value));
	}
	return (0);
}
static int
vmm_data_write_msrs(struct vm *vm, int vcpuid, const vmm_data_req_t *req)
{
	VERIFY3U(req->vdr_class, ==, VDC_MSR);
	VERIFY3U(req->vdr_version, ==, 1);

	const struct vdi_field_entry_v1 *entryp = req->vdr_data;
	const uint_t entry_count =
	    req->vdr_len / sizeof (struct vdi_field_entry_v1);

	/*
	 * First make sure that all of the MSRs can be manipulated.
	 * For now, this check is done by going through the getmsr handler.
	 */
	for (uint_t i = 0; i < entry_count; i++, entryp++) {
		const uint64_t msr = entryp->vfe_ident;
		uint64_t val;

		if (vmm_data_read_msr(vm, vcpuid, msr, &val) != 0) {
			return (EINVAL);
		}
	}

	/*
	 * Fairly confident that all of the 'set' operations are at least
	 * targeting valid MSRs, continue on.
	 */
	entryp = req->vdr_data;
	for (uint_t i = 0; i < entry_count; i++, entryp++) {
		int err = vmm_data_write_msr(vm, vcpuid, entryp->vfe_ident,
		    entryp->vfe_value);

		if (err != 0) {
			return (err);
		}
	}
	*req->vdr_result_len =
	    entry_count * sizeof (struct vdi_field_entry_v1);

	return (0);
}

static const vmm_data_version_entry_t msr_v1 = {
	.vdve_class = VDC_MSR,
	.vdve_version = 1,
	.vdve_len_per_item = sizeof (struct vdi_field_entry_v1),
	.vdve_vcpu_readf = vmm_data_read_msrs,
	.vdve_vcpu_writef = vmm_data_write_msrs,
};
VMM_DATA_VERSION(msr_v1);

static const uint32_t vmm_arch_v1_fields[] = {
	VAI_VM_IS_PAUSED,
};

static const uint32_t vmm_arch_v1_vcpu_fields[] = {
	VAI_PEND_NMI,
	VAI_PEND_EXTINT,
	VAI_PEND_EXCP,
	VAI_PEND_INTINFO,
};

static bool
vmm_read_arch_field(struct vm *vm, int vcpuid, uint32_t ident, uint64_t *valp)
{
	ASSERT(valp != NULL);

	if (vcpuid == -1) {
		switch (ident) {
		case VAI_VM_IS_PAUSED:
			*valp = vm->is_paused ? 1 : 0;
			return (true);
		default:
			break;
		}
	} else {
		VERIFY(vcpuid >= 0 && vcpuid < VM_MAXCPU);

		struct vcpu *vcpu = &vm->vcpu[vcpuid];
		switch (ident) {
		case VAI_PEND_NMI:
			*valp = vcpu->nmi_pending != 0 ? 1 : 0;
			return (true);
		case VAI_PEND_EXTINT:
			*valp = vcpu->extint_pending != 0 ? 1 : 0;
			return (true);
		case VAI_PEND_EXCP:
			*valp = vcpu->exc_pending;
			return (true);
		case VAI_PEND_INTINFO:
			*valp = vcpu->exit_intinfo;
			return (true);
		default:
			break;
		}
	}
	return (false);
}

static const uint32_t vmm_arch_v1_fields[] = {
	VAI_VM_IS_PAUSED,
};

static const uint32_t vmm_arch_v1_vcpu_fields[] = {
	VAI_PEND_NMI,
	VAI_PEND_EXTINT,
	VAI_PEND_EXCP,
	VAI_PEND_INTINFO,
};

static bool
vmm_read_arch_field(struct vm *vm, int vcpuid, uint32_t ident, uint64_t *valp)
{
	ASSERT(valp != NULL);

	if (vcpuid == -1) {
		switch (ident) {
		case VAI_VM_IS_PAUSED:
			*valp = vm->is_paused ? 1 : 0;
			return (true);
		default:
			break;
		}
	} else {
		VERIFY(vcpuid >= 0 && vcpuid < VM_MAXCPU);

		struct vcpu *vcpu = &vm->vcpu[vcpuid];
		switch (ident) {
		case VAI_PEND_NMI:
			*valp = vcpu->nmi_pending != 0 ? 1 : 0;
			return (true);
		case VAI_PEND_EXTINT:
			*valp = vcpu->extint_pending != 0 ? 1 : 0;
			return (true);
		case VAI_PEND_EXCP:
			*valp = vcpu->exc_pending;
			return (true);
		case VAI_PEND_INTINFO:
			*valp = vcpu->exit_intinfo;
			return (true);
		default:
			break;
		}
	}
	return (false);
}

static int
vmm_data_read_varch(struct vm *vm, int vcpuid, const vmm_data_req_t *req)
{
	VERIFY3U(req->vdr_class, ==, VDC_VMM_ARCH);
	VERIFY3U(req->vdr_version, ==, 1);

	/* per-vCPU fields are handled separately from VM-wide ones */
	if (vcpuid != -1 && (vcpuid < 0 || vcpuid >= VM_MAXCPU)) {
		return (EINVAL);
	}

	struct vdi_field_entry_v1 *entryp = req->vdr_data;

	/* Specific fields requested */
	if ((req->vdr_flags & VDX_FLAG_READ_COPYIN) != 0) {
		const uint_t count =
		    req->vdr_len / sizeof (struct vdi_field_entry_v1);

		for (uint_t i = 0; i < count; i++, entryp++) {
			if (!vmm_read_arch_field(vm, vcpuid, entryp->vfe_ident,
			    &entryp->vfe_value)) {
				return (EINVAL);
			}
		}
		*req->vdr_result_len =
		    count * sizeof (struct vdi_field_entry_v1);
		return (0);
	}

	/* Emit all of the possible values */
	const uint32_t *idents;
	uint_t ident_count;

	if (vcpuid == -1) {
		idents = vmm_arch_v1_fields;
		ident_count = nitems(vmm_arch_v1_fields);
	} else {
		idents = vmm_arch_v1_vcpu_fields;
		ident_count = nitems(vmm_arch_v1_vcpu_fields);
	}

	const uint32_t total_size =
	    ident_count * sizeof (struct vdi_field_entry_v1);

	*req->vdr_result_len = total_size;
	if (req->vdr_len < total_size) {
		return (ENOSPC);
	}
	for (uint_t i = 0; i < ident_count; i++, entryp++) {
		entryp->vfe_ident = idents[i];
		VERIFY(vmm_read_arch_field(vm, vcpuid, entryp->vfe_ident,
		    &entryp->vfe_value));
	}
	return (0);
}

static int
vmm_data_write_varch_vcpu(struct vm *vm, int vcpuid, const vmm_data_req_t *req)
{
	VERIFY3U(req->vdr_class, ==, VDC_VMM_ARCH);
	VERIFY3U(req->vdr_version, ==, 1);

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU) {
		return (EINVAL);
	}

	const struct vdi_field_entry_v1 *entryp = req->vdr_data;
	const uint_t entry_count =
	    req->vdr_len / sizeof (struct vdi_field_entry_v1);
	struct vcpu *vcpu = &vm->vcpu[vcpuid];

	for (uint_t i = 0; i < entry_count; i++, entryp++) {
		const uint64_t val = entryp->vfe_value;

		switch (entryp->vfe_ident) {
		case VAI_PEND_NMI:
			vcpu->nmi_pending = (val != 0);
			break;
		case VAI_PEND_EXTINT:
			vcpu->extint_pending = (val != 0);
			break;
		case VAI_PEND_EXCP:
			if (!VM_INTINFO_PENDING(val)) {
				vcpu->exc_pending = 0;
			} else if (VM_INTINFO_TYPE(val) != VM_INTINFO_HWEXCP ||
			    (val & VM_INTINFO_MASK_RSVD) != 0) {
				/* reject improperly-formed hw exception */
				return (EINVAL);
			} else {
				vcpu->exc_pending = val;
			}
			break;
		case VAI_PEND_INTINFO:
			if (vm_exit_intinfo(vm, vcpuid, val) != 0) {
				return (EINVAL);
			}
			break;
		default:
			return (EINVAL);
		}
	}

	*req->vdr_result_len = entry_count * sizeof (struct vdi_field_entry_v1);
	return (0);
}

static int
vmm_data_write_varch(struct vm *vm, int vcpuid, const vmm_data_req_t *req)
{
	VERIFY3U(req->vdr_class, ==, VDC_VMM_ARCH);
	VERIFY3U(req->vdr_version, ==, 1);

	/* per-vCPU fields are handled separately from VM-wide ones */
	if (vcpuid != -1) {
		return (vmm_data_write_varch_vcpu(vm, vcpuid, req));
	}

	const struct vdi_field_entry_v1 *entryp = req->vdr_data;
	const uint_t entry_count =
	    req->vdr_len / sizeof (struct vdi_field_entry_v1);

	if (entry_count > 0) {
		if (entryp->vfe_ident == VAI_VM_IS_PAUSED) {
			/*
			 * The VM_PAUSE and VM_RESUME ioctls are the officially
			 * sanctioned mechanisms for setting the is-paused
			 * state of the VM.
			 */
			return (EPERM);
		} else {
			/* no other valid arch entries at this time */
			return (EINVAL);
		}
	}

	*req->vdr_result_len = entry_count * sizeof (struct vdi_field_entry_v1);
	return (0);
}

static const vmm_data_version_entry_t vmm_arch_v1 = {
	.vdve_class = VDC_VMM_ARCH,
	.vdve_version = 1,
	.vdve_len_per_item = sizeof (struct vdi_field_entry_v1),
	.vdve_vcpu_readf = vmm_data_read_varch,
	.vdve_vcpu_writef = vmm_data_write_varch,

	/*
	 * Handlers for VMM_ARCH can process VM-wide (vcpuid == -1) entries in
	 * addition to vCPU specific ones.
	 */
	.vdve_vcpu_wildcard = true,
};
VMM_DATA_VERSION(vmm_arch_v1);
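
/*
 * Example: pending-event state can be transferred with the same field-entry
 * mechanism, e.g. marking an NMI pending on vCPU 0 while importing state.
 * Illustrative sketch only, under the same struct vm_data_xfer and `fd`
 * assumptions as the MSR example above:
 *
 *	struct vdi_field_entry_v1 entry = {
 *		.vfe_ident = VAI_PEND_NMI,
 *		.vfe_value = 1,
 *	};
 *	struct vm_data_xfer xfer = {
 *		.vdx_vcpuid = 0,
 *		.vdx_class = VDC_VMM_ARCH,
 *		.vdx_version = 1,
 *		.vdx_len = sizeof (entry),
 *		.vdx_data = &entry,
 *	};
 *	(void) ioctl(fd, VM_DATA_WRITE, &xfer);
 */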

/*
 * GUEST TIME SUPPORT
 *
 * Broadly, there are two categories of functionality related to time passing
 * in the guest: the guest's TSC and timers used by emulated devices.
 *
 * ---------------------------
 * GUEST TSC "VIRTUALIZATION"
 * ---------------------------
 *
 * The TSC can be read either via an instruction (rdtsc/rdtscp) or by reading
 * the TSC MSR.
 *
 * When a guest reads the TSC via its MSR, the guest will exit and we emulate
 * the rdmsr. More typically, the guest reads the TSC via a rdtsc(p)
 * instruction. Both SVM and VMX support virtualizing the guest TSC in
 * hardware -- that is, a guest will not generally exit on a rdtsc
 * instruction.
 *
 * To support hardware-virtualized guest TSC, both SVM and VMX provide two
 * knobs for the hypervisor to adjust the guest's view of the TSC:
 * - TSC offset
 * - TSC frequency multiplier (also called "frequency ratio")
 *
 * When a guest calls rdtsc(p), the TSC value it sees is the sum of:
 *	guest_tsc = (host TSC, scaled according to frequency multiplier)
 *		    + (TSC offset, programmed by hypervisor)
 *
 * See the discussions of the TSC offset and frequency multiplier below for
 * more details on each of these.
 *
 * --------------------
 * TSC OFFSET OVERVIEW
 * --------------------
 *
 * The TSC offset is a value added to the host TSC (which may be scaled first)
 * to provide the guest TSC. This offset addition is generally done by
 * hardware, but may be used in emulating the TSC if necessary.
 *
 * Recall that the general formula for calculating the guest TSC is:
 *
 *	guest_tsc = (host TSC, scaled if needed) + TSC offset
 *
 * Intuitively, the TSC offset is simply an offset of the host's TSC to make
 * the guest's view of the TSC appear correct: the guest TSC should be 0 at
 * boot and monotonically increase at a roughly constant frequency. Thus in
 * the simplest case, the TSC offset is just the negated value of the host
 * TSC when the guest was booted, assuming the guest and host have the same
 * frequencies.
 *
 * In practice, there are several factors that can make calculating the TSC
 * offset more complicated, including:
 *
 * (1) the physical CPU the guest is running on
 * (2) whether the guest has written to the TSC of that vCPU
 * (3) differing host and guest frequencies, like after a live migration
 * (4) a guest running on a different system than where it was booted, like
 *     after a live migration
 *
 * We will explore each of these factors individually. See below for a
 * summary.
 *
 *
 * (1) Physical CPU offsets
 *
 * The system maintains a set of per-CPU offsets to the TSC to provide a
 * consistent view of the TSC regardless of the CPU a thread is running on.
 * These offsets are included automatically as a part of rdtsc_offset().
 *
 * The per-CPU offset must be included as a part of reading the host TSC when
 * calculating the offset before running the guest on a given CPU.
 *
 *
 * (2) Guest TSC writes (vCPU offsets)
 *
 * The TSC is a writable MSR. When a guest writes to the TSC, subsequent
 * reads of the TSC from that vCPU should show the value written, plus
 * whatever time has elapsed since the write.
 *
 * To support this, when the guest writes to the TSC, we store an additional
 * vCPU offset calculated to make future reads of the TSC map to what the
 * guest expects.
 *
 *
 * (3) Differing host and guest frequencies (host TSC scaling)
 *
 * A guest has the same frequency as its host when it boots, but it may be
 * migrated to a machine with a different TSC frequency. Systems expect that
 * their TSC frequency does not change. To support this fiction in which a
 * guest is running on hardware of a different TSC frequency, the hypervisor
 * can program a "frequency multiplier" that represents the ratio of
 * guest/host frequency.
 *
 * Any time a host TSC is used in calculations for the offset, it should be
 * "scaled" according to this multiplier, and the hypervisor should program
 * the multiplier before running a guest so that the hardware virtualization
 * of the TSC functions properly. Similarly, the multiplier should be used in
 * any TSC emulation.
 *
 * See below for more details about the frequency multiplier.
 *
 *
 * (4) Guest running on a system it did not boot on ("base guest TSC")
 *
 * When a guest boots, its TSC offset is simply the negated host TSC at the
 * time it booted. If a guest is migrated from a source host to a target
 * host, the TSC offset from the source host is no longer useful for several
 * reasons:
 * - the target host TSC has no relationship to the source host TSC
 * - the guest did not boot on the target system, so the TSC of the target
 *   host is not sufficient to describe how long the guest has been running
 *   prior to migration
 * - the target system may have a different TSC frequency than the source
 *   system
 *
 * Ignoring the issue of frequency differences for a moment, let's consider
 * how to re-align the guest TSC with the host TSC of the target host.
 * Intuitively, for the guest to see the correct TSC, we still want to add
 * some offset to the host TSC that accounts for how long this guest has been
 * running on the system.
 *
 * An example here might be helpful. Consider a source host and target host,
 * both with TSC frequencies of 1GHz.
 * On the source host, the guest and host TSC values might look like:
 *
 * +----------------------------------------------------------------------+
 * | Event                | source host TSC  | guest TSC                  |
 * ------------------------------------------------------------------------
 * | guest boot (t=0s)    | 5000000000       | 5000000000 + -5000000000   |
 * |                      |                  | 0                          |
 * ------------------------------------------------------------------------
 * | guest rdtsc (t=10s)  | 15000000000      | 15000000000 + -5000000000  |
 * |                      |                  | 10000000000                |
 * ------------------------------------------------------------------------
 * | migration (t=15s)    | 20000000000      | 20000000000 + -5000000000  |
 * |                      |                  | 15000000000                |
 * +----------------------------------------------------------------------+
 *
 * Ignoring the time it takes for a guest to physically migrate machines, on
 * the target host, we would expect the TSC to continue functioning as such:
 *
 * +----------------------------------------------------------------------+
 * | Event                  | target host TSC  | guest TSC                |
 * ------------------------------------------------------------------------
 * | guest migrate (t=15s)  | 300000000000     | 15000000000              |
 * ------------------------------------------------------------------------
 * | guest rdtsc (t=20s)    | 305000000000     | 20000000000              |
 * ------------------------------------------------------------------------
 *
 * In order to produce a correct TSC value here, we can calculate a new
 * "effective" boot TSC that maps to what the host TSC would've been had it
 * been booted on the target. We add that to the guest TSC when it began to
 * run on this machine, and negate them both to get a new offset. In this
 * example, the effective boot TSC is:
 * -(300000000000 - 15000000000) = -285000000000.
 *
 * +--------------------------------------------------------------------------+
 * | Event                  | target host TSC  | guest TSC                    |
 * ----------------------------------------------------------------------------
 * | guest "boot" (t=0s)    | 285000000000     | 285000000000 + -285000000000 |
 * |                        |                  | 0                            |
 * ----------------------------------------------------------------------------
 * | guest migrate (t=15s)  | 300000000000     | 300000000000 + -285000000000 |
 * |                        |                  | 15000000000                  |
 * ----------------------------------------------------------------------------
 * | guest rdtsc (t=20s)    | 305000000000     | 305000000000 + -285000000000 |
 * |                        |                  | 20000000000                  |
 * +--------------------------------------------------------------------------+
 *
 * To support the offset calculation following a migration, the VMM data time
 * interface allows callers to set a "base guest TSC", which is the TSC value
 * of the guest when it began running on the host. The current guest TSC can
 * be requested via a read of the time data. See below for details on that
 * interface.
 *
 * Frequency differences between the host and the guest are accounted for
 * when scaling the host TSC. See below for details on the frequency
 * multiplier.
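 *
 * For example, if the target host in the tables above instead ran at 2GHz,
 * its raw TSC would read 600000000000 at the moment of migration; scaling by
 * the guest/host ratio of 0.5 yields 300000000000, after which the effective
 * boot TSC and offset are computed exactly as shown.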
 *
 *
 * --------------------
 * TSC OFFSET SUMMARY
 * --------------------
 *
 * Factoring in all of the components to the TSC above, the TSC offset that
 * is programmed by the hypervisor before running a given vCPU is:
 *
 * offset = -((base host TSC, scaled if needed) - base_guest_tsc) + vCPU offset
 *
 * This offset is stored in two pieces. Per-vCPU offsets are stored with the
 * given vCPU and added in when programming the offset. The rest of the
 * offset is stored as a VM-wide offset, and computed either at boot or when
 * the time data is written to.
 *
 * It is safe to add the vCPU offset and the VM-wide offsets together because
 * the vCPU offset is in terms of the guest TSC. The host TSC is scaled
 * before using it in calculations, so all TSC values are applicable to the
 * same frequency.
 *
 * Note: Though both the VM-wide offset and per-vCPU offsets may be negative,
 * we store them as unsigned values and perform all offsetting math unsigned.
 * This is to avoid UB from signed overflow.
 *
 * -------------------------
 * TSC FREQUENCY MULTIPLIER
 * -------------------------
 *
 * In order to account for frequency differences between the host and guest,
 * SVM and VMX provide an interface to set a "frequency multiplier" (or
 * "frequency ratio") representing guest to host frequency. In a
 * hardware-virtualized read of the TSC, the host TSC is scaled using this
 * multiplier prior to adding the programmed TSC offset.
 *
 * Both platforms represent the ratio as a fixed point number, where the
 * lower bits are used as a fractional component, and some number of the
 * upper bits are used as the integer component.
 *
 * Some example multipliers, for a platform with FRAC fractional bits in the
 * multiplier:
 * - guest frequency == host: 1 << FRAC
 * - guest frequency is 2x host: 1 << (FRAC + 1)
 * - guest frequency is 0.5x host: 1 << (FRAC - 1), as the highest-order
 *   fractional bit represents 1/2
 * - guest frequency is 2.5x host: (1 << FRAC) | (1 << (FRAC - 1))
 * and so on.
 *
 * In general, the frequency multiplier is calculated as follows:
 *	(guest_hz * (1 << FRAC_SIZE)) / host_hz
 *
 * The multiplier should be used any time the host TSC value is used in
 * calculations with the guest TSC (and their frequencies differ). The
 * function `vmm_scale_tsc` is intended to be used for these purposes, as it
 * will scale the host TSC only if needed.
 *
 * The multiplier should also be programmed by the hypervisor before the
 * guest is run.
 *
 *
 * ----------------------------
 * DEVICE TIMERS (BOOT_HRTIME)
 * ----------------------------
 *
 * Emulated devices use timers to do things such as scheduling periodic
 * events. These timers are scheduled relative to the hrtime of the host.
 * When device state is exported or imported, we use boot_hrtime to normalize
 * these timers against the host hrtime. The boot_hrtime represents the
 * hrtime of the host when the guest was booted.
 *
 * If a guest is migrated to a different machine, boot_hrtime must be
 * adjusted to match the hrtime of when the guest was effectively booted on
 * the target host. This allows timers to continue functioning when device
 * state is imported on the target.
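 *
 * For example (illustrative numbers): if a guest has been running for 15s
 * when its device state is imported on a target host whose hrtime reads
 * 400s, the effective boot_hrtime on the target is 400s - 15s = 385s
 * (expressed in nanoseconds), so a device timer scheduled for "20s after
 * boot" still fires 5s after the import.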
 *
 *
 * ------------------------
 * VMM DATA TIME INTERFACE
 * ------------------------
 *
 * In order to facilitate live migrations of guests, we provide an interface,
 * via the VMM data read/write ioctls, for userspace to make changes to the
 * guest's view of the TSC and device timers, allowing these features to
 * continue functioning after a migration.
 *
 * The interface was designed to expose the minimal amount of data needed for
 * a userspace component to make adjustments to the guest's view of time
 * (e.g., to account for time passing in a live migration). At a minimum,
 * such a program needs:
 * - the current guest TSC
 * - guest TSC frequency
 * - guest's boot_hrtime
 * - timestamps of when this data was taken (hrtime for hrtime calculations,
 *   and wall clock time for computing time deltas between machines)
 *
 * The wall clock time is provided for consumers to make adjustments to the
 * guest TSC and boot_hrtime based on deltas observed during migrations. It
 * may be prudent for consumers to use this data only in circumstances where
 * the source and target have well-synchronized wall clocks, but nothing in
 * the interface depends on this assumption.
 *
 * On writes, consumers write back:
 * - the base guest TSC (used for TSC offset calculations)
 * - desired boot_hrtime
 * - guest_frequency (cannot change)
 * - hrtime of when this data was adjusted
 * - (wall clock time on writes is ignored)
 *
 * The interface will adjust the input guest TSC slightly, based on the input
 * hrtime, to account for latency between userspace calculations and
 * application of the data on the kernel side. This amounts to adding a small
 * amount of additional "uptime" for the guest.
 *
 * After the adjustments, the interface updates the VM-wide TSC offset and
 * boot_hrtime. Per-vCPU offsets are not adjusted, as those are already in
 * terms of the guest TSC and can be exported/imported via the MSR VMM data
 * interface.
 *
 *
 * --------------------------------
 * SUPPORTED PLATFORMS AND CAVEATS
 * --------------------------------
 *
 * While both VMX and SVM offer TSC scaling as a feature, at this time only
 * SVM is supported by bhyve.
 *
 * The time data interface is designed such that Intel support can be added
 * easily, and all other aspects of the time interface should work on Intel.
 * (Without frequency control though, in practice, doing live migrations of
 * guests on Intel will not work for time-related things, as two machines
 * rarely have exactly the same frequency).
 *
 * Additionally, while on both SVM and VMX the frequency multiplier is a
 * fixed point number, each uses a different number of fractional and integer
 * bits for the multiplier. As such, calculating the multiplier and
 * fractional bit size is requested via the vmm_ops.
 *
 * Care should be taken to set reasonable limits for ratios based on the
 * platform, as the difference in fractional bits can lead to slightly
 * different tradeoffs in terms of representable ratios and potentially
 * overflowing calculations.
 */

/*
 * Scales the TSC if needed, based on the input frequency multiplier.
 */
static uint64_t
vmm_scale_tsc(uint64_t tsc, uint64_t mult)
{
	const uint32_t frac_size = ops->fr_fracsize;

	if (mult != VM_TSCM_NOSCALE) {
		VERIFY3U(frac_size, >, 0);
		return (scale_tsc(tsc, mult, frac_size));
	} else {
		return (tsc);
	}
}

/*
 * Calculate the frequency multiplier, which represents the ratio of
 * guest_hz / host_hz. The frequency multiplier is a fixed point number with
 * `frac_size` fractional bits (fractional bits begin at bit 0).
 *
 * See comment for "calc_freq_multiplier" in "vmm_time_support.S" for more
 * information about valid input to this function.
 */
uint64_t
vmm_calc_freq_multiplier(uint64_t guest_hz, uint64_t host_hz,
    uint32_t frac_size)
{
	VERIFY3U(guest_hz, !=, 0);
	VERIFY3U(frac_size, >, 0);
	VERIFY3U(frac_size, <, 64);

	return (calc_freq_multiplier(guest_hz, host_hz, frac_size));
}

/*
 * Calculate the guest VM-wide TSC offset.
 *
 * offset = -((base host TSC, scaled if needed) - base_guest_tsc)
 *
 * The base_host_tsc and the base_guest_tsc are the TSC values of the host
 * (read on the system) and the guest (calculated) at the same point in time.
 * This allows us to fix the guest TSC at this point in time as a base, either
 * following boot (guest TSC = 0), or a change to the guest's time data from
 * userspace (such as in the case of a migration).
 */
static uint64_t
calc_tsc_offset(uint64_t base_host_tsc, uint64_t base_guest_tsc, uint64_t mult)
{
	const uint64_t htsc_scaled = vmm_scale_tsc(base_host_tsc, mult);
	if (htsc_scaled > base_guest_tsc) {
		return ((uint64_t)(- (int64_t)(htsc_scaled - base_guest_tsc)));
	} else {
		return (base_guest_tsc - htsc_scaled);
	}
}

/*
 * Calculate an estimate of the guest TSC.
 *
 * guest_tsc = (host TSC, scaled if needed) + offset
 */
static uint64_t
calc_guest_tsc(uint64_t host_tsc, uint64_t mult, uint64_t offset)
{
	return (vmm_scale_tsc(host_tsc, mult) + offset);
}
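
/*
 * Worked example for the helpers above (illustrative numbers only): consider
 * a guest booted on a 1GHz host and migrated to a 2GHz target, on a platform
 * with FRAC_SIZE = 32 fractional bits:
 *
 *	mult = (1000000000 * (1 << 32)) / 2000000000 = 1 << 31	(i.e. 0.5)
 *
 * If the target host TSC reads 600000000000 when the guest's base TSC is
 * 15000000000, then:
 *
 *	htsc_scaled = vmm_scale_tsc(600000000000, mult) = 300000000000
 *	offset	    = -(300000000000 - 15000000000)    = -285000000000
 *	guest_tsc   = 300000000000 + -285000000000     = 15000000000
 */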

/*
 * Take a non-atomic "snapshot" of the current:
 * - TSC
 * - hrtime
 * - wall clock time
 */
static void
vmm_time_snapshot(uint64_t *tsc, hrtime_t *hrtime, timespec_t *hrestime)
{
	/*
	 * Disable interrupts while we take the readings: In the absence of a
	 * mechanism to convert hrtime to hrestime, we want the time between
	 * each of these measurements to be as small as possible.
	 */
	ulong_t iflag = intr_clear();

	hrtime_t hrt = gethrtime_unscaled();
	*tsc = (uint64_t)hrt;
	*hrtime = hrt;
	scalehrtime(hrtime);
	gethrestime(hrestime);

	intr_restore(iflag);
}

/*
 * Read VMM Time data
 *
 * Provides:
 * - the current guest TSC and TSC frequency
 * - guest boot_hrtime
 * - timestamps of the read (hrtime and wall clock time)
 */
static int
vmm_data_read_vmm_time(void *arg, const vmm_data_req_t *req)
{
	VERIFY3U(req->vdr_class, ==, VDC_VMM_TIME);
	VERIFY3U(req->vdr_version, ==, 1);
	VERIFY3U(req->vdr_len, >=, sizeof (struct vdi_time_info_v1));

	struct vm *vm = arg;
	struct vdi_time_info_v1 *out = req->vdr_data;

	/* Take a snapshot of this point in time */
	uint64_t tsc;
	hrtime_t hrtime;
	timespec_t hrestime;
	vmm_time_snapshot(&tsc, &hrtime, &hrestime);

	/* Write the output values */
	out->vt_guest_freq = vm->guest_freq;

	/*
	 * Use only the VM-wide TSC offset for calculating the guest TSC,
	 * ignoring per-vCPU offsets. This value is provided as a "base" guest
	 * TSC at the time of the read; per-vCPU offsets are factored in as
	 * needed elsewhere, either when running the vCPU or if the guest
	 * reads the TSC via rdmsr.
	 */
	out->vt_guest_tsc = calc_guest_tsc(tsc, vm->freq_multiplier,
	    vm->tsc_offset);
	out->vt_boot_hrtime = vm->boot_hrtime;
	out->vt_hrtime = hrtime;
	out->vt_hres_sec = hrestime.tv_sec;
	out->vt_hres_ns = hrestime.tv_nsec;

	return (0);
}

/*
 * Modify VMM Time data related values
 *
 * This interface serves to allow guests' TSC and device timers to continue
 * functioning across live migrations. On a successful write, the VM-wide TSC
 * offset and boot_hrtime of the guest are updated.
 *
 * The interface requires an hrtime of the system at which the caller wrote
 * this data; this allows us to adjust the TSC and boot_hrtime slightly to
 * account for time passing between the userspace call and application
 * of the data here.
 *
 * There are several possibilities for invalid input, including:
 * - a requested guest frequency of 0, or a frequency otherwise unsupported
 *   by the underlying platform
 * - hrtime or boot_hrtime values that appear to be from the future
 * - the requested frequency does not match the host, and this system does
 *   not have hardware TSC scaling support
 */
static int
vmm_data_write_vmm_time(void *arg, const vmm_data_req_t *req)
{
	VERIFY3U(req->vdr_class, ==, VDC_VMM_TIME);
	VERIFY3U(req->vdr_version, ==, 1);
	VERIFY3U(req->vdr_len, >=, sizeof (struct vdi_time_info_v1));

	struct vm *vm = arg;
	const struct vdi_time_info_v1 *src = req->vdr_data;

	/*
	 * Platform-specific checks will verify the requested frequency
	 * against the supported range further, but a frequency of 0 is never
	 * valid.
	 */
	if (src->vt_guest_freq == 0) {
		return (EINVAL);
	}

	/*
	 * Check whether the requested frequency is supported and get the
	 * frequency multiplier.
	 */
	uint64_t mult = VM_TSCM_NOSCALE;
	freqratio_res_t res = ops->vmfreqratio(src->vt_guest_freq,
	    vmm_host_freq, &mult);
	switch (res) {
	case FR_SCALING_NOT_SUPPORTED:
		/*
		 * This system doesn't support TSC scaling, and the guest/host
		 * frequencies differ
		 */
		return (EPERM);
	case FR_OUT_OF_RANGE:
		/* Requested frequency ratio is too small/large */
		return (EINVAL);
	case FR_SCALING_NOT_NEEDED:
		/* Host and guest frequencies are the same */
		VERIFY3U(mult, ==, VM_TSCM_NOSCALE);
		break;
	case FR_VALID:
		VERIFY3U(mult, !=, VM_TSCM_NOSCALE);
		break;
	}

	/*
	 * Find (and validate) the hrtime delta between the input request and
	 * when we received it so that we can bump the TSC to account for time
	 * passing.
	 *
	 * We ignore the hrestime as input, as this is a field that
	 * exists for reads.
	 */
	uint64_t tsc;
	hrtime_t hrtime;
	timespec_t hrestime;
	vmm_time_snapshot(&tsc, &hrtime, &hrestime);
	if ((src->vt_hrtime > hrtime) || (src->vt_boot_hrtime > hrtime)) {
		/*
		 * The caller has passed in an hrtime / boot_hrtime from the
		 * future.
		 */
		return (EINVAL);
	}
	hrtime_t hrt_delta = hrtime - src->vt_hrtime;

	/* Calculate guest TSC adjustment */
	const uint64_t host_ticks = unscalehrtime(hrt_delta);
	const uint64_t guest_ticks = vmm_scale_tsc(host_ticks,
	    vm->freq_multiplier);
	const uint64_t base_guest_tsc = src->vt_guest_tsc + guest_ticks;

	/* Update guest time data */
	vm->freq_multiplier = mult;
	vm->guest_freq = src->vt_guest_freq;
	vm->boot_hrtime = src->vt_boot_hrtime;
	vm->tsc_offset = calc_tsc_offset(tsc, base_guest_tsc,
	    vm->freq_multiplier);

	return (0);
}

static const vmm_data_version_entry_t vmm_time_v1 = {
	.vdve_class = VDC_VMM_TIME,
	.vdve_version = 1,
	.vdve_len_expect = sizeof (struct vdi_time_info_v1),
	.vdve_readf = vmm_data_read_vmm_time,
	.vdve_writef = vmm_data_write_vmm_time,
};
VMM_DATA_VERSION(vmm_time_v1);
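
/*
 * Example: a migration consumer might adjust the time data read on the
 * source before writing it to the target. This is an illustrative sketch
 * only; `info` is a struct vdi_time_info_v1 read via a VDC_VMM_TIME data
 * read on the source, and `migrate_delta_ns` (derived from the wall clock
 * fields) and `uptime_ns` are hypothetical values computed by the consumer:
 *
 *	// credit the guest TSC with the ticks that elapsed in transit
 *	info.vt_guest_tsc += (migrate_delta_ns * info.vt_guest_freq) / NANOSEC;
 *
 *	// boot_hrtime becomes "now minus uptime" in the target's hrtime
 *	info.vt_boot_hrtime = gethrtime() - uptime_ns;
 *
 *	// record when this adjustment was made; the kernel uses this to
 *	// account for any further latency before the data is applied
 *	info.vt_hrtime = gethrtime();
 *
 * The adjusted structure would then be written back via a VDC_VMM_TIME data
 * write for vmm_data_write_vmm_time() above to apply.
 */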

static int
vmm_data_read_versions(void *arg, const vmm_data_req_t *req)
{
	VERIFY3U(req->vdr_class, ==, VDC_VERSION);
	VERIFY3U(req->vdr_version, ==, 1);

	const uint32_t total_size = SET_COUNT(vmm_data_version_entries) *
	    sizeof (struct vdi_version_entry_v1);

	/* Make sure there is room for all of the entries */
	*req->vdr_result_len = total_size;
	if (req->vdr_len < *req->vdr_result_len) {
		return (ENOSPC);
	}

	struct vdi_version_entry_v1 *entryp = req->vdr_data;
	const vmm_data_version_entry_t **vdpp;
	SET_FOREACH(vdpp, vmm_data_version_entries) {
		const vmm_data_version_entry_t *vdp = *vdpp;

		entryp->vve_class = vdp->vdve_class;
		entryp->vve_version = vdp->vdve_version;
		entryp->vve_len_expect = vdp->vdve_len_expect;
		entryp->vve_len_per_item = vdp->vdve_len_per_item;
		entryp++;
	}
	return (0);
}

static int
vmm_data_write_versions(void *arg, const vmm_data_req_t *req)
{
	/* Writing to the version information makes no sense */
	return (EPERM);
}

static const vmm_data_version_entry_t versions_v1 = {
	.vdve_class = VDC_VERSION,
	.vdve_version = 1,
	.vdve_len_per_item = sizeof (struct vdi_version_entry_v1),
	.vdve_readf = vmm_data_read_versions,
	.vdve_writef = vmm_data_write_versions,
};
VMM_DATA_VERSION(versions_v1);

int
vmm_data_read(struct vm *vm, int vcpuid, const vmm_data_req_t *req)
{
	int err = 0;

	const vmm_data_version_entry_t *entry = NULL;
	err = vmm_data_find(req, vcpuid, &entry);
	if (err != 0) {
		return (err);
	}
	ASSERT(entry != NULL);

	if (entry->vdve_readf != NULL) {
		void *datap = vmm_data_from_class(req, vm);

		err = entry->vdve_readf(datap, req);
	} else if (entry->vdve_vcpu_readf != NULL) {
		err = entry->vdve_vcpu_readf(vm, vcpuid, req);
	} else {
		err = EINVAL;
	}

	/*
	 * Successful reads of fixed-length data should populate the length of
	 * that result.
	 */
	if (err == 0 && entry->vdve_len_expect != 0) {
		*req->vdr_result_len = entry->vdve_len_expect;
	}

	return (err);
}

int
vmm_data_write(struct vm *vm, int vcpuid, const vmm_data_req_t *req)
{
	int err = 0;

	const vmm_data_version_entry_t *entry = NULL;
	err = vmm_data_find(req, vcpuid, &entry);
	if (err != 0) {
		return (err);
	}
	ASSERT(entry != NULL);

	if (entry->vdve_writef != NULL) {
		void *datap = vmm_data_from_class(req, vm);

		err = entry->vdve_writef(datap, req);
	} else if (entry->vdve_vcpu_writef != NULL) {
		err = entry->vdve_vcpu_writef(vm, vcpuid, req);
	} else {
		err = EINVAL;
	}

	/*
	 * Successful writes of fixed-length data should populate the length
	 * of that result.
	 */
	if (err == 0 && entry->vdve_len_expect != 0) {
		*req->vdr_result_len = entry->vdve_len_expect;
	}

	return (err);
}