/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2023 Oxide Computer Company
 * Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/kmem.h>
#include <sys/pcpu.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sunddi.h>
#include <sys/hma.h>
#include <sys/archsystm.h>

#include <machine/md_var.h>
#include <x86/psl.h>
#include <x86/apicreg.h>

#include <machine/specialreg.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmparam.h>
#include <sys/vmm_instruction_emul.h>
#include <sys/vmm_vm.h>
#include <sys/vmm_gpt.h>
#include <sys/vmm_data.h>

#include "vmm_ioport.h"
#include "vmm_host.h"
#include "vmm_util.h"
#include "vatpic.h"
#include "vatpit.h"
#include "vhpet.h"
#include "vioapic.h"
#include "vlapic.h"
#include "vpmtmr.h"
#include "vrtc.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

/* Flags for vtc_status */
#define	VTCS_FPU_RESTORED	1 /* guest FPU restored, host FPU saved */
#define	VTCS_FPU_CTX_CRITICAL	2 /* in ctx where FPU restore cannot be lazy */

typedef struct vm_thread_ctx {
	struct vm	*vtc_vm;
	int		vtc_vcpuid;
	uint_t		vtc_status;
	enum vcpu_ustate vtc_ustate;
} vm_thread_ctx_t;

#define	VMM_MTRR_VAR_MAX 10
#define	VMM_MTRR_DEF_MASK \
	(MTRR_DEF_ENABLE | MTRR_DEF_FIXED_ENABLE | MTRR_DEF_TYPE)
#define	VMM_MTRR_PHYSBASE_MASK	(MTRR_PHYSBASE_PHYSBASE | MTRR_PHYSBASE_TYPE)
#define	VMM_MTRR_PHYSMASK_MASK	(MTRR_PHYSMASK_PHYSMASK | MTRR_PHYSMASK_VALID)
struct vm_mtrr {
	uint64_t def_type;
	uint64_t fixed4k[8];
	uint64_t fixed16k[2];
	uint64_t fixed64k;
	struct {
		uint64_t base;
		uint64_t mask;
	} var[VMM_MTRR_VAR_MAX];
};

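/*
 * The variable-range MTRR MSRs come in (base, mask) pairs starting at
 * MSR_MTRRVarBase, so an even offset from that base selects a PHYSBASE
 * register and an odd offset selects the matching PHYSMASK.  A minimal
 * sketch of the decoding used by vm_rdmtrr()/vm_wrmtrr() later in this
 * file:
 *
 *	uint_t offset = num - MSR_MTRRVarBase;
 *	if (offset % 2 == 0)
 *		value maps to var[offset / 2].base;	(IA32_MTRR_PHYSBASEn)
 *	else
 *		value maps to var[offset / 2].mask;	(IA32_MTRR_PHYSMASKn)
 */
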
/*
 * Initialization:
 * (a) allocated when vcpu is created
 * (i) initialized when vcpu is created and when it is reinitialized
 * (o) initialized the first time the vcpu is created
 * (x) initialized before use
 */
struct vcpu {
	/* (o) protects state, run_state, hostcpu, sipi_vector */
	kmutex_t	lock;

	enum vcpu_state	state;		/* (o) vcpu state */
	enum vcpu_run_state run_state;	/* (i) vcpu init/sipi/run state */
	kcondvar_t	vcpu_cv;	/* (o) cpu waiter cv */
	kcondvar_t	state_cv;	/* (o) IDLE-transition cv */
	int		hostcpu;	/* (o) vcpu's current host cpu */
	int		lastloccpu;	/* (o) last host cpu localized to */
	int		reqidle;	/* (i) request vcpu to idle */
	struct vlapic	*vlapic;	/* (i) APIC device model */
	enum x2apic_state x2apic_state;	/* (i) APIC mode */
	uint64_t	exit_intinfo;	/* (i) events pending at VM exit */
	uint64_t	exc_pending;	/* (i) exception pending */
	bool		nmi_pending;	/* (i) NMI pending */
	bool		extint_pending;	/* (i) INTR pending */

	uint8_t		sipi_vector;	/* (i) SIPI vector */
	hma_fpu_t	*guestfpu;	/* (a,i) guest fpu state */
	uint64_t	guest_xcr0;	/* (i) guest %xcr0 register */
	void		*stats;		/* (a,i) statistics */
	struct vm_exit	exitinfo;	/* (x) exit reason and collateral */
	uint64_t	nextrip;	/* (x) next instruction to execute */
	struct vie	*vie_ctx;	/* (x) instruction emulation context */
	vm_client_t	*vmclient;	/* (a) VM-system client */
	uint64_t	tsc_offset;	/* (x) vCPU TSC offset */
	struct vm_mtrr	mtrr;		/* (i) vcpu's MTRR */
	vcpu_cpuid_config_t cpuid_cfg;	/* (x) cpuid configuration */

	enum vcpu_ustate ustate;	/* (i) microstate for the vcpu */
	hrtime_t	ustate_when;	/* (i) time of last ustate change */
	uint64_t ustate_total[VU_MAX];	/* (o) total time spent in ustates */
	vm_thread_ctx_t	vtc;		/* (o) thread state for ctxops */
	struct ctxop	*ctxop;		/* (o) ctxop storage for vcpu */
};

#define	vcpu_lock(v)		mutex_enter(&((v)->lock))
#define	vcpu_unlock(v)		mutex_exit(&((v)->lock))
#define	vcpu_assert_locked(v)	ASSERT(MUTEX_HELD(&((v)->lock)))

struct mem_seg {
	size_t	len;
	bool	sysmem;
	vm_object_t *object;
};
#define	VM_MAX_MEMSEGS	5

struct mem_map {
	vm_paddr_t	gpa;
	size_t		len;
	vm_ooffset_t	segoff;
	int		segid;
	int		prot;
	int		flags;
};
#define	VM_MAX_MEMMAPS	8

/*
 * Initialization:
 * (o) initialized the first time the VM is created
 * (i) initialized when VM is created and when it is reinitialized
 * (x) initialized before use
 */
struct vm {
	void		*cookie;	/* (i) cpu-specific data */
	void		*iommu;		/* (x) iommu-specific data */
	struct vhpet	*vhpet;		/* (i) virtual HPET */
	struct vioapic	*vioapic;	/* (i) virtual ioapic */
	struct vatpic	*vatpic;	/* (i) virtual atpic */
	struct vatpit	*vatpit;	/* (i) virtual atpit */
	struct vpmtmr	*vpmtmr;	/* (i) virtual ACPI PM timer */
	struct vrtc	*vrtc;		/* (o) virtual RTC */
	volatile cpuset_t active_cpus;	/* (i) active vcpus */
	volatile cpuset_t debug_cpus;	/* (i) vcpus stopped for dbg */
	int		suspend;	/* (i) stop VM execution */
	volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
	volatile cpuset_t halted_cpus;	/* (x) cpus in a hard halt */
	struct mem_map	mem_maps[VM_MAX_MEMMAPS]; /* (i) guest address space */
	struct mem_seg	mem_segs[VM_MAX_MEMSEGS]; /* (o) guest memory regions */
	struct vmspace	*vmspace;	/* (o) guest's address space */
	struct vcpu	vcpu[VM_MAXCPU]; /* (i) guest vcpus */
	/* The following describe the vm cpu topology */
	uint16_t	sockets;	/* (o) num of sockets */
	uint16_t	cores;		/* (o) num of cores/socket */
	uint16_t	threads;	/* (o) num of threads/core */
	uint16_t	maxcpus;	/* (o) max pluggable cpus */

	hrtime_t	boot_hrtime;	/* (i) hrtime at VM boot */

	/* TSC and TSC scaling related values */
	uint64_t	tsc_offset;	/* (i) VM-wide TSC offset */
	uint64_t	guest_freq;	/* (i) guest TSC frequency */
	uint64_t	freq_multiplier; /* (i) guest/host TSC ratio */

	struct ioport_config ioports;	/* (o) ioport handling */

	bool		mem_transient;	/* (o) alloc transient memory */
	bool		is_paused;	/* (i) instance is paused */
};

static int vmm_initialized;
static uint64_t vmm_host_freq;

static void
nullop_panic(void)
{
	panic("null vmm operation call");
}

/* Do not allow use of an un-set `ops` to do anything but panic */
static struct vmm_ops vmm_ops_null = {
	.init		= (vmm_init_func_t)nullop_panic,
	.cleanup	= (vmm_cleanup_func_t)nullop_panic,
	.resume		= (vmm_resume_func_t)nullop_panic,
	.vminit		= (vmi_init_func_t)nullop_panic,
	.vmrun		= (vmi_run_func_t)nullop_panic,
	.vmcleanup	= (vmi_cleanup_func_t)nullop_panic,
	.vmgetreg	= (vmi_get_register_t)nullop_panic,
	.vmsetreg	= (vmi_set_register_t)nullop_panic,
	.vmgetdesc	= (vmi_get_desc_t)nullop_panic,
	.vmsetdesc	= (vmi_set_desc_t)nullop_panic,
	.vmgetcap	= (vmi_get_cap_t)nullop_panic,
	.vmsetcap	= (vmi_set_cap_t)nullop_panic,
	.vlapic_init	= (vmi_vlapic_init)nullop_panic,
	.vlapic_cleanup	= (vmi_vlapic_cleanup)nullop_panic,
	.vmpause	= (vmi_pause_t)nullop_panic,
	.vmsavectx	= (vmi_savectx)nullop_panic,
	.vmrestorectx	= (vmi_restorectx)nullop_panic,
	.vmgetmsr	= (vmi_get_msr_t)nullop_panic,
	.vmsetmsr	= (vmi_set_msr_t)nullop_panic,
	.vmfreqratio	= (vmi_freqratio_t)nullop_panic,
	.fr_fracsize	= 0,
	.fr_intsize	= 0,
};

static struct vmm_ops *ops = &vmm_ops_null;
static vmm_pte_ops_t *pte_ops = NULL;

#define	VMM_INIT()			((*ops->init)())
#define	VMM_CLEANUP()			((*ops->cleanup)())
#define	VMM_RESUME()			((*ops->resume)())

#define	VMINIT(vm)			((*ops->vminit)(vm))
#define	VMRUN(vmi, vcpu, rip)		((*ops->vmrun)(vmi, vcpu, rip))
#define	VMCLEANUP(vmi)			((*ops->vmcleanup)(vmi))

#define	VMGETREG(vmi, vcpu, num, rv)	((*ops->vmgetreg)(vmi, vcpu, num, rv))
#define	VMSETREG(vmi, vcpu, num, val)	((*ops->vmsetreg)(vmi, vcpu, num, val))
#define	VMGETDESC(vmi, vcpu, num, dsc)	((*ops->vmgetdesc)(vmi, vcpu, num, dsc))
#define	VMSETDESC(vmi, vcpu, num, dsc)	((*ops->vmsetdesc)(vmi, vcpu, num, dsc))
#define	VMGETCAP(vmi, vcpu, num, rv)	((*ops->vmgetcap)(vmi, vcpu, num, rv))
#define	VMSETCAP(vmi, vcpu, num, val)	((*ops->vmsetcap)(vmi, vcpu, num, val))
#define	VLAPIC_INIT(vmi, vcpu)		((*ops->vlapic_init)(vmi, vcpu))
#define	VLAPIC_CLEANUP(vmi, vlapic)	((*ops->vlapic_cleanup)(vmi, vlapic))

#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	fpu_stop_emulating()	clts()

SDT_PROVIDER_DEFINE(vmm);

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

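/*
 * A minimal sketch of the backend dispatch above: `ops` starts out pointing
 * at vmm_ops_null, so any VMM_*() macro expansion reached before a real
 * backend has been installed lands in nullop_panic().  Once vmm_init()
 * (below) has selected a backend, the indirection resolves to the
 * hardware-appropriate table, e.g. on a VT-x host:
 *
 *	ops = &vmm_ops_intel;
 *	error = VMM_INIT();	expands to (*ops->init)() and now
 *				reaches the Intel backend's init routine
 */
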
/*
 * Halt the guest if all vcpus are executing a HLT instruction with
 * interrupts disabled.
 */
int halt_detection_enabled = 1;

/* Trap into hypervisor on all guest exceptions and reflect them back */
int trace_guest_exceptions;

/* Trap WBINVD and ignore it */
int trap_wbinvd = 1;

static void vm_free_memmap(struct vm *vm, int ident);
static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
static void vcpu_notify_event_locked(struct vcpu *vcpu, vcpu_notify_t);
static bool vcpu_sleep_bailout_checks(struct vm *vm, int vcpuid);
static int vcpu_vector_sipi(struct vm *vm, int vcpuid, uint8_t vector);

static void vmm_savectx(void *);
static void vmm_restorectx(void *);
static const struct ctxop_template vmm_ctxop_tpl = {
	.ct_rev		= CTXOP_TPL_REV,
	.ct_save	= vmm_savectx,
	.ct_restore	= vmm_restorectx,
};

static uint64_t calc_tsc_offset(uint64_t base_host_tsc,
    uint64_t base_guest_tsc, uint64_t mult);
static uint64_t calc_guest_tsc(uint64_t host_tsc, uint64_t mult,
    uint64_t offset);

/* functions implemented in vmm_time_support.S */
uint64_t calc_freq_multiplier(uint64_t guest_hz, uint64_t host_hz,
    uint32_t frac_size);
uint64_t scale_tsc(uint64_t tsc, uint64_t multiplier, uint32_t frac_size);

#ifdef KTR
static const char *
vcpu_state2str(enum vcpu_state state)
{
	switch (state) {
	case VCPU_IDLE:
		return ("idle");
	case VCPU_FROZEN:
		return ("frozen");
	case VCPU_RUNNING:
		return ("running");
	case VCPU_SLEEPING:
		return ("sleeping");
	default:
		return ("unknown");
	}
}
#endif

static void
vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
	struct vcpu *vcpu = &vm->vcpu[i];

	VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
	if (destroy) {
		vmm_stat_free(vcpu->stats);

		vcpu_cpuid_cleanup(&vcpu->cpuid_cfg);

		hma_fpu_free(vcpu->guestfpu);
		vcpu->guestfpu = NULL;

		vie_free(vcpu->vie_ctx);
		vcpu->vie_ctx = NULL;

		vmc_destroy(vcpu->vmclient);
		vcpu->vmclient = NULL;

		ctxop_free(vcpu->ctxop);
		mutex_destroy(&vcpu->lock);
	}
}

static void
vcpu_init(struct vm *vm, int vcpu_id, bool create)
{
	struct vcpu *vcpu;

	KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus,
	    ("vcpu_init: invalid vcpu %d", vcpu_id));

	vcpu = &vm->vcpu[vcpu_id];

	if (create) {
		mutex_init(&vcpu->lock, NULL, MUTEX_ADAPTIVE, NULL);

		vcpu->state = VCPU_IDLE;
		vcpu->hostcpu = NOCPU;
		vcpu->lastloccpu = NOCPU;
		vcpu->guestfpu = hma_fpu_alloc(KM_SLEEP);
		vcpu->stats = vmm_stat_alloc();
		vcpu->vie_ctx = vie_alloc();
		vcpu_cpuid_init(&vcpu->cpuid_cfg);

		vcpu->ustate = VU_INIT;
		vcpu->ustate_when = gethrtime();

		vcpu->vtc.vtc_vm = vm;
		vcpu->vtc.vtc_vcpuid = vcpu_id;
		vcpu->ctxop = ctxop_allocate(&vmm_ctxop_tpl, &vcpu->vtc);
	} else {
		vie_reset(vcpu->vie_ctx);
		bzero(&vcpu->exitinfo, sizeof (vcpu->exitinfo));
		if (vcpu->ustate != VU_INIT) {
			vcpu_ustate_change(vm, vcpu_id, VU_INIT);
		}
		bzero(&vcpu->mtrr, sizeof (vcpu->mtrr));
	}

	vcpu->run_state = VRS_HALT;
	vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
	(void) vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
	vcpu->reqidle = 0;
	vcpu->exit_intinfo = 0;
	vcpu->nmi_pending = false;
	vcpu->extint_pending = false;
	vcpu->exc_pending = 0;
	vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
	(void) hma_fpu_init(vcpu->guestfpu);
	vmm_stat_init(vcpu->stats);
	vcpu->tsc_offset = 0;
}

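/*
 * Context-switch hooks: ctxop_allocate() above binds vmm_savectx() and
 * vmm_restorectx() (defined later in this file) to the vCPU thread via
 * vmm_ctxop_tpl, so the FPU and microstate handling they perform runs
 * whenever that thread migrates off or back onto a host CPU.  A rough
 * sketch of the resulting callback ordering during a context switch:
 *
 *	thread goes off-cpu  -> vmm_savectx(&vcpu->vtc)
 *	    (guest FPU saved if loaded, host FPU restored,
 *	    ustate -> VU_SCHED)
 *	thread back on-cpu   -> vmm_restorectx(&vcpu->vtc)
 *	    (prior ustate restored; guest FPU reload deferred to vm_run)
 */
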
int
vcpu_trace_exceptions(struct vm *vm, int vcpuid)
{
	return (trace_guest_exceptions);
}

int
vcpu_trap_wbinvd(struct vm *vm, int vcpuid)
{
	return (trap_wbinvd);
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= vm->maxcpus)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}

struct vie *
vm_vie_ctx(struct vm *vm, int cpuid)
{
	if (cpuid < 0 || cpuid >= vm->maxcpus)
		panic("vm_vie_ctx: invalid cpuid %d", cpuid);

	return (vm->vcpu[cpuid].vie_ctx);
}

static int
vmm_init(void)
{
	vmm_host_state_init();
	vmm_host_freq = unscalehrtime(NANOSEC);

	if (vmm_is_intel()) {
		ops = &vmm_ops_intel;
		pte_ops = &ept_pte_ops;
	} else if (vmm_is_svm()) {
		ops = &vmm_ops_amd;
		pte_ops = &rvi_pte_ops;
	} else {
		return (ENXIO);
	}

	return (VMM_INIT());
}

int
vmm_mod_load()
{
	int error;

	VERIFY(vmm_initialized == 0);

	error = vmm_init();
	if (error == 0)
		vmm_initialized = 1;

	return (error);
}

int
vmm_mod_unload()
{
	int error;

	VERIFY(vmm_initialized == 1);

	error = VMM_CLEANUP();
	if (error)
		return (error);
	vmm_initialized = 0;

	return (0);
}

/*
 * Create a test IOMMU domain to see if the host system has the necessary
 * hardware and drivers to do so.
 */
bool
vmm_check_iommu(void)
{
	void *domain;
	const size_t arb_test_sz = (1UL << 32);

	domain = iommu_create_domain(arb_test_sz);
	if (domain == NULL) {
		return (false);
	}
	iommu_destroy_domain(domain);
	return (true);
}

static void
vm_init(struct vm *vm, bool create)
{
	int i;

	vm->cookie = VMINIT(vm);
	vm->iommu = NULL;
	vm->vioapic = vioapic_init(vm);
	vm->vhpet = vhpet_init(vm);
	vm->vatpic = vatpic_init(vm);
	vm->vatpit = vatpit_init(vm);
	vm->vpmtmr = vpmtmr_init(vm);
	if (create)
		vm->vrtc = vrtc_init(vm);

	vm_inout_init(vm, &vm->ioports);

	CPU_ZERO(&vm->active_cpus);
	CPU_ZERO(&vm->debug_cpus);

	vm->suspend = 0;
	CPU_ZERO(&vm->suspended_cpus);

	for (i = 0; i < vm->maxcpus; i++)
		vcpu_init(vm, i, create);

	/*
	 * Configure VM time-related data, including:
	 * - VM-wide TSC offset
	 * - boot_hrtime
	 * - guest_freq (same as host at boot time)
	 * - freq_multiplier (used for scaling)
	 *
	 * This data is configured such that the call to vm_init() represents
	 * the boot time (when the TSC(s) read 0).  Each vCPU will have its own
	 * offset from this, which is altered if/when the guest writes to
	 * MSR_TSC.
	 *
	 * Further changes to this data may occur if userspace writes to the
	 * time data.
	 */
	const uint64_t boot_tsc = rdtsc_offset();

	/* Convert the boot TSC reading to hrtime */
	vm->boot_hrtime = (hrtime_t)boot_tsc;
	scalehrtime(&vm->boot_hrtime);

	/* Guest frequency is the same as the host at boot time */
	vm->guest_freq = vmm_host_freq;

	/* No scaling needed if guest_freq == host_freq */
	vm->freq_multiplier = VM_TSCM_NOSCALE;

	/* Configure VM-wide offset: initial guest TSC is 0 at boot */
	vm->tsc_offset = calc_tsc_offset(boot_tsc, 0, vm->freq_multiplier);
}

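/*
 * A sketch of the offset arithmetic, inferred from the declarations above
 * and the MSR handlers below: calc_guest_tsc() computes
 *
 *	guest_tsc = scale_tsc(host_tsc, mult) + offset
 *
 * so calc_tsc_offset() is effectively its inverse, producing the offset
 * which makes a given host TSC reading correspond to a desired guest TSC
 * reading:
 *
 *	offset = base_guest_tsc - scale_tsc(base_host_tsc, mult)
 *
 * In vm_init() above, base_guest_tsc is 0 (the guest TSC reads 0 at boot),
 * so the VM-wide offset is the negated (scaled) boot-time host TSC, carried
 * in two's complement.
 */
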
/*
 * The default CPU topology is a single thread per package.
 */
uint_t cores_per_package = 1;
uint_t threads_per_core = 1;

int
vm_create(uint64_t flags, struct vm **retvm)
{
	struct vm *vm;
	struct vmspace *vmspace;

	/*
	 * If vmm.ko could not be successfully initialized then don't attempt
	 * to create the virtual machine.
	 */
	if (!vmm_initialized)
		return (ENXIO);

	bool track_dirty = (flags & VCF_TRACK_DIRTY) != 0;
	if (track_dirty && !pte_ops->vpeo_hw_ad_supported())
		return (ENOTSUP);

	vmspace = vmspace_alloc(VM_MAXUSER_ADDRESS, pte_ops, track_dirty);
	if (vmspace == NULL)
		return (ENOMEM);

	vm = kmem_zalloc(sizeof (struct vm), KM_SLEEP);

	vm->vmspace = vmspace;
	vm->mem_transient = (flags & VCF_RESERVOIR_MEM) == 0;
	for (uint_t i = 0; i < VM_MAXCPU; i++) {
		vm->vcpu[i].vmclient = vmspace_client_alloc(vmspace);
	}

	vm->sockets = 1;
	vm->cores = cores_per_package;	/* XXX backwards compatibility */
	vm->threads = threads_per_core;	/* XXX backwards compatibility */
	vm->maxcpus = VM_MAXCPU;	/* XXX temp to keep code working */

	vm_init(vm, true);

	*retvm = vm;
	return (0);
}

void
vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
    uint16_t *threads, uint16_t *maxcpus)
{
	*sockets = vm->sockets;
	*cores = vm->cores;
	*threads = vm->threads;
	*maxcpus = vm->maxcpus;
}

uint16_t
vm_get_maxcpus(struct vm *vm)
{
	return (vm->maxcpus);
}

int
vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
    uint16_t threads, uint16_t maxcpus)
{
	if (maxcpus != 0)
		return (EINVAL);	/* XXX remove when supported */
	if ((sockets * cores * threads) > vm->maxcpus)
		return (EINVAL);
	/* XXX need to check sockets * cores * threads == vCPU, how? */
	vm->sockets = sockets;
	vm->cores = cores;
	vm->threads = threads;
	vm->maxcpus = VM_MAXCPU;	/* XXX temp to keep code working */
	return (0);
}

static void
vm_cleanup(struct vm *vm, bool destroy)
{
	struct mem_map *mm;
	int i;

	ppt_unassign_all(vm);

	if (vm->iommu != NULL)
		iommu_destroy_domain(vm->iommu);

	/*
	 * Devices which attach their own ioport hooks should be cleaned up
	 * first so they can tear down those registrations.
	 */
	vpmtmr_cleanup(vm->vpmtmr);

	vm_inout_cleanup(vm, &vm->ioports);

	if (destroy)
		vrtc_cleanup(vm->vrtc);
	else
		vrtc_reset(vm->vrtc);

	vatpit_cleanup(vm->vatpit);
	vhpet_cleanup(vm->vhpet);
	vatpic_cleanup(vm->vatpic);
	vioapic_cleanup(vm->vioapic);

	for (i = 0; i < vm->maxcpus; i++)
		vcpu_cleanup(vm, i, destroy);

	VMCLEANUP(vm->cookie);

	/*
	 * System memory is removed from the guest address space only when
	 * the VM is destroyed.  This is because the mapping remains the same
	 * across VM reset.
	 *
	 * Device memory can be relocated by the guest (e.g. using PCI BARs)
	 * so those mappings are removed on a VM reset.
	 */
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm->mem_maps[i];
		if (destroy || !sysmem_mapping(vm, mm)) {
			vm_free_memmap(vm, i);
		} else {
			/*
			 * We need to reset the IOMMU flag so this mapping can
			 * be reused when a VM is rebooted.  Since the IOMMU
			 * domain has already been destroyed we can just reset
			 * the flag here.
			 */
			mm->flags &= ~VM_MEMMAP_F_IOMMU;
		}
	}

	if (destroy) {
		for (i = 0; i < VM_MAX_MEMSEGS; i++)
			vm_free_memseg(vm, i);

		vmspace_destroy(vm->vmspace);
		vm->vmspace = NULL;
	}
}

void
vm_destroy(struct vm *vm)
{
	vm_cleanup(vm, true);
	kmem_free(vm, sizeof (*vm));
}

int
vm_reinit(struct vm *vm, uint64_t flags)
{
	/* A virtual machine can be reset only if all vcpus are suspended. */
	if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) != 0) {
		if ((flags & VM_REINIT_F_FORCE_SUSPEND) == 0) {
			return (EBUSY);
		}

		/*
		 * Force the VM (and all its vCPUs) into a suspended state.
		 * This should be quick and easy, since the vm_reinit() call is
		 * made while holding the VM write lock, which requires holding
		 * all of the vCPUs in the VCPU_FROZEN state.
		 */
		(void) atomic_cmpset_int((uint_t *)&vm->suspend, 0,
		    VM_SUSPEND_RESET);
		for (uint_t i = 0; i < vm->maxcpus; i++) {
			struct vcpu *vcpu = &vm->vcpu[i];

			if (CPU_ISSET(i, &vm->suspended_cpus) ||
			    !CPU_ISSET(i, &vm->active_cpus)) {
				continue;
			}

			vcpu_lock(vcpu);
			VERIFY3U(vcpu->state, ==, VCPU_FROZEN);
			CPU_SET_ATOMIC(i, &vm->suspended_cpus);
			vcpu_unlock(vcpu);
		}

		VERIFY0(CPU_CMP(&vm->suspended_cpus, &vm->active_cpus));
	}

	vm_cleanup(vm, false);
	vm_init(vm, false);
	return (0);
}

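/*
 * Taken together, the create/reset/destroy lifecycle pairs up as follows
 * (a sketch of the call patterns above, not additional logic):
 *
 *	vm_create()  -> vm_init(vm, true)	full init, vRTC allocated
 *	vm_reinit()  -> vm_cleanup(vm, false)	devices reset; sysmem maps
 *			+ vm_init(vm, false)	and memsegs preserved
 *	vm_destroy() -> vm_cleanup(vm, true)	everything torn down and
 *						the vmspace destroyed
 */
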
bool
vm_is_paused(struct vm *vm)
{
	return (vm->is_paused);
}

int
vm_pause_instance(struct vm *vm)
{
	if (vm->is_paused) {
		return (EALREADY);
	}
	vm->is_paused = true;

	for (uint_t i = 0; i < vm->maxcpus; i++) {
		struct vcpu *vcpu = &vm->vcpu[i];

		if (!CPU_ISSET(i, &vm->active_cpus)) {
			continue;
		}
		vlapic_pause(vcpu->vlapic);

		/*
		 * vCPU-specific pause logic includes stashing any
		 * to-be-injected events in exit_intinfo where it can be
		 * accessed in a manner generic to the backend.
		 */
		ops->vmpause(vm->cookie, i);
	}
	vhpet_pause(vm->vhpet);
	vatpit_pause(vm->vatpit);
	vrtc_pause(vm->vrtc);

	return (0);
}

int
vm_resume_instance(struct vm *vm)
{
	if (!vm->is_paused) {
		return (EALREADY);
	}
	vm->is_paused = false;

	vrtc_resume(vm->vrtc);
	vatpit_resume(vm->vatpit);
	vhpet_resume(vm->vhpet);
	for (uint_t i = 0; i < vm->maxcpus; i++) {
		struct vcpu *vcpu = &vm->vcpu[i];

		if (!CPU_ISSET(i, &vm->active_cpus)) {
			continue;
		}
		vlapic_resume(vcpu->vlapic);
	}

	return (0);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	vm_object_t *obj;

	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
		return (ENOMEM);
	else
		return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	return (vmspace_unmap(vm->vmspace, gpa, gpa + len));
}

/*
 * Return 'true' if 'gpa' is allocated in the guest address space.
 *
 * This function is called in the context of a running vcpu which acts as
 * an implicit lock on 'vm->mem_maps[]'.
 */
bool
vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa)
{
	struct mem_map *mm;
	int i;

#ifdef INVARIANTS
	int hostcpu, state;
	state = vcpu_get_state(vm, vcpuid, &hostcpu);
	KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
	    ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
#endif

	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm->mem_maps[i];
		if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
			return (true);	/* 'gpa' is sysmem or devmem */
	}

	if (ppt_is_mmio(vm, gpa))
		return (true);	/* 'gpa' is pci passthru mmio */

	return (false);
}

int
vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem)
{
	struct mem_seg *seg;
	vm_object_t *obj;

	if (ident < 0 || ident >= VM_MAX_MEMSEGS)
		return (EINVAL);

	if (len == 0 || (len & PAGE_MASK))
		return (EINVAL);

	seg = &vm->mem_segs[ident];
	if (seg->object != NULL) {
		if (seg->len == len && seg->sysmem == sysmem)
			return (EEXIST);
		else
			return (EINVAL);
	}

	obj = vm_object_mem_allocate(len, vm->mem_transient);
	if (obj == NULL)
		return (ENOMEM);

	seg->len = len;
	seg->object = obj;
	seg->sysmem = sysmem;
	return (0);
}

int
vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    vm_object_t **objptr)
{
	struct mem_seg *seg;

	if (ident < 0 || ident >= VM_MAX_MEMSEGS)
		return (EINVAL);

	seg = &vm->mem_segs[ident];
	if (len)
		*len = seg->len;
	if (sysmem)
		*sysmem = seg->sysmem;
	if (objptr)
		*objptr = seg->object;
	return (0);
}

void
vm_free_memseg(struct vm *vm, int ident)
{
	struct mem_seg *seg;

	KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS,
	    ("%s: invalid memseg ident %d", __func__, ident));

	seg = &vm->mem_segs[ident];
	if (seg->object != NULL) {
		vm_object_release(seg->object);
		bzero(seg, sizeof (struct mem_seg));
	}
}

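/*
 * Relationship between the two memory tables used by the routines above and
 * below: a mem_seg is a backing object (sysmem or devmem) identified by
 * 'ident'/'segid', while a mem_map places some range of a segment into the
 * guest physical address space.  A hypothetical caller wiring 1 MiB of
 * sysmem at GPA 0 would do roughly:
 *
 *	vm_alloc_memseg(vm, 0, 0x100000, true);
 *	vm_mmap_memseg(vm, 0, 0, 0, 0x100000, PROT_ALL, VM_MEMMAP_F_WIRED);
 *
 * with the second call recording { gpa = 0, segid = 0, segoff = 0,
 * len = 0x100000 } in vm->mem_maps[].
 */
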
int
vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
    size_t len, int prot, int flags)
{
	struct mem_seg *seg;
	struct mem_map *m, *map;
	vm_ooffset_t last;
	int i, error;

	if (prot == 0 || (prot & ~(PROT_ALL)) != 0)
		return (EINVAL);

	if (flags & ~VM_MEMMAP_F_WIRED)
		return (EINVAL);

	if (segid < 0 || segid >= VM_MAX_MEMSEGS)
		return (EINVAL);

	seg = &vm->mem_segs[segid];
	if (seg->object == NULL)
		return (EINVAL);

	last = first + len;
	if (first < 0 || first >= last || last > seg->len)
		return (EINVAL);

	if ((gpa | first | last) & PAGE_MASK)
		return (EINVAL);

	map = NULL;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		m = &vm->mem_maps[i];
		if (m->len == 0) {
			map = m;
			break;
		}
	}

	if (map == NULL)
		return (ENOSPC);

	error = vmspace_map(vm->vmspace, seg->object, first, gpa, len, prot);
	if (error != 0)
		return (EFAULT);

	vm_object_reference(seg->object);

	if ((flags & VM_MEMMAP_F_WIRED) != 0) {
		error = vmspace_populate(vm->vmspace, gpa, gpa + len);
		if (error != 0) {
			VERIFY0(vmspace_unmap(vm->vmspace, gpa, gpa + len));
			return (EFAULT);
		}
	}

	map->gpa = gpa;
	map->len = len;
	map->segoff = first;
	map->segid = segid;
	map->prot = prot;
	map->flags = flags;
	return (0);
}

int
vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	struct mem_map *m;
	int i;

	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		m = &vm->mem_maps[i];
		if (m->gpa == gpa && m->len == len &&
		    (m->flags & VM_MEMMAP_F_IOMMU) == 0) {
			vm_free_memmap(vm, i);
			return (0);
		}
	}

	return (EINVAL);
}

int
vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
{
	struct mem_map *mm, *mmnext;
	int i;

	mmnext = NULL;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm->mem_maps[i];
		if (mm->len == 0 || mm->gpa < *gpa)
			continue;
		if (mmnext == NULL || mm->gpa < mmnext->gpa)
			mmnext = mm;
	}

	if (mmnext != NULL) {
		*gpa = mmnext->gpa;
		if (segid)
			*segid = mmnext->segid;
		if (segoff)
			*segoff = mmnext->segoff;
		if (len)
			*len = mmnext->len;
		if (prot)
			*prot = mmnext->prot;
		if (flags)
			*flags = mmnext->flags;
		return (0);
	} else {
		return (ENOENT);
	}
}

static void
vm_free_memmap(struct vm *vm, int ident)
{
	struct mem_map *mm;
	int error;

	mm = &vm->mem_maps[ident];
	if (mm->len) {
		error = vmspace_unmap(vm->vmspace, mm->gpa,
		    mm->gpa + mm->len);
		KASSERT(error == 0, ("%s: vmspace_unmap error %d",
		    __func__, error));
		bzero(mm, sizeof (struct mem_map));
	}
}

static __inline bool
sysmem_mapping(struct vm *vm, struct mem_map *mm)
{
	if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem)
		return (true);
	else
		return (false);
}

vm_paddr_t
vmm_sysmem_maxaddr(struct vm *vm)
{
	struct mem_map *mm;
	vm_paddr_t maxaddr;
	int i;

	maxaddr = 0;
	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm->mem_maps[i];
		if (sysmem_mapping(vm, mm)) {
			if (maxaddr < mm->gpa + mm->len)
				maxaddr = mm->gpa + mm->len;
		}
	}
	return (maxaddr);
}

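/*
 * vm_mmap_getnext() returns the lowest-addressed mapping at or above *gpa,
 * so a caller can walk the (unsorted) mem_maps[] table in GPA order with a
 * loop along these lines (a usage sketch, not code from this file):
 *
 *	vm_paddr_t gpa = 0;
 *	size_t len;
 *
 *	while (vm_mmap_getnext(vm, &gpa, NULL, NULL, &len, NULL,
 *	    NULL) == 0) {
 *		// ... examine the mapping [gpa, gpa + len) ...
 *		gpa += len;
 *	}
 */
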
static void
vm_iommu_modify(struct vm *vm, bool map)
{
	int i, sz;
	vm_paddr_t gpa, hpa;
	struct mem_map *mm;
	vm_client_t *vmc;

	sz = PAGE_SIZE;
	vmc = vmspace_client_alloc(vm->vmspace);

	for (i = 0; i < VM_MAX_MEMMAPS; i++) {
		mm = &vm->mem_maps[i];
		if (!sysmem_mapping(vm, mm))
			continue;

		if (map) {
			KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0,
			    ("iommu map found invalid memmap %lx/%lx/%x",
			    mm->gpa, mm->len, mm->flags));
			if ((mm->flags & VM_MEMMAP_F_WIRED) == 0)
				continue;
			mm->flags |= VM_MEMMAP_F_IOMMU;
		} else {
			if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0)
				continue;
			mm->flags &= ~VM_MEMMAP_F_IOMMU;
			KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0,
			    ("iommu unmap found invalid memmap %lx/%lx/%x",
			    mm->gpa, mm->len, mm->flags));
		}

		gpa = mm->gpa;
		while (gpa < mm->gpa + mm->len) {
			vm_page_t *vmp;

			vmp = vmc_hold(vmc, gpa, PROT_WRITE);
			ASSERT(vmp != NULL);
			hpa = ((uintptr_t)vmp_get_pfn(vmp) << PAGESHIFT);
			(void) vmp_release(vmp);

			/*
			 * When originally ported from FreeBSD, the logic for
			 * adding memory to the guest domain would
			 * simultaneously remove it from the host domain.  The
			 * justification for that is not clear, and FreeBSD has
			 * subsequently changed the behavior to not remove the
			 * memory from the host domain.
			 *
			 * Leaving the guest memory in the host domain for the
			 * life of the VM is necessary to make it available for
			 * DMA, such as through viona in the TX path.
			 */
			if (map) {
				iommu_create_mapping(vm->iommu, gpa, hpa, sz);
			} else {
				iommu_remove_mapping(vm->iommu, gpa, sz);
			}

			gpa += PAGE_SIZE;
		}
	}
	vmc_destroy(vmc);

	/*
	 * Invalidate the cached translations associated with the domain
	 * from which pages were removed.
	 */
	iommu_invalidate_tlb(vm->iommu);
}

int
vm_unassign_pptdev(struct vm *vm, int pptfd)
{
	int error;

	error = ppt_unassign_device(vm, pptfd);
	if (error)
		return (error);

	if (ppt_assigned_devices(vm) == 0)
		vm_iommu_modify(vm, false);

	return (0);
}

int
vm_assign_pptdev(struct vm *vm, int pptfd)
{
	int error;
	vm_paddr_t maxaddr;

	/* Set up the IOMMU to do the 'gpa' to 'hpa' translation */
	if (ppt_assigned_devices(vm) == 0) {
		KASSERT(vm->iommu == NULL,
		    ("vm_assign_pptdev: iommu must be NULL"));
		maxaddr = vmm_sysmem_maxaddr(vm);
		vm->iommu = iommu_create_domain(maxaddr);
		if (vm->iommu == NULL)
			return (ENXIO);
		vm_iommu_modify(vm, true);
	}

	error = ppt_assign_device(vm, pptfd);
	return (error);
}

int
vm_get_register(struct vm *vm, int vcpuid, int reg, uint64_t *retval)
{
	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	switch (reg) {
	case VM_REG_GUEST_XCR0:
		*retval = vcpu->guest_xcr0;
		return (0);
	default:
		return (VMGETREG(vm->cookie, vcpuid, reg, retval));
	}
}

int
vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
{
	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	int error;
	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	switch (reg) {
	case VM_REG_GUEST_RIP:
		error = VMSETREG(vm->cookie, vcpuid, reg, val);
		if (error == 0) {
			vcpu->nextrip = val;
		}
		return (error);
	case VM_REG_GUEST_XCR0:
		if (!validate_guest_xcr0(val, vmm_get_host_xcr0())) {
			return (EINVAL);
		}
		vcpu->guest_xcr0 = val;
		return (0);
	default:
		return (VMSETREG(vm->cookie, vcpuid, reg, val));
	}
}

static bool
is_descriptor_table(int reg)
{
	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (true);
	default:
		return (false);
	}
}

static bool
is_segment_register(int reg)
{
	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (true);
	default:
		return (false);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg, struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= vm->maxcpus)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg, const struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= vm->maxcpus)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

static int
translate_hma_xsave_result(hma_fpu_xsave_result_t res)
{
	switch (res) {
	case HFXR_OK:
		return (0);
	case HFXR_NO_SPACE:
		return (ENOSPC);
	case HFXR_BAD_ALIGN:
	case HFXR_UNSUP_FMT:
	case HFXR_UNSUP_FEAT:
	case HFXR_INVALID_DATA:
		return (EINVAL);
	default:
		panic("unexpected xsave result");
	}
}

int
vm_get_fpu(struct vm *vm, int vcpuid, void *buf, size_t len)
{
	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	hma_fpu_xsave_result_t res;

	res = hma_fpu_get_xsave_state(vcpu->guestfpu, buf, len);
	return (translate_hma_xsave_result(res));
}

int
vm_set_fpu(struct vm *vm, int vcpuid, void *buf, size_t len)
{
	if (vcpuid < 0 || vcpuid >= vm->maxcpus)
		return (EINVAL);

	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	hma_fpu_xsave_result_t res;

	res = hma_fpu_set_xsave_state(vcpu->guestfpu, buf, len);
	return (translate_hma_xsave_result(res));
}

int
vm_get_run_state(struct vm *vm, int vcpuid, uint32_t *state, uint8_t *sipi_vec)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus) {
		return (EINVAL);
	}

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	*state = vcpu->run_state;
	*sipi_vec = vcpu->sipi_vector;
	vcpu_unlock(vcpu);

	return (0);
}

int
vm_set_run_state(struct vm *vm, int vcpuid, uint32_t state, uint8_t sipi_vec)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= vm->maxcpus) {
		return (EINVAL);
	}
	if (!VRS_IS_VALID(state)) {
		return (EINVAL);
	}

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	vcpu->run_state = state;
	vcpu->sipi_vector = sipi_vec;
	vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT);
	vcpu_unlock(vcpu);

	return (0);
}

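/*
 * The run-state written here is consumed by vm_handle_run_state() later in
 * this file.  As a hedged illustration (the exact flag combinations
 * permitted are governed by VRS_IS_VALID()), a caller delivering a startup
 * vector to a halted AP might request:
 *
 *	(void) vm_set_run_state(vm, vcpuid, VRS_INIT | VRS_PEND_SIPI,
 *	    vector);
 *
 * after which the notified vCPU thread performs the architectural reset
 * and vector load before entering the guest.
 */
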
int
vm_track_dirty_pages(struct vm *vm, uint64_t gpa, size_t len, uint8_t *bitmap)
{
	vmspace_t *vms = vm_get_vmspace(vm);
	return (vmspace_track_dirty(vms, gpa, len, bitmap));
}

static void
restore_guest_fpustate(struct vcpu *vcpu)
{
	/* Save host FPU and restore guest FPU */
	fpu_stop_emulating();
	hma_fpu_start_guest(vcpu->guestfpu);

	/* restore guest XCR0 if XSAVE is enabled in the host */
	if (rcr4() & CR4_XSAVE)
		load_xcr(0, vcpu->guest_xcr0);

	/*
	 * The FPU is now "dirty" with the guest's state so turn on emulation
	 * to trap any access to the FPU by the host.
	 */
	fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{
	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest XCR0 and restore host XCR0 */
	if (rcr4() & CR4_XSAVE) {
		vcpu->guest_xcr0 = rxcr(0);
		load_xcr(0, vmm_get_host_xcr0());
	}

	/* save guest FPU and restore host FPU */
	fpu_stop_emulating();
	hma_fpu_stop_guest(vcpu->guestfpu);
	/*
	 * When the host state has been restored, we should not re-enable
	 * CR0.TS on illumos for eager FPU.
	 */
}

static int
vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
	struct vcpu *vcpu;
	int error;

	vcpu = &vm->vcpu[vcpuid];
	vcpu_assert_locked(vcpu);

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state.  This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE) {
			vcpu->reqidle = 1;
			vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT);
			cv_wait(&vcpu->state_cv, &vcpu->lock);
		}
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
		    "vcpu idle state"));
	}

	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
		    "mismatch for running vcpu", curcpu, vcpu->hostcpu));
	} else {
		KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
		    "vcpu that is not running", vcpu->hostcpu));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	default:
		error = 1;
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;
	if (newstate == VCPU_RUNNING)
		vcpu->hostcpu = curcpu;
	else
		vcpu->hostcpu = NOCPU;

	if (newstate == VCPU_IDLE) {
		cv_broadcast(&vcpu->state_cv);
	}

	return (0);
}

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state_locked(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}

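/*
 * The save/restore pair above brackets the VMRUN critical section.  A
 * sketch of the intended sequence, and of how CR0.TS is used to catch
 * mistakes:
 *
 *	restore_guest_fpustate(vcpu);	guest FPU loaded; CR0.TS set so
 *					any host FPU use traps (#NM)
 *	... enter guest via VMRUN ...
 *	save_guest_fpustate(vcpu);	guest FPU stashed; host FPU and
 *					%xcr0 restored; CR0.TS left clear
 *
 * The vmm_savectx()/vmm_restorectx() hooks later in this file perform the
 * same pair on behalf of a thread context-switched inside the critical
 * section.
 */
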
/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled)
{
	struct vcpu *vcpu;
	int vcpu_halted, vm_halted;
	bool userspace_exit = false;

	KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));

	vcpu = &vm->vcpu[vcpuid];
	vcpu_halted = 0;
	vm_halted = 0;

	vcpu_lock(vcpu);
	while (1) {
		/*
		 * Do a final check for pending interrupts (including NMI and
		 * INIT) before putting this thread to sleep.
		 */
		if (vm_nmi_pending(vm, vcpuid))
			break;
		if (vcpu_run_state_pending(vm, vcpuid))
			break;
		if (!intr_disabled) {
			if (vm_extint_pending(vm, vcpuid) ||
			    vlapic_pending_intr(vcpu->vlapic, NULL)) {
				break;
			}
		}

		/*
		 * Also check for software events which would cause a wake-up.
		 * This will set the appropriate exitcode directly, rather than
		 * requiring a trip through VM_RUN().
		 */
		if (vcpu_sleep_bailout_checks(vm, vcpuid)) {
			userspace_exit = true;
			break;
		}

		/*
		 * Some Linux guests implement "halt" by having all vcpus
		 * execute HLT with interrupts disabled.  'halted_cpus' keeps
		 * track of the vcpus that have entered this state.  When all
		 * vcpus enter the halted state the virtual machine is halted.
		 */
		if (intr_disabled) {
			if (!vcpu_halted && halt_detection_enabled) {
				vcpu_halted = 1;
				CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
			}
			if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
				vm_halted = 1;
				break;
			}
		}

		vcpu_ustate_change(vm, vcpuid, VU_IDLE);
		vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
		(void) cv_wait_sig(&vcpu->vcpu_cv, &vcpu->lock);
		vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
		vcpu_ustate_change(vm, vcpuid, VU_EMU_KERN);
	}

	if (vcpu_halted)
		CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);

	vcpu_unlock(vcpu);

	if (vm_halted) {
		(void) vm_suspend(vm, VM_SUSPEND_HALT);
	}

	return (userspace_exit ? -1 : 0);
}

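/*
 * The vm_handle_*() routines in this file share a return convention: 0
 * means the event was fully handled in-kernel and the vCPU loop may
 * re-enter the guest, a negative value means the populated exitinfo must
 * be delivered to userspace, and a positive value is an errno.  Sketched
 * against the HLT handler above:
 *
 *	handled = vm_handle_hlt(vm, vcpuid, intr_disabled);
 *	if (handled == 0)
 *		re-enter the guest;
 *	else
 *		return the exit to the VM_RUN ioctl caller;
 */
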
static int
vm_handle_paging(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	vm_client_t *vmc = vcpu->vmclient;
	struct vm_exit *vme = &vcpu->exitinfo;
	const int ftype = vme->u.paging.fault_type;

	ASSERT0(vme->inst_length);
	ASSERT(ftype == PROT_READ || ftype == PROT_WRITE || ftype == PROT_EXEC);

	if (vmc_fault(vmc, vme->u.paging.gpa, ftype) != 0) {
		/*
		 * If the fault cannot be serviced, kick it out to userspace
		 * for handling (or more likely, halting the instance).
		 */
		return (-1);
	}

	return (0);
}

int
vm_service_mmio_read(struct vm *vm, int cpuid, uint64_t gpa, uint64_t *rval,
    int rsize)
{
	int err = ESRCH;

	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		struct vlapic *vlapic = vm_lapic(vm, cpuid);

		err = vlapic_mmio_read(vlapic, gpa, rval, rsize);
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		err = vioapic_mmio_read(vm, cpuid, gpa, rval, rsize);
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		err = vhpet_mmio_read(vm, cpuid, gpa, rval, rsize);
	}

	return (err);
}

int
vm_service_mmio_write(struct vm *vm, int cpuid, uint64_t gpa, uint64_t wval,
    int wsize)
{
	int err = ESRCH;

	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		struct vlapic *vlapic = vm_lapic(vm, cpuid);

		err = vlapic_mmio_write(vlapic, gpa, wval, wsize);
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		err = vioapic_mmio_write(vm, cpuid, gpa, wval, wsize);
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		err = vhpet_mmio_write(vm, cpuid, gpa, wval, wsize);
	}

	return (err);
}

static int
vm_handle_mmio_emul(struct vm *vm, int vcpuid)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	uint64_t inst_addr;
	int error, fault, cs_d;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
	vie = vcpu->vie_ctx;

	KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
	    __func__, vme->inst_length));

	inst_addr = vme->rip + vme->u.mmio_emul.cs_base;
	cs_d = vme->u.mmio_emul.cs_d;

	/* Fetch the faulting instruction */
	if (vie_needs_fetch(vie)) {
		error = vie_fetch_instruction(vie, vm, vcpuid, inst_addr,
		    &fault);
		if (error != 0) {
			return (error);
		} else if (fault) {
			/*
			 * If a fault during instruction fetch was encountered,
			 * it will have asserted that the appropriate exception
			 * be injected at next entry.
			 * No further work is required.
			 */
			return (0);
		}
	}

	if (vie_decode_instruction(vie, vm, vcpuid, cs_d) != 0) {
		/* Dump (unrecognized) instruction bytes in userspace */
		vie_fallback_exitinfo(vie, vme);
		return (-1);
	}
	if (vme->u.mmio_emul.gla != VIE_INVALID_GLA &&
	    vie_verify_gla(vie, vm, vcpuid, vme->u.mmio_emul.gla) != 0) {
		/* Decoded GLA does not match GLA from VM exit state */
		vie_fallback_exitinfo(vie, vme);
		return (-1);
	}

repeat:
	error = vie_emulate_mmio(vie, vm, vcpuid);
	if (error < 0) {
		/*
		 * MMIO not handled by any of the in-kernel-emulated devices,
		 * so make a trip out to userspace for it.
		 */
		vie_exitinfo(vie, vme);
	} else if (error == EAGAIN) {
		/*
		 * Continue emulating the rep-prefixed instruction, which has
		 * not completed its iterations.
		 *
		 * In case this can be emulated in-kernel and has a high
		 * repetition count (causing a tight spin), it should be
		 * deferential to yield conditions.
		 */
		if (!vcpu_should_yield(vm, vcpuid)) {
			goto repeat;
		} else {
			/*
			 * Defer to the contending load by making a trip to
			 * userspace with a no-op (BOGUS) exit reason.
			 */
			vie_reset(vie);
			vme->exitcode = VM_EXITCODE_BOGUS;
			return (-1);
		}
	} else if (error == 0) {
		/* Update %rip now that instruction has been emulated */
		vie_advance_pc(vie, &vcpu->nextrip);
	}
	return (error);
}

static int
vm_handle_inout(struct vm *vm, int vcpuid, struct vm_exit *vme)
{
	struct vcpu *vcpu;
	struct vie *vie;
	int err;

	vcpu = &vm->vcpu[vcpuid];
	vie = vcpu->vie_ctx;

repeat:
	err = vie_emulate_inout(vie, vm, vcpuid);

	if (err < 0) {
		/*
		 * In/out not handled by any of the in-kernel-emulated devices,
		 * so make a trip out to userspace for it.
		 */
		vie_exitinfo(vie, vme);
		return (err);
	} else if (err == EAGAIN) {
		/*
		 * Continue emulating the rep-prefixed ins/outs, which has not
		 * completed its iterations.
		 *
		 * In case this can be emulated in-kernel and has a high
		 * repetition count (causing a tight spin), it should be
		 * deferential to yield conditions.
		 */
		if (!vcpu_should_yield(vm, vcpuid)) {
			goto repeat;
		} else {
			/*
			 * Defer to the contending load by making a trip to
			 * userspace with a no-op (BOGUS) exit reason.
			 */
			vie_reset(vie);
			vme->exitcode = VM_EXITCODE_BOGUS;
			return (-1);
		}
	} else if (err != 0) {
		/* Emulation failure.  Bail all the way out to userspace. */
		vme->exitcode = VM_EXITCODE_INST_EMUL;
		bzero(&vme->u.inst_emul, sizeof (vme->u.inst_emul));
		return (-1);
	}

	vie_advance_pc(vie, &vcpu->nextrip);
	return (0);
}

static int
vm_handle_inst_emul(struct vm *vm, int vcpuid)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	uint64_t cs_base;
	int error, fault, cs_d;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
	vie = vcpu->vie_ctx;

	vie_cs_info(vie, vm, vcpuid, &cs_base, &cs_d);

	/* Fetch the faulting instruction */
	ASSERT(vie_needs_fetch(vie));
	error = vie_fetch_instruction(vie, vm, vcpuid, vme->rip + cs_base,
	    &fault);
	if (error != 0) {
		return (error);
	} else if (fault) {
		/*
		 * If a fault during instruction fetch was encountered, it will
		 * have asserted that the appropriate exception be injected at
		 * next entry.  No further work is required.
		 */
		return (0);
	}

	if (vie_decode_instruction(vie, vm, vcpuid, cs_d) != 0) {
		/* Dump (unrecognized) instruction bytes in userspace */
		vie_fallback_exitinfo(vie, vme);
		return (-1);
	}

	error = vie_emulate_other(vie, vm, vcpuid);
	if (error != 0) {
		/*
		 * Instruction emulation was unable to complete successfully,
		 * so kick it out to userspace for handling.
		 */
		vie_fallback_exitinfo(vie, vme);
	} else {
		/* Update %rip now that instruction has been emulated */
		vie_advance_pc(vie, &vcpu->nextrip);
	}
	return (error);
}

static int
vm_handle_suspend(struct vm *vm, int vcpuid)
{
	int i;
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpuid];

	CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);

	/*
	 * Wait until all 'active_cpus' have suspended themselves.
	 */
	vcpu_lock(vcpu);
	vcpu_ustate_change(vm, vcpuid, VU_INIT);
	while (1) {
		int rc;

		if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
			break;
		}

		vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
		rc = cv_reltimedwait_sig(&vcpu->vcpu_cv, &vcpu->lock, hz,
		    TR_CLOCK_TICK);
		vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);

		/*
		 * If the userspace process driving the instance is killed, any
		 * vCPUs yet to be marked suspended (because they are not
		 * VM_RUN-ing in the kernel presently) will never reach that
		 * state.
		 *
		 * To avoid vm_handle_suspend() getting stuck in the kernel
		 * waiting for those vCPUs, offer a bail-out even though it
		 * means returning without all vCPUs in a suspended state.
		 */
		if (rc <= 0) {
			if ((curproc->p_flag & SEXITING) != 0) {
				break;
			}
		}
	}
	vcpu_unlock(vcpu);

	/*
	 * Wake up the other sleeping vcpus and return to userspace.
	 */
	for (i = 0; i < vm->maxcpus; i++) {
		if (CPU_ISSET(i, &vm->suspended_cpus)) {
			vcpu_notify_event(vm, i);
		}
	}

	return (-1);
}

static int
vm_handle_reqidle(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
	vcpu->reqidle = 0;
	vcpu_unlock(vcpu);
	return (-1);
}

static int
vm_handle_run_state(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];
	bool handled = false;

	vcpu_lock(vcpu);
	while (1) {
		if ((vcpu->run_state & VRS_PEND_INIT) != 0) {
			vcpu_unlock(vcpu);
			VERIFY0(vcpu_arch_reset(vm, vcpuid, true));
			vcpu_lock(vcpu);

			vcpu->run_state &= ~(VRS_RUN | VRS_PEND_INIT);
			vcpu->run_state |= VRS_INIT;
		}

		if ((vcpu->run_state & (VRS_INIT | VRS_RUN | VRS_PEND_SIPI)) ==
		    (VRS_INIT | VRS_PEND_SIPI)) {
			const uint8_t vector = vcpu->sipi_vector;

			vcpu_unlock(vcpu);
			VERIFY0(vcpu_vector_sipi(vm, vcpuid, vector));
			vcpu_lock(vcpu);

			vcpu->run_state &= ~VRS_PEND_SIPI;
			vcpu->run_state |= VRS_RUN;
		}

		/*
		 * If the vCPU is now in the running state, there is no need to
		 * wait for anything prior to re-entry.
		 */
		if ((vcpu->run_state & VRS_RUN) != 0) {
			handled = true;
			break;
		}

		/*
		 * Also check for software events which would cause a wake-up.
		 * This will set the appropriate exitcode directly, rather than
		 * requiring a trip through VM_RUN().
		 */
		if (vcpu_sleep_bailout_checks(vm, vcpuid)) {
			break;
		}

		vcpu_ustate_change(vm, vcpuid, VU_IDLE);
		vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
		(void) cv_wait_sig(&vcpu->vcpu_cv, &vcpu->lock);
		vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
		vcpu_ustate_change(vm, vcpuid, VU_EMU_KERN);
	}
	vcpu_unlock(vcpu);

	return (handled ? 0 : -1);
}

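/*
 * The loop above implements the classic INIT/SIPI bring-up sequence in
 * terms of the VRS_* flags.  A sketch of the progression for an AP being
 * started by the guest:
 *
 *	VRS_HALT
 *	    | INIT pending (VRS_PEND_INIT set)
 *	    v
 *	VRS_INIT		vcpu_arch_reset() applied
 *	    | SIPI pending (VRS_PEND_SIPI set, sipi_vector recorded)
 *	    v
 *	VRS_INIT | VRS_RUN	vcpu_vector_sipi() aims the vCPU at the
 *				startup vector; the vCPU is now runnable
 */
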
static int
vm_rdmtrr(const struct vm_mtrr *mtrr, uint32_t num, uint64_t *val)
{
	switch (num) {
	case MSR_MTRRcap:
		*val = MTRR_CAP_WC | MTRR_CAP_FIXED | VMM_MTRR_VAR_MAX;
		break;
	case MSR_MTRRdefType:
		*val = mtrr->def_type;
		break;
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
		*val = mtrr->fixed4k[num - MSR_MTRR4kBase];
		break;
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
		*val = mtrr->fixed16k[num - MSR_MTRR16kBase];
		break;
	case MSR_MTRR64kBase:
		*val = mtrr->fixed64k;
		break;
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: {
		uint_t offset = num - MSR_MTRRVarBase;
		if (offset % 2 == 0) {
			*val = mtrr->var[offset / 2].base;
		} else {
			*val = mtrr->var[offset / 2].mask;
		}
		break;
	}
	default:
		return (-1);
	}

	return (0);
}

static int
vm_wrmtrr(struct vm_mtrr *mtrr, uint32_t num, uint64_t val)
{
	switch (num) {
	case MSR_MTRRcap:
		/* MTRRCAP is read only */
		return (-1);
	case MSR_MTRRdefType:
		if (val & ~VMM_MTRR_DEF_MASK) {
			/* generate #GP on writes to reserved fields */
			return (-1);
		}
		mtrr->def_type = val;
		break;
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
		mtrr->fixed4k[num - MSR_MTRR4kBase] = val;
		break;
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
		mtrr->fixed16k[num - MSR_MTRR16kBase] = val;
		break;
	case MSR_MTRR64kBase:
		mtrr->fixed64k = val;
		break;
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: {
		uint_t offset = num - MSR_MTRRVarBase;
		if (offset % 2 == 0) {
			if (val & ~VMM_MTRR_PHYSBASE_MASK) {
				/* generate #GP on writes to reserved fields */
				return (-1);
			}
			mtrr->var[offset / 2].base = val;
		} else {
			if (val & ~VMM_MTRR_PHYSMASK_MASK) {
				/* generate #GP on writes to reserved fields */
				return (-1);
			}
			mtrr->var[offset / 2].mask = val;
		}
		break;
	}
	default:
		return (-1);
	}

	return (0);
}

static bool
is_mtrr_msr(uint32_t msr)
{
	switch (msr) {
	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
		return (true);
	default:
		return (false);
	}
}

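/*
 * The two MSR-exit handlers below follow the x86 RDMSR/WRMSR register
 * convention: a 64-bit value is split across %edx:%eax.  For example, a
 * RDMSR result of val = 0x1122334455667788 is delivered as
 *
 *	%rax <- 0x55667788	(val & 0xffffffff)
 *	%rdx <- 0x11223344	(val >> 32)
 *
 * For MSR_TSC writes, with scaling disabled and small numbers chosen for
 * readability: if the current guest TSC reads 1000 and the guest executes
 * WRMSR(MSR_TSC, 5000), the per-vCPU offset becomes 5000 - 1000 = 4000,
 * and a later read at guest TSC 1100 yields 1100 + 4000 = 5100, i.e. the
 * written value plus the time elapsed since the write.
 */
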
2121 */ 2122 val = calc_guest_tsc(rdtsc_offset(), vm->freq_multiplier, 2123 vcpu_tsc_offset(vm, vcpuid, false)); 2124 break; 2125 2126 default: 2127 /* 2128 * Anything not handled at this point will be kicked out to 2129 * userspace for attempted processing there. 2130 */ 2131 return (-1); 2132 } 2133 2134 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RAX, 2135 val & 0xffffffff)); 2136 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RDX, 2137 val >> 32)); 2138 return (0); 2139 } 2140 2141 static int 2142 vm_handle_wrmsr(struct vm *vm, int vcpuid, struct vm_exit *vme) 2143 { 2144 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2145 const uint32_t code = vme->u.msr.code; 2146 const uint64_t val = vme->u.msr.wval; 2147 2148 switch (code) { 2149 case MSR_MCG_CAP: 2150 case MSR_MCG_STATUS: 2151 /* Ignore writes */ 2152 break; 2153 2154 case MSR_MTRRcap: 2155 case MSR_MTRRdefType: 2156 case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7: 2157 case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1: 2158 case MSR_MTRR64kBase: 2159 case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: 2160 if (vm_wrmtrr(&vcpu->mtrr, code, val) != 0) 2161 vm_inject_gp(vm, vcpuid); 2162 break; 2163 2164 case MSR_TSC: 2165 /* 2166 * The effect of writing the TSC MSR is that a subsequent read 2167 * of the TSC would report that value written (plus any time 2168 * elapsed between the write and the read). 2169 * 2170 * To calculate that per-vCPU offset, we can work backwards from 2171 * the guest TSC at the time of write: 2172 * 2173 * value = current guest TSC + vCPU offset 2174 * 2175 * so therefore: 2176 * 2177 * value - current guest TSC = vCPU offset 2178 */ 2179 vcpu->tsc_offset = val - calc_guest_tsc(rdtsc_offset(), 2180 vm->freq_multiplier, vm->tsc_offset); 2181 break; 2182 2183 default: 2184 /* 2185 * Anything not handled at this point will be kicked out to 2186 * userspace for attempted processing there. 2187 */ 2188 return (-1); 2189 } 2190 2191 return (0); 2192 } 2193 2194 int 2195 vm_suspend(struct vm *vm, enum vm_suspend_how how) 2196 { 2197 if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST) 2198 return (EINVAL); 2199 2200 if (atomic_cmpset_int((uint_t *)&vm->suspend, 0, how) == 0) { 2201 return (EALREADY); 2202 } 2203 2204 /* 2205 * Notify all active vcpus that they are now suspended. 2206 */ 2207 for (uint_t i = 0; i < vm->maxcpus; i++) { 2208 struct vcpu *vcpu = &vm->vcpu[i]; 2209 2210 vcpu_lock(vcpu); 2211 if (vcpu->state == VCPU_IDLE || vcpu->state == VCPU_FROZEN) { 2212 /* 2213 * Any vCPUs not actively running or in HLT can be 2214 * marked as suspended immediately. 2215 */ 2216 if (CPU_ISSET(i, &vm->active_cpus)) { 2217 CPU_SET_ATOMIC(i, &vm->suspended_cpus); 2218 } 2219 } else { 2220 /* 2221 * Those which are running or in HLT will pick up the 2222 * suspended state after notification. 2223 */ 2224 vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT); 2225 } 2226 vcpu_unlock(vcpu); 2227 } 2228 return (0); 2229 } 2230 2231 void 2232 vm_exit_run_state(struct vm *vm, int vcpuid, uint64_t rip) 2233 { 2234 struct vm_exit *vmexit; 2235 2236 vmexit = vm_exitinfo(vm, vcpuid); 2237 vmexit->rip = rip; 2238 vmexit->inst_length = 0; 2239 vmexit->exitcode = VM_EXITCODE_RUN_STATE; 2240 vmm_stat_incr(vm, vcpuid, VMEXIT_RUN_STATE, 1); 2241 } 2242 2243 /* 2244 * Some vmm resources, such as the lapic, may have CPU-specific resources 2245 * allocated to them which would benefit from migration onto the host CPU which 2246 * is processing the vcpu state. 
2247 */ 2248 static void 2249 vm_localize_resources(struct vm *vm, struct vcpu *vcpu) 2250 { 2251 /* 2252 * Localizing cyclic resources requires acquisition of cpu_lock, and 2253 * doing so with kpreempt disabled is a recipe for deadlock disaster. 2254 */ 2255 VERIFY(curthread->t_preempt == 0); 2256 2257 /* 2258 * Do not bother with localization if this vCPU is about to return to 2259 * the host CPU it was last localized to. 2260 */ 2261 if (vcpu->lastloccpu == curcpu) 2262 return; 2263 2264 /* 2265 * Localize system-wide resources to the primary boot vCPU. While any 2266 * of the other vCPUs may access them, it keeps the potential interrupt 2267 * footprint constrained to CPUs involved with this instance. 2268 */ 2269 if (vcpu == &vm->vcpu[0]) { 2270 vhpet_localize_resources(vm->vhpet); 2271 vrtc_localize_resources(vm->vrtc); 2272 vatpit_localize_resources(vm->vatpit); 2273 } 2274 2275 vlapic_localize_resources(vcpu->vlapic); 2276 2277 vcpu->lastloccpu = curcpu; 2278 } 2279 2280 static void 2281 vmm_savectx(void *arg) 2282 { 2283 vm_thread_ctx_t *vtc = arg; 2284 struct vm *vm = vtc->vtc_vm; 2285 const int vcpuid = vtc->vtc_vcpuid; 2286 2287 if (ops->vmsavectx != NULL) { 2288 ops->vmsavectx(vm->cookie, vcpuid); 2289 } 2290 2291 /* 2292 * Account for going off-cpu, unless the vCPU is idled, where being 2293 * off-cpu is the explicit point. 2294 */ 2295 if (vm->vcpu[vcpuid].ustate != VU_IDLE) { 2296 vtc->vtc_ustate = vm->vcpu[vcpuid].ustate; 2297 vcpu_ustate_change(vm, vcpuid, VU_SCHED); 2298 } 2299 2300 /* 2301 * If the CPU holds the restored guest FPU state, save it and restore 2302 * the host FPU state before this thread goes off-cpu. 2303 */ 2304 if ((vtc->vtc_status & VTCS_FPU_RESTORED) != 0) { 2305 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2306 2307 save_guest_fpustate(vcpu); 2308 vtc->vtc_status &= ~VTCS_FPU_RESTORED; 2309 } 2310 } 2311 2312 static void 2313 vmm_restorectx(void *arg) 2314 { 2315 vm_thread_ctx_t *vtc = arg; 2316 struct vm *vm = vtc->vtc_vm; 2317 const int vcpuid = vtc->vtc_vcpuid; 2318 2319 /* Complete microstate accounting for vCPU being off-cpu */ 2320 if (vm->vcpu[vcpuid].ustate != VU_IDLE) { 2321 vcpu_ustate_change(vm, vcpuid, vtc->vtc_ustate); 2322 } 2323 2324 /* 2325 * When coming back on-cpu, only restore the guest FPU status if the 2326 * thread is in a context marked as requiring it. This should be rare, 2327 * occurring only when a future logic error results in a voluntary 2328 * sleep during the VMRUN critical section. 2329 * 2330 * The common case will result in elision of the guest FPU state 2331 * restoration, deferring that action until it is clearly necessary 2332 * during vm_run. 
2333 */ 2334 VERIFY((vtc->vtc_status & VTCS_FPU_RESTORED) == 0); 2335 if ((vtc->vtc_status & VTCS_FPU_CTX_CRITICAL) != 0) { 2336 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2337 2338 restore_guest_fpustate(vcpu); 2339 vtc->vtc_status |= VTCS_FPU_RESTORED; 2340 } 2341 2342 if (ops->vmrestorectx != NULL) { 2343 ops->vmrestorectx(vm->cookie, vcpuid); 2344 } 2345 2346 } 2347 2348 static int 2349 vm_entry_actions(struct vm *vm, int vcpuid, const struct vm_entry *entry, 2350 struct vm_exit *vme) 2351 { 2352 struct vcpu *vcpu; 2353 struct vie *vie; 2354 int err; 2355 2356 vcpu = &vm->vcpu[vcpuid]; 2357 vie = vcpu->vie_ctx; 2358 err = 0; 2359 2360 switch (entry->cmd) { 2361 case VEC_DEFAULT: 2362 return (0); 2363 case VEC_DISCARD_INSTR: 2364 vie_reset(vie); 2365 return (0); 2366 case VEC_FULFILL_MMIO: 2367 err = vie_fulfill_mmio(vie, &entry->u.mmio); 2368 if (err == 0) { 2369 err = vie_emulate_mmio(vie, vm, vcpuid); 2370 if (err == 0) { 2371 vie_advance_pc(vie, &vcpu->nextrip); 2372 } else if (err < 0) { 2373 vie_exitinfo(vie, vme); 2374 } else if (err == EAGAIN) { 2375 /* 2376 * Clear the instruction emulation state in 2377 * order to re-enter VM context and continue 2378 * this 'rep <instruction>' 2379 */ 2380 vie_reset(vie); 2381 err = 0; 2382 } 2383 } 2384 break; 2385 case VEC_FULFILL_INOUT: 2386 err = vie_fulfill_inout(vie, &entry->u.inout); 2387 if (err == 0) { 2388 err = vie_emulate_inout(vie, vm, vcpuid); 2389 if (err == 0) { 2390 vie_advance_pc(vie, &vcpu->nextrip); 2391 } else if (err < 0) { 2392 vie_exitinfo(vie, vme); 2393 } else if (err == EAGAIN) { 2394 /* 2395 * Clear the instruction emulation state in 2396 * order to re-enter VM context and continue 2397 * this 'rep ins/outs' 2398 */ 2399 vie_reset(vie); 2400 err = 0; 2401 } 2402 } 2403 break; 2404 default: 2405 return (EINVAL); 2406 } 2407 return (err); 2408 } 2409 2410 static int 2411 vm_loop_checks(struct vm *vm, int vcpuid, struct vm_exit *vme) 2412 { 2413 struct vie *vie; 2414 2415 vie = vm->vcpu[vcpuid].vie_ctx; 2416 2417 if (vie_pending(vie)) { 2418 /* 2419 * Userspace has not fulfilled the pending needs of the 2420 * instruction emulation, so bail back out. 2421 */ 2422 vie_exitinfo(vie, vme); 2423 return (-1); 2424 } 2425 2426 return (0); 2427 } 2428 2429 int 2430 vm_run(struct vm *vm, int vcpuid, const struct vm_entry *entry) 2431 { 2432 int error; 2433 struct vcpu *vcpu; 2434 struct vm_exit *vme; 2435 bool intr_disabled; 2436 int affinity_type = CPU_CURRENT; 2437 2438 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2439 return (EINVAL); 2440 if (!CPU_ISSET(vcpuid, &vm->active_cpus)) 2441 return (EINVAL); 2442 if (vm->is_paused) { 2443 return (EBUSY); 2444 } 2445 2446 vcpu = &vm->vcpu[vcpuid]; 2447 vme = &vcpu->exitinfo; 2448 2449 vcpu_ustate_change(vm, vcpuid, VU_EMU_KERN); 2450 2451 vcpu->vtc.vtc_status = 0; 2452 ctxop_attach(curthread, vcpu->ctxop); 2453 2454 error = vm_entry_actions(vm, vcpuid, entry, vme); 2455 if (error != 0) { 2456 goto exit; 2457 } 2458 2459 restart: 2460 error = vm_loop_checks(vm, vcpuid, vme); 2461 if (error != 0) { 2462 goto exit; 2463 } 2464 2465 thread_affinity_set(curthread, affinity_type); 2466 /* 2467 * Resource localization should happen after the CPU affinity for the 2468 * thread has been set to ensure that access from restricted contexts, 2469 * such as VMX-accelerated APIC operations, can occur without inducing 2470 * cyclic cross-calls. 2471 * 2472 * This must be done prior to disabling kpreempt via critical_enter(). 
2473 */ 2474 vm_localize_resources(vm, vcpu); 2475 affinity_type = CPU_CURRENT; 2476 critical_enter(); 2477 2478 /* Force a trip through update_sregs to reload %fs/%gs and friends */ 2479 PCB_SET_UPDATE_SEGS(&ttolwp(curthread)->lwp_pcb); 2480 2481 if ((vcpu->vtc.vtc_status & VTCS_FPU_RESTORED) == 0) { 2482 restore_guest_fpustate(vcpu); 2483 vcpu->vtc.vtc_status |= VTCS_FPU_RESTORED; 2484 } 2485 vcpu->vtc.vtc_status |= VTCS_FPU_CTX_CRITICAL; 2486 2487 vcpu_require_state(vm, vcpuid, VCPU_RUNNING); 2488 error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip); 2489 vcpu_require_state(vm, vcpuid, VCPU_FROZEN); 2490 2491 /* 2492 * Once clear of the delicate contexts comprising the VM_RUN handler, 2493 * thread CPU affinity can be loosened while other processing occurs. 2494 */ 2495 vcpu->vtc.vtc_status &= ~VTCS_FPU_CTX_CRITICAL; 2496 thread_affinity_clear(curthread); 2497 critical_exit(); 2498 2499 if (error != 0) { 2500 /* Communicate out any error from VMRUN() above */ 2501 goto exit; 2502 } 2503 2504 vcpu->nextrip = vme->rip + vme->inst_length; 2505 switch (vme->exitcode) { 2506 case VM_EXITCODE_REQIDLE: 2507 error = vm_handle_reqidle(vm, vcpuid); 2508 break; 2509 case VM_EXITCODE_RUN_STATE: 2510 error = vm_handle_run_state(vm, vcpuid); 2511 break; 2512 case VM_EXITCODE_SUSPENDED: 2513 error = vm_handle_suspend(vm, vcpuid); 2514 break; 2515 case VM_EXITCODE_IOAPIC_EOI: 2516 vioapic_process_eoi(vm, vcpuid, 2517 vme->u.ioapic_eoi.vector); 2518 break; 2519 case VM_EXITCODE_HLT: 2520 intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0); 2521 error = vm_handle_hlt(vm, vcpuid, intr_disabled); 2522 break; 2523 case VM_EXITCODE_PAGING: 2524 error = vm_handle_paging(vm, vcpuid); 2525 break; 2526 case VM_EXITCODE_MMIO_EMUL: 2527 error = vm_handle_mmio_emul(vm, vcpuid); 2528 break; 2529 case VM_EXITCODE_INOUT: 2530 error = vm_handle_inout(vm, vcpuid, vme); 2531 break; 2532 case VM_EXITCODE_INST_EMUL: 2533 error = vm_handle_inst_emul(vm, vcpuid); 2534 break; 2535 case VM_EXITCODE_MONITOR: 2536 case VM_EXITCODE_MWAIT: 2537 case VM_EXITCODE_VMINSN: 2538 vm_inject_ud(vm, vcpuid); 2539 break; 2540 case VM_EXITCODE_RDMSR: 2541 error = vm_handle_rdmsr(vm, vcpuid, vme); 2542 break; 2543 case VM_EXITCODE_WRMSR: 2544 error = vm_handle_wrmsr(vm, vcpuid, vme); 2545 break; 2546 case VM_EXITCODE_HT: 2547 affinity_type = CPU_BEST; 2548 break; 2549 case VM_EXITCODE_MTRAP: 2550 VERIFY0(vm_suspend_cpu(vm, vcpuid)); 2551 error = -1; 2552 break; 2553 default: 2554 /* handled in userland */ 2555 error = -1; 2556 break; 2557 } 2558 2559 if (error == 0) { 2560 /* VM exit conditions handled in-kernel, continue running */ 2561 goto restart; 2562 } 2563 2564 exit: 2565 kpreempt_disable(); 2566 ctxop_detach(curthread, vcpu->ctxop); 2567 /* Make sure all of the needed vCPU context state is saved */ 2568 vmm_savectx(&vcpu->vtc); 2569 kpreempt_enable(); 2570 2571 vcpu_ustate_change(vm, vcpuid, VU_EMU_USER); 2572 return (error); 2573 } 2574 2575 int 2576 vm_restart_instruction(void *arg, int vcpuid) 2577 { 2578 struct vm *vm; 2579 struct vcpu *vcpu; 2580 enum vcpu_state state; 2581 uint64_t rip; 2582 int error; 2583 2584 vm = arg; 2585 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2586 return (EINVAL); 2587 2588 vcpu = &vm->vcpu[vcpuid]; 2589 state = vcpu_get_state(vm, vcpuid, NULL); 2590 if (state == VCPU_RUNNING) { 2591 /* 2592 * When a vcpu is "running" the next instruction is determined 2593 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'. 
2594 * Thus setting 'inst_length' to zero will cause the current 2595 * instruction to be restarted. 2596 */ 2597 vcpu->exitinfo.inst_length = 0; 2598 } else if (state == VCPU_FROZEN) { 2599 /* 2600 * When a vcpu is "frozen" it is outside the critical section 2601 * around VMRUN() and 'nextrip' points to the next instruction. 2602 * Thus instruction restart is achieved by setting 'nextrip' 2603 * to the vcpu's %rip. 2604 */ 2605 error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip); 2606 KASSERT(!error, ("%s: error %d getting rip", __func__, error)); 2607 vcpu->nextrip = rip; 2608 } else { 2609 panic("%s: invalid state %d", __func__, state); 2610 } 2611 return (0); 2612 } 2613 2614 int 2615 vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info) 2616 { 2617 struct vcpu *vcpu; 2618 2619 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2620 return (EINVAL); 2621 2622 vcpu = &vm->vcpu[vcpuid]; 2623 2624 if (VM_INTINFO_PENDING(info)) { 2625 const uint32_t type = VM_INTINFO_TYPE(info); 2626 const uint8_t vector = VM_INTINFO_VECTOR(info); 2627 2628 if (type == VM_INTINFO_NMI && vector != IDT_NMI) 2629 return (EINVAL); 2630 if (type == VM_INTINFO_HWEXCP && vector >= 32) 2631 return (EINVAL); 2632 if (info & VM_INTINFO_MASK_RSVD) 2633 return (EINVAL); 2634 } else { 2635 info = 0; 2636 } 2637 vcpu->exit_intinfo = info; 2638 return (0); 2639 } 2640 2641 enum exc_class { 2642 EXC_BENIGN, 2643 EXC_CONTRIBUTORY, 2644 EXC_PAGEFAULT 2645 }; 2646 2647 #define IDT_VE 20 /* Virtualization Exception (Intel specific) */ 2648 2649 static enum exc_class 2650 exception_class(uint64_t info) 2651 { 2652 ASSERT(VM_INTINFO_PENDING(info)); 2653 2654 /* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */ 2655 switch (VM_INTINFO_TYPE(info)) { 2656 case VM_INTINFO_HWINTR: 2657 case VM_INTINFO_SWINTR: 2658 case VM_INTINFO_NMI: 2659 return (EXC_BENIGN); 2660 default: 2661 /* 2662 * Hardware exception. 2663 * 2664 * SVM and VT-x use identical type values to represent NMI, 2665 * hardware interrupt and software interrupt. 2666 * 2667 * SVM uses type '3' for all exceptions. VT-x uses type '3' 2668 * for exceptions except #BP and #OF. #BP and #OF use a type 2669 * value of '5' or '6'. Therefore we don't check for explicit 2670 * values of 'type' to classify 'intinfo' into a hardware 2671 * exception. 2672 */ 2673 break; 2674 } 2675 2676 switch (VM_INTINFO_VECTOR(info)) { 2677 case IDT_PF: 2678 case IDT_VE: 2679 return (EXC_PAGEFAULT); 2680 case IDT_DE: 2681 case IDT_TS: 2682 case IDT_NP: 2683 case IDT_SS: 2684 case IDT_GP: 2685 return (EXC_CONTRIBUTORY); 2686 default: 2687 return (EXC_BENIGN); 2688 } 2689 } 2690 2691 /* 2692 * Fetch event pending injection into the guest, if one exists. 2693 * 2694 * Returns true if an event is to be injected (which is placed in `retinfo`). 2695 */ 2696 bool 2697 vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo) 2698 { 2699 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2700 const uint64_t info1 = vcpu->exit_intinfo; 2701 vcpu->exit_intinfo = 0; 2702 const uint64_t info2 = vcpu->exc_pending; 2703 vcpu->exc_pending = 0; 2704 2705 if (VM_INTINFO_PENDING(info1) && VM_INTINFO_PENDING(info2)) { 2706 /* 2707 * If an exception occurs while attempting to call the 2708 * double-fault handler the processor enters shutdown mode 2709 * (aka triple fault). 
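 *
 * For example: a #GP raised while delivering a #NP (two contributory
 * exceptions) is promoted to #DF below, as is any contributory
 * exception or #PF raised while a #PF is being delivered; a benign
 * event paired with anything else is handled serially instead.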
2710 */ 2711 if (VM_INTINFO_TYPE(info1) == VM_INTINFO_HWEXCP && 2712 VM_INTINFO_VECTOR(info1) == IDT_DF) { 2713 (void) vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT); 2714 *retinfo = 0; 2715 return (false); 2716 } 2717 /* 2718 * "Conditions for Generating a Double Fault" 2719 * Intel SDM, Vol3, Table 6-5 2720 */ 2721 const enum exc_class exc1 = exception_class(info1); 2722 const enum exc_class exc2 = exception_class(info2); 2723 if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) || 2724 (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) { 2725 /* Convert nested fault into a double fault. */ 2726 *retinfo = 2727 VM_INTINFO_VALID | 2728 VM_INTINFO_DEL_ERRCODE | 2729 VM_INTINFO_HWEXCP | 2730 IDT_DF; 2731 } else { 2732 /* Handle exceptions serially */ 2733 vcpu->exit_intinfo = info1; 2734 *retinfo = info2; 2735 } 2736 return (true); 2737 } else if (VM_INTINFO_PENDING(info1)) { 2738 *retinfo = info1; 2739 return (true); 2740 } else if (VM_INTINFO_PENDING(info2)) { 2741 *retinfo = info2; 2742 return (true); 2743 } 2744 2745 return (false); 2746 } 2747 2748 int 2749 vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2) 2750 { 2751 struct vcpu *vcpu; 2752 2753 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2754 return (EINVAL); 2755 2756 vcpu = &vm->vcpu[vcpuid]; 2757 *info1 = vcpu->exit_intinfo; 2758 *info2 = vcpu->exc_pending; 2759 return (0); 2760 } 2761 2762 int 2763 vm_inject_exception(struct vm *vm, int vcpuid, uint8_t vector, 2764 bool errcode_valid, uint32_t errcode, bool restart_instruction) 2765 { 2766 struct vcpu *vcpu; 2767 uint64_t regval; 2768 int error; 2769 2770 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2771 return (EINVAL); 2772 2773 if (vector >= 32) 2774 return (EINVAL); 2775 2776 /* 2777 * NMIs are to be injected via their own specialized path using 2778 * vm_inject_nmi(). 2779 */ 2780 if (vector == IDT_NMI) { 2781 return (EINVAL); 2782 } 2783 2784 /* 2785 * A double fault exception should never be injected directly into 2786 * the guest. It is a derived exception that results from specific 2787 * combinations of nested faults. 2788 */ 2789 if (vector == IDT_DF) { 2790 return (EINVAL); 2791 } 2792 2793 vcpu = &vm->vcpu[vcpuid]; 2794 2795 if (VM_INTINFO_PENDING(vcpu->exc_pending)) { 2796 /* Unable to inject exception due to one already pending */ 2797 return (EBUSY); 2798 } 2799 2800 if (errcode_valid) { 2801 /* 2802 * Exceptions don't deliver an error code in real mode. 2803 */ 2804 error = vm_get_register(vm, vcpuid, VM_REG_GUEST_CR0, &regval); 2805 VERIFY0(error); 2806 if ((regval & CR0_PE) == 0) { 2807 errcode_valid = false; 2808 } 2809 } 2810 2811 /* 2812 * From section 26.6.1 "Interruptibility State" in Intel SDM: 2813 * 2814 * Event blocking by "STI" or "MOV SS" is cleared after guest executes 2815 * one instruction or incurs an exception.
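 *
 * The common "sti; hlt" idle sequence illustrates why: the STI shadow
 * is only meant to cover the hlt itself, so when an exception is
 * injected at that point, the shadow is cleared rather than being left
 * to linger across the guest's exception handler.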
2816 */ 2817 error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0); 2818 VERIFY0(error); 2819 2820 if (restart_instruction) { 2821 VERIFY0(vm_restart_instruction(vm, vcpuid)); 2822 } 2823 2824 uint64_t val = VM_INTINFO_VALID | VM_INTINFO_HWEXCP | vector; 2825 if (errcode_valid) { 2826 val |= VM_INTINFO_DEL_ERRCODE; 2827 val |= (uint64_t)errcode << VM_INTINFO_SHIFT_ERRCODE; 2828 } 2829 vcpu->exc_pending = val; 2830 return (0); 2831 } 2832 2833 void 2834 vm_inject_ud(struct vm *vm, int vcpuid) 2835 { 2836 VERIFY0(vm_inject_exception(vm, vcpuid, IDT_UD, false, 0, true)); 2837 } 2838 2839 void 2840 vm_inject_gp(struct vm *vm, int vcpuid) 2841 { 2842 VERIFY0(vm_inject_exception(vm, vcpuid, IDT_GP, true, 0, true)); 2843 } 2844 2845 void 2846 vm_inject_ac(struct vm *vm, int vcpuid, uint32_t errcode) 2847 { 2848 VERIFY0(vm_inject_exception(vm, vcpuid, IDT_AC, true, errcode, true)); 2849 } 2850 2851 void 2852 vm_inject_ss(struct vm *vm, int vcpuid, uint32_t errcode) 2853 { 2854 VERIFY0(vm_inject_exception(vm, vcpuid, IDT_SS, true, errcode, true)); 2855 } 2856 2857 void 2858 vm_inject_pf(struct vm *vm, int vcpuid, uint32_t errcode, uint64_t cr2) 2859 { 2860 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2)); 2861 VERIFY0(vm_inject_exception(vm, vcpuid, IDT_PF, true, errcode, true)); 2862 } 2863 2864 static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu"); 2865 2866 int 2867 vm_inject_nmi(struct vm *vm, int vcpuid) 2868 { 2869 struct vcpu *vcpu; 2870 2871 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2872 return (EINVAL); 2873 2874 vcpu = &vm->vcpu[vcpuid]; 2875 2876 vcpu->nmi_pending = true; 2877 vcpu_notify_event(vm, vcpuid); 2878 return (0); 2879 } 2880 2881 bool 2882 vm_nmi_pending(struct vm *vm, int vcpuid) 2883 { 2884 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2885 2886 return (vcpu->nmi_pending); 2887 } 2888 2889 void 2890 vm_nmi_clear(struct vm *vm, int vcpuid) 2891 { 2892 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2893 2894 ASSERT(vcpu->nmi_pending); 2895 2896 vcpu->nmi_pending = false; 2897 vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1); 2898 } 2899 2900 static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu"); 2901 2902 int 2903 vm_inject_extint(struct vm *vm, int vcpuid) 2904 { 2905 struct vcpu *vcpu; 2906 2907 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2908 return (EINVAL); 2909 2910 vcpu = &vm->vcpu[vcpuid]; 2911 2912 vcpu->extint_pending = true; 2913 vcpu_notify_event(vm, vcpuid); 2914 return (0); 2915 } 2916 2917 bool 2918 vm_extint_pending(struct vm *vm, int vcpuid) 2919 { 2920 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2921 2922 return (vcpu->extint_pending); 2923 } 2924 2925 void 2926 vm_extint_clear(struct vm *vm, int vcpuid) 2927 { 2928 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 2929 2930 ASSERT(vcpu->extint_pending); 2931 2932 vcpu->extint_pending = false; 2933 vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1); 2934 } 2935 2936 int 2937 vm_inject_init(struct vm *vm, int vcpuid) 2938 { 2939 struct vcpu *vcpu; 2940 2941 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2942 return (EINVAL); 2943 2944 vcpu = &vm->vcpu[vcpuid]; 2945 vcpu_lock(vcpu); 2946 vcpu->run_state |= VRS_PEND_INIT; 2947 /* 2948 * As part of queuing the INIT request, clear any pending SIPI. It 2949 * would not otherwise survive across the reset of the vCPU when it 2950 * undergoes the requested INIT. We would not want it to linger when it 2951 * could be mistaken as a subsequent (after the INIT) SIPI request. 
2952 */ 2953 vcpu->run_state &= ~VRS_PEND_SIPI; 2954 vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT); 2955 2956 vcpu_unlock(vcpu); 2957 return (0); 2958 } 2959 2960 int 2961 vm_inject_sipi(struct vm *vm, int vcpuid, uint8_t vector) 2962 { 2963 struct vcpu *vcpu; 2964 2965 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 2966 return (EINVAL); 2967 2968 vcpu = &vm->vcpu[vcpuid]; 2969 vcpu_lock(vcpu); 2970 vcpu->run_state |= VRS_PEND_SIPI; 2971 vcpu->sipi_vector = vector; 2972 /* SIPI is only actionable if the CPU is waiting in INIT state */ 2973 if ((vcpu->run_state & (VRS_INIT | VRS_RUN)) == VRS_INIT) { 2974 vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT); 2975 } 2976 vcpu_unlock(vcpu); 2977 return (0); 2978 } 2979 2980 bool 2981 vcpu_run_state_pending(struct vm *vm, int vcpuid) 2982 { 2983 struct vcpu *vcpu; 2984 2985 ASSERT(vcpuid >= 0 && vcpuid < vm->maxcpus); 2986 vcpu = &vm->vcpu[vcpuid]; 2987 2988 /* Of interest: vCPU not in running state or with pending INIT */ 2989 return ((vcpu->run_state & (VRS_RUN | VRS_PEND_INIT)) != VRS_RUN); 2990 } 2991 2992 int 2993 vcpu_arch_reset(struct vm *vm, int vcpuid, bool init_only) 2994 { 2995 struct seg_desc desc; 2996 const enum vm_reg_name clear_regs[] = { 2997 VM_REG_GUEST_CR2, 2998 VM_REG_GUEST_CR3, 2999 VM_REG_GUEST_CR4, 3000 VM_REG_GUEST_RAX, 3001 VM_REG_GUEST_RBX, 3002 VM_REG_GUEST_RCX, 3003 VM_REG_GUEST_RSI, 3004 VM_REG_GUEST_RDI, 3005 VM_REG_GUEST_RBP, 3006 VM_REG_GUEST_RSP, 3007 VM_REG_GUEST_R8, 3008 VM_REG_GUEST_R9, 3009 VM_REG_GUEST_R10, 3010 VM_REG_GUEST_R11, 3011 VM_REG_GUEST_R12, 3012 VM_REG_GUEST_R13, 3013 VM_REG_GUEST_R14, 3014 VM_REG_GUEST_R15, 3015 VM_REG_GUEST_DR0, 3016 VM_REG_GUEST_DR1, 3017 VM_REG_GUEST_DR2, 3018 VM_REG_GUEST_DR3, 3019 VM_REG_GUEST_EFER, 3020 }; 3021 const enum vm_reg_name data_segs[] = { 3022 VM_REG_GUEST_SS, 3023 VM_REG_GUEST_DS, 3024 VM_REG_GUEST_ES, 3025 VM_REG_GUEST_FS, 3026 VM_REG_GUEST_GS, 3027 }; 3028 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 3029 3030 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 3031 return (EINVAL); 3032 3033 for (uint_t i = 0; i < nitems(clear_regs); i++) { 3034 VERIFY0(vm_set_register(vm, vcpuid, clear_regs[i], 0)); 3035 } 3036 3037 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, 2)); 3038 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RIP, 0xfff0)); 3039 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_CR0, 0x60000010)); 3040 3041 /* 3042 * The prescribed contents of %rdx differ slightly between the Intel and 3043 * AMD architectural definitions. The former expects the Extended Model 3044 * in bits 16-19 where the latter expects all of the Family, Model, and 3045 * Stepping to be there. Common boot ROMs appear to disregard this 3046 * anyway, so we stick with a compromise value similar to what is 3047 * spelled out in the Intel SDM.
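 *
 * Decoded as a processor signature, the 0x600 written below is family
 * 6, model 0, stepping 0 -- the shape of the post-reset %edx value the
 * Intel SDM describes for P6-and-later parts.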
3048 */ 3049 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RDX, 0x600)); 3050 3051 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_DR6, 0xffff0ff0)); 3052 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_DR7, 0x400)); 3053 3054 /* CS: Present, R/W, Accessed */ 3055 desc.access = 0x0093; 3056 desc.base = 0xffff0000; 3057 desc.limit = 0xffff; 3058 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_CS, &desc)); 3059 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_CS, 0xf000)); 3060 3061 /* SS, DS, ES, FS, GS: Present, R/W, Accessed */ 3062 desc.access = 0x0093; 3063 desc.base = 0; 3064 desc.limit = 0xffff; 3065 for (uint_t i = 0; i < nitems(data_segs); i++) { 3066 VERIFY0(vm_set_seg_desc(vm, vcpuid, data_segs[i], &desc)); 3067 VERIFY0(vm_set_register(vm, vcpuid, data_segs[i], 0)); 3068 } 3069 3070 /* GDTR, IDTR */ 3071 desc.base = 0; 3072 desc.limit = 0xffff; 3073 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_GDTR, &desc)); 3074 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_IDTR, &desc)); 3075 3076 /* LDTR: Present, LDT */ 3077 desc.access = 0x0082; 3078 desc.base = 0; 3079 desc.limit = 0xffff; 3080 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_LDTR, &desc)); 3081 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_LDTR, 0)); 3082 3083 /* TR: Present, 32-bit TSS */ 3084 desc.access = 0x008b; 3085 desc.base = 0; 3086 desc.limit = 0xffff; 3087 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_TR, &desc)); 3088 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_TR, 0)); 3089 3090 vlapic_reset(vm_lapic(vm, vcpuid)); 3091 3092 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0)); 3093 3094 vcpu->exit_intinfo = 0; 3095 vcpu->exc_pending = 0; 3096 vcpu->nmi_pending = false; 3097 vcpu->extint_pending = false; 3098 3099 /* 3100 * A CPU reset caused by power-on or system reset clears more state than 3101 * one which is triggered from an INIT IPI.
3102 */ 3103 if (!init_only) { 3104 vcpu->guest_xcr0 = XFEATURE_ENABLED_X87; 3105 (void) hma_fpu_init(vcpu->guestfpu); 3106 3107 /* XXX: clear MSRs and other pieces */ 3108 bzero(&vcpu->mtrr, sizeof (vcpu->mtrr)); 3109 } 3110 3111 return (0); 3112 } 3113 3114 static int 3115 vcpu_vector_sipi(struct vm *vm, int vcpuid, uint8_t vector) 3116 { 3117 struct seg_desc desc; 3118 3119 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 3120 return (EINVAL); 3121 3122 /* CS: Present, R/W, Accessed */ 3123 desc.access = 0x0093; 3124 desc.base = (uint64_t)vector << 12; 3125 desc.limit = 0xffff; 3126 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_CS, &desc)); 3127 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_CS, 3128 (uint64_t)vector << 8)); 3129 3130 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RIP, 0)); 3131 3132 return (0); 3133 } 3134 3135 int 3136 vm_get_capability(struct vm *vm, int vcpu, int type, int *retval) 3137 { 3138 if (vcpu < 0 || vcpu >= vm->maxcpus) 3139 return (EINVAL); 3140 3141 if (type < 0 || type >= VM_CAP_MAX) 3142 return (EINVAL); 3143 3144 return (VMGETCAP(vm->cookie, vcpu, type, retval)); 3145 } 3146 3147 int 3148 vm_set_capability(struct vm *vm, int vcpu, int type, int val) 3149 { 3150 if (vcpu < 0 || vcpu >= vm->maxcpus) 3151 return (EINVAL); 3152 3153 if (type < 0 || type >= VM_CAP_MAX) 3154 return (EINVAL); 3155 3156 return (VMSETCAP(vm->cookie, vcpu, type, val)); 3157 } 3158 3159 vcpu_cpuid_config_t * 3160 vm_cpuid_config(struct vm *vm, int vcpuid) 3161 { 3162 ASSERT3S(vcpuid, >=, 0); 3163 ASSERT3S(vcpuid, <, VM_MAXCPU); 3164 3165 return (&vm->vcpu[vcpuid].cpuid_cfg); 3166 } 3167 3168 struct vlapic * 3169 vm_lapic(struct vm *vm, int cpu) 3170 { 3171 ASSERT3S(cpu, >=, 0); 3172 ASSERT3S(cpu, <, VM_MAXCPU); 3173 3174 return (vm->vcpu[cpu].vlapic); 3175 } 3176 3177 struct vioapic * 3178 vm_ioapic(struct vm *vm) 3179 { 3180 3181 return (vm->vioapic); 3182 } 3183 3184 struct vhpet * 3185 vm_hpet(struct vm *vm) 3186 { 3187 3188 return (vm->vhpet); 3189 } 3190 3191 void * 3192 vm_iommu_domain(struct vm *vm) 3193 { 3194 3195 return (vm->iommu); 3196 } 3197 3198 int 3199 vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate, 3200 bool from_idle) 3201 { 3202 int error; 3203 struct vcpu *vcpu; 3204 3205 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 3206 panic("vcpu_set_state: invalid vcpuid %d", vcpuid); 3207 3208 vcpu = &vm->vcpu[vcpuid]; 3209 3210 vcpu_lock(vcpu); 3211 error = vcpu_set_state_locked(vm, vcpuid, newstate, from_idle); 3212 vcpu_unlock(vcpu); 3213 3214 return (error); 3215 } 3216 3217 enum vcpu_state 3218 vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu) 3219 { 3220 struct vcpu *vcpu; 3221 enum vcpu_state state; 3222 3223 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 3224 panic("vcpu_get_state: invalid vcpuid %d", vcpuid); 3225 3226 vcpu = &vm->vcpu[vcpuid]; 3227 3228 vcpu_lock(vcpu); 3229 state = vcpu->state; 3230 if (hostcpu != NULL) 3231 *hostcpu = vcpu->hostcpu; 3232 vcpu_unlock(vcpu); 3233 3234 return (state); 3235 } 3236 3237 /* 3238 * Calculate the TSC offset for a vCPU, applying physical CPU adjustments if 3239 * requested. The offset calculations include the VM-wide TSC offset. 
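 *
 * That is, the value produced below is:
 *
 *	vm->tsc_offset + vcpu->tsc_offset [+ per-CPU tick delta]
 *
 * where the bracketed term is included only when phys_adj is set.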
3240 */ 3241 uint64_t 3242 vcpu_tsc_offset(struct vm *vm, int vcpuid, bool phys_adj) 3243 { 3244 ASSERT(vcpuid >= 0 && vcpuid < vm->maxcpus); 3245 3246 uint64_t vcpu_off = vm->tsc_offset + vm->vcpu[vcpuid].tsc_offset; 3247 3248 if (phys_adj) { 3249 /* Include any offset for the current physical CPU too */ 3250 extern hrtime_t tsc_gethrtime_tick_delta(void); 3251 vcpu_off += tsc_gethrtime_tick_delta(); 3252 } 3253 3254 return (vcpu_off); 3255 } 3256 3257 uint64_t 3258 vm_get_freq_multiplier(struct vm *vm) 3259 { 3260 return (vm->freq_multiplier); 3261 } 3262 3263 /* Normalize hrtime against the boot time for a VM */ 3264 hrtime_t 3265 vm_normalize_hrtime(struct vm *vm, hrtime_t hrt) 3266 { 3267 /* To avoid underflow/overflow UB, perform math as unsigned */ 3268 return ((hrtime_t)((uint64_t)hrt - (uint64_t)vm->boot_hrtime)); 3269 } 3270 3271 /* Denormalize hrtime against the boot time for a VM */ 3272 hrtime_t 3273 vm_denormalize_hrtime(struct vm *vm, hrtime_t hrt) 3274 { 3275 /* To avoid underflow/overflow UB, perform math as unsigned */ 3276 return ((hrtime_t)((uint64_t)hrt + (uint64_t)vm->boot_hrtime)); 3277 } 3278 3279 int 3280 vm_activate_cpu(struct vm *vm, int vcpuid) 3281 { 3282 3283 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 3284 return (EINVAL); 3285 3286 if (CPU_ISSET(vcpuid, &vm->active_cpus)) 3287 return (EBUSY); 3288 3289 if (vm->suspend != 0) { 3290 return (EBUSY); 3291 } 3292 3293 CPU_SET_ATOMIC(vcpuid, &vm->active_cpus); 3294 3295 /* 3296 * It is possible that this vCPU was undergoing activation at the same 3297 * time that the VM was being suspended. If that happens to be the 3298 * case, it should reflect the suspended state immediately. 3299 */ 3300 if (atomic_load_acq_int((uint_t *)&vm->suspend) != 0) { 3301 CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus); 3302 } 3303 3304 return (0); 3305 } 3306 3307 int 3308 vm_suspend_cpu(struct vm *vm, int vcpuid) 3309 { 3310 int i; 3311 3312 if (vcpuid < -1 || vcpuid >= vm->maxcpus) 3313 return (EINVAL); 3314 3315 if (vcpuid == -1) { 3316 vm->debug_cpus = vm->active_cpus; 3317 for (i = 0; i < vm->maxcpus; i++) { 3318 if (CPU_ISSET(i, &vm->active_cpus)) 3319 vcpu_notify_event(vm, i); 3320 } 3321 } else { 3322 if (!CPU_ISSET(vcpuid, &vm->active_cpus)) 3323 return (EINVAL); 3324 3325 CPU_SET_ATOMIC(vcpuid, &vm->debug_cpus); 3326 vcpu_notify_event(vm, vcpuid); 3327 } 3328 return (0); 3329 } 3330 3331 int 3332 vm_resume_cpu(struct vm *vm, int vcpuid) 3333 { 3334 3335 if (vcpuid < -1 || vcpuid >= vm->maxcpus) 3336 return (EINVAL); 3337 3338 if (vcpuid == -1) { 3339 CPU_ZERO(&vm->debug_cpus); 3340 } else { 3341 if (!CPU_ISSET(vcpuid, &vm->debug_cpus)) 3342 return (EINVAL); 3343 3344 CPU_CLR_ATOMIC(vcpuid, &vm->debug_cpus); 3345 } 3346 return (0); 3347 } 3348 3349 static bool 3350 vcpu_bailout_checks(struct vm *vm, int vcpuid, bool on_entry, 3351 uint64_t entry_rip) 3352 { 3353 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 3354 struct vm_exit *vme = &vcpu->exitinfo; 3355 bool bail = false; 3356 3357 ASSERT(vcpuid >= 0 && vcpuid < vm->maxcpus); 3358 3359 if (vm->suspend) { 3360 if (on_entry) { 3361 VERIFY(vm->suspend > VM_SUSPEND_NONE && 3362 vm->suspend < VM_SUSPEND_LAST); 3363 3364 vme->exitcode = VM_EXITCODE_SUSPENDED; 3365 vme->u.suspended.how = vm->suspend; 3366 } else { 3367 /* 3368 * Handling VM suspend is complicated, so if that 3369 * condition is detected outside of VM-entry itself, 3370 * just emit a BOGUS exitcode so we take a lap to pick 3371 * up the event during an entry and are directed into 3372 * the vm_handle_suspend() logic. 
3373 */ 3374 vme->exitcode = VM_EXITCODE_BOGUS; 3375 } 3376 bail = true; 3377 } 3378 if (vcpu->reqidle) { 3379 vme->exitcode = VM_EXITCODE_REQIDLE; 3380 vmm_stat_incr(vm, vcpuid, VMEXIT_REQIDLE, 1); 3381 3382 if (!on_entry) { 3383 /* 3384 * A reqidle request detected outside of VM-entry can be 3385 * handled directly by clearing the request (and taking 3386 * a lap to userspace). 3387 */ 3388 vcpu_assert_locked(vcpu); 3389 vcpu->reqidle = 0; 3390 } 3391 bail = true; 3392 } 3393 if (vcpu_should_yield(vm, vcpuid)) { 3394 vme->exitcode = VM_EXITCODE_BOGUS; 3395 vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1); 3396 bail = true; 3397 } 3398 if (CPU_ISSET(vcpuid, &vm->debug_cpus)) { 3399 vme->exitcode = VM_EXITCODE_DEBUG; 3400 bail = true; 3401 } 3402 3403 if (bail) { 3404 if (on_entry) { 3405 /* 3406 * If bailing out during VM-entry, the current %rip must 3407 * be recorded in the exitinfo. 3408 */ 3409 vme->rip = entry_rip; 3410 } 3411 vme->inst_length = 0; 3412 } 3413 return (bail); 3414 } 3415 3416 static bool 3417 vcpu_sleep_bailout_checks(struct vm *vm, int vcpuid) 3418 { 3419 /* 3420 * Bail-out checks done prior to sleeping (in vCPU contexts like HLT or 3421 * wait-for-SIPI) expect that %rip is already populated in the vm_exit 3422 * structure, and we would only modify the exitcode. 3423 */ 3424 return (vcpu_bailout_checks(vm, vcpuid, false, 0)); 3425 } 3426 3427 bool 3428 vcpu_entry_bailout_checks(struct vm *vm, int vcpuid, uint64_t rip) 3429 { 3430 /* 3431 * Bail-out checks done as part of VM entry require an updated %rip to 3432 * populate the vm_exit struct if any of the conditions of interest are 3433 * matched in the check. 3434 */ 3435 return (vcpu_bailout_checks(vm, vcpuid, true, rip)); 3436 } 3437 3438 cpuset_t 3439 vm_active_cpus(struct vm *vm) 3440 { 3441 3442 return (vm->active_cpus); 3443 } 3444 3445 cpuset_t 3446 vm_debug_cpus(struct vm *vm) 3447 { 3448 3449 return (vm->debug_cpus); 3450 } 3451 3452 cpuset_t 3453 vm_suspended_cpus(struct vm *vm) 3454 { 3455 3456 return (vm->suspended_cpus); 3457 } 3458 3459 void * 3460 vcpu_stats(struct vm *vm, int vcpuid) 3461 { 3462 3463 return (vm->vcpu[vcpuid].stats); 3464 } 3465 3466 int 3467 vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state) 3468 { 3469 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 3470 return (EINVAL); 3471 3472 *state = vm->vcpu[vcpuid].x2apic_state; 3473 3474 return (0); 3475 } 3476 3477 int 3478 vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state) 3479 { 3480 if (vcpuid < 0 || vcpuid >= vm->maxcpus) 3481 return (EINVAL); 3482 3483 if (state >= X2APIC_STATE_LAST) 3484 return (EINVAL); 3485 3486 vm->vcpu[vcpuid].x2apic_state = state; 3487 3488 vlapic_set_x2apic_state(vm, vcpuid, state); 3489 3490 return (0); 3491 } 3492 3493 /* 3494 * This function is called to ensure that a vcpu "sees" a pending event 3495 * as soon as possible: 3496 * - If the vcpu thread is sleeping then it is woken up. 3497 * - If the vcpu is running on a different host_cpu then an IPI will be directed 3498 * to the host_cpu to cause the vcpu to trap into the hypervisor.
3499 */ 3500 static void 3501 vcpu_notify_event_locked(struct vcpu *vcpu, vcpu_notify_t ntype) 3502 { 3503 int hostcpu; 3504 3505 ASSERT(ntype == VCPU_NOTIFY_APIC || ntype == VCPU_NOTIFY_EXIT); 3506 3507 hostcpu = vcpu->hostcpu; 3508 if (vcpu->state == VCPU_RUNNING) { 3509 KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu")); 3510 if (hostcpu != curcpu) { 3511 if (ntype == VCPU_NOTIFY_APIC) { 3512 vlapic_post_intr(vcpu->vlapic, hostcpu); 3513 } else { 3514 poke_cpu(hostcpu); 3515 } 3516 } else { 3517 /* 3518 * If the 'vcpu' is running on 'curcpu' then it must 3519 * be sending a notification to itself (e.g. SELF_IPI). 3520 * The pending event will be picked up when the vcpu 3521 * transitions back to guest context. 3522 */ 3523 } 3524 } else { 3525 KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent " 3526 "with hostcpu %d", vcpu->state, hostcpu)); 3527 if (vcpu->state == VCPU_SLEEPING) { 3528 cv_signal(&vcpu->vcpu_cv); 3529 } 3530 } 3531 } 3532 3533 void 3534 vcpu_notify_event(struct vm *vm, int vcpuid) 3535 { 3536 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 3537 3538 vcpu_lock(vcpu); 3539 vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT); 3540 vcpu_unlock(vcpu); 3541 } 3542 3543 void 3544 vcpu_notify_event_type(struct vm *vm, int vcpuid, vcpu_notify_t ntype) 3545 { 3546 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 3547 3548 if (ntype == VCPU_NOTIFY_NONE) { 3549 return; 3550 } 3551 3552 vcpu_lock(vcpu); 3553 vcpu_notify_event_locked(vcpu, ntype); 3554 vcpu_unlock(vcpu); 3555 } 3556 3557 void 3558 vcpu_ustate_change(struct vm *vm, int vcpuid, enum vcpu_ustate ustate) 3559 { 3560 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 3561 hrtime_t now = gethrtime(); 3562 3563 ASSERT3U(ustate, !=, vcpu->ustate); 3564 ASSERT3S(ustate, <, VU_MAX); 3565 ASSERT3S(ustate, >=, VU_INIT); 3566 3567 hrtime_t delta = now - vcpu->ustate_when; 3568 vcpu->ustate_total[vcpu->ustate] += delta; 3569 3570 membar_producer(); 3571 3572 vcpu->ustate_when = now; 3573 vcpu->ustate = ustate; 3574 } 3575 3576 struct vmspace * 3577 vm_get_vmspace(struct vm *vm) 3578 { 3579 3580 return (vm->vmspace); 3581 } 3582 3583 struct vm_client * 3584 vm_get_vmclient(struct vm *vm, int vcpuid) 3585 { 3586 return (vm->vcpu[vcpuid].vmclient); 3587 } 3588 3589 int 3590 vm_apicid2vcpuid(struct vm *vm, int apicid) 3591 { 3592 /* 3593 * XXX apic id is assumed to be numerically identical to vcpu id 3594 */ 3595 return (apicid); 3596 } 3597 3598 struct vatpic * 3599 vm_atpic(struct vm *vm) 3600 { 3601 return (vm->vatpic); 3602 } 3603 3604 struct vatpit * 3605 vm_atpit(struct vm *vm) 3606 { 3607 return (vm->vatpit); 3608 } 3609 3610 struct vpmtmr * 3611 vm_pmtmr(struct vm *vm) 3612 { 3613 3614 return (vm->vpmtmr); 3615 } 3616 3617 struct vrtc * 3618 vm_rtc(struct vm *vm) 3619 { 3620 3621 return (vm->vrtc); 3622 } 3623 3624 enum vm_reg_name 3625 vm_segment_name(int seg) 3626 { 3627 static enum vm_reg_name seg_names[] = { 3628 VM_REG_GUEST_ES, 3629 VM_REG_GUEST_CS, 3630 VM_REG_GUEST_SS, 3631 VM_REG_GUEST_DS, 3632 VM_REG_GUEST_FS, 3633 VM_REG_GUEST_GS 3634 }; 3635 3636 KASSERT(seg >= 0 && seg < nitems(seg_names), 3637 ("%s: invalid segment encoding %d", __func__, seg)); 3638 return (seg_names[seg]); 3639 } 3640 3641 void 3642 vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, 3643 uint_t num_copyinfo) 3644 { 3645 for (uint_t idx = 0; idx < num_copyinfo; idx++) { 3646 if (copyinfo[idx].cookie != NULL) { 3647 (void) vmp_release((vm_page_t *)copyinfo[idx].cookie); 3648 } 3649 } 3650 bzero(copyinfo, num_copyinfo * sizeof (struct
vm_copyinfo)); 3651 } 3652 3653 int 3654 vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging, 3655 uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo, 3656 uint_t num_copyinfo, int *fault) 3657 { 3658 uint_t idx, nused; 3659 size_t n, off, remaining; 3660 vm_client_t *vmc = vm_get_vmclient(vm, vcpuid); 3661 3662 bzero(copyinfo, sizeof (struct vm_copyinfo) * num_copyinfo); 3663 3664 nused = 0; 3665 remaining = len; 3666 while (remaining > 0) { 3667 uint64_t gpa; 3668 int error; 3669 3670 KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo")); 3671 error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa, fault); 3672 if (error || *fault) 3673 return (error); 3674 off = gpa & PAGEOFFSET; 3675 n = min(remaining, PAGESIZE - off); 3676 copyinfo[nused].gpa = gpa; 3677 copyinfo[nused].len = n; 3678 remaining -= n; 3679 gla += n; 3680 nused++; 3681 } 3682 3683 for (idx = 0; idx < nused; idx++) { 3684 vm_page_t *vmp; 3685 caddr_t hva; 3686 3687 vmp = vmc_hold(vmc, copyinfo[idx].gpa & PAGEMASK, prot); 3688 if (vmp == NULL) { 3689 break; 3690 } 3691 if ((prot & PROT_WRITE) != 0) { 3692 hva = (caddr_t)vmp_get_writable(vmp); 3693 } else { 3694 hva = (caddr_t)vmp_get_readable(vmp); 3695 } 3696 copyinfo[idx].hva = hva + (copyinfo[idx].gpa & PAGEOFFSET); 3697 copyinfo[idx].cookie = vmp; 3698 copyinfo[idx].prot = prot; 3699 } 3700 3701 if (idx != nused) { 3702 vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo); 3703 return (EFAULT); 3704 } else { 3705 *fault = 0; 3706 return (0); 3707 } 3708 } 3709 3710 void 3711 vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr, 3712 size_t len) 3713 { 3714 char *dst; 3715 int idx; 3716 3717 dst = kaddr; 3718 idx = 0; 3719 while (len > 0) { 3720 ASSERT(copyinfo[idx].prot & PROT_READ); 3721 3722 bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len); 3723 len -= copyinfo[idx].len; 3724 dst += copyinfo[idx].len; 3725 idx++; 3726 } 3727 } 3728 3729 void 3730 vm_copyout(struct vm *vm, int vcpuid, const void *kaddr, 3731 struct vm_copyinfo *copyinfo, size_t len) 3732 { 3733 const char *src; 3734 int idx; 3735 3736 src = kaddr; 3737 idx = 0; 3738 while (len > 0) { 3739 ASSERT(copyinfo[idx].prot & PROT_WRITE); 3740 3741 bcopy(src, copyinfo[idx].hva, copyinfo[idx].len); 3742 len -= copyinfo[idx].len; 3743 src += copyinfo[idx].len; 3744 idx++; 3745 } 3746 } 3747 3748 /* 3749 * Return the amount of in-use and wired memory for the VM. Since 3750 * these are global stats, only return the values for vCPU 0. 3751 */ 3752 VMM_STAT_DECLARE(VMM_MEM_RESIDENT); 3753 3754 static void 3755 vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat) 3756 { 3757 if (vcpu == 0) { 3758 vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT, 3759 PAGE_SIZE * vmspace_resident_count(vm->vmspace)); 3760 } 3761 } 3762 3763 VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt); 3764 3765 int 3766 vm_ioport_access(struct vm *vm, int vcpuid, bool in, uint16_t port, 3767 uint8_t bytes, uint32_t *val) 3768 { 3769 return (vm_inout_access(&vm->ioports, in, port, bytes, val)); 3770 } 3771 3772 /* 3773 * bhyve-internal interfaces to attach or detach IO port handlers. 3774 * Must be called with VM write lock held for safety.
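 *
 * A minimal usage sketch follows; the handler and port are purely
 * hypothetical, and the ioport_handler_t signature is assumed to
 * mirror the in/port/bytes/val arguments of vm_inout_access() above:
 *
 *	static int
 *	example_handler(void *arg, bool in, uint16_t port, uint8_t bytes,
 *	    uint32_t *val)
 *	{
 *		if (in)
 *			*val = 0;	/* data supplied for an IN */
 *		return (0);		/* access handled */
 *	}
 *
 *	void *cookie;
 *	VERIFY0(vm_ioport_attach(vm, 0x92, example_handler, NULL,
 *	    &cookie));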
3775 */ 3776 int 3777 vm_ioport_attach(struct vm *vm, uint16_t port, ioport_handler_t func, void *arg, 3778 void **cookie) 3779 { 3780 int err; 3781 err = vm_inout_attach(&vm->ioports, port, IOPF_DEFAULT, func, arg); 3782 if (err == 0) { 3783 *cookie = (void *)IOP_GEN_COOKIE(func, arg, port); 3784 } 3785 return (err); 3786 } 3787 int 3788 vm_ioport_detach(struct vm *vm, void **cookie, ioport_handler_t *old_func, 3789 void **old_arg) 3790 { 3791 uint16_t port = IOP_PORT_FROM_COOKIE((uintptr_t)*cookie); 3792 int err; 3793 3794 err = vm_inout_detach(&vm->ioports, port, false, old_func, old_arg); 3795 if (err == 0) { 3796 *cookie = NULL; 3797 } 3798 return (err); 3799 } 3800 3801 /* 3802 * External driver interfaces to attach or detach IO port handlers. 3803 * Must be called with VM write lock held for safety. 3804 */ 3805 int 3806 vm_ioport_hook(struct vm *vm, uint16_t port, ioport_handler_t func, 3807 void *arg, void **cookie) 3808 { 3809 int err; 3810 3811 if (port == 0) { 3812 return (EINVAL); 3813 } 3814 3815 err = vm_inout_attach(&vm->ioports, port, IOPF_DRV_HOOK, func, arg); 3816 if (err == 0) { 3817 *cookie = (void *)IOP_GEN_COOKIE(func, arg, port); 3818 } 3819 return (err); 3820 } 3821 void 3822 vm_ioport_unhook(struct vm *vm, void **cookie) 3823 { 3824 uint16_t port = IOP_PORT_FROM_COOKIE((uintptr_t)*cookie); 3825 ioport_handler_t old_func; 3826 void *old_arg; 3827 int err; 3828 3829 err = vm_inout_detach(&vm->ioports, port, true, &old_func, &old_arg); 3830 3831 /* ioport-hook-using drivers are expected to be well-behaved */ 3832 VERIFY0(err); 3833 VERIFY(IOP_GEN_COOKIE(old_func, old_arg, port) == (uintptr_t)*cookie); 3834 3835 *cookie = NULL; 3836 } 3837 3838 int 3839 vmm_kstat_update_vcpu(struct kstat *ksp, int rw) 3840 { 3841 struct vm *vm = ksp->ks_private; 3842 vmm_vcpu_kstats_t *vvk = ksp->ks_data; 3843 const int vcpuid = vvk->vvk_vcpu.value.ui32; 3844 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 3845 3846 ASSERT3U(vcpuid, <, VM_MAXCPU); 3847 3848 vvk->vvk_time_init.value.ui64 = vcpu->ustate_total[VU_INIT]; 3849 vvk->vvk_time_run.value.ui64 = vcpu->ustate_total[VU_RUN]; 3850 vvk->vvk_time_idle.value.ui64 = vcpu->ustate_total[VU_IDLE]; 3851 vvk->vvk_time_emu_kern.value.ui64 = vcpu->ustate_total[VU_EMU_KERN]; 3852 vvk->vvk_time_emu_user.value.ui64 = vcpu->ustate_total[VU_EMU_USER]; 3853 vvk->vvk_time_sched.value.ui64 = vcpu->ustate_total[VU_SCHED]; 3854 3855 return (0); 3856 } 3857 3858 SET_DECLARE(vmm_data_version_entries, const vmm_data_version_entry_t); 3859 3860 static inline bool 3861 vmm_data_is_cpu_specific(uint16_t data_class) 3862 { 3863 switch (data_class) { 3864 case VDC_REGISTER: 3865 case VDC_MSR: 3866 case VDC_FPU: 3867 case VDC_LAPIC: 3868 return (true); 3869 default: 3870 break; 3871 } 3872 3873 return (false); 3874 } 3875 3876 static int 3877 vmm_data_find(const vmm_data_req_t *req, const vmm_data_version_entry_t **resp) 3878 { 3879 const vmm_data_version_entry_t **vdpp, *vdp; 3880 3881 ASSERT(resp != NULL); 3882 ASSERT(req->vdr_result_len != NULL); 3883 3884 SET_FOREACH(vdpp, vmm_data_version_entries) { 3885 vdp = *vdpp; 3886 if (vdp->vdve_class == req->vdr_class && 3887 vdp->vdve_version == req->vdr_version) { 3888 /* 3889 * Enforce any data length expectation expressed by the 3890 * provider for this data. 
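 *
 * For example, a provider declaring a vdve_len_expect of 64 bytes
 * against a request offering only 32 has the required size written
 * back through vdr_result_len and sees ENOSPC, allowing the caller to
 * retry with a large enough buffer.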
3891 */ 3892 if (vdp->vdve_len_expect != 0 && 3893 vdp->vdve_len_expect > req->vdr_len) { 3894 *req->vdr_result_len = vdp->vdve_len_expect; 3895 return (ENOSPC); 3896 } 3897 *resp = vdp; 3898 return (0); 3899 } 3900 } 3901 return (EINVAL); 3902 } 3903 3904 static void * 3905 vmm_data_from_class(const vmm_data_req_t *req, struct vm *vm, int vcpuid) 3906 { 3907 switch (req->vdr_class) { 3908 /* per-cpu data/devices */ 3909 case VDC_LAPIC: 3910 return (vm_lapic(vm, vcpuid)); 3911 case VDC_VMM_ARCH: 3912 return (vm); 3913 case VDC_VMM_TIME: 3914 return (vm); 3915 3916 case VDC_FPU: 3917 case VDC_REGISTER: 3918 case VDC_MSR: 3919 /* 3920 * These have per-CPU handling which is dispatched outside 3921 * vmm_data_version_entries listing. 3922 */ 3923 return (NULL); 3924 3925 /* system-wide data/devices */ 3926 case VDC_IOAPIC: 3927 return (vm->vioapic); 3928 case VDC_ATPIT: 3929 return (vm->vatpit); 3930 case VDC_ATPIC: 3931 return (vm->vatpic); 3932 case VDC_HPET: 3933 return (vm->vhpet); 3934 case VDC_PM_TIMER: 3935 return (vm->vpmtmr); 3936 case VDC_RTC: 3937 return (vm->vrtc); 3938 3939 default: 3940 /* The data class will have been validated by now */ 3941 panic("Unexpected class %u", req->vdr_class); 3942 } 3943 } 3944 3945 const uint32_t arch_msr_iter[] = { 3946 MSR_EFER, 3947 3948 /* 3949 * While gsbase and fsbase are accessible via the MSR accessors, they 3950 * are not included in MSR iteration since they are covered by the 3951 * segment descriptor interface too. 3952 */ 3953 MSR_KGSBASE, 3954 3955 MSR_STAR, 3956 MSR_LSTAR, 3957 MSR_CSTAR, 3958 MSR_SF_MASK, 3959 3960 MSR_SYSENTER_CS_MSR, 3961 MSR_SYSENTER_ESP_MSR, 3962 MSR_SYSENTER_EIP_MSR, 3963 MSR_PAT, 3964 }; 3965 const uint32_t generic_msr_iter[] = { 3966 MSR_TSC, 3967 MSR_MTRRcap, 3968 MSR_MTRRdefType, 3969 3970 MSR_MTRR4kBase, MSR_MTRR4kBase + 1, MSR_MTRR4kBase + 2, 3971 MSR_MTRR4kBase + 3, MSR_MTRR4kBase + 4, MSR_MTRR4kBase + 5, 3972 MSR_MTRR4kBase + 6, MSR_MTRR4kBase + 7, 3973 3974 MSR_MTRR16kBase, MSR_MTRR16kBase + 1, 3975 3976 MSR_MTRR64kBase, 3977 }; 3978 3979 static int 3980 vmm_data_read_msrs(struct vm *vm, int vcpuid, const vmm_data_req_t *req) 3981 { 3982 VERIFY3U(req->vdr_class, ==, VDC_MSR); 3983 VERIFY3U(req->vdr_version, ==, 1); 3984 3985 const uint_t num_msrs = nitems(arch_msr_iter) + nitems(generic_msr_iter) 3986 + (VMM_MTRR_VAR_MAX * 2); 3987 const uint32_t output_len = 3988 num_msrs * sizeof (struct vdi_field_entry_v1); 3989 *req->vdr_result_len = output_len; 3990 3991 if (req->vdr_len < output_len) { 3992 return (ENOSPC); 3993 } 3994 3995 struct vdi_field_entry_v1 *entryp = req->vdr_data; 3996 for (uint_t i = 0; i < nitems(arch_msr_iter); i++, entryp++) { 3997 const uint32_t msr = arch_msr_iter[i]; 3998 uint64_t val = 0; 3999 4000 int err = ops->vmgetmsr(vm->cookie, vcpuid, msr, &val); 4001 /* All of these MSRs are expected to work */ 4002 VERIFY0(err); 4003 entryp->vfe_ident = msr; 4004 entryp->vfe_value = val; 4005 } 4006 4007 struct vm_mtrr *mtrr = &vm->vcpu[vcpuid].mtrr; 4008 for (uint_t i = 0; i < nitems(generic_msr_iter); i++, entryp++) { 4009 const uint32_t msr = generic_msr_iter[i]; 4010 4011 entryp->vfe_ident = msr; 4012 switch (msr) { 4013 case MSR_TSC: 4014 /* 4015 * Communicate this as the difference from the VM-wide 4016 * offset of the boot time. 4017 */ 4018 entryp->vfe_value = vm->vcpu[vcpuid].tsc_offset; 4019 break; 4020 case MSR_MTRRcap: 4021 case MSR_MTRRdefType: 4022 case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7: 4023 case MSR_MTRR16kBase ... 
MSR_MTRR16kBase + 1: 4024 case MSR_MTRR64kBase: { 4025 int err = vm_rdmtrr(mtrr, msr, &entryp->vfe_value); 4026 VERIFY0(err); 4027 break; 4028 } 4029 default: 4030 panic("unexpected msr export %x", msr); 4031 } 4032 } 4033 /* Copy the variable MTRRs */ 4034 for (uint_t i = 0; i < (VMM_MTRR_VAR_MAX * 2); i++, entryp++) { 4035 const uint32_t msr = MSR_MTRRVarBase + i; 4036 4037 entryp->vfe_ident = msr; 4038 int err = vm_rdmtrr(mtrr, msr, &entryp->vfe_value); 4039 VERIFY0(err); 4040 } 4041 return (0); 4042 } 4043 4044 static int 4045 vmm_data_write_msrs(struct vm *vm, int vcpuid, const vmm_data_req_t *req) 4046 { 4047 VERIFY3U(req->vdr_class, ==, VDC_MSR); 4048 VERIFY3U(req->vdr_version, ==, 1); 4049 4050 const struct vdi_field_entry_v1 *entryp = req->vdr_data; 4051 const uint_t entry_count = 4052 req->vdr_len / sizeof (struct vdi_field_entry_v1); 4053 struct vm_mtrr *mtrr = &vm->vcpu[vcpuid].mtrr; 4054 4055 /* 4056 * First make sure that all of the MSRs can be manipulated. 4057 * For now, this check is done by going through the getmsr handler. 4058 */ 4059 for (uint_t i = 0; i < entry_count; i++, entryp++) { 4060 const uint32_t msr = entryp->vfe_ident; 4061 uint64_t val; 4062 int err = 0; 4063 4064 switch (msr) { 4065 case MSR_TSC: 4066 break; 4067 default: 4068 if (is_mtrr_msr(msr)) { 4069 err = vm_rdmtrr(mtrr, msr, &val); 4070 } else { 4071 err = ops->vmgetmsr(vm->cookie, vcpuid, msr, 4072 &val); 4073 } 4074 break; 4075 } 4076 if (err != 0) { 4077 return (err); 4078 } 4079 } 4080 4081 /* 4082 * Fairly confident that all of the 'set' operations are at least 4083 * targeting valid MSRs, continue on. 4084 */ 4085 entryp = req->vdr_data; 4086 for (uint_t i = 0; i < entry_count; i++, entryp++) { 4087 const uint32_t msr = entryp->vfe_ident; 4088 const uint64_t val = entryp->vfe_value; 4089 int err = 0; 4090 4091 switch (msr) { 4092 case MSR_TSC: 4093 vm->vcpu[vcpuid].tsc_offset = entryp->vfe_value; 4094 break; 4095 default: 4096 if (is_mtrr_msr(msr)) { 4097 if (msr == MSR_MTRRcap) { 4098 /* 4099 * MTRRcap is read-only. If the current 4100 * value matches the incoming one, 4101 * consider it a success 4102 */ 4103 uint64_t comp; 4104 err = vm_rdmtrr(mtrr, msr, &comp); 4105 if (err != 0 || comp != val) { 4106 err = EINVAL; 4107 } 4108 } else { 4109 err = vm_wrmtrr(mtrr, msr, val); 4110 } 4111 } else { 4112 err = ops->vmsetmsr(vm->cookie, vcpuid, msr, 4113 val); 4114 } 4115 break; 4116 } 4117 if (err != 0) { 4118 return (err); 4119 } 4120 } 4121 *req->vdr_result_len = entry_count * sizeof (struct vdi_field_entry_v1); 4122 4123 return (0); 4124 } 4125 4126 static const vmm_data_version_entry_t msr_v1 = { 4127 .vdve_class = VDC_MSR, 4128 .vdve_version = 1, 4129 .vdve_len_per_item = sizeof (struct vdi_field_entry_v1), 4130 .vdve_vcpu_readf = vmm_data_read_msrs, 4131 .vdve_vcpu_writef = vmm_data_write_msrs, 4132 }; 4133 VMM_DATA_VERSION(msr_v1); 4134 4135 static const uint32_t vmm_arch_v1_fields[] = { 4136 VAI_VM_IS_PAUSED, 4137 }; 4138 4139 static const uint32_t vmm_arch_v1_vcpu_fields[] = { 4140 VAI_PEND_NMI, 4141 VAI_PEND_EXTINT, 4142 VAI_PEND_EXCP, 4143 VAI_PEND_INTINFO, 4144 }; 4145 4146 static bool 4147 vmm_read_arch_field(struct vm *vm, int vcpuid, uint32_t ident, uint64_t *valp) 4148 { 4149 ASSERT(valp != NULL); 4150 4151 if (vcpuid == -1) { 4152 switch (ident) { 4153 case VAI_VM_IS_PAUSED: 4154 *valp = vm->is_paused ?
1 : 0; 4155 return (true); 4156 default: 4157 break; 4158 } 4159 } else { 4160 VERIFY(vcpuid >= 0 && vcpuid <= VM_MAXCPU); 4161 4162 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 4163 switch (ident) { 4164 case VAI_PEND_NMI: 4165 *valp = vcpu->nmi_pending != 0 ? 1 : 0; 4166 return (true); 4167 case VAI_PEND_EXTINT: 4168 *valp = vcpu->extint_pending != 0 ? 1 : 0; 4169 return (true); 4170 case VAI_PEND_EXCP: 4171 *valp = vcpu->exc_pending; 4172 return (true); 4173 case VAI_PEND_INTINFO: 4174 *valp = vcpu->exit_intinfo; 4175 return (true); 4176 default: 4177 break; 4178 } 4179 } 4180 return (false); 4181 } 4182 4183 static int 4184 vmm_data_read_varch(struct vm *vm, int vcpuid, const vmm_data_req_t *req) 4185 { 4186 VERIFY3U(req->vdr_class, ==, VDC_VMM_ARCH); 4187 VERIFY3U(req->vdr_version, ==, 1); 4188 4189 /* per-vCPU fields are handled separately from VM-wide ones */ 4190 if (vcpuid != -1 && (vcpuid < 0 || vcpuid >= VM_MAXCPU)) { 4191 return (EINVAL); 4192 } 4193 4194 struct vdi_field_entry_v1 *entryp = req->vdr_data; 4195 4196 /* Specific fields requested */ 4197 if ((req->vdr_flags & VDX_FLAG_READ_COPYIN) != 0) { 4198 const uint_t count = 4199 req->vdr_len / sizeof (struct vdi_field_entry_v1); 4200 4201 for (uint_t i = 0; i < count; i++, entryp++) { 4202 if (!vmm_read_arch_field(vm, vcpuid, entryp->vfe_ident, 4203 &entryp->vfe_value)) { 4204 return (EINVAL); 4205 } 4206 } 4207 *req->vdr_result_len = 4208 count * sizeof (struct vdi_field_entry_v1); 4209 return (0); 4210 } 4211 4212 /* Emit all of the possible values */ 4213 const uint32_t *idents; 4214 uint_t ident_count; 4215 4216 if (vcpuid == -1) { 4217 idents = vmm_arch_v1_fields; 4218 ident_count = nitems(vmm_arch_v1_fields); 4219 } else { 4220 idents = vmm_arch_v1_vcpu_fields; 4221 ident_count = nitems(vmm_arch_v1_vcpu_fields); 4222 4223 } 4224 4225 const uint32_t total_size = 4226 ident_count * sizeof (struct vdi_field_entry_v1); 4227 4228 *req->vdr_result_len = total_size; 4229 if (req->vdr_len < total_size) { 4230 return (ENOSPC); 4231 } 4232 for (uint_t i = 0; i < ident_count; i++, entryp++) { 4233 entryp->vfe_ident = idents[i]; 4234 VERIFY(vmm_read_arch_field(vm, vcpuid, entryp->vfe_ident, 4235 &entryp->vfe_value)); 4236 } 4237 return (0); 4238 } 4239 4240 static int 4241 vmm_data_write_varch_vcpu(struct vm *vm, int vcpuid, const vmm_data_req_t *req) 4242 { 4243 VERIFY3U(req->vdr_class, ==, VDC_VMM_ARCH); 4244 VERIFY3U(req->vdr_version, ==, 1); 4245 4246 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) { 4247 return (EINVAL); 4248 } 4249 4250 const struct vdi_field_entry_v1 *entryp = req->vdr_data; 4251 const uint_t entry_count = 4252 req->vdr_len / sizeof (struct vdi_field_entry_v1); 4253 struct vcpu *vcpu = &vm->vcpu[vcpuid]; 4254 4255 for (uint_t i = 0; i < entry_count; i++, entryp++) { 4256 const uint64_t val = entryp->vfe_value; 4257 4258 switch (entryp->vfe_ident) { 4259 case VAI_PEND_NMI: 4260 vcpu->nmi_pending = (val != 0); 4261 break; 4262 case VAI_PEND_EXTINT: 4263 vcpu->extint_pending = (val != 0); 4264 break; 4265 case VAI_PEND_EXCP: 4266 if (!VM_INTINFO_PENDING(val)) { 4267 vcpu->exc_pending = 0; 4268 } else if (VM_INTINFO_TYPE(val) != VM_INTINFO_HWEXCP || 4269 (val & VM_INTINFO_MASK_RSVD) != 0) { 4270 /* reject improperly-formed hw exception */ 4271 return (EINVAL); 4272 } else { 4273 vcpu->exc_pending = val; 4274 } 4275 break; 4276 case VAI_PEND_INTINFO: 4277 if (vm_exit_intinfo(vm, vcpuid, val) != 0) { 4278 return (EINVAL); 4279 } 4280 break; 4281 default: 4282 return (EINVAL); 4283 } 4284 } 4285 4286 *req->vdr_result_len = 
entry_count * sizeof (struct vdi_field_entry_v1); 4287 return (0); 4288 } 4289 4290 static int 4291 vmm_data_write_varch(struct vm *vm, int vcpuid, const vmm_data_req_t *req) 4292 { 4293 VERIFY3U(req->vdr_class, ==, VDC_VMM_ARCH); 4294 VERIFY3U(req->vdr_version, ==, 1); 4295 4296 /* per-vCPU fields are handled separately from VM-wide ones */ 4297 if (vcpuid != -1) { 4298 return (vmm_data_write_varch_vcpu(vm, vcpuid, req)); 4299 } 4300 4301 const struct vdi_field_entry_v1 *entryp = req->vdr_data; 4302 const uint_t entry_count = 4303 req->vdr_len / sizeof (struct vdi_field_entry_v1); 4304 4305 if (entry_count > 0) { 4306 if (entryp->vfe_ident == VAI_VM_IS_PAUSED) { 4307 /* 4308 * The VM_PAUSE and VM_RESUME ioctls are the officially 4309 * sanctioned mechanisms for setting the is-paused state 4310 * of the VM. 4311 */ 4312 return (EPERM); 4313 } else { 4314 /* no other valid arch entries at this time */ 4315 return (EINVAL); 4316 } 4317 } 4318 4319 *req->vdr_result_len = entry_count * sizeof (struct vdi_field_entry_v1); 4320 return (0); 4321 } 4322 4323 static const vmm_data_version_entry_t vmm_arch_v1 = { 4324 .vdve_class = VDC_VMM_ARCH, 4325 .vdve_version = 1, 4326 .vdve_len_per_item = sizeof (struct vdi_field_entry_v1), 4327 .vdve_vcpu_readf = vmm_data_read_varch, 4328 .vdve_vcpu_writef = vmm_data_write_varch, 4329 }; 4330 VMM_DATA_VERSION(vmm_arch_v1); 4331 4332 4333 /* 4334 * GUEST TIME SUPPORT 4335 * 4336 * Broadly, there are two categories of functionality related to time passing in 4337 * the guest: the guest's TSC and timers used by emulated devices. 4338 * 4339 * --------------------------- 4340 * GUEST TSC "VIRTUALIZATION" 4341 * --------------------------- 4342 * 4343 * The TSC can be read either via an instruction (rdtsc/rdtscp) or by reading 4344 * the TSC MSR. 4345 * 4346 * When a guest reads the TSC via its MSR, the guest will exit and we emulate 4347 * the rdmsr. More typically, the guest reads the TSC via a rdtsc(p) 4348 * instruction. Both SVM and VMX support virtualizing the guest TSC in hardware 4349 * -- that is, a guest will not generally exit on a rdtsc instruction. 4350 * 4351 * To support hardware-virtualized guest TSC, both SVM and VMX provide two knobs 4352 * for the hypervisor to adjust the guest's view of the TSC: 4353 * - TSC offset 4354 * - TSC frequency multiplier (also called "frequency ratio") 4355 * 4356 * When a guest executes rdtsc(p), the TSC value it sees is the sum of: 4357 * guest_tsc = (host TSC, scaled according to frequency multiplier) 4358 * + (TSC offset, programmed by hypervisor) 4359 * 4360 * See the discussions of the TSC offset and frequency multiplier below for more 4361 * details on each of these. 4362 * 4363 * -------------------- 4364 * TSC OFFSET OVERVIEW 4365 * -------------------- 4366 * 4367 * The TSC offset is a value added to the host TSC (which may be scaled first) 4368 * to provide the guest TSC. This offset addition is generally done by hardware, 4369 * but the offset is also applied when the TSC must be emulated. 4370 * 4371 * Recall that the general formula for calculating the guest TSC is: 4372 * 4373 * guest_tsc = (host TSC, scaled if needed) + TSC offset 4374 * 4375 * Intuitively, the TSC offset is simply an offset of the host's TSC to make the 4376 * guest's view of the TSC appear correct: The guest TSC should be 0 at boot and 4377 * monotonically increase at a roughly constant frequency.
Thus in the simplest 4378 * case, the TSC offset is just the negated value of the host TSC when the guest 4379 * was booted, assuming they have the same frequencies. 4380 * 4381 * In practice, there are several factors that can make calculating the TSC 4382 * offset more complicated, including: 4383 * 4384 * (1) the physical CPU the guest is running on 4385 * (2) whether the guest has written to the TSC of that vCPU 4386 * (3) differing host and guest frequencies, like after a live migration 4387 * (4) a guest running on a different system than where it was booted, like 4388 * after a live migration 4389 * 4390 * We will explore each of these factors individually. See below for a 4391 * summary. 4392 * 4393 * 4394 * (1) Physical CPU offsets 4395 * 4396 * The system maintains a set of per-CPU offsets to the TSC to provide a 4397 * consistent view of the TSC regardless of the CPU a thread is running on. 4398 * These offsets are included automatically as a part of rdtsc_offset(). 4399 * 4400 * The per-CPU offset must be included as a part of reading the host TSC when 4401 * calculating the offset before running the guest on a given CPU. 4402 * 4403 * 4404 * (2) Guest TSC writes (vCPU offsets) 4405 * 4406 * The TSC is a writable MSR. When a guest writes to the TSC, subsequent reads 4407 * of the TSC from that vCPU should show the value written, plus whatever time 4408 * has elapsed since the write. 4409 * 4410 * To support this, when the guest writes to the TSC, we store an additional 4411 * vCPU offset calculated to make future reads of the TSC map to what the guest 4412 * expects. 4413 * 4414 * 4415 * (3) Differing host and guest frequencies (host TSC scaling) 4416 * 4417 * A guest has the same frequency as its host when it boots, but it may be 4418 * migrated to a machine with a different TSC frequency. Systems expect that 4419 * their TSC frequency does not change. To support this fiction when a guest 4420 * is running on hardware with a different TSC frequency, the hypervisor can 4421 * program a "frequency multiplier" that represents the ratio of guest/host 4422 * frequency. 4423 * 4424 * Any time a host TSC is used in calculations for the offset, it should be 4425 * "scaled" according to this multiplier, and the hypervisor should program the 4426 * multiplier before running a guest so that the hardware virtualization of the 4427 * TSC functions properly. Similarly, the multiplier should be used in any TSC 4428 * emulation. 4429 * 4430 * See below for more details about the frequency multiplier. 4431 * 4432 * 4433 * (4) Guest running on a system it did not boot on ("base guest TSC") 4434 * 4435 * When a guest boots, its TSC offset is simply the negated host TSC at the time 4436 * it booted. If a guest is migrated from a source host to a target host, the 4437 * TSC offset from the source host is no longer useful for several reasons: 4438 * - the target host TSC has no relationship to the source host TSC 4439 * - the guest did not boot on the target system, so the TSC of the target host 4440 * is not sufficient to describe how long the guest has been running prior to 4441 * migration 4442 * - the target system may have a different TSC frequency than the source system 4443 * 4444 * Ignoring the issue of frequency differences for a moment, let's consider how 4445 * to re-align the guest TSC with the host TSC of the target host.
Intuitively, 4446 * for the guest to see the correct TSC, we still want to add some offset to the 4447 * host TSC that accounts for how long this guest has been running on 4448 * the system. 4449 * 4450 * An example here might be helpful. Consider a source host and target host, 4451 * both with TSC frequencies of 1GHz. On the source host, the guest and host TSC 4452 * values might look like: 4453 * 4454 * +----------------------------------------------------------------------+ 4455 * | Event | source host TSC | guest TSC | 4456 * ------------------------------------------------------------------------ 4457 * | guest boot (t=0s) | 5000000000 | 5000000000 + -5000000000 | 4458 * | | | 0 | 4459 * ------------------------------------------------------------------------ 4460 * | guest rdtsc (t=10s) | 15000000000 | 15000000000 + -5000000000 | 4461 * | | | 10000000000 | 4462 * ------------------------------------------------------------------------ 4463 * | migration (t=15s) | 20000000000 | 20000000000 + -5000000000 | 4464 * | | | 15000000000 | 4465 * +----------------------------------------------------------------------+ 4466 * 4467 * Ignoring the time it takes for a guest to physically migrate machines, on the 4468 * target host, we would expect the TSC to continue functioning as such: 4469 * 4470 * +----------------------------------------------------------------------+ 4471 * | Event | target host TSC | guest TSC | 4472 * ------------------------------------------------------------------------ 4473 * | guest migrate (t=15s) | 300000000000 | 15000000000 | 4474 * ------------------------------------------------------------------------ 4475 * | guest rdtsc (t=20s) | 305000000000 | 20000000000 | 4476 * +----------------------------------------------------------------------+ 4477 * 4478 * In order to produce a correct TSC value here, we can calculate a new 4479 * "effective" boot TSC: what the host TSC would have been at guest boot, had 4480 * the guest booted on the target. This is the target host TSC when the guest 4481 * began running there, minus the guest TSC at that point; its negation is the 4482 * new offset. In this example, the effective boot TSC is: 300000000000 - 15000000000 = 285000000000, giving an offset of -285000000000. 4483 * 4484 * +-------------------------------------------------------------------------+ 4485 * | Event | target host TSC | guest TSC | 4486 * --------------------------------------------------------------------------- 4487 * | guest "boot" (t=0s) | 285000000000 | 285000000000 + -285000000000 | 4488 * | | | 0 | 4489 * --------------------------------------------------------------------------- 4490 * | guest migrate (t=15s) | 300000000000 | 300000000000 + -285000000000 | 4491 * | | | 15000000000 | 4492 * --------------------------------------------------------------------------- 4493 * | guest rdtsc (t=20s) | 305000000000 | 305000000000 + -285000000000 | 4494 * | | | 20000000000 | 4495 * +-------------------------------------------------------------------------+ 4496 * 4497 * To support the offset calculation following a migration, the VMM data time 4498 * interface allows callers to set a "base guest TSC", which is the TSC value of 4499 * the guest when it began running on the host. The current guest TSC can be 4500 * requested via a read of the time data. See below for details on that 4501 * interface. 4502 * 4503 * Frequency differences between the host and the guest are accounted for when 4504 * scaling the host TSC. See below for details on the frequency multiplier.
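 *
 * In terms of the calc_tsc_offset() helper defined later in this file, the
 * offset for the migration example above would be derived as follows (a
 * sketch; values taken from the tables above, with no frequency scaling):
 *
 *	offset = calc_tsc_offset(300000000000, 15000000000, VM_TSCM_NOSCALE)
 *	       = (uint64_t)-(300000000000 - 15000000000)
 *	       = (uint64_t)-285000000000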
4505 * 4506 * 4507 * -------------------- 4508 * TSC OFFSET SUMMARY 4509 * -------------------- 4510 * 4511 * Factoring in all of the components of the TSC above, the TSC offset that is 4512 * programmed by the hypervisor before running a given vCPU is: 4513 * 4514 * offset = -((base host TSC, scaled if needed) - base_guest_tsc) + vCPU offset 4515 * 4516 * This offset is stored in two pieces. Per-vCPU offsets are stored with the 4517 * given vCPU and added in when programming the offset. The rest of the offset 4518 * is stored as a VM-wide offset, and computed either at boot or when the time 4519 * data is written to. 4520 * 4521 * It is safe to add the vCPU offset and the VM-wide offsets together because 4522 * the vCPU offset is in terms of the guest TSC. The host TSC is scaled before 4523 * using it in calculations, so all TSC values are applicable to the same 4524 * frequency. 4525 * 4526 * Note: Though both the VM-wide offset and per-vCPU offsets may be negative, we 4527 * store them as unsigned values and perform all offsetting math unsigned. This 4528 * is to avoid UB from signed overflow. 4529 * 4530 * ------------------------- 4531 * TSC FREQUENCY MULTIPLIER 4532 * ------------------------- 4533 * 4534 * In order to account for frequency differences between the host and guest, SVM 4535 * and VMX provide an interface to set a "frequency multiplier" (or "frequency 4536 * ratio") representing the ratio of guest to host frequency. In a hardware-virtualized read 4537 * of the TSC, the host TSC is scaled using this multiplier prior to adding the 4538 * programmed TSC offset. 4539 * 4540 * Both platforms represent the ratio as a fixed point number, where the lower 4541 * bits are used as a fractional component, and some number of the upper bits 4542 * are used as the integer component. 4543 * 4544 * Some example multipliers, for a platform with FRAC_SIZE fractional bits in the 4545 * multiplier: 4546 * - guest frequency == host: 1 << FRAC_SIZE 4547 * - guest frequency is 2x host: 1 << (FRAC_SIZE + 1) 4548 * - guest frequency is 0.5x host: 1 << (FRAC_SIZE - 1), as the highest-order 4549 * fractional bit represents 1/2 4550 * - guest frequency is 2.5x host: (1 << (FRAC_SIZE + 1)) | (1 << (FRAC_SIZE - 1)) 4551 * and so on. 4552 * 4553 * In general, the frequency multiplier is calculated as follows: 4554 * (guest_hz * (1 << FRAC_SIZE)) / host_hz 4555 * 4556 * The multiplier should be used any time the host TSC value is used in 4557 * calculations with the guest TSC (and their frequencies differ). The function 4558 * `vmm_scale_tsc` is intended to be used for these purposes, as it will scale 4559 * the host TSC only if needed. 4560 * 4561 * The multiplier should also be programmed by the hypervisor before the guest 4562 * is run.
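 *
 * As a worked example of the formula above (illustrative only: it assumes
 * FRAC_SIZE = 32, matching the 8.32 fixed-point layout of the SVM TSC ratio
 * MSR), a 1.5 GHz guest on a 1 GHz host would yield:
 *
 *	mult = (1500000000 * (1ULL << 32)) / 1000000000 = 0x180000000
 *
 * i.e. an integer component of 1 and a fractional component of 1/2.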
4563 * 4564 * 4565 * ---------------------------- 4566 * DEVICE TIMERS (BOOT_HRTIME) 4567 * ---------------------------- 4568 * 4569 * Emulated devices use timers to do things such as scheduling periodic events. 4570 * These timers are scheduled relative to the hrtime of the host. When device 4571 * state is exported or imported, we use boot_hrtime to normalize these timers 4572 * against the host hrtime. The boot_hrtime represents the hrtime of the host 4573 * when the guest was booted. 4574 * 4575 * If a guest is migrated to a different machine, boot_hrtime must be adjusted 4576 * to match the hrtime of when the guest was effectively booted on the target 4577 * host. This allows timers to continue functioning when device state is 4578 * imported on the target. 4579 * 4580 * 4581 * ------------------------ 4582 * VMM DATA TIME INTERFACE 4583 * ------------------------ 4584 * 4585 * In order to facilitate live migrations of guests, we provide an interface, 4586 * via the VMM data read/write ioctls, for userspace to make changes to the 4587 * guest's view of the TSC and device timers, allowing these features to 4588 * continue functioning after a migration. 4589 * 4590 * The interface was designed to expose the minimal amount of data needed for a 4591 * userspace component to make adjustments to the guest's view of time (e.g., to 4592 * account for time passing in a live migration). At a minimum, such a program 4593 * needs: 4594 * - the current guest TSC 4595 * - guest TSC frequency 4596 * - guest's boot_hrtime 4597 * - timestamps of when this data was taken (hrtime for hrtime calculations, and 4598 * wall clock time for computing time deltas between machines) 4599 * 4600 * The wall clock time is provided for consumers to make adjustments to the 4601 * guest TSC and boot_hrtime based on deltas observed during migrations. It may 4602 * be prudent for consumers to use this data only in circumstances where the 4603 * source and target have well-synchronized wall clocks, but nothing in the 4604 * interface depends on this assumption. 4605 * 4606 * On writes, consumers write back: 4607 * - the base guest TSC (used for TSC offset calculations) 4608 * - desired boot_hrtime 4609 * - guest_frequency (cannot change) 4610 * - hrtime of when this data was adjusted 4611 * - (wall clock time on writes is ignored) 4612 * 4613 * The interface will adjust the input guest TSC slightly, based on the input 4614 * hrtime, to account for latency between userspace calculations and application 4615 * of the data on the kernel side. This amounts to adding a small amount of 4616 * additional "uptime" for the guest. 4617 * 4618 * After the adjustments, the interface updates the VM-wide TSC offset and 4619 * boot_hrtime. Per-vCPU offsets are not adjusted, as those are already in terms 4620 * of the guest TSC and can be exported/imported via the MSR VMM data interface. 4621 * 4622 * 4623 * -------------------------------- 4624 * SUPPORTED PLATFORMS AND CAVEATS 4625 * -------------------------------- 4626 * 4627 * While both VMX and SVM offer TSC scaling as a feature, at this time only SVM 4628 * is supported by bhyve. 4629 * 4630 * The time data interface is designed such that Intel support can be added 4631 * easily, and all other aspects of the time interface should work on Intel. 4632 * (In practice, though, without frequency control, live migration of guests on 4633 * Intel hosts will not preserve time-related behavior, as two machines rarely 4634 * have exactly the same TSC frequency.) 4635 * 4636 * Additionally, while on both SVM and VMX the frequency multiplier is a fixed 4637 * point number, each uses a different number of fractional and integer bits for 4638 * the multiplier. As such, the multiplier calculation and the fractional bit size 4639 * are obtained via the vmm_ops. 4640 * 4641 * Care should be taken to set reasonable limits for ratios based on the 4642 * platform, as the difference in fractional bits leads to slightly different 4643 * tradeoffs in representable ratios and can overflow intermediate 4644 * calculations. 4645 */ 4646 4647 /* 4648 * Scales the TSC if needed, based on the input frequency multiplier.
4649 */ 4650 static uint64_t 4651 vmm_scale_tsc(uint64_t tsc, uint64_t mult) 4652 { 4653 const uint32_t frac_size = ops->fr_fracsize; 4654 4655 if (mult != VM_TSCM_NOSCALE) { 4656 VERIFY3U(frac_size, >, 0); 4657 return (scale_tsc(tsc, mult, frac_size)); 4658 } else { 4659 return (tsc); 4660 } 4661 } 4662 4663 /* 4664 * Calculate the frequency multiplier, which represents the ratio of 4665 * guest_hz / host_hz. The frequency multiplier is a fixed point number with 4666 * `frac_size` fractional bits (fractional bits begin at bit 0). 4667 * 4668 * See comment for "calc_freq_multiplier" in "vmm_time_support.S" for more 4669 * information about valid input to this function. 4670 */ 4671 uint64_t 4672 vmm_calc_freq_multiplier(uint64_t guest_hz, uint64_t host_hz, 4673 uint32_t frac_size) 4674 { 4675 VERIFY3U(guest_hz, !=, 0); 4676 VERIFY3U(frac_size, >, 0); 4677 VERIFY3U(frac_size, <, 64); 4678 4679 return (calc_freq_multiplier(guest_hz, host_hz, frac_size)); 4680 } 4681 4682 /* 4683 * Calculate the guest VM-wide TSC offset. 4684 * 4685 * offset = - ((base host TSC, scaled if needed) - base_guest_tsc) 4686 * 4687 * The base_host_tsc and the base_guest_tsc are the TSC values of the host 4688 * (read on the system) and the guest (calculated) at the same point in time. 4689 * This allows us to fix the guest TSC at this point in time as a base, either 4690 * following boot (guest TSC = 0), or following a change to the guest's time 4691 * data from userspace (such as in the case of a migration). 4692 */ 4693 static uint64_t 4694 calc_tsc_offset(uint64_t base_host_tsc, uint64_t base_guest_tsc, uint64_t mult) 4695 { 4696 const uint64_t htsc_scaled = vmm_scale_tsc(base_host_tsc, mult); 4697 if (htsc_scaled > base_guest_tsc) { 4698 return ((uint64_t)(- (int64_t)(htsc_scaled - base_guest_tsc))); 4699 } else { 4700 return (base_guest_tsc - htsc_scaled); 4701 } 4702 } 4703 4704 /* 4705 * Calculate an estimate of the guest TSC. 4706 * 4707 * guest_tsc = (host TSC, scaled if needed) + offset 4708 */ 4709 static uint64_t 4710 calc_guest_tsc(uint64_t host_tsc, uint64_t mult, uint64_t offset) 4711 { 4712 return (vmm_scale_tsc(host_tsc, mult) + offset); 4713 }
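
/*
 * To illustrate the unsigned offset math described above (a sketch using
 * made-up values and no scaling): fixing a guest TSC of 15000000000 against a
 * host TSC of 20000000000 yields
 *
 *	offset = calc_tsc_offset(20000000000, 15000000000, VM_TSCM_NOSCALE);
 *	(uint64_t)-5000000000 == 18446744068709551616
 *
 * and a later read at a host TSC of 25000000000 wraps back around as intended:
 *
 *	calc_guest_tsc(25000000000, VM_TSCM_NOSCALE, offset) == 20000000000
 */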
4714 4715 /* 4716 * Take a non-atomic "snapshot" of the current: 4717 * - TSC 4718 * - hrtime 4719 * - wall clock time 4720 */ 4721 static void 4722 vmm_time_snapshot(uint64_t *tsc, hrtime_t *hrtime, timespec_t *hrestime) 4723 { 4724 /* 4725 * Disable interrupts while we take the readings: In the absence of a 4726 * mechanism to convert hrtime to hrestime, we want the time between 4727 * each of these measurements to be as small as possible. 4728 */ 4729 ulong_t iflag = intr_clear(); 4730 4731 hrtime_t hrt = gethrtimeunscaledf(); 4732 *tsc = (uint64_t)hrt; 4733 *hrtime = hrt; 4734 scalehrtime(hrtime); 4735 gethrestime(hrestime); 4736 4737 intr_restore(iflag); 4738 } 4739 4740 /* 4741 * Read VMM Time data 4742 * 4743 * Provides: 4744 * - the current guest TSC and TSC frequency 4745 * - guest boot_hrtime 4746 * - timestamps of the read (hrtime and wall clock time) 4747 */ 4748 static int 4749 vmm_data_read_vmm_time(void *arg, const vmm_data_req_t *req) 4750 { 4751 VERIFY3U(req->vdr_class, ==, VDC_VMM_TIME); 4752 VERIFY3U(req->vdr_version, ==, 1); 4753 VERIFY3U(req->vdr_len, >=, sizeof (struct vdi_time_info_v1)); 4754 4755 struct vm *vm = arg; 4756 struct vdi_time_info_v1 *out = req->vdr_data; 4757 4758 /* Take a snapshot of this point in time */ 4759 uint64_t tsc; 4760 hrtime_t hrtime; 4761 timespec_t hrestime; 4762 vmm_time_snapshot(&tsc, &hrtime, &hrestime); 4763 4764 /* Write the output values */ 4765 out->vt_guest_freq = vm->guest_freq; 4766 4767 /* 4768 * Use only the VM-wide TSC offset for calculating the guest TSC, 4769 * ignoring per-vCPU offsets. This value is provided as a "base" guest 4770 * TSC at the time of the read; per-vCPU offsets are factored in as 4771 * needed elsewhere, either when running the vCPU or if the guest reads 4772 * the TSC via rdmsr. 4773 */ 4774 out->vt_guest_tsc = calc_guest_tsc(tsc, vm->freq_multiplier, 4775 vm->tsc_offset); 4776 out->vt_boot_hrtime = vm->boot_hrtime; 4777 out->vt_hrtime = hrtime; 4778 out->vt_hres_sec = hrestime.tv_sec; 4779 out->vt_hres_ns = hrestime.tv_nsec; 4780 4781 return (0); 4782 } 4783 4784 /* 4785 * Modify VMM Time data related values 4786 * 4787 * This interface serves to allow guests' TSC and device timers to continue 4788 * functioning across live migrations. On a successful write, the VM-wide TSC 4789 * offset and boot_hrtime of the guest are updated. 4790 * 4791 * The interface requires an hrtime of the system at which the caller wrote 4792 * this data; this allows us to adjust the TSC and boot_hrtime slightly to 4793 * account for time passing between the userspace call and application 4794 * of the data here. 4795 * 4796 * There are several possibilities for invalid input, including: 4797 * - a requested guest frequency of 0, or a frequency otherwise unsupported by 4798 * the underlying platform 4799 * - hrtime or boot_hrtime values that appear to be from the future 4800 * - the requested frequency does not match the host, and this system does not 4801 * have hardware TSC scaling support 4802 */ 4803 static int 4804 vmm_data_write_vmm_time(void *arg, const vmm_data_req_t *req) 4805 { 4806 VERIFY3U(req->vdr_class, ==, VDC_VMM_TIME); 4807 VERIFY3U(req->vdr_version, ==, 1); 4808 VERIFY3U(req->vdr_len, >=, sizeof (struct vdi_time_info_v1)); 4809 4810 struct vm *vm = arg; 4811 const struct vdi_time_info_v1 *src = req->vdr_data; 4812 4813 /* 4814 * Platform-specific checks will verify the requested frequency against 4815 * the supported range further, but a frequency of 0 is never valid. 4816 */ 4817 if (src->vt_guest_freq == 0) { 4818 return (EINVAL); 4819 } 4820 4821 /* 4822 * Check whether the requested frequency is supported and get the 4823 * frequency multiplier.
4824 */ 4825 uint64_t mult = VM_TSCM_NOSCALE; 4826 freqratio_res_t res = ops->vmfreqratio(src->vt_guest_freq, 4827 vmm_host_freq, &mult); 4828 switch (res) { 4829 case FR_SCALING_NOT_SUPPORTED: 4830 /* 4831 * This system doesn't support TSC scaling, and the guest/host 4832 * frequencies differ 4833 */ 4834 return (EPERM); 4835 case FR_OUT_OF_RANGE: 4836 /* Requested frequency ratio is too small/large */ 4837 return (EINVAL); 4838 case FR_SCALING_NOT_NEEDED: 4839 /* Host and guest frequencies are the same */ 4840 VERIFY3U(mult, ==, VM_TSCM_NOSCALE); 4841 break; 4842 case FR_VALID: 4843 VERIFY3U(mult, !=, VM_TSCM_NOSCALE); 4844 break; 4845 } 4846 4847 /* 4848 * Find (and validate) the hrtime delta between the input request and 4849 * when we received it so that we can bump the TSC to account for time 4850 * passing. 4851 * 4852 * We ignore the hrestime as input, as it is a field that 4853 * exists only for reads. 4854 */ 4855 uint64_t tsc; 4856 hrtime_t hrtime; 4857 timespec_t hrestime; 4858 vmm_time_snapshot(&tsc, &hrtime, &hrestime); 4859 if ((src->vt_hrtime > hrtime) || (src->vt_boot_hrtime > hrtime)) { 4860 /* 4861 * The caller has passed in an hrtime / boot_hrtime from the 4862 * future. 4863 */ 4864 return (EINVAL); 4865 } 4866 hrtime_t hrt_delta = hrtime - src->vt_hrtime; 4867 4868 /* Calculate the guest TSC adjustment at the incoming guest frequency */ 4869 const uint64_t host_ticks = unscalehrtime(hrt_delta); 4870 const uint64_t guest_ticks = vmm_scale_tsc(host_ticks, 4871 mult); 4872 const uint64_t base_guest_tsc = src->vt_guest_tsc + guest_ticks; 4873 4874 /* Update guest time data */ 4875 vm->freq_multiplier = mult; 4876 vm->guest_freq = src->vt_guest_freq; 4877 vm->boot_hrtime = src->vt_boot_hrtime; 4878 vm->tsc_offset = calc_tsc_offset(tsc, base_guest_tsc, 4879 vm->freq_multiplier); 4880 4881 return (0); 4882 } 4883 4884 static const vmm_data_version_entry_t vmm_time_v1 = { 4885 .vdve_class = VDC_VMM_TIME, 4886 .vdve_version = 1, 4887 .vdve_len_expect = sizeof (struct vdi_time_info_v1), 4888 .vdve_readf = vmm_data_read_vmm_time, 4889 .vdve_writef = vmm_data_write_vmm_time, 4890 }; 4891 VMM_DATA_VERSION(vmm_time_v1); 4892 4893 4894 static int 4895 vmm_data_read_versions(void *arg, const vmm_data_req_t *req) 4896 { 4897 VERIFY3U(req->vdr_class, ==, VDC_VERSION); 4898 VERIFY3U(req->vdr_version, ==, 1); 4899 4900 const uint32_t total_size = SET_COUNT(vmm_data_version_entries) * 4901 sizeof (struct vdi_version_entry_v1); 4902 4903 /* Make sure there is room for all of the entries */ 4904 *req->vdr_result_len = total_size; 4905 if (req->vdr_len < *req->vdr_result_len) { 4906 return (ENOSPC); 4907 } 4908 4909 struct vdi_version_entry_v1 *entryp = req->vdr_data; 4910 const vmm_data_version_entry_t **vdpp; 4911 SET_FOREACH(vdpp, vmm_data_version_entries) { 4912 const vmm_data_version_entry_t *vdp = *vdpp; 4913 4914 entryp->vve_class = vdp->vdve_class; 4915 entryp->vve_version = vdp->vdve_version; 4916 entryp->vve_len_expect = vdp->vdve_len_expect; 4917 entryp->vve_len_per_item = vdp->vdve_len_per_item; 4918 entryp++; 4919 } 4920 return (0); 4921 } 4922 4923 static int 4924 vmm_data_write_versions(void *arg, const vmm_data_req_t *req) 4925 { 4926 /* Writing to the version information makes no sense */ 4927 return (EPERM); 4928 } 4929 4930 static const vmm_data_version_entry_t versions_v1 = { 4931 .vdve_class = VDC_VERSION, 4932 .vdve_version = 1, 4933 .vdve_len_per_item = sizeof (struct vdi_version_entry_v1), 4934 .vdve_readf = vmm_data_read_versions, 4935 .vdve_writef = vmm_data_write_versions, 4936 }; 4937
VMM_DATA_VERSION(versions_v1); 4938 4939 int 4940 vmm_data_read(struct vm *vm, int vcpuid, const vmm_data_req_t *req) 4941 { 4942 int err = 0; 4943 4944 if (vmm_data_is_cpu_specific(req->vdr_class)) { 4945 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) { 4946 return (EINVAL); 4947 } 4948 } 4949 4950 const vmm_data_version_entry_t *entry = NULL; 4951 err = vmm_data_find(req, &entry); 4952 if (err != 0) { 4953 return (err); 4954 } 4955 ASSERT(entry != NULL); 4956 4957 if (entry->vdve_readf != NULL) { 4958 void *datap = vmm_data_from_class(req, vm, vcpuid); 4959 4960 err = entry->vdve_readf(datap, req); 4961 } else if (entry->vdve_vcpu_readf != NULL) { 4962 err = entry->vdve_vcpu_readf(vm, vcpuid, req); 4963 } else { 4964 err = EINVAL; 4965 } 4966 4967 /* 4968 * Successful reads of fixed-length data should populate the length of 4969 * that result. 4970 */ 4971 if (err == 0 && entry->vdve_len_expect != 0) { 4972 *req->vdr_result_len = entry->vdve_len_expect; 4973 } 4974 4975 return (err); 4976 } 4977 4978 int 4979 vmm_data_write(struct vm *vm, int vcpuid, const vmm_data_req_t *req) 4980 { 4981 int err = 0; 4982 4983 if (vmm_data_is_cpu_specific(req->vdr_class)) { 4984 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) { 4985 return (EINVAL); 4986 } 4987 } 4988 4989 const vmm_data_version_entry_t *entry = NULL; 4990 err = vmm_data_find(req, &entry); 4991 if (err != 0) { 4992 return (err); 4993 } 4994 ASSERT(entry != NULL); 4995 4996 if (entry->vdve_writef != NULL) { 4997 void *datap = vmm_data_from_class(req, vm, vcpuid); 4998 4999 err = entry->vdve_writef(datap, req); 5000 } else if (entry->vdve_vcpu_writef != NULL) { 5001 err = entry->vdve_vcpu_writef(vm, vcpuid, req); 5002 } else { 5003 err = EINVAL; 5004 } 5005 5006 /* 5007 * Successful writes of fixed-length data should populate the length of 5008 * that result. 5009 */ 5010 if (err == 0 && entry->vdve_len_expect != 0) { 5011 *req->vdr_result_len = entry->vdve_len_expect; 5012 } 5013 5014 return (err); 5015 } 5016
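
/*
 * As an illustrative sketch (not a code path exercised in this file), the
 * current guest time values could be sampled from kernel context with a
 * VDC_VMM_TIME read through this interface; userspace normally reaches the
 * same handlers via the VMM data read/write ioctls:
 *
 *	struct vdi_time_info_v1 info;
 *	uint32_t len;
 *	vmm_data_req_t req = {
 *		.vdr_class = VDC_VMM_TIME,
 *		.vdr_version = 1,
 *		.vdr_len = sizeof (info),
 *		.vdr_data = &info,
 *		.vdr_result_len = &len,
 *	};
 *	int err = vmm_data_read(vm, -1, &req);
 *
 * On success, vmm_data_read_vmm_time() fills `info` and `len` is set to
 * sizeof (struct vdi_time_info_v1), per vdve_len_expect above.
 */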