/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2014 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2024 Oxide Computer Company
 */

#include <sys/types.h>
#include <sys/stdbool.h>
#include <sys/errno.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <machine/vmm.h>
#include <sys/vmm_kernel.h>

#include "vmm_host.h"
#include "vmm_util.h"

/*
 * CPUID Emulation
 *
 * All CPUID instruction exits are handled by the in-kernel emulation.
 *
 * ----------------
 * Legacy Emulation
 * ----------------
 *
 * Originally, the kernel vmm portion of bhyve relied on fixed logic to filter
 * and/or generate CPUID results based on what was reported by the host CPU, as
 * well as attributes of the VM (such as CPU topology, and enabled features).
 * This is largely adequate to expose CPU capabilities to the guest in a manner
 * which allows it to operate properly.
 *
 * ------------------------------
 * Userspace-Controlled Emulation
 * ------------------------------
 *
 * In certain situations, more control over the CPUID emulation results
 * presented to the guest is desired.  Live migration between physical hosts is
 * one such example, where the underlying CPUs, or at least their microcode,
 * may differ between the source and destination.  In such cases, where changes
 * to the CPUID results cannot be tolerated, the userspace portion of the VMM
 * can be in complete control over the leaves which are presented to the guest.
 * It may still consult the "legacy" CPUID data for guidance about which CPU
 * features are safe to expose (due to hypervisor limitations, etc).
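 * Any leaf for which no entry is provided will be emitted to the guest as all
 * zeroes (subject to the Intel-style fallback implemented by
 * cpuid_find_entry()).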
 * This leaf information is configured on a per-vCPU basis.
 *
 * The emulation entries provided by userspace are expected to be in sorted
 * order, running from lowest function and index to highest.
 *
 * For example:
 *	(func: 00h idx: 00h) ->
 *	    (flags: 0, eax: highest std leaf, ebx-edx: vendor id)
 *	(func: 0Dh idx: 00h) ->
 *	    (flags: VCE_FLAG_MATCH_INDEX, eax - edx: XCR0/XSAVE info)
 *	(func: 0Dh idx: 01h) ->
 *	    (flags: VCE_FLAG_MATCH_INDEX, eax - edx: XSAVE/XSAVEOPT details)
 *	...
 *	(func: 0Dh idx: 07h) ->
 *	    (flags: VCE_FLAG_MATCH_INDEX, eax - edx: AVX-512 details)
 *	(func: 80000000h idx: 0h) ->
 *	    (flags: 0, eax: highest extd leaf ...)
 *	...
 */


#define	CPUID_TYPE_MASK	0xf0000000
#define	CPUID_TYPE_STD	0x00000000
#define	CPUID_TYPE_EXTD	0x80000000

static const struct vcpu_cpuid_entry cpuid_empty_entry = { 0 };

/*
 * Given the CPUID configuration for a vCPU, locate the entry which matches the
 * provided function/index tuple.  The entries list is walked in order, and the
 * first valid match based on the function/index and flags will be emitted.
 *
 * If no match is found, but Intel-style fallback is configured, then the
 * highest standard leaf encountered will be emitted.
 */
static const struct vcpu_cpuid_entry *
cpuid_find_entry(const vcpu_cpuid_config_t *cfg, uint32_t func, uint32_t idx)
{
	const struct vcpu_cpuid_entry *last_std = NULL;
	const bool intel_fallback =
	    (cfg->vcc_flags & VCC_FLAG_INTEL_FALLBACK) != 0;
	bool matched_leaf = false;

	ASSERT0(cfg->vcc_flags & VCC_FLAG_LEGACY_HANDLING);

	for (uint_t i = 0; i < cfg->vcc_nent; i++) {
		const struct vcpu_cpuid_entry *ent = &cfg->vcc_entries[i];
		const bool ent_is_std =
		    (ent->vce_function & CPUID_TYPE_MASK) == CPUID_TYPE_STD;
		const bool ent_must_match_idx =
		    (ent->vce_flags & VCE_FLAG_MATCH_INDEX) != 0;

		if (ent_is_std) {
			/*
			 * Keep track of the last "standard" leaf for
			 * Intel-style fallback behavior.
			 *
			 * This does not currently account for the sub-leaf
			 * index matching behavior for fallback described in
			 * the SDM.  It is not clear if any consumers rely on
			 * such matching when encountering fallback.
			 */
			last_std = ent;
		}
		if (ent->vce_function == func) {
			if (ent->vce_index == idx || !ent_must_match_idx) {
				return (ent);
			}
			/*
			 * Make note of when the top-level leaf matches, even
			 * when the index does not.
			 */
			matched_leaf = true;
		} else if (ent->vce_function > func) {
			if ((ent->vce_function & CPUID_TYPE_MASK) ==
			    (func & CPUID_TYPE_MASK)) {
				/*
				 * We are beyond a valid leaf to match, but have
				 * not exceeded the maximum leaf for this "type"
				 * (standard, extended, hvm, etc), so return an
				 * empty entry.
				 */
				return (&cpuid_empty_entry);
			} else {
				/*
				 * Otherwise, we can stop now, having gone
				 * beyond the last entry which could match the
				 * target function in a sorted list.
				 */
				break;
			}
		}
	}

	if (matched_leaf || !intel_fallback) {
		return (&cpuid_empty_entry);
	} else {
		return (last_std);
	}
}

void
vcpu_emulate_cpuid(struct vm *vm, int vcpuid, uint64_t *rax, uint64_t *rbx,
    uint64_t *rcx, uint64_t *rdx)
{
	const vcpu_cpuid_config_t *cfg = vm_cpuid_config(vm, vcpuid);

	ASSERT3P(rax, !=, NULL);
	ASSERT3P(rbx, !=, NULL);
	ASSERT3P(rcx, !=, NULL);
	ASSERT3P(rdx, !=, NULL);

	/* Fall back to legacy handling if specified */
	if ((cfg->vcc_flags & VCC_FLAG_LEGACY_HANDLING) != 0) {
		uint32_t regs[4] = { *rax, 0, *rcx, 0 };

		legacy_emulate_cpuid(vm, vcpuid, &regs[0], &regs[1], &regs[2],
		    &regs[3]);
		/* CPUID clears the upper 32-bits of the long-mode registers. */
		*rax = regs[0];
		*rbx = regs[1];
		*rcx = regs[2];
		*rdx = regs[3];
		return;
	}

	const struct vcpu_cpuid_entry *ent = cpuid_find_entry(cfg, *rax, *rcx);
	ASSERT(ent != NULL);
	/* CPUID clears the upper 32-bits of the long-mode registers. */
	*rax = ent->vce_eax;
	*rbx = ent->vce_ebx;
	*rcx = ent->vce_ecx;
	*rdx = ent->vce_edx;
}

/*
 * Get the current CPUID emulation configuration for this vCPU.
 *
 * Only the existing flags will be emitted if the vCPU is configured for legacy
 * operation via the VCC_FLAG_LEGACY_HANDLING flag.  If in userspace-controlled
 * mode, then we will attempt to copy the existing entries into vcc_entries,
 * its size specified by vcc_nent.
 *
 * Regardless of whether vcc_entries is adequately sized (or even present),
 * vcc_nent will be set to the number of existing entries.
 */
int
vm_get_cpuid(struct vm *vm, int vcpuid, vcpu_cpuid_config_t *res)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU) {
		return (EINVAL);
	}

	const vcpu_cpuid_config_t *src = vm_cpuid_config(vm, vcpuid);
	if (src->vcc_nent > res->vcc_nent) {
		res->vcc_nent = src->vcc_nent;
		return (E2BIG);
	} else if (src->vcc_nent != 0) {
		bcopy(src->vcc_entries, res->vcc_entries,
		    src->vcc_nent * sizeof (struct vcpu_cpuid_entry));
	}
	res->vcc_flags = src->vcc_flags;
	res->vcc_nent = src->vcc_nent;
	return (0);
}

/*
 * Set the CPUID emulation configuration for this vCPU.
 *
 * If VCC_FLAG_LEGACY_HANDLING is set in vcc_flags, then vcc_nent is expected
 * to be set to 0, as configuring a list of entries would be useless when using
 * the legacy handling.
 *
 * Any existing entries which are configured are freed, and the newly provided
 * ones will be copied into their place.
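 *
 * Note that the entries are expected to be in sorted order, as described in
 * the block comment at the top of this file; that ordering is not currently
 * validated here.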
 */
int
vm_set_cpuid(struct vm *vm, int vcpuid, const vcpu_cpuid_config_t *src)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU) {
		return (EINVAL);
	}
	if (src->vcc_nent > VMM_MAX_CPUID_ENTRIES) {
		return (EINVAL);
	}
	if ((src->vcc_flags & ~VCC_FLAGS_VALID) != 0) {
		return (EINVAL);
	}
	if ((src->vcc_flags & VCC_FLAG_LEGACY_HANDLING) != 0 &&
	    src->vcc_nent != 0) {
		/* No entries should be provided if using legacy handling */
		return (EINVAL);
	}
	for (uint_t i = 0; i < src->vcc_nent; i++) {
		/* Ensure all entries carry valid flags */
		if ((src->vcc_entries[i].vce_flags & ~VCE_FLAGS_VALID) != 0) {
			return (EINVAL);
		}
	}

	vcpu_cpuid_config_t *cfg = vm_cpuid_config(vm, vcpuid);

	/* Free any existing entries first */
	vcpu_cpuid_cleanup(cfg);

	/* Copy supplied entries into freshly allocated space */
	if (src->vcc_nent != 0) {
		const size_t entries_sz =
		    src->vcc_nent * sizeof (struct vcpu_cpuid_entry);

		cfg->vcc_nent = src->vcc_nent;
		cfg->vcc_entries = kmem_alloc(entries_sz, KM_SLEEP);
		bcopy(src->vcc_entries, cfg->vcc_entries, entries_sz);
	}
	cfg->vcc_flags = src->vcc_flags;

	return (0);
}

void
vcpu_cpuid_init(vcpu_cpuid_config_t *cfg)
{
	/* Default to legacy-style handling */
	cfg->vcc_flags = VCC_FLAG_LEGACY_HANDLING;
	cfg->vcc_nent = 0;
	cfg->vcc_entries = NULL;
}

void
vcpu_cpuid_cleanup(vcpu_cpuid_config_t *cfg)
{
	if (cfg->vcc_nent != 0) {
		ASSERT3P(cfg->vcc_entries, !=, NULL);

		kmem_free(cfg->vcc_entries,
		    cfg->vcc_nent * sizeof (struct vcpu_cpuid_entry));

		cfg->vcc_nent = 0;
		cfg->vcc_entries = NULL;
	}
}

static const char bhyve_id[12] = "bhyve bhyve ";

/*
 * Force exposition of the invariant TSC capability, regardless of whether the
 * host CPU reports having it.
 */
static int vmm_force_invariant_tsc = 0;

#define	CPUID_0000_0000	(0x0)
#define	CPUID_0000_0001	(0x1)
#define	CPUID_0000_0002	(0x2)
#define	CPUID_0000_0003	(0x3)
#define	CPUID_0000_0004	(0x4)
#define	CPUID_0000_0006	(0x6)
#define	CPUID_0000_0007	(0x7)
#define	CPUID_0000_000A	(0xA)
#define	CPUID_0000_000B	(0xB)
#define	CPUID_0000_000D	(0xD)
#define	CPUID_0000_000F	(0xF)
#define	CPUID_0000_0010	(0x10)
#define	CPUID_0000_0015	(0x15)
#define	CPUID_8000_0000	(0x80000000)
#define	CPUID_8000_0001	(0x80000001)
#define	CPUID_8000_0002	(0x80000002)
#define	CPUID_8000_0003	(0x80000003)
#define	CPUID_8000_0004	(0x80000004)
#define	CPUID_8000_0006	(0x80000006)
#define	CPUID_8000_0007	(0x80000007)
#define	CPUID_8000_0008	(0x80000008)
#define	CPUID_8000_001D	(0x8000001D)
#define	CPUID_8000_001E	(0x8000001E)

#define	CPUID_VM_HIGH	0x40000000

/*
 * CPUID instruction Fn0000_0001:
 */
#define	CPUID_0000_0001_APICID_SHIFT	24


/*
 * Round up to the next power of two, if necessary, and then take log2.
 * Returns -1 if argument is zero.
 */
static __inline int
log2(uint_t x)
{
	return (fls(x << (1 - powerof2(x))) - 1);
}

/*
 * The "legacy" bhyve cpuid emulation, which largely applies statically defined
 * masks to the data provided by the host CPU.
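 *
 * This remains the default mode of operation (vcpu_cpuid_init() sets
 * VCC_FLAG_LEGACY_HANDLING) unless userspace installs an explicit entry list
 * via vm_set_cpuid().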
 */
void
legacy_emulate_cpuid(struct vm *vm, int vcpu_id, uint32_t *eax, uint32_t *ebx,
    uint32_t *ecx, uint32_t *edx)
{
	const struct xsave_limits *limits;
	uint64_t cr4;
	int error, enable_invpcid, level, width = 0, x2apic_id = 0;
	unsigned int func, regs[4], logical_cpus = 0, param;
	enum x2apic_state x2apic_state;
	uint16_t cores, maxcpus, sockets, threads;

	/*
	 * The function of CPUID is controlled through the provided value of
	 * %eax (and secondarily %ecx, for certain leaf data).
	 */
	func = (uint32_t)*eax;
	param = (uint32_t)*ecx;

	/*
	 * Requests for invalid CPUID levels should map to the highest
	 * available level instead.
	 */
	if (cpu_exthigh != 0 && func >= 0x80000000) {
		if (func > cpu_exthigh)
			func = cpu_exthigh;
	} else if (func >= 0x40000000) {
		if (func > CPUID_VM_HIGH)
			func = CPUID_VM_HIGH;
	} else if (func > cpu_high) {
		func = cpu_high;
	}

	/*
	 * In general the approach used for CPU topology is to
	 * advertise a flat topology where all CPUs are packages with
	 * no multi-core or SMT.
	 */
	switch (func) {
	/*
	 * Pass these through to the guest
	 */
	case CPUID_0000_0000:
	case CPUID_0000_0002:
	case CPUID_0000_0003:
	case CPUID_8000_0000:
	case CPUID_8000_0002:
	case CPUID_8000_0003:
	case CPUID_8000_0004:
	case CPUID_8000_0006:
		cpuid_count(func, param, regs);
		break;
	case CPUID_8000_0008:
		cpuid_count(func, param, regs);
		if (vmm_is_svm()) {
			/*
			 * As on Intel (0000_0007:0, EDX), mask out
			 * unsupported or unsafe AMD extended features
			 * (8000_0008 EBX).
			 */
			regs[1] &= (AMDFEID_CLZERO | AMDFEID_IRPERF |
			    AMDFEID_XSAVEERPTR);

			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			/*
			 * Here, width is ApicIdCoreIdSize, present on
			 * at least Family 15h and newer.  It
			 * represents the "number of bits in the
			 * initial apicid that indicate thread id
			 * within a package."
			 *
			 * Our topo_probe_amd() uses it for
			 * pkg_id_shift and other OSes may rely on it.
			 */
			width = MIN(0xF, log2(threads * cores));
			if (width < 0x4)
				width = 0;
			logical_cpus = MIN(0xFF, threads * cores - 1);
			regs[2] = (width << AMDID_COREID_SIZE_SHIFT) |
			    logical_cpus;
		}
		break;

	case CPUID_8000_0001:
		cpuid_count(func, param, regs);

		/*
		 * Hide SVM from guest.
		 */
		regs[2] &= ~AMDID2_SVM;

		/*
		 * Don't advertise extended performance counter MSRs
		 * to the guest.
		 */
		regs[2] &= ~AMDID2_PCXC;
		regs[2] &= ~AMDID2_PNXC;
		regs[2] &= ~AMDID2_PTSCEL2I;

		/*
		 * Don't advertise Instruction Based Sampling feature.
		 */
		regs[2] &= ~AMDID2_IBS;

		/* NodeID MSR not available */
		regs[2] &= ~AMDID2_NODE_ID;

		/* Don't advertise the OS visible workaround feature */
		regs[2] &= ~AMDID2_OSVW;

		/* Hide mwaitx/monitorx capability from the guest */
		regs[2] &= ~AMDID2_MWAITX;

#ifndef __FreeBSD__
		/*
		 * Detection routines for TCE and FFXSR are missing
		 * from our vm_cpuid_capability() detection logic
		 * today.  Mask them out until that is remedied.
		 * They do not appear to be in common usage, so their
		 * absence should not cause undue trouble.
		 */
		regs[2] &= ~AMDID2_TCE;
		regs[3] &= ~AMDID_FFXSR;
#endif

		/*
		 * Hide rdtscp/ia32_tsc_aux until we know how
		 * to deal with them.
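		 * (Exposing RDTSCP safely would require the guest's
		 * IA32_TSC_AUX value to be properly virtualized.)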
		 */
		regs[3] &= ~AMDID_RDTSCP;
		break;

	case CPUID_8000_0007:
		cpuid_count(func, param, regs);
		/*
		 * AMD uses this leaf to advertise the processor's
		 * power monitoring and RAS capabilities.  These
		 * features are hardware-specific and exposing
		 * them to a guest doesn't make a lot of sense.
		 *
		 * Intel uses this leaf only to advertise the
		 * "Invariant TSC" feature with all other bits
		 * being reserved (set to zero).
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;

		/*
		 * If the host system possesses an invariant TSC, then
		 * it is safe to expose to the guest.
		 *
		 * If there is measured skew between host TSCs, it will
		 * be properly offset so guests do not observe any
		 * change between CPU migrations.
		 */
		regs[3] &= AMDPM_TSC_INVARIANT;

		/*
		 * Since illumos avoids deep C-states on CPUs which do
		 * not support an invariant TSC, it may be safe (and
		 * desired) to unconditionally expose that capability to
		 * the guest.
		 */
		if (vmm_force_invariant_tsc != 0) {
			regs[3] |= AMDPM_TSC_INVARIANT;
		}
		break;

	case CPUID_8000_001D:
		/* AMD Cache topology, like 0000_0004 for Intel. */
		if (!vmm_is_svm())
			goto default_leaf;

		/*
		 * Similar to Intel, generate a fictitious cache
		 * topology for the guest with L3 shared by the
		 * package, and L1 and L2 local to a core.
		 */
		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		switch (param) {
		case 0:
			logical_cpus = threads;
			level = 1;
			func = 1;	/* data cache */
			break;
		case 1:
			logical_cpus = threads;
			level = 2;
			func = 3;	/* unified cache */
			break;
		case 2:
			logical_cpus = threads * cores;
			level = 3;
			func = 3;	/* unified cache */
			break;
		default:
			logical_cpus = 0;
			level = 0;
			func = 0;
			break;
		}

		if (level == 0) {
			regs[0] = 0;
			regs[1] = 0;
		} else {
			logical_cpus = MIN(0xfff, logical_cpus - 1);
			regs[0] = (logical_cpus << 14) | (1 << 8) |
			    (level << 5) | func;
			regs[1] = func > 0 ? CACHE_LINE_SIZE - 1 : 0;
		}
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_8000_001E:
		/*
		 * AMD Family 16h+ and Hygon Family 18h additional
		 * identifiers.
		 */
		if (!vmm_is_svm() || CPUID_TO_FAMILY(cpu_id) < 0x16)
			goto default_leaf;

		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		regs[0] = vcpu_id;
		threads = MIN(0xFF, threads - 1);
		regs[1] = (threads << 8) |
		    (vcpu_id >> log2(threads + 1));
		/*
		 * XXX Bhyve topology cannot yet represent >1 node per
		 * processor.
		 */
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_0001:
		do_cpuid(1, regs);

		error = vm_get_x2apic_state(vm, vcpu_id, &x2apic_state);
		VERIFY0(error);

		/*
		 * Override the APIC ID only in ebx
		 */
		regs[1] &= ~(CPUID_LOCAL_APIC_ID);
		regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);

		/*
		 * Don't expose VMX, SpeedStep, TME or SMX capability.
		 * Advertise x2APIC capability and Hypervisor guest.
		 */
		regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);
		regs[2] &= ~(CPUID2_SMX);

		regs[2] |= CPUID2_HV;

		if (x2apic_state != X2APIC_DISABLED)
			regs[2] |= CPUID2_X2APIC;
		else
			regs[2] &= ~CPUID2_X2APIC;

		/*
		 * Only advertise CPUID2_XSAVE in the guest if
		 * the host is using XSAVE.
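		 * (The OSXSAVE bit read from the host here reflects
		 * whether the host has set CR4.OSXSAVE.)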
647 */ 648 if (!(regs[2] & CPUID2_OSXSAVE)) 649 regs[2] &= ~CPUID2_XSAVE; 650 651 /* 652 * If CPUID2_XSAVE is being advertised and the 653 * guest has set CR4_XSAVE, set 654 * CPUID2_OSXSAVE. 655 */ 656 regs[2] &= ~CPUID2_OSXSAVE; 657 if (regs[2] & CPUID2_XSAVE) { 658 error = vm_get_register(vm, vcpu_id, 659 VM_REG_GUEST_CR4, &cr4); 660 VERIFY0(error); 661 if (cr4 & CR4_XSAVE) 662 regs[2] |= CPUID2_OSXSAVE; 663 } 664 665 /* 666 * Hide monitor/mwait until we know how to deal with 667 * these instructions. 668 */ 669 regs[2] &= ~CPUID2_MON; 670 671 /* 672 * Hide the performance and debug features. 673 */ 674 regs[2] &= ~CPUID2_PDCM; 675 676 /* 677 * No TSC deadline support in the APIC yet 678 */ 679 regs[2] &= ~CPUID2_TSCDLT; 680 681 /* 682 * Hide thermal monitoring 683 */ 684 regs[3] &= ~(CPUID_ACPI | CPUID_TM); 685 686 /* 687 * Hide the debug store capability. 688 */ 689 regs[3] &= ~CPUID_DS; 690 691 /* 692 * Advertise the Machine Check and MTRR capability. 693 * 694 * Some guest OSes (e.g. Windows) will not boot if 695 * these features are absent. 696 */ 697 regs[3] |= (CPUID_MCA | CPUID_MCE | CPUID_MTRR); 698 699 vm_get_topology(vm, &sockets, &cores, &threads, 700 &maxcpus); 701 logical_cpus = threads * cores; 702 regs[1] &= ~CPUID_HTT_CORES; 703 regs[1] |= (logical_cpus & 0xff) << 16; 704 regs[3] |= CPUID_HTT; 705 break; 706 707 case CPUID_0000_0004: 708 cpuid_count(func, param, regs); 709 710 if (regs[0] || regs[1] || regs[2] || regs[3]) { 711 vm_get_topology(vm, &sockets, &cores, &threads, 712 &maxcpus); 713 regs[0] &= 0x3ff; 714 regs[0] |= (cores - 1) << 26; 715 /* 716 * Cache topology: 717 * - L1 and L2 are shared only by the logical 718 * processors in a single core. 719 * - L3 and above are shared by all logical 720 * processors in the package. 721 */ 722 logical_cpus = threads; 723 level = (regs[0] >> 5) & 0x7; 724 if (level >= 3) 725 logical_cpus *= cores; 726 regs[0] |= (logical_cpus - 1) << 14; 727 } 728 break; 729 730 case CPUID_0000_0007: 731 regs[0] = 0; 732 regs[1] = 0; 733 regs[2] = 0; 734 regs[3] = 0; 735 736 /* leaf 0 */ 737 if (param == 0) { 738 cpuid_count(func, param, regs); 739 740 /* Only leaf 0 is supported */ 741 regs[0] = 0; 742 743 /* 744 * Expose known-safe features. 745 */ 746 regs[1] &= CPUID_STDEXT_FSGSBASE | 747 CPUID_STDEXT_BMI1 | CPUID_STDEXT_HLE | 748 CPUID_STDEXT_AVX2 | CPUID_STDEXT_SMEP | 749 CPUID_STDEXT_BMI2 | 750 CPUID_STDEXT_ERMS | CPUID_STDEXT_RTM | 751 CPUID_STDEXT_AVX512F | 752 CPUID_STDEXT_AVX512DQ | 753 CPUID_STDEXT_RDSEED | 754 CPUID_STDEXT_SMAP | 755 CPUID_STDEXT_AVX512PF | 756 CPUID_STDEXT_AVX512ER | 757 CPUID_STDEXT_AVX512CD | CPUID_STDEXT_SHA | 758 CPUID_STDEXT_AVX512BW | 759 CPUID_STDEXT_AVX512VL; 760 regs[2] &= CPUID_STDEXT2_VAES | 761 CPUID_STDEXT2_VPCLMULQDQ; 762 regs[3] &= CPUID_STDEXT3_MD_CLEAR; 763 764 /* Advertise INVPCID if it is enabled. 
			 */
			error = vm_get_capability(vm, vcpu_id,
			    VM_CAP_ENABLE_INVPCID, &enable_invpcid);
			if (error == 0 && enable_invpcid)
				regs[1] |= CPUID_STDEXT_INVPCID;
		}
		break;

	case CPUID_0000_0006:
		regs[0] = CPUTPM1_ARAT;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000A:
		/*
		 * Handle the access, but report 0 for
		 * all options
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000B:
		/*
		 * Intel processor topology enumeration
		 */
		if (vmm_is_intel()) {
			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			if (param == 0) {
				logical_cpus = threads;
				width = log2(logical_cpus);
				level = CPUID_TYPE_SMT;
				x2apic_id = vcpu_id;
			}

			if (param == 1) {
				logical_cpus = threads * cores;
				width = log2(logical_cpus);
				level = CPUID_TYPE_CORE;
				x2apic_id = vcpu_id;
			}

			if (param >= 2) {
				width = 0;
				logical_cpus = 0;
				level = 0;
				x2apic_id = 0;
			}

			regs[0] = width & 0x1f;
			regs[1] = logical_cpus & 0xffff;
			regs[2] = (level << 8) | (param & 0xff);
			regs[3] = x2apic_id;
		} else {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
		}
		break;

	case CPUID_0000_000D:
		limits = vmm_get_xsave_limits();
		if (!limits->xsave_enabled) {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		}

		cpuid_count(func, param, regs);
		switch (param) {
		case 0:
			/*
			 * Only permit the guest to use bits
			 * that are active in the host in
			 * %xcr0.  Also, claim that the
			 * maximum save area size is
			 * equivalent to the host's current
			 * save area size.  Since this runs
			 * "inside" of vmrun(), it runs with
			 * the guest's xcr0, so the current
			 * save area size is correct as-is.
			 */
			regs[0] &= limits->xcr0_allowed;
			regs[2] = limits->xsave_max_size;
			regs[3] &= (limits->xcr0_allowed >> 32);
			break;
		case 1:
			/* Only permit XSAVEOPT. */
			regs[0] &= CPUID_EXTSTATE_XSAVEOPT;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		default:
			/*
			 * If the leaf is for a permitted feature,
			 * pass through as-is, otherwise return
			 * all zeroes.
			 */
			if (!(limits->xcr0_allowed & (1ul << param))) {
				regs[0] = 0;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
			}
			break;
		}
		break;

	case CPUID_0000_000F:
	case CPUID_0000_0010:
		/*
		 * Do not report any Resource Director Technology
		 * capabilities.  Exposing control of cache or memory
		 * controller resource partitioning to the guest is not
		 * at all sensible.
		 *
		 * This is already hidden at a high level by masking of
		 * leaf 0x7.  Even still, a guest may look here for
		 * detailed capability information.
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_0015:
		/*
		 * Don't report CPU TSC/Crystal ratio and clock
		 * values since guests may use these to derive the
		 * local APIC frequency.
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case 0x40000000:
		regs[0] = CPUID_VM_HIGH;
		bcopy(bhyve_id, &regs[1], 4);
		bcopy(bhyve_id + 4, &regs[2], 4);
		bcopy(bhyve_id + 8, &regs[3], 4);
		break;

	default:
default_leaf:
		/*
		 * The leaf value has already been clamped so
		 * simply pass this through.
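		 * (See the cpu_high/cpu_exthigh clamping near the top of
		 * this function.)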
		 */
		cpuid_count(func, param, regs);
		break;
	}

	*eax = regs[0];
	*ebx = regs[1];
	*ecx = regs[2];
	*edx = regs[3];
}