// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/lockdep.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include <asm/sgx.h>
#include <asm/cpuid.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"
#include "xen.h"

/*
 * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
 * aligned to sizeof(unsigned long) because it's not accessed via bitops.
 */
u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
EXPORT_SYMBOL_GPL(kvm_cpu_caps);
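/*
 * Compute the size of the XSAVE area needed for the features in @xstate_bv.
 * In the standard format, each feature's offset is fixed and comes straight
 * from CPUID.0xD.<i>.EBX; in the compacted format, enabled features are
 * packed sequentially in feature-bit order, with an optional 64-byte
 * alignment enumerated in ECX[1].  Illustrative example (typical hardware):
 * with only AVX (bit 2) set in @xstate_bv, the legacy region plus XSAVE
 * header occupy bytes 0-575 and AVX state is 256 bytes at offset 576, so
 * both formats yield 832 bytes.
 */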
u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;

			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			/* ECX[1]: 64B alignment in compacted form */
			if (compacted)
				offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret;
			else
				offset = ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}

#define F feature_bit

/* Scattered Flag - For features that are scattered by cpufeatures.h. */
#define SF(name)						\
({								\
	BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES);	\
	(boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0);	\
})

/*
 * Magic value used by KVM when querying userspace-provided CPUID entries and
 * doesn't care about the CPUID index because the index of the function in
 * question is not significant.  Note, this magic value must have at least one
 * bit set in bits[63:32] and must be consumed as a u64 by cpuid_entry2_find()
 * to avoid false positives when processing guest CPUID input.
 */
#define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull

static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
	struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index)
{
	struct kvm_cpuid_entry2 *e;
	int i;

	/*
	 * KVM has a semi-arbitrary rule that querying the guest's CPUID model
	 * with IRQs disabled is disallowed.  The CPUID model can legitimately
	 * have over one hundred entries, i.e. the lookup is slow, and IRQs are
	 * typically disabled in KVM only when KVM is in a performance critical
	 * path, e.g. the core VM-Enter/VM-Exit run loop.  Nothing will break
	 * if this rule is violated, this assertion is purely to flag potential
	 * performance issues.  If this fires, consider moving the lookup out
	 * of the hotpath, e.g. by caching information during CPUID updates.
	 */
	lockdep_assert_irqs_enabled();

	for (i = 0; i < nent; i++) {
		e = &entries[i];

		if (e->function != function)
			continue;

		/*
		 * If the index isn't significant, use the first entry with a
		 * matching function.  It's userspace's responsibility to not
		 * provide "duplicate" entries in all cases.
		 */
		if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index)
			return e;

		/*
		 * Similarly, use the first matching entry if KVM is doing a
		 * lookup (as opposed to emulating CPUID) for a function that's
		 * architecturally defined as not having a significant index.
		 */
		if (index == KVM_CPUID_INDEX_NOT_SIGNIFICANT) {
			/*
			 * Direct lookups from KVM should not diverge from what
			 * KVM defines internally (the architectural behavior).
			 */
			WARN_ON_ONCE(cpuid_function_is_indexed(function));
			return e;
		}
	}

	return NULL;
}
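/*
 * Validate userspace-provided CPUID data before committing it.  Note,
 * dynamically enabled XSAVE features, e.g. XTILE_DATA (AMX state), require
 * userspace to first request guest permission, e.g. via
 * arch_prctl(ARCH_REQ_XCOMP_GUEST_PERM); fpu_enable_guest_xfd_features()
 * rejects xfeatures that haven't been permitted.
 */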
static int kvm_check_cpuid(struct kvm_vcpu *vcpu,
			   struct kvm_cpuid_entry2 *entries,
			   int nent)
{
	struct kvm_cpuid_entry2 *best;
	u64 xfeatures;

	/*
	 * The existing code assumes the virtual address width is 48 or 57 bits
	 * in the canonical address checks; exit if it is ever changed.
	 */
	best = cpuid_entry2_find(entries, nent, 0x80000008,
				 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
	if (best) {
		int vaddr_bits = (best->eax & 0xff00) >> 8;

		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
			return -EINVAL;
	}

	/*
	 * Exposing dynamic xfeatures to the guest requires additional
	 * enabling in the FPU, e.g. to expand the guest XSAVE state size.
	 */
	best = cpuid_entry2_find(entries, nent, 0xd, 0);
	if (!best)
		return 0;

	xfeatures = best->eax | ((u64)best->edx << 32);
	xfeatures &= XFEATURE_MASK_USER_DYNAMIC;
	if (!xfeatures)
		return 0;

	return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
}

/* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */
static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
				 int nent)
{
	struct kvm_cpuid_entry2 *orig;
	int i;

	if (nent != vcpu->arch.cpuid_nent)
		return -EINVAL;

	for (i = 0; i < nent; i++) {
		orig = &vcpu->arch.cpuid_entries[i];
		if (e2[i].function != orig->function ||
		    e2[i].index != orig->index ||
		    e2[i].flags != orig->flags ||
		    e2[i].eax != orig->eax || e2[i].ebx != orig->ebx ||
		    e2[i].ecx != orig->ecx || e2[i].edx != orig->edx)
			return -EINVAL;
	}

	return 0;
}

static struct kvm_hypervisor_cpuid __kvm_get_hypervisor_cpuid(struct kvm_cpuid_entry2 *entries,
							      int nent, const char *sig)
{
	struct kvm_hypervisor_cpuid cpuid = {};
	struct kvm_cpuid_entry2 *entry;
	u32 base;

	for_each_possible_hypervisor_cpuid_base(base) {
		entry = cpuid_entry2_find(entries, nent, base, KVM_CPUID_INDEX_NOT_SIGNIFICANT);

		if (entry) {
			u32 signature[3];

			signature[0] = entry->ebx;
			signature[1] = entry->ecx;
			signature[2] = entry->edx;

			if (!memcmp(signature, sig, sizeof(signature))) {
				cpuid.base = base;
				cpuid.limit = entry->eax;
				break;
			}
		}
	}

	return cpuid;
}

static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
							    const char *sig)
{
	return __kvm_get_hypervisor_cpuid(vcpu->arch.cpuid_entries,
					  vcpu->arch.cpuid_nent, sig);
}

static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_cpuid_entry2 *entries,
							      int nent, u32 kvm_cpuid_base)
{
	return cpuid_entry2_find(entries, nent, kvm_cpuid_base | KVM_CPUID_FEATURES,
				 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
}

static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
{
	u32 base = vcpu->arch.kvm_cpuid.base;

	if (!base)
		return NULL;

	return __kvm_find_kvm_cpuid_features(vcpu->arch.cpuid_entries,
					     vcpu->arch.cpuid_nent, base);
}

void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu);

	/*
	 * Save the feature bitmap to avoid a CPUID lookup for every PV
	 * operation.
	 */
	if (best)
		vcpu->arch.pv_cpuid.features = best->eax;
}

/*
 * Calculate guest's supported XCR0 taking into account guest CPUID data and
 * KVM's supported XCR0 (comprised of host's XCR0 and KVM_SUPPORTED_XCR0).
 */
static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent)
{
	struct kvm_cpuid_entry2 *best;

	best = cpuid_entry2_find(entries, nent, 0xd, 0);
	if (!best)
		return 0;

	return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0;
}
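/*
 * Refresh the CPUID bits that reflect live vCPU state rather than the static
 * model: OSXSAVE (CR4.OSXSAVE), APIC (MSR_IA32_APICBASE.EN), OSPKE (CR4.PKE),
 * the XSAVE area sizes in leaf 0xD (derived from the current XCR0), the
 * PV_UNHALT feature bit (cleared when HLT exits are disabled), and, absent
 * the MISC_ENABLE_NO_MWAIT quirk, MWAIT (IA32_MISC_ENABLE).
 */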
static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries,
				       int nent)
{
	struct kvm_cpuid_entry2 *best;
	struct kvm_hypervisor_cpuid kvm_cpuid;

	best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
	if (best) {
		/* Update OSXSAVE bit */
		if (boot_cpu_has(X86_FEATURE_XSAVE))
			cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
					   kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE));

		cpuid_entry_change(best, X86_FEATURE_APIC,
				   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
	}

	best = cpuid_entry2_find(entries, nent, 7, 0);
	if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
		cpuid_entry_change(best, X86_FEATURE_OSPKE,
				   kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));

	best = cpuid_entry2_find(entries, nent, 0xD, 0);
	if (best)
		best->ebx = xstate_required_size(vcpu->arch.xcr0, false);

	best = cpuid_entry2_find(entries, nent, 0xD, 1);
	if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	kvm_cpuid = __kvm_get_hypervisor_cpuid(entries, nent, KVM_SIGNATURE);
	if (kvm_cpuid.base) {
		best = __kvm_find_kvm_cpuid_features(entries, nent, kvm_cpuid.base);
		if (kvm_hlt_in_guest(vcpu->kvm) && best)
			best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
	}

	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
		best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
		if (best)
			cpuid_entry_change(best, X86_FEATURE_MWAIT,
					   vcpu->arch.ia32_misc_enable_msr &
					   MSR_IA32_MISC_ENABLE_MWAIT);
	}
}

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
{
	__kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
}
EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);

static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent)
{
#ifdef CONFIG_KVM_HYPERV
	struct kvm_cpuid_entry2 *entry;

	entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE,
				  KVM_CPUID_INDEX_NOT_SIGNIFICANT);
	return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
#else
	return false;
#endif
}
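/*
 * CPUID.0.{EBX,EDX,ECX} hold the vendor string in that register order, e.g.
 * "AuthenticAMD" is EBX="Auth", EDX="enti", ECX="cAMD"; the is_guest_vendor_*
 * helpers take the registers in EBX, ECX, EDX order.
 */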
static bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry(vcpu, 0);
	if (!entry)
		return false;

	return is_guest_vendor_amd(entry->ebx, entry->ecx, entry->edx) ||
	       is_guest_vendor_hygon(entry->ebx, entry->ecx, entry->edx);
}

static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *best;
	bool allow_gbpages;

	BUILD_BUG_ON(KVM_NR_GOVERNED_FEATURES > KVM_MAX_NR_GOVERNED_FEATURES);
	bitmap_zero(vcpu->arch.governed_features.enabled,
		    KVM_MAX_NR_GOVERNED_FEATURES);

	/*
	 * If TDP is enabled, let the guest use GBPAGES if they're supported in
	 * hardware.  The hardware page walker doesn't let KVM disable GBPAGES,
	 * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
	 * walk for performance and complexity reasons.  Not to mention KVM
	 * _can't_ solve the problem because GVA->GPA walks aren't visible to
	 * KVM once a TDP translation is installed.  Mimic hardware behavior so
	 * that KVM's behavior is at least consistent, i.e. doesn't randomly
	 * inject #PF.  If TDP is disabled, honor *only* guest CPUID as KVM
	 * has full control and can install smaller shadow pages if the host
	 * lacks 1GiB support.
	 */
	allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
				      guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
	if (allow_gbpages)
		kvm_governed_feature_set(vcpu, X86_FEATURE_GBPAGES);

	best = kvm_find_cpuid_entry(vcpu, 1);
	if (best && apic) {
		if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;

		kvm_apic_set_version(vcpu);
	}

	vcpu->arch.guest_supported_xcr0 =
		cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);

	kvm_update_pv_runtime(vcpu);

	vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);

	kvm_pmu_refresh(vcpu);
	vcpu->arch.cr4_guest_rsvd_bits =
		__cr4_reserved_bits(guest_cpuid_has, vcpu);

	kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu->arch.cpuid_entries,
						    vcpu->arch.cpuid_nent));

	/* Invoke the vendor callback only after the above state is updated. */
	kvm_x86_call(vcpu_after_set_cpuid)(vcpu);

	/*
	 * Update the MMU last, as it needs to incorporate any vendor-specific
	 * adjustments made to the reserved GPA bits.
	 */
	kvm_mmu_after_set_cpuid(vcpu);
}
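/*
 * Query the guest's physical address width from CPUID.0x80000008.EAX[7:0].
 * If the extended leaf isn't enumerated, fall back to 36 bits, the
 * conventional assumption for PAE-capable CPUs that predate extended CPUID.
 */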
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}

/*
 * This "raw" version returns the reserved GPA bits without any adjustments for
 * encryption technologies that usurp bits.  The raw mask should be used if and
 * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs.
 */
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
{
	return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
}

static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
			 int nent)
{
	int r;

	__kvm_update_cpuid_runtime(vcpu, e2, nent);

	/*
	 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
	 * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
	 * faults due to reusing SPs/SPTEs.  In practice no sane VMM mucks with
	 * the core vCPU model on the fly.  It would've been better to forbid
	 * any KVM_SET_CPUID{,2} calls after KVM_RUN altogether, but
	 * unfortunately some VMMs (e.g. QEMU) reuse vCPU fds for CPU
	 * hotplug/unplug and do KVM_SET_CPUID{,2} again.  To support this
	 * legacy behavior, check whether the supplied CPUID data is equal to
	 * what's already set.
	 */
	if (kvm_vcpu_has_run(vcpu)) {
		r = kvm_cpuid_check_equal(vcpu, e2, nent);
		if (r)
			return r;

		kvfree(e2);
		return 0;
	}

#ifdef CONFIG_KVM_HYPERV
	if (kvm_cpuid_has_hyperv(e2, nent)) {
		r = kvm_hv_vcpu_init(vcpu);
		if (r)
			return r;
	}
#endif

	r = kvm_check_cpuid(vcpu, e2, nent);
	if (r)
		return r;

	kvfree(vcpu->arch.cpuid_entries);
	vcpu->arch.cpuid_entries = e2;
	vcpu->arch.cpuid_nent = nent;

	vcpu->arch.kvm_cpuid = kvm_get_hypervisor_cpuid(vcpu, KVM_SIGNATURE);
#ifdef CONFIG_KVM_XEN
	vcpu->arch.xen.cpuid = kvm_get_hypervisor_cpuid(vcpu, XEN_SIGNATURE);
#endif
	kvm_vcpu_after_set_cpuid(vcpu);

	return 0;
}

/*
 * Legacy ioctl, i.e. KVM_SET_CPUID: an older userspace provides the original
 * kvm_cpuid_entry format, which lacks the index and flags fields; convert the
 * entries to kvm_cpuid_entry2 before setting the vCPU's CPUID model.
 */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *e = NULL;
	struct kvm_cpuid_entry2 *e2 = NULL;

	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		return -E2BIG;

	if (cpuid->nent) {
		e = vmemdup_array_user(entries, cpuid->nent, sizeof(*e));
		if (IS_ERR(e))
			return PTR_ERR(e);

		e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);
		if (!e2) {
			r = -ENOMEM;
			goto out_free_cpuid;
		}
	}
	for (i = 0; i < cpuid->nent; i++) {
		e2[i].function = e[i].function;
		e2[i].eax = e[i].eax;
		e2[i].ebx = e[i].ebx;
		e2[i].ecx = e[i].ecx;
		e2[i].edx = e[i].edx;
		e2[i].index = 0;
		e2[i].flags = 0;
		e2[i].padding[0] = 0;
		e2[i].padding[1] = 0;
		e2[i].padding[2] = 0;
	}

	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
	if (r)
		kvfree(e2);

out_free_cpuid:
	kvfree(e);

	return r;
}
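/*
 * KVM_SET_CPUID2 handler.  Illustrative userspace flow (simplified sketch,
 * error handling omitted; "vcpu_fd" is a hypothetical vCPU file descriptor):
 *
 *	struct kvm_cpuid2 *cpuid;
 *
 *	cpuid = calloc(1, sizeof(*cpuid) + n * sizeof(cpuid->entries[0]));
 *	cpuid->nent = n;
 *	// Fill cpuid->entries[0..n-1], typically starting from
 *	// KVM_GET_SUPPORTED_CPUID output.
 *	ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
 */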
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *e2 = NULL;
	int r;

	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		return -E2BIG;

	if (cpuid->nent) {
		e2 = vmemdup_array_user(entries, cpuid->nent, sizeof(*e2));
		if (IS_ERR(e2))
			return PTR_ERR(e2);
	}

	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
	if (r)
		kvfree(e2);

	return r;
}

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		return -E2BIG;

	if (copy_to_user(entries, vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		return -EFAULT;

	cpuid->nent = vcpu->arch.cpuid_nent;
	return 0;
}

/* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */
static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
	struct kvm_cpuid_entry2 entry;

	reverse_cpuid_check(leaf);

	cpuid_count(cpuid.function, cpuid.index,
		    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);

	kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
}

static __always_inline
void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask)
{
	/* Use kvm_cpu_cap_mask for leafs that aren't KVM-only. */
	BUILD_BUG_ON(leaf < NCAPINTS);

	kvm_cpu_caps[leaf] = mask;

	__kvm_cpu_cap_mask(leaf);
}

static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
{
	/* Use kvm_cpu_cap_init_kvm_defined for KVM-only leafs. */
	BUILD_BUG_ON(leaf >= NCAPINTS);

	kvm_cpu_caps[leaf] &= mask;

	__kvm_cpu_cap_mask(leaf);
}
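/*
 * Compute KVM's supported CPU capabilities.  The flow for a standard leaf is:
 * seed kvm_cpu_caps from boot_cpu_data (the kernel-adjusted host
 * capabilities), AND it with the mask of features KVM knows how to virtualize
 * (the F(...) lists below), then AND it with raw host CPUID via
 * __kvm_cpu_cap_mask().  Features that KVM emulates in software are OR'd back
 * in afterwards via kvm_cpu_cap_set().
 */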
void kvm_set_cpu_caps(void)
{
#ifdef CONFIG_X86_64
	unsigned int f_gbpages = F(GBPAGES);
	unsigned int f_lm = F(LM);
	unsigned int f_xfd = F(XFD);
#else
	unsigned int f_gbpages = 0;
	unsigned int f_lm = 0;
	unsigned int f_xfd = 0;
#endif
	memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));

	BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
		     sizeof(boot_cpu_data.x86_capability));

	memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
	       sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));

	kvm_cpu_cap_mask(CPUID_1_ECX,
		/*
		 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
		 * advertised to guests via CPUID!
		 */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND)
	);
	/* KVM emulates x2apic in software irrespective of host support. */
	kvm_cpu_cap_set(X86_FEATURE_X2APIC);

	kvm_cpu_cap_mask(CPUID_1_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */
	);

	kvm_cpu_cap_mask(CPUID_7_0_EBX,
		F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
		F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
		F(RTM) | F(ZERO_FCS_FDS) | 0 /* MPX */ | F(AVX512F) |
		F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
		F(CLFLUSHOPT) | F(CLWB) | 0 /* INTEL_PT */ | F(AVX512PF) |
		F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |
		F(AVX512VL));

	kvm_cpu_cap_mask(CPUID_7_ECX,
		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /* OSPKE */ | F(RDPID) |
		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /* WAITPKG */ |
		F(SGX_LC) | F(BUS_LOCK_DETECT)
	);
	/* Set LA57 based on hardware capability. */
	if (cpuid_ecx(7) & F(LA57))
		kvm_cpu_cap_set(X86_FEATURE_LA57);

	/*
	 * PKU not yet implemented for shadow paging and requires OSPKE
	 * to be set on the host.  Clear it if that is not the case.
	 */
	if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
		kvm_cpu_cap_clear(X86_FEATURE_PKU);

	kvm_cpu_cap_mask(CPUID_7_EDX,
		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
		F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
		F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) |
		F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) | F(FLUSH_L1D)
	);

	/* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
	kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
	kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);

	if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);

	kvm_cpu_cap_mask(CPUID_7_1_EAX,
		F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) |
		F(FZRM) | F(FSRS) | F(FSRC) |
		F(AMX_FP16) | F(AVX_IFMA) | F(LAM)
	);
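	/*
	 * The 0x7.1 EDX and 0x7.2 EDX subleafs below are KVM-only words, i.e.
	 * the kernel doesn't have a dedicated x86_capability word for them, so
	 * they are seeded purely from the F(...) masks and then ANDed with raw
	 * host CPUID, see kvm_cpu_cap_init_kvm_defined().
	 */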
	kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,
		F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI) |
		F(AMX_COMPLEX)
	);

	kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
		F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) |
		F(BHI_CTRL) | F(MCDT_NO)
	);

	kvm_cpu_cap_mask(CPUID_D_1_EAX,
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
	);

	kvm_cpu_cap_init_kvm_defined(CPUID_12_EAX,
		SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA)
	);

	kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /* SVM */ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
		F(TOPOEXT) | 0 /* PERFCTR_CORE */
	);

	kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
	);

	if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
		kvm_cpu_cap_set(X86_FEATURE_GBPAGES);

	kvm_cpu_cap_init_kvm_defined(CPUID_8000_0007_EDX,
		SF(CONSTANT_TSC)
	);

	kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
		F(CLZERO) | F(XSAVEERPTR) |
		F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
		F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) |
		F(AMD_PSFD)
	);

	/*
	 * AMD has separate bits for each SPEC_CTRL bit.
	 * arch/x86/kernel/cpu/bugs.c is kind enough to
	 * record that in cpufeatures so use them.
	 */
	if (boot_cpu_has(X86_FEATURE_IBPB))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
	if (boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);
	/*
	 * The preference is to use SPEC CTRL MSR instead of the
	 * VIRT_SPEC MSR.
	 */
	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);

	/*
	 * Hide all SVM features by default, SVM will set the cap bits for
	 * features it emulates and/or exposes for L1.
	 */
	kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);

	kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
		0 /* SME */ | 0 /* SEV */ | 0 /* VM_PAGE_FLUSH */ | 0 /* SEV_ES */ |
		F(SME_COHERENT));

	kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
		F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ |
		F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ |
		F(WRMSR_XX_BASE_NS)
	);

	kvm_cpu_cap_check_and_set(X86_FEATURE_SBPB);
	kvm_cpu_cap_check_and_set(X86_FEATURE_IBPB_BRTYPE);
	kvm_cpu_cap_check_and_set(X86_FEATURE_SRSO_NO);

	kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX,
		F(PERFMON_V2)
	);

	/*
	 * Synthesize "LFENCE is serializing" into the AMD-defined entry in
	 * KVM's supported CPUID if the feature is reported as supported by the
	 * kernel.  LFENCE_RDTSC was a Linux-defined synthetic feature long
	 * before AMD joined the bandwagon, e.g. LFENCE is serializing on most
	 * CPUs that support SSE2.  On CPUs that don't support AMD's leaf,
	 * kvm_cpu_cap_mask() will unfortunately drop the flag due to ANDing
	 * the mask with the raw host CPUID, and reporting support in AMD's
	 * leaf can make it easier for userspace to detect the feature.
	 */
	if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
		kvm_cpu_cap_set(X86_FEATURE_LFENCE_RDTSC);
	if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
		kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE);
	kvm_cpu_cap_set(X86_FEATURE_NO_SMM_CTL_MSR);

	kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN)
	);

	/*
	 * Hide RDTSCP and RDPID if either feature is reported as supported but
	 * probing MSR_TSC_AUX failed.  This is purely a sanity check and
	 * should never happen, but the guest will likely crash if RDTSCP or
	 * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in
	 * the past.  For example, the sanity check may fire if this instance
	 * of KVM is running as L1 on top of an older, broken KVM.
	 */
	if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) ||
		     kvm_cpu_cap_has(X86_FEATURE_RDPID)) &&
		    !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);

struct kvm_cpuid_array {
	struct kvm_cpuid_entry2 *entries;
	int maxnent;
	int nent;
};

static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array)
{
	if (array->nent >= array->maxnent)
		return NULL;

	return &array->entries[array->nent++];
}
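/*
 * Populate the next array entry from host CPUID.  Hypervisor leafs (the
 * 0x40000000 range) are never read from hardware, they are synthesized
 * entirely by __do_cpuid_func().  Extended leafs beyond the host's maximum
 * are returned zeroed rather than queried, see the 0x80000021 note below.
 */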
static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
					      u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *entry = get_next_cpuid(array);

	if (!entry)
		return NULL;

	memset(entry, 0, sizeof(*entry));
	entry->function = function;
	entry->index = index;
	switch (function & 0xC0000000) {
	case 0x40000000:
		/* Hypervisor leaves are always synthesized by __do_cpuid_func. */
		return entry;

	case 0x80000000:
		/*
		 * 0x80000021 is sometimes synthesized by __do_cpuid_func,
		 * which would result in out-of-bounds calls to do_host_cpuid.
		 */
		{
			static int max_cpuid_80000000;

			if (!READ_ONCE(max_cpuid_80000000))
				WRITE_ONCE(max_cpuid_80000000, cpuid_eax(0x80000000));
			if (function > READ_ONCE(max_cpuid_80000000))
				return entry;
		}
		break;

	default:
		break;
	}

	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

	if (cpuid_function_is_indexed(function))
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;

	return entry;
}
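/*
 * Handle KVM_GET_EMULATED_CPUID: report only the features that KVM fully
 * emulates in software, e.g. MOVBE, and RDPID when RDTSCP is supported (RDPID
 * is backed by KVM's MSR_TSC_AUX emulation), so that userspace can expose
 * them even when the host CPU lacks native support.
 */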
static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
{
	struct kvm_cpuid_entry2 *entry;

	if (array->nent >= array->maxnent)
		return -E2BIG;

	entry = &array->entries[array->nent];
	entry->function = func;
	entry->index = 0;
	entry->flags = 0;

	switch (func) {
	case 0:
		entry->eax = 7;
		++array->nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++array->nent;
		break;
	case 7:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		entry->eax = 0;
		if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
			entry->ecx = F(RDPID);
		++array->nent;
		break;
	default:
		break;
	}

	return 0;
}

static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
{
	struct kvm_cpuid_entry2 *entry;
	int r, i, max_idx;

	/* All calls to cpuid_count() should be made on the same cpu. */
	get_cpu();

	r = -E2BIG;

	entry = do_host_cpuid(array, function, 0);
	if (!entry)
		goto out;

	switch (function) {
	case 0:
		/* Limited to the highest leaf implemented in KVM. */
		entry->eax = min(entry->eax, 0x1fU);
		break;
	case 1:
		cpuid_entry_override(entry, CPUID_1_EDX);
		cpuid_entry_override(entry, CPUID_1_ECX);
		break;
	case 2:
		/*
		 * On ancient CPUs, function 2 entries are STATEFUL.  That is,
		 * CPUID(function=2, index=0) may return different results each
		 * time, with the least-significant byte in EAX enumerating the
		 * number of times software should do CPUID(2, 0).
		 *
		 * Modern CPUs, i.e. every CPU KVM has *ever* run on, are less
		 * idiotic.  Intel's SDM states that EAX & 0xff "will always
		 * return 01H.  Software should ignore this value and not
		 * interpret it as an informational descriptor", while AMD's
		 * APM states that CPUID(2) is reserved.
		 *
		 * WARN if a frankenstein CPU that supports virtualization and
		 * a stateful CPUID.0x2 is encountered.
		 */
		WARN_ON_ONCE((entry->eax & 0xff) > 1);
		break;
	/* Functions 4 and 0x8000001d have an additional index. */
	case 4:
	case 0x8000001d:
		/*
		 * Read entries until the cache type in the previous entry is
		 * zero, i.e. indicates an invalid entry.
		 */
		for (i = 1; entry->eax & 0x1f; ++i) {
			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;
		}
		break;
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	/* Function 7 has an additional index. */
	case 7:
		max_idx = entry->eax = min(entry->eax, 2u);
		cpuid_entry_override(entry, CPUID_7_0_EBX);
		cpuid_entry_override(entry, CPUID_7_ECX);
		cpuid_entry_override(entry, CPUID_7_EDX);

		/* KVM only supports up to 0x7.2, capped above via min(). */
		if (max_idx >= 1) {
			entry = do_host_cpuid(array, function, 1);
			if (!entry)
				goto out;

			cpuid_entry_override(entry, CPUID_7_1_EAX);
			cpuid_entry_override(entry, CPUID_7_1_EDX);
			entry->ebx = 0;
			entry->ecx = 0;
		}
		if (max_idx >= 2) {
			entry = do_host_cpuid(array, function, 2);
			if (!entry)
				goto out;

			cpuid_entry_override(entry, CPUID_7_2_EDX);
			entry->ecx = 0;
			entry->ebx = 0;
			entry->eax = 0;
		}
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		eax.split.version_id = kvm_pmu_cap.version;
		eax.split.num_counters = kvm_pmu_cap.num_counters_gp;
		eax.split.bit_width = kvm_pmu_cap.bit_width_gp;
		eax.split.mask_length = kvm_pmu_cap.events_mask_len;
		edx.split.num_counters_fixed = kvm_pmu_cap.num_counters_fixed;
		edx.split.bit_width_fixed = kvm_pmu_cap.bit_width_fixed;

		if (kvm_pmu_cap.version)
			edx.split.anythread_deprecated = 1;
		edx.split.reserved1 = 0;
		edx.split.reserved2 = 0;

		entry->eax = eax.full;
		entry->ebx = kvm_pmu_cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	case 0x1f:
	case 0xb:
		/*
		 * No topology; a valid topology is indicated by the presence
		 * of subleaf 1.
		 */
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
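	/*
	 * Leaf 0xD enumerates XSAVE state components: subleaf 0 covers the
	 * XCR0-managed (user) components, subleaf 1 covers the XSAVE
	 * extensions and IA32_XSS-managed (supervisor) components, and
	 * subleafs 2..63 each describe one component, valid iff KVM supports
	 * the component, i.e. iff the corresponding bit is set in
	 * permitted_xcr0 or permitted_xss below.
	 */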
	case 0xd: {
		u64 permitted_xcr0 = kvm_get_filtered_xcr0();
		u64 permitted_xss = kvm_caps.supported_xss;

		entry->eax &= permitted_xcr0;
		entry->ebx = xstate_required_size(permitted_xcr0, false);
		entry->ecx = entry->ebx;
		entry->edx &= permitted_xcr0 >> 32;
		if (!permitted_xcr0)
			break;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		cpuid_entry_override(entry, CPUID_D_1_EAX);
		if (entry->eax & (F(XSAVES)|F(XSAVEC)))
			entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
							  true);
		else {
			WARN_ON_ONCE(permitted_xss != 0);
			entry->ebx = 0;
		}
		entry->ecx &= permitted_xss;
		entry->edx &= permitted_xss >> 32;

		for (i = 2; i < 64; ++i) {
			bool s_state;

			if (permitted_xcr0 & BIT_ULL(i))
				s_state = false;
			else if (permitted_xss & BIT_ULL(i))
				s_state = true;
			else
				continue;

			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;

			/*
			 * The supported check above should have filtered out
			 * invalid sub-leafs.  Only valid sub-leafs should
			 * reach this point, and they should have a non-zero
			 * save state size.  Furthermore, check whether the
			 * processor agrees with permitted_xcr0/permitted_xss
			 * on whether this is an XCR0- or IA32_XSS-managed area.
			 */
			if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
				--array->nent;
				continue;
			}

			if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
				entry->ecx &= ~BIT_ULL(2);
			entry->edx = 0;
		}
		break;
	}
	case 0x12:
		/* Intel SGX */
		if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		/*
		 * Index 0: Sub-features, MISCSELECT (a.k.a extended features)
		 * and max enclave sizes.  The SGX sub-features and MISCSELECT
		 * are restricted by kernel and KVM capabilities (like most
		 * feature flags), while enclave size is unrestricted.
		 */
		cpuid_entry_override(entry, CPUID_12_EAX);
		entry->ebx &= SGX_MISC_EXINFO;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		/*
		 * Index 1: SECS.ATTRIBUTES.  ATTRIBUTES are restricted a la
		 * feature flags.  Advertise all supported flags, including
		 * privileged attributes that require explicit opt-in from
		 * userspace.  ATTRIBUTES.XFRM is not adjusted as userspace is
		 * expected to derive it from supported XCR0.
		 */
		entry->eax &= SGX_ATTR_PRIV_MASK | SGX_ATTR_UNPRIV_MASK;
		entry->ebx = 0;
		break;
	/* Intel PT */
	case 0x14:
		if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
			if (!do_host_cpuid(array, function, i))
				goto out;
		}
		break;
	/* Intel AMX TILE */
	case 0x1d:
		if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
			if (!do_host_cpuid(array, function, i))
				goto out;
		}
		break;
	case 0x1e: /* TMUL information */
		if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}
		break;
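	/*
	 * The KVM signature leaf mirrors the standard vendor leaf layout:
	 * EAX enumerates the max KVM leaf (KVM_CPUID_FEATURES), and
	 * EBX/ECX/EDX spell out the 12-byte signature, "KVMKVMKVM\0\0\0".
	 */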
	case KVM_CPUID_SIGNATURE: {
		const u32 *sigptr = (const u32 *)KVM_SIGNATURE;

		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT) |
			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
			     (1 << KVM_FEATURE_PV_SEND_IPI) |
			     (1 << KVM_FEATURE_POLL_CONTROL) |
			     (1 << KVM_FEATURE_PV_SCHED_YIELD) |
			     (1 << KVM_FEATURE_ASYNC_PF_INT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x80000022);
		/*
		 * Serializing LFENCE is reported in a multitude of ways, and
		 * NullSegClearsBase is not reported in CPUID on Zen2; help
		 * userspace by providing the CPUID leaf ourselves.
		 *
		 * However, only do it if the host has CPUID leaf 0x8000001d.
		 * QEMU thinks that it can query the host blindly for that
		 * CPUID leaf if KVM reports that it supports 0x8000001d or
		 * above.  The processor merrily returns values from the
		 * highest Intel leaf which QEMU tries to use as the guest's
		 * 0x8000001d.  Even worse, this can result in an infinite
		 * loop if said highest leaf has no subleaves indexed by ECX.
		 */
		if (entry->eax >= 0x8000001d &&
		    (static_cpu_has(X86_FEATURE_LFENCE_RDTSC) ||
		     !static_cpu_has_bug(X86_BUG_NULL_SEG)))
			entry->eax = max(entry->eax, 0x80000021);
		break;
	case 0x80000001:
		entry->ebx &= ~GENMASK(27, 16);
		cpuid_entry_override(entry, CPUID_8000_0001_EDX);
		cpuid_entry_override(entry, CPUID_8000_0001_ECX);
		break;
	case 0x80000005:
		/* Pass host L1 cache and TLB info. */
		break;
	case 0x80000006:
		/* Drop reserved bits, pass host L2 cache and TLB info. */
		entry->edx &= ~GENMASK(17, 16);
		break;
	case 0x80000007: /* Advanced power management */
		cpuid_entry_override(entry, CPUID_8000_0007_EDX);

		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		/*
		 * GuestPhysAddrSize (EAX[23:16]) is intended for software
		 * use.
		 *
		 * KVM's ABI is to report the effective MAXPHYADDR for the
		 * guest in PhysAddrSize (phys_as), and the maximum
		 * *addressable* GPA in GuestPhysAddrSize (g_phys_as).
		 *
		 * GuestPhysAddrSize is valid if and only if TDP is enabled,
		 * in which case the max GPA that can be addressed by KVM may
		 * be less than the max GPA that can be legally generated by
		 * the guest, e.g. if MAXPHYADDR>48 but the CPU doesn't
		 * support 5-level TDP.
		 */
		unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned int phys_as, g_phys_as;

		/*
		 * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as
		 * the guest operates in the same PA space as the host, i.e.
		 * reductions in MAXPHYADDR for memory encryption affect shadow
		 * paging, too.
		 *
		 * If TDP is enabled, use the raw bare metal MAXPHYADDR as
		 * reductions to the HPAs do not affect GPAs.  The max
		 * addressable GPA is the same as the max effective GPA, except
		 * that it's capped at 48 bits if 5-level TDP isn't supported
		 * (hardware processes bits 51:48 only when walking the fifth
		 * level page table).
		 */
		if (!tdp_enabled) {
			phys_as = boot_cpu_data.x86_phys_bits;
			g_phys_as = 0;
		} else {
			phys_as = entry->eax & 0xff;
			g_phys_as = phys_as;
			if (kvm_mmu_get_max_tdp_level() < 5)
				g_phys_as = min(g_phys_as, 48U);
		}

		entry->eax = phys_as | (virt_as << 8) | (g_phys_as << 16);
		entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
		entry->edx = 0;
		cpuid_entry_override(entry, CPUID_8000_0008_EBX);
		break;
	}
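	/*
	 * Leaf 0x8000000A enumerates SVM capabilities.  The EDX feature bits
	 * are hidden by default (see the CPUID_8000_000A_EDX mask in
	 * kvm_set_cpu_caps()); the SVM code sets the bits it supports for
	 * nested virtualization.
	 */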
	case 0x8000000A:
		if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}
		entry->eax = 1; /* SVM revision 1 */
		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
				   ASID emulation to nested SVM */
		entry->ecx = 0; /* Reserved */
		cpuid_entry_override(entry, CPUID_8000_000A_EDX);
		break;
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
		entry->eax &= GENMASK(2, 0);
		entry->ebx = entry->ecx = entry->edx = 0;
		break;
	case 0x8000001e:
		/* Do not return host topology information. */
		entry->eax = entry->ebx = entry->ecx = 0;
		entry->edx = 0; /* reserved */
		break;
	case 0x8000001F:
		if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		} else {
			cpuid_entry_override(entry, CPUID_8000_001F_EAX);
			/* Clear NumVMPL since KVM does not support VMPL. */
			entry->ebx &= ~GENMASK(31, 12);
			/*
			 * Enumerate '0' for "PA bits reduction", the adjusted
			 * MAXPHYADDR is enumerated directly (see 0x80000008).
			 */
			entry->ebx &= ~GENMASK(11, 6);
		}
		break;
	case 0x80000020:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	case 0x80000021:
		entry->ebx = entry->ecx = entry->edx = 0;
		cpuid_entry_override(entry, CPUID_8000_0021_EAX);
		break;
	/* AMD Extended Performance Monitoring and Debug */
	case 0x80000022: {
		union cpuid_0x80000022_ebx ebx = { };

		entry->ecx = entry->edx = 0;
		if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
			entry->eax = entry->ebx = 0;
			break;
		}

		cpuid_entry_override(entry, CPUID_8000_0022_EAX);

		if (kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
			ebx.split.num_core_pmc = kvm_pmu_cap.num_counters_gp;
		else if (kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
			ebx.split.num_core_pmc = AMD64_NUM_COUNTERS_CORE;
		else
			ebx.split.num_core_pmc = AMD64_NUM_COUNTERS;

		entry->ebx = ebx.full;
		break;
	}
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		cpuid_entry_override(entry, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	r = 0;

out:
	put_cpu();

	return r;
}

static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			 unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_func_emulated(array, func);

	return __do_cpuid_func(array, func);
}

#define CENTAUR_CPUID_SIGNATURE 0xC0000000
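/*
 * Enumerate an entire CPUID class: emit the base leaf (e.g. 0 or 0x80000000),
 * read the class's maximum supported leaf from the base entry's EAX, then
 * emit every leaf up to and including that maximum.
 */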
static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			  unsigned int type)
{
	u32 limit;
	int r;

	if (func == CENTAUR_CPUID_SIGNATURE &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
		return 0;

	r = do_cpuid_func(array, func, type);
	if (r)
		return r;

	limit = array->entries[array->nent - 1].eax;
	for (func = func + 1; func <= limit; ++func) {
		r = do_cpuid_func(array, func, type);
		if (r)
			break;
	}

	return r;
}

static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
	 * have to settle for enforcing it only on the emulated side.  /me
	 * sheds a tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}

int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	static const u32 funcs[] = {
		0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
	};

	struct kvm_cpuid_array array = {
		.nent = 0,
	};
	int r, i;

	if (cpuid->nent < 1)
		return -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL);
	if (!array.entries)
		return -ENOMEM;

	array.maxnent = cpuid->nent;

	for (i = 0; i < ARRAY_SIZE(funcs); i++) {
		r = get_cpuid_func(&array, funcs[i], type);
		if (r)
			goto out_free;
	}
	cpuid->nent = array.nent;

	if (copy_to_user(entries, array.entries,
			 array.nent * sizeof(struct kvm_cpuid_entry2)))
		r = -EFAULT;

out_free:
	kvfree(array.entries);
	return r;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
						    u32 function, u32 index)
{
	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
				 function, index);
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index);

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function)
{
	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
				 function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
/*
 * Intel CPUID semantics treats any query for an out-of-range leaf as if the
 * highest basic leaf (i.e. CPUID.0H:EAX) were requested.  AMD CPUID semantics
 * returns all zeroes for any undefined leaf, whether or not the leaf is in
 * range.  Centaur/VIA follows Intel semantics.
 *
 * A leaf is considered out-of-range if its function is higher than the maximum
 * supported leaf of its associated class or if its associated class does not
 * exist.
 *
 * There are four primary classes to be considered, with their respective
 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive.  A primary
 * class exists if a guest CPUID entry for its <base> leaf exists.  For a given
 * class, CPUID.<base>.EAX contains the max supported leaf for the class.
 *
 *  - Basic:      0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
 *  - Hypervisor: 0x40000000 - 0x4fffffff
 *  - Extended:   0x80000000 - 0xbfffffff
 *  - Centaur:    0xc0000000 - 0xcfffffff
 *
 * The Hypervisor class is further subdivided into sub-classes that each act as
 * their own independent class associated with a 0x100 byte range.  E.g. if QEMU
 * is advertising support for both HyperV and KVM, the resulting Hypervisor
 * CPUID sub-classes are:
 *
 *  - HyperV:     0x40000000 - 0x400000ff
 *  - KVM:        0x40000100 - 0x400001ff
 */
static struct kvm_cpuid_entry2 *
get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
{
	struct kvm_cpuid_entry2 *basic, *class;
	u32 function = *fn_ptr;

	basic = kvm_find_cpuid_entry(vcpu, 0);
	if (!basic)
		return NULL;

	if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
	    is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
		return NULL;

	if (function >= 0x40000000 && function <= 0x4fffffff)
		class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00);
	else if (function >= 0xc0000000)
		class = kvm_find_cpuid_entry(vcpu, 0xc0000000);
	else
		class = kvm_find_cpuid_entry(vcpu, function & 0x80000000);

	if (class && function <= class->eax)
		return NULL;

	/*
	 * Leaf specific adjustments are also applied when redirecting to the
	 * max basic entry, e.g. if the max basic leaf is 0xb but there is no
	 * entry for CPUID.0xb.index (see below), then the output value for EDX
	 * needs to be pulled from CPUID.0xb.1.
	 */
	*fn_ptr = basic->eax;

	/*
	 * The class does not exist or the requested function is out of range;
	 * the effective CPUID entry is the max basic leaf.  Note, the index of
	 * the original requested leaf is observed!
	 */
	return kvm_find_cpuid_entry_index(vcpu, basic->eax, index);
}

bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only)
{
	u32 orig_function = *eax, function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *entry;
	bool exact, used_max_basic = false;

	entry = kvm_find_cpuid_entry_index(vcpu, function, index);
	exact = !!entry;

	if (!entry && !exact_only) {
		entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
		used_max_basic = !!entry;
	}

	if (entry) {
		*eax = entry->eax;
		*ebx = entry->ebx;
		*ecx = entry->ecx;
		*edx = entry->edx;
		if (function == 7 && index == 0) {
			u64 data;

			if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
			    (data & TSX_CTRL_CPUID_CLEAR))
				*ebx &= ~(F(RTM) | F(HLE));
		} else if (function == 0x80000007) {
			if (kvm_hv_invtsc_suppressed(vcpu))
				*edx &= ~SF(CONSTANT_TSC);
		}
	} else {
		*eax = *ebx = *ecx = *edx = 0;
		/*
		 * When leaf 0BH or 1FH is defined, CL is pass-through
		 * and EDX is always the x2APIC ID, even for undefined
		 * subleaves.  Index 1 will exist iff the leaf is
		 * implemented, so we pass through CL iff leaf 1
		 * exists.  EDX can be copied from any existing index.
		 */
		if (function == 0xb || function == 0x1f) {
			entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
			if (entry) {
				*ecx = index & 0xff;
				*edx = entry->edx;
			}
		}
	}
	trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
			used_max_basic);
	return exact;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);
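/*
 * Emulate a guest-executed CPUID.  If CPUID faulting is enabled (via
 * MSR_MISC_FEATURES_ENABLES) and the guest is not at CPL0, kvm_require_cpl()
 * injects a #GP and no register state is modified.
 */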
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_rax_read(vcpu);
	ecx = kvm_rcx_read(vcpu);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
	kvm_rax_write(vcpu, eax);
	kvm_rbx_write(vcpu, ebx);
	kvm_rcx_write(vcpu, ecx);
	kvm_rdx_write(vcpu, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);