Lines matching "0x40000000 - 0x4fffffff"
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
55 cpuid_count(0xD, i, &xs->eax, &xs->ebx, &xs->ecx, &ign);
61 int feature_bit = 0;
66 if (xstate_bv & 0x1) {
72 offset = (xs->ecx & 0x2) ? ALIGN(ret, 64) : ret;
74 offset = xs->ebx;
75 ret = max(ret, offset + xs->eax);
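The fragment above (xstate_required_size) walks CPUID.0xD sub-leaves to size the guest's XSAVE area. A self-contained sketch of the same computation, with a hypothetical read_cpuid_count() standing in for the kernel's cached xstate_sizes[] table:

#include <stdint.h>

#define MAX(a, b)	((a) > (b) ? (a) : (b))
#define ALIGN64(x)	(((x) + 63u) & ~63u)

/* Hypothetical raw-CPUID helper; not a kernel API. */
void read_cpuid_count(uint32_t leaf, uint32_t subleaf,
		      uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);

static uint32_t xstate_size_sketch(uint64_t xstate_bv, int compacted)
{
	uint32_t ret = 512 + 64;	/* legacy FXSAVE region + XSAVE header */
	int feature_bit = 0;

	xstate_bv &= ~3ull;		/* x87/SSE live in the legacy region */
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			uint32_t eax, ebx, ecx, ign, offset;

			read_cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &ign);
			if (compacted)	/* ECX[1] requests 64-byte alignment */
				offset = (ecx & 0x2) ? ALIGN64(ret) : ret;
			else		/* standard format: EBX is a fixed offset */
				offset = ebx;
			ret = MAX(ret, offset + eax);
		}
		xstate_bv >>= 1;
		feature_bit++;
	}
	return ret;
}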
86 * Magic value used by KVM when querying userspace-provided CPUID entries and
92 #define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull
101 * KVM has a semi-arbitrary rule that querying the guest's CPUID model
105 * path, e.g. the core VM-Enter/VM-Exit run loop. Nothing will break
112 for (i = 0; i < vcpu->arch.cpuid_nent; i++) {
113 e = &vcpu->arch.cpuid_entries[i];
115 if (e->function != function)
123 if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index)
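A minimal sketch of the matching rule in the loop above: an entry hits when the function matches and either the entry's index is insignificant or it equals the requested one, so KVM_CPUID_INDEX_NOT_SIGNIFICANT matches any entry with the flag clear. Types are simplified; this is not the kernel's kvm_find_cpuid_entry():

#include <stddef.h>
#include <stdint.h>

struct cpuid_entry {
	uint32_t function, index, flags;
};

#define FLAG_SIGNIFICANT_INDEX	0x1	/* stand-in for the UAPI flag */

static struct cpuid_entry *find_entry(struct cpuid_entry *e, int nent,
				      uint32_t function, uint64_t index)
{
	for (int i = 0; i < nent; i++) {
		if (e[i].function != function)
			continue;
		/* an insignificant index matches any requested index */
		if (!(e[i].flags & FLAG_SIGNIFICANT_INDEX) || e[i].index == index)
			return &e[i];
	}
	return NULL;
}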
171 * The existing code assumes virtual address is 48-bit or 57-bit in the
174 best = kvm_find_cpuid_entry(vcpu, 0x80000008);
176 int vaddr_bits = (best->eax & 0xff00) >> 8;
178 if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
179 return -EINVAL;
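A sketch of the validity check above, assuming eax holds CPUID.0x80000008:EAX:

/* Returns nonzero if the reported virtual-address width is one KVM accepts. */
static int vaddr_bits_ok(unsigned int eax)
{
	int vaddr_bits = (eax & 0xff00) >> 8;	/* EAX[15:8] */

	/* 48 = 4-level paging, 57 = 5-level paging, 0 = not reported */
	return vaddr_bits == 48 || vaddr_bits == 57 || vaddr_bits == 0;
}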
186 best = kvm_find_cpuid_entry_index(vcpu, 0xd, 0);
188 return 0;
190 xfeatures = best->eax | ((u64)best->edx << 32);
193 return 0;
195 return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
209 * false positives due to mismatches on KVM-owned feature flags.
216 if (nent != vcpu->arch.cpuid_nent)
217 return -EINVAL;
219 for (i = 0; i < nent; i++) {
220 orig = &vcpu->arch.cpuid_entries[i];
221 if (e2[i].function != orig->function ||
222 e2[i].index != orig->index ||
223 e2[i].flags != orig->flags ||
224 e2[i].eax != orig->eax || e2[i].ebx != orig->ebx ||
225 e2[i].ecx != orig->ecx || e2[i].edx != orig->edx)
226 return -EINVAL;
229 return 0;
245 signature[0] = entry->ebx;
246 signature[1] = entry->ecx;
247 signature[2] = entry->edx;
251 cpuid.limit = entry->eax;
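Hypervisor base leaves carry a 12-byte vendor signature split across EBX, ECX, EDX, as collected above. A hedged sketch of comparing one (byte order assumes a little-endian x86 host):

#include <string.h>
#include <stdint.h>

static int signature_matches(uint32_t ebx, uint32_t ecx, uint32_t edx,
			     const char *sig /* exactly 12 bytes */)
{
	uint32_t regs[3] = { ebx, ecx, edx };

	return memcmp(regs, sig, 12) == 0;
}
/* e.g. signature_matches(ebx, ecx, edx, "KVMKVMKVM\0\0\0") */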
267 return 0;
271 return 0;
273 if (kvm_hlt_in_guest(vcpu->kvm))
274 best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
276 return best->eax;
287 best = kvm_find_cpuid_entry_index(vcpu, 0xd, 0);
289 return 0;
291 return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0;
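The return above assembles a 64-bit feature mask from two 32-bit registers; a worked example, where host_xcr0 stands in for kvm_caps.supported_xcr0:

static unsigned long long supported_xcr0_sketch(unsigned int eax, unsigned int edx,
						unsigned long long host_xcr0)
{
	/* CPUID.0xD.0: EAX = low 32 bits of the mask, EDX = high 32 bits */
	unsigned long long guest_xcr0 = (unsigned long long)eax |
					((unsigned long long)edx << 32);

	return guest_xcr0 & host_xcr0;	/* never advertise beyond the host */
}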
313 vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
315 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT))
317 vcpu->arch.ia32_misc_enable_msr &
321 best = kvm_find_cpuid_entry_index(vcpu, 7, 0);
327 best = kvm_find_cpuid_entry_index(vcpu, 0xD, 0);
329 best->ebx = xstate_required_size(vcpu->arch.xcr0, false);
331 best = kvm_find_cpuid_entry_index(vcpu, 0xD, 1);
334 best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
344 return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
354 entry = kvm_find_cpuid_entry(vcpu, 0);
358 return is_guest_vendor_amd(entry->ebx, entry->ecx, entry->edx) ||
359 is_guest_vendor_hygon(entry->ebx, entry->ecx, entry->edx);
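A sketch of a vendor check like is_guest_vendor_amd(): CPUID.0 returns the vendor string in EBX, EDX, ECX order, e.g. "AuthenticAMD" is EBX="Auth", EDX="enti", ECX="cAMD". Simplified, little-endian x86 assumed:

#include <string.h>
#include <stdint.h>

static int is_vendor(uint32_t ebx, uint32_t ecx, uint32_t edx, const char *v)
{
	uint32_t regs[3] = { ebx, edx, ecx };	/* note the EBX, EDX, ECX order */

	return memcmp(regs, v, 12) == 0;
}
/* is_vendor(ebx, ecx, edx, "AuthenticAMD") || is_vendor(ebx, ecx, edx, "HygonGenuine") */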
365 * compile-time validation of the input.
371 return entry->eax;
373 return entry->ebx;
375 return entry->ecx;
377 return entry->edx;
380 return 0;
389 struct kvm_lapic *apic = vcpu->arch.apic;
395 memset(vcpu->arch.cpu_caps, 0, sizeof(vcpu->arch.cpu_caps));
403 for (i = 0; i < NR_KVM_CPU_CAPS; i++) {
421 vcpu->arch.cpu_caps[i] = kvm_cpu_caps[i] |
423 vcpu->arch.cpu_caps[i] &= cpuid_get_reg_unsafe(entry, cpuid.reg);
431 * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
433 * _can't_ solve the problem because GVA->GPA walks aren't visible to
446 apic->lapic_timer.timer_mode_mask = 3 << 17;
448 apic->lapic_timer.timer_mode_mask = 1 << 17;
453 vcpu->arch.guest_supported_xcr0 = cpuid_get_supported_xcr0(vcpu);
455 vcpu->arch.pv_cpuid.features = kvm_apply_cpuid_pv_features_quirk(vcpu);
457 vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
458 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
459 vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
464 vcpu->arch.cr4_guest_rsvd_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_) |
484 best = kvm_find_cpuid_entry(vcpu, 0x80000000);
485 if (!best || best->eax < 0x80000008)
487 best = kvm_find_cpuid_entry(vcpu, 0x80000008);
489 return best->eax & 0xff;
521 swap(vcpu->arch.cpuid_entries, e2);
522 swap(vcpu->arch.cpuid_nent, nent);
524 memcpy(vcpu_caps, vcpu->arch.cpu_caps, sizeof(vcpu_caps));
525 BUILD_BUG_ON(sizeof(vcpu_caps) != sizeof(vcpu->arch.cpu_caps));
558 vcpu->arch.xen.cpuid = kvm_get_hypervisor_cpuid(vcpu, XEN_SIGNATURE);
564 return 0;
567 memcpy(vcpu->arch.cpu_caps, vcpu_caps, sizeof(vcpu_caps));
568 swap(vcpu->arch.cpuid_entries, e2);
569 swap(vcpu->arch.cpuid_nent, nent);
582 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
583 return -E2BIG;
585 if (cpuid->nent) {
586 e = vmemdup_array_user(entries, cpuid->nent, sizeof(*e));
590 e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);
592 r = -ENOMEM;
596 for (i = 0; i < cpuid->nent; i++) {
602 e2[i].index = 0;
603 e2[i].flags = 0;
604 e2[i].padding[0] = 0;
605 e2[i].padding[1] = 0;
606 e2[i].padding[2] = 0;
609 r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
626 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
627 return -E2BIG;
629 if (cpuid->nent) {
630 e2 = vmemdup_array_user(entries, cpuid->nent, sizeof(*e2));
635 r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
646 if (cpuid->nent < vcpu->arch.cpuid_nent)
647 return -E2BIG;
649 if (copy_to_user(entries, vcpu->arch.cpuid_entries,
650 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
651 return -EFAULT;
653 cpuid->nent = vcpu->arch.cpuid_nent;
654 return 0;
663 * KVM only supports features defined by Intel (0x0), AMD (0x80000000),
664 * and Centaur (0xc0000000). WARN if a feature for new vendor base is
667 base = cpuid.function & 0xffff0000;
668 if (WARN_ON_ONCE(base && base != 0x80000000 && base != 0xc0000000))
669 return 0;
672 return 0;
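The masking above groups a leaf with its vendor range; for illustration:

static unsigned int vendor_base(unsigned int function)
{
	/* 0x00000007 -> 0x00000000 (Intel-defined basic leaves)
	 * 0x80000008 -> 0x80000000 (AMD-defined extended leaves)
	 * 0xc0000001 -> 0xc0000000 (Centaur-defined leaves) */
	return function & 0xffff0000;
}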
681 * For kernel-defined leafs, mask KVM's supported feature set with the kernel's
682 * capabilities as well as raw CPUID. For KVM-defined leafs, consult only raw
690 u32 kvm_cpu_cap_passthrough = 0; \
691 u32 kvm_cpu_cap_synthesized = 0; \
692 u32 kvm_cpu_cap_emulated = 0; \
693 u32 kvm_cpu_cap_features = 0; \
706 } while (0)
710 * word that's being initialized. Exempt 0x8000_0001.EDX usage of 0x1.EDX
711 * features, as AMD duplicated many 0x1.EDX features into 0x8000_0001.EDX.
718 } while (0)
726 /* Scattered Flag - For features that are scattered by cpufeatures.h. */
735 /* Features that KVM supports only on 64-bit kernels. */
744 * Emulated Feature - For features that KVM emulates in software irrespective
754 * Synthesized Feature - For features that are synthesized into boot_cpu_data,
765 * Passthrough Feature - For features that KVM supports based purely on raw
777 * Aliased Features - For features in 0x8000_0001.EDX that are duplicates of
778 * identical 0x1.EDX features, and thus are aliased from 0x1 to 0x8000_0001.
788 * Vendor Features - For features that KVM supports, but are added in later
797 * Runtime Features - For features that KVM dynamically sets/clears at runtime,
811 /* DS is defined by ptrace-abi.h on 32-bit builds. */
816 memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));
818 BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
832 /* DS-CPL */
837 /* CNXT-ID */
1164 * Synthesize "LFENCE is serializing" into the AMD-defined entry
1166 * supported by the kernel. LFENCE_RDTSC was a Linux-defined
1241 if (array->nent >= array->maxnent)
1244 return &array->entries[array->nent++];
1255 memset(entry, 0, sizeof(*entry));
1256 entry->function = function;
1257 entry->index = index;
1258 switch (function & 0xC0000000) {
1259 case 0x40000000:
1263 case 0x80000000:
1265 * 0x80000021 is sometimes synthesized by __do_cpuid_func, which
1266 * would result in out-of-bounds calls to do_host_cpuid.
1271 WRITE_ONCE(max_cpuid_80000000, cpuid_eax(0x80000000));
1281 cpuid_count(entry->function, entry->index,
1282 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
1285 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1293 memset(entry, 0, sizeof(*entry));
1295 entry->function = func;
1296 entry->index = 0;
1297 entry->flags = 0;
1300 case 0:
1301 entry->eax = 7;
1304 entry->ecx = feature_bit(MOVBE);
1311 * them natively requires enabling a per-VM capability.
1314 entry->ecx |= feature_bit(MWAIT);
1317 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1318 entry->eax = 0;
1320 entry->ecx = feature_bit(RDPID);
1323 return 0;
1329 if (array->nent >= array->maxnent)
1330 return -E2BIG;
1332 array->nent += cpuid_func_emulated(&array->entries[array->nent], func, false);
1333 return 0;
1344 r = -E2BIG;
1346 entry = do_host_cpuid(array, function, 0);
1351 case 0:
1353 entry->eax = min(entry->eax, 0x24U);
1362 * CPUID(function=2, index=0) may return different results each
1363 * time, with the least-significant byte in EAX enumerating the
1364 * number of times software should do CPUID(2, 0).
1367 * idiotic. Intel's SDM states that EAX & 0xff "will always
1373 * a stateful CPUID.0x2 is encountered.
1375 WARN_ON_ONCE((entry->eax & 0xff) > 1);
1377 /* functions 4 and 0x8000001d have additional index. */
1379 case 0x8000001d:
1384 for (i = 1; entry->eax & 0x1f; ++i) {
1391 entry->eax = 0x4; /* allow ARAT */
1392 entry->ebx = 0;
1393 entry->ecx = 0;
1394 entry->edx = 0;
1398 max_idx = entry->eax = min(entry->eax, 2u);
1403 /* KVM only supports up to 0x7.2, capped above via min(). */
1411 entry->ebx = 0;
1412 entry->ecx = 0;
1420 entry->ecx = 0;
1421 entry->ebx = 0;
1422 entry->eax = 0;
1425 case 0xa: { /* Architectural Performance Monitoring */
1430 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1443 edx.split.reserved1 = 0;
1444 edx.split.reserved2 = 0;
1446 entry->eax = eax.full;
1447 entry->ebx = kvm_pmu_cap.events_mask;
1448 entry->ecx = 0;
1449 entry->edx = edx.full;
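The eax/edx values above use the kernel's bitfield-union idiom: one view as named fields (.split), one as the raw register (.full). A simplified sketch of the EAX layout per the SDM's CPUID.0xA definition (not the kernel's exact union cpuid10_eax):

union cpuid_0xa_eax_sketch {
	struct {
		unsigned int version_id:8;	/* EAX[7:0]   architectural PMU version */
		unsigned int num_counters:8;	/* EAX[15:8]  general-purpose counters */
		unsigned int bit_width:8;	/* EAX[23:16] counter width */
		unsigned int mask_length:8;	/* EAX[31:24] EBX event-mask length */
	} split;
	unsigned int full;
};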
1452 case 0x1f:
1453 case 0xb:
1458 entry->eax = entry->ebx = entry->ecx = 0;
1460 case 0xd: {
1464 entry->eax &= permitted_xcr0;
1465 entry->ebx = xstate_required_size(permitted_xcr0, false);
1466 entry->ecx = entry->ebx;
1467 entry->edx &= permitted_xcr0 >> 32;
1476 if (entry->eax & (feature_bit(XSAVES) | feature_bit(XSAVEC)))
1477 entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
1480 WARN_ON_ONCE(permitted_xss != 0);
1481 entry->ebx = 0;
1483 entry->ecx &= permitted_xss;
1484 entry->edx &= permitted_xss >> 32;
1501 * invalid sub-leafs. Only valid sub-leafs should
1502 * reach this point, and they should have a non-zero
1505 * on whether this is an XCR0- or IA32_XSS-managed area.
1507 if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
1508 --array->nent;
1513 entry->ecx &= ~BIT_ULL(2);
1514 entry->edx = 0;
1518 case 0x12:
1521 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1526 * Index 0: Sub-features, MISCSELECT (a.k.a extended features)
1527 * and max enclave sizes. The SGX sub-features and MISCSELECT
1532 entry->ebx &= SGX_MISC_EXINFO;
1541 * privileged attributes that require explicit opt-in from
1545 entry->eax &= SGX_ATTR_PRIV_MASK | SGX_ATTR_UNPRIV_MASK;
1546 entry->ebx &= 0;
1549 case 0x14:
1551 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1555 for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
1561 case 0x1d:
1563 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1567 for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
1572 case 0x1e: /* TMUL information */
1574 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1578 case 0x24: {
1582 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1587 * The AVX10 version is encoded in EBX[7:0]. Note, the version
1591 avx10_version = min_t(u8, entry->ebx & 0xff, 1);
1593 entry->ebx |= avx10_version;
1595 entry->eax = 0;
1596 entry->ecx = 0;
1597 entry->edx = 0;
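A sketch of the version capping above: KVM advertises at most AVX10 version 1, and a present leaf guarantees the hardware version is at least 1, so the result is never 0:

static unsigned char avx10_version_sketch(unsigned int ebx)
{
	unsigned char ver = ebx & 0xff;		/* EBX[7:0] = AVX10 version */

	return ver > 1 ? 1 : ver;		/* i.e. min_t(u8, ebx & 0xff, 1) */
}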
1602 entry->eax = KVM_CPUID_FEATURES;
1603 entry->ebx = sigptr[0];
1604 entry->ecx = sigptr[1];
1605 entry->edx = sigptr[2];
1609 entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
1624 entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
1626 entry->ebx = 0;
1627 entry->ecx = 0;
1628 entry->edx = 0;
1630 case 0x80000000:
1631 entry->eax = min(entry->eax, 0x80000022);
1637 * However, only do it if the host has CPUID leaf 0x8000001d.
1639 * CPUID leaf if KVM reports that it supports 0x8000001d or
1642 * 0x8000001d. Even worse, this can result in an infinite
1645 if (entry->eax >= 0x8000001d &&
1648 entry->eax = max(entry->eax, 0x80000021);
1650 case 0x80000001:
1651 entry->ebx &= ~GENMASK(27, 16);
1655 case 0x80000005:
1658 case 0x80000006:
1660 entry->edx &= ~GENMASK(17, 16);
1662 case 0x80000007: /* Advanced power management */
1666 entry->edx &= boot_cpu_data.x86_power;
1667 entry->eax = entry->ebx = entry->ecx = 0;
1669 case 0x80000008: {
1682 * support 5-level TDP.
1684 unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U);
1696 * that it's capped at 48 bits if 5-level TDP isn't supported
1702 g_phys_as = 0;
1704 phys_as = entry->eax & 0xff;
1710 entry->eax = phys_as | (virt_as << 8) | (g_phys_as << 16);
1711 entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
1712 entry->edx = 0;
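A worked instance of the repacking above: with phys_as = 48, virt_as = 48 and g_phys_as = 0 ("guest MAXPHYADDR equals EAX[7:0]"):

unsigned int eax = 48 | (48 << 8) | (0 << 16);	/* = 0x00003030 */
/* EAX[7:0] = MAXPHYADDR, EAX[15:8] = virtual bits, EAX[23:16] = guest MAXPHYADDR */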
1716 case 0x8000000A:
1718 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1721 entry->eax = 1; /* SVM revision 1 */
1722 entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
1724 entry->ecx = 0; /* Reserved */
1727 case 0x80000019:
1728 entry->ecx = entry->edx = 0;
1730 case 0x8000001a:
1731 entry->eax &= GENMASK(2, 0);
1732 entry->ebx = entry->ecx = entry->edx = 0;
1734 case 0x8000001e:
1736 entry->eax = entry->ebx = entry->ecx = 0;
1737 entry->edx = 0; /* reserved */
1739 case 0x8000001F:
1741 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1745 entry->ebx &= ~GENMASK(31, 12);
1747 * Enumerate '0' for "PA bits reduction", the adjusted
1748 * MAXPHYADDR is enumerated directly (see 0x80000008).
1750 entry->ebx &= ~GENMASK(11, 6);
1753 case 0x80000020:
1754 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1756 case 0x80000021:
1757 entry->ebx = entry->ecx = entry->edx = 0;
1761 case 0x80000022: {
1764 entry->ecx = entry->edx = 0;
1766 entry->eax = entry->ebx;
1779 entry->ebx = ebx.full;
1783 case 0xC0000000:
1784 /* Just support up to 0xC0000004 now */
1785 entry->eax = min(entry->eax, 0xC0000004);
1787 case 0xC0000001:
1792 case 0xC0000002:
1793 case 0xC0000003:
1794 case 0xC0000004:
1796 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1800 r = 0;
1817 #define CENTAUR_CPUID_SIGNATURE 0xC0000000
1827 return 0;
1833 limit = array->entries[array->nent - 1].eax;
1853 * We want to make sure that ->padding is being passed clean from
1860 for (i = 0; i < num_entries; i++) {
1864 if (pad[0] || pad[1] || pad[2])
1875 0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
1879 .nent = 0,
1883 if (cpuid->nent < 1)
1884 return -E2BIG;
1885 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1886 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
1888 if (sanity_check_entries(entries, cpuid->nent, type))
1889 return -EINVAL;
1891 array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL);
1893 return -ENOMEM;
1895 array.maxnent = cpuid->nent;
1897 for (i = 0; i < ARRAY_SIZE(funcs); i++) {
1902 cpuid->nent = array.nent;
1906 r = -EFAULT;
1914 * Intel CPUID semantics treats any query for an out-of-range leaf as if the
1915 * highest basic leaf (i.e. CPUID.0H:EAX) were requested. AMD CPUID semantics
1919 * A leaf is considered out-of-range if its function is higher than the maximum
1924 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive. A primary
1928 * - Basic: 0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
1929 * - Hypervisor: 0x40000000 - 0x4fffffff
1930 * - Extended: 0x80000000 - 0xbfffffff
1931 * - Centaur: 0xc0000000 - 0xcfffffff
1933 * The Hypervisor class is further subdivided into sub-classes that each act as
1934 * their own independent class associated with a 0x100-byte range. E.g. if QEMU
1936 * CPUID sub-classes are:
1938 * - HyperV: 0x40000000 - 0x400000ff
1939 * - KVM: 0x40000100 - 0x400001ff
1947 basic = kvm_find_cpuid_entry(vcpu, 0);
1951 if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
1952 is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
1955 if (function >= 0x40000000 && function <= 0x4fffffff)
1956 class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00);
1957 else if (function >= 0xc0000000)
1958 class = kvm_find_cpuid_entry(vcpu, 0xc0000000);
1960 class = kvm_find_cpuid_entry(vcpu, function & 0x80000000);
1962 if (class && function <= class->eax)
1967 * max basic entry, e.g. if the max basic leaf is 0xb but there is no
1968 * entry for CPUID.0xb.index (see below), then the output value for EDX
1969 * needs to be pulled from CPUID.0xb.1.
1971 *fn_ptr = basic->eax;
1978 return kvm_find_cpuid_entry_index(vcpu, basic->eax, index);
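A sketch of the class lookup implemented above, mirroring the ranges documented in the comment block:

static unsigned int class_base(unsigned int function)
{
	if (function >= 0x40000000 && function <= 0x4fffffff)
		return function & 0xffffff00;	/* 0x100-leaf hypervisor sub-class */
	if (function >= 0xc0000000)
		return 0xc0000000;		/* Centaur class */
	return function & 0x80000000;		/* basic (0x0) or extended (0x80000000) */
}
/* in range iff an entry for class_base(function) exists and function <= entry->eax */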
1997 *eax = entry->eax;
1998 *ebx = entry->ebx;
1999 *ecx = entry->ecx;
2000 *edx = entry->edx;
2001 if (function == 7 && index == 0) {
2006 } else if (function == 0x80000007) {
2011 *eax = *ebx = *ecx = *edx = 0;
2013 * When leaf 0BH or 1FH is defined, CL is pass-through
2019 if (function == 0xb || function == 0x1f) {
2022 *ecx = index & 0xff;
2023 *edx = entry->edx;
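Reassembled from the fragments above, and hedged: find_index() is a stand-in for kvm_find_cpuid_entry_index(). For an undefined sub-leaf of an implemented 0xB/0x1F leaf, CL still echoes the requested index and EDX still holds the x2APIC ID (copied from any existing sub-leaf, e.g. index 1):

*eax = *ebx = *ecx = *edx = 0;
if (function == 0xb || function == 0x1f) {
	struct kvm_cpuid_entry2 *e = find_index(vcpu, function, 1);

	if (e) {
		*ecx = index & 0xff;	/* CL = requested sub-leaf */
		*edx = e->edx;		/* x2APIC ID */
	}
}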
2037 if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))