Lines Matching +full:0 +full:x80000007
55 cpuid_count(0xD, i, &xs->eax, &xs->ebx, &xs->ecx, &ign); in kvm_init_xstate_sizes()
74 offset = (xs->ecx & 0x2) ? ALIGN(ret, 64) : ret; in xstate_required_size()
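
The two hits above are KVM's XSAVE sizing: CPUID.0xD queried with a sub-leaf index enumerates each xstate component, EAX giving the component's size and ECX bit 1 flagging components that must start on a 64-byte boundary in the compacted format. A minimal userspace sketch of the same accumulation, assuming GCC/Clang's <cpuid.h> (the helper name is mine; 576 is the 512-byte legacy area plus the 64-byte XSAVE header, as in xstate_required_size()):

    #include <stdint.h>
    #include <cpuid.h>

    /* Accumulate the compacted-format size of xstate components 2..62 that
     * are enabled in xcr0, mirroring the ALIGN(ret, 64) logic shown above. */
    static uint32_t compacted_xstate_size(uint64_t xcr0)
    {
        unsigned int eax, ebx, ecx, edx;
        uint32_t ret = 576; /* legacy XSAVE area + XSAVE header */
        int i;

        for (i = 2; i < 63; i++) {
            if (!(xcr0 & (1ULL << i)))
                continue;
            if (!__get_cpuid_count(0xD, i, &eax, &ebx, &ecx, &edx))
                break;
            if (ecx & 0x2)              /* ECX[1]: 64-byte aligned when compacted */
                ret = (ret + 63) & ~63U;
            ret += eax;                 /* EAX: size in bytes of component i */
        }
        return ret;
    }
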
102 for (i = 0; i < nent; i++) { in kvm_find_cpuid_entry2()
145 best = kvm_find_cpuid_entry(vcpu, 0x80000008); in kvm_check_cpuid()
147 int vaddr_bits = (best->eax & 0xff00) >> 8; in kvm_check_cpuid()
149 if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0) in kvm_check_cpuid()
157 best = kvm_find_cpuid_entry_index(vcpu, 0xd, 0); in kvm_check_cpuid()
159 return 0; in kvm_check_cpuid()
164 return 0; in kvm_check_cpuid()
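
kvm_check_cpuid() rejects guest CPUID tables that advertise an impossible linear-address width: CPUID.0x80000008.EAX[15:8] must be 48 (4-level paging), 57 (5-level paging), or 0 (unreported). A hedged standalone sketch of the same extraction:

    #include <cpuid.h>

    /* Linear-address width from CPUID.0x80000008.EAX[15:8], or -1 if the
     * leaf is unavailable; illustrative, not KVM's helper. */
    static int vaddr_bits(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
            return -1;
        return (eax & 0xff00) >> 8;
    }
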
191 for (i = 0; i < nent; i++) { in kvm_cpuid_check_equal()
201 return 0; in kvm_cpuid_check_equal()
217 signature[0] = entry->ebx; in kvm_get_hypervisor_cpuid()
239 return 0; in kvm_apply_cpuid_pv_features_quirk()
243 return 0; in kvm_apply_cpuid_pv_features_quirk()
259 best = kvm_find_cpuid_entry_index(vcpu, 0xd, 0); in cpuid_get_supported_xcr0()
261 return 0; in cpuid_get_supported_xcr0()
270 best = kvm_find_cpuid_entry_index(vcpu, 0xd, 1); in cpuid_get_supported_xss()
272 return 0; in cpuid_get_supported_xss()
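
Both helpers read the guest's advertised extended-state masks from CPUID.0xD: sub-leaf 0 reports the supported XCR0 bits in EDX:EAX, and sub-leaf 1 reports the supported IA32_XSS bits in EDX:ECX. Read host-side, a sketch looks like:

    #include <stdint.h>
    #include <cpuid.h>

    static uint64_t host_supported_xcr0(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid_count(0xD, 0, &eax, &ebx, &ecx, &edx))
            return 0;
        return ((uint64_t)edx << 32) | eax;   /* CPUID.0xD.0: XCR0 in EDX:EAX */
    }

    static uint64_t host_supported_xss(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid_count(0xD, 1, &eax, &ebx, &ecx, &edx))
            return 0;
        return ((uint64_t)edx << 32) | ecx;   /* CPUID.0xD.1: IA32_XSS in EDX:ECX */
    }
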
306 best = kvm_find_cpuid_entry_index(vcpu, 7, 0); in kvm_update_cpuid_runtime()
312 best = kvm_find_cpuid_entry_index(vcpu, 0xD, 0); in kvm_update_cpuid_runtime()
316 best = kvm_find_cpuid_entry_index(vcpu, 0xD, 1); in kvm_update_cpuid_runtime()
339 entry = kvm_find_cpuid_entry(vcpu, 0); in guest_cpuid_is_amd_or_hygon()
365 return 0; in cpuid_get_reg_unsafe()
380 memset(vcpu->arch.cpu_caps, 0, sizeof(vcpu->arch.cpu_caps)); in kvm_vcpu_after_set_cpuid()
388 for (i = 0; i < NR_KVM_CPU_CAPS; i++) { in kvm_vcpu_after_set_cpuid()
472 best = kvm_find_cpuid_entry(vcpu, 0x80000000); in cpuid_query_maxphyaddr()
473 if (!best || best->eax < 0x80000008) in cpuid_query_maxphyaddr()
475 best = kvm_find_cpuid_entry(vcpu, 0x80000008); in cpuid_query_maxphyaddr()
477 return best->eax & 0xff; in cpuid_query_maxphyaddr()
486 best = kvm_find_cpuid_entry(vcpu, 0x80000000); in cpuid_query_maxguestphyaddr()
487 if (!best || best->eax < 0x80000008) in cpuid_query_maxguestphyaddr()
489 best = kvm_find_cpuid_entry(vcpu, 0x80000008); in cpuid_query_maxguestphyaddr()
491 return (best->eax >> 16) & 0xff; in cpuid_query_maxguestphyaddr()
493 return 0; in cpuid_query_maxguestphyaddr()
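
Both queries confirm via CPUID.0x80000000.EAX that leaf 0x80000008 exists, then decode EAX: bits [7:0] are MAXPHYADDR and bits [23:16] are AMD's guest physical address size, where 0 means "same as MAXPHYADDR". A userspace sketch (36 is the fallback KVM returns when the leaf is absent):

    #include <cpuid.h>

    static int query_maxphyaddr(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
            return 36;                /* architectural fallback */
        return eax & 0xff;            /* EAX[7:0]: MAXPHYADDR */
    }

    static int query_maxguestphyaddr(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
            return 0;
        return (eax >> 16) & 0xff;    /* EAX[23:16]: guest MAXPHYADDR, 0 = none */
    }
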
566 return 0; in kvm_set_cpuid()
598 for (i = 0; i < cpuid->nent; i++) { in kvm_vcpu_ioctl_set_cpuid()
604 e2[i].index = 0; in kvm_vcpu_ioctl_set_cpuid()
605 e2[i].flags = 0; in kvm_vcpu_ioctl_set_cpuid()
606 e2[i].padding[0] = 0; in kvm_vcpu_ioctl_set_cpuid()
607 e2[i].padding[1] = 0; in kvm_vcpu_ioctl_set_cpuid()
608 e2[i].padding[2] = 0; in kvm_vcpu_ioctl_set_cpuid()
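
These hits are the legacy KVM_SET_CPUID path widening struct kvm_cpuid_entry into struct kvm_cpuid_entry2: the old ABI has no index, flags, or padding fields, so they are explicitly zeroed. A reduced sketch of the conversion (struct layouts abbreviated from the KVM UAPI headers):

    #include <stdint.h>
    #include <string.h>

    struct legacy_entry { uint32_t function, eax, ebx, ecx, edx, padding; };
    struct entry2       { uint32_t function, index, flags,
                                   eax, ebx, ecx, edx, padding[3]; };

    /* Copy the fields both ABIs share; everything the legacy ABI lacks
     * (index, flags, padding) stays zero, as in the hits above. */
    static void widen_entries(struct entry2 *dst, const struct legacy_entry *src,
                              int nent)
    {
        int i;

        for (i = 0; i < nent; i++) {
            memset(&dst[i], 0, sizeof(dst[i]));
            dst[i].function = src[i].function;
            dst[i].eax = src[i].eax;
            dst[i].ebx = src[i].ebx;
            dst[i].ecx = src[i].ecx;
            dst[i].edx = src[i].edx;
        }
    }
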
659 return 0; in kvm_vcpu_ioctl_get_cpuid2()
668 * KVM only supports features defined by Intel (0x0), AMD (0x80000000), in raw_cpuid_get()
669 * and Centaur (0xc0000000). WARN if a feature for new vendor base is in raw_cpuid_get()
672 base = cpuid.function & 0xffff0000; in raw_cpuid_get()
673 if (WARN_ON_ONCE(base && base != 0x80000000 && base != 0xc0000000)) in raw_cpuid_get()
674 return 0; in raw_cpuid_get()
677 return 0; in raw_cpuid_get()
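
raw_cpuid_get() masks the function down to its 64K-aligned base and only accepts the three vendor ranges KVM knows how to read from hardware. The same guard as a standalone predicate:

    #include <stdint.h>
    #include <stdbool.h>

    /* Only Intel (0x0), AMD (0x80000000) and Centaur (0xc0000000) bases
     * hold hardware-defined feature leaves; anything else (e.g. the
     * 0x40000000 hypervisor range) is not a raw CPUID class. */
    static bool known_vendor_base(uint32_t function)
    {
        uint32_t base = function & 0xffff0000;

        return base == 0 || base == 0x80000000 || base == 0xc0000000;
    }
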
695 u32 kvm_cpu_cap_passthrough = 0; \
696 u32 kvm_cpu_cap_synthesized = 0; \
697 u32 kvm_cpu_cap_emulated = 0; \
698 u32 kvm_cpu_cap_features = 0; \
711 } while (0)
715 * word that's being initialized. Exempt 0x8000_0001.EDX usage of 0x1.EDX
716 * features, as AMD duplicated many 0x1.EDX features into 0x8000_0001.EDX.
723 } while (0)
782 * Aliased Features - For features in 0x8000_0001.EDX that are duplicates of
783 * identical 0x1.EDX features, and thus are aliased from 0x1 to 0x8000_0001.
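
"Aliased Features" refers to AMD duplicating many CPUID.0x1.EDX bits at identical positions in CPUID.0x80000001.EDX. A small demo that compares a few such bits on the running CPU; on AMD parts the columns match, while Intel leaves most of 0x80000001.EDX clear (bit positions per the SDM/APM):

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        unsigned int a, b, c, edx_1, edx_ext;
        static const struct { int bit; const char *name; } aliased[] = {
            { 0, "FPU" }, { 4, "TSC" }, { 6, "PAE" }, { 8, "CX8" }, { 24, "FXSR" },
        };
        unsigned int i;

        if (!__get_cpuid(1, &a, &b, &c, &edx_1) ||
            !__get_cpuid(0x80000001, &a, &b, &c, &edx_ext))
            return 1;
        for (i = 0; i < sizeof(aliased) / sizeof(aliased[0]); i++)
            printf("%-4s  0x1.EDX=%d  0x80000001.EDX=%d\n", aliased[i].name,
                   !!(edx_1 & (1u << aliased[i].bit)),
                   !!(edx_ext & (1u << aliased[i].bit)));
        return 0;
    }
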
821 memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps)); in kvm_set_cpu_caps()
961 * accesses require "magic" Writable=0,Dirty=1 protection, which KVM in kvm_set_cpu_caps()
1298 memset(entry, 0, sizeof(*entry)); in do_host_cpuid()
1301 switch (function & 0xC0000000) { in do_host_cpuid()
1302 case 0x40000000: in do_host_cpuid()
1306 case 0x80000000: in do_host_cpuid()
1308 * 0x80000021 is sometimes synthesized by __do_cpuid_func, which in do_host_cpuid()
1314 WRITE_ONCE(max_cpuid_80000000, cpuid_eax(0x80000000)); in do_host_cpuid()
1336 memset(entry, 0, sizeof(*entry)); in cpuid_func_emulated()
1339 entry->index = 0; in cpuid_func_emulated()
1340 entry->flags = 0; in cpuid_func_emulated()
1343 case 0: in cpuid_func_emulated()
1361 entry->eax = 0; in cpuid_func_emulated()
1366 return 0; in cpuid_func_emulated()
1376 return 0; in __do_cpuid_func_emulated()
1389 entry = do_host_cpuid(array, function, 0); in __do_cpuid_func()
1394 case 0: in __do_cpuid_func()
1396 entry->eax = min(entry->eax, 0x24U); in __do_cpuid_func()
1405 * CPUID(function=2, index=0) may return different results each in __do_cpuid_func()
1407 * number of times software should do CPUID(2, 0). in __do_cpuid_func()
1410 * idiotic. Intel's SDM states that EAX & 0xff "will always in __do_cpuid_func()
1416 * a stateful CPUID.0x2 is encountered. in __do_cpuid_func()
1418 WARN_ON_ONCE((entry->eax & 0xff) > 1); in __do_cpuid_func()
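
The comment block argues that CPUID.2's "run this leaf N times" byte is a historical wart: EAX[7:0] nominally tells software how many times to execute CPUID(2, 0), but on every CPU KVM cares about it is 1, so KVM merely warns if it observes otherwise. A host-side check:

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(2, &eax, &ebx, &ecx, &edx))
            return 1;
        /* EAX[7:0]: iteration count for CPUID.2; expected to be 1. */
        printf("CPUID.2 iteration count: %u\n", eax & 0xff);
        return 0;
    }
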
1420 /* functions 4 and 0x8000001d have additional index. */ in __do_cpuid_func()
1422 case 0x8000001d: in __do_cpuid_func()
1427 for (i = 1; entry->eax & 0x1f; ++i) { in __do_cpuid_func()
1434 entry->eax = 0x4; /* allow ARAT */ in __do_cpuid_func()
1435 entry->ebx = 0; in __do_cpuid_func()
1436 entry->ecx = 0; in __do_cpuid_func()
1437 entry->edx = 0; in __do_cpuid_func()
1446 /* KVM only supports up to 0x7.2, capped above via min(). */ in __do_cpuid_func()
1455 entry->ebx = 0; in __do_cpuid_func()
1463 entry->ecx = 0; in __do_cpuid_func()
1464 entry->ebx = 0; in __do_cpuid_func()
1465 entry->eax = 0; in __do_cpuid_func()
1468 case 0xa: { /* Architectural Performance Monitoring */ in __do_cpuid_func()
1473 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1489 entry->ecx = 0; in __do_cpuid_func()
1493 case 0x1f: in __do_cpuid_func()
1494 case 0xb: in __do_cpuid_func()
1499 entry->eax = entry->ebx = entry->ecx = 0; in __do_cpuid_func()
1501 case 0xd: { in __do_cpuid_func()
1521 WARN_ON_ONCE(permitted_xss != 0); in __do_cpuid_func()
1522 entry->ebx = 0; in __do_cpuid_func()
1548 if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) { in __do_cpuid_func()
1555 entry->edx = 0; in __do_cpuid_func()
1559 case 0x12: in __do_cpuid_func()
1562 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1567 * Index 0: Sub-features, MISCSELECT (a.k.a extended features) in __do_cpuid_func()
1587 entry->ebx &= 0; in __do_cpuid_func()
1590 case 0x14: in __do_cpuid_func()
1592 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1602 case 0x1d: in __do_cpuid_func()
1604 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1613 case 0x1e: /* TMUL information */ in __do_cpuid_func()
1615 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1619 case 0x24: { in __do_cpuid_func()
1623 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1628 * The AVX10 version is encoded in EBX[7:0]. Note, the version in __do_cpuid_func()
1632 avx10_version = min_t(u8, entry->ebx & 0xff, 1); in __do_cpuid_func()
1636 entry->eax = 0; in __do_cpuid_func()
1637 entry->ecx = 0; in __do_cpuid_func()
1638 entry->edx = 0; in __do_cpuid_func()
1644 entry->ebx = sigptr[0]; in __do_cpuid_func()
1667 entry->ebx = 0; in __do_cpuid_func()
1668 entry->ecx = 0; in __do_cpuid_func()
1669 entry->edx = 0; in __do_cpuid_func()
1671 case 0x80000000: in __do_cpuid_func()
1672 entry->eax = min(entry->eax, 0x80000022); in __do_cpuid_func()
1678 * However, only do it if the host has CPUID leaf 0x8000001d. in __do_cpuid_func()
1680 * CPUID leaf if KVM reports that it supports 0x8000001d or in __do_cpuid_func()
1683 * 0x8000001d. Even worse, this can result in an infinite in __do_cpuid_func()
1686 if (entry->eax >= 0x8000001d && in __do_cpuid_func()
1689 entry->eax = max(entry->eax, 0x80000021); in __do_cpuid_func()
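
Case 0x80000000 clamps the advertised maximum extended leaf to 0x80000022 and, to avoid the QEMU cache-topology pitfall described in the comment, raises it to at least 0x80000021 whenever the host reaches 0x8000001d. A sketch of just the clamping arithmetic (the extra host-feature conditions KVM checks before synthesizing 0x80000021 are elided):

    #include <stdint.h>

    static uint32_t clamp_max_ext_leaf(uint32_t host_max)
    {
        /* Never advertise beyond what KVM emulates. */
        uint32_t eax = host_max < 0x80000022 ? host_max : 0x80000022;

        /* If 0x8000001d exists, also cover the synthesized 0x80000021. */
        if (eax >= 0x8000001d && eax < 0x80000021)
            eax = 0x80000021;
        return eax;
    }
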
1691 case 0x80000001: in __do_cpuid_func()
1696 case 0x80000005: in __do_cpuid_func()
1699 case 0x80000006: in __do_cpuid_func()
1703 case 0x80000007: /* Advanced power management */ in __do_cpuid_func()
1708 entry->eax = entry->ebx = entry->ecx = 0; in __do_cpuid_func()
1710 case 0x80000008: { in __do_cpuid_func()
1725 unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U); in __do_cpuid_func()
1743 g_phys_as = 0; in __do_cpuid_func()
1745 phys_as = entry->eax & 0xff; in __do_cpuid_func()
1753 entry->edx = 0; in __do_cpuid_func()
1757 case 0x8000000A: in __do_cpuid_func()
1759 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1765 entry->ecx = 0; /* Reserved */ in __do_cpuid_func()
1768 case 0x80000019: in __do_cpuid_func()
1769 entry->ecx = entry->edx = 0; in __do_cpuid_func()
1771 case 0x8000001a: in __do_cpuid_func()
1772 entry->eax &= GENMASK(2, 0); in __do_cpuid_func()
1773 entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1775 case 0x8000001e: in __do_cpuid_func()
1777 entry->eax = entry->ebx = entry->ecx = 0; in __do_cpuid_func()
1778 entry->edx = 0; /* reserved */ in __do_cpuid_func()
1780 case 0x8000001F: in __do_cpuid_func()
1782 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1788 * Enumerate '0' for "PA bits reduction", the adjusted in __do_cpuid_func()
1789 * MAXPHYADDR is enumerated directly (see 0x80000008). in __do_cpuid_func()
1794 case 0x80000020: in __do_cpuid_func()
1795 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1797 case 0x80000021: in __do_cpuid_func()
1798 entry->ebx = entry->edx = 0; in __do_cpuid_func()
1803 case 0x80000022: { in __do_cpuid_func()
1806 entry->ecx = entry->edx = 0; in __do_cpuid_func()
1808 entry->eax = entry->ebx = 0; in __do_cpuid_func()
1819 case 0xC0000000: in __do_cpuid_func()
1820 /* Just support up to 0xC0000004 now */ in __do_cpuid_func()
1821 entry->eax = min(entry->eax, 0xC0000004); in __do_cpuid_func()
1823 case 0xC0000001: in __do_cpuid_func()
1828 case 0xC0000002: in __do_cpuid_func()
1829 case 0xC0000003: in __do_cpuid_func()
1830 case 0xC0000004: in __do_cpuid_func()
1832 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
1836 r = 0; in __do_cpuid_func()
1853 #define CENTAUR_CPUID_SIGNATURE 0xC0000000
1864 return 0; in get_cpuid_func()
1897 for (i = 0; i < num_entries; i++) { in sanity_check_entries()
1901 if (pad[0] || pad[1] || pad[2]) in sanity_check_entries()
1912 0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE, in kvm_dev_ioctl_get_cpuid()
1916 .nent = 0, in kvm_dev_ioctl_get_cpuid()
1934 for (i = 0; i < ARRAY_SIZE(funcs); i++) { in kvm_dev_ioctl_get_cpuid()
1952 * highest basic leaf (i.e. CPUID.0H:EAX) were requested. AMD CPUID semantics
1965 * - Basic: 0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
1966 * - Hypervisor: 0x40000000 - 0x4fffffff
1967 * - Extended: 0x80000000 - 0xbfffffff
1968 * - Centaur: 0xc0000000 - 0xcfffffff
1971 * their own independent class associated with a 0x100 byte range. E.g. if Qemu
1975 * - HyperV: 0x40000000 - 0x400000ff
1976 * - KVM: 0x40000100 - 0x400001ff
1984 basic = kvm_find_cpuid_entry(vcpu, 0); in get_out_of_range_cpuid_entry()
1992 if (function >= 0x40000000 && function <= 0x4fffffff) in get_out_of_range_cpuid_entry()
1993 class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00); in get_out_of_range_cpuid_entry()
1994 else if (function >= 0xc0000000) in get_out_of_range_cpuid_entry()
1995 class = kvm_find_cpuid_entry(vcpu, 0xc0000000); in get_out_of_range_cpuid_entry()
1997 class = kvm_find_cpuid_entry(vcpu, function & 0x80000000); in get_out_of_range_cpuid_entry()
2004 * max basic entry, e.g. if the max basic leaf is 0xb but there is no in get_out_of_range_cpuid_entry()
2005 * entry for CPUID.0xb.index (see below), then the output value for EDX in get_out_of_range_cpuid_entry()
2006 * needs to be pulled from CPUID.0xb.1. in get_out_of_range_cpuid_entry()
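
get_out_of_range_cpuid_entry() first locates the base entry of the requested function's class: hypervisor functions fall into per-vendor 0x100-byte blocks, Centaur functions share the 0xc0000000 base, and everything else splits on bit 31 into basic versus extended. The class computation as a standalone helper:

    #include <stdint.h>

    /* Base leaf whose EAX (max leaf) bounds 'function', per the ranges
     * documented above. */
    static uint32_t cpuid_class_base(uint32_t function)
    {
        if (function >= 0x40000000 && function <= 0x4fffffff)
            return function & 0xffffff00;  /* per-hypervisor 0x100 block */
        if (function >= 0xc0000000)
            return 0xc0000000;             /* Centaur */
        return function & 0x80000000;      /* basic vs. extended */
    }
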
2041 if (function == 7 && index == 0) { in kvm_cpuid()
2047 } else if (function == 0x80000007) { in kvm_cpuid()
2068 *eax = *ebx = *ecx = *edx = 0; in kvm_cpuid()
2070 * When leaf 0BH or 1FH is defined, CL is pass-through in kvm_cpuid()
2076 if (function == 0xb || function == 0x1f) { in kvm_cpuid()
2079 *ecx = index & 0xff; in kvm_cpuid()
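
When Intel fallback semantics apply and the requested leaf is absent, KVM zeroes all four outputs, except that for topology leaves 0xB and 0x1F the low byte of the input index is echoed in ECX, since CL is architecturally pass-through there. The tail end of that logic, compressed (the CPUID-table lookup is stubbed out):

    #include <stdint.h>

    static void out_of_range_fallback(uint32_t function, uint32_t index,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
    {
        *eax = *ebx = *ecx = *edx = 0;
        /* Leaves 0xB/0x1F: ECX[7:0] reflects the requested sub-leaf. */
        if (function == 0xb || function == 0x1f)
            *ecx = index & 0xff;
    }
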
2094 if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0)) in kvm_emulate_cpuid()