xref: /linux/arch/x86/kvm/cpuid.h (revision 4949009eb8d40a441dcddcd96e101e77d31cf1b2)
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"

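/*
 * Implemented in cpuid.c: building and updating the guest's CPUID table,
 * looking up entries in it, and servicing the CPUID-related ioctls.
 */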
int kvm_update_cpuid(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);

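/*
 * Inline predicates: report whether a feature bit is exposed to the guest
 * in its CPUID, i.e. whether the relevant entry exists and has the bit set.
 * They say nothing about host support unless they check it explicitly.
 */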
static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	if (!static_cpu_has(X86_FEATURE_XSAVE))
		return false;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
}

static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMEP));
}

static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMAP));
}

static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
}

static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->ecx & bit(X86_FEATURE_OSVW));
}

static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_PCID));
}

static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
}

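/*
 * Vendor check: CPUID leaf 0 EBX carries the first four bytes of the vendor
 * string, "Auth" (0x68747541) for "AuthenticAMD".
 */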
static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}

static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_GBPAGES));
}

static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_RTM));
}
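
/*
 * Typical use (illustrative sketch, not code from this header):
 * feature-dependent emulation paths gate guest-visible behaviour on these
 * predicates, along the lines of:
 *
 *	if (!guest_cpuid_has_xsave(vcpu))
 *		return 1;
 */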
#endif /* ARCH_X86_KVM_CPUID_H */