#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"

int kvm_update_cpuid(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	if (!static_cpu_has(X86_FEATURE_XSAVE))
		return false;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
}

static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMEP));
}

static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMAP));
}

static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
}

static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_LM));
}

static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->ecx & bit(X86_FEATURE_OSVW));
}

static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_PCID));
}

static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
}

static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}

static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_GBPAGES));
}

static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_RTM));
}

static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_MPX));
}

static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
}

static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_RDTSCP));
}

/*
 * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
 */
#define BIT_NRIPS	3

static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);

	/*
	 * NRIPS is a scattered cpuid feature, so we can't use
	 * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
	 * position 8, not 3).
	 */
	return best && (best->edx & bit(BIT_NRIPS));
}
#undef BIT_NRIPS

#endif