/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>
#include <asm/processor.h>

extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
void kvm_set_cpu_caps(void);

int kvm_update_cpuid(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

struct cpuid_reg {
	u32 function;
	u32 index;
	int reg;
};

static const struct cpuid_reg reverse_cpuid[] = {
	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
	[CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
	[CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
};

/*
 * Reverse CPUID and its derivatives can only be used for hardware-defined
 * feature words, i.e. words whose bits directly correspond to a CPUID leaf.
 * Retrieving a feature bit or masking guest CPUID from a Linux-defined word
 * is nonsensical as the bit number/mask is an arbitrary software-defined value
 * and can't be used by KVM to query/control guest capabilities. And obviously
 * the leaf being queried must have an entry in the lookup table.
 */
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}

/*
 * Retrieve the bit mask from an X86_FEATURE_* definition. Features contain
 * the hardware defined bit number (stored in bits 4:0) and a software defined
 * "word" (stored in bits 31:5). The word is used to index into arrays of
 * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
 */
static __always_inline u32 __feature_bit(int x86_feature)
{
	reverse_cpuid_check(x86_feature / 32);
	return 1 << (x86_feature & 31);
}

#define feature_bit(name)  __feature_bit(X86_FEATURE_##name)
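/*
 * Worked example (illustrative only, not taken from the original header):
 * a feature encoded as word 9, bit 7 has the value 9 * 32 + 7 = 295, so
 * x86_feature / 32 recovers the word index (9) and x86_feature & 31 the
 * bit number (7), i.e. __feature_bit() returns 1 << 7 for that feature.
 * The word/bit numbers here are chosen purely to show the arithmetic.
 */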
static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	return reverse_cpuid[x86_leaf];
}

static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						  u32 reg)
{
	switch (reg) {
	case CPUID_EAX:
		return &entry->eax;
	case CPUID_EBX:
		return &entry->ebx;
	case CPUID_ECX:
		return &entry->ecx;
	case CPUID_EDX:
		return &entry->edx;
	default:
		BUILD_BUG();
		return NULL;
	}
}

static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
					   unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	return *reg & __feature_bit(x86_feature);
}

static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	return cpuid_entry_get(entry, x86_feature);
}

static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
					      unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg &= ~__feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg |= __feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
					       unsigned int x86_feature,
					       bool set)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	/*
	 * Open coded instead of using cpuid_entry_{clear,set}() to coerce the
	 * compiler into using CMOV instead of Jcc when possible.
	 */
	if (set)
		*reg |= __feature_bit(x86_feature);
	else
		*reg &= ~__feature_bit(x86_feature);
}
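/*
 * Illustrative usage (hypothetical, not part of the original header): given
 * a populated entry for CPUID.0x7.0, the helpers above resolve a feature's
 * register and bit from its X86_FEATURE_* encoding, e.g.:
 *
 *	if (cpuid_entry_has(entry, X86_FEATURE_SMEP))
 *		cpuid_entry_change(entry, X86_FEATURE_SMEP, allow_smep);
 *
 * where "allow_smep" is an assumed local flag used only for this example.
 */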
static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
						 enum cpuid_leafs leaf)
{
	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
	*reg = kvm_cpu_caps[leaf];
}

static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best &&
	       (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}

static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return !!kvm_cpu_cap_get(x86_feature);
}
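/*
 * Illustrative usage (hypothetical, not part of the original header): code
 * that populates kvm_cpu_caps could advertise or hide a feature like so:
 *
 *	if (host_supports_invpcid())
 *		kvm_cpu_cap_set(X86_FEATURE_INVPCID);
 *	else
 *		kvm_cpu_cap_clear(X86_FEATURE_INVPCID);
 *
 * host_supports_invpcid() is an assumed stand-in for whatever hardware check
 * applies; only the kvm_cpu_cap_{set,clear}() calls come from this header.
 */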
static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}

#endif