/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_REVERSE_CPUID_H
#define ARCH_X86_KVM_REVERSE_CPUID_H

#include <uapi/asm/kvm.h>
#include <asm/cpufeature.h>
#include <asm/cpufeatures.h>

/*
 * Define a KVM-only feature flag.
 *
 * For features that are scattered by cpufeatures.h, __feature_translate() also
 * needs to be updated to translate the kernel-defined feature into the
 * KVM-defined feature.
 *
 * For features that are 100% KVM-only, i.e. not defined by cpufeatures.h,
 * forego the intermediate KVM_X86_FEATURE and directly define X86_FEATURE_* so
 * that X86_FEATURE_* can be used in KVM.  No __feature_translate() handling is
 * needed in this case.
 */
#define KVM_X86_FEATURE(w, f)		((w)*32 + (f))

/* Intel-defined SGX sub-features, CPUID level 0x12 (EAX). */
#define KVM_X86_FEATURE_SGX1		KVM_X86_FEATURE(CPUID_12_EAX, 0)
#define KVM_X86_FEATURE_SGX2		KVM_X86_FEATURE(CPUID_12_EAX, 1)
#define KVM_X86_FEATURE_SGX_EDECCSSA	KVM_X86_FEATURE(CPUID_12_EAX, 11)

/* Intel-defined sub-features, CPUID level 0x00000007:1 (EDX) */
#define X86_FEATURE_AVX_VNNI_INT8	KVM_X86_FEATURE(CPUID_7_1_EDX, 4)
#define X86_FEATURE_AVX_NE_CONVERT	KVM_X86_FEATURE(CPUID_7_1_EDX, 5)
#define X86_FEATURE_AMX_COMPLEX		KVM_X86_FEATURE(CPUID_7_1_EDX, 8)
#define X86_FEATURE_AVX_VNNI_INT16	KVM_X86_FEATURE(CPUID_7_1_EDX, 10)
#define X86_FEATURE_PREFETCHITI		KVM_X86_FEATURE(CPUID_7_1_EDX, 14)
#define X86_FEATURE_AVX10		KVM_X86_FEATURE(CPUID_7_1_EDX, 19)

/* Intel-defined sub-features, CPUID level 0x00000007:2 (EDX) */
#define X86_FEATURE_INTEL_PSFD		KVM_X86_FEATURE(CPUID_7_2_EDX, 0)
#define X86_FEATURE_IPRED_CTRL		KVM_X86_FEATURE(CPUID_7_2_EDX, 1)
#define KVM_X86_FEATURE_RRSBA_CTRL	KVM_X86_FEATURE(CPUID_7_2_EDX, 2)
#define X86_FEATURE_DDPD_U		KVM_X86_FEATURE(CPUID_7_2_EDX, 3)
#define KVM_X86_FEATURE_BHI_CTRL	KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
#define X86_FEATURE_MCDT_NO		KVM_X86_FEATURE(CPUID_7_2_EDX, 5)

/* Intel-defined sub-features, CPUID level 0x00000024:0 (EBX) */
#define X86_FEATURE_AVX10_128		KVM_X86_FEATURE(CPUID_24_0_EBX, 16)
#define X86_FEATURE_AVX10_256		KVM_X86_FEATURE(CPUID_24_0_EBX, 17)
#define X86_FEATURE_AVX10_512		KVM_X86_FEATURE(CPUID_24_0_EBX, 18)
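
/*
 * Illustrative example (a sketch based only on the definitions above, not a
 * statement about other KVM code): KVM_X86_FEATURE() packs a feature into the
 * same "word * 32 + bit" layout that cpufeatures.h uses, e.g.
 *
 *	KVM_X86_FEATURE_SGX_EDECCSSA == CPUID_12_EAX * 32 + 11
 *
 * so __feature_leaf() and __feature_bit() below can recover the word
 * (CPUID_12_EAX) and the bit mask (1 << 11) with plain division and masking.
 * Scattered features keep the KVM_X86_FEATURE_* name and need a matching
 * __feature_translate() case; 100% KVM-only features such as X86_FEATURE_AVX10
 * are defined directly as X86_FEATURE_* and need no translation.
 */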

/* CPUID level 0x80000007 (EDX). */
#define KVM_X86_FEATURE_CONSTANT_TSC	KVM_X86_FEATURE(CPUID_8000_0007_EDX, 8)

/* CPUID level 0x80000022 (EAX) */
#define KVM_X86_FEATURE_PERFMON_V2	KVM_X86_FEATURE(CPUID_8000_0022_EAX, 0)

struct cpuid_reg {
	u32 function;
	u32 index;
	int reg;
};

static const struct cpuid_reg reverse_cpuid[] = {
	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
	[CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
	[CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
	[CPUID_12_EAX]        = {0x00000012, 0, CPUID_EAX},
	[CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX},
	[CPUID_7_1_EDX]       = {         7, 1, CPUID_EDX},
	[CPUID_8000_0007_EDX] = {0x80000007, 0, CPUID_EDX},
	[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
	[CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX},
	[CPUID_7_2_EDX]       = {         7, 2, CPUID_EDX},
	[CPUID_24_0_EBX]      = {      0x24, 0, CPUID_EBX},
};

/*
 * Reverse CPUID and its derivatives can only be used for hardware-defined
 * feature words, i.e. words whose bits directly correspond to a CPUID leaf.
 * Retrieving a feature bit or masking guest CPUID from a Linux-defined word
 * is nonsensical as the bit number/mask is an arbitrary software-defined value
 * and can't be used by KVM to query/control guest capabilities.  And obviously
 * the leaf being queried must have an entry in the lookup table.
 */
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
	BUILD_BUG_ON(NR_CPUID_WORDS != NCAPINTS);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_5);
	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}

/*
 * Translate feature bits that are scattered in the kernel's cpufeatures word
 * into KVM feature words that align with hardware's definitions.
 */
static __always_inline u32 __feature_translate(int x86_feature)
{
#define KVM_X86_TRANSLATE_FEATURE(f)	\
	case X86_FEATURE_##f: return KVM_X86_FEATURE_##f

	switch (x86_feature) {
	KVM_X86_TRANSLATE_FEATURE(SGX1);
	KVM_X86_TRANSLATE_FEATURE(SGX2);
	KVM_X86_TRANSLATE_FEATURE(SGX_EDECCSSA);
	KVM_X86_TRANSLATE_FEATURE(CONSTANT_TSC);
	KVM_X86_TRANSLATE_FEATURE(PERFMON_V2);
	KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
	KVM_X86_TRANSLATE_FEATURE(BHI_CTRL);
	default:
		return x86_feature;
	}
}

static __always_inline u32 __feature_leaf(int x86_feature)
{
	u32 x86_leaf = __feature_translate(x86_feature) / 32;

	reverse_cpuid_check(x86_leaf);
	return x86_leaf;
}
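
/*
 * For example (an illustrative sketch using only definitions from this file
 * and cpufeatures.h): a scattered feature is rewritten before the word is
 * taken, so the result matches the hardware CPUID layout rather than the
 * kernel's scattered word:
 *
 *	__feature_leaf(X86_FEATURE_SGX1)
 *		-> __feature_translate() yields KVM_X86_FEATURE_SGX1
 *		-> KVM_X86_FEATURE_SGX1 / 32 == CPUID_12_EAX
 *
 * A feature that already lives in a hardware-defined word, e.g.
 * X86_FEATURE_FPU in CPUID_1_EDX, falls through __feature_translate()
 * unchanged.
 */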

/*
 * Retrieve the bit mask from an X86_FEATURE_* definition.  Features contain
 * the hardware defined bit number (stored in bits 4:0) and a software defined
 * "word" (stored in bits 31:5).  The word is used to index into arrays of
 * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
 */
static __always_inline u32 __feature_bit(int x86_feature)
{
	x86_feature = __feature_translate(x86_feature);

	reverse_cpuid_check(x86_feature / 32);
	return 1 << (x86_feature & 31);
}

#define feature_bit(name)  __feature_bit(X86_FEATURE_##name)

static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	return reverse_cpuid[x86_leaf];
}

static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						  u32 reg)
{
	switch (reg) {
	case CPUID_EAX:
		return &entry->eax;
	case CPUID_EBX:
		return &entry->ebx;
	case CPUID_ECX:
		return &entry->ecx;
	case CPUID_EDX:
		return &entry->edx;
	default:
		BUILD_BUG();
		return NULL;
	}
}

static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
					   unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	return *reg & __feature_bit(x86_feature);
}

static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	return cpuid_entry_get(entry, x86_feature);
}

static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
					      unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg &= ~__feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg |= __feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
					       unsigned int x86_feature,
					       bool set)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	/*
	 * Open coded instead of using cpuid_entry_{clear,set}() to coerce the
	 * compiler into using CMOV instead of Jcc when possible.
	 */
	if (set)
		*reg |= __feature_bit(x86_feature);
	else
		*reg &= ~__feature_bit(x86_feature);
}

#endif /* ARCH_X86_KVM_REVERSE_CPUID_H */
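
/*
 * Usage sketch (illustrative only; "entry" and "allow_psfd" are hypothetical
 * local variables, not taken from KVM proper): given a guest CPUID entry for
 * leaf 0x7, index 2, the accessors above resolve the register and bit mask
 * from the feature definition, so callers never hard-code EDX or bit numbers:
 *
 *	struct kvm_cpuid_entry2 *entry;
 *	bool allow_psfd;
 *
 *	if (cpuid_entry_has(entry, X86_FEATURE_MCDT_NO))
 *		cpuid_entry_clear(entry, X86_FEATURE_MCDT_NO);
 *	cpuid_entry_change(entry, X86_FEATURE_INTEL_PSFD, allow_psfd);
 */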